diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile index 60a17b7da83440..164c1c76971f09 100644 --- a/Documentation/DocBook/Makefile +++ b/Documentation/DocBook/Makefile @@ -7,12 +7,12 @@ # list of DOCBOOKS. DOCBOOKS := z8530book.xml \ - kernel-hacking.xml kernel-locking.xml deviceiobook.xml \ + kernel-hacking.xml kernel-locking.xml \ writing_usb_driver.xml networking.xml \ kernel-api.xml filesystems.xml lsm.xml kgdb.xml \ gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \ genericirq.xml s390-drivers.xml scsi.xml \ - sh.xml regulator.xml w1.xml \ + sh.xml w1.xml \ writing_musb_glue_layer.xml ifeq ($(DOCBOOKS),) diff --git a/Documentation/PCI/pcieaer-howto.txt b/Documentation/PCI/pcieaer-howto.txt index ea8cafba255c8b..acd0dddd6bb8ba 100644 --- a/Documentation/PCI/pcieaer-howto.txt +++ b/Documentation/PCI/pcieaer-howto.txt @@ -256,7 +256,7 @@ After reboot with new kernel or insert the module, a device file named Then, you need a user space tool named aer-inject, which can be gotten from: - http://www.kernel.org/pub/linux/utils/pci/aer-inject/ + https://git.kernel.org/cgit/linux/kernel/git/gong.chen/aer-inject.git/ More information about aer-inject can be found in the document comes with its source code. diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 986e44387dad49..facc20a3f96280 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -653,6 +653,9 @@ cpuidle.off=1 [CPU_IDLE] disable the cpuidle sub-system + cpufreq.off=1 [CPU_FREQ] + disable the cpufreq sub-system + cpu_init_udelay=N [X86] Delay for N microsec between assert and de-assert of APIC INIT to start processors. This delay occurs @@ -1183,6 +1186,12 @@ functions that can be changed at run time by the set_graph_notrace file in the debugfs tracing directory. + ftrace_graph_max_depth= + [FTRACE] Used with the function graph tracer. This is + the max depth it will trace into a function. This value + can be changed at run time by the max_graph_depth file + in the tracefs tracing directory. default: 0 (no limit) + gamecon.map[2|3]= [HW,JOY] Multisystem joystick and NES/SNES/PSX pad support via parallel port (up to 5 devices per port) @@ -1716,6 +1725,12 @@ kernel and module base offset ASLR (Address Space Layout Randomization). + kasan_multi_shot + [KNL] Enforce KASAN (Kernel Address Sanitizer) to print + report on every invalid memory access. Without this + parameter KASAN will print report only for the first + invalid access. + keepinitrd [HW,ARM] kernelcore= [KNL,X86,IA-64,PPC] diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt index a71b8095dbd8df..2f66683500b8e4 100644 --- a/Documentation/arm64/silicon-errata.txt +++ b/Documentation/arm64/silicon-errata.txt @@ -68,3 +68,4 @@ stable kernels. | | | | | | Qualcomm Tech. | Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 | | Qualcomm Tech. | Falkor v1 | E1009 | QCOM_FALKOR_ERRATUM_1009 | +| Qualcomm Tech. | QDF2400 ITS | E0065 | QCOM_QDF2400_ERRATUM_0065 | diff --git a/Documentation/cgroup-v2.txt b/Documentation/cgroup-v2.txt index 3b8449f8ac7e80..49d7c997fa1ee7 100644 --- a/Documentation/cgroup-v2.txt +++ b/Documentation/cgroup-v2.txt @@ -1142,16 +1142,17 @@ used by the kernel. pids.max - A read-write single value file which exists on non-root cgroups. The - default is "max". + A read-write single value file which exists on non-root + cgroups. The default is "max". 
- Hard limit of number of processes. + Hard limit of number of processes. pids.current - A read-only single value file which exists on all cgroups. + A read-only single value file which exists on all cgroups. - The number of processes currently in the cgroup and its descendants. + The number of processes currently in the cgroup and its + descendants. Organisational operations are not blocked by cgroup policies, so it is possible to have pids.current > pids.max. This can be done by either diff --git a/Documentation/conf.py b/Documentation/conf.py index f6823cf01275aa..7fadb3b8329343 100644 --- a/Documentation/conf.py +++ b/Documentation/conf.py @@ -135,7 +135,7 @@ # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False -primary_domain = 'C' +primary_domain = 'c' highlight_language = 'none' # -- Options for HTML output ---------------------------------------------- diff --git a/Documentation/dev-tools/kcov.rst b/Documentation/dev-tools/kcov.rst index 2c41b713841fd4..44886c91e112d4 100644 --- a/Documentation/dev-tools/kcov.rst +++ b/Documentation/dev-tools/kcov.rst @@ -10,7 +10,7 @@ Note that kcov does not aim to collect as much coverage as possible. It aims to collect more or less stable coverage that is function of syscall inputs. To achieve this goal it does not collect coverage in soft/hard interrupts and instrumentation of some inherently non-deterministic parts of kernel is -disbled (e.g. scheduler, locking). +disabled (e.g. scheduler, locking). Usage ----- diff --git a/Documentation/devicetree/bindings/arm/amlogic.txt b/Documentation/devicetree/bindings/arm/amlogic.txt index 9b2b41ab68177d..c246cd2730d906 100644 --- a/Documentation/devicetree/bindings/arm/amlogic.txt +++ b/Documentation/devicetree/bindings/arm/amlogic.txt @@ -40,6 +40,8 @@ Board compatible values: - "hardkernel,odroid-c2" (Meson gxbb) - "amlogic,p200" (Meson gxbb) - "amlogic,p201" (Meson gxbb) + - "wetek,hub" (Meson gxbb) + - "wetek,play2" (Meson gxbb) - "amlogic,p212" (Meson gxl s905x) - "amlogic,p230" (Meson gxl s905d) - "amlogic,p231" (Meson gxl s905d) diff --git a/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt b/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt index 30c546900b6021..07dbb358182ccd 100644 --- a/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt +++ b/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt @@ -45,7 +45,7 @@ The following clocks are available: - 1 15 SATA - 1 16 SATA USB - 1 17 Main - - 1 18 SD/MMC + - 1 18 SD/MMC/GOP - 1 21 Slow IO (SPI, NOR, BootROM, I2C, UART) - 1 22 USB3H0 - 1 23 USB3H1 @@ -65,7 +65,7 @@ Required properties: "cpm-audio", "cpm-communit", "cpm-nand", "cpm-ppv2", "cpm-sdio", "cpm-mg-domain", "cpm-mg-core", "cpm-xor1", "cpm-xor0", "cpm-gop-dp", "none", "cpm-pcie_x10", "cpm-pcie_x11", "cpm-pcie_x4", "cpm-pcie-xor", "cpm-sata", - "cpm-sata-usb", "cpm-main", "cpm-sd-mmc", "none", "none", "cpm-slow-io", + "cpm-sata-usb", "cpm-main", "cpm-sd-mmc-gop", "none", "none", "cpm-slow-io", "cpm-usb3h0", "cpm-usb3h1", "cpm-usb3dev", "cpm-eip150", "cpm-eip197"; Example: @@ -78,6 +78,6 @@ Example: gate-clock-output-names = "cpm-audio", "cpm-communit", "cpm-nand", "cpm-ppv2", "cpm-sdio", "cpm-mg-domain", "cpm-mg-core", "cpm-xor1", "cpm-xor0", "cpm-gop-dp", "none", "cpm-pcie_x10", "cpm-pcie_x11", "cpm-pcie_x4", "cpm-pcie-xor", "cpm-sata", - "cpm-sata-usb", "cpm-main", "cpm-sd-mmc", "none", "none", "cpm-slow-io", + "cpm-sata-usb", "cpm-main", 
"cpm-sd-mmc-gop", "none", "none", "cpm-slow-io", "cpm-usb3h0", "cpm-usb3h1", "cpm-usb3dev", "cpm-eip150", "cpm-eip197"; }; diff --git a/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt b/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt index a78265993665a6..ca5204b3bc218b 100644 --- a/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt +++ b/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt @@ -4,7 +4,6 @@ Required properties: - compatible: value should be one of the following "samsung,exynos3250-mipi-dsi" /* for Exynos3250/3472 SoCs */ "samsung,exynos4210-mipi-dsi" /* for Exynos4 SoCs */ - "samsung,exynos4415-mipi-dsi" /* for Exynos4415 SoC */ "samsung,exynos5410-mipi-dsi" /* for Exynos5410/5420/5440 SoCs */ "samsung,exynos5422-mipi-dsi" /* for Exynos5422/5800 SoCs */ "samsung,exynos5433-mipi-dsi" /* for Exynos5433 SoCs */ diff --git a/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt b/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt index 18645e0228b054..5837402c3adeae 100644 --- a/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt +++ b/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt @@ -11,7 +11,6 @@ Required properties: "samsung,s5pv210-fimd"; /* for S5PV210 SoC */ "samsung,exynos3250-fimd"; /* for Exynos3250/3472 SoCs */ "samsung,exynos4210-fimd"; /* for Exynos4 SoCs */ - "samsung,exynos4415-fimd"; /* for Exynos4415 SoC */ "samsung,exynos5250-fimd"; /* for Exynos5250 SoCs */ "samsung,exynos5420-fimd"; /* for Exynos5420/5422/5800 SoCs */ diff --git a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt index ea9c1c9607f612..520d61dad6dd7f 100644 --- a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt +++ b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt @@ -13,7 +13,7 @@ Required Properties: - "rockchip,rk2928-dw-mshc": for Rockchip RK2928 and following, before RK3288 - "rockchip,rk3288-dw-mshc": for Rockchip RK3288 - - "rockchip,rk1108-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK1108 + - "rockchip,rv1108-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RV1108 - "rockchip,rk3036-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3036 - "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3368 - "rockchip,rk3399-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3399 diff --git a/Documentation/devicetree/bindings/opp/opp.txt b/Documentation/devicetree/bindings/opp/opp.txt index ecdcfb79070494..63725498bd2068 100644 --- a/Documentation/devicetree/bindings/opp/opp.txt +++ b/Documentation/devicetree/bindings/opp/opp.txt @@ -188,14 +188,14 @@ Example 1: Single cluster Dual-core ARM cortex A9, switch DVFS states together. opp@1000000000 { opp-hz = /bits/ 64 <1000000000>; - opp-microvolt = <970000 975000 985000>; + opp-microvolt = <975000 970000 985000>; opp-microamp = <70000>; clock-latency-ns = <300000>; opp-suspend; }; opp@1100000000 { opp-hz = /bits/ 64 <1100000000>; - opp-microvolt = <980000 1000000 1010000>; + opp-microvolt = <1000000 980000 1010000>; opp-microamp = <80000>; clock-latency-ns = <310000>; }; @@ -267,14 +267,14 @@ independently. 
opp@1000000000 { opp-hz = /bits/ 64 <1000000000>; - opp-microvolt = <970000 975000 985000>; + opp-microvolt = <975000 970000 985000>; opp-microamp = <70000>; clock-latency-ns = <300000>; opp-suspend; }; opp@1100000000 { opp-hz = /bits/ 64 <1100000000>; - opp-microvolt = <980000 1000000 1010000>; + opp-microvolt = <1000000 980000 1010000>; opp-microamp = <80000>; clock-latency-ns = <310000>; }; @@ -343,14 +343,14 @@ DVFS state together. opp@1000000000 { opp-hz = /bits/ 64 <1000000000>; - opp-microvolt = <970000 975000 985000>; + opp-microvolt = <975000 970000 985000>; opp-microamp = <70000>; clock-latency-ns = <300000>; opp-suspend; }; opp@1100000000 { opp-hz = /bits/ 64 <1100000000>; - opp-microvolt = <980000 1000000 1010000>; + opp-microvolt = <1000000 980000 1010000>; opp-microamp = <80000>; clock-latency-ns = <310000>; }; @@ -369,7 +369,7 @@ DVFS state together. opp@1300000000 { opp-hz = /bits/ 64 <1300000000>; - opp-microvolt = <1045000 1050000 1055000>; + opp-microvolt = <1050000 1045000 1055000>; opp-microamp = <95000>; clock-latency-ns = <400000>; opp-suspend; @@ -382,7 +382,7 @@ DVFS state together. }; opp@1500000000 { opp-hz = /bits/ 64 <1500000000>; - opp-microvolt = <1010000 1100000 1110000>; + opp-microvolt = <1100000 1010000 1110000>; opp-microamp = <95000>; clock-latency-ns = <400000>; turbo-mode; @@ -424,9 +424,9 @@ Example 4: Handling multiple regulators opp@1000000000 { opp-hz = /bits/ 64 <1000000000>; - opp-microvolt = <970000 975000 985000>, /* Supply 0 */ - <960000 965000 975000>, /* Supply 1 */ - <960000 965000 975000>; /* Supply 2 */ + opp-microvolt = <975000 970000 985000>, /* Supply 0 */ + <965000 960000 975000>, /* Supply 1 */ + <965000 960000 975000>; /* Supply 2 */ opp-microamp = <70000>, /* Supply 0 */ <70000>, /* Supply 1 */ <70000>; /* Supply 2 */ @@ -437,9 +437,9 @@ Example 4: Handling multiple regulators opp@1000000000 { opp-hz = /bits/ 64 <1000000000>; - opp-microvolt = <970000 975000 985000>, /* Supply 0 */ - <960000 965000 975000>, /* Supply 1 */ - <960000 965000 975000>; /* Supply 2 */ + opp-microvolt = <975000 970000 985000>, /* Supply 0 */ + <965000 960000 975000>, /* Supply 1 */ + <965000 960000 975000>; /* Supply 2 */ opp-microamp = <70000>, /* Supply 0 */ <0>, /* Supply 1 doesn't need this */ <70000>; /* Supply 2 */ @@ -474,7 +474,7 @@ Example 5: opp-supported-hw */ opp-supported-hw = <0xF 0xFFFFFFFF 0xFFFFFFFF> opp-hz = /bits/ 64 <600000000>; - opp-microvolt = <900000 915000 925000>; + opp-microvolt = <915000 900000 925000>; ... }; @@ -487,7 +487,7 @@ Example 5: opp-supported-hw */ opp-supported-hw = <0x20 0xff0000ff 0x0000f4f0> opp-hz = /bits/ 64 <800000000>; - opp-microvolt = <900000 915000 925000>; + opp-microvolt = <915000 900000 925000>; ... 
}; }; @@ -512,18 +512,18 @@ Example 6: opp-microvolt-, opp-microamp-: opp@1000000000 { opp-hz = /bits/ 64 <1000000000>; - opp-microvolt-slow = <900000 915000 925000>; - opp-microvolt-fast = <970000 975000 985000>; + opp-microvolt-slow = <915000 900000 925000>; + opp-microvolt-fast = <975000 970000 985000>; opp-microamp-slow = <70000>; opp-microamp-fast = <71000>; }; opp@1200000000 { opp-hz = /bits/ 64 <1200000000>; - opp-microvolt-slow = <900000 915000 925000>, /* Supply vcc0 */ - <910000 925000 935000>; /* Supply vcc1 */ - opp-microvolt-fast = <970000 975000 985000>, /* Supply vcc0 */ - <960000 965000 975000>; /* Supply vcc1 */ + opp-microvolt-slow = <915000 900000 925000>, /* Supply vcc0 */ + <925000 910000 935000>; /* Supply vcc1 */ + opp-microvolt-fast = <975000 970000 985000>, /* Supply vcc0 */ + <965000 960000 975000>; /* Supply vcc1 */ opp-microamp = <70000>; /* Will be used for both slow/fast */ }; }; diff --git a/Documentation/devicetree/bindings/phy/brcm,nsp-usb3-phy.txt b/Documentation/devicetree/bindings/phy/brcm,nsp-usb3-phy.txt deleted file mode 100644 index e68ae5dec9c9ef..00000000000000 --- a/Documentation/devicetree/bindings/phy/brcm,nsp-usb3-phy.txt +++ /dev/null @@ -1,39 +0,0 @@ -Broadcom USB3 phy binding for northstar plus SoC -The USB3 phy is internal to the SoC and is accessed using mdio interface. - -Required mdio bus properties: -- reg: Should be 0x0 for SoC internal USB3 phy -- #address-cells: must be 1 -- #size-cells: must be 0 - -Required USB3 PHY properties: -- compatible: should be "brcm,nsp-usb3-phy" -- reg: USB3 Phy address on SoC internal MDIO bus and it should be 0x10. -- usb3-ctrl-syscon: handler of syscon node defining physical address - of usb3 control register. -- #phy-cells: must be 0 - -Required usb3 control properties: -- compatible: should be "brcm,nsp-usb3-ctrl" -- reg: offset and length of the control registers - -Example: - - mdio@0 { - reg = <0x0>; - #address-cells = <1>; - #size-cells = <0>; - - usb3_phy: usb-phy@10 { - compatible = "brcm,nsp-usb3-phy"; - reg = <0x10>; - usb3-ctrl-syscon = <&usb3_ctrl>; - #phy-cells = <0>; - status = "disabled"; - }; - }; - - usb3_ctrl: syscon@104408 { - compatible = "brcm,nsp-usb3-ctrl", "syscon"; - reg = <0x104408 0x3fc>; - }; diff --git a/Documentation/devicetree/bindings/powerpc/4xx/emac.txt b/Documentation/devicetree/bindings/powerpc/4xx/emac.txt index 712baf6c3e246f..44b842b6ca154d 100644 --- a/Documentation/devicetree/bindings/powerpc/4xx/emac.txt +++ b/Documentation/devicetree/bindings/powerpc/4xx/emac.txt @@ -71,6 +71,9 @@ For Axon it can be absent, though my current driver doesn't handle phy-address yet so for now, keep 0x00ffffff in it. + - phy-handle : Used to describe configurations where a external PHY + is used. Please refer to: + Documentation/devicetree/bindings/net/ethernet.txt - rx-fifo-size-gige : 1 cell, Rx fifo size in bytes for 1000 Mb/sec operations (if absent the value is the same as rx-fifo-size). For Axon, either absent or 2048. @@ -81,8 +84,22 @@ offload, phandle of the TAH device node. - tah-channel : 1 cell, optional. If appropriate, channel used on the TAH engine. + - fixed-link : Fixed-link subnode describing a link to a non-MDIO + managed entity. See + Documentation/devicetree/bindings/net/fixed-link.txt + for details. + - mdio subnode : When the EMAC has a phy connected to its local + mdio, which us supported by the kernel's network + PHY library in drivers/net/phy, there must be device + tree subnode with the following required properties: + - #address-cells: Must be <1>. 
+ - #size-cells: Must be <0>. - Example: + For PHY definitions: Please refer to + Documentation/devicetree/bindings/net/phy.txt and + Documentation/devicetree/bindings/net/ethernet.txt + + Examples: EMAC0: ethernet@40000800 { device_type = "network"; @@ -104,6 +121,48 @@ zmii-channel = <0>; }; + EMAC1: ethernet@ef600c00 { + device_type = "network"; + compatible = "ibm,emac-apm821xx", "ibm,emac4sync"; + interrupt-parent = <&EMAC1>; + interrupts = <0 1>; + #interrupt-cells = <1>; + #address-cells = <0>; + #size-cells = <0>; + interrupt-map = <0 &UIC2 0x10 IRQ_TYPE_LEVEL_HIGH /* Status */ + 1 &UIC2 0x14 IRQ_TYPE_LEVEL_HIGH /* Wake */>; + reg = <0xef600c00 0x000000c4>; + local-mac-address = [000000000000]; /* Filled in by U-Boot */ + mal-device = <&MAL0>; + mal-tx-channel = <0>; + mal-rx-channel = <0>; + cell-index = <0>; + max-frame-size = <9000>; + rx-fifo-size = <16384>; + tx-fifo-size = <2048>; + fifo-entry-size = <10>; + phy-mode = "rgmii"; + phy-handle = <&phy0>; + phy-map = <0x00000000>; + rgmii-device = <&RGMII0>; + rgmii-channel = <0>; + tah-device = <&TAH0>; + tah-channel = <0>; + has-inverted-stacr-oc; + has-new-stacr-staopc; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + + phy0: ethernet-phy@0 { + compatible = "ethernet-phy-ieee802.3-c22"; + reg = <0>; + }; + }; + }; + + ii) McMAL node Required properties: @@ -145,4 +204,3 @@ - revision : as provided by the RGMII new version register if available. For Axon: 0x0000012a - diff --git a/Documentation/devicetree/bindings/regulator/ti-abb-regulator.txt b/Documentation/devicetree/bindings/regulator/ti-abb-regulator.txt index c3f6546ebac777..6a23ad9ac53a4c 100644 --- a/Documentation/devicetree/bindings/regulator/ti-abb-regulator.txt +++ b/Documentation/devicetree/bindings/regulator/ti-abb-regulator.txt @@ -45,7 +45,7 @@ Required Properties: Optional Properties: - reg-names: In addition to the required properties, the following are optional - "efuse-address" - Contains efuse base address used to pick up ABB info. - - "ldo-address" - Contains address of ABB LDO overide register address. + - "ldo-address" - Contains address of ABB LDO override register. "efuse-address" is required for this. - ti,ldovbb-vset-mask - Required if ldo-address is set, mask for LDO override register to provide override vset value. diff --git a/Documentation/devicetree/bindings/rng/omap_rng.txt b/Documentation/devicetree/bindings/rng/omap_rng.txt index 471477299ece16..9cf7876ab43444 100644 --- a/Documentation/devicetree/bindings/rng/omap_rng.txt +++ b/Documentation/devicetree/bindings/rng/omap_rng.txt @@ -12,7 +12,8 @@ Required properties: - reg : Offset and length of the register set for the module - interrupts : the interrupt number for the RNG module. Used for "ti,omap4-rng" and "inside-secure,safexcel-eip76" -- clocks: the trng clock source +- clocks: the trng clock source. Only mandatory for the + "inside-secure,safexcel-eip76" compatible. 
Example: /* AM335x */ diff --git a/Documentation/devicetree/bindings/usb/usb251xb.txt b/Documentation/devicetree/bindings/usb/usb251xb.txt index 0c065f77658f13..3957d4edaa745f 100644 --- a/Documentation/devicetree/bindings/usb/usb251xb.txt +++ b/Documentation/devicetree/bindings/usb/usb251xb.txt @@ -7,18 +7,18 @@ Required properties : - compatible : Should be "microchip,usb251xb" or one of the specific types: "microchip,usb2512b", "microchip,usb2512bi", "microchip,usb2513b", "microchip,usb2513bi", "microchip,usb2514b", "microchip,usb2514bi" - - hub-reset-gpios : Should specify the gpio for hub reset + - reset-gpios : Should specify the gpio for hub reset + - reg : I2C address on the selected bus (default is <0x2C>) Optional properties : - - reg : I2C address on the selected bus (default is <0x2C>) - skip-config : Skip Hub configuration, but only send the USB-Attach command - - vendor-id : USB Vendor ID of the hub (16 bit, default is 0x0424) - - product-id : USB Product ID of the hub (16 bit, default depends on type) - - device-id : USB Device ID of the hub (16 bit, default is 0x0bb3) - - language-id : USB Language ID (16 bit, default is 0x0000) - - manufacturer : USB Manufacturer string (max 31 characters long) - - product : USB Product string (max 31 characters long) - - serial : USB Serial string (max 31 characters long) + - vendor-id : Set USB Vendor ID of the hub (16 bit, default is 0x0424) + - product-id : Set USB Product ID of the hub (16 bit, default depends on type) + - device-id : Set USB Device ID of the hub (16 bit, default is 0x0bb3) + - language-id : Set USB Language ID (16 bit, default is 0x0000) + - manufacturer : Set USB Manufacturer string (max 31 characters long) + - product : Set USB Product string (max 31 characters long) + - serial : Set USB Serial string (max 31 characters long) - {bus,self}-powered : selects between self- and bus-powered operation (default is self-powered) - disable-hi-speed : disable USB Hi-Speed support @@ -31,8 +31,10 @@ Optional properties : (default is individual) - dynamic-power-switching : enable auto-switching from self- to bus-powered operation if the local power source is removed or unavailable - - oc-delay-{100us,4ms,8ms,16ms} : set over current timer delay (default is 8ms) - - compound-device : indicated the hub is part of a compound device + - oc-delay-us : Delay time (in microseconds) for filtering the over-current + sense inputs. Valid values are 100, 4000, 8000 (default) and 16000. If + an invalid value is given, the default is used instead. + - compound-device : indicate the hub is part of a compound device - port-mapping-mode : enable port mapping mode - string-support : enable string descriptor support (required for manufacturer, product and serial string configuration) @@ -40,34 +42,15 @@ Optional properties : device connected. - sp-disabled-ports : Specifies the ports which will be self-power disabled - bp-disabled-ports : Specifies the ports which will be bus-power disabled - - max-sp-power : Specifies the maximum current the hub consumes from an - upstream port when operating as self-powered hub including the power - consumption of a permanently attached peripheral if the hub is - configured as a compound device. The value is given in mA in a 0 - 500 - range (default is 2). - - max-bp-power : Specifies the maximum current the hub consumes from an - upstream port when operating as bus-powered hub including the power - consumption of a permanently attached peripheral if the hub is - configured as a compound device. 
The value is given in mA in a 0 - 500 - range (default is 100). - - max-sp-current : Specifies the maximum current the hub consumes from an - upstream port when operating as self-powered hub EXCLUDING the power - consumption of a permanently attached peripheral if the hub is - configured as a compound device. The value is given in mA in a 0 - 500 - range (default is 2). - - max-bp-current : Specifies the maximum current the hub consumes from an - upstream port when operating as bus-powered hub EXCLUDING the power - consumption of a permanently attached peripheral if the hub is - configured as a compound device. The value is given in mA in a 0 - 500 - range (default is 100). - - power-on-time : Specifies the time it takes from the time the host initiates - the power-on sequence to a port until the port has adequate power. The - value is given in ms in a 0 - 510 range (default is 100ms). + - power-on-time-ms : Specifies the time it takes from the time the host + initiates the power-on sequence to a port until the port has adequate + power. The value is given in ms in a 0 - 510 range (default is 100ms). Examples: usb2512b@2c { compatible = "microchip,usb2512b"; - hub-reset-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>; + reg = <0x2c>; + reset-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>; }; usb2514b@2c { diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index bd0ed3cb49946c..ec0bfb9bbebd42 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt @@ -332,6 +332,7 @@ virtio Virtual I/O Device Specification, developed by the OASIS consortium vivante Vivante Corporation voipac Voipac Technologies s.r.o. wd Western Digital Corp. +wetek WeTek Electronics, limited. wexler Wexler winbond Winbond Electronics corp. 
wlf Wolfson Microelectronics diff --git a/Documentation/extcon/intel-int3496.txt b/Documentation/extcon/intel-int3496.txt index af0b366c25b733..8155dbc7fad36a 100644 --- a/Documentation/extcon/intel-int3496.txt +++ b/Documentation/extcon/intel-int3496.txt @@ -20,3 +20,8 @@ Index 1: The output gpio for enabling Vbus output from the device to the otg Index 2: The output gpio for muxing of the data pins between the USB host and the USB peripheral controller, write 1 to mux to the peripheral controller + +There is a mapping between indices and GPIO connection IDs as follows + id index 0 + vbus index 1 + mux index 2 diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking index ace63cd7af8c0d..fdcfdd79682a00 100644 --- a/Documentation/filesystems/Locking +++ b/Documentation/filesystems/Locking @@ -58,7 +58,8 @@ prototypes: int (*permission) (struct inode *, int, unsigned int); int (*get_acl)(struct inode *, int); int (*setattr) (struct dentry *, struct iattr *); - int (*getattr) (struct vfsmount *, struct dentry *, struct kstat *); + int (*getattr) (const struct path *, struct dentry *, struct kstat *, + u32, unsigned int); ssize_t (*listxattr) (struct dentry *, char *, size_t); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len); void (*update_time)(struct inode *, struct timespec *, int); diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt index b968084eeac14b..569211703721fe 100644 --- a/Documentation/filesystems/vfs.txt +++ b/Documentation/filesystems/vfs.txt @@ -382,7 +382,8 @@ struct inode_operations { int (*permission) (struct inode *, int); int (*get_acl)(struct inode *, int); int (*setattr) (struct dentry *, struct iattr *); - int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *); + int (*getattr) (const struct path *, struct dentry *, struct kstat *, + u32, unsigned int); ssize_t (*listxattr) (struct dentry *, char *, size_t); void (*update_time)(struct inode *, struct timespec *, int); int (*atomic_open)(struct inode *, struct dentry *, struct file *, diff --git a/Documentation/gcc-plugins.txt b/Documentation/gcc-plugins.txt index 891c6946443482..433eaefb4aa171 100644 --- a/Documentation/gcc-plugins.txt +++ b/Documentation/gcc-plugins.txt @@ -18,8 +18,8 @@ because gcc versions 4.5 and 4.6 are compiled by a C compiler, gcc-4.7 can be compiled by a C or a C++ compiler, and versions 4.8+ can only be compiled by a C++ compiler. -Currently the GCC plugin infrastructure supports only the x86, arm and arm64 -architectures. +Currently the GCC plugin infrastructure supports only the x86, arm, arm64 and +powerpc architectures. This infrastructure was ported from grsecurity [6] and PaX [7]. diff --git a/Documentation/media/v4l-drivers/bttv.rst b/Documentation/media/v4l-drivers/bttv.rst index bc63b12efafd0c..195ccaac281615 100644 --- a/Documentation/media/v4l-drivers/bttv.rst +++ b/Documentation/media/v4l-drivers/bttv.rst @@ -312,7 +312,7 @@ information out of a register+stack dump printed by the kernel on protection faults (so-called "kernel oops"). If you run into some kind of deadlock, you can try to dump a call trace -for each process using sysrq-t (see Documentation/sysrq.txt). +for each process using sysrq-t (see Documentation/admin-guide/sysrq.rst). This way it is possible to figure where *exactly* some process in "D" state is stuck. 
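As a side note to the bttv.rst hunk above: the sysrq-t call-trace dump it refers to can also be requested without a console keyboard by writing the command character to /proc/sysrq-trigger. A minimal sketch, not part of the patch itself (assumes sysrq is enabled via the kernel.sysrq sysctl and the program runs as root; only the file name and the 't' command are standard, the rest is illustrative):

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/proc/sysrq-trigger", O_WRONLY);

		if (fd < 0)
			return 1;
		write(fd, "t", 1);	/* 't' dumps the state of every task */
		close(fd);
		return 0;
	}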
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index fc73eeb7b3b8b1..ab02304613771b 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -1006,7 +1006,8 @@ accept_redirects - BOOLEAN FALSE (router) forwarding - BOOLEAN - Enable IP forwarding on this interface. + Enable IP forwarding on this interface. This controls whether packets + received _on_ this interface can be forwarded. mc_forwarding - BOOLEAN Do multicast routing. The kernel needs to be compiled with CONFIG_MROUTE diff --git a/Documentation/power/pm_qos_interface.txt b/Documentation/power/pm_qos_interface.txt index 129f7c0e148398..21d2d48f87a254 100644 --- a/Documentation/power/pm_qos_interface.txt +++ b/Documentation/power/pm_qos_interface.txt @@ -163,8 +163,7 @@ of flags and remove sysfs attributes pm_qos_no_power_off and pm_qos_remote_wakeu under the device's power directory. Notification mechanisms: -The per-device PM QoS framework has 2 different and distinct notification trees: -a per-device notification tree and a global notification tree. +The per-device PM QoS framework has a per-device notification tree. int dev_pm_qos_add_notifier(device, notifier): Adds a notification callback function for the device. @@ -174,16 +173,6 @@ is changed (for resume latency device PM QoS only). int dev_pm_qos_remove_notifier(device, notifier): Removes the notification callback function for the device. -int dev_pm_qos_add_global_notifier(notifier): -Adds a notification callback function in the global notification tree of the -framework. -The callback is called when the aggregated value for any device is changed -(for resume latency device PM QoS only). - -int dev_pm_qos_remove_global_notifier(notifier): -Removes the notification callback function from the global notification tree -of the framework. - Active state latency tolerance diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt index 4870980e967e01..64546eb9a16a11 100644 --- a/Documentation/power/runtime_pm.txt +++ b/Documentation/power/runtime_pm.txt @@ -100,7 +100,7 @@ knows what to do to handle the device). * If the suspend callback returns an error code different from -EBUSY and -EAGAIN, the PM core regards this as a fatal error and will refuse to run the helper functions described in Section 4 for the device until its status - is directly set to either'active', or 'suspended' (the PM core provides + is directly set to either 'active', or 'suspended' (the PM core provides special helper functions for this purpose). In particular, if the driver requires remote wakeup capability (i.e. hardware @@ -217,7 +217,7 @@ defined in include/linux/pm.h: one to complete spinlock_t lock; - - lock used for synchronisation + - lock used for synchronization atomic_t usage_count; - the usage counter of the device @@ -565,7 +565,7 @@ appropriate to ensure that the device is not put back to sleep during the probe. This can happen with systems such as the network device layer. It may be desirable to suspend the device once ->probe() has finished. -Therefore the driver core uses the asyncronous pm_request_idle() to submit a +Therefore the driver core uses the asynchronous pm_request_idle() to submit a request to execute the subsystem-level idle callback for the device at that time. A driver that makes use of the runtime autosuspend feature, may want to update the last busy mark before returning from ->probe(). 
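The runtime_pm.txt hunk above notes that the driver core issues an asynchronous pm_request_idle() once ->probe() returns, and that drivers using runtime autosuspend may want to refresh the last-busy mark first. A minimal sketch of that pattern, not part of the patch itself (the foo_probe() name and the 2-second delay are illustrative only):

	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>

	static int foo_probe(struct platform_device *pdev)
	{
		pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
		pm_runtime_use_autosuspend(&pdev->dev);
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);

		/* ... device-specific setup ... */

		/* Refresh the last-busy mark before the core's pm_request_idle(). */
		pm_runtime_mark_last_busy(&pdev->dev);
		return 0;
	}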
diff --git a/Documentation/s390/Debugging390.txt b/Documentation/s390/Debugging390.txt index 3df8babcdc41ed..5ae7f868a007bd 100644 --- a/Documentation/s390/Debugging390.txt +++ b/Documentation/s390/Debugging390.txt @@ -2116,7 +2116,7 @@ The sysrq key reading is very picky ( I have to type the keys in an This is particularly useful for syncing disks unmounting & rebooting if the machine gets partially hung. -Read Documentation/sysrq.txt for more info +Read Documentation/admin-guide/sysrq.rst for more info References: =========== diff --git a/Documentation/security/keys.txt b/Documentation/security/keys.txt index 3849814bfe6dd2..0e03baf271bdb0 100644 --- a/Documentation/security/keys.txt +++ b/Documentation/security/keys.txt @@ -1151,8 +1151,21 @@ access the data: usage. This is called key->payload.rcu_data0. The following accessors wrap the RCU calls to this element: - rcu_assign_keypointer(struct key *key, void *data); - void *rcu_dereference_key(struct key *key); + (a) Set or change the first payload pointer: + + rcu_assign_keypointer(struct key *key, void *data); + + (b) Read the first payload pointer with the key semaphore held: + + [const] void *dereference_key_locked([const] struct key *key); + + Note that the return value will inherit its constness from the key + parameter. Static analysis will give an error if it things the lock + isn't held. + + (c) Read the first payload pointer with the RCU read lock held: + + const void *dereference_key_rcu(const struct key *key); =================== diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt index a32b4b74864498..bac23c19836050 100644 --- a/Documentation/sysctl/kernel.txt +++ b/Documentation/sysctl/kernel.txt @@ -85,7 +85,7 @@ show up in /proc/sys/kernel: - softlockup_all_cpu_backtrace - soft_watchdog - stop-a [ SPARC only ] -- sysrq ==> Documentation/sysrq.txt +- sysrq ==> Documentation/admin-guide/sysrq.rst - sysctl_writes_strict - tainted - threads-max diff --git a/Documentation/trace/kprobetrace.txt b/Documentation/trace/kprobetrace.txt index e4991fb1eedcd4..41ef9d8efe9517 100644 --- a/Documentation/trace/kprobetrace.txt +++ b/Documentation/trace/kprobetrace.txt @@ -12,7 +12,7 @@ kprobes can probe (this means, all functions body except for __kprobes functions). Unlike the Tracepoint based event, this can be added and removed dynamically, on the fly. -To enable this feature, build your kernel with CONFIG_KPROBE_EVENT=y. +To enable this feature, build your kernel with CONFIG_KPROBE_EVENTS=y. Similar to the events tracer, this doesn't need to be activated via current_tracer. Instead of that, add probe points via diff --git a/Documentation/trace/uprobetracer.txt b/Documentation/trace/uprobetracer.txt index fa7b680ee8a005..bf526a7c5559a8 100644 --- a/Documentation/trace/uprobetracer.txt +++ b/Documentation/trace/uprobetracer.txt @@ -7,7 +7,7 @@ Overview -------- Uprobe based trace events are similar to kprobe based trace events. -To enable this feature, build your kernel with CONFIG_UPROBE_EVENT=y. +To enable this feature, build your kernel with CONFIG_UPROBE_EVENTS=y. Similar to the kprobe-event tracer, this doesn't need to be activated via current_tracer. 
Instead of that, add probe points via diff --git a/Documentation/translations/ko_KR/memory-barriers.txt b/Documentation/translations/ko_KR/memory-barriers.txt index a3228a676cc18b..ce0b48d69eaae9 100644 --- a/Documentation/translations/ko_KR/memory-barriers.txt +++ b/Documentation/translations/ko_KR/memory-barriers.txt @@ -662,6 +662,10 @@ include/linux/rcupdate.h 의 rcu_assign_pointer() 와 rcu_dereference() 를 컨트롤 의존성 ------------- +현재의 컴파일러들은 컨트롤 의존성을 이해하고 있지 않기 때문에 컨트롤 의존성은 +약간 다루기 어려울 수 있습니다. 이 섹션의 목적은 여러분이 컴파일러의 무시로 +인해 여러분의 코드가 망가지는 걸 막을 수 있도록 돕는겁니다. + 로드-로드 컨트롤 의존성은 데이터 의존성 배리어만으로는 정확히 동작할 수가 없어서 읽기 메모리 배리어를 필요로 합니다. 아래의 코드를 봅시다: @@ -689,20 +693,21 @@ CPU 는 b 로부터의 로드 오퍼레이션이 a 로부터의 로드 오퍼레 q = READ_ONCE(a); if (q) { - WRITE_ONCE(b, p); + WRITE_ONCE(b, 1); } 컨트롤 의존성은 보통 다른 타입의 배리어들과 짝을 맞춰 사용됩니다. 그렇다곤 -하나, READ_ONCE() 는 반드시 사용해야 함을 부디 명심하세요! READ_ONCE() 가 -없다면, 컴파일러가 'a' 로부터의 로드를 'a' 로부터의 또다른 로드와, 'b' 로의 -스토어를 'b' 로의 또다른 스토어와 조합해 버려 매우 비직관적인 결과를 초래할 수 -있습니다. +하나, READ_ONCE() 도 WRITE_ONCE() 도 선택사항이 아니라 필수사항임을 부디 +명심하세요! READ_ONCE() 가 없다면, 컴파일러는 'a' 로부터의 로드를 'a' 로부터의 +또다른 로드와 조합할 수 있습니다. WRITE_ONCE() 가 없다면, 컴파일러는 'b' 로의 +스토어를 'b' 로의 또라느 스토어들과 조합할 수 있습니다. 두 경우 모두 순서에 +있어 상당히 비직관적인 결과를 초래할 수 있습니다. 이걸로 끝이 아닌게, 컴파일러가 변수 'a' 의 값이 항상 0이 아니라고 증명할 수 있다면, 앞의 예에서 "if" 문을 없애서 다음과 같이 최적화 할 수도 있습니다: q = a; - b = p; /* BUG: Compiler and CPU can both reorder!!! */ + b = 1; /* BUG: Compiler and CPU can both reorder!!! */ 그러니 READ_ONCE() 를 반드시 사용하세요. @@ -712,11 +717,11 @@ CPU 는 b 로부터의 로드 오퍼레이션이 a 로부터의 로드 오퍼레 q = READ_ONCE(a); if (q) { barrier(); - WRITE_ONCE(b, p); + WRITE_ONCE(b, 1); do_something(); } else { barrier(); - WRITE_ONCE(b, p); + WRITE_ONCE(b, 1); do_something_else(); } @@ -725,12 +730,12 @@ CPU 는 b 로부터의 로드 오퍼레이션이 a 로부터의 로드 오퍼레 q = READ_ONCE(a); barrier(); - WRITE_ONCE(b, p); /* BUG: No ordering vs. load from a!!! */ + WRITE_ONCE(b, 1); /* BUG: No ordering vs. load from a!!! */ if (q) { - /* WRITE_ONCE(b, p); -- moved up, BUG!!! */ + /* WRITE_ONCE(b, 1); -- moved up, BUG!!! */ do_something(); } else { - /* WRITE_ONCE(b, p); -- moved up, BUG!!! */ + /* WRITE_ONCE(b, 1); -- moved up, BUG!!! */ do_something_else(); } @@ -742,10 +747,10 @@ CPU 는 b 로부터의 로드 오퍼레이션이 a 로부터의 로드 오퍼레 q = READ_ONCE(a); if (q) { - smp_store_release(&b, p); + smp_store_release(&b, 1); do_something(); } else { - smp_store_release(&b, p); + smp_store_release(&b, 1); do_something_else(); } @@ -754,10 +759,10 @@ CPU 는 b 로부터의 로드 오퍼레이션이 a 로부터의 로드 오퍼레 q = READ_ONCE(a); if (q) { - WRITE_ONCE(b, p); + WRITE_ONCE(b, 1); do_something(); } else { - WRITE_ONCE(b, r); + WRITE_ONCE(b, 2); do_something_else(); } @@ -770,10 +775,10 @@ CPU 는 b 로부터의 로드 오퍼레이션이 a 로부터의 로드 오퍼레 q = READ_ONCE(a); if (q % MAX) { - WRITE_ONCE(b, p); + WRITE_ONCE(b, 1); do_something(); } else { - WRITE_ONCE(b, r); + WRITE_ONCE(b, 2); do_something_else(); } @@ -781,7 +786,7 @@ CPU 는 b 로부터의 로드 오퍼레이션이 a 로부터의 로드 오퍼레 위의 코드를 아래와 같이 바꿔버릴 수 있습니다: q = READ_ONCE(a); - WRITE_ONCE(b, p); + WRITE_ONCE(b, 1); do_something_else(); 이렇게 되면, CPU 는 변수 'a' 로부터의 로드와 변수 'b' 로의 스토어 사이의 순서를 @@ -793,10 +798,10 @@ CPU 는 b 로부터의 로드 오퍼레이션이 a 로부터의 로드 오퍼레 q = READ_ONCE(a); BUILD_BUG_ON(MAX <= 1); /* Order load from a with store to b. 
*/ if (q % MAX) { - WRITE_ONCE(b, p); + WRITE_ONCE(b, 1); do_something(); } else { - WRITE_ONCE(b, r); + WRITE_ONCE(b, 2); do_something_else(); } @@ -828,35 +833,33 @@ CPU 는 b 로부터의 로드 오퍼레이션이 a 로부터의 로드 오퍼레 q = READ_ONCE(a); if (q) { - WRITE_ONCE(b, p); + WRITE_ONCE(b, 1); } else { - WRITE_ONCE(b, r); + WRITE_ONCE(b, 2); } - WRITE_ONCE(c, 1); /* BUG: No ordering against the read from "a". */ + WRITE_ONCE(c, 1); /* BUG: No ordering against the read from 'a'. */ -컴파일러는 volatile 타입에 대한 액세스를 재배치 할 수 없고 이 조건 하의 "b" +컴파일러는 volatile 타입에 대한 액세스를 재배치 할 수 없고 이 조건 하의 'b' 로의 쓰기를 재배치 할 수 없기 때문에 여기에 순서 규칙이 존재한다고 주장하고 싶을 겁니다. 불행히도 이 경우에, 컴파일러는 다음의 가상의 pseudo-assembly 언어 -코드처럼 "b" 로의 두개의 쓰기 오퍼레이션을 conditional-move 인스트럭션으로 +코드처럼 'b' 로의 두개의 쓰기 오퍼레이션을 conditional-move 인스트럭션으로 번역할 수 있습니다: ld r1,a - ld r2,p - ld r3,r cmp r1,$0 - cmov,ne r4,r2 - cmov,eq r4,r3 + cmov,ne r4,$1 + cmov,eq r4,$2 st r4,b st $1,c -완화된 순서 규칙의 CPU 는 "a" 로부터의 로드와 "c" 로의 스토어 사이에 어떤 +완화된 순서 규칙의 CPU 는 'a' 로부터의 로드와 'c' 로의 스토어 사이에 어떤 종류의 의존성도 갖지 않을 겁니다. 이 컨트롤 의존성은 두개의 cmov 인스트럭션과 거기에 의존하는 스토어 에게만 적용될 겁니다. 짧게 말하자면, 컨트롤 의존성은 주어진 if 문의 then 절과 else 절에게만 (그리고 이 두 절 내에서 호출되는 함수들에게까지) 적용되지, 이 if 문을 뒤따르는 코드에는 적용되지 않습니다. 마지막으로, 컨트롤 의존성은 이행성 (transitivity) 을 제공하지 -않습니다-. 이건 -x 와 y 가 둘 다 0 이라는 초기값을 가졌다는 가정 하의 두개의 예제로 +'x' 와 'y' 가 둘 다 0 이라는 초기값을 가졌다는 가정 하의 두개의 예제로 보이겠습니다: CPU 0 CPU 1 @@ -924,6 +927,9 @@ http://www.cl.cam.ac.uk/users/pes20/ppc-supplemental/test6.pdf 와 (*) 컨트롤 의존성은 이행성을 제공하지 -않습니다-. 이행성이 필요하다면, smp_mb() 를 사용하세요. + (*) 컴파일러는 컨트롤 의존성을 이해하고 있지 않습니다. 따라서 컴파일러가 + 여러분의 코드를 망가뜨리지 않도록 하는건 여러분이 해야 하는 일입니다. + SMP 배리어 짝맞추기 -------------------- diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 069450938b795d..fd106899afd1b2 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -951,6 +951,10 @@ This ioctl allows the user to create or modify a guest physical memory slot. When changing an existing slot, it may be moved in the guest physical memory space, or its flags may be modified. It may not be resized. Slots may not overlap in guest physical address space. +Bits 0-15 of "slot" specifies the slot id and this value should be +less than the maximum number of user memory slots supported per VM. +The maximum allowed slots can be queried using KVM_CAP_NR_MEMSLOTS, +if this capability is supported by the architecture. If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of "slot" specifies the address space which is being modified. They must be @@ -3373,6 +3377,69 @@ struct kvm_ppc_resize_hpt { __u32 pad; }; +4.104 KVM_X86_GET_MCE_CAP_SUPPORTED + +Capability: KVM_CAP_MCE +Architectures: x86 +Type: system ioctl +Parameters: u64 mce_cap (out) +Returns: 0 on success, -1 on error + +Returns supported MCE capabilities. The u64 mce_cap parameter +has the same format as the MSR_IA32_MCG_CAP register. Supported +capabilities will have the corresponding bits set. + +4.105 KVM_X86_SETUP_MCE + +Capability: KVM_CAP_MCE +Architectures: x86 +Type: vcpu ioctl +Parameters: u64 mcg_cap (in) +Returns: 0 on success, + -EFAULT if u64 mcg_cap cannot be read, + -EINVAL if the requested number of banks is invalid, + -EINVAL if requested MCE capability is not supported. + +Initializes MCE support for use. The u64 mcg_cap parameter +has the same format as the MSR_IA32_MCG_CAP register and +specifies which capabilities should be enabled. The maximum +supported number of error-reporting banks can be retrieved when +checking for KVM_CAP_MCE. 
The supported capabilities can be +retrieved with KVM_X86_GET_MCE_CAP_SUPPORTED. + +4.106 KVM_X86_SET_MCE + +Capability: KVM_CAP_MCE +Architectures: x86 +Type: vcpu ioctl +Parameters: struct kvm_x86_mce (in) +Returns: 0 on success, + -EFAULT if struct kvm_x86_mce cannot be read, + -EINVAL if the bank number is invalid, + -EINVAL if VAL bit is not set in status field. + +Inject a machine check error (MCE) into the guest. The input +parameter is: + +struct kvm_x86_mce { + __u64 status; + __u64 addr; + __u64 misc; + __u64 mcg_status; + __u8 bank; + __u8 pad1[7]; + __u64 pad2[3]; +}; + +If the MCE being reported is an uncorrected error, KVM will +inject it as an MCE exception into the guest. If the guest +MCG_STATUS register reports that an MCE is in progress, KVM +causes an KVM_EXIT_SHUTDOWN vmexit. + +Otherwise, if the MCE is a corrected error, KVM will just +store it in the corresponding bank (provided this bank is +not holding a previously reported uncorrected error). + 5. The kvm_run structure ------------------------ diff --git a/Documentation/virtual/uml/UserModeLinux-HOWTO.txt b/Documentation/virtual/uml/UserModeLinux-HOWTO.txt index f4099ca6b48354..87b80f589e1c01 100644 --- a/Documentation/virtual/uml/UserModeLinux-HOWTO.txt +++ b/Documentation/virtual/uml/UserModeLinux-HOWTO.txt @@ -2401,9 +2401,9 @@ This takes one argument, which is a single letter. It calls the generic kernel's SysRq driver, which does whatever is called for by - that argument. See the SysRq documentation in Documentation/sysrq.txt - in your favorite kernel tree to see what letters are valid and what - they do. + that argument. See the SysRq documentation in + Documentation/admin-guide/sysrq.rst in your favorite kernel tree to + see what letters are valid and what they do. diff --git a/Documentation/vm/userfaultfd.txt b/Documentation/vm/userfaultfd.txt index 0e5543a920e5b2..bb2f945f87ab6a 100644 --- a/Documentation/vm/userfaultfd.txt +++ b/Documentation/vm/userfaultfd.txt @@ -172,10 +172,6 @@ the same read(2) protocol as for the page fault notifications. The manager has to explicitly enable these events by setting appropriate bits in uffdio_api.features passed to UFFDIO_API ioctl: -UFFD_FEATURE_EVENT_EXIT - enable notification about exit() of the -non-cooperative process. When the monitored process exits, the uffd -manager will get UFFD_EVENT_EXIT. - UFFD_FEATURE_EVENT_FORK - enable userfaultfd hooks for fork(). When this feature is enabled, the userfaultfd context of the parent process is duplicated into the newly created process. 
The manager receives diff --git a/MAINTAINERS b/MAINTAINERS index 00018356f4a520..882ea01b4efe26 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3216,7 +3216,6 @@ F: drivers/platform/chrome/ CISCO VIC ETHERNET NIC DRIVER M: Christian Benvenuti -M: Sujith Sankar M: Govindarajulu Varadarajan <_govind@gmx.com> M: Neel Patel S: Supported @@ -4776,6 +4775,12 @@ L: linux-edac@vger.kernel.org S: Maintained F: drivers/edac/mpc85xx_edac.[ch] +EDAC-PND2 +M: Tony Luck +L: linux-edac@vger.kernel.org +S: Maintained +F: drivers/edac/pnd2_edac.[ch] + EDAC-PASEMI M: Egor Martovetsky L: linux-edac@vger.kernel.org @@ -4923,6 +4928,7 @@ F: include/linux/netfilter_bridge/ F: net/bridge/ ETHERNET PHY LIBRARY +M: Andrew Lunn M: Florian Fainelli L: netdev@vger.kernel.org S: Maintained @@ -5034,7 +5040,6 @@ F: lib/fault-inject.c FBTFT Framebuffer drivers M: Thomas Petazzoni -M: Noralf Trønnes S: Maintained F: drivers/staging/fbtft/ @@ -6012,9 +6017,8 @@ F: include/linux/hsi/ F: include/uapi/linux/hsi/ HSO 3G MODEM DRIVER -M: Jan Dumon -W: http://www.pharscape.org -S: Maintained +L: linux-usb@vger.kernel.org +S: Orphan F: drivers/net/usb/hso.c HSR NETWORK PROTOCOL @@ -7086,9 +7090,9 @@ S: Maintained F: fs/autofs4/ KERNEL BUILD + files below scripts/ (unless maintained elsewhere) +M: Masahiro Yamada M: Michal Marek -T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git for-next -T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git rc-fixes +T: git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild.git L: linux-kbuild@vger.kernel.org S: Maintained F: Documentation/kbuild/ @@ -7776,13 +7780,6 @@ F: include/net/mac80211.h F: net/mac80211/ F: drivers/net/wireless/mac80211_hwsim.[ch] -MACVLAN DRIVER -M: Patrick McHardy -L: netdev@vger.kernel.org -S: Maintained -F: drivers/net/macvlan.c -F: include/linux/if_macvlan.h - MAILBOX API M: Jassi Brar L: linux-kernel@vger.kernel.org @@ -7855,6 +7852,8 @@ F: drivers/net/ethernet/marvell/mvneta.* MARVELL MWIFIEX WIRELESS DRIVER M: Amitkumar Karwar M: Nishant Sarmukadam +M: Ganapathi Bhat +M: Xinming Hu L: linux-wireless@vger.kernel.org S: Maintained F: drivers/net/wireless/marvell/mwifiex/ @@ -8309,7 +8308,6 @@ M: Richard Leitner L: linux-usb@vger.kernel.org S: Maintained F: drivers/usb/misc/usb251xb.c -F: include/linux/platform_data/usb251xb.h F: Documentation/devicetree/bindings/usb/usb251xb.txt MICROSOFT SURFACE PRO 3 BUTTON DRIVER @@ -10336,6 +10334,12 @@ L: linux-scsi@vger.kernel.org S: Supported F: drivers/scsi/qedi/ +QLOGIC QL41xxx FCOE DRIVER +M: QLogic-Storage-Upstream@cavium.com +L: linux-scsi@vger.kernel.org +S: Supported +F: drivers/scsi/qedf/ + QNX4 FILESYSTEM M: Anders Larsen W: http://www.alarsen.net/linux/qnx4fs/ @@ -10811,6 +10815,7 @@ F: drivers/s390/block/dasd* F: block/partitions/ibm.c S390 NETWORK DRIVERS +M: Julian Wiedmann M: Ursula Braun L: linux-s390@vger.kernel.org W: http://www.ibm.com/developerworks/linux/linux390/ @@ -10841,6 +10846,7 @@ S: Supported F: drivers/s390/scsi/zfcp_* S390 IUCV NETWORK LAYER +M: Julian Wiedmann M: Ursula Braun L: linux-s390@vger.kernel.org W: http://www.ibm.com/developerworks/linux/linux390/ @@ -13380,14 +13386,6 @@ W: https://linuxtv.org S: Maintained F: drivers/media/platform/vivid/* -VLAN (802.1Q) -M: Patrick McHardy -L: netdev@vger.kernel.org -S: Maintained -F: drivers/net/macvlan.c -F: include/linux/if_*vlan.h -F: net/8021q/ - VLYNQ BUS M: Florian Fainelli L: openwrt-devel@lists.openwrt.org (subscribers-only) diff --git a/Makefile b/Makefile index 
4cb6b0a1152b5f..7acbcb324bae68 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 4 -PATCHLEVEL = 10 +PATCHLEVEL = 11 SUBLEVEL = 0 -EXTRAVERSION = +EXTRAVERSION = -rc5 NAME = Fearless Coyote # *DOCUMENTATION* @@ -372,7 +372,7 @@ LDFLAGS_MODULE = CFLAGS_KERNEL = AFLAGS_KERNEL = LDFLAGS_vmlinux = -CFLAGS_GCOV = -fprofile-arcs -ftest-coverage -fno-tree-loop-im -Wno-maybe-uninitialized +CFLAGS_GCOV := -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,) CFLAGS_KCOV := $(call cc-option,-fsanitize-coverage=trace-pc,) @@ -653,6 +653,12 @@ KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0409, \ # Tell gcc to never replace conditional load with a non-conditional one KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0) +# check for 'asm goto' +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y) + KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO + KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO +endif + include scripts/Makefile.gcc-plugins ifdef CONFIG_READABLE_ASM @@ -798,12 +804,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types) # use the deterministic mode of AR if available KBUILD_ARFLAGS := $(call ar-option,D) -# check for 'asm goto' -ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y) - KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO - KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO -endif - include scripts/Makefile.kasan include scripts/Makefile.extrawarn include scripts/Makefile.ubsan diff --git a/arch/alpha/include/asm/a.out-core.h b/arch/alpha/include/asm/a.out-core.h index 9e33e92e524c61..1610d078b06478 100644 --- a/arch/alpha/include/asm/a.out-core.h +++ b/arch/alpha/include/asm/a.out-core.h @@ -15,6 +15,7 @@ #ifdef __KERNEL__ #include +#include /* * Fill in the user structure for an ECOFF core dump. 
diff --git a/arch/alpha/include/asm/mmu_context.h b/arch/alpha/include/asm/mmu_context.h index 4c51c05333c604..384bd47b518717 100644 --- a/arch/alpha/include/asm/mmu_context.h +++ b/arch/alpha/include/asm/mmu_context.h @@ -7,6 +7,8 @@ * Copyright (C) 1996, Linus Torvalds */ +#include + #include #include #include diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index 9d27a7d333dca2..0b961093ca5cac 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -11,7 +11,10 @@ */ #include -#include +#include +#include +#include +#include #include #include #include diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c index bca963a4aa4880..0b963504072167 100644 --- a/arch/alpha/kernel/process.c +++ b/arch/alpha/kernel/process.c @@ -11,6 +11,9 @@ #include #include #include +#include +#include +#include #include #include #include diff --git a/arch/alpha/kernel/ptrace.c b/arch/alpha/kernel/ptrace.c index bc4d2cdcf21d76..285a82d491efb9 100644 --- a/arch/alpha/kernel/ptrace.c +++ b/arch/alpha/kernel/ptrace.c @@ -6,6 +6,7 @@ #include #include +#include #include #include #include diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c index 17308f9253066a..8129dd92cadc2e 100644 --- a/arch/alpha/kernel/signal.c +++ b/arch/alpha/kernel/signal.c @@ -6,7 +6,8 @@ * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson */ -#include +#include +#include #include #include #include diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c index acb4b146a60795..9fc560459ebd64 100644 --- a/arch/alpha/kernel/smp.c +++ b/arch/alpha/kernel/smp.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c index af2994206b4b8b..b137390e87e792 100644 --- a/arch/alpha/kernel/traps.c +++ b/arch/alpha/kernel/traps.c @@ -10,7 +10,8 @@ #include #include -#include +#include +#include #include #include #include diff --git a/arch/alpha/math-emu/math.c b/arch/alpha/math-emu/math.c index fa5ae0ad8983b6..d17d705f65453c 100644 --- a/arch/alpha/math-emu/math.c +++ b/arch/alpha/math-emu/math.c @@ -2,6 +2,7 @@ #include #include #include +#include #include diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c index 47948b4dd1574b..c25e8827e7cd03 100644 --- a/arch/alpha/mm/fault.c +++ b/arch/alpha/mm/fault.c @@ -4,7 +4,7 @@ * Copyright (C) 1995 Linus Torvalds */ -#include +#include #include #include #include diff --git a/arch/arc/boot/dts/skeleton.dtsi b/arch/arc/boot/dts/skeleton.dtsi index 65808fe0a290be..2891cb266cf0b5 100644 --- a/arch/arc/boot/dts/skeleton.dtsi +++ b/arch/arc/boot/dts/skeleton.dtsi @@ -26,6 +26,7 @@ device_type = "cpu"; compatible = "snps,arc770d"; reg = <0>; + clocks = <&core_clk>; }; }; diff --git a/arch/arc/boot/dts/skeleton_hs.dtsi b/arch/arc/boot/dts/skeleton_hs.dtsi index 2dfe8037dfbb34..5e944d3e5b74f6 100644 --- a/arch/arc/boot/dts/skeleton_hs.dtsi +++ b/arch/arc/boot/dts/skeleton_hs.dtsi @@ -21,6 +21,7 @@ device_type = "cpu"; compatible = "snps,archs38"; reg = <0>; + clocks = <&core_clk>; }; }; diff --git a/arch/arc/boot/dts/skeleton_hs_idu.dtsi b/arch/arc/boot/dts/skeleton_hs_idu.dtsi index 4c11079f3565a3..54b277d7dea0e4 100644 --- a/arch/arc/boot/dts/skeleton_hs_idu.dtsi +++ b/arch/arc/boot/dts/skeleton_hs_idu.dtsi @@ -19,8 +19,27 @@ cpu@0 { device_type = "cpu"; - compatible = "snps,archs38xN"; + compatible = "snps,archs38"; reg = <0>; + clocks = <&core_clk>; + }; + cpu@1 { + device_type = "cpu"; + compatible = 
"snps,archs38"; + reg = <1>; + clocks = <&core_clk>; + }; + cpu@2 { + device_type = "cpu"; + compatible = "snps,archs38"; + reg = <2>; + clocks = <&core_clk>; + }; + cpu@3 { + device_type = "cpu"; + compatible = "snps,archs38"; + reg = <3>; + clocks = <&core_clk>; }; }; diff --git a/arch/arc/boot/dts/vdk_axs10x_mb.dtsi b/arch/arc/boot/dts/vdk_axs10x_mb.dtsi index f0df59b23e21e4..459fc656b759ae 100644 --- a/arch/arc/boot/dts/vdk_axs10x_mb.dtsi +++ b/arch/arc/boot/dts/vdk_axs10x_mb.dtsi @@ -112,13 +112,19 @@ interrupts = <7>; bus-width = <4>; }; + }; - /* Embedded Vision subsystem UIO mappings; only relevant for EV VDK */ - uio_ev: uio@0xD0000000 { - compatible = "generic-uio"; - reg = <0xD0000000 0x2000 0xD1000000 0x2000 0x90000000 0x10000000 0xC0000000 0x10000000>; - reg-names = "ev_gsa", "ev_ctrl", "ev_shared_mem", "ev_code_mem"; - interrupts = <23>; - }; + /* + * Embedded Vision subsystem UIO mappings; only relevant for EV VDK + * + * This node is intentionally put outside of MB above becase + * it maps areas outside of MB's 0xEz-0xFz. + */ + uio_ev: uio@0xD0000000 { + compatible = "generic-uio"; + reg = <0xD0000000 0x2000 0xD1000000 0x2000 0x90000000 0x10000000 0xC0000000 0x10000000>; + reg-names = "ev_gsa", "ev_ctrl", "ev_shared_mem", "ev_code_mem"; + interrupt-parent = <&mb_intc>; + interrupts = <23>; }; }; diff --git a/arch/arc/include/asm/hugepage.h b/arch/arc/include/asm/hugepage.h index 317ff773e1ca5f..b18fcb60690822 100644 --- a/arch/arc/include/asm/hugepage.h +++ b/arch/arc/include/asm/hugepage.h @@ -11,6 +11,7 @@ #define _ASM_ARC_HUGEPAGE_H #include +#define __ARCH_USE_5LEVEL_HACK #include static inline pte_t pmd_pte(pmd_t pmd) diff --git a/arch/arc/include/asm/kprobes.h b/arch/arc/include/asm/kprobes.h index 00bdbe167615ec..2e52d18e6bc7ee 100644 --- a/arch/arc/include/asm/kprobes.h +++ b/arch/arc/include/asm/kprobes.h @@ -54,9 +54,7 @@ int kprobe_fault_handler(struct pt_regs *regs, unsigned long cause); void kretprobe_trampoline(void); void trap_is_kprobe(unsigned long address, struct pt_regs *regs); #else -static void trap_is_kprobe(unsigned long address, struct pt_regs *regs) -{ -} +#define trap_is_kprobe(address, regs) #endif /* CONFIG_KPROBES */ #endif /* _ARC_KPROBES_H */ diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h index b0b87f2447f524..64b5ebae1ae8bc 100644 --- a/arch/arc/include/asm/mmu_context.h +++ b/arch/arc/include/asm/mmu_context.h @@ -20,6 +20,7 @@ #include #include +#include #include diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h index e94ca72b974e7c..ee22d40afef43b 100644 --- a/arch/arc/include/asm/pgtable.h +++ b/arch/arc/include/asm/pgtable.h @@ -37,6 +37,7 @@ #include #include +#define __ARCH_USE_5LEVEL_HACK #include #include diff --git a/arch/arc/kernel/ctx_sw.c b/arch/arc/kernel/ctx_sw.c index 6f4cb0dab1b945..9e1ae9d41925ff 100644 --- a/arch/arc/kernel/ctx_sw.c +++ b/arch/arc/kernel/ctx_sw.c @@ -16,6 +16,7 @@ #include #include +#include #ifdef CONFIG_ARC_PLAT_EZNPS #include #endif diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S index 2585632eaa6891..cc558a25b8fa69 100644 --- a/arch/arc/kernel/entry-arcv2.S +++ b/arch/arc/kernel/entry-arcv2.S @@ -100,15 +100,21 @@ END(handle_interrupt) ;################### Non TLB Exception Handling ############################# ENTRY(EV_SWI) - flag 1 + ; TODO: implement this + EXCEPTION_PROLOGUE + b ret_from_exception END(EV_SWI) ENTRY(EV_DivZero) - flag 1 + ; TODO: implement this + EXCEPTION_PROLOGUE + b ret_from_exception 
END(EV_DivZero) ENTRY(EV_DCError) - flag 1 + ; TODO: implement this + EXCEPTION_PROLOGUE + b ret_from_exception END(EV_DCError) ; --------------------------------------------- diff --git a/arch/arc/kernel/kgdb.c b/arch/arc/kernel/kgdb.c index ecf6a78693758b..9a3c34af2ae810 100644 --- a/arch/arc/kernel/kgdb.c +++ b/arch/arc/kernel/kgdb.c @@ -10,6 +10,7 @@ #include #include +#include #include #include diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c index a41a79a4f4feac..2a018de6d6cdbc 100644 --- a/arch/arc/kernel/process.c +++ b/arch/arc/kernel/process.c @@ -11,6 +11,9 @@ #include #include #include +#include +#include + #include #include #include diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c index 4442204fe238e6..31150060d38b41 100644 --- a/arch/arc/kernel/ptrace.c +++ b/arch/arc/kernel/ptrace.c @@ -8,6 +8,7 @@ #include #include +#include #include #include #include diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index 3093fa898a236a..fa62404ba58f77 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -488,8 +489,9 @@ static int show_cpuinfo(struct seq_file *m, void *v) { char *str; int cpu_id = ptr_to_cpu(v); - struct device_node *core_clk = of_find_node_by_name(NULL, "core_clk"); - u32 freq = 0; + struct device *cpu_dev = get_cpu_device(cpu_id); + struct clk *cpu_clk; + unsigned long freq = 0; if (!cpu_online(cpu_id)) { seq_printf(m, "processor [%d]\t: Offline\n", cpu_id); @@ -502,9 +504,15 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, arc_cpu_mumbojumbo(cpu_id, str, PAGE_SIZE)); - of_property_read_u32(core_clk, "clock-frequency", &freq); + cpu_clk = clk_get(cpu_dev, NULL); + if (IS_ERR(cpu_clk)) { + seq_printf(m, "CPU speed \t: Cannot get clock for processor [%d]\n", + cpu_id); + } else { + freq = clk_get_rate(cpu_clk); + } if (freq) - seq_printf(m, "CPU speed\t: %u.%02u Mhz\n", + seq_printf(m, "CPU speed\t: %lu.%02lu Mhz\n", freq / 1000000, (freq / 10000) % 100); seq_printf(m, "Bogo MIPS\t: %lu.%02lu\n", diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c index d347bbc086fed1..48685445002e77 100644 --- a/arch/arc/kernel/signal.c +++ b/arch/arc/kernel/signal.c @@ -53,6 +53,8 @@ #include #include #include +#include + #include struct rt_sigframe { diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c index b8e8d394448137..f46267153ec2e9 100644 --- a/arch/arc/kernel/smp.c +++ b/arch/arc/kernel/smp.c @@ -13,7 +13,7 @@ */ #include -#include +#include #include #include #include diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c index b9192a653b7e3a..74315f302971b1 100644 --- a/arch/arc/kernel/stacktrace.c +++ b/arch/arc/kernel/stacktrace.c @@ -28,6 +28,8 @@ #include #include #include +#include + #include #include #include diff --git a/arch/arc/kernel/traps.c b/arch/arc/kernel/traps.c index c927aa84e652e0..ff83e78d0cfb55 100644 --- a/arch/arc/kernel/traps.c +++ b/arch/arc/kernel/traps.c @@ -13,7 +13,7 @@ * Rahul Trivedi: Codito Technologies 2004 */ -#include +#include #include #include #include diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c index 82f9bc819f4a2d..f9caf79186d42d 100644 --- a/arch/arc/kernel/troubleshoot.c +++ b/arch/arc/kernel/troubleshoot.c @@ -13,6 +13,9 @@ #include #include #include +#include +#include + #include #include diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c index d408fa21a07c99..928562967f3cd0 100644 --- 
a/arch/arc/mm/cache.c +++ b/arch/arc/mm/cache.c @@ -633,6 +633,9 @@ noinline static void slc_entire_op(const int op) write_aux_reg(ARC_REG_SLC_INVALIDATE, 1); + /* Make sure "busy" bit reports correct status, see STAR 9001165532 */ + read_aux_reg(r); + /* Important to wait for flush to complete */ while (read_aux_reg(r) & SLC_CTRL_BUSY); } diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c index e94e5aa33985c5..162c9752887251 100644 --- a/arch/arc/mm/fault.c +++ b/arch/arc/mm/fault.c @@ -9,7 +9,7 @@ #include #include -#include +#include #include #include #include diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c index 2e06d56e987bf8..3e25e8d6486ba2 100644 --- a/arch/arc/mm/mmap.c +++ b/arch/arc/mm/mmap.c @@ -13,7 +13,8 @@ #include #include #include -#include +#include + #include #define COLOUR_ALIGN(addr, pgoff) \ diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c index bdb295e09160b2..d0126fdfe2d854 100644 --- a/arch/arc/mm/tlb.c +++ b/arch/arc/mm/tlb.c @@ -53,6 +53,8 @@ #include #include +#include + #include #include #include diff --git a/arch/arm/boot/dts/am335x-pcm-953.dtsi b/arch/arm/boot/dts/am335x-pcm-953.dtsi index 02981eae96b994..1ec8e0d801912f 100644 --- a/arch/arm/boot/dts/am335x-pcm-953.dtsi +++ b/arch/arm/boot/dts/am335x-pcm-953.dtsi @@ -63,14 +63,14 @@ label = "home"; linux,code = ; gpios = <&gpio3 7 GPIO_ACTIVE_HIGH>; - gpio-key,wakeup; + wakeup-source; }; button@1 { label = "menu"; linux,code = ; gpios = <&gpio3 8 GPIO_ACTIVE_HIGH>; - gpio-key,wakeup; + wakeup-source; }; }; diff --git a/arch/arm/boot/dts/am57xx-idk-common.dtsi b/arch/arm/boot/dts/am57xx-idk-common.dtsi index 0d341c545b010f..e5ac1d81d15c9e 100644 --- a/arch/arm/boot/dts/am57xx-idk-common.dtsi +++ b/arch/arm/boot/dts/am57xx-idk-common.dtsi @@ -315,6 +315,13 @@ /* ID & VBUS GPIOs provided in board dts */ }; }; + + tpic2810: tpic2810@60 { + compatible = "ti,tpic2810"; + reg = <0x60>; + gpio-controller; + #gpio-cells = <2>; + }; }; &mcspi3 { @@ -330,13 +337,6 @@ spi-max-frequency = <1000000>; spi-cpol; }; - - tpic2810: tpic2810@60 { - compatible = "ti,tpic2810"; - reg = <0x60>; - gpio-controller; - #gpio-cells = <2>; - }; }; &uart3 { diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi index 4fbb089cf5ad3c..00de62dc0042f1 100644 --- a/arch/arm/boot/dts/bcm5301x.dtsi +++ b/arch/arm/boot/dts/bcm5301x.dtsi @@ -66,14 +66,14 @@ timer@20200 { compatible = "arm,cortex-a9-global-timer"; reg = <0x20200 0x100>; - interrupts = ; + interrupts = ; clocks = <&periph_clk>; }; local-timer@20600 { compatible = "arm,cortex-a9-twd-timer"; reg = <0x20600 0x100>; - interrupts = ; + interrupts = ; clocks = <&periph_clk>; }; diff --git a/arch/arm/boot/dts/bcm953012k.dts b/arch/arm/boot/dts/bcm953012k.dts index bfd923096a8c1f..ae31a5826e918e 100644 --- a/arch/arm/boot/dts/bcm953012k.dts +++ b/arch/arm/boot/dts/bcm953012k.dts @@ -48,15 +48,14 @@ }; memory { - reg = <0x00000000 0x10000000>; + reg = <0x80000000 0x10000000>; }; }; &uart0 { - clock-frequency = <62499840>; + status = "okay"; }; &uart1 { - clock-frequency = <62499840>; status = "okay"; }; diff --git a/arch/arm/boot/dts/bcm958522er.dts b/arch/arm/boot/dts/bcm958522er.dts index 3f04a40eb90cc9..df05e7f568af3e 100644 --- a/arch/arm/boot/dts/bcm958522er.dts +++ b/arch/arm/boot/dts/bcm958522er.dts @@ -55,6 +55,7 @@ gpio-restart { compatible = "gpio-restart"; gpios = <&gpioa 15 GPIO_ACTIVE_LOW>; + open-source; priority = <200>; }; }; diff --git a/arch/arm/boot/dts/bcm958525er.dts b/arch/arm/boot/dts/bcm958525er.dts index
9fd542200d3d52..4a3ab19c62819f 100644 --- a/arch/arm/boot/dts/bcm958525er.dts +++ b/arch/arm/boot/dts/bcm958525er.dts @@ -55,6 +55,7 @@ gpio-restart { compatible = "gpio-restart"; gpios = <&gpioa 15 GPIO_ACTIVE_LOW>; + open-source; priority = <200>; }; }; diff --git a/arch/arm/boot/dts/bcm958525xmc.dts b/arch/arm/boot/dts/bcm958525xmc.dts index 41e7fd350fcd1b..81f78435d8c76c 100644 --- a/arch/arm/boot/dts/bcm958525xmc.dts +++ b/arch/arm/boot/dts/bcm958525xmc.dts @@ -55,6 +55,7 @@ gpio-restart { compatible = "gpio-restart"; gpios = <&gpioa 31 GPIO_ACTIVE_LOW>; + open-source; priority = <200>; }; }; diff --git a/arch/arm/boot/dts/bcm958622hr.dts b/arch/arm/boot/dts/bcm958622hr.dts index 477c4860db5223..c88b8fefcb2f13 100644 --- a/arch/arm/boot/dts/bcm958622hr.dts +++ b/arch/arm/boot/dts/bcm958622hr.dts @@ -55,6 +55,7 @@ gpio-restart { compatible = "gpio-restart"; gpios = <&gpioa 15 GPIO_ACTIVE_LOW>; + open-source; priority = <200>; }; }; diff --git a/arch/arm/boot/dts/bcm958623hr.dts b/arch/arm/boot/dts/bcm958623hr.dts index c0a499d5ba447d..d503fa0dde310f 100644 --- a/arch/arm/boot/dts/bcm958623hr.dts +++ b/arch/arm/boot/dts/bcm958623hr.dts @@ -55,6 +55,7 @@ gpio-restart { compatible = "gpio-restart"; gpios = <&gpioa 15 GPIO_ACTIVE_LOW>; + open-source; priority = <200>; }; }; diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts index f7eb5854a22448..cc0363b843c1a0 100644 --- a/arch/arm/boot/dts/bcm958625hr.dts +++ b/arch/arm/boot/dts/bcm958625hr.dts @@ -55,6 +55,7 @@ gpio-restart { compatible = "gpio-restart"; gpios = <&gpioa 15 GPIO_ACTIVE_LOW>; + open-source; priority = <200>; }; }; diff --git a/arch/arm/boot/dts/bcm988312hr.dts b/arch/arm/boot/dts/bcm988312hr.dts index 16666324fda8b5..74e15a3cd9f8ef 100644 --- a/arch/arm/boot/dts/bcm988312hr.dts +++ b/arch/arm/boot/dts/bcm988312hr.dts @@ -55,6 +55,7 @@ gpio-restart { compatible = "gpio-restart"; gpios = <&gpioa 15 GPIO_ACTIVE_LOW>; + open-source; priority = <200>; }; }; diff --git a/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi b/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi index 49f466fe0b1dc2..dcfc9759143375 100644 --- a/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi +++ b/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi @@ -121,11 +121,6 @@ }; }; -&cpu0 { - arm-supply = <&sw1a_reg>; - soc-supply = <&sw1c_reg>; -}; - &fec1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_enet1>; diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi index 22332be7214032..528b4e9c6d3d30 100644 --- a/arch/arm/boot/dts/sama5d2.dtsi +++ b/arch/arm/boot/dts/sama5d2.dtsi @@ -266,7 +266,7 @@ }; usb1: ohci@00400000 { - compatible = "atmel,sama5d2-ohci", "usb-ohci"; + compatible = "atmel,at91rm9200-ohci", "usb-ohci"; reg = <0x00400000 0x100000>; interrupts = <41 IRQ_TYPE_LEVEL_HIGH 2>; clocks = <&uhphs_clk>, <&uhphs_clk>, <&uhpck>; diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi index 82d8c477129359..162e1eb5373d34 100644 --- a/arch/arm/boot/dts/ste-dbx5x0.dtsi +++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi @@ -14,6 +14,7 @@ #include #include #include +#include #include "skeleton.dtsi" / { @@ -603,6 +604,11 @@ interrupt-controller; #interrupt-cells = <2>; + ab8500_clock: clock-controller { + compatible = "stericsson,ab8500-clk"; + #clock-cells = <1>; + }; + ab8500_gpio: ab8500-gpio { compatible = "stericsson,ab8500-gpio"; gpio-controller; @@ -686,6 +692,8 @@ ab8500-pwm { compatible = "stericsson,ab8500-pwm"; + clocks = <&ab8500_clock AB8500_SYSCLK_INT>; + clock-names = "intclk"; }; ab8500-debugfs { @@ -700,6 
+708,9 @@ V-AMIC2-supply = <&ab8500_ldo_anamic2_reg>; V-DMIC-supply = <&ab8500_ldo_dmic_reg>; + clocks = <&ab8500_clock AB8500_SYSCLK_AUDIO>; + clock-names = "audioclk"; + stericsson,earpeice-cmv = <950>; /* Units in mV. */ }; @@ -1095,6 +1106,14 @@ status = "disabled"; }; + sound { + compatible = "stericsson,snd-soc-mop500"; + stericsson,cpu-dai = <&msp1 &msp3>; + stericsson,audio-codec = <&codec>; + clocks = <&prcmu_clk PRCMU_SYSCLK>, <&ab8500_clock AB8500_SYSCLK_ULP>, <&ab8500_clock AB8500_SYSCLK_INT>; + clock-names = "sysclk", "ulpclk", "intclk"; + }; + msp0: msp@80123000 { compatible = "stericsson,ux500-msp-i2s"; reg = <0x80123000 0x1000>; diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi index f37f9e10713cc8..9e359e4f342e76 100644 --- a/arch/arm/boot/dts/ste-href.dtsi +++ b/arch/arm/boot/dts/ste-href.dtsi @@ -186,15 +186,6 @@ status = "okay"; }; - sound { - compatible = "stericsson,snd-soc-mop500"; - - stericsson,cpu-dai = <&msp1 &msp3>; - stericsson,audio-codec = <&codec>; - clocks = <&prcmu_clk PRCMU_SYSCLK>; - clock-names = "sysclk"; - }; - msp0: msp@80123000 { pinctrl-names = "default"; pinctrl-0 = <&msp0_default_mode>; diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts index dd5514def60424..ade1d0d4e5f45c 100644 --- a/arch/arm/boot/dts/ste-snowball.dts +++ b/arch/arm/boot/dts/ste-snowball.dts @@ -159,15 +159,6 @@ "", "", "", "", "", "", "", ""; }; - sound { - compatible = "stericsson,snd-soc-mop500"; - - stericsson,cpu-dai = <&msp1 &msp3>; - stericsson,audio-codec = <&codec>; - clocks = <&prcmu_clk PRCMU_SYSCLK>; - clock-names = "sysclk"; - }; - msp0: msp@80123000 { pinctrl-names = "default"; pinctrl-0 = <&msp0_default_mode>; diff --git a/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts b/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts index 72ec0d5ae052cd..bbf1c8cbaac6aa 100644 --- a/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts +++ b/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts @@ -167,7 +167,7 @@ reg = <8>; label = "cpu"; ethernet = <&gmac>; - phy-mode = "rgmii"; + phy-mode = "rgmii-txid"; fixed-link { speed = <1000>; full-duplex; diff --git a/arch/arm/boot/dts/sun8i-a23-a33.dtsi b/arch/arm/boot/dts/sun8i-a23-a33.dtsi index a952cc0703cc17..8a3ed21cb7bcfc 100644 --- a/arch/arm/boot/dts/sun8i-a23-a33.dtsi +++ b/arch/arm/boot/dts/sun8i-a23-a33.dtsi @@ -495,7 +495,7 @@ resets = <&ccu RST_BUS_GPU>; assigned-clocks = <&ccu CLK_GPU>; - assigned-clock-rates = <408000000>; + assigned-clock-rates = <384000000>; }; gic: interrupt-controller@01c81000 { diff --git a/arch/arm/boot/dts/sun8i-a33.dtsi b/arch/arm/boot/dts/sun8i-a33.dtsi index 18c174fef84f51..0467fb365bfca7 100644 --- a/arch/arm/boot/dts/sun8i-a33.dtsi +++ b/arch/arm/boot/dts/sun8i-a33.dtsi @@ -113,8 +113,8 @@ simple-audio-card,mclk-fs = <512>; simple-audio-card,aux-devs = <&codec_analog>; simple-audio-card,routing = - "Left DAC", "Digital Left DAC", - "Right DAC", "Digital Right DAC"; + "Left DAC", "AIF1 Slot 0 Left", + "Right DAC", "AIF1 Slot 0 Right"; status = "disabled"; simple-audio-card,cpu { diff --git a/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi b/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi index 7097c18ff487d4..d6bd15898db6d6 100644 --- a/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi +++ b/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi @@ -50,8 +50,6 @@ backlight: backlight { compatible = "pwm-backlight"; - pinctrl-names = "default"; - pinctrl-0 = <&bl_en_pin>; pwms = <&pwm 0 50000 PWM_POLARITY_INVERTED>; brightness-levels = <0 10 20 30 40 50 
60 70 80 90 100>; default-brightness-level = <8>; @@ -93,11 +91,6 @@ }; &pio { - bl_en_pin: bl_en_pin@0 { - pins = "PH6"; - function = "gpio_in"; - }; - mmc0_cd_pin: mmc0_cd_pin@0 { pins = "PB4"; function = "gpio_in"; diff --git a/arch/arm/common/bL_switcher.c b/arch/arm/common/bL_switcher.c index 46730017b3c54c..57f3b751263616 100644 --- a/arch/arm/common/bL_switcher.c +++ b/arch/arm/common/bL_switcher.c @@ -13,7 +13,8 @@ #include #include #include -#include +#include +#include #include #include #include diff --git a/arch/arm/configs/moxart_defconfig b/arch/arm/configs/moxart_defconfig index a3cb76cfb8282f..b2ddd534867fcb 100644 --- a/arch/arm/configs/moxart_defconfig +++ b/arch/arm/configs/moxart_defconfig @@ -18,9 +18,8 @@ CONFIG_EMBEDDED=y # CONFIG_LBDAF is not set # CONFIG_BLK_DEV_BSG is not set # CONFIG_IOSCHED_DEADLINE is not set -CONFIG_ARCH_MULTI_V4T=y +CONFIG_ARCH_MULTI_V4=y # CONFIG_ARCH_MULTI_V7 is not set -CONFIG_KEYBOARD_GPIO_POLLED=y CONFIG_ARCH_MOXART=y CONFIG_MACH_UC7112LX=y CONFIG_PREEMPT=y @@ -94,12 +93,10 @@ CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_SHARE_IRQ=y CONFIG_SERIAL_OF_PLATFORM=y # CONFIG_HW_RANDOM is not set -CONFIG_DEBUG_GPIO=y -CONFIG_GPIO_SYSFS=y CONFIG_GPIO_MOXART=y -CONFIG_POWER_SUPPLY=y CONFIG_POWER_RESET=y CONFIG_POWER_RESET_GPIO=y +CONFIG_POWER_SUPPLY=y # CONFIG_HWMON is not set CONFIG_WATCHDOG=y CONFIG_WATCHDOG_CORE=y @@ -107,10 +104,13 @@ CONFIG_WATCHDOG_NOWAYOUT=y CONFIG_MOXART_WDT=y # CONFIG_USB_SUPPORT is not set CONFIG_MMC=y -CONFIG_MMC_SDHCI_MOXART=y +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMC_MOXART=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_GPIO=y +CONFIG_LEDS_TRIGGERS=y CONFIG_LEDS_TRIGGER_TIMER=y CONFIG_LEDS_TRIGGER_ONESHOT=y CONFIG_LEDS_TRIGGER_HEARTBEAT=y diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index f2462a6bdba6e7..decd388d613d7e 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -188,6 +188,7 @@ CONFIG_WL12XX=m CONFIG_WL18XX=m CONFIG_WLCORE_SPI=m CONFIG_WLCORE_SDIO=m +CONFIG_INPUT_MOUSEDEV=m CONFIG_INPUT_JOYDEV=m CONFIG_INPUT_EVDEV=m CONFIG_KEYBOARD_ATKBD=m diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile index 1822c4697278cb..f2215fbeed138f 100644 --- a/arch/arm/crypto/Makefile +++ b/arch/arm/crypto/Makefile @@ -15,7 +15,17 @@ ce-obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o ce-obj-$(CONFIG_CRYPTO_SHA2_ARM_CE) += sha2-arm-ce.o ce-obj-$(CONFIG_CRYPTO_GHASH_ARM_CE) += ghash-arm-ce.o ce-obj-$(CONFIG_CRYPTO_CRCT10DIF_ARM_CE) += crct10dif-arm-ce.o -ce-obj-$(CONFIG_CRYPTO_CRC32_ARM_CE) += crc32-arm-ce.o +crc-obj-$(CONFIG_CRYPTO_CRC32_ARM_CE) += crc32-arm-ce.o + +ifneq ($(crc-obj-y)$(crc-obj-m),) +ifeq ($(call as-instr,.arch armv8-a\n.arch_extension crc,y,n),y) +ce-obj-y += $(crc-obj-y) +ce-obj-m += $(crc-obj-m) +else +$(warning These CRC Extensions modules need binutils 2.23 or higher) +$(warning $(crc-obj-y) $(crc-obj-m)) +endif +endif ifneq ($(ce-obj-y)$(ce-obj-m),) ifeq ($(call as-instr,.fpu crypto-neon-fp-armv8,y,n),y) diff --git a/arch/arm/crypto/crc32-ce-core.S b/arch/arm/crypto/crc32-ce-core.S index e63d400dc5c14b..5cbd4a6fedad7c 100644 --- a/arch/arm/crypto/crc32-ce-core.S +++ b/arch/arm/crypto/crc32-ce-core.S @@ -135,7 +135,7 @@ ENTRY(crc32c_pmull_le) vld1.8 {q3-q4}, [BUF, :128]! 
vmov.i8 qzr, #0 vmov.i8 qCONSTANT, #0 - vmov dCONSTANTl[0], CRC + vmov.32 dCONSTANTl[0], CRC veor.8 d2, d2, dCONSTANTl sub LEN, LEN, #0x40 cmp LEN, #0x40 diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h index e22089fb44dc86..a3f0b3d500895b 100644 --- a/arch/arm/include/asm/kvm_arm.h +++ b/arch/arm/include/asm/kvm_arm.h @@ -209,6 +209,7 @@ #define HSR_EC_IABT_HYP (0x21) #define HSR_EC_DABT (0x24) #define HSR_EC_DABT_HYP (0x25) +#define HSR_EC_MAX (0x3f) #define HSR_WFI_IS_WFE (_AC(1, UL) << 0) diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index cc495d799c6764..31ee468ce667de 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -30,7 +30,6 @@ #define __KVM_HAVE_ARCH_INTC_INITIALIZED #define KVM_USER_MEM_SLOTS 32 -#define KVM_PRIVATE_MEM_SLOTS 4 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 #define KVM_HAVE_ONE_REG #define KVM_HALT_POLL_NS_DEFAULT 500000 diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h index 3cc14dd8587c09..7f303295ef1903 100644 --- a/arch/arm/include/asm/mmu_context.h +++ b/arch/arm/include/asm/mmu_context.h @@ -15,7 +15,9 @@ #include #include +#include #include + #include #include #include diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index a8d656d9aec715..1c462381c225ee 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -20,6 +20,7 @@ #else +#define __ARCH_USE_5LEVEL_HACK #include #include #include diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index def9e570199f90..1897b5196fb57f 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -10,6 +10,10 @@ #ifndef _ASMARM_TLBFLUSH_H #define _ASMARM_TLBFLUSH_H +#ifndef __ASSEMBLY__ +# include +#endif + #ifdef CONFIG_MMU #include @@ -644,9 +648,6 @@ static inline void update_mmu_cache(struct vm_area_struct *vma, #elif defined(CONFIG_SMP) /* !CONFIG_MMU */ #ifndef __ASSEMBLY__ - -#include - static inline void local_flush_tlb_all(void) { } static inline void local_flush_tlb_mm(struct mm_struct *mm) { } static inline void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) { } diff --git a/arch/arm/kernel/perf_regs.c b/arch/arm/kernel/perf_regs.c index 592dda3f21fff0..c366b83bf9550d 100644 --- a/arch/arm/kernel/perf_regs.c +++ b/arch/arm/kernel/perf_regs.c @@ -3,6 +3,7 @@ #include #include #include +#include #include #include diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 91d2d5b014145d..939e8b58c59d1e 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -12,6 +12,9 @@ #include #include +#include +#include +#include #include #include #include diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index ae738a6319f6a3..58e3771e4c5bb8 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -10,7 +10,8 @@ * published by the Free Software Foundation. 
*/ #include -#include +#include +#include #include #include #include diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 5a07c5a4b8943c..572a8df1b7662d 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -11,7 +11,9 @@ #include #include #include -#include +#include +#include +#include #include #include #include diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c index 92b72375c4c72a..3a2fa203637a99 100644 --- a/arch/arm/kernel/stacktrace.c +++ b/arch/arm/kernel/stacktrace.c @@ -1,5 +1,6 @@ #include #include +#include #include #include diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c index 9a2f882a0a2d1c..ef794c799cb660 100644 --- a/arch/arm/kernel/suspend.c +++ b/arch/arm/kernel/suspend.c @@ -1,5 +1,6 @@ #include #include +#include #include #include diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c index 853221f81104c2..3bda08bee6747c 100644 --- a/arch/arm/kernel/swp_emulate.c +++ b/arch/arm/kernel/swp_emulate.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c index 5f221acd21aebb..b9786f491873fa 100644 --- a/arch/arm/kernel/sys_oabi-compat.c +++ b/arch/arm/kernel/sys_oabi-compat.c @@ -76,6 +76,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c index ebf47d91b8041f..f8a3ab82e77f51 100644 --- a/arch/arm/kernel/topology.c +++ b/arch/arm/kernel/topology.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 9688ec0c6ef43f..948c648fea009d 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -24,7 +24,9 @@ #include #include #include -#include +#include +#include +#include #include #include diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index c9a2103faeb9ac..96dba7cd8be7b4 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -221,6 +221,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_MAX_VCPUS: r = KVM_MAX_VCPUS; break; + case KVM_CAP_NR_MEMSLOTS: + r = KVM_USER_MEM_SLOTS; + break; case KVM_CAP_MSI_DEVID: if (!kvm) r = -EINVAL; diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c index 4e40d1955e3534..96af65a30d78b1 100644 --- a/arch/arm/kvm/handle_exit.c +++ b/arch/arm/kvm/handle_exit.c @@ -79,7 +79,19 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run) return 1; } +static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + u32 hsr = kvm_vcpu_get_hsr(vcpu); + + kvm_pr_unimpl("Unknown exception class: hsr: %#08x\n", + hsr); + + kvm_inject_undefined(vcpu); + return 1; +} + static exit_handle_fn arm_exit_handlers[] = { + [0 ... 
HSR_EC_MAX] = kvm_handle_unknown_ec, [HSR_EC_WFI] = kvm_handle_wfx, [HSR_EC_CP15_32] = kvm_handle_cp15_32, [HSR_EC_CP15_64] = kvm_handle_cp15_64, @@ -98,13 +110,6 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu) { u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu); - if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) || - !arm_exit_handlers[hsr_ec]) { - kvm_err("Unknown exception class: hsr: %#08x\n", - (unsigned int)kvm_vcpu_get_hsr(vcpu)); - BUG(); - } - return arm_exit_handlers[hsr_ec]; } diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index 3d89b7905bd903..a277981f414d8d 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c @@ -289,6 +289,22 @@ static void at91_ddr_standby(void) at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1); } +static void sama5d3_ddr_standby(void) +{ + u32 lpr0; + u32 saved_lpr0; + + saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR); + lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB; + lpr0 |= AT91_DDRSDRC_LPCB_POWER_DOWN; + + at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0); + + cpu_do_idle(); + + at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0); +} + /* We manage both DDRAM/SDRAM controllers, we need more than one value to * remember. */ @@ -323,7 +339,7 @@ static const struct of_device_id const ramc_ids[] __initconst = { { .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby }, { .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby }, { .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby }, - { .compatible = "atmel,sama5d3-ddramc", .data = at91_ddr_standby }, + { .compatible = "atmel,sama5d3-ddramc", .data = sama5d3_ddr_standby }, { /*sentinel*/ } }; diff --git a/arch/arm/mach-bcm/platsmp.c b/arch/arm/mach-bcm/platsmp.c index 582886d0d02f72..9e3f275934eb41 100644 --- a/arch/arm/mach-bcm/platsmp.c +++ b/arch/arm/mach-bcm/platsmp.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index 093458b62c8dad..c89757abb0ae4b 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile @@ -241,6 +241,3 @@ obj-$(CONFIG_MACH_OMAP2_TUSB6010) += usb-tusb6010.o onenand-$(CONFIG_MTD_ONENAND_OMAP2) := gpmc-onenand.o obj-y += $(onenand-m) $(onenand-y) - -nand-$(CONFIG_MTD_NAND_OMAP2) := gpmc-nand.o -obj-y += $(nand-m) $(nand-y) diff --git a/arch/arm/mach-omap2/gpmc-nand.c b/arch/arm/mach-omap2/gpmc-nand.c deleted file mode 100644 index f6ac027f3c3bf2..00000000000000 --- a/arch/arm/mach-omap2/gpmc-nand.c +++ /dev/null @@ -1,154 +0,0 @@ -/* - * gpmc-nand.c - * - * Copyright (C) 2009 Texas Instruments - * Vimal Singh - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include -#include -#include -#include -#include -#include - -#include - -#include "soc.h" - -/* minimum size for IO mapping */ -#define NAND_IO_SIZE 4 - -static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt) -{ - /* platforms which support all ECC schemes */ - if (soc_is_am33xx() || soc_is_am43xx() || cpu_is_omap44xx() || - soc_is_omap54xx() || soc_is_dra7xx()) - return 1; - - if (ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW || - ecc_opt == OMAP_ECC_BCH8_CODE_HW_DETECTION_SW) { - if (cpu_is_omap24xx()) - return 0; - else if (cpu_is_omap3630() && (GET_OMAP_REVISION() == 0)) - return 0; - else - return 1; - } - - /* OMAP3xxx do not have ELM engine, so cannot support ECC schemes - * which require H/W based ECC error detection */ - if ((cpu_is_omap34xx() || cpu_is_omap3630()) && - ((ecc_opt == OMAP_ECC_BCH4_CODE_HW) || - (ecc_opt == OMAP_ECC_BCH8_CODE_HW))) - return 0; - - /* legacy platforms support only HAM1 (1-bit Hamming) ECC scheme */ - if (ecc_opt == OMAP_ECC_HAM1_CODE_HW || - ecc_opt == OMAP_ECC_HAM1_CODE_SW) - return 1; - else - return 0; -} - -/* This function will go away once the device-tree convertion is complete */ -static void gpmc_set_legacy(struct omap_nand_platform_data *gpmc_nand_data, - struct gpmc_settings *s) -{ - /* Enable RD PIN Monitoring Reg */ - if (gpmc_nand_data->dev_ready) { - s->wait_on_read = true; - s->wait_on_write = true; - } - - if (gpmc_nand_data->devsize == NAND_BUSWIDTH_16) - s->device_width = GPMC_DEVWIDTH_16BIT; - else - s->device_width = GPMC_DEVWIDTH_8BIT; -} - -int gpmc_nand_init(struct omap_nand_platform_data *gpmc_nand_data, - struct gpmc_timings *gpmc_t) -{ - int err = 0; - struct gpmc_settings s; - struct platform_device *pdev; - struct resource gpmc_nand_res[] = { - { .flags = IORESOURCE_MEM, }, - { .flags = IORESOURCE_IRQ, }, - { .flags = IORESOURCE_IRQ, }, - }; - - BUG_ON(gpmc_nand_data->cs >= GPMC_CS_NUM); - - err = gpmc_cs_request(gpmc_nand_data->cs, NAND_IO_SIZE, - (unsigned long *)&gpmc_nand_res[0].start); - if (err < 0) { - pr_err("omap2-gpmc: Cannot request GPMC CS %d, error %d\n", - gpmc_nand_data->cs, err); - return err; - } - gpmc_nand_res[0].end = gpmc_nand_res[0].start + NAND_IO_SIZE - 1; - gpmc_nand_res[1].start = gpmc_get_client_irq(GPMC_IRQ_FIFOEVENTENABLE); - gpmc_nand_res[2].start = gpmc_get_client_irq(GPMC_IRQ_COUNT_EVENT); - - memset(&s, 0, sizeof(struct gpmc_settings)); - gpmc_set_legacy(gpmc_nand_data, &s); - - s.device_nand = true; - - if (gpmc_t) { - err = gpmc_cs_set_timings(gpmc_nand_data->cs, gpmc_t, &s); - if (err < 0) { - pr_err("omap2-gpmc: Unable to set gpmc timings: %d\n", - err); - return err; - } - } - - err = gpmc_cs_program_settings(gpmc_nand_data->cs, &s); - if (err < 0) - goto out_free_cs; - - err = gpmc_configure(GPMC_CONFIG_WP, 0); - if (err < 0) - goto out_free_cs; - - if (!gpmc_hwecc_bch_capable(gpmc_nand_data->ecc_opt)) { - pr_err("omap2-nand: Unsupported NAND ECC scheme selected\n"); - err = -EINVAL; - goto out_free_cs; - } - - - pdev = platform_device_alloc("omap2-nand", gpmc_nand_data->cs); - if (pdev) { - err = platform_device_add_resources(pdev, gpmc_nand_res, - ARRAY_SIZE(gpmc_nand_res)); - if (!err) - pdev->dev.platform_data = gpmc_nand_data; - } else { - err = -ENOMEM; - } - if (err) - goto out_free_pdev; - - err = platform_device_add(pdev); - if (err) { - dev_err(&pdev->dev, "Unable to register NAND device\n"); - goto out_free_pdev; - } - - return 0; - -out_free_pdev: - platform_device_put(pdev); -out_free_cs: - gpmc_cs_free(gpmc_nand_data->cs); - - return err; -} diff --git 
a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c index 8633c703546a65..2944af82055847 100644 --- a/arch/arm/mach-omap2/gpmc-onenand.c +++ b/arch/arm/mach-omap2/gpmc-onenand.c @@ -367,7 +367,7 @@ static int gpmc_onenand_setup(void __iomem *onenand_base, int *freq_ptr) return ret; } -void gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data) +int gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data) { int err; struct device *dev = &gpmc_onenand_device.dev; @@ -393,15 +393,17 @@ void gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data) if (err < 0) { dev_err(dev, "Cannot request GPMC CS %d, error %d\n", gpmc_onenand_data->cs, err); - return; + return err; } gpmc_onenand_resource.end = gpmc_onenand_resource.start + ONENAND_IO_SIZE - 1; - if (platform_device_register(&gpmc_onenand_device) < 0) { + err = platform_device_register(&gpmc_onenand_device); + if (err) { dev_err(dev, "Unable to register OneNAND device\n"); gpmc_cs_free(gpmc_onenand_data->cs); - return; } + + return err; } diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S index fe36ce2734d47a..4c6f14cf92a82e 100644 --- a/arch/arm/mach-omap2/omap-headsmp.S +++ b/arch/arm/mach-omap2/omap-headsmp.S @@ -17,6 +17,7 @@ #include #include +#include #include "omap44xx.h" @@ -66,7 +67,7 @@ wait_2: ldr r2, =AUX_CORE_BOOT0_PA @ read from AuxCoreBoot0 cmp r0, r4 bne wait_2 ldr r12, =API_HYP_ENTRY - adr r0, hyp_boot + badr r0, hyp_boot smc #0 hyp_boot: b omap_secondary_startup diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index 56f917ec8621e8..1435fee39a89ba 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c @@ -2112,11 +2112,20 @@ static struct omap_hwmod_ocp_if omap3_l4_core__i2c3 = { }; /* L4 CORE -> SR1 interface */ +static struct omap_hwmod_addr_space omap3_sr1_addr_space[] = { + { + .pa_start = OMAP34XX_SR1_BASE, + .pa_end = OMAP34XX_SR1_BASE + SZ_1K - 1, + .flags = ADDR_TYPE_RT, + }, + { }, +}; static struct omap_hwmod_ocp_if omap34xx_l4_core__sr1 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap34xx_sr1_hwmod, .clk = "sr_l4_ick", + .addr = omap3_sr1_addr_space, .user = OCP_USER_MPU, }; @@ -2124,15 +2133,25 @@ static struct omap_hwmod_ocp_if omap36xx_l4_core__sr1 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap36xx_sr1_hwmod, .clk = "sr_l4_ick", + .addr = omap3_sr1_addr_space, .user = OCP_USER_MPU, }; /* L4 CORE -> SR1 interface */ +static struct omap_hwmod_addr_space omap3_sr2_addr_space[] = { + { + .pa_start = OMAP34XX_SR2_BASE, + .pa_end = OMAP34XX_SR2_BASE + SZ_1K - 1, + .flags = ADDR_TYPE_RT, + }, + { }, +}; static struct omap_hwmod_ocp_if omap34xx_l4_core__sr2 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap34xx_sr2_hwmod, .clk = "sr_l4_ick", + .addr = omap3_sr2_addr_space, .user = OCP_USER_MPU, }; @@ -2140,6 +2159,7 @@ static struct omap_hwmod_ocp_if omap36xx_l4_core__sr2 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap36xx_sr2_hwmod, .clk = "sr_l4_ick", + .addr = omap3_sr2_addr_space, .user = OCP_USER_MPU, }; @@ -3111,16 +3131,20 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_hwmod_ocp_ifs[] __initdata = { * Return: 0 if device named @dev_name is not likely to be accessible, * or 1 if it is likely to be accessible. 
*/ -static int __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus, - const char *dev_name) +static bool __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus, + const char *dev_name) { + struct device_node *node; + bool available; + if (!bus) - return (omap_type() == OMAP2_DEVICE_TYPE_GP) ? 1 : 0; + return omap_type() == OMAP2_DEVICE_TYPE_GP; - if (of_device_is_available(of_find_node_by_name(bus, dev_name))) - return 1; + node = of_get_child_by_name(bus, dev_name); + available = of_device_is_available(node); + of_node_put(node); - return 0; + return available; } int __init omap3xxx_hwmod_init(void) @@ -3189,15 +3213,20 @@ int __init omap3xxx_hwmod_init(void) if (h_sham && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "sham")) { r = omap_hwmod_register_links(h_sham); - if (r < 0) + if (r < 0) { + of_node_put(bus); return r; + } } if (h_aes && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "aes")) { r = omap_hwmod_register_links(h_aes); - if (r < 0) + if (r < 0) { + of_node_put(bus); return r; + } } + of_node_put(bus); /* * Register hwmod links specific to certain ES levels of a diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c index 003a6cb248bec3..5c46ea6756d7b1 100644 --- a/arch/arm/mach-omap2/pm-debug.c +++ b/arch/arm/mach-omap2/pm-debug.c @@ -21,6 +21,7 @@ #include #include +#include #include #include #include diff --git a/arch/arm/mach-rpc/ecard.c b/arch/arm/mach-rpc/ecard.c index dc67a7fb383199..6b279d0377742c 100644 --- a/arch/arm/mach-rpc/ecard.c +++ b/arch/arm/mach-rpc/ecard.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/arm/mach-ux500/Makefile b/arch/arm/mach-ux500/Makefile index c2499bff498670..a9a3453548f49c 100644 --- a/arch/arm/mach-ux500/Makefile +++ b/arch/arm/mach-ux500/Makefile @@ -5,7 +5,4 @@ obj-y := pm.o obj-$(CONFIG_UX500_SOC_DB8500) += cpu-db8500.o obj-$(CONFIG_SMP) += platsmp.o -obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o obj-$(CONFIG_PM_GENERIC_DOMAINS) += pm_domains.o - -CFLAGS_hotplug.o += -march=armv7-a diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c index 24529cf58df60f..28083ef7281958 100644 --- a/arch/arm/mach-ux500/cpu-db8500.c +++ b/arch/arm/mach-ux500/cpu-db8500.c @@ -31,8 +31,6 @@ #include #include -#include "setup.h" - #include "db8500-regs.h" static int __init ux500_l2x0_unlock(void) diff --git a/arch/arm/mach-ux500/hotplug.c b/arch/arm/mach-ux500/hotplug.c deleted file mode 100644 index 1cbed0331fd326..00000000000000 --- a/arch/arm/mach-ux500/hotplug.c +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright (C) STMicroelectronics 2009 - * Copyright (C) ST-Ericsson SA 2010 - * - * License Terms: GNU General Public License v2 - * Based on ARM realview platform - * - * Author: Sundar Iyer - * - */ -#include -#include -#include - -#include - -#include "setup.h" - -/* - * platform-specific code to shutdown a CPU - * - * Called with IRQs disabled - */ -void ux500_cpu_die(unsigned int cpu) -{ - /* directly enter low power state, skipping secure registers */ - for (;;) { - __asm__ __volatile__("dsb\n\t" "wfi\n\t" - : : : "memory"); - if (pen_release == cpu_logical_map(cpu)) { - /* - * OK, proper wakeup, we're done - */ - break; - } - } -} diff --git a/arch/arm/mach-ux500/platsmp.c b/arch/arm/mach-ux500/platsmp.c index 9b124c22035f4a..69c2361ca688c3 100644 --- a/arch/arm/mach-ux500/platsmp.c +++ b/arch/arm/mach-ux500/platsmp.c @@ -23,8 +23,6 @@ #include #include -#include "setup.h" - #include "db8500-regs.h" /* Magic triggers in backup RAM 
*/ @@ -90,6 +88,13 @@ static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle) return 0; } +#ifdef CONFIG_HOTPLUG_CPU +void ux500_cpu_die(unsigned int cpu) +{ + wfi(); +} +#endif + static const struct smp_operations ux500_smp_ops __initconst = { .smp_prepare_cpus = ux500_smp_prepare_cpus, .smp_boot_secondary = ux500_boot_secondary, diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h deleted file mode 100644 index 988e7c77068dc5..00000000000000 --- a/arch/arm/mach-ux500/setup.h +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Copyright (C) 2009 ST-Ericsson. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * These symbols are needed for board-specific files to call their - * own cpu-specific files - */ -#ifndef __ASM_ARCH_SETUP_H -#define __ASM_ARCH_SETUP_H - -extern void ux500_cpu_die(unsigned int cpu); - -#endif /* __ASM_ARCH_SETUP_H */ diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index 7d5f4c736a16b4..2c96190e018bd6 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -14,12 +14,13 @@ #include #include #include +#include #include #include #include #include #include -#include +#include #include #include diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index c2b5b9892fd17d..ff8b0aa2dfde88 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -16,7 +16,8 @@ #include #include #include -#include +#include +#include #include #include diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c index c1a48f88764ea8..3e511bec69b836 100644 --- a/arch/arm/mm/idmap.c +++ b/arch/arm/mm/idmap.c @@ -1,6 +1,7 @@ #include #include #include +#include #include #include diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index bf4d3bc41a7a85..1d8558ff9827ff 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -13,6 +13,8 @@ #include #include #include +#include +#include #include #include #include diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c index 66353caa35b9f7..2239fde10b808d 100644 --- a/arch/arm/mm/mmap.c +++ b/arch/arm/mm/mmap.c @@ -5,7 +5,8 @@ #include #include #include -#include +#include +#include #include #include #include diff --git a/arch/arm/nwfpe/fpmodule.c b/arch/arm/nwfpe/fpmodule.c index ec717c190e2c79..1365e865084370 100644 --- a/arch/arm/nwfpe/fpmodule.c +++ b/arch/arm/nwfpe/fpmodule.c @@ -31,7 +31,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c index a4ec240ee7ba38..b6dc9d838a9a39 100644 --- a/arch/arm/probes/kprobes/core.c +++ b/arch/arm/probes/kprobes/core.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/arm/probes/kprobes/test-core.c b/arch/arm/probes/kprobes/test-core.c index 9775de22e2ffa3..c893726aa52d8d 100644 --- a/arch/arm/probes/kprobes/test-core.c +++ b/arch/arm/probes/kprobes/test-core.c @@ -203,6 +203,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl index 3c2cb5d5adfa4f..0bb0e9c6376c4a 100644 --- a/arch/arm/tools/syscall.tbl +++ b/arch/arm/tools/syscall.tbl @@ -411,3 +411,4 @@ 394 common pkey_mprotect sys_pkey_mprotect 395 common pkey_alloc sys_pkey_alloc 396 common pkey_free sys_pkey_free +397 common statx sys_statx diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index 
569d5a650a4a2c..a71a48e71fffa8 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c index ce18c91b50a1cb..f0325d96b97aed 100644 --- a/arch/arm/xen/mm.c +++ b/arch/arm/xen/mm.c @@ -198,6 +198,8 @@ static const struct dma_map_ops xen_swiotlb_dma_ops = { .unmap_page = xen_swiotlb_unmap_page, .dma_supported = xen_swiotlb_dma_supported, .set_dma_mask = xen_swiotlb_set_dma_mask, + .mmap = xen_swiotlb_dma_mmap, + .get_sgtable = xen_swiotlb_get_sgtable, }; int __init xen_mm_init(void) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index a39029b5414eb2..3741859765cfe0 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -508,6 +508,16 @@ config QCOM_FALKOR_ERRATUM_1009 If unsure, say Y. +config QCOM_QDF2400_ERRATUM_0065 + bool "QDF2400 E0065: Incorrect GITS_TYPER.ITT_Entry_size" + default y + help + On Qualcomm Datacenter Technologies QDF2400 SoC, ITS hardware reports + ITE size incorrectly. The GITS_TYPER.ITT_Entry_size field should have + been indicated as 16Bytes (0xf), not 8Bytes (0x7). + + If unsure, say Y. + endmenu @@ -1063,6 +1073,10 @@ config SYSVIPC_COMPAT def_bool y depends on COMPAT && SYSVIPC +config KEYS_COMPAT + def_bool y + depends on COMPAT && KEYS + endmenu menu "Power management options" diff --git a/arch/arm64/boot/dts/amlogic/Makefile b/arch/arm64/boot/dts/amlogic/Makefile index 0d7bfbf7d922bb..3f94bce33b7f4a 100644 --- a/arch/arm64/boot/dts/amlogic/Makefile +++ b/arch/arm64/boot/dts/amlogic/Makefile @@ -5,12 +5,14 @@ dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-p201.dtb dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-vega-s95-pro.dtb dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-vega-s95-meta.dtb dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-vega-s95-telos.dtb +dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-wetek-hub.dtb +dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-wetek-play2.dtb dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905x-p212.dtb dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905d-p230.dtb dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905d-p231.dtb -dtb-$(CONFIG_ARCH_MESON) += meson-gxl-nexbox-a95x.dtb -dtb-$(CONFIG_ARCH_MESON) += meson-gxm-s912-q200.dtb -dtb-$(CONFIG_ARCH_MESON) += meson-gxm-s912-q201.dtb +dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905x-nexbox-a95x.dtb +dtb-$(CONFIG_ARCH_MESON) += meson-gxm-q200.dtb +dtb-$(CONFIG_ARCH_MESON) += meson-gxm-q201.dtb dtb-$(CONFIG_ARCH_MESON) += meson-gxm-nexbox-a1.dtb always := $(dtb-y) diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi index 0cbe24b49710fd..5d995f7724af67 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi @@ -83,6 +83,7 @@ reg = <0x0 0x0>; enable-method = "psci"; next-level-cache = <&l2>; + clocks = <&scpi_dvfs 0>; }; cpu1: cpu@1 { @@ -91,6 +92,7 @@ reg = <0x0 0x1>; enable-method = "psci"; next-level-cache = <&l2>; + clocks = <&scpi_dvfs 0>; }; cpu2: cpu@2 { @@ -99,6 +101,7 @@ reg = <0x0 0x2>; enable-method = "psci"; next-level-cache = <&l2>; + clocks = <&scpi_dvfs 0>; }; cpu3: cpu@3 { @@ -107,6 +110,7 @@ reg = <0x0 0x3>; enable-method = "psci"; next-level-cache = <&l2>; + clocks = <&scpi_dvfs 0>; }; l2: l2-cache0 { @@ -171,6 +175,28 @@ }; }; + scpi { + compatible = "amlogic,meson-gxbb-scpi", "arm,scpi-pre-1.0"; + mboxes = <&mailbox 1 &mailbox 2>; + shmem = <&cpu_scp_lpri &cpu_scp_hpri>; + + scpi_clocks: clocks { + compatible = "arm,scpi-clocks"; + + scpi_dvfs: scpi_clocks@0 { + compatible = 
"arm,scpi-dvfs-clocks"; + #clock-cells = <1>; + clock-indices = <0>; + clock-output-names = "vcpu"; + }; + }; + + scpi_sensors: sensors { + compatible = "arm,scpi-sensors"; + #thermal-sensor-cells = <1>; + }; + }; + soc { compatible = "simple-bus"; #address-cells = <2>; @@ -229,6 +255,14 @@ status = "disabled"; }; + saradc: adc@8680 { + compatible = "amlogic,meson-saradc"; + reg = <0x0 0x8680 0x0 0x34>; + #io-channel-cells = <1>; + interrupts = ; + status = "disabled"; + }; + pwm_ef: pwm@86c0 { compatible = "amlogic,meson-gx-pwm", "amlogic,meson-gxbb-pwm"; reg = <0x0 0x086c0 0x0 0x10>; @@ -282,6 +316,25 @@ #address-cells = <0>; }; + sram: sram@c8000000 { + compatible = "amlogic,meson-gxbb-sram", "mmio-sram"; + reg = <0x0 0xc8000000 0x0 0x14000>; + + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0x0 0xc8000000 0x14000>; + + cpu_scp_lpri: scp-shmem@0 { + compatible = "amlogic,meson-gxbb-scp-shmem"; + reg = <0x13000 0x400>; + }; + + cpu_scp_hpri: scp-shmem@200 { + compatible = "amlogic,meson-gxbb-scp-shmem"; + reg = <0x13400 0x400>; + }; + }; + aobus: aobus@c8100000 { compatible = "simple-bus"; reg = <0x0 0xc8100000 0x0 0x100000>; @@ -297,6 +350,21 @@ status = "disabled"; }; + uart_AO_B: serial@4e0 { + compatible = "amlogic,meson-uart"; + reg = <0x0 0x004e0 0x0 0x14>; + interrupts = ; + clocks = <&xtal>; + status = "disabled"; + }; + + pwm_AO_ab: pwm@550 { + compatible = "amlogic,meson-gx-pwm", "amlogic,meson-gxbb-pwm"; + reg = <0x0 0x00550 0x0 0x10>; + #pwm-cells = <3>; + status = "disabled"; + }; + ir: ir@580 { compatible = "amlogic,meson-gxbb-ir"; reg = <0x0 0x00580 0x0 0x40>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-p200.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-p200.dts index 03e3d76626ddc4..fc0e86cb4cdedc 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-p200.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-p200.dts @@ -45,10 +45,55 @@ /dts-v1/; #include "meson-gxbb-p20x.dtsi" +#include / { compatible = "amlogic,p200", "amlogic,meson-gxbb"; model = "Amlogic Meson GXBB P200 Development Board"; + + avdd18_usb_adc: regulator-avdd18_usb_adc { + compatible = "regulator-fixed"; + regulator-name = "AVDD18_USB_ADC"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; + + adc_keys { + compatible = "adc-keys"; + io-channels = <&saradc 0>; + io-channel-names = "buttons"; + keyup-threshold-microvolt = <1800000>; + + button-home { + label = "Home"; + linux,code = ; + press-threshold-microvolt = <900000>; /* 50% */ + }; + + button-esc { + label = "Esc"; + linux,code = ; + press-threshold-microvolt = <684000>; /* 38% */ + }; + + button-up { + label = "Volume Up"; + linux,code = ; + press-threshold-microvolt = <468000>; /* 26% */ + }; + + button-down { + label = "Volume Down"; + linux,code = ; + press-threshold-microvolt = <252000>; /* 14% */ + }; + + button-menu { + label = "Menu"; + linux,code = ; + press-threshold-microvolt = <0>; /* 0% */ + }; + }; }; &i2c_B { @@ -56,3 +101,8 @@ pinctrl-0 = <&i2c_b_pins>; pinctrl-names = "default"; }; + +&saradc { + status = "okay"; + vref-supply = <&avdd18_usb_adc>; +}; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi index e59ad308192f62..86709929fd208c 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi @@ -53,6 +53,17 @@ stdout-path = "serial0:115200n8"; }; + leds { + compatible = "gpio-leds"; + + blue { + label = "vega-s95:blue:on"; + gpios = <&gpio_ao 
GPIOAO_13 GPIO_ACTIVE_HIGH>; + default-state = "on"; + panic-indicator; + }; + }; + usb_vbus: regulator-usb0-vbus { compatible = "regulator-fixed"; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-hub.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-hub.dts new file mode 100644 index 00000000000000..56f855901262c3 --- /dev/null +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-hub.dts @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2016 BayLibre, Inc. + * Author: Neil Armstrong + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +/dts-v1/; + +#include "meson-gxbb-p20x.dtsi" + +/ { + compatible = "wetek,hub", "amlogic,meson-gxbb"; + model = "WeTek Hub"; + + leds { + compatible = "gpio-leds"; + + system { + label = "wetek-play:system-status"; + gpios = <&gpio_ao GPIOAO_13 GPIO_ACTIVE_HIGH>; + default-state = "on"; + panic-indicator; + }; + }; + + cvbs-connector { + status = "disabled"; + }; +}; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts new file mode 100644 index 00000000000000..ea79fdd2c248a3 --- /dev/null +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2016 BayLibre, Inc. + * Author: Neil Armstrong + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. 
+ * + * a) This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +/dts-v1/; + +#include "meson-gxbb-p20x.dtsi" +#include + +/ { + compatible = "wetek,play2", "amlogic,meson-gxbb"; + model = "WeTek Play 2"; + + leds { + compatible = "gpio-leds"; + + system { + label = "wetek-play:system-status"; + gpios = <&gpio_ao GPIOAO_13 GPIO_ACTIVE_HIGH>; + default-state = "on"; + panic-indicator; + }; + + wifi { + label = "wetek-play:wifi-status"; + gpios = <&gpio GPIODV_26 GPIO_ACTIVE_HIGH>; + default-state = "off"; + }; + + ethernet { + label = "wetek-play:ethernet-status"; + gpios = <&gpio GPIODV_27 GPIO_ACTIVE_HIGH>; + default-state = "off"; + }; + }; + + gpio-keys-polled { + compatible = "gpio-keys-polled"; + #address-cells = <1>; + #size-cells = <0>; + poll-interval = <100>; + + button@0 { + label = "reset"; + linux,code = ; + gpios = <&gpio_ao GPIOAO_3 GPIO_ACTIVE_LOW>; + }; + }; +}; + +&i2c_A { + status = "okay"; + pinctrl-0 = <&i2c_a_pins>; + pinctrl-names = "default"; +}; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi index b35307321b6398..04b3324bc1329d 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi @@ -50,28 +50,6 @@ / { compatible = "amlogic,meson-gxbb"; - scpi { - compatible = "amlogic,meson-gxbb-scpi", "arm,scpi-pre-1.0"; - mboxes = <&mailbox 1 &mailbox 2>; - shmem = <&cpu_scp_lpri &cpu_scp_hpri>; - - scpi_clocks: clocks { - compatible = "arm,scpi-clocks"; - - scpi_dvfs: scpi_clocks@0 { - compatible = "arm,scpi-dvfs-clocks"; - #clock-cells = <1>; - clock-indices = <0>; - clock-output-names = "vcpu"; - }; - }; - - scpi_sensors: sensors { - compatible = "arm,scpi-sensors"; - #thermal-sensor-cells = <1>; - }; - }; - soc { usb0_phy: phy@c0000000 { compatible = "amlogic,meson-gxbb-usb2-phy"; @@ -93,25 +71,6 @@ status = "disabled"; }; - sram: sram@c8000000 { - compatible = "amlogic,meson-gxbb-sram", "mmio-sram"; - 
reg = <0x0 0xc8000000 0x0 0x14000>; - - #address-cells = <1>; - #size-cells = <1>; - ranges = <0 0x0 0xc8000000 0x14000>; - - cpu_scp_lpri: scp-shmem@0 { - compatible = "amlogic,meson-gxbb-scp-shmem"; - reg = <0x13000 0x400>; - }; - - cpu_scp_hpri: scp-shmem@200 { - compatible = "amlogic,meson-gxbb-scp-shmem"; - reg = <0x13400 0x400>; - }; - }; - usb0: usb@c9000000 { compatible = "amlogic,meson-gxbb-usb", "snps,dwc2"; reg = <0x0 0xc9000000 0x0 0x40000>; @@ -138,22 +97,6 @@ }; }; -&cpu0 { - clocks = <&scpi_dvfs 0>; -}; - -&cpu1 { - clocks = <&scpi_dvfs 0>; -}; - -&cpu2 { - clocks = <&scpi_dvfs 0>; -}; - -&cpu3 { - clocks = <&scpi_dvfs 0>; -}; - &cbus { spifc: spi@8c80 { compatible = "amlogic,meson-gxbb-spifc"; @@ -195,6 +138,29 @@ }; }; + uart_ao_a_cts_rts_pins: uart_ao_a_cts_rts { + mux { + groups = "uart_cts_ao_a", + "uart_rts_ao_a"; + function = "uart_ao"; + }; + }; + + uart_ao_b_pins: uart_ao_b { + mux { + groups = "uart_tx_ao_b", "uart_rx_ao_b"; + function = "uart_ao_b"; + }; + }; + + uart_ao_b_cts_rts_pins: uart_ao_b_cts_rts { + mux { + groups = "uart_cts_ao_b", + "uart_rts_ao_b"; + function = "uart_ao_b"; + }; + }; + remote_input_ao_pins: remote_input_ao { mux { groups = "remote_input_ao"; @@ -340,6 +306,14 @@ }; }; + uart_a_cts_rts_pins: uart_a_cts_rts { + mux { + groups = "uart_cts_a", + "uart_rts_a"; + function = "uart_a"; + }; + }; + uart_b_pins: uart_b { mux { groups = "uart_tx_b", @@ -348,6 +322,14 @@ }; }; + uart_b_cts_rts_pins: uart_b_cts_rts { + mux { + groups = "uart_cts_b", + "uart_rts_b"; + function = "uart_b"; + }; + }; + uart_c_pins: uart_c { mux { groups = "uart_tx_c", @@ -356,6 +338,14 @@ }; }; + uart_c_cts_rts_pins: uart_c_cts_rts { + mux { + groups = "uart_cts_c", + "uart_rts_c"; + function = "uart_c"; + }; + }; + i2c_a_pins: i2c_a { mux { groups = "i2c_sck_a", @@ -463,6 +453,20 @@ function = "pwm_f_y"; }; }; + + hdmi_hpd_pins: hdmi_hpd { + mux { + groups = "hdmi_hpd"; + function = "hdmi_hpd"; + }; + }; + + hdmi_i2c_pins: hdmi_i2c { + mux { + groups = "hdmi_sda", "hdmi_scl"; + function = "hdmi_i2c"; + }; + }; }; }; @@ -486,6 +490,16 @@ clocks = <&clkc CLKID_I2C>; }; +&saradc { + compatible = "amlogic,meson-gxbb-saradc", "amlogic,meson-saradc"; + clocks = <&xtal>, + <&clkc CLKID_SAR_ADC>, + <&clkc CLKID_SANA>, + <&clkc CLKID_SAR_ADC_CLK>, + <&clkc CLKID_SAR_ADC_SEL>; + clock-names = "clkin", "core", "sana", "adc_clk", "adc_sel"; +}; + &sd_emmc_a { clocks = <&clkc CLKID_SD_EMMC_A>, <&xtal>, diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts similarity index 100% rename from arch/arm64/boot/dts/amlogic/meson-gxl-nexbox-a95x.dts rename to arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi index 69216246275dfa..fe11b5fc61f78e 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi @@ -88,12 +88,42 @@ }; }; + uart_ao_a_cts_rts_pins: uart_ao_a_cts_rts { + mux { + groups = "uart_cts_ao_a", + "uart_rts_ao_a"; + function = "uart_ao"; + }; + }; + + uart_ao_b_pins: uart_ao_b { + mux { + groups = "uart_tx_ao_b", "uart_rx_ao_b"; + function = "uart_ao_b"; + }; + }; + + uart_ao_b_cts_rts_pins: uart_ao_b_cts_rts { + mux { + groups = "uart_cts_ao_b", + "uart_rts_ao_b"; + function = "uart_ao_b"; + }; + }; + remote_input_ao_pins: remote_input_ao { mux { groups = "remote_input_ao"; function = "remote_input_ao"; }; }; + + pwm_ao_b_pins: pwm_ao_b { + mux { 
+ groups = "pwm_ao_b"; + function = "pwm_ao_b"; + }; + }; }; }; @@ -163,6 +193,14 @@ }; }; + uart_a_cts_rts_pins: uart_a_cts_rts { + mux { + groups = "uart_cts_a", + "uart_rts_a"; + function = "uart_a"; + }; + }; + uart_b_pins: uart_b { mux { groups = "uart_tx_b", @@ -171,6 +209,14 @@ }; }; + uart_b_cts_rts_pins: uart_b_cts_rts { + mux { + groups = "uart_cts_b", + "uart_rts_b"; + function = "uart_b"; + }; + }; + uart_c_pins: uart_c { mux { groups = "uart_tx_c", @@ -179,6 +225,14 @@ }; }; + uart_c_cts_rts_pins: uart_c_cts_rts { + mux { + groups = "uart_cts_c", + "uart_rts_c"; + function = "uart_c"; + }; + }; + i2c_a_pins: i2c_a { mux { groups = "i2c_sck_a", @@ -229,6 +283,20 @@ function = "pwm_e"; }; }; + + hdmi_hpd_pins: hdmi_hpd { + mux { + groups = "hdmi_hpd"; + function = "hdmi_hpd"; + }; + }; + + hdmi_i2c_pins: hdmi_i2c { + mux { + groups = "hdmi_sda", "hdmi_scl"; + function = "hdmi_i2c"; + }; + }; }; eth-phy-mux { @@ -279,6 +347,16 @@ clocks = <&clkc CLKID_I2C>; }; +&saradc { + compatible = "amlogic,meson-gxl-saradc", "amlogic,meson-saradc"; + clocks = <&xtal>, + <&clkc CLKID_SAR_ADC>, + <&clkc CLKID_SANA>, + <&clkc CLKID_SAR_ADC_CLK>, + <&clkc CLKID_SAR_ADC_SEL>; + clock-names = "clkin", "core", "sana", "adc_clk", "adc_sel"; +}; + &sd_emmc_a { clocks = <&clkc CLKID_SD_EMMC_A>, <&xtal>, diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-s912-q200.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-q200.dts similarity index 100% rename from arch/arm64/boot/dts/amlogic/meson-gxm-s912-q200.dts rename to arch/arm64/boot/dts/amlogic/meson-gxm-q200.dts diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-s912-q201.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-q201.dts similarity index 100% rename from arch/arm64/boot/dts/amlogic/meson-gxm-s912-q201.dts rename to arch/arm64/boot/dts/amlogic/meson-gxm-q201.dts diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi index eb2f0c3e5e538e..ddea7305c644af 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi @@ -85,6 +85,7 @@ reg = <0x0 0x100>; enable-method = "psci"; next-level-cache = <&l2>; + clocks = <&scpi_dvfs 1>; }; cpu5: cpu@101 { @@ -93,6 +94,7 @@ reg = <0x0 0x101>; enable-method = "psci"; next-level-cache = <&l2>; + clocks = <&scpi_dvfs 1>; }; cpu6: cpu@102 { @@ -101,6 +103,7 @@ reg = <0x0 0x102>; enable-method = "psci"; next-level-cache = <&l2>; + clocks = <&scpi_dvfs 1>; }; cpu7: cpu@103 { @@ -109,10 +112,21 @@ reg = <0x0 0x103>; enable-method = "psci"; next-level-cache = <&l2>; + clocks = <&scpi_dvfs 1>; }; }; }; +&saradc { + compatible = "amlogic,meson-gxm-saradc", "amlogic,meson-saradc"; +}; + +&scpi_dvfs { + clock-indices = <0 1>; + clock-output-names = "vbig", "vlittle"; +}; + &vpu { compatible = "amlogic,meson-gxm-vpu", "amlogic,meson-gx-vpu"; }; + diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi index 9d799d938d2f6b..df539e865b903c 100644 --- a/arch/arm64/boot/dts/arm/juno-base.dtsi +++ b/arch/arm64/boot/dts/arm/juno-base.dtsi @@ -372,12 +372,13 @@ }; }; - coresight-replicator { - /* - * Non-configurable replicators don't show up on the - * AMBA bus. As such no need to add "arm,primecell". 
- */ - compatible = "arm,coresight-replicator"; + replicator@20120000 { + compatible = "qcom,coresight-replicator1x", "arm,primecell"; + reg = <0 0x20120000 0 0x1000>; + + clocks = <&soc_smc50mhz>; + clock-names = "apb_pclk"; + power-domains = <&scpi_devpd 0>; ports { #address-cells = <1>; diff --git a/arch/arm64/boot/dts/broadcom/ns2.dtsi b/arch/arm64/boot/dts/broadcom/ns2.dtsi index 9f9e203c09c5ad..bcb03fc3266552 100644 --- a/arch/arm64/boot/dts/broadcom/ns2.dtsi +++ b/arch/arm64/boot/dts/broadcom/ns2.dtsi @@ -114,6 +114,7 @@ pcie0: pcie@20020000 { compatible = "brcm,iproc-pcie"; reg = <0 0x20020000 0 0x1000>; + dma-coherent; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; @@ -144,6 +145,7 @@ pcie4: pcie@50020000 { compatible = "brcm,iproc-pcie"; reg = <0 0x50020000 0 0x1000>; + dma-coherent; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; @@ -174,6 +176,7 @@ pcie8: pcie@60c00000 { compatible = "brcm,iproc-pcie-paxc"; reg = <0 0x60c00000 0 0x1000>; + dma-coherent; linux,pci-domain = <8>; bus-range = <0x0 0x1>; @@ -203,6 +206,7 @@ <0x61030000 0x100>; reg-names = "amac_base", "idm_base", "nicpm_base"; interrupts = ; + dma-coherent; phy-handle = <&gphy0>; phy-mode = "rgmii"; status = "disabled"; @@ -213,6 +217,7 @@ reg = <0x612c0000 0x445>; /* PDC FS0 regs */ interrupts = ; #mbox-cells = <1>; + dma-coherent; brcm,rx-status-len = <32>; brcm,use-bcm-hdr; }; @@ -222,6 +227,7 @@ reg = <0x612e0000 0x445>; /* PDC FS1 regs */ interrupts = ; #mbox-cells = <1>; + dma-coherent; brcm,rx-status-len = <32>; brcm,use-bcm-hdr; }; @@ -231,6 +237,7 @@ reg = <0x61300000 0x445>; /* PDC FS2 regs */ interrupts = ; #mbox-cells = <1>; + dma-coherent; brcm,rx-status-len = <32>; brcm,use-bcm-hdr; }; @@ -240,6 +247,7 @@ reg = <0x61320000 0x445>; /* PDC FS3 regs */ interrupts = ; #mbox-cells = <1>; + dma-coherent; brcm,rx-status-len = <32>; brcm,use-bcm-hdr; }; @@ -644,6 +652,7 @@ sata: ahci@663f2000 { compatible = "brcm,iproc-ahci", "generic-ahci"; reg = <0x663f2000 0x1000>; + dma-coherent; reg-names = "ahci"; interrupts = ; #address-cells = <1>; @@ -667,6 +676,7 @@ compatible = "brcm,sdhci-iproc-cygnus"; reg = <0x66420000 0x100>; interrupts = ; + dma-coherent; bus-width = <8>; clocks = <&genpll_sw BCM_NS2_GENPLL_SW_SDIO_CLK>; status = "disabled"; @@ -676,6 +686,7 @@ compatible = "brcm,sdhci-iproc-cygnus"; reg = <0x66430000 0x100>; interrupts = ; + dma-coherent; bus-width = <8>; clocks = <&genpll_sw BCM_NS2_GENPLL_SW_SDIO_CLK>; status = "disabled"; diff --git a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi index 53fd0683d4001b..098ad557fee325 100644 --- a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi +++ b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi @@ -217,18 +217,6 @@ assigned-clock-parents = <&cmu_top CLK_FOUT_AUD_PLL>; }; -&cmu_disp { - assigned-clocks = <&cmu_mif CLK_MOUT_SCLK_DECON_TV_ECLK_A>, - <&cmu_mif CLK_DIV_SCLK_DECON_TV_ECLK>, - <&cmu_disp CLK_MOUT_SCLK_DECON_TV_ECLK_USER>, - <&cmu_disp CLK_MOUT_SCLK_DECON_TV_ECLK>; - assigned-clock-parents = <&cmu_mif CLK_MOUT_BUS_PLL_DIV2>, - <0>, - <&cmu_mif CLK_SCLK_DECON_TV_ECLK_DISP>, - <&cmu_disp CLK_MOUT_SCLK_DECON_TV_ECLK_USER>; - assigned-clock-rates = <0>, <400000000>; -}; - &cmu_fsys { assigned-clocks = <&cmu_top CLK_MOUT_SCLK_USBDRD30>, <&cmu_top CLK_MOUT_SCLK_USBHOST30>, diff --git a/arch/arm64/boot/dts/exynos/exynos5433-tm2.dts b/arch/arm64/boot/dts/exynos/exynos5433-tm2.dts index ddba2f889326b1..dea0a6f5bc18f0 100644 --- 
a/arch/arm64/boot/dts/exynos/exynos5433-tm2.dts +++ b/arch/arm64/boot/dts/exynos/exynos5433-tm2.dts @@ -18,6 +18,40 @@ compatible = "samsung,tm2", "samsung,exynos5433"; }; +&cmu_disp { + /* + * TM2 and TM2e differ only by DISP_PLL rate, but define all assigned + * clocks properties for DISP CMU for each board to keep them together + * for easier review and maintenance. + */ + assigned-clocks = <&cmu_disp CLK_FOUT_DISP_PLL>, + <&cmu_mif CLK_DIV_SCLK_DECON_TV_ECLK>, + <&cmu_disp CLK_MOUT_ACLK_DISP_333_USER>, + <&cmu_disp CLK_MOUT_SCLK_DSIM0_USER>, + <&cmu_disp CLK_MOUT_SCLK_DSIM0>, + <&cmu_disp CLK_MOUT_SCLK_DECON_ECLK_USER>, + <&cmu_disp CLK_MOUT_SCLK_DECON_ECLK>, + <&cmu_disp CLK_MOUT_PHYCLK_MIPIDPHY0_RXCLKESC0_USER>, + <&cmu_disp CLK_MOUT_PHYCLK_MIPIDPHY0_BITCLKDIV8_USER>, + <&cmu_disp CLK_MOUT_DISP_PLL>, + <&cmu_mif CLK_MOUT_SCLK_DECON_TV_ECLK_A>, + <&cmu_disp CLK_MOUT_SCLK_DECON_TV_ECLK_USER>, + <&cmu_disp CLK_MOUT_SCLK_DECON_TV_ECLK>; + assigned-clock-parents = <0>, <0>, + <&cmu_mif CLK_ACLK_DISP_333>, + <&cmu_mif CLK_SCLK_DSIM0_DISP>, + <&cmu_disp CLK_MOUT_SCLK_DSIM0_USER>, + <&cmu_mif CLK_SCLK_DECON_ECLK_DISP>, + <&cmu_disp CLK_MOUT_SCLK_DECON_ECLK_USER>, + <&cmu_disp CLK_PHYCLK_MIPIDPHY0_RXCLKESC0_PHY>, + <&cmu_disp CLK_PHYCLK_MIPIDPHY0_BITCLKDIV8_PHY>, + <&cmu_disp CLK_FOUT_DISP_PLL>, + <&cmu_mif CLK_MOUT_BUS_PLL_DIV2>, + <&cmu_mif CLK_SCLK_DECON_TV_ECLK_DISP>, + <&cmu_disp CLK_MOUT_SCLK_DECON_TV_ECLK_USER>; + assigned-clock-rates = <250000000>, <400000000>; +}; + &hsi2c_9 { status = "okay"; diff --git a/arch/arm64/boot/dts/exynos/exynos5433-tm2e.dts b/arch/arm64/boot/dts/exynos/exynos5433-tm2e.dts index 2fbf3a86031689..7891a31adc1759 100644 --- a/arch/arm64/boot/dts/exynos/exynos5433-tm2e.dts +++ b/arch/arm64/boot/dts/exynos/exynos5433-tm2e.dts @@ -18,6 +18,40 @@ compatible = "samsung,tm2e", "samsung,exynos5433"; }; +&cmu_disp { + /* + * TM2 and TM2e differ only by DISP_PLL rate, but define all assigned + * clocks properties for DISP CMU for each board to keep them together + * for easier review and maintenance. 
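The three lists in these &cmu_disp overrides are matched entry by entry: a <0> in assigned-clock-parents leaves that clock's current parent untouched, and a rates list shorter than assigned-clocks applies only to the leading entries, so only DISP_PLL (250 MHz on TM2, 278 MHz on TM2e) and the DECON TV ECLK divider (400 MHz) get explicit rates while the remaining entries are only reparented. A hedged sketch of the idiom, with made-up clock and node names rather than anything taken from these hunks:

	&example_cmu {
		/* entry 0: rate only; entry 1: parent only */
		assigned-clocks = <&cmu CLK_FOUT_PLL>, <&cmu CLK_MOUT_USER>;
		/* <0> = keep the current parent for that entry */
		assigned-clock-parents = <0>, <&cmu CLK_BUS_PLL>;
		/* a shorter rates list only covers the leading entries */
		assigned-clock-rates = <250000000>;
	};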
+ */ + assigned-clocks = <&cmu_disp CLK_FOUT_DISP_PLL>, + <&cmu_mif CLK_DIV_SCLK_DECON_TV_ECLK>, + <&cmu_disp CLK_MOUT_ACLK_DISP_333_USER>, + <&cmu_disp CLK_MOUT_SCLK_DSIM0_USER>, + <&cmu_disp CLK_MOUT_SCLK_DSIM0>, + <&cmu_disp CLK_MOUT_SCLK_DECON_ECLK_USER>, + <&cmu_disp CLK_MOUT_SCLK_DECON_ECLK>, + <&cmu_disp CLK_MOUT_PHYCLK_MIPIDPHY0_RXCLKESC0_USER>, + <&cmu_disp CLK_MOUT_PHYCLK_MIPIDPHY0_BITCLKDIV8_USER>, + <&cmu_disp CLK_MOUT_DISP_PLL>, + <&cmu_mif CLK_MOUT_SCLK_DECON_TV_ECLK_A>, + <&cmu_disp CLK_MOUT_SCLK_DECON_TV_ECLK_USER>, + <&cmu_disp CLK_MOUT_SCLK_DECON_TV_ECLK>; + assigned-clock-parents = <0>, <0>, + <&cmu_mif CLK_ACLK_DISP_333>, + <&cmu_mif CLK_SCLK_DSIM0_DISP>, + <&cmu_disp CLK_MOUT_SCLK_DSIM0_USER>, + <&cmu_mif CLK_SCLK_DECON_ECLK_DISP>, + <&cmu_disp CLK_MOUT_SCLK_DECON_ECLK_USER>, + <&cmu_disp CLK_PHYCLK_MIPIDPHY0_RXCLKESC0_PHY>, + <&cmu_disp CLK_PHYCLK_MIPIDPHY0_BITCLKDIV8_PHY>, + <&cmu_disp CLK_FOUT_DISP_PLL>, + <&cmu_mif CLK_MOUT_BUS_PLL_DIV2>, + <&cmu_mif CLK_SCLK_DECON_TV_ECLK_DISP>, + <&cmu_disp CLK_MOUT_SCLK_DECON_TV_ECLK_USER>; + assigned-clock-rates = <278000000>, <400000000>; +}; + &ldo31_reg { regulator-name = "TSP_VDD_1.8V_AP"; regulator-min-microvolt = <1800000>; diff --git a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts index c528dd52ba2d39..e5892bb0ae6e55 100644 --- a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts +++ b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts @@ -13,6 +13,7 @@ #include "exynos7.dtsi" #include #include +#include / { model = "Samsung Exynos7 Espresso board based on EXYNOS7"; @@ -32,6 +33,29 @@ device_type = "memory"; reg = <0x0 0x40000000 0x0 0xC0000000>; }; + + usb30_vbus_reg: regulator-usb30 { + compatible = "regulator-fixed"; + regulator-name = "VBUS_5V"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + gpio = <&gph1 1 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&usb30_vbus_en>; + enable-active-high; + }; + + usb3drd_boost_5v: regulator-usb3drd-boost { + compatible = "regulator-fixed"; + regulator-name = "VUSB_VBUS_5V"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + gpio = <&gpf4 1 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&usb3drd_boost_en>; + enable-active-high; + }; + }; &fin_pll { @@ -328,8 +352,8 @@ &pinctrl_alive { pmic_irq: pmic-irq { samsung,pins = "gpa0-2"; - samsung,pin-pud = <3>; - samsung,pin-drv = <3>; + samsung,pin-pud = ; + samsung,pin-drv = ; }; }; @@ -365,3 +389,24 @@ vqmmc-supply = <&ldo2_reg>; disable-wp; }; + +&pinctrl_bus1 { + usb30_vbus_en: usb30-vbus-en { + samsung,pins = "gph1-1"; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; + }; + + usb3drd_boost_en: usb3drd-boost-en { + samsung,pins = "gpf4-1"; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; + }; +}; + +&usbdrd_phy { + vbus-supply = <&usb30_vbus_reg>; + vbus-boost-supply = <&usb3drd_boost_5v>; +}; diff --git a/arch/arm64/boot/dts/exynos/exynos7-pinctrl.dtsi b/arch/arm64/boot/dts/exynos/exynos7-pinctrl.dtsi index 7ebb93927f136a..8f58850cd28cdd 100644 --- a/arch/arm64/boot/dts/exynos/exynos7-pinctrl.dtsi +++ b/arch/arm64/boot/dts/exynos/exynos7-pinctrl.dtsi @@ -12,6 +12,8 @@ * published by the Free Software Foundation. 
*/ +#include + &pinctrl_alive { gpa0: gpa0 { gpio-controller; @@ -187,163 +189,163 @@ hs_i2c10_bus: hs-i2c10-bus { samsung,pins = "gpb0-1", "gpb0-0"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; hs_i2c11_bus: hs-i2c11-bus { samsung,pins = "gpb0-3", "gpb0-2"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; hs_i2c2_bus: hs-i2c2-bus { samsung,pins = "gpd0-3", "gpd0-2"; - samsung,pin-function = <3>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; uart0_data: uart0-data { samsung,pins = "gpd0-0", "gpd0-1"; - samsung,pin-function = <2>; - samsung,pin-pud = <0>; - samsung,pin-drv = <0>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; uart0_fctl: uart0-fctl { samsung,pins = "gpd0-2", "gpd0-3"; - samsung,pin-function = <2>; - samsung,pin-pud = <0>; - samsung,pin-drv = <0>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; uart2_data: uart2-data { samsung,pins = "gpd1-4", "gpd1-5"; - samsung,pin-function = <2>; - samsung,pin-pud = <0>; - samsung,pin-drv = <0>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; hs_i2c3_bus: hs-i2c3-bus { samsung,pins = "gpd1-3", "gpd1-2"; - samsung,pin-function = <3>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; uart1_data: uart1-data { samsung,pins = "gpd1-0", "gpd1-1"; - samsung,pin-function = <2>; - samsung,pin-pud = <0>; - samsung,pin-drv = <0>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; uart1_fctl: uart1-fctl { samsung,pins = "gpd1-2", "gpd1-3"; - samsung,pin-function = <2>; - samsung,pin-pud = <0>; - samsung,pin-drv = <0>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; hs_i2c0_bus: hs-i2c0-bus { samsung,pins = "gpd2-1", "gpd2-0"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; hs_i2c1_bus: hs-i2c1-bus { samsung,pins = "gpd2-3", "gpd2-2"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; hs_i2c9_bus: hs-i2c9-bus { samsung,pins = "gpd2-7", "gpd2-6"; - samsung,pin-function = <3>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; pwm0_out: pwm0-out { samsung,pins = "gpd2-4"; - samsung,pin-function = <2>; - samsung,pin-pud = <0>; - samsung,pin-drv = <0>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; pwm1_out: pwm1-out { samsung,pins = "gpd2-5"; - samsung,pin-function = <2>; - samsung,pin-pud = <0>; - samsung,pin-drv = <0>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; pwm2_out: pwm2-out { samsung,pins = "gpd2-6"; - samsung,pin-function = <2>; - samsung,pin-pud = <0>; - samsung,pin-drv = <0>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; pwm3_out: pwm3-out { samsung,pins = "gpd2-7"; - samsung,pin-function = <2>; - samsung,pin-pud = <0>; - samsung,pin-drv = <0>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; hs_i2c8_bus: hs-i2c8-bus { samsung,pins = "gpd5-3", "gpd5-2"; - 
samsung,pin-function = <3>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; uart3_data: uart3-data { samsung,pins = "gpd5-0", "gpd5-1"; - samsung,pin-function = <3>; - samsung,pin-pud = <0>; - samsung,pin-drv = <0>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; spi2_bus: spi2-bus { samsung,pins = "gpd5-0", "gpd5-1", "gpd5-2", "gpd5-3"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; spi1_bus: spi1-bus { samsung,pins = "gpd6-2", "gpd6-3", "gpd6-4", "gpd6-5"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; spi0_bus: spi0-bus { samsung,pins = "gpd8-0", "gpd8-1", "gpd6-0", "gpd6-1"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; hs_i2c4_bus: hs-i2c4-bus { samsung,pins = "gpg3-1", "gpg3-0"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; hs_i2c5_bus: hs-i2c5-bus { samsung,pins = "gpg3-3", "gpg3-2"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; }; @@ -358,9 +360,9 @@ hs_i2c6_bus: hs-i2c6-bus { samsung,pins = "gpj0-1", "gpj0-0"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; }; @@ -375,9 +377,9 @@ hs_i2c7_bus: hs-i2c7-bus { samsung,pins = "gpj1-1", "gpj1-0"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; }; @@ -392,9 +394,9 @@ spi3_bus: spi3-bus { samsung,pins = "gpg4-0", "gpg4-1", "gpg4-2", "gpg4-3"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; }; @@ -409,9 +411,9 @@ spi4_bus: spi4-bus { samsung,pins = "gpv7-0", "gpv7-1", "gpv7-2", "gpv7-3"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; }; @@ -426,37 +428,37 @@ sd2_clk: sd2-clk { samsung,pins = "gpr4-0"; - samsung,pin-function = <2>; - samsung,pin-pud = <0>; - samsung,pin-drv = <3>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; sd2_cmd: sd2-cmd { samsung,pins = "gpr4-1"; - samsung,pin-function = <2>; - samsung,pin-pud = <0>; - samsung,pin-drv = <3>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; sd2_cd: sd2-cd { samsung,pins = "gpr4-2"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <3>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; sd2_bus1: sd2-bus-width1 { samsung,pins = "gpr4-3"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <3>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; sd2_bus4: sd2-bus-width4 { samsung,pins = "gpr4-4", "gpr4-5", "gpr4-6"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <3>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; }; @@ -495,107 +497,107 @@ 
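The pin-configuration rewrites in this file swap raw numbers for named constants pulled in by the new include at the top of exynos7-pinctrl.dtsi (presumably the shared Samsung pinctrl dt-bindings header): samsung,pin-function selects the mux function, samsung,pin-pud the pull resistor, and samsung,pin-drv the drive strength. A sketch of the converted form; the node and the macro names below are assumptions for illustration, not values taken from these hunks:

	example_i2c_bus: example-i2c-bus {
		samsung,pins = "gpd2-1", "gpd2-0";
		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;	/* was <2> */
		samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;		/* was <3> */
		/* drive-strength macros are SoC-specific; name assumed */
		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;	/* was <0> */
	};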
sd0_clk: sd0-clk { samsung,pins = "gpr0-0"; - samsung,pin-function = <2>; - samsung,pin-pud = <0>; - samsung,pin-drv = <4>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; sd0_cmd: sd0-cmd { samsung,pins = "gpr0-1"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <4>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; sd0_ds: sd0-ds { samsung,pins = "gpr0-2"; - samsung,pin-function = <2>; - samsung,pin-pud = <1>; - samsung,pin-drv = <4>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; sd0_qrdy: sd0-qrdy { samsung,pins = "gpr0-3"; - samsung,pin-function = <2>; - samsung,pin-pud = <1>; - samsung,pin-drv = <4>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; sd0_bus1: sd0-bus-width1 { samsung,pins = "gpr1-0"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <4>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; sd0_bus4: sd0-bus-width4 { samsung,pins = "gpr1-1", "gpr1-2", "gpr1-3"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <4>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; sd0_bus8: sd0-bus-width8 { samsung,pins = "gpr1-4", "gpr1-5", "gpr1-6", "gpr1-7"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <4>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; sd1_clk: sd1-clk { samsung,pins = "gpr2-0"; - samsung,pin-function = <2>; - samsung,pin-pud = <0>; - samsung,pin-drv = <2>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; sd1_cmd: sd1-cmd { samsung,pins = "gpr2-1"; - samsung,pin-function = <2>; - samsung,pin-pud = <0>; - samsung,pin-drv = <2>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; sd1_ds: sd1-ds { samsung,pins = "gpr2-2"; - samsung,pin-function = <2>; - samsung,pin-pud = <1>; - samsung,pin-drv = <6>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; sd1_qrdy: sd1-qrdy { samsung,pins = "gpr2-3"; - samsung,pin-function = <2>; - samsung,pin-pud = <1>; - samsung,pin-drv = <6>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; sd1_int: sd1-int { samsung,pins = "gpr2-4"; - samsung,pin-function = <2>; - samsung,pin-pud = <1>; - samsung,pin-drv = <6>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; sd1_bus1: sd1-bus-width1 { samsung,pins = "gpr3-0"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <2>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; sd1_bus4: sd1-bus-width4 { samsung,pins = "gpr3-1", "gpr3-2", "gpr3-3"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <2>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; sd1_bus8: sd1-bus-width8 { samsung,pins = "gpr3-4", "gpr3-5", "gpr3-6", "gpr3-7"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <2>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; }; @@ -682,22 +684,22 @@ spi5_bus: spi5-bus { samsung,pins = "gpf2-0", "gpf2-1", "gpf2-2", "gpf2-3"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; ufs_refclk_out: ufs-refclk-out { samsung,pins = "gpg2-4"; - samsung,pin-function = <2>; - samsung,pin-pud = <0>; - samsung,pin-drv = <2>; + samsung,pin-function = ; + 
samsung,pin-pud = ; + samsung,pin-drv = ; }; ufs_rst_n: ufs-rst-n { samsung,pins = "gph1-5"; - samsung,pin-function = <2>; - samsung,pin-pud = <3>; - samsung,pin-drv = <0>; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; }; }; diff --git a/arch/arm64/boot/dts/exynos/exynos7.dtsi b/arch/arm64/boot/dts/exynos/exynos7.dtsi index 80aa60e38237a2..9a3fbed1765af6 100644 --- a/arch/arm64/boot/dts/exynos/exynos7.dtsi +++ b/arch/arm64/boot/dts/exynos/exynos7.dtsi @@ -603,6 +603,40 @@ #include "exynos7-trip-points.dtsi" }; }; + + usbdrd_phy: phy@15500000 { + compatible = "samsung,exynos7-usbdrd-phy"; + reg = <0x15500000 0x100>; + clocks = <&clock_fsys0 ACLK_USBDRD300>, + <&clock_fsys0 OSCCLK_PHY_CLKOUT_USB30_PHY>, + <&clock_fsys0 PHYCLK_USBDRD300_UDRD30_PIPE_PCLK_USER>, + <&clock_fsys0 PHYCLK_USBDRD300_UDRD30_PHYCLK_USER>, + <&clock_fsys0 SCLK_USBDRD300_REFCLK>; + clock-names = "phy", "ref", "phy_pipe", + "phy_utmi", "itp"; + samsung,pmu-syscon = <&pmu_system_controller>; + #phy-cells = <1>; + }; + + usbdrd3 { + compatible = "samsung,exynos7-dwusb3"; + clocks = <&clock_fsys0 ACLK_USBDRD300>, + <&clock_fsys0 SCLK_USBDRD300_SUSPENDCLK>, + <&clock_fsys0 ACLK_AXIUS_USBDRD30X_FSYS0X>; + clock-names = "usbdrd30", "usbdrd30_susp_clk", + "usbdrd30_axius_clk"; + #address-cells = <1>; + #size-cells = <1>; + ranges; + + dwc3@15400000 { + compatible = "snps,dwc3"; + reg = <0x15400000 0x10000>; + interrupts = ; + phys = <&usbdrd_phy 0>, <&usbdrd_phy 1>; + phy-names = "usb2-phy", "usb3-phy"; + }; + }; }; }; diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h index eb8432bb82b8dd..e39d487bf72438 100644 --- a/arch/arm64/include/asm/compat.h +++ b/arch/arm64/include/asm/compat.h @@ -23,6 +23,7 @@ */ #include #include +#include #define COMPAT_USER_HZ 100 #ifdef __AARCH64EB__ diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 05310ad8c5abec..f31c48d0cd6873 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -251,7 +251,7 @@ static inline bool system_supports_fpsimd(void) static inline bool system_uses_ttbr0_pan(void) { return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) && - !cpus_have_cap(ARM64_HAS_PAN); + !cpus_have_const_cap(ARM64_HAS_PAN); } #endif /* __ASSEMBLY__ */ diff --git a/arch/arm64/include/asm/current.h b/arch/arm64/include/asm/current.h index 86c404171305ab..f6580d4afb0e0c 100644 --- a/arch/arm64/include/asm/current.h +++ b/arch/arm64/include/asm/current.h @@ -3,8 +3,6 @@ #include -#include - #ifndef __ASSEMBLY__ struct task_struct; diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index f21fd38943708f..e7705e7bb07b13 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -30,8 +30,7 @@ #define __KVM_HAVE_ARCH_INTC_INITIALIZED -#define KVM_USER_MEM_SLOTS 32 -#define KVM_PRIVATE_MEM_SLOTS 4 +#define KVM_USER_MEM_SLOTS 512 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 #define KVM_HALT_POLL_NS_DEFAULT 500000 diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index 1ef40d82cfd3ca..3257895a9b5e41 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -25,6 +25,8 @@ #include #include +#include +#include #include #include diff --git a/arch/arm64/include/asm/pgtable-types.h b/arch/arm64/include/asm/pgtable-types.h index 69b2fd41503ca3..345a072b5856d4 100644 --- a/arch/arm64/include/asm/pgtable-types.h +++ b/arch/arm64/include/asm/pgtable-types.h @@ 
-55,9 +55,13 @@ typedef struct { pteval_t pgprot; } pgprot_t; #define __pgprot(x) ((pgprot_t) { (x) } ) #if CONFIG_PGTABLE_LEVELS == 2 +#define __ARCH_USE_5LEVEL_HACK #include #elif CONFIG_PGTABLE_LEVELS == 3 +#define __ARCH_USE_5LEVEL_HACK #include +#elif CONFIG_PGTABLE_LEVELS == 4 +#include #endif #endif /* __ASM_PGTABLE_TYPES_H */ diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index e78ac26324bd80..bdbeb06dc11ede 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h @@ -44,7 +44,7 @@ #define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2) #define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5) -#define __NR_compat_syscalls 394 +#define __NR_compat_syscalls 398 #endif #define __ARCH_WANT_SYS_CLONE diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index b7e8ef16ff0dc6..c66b51aab19588 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h @@ -809,6 +809,14 @@ __SYSCALL(__NR_copy_file_range, sys_copy_file_range) __SYSCALL(__NR_preadv2, compat_sys_preadv2) #define __NR_pwritev2 393 __SYSCALL(__NR_pwritev2, compat_sys_pwritev2) +#define __NR_pkey_mprotect 394 +__SYSCALL(__NR_pkey_mprotect, sys_pkey_mprotect) +#define __NR_pkey_alloc 395 +__SYSCALL(__NR_pkey_alloc, sys_pkey_alloc) +#define __NR_pkey_free 396 +__SYSCALL(__NR_pkey_free, sys_pkey_free) +#define __NR_statx 397 +__SYSCALL(__NR_statx, sys_statx) /* * Please add new compat syscalls above this comment and update diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c index 75a0f8acef669c..fd691087dc9ad5 100644 --- a/arch/arm64/kernel/cpuidle.c +++ b/arch/arm64/kernel/cpuidle.c @@ -30,7 +30,7 @@ int arm_cpuidle_init(unsigned int cpu) } /** - * cpu_suspend() - function to enter a low-power idle state + * arm_cpuidle_suspend() - function to enter a low-power idle state * @arg: argument to pass to CPU suspend operations * * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index 2bd426448fc190..32913567da087d 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index b883f1f75216ae..06da8ea16bbe5e 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c index 769f24ef628c1e..d7e90d97f5c405 100644 --- a/arch/arm64/kernel/kaslr.c +++ b/arch/arm64/kernel/kaslr.c @@ -131,11 +131,15 @@ u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset) /* * The kernel Image should not extend across a 1GB/32MB/512MB alignment * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this - * happens, increase the KASLR offset by the size of the kernel image. + * happens, increase the KASLR offset by the size of the kernel image + * rounded up by SWAPPER_BLOCK_SIZE. 
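A self-contained sketch of the adjustment this kaslr.c comment describes: when the randomized image would straddle a table-sized boundary, advance the offset by the image size rounded up to the block size and re-apply the randomization mask. The shift, block size, mask and addresses below are stand-ins chosen for the demo, not the real SWAPPER_* values, and the modulo_offset term is dropped for brevity:

	#include <stdint.h>
	#include <stdio.h>

	/* stand-in geometry: 512 MiB table span, 2 MiB blocks */
	#define TABLE_SHIFT	29
	#define BLOCK_SIZE	(2UL << 20)
	#define round_up(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		uint64_t text = 0x40080000;		/* link address of _text */
		uint64_t image_size = 0x01234000;	/* _end - _text */
		uint64_t mask = (1UL << 30) - 1;	/* randomization mask */
		/* pretend the random offset landed just below a table boundary */
		uint64_t offset = (1UL << TABLE_SHIFT) - 0x100000;

		if (((text + offset) >> TABLE_SHIFT) !=
		    ((text + image_size + offset) >> TABLE_SHIFT)) {
			/* image would straddle the boundary: move its start past it */
			offset = (offset + round_up(image_size, BLOCK_SIZE)) & mask;
		}

		printf("final offset: %#llx\n", (unsigned long long)offset);
		return 0;
	}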
*/ if ((((u64)_text + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT) != - (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT)) - offset = (offset + (u64)(_end - _text)) & mask; + (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT)) { + u64 kimg_sz = _end - _text; + offset = (offset + round_up(kimg_sz, SWAPPER_BLOCK_SIZE)) + & mask; + } if (IS_ENABLED(CONFIG_KASAN)) /* diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c index d217c9e95b06cc..2122cd187f194e 100644 --- a/arch/arm64/kernel/kgdb.c +++ b/arch/arm64/kernel/kgdb.c @@ -24,6 +24,8 @@ #include #include #include +#include + #include #include #include diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c index 3f62b35fb6f157..bd1b74c2436f5c 100644 --- a/arch/arm64/kernel/perf_regs.c +++ b/arch/arm64/kernel/perf_regs.c @@ -2,6 +2,7 @@ #include #include #include +#include #include #include diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index f0593c92279bf6..c5c45942fb6e66 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -371,12 +372,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr) return 0; } -int __kprobes kprobe_exceptions_notify(struct notifier_block *self, - unsigned long val, void *data) -{ - return NOTIFY_DONE; -} - static void __kprobes kprobe_handler(struct pt_regs *regs) { struct kprobe *p, *cur_kprobe; diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 1ad48f93abdd59..043d373b836992 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -24,6 +24,9 @@ #include #include #include +#include +#include +#include #include #include #include diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index a22161ccf4470a..c142459a88f33e 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -22,7 +22,8 @@ #include #include #include -#include +#include +#include #include #include #include diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 952e2c0dabd51e..42274bda0ccb5a 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -42,6 +42,7 @@ #include #include #include +#include #include #include diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 827d52d78b67d1..9b1036570586f9 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -21,7 +21,9 @@ #include #include #include -#include +#include +#include +#include #include #include #include @@ -942,7 +944,7 @@ static bool have_cpu_die(void) #ifdef CONFIG_HOTPLUG_CPU int any_cpu = raw_smp_processor_id(); - if (cpu_ops[any_cpu]->cpu_die) + if (cpu_ops[any_cpu] && cpu_ops[any_cpu]->cpu_die) return true; #endif return false; diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index 8a552a33c6efa2..feac80c22f61f7 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -19,6 +19,8 @@ #include #include #include +#include +#include #include #include diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c index abaf582fc7a8eb..8b8bbd3eaa52cc 100644 --- a/arch/arm64/kernel/sys_compat.c +++ b/arch/arm64/kernel/sys_compat.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index 565dd69888cc57..08243533e5ee62 100644 --- 
a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 7d47c2cdfd9315..e52be6aa44ee7f 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -29,8 +29,11 @@ #include #include #include -#include +#include +#include +#include #include +#include #include #include diff --git a/arch/arm64/kernel/vdso/.gitignore b/arch/arm64/kernel/vdso/.gitignore index b8cc94e9698b69..f8b69d84238eb4 100644 --- a/arch/arm64/kernel/vdso/.gitignore +++ b/arch/arm64/kernel/vdso/.gitignore @@ -1,2 +1 @@ vdso.lds -vdso-offsets.h diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 1bfe30dfbfe77f..fa1b18e364fc9d 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -135,7 +135,19 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run) return ret; } +static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + u32 hsr = kvm_vcpu_get_hsr(vcpu); + + kvm_pr_unimpl("Unknown exception class: hsr: %#08x -- %s\n", + hsr, esr_get_class_string(hsr)); + + kvm_inject_undefined(vcpu); + return 1; +} + static exit_handle_fn arm_exit_handlers[] = { + [0 ... ESR_ELx_EC_MAX] = kvm_handle_unknown_ec, [ESR_ELx_EC_WFx] = kvm_handle_wfx, [ESR_ELx_EC_CP15_32] = kvm_handle_cp15_32, [ESR_ELx_EC_CP15_64] = kvm_handle_cp15_64, @@ -162,13 +174,6 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu) u32 hsr = kvm_vcpu_get_hsr(vcpu); u8 hsr_ec = ESR_ELx_EC(hsr); - if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) || - !arm_exit_handlers[hsr_ec]) { - kvm_err("Unknown exception class: hsr: %#08x -- %s\n", - hsr, esr_get_class_string(hsr)); - BUG(); - } - return arm_exit_handlers[hsr_ec]; } diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c index e8e7ba2bc11f93..9e1d2b75eecd60 100644 --- a/arch/arm64/kvm/hyp/tlb.c +++ b/arch/arm64/kvm/hyp/tlb.c @@ -18,14 +18,62 @@ #include #include +static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm) +{ + u64 val; + + /* + * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and + * most TLB operations target EL2/EL0. In order to affect the + * guest TLBs (EL1/EL0), we need to change one of these two + * bits. Changing E2H is impossible (goodbye TTBR1_EL2), so + * let's flip TGE before executing the TLB operation. + */ + write_sysreg(kvm->arch.vttbr, vttbr_el2); + val = read_sysreg(hcr_el2); + val &= ~HCR_TGE; + write_sysreg(val, hcr_el2); + isb(); +} + +static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm) +{ + write_sysreg(kvm->arch.vttbr, vttbr_el2); + isb(); +} + +static hyp_alternate_select(__tlb_switch_to_guest, + __tlb_switch_to_guest_nvhe, + __tlb_switch_to_guest_vhe, + ARM64_HAS_VIRT_HOST_EXTN); + +static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm) +{ + /* + * We're done with the TLB operation, let's restore the host's + * view of HCR_EL2. 
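The paired __tlb_switch_to_guest/__tlb_switch_to_host variants in this tlb.c change are picked by hyp_alternate_select() on the ARM64_HAS_VIRT_HOST_EXTN capability, which patches the call site at boot rather than branching at run time. A rough plain-C analogy of that dispatch, using an ordinary function pointer and a made-up capability flag instead of the alternatives framework:

	#include <stdbool.h>
	#include <stdio.h>

	struct kvm { unsigned long long vttbr; };

	/* stand-ins for the nVHE and VHE variants the real code selects between */
	static void switch_to_guest_nvhe(struct kvm *kvm)
	{
		printf("nVHE: program VTTBR %#llx, isb\n", kvm->vttbr);
	}

	static void switch_to_guest_vhe(struct kvm *kvm)
	{
		/* with VHE, HCR_EL2.TGE must also be cleared so TLB ops hit the guest */
		printf("VHE:  program VTTBR %#llx, clear TGE, isb\n", kvm->vttbr);
	}

	/* made-up flag standing in for the ARM64_HAS_VIRT_HOST_EXTN capability */
	static bool cpu_has_vhe = true;

	static void (*tlb_switch_to_guest(void))(struct kvm *)
	{
		return cpu_has_vhe ? switch_to_guest_vhe : switch_to_guest_nvhe;
	}

	int main(void)
	{
		struct kvm vm = { .vttbr = 0x8100000000ULL };

		/* same shape as the __tlb_switch_to_guest()(kvm) call in the patch */
		tlb_switch_to_guest()(&vm);
		return 0;
	}

The double call in the patch, __tlb_switch_to_guest()(kvm), reads the same way: the outer call yields the selected implementation and the inner call invokes it with the kvm argument.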
+ */ + write_sysreg(0, vttbr_el2); + write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2); +} + +static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm) +{ + write_sysreg(0, vttbr_el2); +} + +static hyp_alternate_select(__tlb_switch_to_host, + __tlb_switch_to_host_nvhe, + __tlb_switch_to_host_vhe, + ARM64_HAS_VIRT_HOST_EXTN); + void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) { dsb(ishst); /* Switch to requested VMID */ kvm = kern_hyp_va(kvm); - write_sysreg(kvm->arch.vttbr, vttbr_el2); - isb(); + __tlb_switch_to_guest()(kvm); /* * We could do so much better if we had the VA as well. @@ -46,7 +94,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) dsb(ish); isb(); - write_sysreg(0, vttbr_el2); + __tlb_switch_to_host()(kvm); } void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm) @@ -55,14 +103,13 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm) /* Switch to requested VMID */ kvm = kern_hyp_va(kvm); - write_sysreg(kvm->arch.vttbr, vttbr_el2); - isb(); + __tlb_switch_to_guest()(kvm); __tlbi(vmalls12e1is); dsb(ish); isb(); - write_sysreg(0, vttbr_el2); + __tlb_switch_to_host()(kvm); } void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu) @@ -70,14 +117,13 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu) struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm); /* Switch to requested VMID */ - write_sysreg(kvm->arch.vttbr, vttbr_el2); - isb(); + __tlb_switch_to_guest()(kvm); __tlbi(vmalle1); dsb(nsh); isb(); - write_sysreg(0, vttbr_el2); + __tlb_switch_to_host()(kvm); } void __hyp_text __kvm_flush_vm_context(void) diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 81283851c9af9a..4bf899fb451baf 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -26,7 +26,8 @@ #include #include #include -#include +#include +#include #include #include #include diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index 201d918e75759d..687a358a37337a 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c @@ -13,6 +13,7 @@ #define pr_fmt(fmt) "kasan: " fmt #include #include +#include #include #include #include @@ -161,7 +162,7 @@ void __init kasan_init(void) clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END); vmemmap_populate(kimg_shadow_start, kimg_shadow_end, - pfn_to_nid(virt_to_pfn(_text))); + pfn_to_nid(virt_to_pfn(lm_alias(_text)))); /* * vmemmap_populate() has populated the shadow region that covers the diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c index 01c171723bb33b..7b0d55756eb1c2 100644 --- a/arch/arm64/mm/mmap.c +++ b/arch/arm64/mm/mmap.c @@ -22,7 +22,8 @@ #include #include #include -#include +#include +#include #include #include #include diff --git a/arch/avr32/include/asm/mmu_context.h b/arch/avr32/include/asm/mmu_context.h index 27ff234071009b..cd87abba8db78d 100644 --- a/arch/avr32/include/asm/mmu_context.h +++ b/arch/avr32/include/asm/mmu_context.h @@ -12,6 +12,8 @@ #ifndef __ASM_AVR32_MMU_CONTEXT_H #define __ASM_AVR32_MMU_CONTEXT_H +#include + #include #include #include diff --git a/arch/avr32/include/asm/pgtable-2level.h b/arch/avr32/include/asm/pgtable-2level.h index 425dd567b5b955..d5b1c63993ec29 100644 --- a/arch/avr32/include/asm/pgtable-2level.h +++ b/arch/avr32/include/asm/pgtable-2level.h @@ -8,6 +8,7 @@ #ifndef __ASM_AVR32_PGTABLE_2LEVEL_H #define __ASM_AVR32_PGTABLE_2LEVEL_H +#define __ARCH_USE_5LEVEL_HACK #include /* diff --git a/arch/avr32/kernel/nmi_debug.c b/arch/avr32/kernel/nmi_debug.c index 3414b8566c291f..25823049bb99ea 
100644 --- a/arch/avr32/kernel/nmi_debug.c +++ b/arch/avr32/kernel/nmi_debug.c @@ -9,6 +9,7 @@ #include #include #include +#include #include diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c index 68e5b9dac0596b..ad0dfccedb7928 100644 --- a/arch/avr32/kernel/process.c +++ b/arch/avr32/kernel/process.c @@ -6,6 +6,9 @@ * published by the Free Software Foundation. */ #include +#include +#include +#include #include #include #include diff --git a/arch/avr32/kernel/ptrace.c b/arch/avr32/kernel/ptrace.c index a89b893279bbbf..41a14e96a1dbaf 100644 --- a/arch/avr32/kernel/ptrace.c +++ b/arch/avr32/kernel/ptrace.c @@ -8,6 +8,7 @@ #undef DEBUG #include #include +#include #include #include #include diff --git a/arch/avr32/kernel/stacktrace.c b/arch/avr32/kernel/stacktrace.c index c09f0d8dd67946..f8cc995cf0e0d0 100644 --- a/arch/avr32/kernel/stacktrace.c +++ b/arch/avr32/kernel/stacktrace.c @@ -8,6 +8,7 @@ * published by the Free Software Foundation. */ #include +#include #include #include #include diff --git a/arch/avr32/kernel/traps.c b/arch/avr32/kernel/traps.c index eb4a3fcfbaff17..50b54132502504 100644 --- a/arch/avr32/kernel/traps.c +++ b/arch/avr32/kernel/traps.c @@ -14,7 +14,7 @@ #include #include /* print_modules */ #include -#include +#include #include #include diff --git a/arch/avr32/oprofile/backtrace.c b/arch/avr32/oprofile/backtrace.c index 75d9ad6f99cf56..29cf2f191bfd28 100644 --- a/arch/avr32/oprofile/backtrace.c +++ b/arch/avr32/oprofile/backtrace.c @@ -14,7 +14,7 @@ */ #include -#include +#include #include /* The first two words of each frame on the stack look like this if we have diff --git a/arch/blackfin/include/asm/mmu_context.h b/arch/blackfin/include/asm/mmu_context.h index 15b16d3e8de8ae..0ce6de873b27e2 100644 --- a/arch/blackfin/include/asm/mmu_context.h +++ b/arch/blackfin/include/asm/mmu_context.h @@ -9,6 +9,8 @@ #include #include +#include + #include #include #include diff --git a/arch/blackfin/kernel/dumpstack.c b/arch/blackfin/kernel/dumpstack.c index 95ba6d9e9a3d80..3c992c1f8ef282 100644 --- a/arch/blackfin/kernel/dumpstack.c +++ b/arch/blackfin/kernel/dumpstack.c @@ -10,6 +10,8 @@ #include #include #include +#include + #include /* diff --git a/arch/blackfin/kernel/early_printk.c b/arch/blackfin/kernel/early_printk.c index 61fbd2de993dd9..4b89af9243d392 100644 --- a/arch/blackfin/kernel/early_printk.c +++ b/arch/blackfin/kernel/early_printk.c @@ -8,6 +8,7 @@ */ #include +#include #include #include #include diff --git a/arch/blackfin/kernel/flat.c b/arch/blackfin/kernel/flat.c index a88daddbf074b2..b5b6584496164d 100644 --- a/arch/blackfin/kernel/flat.c +++ b/arch/blackfin/kernel/flat.c @@ -6,6 +6,7 @@ #include #include +#include #include #define FLAT_BFIN_RELOC_TYPE_16_BIT 0 diff --git a/arch/blackfin/kernel/nmi.c b/arch/blackfin/kernel/nmi.c index 9919d29287dce9..633c37083e877f 100644 --- a/arch/blackfin/kernel/nmi.c +++ b/arch/blackfin/kernel/nmi.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c index 4aa5545c4fde14..89d5162d4ca675 100644 --- a/arch/blackfin/kernel/process.c +++ b/arch/blackfin/kernel/process.c @@ -12,6 +12,10 @@ #include #include #include +#include +#include +#include +#include #include #include #include diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c index 360d996451633c..a6827095b99a88 100644 --- a/arch/blackfin/kernel/ptrace.c +++ b/arch/blackfin/kernel/ptrace.c @@ -7,6 +7,7 @@ #include 
#include +#include #include #include #include diff --git a/arch/blackfin/kernel/signal.c b/arch/blackfin/kernel/signal.c index ea570db598e503..5f51727792047b 100644 --- a/arch/blackfin/kernel/signal.c +++ b/arch/blackfin/kernel/signal.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include diff --git a/arch/blackfin/kernel/stacktrace.c b/arch/blackfin/kernel/stacktrace.c index 30301e1eace5df..17198f3650b6d2 100644 --- a/arch/blackfin/kernel/stacktrace.c +++ b/arch/blackfin/kernel/stacktrace.c @@ -6,6 +6,7 @@ */ #include +#include #include #include #include diff --git a/arch/blackfin/kernel/trace.c b/arch/blackfin/kernel/trace.c index 719dd796c12cb1..151f22196ab6f0 100644 --- a/arch/blackfin/kernel/trace.c +++ b/arch/blackfin/kernel/trace.c @@ -11,7 +11,9 @@ #include #include #include -#include +#include +#include +#include #include #include #include diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c index 1ed85ddadc0d25..a323a40a46e917 100644 --- a/arch/blackfin/kernel/traps.c +++ b/arch/blackfin/kernel/traps.c @@ -9,6 +9,8 @@ #include #include #include +#include +#include #include #include #include diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c index 4986b4fbcee982..13e94bf9d8ba5f 100644 --- a/arch/blackfin/mach-common/ints-priority.c +++ b/arch/blackfin/mach-common/ints-priority.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c index a2e6db2ce811c9..b32ddab7966c95 100644 --- a/arch/blackfin/mach-common/smp.c +++ b/arch/blackfin/mach-common/smp.c @@ -11,7 +11,8 @@ #include #include #include -#include +#include +#include #include #include #include diff --git a/arch/blackfin/mm/isram-driver.c b/arch/blackfin/mm/isram-driver.c index 7e2e674ed4440a..aaa1e64b753b9b 100644 --- a/arch/blackfin/mm/isram-driver.c +++ b/arch/blackfin/mm/isram-driver.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include diff --git a/arch/blackfin/mm/sram-alloc.c b/arch/blackfin/mm/sram-alloc.c index 1f3b3ef3e103fb..d2a96c2c02a3e6 100644 --- a/arch/blackfin/mm/sram-alloc.c +++ b/arch/blackfin/mm/sram-alloc.c @@ -19,6 +19,8 @@ #include #include #include +#include + #include #include #include "blackfin_sram.h" diff --git a/arch/c6x/kernel/process.c b/arch/c6x/kernel/process.c index 0ee7686a78f375..c4ecb24c2d5c7b 100644 --- a/arch/c6x/kernel/process.c +++ b/arch/c6x/kernel/process.c @@ -17,6 +17,8 @@ #include #include #include +#include +#include #include diff --git a/arch/c6x/kernel/ptrace.c b/arch/c6x/kernel/ptrace.c index 3c494e84444d1e..8801dc98fd442a 100644 --- a/arch/c6x/kernel/ptrace.c +++ b/arch/c6x/kernel/ptrace.c @@ -14,6 +14,7 @@ #include #include #include +#include #include @@ -69,46 +70,6 @@ static int gpr_get(struct task_struct *target, 0, sizeof(*regs)); } -static int gpr_set(struct task_struct *target, - const struct user_regset *regset, - unsigned int pos, unsigned int count, - const void *kbuf, const void __user *ubuf) -{ - int ret; - struct pt_regs *regs = task_pt_regs(target); - - /* Don't copyin TSR or CSR */ - ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, - ®s, - 0, PT_TSR * sizeof(long)); - if (ret) - return ret; - - ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, - PT_TSR * sizeof(long), - (PT_TSR + 1) * sizeof(long)); - if (ret) - return ret; - - ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, - ®s, - (PT_TSR + 1) * sizeof(long), - PT_CSR * 
sizeof(long)); - if (ret) - return ret; - - ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, - PT_CSR * sizeof(long), - (PT_CSR + 1) * sizeof(long)); - if (ret) - return ret; - - ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, - ®s, - (PT_CSR + 1) * sizeof(long), -1); - return ret; -} - enum c6x_regset { REGSET_GPR, }; @@ -120,7 +81,6 @@ static const struct user_regset c6x_regsets[] = { .size = sizeof(u32), .align = sizeof(u32), .get = gpr_get, - .set = gpr_set }, }; diff --git a/arch/c6x/kernel/traps.c b/arch/c6x/kernel/traps.c index dcc2c2f6d67c8d..09b8a40d56807b 100644 --- a/arch/c6x/kernel/traps.c +++ b/arch/c6x/kernel/traps.c @@ -10,6 +10,7 @@ */ #include #include +#include #include #include diff --git a/arch/cris/arch-v10/drivers/sync_serial.c b/arch/cris/arch-v10/drivers/sync_serial.c index 9ac75d68f1847f..cc62572c1b9460 100644 --- a/arch/cris/arch-v10/drivers/sync_serial.c +++ b/arch/cris/arch-v10/drivers/sync_serial.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/cris/arch-v10/kernel/process.c b/arch/cris/arch-v10/kernel/process.c index 96e5afef6b47b8..e299d30105b53b 100644 --- a/arch/cris/arch-v10/kernel/process.c +++ b/arch/cris/arch-v10/kernel/process.c @@ -11,6 +11,9 @@ */ #include +#include +#include +#include #include #include #include diff --git a/arch/cris/arch-v10/kernel/ptrace.c b/arch/cris/arch-v10/kernel/ptrace.c index eca94c7d56e770..c2f2b9b83cc4da 100644 --- a/arch/cris/arch-v10/kernel/ptrace.c +++ b/arch/cris/arch-v10/kernel/ptrace.c @@ -4,6 +4,7 @@ #include #include +#include #include #include #include diff --git a/arch/cris/arch-v10/kernel/signal.c b/arch/cris/arch-v10/kernel/signal.c index db30c98e4926ca..bab4a8dd6bfda2 100644 --- a/arch/cris/arch-v10/kernel/signal.c +++ b/arch/cris/arch-v10/kernel/signal.c @@ -14,6 +14,7 @@ */ #include +#include #include #include #include diff --git a/arch/cris/arch-v10/kernel/traps.c b/arch/cris/arch-v10/kernel/traps.c index 96d004fe974053..c0a501f29bd89d 100644 --- a/arch/cris/arch-v10/kernel/traps.c +++ b/arch/cris/arch-v10/kernel/traps.c @@ -10,6 +10,8 @@ #include #include +#include + #include #include diff --git a/arch/cris/arch-v10/mm/tlb.c b/arch/cris/arch-v10/mm/tlb.c index 21d78c599babc5..3225d38bdaea29 100644 --- a/arch/cris/arch-v10/mm/tlb.c +++ b/arch/cris/arch-v10/mm/tlb.c @@ -10,6 +10,8 @@ * */ +#include + #include #include #include diff --git a/arch/cris/arch-v32/drivers/cryptocop.c b/arch/cris/arch-v32/drivers/cryptocop.c index ae6903d7fdbe08..14970f11bbf2b6 100644 --- a/arch/cris/arch-v32/drivers/cryptocop.c +++ b/arch/cris/arch-v32/drivers/cryptocop.c @@ -2086,7 +2086,7 @@ static void cryptocop_job_queue_close(void) dma_in_cfg.en = regk_dma_no; REG_WR(dma, IN_DMA_INST, rw_cfg, dma_in_cfg); - /* Disble the cryptocop. */ + /* Disable the cryptocop. 
*/ rw_cfg = REG_RD(strcop, regi_strcop, rw_cfg); rw_cfg.en = 0; REG_WR(strcop, regi_strcop, rw_cfg, rw_cfg); diff --git a/arch/cris/arch-v32/drivers/sync_serial.c b/arch/cris/arch-v32/drivers/sync_serial.c index ef515af1a37792..8efcc1a899a890 100644 --- a/arch/cris/arch-v32/drivers/sync_serial.c +++ b/arch/cris/arch-v32/drivers/sync_serial.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/cris/arch-v32/kernel/process.c b/arch/cris/arch-v32/kernel/process.c index 4d1afa9f9fd367..c530a8fa87ceb7 100644 --- a/arch/cris/arch-v32/kernel/process.c +++ b/arch/cris/arch-v32/kernel/process.c @@ -9,6 +9,9 @@ */ #include +#include +#include +#include #include #include #include diff --git a/arch/cris/arch-v32/kernel/ptrace.c b/arch/cris/arch-v32/kernel/ptrace.c index c366bc05466a66..0461e95bbb629a 100644 --- a/arch/cris/arch-v32/kernel/ptrace.c +++ b/arch/cris/arch-v32/kernel/ptrace.c @@ -4,6 +4,7 @@ #include #include +#include #include #include #include diff --git a/arch/cris/arch-v32/kernel/signal.c b/arch/cris/arch-v32/kernel/signal.c index 816bf2ca93ef57..ea2e8e1398e875 100644 --- a/arch/cris/arch-v32/kernel/signal.c +++ b/arch/cris/arch-v32/kernel/signal.c @@ -3,6 +3,7 @@ */ #include +#include #include #include #include diff --git a/arch/cris/arch-v32/kernel/traps.c b/arch/cris/arch-v32/kernel/traps.c index ad6174e217c932..a34256515036c4 100644 --- a/arch/cris/arch-v32/kernel/traps.c +++ b/arch/cris/arch-v32/kernel/traps.c @@ -5,6 +5,8 @@ #include #include #include +#include + #include #include #include diff --git a/arch/cris/arch-v32/mm/tlb.c b/arch/cris/arch-v32/mm/tlb.c index c030d020660aaa..bc3de5b5e27c01 100644 --- a/arch/cris/arch-v32/mm/tlb.c +++ b/arch/cris/arch-v32/mm/tlb.c @@ -6,6 +6,7 @@ * Authors: Bjorn Wesen * Tobias Anderberg , CRISv32 port. 
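The steady run of one-line #include additions through these cris, blackfin, frv and h8300 files (and the arm64, avr32 and ia64 ones earlier) follows one pattern: code that used to get everything via linux/sched.h now includes the specific sched sub-header for what it actually uses. The exact header differs per file, so the mapping below is an illustrative assumption rather than a list quoted from any one hunk:

	/* illustrative mapping, assuming the sched.h split-up series */
	#include <linux/sched.h>		/* struct task_struct itself */
	#include <linux/sched/signal.h>		/* signal_pending(), send_sig(), ... */
	#include <linux/sched/task.h>		/* fork/exit helpers used by process.c */
	#include <linux/sched/task_stack.h>	/* task_stack_page(), end_of_stack() */
	#include <linux/sched/debug.h>		/* show_stack(), show_regs() */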
*/ +#include #include #include diff --git a/arch/cris/include/asm/pgtable.h b/arch/cris/include/asm/pgtable.h index ceefc314d64d08..fa3a73004cc570 100644 --- a/arch/cris/include/asm/pgtable.h +++ b/arch/cris/include/asm/pgtable.h @@ -6,10 +6,11 @@ #define _CRIS_PGTABLE_H #include +#define __ARCH_USE_5LEVEL_HACK #include #ifndef __ASSEMBLY__ -#include +#include #include #endif #include diff --git a/arch/cris/kernel/irq.c b/arch/cris/kernel/irq.c index 694850e8f077af..09b864f46f8a76 100644 --- a/arch/cris/kernel/irq.c +++ b/arch/cris/kernel/irq.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include diff --git a/arch/cris/kernel/process.c b/arch/cris/kernel/process.c index 50a7dd451456cc..0bbd3a0c3d7087 100644 --- a/arch/cris/kernel/process.c +++ b/arch/cris/kernel/process.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/cris/kernel/stacktrace.c b/arch/cris/kernel/stacktrace.c index 99838c74456dd5..f1cc3aaacd8d7b 100644 --- a/arch/cris/kernel/stacktrace.c +++ b/arch/cris/kernel/stacktrace.c @@ -1,5 +1,5 @@ #include -#include +#include #include #include diff --git a/arch/cris/kernel/time.c b/arch/cris/kernel/time.c index 2dda6da7152159..bc562cf511a630 100644 --- a/arch/cris/kernel/time.c +++ b/arch/cris/kernel/time.c @@ -29,7 +29,7 @@ #include #include #include -#include /* just for sched_clock() - funny that */ +#include #define D(x) diff --git a/arch/cris/kernel/traps.c b/arch/cris/kernel/traps.c index b2a312a7afc6b8..a01636a12a6e87 100644 --- a/arch/cris/kernel/traps.c +++ b/arch/cris/kernel/traps.c @@ -15,6 +15,7 @@ #include #include #include +#include #ifdef CONFIG_KALLSYMS #include #endif diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c index 94183d3639ef5c..1fca464f1b9e75 100644 --- a/arch/cris/mm/fault.c +++ b/arch/cris/mm/fault.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include diff --git a/arch/cris/mm/tlb.c b/arch/cris/mm/tlb.c index b7f8de576777f8..8413741cfa0fbd 100644 --- a/arch/cris/mm/tlb.c +++ b/arch/cris/mm/tlb.c @@ -9,6 +9,8 @@ #include #include +#include + #include #define D(x) diff --git a/arch/frv/include/asm/pgtable.h b/arch/frv/include/asm/pgtable.h index a0513d463a1fa8..ab6e7e961b545c 100644 --- a/arch/frv/include/asm/pgtable.h +++ b/arch/frv/include/asm/pgtable.h @@ -16,6 +16,7 @@ #ifndef _ASM_PGTABLE_H #define _ASM_PGTABLE_H +#include #include #include #include diff --git a/arch/frv/kernel/process.c b/arch/frv/kernel/process.c index b306241c4ef22f..5a4c92abc99ec3 100644 --- a/arch/frv/kernel/process.c +++ b/arch/frv/kernel/process.c @@ -13,6 +13,9 @@ #include #include #include +#include +#include +#include #include #include #include diff --git a/arch/frv/kernel/traps.c b/arch/frv/kernel/traps.c index 31221fb4348e22..ce29991e4219dd 100644 --- a/arch/frv/kernel/traps.c +++ b/arch/frv/kernel/traps.c @@ -9,7 +9,8 @@ * 2 of the License, or (at your option) any later version. 
*/ -#include +#include +#include #include #include #include diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c index 836f14707a627f..da82c25301e777 100644 --- a/arch/frv/mm/elf-fdpic.c +++ b/arch/frv/mm/elf-fdpic.c @@ -10,6 +10,7 @@ */ #include +#include #include #include #include diff --git a/arch/frv/mm/init.c b/arch/frv/mm/init.c index 88a15974352857..328f0a2923168c 100644 --- a/arch/frv/mm/init.c +++ b/arch/frv/mm/init.c @@ -18,6 +18,7 @@ #include #include +#include #include #include #include diff --git a/arch/frv/mm/mmu-context.c b/arch/frv/mm/mmu-context.c index 3473bde77f566e..16946a58f64db9 100644 --- a/arch/frv/mm/mmu-context.c +++ b/arch/frv/mm/mmu-context.c @@ -10,6 +10,8 @@ */ #include +#include +#include #include #include diff --git a/arch/h8300/include/asm/pgtable.h b/arch/h8300/include/asm/pgtable.h index 8341db67821dd1..7d265d28ba5eec 100644 --- a/arch/h8300/include/asm/pgtable.h +++ b/arch/h8300/include/asm/pgtable.h @@ -1,5 +1,6 @@ #ifndef _H8300_PGTABLE_H #define _H8300_PGTABLE_H +#define __ARCH_USE_5LEVEL_HACK #include #include #define pgtable_cache_init() do { } while (0) diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c index 891974a1170440..0f5db5bb561b75 100644 --- a/arch/h8300/kernel/process.c +++ b/arch/h8300/kernel/process.c @@ -25,6 +25,9 @@ #include #include #include +#include +#include +#include #include #include #include diff --git a/arch/h8300/kernel/ptrace.c b/arch/h8300/kernel/ptrace.c index 92075544a19ac0..0dc1c8f622bc3f 100644 --- a/arch/h8300/kernel/ptrace.c +++ b/arch/h8300/kernel/ptrace.c @@ -95,7 +95,8 @@ static int regs_get(struct task_struct *target, long *reg = (long *)®s; /* build user regs in buffer */ - for (r = 0; r < ARRAY_SIZE(register_offset); r++) + BUILD_BUG_ON(sizeof(regs) % sizeof(long) != 0); + for (r = 0; r < sizeof(regs) / sizeof(long); r++) *reg++ = h8300_get_reg(target, r); return user_regset_copyout(&pos, &count, &kbuf, &ubuf, @@ -113,7 +114,8 @@ static int regs_set(struct task_struct *target, long *reg; /* build user regs in buffer */ - for (reg = (long *)®s, r = 0; r < ARRAY_SIZE(register_offset); r++) + BUILD_BUG_ON(sizeof(regs) % sizeof(long) != 0); + for (reg = (long *)®s, r = 0; r < sizeof(regs) / sizeof(long); r++) *reg++ = h8300_get_reg(target, r); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, @@ -122,7 +124,7 @@ static int regs_set(struct task_struct *target, return ret; /* write back to pt_regs */ - for (reg = (long *)®s, r = 0; r < ARRAY_SIZE(register_offset); r++) + for (reg = (long *)®s, r = 0; r < sizeof(regs) / sizeof(long); r++) h8300_put_reg(target, r, *reg++); return 0; } diff --git a/arch/h8300/kernel/ptrace_h.c b/arch/h8300/kernel/ptrace_h.c index fe3b5673babaa4..f5ff3b794c8512 100644 --- a/arch/h8300/kernel/ptrace_h.c +++ b/arch/h8300/kernel/ptrace_h.c @@ -9,7 +9,7 @@ */ #include -#include +#include #include #define BREAKINST 0x5730 /* trapa #3 */ diff --git a/arch/h8300/kernel/ptrace_s.c b/arch/h8300/kernel/ptrace_s.c index ef5a9c13e76d3b..c0af930052c019 100644 --- a/arch/h8300/kernel/ptrace_s.c +++ b/arch/h8300/kernel/ptrace_s.c @@ -10,7 +10,7 @@ */ #include -#include +#include #include #include diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c index d784f7117f9abe..1e8070d08770a0 100644 --- a/arch/h8300/kernel/signal.c +++ b/arch/h8300/kernel/signal.c @@ -25,6 +25,7 @@ */ #include +#include #include #include #include diff --git a/arch/h8300/kernel/traps.c b/arch/h8300/kernel/traps.c index 044a3612584615..e47a9e0dc278fa 100644 --- 
a/arch/h8300/kernel/traps.c +++ b/arch/h8300/kernel/traps.c @@ -16,6 +16,8 @@ #include #include +#include +#include #include #include #include diff --git a/arch/hexagon/include/asm/mmu_context.h b/arch/hexagon/include/asm/mmu_context.h index d423d2e73c3088..d8a071afdd1d5c 100644 --- a/arch/hexagon/include/asm/mmu_context.h +++ b/arch/hexagon/include/asm/mmu_context.h @@ -21,6 +21,8 @@ #ifndef _ASM_MMU_CONTEXT_H #define _ASM_MMU_CONTEXT_H +#include + #include #include #include diff --git a/arch/hexagon/include/asm/pgtable.h b/arch/hexagon/include/asm/pgtable.h index 49eab8136ec307..24a9177fb897b6 100644 --- a/arch/hexagon/include/asm/pgtable.h +++ b/arch/hexagon/include/asm/pgtable.h @@ -26,6 +26,7 @@ */ #include #include +#define __ARCH_USE_5LEVEL_HACK #include /* A handy thing to have if one has the RAM. Declared in head.S */ diff --git a/arch/hexagon/kernel/kgdb.c b/arch/hexagon/kernel/kgdb.c index 62dece3ad827b0..16c24b22d0b269 100644 --- a/arch/hexagon/kernel/kgdb.c +++ b/arch/hexagon/kernel/kgdb.c @@ -20,6 +20,7 @@ #include #include +#include #include #include diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c index d9edfd3fc52af9..de715bab7956c7 100644 --- a/arch/hexagon/kernel/process.c +++ b/arch/hexagon/kernel/process.c @@ -19,6 +19,9 @@ */ #include +#include +#include +#include #include #include #include diff --git a/arch/hexagon/kernel/ptrace.c b/arch/hexagon/kernel/ptrace.c index 390a9ad14ca15f..ecd75e2e8eb391 100644 --- a/arch/hexagon/kernel/ptrace.c +++ b/arch/hexagon/kernel/ptrace.c @@ -22,6 +22,7 @@ #include #include +#include #include #include #include diff --git a/arch/hexagon/kernel/signal.c b/arch/hexagon/kernel/signal.c index c6b22b9945a72f..78aa7304a5c9f4 100644 --- a/arch/hexagon/kernel/signal.c +++ b/arch/hexagon/kernel/signal.c @@ -21,6 +21,8 @@ #include #include #include +#include + #include #include #include diff --git a/arch/hexagon/kernel/smp.c b/arch/hexagon/kernel/smp.c index c02a6455839e01..5dbc15549e011d 100644 --- a/arch/hexagon/kernel/smp.c +++ b/arch/hexagon/kernel/smp.c @@ -25,10 +25,11 @@ #include #include #include -#include +#include #include #include #include +#include #include /* timer_interrupt */ #include diff --git a/arch/hexagon/kernel/stacktrace.c b/arch/hexagon/kernel/stacktrace.c index f94918b449a8aa..41866a06adf7f3 100644 --- a/arch/hexagon/kernel/stacktrace.c +++ b/arch/hexagon/kernel/stacktrace.c @@ -19,6 +19,7 @@ */ #include +#include #include #include #include diff --git a/arch/hexagon/kernel/traps.c b/arch/hexagon/kernel/traps.c index 110dab152f82c2..2942a9204a9aad 100644 --- a/arch/hexagon/kernel/traps.c +++ b/arch/hexagon/kernel/traps.c @@ -19,7 +19,9 @@ */ #include -#include +#include +#include +#include #include #include #include diff --git a/arch/hexagon/kernel/vm_events.c b/arch/hexagon/kernel/vm_events.c index 741aaa917cda5c..04f57ef2200929 100644 --- a/arch/hexagon/kernel/vm_events.c +++ b/arch/hexagon/kernel/vm_events.c @@ -19,6 +19,7 @@ */ #include +#include #include #include #include diff --git a/arch/hexagon/mm/vm_fault.c b/arch/hexagon/mm/vm_fault.c index 489875fd2be459..3eec33c5cfd716 100644 --- a/arch/hexagon/mm/vm_fault.c +++ b/arch/hexagon/mm/vm_fault.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c index 21fd50def2708b..de8cba12101315 100644 --- a/arch/ia64/hp/sim/simserial.c +++ b/arch/ia64/hp/sim/simserial.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include 
#include diff --git a/arch/ia64/include/asm/mmu_context.h b/arch/ia64/include/asm/mmu_context.h index 7f2a456603cbf5..9b99368633b5ad 100644 --- a/arch/ia64/include/asm/mmu_context.h +++ b/arch/ia64/include/asm/mmu_context.h @@ -26,6 +26,7 @@ #include #include #include +#include #include #include diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h index 9f3ed9ee8f13e0..6cc22c8d8923e9 100644 --- a/arch/ia64/include/asm/pgtable.h +++ b/arch/ia64/include/asm/pgtable.h @@ -147,7 +147,7 @@ # ifndef __ASSEMBLY__ -#include /* for mm_struct */ +#include /* for mm_struct */ #include #include #include @@ -587,8 +587,10 @@ extern struct page *zero_page_memmap_ptr; #if CONFIG_PGTABLE_LEVELS == 3 +#define __ARCH_USE_5LEVEL_HACK #include #endif +#include #include #endif /* _ASM_IA64_PGTABLE_H */ diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h index 03911a33640685..26a63d69c599ad 100644 --- a/arch/ia64/include/asm/processor.h +++ b/arch/ia64/include/asm/processor.h @@ -19,8 +19,6 @@ #include #include -#define ARCH_HAS_PREFETCH_SWITCH_STACK - #define IA64_NUM_PHYS_STACK_REG 96 #define IA64_NUM_DBG_REGS 8 diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c index 60ef83e6db71eb..8786c8b4f187ca 100644 --- a/arch/ia64/kernel/asm-offsets.c +++ b/arch/ia64/kernel/asm-offsets.c @@ -6,7 +6,7 @@ #define ASM_OFFSETS_C 1 -#include +#include #include #include #include diff --git a/arch/ia64/kernel/brl_emu.c b/arch/ia64/kernel/brl_emu.c index 8682df6263d6d3..987b11be0021db 100644 --- a/arch/ia64/kernel/brl_emu.c +++ b/arch/ia64/kernel/brl_emu.c @@ -8,7 +8,7 @@ */ #include -#include +#include #include #include diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index 6f27a663177c43..e7a716b09350d0 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S @@ -455,29 +455,6 @@ GLOBAL_ENTRY(load_switch_stack) br.cond.sptk.many b7 END(load_switch_stack) -GLOBAL_ENTRY(prefetch_stack) - add r14 = -IA64_SWITCH_STACK_SIZE, sp - add r15 = IA64_TASK_THREAD_KSP_OFFSET, in0 - ;; - ld8 r16 = [r15] // load next's stack pointer - lfetch.fault.excl [r14], 128 - ;; - lfetch.fault.excl [r14], 128 - lfetch.fault [r16], 128 - ;; - lfetch.fault.excl [r14], 128 - lfetch.fault [r16], 128 - ;; - lfetch.fault.excl [r14], 128 - lfetch.fault [r16], 128 - ;; - lfetch.fault.excl [r14], 128 - lfetch.fault [r16], 128 - ;; - lfetch.fault [r16], 128 - br.ret.sptk.many rp -END(prefetch_stack) - /* * Invoke a system call, but do some tracing before and after the call. 
* We MUST preserve the current register frame throughout this routine diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 9509cc73b9c641..79c7c46d7dc175 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -72,7 +72,9 @@ #include #include #include -#include +#include +#include +#include #include #include #include diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 677a86826771a4..09f86ebfcc7b4f 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -22,6 +22,8 @@ #include #include #include +#include +#include #include #include #include diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 52deab683ba137..d344d0d691aaca 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -20,6 +20,10 @@ #include #include #include +#include +#include +#include +#include #include #include #include diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c index 0b1153e610ea3c..3f8293378a8304 100644 --- a/arch/ia64/kernel/ptrace.c +++ b/arch/ia64/kernel/ptrace.c @@ -11,6 +11,8 @@ */ #include #include +#include +#include #include #include #include diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index d68322966f33ac..23e3fd61e335e4 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c @@ -29,9 +29,12 @@ #include #include #include +#include #include #include -#include +#include +#include +#include #include #include #include diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c index a09c12230bc507..5ce927c854a68c 100644 --- a/arch/ia64/kernel/sys_ia64.c +++ b/arch/ia64/kernel/sys_ia64.c @@ -10,6 +10,8 @@ #include #include #include +#include +#include #include #include /* doh, must come after sched.h... 
*/ #include diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index faa116822c4c3d..aa7be020a9042b 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c @@ -16,12 +16,13 @@ #include #include #include +#include #include #include #include #include #include -#include +#include #include #include diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c index 8981ce98afb365..7b1fe9462158e1 100644 --- a/arch/ia64/kernel/traps.c +++ b/arch/ia64/kernel/traps.c @@ -9,7 +9,8 @@ #include #include -#include +#include +#include #include #include /* For unblank_screen() */ #include diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c index 99348d7f2255ce..a13680ca1e6118 100644 --- a/arch/ia64/kernel/unaligned.c +++ b/arch/ia64/kernel/unaligned.c @@ -15,7 +15,7 @@ */ #include #include -#include +#include #include #include #include diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c index f3976da36721a9..583f7ff6b589e6 100644 --- a/arch/ia64/kernel/uncached.c +++ b/arch/ia64/kernel/uncached.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index 7f2feb21753c8b..15f09cfff335b9 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c @@ -4,7 +4,7 @@ * Copyright (C) 1998-2002 Hewlett-Packard Co * David Mosberger-Tang */ -#include +#include #include #include #include diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 06cdaef54b2eae..8f3efa682ee848 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c index c98dc965fe82fb..b73b0ebf82148e 100644 --- a/arch/ia64/sn/kernel/sn2/sn2_smp.c +++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/m32r/include/asm/mmu_context.h b/arch/m32r/include/asm/mmu_context.h index 9fc78fc4444524..1230b7050d8e30 100644 --- a/arch/m32r/include/asm/mmu_context.h +++ b/arch/m32r/include/asm/mmu_context.h @@ -12,6 +12,8 @@ #ifndef __ASSEMBLY__ #include +#include + #include #include #include diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c index e0568bee60c072..d8ffcfec599cb6 100644 --- a/arch/m32r/kernel/process.c +++ b/arch/m32r/kernel/process.c @@ -22,6 +22,9 @@ #include #include +#include +#include +#include #include #include #include diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c index a68acb9fa515c1..2d887400e30e3e 100644 --- a/arch/m32r/kernel/ptrace.c +++ b/arch/m32r/kernel/ptrace.c @@ -16,6 +16,7 @@ #include #include +#include #include #include #include diff --git a/arch/m32r/kernel/setup.c b/arch/m32r/kernel/setup.c index b18bc0bd654470..1a9e977287e615 100644 --- a/arch/m32r/kernel/setup.c +++ b/arch/m32r/kernel/setup.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c index f98d2f6519d633..a7d04684d2c770 100644 --- a/arch/m32r/kernel/smpboot.c +++ b/arch/m32r/kernel/smpboot.c @@ -45,6 +45,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c index c3c5fdfae920d5..647dd94a0c399f 100644 --- a/arch/m32r/kernel/traps.c +++ b/arch/m32r/kernel/traps.c @@ -14,7 +14,11 @@ #include #include #include +#include +#include #include 
+#include + #include #include diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig index 048bf076f7df66..531cb9eb3319f4 100644 --- a/arch/m68k/configs/amiga_defconfig +++ b/arch/m68k/configs/amiga_defconfig @@ -25,6 +25,7 @@ CONFIG_SUN_PARTITION=y # CONFIG_EFI_PARTITION is not set CONFIG_SYSV68_PARTITION=y CONFIG_IOSCHED_DEADLINE=m +CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_KEXEC=y CONFIG_BOOTINFO_PROC=y CONFIG_M68020=y @@ -60,6 +61,7 @@ CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y CONFIG_INET_AH=m CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m CONFIG_INET_IPCOMP=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m @@ -71,6 +73,7 @@ CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m CONFIG_INET6_IPCOMP=m CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m @@ -101,6 +104,7 @@ CONFIG_NFT_NUMGEN=m CONFIG_NFT_CT=m CONFIG_NFT_SET_RBTREE=m CONFIG_NFT_SET_HASH=m +CONFIG_NFT_SET_BITMAP=m CONFIG_NFT_COUNTER=m CONFIG_NFT_LOG=m CONFIG_NFT_LIMIT=m @@ -298,6 +302,8 @@ CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_L3_MASTER_DEV=y CONFIG_AF_KCM=m # CONFIG_WIRELESS is not set +CONFIG_PSAMPLE=m +CONFIG_NET_IFE=m CONFIG_NET_DEVLINK=m # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -371,6 +377,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m +CONFIG_IPVTAP=m CONFIG_VXLAN=m CONFIG_GENEVE=m CONFIG_GTP=m @@ -383,6 +390,7 @@ CONFIG_VETH=m # CONFIG_NET_VENDOR_AMAZON is not set CONFIG_A2065=y CONFIG_ARIADNE=y +# CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set @@ -404,7 +412,6 @@ CONFIG_ZORRO8390=y # CONFIG_NET_VENDOR_SOLARFLARE is not set # CONFIG_NET_VENDOR_SMSC is not set # CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -564,6 +571,8 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set CONFIG_MAGIC_SYSRQ=y +CONFIG_WW_MUTEX_SELFTEST=m +CONFIG_ATOMIC64_SELFTEST=m CONFIG_ASYNC_RAID6_TEST=m CONFIG_TEST_HEXDUMP=m CONFIG_TEST_STRING_HELPERS=m @@ -594,6 +603,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_CMAC=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -605,6 +615,7 @@ CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES_TI=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -629,4 +640,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set +CONFIG_CRC32_SELFTEST=m CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig index d4de24963f5f74..ca91d39555da2d 100644 --- a/arch/m68k/configs/apollo_defconfig +++ b/arch/m68k/configs/apollo_defconfig @@ -26,6 +26,7 @@ CONFIG_SUN_PARTITION=y # CONFIG_EFI_PARTITION is not set CONFIG_SYSV68_PARTITION=y CONFIG_IOSCHED_DEADLINE=m +CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_KEXEC=y CONFIG_BOOTINFO_PROC=y CONFIG_M68020=y @@ -58,6 +59,7 @@ CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y CONFIG_INET_AH=m CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m CONFIG_INET_IPCOMP=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m @@ -69,6 +71,7 @@ CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m 
+CONFIG_INET6_ESP_OFFLOAD=m CONFIG_INET6_IPCOMP=m CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m @@ -99,6 +102,7 @@ CONFIG_NFT_NUMGEN=m CONFIG_NFT_CT=m CONFIG_NFT_SET_RBTREE=m CONFIG_NFT_SET_HASH=m +CONFIG_NFT_SET_BITMAP=m CONFIG_NFT_COUNTER=m CONFIG_NFT_LOG=m CONFIG_NFT_LIMIT=m @@ -296,6 +300,8 @@ CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_L3_MASTER_DEV=y CONFIG_AF_KCM=m # CONFIG_WIRELESS is not set +CONFIG_PSAMPLE=m +CONFIG_NET_IFE=m CONFIG_NET_DEVLINK=m # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -353,6 +359,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m +CONFIG_IPVTAP=m CONFIG_VXLAN=m CONFIG_GENEVE=m CONFIG_GTP=m @@ -362,6 +369,7 @@ CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m # CONFIG_NET_VENDOR_ALACRITECH is not set # CONFIG_NET_VENDOR_AMAZON is not set +# CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set @@ -378,7 +386,6 @@ CONFIG_VETH=m # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SOLARFLARE is not set # CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -523,6 +530,8 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set CONFIG_MAGIC_SYSRQ=y +CONFIG_WW_MUTEX_SELFTEST=m +CONFIG_ATOMIC64_SELFTEST=m CONFIG_ASYNC_RAID6_TEST=m CONFIG_TEST_HEXDUMP=m CONFIG_TEST_STRING_HELPERS=m @@ -553,6 +562,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_CMAC=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -564,6 +574,7 @@ CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES_TI=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -588,4 +599,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set +CONFIG_CRC32_SELFTEST=m CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig index fc0fd3f871f334..23a3d8a691e223 100644 --- a/arch/m68k/configs/atari_defconfig +++ b/arch/m68k/configs/atari_defconfig @@ -25,6 +25,7 @@ CONFIG_SUN_PARTITION=y # CONFIG_EFI_PARTITION is not set CONFIG_SYSV68_PARTITION=y CONFIG_IOSCHED_DEADLINE=m +CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_KEXEC=y CONFIG_BOOTINFO_PROC=y CONFIG_M68020=y @@ -58,6 +59,7 @@ CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y CONFIG_INET_AH=m CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m CONFIG_INET_IPCOMP=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m @@ -69,6 +71,7 @@ CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m CONFIG_INET6_IPCOMP=m CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m @@ -99,6 +102,7 @@ CONFIG_NFT_NUMGEN=m CONFIG_NFT_CT=m CONFIG_NFT_SET_RBTREE=m CONFIG_NFT_SET_HASH=m +CONFIG_NFT_SET_BITMAP=m CONFIG_NFT_COUNTER=m CONFIG_NFT_LOG=m CONFIG_NFT_LIMIT=m @@ -296,6 +300,8 @@ CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_L3_MASTER_DEV=y CONFIG_AF_KCM=m # CONFIG_WIRELESS is not set +CONFIG_PSAMPLE=m +CONFIG_NET_IFE=m CONFIG_NET_DEVLINK=m # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -362,6 +368,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m +CONFIG_IPVTAP=m CONFIG_VXLAN=m CONFIG_GENEVE=m CONFIG_GTP=m @@ -372,6 +379,7 @@ CONFIG_VETH=m # CONFIG_NET_VENDOR_ALACRITECH is not set # 
CONFIG_NET_VENDOR_AMAZON is not set CONFIG_ATARILANCE=y +# CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set @@ -389,7 +397,6 @@ CONFIG_NE2000=y # CONFIG_NET_VENDOR_SOLARFLARE is not set CONFIG_SMC91X=y # CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -544,6 +551,8 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set CONFIG_MAGIC_SYSRQ=y +CONFIG_WW_MUTEX_SELFTEST=m +CONFIG_ATOMIC64_SELFTEST=m CONFIG_ASYNC_RAID6_TEST=m CONFIG_TEST_HEXDUMP=m CONFIG_TEST_STRING_HELPERS=m @@ -574,6 +583,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_CMAC=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -585,6 +595,7 @@ CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES_TI=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -609,4 +620,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set +CONFIG_CRC32_SELFTEST=m CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig index 52e984a0aa696a..95deb95140fe92 100644 --- a/arch/m68k/configs/bvme6000_defconfig +++ b/arch/m68k/configs/bvme6000_defconfig @@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y CONFIG_SUN_PARTITION=y # CONFIG_EFI_PARTITION is not set CONFIG_IOSCHED_DEADLINE=m +CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_KEXEC=y CONFIG_BOOTINFO_PROC=y CONFIG_M68040=y @@ -56,6 +57,7 @@ CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y CONFIG_INET_AH=m CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m CONFIG_INET_IPCOMP=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m @@ -67,6 +69,7 @@ CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m CONFIG_INET6_IPCOMP=m CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m @@ -97,6 +100,7 @@ CONFIG_NFT_NUMGEN=m CONFIG_NFT_CT=m CONFIG_NFT_SET_RBTREE=m CONFIG_NFT_SET_HASH=m +CONFIG_NFT_SET_BITMAP=m CONFIG_NFT_COUNTER=m CONFIG_NFT_LOG=m CONFIG_NFT_LIMIT=m @@ -294,6 +298,8 @@ CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_L3_MASTER_DEV=y CONFIG_AF_KCM=m # CONFIG_WIRELESS is not set +CONFIG_PSAMPLE=m +CONFIG_NET_IFE=m CONFIG_NET_DEVLINK=m # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -352,6 +358,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m +CONFIG_IPVTAP=m CONFIG_VXLAN=m CONFIG_GENEVE=m CONFIG_GTP=m @@ -361,6 +368,7 @@ CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m # CONFIG_NET_VENDOR_ALACRITECH is not set # CONFIG_NET_VENDOR_AMAZON is not set +# CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set @@ -377,7 +385,6 @@ CONFIG_BVME6000_NET=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SOLARFLARE is not set # CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -515,6 +522,8 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set CONFIG_MAGIC_SYSRQ=y +CONFIG_WW_MUTEX_SELFTEST=m +CONFIG_ATOMIC64_SELFTEST=m CONFIG_ASYNC_RAID6_TEST=m CONFIG_TEST_HEXDUMP=m 
CONFIG_TEST_STRING_HELPERS=m @@ -545,6 +554,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_CMAC=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -556,6 +566,7 @@ CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES_TI=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -580,4 +591,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set +CONFIG_CRC32_SELFTEST=m CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig index aaeed4422cc975..afae6958db2d77 100644 --- a/arch/m68k/configs/hp300_defconfig +++ b/arch/m68k/configs/hp300_defconfig @@ -26,6 +26,7 @@ CONFIG_SUN_PARTITION=y # CONFIG_EFI_PARTITION is not set CONFIG_SYSV68_PARTITION=y CONFIG_IOSCHED_DEADLINE=m +CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_KEXEC=y CONFIG_BOOTINFO_PROC=y CONFIG_M68020=y @@ -58,6 +59,7 @@ CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y CONFIG_INET_AH=m CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m CONFIG_INET_IPCOMP=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m @@ -69,6 +71,7 @@ CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m CONFIG_INET6_IPCOMP=m CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m @@ -99,6 +102,7 @@ CONFIG_NFT_NUMGEN=m CONFIG_NFT_CT=m CONFIG_NFT_SET_RBTREE=m CONFIG_NFT_SET_HASH=m +CONFIG_NFT_SET_BITMAP=m CONFIG_NFT_COUNTER=m CONFIG_NFT_LOG=m CONFIG_NFT_LIMIT=m @@ -296,6 +300,8 @@ CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_L3_MASTER_DEV=y CONFIG_AF_KCM=m # CONFIG_WIRELESS is not set +CONFIG_PSAMPLE=m +CONFIG_NET_IFE=m CONFIG_NET_DEVLINK=m # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -353,6 +359,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m +CONFIG_IPVTAP=m CONFIG_VXLAN=m CONFIG_GENEVE=m CONFIG_GTP=m @@ -363,6 +370,7 @@ CONFIG_VETH=m # CONFIG_NET_VENDOR_ALACRITECH is not set # CONFIG_NET_VENDOR_AMAZON is not set CONFIG_HPLANCE=y +# CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set @@ -379,7 +387,6 @@ CONFIG_HPLANCE=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SOLARFLARE is not set # CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -525,6 +532,8 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set CONFIG_MAGIC_SYSRQ=y +CONFIG_WW_MUTEX_SELFTEST=m +CONFIG_ATOMIC64_SELFTEST=m CONFIG_ASYNC_RAID6_TEST=m CONFIG_TEST_HEXDUMP=m CONFIG_TEST_STRING_HELPERS=m @@ -555,6 +564,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_CMAC=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -566,6 +576,7 @@ CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES_TI=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -590,4 +601,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set +CONFIG_CRC32_SELFTEST=m CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig index 
3bbc9b2f0dac0f..b010734729a79e 100644 --- a/arch/m68k/configs/mac_defconfig +++ b/arch/m68k/configs/mac_defconfig @@ -25,6 +25,7 @@ CONFIG_SUN_PARTITION=y # CONFIG_EFI_PARTITION is not set CONFIG_SYSV68_PARTITION=y CONFIG_IOSCHED_DEADLINE=m +CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_KEXEC=y CONFIG_BOOTINFO_PROC=y CONFIG_M68020=y @@ -57,6 +58,7 @@ CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y CONFIG_INET_AH=m CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m CONFIG_INET_IPCOMP=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m @@ -68,6 +70,7 @@ CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m CONFIG_INET6_IPCOMP=m CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m @@ -98,6 +101,7 @@ CONFIG_NFT_NUMGEN=m CONFIG_NFT_CT=m CONFIG_NFT_SET_RBTREE=m CONFIG_NFT_SET_HASH=m +CONFIG_NFT_SET_BITMAP=m CONFIG_NFT_COUNTER=m CONFIG_NFT_LOG=m CONFIG_NFT_LIMIT=m @@ -298,6 +302,8 @@ CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_L3_MASTER_DEV=y CONFIG_AF_KCM=m # CONFIG_WIRELESS is not set +CONFIG_PSAMPLE=m +CONFIG_NET_IFE=m CONFIG_NET_DEVLINK=m # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -369,6 +375,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m +CONFIG_IPVTAP=m CONFIG_VXLAN=m CONFIG_GENEVE=m CONFIG_GTP=m @@ -379,6 +386,7 @@ CONFIG_VETH=m # CONFIG_NET_VENDOR_ALACRITECH is not set # CONFIG_NET_VENDOR_AMAZON is not set CONFIG_MACMACE=y +# CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set @@ -398,7 +406,6 @@ CONFIG_MAC8390=y # CONFIG_NET_VENDOR_SOLARFLARE is not set # CONFIG_NET_VENDOR_SMSC is not set # CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -547,6 +554,8 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set CONFIG_MAGIC_SYSRQ=y +CONFIG_WW_MUTEX_SELFTEST=m +CONFIG_ATOMIC64_SELFTEST=m CONFIG_ASYNC_RAID6_TEST=m CONFIG_TEST_HEXDUMP=m CONFIG_TEST_STRING_HELPERS=m @@ -577,6 +586,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_CMAC=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -588,6 +598,7 @@ CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES_TI=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -612,4 +623,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set +CONFIG_CRC32_SELFTEST=m CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig index 8f2c0decb2f8ed..0e414549b235b0 100644 --- a/arch/m68k/configs/multi_defconfig +++ b/arch/m68k/configs/multi_defconfig @@ -21,6 +21,7 @@ CONFIG_SOLARIS_X86_PARTITION=y CONFIG_UNIXWARE_DISKLABEL=y # CONFIG_EFI_PARTITION is not set CONFIG_IOSCHED_DEADLINE=m +CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_KEXEC=y CONFIG_BOOTINFO_PROC=y CONFIG_M68020=y @@ -67,6 +68,7 @@ CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y CONFIG_INET_AH=m CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m CONFIG_INET_IPCOMP=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m @@ -78,6 +80,7 @@ CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m CONFIG_INET6_IPCOMP=m CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m @@ -108,6 +111,7 
@@ CONFIG_NFT_NUMGEN=m CONFIG_NFT_CT=m CONFIG_NFT_SET_RBTREE=m CONFIG_NFT_SET_HASH=m +CONFIG_NFT_SET_BITMAP=m CONFIG_NFT_COUNTER=m CONFIG_NFT_LOG=m CONFIG_NFT_LIMIT=m @@ -308,6 +312,8 @@ CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_L3_MASTER_DEV=y CONFIG_AF_KCM=m # CONFIG_WIRELESS is not set +CONFIG_PSAMPLE=m +CONFIG_NET_IFE=m CONFIG_NET_DEVLINK=m # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -402,6 +408,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m +CONFIG_IPVTAP=m CONFIG_VXLAN=m CONFIG_GENEVE=m CONFIG_GTP=m @@ -419,6 +426,7 @@ CONFIG_HPLANCE=y CONFIG_MVME147_NET=y CONFIG_SUN3LANCE=y CONFIG_MACMACE=y +# CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set @@ -444,7 +452,6 @@ CONFIG_ZORRO8390=y # CONFIG_NET_VENDOR_SOLARFLARE is not set CONFIG_SMC91X=y # CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PLIP=m @@ -627,6 +634,8 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set CONFIG_MAGIC_SYSRQ=y +CONFIG_WW_MUTEX_SELFTEST=m +CONFIG_ATOMIC64_SELFTEST=m CONFIG_ASYNC_RAID6_TEST=m CONFIG_TEST_HEXDUMP=m CONFIG_TEST_STRING_HELPERS=m @@ -657,6 +666,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_CMAC=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -668,6 +678,7 @@ CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES_TI=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -692,4 +703,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set +CONFIG_CRC32_SELFTEST=m CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig index c743dd22e96f93..b2e687a0ec3d47 100644 --- a/arch/m68k/configs/mvme147_defconfig +++ b/arch/m68k/configs/mvme147_defconfig @@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y CONFIG_SUN_PARTITION=y # CONFIG_EFI_PARTITION is not set CONFIG_IOSCHED_DEADLINE=m +CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_KEXEC=y CONFIG_BOOTINFO_PROC=y CONFIG_M68030=y @@ -55,6 +56,7 @@ CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y CONFIG_INET_AH=m CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m CONFIG_INET_IPCOMP=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m @@ -66,6 +68,7 @@ CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m CONFIG_INET6_IPCOMP=m CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m @@ -96,6 +99,7 @@ CONFIG_NFT_NUMGEN=m CONFIG_NFT_CT=m CONFIG_NFT_SET_RBTREE=m CONFIG_NFT_SET_HASH=m +CONFIG_NFT_SET_BITMAP=m CONFIG_NFT_COUNTER=m CONFIG_NFT_LOG=m CONFIG_NFT_LIMIT=m @@ -293,6 +297,8 @@ CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_L3_MASTER_DEV=y CONFIG_AF_KCM=m # CONFIG_WIRELESS is not set +CONFIG_PSAMPLE=m +CONFIG_NET_IFE=m CONFIG_NET_DEVLINK=m # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -351,6 +357,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m +CONFIG_IPVTAP=m CONFIG_VXLAN=m CONFIG_GENEVE=m CONFIG_GTP=m @@ -361,6 +368,7 @@ CONFIG_VETH=m # CONFIG_NET_VENDOR_ALACRITECH is not set # CONFIG_NET_VENDOR_AMAZON is not set CONFIG_MVME147_NET=y +# CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE 
is not set # CONFIG_NET_VENDOR_BROADCOM is not set @@ -377,7 +385,6 @@ CONFIG_MVME147_NET=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SOLARFLARE is not set # CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -515,6 +522,8 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set CONFIG_MAGIC_SYSRQ=y +CONFIG_WW_MUTEX_SELFTEST=m +CONFIG_ATOMIC64_SELFTEST=m CONFIG_ASYNC_RAID6_TEST=m CONFIG_TEST_HEXDUMP=m CONFIG_TEST_STRING_HELPERS=m @@ -545,6 +554,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_CMAC=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -556,6 +566,7 @@ CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES_TI=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -580,4 +591,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set +CONFIG_CRC32_SELFTEST=m CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig index 2ccaca858f0533..cbd8ee24d1bc4e 100644 --- a/arch/m68k/configs/mvme16x_defconfig +++ b/arch/m68k/configs/mvme16x_defconfig @@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y CONFIG_SUN_PARTITION=y # CONFIG_EFI_PARTITION is not set CONFIG_IOSCHED_DEADLINE=m +CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_KEXEC=y CONFIG_BOOTINFO_PROC=y CONFIG_M68040=y @@ -56,6 +57,7 @@ CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y CONFIG_INET_AH=m CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m CONFIG_INET_IPCOMP=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m @@ -67,6 +69,7 @@ CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m CONFIG_INET6_IPCOMP=m CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m @@ -97,6 +100,7 @@ CONFIG_NFT_NUMGEN=m CONFIG_NFT_CT=m CONFIG_NFT_SET_RBTREE=m CONFIG_NFT_SET_HASH=m +CONFIG_NFT_SET_BITMAP=m CONFIG_NFT_COUNTER=m CONFIG_NFT_LOG=m CONFIG_NFT_LIMIT=m @@ -294,6 +298,8 @@ CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_L3_MASTER_DEV=y CONFIG_AF_KCM=m # CONFIG_WIRELESS is not set +CONFIG_PSAMPLE=m +CONFIG_NET_IFE=m CONFIG_NET_DEVLINK=m # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -352,6 +358,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m +CONFIG_IPVTAP=m CONFIG_VXLAN=m CONFIG_GENEVE=m CONFIG_GTP=m @@ -361,6 +368,7 @@ CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m # CONFIG_NET_VENDOR_ALACRITECH is not set # CONFIG_NET_VENDOR_AMAZON is not set +# CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set @@ -377,7 +385,6 @@ CONFIG_MVME16x_NET=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SOLARFLARE is not set # CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -515,6 +522,8 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set CONFIG_MAGIC_SYSRQ=y +CONFIG_WW_MUTEX_SELFTEST=m +CONFIG_ATOMIC64_SELFTEST=m CONFIG_ASYNC_RAID6_TEST=m CONFIG_TEST_HEXDUMP=m CONFIG_TEST_STRING_HELPERS=m @@ -545,6 +554,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m 
+CONFIG_CRYPTO_CMAC=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -556,6 +566,7 @@ CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES_TI=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -580,4 +591,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set +CONFIG_CRC32_SELFTEST=m CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig index 5599f3fd5fcd44..1e82cc9443399a 100644 --- a/arch/m68k/configs/q40_defconfig +++ b/arch/m68k/configs/q40_defconfig @@ -26,6 +26,7 @@ CONFIG_SUN_PARTITION=y # CONFIG_EFI_PARTITION is not set CONFIG_SYSV68_PARTITION=y CONFIG_IOSCHED_DEADLINE=m +CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_KEXEC=y CONFIG_BOOTINFO_PROC=y CONFIG_M68040=y @@ -56,6 +57,7 @@ CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y CONFIG_INET_AH=m CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m CONFIG_INET_IPCOMP=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m @@ -67,6 +69,7 @@ CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m CONFIG_INET6_IPCOMP=m CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m @@ -97,6 +100,7 @@ CONFIG_NFT_NUMGEN=m CONFIG_NFT_CT=m CONFIG_NFT_SET_RBTREE=m CONFIG_NFT_SET_HASH=m +CONFIG_NFT_SET_BITMAP=m CONFIG_NFT_COUNTER=m CONFIG_NFT_LOG=m CONFIG_NFT_LIMIT=m @@ -294,6 +298,8 @@ CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_L3_MASTER_DEV=y CONFIG_AF_KCM=m # CONFIG_WIRELESS is not set +CONFIG_PSAMPLE=m +CONFIG_NET_IFE=m CONFIG_NET_DEVLINK=m # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -358,6 +364,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m +CONFIG_IPVTAP=m CONFIG_VXLAN=m CONFIG_GENEVE=m CONFIG_GTP=m @@ -369,6 +376,7 @@ CONFIG_VETH=m # CONFIG_NET_VENDOR_ALACRITECH is not set # CONFIG_NET_VENDOR_AMAZON is not set # CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set @@ -388,7 +396,6 @@ CONFIG_NE2000=y # CONFIG_NET_VENDOR_SOLARFLARE is not set # CONFIG_NET_VENDOR_SMSC is not set # CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PLIP=m @@ -538,6 +545,8 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set CONFIG_MAGIC_SYSRQ=y +CONFIG_WW_MUTEX_SELFTEST=m +CONFIG_ATOMIC64_SELFTEST=m CONFIG_ASYNC_RAID6_TEST=m CONFIG_TEST_HEXDUMP=m CONFIG_TEST_STRING_HELPERS=m @@ -568,6 +577,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_CMAC=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -579,6 +589,7 @@ CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES_TI=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -603,4 +614,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set +CONFIG_CRC32_SELFTEST=m CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig index 313bf0a562ad33..f9e77f57a97250 100644 --- a/arch/m68k/configs/sun3_defconfig +++ b/arch/m68k/configs/sun3_defconfig @@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y 
# CONFIG_EFI_PARTITION is not set CONFIG_SYSV68_PARTITION=y CONFIG_IOSCHED_DEADLINE=m +CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_KEXEC=y CONFIG_BOOTINFO_PROC=y CONFIG_SUN3=y @@ -53,6 +54,7 @@ CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y CONFIG_INET_AH=m CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m CONFIG_INET_IPCOMP=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m @@ -64,6 +66,7 @@ CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m CONFIG_INET6_IPCOMP=m CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m @@ -94,6 +97,7 @@ CONFIG_NFT_NUMGEN=m CONFIG_NFT_CT=m CONFIG_NFT_SET_RBTREE=m CONFIG_NFT_SET_HASH=m +CONFIG_NFT_SET_BITMAP=m CONFIG_NFT_COUNTER=m CONFIG_NFT_LOG=m CONFIG_NFT_LIMIT=m @@ -291,6 +295,8 @@ CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_L3_MASTER_DEV=y CONFIG_AF_KCM=m # CONFIG_WIRELESS is not set +CONFIG_PSAMPLE=m +CONFIG_NET_IFE=m CONFIG_NET_DEVLINK=m # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -349,6 +355,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m +CONFIG_IPVTAP=m CONFIG_VXLAN=m CONFIG_GENEVE=m CONFIG_GTP=m @@ -359,6 +366,7 @@ CONFIG_VETH=m # CONFIG_NET_VENDOR_ALACRITECH is not set # CONFIG_NET_VENDOR_AMAZON is not set CONFIG_SUN3LANCE=y +# CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_EZCHIP is not set @@ -375,7 +383,6 @@ CONFIG_SUN3_82586=y # CONFIG_NET_VENDOR_SOLARFLARE is not set # CONFIG_NET_VENDOR_STMICRO is not set # CONFIG_NET_VENDOR_SUN is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -517,6 +524,8 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set CONFIG_MAGIC_SYSRQ=y +CONFIG_WW_MUTEX_SELFTEST=m +CONFIG_ATOMIC64_SELFTEST=m CONFIG_ASYNC_RAID6_TEST=m CONFIG_TEST_HEXDUMP=m CONFIG_TEST_STRING_HELPERS=m @@ -546,6 +555,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_CMAC=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -557,6 +567,7 @@ CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES_TI=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -581,4 +592,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set +CONFIG_CRC32_SELFTEST=m CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig index 38b61365f76927..3c394fcfb36836 100644 --- a/arch/m68k/configs/sun3x_defconfig +++ b/arch/m68k/configs/sun3x_defconfig @@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y # CONFIG_EFI_PARTITION is not set CONFIG_SYSV68_PARTITION=y CONFIG_IOSCHED_DEADLINE=m +CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_KEXEC=y CONFIG_BOOTINFO_PROC=y CONFIG_SUN3X=y @@ -53,6 +54,7 @@ CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y CONFIG_INET_AH=m CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m CONFIG_INET_IPCOMP=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m @@ -64,6 +66,7 @@ CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m CONFIG_INET6_IPCOMP=m CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m @@ -94,6 +97,7 @@ CONFIG_NFT_NUMGEN=m CONFIG_NFT_CT=m CONFIG_NFT_SET_RBTREE=m CONFIG_NFT_SET_HASH=m +CONFIG_NFT_SET_BITMAP=m CONFIG_NFT_COUNTER=m CONFIG_NFT_LOG=m 
CONFIG_NFT_LIMIT=m @@ -291,6 +295,8 @@ CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_L3_MASTER_DEV=y CONFIG_AF_KCM=m # CONFIG_WIRELESS is not set +CONFIG_PSAMPLE=m +CONFIG_NET_IFE=m CONFIG_NET_DEVLINK=m # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -349,6 +355,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m +CONFIG_IPVTAP=m CONFIG_VXLAN=m CONFIG_GENEVE=m CONFIG_GTP=m @@ -359,6 +366,7 @@ CONFIG_VETH=m # CONFIG_NET_VENDOR_ALACRITECH is not set # CONFIG_NET_VENDOR_AMAZON is not set CONFIG_SUN3LANCE=y +# CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set @@ -375,7 +383,6 @@ CONFIG_SUN3LANCE=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SOLARFLARE is not set # CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -517,6 +524,8 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set CONFIG_MAGIC_SYSRQ=y +CONFIG_WW_MUTEX_SELFTEST=m +CONFIG_ATOMIC64_SELFTEST=m CONFIG_ASYNC_RAID6_TEST=m CONFIG_TEST_HEXDUMP=m CONFIG_TEST_STRING_HELPERS=m @@ -547,6 +556,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_CMAC=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -558,6 +568,7 @@ CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES_TI=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -582,4 +593,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set +CONFIG_CRC32_SELFTEST=m CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/include/asm/a.out-core.h b/arch/m68k/include/asm/a.out-core.h index f6bfc1d63ff6db..ae91ea6bb30379 100644 --- a/arch/m68k/include/asm/a.out-core.h +++ b/arch/m68k/include/asm/a.out-core.h @@ -16,6 +16,7 @@ #include #include +#include /* * fill in the user structure for an a.out core dump diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h index b4a9b0d5928dfb..dda58cfe8c22a3 100644 --- a/arch/m68k/include/asm/bitops.h +++ b/arch/m68k/include/asm/bitops.h @@ -148,7 +148,7 @@ static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr) #define __change_bit(nr, vaddr) change_bit(nr, vaddr) -static inline int test_bit(int nr, const unsigned long *vaddr) +static inline int test_bit(int nr, const volatile unsigned long *vaddr) { return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0; } diff --git a/arch/m68k/include/asm/mmu_context.h b/arch/m68k/include/asm/mmu_context.h index dc3be991d63431..4a6ae6dffa345f 100644 --- a/arch/m68k/include/asm/mmu_context.h +++ b/arch/m68k/include/asm/mmu_context.h @@ -2,6 +2,7 @@ #define __M68K_MMU_CONTEXT_H #include +#include static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index a857d82ec5094a..aab1edd0d4bade 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h @@ -4,7 +4,7 @@ #include -#define NR_syscalls 379 +#define NR_syscalls 380 #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_OLD_STAT diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h index 9fe674bf911fd2..25589f5b866963 100644 --- a/arch/m68k/include/uapi/asm/unistd.h 
+++ b/arch/m68k/include/uapi/asm/unistd.h @@ -384,5 +384,6 @@ #define __NR_copy_file_range 376 #define __NR_preadv2 377 #define __NR_pwritev2 378 +#define __NR_statx 379 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */ diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c index f0a8e9b332cda7..e475c945c8b2bf 100644 --- a/arch/m68k/kernel/process.c +++ b/arch/m68k/kernel/process.c @@ -13,6 +13,9 @@ #include #include #include +#include +#include +#include #include #include #include diff --git a/arch/m68k/kernel/ptrace.c b/arch/m68k/kernel/ptrace.c index 9cd86d7343a638..748c63bd008130 100644 --- a/arch/m68k/kernel/ptrace.c +++ b/arch/m68k/kernel/ptrace.c @@ -12,6 +12,7 @@ #include #include +#include #include #include #include diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S index d6fd6d9ced2474..8c9fcfafe0dd90 100644 --- a/arch/m68k/kernel/syscalltable.S +++ b/arch/m68k/kernel/syscalltable.S @@ -399,3 +399,4 @@ ENTRY(sys_call_table) .long sys_copy_file_range .long sys_preadv2 .long sys_pwritev2 + .long sys_statx diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c index 4e5aa2f4f52254..87160b4415fbb0 100644 --- a/arch/m68k/kernel/time.c +++ b/arch/m68k/kernel/time.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c index 558f3840273783..a926d2c88898c2 100644 --- a/arch/m68k/kernel/traps.c +++ b/arch/m68k/kernel/traps.c @@ -19,6 +19,7 @@ */ #include +#include #include #include #include diff --git a/arch/m68k/mac/macints.c b/arch/m68k/mac/macints.c index b5cd06df71fd74..9637dee90dac0e 100644 --- a/arch/m68k/mac/macints.c +++ b/arch/m68k/mac/macints.c @@ -110,6 +110,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/m68k/sun3/mmu_emu.c b/arch/m68k/sun3/mmu_emu.c index e9d7fbe4d5ae4a..7fdc61525e0b73 100644 --- a/arch/m68k/sun3/mmu_emu.c +++ b/arch/m68k/sun3/mmu_emu.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include diff --git a/arch/metag/include/asm/mmu_context.h b/arch/metag/include/asm/mmu_context.h index ae2a71b5e0bedf..2e0312748197db 100644 --- a/arch/metag/include/asm/mmu_context.h +++ b/arch/metag/include/asm/mmu_context.h @@ -9,6 +9,7 @@ #include #include +#include static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) diff --git a/arch/metag/include/asm/pgtable.h b/arch/metag/include/asm/pgtable.h index ffa3a3a2ecadda..0c151e5af07928 100644 --- a/arch/metag/include/asm/pgtable.h +++ b/arch/metag/include/asm/pgtable.h @@ -6,6 +6,7 @@ #define _METAG_PGTABLE_H #include +#define __ARCH_USE_5LEVEL_HACK #include /* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */ diff --git a/arch/metag/kernel/process.c b/arch/metag/kernel/process.c index 35062796edf222..c4606ce743d240 100644 --- a/arch/metag/kernel/process.c +++ b/arch/metag/kernel/process.c @@ -8,6 +8,9 @@ #include #include #include +#include +#include +#include #include #include #include diff --git a/arch/metag/kernel/ptrace.c b/arch/metag/kernel/ptrace.c index 7563628822bdf6..e615603a4b0ae9 100644 --- a/arch/metag/kernel/ptrace.c +++ b/arch/metag/kernel/ptrace.c @@ -15,6 +15,8 @@ #include #include #include +#include + #include #define CREATE_TRACE_POINTS @@ -24,6 +26,16 @@ * user_regset definitions. 
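
The m68k hunks above wire up the new statx(2) system call: NR_syscalls grows to 380, the uapi header assigns __NR_statx number 379, and a matching sys_statx entry is appended to the syscall table. Once a kernel exports the number, the call can be exercised from userspace through syscall(2); a minimal usage sketch, assuming kernel headers new enough to define __NR_statx and struct statx:

	#include <stdio.h>
	#include <unistd.h>
	#include <fcntl.h>		/* AT_FDCWD */
	#include <sys/syscall.h>	/* __NR_statx */
	#include <linux/stat.h>		/* struct statx, STATX_BASIC_STATS */

	int main(void)
	{
		struct statx stx;

		if (syscall(__NR_statx, AT_FDCWD, "/etc/hostname", 0,
			    STATX_BASIC_STATS, &stx) != 0) {
			perror("statx");
			return 1;
		}

		printf("size=%llu mode=%o\n",
		       (unsigned long long)stx.stx_size, stx.stx_mode);
		return 0;
	}
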
*/ +static unsigned long user_txstatus(const struct pt_regs *regs) +{ + unsigned long data = (unsigned long)regs->ctx.Flags; + + if (regs->ctx.SaveMask & TBICTX_CBUF_BIT) + data |= USER_GP_REGS_STATUS_CATCH_BIT; + + return data; +} + int metag_gp_regs_copyout(const struct pt_regs *regs, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) @@ -62,9 +74,7 @@ int metag_gp_regs_copyout(const struct pt_regs *regs, if (ret) goto out; /* TXSTATUS */ - data = (unsigned long)regs->ctx.Flags; - if (regs->ctx.SaveMask & TBICTX_CBUF_BIT) - data |= USER_GP_REGS_STATUS_CATCH_BIT; + data = user_txstatus(regs); ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &data, 4*25, 4*26); if (ret) @@ -119,6 +129,7 @@ int metag_gp_regs_copyin(struct pt_regs *regs, if (ret) goto out; /* TXSTATUS */ + data = user_txstatus(regs); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &data, 4*25, 4*26); if (ret) @@ -244,6 +255,8 @@ int metag_rp_state_copyin(struct pt_regs *regs, unsigned long long *ptr; int ret, i; + if (count < 4*13) + return -EINVAL; /* Read the entire pipeline before making any changes */ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &rp, 0, 4*13); @@ -303,7 +316,7 @@ static int metag_tls_set(struct task_struct *target, const void *kbuf, const void __user *ubuf) { int ret; - void __user *tls; + void __user *tls = target->thread.tls_ptr; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); if (ret) diff --git a/arch/metag/kernel/signal.c b/arch/metag/kernel/signal.c index ce49d429c74aa6..338925d808e6da 100644 --- a/arch/metag/kernel/signal.c +++ b/arch/metag/kernel/signal.c @@ -7,6 +7,7 @@ */ #include +#include #include #include #include diff --git a/arch/metag/kernel/smp.c b/arch/metag/kernel/smp.c index c622293254e4e4..232a12bf3f999e 100644 --- a/arch/metag/kernel/smp.c +++ b/arch/metag/kernel/smp.c @@ -12,7 +12,9 @@ #include #include #include -#include +#include +#include +#include #include #include #include diff --git a/arch/metag/kernel/stacktrace.c b/arch/metag/kernel/stacktrace.c index 5510361d5beac8..91ffc4b75c332b 100644 --- a/arch/metag/kernel/stacktrace.c +++ b/arch/metag/kernel/stacktrace.c @@ -1,5 +1,7 @@ #include #include +#include +#include #include #include diff --git a/arch/metag/kernel/traps.c b/arch/metag/kernel/traps.c index 17b2e2e38d5a0c..444851e510d5b4 100644 --- a/arch/metag/kernel/traps.c +++ b/arch/metag/kernel/traps.c @@ -10,6 +10,9 @@ #include #include +#include +#include +#include #include #include #include diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c index c765b3621b9b9f..5055477486b6f1 100644 --- a/arch/metag/mm/fault.c +++ b/arch/metag/mm/fault.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include diff --git a/arch/metag/mm/init.c b/arch/metag/mm/init.c index c0ec116b3993a3..188d4d9fbed4d9 100644 --- a/arch/metag/mm/init.c +++ b/arch/metag/mm/init.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include diff --git a/arch/microblaze/include/asm/mmu_context_mm.h b/arch/microblaze/include/asm/mmu_context_mm.h index d6864774644874..99472d2ca3404d 100644 --- a/arch/microblaze/include/asm/mmu_context_mm.h +++ b/arch/microblaze/include/asm/mmu_context_mm.h @@ -12,6 +12,8 @@ #define _ASM_MICROBLAZE_MMU_CONTEXT_H #include +#include + #include #include #include diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h index fd850879854dff..d506bb0893f94e 100644 --- a/arch/microblaze/include/asm/page.h +++ b/arch/microblaze/include/asm/page.h @@ -95,7 +95,8 @@ 
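
The metag ptrace hunks above all address the same class of bug in regset ->set() handlers: a local that user_regset_copyin() may only partially overwrite must be seeded from the task's current state (and whole-structure writes must check count up front), otherwise a short write pushes uninitialized stack contents into the thread. A condensed sketch of the seeding pattern, with hypothetical accessor names in place of the metag-specific ones:

	#include <linux/regset.h>
	#include <linux/sched.h>

	/* Hypothetical accessors for the per-thread value being edited. */
	extern unsigned long example_read_status(struct task_struct *tsk);
	extern void example_write_status(struct task_struct *tsk, unsigned long val);

	static int example_status_set(struct task_struct *target,
				      const struct user_regset *regset,
				      unsigned int pos, unsigned int count,
				      const void *kbuf, const void __user *ubuf)
	{
		/* Seed from current state so a short write keeps the old bits. */
		unsigned long data = example_read_status(target);
		int ret;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &data, 0, -1);
		if (ret)
			return ret;

		example_write_status(target, data);
		return 0;
	}

The same reasoning explains the metag_tls_set() change (the tls pointer is now initialized from target->thread.tls_ptr before the copyin) and the new count check in metag_rp_state_copyin(), which refuses writes too short to describe the whole read pipeline.
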
typedef struct { unsigned long pgd; } pgd_t; # else /* CONFIG_MMU */ typedef struct { unsigned long ste[64]; } pmd_t; typedef struct { pmd_t pue[1]; } pud_t; -typedef struct { pud_t pge[1]; } pgd_t; +typedef struct { pud_t p4e[1]; } p4d_t; +typedef struct { p4d_t pge[1]; } pgd_t; # endif /* CONFIG_MMU */ # define pte_val(x) ((x).pte) diff --git a/arch/microblaze/kernel/exceptions.c b/arch/microblaze/kernel/exceptions.c index 42dd12a62ff567..e6f338d0496bbd 100644 --- a/arch/microblaze/kernel/exceptions.c +++ b/arch/microblaze/kernel/exceptions.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include diff --git a/arch/microblaze/kernel/heartbeat.c b/arch/microblaze/kernel/heartbeat.c index 4643e3ab941495..2022130139d2de 100644 --- a/arch/microblaze/kernel/heartbeat.c +++ b/arch/microblaze/kernel/heartbeat.c @@ -9,6 +9,7 @@ */ #include +#include #include #include diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c index b2dd37196b3b16..e92a817e645fac 100644 --- a/arch/microblaze/kernel/process.c +++ b/arch/microblaze/kernel/process.c @@ -11,6 +11,9 @@ #include #include #include +#include +#include +#include #include #include #include diff --git a/arch/microblaze/kernel/ptrace.c b/arch/microblaze/kernel/ptrace.c index 8cfa98cadf3d10..badd286882ae68 100644 --- a/arch/microblaze/kernel/ptrace.c +++ b/arch/microblaze/kernel/ptrace.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c index 1d6fad50fa76f6..99906619271553 100644 --- a/arch/microblaze/kernel/timer.c +++ b/arch/microblaze/kernel/timer.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/microblaze/kernel/traps.c b/arch/microblaze/kernel/traps.c index cb619533a19270..45bbba9d919f91 100644 --- a/arch/microblaze/kernel/traps.c +++ b/arch/microblaze/kernel/traps.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include diff --git a/arch/microblaze/kernel/unwind.c b/arch/microblaze/kernel/unwind.c index 61c04eed14d5fa..34c270cb11fcbd 100644 --- a/arch/microblaze/kernel/unwind.c +++ b/arch/microblaze/kernel/unwind.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c index cc732fe357ad1c..4c059923991530 100644 --- a/arch/microblaze/mm/pgtable.c +++ b/arch/microblaze/mm/pgtable.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index a008a9f03072de..e0bb576410bbdf 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -1531,7 +1531,7 @@ config CPU_MIPS64_R6 select CPU_SUPPORTS_HIGHMEM select CPU_SUPPORTS_MSA select GENERIC_CSUM - select MIPS_O32_FP64_SUPPORT if MIPS32_O32 + select MIPS_O32_FP64_SUPPORT if 32BIT || MIPS32_O32 select HAVE_KVM help Choose this option to build a kernel for release 6 or later of the diff --git a/arch/mips/cavium-octeon/cpu.c b/arch/mips/cavium-octeon/cpu.c index a5b427909b5cac..036d56cc459168 100644 --- a/arch/mips/cavium-octeon/cpu.c +++ b/arch/mips/cavium-octeon/cpu.c @@ -10,7 +10,9 @@ #include #include #include +#include #include +#include #include #include diff --git a/arch/mips/cavium-octeon/crypto/octeon-crypto.c b/arch/mips/cavium-octeon/crypto/octeon-crypto.c index 4d22365844af30..cfb4a146cf1786 100644 --- a/arch/mips/cavium-octeon/crypto/octeon-crypto.c +++ b/arch/mips/cavium-octeon/crypto/octeon-crypto.c @@ 
-9,6 +9,7 @@ #include #include #include +#include #include "octeon-crypto.h" diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c index 4355a4cf4d74b2..3de786545ded10 100644 --- a/arch/mips/cavium-octeon/smp.c +++ b/arch/mips/cavium-octeon/smp.c @@ -11,6 +11,8 @@ #include #include #include +#include +#include #include #include diff --git a/arch/mips/include/asm/abi.h b/arch/mips/include/asm/abi.h index 940760844e2fe9..dba7f4b6bebfae 100644 --- a/arch/mips/include/asm/abi.h +++ b/arch/mips/include/asm/abi.h @@ -9,6 +9,8 @@ #ifndef _ASM_ABI_H #define _ASM_ABI_H +#include + #include #include #include diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h index 7a6c466e5f2a01..0eb1a75be10584 100644 --- a/arch/mips/include/asm/elf.h +++ b/arch/mips/include/asm/elf.h @@ -10,6 +10,8 @@ #include #include +#include + #include #include diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h index f06f97bd62df90..a2813fe381cf54 100644 --- a/arch/mips/include/asm/fpu.h +++ b/arch/mips/include/asm/fpu.h @@ -11,6 +11,8 @@ #define _ASM_FPU_H #include +#include +#include #include #include @@ -19,6 +21,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h index 956db6e201d187..ddd1c918103bcc 100644 --- a/arch/mips/include/asm/irq.h +++ b/arch/mips/include/asm/irq.h @@ -18,9 +18,24 @@ #include #define IRQ_STACK_SIZE THREAD_SIZE +#define IRQ_STACK_START (IRQ_STACK_SIZE - sizeof(unsigned long)) extern void *irq_stack[NR_CPUS]; +/* + * The highest address on the IRQ stack contains a dummy frame put down in + * genex.S (handle_int & except_vec_vi_handler) which is structured as follows: + * + * top ------------ + * | task sp | <- irq_stack[cpu] + IRQ_STACK_START + * ------------ + * | | <- First frame of IRQ context + * ------------ + * + * task sp holds a copy of the task stack pointer where the struct pt_regs + * from exception entry can be found. 
+ */ + static inline bool on_irq_stack(int cpu, unsigned long sp) { unsigned long low = (unsigned long)irq_stack[cpu]; diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h index 2abf94f72c0a81..da2004cef2d5c8 100644 --- a/arch/mips/include/asm/mmu_context.h +++ b/arch/mips/include/asm/mmu_context.h @@ -13,8 +13,10 @@ #include #include +#include #include #include + #include #include #include diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h index d21f3da7bdb619..6f94bed571c441 100644 --- a/arch/mips/include/asm/pgtable-32.h +++ b/arch/mips/include/asm/pgtable-32.h @@ -16,6 +16,7 @@ #include #include +#define __ARCH_USE_5LEVEL_HACK #include extern int temp_tlb_entry; diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h index 514cbc0a6a6760..130a2a6c153156 100644 --- a/arch/mips/include/asm/pgtable-64.h +++ b/arch/mips/include/asm/pgtable-64.h @@ -17,6 +17,7 @@ #include #include +#define __ARCH_USE_5LEVEL_HACK #if defined(CONFIG_PAGE_SIZE_64KB) && !defined(CONFIG_MIPS_VA_BITS_48) #include #else diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h index f485afe5151476..a8df44d60607ba 100644 --- a/arch/mips/include/asm/spinlock.h +++ b/arch/mips/include/asm/spinlock.h @@ -127,7 +127,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) " andi %[ticket], %[ticket], 0xffff \n" " bne %[ticket], %[my_ticket], 4f \n" " subu %[ticket], %[my_ticket], %[ticket] \n" - "2: \n" + "2: .insn \n" " .subsection 2 \n" "4: andi %[ticket], %[ticket], 0xffff \n" " sll %[ticket], 5 \n" @@ -202,7 +202,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) " sc %[ticket], %[ticket_ptr] \n" " beqz %[ticket], 1b \n" " li %[ticket], 1 \n" - "2: \n" + "2: .insn \n" " .subsection 2 \n" "3: b 2b \n" " li %[ticket], 0 \n" @@ -382,7 +382,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) " .set reorder \n" __WEAK_LLSC_MB " li %2, 1 \n" - "2: \n" + "2: .insn \n" : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) : GCC_OFF_SMALL_ASM() (rw->lock) : "memory"); @@ -422,7 +422,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) " lui %1, 0x8000 \n" " sc %1, %0 \n" " li %2, 1 \n" - "2: \n" + "2: .insn \n" : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) : GCC_OFF_SMALL_ASM() (rw->lock) diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h index 3e940dbe02629a..78faf4292e907c 100644 --- a/arch/mips/include/uapi/asm/unistd.h +++ b/arch/mips/include/uapi/asm/unistd.h @@ -386,17 +386,18 @@ #define __NR_pkey_mprotect (__NR_Linux + 363) #define __NR_pkey_alloc (__NR_Linux + 364) #define __NR_pkey_free (__NR_Linux + 365) +#define __NR_statx (__NR_Linux + 366) /* * Offset of the last Linux o32 flavoured syscall */ -#define __NR_Linux_syscalls 365 +#define __NR_Linux_syscalls 366 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ #define __NR_O32_Linux 4000 -#define __NR_O32_Linux_syscalls 365 +#define __NR_O32_Linux_syscalls 366 #if _MIPS_SIM == _MIPS_SIM_ABI64 @@ -730,16 +731,17 @@ #define __NR_pkey_mprotect (__NR_Linux + 323) #define __NR_pkey_alloc (__NR_Linux + 324) #define __NR_pkey_free (__NR_Linux + 325) +#define __NR_statx (__NR_Linux + 326) /* * Offset of the last Linux 64-bit flavoured syscall */ -#define __NR_Linux_syscalls 325 +#define __NR_Linux_syscalls 326 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ #define __NR_64_Linux 5000 -#define __NR_64_Linux_syscalls 325 +#define __NR_64_Linux_syscalls 326 #if 
_MIPS_SIM == _MIPS_SIM_NABI32 @@ -1077,15 +1079,16 @@ #define __NR_pkey_mprotect (__NR_Linux + 327) #define __NR_pkey_alloc (__NR_Linux + 328) #define __NR_pkey_free (__NR_Linux + 329) +#define __NR_statx (__NR_Linux + 330) /* * Offset of the last N32 flavoured syscall */ -#define __NR_Linux_syscalls 329 +#define __NR_Linux_syscalls 330 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ #define __NR_N32_Linux 6000 -#define __NR_N32_Linux_syscalls 329 +#define __NR_N32_Linux_syscalls 330 #endif /* _UAPI_ASM_UNISTD_H */ diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index bb5c5d34ba8152..a670c0c11875d2 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -102,6 +102,7 @@ void output_thread_info_defines(void) DEFINE(_THREAD_SIZE, THREAD_SIZE); DEFINE(_THREAD_MASK, THREAD_MASK); DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE); + DEFINE(_IRQ_STACK_START, IRQ_STACK_START); BLANK(); } diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index ae037a304ee459..b11facd11c9d05 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -7,7 +7,7 @@ * Copyright (C) 2001 MIPS Technologies, Inc. */ #include -#include +#include #include #include #include diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S index 59476a607adda0..a00e87b0256d3d 100644 --- a/arch/mips/kernel/cps-vec.S +++ b/arch/mips/kernel/cps-vec.S @@ -361,7 +361,7 @@ LEAF(mips_cps_get_bootcfg) END(mips_cps_get_bootcfg) LEAF(mips_cps_boot_vpes) - PTR_L ta2, COREBOOTCFG_VPEMASK(a0) + lw ta2, COREBOOTCFG_VPEMASK(a0) PTR_L ta3, COREBOOTCFG_VPECONFIG(a0) #if defined(CONFIG_CPU_MIPSR6) diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 07718bb5fc9d86..12422fd4af2335 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -1824,7 +1824,7 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) } decode_configs(c); - c->options |= MIPS_CPU_TLBINV | MIPS_CPU_LDPTE; + c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE; c->writecombine = _CACHE_UNCACHED_ACCELERATED; break; default: diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c index 5a71518be0f10b..ca25cd393b1ccb 100644 --- a/arch/mips/kernel/crash.c +++ b/arch/mips/kernel/crash.c @@ -8,6 +8,7 @@ #include #include #include +#include /* This keeps a track of which one is crashing cpu. 
*/ static int crashing_cpu = -1; diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index 7ec9612cb0078a..ae810da4d499e6 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S @@ -215,9 +215,11 @@ NESTED(handle_int, PT_SIZE, sp) beq t0, t1, 2f /* Switch to IRQ stack */ - li t1, _IRQ_STACK_SIZE + li t1, _IRQ_STACK_START PTR_ADD sp, t0, t1 + /* Save task's sp on IRQ stack so that unwinding can follow it */ + LONG_S s1, 0(sp) 2: jal plat_irq_dispatch @@ -325,9 +327,11 @@ NESTED(except_vec_vi_handler, 0, sp) beq t0, t1, 2f /* Switch to IRQ stack */ - li t1, _IRQ_STACK_SIZE + li t1, _IRQ_STACK_START PTR_ADD sp, t0, t1 + /* Save task's sp on IRQ stack so that unwinding can follow it */ + LONG_S s1, 0(sp) 2: jalr v0 @@ -519,7 +523,7 @@ NESTED(nmi_handler, PT_SIZE, sp) BUILD_HANDLER reserved reserved sti verbose /* others */ .align 5 - LEAF(handle_ri_rdhwr_vivt) + LEAF(handle_ri_rdhwr_tlbp) .set push .set noat .set noreorder @@ -538,7 +542,7 @@ NESTED(nmi_handler, PT_SIZE, sp) .set pop bltz k1, handle_ri /* slow path */ /* fall thru */ - END(handle_ri_rdhwr_vivt) + END(handle_ri_rdhwr_tlbp) LEAF(handle_ri_rdhwr) .set push diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c index 1a0a3b4ecc3efb..8cab633e0e5ade 100644 --- a/arch/mips/kernel/mips-mt-fpaff.c +++ b/arch/mips/kernel/mips-mt-fpaff.c @@ -9,6 +9,8 @@ #include #include #include +#include +#include #include #include #include diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c index d64056e0bb567a..f298eb2ff6c296 100644 --- a/arch/mips/kernel/perf_event.c +++ b/arch/mips/kernel/perf_event.c @@ -15,6 +15,7 @@ */ #include +#include #include diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 803e255b6fc376..b68e10fc453d11 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -11,6 +11,9 @@ */ #include #include +#include +#include +#include #include #include #include @@ -485,31 +488,52 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page, unsigned long pc, unsigned long *ra) { + unsigned long low, high, irq_stack_high; struct mips_frame_info info; unsigned long size, ofs; + struct pt_regs *regs; int leaf; - extern void ret_from_irq(void); - extern void ret_from_exception(void); if (!stack_page) return 0; /* - * If we reached the bottom of interrupt context, - * return saved pc in pt_regs. + * IRQ stacks start at IRQ_STACK_START + * task stacks at THREAD_SIZE - 32 */ - if (pc == (unsigned long)ret_from_irq || - pc == (unsigned long)ret_from_exception) { - struct pt_regs *regs; - if (*sp >= stack_page && - *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) { - regs = (struct pt_regs *)*sp; - pc = regs->cp0_epc; - if (!user_mode(regs) && __kernel_text_address(pc)) { - *sp = regs->regs[29]; - *ra = regs->regs[31]; - return pc; - } + low = stack_page; + if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) { + high = stack_page + IRQ_STACK_START; + irq_stack_high = high; + } else { + high = stack_page + THREAD_SIZE - 32; + irq_stack_high = 0; + } + + /* + * If we reached the top of the interrupt stack, start unwinding + * the interrupted task stack. 
+ */ + if (unlikely(*sp == irq_stack_high)) { + unsigned long task_sp = *(unsigned long *)*sp; + + /* + * Check that the pointer saved in the IRQ stack head points to + * something within the stack of the current task + */ + if (!object_is_on_stack((void *)task_sp)) + return 0; + + /* + * Follow pointer to tasks kernel stack frame where interrupted + * state was saved. + */ + regs = (struct pt_regs *)task_sp; + pc = regs->cp0_epc; + if (!user_mode(regs) && __kernel_text_address(pc)) { + *sp = regs->regs[29]; + *ra = regs->regs[31]; + return pc; } return 0; } @@ -530,8 +554,7 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page, if (leaf < 0) return 0; - if (*sp < stack_page || - *sp + info.frame_size > stack_page + THREAD_SIZE - 32) + if (*sp < low || *sp + info.frame_size > high) return 0; if (leaf) diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index fdef26382c376e..6931fe722a0b54 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -455,7 +456,8 @@ static int fpr_set(struct task_struct *target, &target->thread.fpu, 0, sizeof(elf_fpregset_t)); - for (i = 0; i < NUM_FPU_REGS; i++) { + BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t)); + for (i = 0; i < NUM_FPU_REGS && count >= sizeof(elf_fpreg_t); i++) { err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpr_val, i * sizeof(elf_fpreg_t), (i + 1) * sizeof(elf_fpreg_t)); diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c index 4f099852562667..40e212d6b26b2d 100644 --- a/arch/mips/kernel/ptrace32.c +++ b/arch/mips/kernel/ptrace32.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c index c5c4fd54d79722..b80dd8b17a764f 100644 --- a/arch/mips/kernel/rtlx.c +++ b/arch/mips/kernel/rtlx.c @@ -12,6 +12,8 @@ #include #include #include +#include + #include #include #include diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index c29d397eee86cf..80ed68b2c95e41 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -600,3 +600,4 @@ EXPORT(sys_call_table) PTR sys_pkey_mprotect PTR sys_pkey_alloc PTR sys_pkey_free /* 4365 */ + PTR sys_statx diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index 0687f96ee91269..49765b44aa9b3b 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -438,4 +438,5 @@ EXPORT(sys_call_table) PTR sys_pkey_mprotect PTR sys_pkey_alloc PTR sys_pkey_free /* 5325 */ + PTR sys_statx .size sys_call_table,.-sys_call_table diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 0331ba39a065b8..90bad2d1b2d3e2 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -433,4 +433,5 @@ EXPORT(sysn32_call_table) PTR sys_pkey_mprotect PTR sys_pkey_alloc PTR sys_pkey_free + PTR sys_statx /* 6330 */ .size sysn32_call_table,.-sysn32_call_table diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 5a47042dd25f7a..2dd70bd104e1a0 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -588,4 +588,5 @@ EXPORT(sys32_call_table) PTR sys_pkey_mprotect PTR sys_pkey_alloc PTR sys_pkey_free /* 4365 */ + PTR sys_statx .size sys32_call_table,.-sys32_call_table diff --git a/arch/mips/kernel/signal_o32.c b/arch/mips/kernel/signal_o32.c index 5e169fc5ca5c08..2b3572fb5f1b9d 100644 --- 
a/arch/mips/kernel/signal_o32.c +++ b/arch/mips/kernel/signal_o32.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 16e37a28f876cc..1b070a76fcdd4c 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -10,6 +10,8 @@ #include #include +#include +#include #include #include #include diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index a2544c2394e423..6d45f05538c8b3 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c @@ -11,7 +11,8 @@ #include #include #include -#include +#include +#include #include #include #include diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c index e077ea3e11fb36..e398cbc3d7767d 100644 --- a/arch/mips/kernel/smp-mt.c +++ b/arch/mips/kernel/smp-mt.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 8c60a296294c59..6e71130549eae5 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/mips/kernel/stacktrace.c b/arch/mips/kernel/stacktrace.c index 506021f62549d9..7c7c902249f2af 100644 --- a/arch/mips/kernel/stacktrace.c +++ b/arch/mips/kernel/stacktrace.c @@ -4,6 +4,8 @@ * Copyright (C) 2006 Atsushi Nemoto */ #include +#include +#include #include #include #include diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index c86ddbaa4598cd..f1d17ece41819e 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 49c6df20672a9d..b49e7bf9f95023 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -23,7 +23,8 @@ #include #include #include -#include +#include +#include #include #include #include @@ -82,7 +83,7 @@ extern asmlinkage void handle_dbe(void); extern asmlinkage void handle_sys(void); extern asmlinkage void handle_bp(void); extern asmlinkage void handle_ri(void); -extern asmlinkage void handle_ri_rdhwr_vivt(void); +extern asmlinkage void handle_ri_rdhwr_tlbp(void); extern asmlinkage void handle_ri_rdhwr(void); extern asmlinkage void handle_cpu(void); extern asmlinkage void handle_ov(void); @@ -2407,9 +2408,18 @@ void __init trap_init(void) set_except_vector(EXCCODE_SYS, handle_sys); set_except_vector(EXCCODE_BP, handle_bp); - set_except_vector(EXCCODE_RI, rdhwr_noopt ? handle_ri : - (cpu_has_vtag_icache ? 
- handle_ri_rdhwr_vivt : handle_ri_rdhwr)); + + if (rdhwr_noopt) + set_except_vector(EXCCODE_RI, handle_ri); + else { + if (cpu_has_vtag_icache) + set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp); + else if (current_cpu_type() == CPU_LOONGSON3) + set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp); + else + set_except_vector(EXCCODE_RI, handle_ri_rdhwr); + } + set_except_vector(EXCCODE_CPU, handle_cpu); set_except_vector(EXCCODE_OV, handle_ov); set_except_vector(EXCCODE_TR, handle_tr); diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index ed81e5ac14267f..15a1b1716c2eee 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -16,8 +16,10 @@ #include #include #include +#include #include #include + #include #include #include diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c index 3c3aa05891dd78..95bec460b651fd 100644 --- a/arch/mips/lantiq/xway/sysctrl.c +++ b/arch/mips/lantiq/xway/sysctrl.c @@ -467,7 +467,7 @@ void __init ltq_soc_init(void) if (!np_xbar) panic("Failed to load xbar nodes from devicetree"); - if (of_address_to_resource(np_pmu, 0, &res_xbar)) + if (of_address_to_resource(np_xbar, 0, &res_xbar)) panic("Failed to get xbar resources"); if (!request_mem_region(res_xbar.start, resource_size(&res_xbar), res_xbar.name)) diff --git a/arch/mips/loongson64/loongson-3/cop2-ex.c b/arch/mips/loongson64/loongson-3/cop2-ex.c index ea13764d0a035c..621d6af5f6eb8e 100644 --- a/arch/mips/loongson64/loongson-3/cop2-ex.c +++ b/arch/mips/loongson64/loongson-3/cop2-ex.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c index cfcf240cedbe26..64659fc7394053 100644 --- a/arch/mips/loongson64/loongson-3/smp.c +++ b/arch/mips/loongson64/loongson-3/smp.c @@ -17,6 +17,8 @@ #include #include #include +#include +#include #include #include #include diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c index c4469ff4a996bc..b6bfd36253694f 100644 --- a/arch/mips/math-emu/dsemul.c +++ b/arch/mips/math-emu/dsemul.c @@ -1,5 +1,7 @@ #include #include +#include +#include #include #include diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index e7f798d55fbcca..3fe99cb271a9ca 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -1562,6 +1562,7 @@ static void probe_vcache(void) vcache_size = c->vcache.sets * c->vcache.ways * c->vcache.linesz; c->vcache.waybit = 0; + c->vcache.waysize = vcache_size / c->vcache.ways; pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n", vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz); @@ -1664,6 +1665,7 @@ static void __init loongson3_sc_init(void) /* Loongson-3 has 4 cores, 1MB scache for each. 
scaches are shared */ scache_size *= 4; c->scache.waybit = 0; + c->scache.waysize = scache_size / c->scache.ways; pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n", scache_size >> 10, way_string[c->scache.ways], c->scache.linesz); if (scache_size) diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c index 1f189627440f23..1986e09fb457c5 100644 --- a/arch/mips/mm/ioremap.c +++ b/arch/mips/mm/ioremap.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c index d6d92c02308dd8..64dd8bdd92c339 100644 --- a/arch/mips/mm/mmap.c +++ b/arch/mips/mm/mmap.c @@ -13,7 +13,8 @@ #include #include #include -#include +#include +#include unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ EXPORT_SYMBOL(shm_align_mask); diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 9bfee8988eaf11..4f642e07c2b198 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -760,7 +760,8 @@ static void build_huge_update_entries(u32 **p, unsigned int pte, static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r, struct uasm_label **l, unsigned int pte, - unsigned int ptr) + unsigned int ptr, + unsigned int flush) { #ifdef CONFIG_SMP UASM_i_SC(p, pte, 0, ptr); @@ -769,6 +770,22 @@ static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r, #else UASM_i_SW(p, pte, 0, ptr); #endif + if (cpu_has_ftlb && flush) { + BUG_ON(!cpu_has_tlbinv); + + UASM_i_MFC0(p, ptr, C0_ENTRYHI); + uasm_i_ori(p, ptr, ptr, MIPS_ENTRYHI_EHINV); + UASM_i_MTC0(p, ptr, C0_ENTRYHI); + build_tlb_write_entry(p, l, r, tlb_indexed); + + uasm_i_xori(p, ptr, ptr, MIPS_ENTRYHI_EHINV); + UASM_i_MTC0(p, ptr, C0_ENTRYHI); + build_huge_update_entries(p, pte, ptr); + build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0); + + return; + } + build_huge_update_entries(p, pte, ptr); build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0); } @@ -2199,7 +2216,7 @@ static void build_r4000_tlb_load_handler(void) uasm_l_tlbl_goaround2(&l, p); } uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID)); - build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); + build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1); #endif uasm_l_nopage_tlbl(&l, p); @@ -2254,7 +2271,7 @@ static void build_r4000_tlb_store_handler(void) build_tlb_probe_entry(&p); uasm_i_ori(&p, wr.r1, wr.r1, _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); - build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); + build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1); #endif uasm_l_nopage_tlbs(&l, p); @@ -2310,7 +2327,7 @@ static void build_r4000_tlb_modify_handler(void) build_tlb_probe_entry(&p); uasm_i_ori(&p, wr.r1, wr.r1, _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); - build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); + build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0); #endif uasm_l_nopage_tlbm(&l, p); diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c index 10d86d54880ab8..bddf1ef553a4f6 100644 --- a/arch/mips/netlogic/common/smp.c +++ b/arch/mips/netlogic/common/smp.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include diff --git a/arch/mips/netlogic/xlp/cop2-ex.c b/arch/mips/netlogic/xlp/cop2-ex.c index 52bc5de420052c..21e439b3db707f 100644 --- a/arch/mips/netlogic/xlp/cop2-ex.c +++ b/arch/mips/netlogic/xlp/cop2-ex.c @@ -9,11 +9,14 @@ * Copyright (C) 2009 Wind River Systems, * written by Ralf Baechle */ +#include #include #include #include #include +#include #include +#include 
#include #include diff --git a/arch/mips/paravirt/paravirt-smp.c b/arch/mips/paravirt/paravirt-smp.c index f8d3e081b2ebc7..72eb1a56c64508 100644 --- a/arch/mips/paravirt/paravirt-smp.c +++ b/arch/mips/paravirt/paravirt-smp.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c index c4ffd43d3996ac..48ce701557a451 100644 --- a/arch/mips/ralink/rt3883.c +++ b/arch/mips/ralink/rt3883.c @@ -35,7 +35,7 @@ static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) }; static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) }; static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) }; static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) }; -static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna a", 0, 35, 3) }; +static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna g", 0, 35, 3) }; static struct rt2880_pmx_func pci_func[] = { FUNC("pci-dev", 0, 40, 32), FUNC("pci-host2", 1, 40, 32), @@ -43,7 +43,7 @@ static struct rt2880_pmx_func pci_func[] = { FUNC("pci-fnc", 3, 40, 32) }; static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) }; -static struct rt2880_pmx_func ge2_func[] = { FUNC("ge1", 0, 84, 12) }; +static struct rt2880_pmx_func ge2_func[] = { FUNC("ge2", 0, 84, 12) }; static struct rt2880_pmx_group rt3883_pinmux_data[] = { GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C), diff --git a/arch/mips/sgi-ip22/ip22-berr.c b/arch/mips/sgi-ip22/ip22-berr.c index 3f6ccd53c15d5b..ff8e1935c873a7 100644 --- a/arch/mips/sgi-ip22/ip22-berr.c +++ b/arch/mips/sgi-ip22/ip22-berr.c @@ -6,7 +6,7 @@ #include #include -#include +#include #include #include diff --git a/arch/mips/sgi-ip22/ip22-reset.c b/arch/mips/sgi-ip22/ip22-reset.c index a36f6b87548a5a..03a39ac5ead92e 100644 --- a/arch/mips/sgi-ip22/ip22-reset.c +++ b/arch/mips/sgi-ip22/ip22-reset.c @@ -10,7 +10,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/mips/sgi-ip22/ip28-berr.c b/arch/mips/sgi-ip22/ip28-berr.c index 9960a8302eac59..75460e1e106b2c 100644 --- a/arch/mips/sgi-ip22/ip28-berr.c +++ b/arch/mips/sgi-ip22/ip28-berr.c @@ -8,6 +8,8 @@ #include #include #include +#include +#include #include #include diff --git a/arch/mips/sgi-ip27/ip27-berr.c b/arch/mips/sgi-ip27/ip27-berr.c index f8919b6a24c884..83efe03d5c600f 100644 --- a/arch/mips/sgi-ip27/ip27-berr.c +++ b/arch/mips/sgi-ip27/ip27-berr.c @@ -11,7 +11,10 @@ #include #include /* for SIGBUS */ #include /* schow_regs(), force_sig() */ +#include +#include +#include #include #include #include diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c index f9ae6a8fa7c726..4cd47d23d81a76 100644 --- a/arch/mips/sgi-ip27/ip27-smp.c +++ b/arch/mips/sgi-ip27/ip27-smp.c @@ -8,9 +8,13 @@ */ #include #include +#include +#include #include + #include #include +#include #include #include #include diff --git a/arch/mips/sgi-ip32/ip32-berr.c b/arch/mips/sgi-ip32/ip32-berr.c index ba8f46d80ab8a5..c1f12a9cf305f4 100644 --- a/arch/mips/sgi-ip32/ip32-berr.c +++ b/arch/mips/sgi-ip32/ip32-berr.c @@ -10,6 +10,8 @@ #include #include #include +#include +#include #include #include #include diff --git a/arch/mips/sgi-ip32/ip32-irq.c b/arch/mips/sgi-ip32/ip32-irq.c index 838d8589a1c092..a6a0ff7f5aed00 100644 --- a/arch/mips/sgi-ip32/ip32-irq.c +++ b/arch/mips/sgi-ip32/ip32-irq.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include diff --git a/arch/mips/sgi-ip32/ip32-reset.c 
b/arch/mips/sgi-ip32/ip32-reset.c index 8bd415c8729f97..b3b442def42383 100644 --- a/arch/mips/sgi-ip32/ip32-reset.c +++ b/arch/mips/sgi-ip32/ip32-reset.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c index 4c71aea2566372..d0e94ffcc1b8b8 100644 --- a/arch/mips/sibyte/bcm1480/smp.c +++ b/arch/mips/sibyte/bcm1480/smp.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c index 1cf66f5ff23d1a..0a4a2c3982d86d 100644 --- a/arch/mips/sibyte/sb1250/smp.c +++ b/arch/mips/sibyte/sb1250/smp.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/mn10300/include/asm/mmu_context.h b/arch/mn10300/include/asm/mmu_context.h index 75dbe696f830cc..d2034f5e6eda79 100644 --- a/arch/mn10300/include/asm/mmu_context.h +++ b/arch/mn10300/include/asm/mmu_context.h @@ -23,6 +23,8 @@ #define _ASM_MMU_CONTEXT_H #include +#include + #include #include #include diff --git a/arch/mn10300/include/asm/page.h b/arch/mn10300/include/asm/page.h index 3810a6f740fdf6..dfe730a5ede04a 100644 --- a/arch/mn10300/include/asm/page.h +++ b/arch/mn10300/include/asm/page.h @@ -57,6 +57,7 @@ typedef struct page *pgtable_t; #define __pgd(x) ((pgd_t) { (x) }) #define __pgprot(x) ((pgprot_t) { (x) }) +#define __ARCH_USE_5LEVEL_HACK #include #endif /* !__ASSEMBLY__ */ diff --git a/arch/mn10300/kernel/fpu.c b/arch/mn10300/kernel/fpu.c index 2578b7ae7dd55e..50ce7b447fed4b 100644 --- a/arch/mn10300/kernel/fpu.c +++ b/arch/mn10300/kernel/fpu.c @@ -9,6 +9,8 @@ * 2 of the Licence, or (at your option) any later version. */ #include +#include + #include #include #include diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c index e5def2217f72db..c9fa42619c6a9a 100644 --- a/arch/mn10300/kernel/process.c +++ b/arch/mn10300/kernel/process.c @@ -11,6 +11,9 @@ #include #include #include +#include +#include +#include #include #include #include diff --git a/arch/mn10300/kernel/ptrace.c b/arch/mn10300/kernel/ptrace.c index 976020f469c1d3..8009876a7ac4e7 100644 --- a/arch/mn10300/kernel/ptrace.c +++ b/arch/mn10300/kernel/ptrace.c @@ -11,6 +11,7 @@ */ #include #include +#include #include #include #include diff --git a/arch/mn10300/kernel/smp.c b/arch/mn10300/kernel/smp.c index e65b5cc2fa67f1..35d2c3fe6f7696 100644 --- a/arch/mn10300/kernel/smp.c +++ b/arch/mn10300/kernel/smp.c @@ -21,7 +21,8 @@ #include #include #include -#include +#include +#include #include #include #include diff --git a/arch/mn10300/kernel/time.c b/arch/mn10300/kernel/time.c index 67c6416a58f830..06b83b17c5f197 100644 --- a/arch/mn10300/kernel/time.c +++ b/arch/mn10300/kernel/time.c @@ -10,6 +10,7 @@ * 2 of the Licence, or (at your option) any later version. */ #include +#include #include #include #include diff --git a/arch/mn10300/kernel/traps.c b/arch/mn10300/kernel/traps.c index a7a987c7954f0a..800fd08019698a 100644 --- a/arch/mn10300/kernel/traps.c +++ b/arch/mn10300/kernel/traps.c @@ -10,6 +10,7 @@ * 2 of the Licence, or (at your option) any later version. 
*/ #include +#include #include #include #include diff --git a/arch/mn10300/mm/tlb-smp.c b/arch/mn10300/mm/tlb-smp.c index 9a39ea9031d4f7..085f2bb691aca8 100644 --- a/arch/mn10300/mm/tlb-smp.c +++ b/arch/mn10300/mm/tlb-smp.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/nios2/include/asm/mmu_context.h b/arch/nios2/include/asm/mmu_context.h index 294b4b1f81d4e1..78ab3dacf57932 100644 --- a/arch/nios2/include/asm/mmu_context.h +++ b/arch/nios2/include/asm/mmu_context.h @@ -13,6 +13,8 @@ #ifndef _ASM_NIOS2_MMU_CONTEXT_H #define _ASM_NIOS2_MMU_CONTEXT_H +#include + #include extern void mmu_context_init(void); diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h index 298393c3cb426f..db4f7d17922078 100644 --- a/arch/nios2/include/asm/pgtable.h +++ b/arch/nios2/include/asm/pgtable.h @@ -22,6 +22,7 @@ #include #include +#define __ARCH_USE_5LEVEL_HACK #include #define FIRST_USER_ADDRESS 0UL diff --git a/arch/nios2/kernel/process.c b/arch/nios2/kernel/process.c index 2f8c74f93e705a..509e7855e8dc58 100644 --- a/arch/nios2/kernel/process.c +++ b/arch/nios2/kernel/process.c @@ -14,6 +14,10 @@ #include #include +#include +#include +#include +#include #include #include diff --git a/arch/nios2/kernel/prom.c b/arch/nios2/kernel/prom.c index 367c5426157ba1..3901b80d442021 100644 --- a/arch/nios2/kernel/prom.c +++ b/arch/nios2/kernel/prom.c @@ -48,6 +48,13 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) return alloc_bootmem_align(size, align); } +int __init early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size, + bool nomap) +{ + reserve_bootmem(base, size, BOOTMEM_DEFAULT); + return 0; +} + void __init early_init_devtree(void *params) { __be32 *dtb = (u32 *)__dtb_start; diff --git a/arch/nios2/kernel/ptrace.c b/arch/nios2/kernel/ptrace.c index 681dda92eff161..de97bcb7dd4437 100644 --- a/arch/nios2/kernel/ptrace.c +++ b/arch/nios2/kernel/ptrace.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c index a3fa80d1aacc2d..6044d9be28b449 100644 --- a/arch/nios2/kernel/setup.c +++ b/arch/nios2/kernel/setup.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -200,6 +201,9 @@ void __init setup_arch(char **cmdline_p) } #endif /* CONFIG_BLK_DEV_INITRD */ + early_init_fdt_reserve_self(); + early_init_fdt_scan_reserved_mem(); + unflatten_and_copy_device_tree(); setup_cpuinfo(); diff --git a/arch/nios2/kernel/traps.c b/arch/nios2/kernel/traps.c index 72ed30a93c8519..8184e7d6b3857d 100644 --- a/arch/nios2/kernel/traps.c +++ b/arch/nios2/kernel/traps.c @@ -11,6 +11,7 @@ */ #include +#include #include #include #include diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c index e7a14e1e0d6b6d..b804dd06ea1cec 100644 --- a/arch/nios2/mm/fault.c +++ b/arch/nios2/mm/fault.c @@ -13,6 +13,7 @@ #include #include +#include #include #include #include diff --git a/arch/openrisc/include/asm/cmpxchg.h b/arch/openrisc/include/asm/cmpxchg.h index 5fcb9ac7269385..f0a5d8b844d6b8 100644 --- a/arch/openrisc/include/asm/cmpxchg.h +++ b/arch/openrisc/include/asm/cmpxchg.h @@ -77,7 +77,11 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr, return val; } -#define xchg(ptr, with) \ - ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), sizeof(*(ptr)))) +#define xchg(ptr, with) \ + ({ \ + (__typeof__(*(ptr))) __xchg((unsigned long)(with), \ + (ptr), \ + 
sizeof(*(ptr))); \ + }) #endif /* __ASM_OPENRISC_CMPXCHG_H */ diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h index 3567aa7be55504..ff97374ca0693d 100644 --- a/arch/openrisc/include/asm/pgtable.h +++ b/arch/openrisc/include/asm/pgtable.h @@ -25,6 +25,7 @@ #ifndef __ASM_OPENRISC_PGTABLE_H #define __ASM_OPENRISC_PGTABLE_H +#define __ARCH_USE_5LEVEL_HACK #include #ifndef __ASSEMBLY__ diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h index 140faa16685a23..1311e6b1399166 100644 --- a/arch/openrisc/include/asm/uaccess.h +++ b/arch/openrisc/include/asm/uaccess.h @@ -211,7 +211,7 @@ do { \ case 1: __get_user_asm(x, ptr, retval, "l.lbz"); break; \ case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break; \ case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break; \ - case 8: __get_user_asm2(x, ptr, retval); \ + case 8: __get_user_asm2(x, ptr, retval); break; \ default: (x) = __get_user_bad(); \ } \ } while (0) diff --git a/arch/openrisc/kernel/or32_ksyms.c b/arch/openrisc/kernel/or32_ksyms.c index 5c4695d13542fc..ee3e604959e15c 100644 --- a/arch/openrisc/kernel/or32_ksyms.c +++ b/arch/openrisc/kernel/or32_ksyms.c @@ -30,6 +30,7 @@ #include #include #include +#include #define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name) @@ -42,6 +43,9 @@ DECLARE_EXPORT(__muldi3); DECLARE_EXPORT(__ashrdi3); DECLARE_EXPORT(__ashldi3); DECLARE_EXPORT(__lshrdi3); +DECLARE_EXPORT(__ucmpdi2); +EXPORT_SYMBOL(empty_zero_page); EXPORT_SYMBOL(__copy_tofrom_user); +EXPORT_SYMBOL(__clear_user); EXPORT_SYMBOL(memset); diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c index 6e9d1cb519f245..f8da545854f979 100644 --- a/arch/openrisc/kernel/process.c +++ b/arch/openrisc/kernel/process.c @@ -22,6 +22,9 @@ #include #include +#include +#include +#include #include #include #include @@ -87,6 +90,7 @@ void arch_cpu_idle(void) } void (*pm_power_off) (void) = machine_power_off; +EXPORT_SYMBOL(pm_power_off); /* * When a process does an "exec", machine state like FPU and debug diff --git a/arch/openrisc/kernel/ptrace.c b/arch/openrisc/kernel/ptrace.c index 228288887d74fa..eb97a8e7c8aa79 100644 --- a/arch/openrisc/kernel/ptrace.c +++ b/arch/openrisc/kernel/ptrace.c @@ -18,6 +18,7 @@ #include #include +#include #include #include diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c index 7e81ad258bca39..803e9e756f7785 100644 --- a/arch/openrisc/kernel/traps.c +++ b/arch/openrisc/kernel/traps.c @@ -22,6 +22,8 @@ #include #include +#include +#include #include #include #include diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c index 53592a639744f4..e310ab499385c5 100644 --- a/arch/openrisc/mm/fault.c +++ b/arch/openrisc/mm/fault.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h index 7bd69bd43a0185..c7e15cc5c6683b 100644 --- a/arch/parisc/include/asm/cacheflush.h +++ b/arch/parisc/include/asm/cacheflush.h @@ -27,8 +27,6 @@ void flush_user_dcache_range_asm(unsigned long, unsigned long); void flush_kernel_dcache_range_asm(unsigned long, unsigned long); void flush_kernel_dcache_page_asm(void *); void flush_kernel_icache_page(void *); -void flush_user_dcache_range(unsigned long, unsigned long); -void flush_user_icache_range(unsigned long, unsigned long); /* Cache flush operations */ @@ -45,28 +43,9 @@ static inline void flush_kernel_dcache_page(struct page *page) #define 
flush_kernel_dcache_range(start,size) \ flush_kernel_dcache_range_asm((start), (start)+(size)); -/* vmap range flushes and invalidates. Architecturally, we don't need - * the invalidate, because the CPU should refuse to speculate once an - * area has been flushed, so invalidate is left empty */ -static inline void flush_kernel_vmap_range(void *vaddr, int size) -{ - unsigned long start = (unsigned long)vaddr; - - flush_kernel_dcache_range_asm(start, start + size); -} -static inline void invalidate_kernel_vmap_range(void *vaddr, int size) -{ - unsigned long start = (unsigned long)vaddr; - void *cursor = vaddr; - for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) { - struct page *page = vmalloc_to_page(cursor); - - if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) - flush_kernel_dcache_page(page); - } - flush_kernel_dcache_range_asm(start, start + size); -} +void flush_kernel_vmap_range(void *vaddr, int size); +void invalidate_kernel_vmap_range(void *vaddr, int size); #define flush_cache_vmap(start, end) flush_cache_all() #define flush_cache_vunmap(start, end) flush_cache_all() diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index 9a2aee1b90fcca..8442727f28d273 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -32,11 +32,8 @@ * that put_user is the same as __put_user, etc. */ -static inline long access_ok(int type, const void __user * addr, - unsigned long size) -{ - return 1; -} +#define access_ok(type, uaddr, size) \ + ( (uaddr) == (uaddr) ) #define put_user __put_user #define get_user __get_user @@ -67,6 +64,15 @@ struct exception_table_entry { ".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \ ".previous\n" +/* + * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry + * (with lowest bit set) for which the fault handler in fixup_exception() will + * load -EFAULT into %r8 for a read or write fault, and zeroes the target + * register in case of a read fault in get_user(). + */ +#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\ + ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1) + /* * The page fault handler stores, in a per-cpu area, the following information * if a fixup routine is available. 
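(A minimal sketch of how the fault path can honour the low-bit marker that ASM_EXCEPTIONTABLE_ENTRY_EFAULT() sets. This is an assumption for illustration only: the real parisc fixup_exception() is not part of this hunk, and the helper below is hypothetical. Per the comment above, -EFAULT is reported in %r8, and for a faulting get_user() read the target register is zeroed separately.)

static unsigned long apply_efault_fixup(unsigned long fixup_addr,
					struct pt_regs *regs)
{
	if (fixup_addr & 1) {
		/* EFAULT-style entry: report the failure in %r8 */
		regs->gr[8] = -EFAULT;
		fixup_addr &= ~1UL;	/* strip the marker bit */
	}
	return fixup_addr;		/* address where execution resumes */
}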
@@ -94,7 +100,7 @@ struct exception_data { #define __get_user(x, ptr) \ ({ \ register long __gu_err __asm__ ("r8") = 0; \ - register long __gu_val __asm__ ("r9") = 0; \ + register long __gu_val; \ \ load_sr2(); \ switch (sizeof(*(ptr))) { \ @@ -110,22 +116,23 @@ struct exception_data { }) #define __get_user_asm(ldx, ptr) \ - __asm__("\n1:\t" ldx "\t0(%%sr2,%2),%0\n\t" \ - ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\ + __asm__("1: " ldx " 0(%%sr2,%2),%0\n" \ + "9:\n" \ + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \ : "=r"(__gu_val), "=r"(__gu_err) \ - : "r"(ptr), "1"(__gu_err) \ - : "r1"); + : "r"(ptr), "1"(__gu_err)); #if !defined(CONFIG_64BIT) #define __get_user_asm64(ptr) \ - __asm__("\n1:\tldw 0(%%sr2,%2),%0" \ - "\n2:\tldw 4(%%sr2,%2),%R0\n\t" \ - ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_2)\ - ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_get_user_skip_1)\ + __asm__(" copy %%r0,%R0\n" \ + "1: ldw 0(%%sr2,%2),%0\n" \ + "2: ldw 4(%%sr2,%2),%R0\n" \ + "9:\n" \ + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \ + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \ : "=r"(__gu_val), "=r"(__gu_err) \ - : "r"(ptr), "1"(__gu_err) \ - : "r1"); + : "r"(ptr), "1"(__gu_err)); #endif /* !defined(CONFIG_64BIT) */ @@ -151,32 +158,31 @@ struct exception_data { * The "__put_user/kernel_asm()" macros tell gcc they read from memory * instead of writing. This is because they do not write to any memory * gcc knows about, so there are no aliasing issues. These macros must - * also be aware that "fixup_put_user_skip_[12]" are executed in the - * context of the fault, and any registers used there must be listed - * as clobbers. In this case only "r1" is used by the current routines. - * r8/r9 are already listed as err/val. + * also be aware that fixups are executed in the context of the fault, + * and any registers used there must be listed as clobbers. + * r8 is already listed as err. 
*/ #define __put_user_asm(stx, x, ptr) \ __asm__ __volatile__ ( \ - "\n1:\t" stx "\t%2,0(%%sr2,%1)\n\t" \ - ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\ + "1: " stx " %2,0(%%sr2,%1)\n" \ + "9:\n" \ + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \ : "=r"(__pu_err) \ - : "r"(ptr), "r"(x), "0"(__pu_err) \ - : "r1") + : "r"(ptr), "r"(x), "0"(__pu_err)) #if !defined(CONFIG_64BIT) #define __put_user_asm64(__val, ptr) do { \ __asm__ __volatile__ ( \ - "\n1:\tstw %2,0(%%sr2,%1)" \ - "\n2:\tstw %R2,4(%%sr2,%1)\n\t" \ - ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\ - ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\ + "1: stw %2,0(%%sr2,%1)\n" \ + "2: stw %R2,4(%%sr2,%1)\n" \ + "9:\n" \ + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \ + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \ : "=r"(__pu_err) \ - : "r"(ptr), "r"(__val), "0"(__pu_err) \ - : "r1"); \ + : "r"(ptr), "r"(__val), "0"(__pu_err)); \ } while (0) #endif /* !defined(CONFIG_64BIT) */ diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h index 6b0741e7a7ed3e..667c99421003e4 100644 --- a/arch/parisc/include/uapi/asm/unistd.h +++ b/arch/parisc/include/uapi/asm/unistd.h @@ -362,8 +362,9 @@ #define __NR_copy_file_range (__NR_Linux + 346) #define __NR_preadv2 (__NR_Linux + 347) #define __NR_pwritev2 (__NR_Linux + 348) +#define __NR_statx (__NR_Linux + 349) -#define __NR_Linux_syscalls (__NR_pwritev2 + 1) +#define __NR_Linux_syscalls (__NR_statx + 1) #define __IGNORE_select /* newselect */ diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index 977f0a4f5ecf2c..c32a0909521665 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -573,24 +574,6 @@ void flush_cache_mm(struct mm_struct *mm) } } -void -flush_user_dcache_range(unsigned long start, unsigned long end) -{ - if ((end - start) < parisc_cache_flush_threshold) - flush_user_dcache_range_asm(start,end); - else - flush_data_cache(); -} - -void -flush_user_icache_range(unsigned long start, unsigned long end) -{ - if ((end - start) < parisc_cache_flush_threshold) - flush_user_icache_range_asm(start,end); - else - flush_instruction_cache(); -} - void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { @@ -633,3 +616,25 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); } } + +void flush_kernel_vmap_range(void *vaddr, int size) +{ + unsigned long start = (unsigned long)vaddr; + + if ((unsigned long)size > parisc_cache_flush_threshold) + flush_data_cache(); + else + flush_kernel_dcache_range_asm(start, start + size); +} +EXPORT_SYMBOL(flush_kernel_vmap_range); + +void invalidate_kernel_vmap_range(void *vaddr, int size) +{ + unsigned long start = (unsigned long)vaddr; + + if ((unsigned long)size > parisc_cache_flush_threshold) + flush_data_cache(); + else + flush_kernel_dcache_range_asm(start, start + size); +} +EXPORT_SYMBOL(invalidate_kernel_vmap_range); diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c index a0ecdb4abcc878..c66c943d93224f 100644 --- a/arch/parisc/kernel/module.c +++ b/arch/parisc/kernel/module.c @@ -620,6 +620,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs, */ *loc = fsel(val, addend); break; + case R_PARISC_SECREL32: + /* 32-bit section relative address. 
*/ + *loc = fsel(val, addend); + break; case R_PARISC_DPREL21L: /* left 21 bit of relative address */ val = lrsel(val - dp, addend); @@ -807,6 +811,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs, */ *loc = fsel(val, addend); break; + case R_PARISC_SECREL32: + /* 32-bit section relative address. */ + *loc = fsel(val, addend); + break; case R_PARISC_FPTR64: /* 64-bit function address */ if(in_local(me, (void *)(val + addend))) { diff --git a/arch/parisc/kernel/pa7300lc.c b/arch/parisc/kernel/pa7300lc.c index 8a89780223aa35..9b245fc6756095 100644 --- a/arch/parisc/kernel/pa7300lc.c +++ b/arch/parisc/kernel/pa7300lc.c @@ -5,6 +5,7 @@ * Copyright (C) 2000 Philipp Rumpf */ #include +#include #include #include #include diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c index 7484b3d11e0dbf..c6d6272a934f03 100644 --- a/arch/parisc/kernel/parisc_ksyms.c +++ b/arch/parisc/kernel/parisc_ksyms.c @@ -47,16 +47,6 @@ EXPORT_SYMBOL(__cmpxchg_u64); EXPORT_SYMBOL(lclear_user); EXPORT_SYMBOL(lstrnlen_user); -/* Global fixups - defined as int to avoid creation of function pointers */ -extern int fixup_get_user_skip_1; -extern int fixup_get_user_skip_2; -extern int fixup_put_user_skip_1; -extern int fixup_put_user_skip_2; -EXPORT_SYMBOL(fixup_get_user_skip_1); -EXPORT_SYMBOL(fixup_get_user_skip_2); -EXPORT_SYMBOL(fixup_put_user_skip_1); -EXPORT_SYMBOL(fixup_put_user_skip_2); - #ifndef CONFIG_64BIT /* Needed so insmod can set dp value */ extern int $global$; diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c index e282a5131d77e1..6017a5af2e6e2c 100644 --- a/arch/parisc/kernel/perf.c +++ b/arch/parisc/kernel/perf.c @@ -39,7 +39,7 @@ * the PDC INTRIGUE calls. This is done to eliminate bugs introduced * in various PDC revisions. The code is much more maintainable * and reliable this way vs having to debug on every version of PDC - * on every box. + * on every box. */ #include @@ -195,8 +195,8 @@ static int perf_config(uint32_t *image_ptr); static int perf_release(struct inode *inode, struct file *file); static int perf_open(struct inode *inode, struct file *file); static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos); -static ssize_t perf_write(struct file *file, const char __user *buf, size_t count, - loff_t *ppos); +static ssize_t perf_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos); static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg); static void perf_start_counters(void); static int perf_stop_counters(uint32_t *raddr); @@ -222,7 +222,7 @@ extern void perf_intrigue_disable_perf_counters (void); /* * configure: * - * Configure the cpu with a given data image. First turn off the counters, + * Configure the cpu with a given data image. First turn off the counters, * then download the image, then turn the counters back on. 
*/ static int perf_config(uint32_t *image_ptr) @@ -234,7 +234,7 @@ static int perf_config(uint32_t *image_ptr) error = perf_stop_counters(raddr); if (error != 0) { printk("perf_config: perf_stop_counters = %ld\n", error); - return -EINVAL; + return -EINVAL; } printk("Preparing to write image\n"); @@ -242,7 +242,7 @@ printk("Preparing to write image\n"); error = perf_write_image((uint64_t *)image_ptr); if (error != 0) { printk("perf_config: DOWNLOAD = %ld\n", error); - return -EINVAL; + return -EINVAL; } printk("Preparing to start counters\n"); @@ -254,7 +254,7 @@ printk("Preparing to start counters\n"); } /* - * Open the device and initialize all of its memory. The device is only + * Open the device and initialize all of its memory. The device is only * opened once, but can be "queried" by multiple processes that know its * file descriptor. */ @@ -298,19 +298,19 @@ static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t * called on the processor that the download should happen * on. */ -static ssize_t perf_write(struct file *file, const char __user *buf, size_t count, - loff_t *ppos) +static ssize_t perf_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) { size_t image_size; uint32_t image_type; uint32_t interface_type; uint32_t test; - if (perf_processor_interface == ONYX_INTF) + if (perf_processor_interface == ONYX_INTF) image_size = PCXU_IMAGE_SIZE; - else if (perf_processor_interface == CUDA_INTF) + else if (perf_processor_interface == CUDA_INTF) image_size = PCXW_IMAGE_SIZE; - else + else return -EFAULT; if (!capable(CAP_SYS_ADMIN)) @@ -330,22 +330,22 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun /* First check the machine type is correct for the requested image */ - if (((perf_processor_interface == CUDA_INTF) && - (interface_type != CUDA_INTF)) || - ((perf_processor_interface == ONYX_INTF) && - (interface_type != ONYX_INTF))) + if (((perf_processor_interface == CUDA_INTF) && + (interface_type != CUDA_INTF)) || + ((perf_processor_interface == ONYX_INTF) && + (interface_type != ONYX_INTF))) return -EINVAL; /* Next check to make sure the requested image is valid */ - if (((interface_type == CUDA_INTF) && + if (((interface_type == CUDA_INTF) && (test >= MAX_CUDA_IMAGES)) || - ((interface_type == ONYX_INTF) && - (test >= MAX_ONYX_IMAGES))) + ((interface_type == ONYX_INTF) && + (test >= MAX_ONYX_IMAGES))) return -EINVAL; /* Copy the image into the processor */ - if (interface_type == CUDA_INTF) + if (interface_type == CUDA_INTF) return perf_config(cuda_images[test]); else return perf_config(onyx_images[test]); @@ -359,7 +359,7 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun static void perf_patch_images(void) { #if 0 /* FIXME!! */ -/* +/* * NOTE: this routine is VERY specific to the current TLB image. * If the image is changed, this routine might also need to be changed. 
*/ @@ -367,9 +367,9 @@ static void perf_patch_images(void) extern void $i_dtlb_miss_2_0(); extern void PA2_0_iva(); - /* + /* * We can only use the lower 32-bits, the upper 32-bits should be 0 - * anyway given this is in the kernel + * anyway given this is in the kernel */ uint32_t itlb_addr = (uint32_t)&($i_itlb_miss_2_0); uint32_t dtlb_addr = (uint32_t)&($i_dtlb_miss_2_0); @@ -377,21 +377,21 @@ static void perf_patch_images(void) if (perf_processor_interface == ONYX_INTF) { /* clear last 2 bytes */ - onyx_images[TLBMISS][15] &= 0xffffff00; + onyx_images[TLBMISS][15] &= 0xffffff00; /* set 2 bytes */ onyx_images[TLBMISS][15] |= (0x000000ff&((dtlb_addr) >> 24)); onyx_images[TLBMISS][16] = (dtlb_addr << 8)&0xffffff00; onyx_images[TLBMISS][17] = itlb_addr; /* clear last 2 bytes */ - onyx_images[TLBHANDMISS][15] &= 0xffffff00; + onyx_images[TLBHANDMISS][15] &= 0xffffff00; /* set 2 bytes */ onyx_images[TLBHANDMISS][15] |= (0x000000ff&((dtlb_addr) >> 24)); onyx_images[TLBHANDMISS][16] = (dtlb_addr << 8)&0xffffff00; onyx_images[TLBHANDMISS][17] = itlb_addr; /* clear last 2 bytes */ - onyx_images[BIG_CPI][15] &= 0xffffff00; + onyx_images[BIG_CPI][15] &= 0xffffff00; /* set 2 bytes */ onyx_images[BIG_CPI][15] |= (0x000000ff&((dtlb_addr) >> 24)); onyx_images[BIG_CPI][16] = (dtlb_addr << 8)&0xffffff00; @@ -404,24 +404,24 @@ static void perf_patch_images(void) } else if (perf_processor_interface == CUDA_INTF) { /* Cuda interface */ - cuda_images[TLBMISS][16] = + cuda_images[TLBMISS][16] = (cuda_images[TLBMISS][16]&0xffff0000) | ((dtlb_addr >> 8)&0x0000ffff); - cuda_images[TLBMISS][17] = + cuda_images[TLBMISS][17] = ((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff); cuda_images[TLBMISS][18] = (itlb_addr << 16)&0xffff0000; - cuda_images[TLBHANDMISS][16] = + cuda_images[TLBHANDMISS][16] = (cuda_images[TLBHANDMISS][16]&0xffff0000) | ((dtlb_addr >> 8)&0x0000ffff); - cuda_images[TLBHANDMISS][17] = + cuda_images[TLBHANDMISS][17] = ((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff); cuda_images[TLBHANDMISS][18] = (itlb_addr << 16)&0xffff0000; - cuda_images[BIG_CPI][16] = + cuda_images[BIG_CPI][16] = (cuda_images[BIG_CPI][16]&0xffff0000) | ((dtlb_addr >> 8)&0x0000ffff); - cuda_images[BIG_CPI][17] = + cuda_images[BIG_CPI][17] = ((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff); cuda_images[BIG_CPI][18] = (itlb_addr << 16)&0xffff0000; } else { @@ -433,7 +433,7 @@ static void perf_patch_images(void) /* * ioctl routine - * All routines effect the processor that they are executed on. Thus you + * All routines effect the processor that they are executed on. Thus you * must be running on the processor that you wish to change. 
*/ @@ -459,7 +459,7 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) } /* copy out the Counters */ - if (copy_to_user((void __user *)arg, raddr, + if (copy_to_user((void __user *)arg, raddr, sizeof (raddr)) != 0) { error = -EFAULT; break; @@ -487,7 +487,7 @@ static const struct file_operations perf_fops = { .open = perf_open, .release = perf_release }; - + static struct miscdevice perf_dev = { MISC_DYNAMIC_MINOR, PA_PERF_DEV, @@ -595,7 +595,7 @@ static int perf_stop_counters(uint32_t *raddr) /* OR sticky2 (bit 1496) to counter2 bit 32 */ tmp64 |= (userbuf[23] >> 8) & 0x0000000080000000; raddr[2] = (uint32_t)tmp64; - + /* Counter3 is bits 1497 to 1528 */ tmp64 = (userbuf[23] >> 7) & 0x00000000ffffffff; /* OR sticky3 (bit 1529) to counter3 bit 32 */ @@ -617,7 +617,7 @@ static int perf_stop_counters(uint32_t *raddr) userbuf[22] = 0; userbuf[23] = 0; - /* + /* * Write back the zeroed bytes + the image given * the read was destructive. */ @@ -625,13 +625,13 @@ static int perf_stop_counters(uint32_t *raddr) } else { /* - * Read RDR-15 which contains the counters and sticky bits + * Read RDR-15 which contains the counters and sticky bits */ if (!perf_rdr_read_ubuf(15, userbuf)) { return -13; } - /* + /* * Clear out the counters */ perf_rdr_clear(15); @@ -644,7 +644,7 @@ static int perf_stop_counters(uint32_t *raddr) raddr[2] = (uint32_t)((userbuf[1] >> 32) & 0x00000000ffffffffUL); raddr[3] = (uint32_t)(userbuf[1] & 0x00000000ffffffffUL); } - + return 0; } @@ -682,7 +682,7 @@ static int perf_rdr_read_ubuf(uint32_t rdr_num, uint64_t *buffer) i = tentry->num_words; while (i--) { buffer[i] = 0; - } + } /* Check for bits an even number of 64 */ if ((xbits = width & 0x03f) != 0) { @@ -808,18 +808,22 @@ static int perf_write_image(uint64_t *memaddr) } runway = ioremap_nocache(cpu_device->hpa.start, 4096); + if (!runway) { + pr_err("perf_write_image: ioremap failed!\n"); + return -ENOMEM; + } /* Merge intrigue bits into Runway STATUS 0 */ tmp64 = __raw_readq(runway + RUNWAY_STATUS) & 0xffecfffffffffffful; - __raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul), + __raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul), runway + RUNWAY_STATUS); - + /* Write RUNWAY DEBUG registers */ for (i = 0; i < 8; i++) { __raw_writeq(*memaddr++, runway + RUNWAY_DEBUG); } - return 0; + return 0; } /* @@ -843,7 +847,7 @@ printk("perf_rdr_write\n"); perf_rdr_shift_out_U(rdr_num, buffer[i]); } else { perf_rdr_shift_out_W(rdr_num, buffer[i]); - } + } } printk("perf_rdr_write done\n"); } diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index ea6603ee8d2498..4516a5b53f38ef 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -43,6 +43,9 @@ #include #include #include +#include +#include +#include #include #include #include @@ -139,6 +142,10 @@ void machine_power_off(void) printk(KERN_EMERG "System shut down completed.\n" "Please power this system off now."); + + /* prevent soft lockup/stalled CPU messages for endless loop. 
*/ + rcu_sysrq_start(); + for (;;); } void (*pm_power_off)(void) = machine_power_off; diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c index 068ed3607bac0c..dee6f9d6a153ce 100644 --- a/arch/parisc/kernel/setup.c +++ b/arch/parisc/kernel/setup.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index e58925ac64d105..26f12f45b4bb1c 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c @@ -13,6 +13,7 @@ */ #include +#include #include #include #include @@ -232,6 +233,7 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs, struct rt_sigframe __user *frame; unsigned long rp, usp; unsigned long haddr, sigframe_size; + unsigned long start, end; int err = 0; #ifdef CONFIG_64BIT struct compat_rt_sigframe __user * compat_frame; @@ -299,10 +301,10 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs, } #endif - flush_user_dcache_range((unsigned long) &frame->tramp[0], - (unsigned long) &frame->tramp[TRAMP_SIZE]); - flush_user_icache_range((unsigned long) &frame->tramp[0], - (unsigned long) &frame->tramp[TRAMP_SIZE]); + start = (unsigned long) &frame->tramp[0]; + end = (unsigned long) &frame->tramp[TRAMP_SIZE]; + flush_user_dcache_range_asm(start, end); + flush_user_icache_range_asm(start, end); /* TRAMP Words 0-4, Length 5 = SIGRESTARTBLOCK_TRAMP * TRAMP Words 5-9, Length 4 = SIGRETURN_TRAMP @@ -548,8 +550,8 @@ insert_restart_trampoline(struct pt_regs *regs) WARN_ON(err); /* flush data/instruction cache for new insns */ - flush_user_dcache_range(start, end); - flush_user_icache_range(start, end); + flush_user_dcache_range_asm(start, end); + flush_user_icache_range_asm(start, end); regs->gr[31] = regs->gr[30] + 8; return; diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index 67b452b41ff6a6..63365106ea1907 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c @@ -21,7 +21,7 @@ #include #include -#include +#include #include #include #include diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c index bf3294171230ad..e5288638a1d9ad 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c @@ -30,6 +30,8 @@ #include #include #include +#include +#include #include #include #include diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index 3cfef1de8061af..44aeaa9c039fc4 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S @@ -444,6 +444,7 @@ ENTRY_SAME(copy_file_range) ENTRY_COMP(preadv2) ENTRY_COMP(pwritev2) + ENTRY_SAME(statx) .ifne (. 
- 90b) - (__NR_Linux_syscalls * (91b - 90b)) diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index 1e22f981cd81fb..89421df7016083 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index 378df9207406f2..991654c88eec86 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -11,6 +11,7 @@ */ #include +#include #include #include #include diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c index 0a21067ac0a349..e36f7b75ab07b3 100644 --- a/arch/parisc/kernel/unaligned.c +++ b/arch/parisc/kernel/unaligned.c @@ -23,7 +23,8 @@ #include #include #include -#include +#include +#include #include #include #include diff --git a/arch/parisc/lib/Makefile b/arch/parisc/lib/Makefile index 8fa92b8d839abb..f2dac4d73b1b30 100644 --- a/arch/parisc/lib/Makefile +++ b/arch/parisc/lib/Makefile @@ -2,7 +2,7 @@ # Makefile for parisc-specific library files # -lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o \ +lib-y := lusercopy.o bitops.o checksum.o io.o memset.o memcpy.o \ ucmpdi2.o delay.o obj-y := iomap.o diff --git a/arch/parisc/lib/fixup.S b/arch/parisc/lib/fixup.S deleted file mode 100644 index a5b72f22c7a6c4..00000000000000 --- a/arch/parisc/lib/fixup.S +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Linux/PA-RISC Project (http://www.parisc-linux.org/) - * - * Copyright (C) 2004 Randolph Chung - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * - * Fixup routines for kernel exception handling. 
- */ -#include -#include -#include -#include - -#ifdef CONFIG_SMP - .macro get_fault_ip t1 t2 - loadgp - addil LT%__per_cpu_offset,%r27 - LDREG RT%__per_cpu_offset(%r1),\t1 - /* t2 = smp_processor_id() */ - mfctl 30,\t2 - ldw TI_CPU(\t2),\t2 -#ifdef CONFIG_64BIT - extrd,u \t2,63,32,\t2 -#endif - /* t2 = &__per_cpu_offset[smp_processor_id()]; */ - LDREGX \t2(\t1),\t2 - addil LT%exception_data,%r27 - LDREG RT%exception_data(%r1),\t1 - /* t1 = this_cpu_ptr(&exception_data) */ - add,l \t1,\t2,\t1 - /* %r27 = t1->fault_gp - restore gp */ - LDREG EXCDATA_GP(\t1), %r27 - /* t1 = t1->fault_ip */ - LDREG EXCDATA_IP(\t1), \t1 - .endm -#else - .macro get_fault_ip t1 t2 - loadgp - /* t1 = this_cpu_ptr(&exception_data) */ - addil LT%exception_data,%r27 - LDREG RT%exception_data(%r1),\t2 - /* %r27 = t2->fault_gp - restore gp */ - LDREG EXCDATA_GP(\t2), %r27 - /* t1 = t2->fault_ip */ - LDREG EXCDATA_IP(\t2), \t1 - .endm -#endif - - .level LEVEL - - .text - .section .fixup, "ax" - - /* get_user() fixups, store -EFAULT in r8, and 0 in r9 */ -ENTRY_CFI(fixup_get_user_skip_1) - get_fault_ip %r1,%r8 - ldo 4(%r1), %r1 - ldi -EFAULT, %r8 - bv %r0(%r1) - copy %r0, %r9 -ENDPROC_CFI(fixup_get_user_skip_1) - -ENTRY_CFI(fixup_get_user_skip_2) - get_fault_ip %r1,%r8 - ldo 8(%r1), %r1 - ldi -EFAULT, %r8 - bv %r0(%r1) - copy %r0, %r9 -ENDPROC_CFI(fixup_get_user_skip_2) - - /* put_user() fixups, store -EFAULT in r8 */ -ENTRY_CFI(fixup_put_user_skip_1) - get_fault_ip %r1,%r8 - ldo 4(%r1), %r1 - bv %r0(%r1) - ldi -EFAULT, %r8 -ENDPROC_CFI(fixup_put_user_skip_1) - -ENTRY_CFI(fixup_put_user_skip_2) - get_fault_ip %r1,%r8 - ldo 8(%r1), %r1 - bv %r0(%r1) - ldi -EFAULT, %r8 -ENDPROC_CFI(fixup_put_user_skip_2) - diff --git a/arch/parisc/lib/lusercopy.S b/arch/parisc/lib/lusercopy.S index 56845de6b5dfc9..f01188c044ee83 100644 --- a/arch/parisc/lib/lusercopy.S +++ b/arch/parisc/lib/lusercopy.S @@ -5,6 +5,8 @@ * Copyright (C) 2000 Richard Hirst * Copyright (C) 2001 Matthieu Delahaye * Copyright (C) 2003 Randolph Chung + * Copyright (C) 2017 Helge Deller + * Copyright (C) 2017 John David Anglin * * * This program is free software; you can redistribute it and/or modify @@ -132,4 +134,320 @@ ENDPROC_CFI(lstrnlen_user) .procend + + +/* + * unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len) + * + * Inputs: + * - sr1 already contains space of source region + * - sr2 already contains space of destination region + * + * Returns: + * - number of bytes that could not be copied. + * On success, this will be zero. + * + * This code is based on a C-implementation of a copy routine written by + * Randolph Chung, which in turn was derived from the glibc. + * + * Several strategies are tried to try to get the best performance for various + * conditions. In the optimal case, we copy by loops that copy 32- or 16-bytes + * at a time using general registers. Unaligned copies are handled either by + * aligning the destination and then using shift-and-write method, or in a few + * cases by falling back to a byte-at-a-time copy. + * + * Testing with various alignments and buffer sizes shows that this code is + * often >10x faster than a simple byte-at-a-time copy, even for strangely + * aligned operands. It is interesting to note that the glibc version of memcpy + * (written in C) is actually quite fast already. This routine is able to beat + * it by 30-40% for aligned copies because of the loop unrolling, but in some + * cases the glibc version is still slightly faster. 
This lends more + * credibility that gcc can generate very good code as long as we are careful. + * + * Possible optimizations: + * - add cache prefetching + * - try not to use the post-increment address modifiers; they may create + * additional interlocks. Assumption is that those were only efficient on old + * machines (pre PA8000 processors) + */ + + dst = arg0 + src = arg1 + len = arg2 + end = arg3 + t1 = r19 + t2 = r20 + t3 = r21 + t4 = r22 + srcspc = sr1 + dstspc = sr2 + + t0 = r1 + a1 = t1 + a2 = t2 + a3 = t3 + a0 = t4 + + save_src = ret0 + save_dst = ret1 + save_len = r31 + +ENTRY_CFI(pa_memcpy) + .proc + .callinfo NO_CALLS + .entry + + /* Last destination address */ + add dst,len,end + + /* short copy with less than 16 bytes? */ + cmpib,>>=,n 15,len,.Lbyte_loop + + /* same alignment? */ + xor src,dst,t0 + extru t0,31,2,t1 + cmpib,<>,n 0,t1,.Lunaligned_copy + +#ifdef CONFIG_64BIT + /* only do 64-bit copies if we can get aligned. */ + extru t0,31,3,t1 + cmpib,<>,n 0,t1,.Lalign_loop32 + + /* loop until we are 64-bit aligned */ +.Lalign_loop64: + extru dst,31,3,t1 + cmpib,=,n 0,t1,.Lcopy_loop_16 +20: ldb,ma 1(srcspc,src),t1 +21: stb,ma t1,1(dstspc,dst) + b .Lalign_loop64 + ldo -1(len),len + + ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done) + ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done) + + ldi 31,t0 +.Lcopy_loop_16: + cmpb,COND(>>=),n t0,len,.Lword_loop + +10: ldd 0(srcspc,src),t1 +11: ldd 8(srcspc,src),t2 + ldo 16(src),src +12: std,ma t1,8(dstspc,dst) +13: std,ma t2,8(dstspc,dst) +14: ldd 0(srcspc,src),t1 +15: ldd 8(srcspc,src),t2 + ldo 16(src),src +16: std,ma t1,8(dstspc,dst) +17: std,ma t2,8(dstspc,dst) + + ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done) + ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy16_fault) + ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done) + ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done) + ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done) + ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy16_fault) + ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done) + ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done) + + b .Lcopy_loop_16 + ldo -32(len),len + +.Lword_loop: + cmpib,COND(>>=),n 3,len,.Lbyte_loop +20: ldw,ma 4(srcspc,src),t1 +21: stw,ma t1,4(dstspc,dst) + b .Lword_loop + ldo -4(len),len + + ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done) + ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done) + +#endif /* CONFIG_64BIT */ + + /* loop until we are 32-bit aligned */ +.Lalign_loop32: + extru dst,31,2,t1 + cmpib,=,n 0,t1,.Lcopy_loop_4 +20: ldb,ma 1(srcspc,src),t1 +21: stb,ma t1,1(dstspc,dst) + b .Lalign_loop32 + ldo -1(len),len + + ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done) + ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done) + + +.Lcopy_loop_4: + cmpib,COND(>>=),n 15,len,.Lbyte_loop + +10: ldw 0(srcspc,src),t1 +11: ldw 4(srcspc,src),t2 +12: stw,ma t1,4(dstspc,dst) +13: stw,ma t2,4(dstspc,dst) +14: ldw 8(srcspc,src),t1 +15: ldw 12(srcspc,src),t2 + ldo 16(src),src +16: stw,ma t1,4(dstspc,dst) +17: stw,ma t2,4(dstspc,dst) + + ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done) + ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy8_fault) + ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done) + ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done) + ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done) + ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy8_fault) + ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done) + ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done) + + b .Lcopy_loop_4 + ldo -16(len),len + +.Lbyte_loop: + cmpclr,COND(<>) len,%r0,%r0 + b,n .Lcopy_done +20: ldb 0(srcspc,src),t1 + ldo 1(src),src +21: stb,ma t1,1(dstspc,dst) + b .Lbyte_loop + ldo -1(len),len + + ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done) + ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done) + 
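
The fault handling in this routine is worth spelling out: every load and store above is tagged with ASM_EXCEPTIONTABLE_ENTRY() so that an access fault simply branches to .Lcopy_done (just below), which returns end - dst, the number of bytes that were never written. A caller might use that contract roughly as in the sketch here; this is illustrative C, not code from the patch, the mtsp() argument order (value, space register) is an assumption, and get_user_space()/get_kernel_space() are the helpers kept in arch/parisc/lib/memcpy.c in the hunk further below.

        static unsigned long copy_to_user_sketch(void __user *dst, const void *src,
                                                 unsigned long len)
        {
                unsigned long not_copied;

                mtsp(get_kernel_space(), 1);    /* %sr1 = space of the source (assumed helper)   */
                mtsp(get_user_space(), 2);      /* %sr2 = space of the destination (assumed)     */

                not_copied = pa_memcpy((void __force *)dst, src, len);

                /* 0 on success; after a fault, the count of bytes never stored */
                return not_copied;
        }
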
+.Lcopy_done: + bv %r0(%r2) + sub end,dst,ret0 + + + /* src and dst are not aligned the same way. */ + /* need to go the hard way */ +.Lunaligned_copy: + /* align until dst is 32bit-word-aligned */ + extru dst,31,2,t1 + cmpib,COND(=),n 0,t1,.Lcopy_dstaligned +20: ldb 0(srcspc,src),t1 + ldo 1(src),src +21: stb,ma t1,1(dstspc,dst) + b .Lunaligned_copy + ldo -1(len),len + + ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done) + ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done) + +.Lcopy_dstaligned: + + /* store src, dst and len in safe place */ + copy src,save_src + copy dst,save_dst + copy len,save_len + + /* len now needs give number of words to copy */ + SHRREG len,2,len + + /* + * Copy from a not-aligned src to an aligned dst using shifts. + * Handles 4 words per loop. + */ + + depw,z src,28,2,t0 + subi 32,t0,t0 + mtsar t0 + extru len,31,2,t0 + cmpib,= 2,t0,.Lcase2 + /* Make src aligned by rounding it down. */ + depi 0,31,2,src + + cmpiclr,<> 3,t0,%r0 + b,n .Lcase3 + cmpiclr,<> 1,t0,%r0 + b,n .Lcase1 +.Lcase0: + cmpb,= %r0,len,.Lcda_finish + nop + +1: ldw,ma 4(srcspc,src), a3 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) +1: ldw,ma 4(srcspc,src), a0 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) + b,n .Ldo3 +.Lcase1: +1: ldw,ma 4(srcspc,src), a2 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) +1: ldw,ma 4(srcspc,src), a3 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) + ldo -1(len),len + cmpb,=,n %r0,len,.Ldo0 +.Ldo4: +1: ldw,ma 4(srcspc,src), a0 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) + shrpw a2, a3, %sar, t0 +1: stw,ma t0, 4(dstspc,dst) + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done) +.Ldo3: +1: ldw,ma 4(srcspc,src), a1 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) + shrpw a3, a0, %sar, t0 +1: stw,ma t0, 4(dstspc,dst) + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done) +.Ldo2: +1: ldw,ma 4(srcspc,src), a2 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) + shrpw a0, a1, %sar, t0 +1: stw,ma t0, 4(dstspc,dst) + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done) +.Ldo1: +1: ldw,ma 4(srcspc,src), a3 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) + shrpw a1, a2, %sar, t0 +1: stw,ma t0, 4(dstspc,dst) + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done) + ldo -4(len),len + cmpb,<> %r0,len,.Ldo4 + nop +.Ldo0: + shrpw a2, a3, %sar, t0 +1: stw,ma t0, 4(dstspc,dst) + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done) + +.Lcda_rdfault: +.Lcda_finish: + /* calculate new src, dst and len and jump to byte-copy loop */ + sub dst,save_dst,t0 + add save_src,t0,src + b .Lbyte_loop + sub save_len,t0,len + +.Lcase3: +1: ldw,ma 4(srcspc,src), a0 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) +1: ldw,ma 4(srcspc,src), a1 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) + b .Ldo2 + ldo 1(len),len +.Lcase2: +1: ldw,ma 4(srcspc,src), a1 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) +1: ldw,ma 4(srcspc,src), a2 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) + b .Ldo1 + ldo 2(len),len + + + /* fault exception fixup handlers: */ +#ifdef CONFIG_64BIT +.Lcopy16_fault: +10: b .Lcopy_done + std,ma t1,8(dstspc,dst) + ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done) +#endif + +.Lcopy8_fault: +10: b .Lcopy_done + stw,ma t1,4(dstspc,dst) + ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done) + + .exit +ENDPROC_CFI(pa_memcpy) + .procend + .end diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c index f82ff10ed97411..b3d47ec1d80a24 100644 --- a/arch/parisc/lib/memcpy.c +++ b/arch/parisc/lib/memcpy.c @@ -2,7 +2,7 @@ * Optimized memory copy routines. 
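
For the unaligned case, .Lcopy_dstaligned above first derives a shift amount from the source misalignment (the depw,z / subi / mtsar sequence), after which each shrpw merges two adjacent aligned source words into one aligned destination word. Below is a C model of that merge for the big-endian, 32-bit-word case; the helper name and parameter names are illustrative, not taken from the patch.

        /* Assumes 1 <= misalign <= 3; a zero misalignment never reaches this path. */
        static inline unsigned int merge_words(unsigned int w0, unsigned int w1,
                                               unsigned int misalign)
        {
                unsigned int sh_1 = 8 * misalign;       /* bits of w0 already consumed */
                unsigned int sh_2 = 32 - sh_1;          /* bits still needed from w1   */

                /* big-endian: the tail of w0 becomes the head of the output word */
                return (w0 << sh_1) | (w1 >> sh_2);
        }

In the assembly, %sar is loaded once with 32 - 8 * misalign before the copy loop, so the merge costs a single shrpw per output word.
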
* * Copyright (C) 2004 Randolph Chung - * Copyright (C) 2013 Helge Deller + * Copyright (C) 2013-2017 Helge Deller * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -21,474 +21,21 @@ * Portions derived from the GNU C Library * Copyright (C) 1991, 1997, 2003 Free Software Foundation, Inc. * - * Several strategies are tried to try to get the best performance for various - * conditions. In the optimal case, we copy 64-bytes in an unrolled loop using - * fp regs. This is followed by loops that copy 32- or 16-bytes at a time using - * general registers. Unaligned copies are handled either by aligning the - * destination and then using shift-and-write method, or in a few cases by - * falling back to a byte-at-a-time copy. - * - * I chose to implement this in C because it is easier to maintain and debug, - * and in my experiments it appears that the C code generated by gcc (3.3/3.4 - * at the time of writing) is fairly optimal. Unfortunately some of the - * semantics of the copy routine (exception handling) is difficult to express - * in C, so we have to play some tricks to get it to work. - * - * All the loads and stores are done via explicit asm() code in order to use - * the right space registers. - * - * Testing with various alignments and buffer sizes shows that this code is - * often >10x faster than a simple byte-at-a-time copy, even for strangely - * aligned operands. It is interesting to note that the glibc version - * of memcpy (written in C) is actually quite fast already. This routine is - * able to beat it by 30-40% for aligned copies because of the loop unrolling, - * but in some cases the glibc version is still slightly faster. This lends - * more credibility that gcc can generate very good code as long as we are - * careful. - * - * TODO: - * - cache prefetching needs more experimentation to get optimal settings - * - try not to use the post-increment address modifiers; they create additional - * interlocks - * - replace byte-copy loops with stybs sequences */ -#ifdef __KERNEL__ #include #include #include -#define s_space "%%sr1" -#define d_space "%%sr2" -#else -#include "memcpy.h" -#define s_space "%%sr0" -#define d_space "%%sr0" -#define pa_memcpy new2_copy -#endif DECLARE_PER_CPU(struct exception_data, exception_data); -#define preserve_branch(label) do { \ - volatile int dummy = 0; \ - /* The following branch is never taken, it's just here to */ \ - /* prevent gcc from optimizing away our exception code. */ \ - if (unlikely(dummy != dummy)) \ - goto label; \ -} while (0) - #define get_user_space() (segment_eq(get_fs(), KERNEL_DS) ? 0 : mfsp(3)) #define get_kernel_space() (0) -#define MERGE(w0, sh_1, w1, sh_2) ({ \ - unsigned int _r; \ - asm volatile ( \ - "mtsar %3\n" \ - "shrpw %1, %2, %%sar, %0\n" \ - : "=r"(_r) \ - : "r"(w0), "r"(w1), "r"(sh_2) \ - ); \ - _r; \ -}) -#define THRESHOLD 16 - -#ifdef DEBUG_MEMCPY -#define DPRINTF(fmt, args...) do { printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __func__ ); printk(KERN_DEBUG fmt, ##args ); } while (0) -#else -#define DPRINTF(fmt, args...) 
-#endif - -#define def_load_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e) \ - __asm__ __volatile__ ( \ - "1:\t" #_insn ",ma " #_sz "(" _s ",%1), %0\n\t" \ - ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \ - : _tt(_t), "+r"(_a) \ - : \ - : "r8") - -#define def_store_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e) \ - __asm__ __volatile__ ( \ - "1:\t" #_insn ",ma %1, " #_sz "(" _s ",%0)\n\t" \ - ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \ - : "+r"(_a) \ - : _tt(_t) \ - : "r8") - -#define ldbma(_s, _a, _t, _e) def_load_ai_insn(ldbs,1,"=r",_s,_a,_t,_e) -#define stbma(_s, _t, _a, _e) def_store_ai_insn(stbs,1,"r",_s,_a,_t,_e) -#define ldwma(_s, _a, _t, _e) def_load_ai_insn(ldw,4,"=r",_s,_a,_t,_e) -#define stwma(_s, _t, _a, _e) def_store_ai_insn(stw,4,"r",_s,_a,_t,_e) -#define flddma(_s, _a, _t, _e) def_load_ai_insn(fldd,8,"=f",_s,_a,_t,_e) -#define fstdma(_s, _t, _a, _e) def_store_ai_insn(fstd,8,"f",_s,_a,_t,_e) - -#define def_load_insn(_insn,_tt,_s,_o,_a,_t,_e) \ - __asm__ __volatile__ ( \ - "1:\t" #_insn " " #_o "(" _s ",%1), %0\n\t" \ - ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \ - : _tt(_t) \ - : "r"(_a) \ - : "r8") - -#define def_store_insn(_insn,_tt,_s,_t,_o,_a,_e) \ - __asm__ __volatile__ ( \ - "1:\t" #_insn " %0, " #_o "(" _s ",%1)\n\t" \ - ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \ - : \ - : _tt(_t), "r"(_a) \ - : "r8") - -#define ldw(_s,_o,_a,_t,_e) def_load_insn(ldw,"=r",_s,_o,_a,_t,_e) -#define stw(_s,_t,_o,_a,_e) def_store_insn(stw,"r",_s,_t,_o,_a,_e) - -#ifdef CONFIG_PREFETCH -static inline void prefetch_src(const void *addr) -{ - __asm__("ldw 0(" s_space ",%0), %%r0" : : "r" (addr)); -} - -static inline void prefetch_dst(const void *addr) -{ - __asm__("ldd 0(" d_space ",%0), %%r0" : : "r" (addr)); -} -#else -#define prefetch_src(addr) do { } while(0) -#define prefetch_dst(addr) do { } while(0) -#endif - -#define PA_MEMCPY_OK 0 -#define PA_MEMCPY_LOAD_ERROR 1 -#define PA_MEMCPY_STORE_ERROR 2 - -/* Copy from a not-aligned src to an aligned dst, using shifts. Handles 4 words - * per loop. This code is derived from glibc. - */ -static noinline unsigned long copy_dstaligned(unsigned long dst, - unsigned long src, unsigned long len) -{ - /* gcc complains that a2 and a3 may be uninitialized, but actually - * they cannot be. Initialize a2/a3 to shut gcc up. - */ - register unsigned int a0, a1, a2 = 0, a3 = 0; - int sh_1, sh_2; - - /* prefetch_src((const void *)src); */ - - /* Calculate how to shift a word read at the memory operation - aligned srcp to make it aligned for copy. */ - sh_1 = 8 * (src % sizeof(unsigned int)); - sh_2 = 8 * sizeof(unsigned int) - sh_1; - - /* Make src aligned by rounding it down. 
*/ - src &= -sizeof(unsigned int); - - switch (len % 4) - { - case 2: - /* a1 = ((unsigned int *) src)[0]; - a2 = ((unsigned int *) src)[1]; */ - ldw(s_space, 0, src, a1, cda_ldw_exc); - ldw(s_space, 4, src, a2, cda_ldw_exc); - src -= 1 * sizeof(unsigned int); - dst -= 3 * sizeof(unsigned int); - len += 2; - goto do1; - case 3: - /* a0 = ((unsigned int *) src)[0]; - a1 = ((unsigned int *) src)[1]; */ - ldw(s_space, 0, src, a0, cda_ldw_exc); - ldw(s_space, 4, src, a1, cda_ldw_exc); - src -= 0 * sizeof(unsigned int); - dst -= 2 * sizeof(unsigned int); - len += 1; - goto do2; - case 0: - if (len == 0) - return PA_MEMCPY_OK; - /* a3 = ((unsigned int *) src)[0]; - a0 = ((unsigned int *) src)[1]; */ - ldw(s_space, 0, src, a3, cda_ldw_exc); - ldw(s_space, 4, src, a0, cda_ldw_exc); - src -=-1 * sizeof(unsigned int); - dst -= 1 * sizeof(unsigned int); - len += 0; - goto do3; - case 1: - /* a2 = ((unsigned int *) src)[0]; - a3 = ((unsigned int *) src)[1]; */ - ldw(s_space, 0, src, a2, cda_ldw_exc); - ldw(s_space, 4, src, a3, cda_ldw_exc); - src -=-2 * sizeof(unsigned int); - dst -= 0 * sizeof(unsigned int); - len -= 1; - if (len == 0) - goto do0; - goto do4; /* No-op. */ - } - - do - { - /* prefetch_src((const void *)(src + 4 * sizeof(unsigned int))); */ -do4: - /* a0 = ((unsigned int *) src)[0]; */ - ldw(s_space, 0, src, a0, cda_ldw_exc); - /* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */ - stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc); -do3: - /* a1 = ((unsigned int *) src)[1]; */ - ldw(s_space, 4, src, a1, cda_ldw_exc); - /* ((unsigned int *) dst)[1] = MERGE (a3, sh_1, a0, sh_2); */ - stw(d_space, MERGE (a3, sh_1, a0, sh_2), 4, dst, cda_stw_exc); -do2: - /* a2 = ((unsigned int *) src)[2]; */ - ldw(s_space, 8, src, a2, cda_ldw_exc); - /* ((unsigned int *) dst)[2] = MERGE (a0, sh_1, a1, sh_2); */ - stw(d_space, MERGE (a0, sh_1, a1, sh_2), 8, dst, cda_stw_exc); -do1: - /* a3 = ((unsigned int *) src)[3]; */ - ldw(s_space, 12, src, a3, cda_ldw_exc); - /* ((unsigned int *) dst)[3] = MERGE (a1, sh_1, a2, sh_2); */ - stw(d_space, MERGE (a1, sh_1, a2, sh_2), 12, dst, cda_stw_exc); - - src += 4 * sizeof(unsigned int); - dst += 4 * sizeof(unsigned int); - len -= 4; - } - while (len != 0); - -do0: - /* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */ - stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc); - - preserve_branch(handle_load_error); - preserve_branch(handle_store_error); - - return PA_MEMCPY_OK; - -handle_load_error: - __asm__ __volatile__ ("cda_ldw_exc:\n"); - return PA_MEMCPY_LOAD_ERROR; - -handle_store_error: - __asm__ __volatile__ ("cda_stw_exc:\n"); - return PA_MEMCPY_STORE_ERROR; -} - - -/* Returns PA_MEMCPY_OK, PA_MEMCPY_LOAD_ERROR or PA_MEMCPY_STORE_ERROR. - * In case of an access fault the faulty address can be read from the per_cpu - * exception data struct. */ -static noinline unsigned long pa_memcpy_internal(void *dstp, const void *srcp, - unsigned long len) -{ - register unsigned long src, dst, t1, t2, t3; - register unsigned char *pcs, *pcd; - register unsigned int *pws, *pwd; - register double *pds, *pdd; - unsigned long ret; - - src = (unsigned long)srcp; - dst = (unsigned long)dstp; - pcs = (unsigned char *)srcp; - pcd = (unsigned char *)dstp; - - /* prefetch_src((const void *)srcp); */ - - if (len < THRESHOLD) - goto byte_copy; - - /* Check alignment */ - t1 = (src ^ dst); - if (unlikely(t1 & (sizeof(double)-1))) - goto unaligned_copy; - - /* src and dst have same alignment. */ - - /* Copy bytes till we are double-aligned. 
*/ - t2 = src & (sizeof(double) - 1); - if (unlikely(t2 != 0)) { - t2 = sizeof(double) - t2; - while (t2 && len) { - /* *pcd++ = *pcs++; */ - ldbma(s_space, pcs, t3, pmc_load_exc); - len--; - stbma(d_space, t3, pcd, pmc_store_exc); - t2--; - } - } - - pds = (double *)pcs; - pdd = (double *)pcd; - -#if 0 - /* Copy 8 doubles at a time */ - while (len >= 8*sizeof(double)) { - register double r1, r2, r3, r4, r5, r6, r7, r8; - /* prefetch_src((char *)pds + L1_CACHE_BYTES); */ - flddma(s_space, pds, r1, pmc_load_exc); - flddma(s_space, pds, r2, pmc_load_exc); - flddma(s_space, pds, r3, pmc_load_exc); - flddma(s_space, pds, r4, pmc_load_exc); - fstdma(d_space, r1, pdd, pmc_store_exc); - fstdma(d_space, r2, pdd, pmc_store_exc); - fstdma(d_space, r3, pdd, pmc_store_exc); - fstdma(d_space, r4, pdd, pmc_store_exc); - -#if 0 - if (L1_CACHE_BYTES <= 32) - prefetch_src((char *)pds + L1_CACHE_BYTES); -#endif - flddma(s_space, pds, r5, pmc_load_exc); - flddma(s_space, pds, r6, pmc_load_exc); - flddma(s_space, pds, r7, pmc_load_exc); - flddma(s_space, pds, r8, pmc_load_exc); - fstdma(d_space, r5, pdd, pmc_store_exc); - fstdma(d_space, r6, pdd, pmc_store_exc); - fstdma(d_space, r7, pdd, pmc_store_exc); - fstdma(d_space, r8, pdd, pmc_store_exc); - len -= 8*sizeof(double); - } -#endif - - pws = (unsigned int *)pds; - pwd = (unsigned int *)pdd; - -word_copy: - while (len >= 8*sizeof(unsigned int)) { - register unsigned int r1,r2,r3,r4,r5,r6,r7,r8; - /* prefetch_src((char *)pws + L1_CACHE_BYTES); */ - ldwma(s_space, pws, r1, pmc_load_exc); - ldwma(s_space, pws, r2, pmc_load_exc); - ldwma(s_space, pws, r3, pmc_load_exc); - ldwma(s_space, pws, r4, pmc_load_exc); - stwma(d_space, r1, pwd, pmc_store_exc); - stwma(d_space, r2, pwd, pmc_store_exc); - stwma(d_space, r3, pwd, pmc_store_exc); - stwma(d_space, r4, pwd, pmc_store_exc); - - ldwma(s_space, pws, r5, pmc_load_exc); - ldwma(s_space, pws, r6, pmc_load_exc); - ldwma(s_space, pws, r7, pmc_load_exc); - ldwma(s_space, pws, r8, pmc_load_exc); - stwma(d_space, r5, pwd, pmc_store_exc); - stwma(d_space, r6, pwd, pmc_store_exc); - stwma(d_space, r7, pwd, pmc_store_exc); - stwma(d_space, r8, pwd, pmc_store_exc); - len -= 8*sizeof(unsigned int); - } - - while (len >= 4*sizeof(unsigned int)) { - register unsigned int r1,r2,r3,r4; - ldwma(s_space, pws, r1, pmc_load_exc); - ldwma(s_space, pws, r2, pmc_load_exc); - ldwma(s_space, pws, r3, pmc_load_exc); - ldwma(s_space, pws, r4, pmc_load_exc); - stwma(d_space, r1, pwd, pmc_store_exc); - stwma(d_space, r2, pwd, pmc_store_exc); - stwma(d_space, r3, pwd, pmc_store_exc); - stwma(d_space, r4, pwd, pmc_store_exc); - len -= 4*sizeof(unsigned int); - } - - pcs = (unsigned char *)pws; - pcd = (unsigned char *)pwd; - -byte_copy: - while (len) { - /* *pcd++ = *pcs++; */ - ldbma(s_space, pcs, t3, pmc_load_exc); - stbma(d_space, t3, pcd, pmc_store_exc); - len--; - } - - return PA_MEMCPY_OK; - -unaligned_copy: - /* possibly we are aligned on a word, but not on a double... */ - if (likely((t1 & (sizeof(unsigned int)-1)) == 0)) { - t2 = src & (sizeof(unsigned int) - 1); - - if (unlikely(t2 != 0)) { - t2 = sizeof(unsigned int) - t2; - while (t2) { - /* *pcd++ = *pcs++; */ - ldbma(s_space, pcs, t3, pmc_load_exc); - stbma(d_space, t3, pcd, pmc_store_exc); - len--; - t2--; - } - } - - pws = (unsigned int *)pcs; - pwd = (unsigned int *)pcd; - goto word_copy; - } - - /* Align the destination. 
*/ - if (unlikely((dst & (sizeof(unsigned int) - 1)) != 0)) { - t2 = sizeof(unsigned int) - (dst & (sizeof(unsigned int) - 1)); - while (t2) { - /* *pcd++ = *pcs++; */ - ldbma(s_space, pcs, t3, pmc_load_exc); - stbma(d_space, t3, pcd, pmc_store_exc); - len--; - t2--; - } - dst = (unsigned long)pcd; - src = (unsigned long)pcs; - } - - ret = copy_dstaligned(dst, src, len / sizeof(unsigned int)); - if (ret) - return ret; - - pcs += (len & -sizeof(unsigned int)); - pcd += (len & -sizeof(unsigned int)); - len %= sizeof(unsigned int); - - preserve_branch(handle_load_error); - preserve_branch(handle_store_error); - - goto byte_copy; - -handle_load_error: - __asm__ __volatile__ ("pmc_load_exc:\n"); - return PA_MEMCPY_LOAD_ERROR; - -handle_store_error: - __asm__ __volatile__ ("pmc_store_exc:\n"); - return PA_MEMCPY_STORE_ERROR; -} - - /* Returns 0 for success, otherwise, returns number of bytes not transferred. */ -static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len) -{ - unsigned long ret, fault_addr, reference; - struct exception_data *d; - - ret = pa_memcpy_internal(dstp, srcp, len); - if (likely(ret == PA_MEMCPY_OK)) - return 0; - - /* if a load or store fault occured we can get the faulty addr */ - d = this_cpu_ptr(&exception_data); - fault_addr = d->fault_addr; - - /* error in load or store? */ - if (ret == PA_MEMCPY_LOAD_ERROR) - reference = (unsigned long) srcp; - else - reference = (unsigned long) dstp; +extern unsigned long pa_memcpy(void *dst, const void *src, + unsigned long len); - DPRINTF("pa_memcpy: fault type = %lu, len=%lu fault_addr=%lu ref=%lu\n", - ret, len, fault_addr, reference); - - if (fault_addr >= reference) - return len - (fault_addr - reference); - else - return len; -} - -#ifdef __KERNEL__ unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long len) { @@ -537,5 +84,3 @@ long probe_kernel_read(void *dst, const void *src, size_t size) return __probe_kernel_read(dst, src, size); } - -#endif diff --git a/arch/parisc/math-emu/driver.c b/arch/parisc/math-emu/driver.c index 09ef4136c6935e..2fb59d2e2b294b 100644 --- a/arch/parisc/math-emu/driver.c +++ b/arch/parisc/math-emu/driver.c @@ -27,7 +27,8 @@ * Copyright (C) 2001 Hewlett-Packard */ -#include +#include + #include "float.h" #include "math-emu.h" diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index 1a0b4f63f0e90f..32ec22146141e5 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -149,6 +150,23 @@ int fixup_exception(struct pt_regs *regs) d->fault_space = regs->isr; d->fault_addr = regs->ior; + /* + * Fix up get_user() and put_user(). + * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() sets the least-significant + * bit in the relative address of the fixup routine to indicate + * that %r8 should be loaded with -EFAULT to report a userspace + * access error. + */ + if (fix->fixup & 1) { + regs->gr[8] = -EFAULT; + + /* zero target register for get_user() */ + if (parisc_acctyp(0, regs->iir) == VM_READ) { + int treg = regs->iir & 0x1f; + regs->gr[treg] = 0; + } + } + regs->iaoq[0] = (unsigned long)&fix->fixup + fix->fixup; regs->iaoq[0] &= ~3; /* @@ -238,8 +256,8 @@ show_signal_msg(struct pt_regs *regs, unsigned long code, vma ? 
',':'\n'); if (vma) - pr_warn(KERN_CONT " vm_start = 0x%08lx, vm_end = 0x%08lx\n", - vma->vm_start, vma->vm_end); + pr_cont(" vm_start = 0x%08lx, vm_end = 0x%08lx\n", + vma->vm_start, vma->vm_end); show_regs(regs); } diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c index 5d6eea925cf4ec..aa50ac090e9b9d 100644 --- a/arch/parisc/mm/hugetlbpage.c +++ b/arch/parisc/mm/hugetlbpage.c @@ -8,6 +8,7 @@ #include #include +#include #include #include #include diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 494091762bd7f3..97a8bc8a095ce4 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -80,93 +80,99 @@ config ARCH_HAS_DMA_SET_COHERENT_MASK config PPC bool default y - select BUILDTIME_EXTABLE_SORT + # + # Please keep this list sorted alphabetically. + # + select ARCH_HAS_DEVMEM_IS_ALLOWED + select ARCH_HAS_DMA_SET_COHERENT_MASK + select ARCH_HAS_ELF_RANDOMIZE + select ARCH_HAS_GCOV_PROFILE_ALL + select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE + select ARCH_HAS_SG_CHAIN + select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST + select ARCH_HAS_UBSAN_SANITIZE_ALL + select ARCH_HAVE_NMI_SAFE_CMPXCHG select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_SERIO + select ARCH_SUPPORTS_ATOMIC_RMW + select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT + select ARCH_USE_BUILTIN_BSWAP + select ARCH_USE_CMPXCHG_LOCKREF if PPC64 + select ARCH_WANT_IPC_PARSE_VERSION select BINFMT_ELF - select ARCH_HAS_ELF_RANDOMIZE - select OF - select OF_EARLY_FLATTREE - select OF_RESERVED_MEM - select HAVE_FTRACE_MCOUNT_RECORD + select BUILDTIME_EXTABLE_SORT + select CLONE_BACKWARDS + select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN + select EDAC_ATOMIC_SCRUB + select EDAC_SUPPORT + select GENERIC_ATOMIC64 if PPC32 + select GENERIC_CLOCKEVENTS + select GENERIC_CLOCKEVENTS_BROADCAST if SMP + select GENERIC_CMOS_UPDATE + select GENERIC_CPU_AUTOPROBE + select GENERIC_IRQ_SHOW + select GENERIC_IRQ_SHOW_LEVEL + select GENERIC_SMP_IDLE_THREAD + select GENERIC_STRNCPY_FROM_USER + select GENERIC_STRNLEN_USER + select GENERIC_TIME_VSYSCALL_OLD + select HAVE_ARCH_AUDITSYSCALL + select HAVE_ARCH_HARDENED_USERCOPY + select HAVE_ARCH_JUMP_LABEL + select HAVE_ARCH_KGDB + select HAVE_ARCH_SECCOMP_FILTER + select HAVE_ARCH_TRACEHOOK + select HAVE_CBPF_JIT if !PPC64 + select HAVE_CONTEXT_TRACKING if PPC64 + select HAVE_DEBUG_KMEMLEAK + select HAVE_DEBUG_STACKOVERFLOW + select HAVE_DMA_API_DEBUG select HAVE_DYNAMIC_FTRACE - select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL - select HAVE_FUNCTION_TRACER + select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL + select HAVE_EBPF_JIT if PPC64 + select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU) + select HAVE_FTRACE_MCOUNT_RECORD select HAVE_FUNCTION_GRAPH_TRACER + select HAVE_FUNCTION_TRACER select HAVE_GCC_PLUGINS - select SYSCTL_EXCEPTION_TRACE - select VIRT_TO_BUS if !PPC64 + select HAVE_GENERIC_RCU_GUP + select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx) select HAVE_IDE select HAVE_IOREMAP_PROT - select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU) + select HAVE_IRQ_EXIT_ON_IRQ_STACK + select HAVE_KERNEL_GZIP select HAVE_KPROBES - select HAVE_OPTPROBES if PPC64 - select HAVE_ARCH_KGDB select HAVE_KRETPROBES - select HAVE_ARCH_TRACEHOOK + select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS select HAVE_MEMBLOCK select HAVE_MEMBLOCK_NODE_MAP - select HAVE_DMA_API_DEBUG + select HAVE_MOD_ARCH_SPECIFIC + select HAVE_NMI if PERF_EVENTS select 
HAVE_OPROFILE - select HAVE_DEBUG_KMEMLEAK - select ARCH_HAS_SG_CHAIN - select GENERIC_ATOMIC64 if PPC32 + select HAVE_OPTPROBES if PPC64 select HAVE_PERF_EVENTS + select HAVE_PERF_EVENTS_NMI if PPC64 select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP + select HAVE_RCU_TABLE_FREE if SMP select HAVE_REGS_AND_STACK_ACCESS_API - select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx) - select ARCH_WANT_IPC_PARSE_VERSION - select SPARSE_IRQ + select HAVE_SYSCALL_TRACEPOINTS + select HAVE_VIRT_CPU_ACCOUNTING select IRQ_DOMAIN - select GENERIC_IRQ_SHOW - select GENERIC_IRQ_SHOW_LEVEL select IRQ_FORCED_THREADING - select HAVE_RCU_TABLE_FREE if SMP - select HAVE_SYSCALL_TRACEPOINTS - select HAVE_CBPF_JIT if !PPC64 - select HAVE_EBPF_JIT if PPC64 - select HAVE_ARCH_JUMP_LABEL - select ARCH_HAVE_NMI_SAFE_CMPXCHG - select ARCH_HAS_GCOV_PROFILE_ALL - select GENERIC_SMP_IDLE_THREAD - select GENERIC_CMOS_UPDATE - select GENERIC_TIME_VSYSCALL_OLD - select GENERIC_CLOCKEVENTS - select GENERIC_CLOCKEVENTS_BROADCAST if SMP - select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST - select GENERIC_STRNCPY_FROM_USER - select GENERIC_STRNLEN_USER - select HAVE_MOD_ARCH_SPECIFIC select MODULES_USE_ELF_RELA - select CLONE_BACKWARDS - select ARCH_USE_BUILTIN_BSWAP - select OLD_SIGSUSPEND - select OLD_SIGACTION if PPC32 - select HAVE_DEBUG_STACKOVERFLOW - select HAVE_IRQ_EXIT_ON_IRQ_STACK - select ARCH_USE_CMPXCHG_LOCKREF if PPC64 - select HAVE_ARCH_AUDITSYSCALL - select ARCH_SUPPORTS_ATOMIC_RMW - select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN select NO_BOOTMEM - select HAVE_GENERIC_RCU_GUP - select HAVE_PERF_EVENTS_NMI if PPC64 - select HAVE_NMI if PERF_EVENTS - select EDAC_SUPPORT - select EDAC_ATOMIC_SCRUB - select ARCH_HAS_DMA_SET_COHERENT_MASK - select ARCH_HAS_DEVMEM_IS_ALLOWED - select HAVE_ARCH_SECCOMP_FILTER - select ARCH_HAS_UBSAN_SANITIZE_ALL - select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT - select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS - select GENERIC_CPU_AUTOPROBE - select HAVE_VIRT_CPU_ACCOUNTING - select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE - select HAVE_ARCH_HARDENED_USERCOPY - select HAVE_KERNEL_GZIP - select HAVE_CONTEXT_TRACKING if PPC64 + select OF + select OF_EARLY_FLATTREE + select OF_RESERVED_MEM + select OLD_SIGACTION if PPC32 + select OLD_SIGSUSPEND + select SPARSE_IRQ + select SYSCTL_EXCEPTION_TRACE + select VIRT_TO_BUS if !PPC64 + # + # Please keep this list sorted alphabetically. 
+ # config GENERIC_CSUM def_bool n diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 31286fa7873c1d..19b0d1a8195930 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -72,8 +72,15 @@ GNUTARGET := powerpc MULTIPLEWORD := -mmultiple endif -cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian) +ifdef CONFIG_PPC64 +cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1) +cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mcall-aixdesc) +aflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1) +aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mabi=elfv2 +endif + cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mlittle-endian +cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian) ifneq ($(cc-name),clang) cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mno-strict-align endif @@ -113,7 +120,9 @@ ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y) CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2,$(call cc-option,-mcall-aixdesc)) AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2) else +CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1) CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcall-aixdesc) +AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1) endif CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,$(call cc-option,-mminimal-toc)) CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions) diff --git a/arch/powerpc/boot/zImage.lds.S b/arch/powerpc/boot/zImage.lds.S index 861e72109df2da..f080abfc2f83fb 100644 --- a/arch/powerpc/boot/zImage.lds.S +++ b/arch/powerpc/boot/zImage.lds.S @@ -68,6 +68,7 @@ SECTIONS } #ifdef CONFIG_PPC64_BOOT_WRAPPER + . = ALIGN(256); .got : { __toc_start = .; diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c index 9fa046d56ebadd..411994551afc13 100644 --- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c +++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c @@ -52,7 +52,7 @@ static int crc32c_vpmsum_cra_init(struct crypto_tfm *tfm) { u32 *key = crypto_tfm_ctx(tfm); - *key = 0; + *key = ~0; return 0; } diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h index 73eb794d616381..bc5fdfd227886a 100644 --- a/arch/powerpc/include/asm/bitops.h +++ b/arch/powerpc/include/asm/bitops.h @@ -51,6 +51,10 @@ #define PPC_BIT(bit) (1UL << PPC_BITLSHIFT(bit)) #define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs)) +/* Put a PPC bit into a "normal" bit position */ +#define PPC_BITEXTRACT(bits, ppc_bit, dst_bit) \ + ((((bits) >> PPC_BITLSHIFT(ppc_bit)) & 1) << (dst_bit)) + #include /* Macro for generating the ***_bits() functions */ diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h index 01222363881556..26ed228d4dc6b7 100644 --- a/arch/powerpc/include/asm/book3s/32/pgtable.h +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h @@ -1,6 +1,7 @@ #ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H #define _ASM_POWERPC_BOOK3S_32_PGTABLE_H +#define __ARCH_USE_5LEVEL_HACK #include #include diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h index 1145dc8e726dbe..805d4105e9bbd2 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu.h +++ b/arch/powerpc/include/asm/book3s/64/mmu.h @@ -46,7 +46,7 @@ extern struct patb_entry *partition_tb; /* Bits in patb0 field */ #define PATB_HR (1UL << 63) -#define RPDB_MASK 0x0ffffffffffff00fUL +#define RPDB_MASK 0x0fffffffffffff00UL #define RPDB_SHIFT (1UL << 8) #define RTS1_SHIFT 61 /* top 2 bits of radix tree size */ #define 
RTS1_MASK (3UL << RTS1_SHIFT) @@ -57,6 +57,7 @@ extern struct patb_entry *partition_tb; /* Bits in patb1 field */ #define PATB_GR (1UL << 63) /* guest uses radix; must match HR */ #define PRTS_MASK 0x1f /* process table size field */ +#define PRTB_MASK 0x0ffffffffffff000UL /* * Limit process table to PAGE_SIZE table. This diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 1eeeb72c70158a..8f4d41936e5a90 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -1,9 +1,12 @@ #ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ #define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ +#include + #ifndef __ASSEMBLY__ #include #endif + /* * Common bits between hash and Radix page table */ @@ -347,23 +350,58 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm, __r; \ }) +static inline int __pte_write(pte_t pte) +{ + return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE)); +} + +#ifdef CONFIG_NUMA_BALANCING +#define pte_savedwrite pte_savedwrite +static inline bool pte_savedwrite(pte_t pte) +{ + /* + * Saved write ptes are prot none ptes that doesn't have + * privileged bit sit. We mark prot none as one which has + * present and pviliged bit set and RWX cleared. To mark + * protnone which used to have _PAGE_WRITE set we clear + * the privileged bit. + */ + return !(pte_raw(pte) & cpu_to_be64(_PAGE_RWX | _PAGE_PRIVILEGED)); +} +#else +#define pte_savedwrite pte_savedwrite +static inline bool pte_savedwrite(pte_t pte) +{ + return false; +} +#endif + +static inline int pte_write(pte_t pte) +{ + return __pte_write(pte) || pte_savedwrite(pte); +} + #define __HAVE_ARCH_PTEP_SET_WRPROTECT static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_WRITE)) == 0) - return; - - pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0); + if (__pte_write(*ptep)) + pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0); + else if (unlikely(pte_savedwrite(*ptep))) + pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 0); } static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_WRITE)) == 0) - return; - - pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1); + /* + * We should not find protnone for hugetlb, but this complete the + * interface. + */ + if (__pte_write(*ptep)) + pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1); + else if (unlikely(pte_savedwrite(*ptep))) + pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 1); } #define __HAVE_ARCH_PTEP_GET_AND_CLEAR @@ -397,11 +435,6 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_update(mm, addr, ptep, ~0UL, 0, 0); } -static inline int pte_write(pte_t pte) -{ - return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE)); -} - static inline int pte_dirty(pte_t pte) { return !!(pte_raw(pte) & cpu_to_be64(_PAGE_DIRTY)); @@ -465,19 +498,12 @@ static inline pte_t pte_clear_savedwrite(pte_t pte) VM_BUG_ON(!pte_protnone(pte)); return __pte(pte_val(pte) | _PAGE_PRIVILEGED); } - -#define pte_savedwrite pte_savedwrite -static inline bool pte_savedwrite(pte_t pte) +#else +#define pte_clear_savedwrite pte_clear_savedwrite +static inline pte_t pte_clear_savedwrite(pte_t pte) { - /* - * Saved write ptes are prot none ptes that doesn't have - * privileged bit sit. We mark prot none as one which has - * present and pviliged bit set and RWX cleared. 
To mark - * protnone which used to have _PAGE_WRITE set we clear - * the privileged bit. - */ - VM_BUG_ON(!pte_protnone(pte)); - return !(pte_raw(pte) & cpu_to_be64(_PAGE_RWX | _PAGE_PRIVILEGED)); + VM_WARN_ON(1); + return __pte(pte_val(pte) & ~_PAGE_WRITE); } #endif /* CONFIG_NUMA_BALANCING */ @@ -506,6 +532,8 @@ static inline unsigned long pte_pfn(pte_t pte) /* Generic modifiers for PTE bits */ static inline pte_t pte_wrprotect(pte_t pte) { + if (unlikely(pte_savedwrite(pte))) + return pte_clear_savedwrite(pte); return __pte(pte_val(pte) & ~_PAGE_WRITE); } @@ -926,6 +954,7 @@ static inline int pmd_protnone(pmd_t pmd) #define __HAVE_ARCH_PMD_WRITE #define pmd_write(pmd) pte_write(pmd_pte(pmd)) +#define __pmd_write(pmd) __pte_write(pmd_pte(pmd)) #define pmd_savedwrite(pmd) pte_savedwrite(pmd_pte(pmd)) #ifdef CONFIG_TRANSPARENT_HUGEPAGE @@ -982,11 +1011,10 @@ static inline int __pmdp_test_and_clear_young(struct mm_struct *mm, static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp) { - - if ((pmd_raw(*pmdp) & cpu_to_be64(_PAGE_WRITE)) == 0) - return; - - pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0); + if (__pmd_write((*pmdp))) + pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0); + else if (unlikely(pmd_savedwrite(*pmdp))) + pmd_hugepage_update(mm, addr, pmdp, 0, _PAGE_PRIVILEGED); } static inline int pmd_trans_huge(pmd_t pmd) diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h index 4e63787dc3becf..842124b199b585 100644 --- a/arch/powerpc/include/asm/checksum.h +++ b/arch/powerpc/include/asm/checksum.h @@ -112,7 +112,7 @@ static inline __wsum csum_add(__wsum csum, __wsum addend) #ifdef __powerpc64__ res += (__force u64)addend; - return (__force __wsum)((u32)res + (res >> 32)); + return (__force __wsum) from64to32(res); #else asm("addc %0,%0,%1;" "addze %0,%0;" diff --git a/arch/powerpc/include/asm/cpuidle.h b/arch/powerpc/include/asm/cpuidle.h index fd321eb423cb44..155731557c9bc0 100644 --- a/arch/powerpc/include/asm/cpuidle.h +++ b/arch/powerpc/include/asm/cpuidle.h @@ -70,8 +70,8 @@ static inline void report_invalid_psscr_val(u64 psscr_val, int err) std r0,0(r1); \ ptesync; \ ld r0,0(r1); \ -1: cmpd cr0,r0,r0; \ - bne 1b; \ +236: cmpd cr0,r0,r0; \ + bne 236b; \ IDLE_INST; \ #define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST) \ diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h index 93b9b84568e817..09bde6e34f5d52 100644 --- a/arch/powerpc/include/asm/elf.h +++ b/arch/powerpc/include/asm/elf.h @@ -144,8 +144,8 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, #define ARCH_DLINFO_CACHE_GEOMETRY \ NEW_AUX_ENT(AT_L1I_CACHESIZE, ppc64_caches.l1i.size); \ NEW_AUX_ENT(AT_L1I_CACHEGEOMETRY, get_cache_geometry(l1i)); \ - NEW_AUX_ENT(AT_L1D_CACHESIZE, ppc64_caches.l1i.size); \ - NEW_AUX_ENT(AT_L1D_CACHEGEOMETRY, get_cache_geometry(l1i)); \ + NEW_AUX_ENT(AT_L1D_CACHESIZE, ppc64_caches.l1d.size); \ + NEW_AUX_ENT(AT_L1D_CACHEGEOMETRY, get_cache_geometry(l1d)); \ NEW_AUX_ENT(AT_L2_CACHESIZE, ppc64_caches.l2.size); \ NEW_AUX_ENT(AT_L2_CACHEGEOMETRY, get_cache_geometry(l2)); \ NEW_AUX_ENT(AT_L3_CACHESIZE, ppc64_caches.l3.size); \ diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h index f97d8cb6bdf64f..ed62efe01e49ed 100644 --- a/arch/powerpc/include/asm/mce.h +++ b/arch/powerpc/include/asm/mce.h @@ -66,6 +66,55 @@ #define P8_DSISR_MC_SLB_ERRORS (P7_DSISR_MC_SLB_ERRORS | \ P8_DSISR_MC_ERAT_MULTIHIT_SEC) + +/* + * Machine Check bits on power9 + */ +#define 
P9_SRR1_MC_LOADSTORE(srr1) (((srr1) >> PPC_BITLSHIFT(42)) & 1) + +#define P9_SRR1_MC_IFETCH(srr1) ( \ + PPC_BITEXTRACT(srr1, 45, 0) | \ + PPC_BITEXTRACT(srr1, 44, 1) | \ + PPC_BITEXTRACT(srr1, 43, 2) | \ + PPC_BITEXTRACT(srr1, 36, 3) ) + +/* 0 is reserved */ +#define P9_SRR1_MC_IFETCH_UE 1 +#define P9_SRR1_MC_IFETCH_SLB_PARITY 2 +#define P9_SRR1_MC_IFETCH_SLB_MULTIHIT 3 +#define P9_SRR1_MC_IFETCH_ERAT_MULTIHIT 4 +#define P9_SRR1_MC_IFETCH_TLB_MULTIHIT 5 +#define P9_SRR1_MC_IFETCH_UE_TLB_RELOAD 6 +/* 7 is reserved */ +#define P9_SRR1_MC_IFETCH_LINK_TIMEOUT 8 +#define P9_SRR1_MC_IFETCH_LINK_TABLEWALK_TIMEOUT 9 +/* 10 ? */ +#define P9_SRR1_MC_IFETCH_RA 11 +#define P9_SRR1_MC_IFETCH_RA_TABLEWALK 12 +#define P9_SRR1_MC_IFETCH_RA_ASYNC_STORE 13 +#define P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT 14 +#define P9_SRR1_MC_IFETCH_RA_TABLEWALK_FOREIGN 15 + +/* DSISR bits for machine check (On Power9) */ +#define P9_DSISR_MC_UE (PPC_BIT(48)) +#define P9_DSISR_MC_UE_TABLEWALK (PPC_BIT(49)) +#define P9_DSISR_MC_LINK_LOAD_TIMEOUT (PPC_BIT(50)) +#define P9_DSISR_MC_LINK_TABLEWALK_TIMEOUT (PPC_BIT(51)) +#define P9_DSISR_MC_ERAT_MULTIHIT (PPC_BIT(52)) +#define P9_DSISR_MC_TLB_MULTIHIT_MFTLB (PPC_BIT(53)) +#define P9_DSISR_MC_USER_TLBIE (PPC_BIT(54)) +#define P9_DSISR_MC_SLB_PARITY_MFSLB (PPC_BIT(55)) +#define P9_DSISR_MC_SLB_MULTIHIT_MFSLB (PPC_BIT(56)) +#define P9_DSISR_MC_RA_LOAD (PPC_BIT(57)) +#define P9_DSISR_MC_RA_TABLEWALK (PPC_BIT(58)) +#define P9_DSISR_MC_RA_TABLEWALK_FOREIGN (PPC_BIT(59)) +#define P9_DSISR_MC_RA_FOREIGN (PPC_BIT(60)) + +/* SLB error bits */ +#define P9_DSISR_MC_SLB_ERRORS (P9_DSISR_MC_ERAT_MULTIHIT | \ + P9_DSISR_MC_SLB_PARITY_MFSLB | \ + P9_DSISR_MC_SLB_MULTIHIT_MFSLB) + enum MCE_Version { MCE_V1 = 1, }; @@ -93,6 +142,9 @@ enum MCE_ErrorType { MCE_ERROR_TYPE_SLB = 2, MCE_ERROR_TYPE_ERAT = 3, MCE_ERROR_TYPE_TLB = 4, + MCE_ERROR_TYPE_USER = 5, + MCE_ERROR_TYPE_RA = 6, + MCE_ERROR_TYPE_LINK = 7, }; enum MCE_UeErrorType { @@ -121,6 +173,32 @@ enum MCE_TlbErrorType { MCE_TLB_ERROR_MULTIHIT = 2, }; +enum MCE_UserErrorType { + MCE_USER_ERROR_INDETERMINATE = 0, + MCE_USER_ERROR_TLBIE = 1, +}; + +enum MCE_RaErrorType { + MCE_RA_ERROR_INDETERMINATE = 0, + MCE_RA_ERROR_IFETCH = 1, + MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH = 2, + MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN = 3, + MCE_RA_ERROR_LOAD = 4, + MCE_RA_ERROR_STORE = 5, + MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 6, + MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN = 7, + MCE_RA_ERROR_LOAD_STORE_FOREIGN = 8, +}; + +enum MCE_LinkErrorType { + MCE_LINK_ERROR_INDETERMINATE = 0, + MCE_LINK_ERROR_IFETCH_TIMEOUT = 1, + MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT = 2, + MCE_LINK_ERROR_LOAD_TIMEOUT = 3, + MCE_LINK_ERROR_STORE_TIMEOUT = 4, + MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT = 5, +}; + struct machine_check_event { enum MCE_Version version:8; /* 0x00 */ uint8_t in_use; /* 0x01 */ @@ -166,6 +244,30 @@ struct machine_check_event { uint64_t effective_address; uint8_t reserved_2[16]; } tlb_error; + + struct { + enum MCE_UserErrorType user_error_type:8; + uint8_t effective_address_provided; + uint8_t reserved_1[6]; + uint64_t effective_address; + uint8_t reserved_2[16]; + } user_error; + + struct { + enum MCE_RaErrorType ra_error_type:8; + uint8_t effective_address_provided; + uint8_t reserved_1[6]; + uint64_t effective_address; + uint8_t reserved_2[16]; + } ra_error; + + struct { + enum MCE_LinkErrorType link_error_type:8; + uint8_t effective_address_provided; + uint8_t reserved_1[6]; + uint64_t effective_address; + uint8_t 
reserved_2[16]; + } link_error; } u; }; @@ -176,8 +278,12 @@ struct mce_error_info { enum MCE_SlbErrorType slb_error_type:8; enum MCE_EratErrorType erat_error_type:8; enum MCE_TlbErrorType tlb_error_type:8; + enum MCE_UserErrorType user_error_type:8; + enum MCE_RaErrorType ra_error_type:8; + enum MCE_LinkErrorType link_error_type:8; } u; - uint8_t reserved[2]; + enum MCE_Severity severity:8; + enum MCE_Initiator initiator:8; }; #define MAX_MC_EVT 100 diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index ba9921bf202e0c..5134ade2e85016 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -1,6 +1,7 @@ #ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H #define _ASM_POWERPC_NOHASH_32_PGTABLE_H +#define __ARCH_USE_5LEVEL_HACK #include #ifndef __ASSEMBLY__ diff --git a/arch/powerpc/include/asm/nohash/64/pgtable-4k.h b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h index d0db98793dd83d..9f4de0a1035efb 100644 --- a/arch/powerpc/include/asm/nohash/64/pgtable-4k.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h @@ -1,5 +1,8 @@ #ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H #define _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H + +#include + /* * Entries per page directory level. The PTE level must use a 64b record * for each page table entry. The PMD and PGD level use a 32b record for diff --git a/arch/powerpc/include/asm/nohash/64/pgtable-64k.h b/arch/powerpc/include/asm/nohash/64/pgtable-64k.h index 55b28ef3409af5..1facb584dd2962 100644 --- a/arch/powerpc/include/asm/nohash/64/pgtable-64k.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable-64k.h @@ -1,6 +1,7 @@ #ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H #define _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H +#define __ARCH_USE_5LEVEL_HACK #include diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h index 0cd8a385276329..e5805ad78e127b 100644 --- a/arch/powerpc/include/asm/nohash/pgtable.h +++ b/arch/powerpc/include/asm/nohash/pgtable.h @@ -230,7 +230,7 @@ static inline int hugepd_ok(hugepd_t hpd) return ((hpd_val(hpd) & 0x4) != 0); #else /* We clear the top bit to indicate hugepd */ - return ((hpd_val(hpd) & PD_HUGE) == 0); + return (hpd_val(hpd) && (hpd_val(hpd) & PD_HUGE) == 0); #endif } diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index d99bd442aacbe5..e7d6d86563eeda 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -284,6 +284,13 @@ #define PPC_INST_BRANCH_COND 0x40800000 #define PPC_INST_LBZCIX 0x7c0006aa #define PPC_INST_STBCIX 0x7c0007aa +#define PPC_INST_LWZX 0x7c00002e +#define PPC_INST_LFSX 0x7c00042e +#define PPC_INST_STFSX 0x7c00052e +#define PPC_INST_LFDX 0x7c0004ae +#define PPC_INST_STFDX 0x7c0005ae +#define PPC_INST_LVX 0x7c0000ce +#define PPC_INST_STVX 0x7c0001ce /* macros to insert fields into opcodes */ #define ___PPC_RA(a) (((a) & 0x1f) << 16) diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h index 4a90634e83223c..35c00d7a0cf81a 100644 --- a/arch/powerpc/include/asm/prom.h +++ b/arch/powerpc/include/asm/prom.h @@ -160,12 +160,18 @@ struct of_drconf_cell { #define OV5_PFO_HW_ENCR 0x1120 /* PFO Encryption Accelerator */ #define OV5_SUB_PROCESSORS 0x1501 /* 1,2,or 4 Sub-Processors supported */ #define OV5_XIVE_EXPLOIT 0x1701 /* XIVE exploitation supported */ -#define OV5_MMU_RADIX_300 0x1880 /* ISA v3.00 radix MMU supported */ -#define OV5_MMU_HASH_300 0x1840 /* ISA v3.00 hash 
MMU supported */ -#define OV5_MMU_SEGM_RADIX 0x1820 /* radix mode (no segmentation) */ -#define OV5_MMU_PROC_TBL 0x1810 /* hcall selects SLB or proc table */ -#define OV5_MMU_SLB 0x1800 /* always use SLB */ -#define OV5_MMU_GTSE 0x1808 /* Guest translation shootdown */ +/* MMU Base Architecture */ +#define OV5_MMU_SUPPORT 0x18C0 /* MMU Mode Support Mask */ +#define OV5_MMU_HASH 0x1800 /* Hash MMU Only */ +#define OV5_MMU_RADIX 0x1840 /* Radix MMU Only */ +#define OV5_MMU_EITHER 0x1880 /* Hash or Radix Supported */ +#define OV5_MMU_DYNAMIC 0x18C0 /* Hash or Radix Can Switch Later */ +#define OV5_NMMU 0x1820 /* Nest MMU Available */ +/* Hash Table Extensions */ +#define OV5_HASH_SEG_TBL 0x1980 /* In Memory Segment Tables Available */ +#define OV5_HASH_GTSE 0x1940 /* Guest Translation Shoot Down Avail */ +/* Radix Table Extensions */ +#define OV5_RADIX_GTSE 0x1A40 /* Guest Translation Shoot Down Avail */ /* Option Vector 6: IBM PAPR hints */ #define OV6_LINUX 0x02 /* Linux is our OS */ diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index 4b369d83fe9ce1..1c9470881c4abe 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h @@ -387,3 +387,4 @@ SYSCALL(copy_file_range) COMPAT_SYS_SPU(preadv2) COMPAT_SYS_SPU(pwritev2) SYSCALL(kexec_file_load) +SYSCALL(statx) diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index eb1acee91a2034..9ba11dbcaca98f 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h @@ -12,7 +12,7 @@ #include -#define NR_syscalls 383 +#define NR_syscalls 384 #define __NR__exit __NR_exit diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h index 2f26335a3c42a8..b85f1422885746 100644 --- a/arch/powerpc/include/uapi/asm/unistd.h +++ b/arch/powerpc/include/uapi/asm/unistd.h @@ -393,5 +393,6 @@ #define __NR_preadv2 380 #define __NR_pwritev2 381 #define __NR_kexec_file_load 382 +#define __NR_statx 383 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index bb7a1890aeb7fb..e79b9daa873c18 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -77,6 +77,7 @@ extern void __flush_tlb_power8(unsigned int action); extern void __flush_tlb_power9(unsigned int action); extern long __machine_check_early_realmode_p7(struct pt_regs *regs); extern long __machine_check_early_realmode_p8(struct pt_regs *regs); +extern long __machine_check_early_realmode_p9(struct pt_regs *regs); #endif /* CONFIG_PPC64 */ #if defined(CONFIG_E500) extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec); @@ -540,6 +541,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_setup = __setup_cpu_power9, .cpu_restore = __restore_cpu_power9, .flush_tlb = __flush_tlb_power9, + .machine_check_early = __machine_check_early_realmode_p9, .platform = "power9", }, { /* Power9 */ @@ -559,6 +561,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_setup = __setup_cpu_power9, .cpu_restore = __restore_cpu_power9, .flush_tlb = __flush_tlb_power9, + .machine_check_early = __machine_check_early_realmode_p9, .platform = "power9", }, { /* Cell Broadband Engine */ diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S index 5f61cc0349c063..6fd08219248db7 100644 --- a/arch/powerpc/kernel/idle_book3s.S +++ b/arch/powerpc/kernel/idle_book3s.S @@ -276,19 +276,21 @@ power_enter_stop: */ andis. 
r4,r3,PSSCR_EC_ESL_MASK_SHIFTED clrldi r3,r3,60 /* r3 = Bits[60:63] = Requested Level (RL) */ - bne 1f + bne .Lhandle_esl_ec_set IDLE_STATE_ENTER_SEQ(PPC_STOP) li r3,0 /* Since we didn't lose state, return 0 */ b pnv_wakeup_noloss + +.Lhandle_esl_ec_set: /* * Check if the requested state is a deep idle state. */ -1: LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state) + LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state) ld r4,ADDROFF(pnv_first_deep_stop_state)(r5) cmpd r3,r4 - bge 2f + bge .Lhandle_deep_stop IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP) -2: +.Lhandle_deep_stop: /* * Entering deep idle state. * Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to @@ -447,9 +449,23 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) _GLOBAL(pnv_wakeup_tb_loss) ld r1,PACAR1(r13) /* - * Before entering any idle state, the NVGPRs are saved in the stack - * and they are restored before switching to the process context. Hence - * until they are restored, they are free to be used. + * Before entering any idle state, the NVGPRs are saved in the stack. + * If there was a state loss, or PACA_NAPSTATELOST was set, then the + * NVGPRs are restored. If we are here, it is likely that state is lost, + * but not guaranteed -- neither ISA207 nor ISA300 tests to reach + * here are the same as the test to restore NVGPRS: + * PACA_THREAD_IDLE_STATE test for ISA207, PSSCR test for ISA300, + * and SRR1 test for restoring NVGPRs. + * + * We are about to clobber NVGPRs now, so set NAPSTATELOST to + * guarantee they will always be restored. This might be tightened + * with careful reading of specs (particularly for ISA300) but this + * is already a slow wakeup path and it's simpler to be safe. + */ + li r0,1 + stb r0,PACA_NAPSTATELOST(r13) + + /* * * Save SRR1 and LR in NVGPRs as they might be clobbered in * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required diff --git a/arch/powerpc/kernel/io-workarounds.c b/arch/powerpc/kernel/io-workarounds.c index 5f8613ceb97f15..a582e0d4252552 100644 --- a/arch/powerpc/kernel/io-workarounds.c +++ b/arch/powerpc/kernel/io-workarounds.c @@ -12,7 +12,7 @@ #undef DEBUG #include -#include /* for init_mm */ +#include /* for init_mm */ #include #include diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c index c6923ff451311b..a1475e6aef3a51 100644 --- a/arch/powerpc/kernel/mce.c +++ b/arch/powerpc/kernel/mce.c @@ -58,6 +58,15 @@ static void mce_set_error_info(struct machine_check_event *mce, case MCE_ERROR_TYPE_TLB: mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type; break; + case MCE_ERROR_TYPE_USER: + mce->u.user_error.user_error_type = mce_err->u.user_error_type; + break; + case MCE_ERROR_TYPE_RA: + mce->u.ra_error.ra_error_type = mce_err->u.ra_error_type; + break; + case MCE_ERROR_TYPE_LINK: + mce->u.link_error.link_error_type = mce_err->u.link_error_type; + break; case MCE_ERROR_TYPE_UNKNOWN: default: break; @@ -90,13 +99,14 @@ void save_mce_event(struct pt_regs *regs, long handled, mce->gpr3 = regs->gpr[3]; mce->in_use = 1; - mce->initiator = MCE_INITIATOR_CPU; /* Mark it recovered if we have handled it and MSR(RI=1). */ if (handled && (regs->msr & MSR_RI)) mce->disposition = MCE_DISPOSITION_RECOVERED; else mce->disposition = MCE_DISPOSITION_NOT_RECOVERED; - mce->severity = MCE_SEV_ERROR_SYNC; + + mce->initiator = mce_err->initiator; + mce->severity = mce_err->severity; /* * Populate the mce error_type and type-specific error_type. 
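
The Power9 machine-check decoding added in this series leans on the PPC_BITEXTRACT() macro introduced in asm/bitops.h earlier in the diff: ISA bit numbering counts bit 0 as the most significant bit, so on 64-bit PowerPC IBM bit 45 of SRR1 is (1UL << (63 - 45)). A small worked example of how P9_SRR1_MC_IFETCH() from asm/mce.h above packs those bits; this is purely illustrative and not part of the patch.

        static int p9_srr1_ifetch_example(void)
        {
                /* SRR1 with IBM bits 45 and 43 set, bits 44 and 36 clear */
                unsigned long srr1 = (1UL << (63 - 45)) | (1UL << (63 - 43));

                /*
                 * PPC_BITEXTRACT() moves IBM bits 45, 44, 43, 36 to normal bit
                 * positions 0, 1, 2, 3, so this evaluates to
                 * (1 << 0) | (1 << 2) == 5, i.e. P9_SRR1_MC_IFETCH_TLB_MULTIHIT.
                 */
                return P9_SRR1_MC_IFETCH(srr1);
        }

mce_handle_ierror_p9(), further down in mce_power.c, switches on exactly this value to pick the right SLB, TLB or ERAT flush.
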
@@ -115,6 +125,15 @@ void save_mce_event(struct pt_regs *regs, long handled, } else if (mce->error_type == MCE_ERROR_TYPE_ERAT) { mce->u.erat_error.effective_address_provided = true; mce->u.erat_error.effective_address = addr; + } else if (mce->error_type == MCE_ERROR_TYPE_USER) { + mce->u.user_error.effective_address_provided = true; + mce->u.user_error.effective_address = addr; + } else if (mce->error_type == MCE_ERROR_TYPE_RA) { + mce->u.ra_error.effective_address_provided = true; + mce->u.ra_error.effective_address = addr; + } else if (mce->error_type == MCE_ERROR_TYPE_LINK) { + mce->u.link_error.effective_address_provided = true; + mce->u.link_error.effective_address = addr; } else if (mce->error_type == MCE_ERROR_TYPE_UE) { mce->u.ue_error.effective_address_provided = true; mce->u.ue_error.effective_address = addr; @@ -239,6 +258,29 @@ void machine_check_print_event_info(struct machine_check_event *evt) "Parity", "Multihit", }; + static const char *mc_user_types[] = { + "Indeterminate", + "tlbie(l) invalid", + }; + static const char *mc_ra_types[] = { + "Indeterminate", + "Instruction fetch (bad)", + "Page table walk ifetch (bad)", + "Page table walk ifetch (foreign)", + "Load (bad)", + "Store (bad)", + "Page table walk Load/Store (bad)", + "Page table walk Load/Store (foreign)", + "Load/Store (foreign)", + }; + static const char *mc_link_types[] = { + "Indeterminate", + "Instruction fetch (timeout)", + "Page table walk ifetch (timeout)", + "Load (timeout)", + "Store (timeout)", + "Page table walk Load/Store (timeout)", + }; /* Print things out */ if (evt->version != MCE_V1) { @@ -315,6 +357,36 @@ void machine_check_print_event_info(struct machine_check_event *evt) printk("%s Effective address: %016llx\n", level, evt->u.tlb_error.effective_address); break; + case MCE_ERROR_TYPE_USER: + subtype = evt->u.user_error.user_error_type < + ARRAY_SIZE(mc_user_types) ? + mc_user_types[evt->u.user_error.user_error_type] + : "Unknown"; + printk("%s Error type: User [%s]\n", level, subtype); + if (evt->u.user_error.effective_address_provided) + printk("%s Effective address: %016llx\n", + level, evt->u.user_error.effective_address); + break; + case MCE_ERROR_TYPE_RA: + subtype = evt->u.ra_error.ra_error_type < + ARRAY_SIZE(mc_ra_types) ? + mc_ra_types[evt->u.ra_error.ra_error_type] + : "Unknown"; + printk("%s Error type: Real address [%s]\n", level, subtype); + if (evt->u.ra_error.effective_address_provided) + printk("%s Effective address: %016llx\n", + level, evt->u.ra_error.effective_address); + break; + case MCE_ERROR_TYPE_LINK: + subtype = evt->u.link_error.link_error_type < + ARRAY_SIZE(mc_link_types) ? 
+ mc_link_types[evt->u.link_error.link_error_type] + : "Unknown"; + printk("%s Error type: Link [%s]\n", level, subtype); + if (evt->u.link_error.effective_address_provided) + printk("%s Effective address: %016llx\n", + level, evt->u.link_error.effective_address); + break; default: case MCE_ERROR_TYPE_UNKNOWN: printk("%s Error type: Unknown\n", level); @@ -341,6 +413,18 @@ uint64_t get_mce_fault_addr(struct machine_check_event *evt) if (evt->u.tlb_error.effective_address_provided) return evt->u.tlb_error.effective_address; break; + case MCE_ERROR_TYPE_USER: + if (evt->u.user_error.effective_address_provided) + return evt->u.user_error.effective_address; + break; + case MCE_ERROR_TYPE_RA: + if (evt->u.ra_error.effective_address_provided) + return evt->u.ra_error.effective_address; + break; + case MCE_ERROR_TYPE_LINK: + if (evt->u.link_error.effective_address_provided) + return evt->u.link_error.effective_address; + break; default: case MCE_ERROR_TYPE_UNKNOWN: break; diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c index 7353991c4ecee6..763d6f58caa8ca 100644 --- a/arch/powerpc/kernel/mce_power.c +++ b/arch/powerpc/kernel/mce_power.c @@ -116,6 +116,51 @@ static void flush_and_reload_slb(void) } #endif +static void flush_erat(void) +{ + asm volatile(PPC_INVALIDATE_ERAT : : :"memory"); +} + +#define MCE_FLUSH_SLB 1 +#define MCE_FLUSH_TLB 2 +#define MCE_FLUSH_ERAT 3 + +static int mce_flush(int what) +{ +#ifdef CONFIG_PPC_STD_MMU_64 + if (what == MCE_FLUSH_SLB) { + flush_and_reload_slb(); + return 1; + } +#endif + if (what == MCE_FLUSH_ERAT) { + flush_erat(); + return 1; + } + if (what == MCE_FLUSH_TLB) { + if (cur_cpu_spec && cur_cpu_spec->flush_tlb) { + cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_GLOBAL); + return 1; + } + } + + return 0; +} + +static int mce_handle_flush_derrors(uint64_t dsisr, uint64_t slb, uint64_t tlb, uint64_t erat) +{ + if ((dsisr & slb) && mce_flush(MCE_FLUSH_SLB)) + dsisr &= ~slb; + if ((dsisr & erat) && mce_flush(MCE_FLUSH_ERAT)) + dsisr &= ~erat; + if ((dsisr & tlb) && mce_flush(MCE_FLUSH_TLB)) + dsisr &= ~tlb; + /* Any other errors we don't understand? 
*/ + if (dsisr) + return 0; + return 1; +} + static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits) { long handled = 1; @@ -281,6 +326,9 @@ long __machine_check_early_realmode_p7(struct pt_regs *regs) long handled = 1; struct mce_error_info mce_error_info = { 0 }; + mce_error_info.severity = MCE_SEV_ERROR_SYNC; + mce_error_info.initiator = MCE_INITIATOR_CPU; + srr1 = regs->msr; nip = regs->nip; @@ -352,6 +400,9 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs) long handled = 1; struct mce_error_info mce_error_info = { 0 }; + mce_error_info.severity = MCE_SEV_ERROR_SYNC; + mce_error_info.initiator = MCE_INITIATOR_CPU; + srr1 = regs->msr; nip = regs->nip; @@ -372,3 +423,189 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs) save_mce_event(regs, handled, &mce_error_info, nip, addr); return handled; } + +static int mce_handle_derror_p9(struct pt_regs *regs) +{ + uint64_t dsisr = regs->dsisr; + + return mce_handle_flush_derrors(dsisr, + P9_DSISR_MC_SLB_PARITY_MFSLB | + P9_DSISR_MC_SLB_MULTIHIT_MFSLB, + + P9_DSISR_MC_TLB_MULTIHIT_MFTLB, + + P9_DSISR_MC_ERAT_MULTIHIT); +} + +static int mce_handle_ierror_p9(struct pt_regs *regs) +{ + uint64_t srr1 = regs->msr; + + switch (P9_SRR1_MC_IFETCH(srr1)) { + case P9_SRR1_MC_IFETCH_SLB_PARITY: + case P9_SRR1_MC_IFETCH_SLB_MULTIHIT: + return mce_flush(MCE_FLUSH_SLB); + case P9_SRR1_MC_IFETCH_TLB_MULTIHIT: + return mce_flush(MCE_FLUSH_TLB); + case P9_SRR1_MC_IFETCH_ERAT_MULTIHIT: + return mce_flush(MCE_FLUSH_ERAT); + default: + return 0; + } +} + +static void mce_get_derror_p9(struct pt_regs *regs, + struct mce_error_info *mce_err, uint64_t *addr) +{ + uint64_t dsisr = regs->dsisr; + + mce_err->severity = MCE_SEV_ERROR_SYNC; + mce_err->initiator = MCE_INITIATOR_CPU; + + if (dsisr & P9_DSISR_MC_USER_TLBIE) + *addr = regs->nip; + else + *addr = regs->dar; + + if (dsisr & P9_DSISR_MC_UE) { + mce_err->error_type = MCE_ERROR_TYPE_UE; + mce_err->u.ue_error_type = MCE_UE_ERROR_LOAD_STORE; + } else if (dsisr & P9_DSISR_MC_UE_TABLEWALK) { + mce_err->error_type = MCE_ERROR_TYPE_UE; + mce_err->u.ue_error_type = MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE; + } else if (dsisr & P9_DSISR_MC_LINK_LOAD_TIMEOUT) { + mce_err->error_type = MCE_ERROR_TYPE_LINK; + mce_err->u.link_error_type = MCE_LINK_ERROR_LOAD_TIMEOUT; + } else if (dsisr & P9_DSISR_MC_LINK_TABLEWALK_TIMEOUT) { + mce_err->error_type = MCE_ERROR_TYPE_LINK; + mce_err->u.link_error_type = MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT; + } else if (dsisr & P9_DSISR_MC_ERAT_MULTIHIT) { + mce_err->error_type = MCE_ERROR_TYPE_ERAT; + mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT; + } else if (dsisr & P9_DSISR_MC_TLB_MULTIHIT_MFTLB) { + mce_err->error_type = MCE_ERROR_TYPE_TLB; + mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT; + } else if (dsisr & P9_DSISR_MC_USER_TLBIE) { + mce_err->error_type = MCE_ERROR_TYPE_USER; + mce_err->u.user_error_type = MCE_USER_ERROR_TLBIE; + } else if (dsisr & P9_DSISR_MC_SLB_PARITY_MFSLB) { + mce_err->error_type = MCE_ERROR_TYPE_SLB; + mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY; + } else if (dsisr & P9_DSISR_MC_SLB_MULTIHIT_MFSLB) { + mce_err->error_type = MCE_ERROR_TYPE_SLB; + mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT; + } else if (dsisr & P9_DSISR_MC_RA_LOAD) { + mce_err->error_type = MCE_ERROR_TYPE_RA; + mce_err->u.ra_error_type = MCE_RA_ERROR_LOAD; + } else if (dsisr & P9_DSISR_MC_RA_TABLEWALK) { + mce_err->error_type = MCE_ERROR_TYPE_RA; + mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE; + } else 
if (dsisr & P9_DSISR_MC_RA_TABLEWALK_FOREIGN) { + mce_err->error_type = MCE_ERROR_TYPE_RA; + mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN; + } else if (dsisr & P9_DSISR_MC_RA_FOREIGN) { + mce_err->error_type = MCE_ERROR_TYPE_RA; + mce_err->u.ra_error_type = MCE_RA_ERROR_LOAD_STORE_FOREIGN; + } +} + +static void mce_get_ierror_p9(struct pt_regs *regs, + struct mce_error_info *mce_err, uint64_t *addr) +{ + uint64_t srr1 = regs->msr; + + switch (P9_SRR1_MC_IFETCH(srr1)) { + case P9_SRR1_MC_IFETCH_RA_ASYNC_STORE: + case P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT: + mce_err->severity = MCE_SEV_FATAL; + break; + default: + mce_err->severity = MCE_SEV_ERROR_SYNC; + break; + } + + mce_err->initiator = MCE_INITIATOR_CPU; + + *addr = regs->nip; + + switch (P9_SRR1_MC_IFETCH(srr1)) { + case P9_SRR1_MC_IFETCH_UE: + mce_err->error_type = MCE_ERROR_TYPE_UE; + mce_err->u.ue_error_type = MCE_UE_ERROR_IFETCH; + break; + case P9_SRR1_MC_IFETCH_SLB_PARITY: + mce_err->error_type = MCE_ERROR_TYPE_SLB; + mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY; + break; + case P9_SRR1_MC_IFETCH_SLB_MULTIHIT: + mce_err->error_type = MCE_ERROR_TYPE_SLB; + mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT; + break; + case P9_SRR1_MC_IFETCH_ERAT_MULTIHIT: + mce_err->error_type = MCE_ERROR_TYPE_ERAT; + mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT; + break; + case P9_SRR1_MC_IFETCH_TLB_MULTIHIT: + mce_err->error_type = MCE_ERROR_TYPE_TLB; + mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT; + break; + case P9_SRR1_MC_IFETCH_UE_TLB_RELOAD: + mce_err->error_type = MCE_ERROR_TYPE_UE; + mce_err->u.ue_error_type = MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH; + break; + case P9_SRR1_MC_IFETCH_LINK_TIMEOUT: + mce_err->error_type = MCE_ERROR_TYPE_LINK; + mce_err->u.link_error_type = MCE_LINK_ERROR_IFETCH_TIMEOUT; + break; + case P9_SRR1_MC_IFETCH_LINK_TABLEWALK_TIMEOUT: + mce_err->error_type = MCE_ERROR_TYPE_LINK; + mce_err->u.link_error_type = MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT; + break; + case P9_SRR1_MC_IFETCH_RA: + mce_err->error_type = MCE_ERROR_TYPE_RA; + mce_err->u.ra_error_type = MCE_RA_ERROR_IFETCH; + break; + case P9_SRR1_MC_IFETCH_RA_TABLEWALK: + mce_err->error_type = MCE_ERROR_TYPE_RA; + mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH; + break; + case P9_SRR1_MC_IFETCH_RA_ASYNC_STORE: + mce_err->error_type = MCE_ERROR_TYPE_RA; + mce_err->u.ra_error_type = MCE_RA_ERROR_STORE; + break; + case P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT: + mce_err->error_type = MCE_ERROR_TYPE_LINK; + mce_err->u.link_error_type = MCE_LINK_ERROR_STORE_TIMEOUT; + break; + case P9_SRR1_MC_IFETCH_RA_TABLEWALK_FOREIGN: + mce_err->error_type = MCE_ERROR_TYPE_RA; + mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN; + break; + default: + break; + } +} + +long __machine_check_early_realmode_p9(struct pt_regs *regs) +{ + uint64_t nip, addr; + long handled; + struct mce_error_info mce_error_info = { 0 }; + + nip = regs->nip; + + if (P9_SRR1_MC_LOADSTORE(regs->msr)) { + handled = mce_handle_derror_p9(regs); + mce_get_derror_p9(regs, &mce_error_info, &addr); + } else { + handled = mce_handle_ierror_p9(regs); + mce_get_ierror_p9(regs, &mce_error_info, &addr); + } + + /* Handle UE error. 
*/ + if (mce_error_info.error_type == MCE_ERROR_TYPE_UE) + handled = mce_handle_ue_error(regs); + + save_mce_event(regs, handled, &mce_error_info, nip, addr); + return handled; +} diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index fa20060ff7a52e..dfc479df9634e2 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 4379a079b3c25f..d645da302bf22f 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -16,6 +16,9 @@ #include #include +#include +#include +#include #include #include #include diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index a3944540fe0d56..1c1b44ec7642a5 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -168,6 +168,14 @@ static unsigned long __initdata prom_tce_alloc_start; static unsigned long __initdata prom_tce_alloc_end; #endif +static bool __initdata prom_radix_disable; + +struct platform_support { + bool hash_mmu; + bool radix_mmu; + bool radix_gtse; +}; + /* Platforms codes are now obsolete in the kernel. Now only used within this * file and ultimately gone too. Feel free to change them if you need, they * are not shared with anything outside of this file anymore @@ -626,6 +634,12 @@ static void __init early_cmdline_parse(void) prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000); #endif } + + opt = strstr(prom_cmd_line, "disable_radix"); + if (opt) { + prom_debug("Radix disabled from cmdline\n"); + prom_radix_disable = true; + } } #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) @@ -695,6 +709,8 @@ struct option_vector5 { u8 byte22; u8 intarch; u8 mmu; + u8 hash_ext; + u8 radix_ext; } __packed; struct option_vector6 { @@ -850,8 +866,9 @@ struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = { .reserved3 = 0, .subprocessors = 1, .intarch = 0, - .mmu = OV5_FEAT(OV5_MMU_RADIX_300) | OV5_FEAT(OV5_MMU_HASH_300) | - OV5_FEAT(OV5_MMU_PROC_TBL) | OV5_FEAT(OV5_MMU_GTSE), + .mmu = 0, + .hash_ext = 0, + .radix_ext = 0, }, /* option vector 6: IBM PAPR hints */ @@ -990,6 +1007,92 @@ static int __init prom_count_smt_threads(void) } +static void __init prom_parse_mmu_model(u8 val, + struct platform_support *support) +{ + switch (val) { + case OV5_FEAT(OV5_MMU_DYNAMIC): + case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */ + prom_debug("MMU - either supported\n"); + support->radix_mmu = !prom_radix_disable; + support->hash_mmu = true; + break; + case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */ + prom_debug("MMU - radix only\n"); + if (prom_radix_disable) { + /* + * If we __have__ to do radix, we're better off ignoring + * the command line rather than not booting. 
+ */ + prom_printf("WARNING: Ignoring cmdline option disable_radix\n"); + } + support->radix_mmu = true; + break; + case OV5_FEAT(OV5_MMU_HASH): + prom_debug("MMU - hash only\n"); + support->hash_mmu = true; + break; + default: + prom_debug("Unknown mmu support option: 0x%x\n", val); + break; + } +} + +static void __init prom_parse_platform_support(u8 index, u8 val, + struct platform_support *support) +{ + switch (index) { + case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */ + prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support); + break; + case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */ + if (val & OV5_FEAT(OV5_RADIX_GTSE)) { + prom_debug("Radix - GTSE supported\n"); + support->radix_gtse = true; + } + break; + } +} + +static void __init prom_check_platform_support(void) +{ + struct platform_support supported = { + .hash_mmu = false, + .radix_mmu = false, + .radix_gtse = false + }; + int prop_len = prom_getproplen(prom.chosen, + "ibm,arch-vec-5-platform-support"); + if (prop_len > 1) { + int i; + u8 vec[prop_len]; + prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n", + prop_len); + prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support", + &vec, sizeof(vec)); + for (i = 0; i < prop_len; i += 2) { + prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2 + , vec[i] + , vec[i + 1]); + prom_parse_platform_support(vec[i], vec[i + 1], + &supported); + } + } + + if (supported.radix_mmu && supported.radix_gtse) { + /* Radix preferred - but we require GTSE for now */ + prom_debug("Asking for radix with GTSE\n"); + ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX); + ibm_architecture_vec.vec5.radix_ext = OV5_FEAT(OV5_RADIX_GTSE); + } else if (supported.hash_mmu) { + /* Default to hash mmu (if we can) */ + prom_debug("Asking for hash\n"); + ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH); + } else { + /* We're probably on a legacy hypervisor */ + prom_debug("Assuming legacy hash support\n"); + } +} static void __init prom_send_capabilities(void) { @@ -997,6 +1100,9 @@ static void __init prom_send_capabilities(void) prom_arg_t ret; u32 cores; + /* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */ + prom_check_platform_support(); + root = call_prom("open", 1, 1, ADDR("/")); if (root != 0) { /* We need to tell the FW about the number of cores we support. 
@@ -2993,6 +3099,11 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, */ prom_check_initrd(r3, r4); + /* + * Do early parsing of command line + */ + early_cmdline_parse(); + #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) /* * On pSeries, inform the firmware about our capabilities @@ -3008,11 +3119,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, if (of_platform != PLATFORM_POWERMAC) copy_and_flush(0, kbase, 0x100, 0); - /* - * Do early parsing of command line - */ - early_cmdline_parse(); - /* * Initialize memory management within prom_init */ diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index adf2084f214b2b..9cfaa8b69b5f32 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -408,7 +408,10 @@ static void init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize, info->line_size = lsize; info->block_size = bsize; info->log_block_size = __ilog2(bsize); - info->blocks_per_page = PAGE_SIZE / bsize; + if (bsize) + info->blocks_per_page = PAGE_SIZE / bsize; + else + info->blocks_per_page = 0; if (sets == 0) info->assoc = 0xffff; diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 573fb3a461b5d7..46f89e66a273bc 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -19,7 +19,8 @@ #include #include -#include +#include +#include #include #include #include @@ -795,7 +796,7 @@ void __init smp_cpus_done(unsigned int max_cpus) * se we pin us down to CPU 0 for a short while */ alloc_cpumask_var(&old_mask, GFP_NOWAIT); - cpumask_copy(old_mask, tsk_cpus_allowed(current)); + cpumask_copy(old_mask, &current->cpus_allowed); set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid)); if (smp_ops && smp_ops->setup_cpu) diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c index 4f24606afc3f5e..66711958493cd3 100644 --- a/arch/powerpc/kernel/stacktrace.c +++ b/arch/powerpc/kernel/stacktrace.c @@ -12,6 +12,7 @@ #include #include +#include #include #include #include diff --git a/arch/powerpc/kernel/swsusp_64.c b/arch/powerpc/kernel/swsusp_64.c index 0e899e47c325b5..51db012808f5c5 100644 --- a/arch/powerpc/kernel/swsusp_64.c +++ b/arch/powerpc/kernel/swsusp_64.c @@ -10,6 +10,7 @@ #include #include #include +#include void do_after_copyback(void) { diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index bc84a8d47b9e88..07b90725855e3b 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -57,7 +58,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index e6cc56b61d0173..ff365f9de27a1c 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -17,6 +17,7 @@ #include #include +#include #include #include #include diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index f3158fb16de34b..8c68145ba1bd35 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -601,7 +601,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, hva, NULL, NULL); if (ptep) { pte = kvmppc_read_update_linux_pte(ptep, 1); - if (pte_write(pte)) + if (__pte_write(pte)) write_ok = 1; } local_irq_restore(flags); diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index 4344651f408ca2..f6b3e67c576294
100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -32,6 +32,7 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, u32 pid; int ret, level, ps; __be64 prte, rpte; + unsigned long ptbl; unsigned long root, pte, index; unsigned long rts, bits, offset; unsigned long gpa; @@ -53,8 +54,8 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, return -EINVAL; /* Read partition table to find root of tree for effective PID */ - ret = kvm_read_guest(kvm, kvm->arch.process_table + pid * 16, - &prte, sizeof(prte)); + ptbl = (kvm->arch.process_table & PRTB_MASK) + (pid * 16); + ret = kvm_read_guest(kvm, ptbl, &prte, sizeof(prte)); if (ret) return ret; diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index ab9d14c0e4609a..3e26cd4979f936 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 1e107ece4e3701..1ec86d9e2a82a3 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -22,7 +22,8 @@ #include #include #include -#include +#include +#include #include #include #include diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index 6fca970373ee90..ce6f2121fffe46 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -256,7 +256,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, } pte = kvmppc_read_update_linux_pte(ptep, writing); if (pte_present(pte) && !pte_protnone(pte)) { - if (writing && !pte_write(pte)) + if (writing && !__pte_write(pte)) /* make the actual HPTE be read-only */ ptel = hpte_make_readonly(ptel); is_ci = pte_ci(pte); diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 47414a6fe2dde8..7c6477d1840aab 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1787,12 +1787,12 @@ kvmppc_hdsi: /* HPTE not found fault or protection fault? */ andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h beq 1f /* if not, send it to the guest */ + andi. r0, r11, MSR_DR /* data relocation enabled? */ + beq 3f BEGIN_FTR_SECTION mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */ b 4f END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) - andi. r0, r11, MSR_DR /* data relocation enabled? */ - beq 3f clrrdi r0, r4, 28 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */ li r0, BOOK3S_INTERRUPT_DATA_SEGMENT @@ -1879,12 +1879,12 @@ kvmppc_hisi: bne .Lradix_hisi /* for radix, just save ASDR */ andis. r0, r11, SRR1_ISI_NOPT@h beq 1f + andi. r0, r11, MSR_IR /* instruction relocation enabled? */ + beq 3f BEGIN_FTR_SECTION mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */ b 4f END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) - andi. r0, r11, MSR_IR /* instruction relocation enabled? 
*/ - beq 3f clrrdi r0, r10, 28 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */ li r0, BOOK3S_INTERRUPT_INST_SEGMENT diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c index 5a1ab1250a056f..905a934c1ef469 100644 --- a/arch/powerpc/kvm/book3s_mmu_hpte.c +++ b/arch/powerpc/kvm/book3s_mmu_hpte.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index b0333cc737dd67..0fda4230f6c0f8 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -25,7 +25,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 2b38d824e9e5fa..95c91a9de351c4 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index 0e649d72fe8d0d..2b5e09020cfe37 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -20,6 +20,7 @@ obj64-y += copypage_64.o copyuser_64.o usercopy_64.o mem_64.o hweight_64.o \ obj64-$(CONFIG_SMP) += locks.o obj64-$(CONFIG_ALTIVEC) += vmx-helper.o +obj64-$(CONFIG_KPROBES_SANITY_TEST) += test_emulate_step.o obj-y += checksum_$(BITS).o checksum_wrappers.o diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 043415f0bdb164..f3917705c686cb 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index 846dba2c636000..9c542ec70c5bc8 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -1799,8 +1799,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) goto instr_done; case LARX: - if (regs->msr & MSR_LE) - return 0; if (op.ea & (size - 1)) break; /* can't handle misaligned */ if (!address_ok(regs, op.ea, size)) @@ -1823,8 +1821,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) goto ldst_done; case STCX: - if (regs->msr & MSR_LE) - return 0; if (op.ea & (size - 1)) break; /* can't handle misaligned */ if (!address_ok(regs, op.ea, size)) @@ -1849,8 +1845,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) goto ldst_done; case LOAD: - if (regs->msr & MSR_LE) - return 0; err = read_mem(&regs->gpr[op.reg], op.ea, size, regs); if (!err) { if (op.type & SIGNEXT) @@ -1862,8 +1856,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) #ifdef CONFIG_PPC_FPU case LOAD_FP: - if (regs->msr & MSR_LE) - return 0; if (size == 4) err = do_fp_load(op.reg, do_lfs, op.ea, size, regs); else @@ -1872,15 +1864,11 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) #endif #ifdef CONFIG_ALTIVEC case LOAD_VMX: - if (regs->msr & MSR_LE) - return 0; err = do_vec_load(op.reg, do_lvx, op.ea & ~0xfUL, regs); goto ldst_done; #endif #ifdef CONFIG_VSX case LOAD_VSX: - if (regs->msr & MSR_LE) - return 0; err = do_vsx_load(op.reg, do_lxvd2x, op.ea, regs); goto ldst_done; #endif @@ -1903,8 +1891,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) goto instr_done; case STORE: - if (regs->msr & MSR_LE) - return 0; if ((op.type & UPDATE) && size == sizeof(long) && op.reg == 1 && op.update_reg == 1 && !(regs->msr & MSR_PR) && @@ -1917,8 +1903,6 @@ int
__kprobes emulate_step(struct pt_regs *regs, unsigned int instr) #ifdef CONFIG_PPC_FPU case STORE_FP: - if (regs->msr & MSR_LE) - return 0; if (size == 4) err = do_fp_store(op.reg, do_stfs, op.ea, size, regs); else @@ -1927,15 +1911,11 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) #endif #ifdef CONFIG_ALTIVEC case STORE_VMX: - if (regs->msr & MSR_LE) - return 0; err = do_vec_store(op.reg, do_stvx, op.ea & ~0xfUL, regs); goto ldst_done; #endif #ifdef CONFIG_VSX case STORE_VSX: - if (regs->msr & MSR_LE) - return 0; err = do_vsx_store(op.reg, do_stxvd2x, op.ea, regs); goto ldst_done; #endif diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c new file mode 100644 index 00000000000000..2534c14475546a --- /dev/null +++ b/arch/powerpc/lib/test_emulate_step.c @@ -0,0 +1,434 @@ +/* + * Simple sanity test for emulate_step load/store instructions. + * + * Copyright IBM Corp. 2016 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#define pr_fmt(fmt) "emulate_step_test: " fmt + +#include +#include +#include + +#define IMM_L(i) ((uintptr_t)(i) & 0xffff) + +/* + * Defined with TEST_ prefix so it does not conflict with other + * definitions. + */ +#define TEST_LD(r, base, i) (PPC_INST_LD | ___PPC_RT(r) | \ + ___PPC_RA(base) | IMM_L(i)) +#define TEST_LWZ(r, base, i) (PPC_INST_LWZ | ___PPC_RT(r) | \ + ___PPC_RA(base) | IMM_L(i)) +#define TEST_LWZX(t, a, b) (PPC_INST_LWZX | ___PPC_RT(t) | \ + ___PPC_RA(a) | ___PPC_RB(b)) +#define TEST_STD(r, base, i) (PPC_INST_STD | ___PPC_RS(r) | \ + ___PPC_RA(base) | ((i) & 0xfffc)) +#define TEST_LDARX(t, a, b, eh) (PPC_INST_LDARX | ___PPC_RT(t) | \ + ___PPC_RA(a) | ___PPC_RB(b) | \ + __PPC_EH(eh)) +#define TEST_STDCX(s, a, b) (PPC_INST_STDCX | ___PPC_RS(s) | \ + ___PPC_RA(a) | ___PPC_RB(b)) +#define TEST_LFSX(t, a, b) (PPC_INST_LFSX | ___PPC_RT(t) | \ + ___PPC_RA(a) | ___PPC_RB(b)) +#define TEST_STFSX(s, a, b) (PPC_INST_STFSX | ___PPC_RS(s) | \ + ___PPC_RA(a) | ___PPC_RB(b)) +#define TEST_LFDX(t, a, b) (PPC_INST_LFDX | ___PPC_RT(t) | \ + ___PPC_RA(a) | ___PPC_RB(b)) +#define TEST_STFDX(s, a, b) (PPC_INST_STFDX | ___PPC_RS(s) | \ + ___PPC_RA(a) | ___PPC_RB(b)) +#define TEST_LVX(t, a, b) (PPC_INST_LVX | ___PPC_RT(t) | \ + ___PPC_RA(a) | ___PPC_RB(b)) +#define TEST_STVX(s, a, b) (PPC_INST_STVX | ___PPC_RS(s) | \ + ___PPC_RA(a) | ___PPC_RB(b)) +#define TEST_LXVD2X(s, a, b) (PPC_INST_LXVD2X | VSX_XX1((s), R##a, R##b)) +#define TEST_STXVD2X(s, a, b) (PPC_INST_STXVD2X | VSX_XX1((s), R##a, R##b)) + + +static void __init init_pt_regs(struct pt_regs *regs) +{ + static unsigned long msr; + static bool msr_cached; + + memset(regs, 0, sizeof(struct pt_regs)); + + if (likely(msr_cached)) { + regs->msr = msr; + return; + } + + asm volatile("mfmsr %0" : "=r"(regs->msr)); + + regs->msr |= MSR_FP; + regs->msr |= MSR_VEC; + regs->msr |= MSR_VSX; + + msr = regs->msr; + msr_cached = true; +} + +static void __init show_result(char *ins, char *result) +{ + pr_info("%-14s : %s\n", ins, result); +} + +static void __init test_ld(void) +{ + struct pt_regs regs; + unsigned long a = 0x23; + int stepped = -1; + + init_pt_regs(&regs); + regs.gpr[3] = (unsigned long) &a; + + /* ld r5, 0(r3) */ + stepped = emulate_step(&regs, TEST_LD(5, 3, 0)); + + if (stepped == 1 && regs.gpr[5] == a) + show_result("ld", "PASS"); + else +
show_result("ld", "FAIL"); +} + +static void __init test_lwz(void) +{ + struct pt_regs regs; + unsigned int a = 0x4545; + int stepped = -1; + + init_pt_regs(®s); + regs.gpr[3] = (unsigned long) &a; + + /* lwz r5, 0(r3) */ + stepped = emulate_step(®s, TEST_LWZ(5, 3, 0)); + + if (stepped == 1 && regs.gpr[5] == a) + show_result("lwz", "PASS"); + else + show_result("lwz", "FAIL"); +} + +static void __init test_lwzx(void) +{ + struct pt_regs regs; + unsigned int a[3] = {0x0, 0x0, 0x1234}; + int stepped = -1; + + init_pt_regs(®s); + regs.gpr[3] = (unsigned long) a; + regs.gpr[4] = 8; + regs.gpr[5] = 0x8765; + + /* lwzx r5, r3, r4 */ + stepped = emulate_step(®s, TEST_LWZX(5, 3, 4)); + if (stepped == 1 && regs.gpr[5] == a[2]) + show_result("lwzx", "PASS"); + else + show_result("lwzx", "FAIL"); +} + +static void __init test_std(void) +{ + struct pt_regs regs; + unsigned long a = 0x1234; + int stepped = -1; + + init_pt_regs(®s); + regs.gpr[3] = (unsigned long) &a; + regs.gpr[5] = 0x5678; + + /* std r5, 0(r3) */ + stepped = emulate_step(®s, TEST_STD(5, 3, 0)); + if (stepped == 1 || regs.gpr[5] == a) + show_result("std", "PASS"); + else + show_result("std", "FAIL"); +} + +static void __init test_ldarx_stdcx(void) +{ + struct pt_regs regs; + unsigned long a = 0x1234; + int stepped = -1; + unsigned long cr0_eq = 0x1 << 29; /* eq bit of CR0 */ + + init_pt_regs(®s); + asm volatile("mfcr %0" : "=r"(regs.ccr)); + + + /*** ldarx ***/ + + regs.gpr[3] = (unsigned long) &a; + regs.gpr[4] = 0; + regs.gpr[5] = 0x5678; + + /* ldarx r5, r3, r4, 0 */ + stepped = emulate_step(®s, TEST_LDARX(5, 3, 4, 0)); + + /* + * Don't touch 'a' here. Touching 'a' can do Load/store + * of 'a' which result in failure of subsequent stdcx. + * Instead, use hardcoded value for comparison. + */ + if (stepped <= 0 || regs.gpr[5] != 0x1234) { + show_result("ldarx / stdcx.", "FAIL (ldarx)"); + return; + } + + + /*** stdcx. ***/ + + regs.gpr[5] = 0x9ABC; + + /* stdcx. r5, r3, r4 */ + stepped = emulate_step(®s, TEST_STDCX(5, 3, 4)); + + /* + * Two possible scenarios that indicates successful emulation + * of stdcx. : + * 1. Reservation is active and store is performed. In this + * case cr0.eq bit will be set to 1. + * 2. Reservation is not active and store is not performed. + * In this case cr0.eq bit will be set to 0. 
+ */ + if (stepped == 1 && ((regs.gpr[5] == a && (regs.ccr & cr0_eq)) + || (regs.gpr[5] != a && !(regs.ccr & cr0_eq)))) + show_result("ldarx / stdcx.", "PASS"); + else + show_result("ldarx / stdcx.", "FAIL (stdcx.)"); +} + +#ifdef CONFIG_PPC_FPU +static void __init test_lfsx_stfsx(void) +{ + struct pt_regs regs; + union { + float a; + int b; + } c; + int cached_b; + int stepped = -1; + + init_pt_regs(&regs); + + + /*** lfsx ***/ + + c.a = 123.45; + cached_b = c.b; + + regs.gpr[3] = (unsigned long) &c.a; + regs.gpr[4] = 0; + + /* lfsx frt10, r3, r4 */ + stepped = emulate_step(&regs, TEST_LFSX(10, 3, 4)); + + if (stepped == 1) + show_result("lfsx", "PASS"); + else + show_result("lfsx", "FAIL"); + + + /*** stfsx ***/ + + c.a = 678.91; + + /* stfsx frs10, r3, r4 */ + stepped = emulate_step(&regs, TEST_STFSX(10, 3, 4)); + + if (stepped == 1 && c.b == cached_b) + show_result("stfsx", "PASS"); + else + show_result("stfsx", "FAIL"); +} + +static void __init test_lfdx_stfdx(void) +{ + struct pt_regs regs; + union { + double a; + long b; + } c; + long cached_b; + int stepped = -1; + + init_pt_regs(&regs); + + + /*** lfdx ***/ + + c.a = 123456.78; + cached_b = c.b; + + regs.gpr[3] = (unsigned long) &c.a; + regs.gpr[4] = 0; + + /* lfdx frt10, r3, r4 */ + stepped = emulate_step(&regs, TEST_LFDX(10, 3, 4)); + + if (stepped == 1) + show_result("lfdx", "PASS"); + else + show_result("lfdx", "FAIL"); + + + /*** stfdx ***/ + + c.a = 987654.32; + + /* stfdx frs10, r3, r4 */ + stepped = emulate_step(&regs, TEST_STFDX(10, 3, 4)); + + if (stepped == 1 && c.b == cached_b) + show_result("stfdx", "PASS"); + else + show_result("stfdx", "FAIL"); +} +#else +static void __init test_lfsx_stfsx(void) +{ + show_result("lfsx", "SKIP (CONFIG_PPC_FPU is not set)"); + show_result("stfsx", "SKIP (CONFIG_PPC_FPU is not set)"); +} + +static void __init test_lfdx_stfdx(void) +{ + show_result("lfdx", "SKIP (CONFIG_PPC_FPU is not set)"); + show_result("stfdx", "SKIP (CONFIG_PPC_FPU is not set)"); +} +#endif /* CONFIG_PPC_FPU */ + +#ifdef CONFIG_ALTIVEC +static void __init test_lvx_stvx(void) +{ + struct pt_regs regs; + union { + vector128 a; + u32 b[4]; + } c; + u32 cached_b[4]; + int stepped = -1; + + init_pt_regs(&regs); + + + /*** lvx ***/ + + cached_b[0] = c.b[0] = 923745; + cached_b[1] = c.b[1] = 2139478; + cached_b[2] = c.b[2] = 9012; + cached_b[3] = c.b[3] = 982134; + + regs.gpr[3] = (unsigned long) &c.a; + regs.gpr[4] = 0; + + /* lvx vrt10, r3, r4 */ + stepped = emulate_step(&regs, TEST_LVX(10, 3, 4)); + + if (stepped == 1) + show_result("lvx", "PASS"); + else + show_result("lvx", "FAIL"); + + + /*** stvx ***/ + + c.b[0] = 4987513; + c.b[1] = 84313948; + c.b[2] = 71; + c.b[3] = 498532; + + /* stvx vrs10, r3, r4 */ + stepped = emulate_step(&regs, TEST_STVX(10, 3, 4)); + + if (stepped == 1 && cached_b[0] == c.b[0] && cached_b[1] == c.b[1] && + cached_b[2] == c.b[2] && cached_b[3] == c.b[3]) + show_result("stvx", "PASS"); + else + show_result("stvx", "FAIL"); +} +#else +static void __init test_lvx_stvx(void) +{ + show_result("lvx", "SKIP (CONFIG_ALTIVEC is not set)"); + show_result("stvx", "SKIP (CONFIG_ALTIVEC is not set)"); +} +#endif /* CONFIG_ALTIVEC */ + +#ifdef CONFIG_VSX +static void __init test_lxvd2x_stxvd2x(void) +{ + struct pt_regs regs; + union { + vector128 a; + u32 b[4]; + } c; + u32 cached_b[4]; + int stepped = -1; + + init_pt_regs(&regs); + + + /*** lxvd2x ***/ + + cached_b[0] = c.b[0] = 18233; + cached_b[1] = c.b[1] = 34863571; + cached_b[2] = c.b[2] = 834; + cached_b[3] = c.b[3] = 6138911; + + regs.gpr[3] = (unsigned long) &c.a; +
regs.gpr[4] = 0; + + /* lxvd2x vsr39, r3, r4 */ + stepped = emulate_step(&regs, TEST_LXVD2X(39, 3, 4)); + + if (stepped == 1) + show_result("lxvd2x", "PASS"); + else + show_result("lxvd2x", "FAIL"); + + + /*** stxvd2x ***/ + + c.b[0] = 21379463; + c.b[1] = 87; + c.b[2] = 374234; + c.b[3] = 4; + + /* stxvd2x vsr39, r3, r4 */ + stepped = emulate_step(&regs, TEST_STXVD2X(39, 3, 4)); + + if (stepped == 1 && cached_b[0] == c.b[0] && cached_b[1] == c.b[1] && + cached_b[2] == c.b[2] && cached_b[3] == c.b[3]) + show_result("stxvd2x", "PASS"); + else + show_result("stxvd2x", "FAIL"); +} +#else +static void __init test_lxvd2x_stxvd2x(void) +{ + show_result("lxvd2x", "SKIP (CONFIG_VSX is not set)"); + show_result("stxvd2x", "SKIP (CONFIG_VSX is not set)"); +} +#endif /* CONFIG_VSX */ + +static int __init test_emulate_step(void) +{ + test_ld(); + test_lwz(); + test_lwzx(); + test_std(); + test_ldarx_stdcx(); + test_lfsx_stfsx(); + test_lfdx_stfdx(); + test_lvx_stvx(); + test_lxvd2x_stxvd2x(); + + return 0; +} +late_initcall(test_emulate_step); diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 8dc7586589727d..51def8a515be81 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -17,6 +17,7 @@ #include #include +#include #include #include #include diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 12d679df50bd17..c554768b1fa2d4 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -23,7 +23,7 @@ #include #include -#include +#include #include #include #include diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 6aa3b76aa0d66b..c22f207aa6564b 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -356,25 +356,48 @@ static void early_check_vec5(void) unsigned long root, chosen; int size; const u8 *vec5; + u8 mmu_supported; root = of_get_flat_dt_root(); chosen = of_get_flat_dt_subnode_by_name(root, "chosen"); - if (chosen == -FDT_ERR_NOTFOUND) + if (chosen == -FDT_ERR_NOTFOUND) { + cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; return; + } vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size); - if (!vec5) + if (!vec5) { + cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; return; - if (size <= OV5_INDX(OV5_MMU_RADIX_300) || - !(vec5[OV5_INDX(OV5_MMU_RADIX_300)] & OV5_FEAT(OV5_MMU_RADIX_300))) - /* Hypervisor doesn't support radix */ + } + if (size <= OV5_INDX(OV5_MMU_SUPPORT)) { cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; + return; + } + + /* Check for supported configuration */ + mmu_supported = vec5[OV5_INDX(OV5_MMU_SUPPORT)] & + OV5_FEAT(OV5_MMU_SUPPORT); + if (mmu_supported == OV5_FEAT(OV5_MMU_RADIX)) { + /* Hypervisor only supports radix - check enabled && GTSE */ + if (!early_radix_enabled()) { + pr_warn("WARNING: Ignoring cmdline option disable_radix\n"); + } + if (!(vec5[OV5_INDX(OV5_RADIX_GTSE)] & + OV5_FEAT(OV5_RADIX_GTSE))) { + pr_warn("WARNING: Hypervisor doesn't support RADIX with GTSE\n"); + } + /* Do radix anyway - the hypervisor said we had to */ + cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX; + } else if (mmu_supported == OV5_FEAT(OV5_MMU_HASH)) { + /* Hypervisor only supports hash - disable radix */ + cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; + } } void __init mmu_early_init_devtree(void) { /* Disable radix mode based on kernel command line. */ - /* We don't yet have the machinery to do radix as a guest.
*/ - if (disable_radix || !(mfmsr() & MSR_HV)) + if (disable_radix) cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; /* @@ -383,7 +406,7 @@ void __init mmu_early_init_devtree(void) * even though the ibm,architecture-vec-5 property created by * skiboot doesn't have the necessary bits set. */ - if (early_radix_enabled() && !(mfmsr() & MSR_HV)) + if (!(mfmsr() & MSR_HV)) early_check_vec5(); if (early_radix_enabled()) diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c index 2f1e44362198d3..a5d9ef59debe25 100644 --- a/arch/powerpc/mm/mmap.c +++ b/arch/powerpc/mm/mmap.c @@ -25,7 +25,8 @@ #include #include #include -#include +#include +#include #include #include #include diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c index 7de7124ac91bf9..497130c5c74203 100644 --- a/arch/powerpc/mm/mmu_context_iommu.c +++ b/arch/powerpc/mm/mmu_context_iommu.c @@ -10,7 +10,7 @@ * */ -#include +#include #include #include #include diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c index b798ff674fabd5..5fcb3dd74c139b 100644 --- a/arch/powerpc/mm/pgtable-book3s64.c +++ b/arch/powerpc/mm/pgtable-book3s64.c @@ -8,6 +8,8 @@ */ #include +#include + #include #include diff --git a/arch/powerpc/mm/pgtable-hash64.c b/arch/powerpc/mm/pgtable-hash64.c index c23e286a6b8ff8..8b85a14b08eaa5 100644 --- a/arch/powerpc/mm/pgtable-hash64.c +++ b/arch/powerpc/mm/pgtable-hash64.c @@ -10,6 +10,8 @@ */ #include +#include + #include #include diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index feeda90cd06d5f..c28165d8970b64 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c @@ -8,7 +8,7 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. 
*/ -#include +#include #include #include @@ -186,6 +186,10 @@ static void __init radix_init_pgtable(void) */ register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12); pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd); + asm volatile("ptesync" : : : "memory"); + asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : : + "r" (TLBIEL_INVAL_SET_LPID), "r" (0)); + asm volatile("eieio; tlbsync; ptesync" : : : "memory"); } static void __init radix_init_partition_table(void) diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c index 48fc28bab54477..5e01b2ece1d016 100644 --- a/arch/powerpc/mm/slb.c +++ b/arch/powerpc/mm/slb.c @@ -22,6 +22,8 @@ #include #include #include +#include + #include #include diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 595dd718ea8718..2ff13249f87a61 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -188,6 +188,8 @@ static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) sdsync = POWER7P_MMCRA_SDAR_VALID; else if (ppmu->flags & PPMU_ALT_SIPR) sdsync = POWER6_MMCRA_SDSYNC; + else if (ppmu->flags & PPMU_NO_SIAR) + sdsync = MMCRA_SAMPLE_ENABLE; else sdsync = MMCRA_SDSYNC; diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c index e79fb5fb817dbe..cd951fd231c404 100644 --- a/arch/powerpc/perf/isa207-common.c +++ b/arch/powerpc/perf/isa207-common.c @@ -65,12 +65,41 @@ static bool is_event_valid(u64 event) return !(event & ~valid_mask); } -static u64 mmcra_sdar_mode(u64 event) +static inline bool is_event_marked(u64 event) { - if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1)) - return p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT; + if (event & EVENT_IS_MARKED) + return true; + + return false; +} - return MMCRA_SDAR_MODE_TLB; +static void mmcra_sdar_mode(u64 event, unsigned long *mmcra) +{ + /* + * MMCRA[SDAR_MODE] specifies how the SDAR should be updated in + * continuous sampling mode. + * + * In case of Power8: + * MMCRA[SDAR_MODE] will be programmed as "0b01" for continuous sampling + * mode and will be unchanged when setting MMCRA[63] (Marked events). + * + * In case of Power9: + * Marked events (or a group that already has a marked event): + * MMCRA[SDAR_MODE] will be set to 0b00 ('No Updates'). + * Non-Marked events (for DD1): + * MMCRA[SDAR_MODE] will be set to 0b01. + * For the rest: + * MMCRA[SDAR_MODE] will be set from the event code.
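+	 *
+	 * A sketch of the resulting decision, given as an illustration of
+	 * the rules above rather than as part of the original comment:
+	 *
+	 *   if (P9 && (marked event || group already has one))
+	 *           SDAR_MODE = 0b00 (no updates);
+	 *   else if (P9 && !DD1)
+	 *           SDAR_MODE = taken from the event code;
+	 *   else
+	 *           SDAR_MODE = 0b01 (update on TLB miss);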
+ */ + if (cpu_has_feature(CPU_FTR_ARCH_300)) { + if (is_event_marked(event) || (*mmcra & MMCRA_SAMPLE_ENABLE)) + *mmcra &= MMCRA_SDAR_MODE_NO_UPDATES; + else if (!cpu_has_feature(CPU_FTR_POWER9_DD1)) + *mmcra |= p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT; + else if (cpu_has_feature(CPU_FTR_POWER9_DD1)) + *mmcra |= MMCRA_SDAR_MODE_TLB; + } else + *mmcra |= MMCRA_SDAR_MODE_TLB; } static u64 thresh_cmp_val(u64 value) @@ -180,7 +209,7 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp) value |= CNST_L1_QUAL_VAL(cache); } - if (event & EVENT_IS_MARKED) { + if (is_event_marked(event)) { mask |= CNST_SAMPLE_MASK; value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT); } @@ -276,7 +305,7 @@ int isa207_compute_mmcr(u64 event[], int n_ev, } /* In continuous sampling mode, update SDAR on TLB miss */ - mmcra |= mmcra_sdar_mode(event[i]); + mmcra_sdar_mode(event[i], &mmcra); if (event[i] & EVENT_IS_L1) { cache = event[i] >> EVENT_CACHE_SEL_SHIFT; @@ -285,7 +314,7 @@ int isa207_compute_mmcr(u64 event[], int n_ev, mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT; } - if (event[i] & EVENT_IS_MARKED) { + if (is_event_marked(event[i])) { mmcra |= MMCRA_SAMPLE_ENABLE; val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK; diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h index cf9bd89901595c..899210f14ee432 100644 --- a/arch/powerpc/perf/isa207-common.h +++ b/arch/powerpc/perf/isa207-common.h @@ -246,6 +246,7 @@ #define MMCRA_THR_CMP_SHIFT 32 #define MMCRA_SDAR_MODE_SHIFT 42 #define MMCRA_SDAR_MODE_TLB (1ull << MMCRA_SDAR_MODE_SHIFT) +#define MMCRA_SDAR_MODE_NO_UPDATES ~(0x3ull << MMCRA_SDAR_MODE_SHIFT) #define MMCRA_IFM_SHIFT 30 /* MMCR1 Threshold Compare bit constant for power9 */ diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c index d24a8a3668fac5..cbd82fde57702e 100644 --- a/arch/powerpc/perf/perf_regs.c +++ b/arch/powerpc/perf/perf_regs.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c index 08f92f6ed228f3..978b85bb3233e5 100644 --- a/arch/powerpc/platforms/83xx/suspend.c +++ b/arch/powerpc/platforms/83xx/suspend.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c index a83a6d26090d1d..078097a0b09d47 100644 --- a/arch/powerpc/platforms/85xx/smp.c +++ b/arch/powerpc/platforms/85xx/smp.c @@ -12,6 +12,7 @@ #include #include +#include #include #include #include diff --git a/arch/powerpc/platforms/cell/cpufreq_spudemand.c b/arch/powerpc/platforms/cell/cpufreq_spudemand.c index 88301e53f0856b..882944c36ef571 100644 --- a/arch/powerpc/platforms/cell/cpufreq_spudemand.c +++ b/arch/powerpc/platforms/cell/cpufreq_spudemand.c @@ -22,6 +22,7 @@ #include #include +#include #include #include #include diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c index 3b4152faeb1fc4..b500b17254a00d 100644 --- a/arch/powerpc/platforms/cell/spufs/context.c +++ b/arch/powerpc/platforms/cell/spufs/context.c @@ -25,6 +25,8 @@ #include #include #include +#include + #include #include #include "spufs.h" diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c index e29e4d5afa2ddd..870c0a82d560de 100644 --- a/arch/powerpc/platforms/cell/spufs/fault.c +++ b/arch/powerpc/platforms/cell/spufs/fault.c @@ -19,7 +19,7 @@ * 
along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ -#include +#include #include #include diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 460f5f31d5cb01..1fbb5da17dd27f 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -23,7 +23,8 @@ #undef DEBUG #include -#include +#include +#include #include #include #include @@ -140,7 +141,7 @@ void __spu_update_sched_info(struct spu_context *ctx) * runqueue. The context will be rescheduled on the proper node * if it is timesliced or preempted. */ - cpumask_copy(&ctx->cpus_allowed, tsk_cpus_allowed(current)); + cpumask_copy(&ctx->cpus_allowed, &current->cpus_allowed); /* Save the current cpu id for spu interrupt routing. */ ctx->last_ran = raw_smp_processor_id(); diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h index aac7339660923d..5e59f80e95dbee 100644 --- a/arch/powerpc/platforms/cell/spufs/spufs.h +++ b/arch/powerpc/platforms/cell/spufs/spufs.h @@ -27,6 +27,7 @@ #include #include #include +#include #include #include diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index c9eb7d6540eaeb..746ca7321b03c0 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c @@ -23,6 +23,7 @@ */ #include #include +#include #include #include #include diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S index 6693f75e93d162..da8a0f7a035c10 100644 --- a/arch/powerpc/platforms/powernv/opal-wrappers.S +++ b/arch/powerpc/platforms/powernv/opal-wrappers.S @@ -39,8 +39,8 @@ opal_tracepoint_refcount: BEGIN_FTR_SECTION; \ b 1f; \ END_FTR_SECTION(0, 1); \ - ld r12,opal_tracepoint_refcount@toc(r2); \ - cmpdi r12,0; \ + ld r11,opal_tracepoint_refcount@toc(r2); \ + cmpdi r11,0; \ bne- LABEL; \ 1: diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index 86d9fde93c175f..e0f856bfbfe8f3 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -395,7 +395,6 @@ static int opal_recover_mce(struct pt_regs *regs, struct machine_check_event *evt) { int recovered = 0; - uint64_t ea = get_mce_fault_addr(evt); if (!(regs->msr & MSR_RI)) { /* If MSR_RI isn't set, we cannot recover */ @@ -404,26 +403,18 @@ static int opal_recover_mce(struct pt_regs *regs, } else if (evt->disposition == MCE_DISPOSITION_RECOVERED) { /* Platform corrected itself */ recovered = 1; - } else if (ea && !is_kernel_addr(ea)) { + } else if (evt->severity == MCE_SEV_FATAL) { + /* Fatal machine check */ + pr_err("Machine check interrupt is fatal\n"); + recovered = 0; + } else if ((evt->severity == MCE_SEV_ERROR_SYNC) && + (user_mode(regs) && !is_global_init(current))) { /* - * Faulting address is not in kernel text. We should be fine. - * We need to find which process uses this address. * For now, kill the task if we have received exception when * in userspace. * * TODO: Queue up this address for hwpoisioning later. */ - if (user_mode(regs) && !is_global_init(current)) { - _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip); - recovered = 1; - } else - recovered = 0; - } else if (user_mode(regs) && !is_global_init(current) && - evt->severity == MCE_SEV_ERROR_SYNC) { - /* - * If we have received a synchronous error when in userspace - * kill the task.
- */ _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip); recovered = 1; } diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 6901a06da2f90b..e36738291c3205 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -1775,17 +1775,20 @@ static u64 pnv_pci_ioda_dma_get_required_mask(struct pci_dev *pdev) } static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, - struct pci_bus *bus) + struct pci_bus *bus, + bool add_to_group) { struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { set_iommu_table_base(&dev->dev, pe->table_group.tables[0]); set_dma_offset(&dev->dev, pe->tce_bypass_base); - iommu_add_device(&dev->dev); + if (add_to_group) + iommu_add_device(&dev->dev); if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate) - pnv_ioda_setup_bus_dma(pe, dev->subordinate); + pnv_ioda_setup_bus_dma(pe, dev->subordinate, + add_to_group); } } @@ -2191,7 +2194,7 @@ static void pnv_pci_ioda1_setup_dma_pe(struct pnv_phb *phb, set_iommu_table_base(&pe->pdev->dev, tbl); iommu_add_device(&pe->pdev->dev); } else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) - pnv_ioda_setup_bus_dma(pe, pe->pbus); + pnv_ioda_setup_bus_dma(pe, pe->pbus, true); return; fail: @@ -2426,6 +2429,8 @@ static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group) pnv_pci_ioda2_set_bypass(pe, false); pnv_pci_ioda2_unset_window(&pe->table_group, 0); + if (pe->pbus) + pnv_ioda_setup_bus_dma(pe, pe->pbus, false); pnv_ioda2_table_free(tbl); } @@ -2435,6 +2440,8 @@ static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group) table_group); pnv_pci_ioda2_setup_default_config(pe); + if (pe->pbus) + pnv_ioda_setup_bus_dma(pe, pe->pbus, false); } static struct iommu_table_group_ops pnv_pci_ioda2_ops = { @@ -2624,6 +2631,9 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset, level_shift = entries_shift + 3; level_shift = max_t(unsigned, level_shift, PAGE_SHIFT); + if ((level_shift - 3) * levels + page_shift >= 60) + return -EINVAL; + /* Allocate TCE table */ addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift, levels, tce_table_size, &offset, &total_allocated); @@ -2728,7 +2738,7 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, if (pe->flags & PNV_IODA_PE_DEV) iommu_add_device(&pe->pdev->dev); else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) - pnv_ioda_setup_bus_dma(pe, pe->pbus); + pnv_ioda_setup_bus_dma(pe, pe->pbus, true); } #ifdef CONFIG_PCI_MSI diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c index e39e6c428af1e4..8b67e1eefb5c0a 100644 --- a/arch/powerpc/platforms/powernv/smp.c +++ b/arch/powerpc/platforms/powernv/smp.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index a1b63e00b2f7d4..7bc0e91f871544 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -24,6 +24,7 @@ #include #include #include /* for idle_task_exit */ +#include #include #include #include diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 251060cf171364..8b1fe895daa3f0 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -751,7 +751,9 @@ void __init hpte_init_pseries(void) mmu_hash_ops.flush_hash_range = 
pSeries_lpar_flush_hash_range; mmu_hash_ops.hpte_clear_all = pseries_hpte_clear_all; mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate; - mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt; + + if (firmware_has_feature(FW_FEATURE_HPT_RESIZE)) + mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt; } void radix_init_pseries(void) diff --git a/arch/powerpc/purgatory/trampoline.S b/arch/powerpc/purgatory/trampoline.S index f9760ccf403236..3696ea6c4826b9 100644 --- a/arch/powerpc/purgatory/trampoline.S +++ b/arch/powerpc/purgatory/trampoline.S @@ -116,13 +116,13 @@ dt_offset: .data .balign 8 -.globl sha256_digest -sha256_digest: +.globl purgatory_sha256_digest +purgatory_sha256_digest: .skip 32 - .size sha256_digest, . - sha256_digest + .size purgatory_sha256_digest, . - purgatory_sha256_digest .balign 8 -.globl sha_regions -sha_regions: +.globl purgatory_sha_regions +purgatory_sha_regions: .skip 8 * 2 * 16 - .size sha_regions, . - sha_regions + .size purgatory_sha_regions, . - purgatory_sha_regions diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c index ada29eaed6e280..f523ac88315070 100644 --- a/arch/powerpc/sysdev/axonram.c +++ b/arch/powerpc/sysdev/axonram.c @@ -274,7 +274,9 @@ static int axon_ram_probe(struct platform_device *device) if (bank->disk->major > 0) unregister_blkdev(bank->disk->major, bank->disk->disk_name); - del_gendisk(bank->disk); + if (bank->disk->flags & GENHD_FL_UP) + del_gendisk(bank->disk); + put_disk(bank->disk); } device->dev.platform_data = NULL; if (bank->io_addr != 0) @@ -299,6 +301,7 @@ axon_ram_remove(struct platform_device *device) device_remove_file(&device->dev, &dev_attr_ecc); free_irq(bank->irq_id, device); del_gendisk(bank->disk); + put_disk(bank->disk); iounmap((void __iomem *) bank->io_addr); kfree(bank); diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c index f9670eabfcfa70..b53f80f0b4d822 100644 --- a/arch/powerpc/sysdev/xics/icp-opal.c +++ b/arch/powerpc/sysdev/xics/icp-opal.c @@ -91,6 +91,16 @@ static unsigned int icp_opal_get_irq(void) static void icp_opal_set_cpu_priority(unsigned char cppr) { + /* + * Here be dragons. The caller has asked to allow only IPIs and not + * external interrupts. But OPAL XIVE doesn't support that. So instead + * of allowing no interrupts, allow all. That's still not right, but + * currently the only caller that does this is xics_migrate_irqs_away() + * and it works in that case. + */ + if (cppr >= DEFAULT_PRIORITY) + cppr = LOWEST_PRIORITY; + xics_set_base_cppr(cppr); opal_int_set_cppr(cppr); iosync(); diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c index 69d858e51ac76f..23efe4e4217221 100644 --- a/arch/powerpc/sysdev/xics/xics-common.c +++ b/arch/powerpc/sysdev/xics/xics-common.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -198,9 +199,6 @@ void xics_migrate_irqs_away(void) /* Remove ourselves from the global interrupt queue */ xics_set_cpu_giq(xics_default_distrib_server, 0); - /* Allow IPIs again... */ - icp_ops->set_priority(DEFAULT_PRIORITY); - for_each_irq_desc(virq, desc) { struct irq_chip *chip; long server; @@ -255,6 +253,19 @@ void xics_migrate_irqs_away(void) unlock: raw_spin_unlock_irqrestore(&desc->lock, flags); } + + /* Allow "sufficient" time to drop any in-flight IRQs */ + mdelay(5); + + /* + * Allow IPIs again.
This is done at the very end, after migrating all + * interrupts; the expectation is that we'll only get woken up by an IPI + * interrupt beyond this point, but we leave externals masked just to be + * safe. If we're using icp-opal this may actually allow all + * interrupts anyway, but that should be OK. + */ + icp_ops->set_priority(DEFAULT_PRIORITY); + } #endif /* CONFIG_HOTPLUG_CPU */ diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 26fa03fc9f3c8c..16321ad9e70c04 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -13,7 +13,7 @@ #include #include -#include +#include #include #include #include diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index 5a8dfa22da7c31..ef3fb1b9201f03 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c @@ -12,6 +12,7 @@ #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include +#include #include #include #include diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c index 08b9e942a262ed..45b3178200abc1 100644 --- a/arch/s390/appldata/appldata_os.c +++ b/arch/s390/appldata/appldata_os.c @@ -17,6 +17,8 @@ #include #include #include +#include +#include #include #include diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c index fa95041fa9f684..33ca29333e1808 100644 --- a/arch/s390/boot/compressed/misc.c +++ b/arch/s390/boot/compressed/misc.c @@ -141,31 +141,34 @@ static void check_ipl_parmblock(void *start, unsigned long size) unsigned long decompress_kernel(void) { - unsigned long output_addr; - unsigned char *output; + void *output, *kernel_end; - output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL; - check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start); - memset(&_bss, 0, &_ebss - &_bss); - free_mem_ptr = (unsigned long)&_end; - free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; - output = (unsigned char *) output_addr; + output = (void *) ALIGN((unsigned long) &_end + HEAP_SIZE, PAGE_SIZE); + kernel_end = output + SZ__bss_start; + check_ipl_parmblock((void *) 0, (unsigned long) kernel_end); #ifdef CONFIG_BLK_DEV_INITRD /* * Move the initrd right behind the end of the decompressed - * kernel image. + * kernel image. This also prevents initrd corruption caused by + * bss clearing since kernel_end will always be located behind the + * current bss section. */ - if (INITRD_START && INITRD_SIZE && - INITRD_START < (unsigned long) output + SZ__bss_start) { - check_ipl_parmblock(output + SZ__bss_start, - INITRD_START + INITRD_SIZE); - memmove(output + SZ__bss_start, - (void *) INITRD_START, INITRD_SIZE); - INITRD_START = (unsigned long) output + SZ__bss_start; + if (INITRD_START && INITRD_SIZE && kernel_end > (void *) INITRD_START) { + check_ipl_parmblock(kernel_end, INITRD_SIZE); + memmove(kernel_end, (void *) INITRD_START, INITRD_SIZE); + INITRD_START = (unsigned long) kernel_end; } #endif + /* + * Clear bss section. free_mem_ptr and free_mem_end_ptr need to be + * initialized afterwards since they reside in bss. + */ + memset(&_bss, 0, &_ebss - &_bss); + free_mem_ptr = (unsigned long) &_end; + free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; + puts("Uncompressing Linux...
"); __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error); puts("Ok, booting the kernel.\n"); diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig index 143b1e00b81849..4b176fe83da4c6 100644 --- a/arch/s390/configs/default_defconfig +++ b/arch/s390/configs/default_defconfig @@ -609,7 +609,7 @@ CONFIG_SCHED_TRACER=y CONFIG_FTRACE_SYSCALLS=y CONFIG_STACK_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_UPROBE_EVENT=y +CONFIG_UPROBE_EVENTS=y CONFIG_FUNCTION_PROFILER=y CONFIG_HIST_TRIGGERS=y CONFIG_TRACE_ENUM_MAP_FILE=y diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig index f05d2d6e10872a..0de46cc397f6fe 100644 --- a/arch/s390/configs/gcov_defconfig +++ b/arch/s390/configs/gcov_defconfig @@ -560,7 +560,7 @@ CONFIG_SCHED_TRACER=y CONFIG_FTRACE_SYSCALLS=y CONFIG_STACK_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_UPROBE_EVENT=y +CONFIG_UPROBE_EVENTS=y CONFIG_FUNCTION_PROFILER=y CONFIG_HIST_TRIGGERS=y CONFIG_TRACE_ENUM_MAP_FILE=y diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig index 2358bf33c5efcf..e167557b434c20 100644 --- a/arch/s390/configs/performance_defconfig +++ b/arch/s390/configs/performance_defconfig @@ -558,7 +558,7 @@ CONFIG_SCHED_TRACER=y CONFIG_FTRACE_SYSCALLS=y CONFIG_STACK_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_UPROBE_EVENT=y +CONFIG_UPROBE_EVENTS=y CONFIG_FUNCTION_PROFILER=y CONFIG_HIST_TRIGGERS=y CONFIG_TRACE_ENUM_MAP_FILE=y diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c index d69ea495c4d748..716b17238599f6 100644 --- a/arch/s390/crypto/paes_s390.c +++ b/arch/s390/crypto/paes_s390.c @@ -474,8 +474,11 @@ static int ctr_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier, ret = blkcipher_walk_done(desc, walk, nbytes - n); } if (k < n) { - if (__ctr_paes_set_key(ctx) != 0) + if (__ctr_paes_set_key(ctx) != 0) { + if (locked) + spin_unlock(&ctrblk_lock); return blkcipher_walk_done(desc, walk, -EIO); + } } } if (locked) diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c index 85b7f5efe06a94..5a3ec04a7082c1 100644 --- a/arch/s390/crypto/prng.c +++ b/arch/s390/crypto/prng.c @@ -20,6 +20,8 @@ #include #include #include +#include + #include #include #include diff --git a/arch/s390/defconfig b/arch/s390/defconfig index 68bfd09f1b02ec..97189dbaf34b2a 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig @@ -179,7 +179,7 @@ CONFIG_FTRACE_SYSCALLS=y CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y CONFIG_STACK_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_UPROBE_EVENT=y +CONFIG_UPROBE_EVENTS=y CONFIG_FUNCTION_PROFILER=y CONFIG_TRACE_ENUM_MAP_FILE=y CONFIG_KPROBES_SANITY_TEST=y diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h index 352f7bdaf11fc1..0ddd37e6c29d90 100644 --- a/arch/s390/include/asm/compat.h +++ b/arch/s390/include/asm/compat.h @@ -5,6 +5,7 @@ */ #include #include +#include #include #define __TYPE_IS_PTR(t) (!__builtin_types_compatible_p(typeof(0?(t)0:0ULL), u64)) diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h index d1c407ddf7032d..9072bf63a84614 100644 --- a/arch/s390/include/asm/cputime.h +++ b/arch/s390/include/asm/cputime.h @@ -8,31 +8,27 @@ #define _S390_CPUTIME_H #include -#include +#include #define CPUTIME_PER_USEC 4096ULL #define CPUTIME_PER_SEC (CPUTIME_PER_USEC * USEC_PER_SEC) /* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. 
*/ -typedef unsigned long long __nocast cputime_t; -typedef unsigned long long __nocast cputime64_t; - #define cmpxchg_cputime(ptr, old, new) cmpxchg64(ptr, old, new) -static inline unsigned long __div(unsigned long long n, unsigned long base) -{ - return n / base; -} - /* - * Convert cputime to microseconds and back. + * Convert cputime to microseconds. */ -static inline unsigned int cputime_to_usecs(const cputime_t cputime) +static inline u64 cputime_to_usecs(const u64 cputime) { - return (__force unsigned long long) cputime >> 12; + return cputime >> 12; } +/* + * Convert cputime to nanoseconds. + */ +#define cputime_to_nsecs(cputime) tod_to_ns(cputime) u64 arch_cpu_idle_time(int cpu); diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index 83aaefed2a7b0a..1d48880b3cc142 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h @@ -132,7 +132,7 @@ typedef s390_fp_regs compat_elf_fpregset_t; typedef s390_compat_regs compat_elf_gregset_t; #include -#include /* for task_struct */ +#include /* for task_struct */ #include #include diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h index 84c0f908648366..1293c4066cfc80 100644 --- a/arch/s390/include/asm/kprobes.h +++ b/arch/s390/include/asm/kprobes.h @@ -35,6 +35,7 @@ #include #include #include +#include #define __ARCH_WANT_KPROBES_INSN_SLOT diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index 9b828c073176db..6e31d87fb669bd 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h @@ -9,6 +9,7 @@ #include #include +#include #include #include diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 7ed1972b1920eb..93e37b12e88237 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -24,6 +24,7 @@ * the S390 page table tree. */ #ifndef __ASSEMBLY__ +#include #include #include #include diff --git a/arch/s390/include/asm/sections.h b/arch/s390/include/asm/sections.h index 5ce29fe100baaa..fbd9116eb17bf2 100644 --- a/arch/s390/include/asm/sections.h +++ b/arch/s390/include/asm/sections.h @@ -4,6 +4,5 @@ #include extern char _eshared[], _ehead[]; -extern char __start_ro_after_init[], __end_ro_after_init[]; #endif diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h index 354344dcc19898..118535123f346d 100644 --- a/arch/s390/include/asm/timex.h +++ b/arch/s390/include/asm/timex.h @@ -206,20 +206,16 @@ static inline unsigned long long get_tod_clock_monotonic(void) * ns = (todval * 125) >> 9; * * In order to avoid an overflow with the multiplication we can rewrite this. 
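As a minimal standalone sketch of this arithmetic (illustrative only, not part of the patch, and the demo_* helper names are invented): one CPU-timer/TOD unit is 2**-12 microseconds, so microseconds are a plain shift by 12, and nanoseconds are value * 1000 / 4096 = (value * 125) >> 9; the overflow-safe split form that the comment derives next is what demo_to_nsecs() uses below.

#include <stdint.h>

/* 4096 timer units per microsecond, i.e. one unit is 2**-12 us */
static inline uint64_t demo_to_usecs(uint64_t v)
{
	return v >> 12;
}

/* (v * 125) >> 9 computed without overflowing 64 bits */
static inline uint64_t demo_to_nsecs(uint64_t v)
{
	return ((v >> 9) * 125) + (((v & 0x1ff) * 125) >> 9);
}

As a quick check, 4096 units give demo_to_usecs(4096) == 1 and demo_to_nsecs(4096) == 1000, as expected for one microsecond.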
- * With a split todval == 2^32 * th + tl (th upper 32 bits, tl lower 32 bits) + * With a split todval == 2^9 * th + tl (th upper 55 bits, tl lower 9 bits) * we end up with * - * ns = ((2^32 * th + tl) * 125 ) >> 9; - * -> ns = (2^23 * th * 125) + ((tl * 125) >> 9); + * ns = ((2^9 * th + tl) * 125 ) >> 9; + * -> ns = (th * 125) + ((tl * 125) >> 9); * */ static inline unsigned long long tod_to_ns(unsigned long long todval) { - unsigned long long ns; - - ns = ((todval >> 32) << 23) * 125; - ns += ((todval & 0xffffffff) * 125) >> 9; - return ns; + return ((todval >> 9) * 125) + (((todval & 0x1ff) * 125) >> 9); } #endif diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index 136932ff425020..3ea1554d04b377 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -147,7 +147,7 @@ unsigned long __must_check __copy_to_user(void __user *to, const void *from, " jg 2b\n" \ ".popsection\n" \ EX_TABLE(0b,3b) EX_TABLE(1b,3b) \ - : "=d" (__rc), "=Q" (*(to)) \ + : "=d" (__rc), "+Q" (*(to)) \ : "d" (size), "Q" (*(from)), \ "d" (__reg0), "K" (-EFAULT) \ : "cc"); \ diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h index 4384bc797a54f9..152de9b796e149 100644 --- a/arch/s390/include/uapi/asm/unistd.h +++ b/arch/s390/include/uapi/asm/unistd.h @@ -313,7 +313,9 @@ #define __NR_copy_file_range 375 #define __NR_preadv2 376 #define __NR_pwritev2 377 -#define NR_syscalls 378 +/* Number 378 is reserved for guarded storage */ +#define __NR_statx 379 +#define NR_syscalls 380 /* * There are some system calls that are not present on 64 bit, some diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index 362350cc485c42..c620049c61f2df 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c @@ -10,6 +10,7 @@ #include #include +#include #include #include #include diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c index ae2cda5eee5a99..e89cc2e71db169 100644 --- a/arch/s390/kernel/compat_wrapper.c +++ b/arch/s390/kernel/compat_wrapper.c @@ -178,3 +178,4 @@ COMPAT_SYSCALL_WRAP3(getpeername, int, fd, struct sockaddr __user *, usockaddr, COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len); COMPAT_SYSCALL_WRAP3(mlock2, unsigned long, start, size_t, len, int, flags); COMPAT_SYSCALL_WRAP6(copy_file_range, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags); +COMPAT_SYSCALL_WRAP5(statx, int, dfd, const char __user *, path, unsigned, flags, unsigned, mask, struct statx __user *, buffer); diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c index 55d4fe174fd972..829e1c53005c57 100644 --- a/arch/s390/kernel/dumpstack.c +++ b/arch/s390/kernel/dumpstack.c @@ -14,6 +14,8 @@ #include #include #include +#include +#include #include #include #include diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index dff2152350a7eb..6a7d737d514c4c 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -490,7 +490,7 @@ ENTRY(pgm_check_handler) jnz .Lpgm_svcper # -> single stepped svc 1: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) - j 3f + j 4f 2: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER lg %r15,__LC_KERNEL_STACK lgr %r14,%r12 @@ -499,8 +499,8 @@ ENTRY(pgm_check_handler) tm __LC_PGM_ILC+2,0x02 # check for transaction abort jz 3f mvc 
__THREAD_trap_tdb(256,%r14),0(%r13) -3: la %r11,STACK_FRAME_OVERHEAD(%r15) - stg %r10,__THREAD_last_break(%r14) +3: stg %r10,__THREAD_last_break(%r14) +4: la %r11,STACK_FRAME_OVERHEAD(%r15) stmg %r0,%r7,__PT_R0(%r11) mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC stmg %r8,%r9,__PT_PSW(%r11) @@ -509,14 +509,14 @@ ENTRY(pgm_check_handler) xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) stg %r10,__PT_ARGS(%r11) tm __LC_PGM_ILC+3,0x80 # check for per exception - jz 4f + jz 5f tmhh %r8,0x0001 # kernel per event ? jz .Lpgm_kprobe oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID -4: REENABLE_IRQS +5: REENABLE_IRQS xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) larl %r1,pgm_check_table llgh %r10,__PT_INT_CODE+2(%r11) diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c index fb07a70820af42..9340b2a07935de 100644 --- a/arch/s390/kernel/idle.c +++ b/arch/s390/kernel/idle.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include #include #include "entry.h" diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index b67dafb7b7cfc5..e545ffe5155ab0 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -564,6 +564,8 @@ static struct kset *ipl_kset; static void __ipl_run(void *unused) { + if (MACHINE_IS_LPAR && ipl_info.type == IPL_TYPE_CCW) + diag308(DIAG308_LOAD_NORMAL_DUMP, NULL); diag308(DIAG308_LOAD_CLEAR, NULL); if (MACHINE_IS_VM) __cpcmd("IPL", NULL, 0, NULL); diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c index 80c093e0c6f1a3..9bf8327154eeee 100644 --- a/arch/s390/kernel/nmi.c +++ b/arch/s390/kernel/nmi.c @@ -13,6 +13,9 @@ #include #include #include +#include +#include + #include #include #include diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 54281660582cb1..f29e41c5e2ecf6 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -11,6 +11,9 @@ #include #include #include +#include +#include +#include #include #include #include @@ -121,7 +124,10 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp, clear_tsk_thread_flag(p, TIF_SINGLE_STEP); /* Initialize per thread user and system timer values */ p->thread.user_timer = 0; + p->thread.guest_timer = 0; p->thread.system_timer = 0; + p->thread.hardirq_timer = 0; + p->thread.softirq_timer = 0; frame->sf.back_chain = 0; /* new return point is ret_from_fork */ diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c index bc2b60dcb17828..928b929a62614a 100644 --- a/arch/s390/kernel/processor.c +++ b/arch/s390/kernel/processor.c @@ -8,10 +8,13 @@ #include #include +#include #include #include +#include #include #include + #include #include #include diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 12020b55887bfd..c14df0a1ec3ca5 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -8,6 +8,7 @@ #include #include +#include #include #include #include diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c index fffa0e5462afe0..429d3a782f1cb8 100644 --- a/arch/s390/kernel/runtime_instr.c +++ b/arch/s390/kernel/runtime_instr.c @@ -11,6 +11,8 @@ #include #include #include +#include + #include #include #include diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index e4d811f179715a..911dc0b49be05b 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -18,6 +18,8 @@ #include #include #include +#include 
+#include #include #include #include diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 62a4c263e8878e..289dd50f974452 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -10,6 +10,7 @@ */ #include +#include #include #include #include diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index d0a74d7ce433c9..5dab859b0d543b 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -31,6 +31,8 @@ #include #include #include +#include +#include #include #include #include @@ -907,13 +909,11 @@ void __init smp_prepare_boot_cpu(void) { struct pcpu *pcpu = pcpu_devices; + WARN_ON(!cpu_present(0) || !cpu_online(0)); pcpu->state = CPU_STATE_CONFIGURED; - pcpu->address = stap(); pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix(); S390_lowcore.percpu_offset = __per_cpu_offset[0]; smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN); - set_cpu_present(0, true); - set_cpu_online(0, true); } void __init smp_cpus_done(unsigned int max_cpus) @@ -922,6 +922,7 @@ void __init smp_cpus_done(unsigned int max_cpus) void __init smp_setup_processor_id(void) { + pcpu_devices[0].address = stap(); S390_lowcore.cpu_nr = 0; S390_lowcore.spinlock_lockval = arch_spin_lockval(0); } diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c index 0085b2d8ed7d3d..e66687dc61446d 100644 --- a/arch/s390/kernel/stacktrace.c +++ b/arch/s390/kernel/stacktrace.c @@ -6,6 +6,7 @@ */ #include +#include #include #include #include diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index 9b59e6212d8fd2..2659b5cfeddba4 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S @@ -386,3 +386,5 @@ SYSCALL(sys_mlock2,compat_sys_mlock2) SYSCALL(sys_copy_file_range,compat_sys_copy_file_range) /* 375 */ SYSCALL(sys_preadv2,compat_sys_preadv2) SYSCALL(sys_pwritev2,compat_sys_pwritev2) +NI_SYSCALL +SYSCALL(sys_statx,compat_sys_statx) diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index de66abb479c9eb..c31da46bc037d3 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 2cd5f4f1013c2b..17660e800e74f3 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index 283ad7840335c1..f787b9d8f54c35 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c index 66956c09d5bf92..314e0ee3016a34 100644 --- a/arch/s390/kernel/uprobes.c +++ b/arch/s390/kernel/uprobes.c @@ -9,6 +9,8 @@ #include #include #include +#include + #include #include #include diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index 5ccf9539625182..72307f108c4038 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -63,11 +63,9 @@ SECTIONS . = ALIGN(PAGE_SIZE); __start_ro_after_init = .; - __start_data_ro_after_init = .; .data..ro_after_init : { *(.data..ro_after_init) } - __end_data_ro_after_init = .; EXCEPTION_TABLE(16) . 
= ALIGN(PAGE_SIZE); __end_ro_after_init = .; diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index 31bd96e8116775..072d84ba42a372 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -6,7 +6,7 @@ */ #include -#include +#include #include #include #include @@ -111,7 +111,7 @@ static inline u64 scale_vtime(u64 vtime) } static void account_system_index_scaled(struct task_struct *p, - cputime_t cputime, cputime_t scaled, + u64 cputime, u64 scaled, enum cpu_usage_stat index) { p->stimescaled += cputime_to_nsecs(scaled); diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c index 4492c93631781b..d55c829a5944c2 100644 --- a/arch/s390/kvm/gaccess.c +++ b/arch/s390/kvm/gaccess.c @@ -6,7 +6,9 @@ */ #include +#include #include + #include #include #include "kvm-s390.h" diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index f5694838234d5a..fd6cd05bb6a7c7 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -29,6 +29,8 @@ #include #include #include +#include + #include #include #include diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index fb4b494cde9bff..64b6a309f2c47c 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -15,6 +15,8 @@ #include #include #include +#include + #include #include #include diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index 38556e3959156d..5491be39776b66 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -14,6 +14,8 @@ #include #include #include +#include + #include #include #include diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index bb5560eb2435ec..5845d3028ffca9 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index 7ae1282d5be98d..50618614881f33 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -26,6 +26,8 @@ #include #include #include +#include +#include #include #include #include diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index b48dc5f1900b51..463e5ef02304bb 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -608,12 +608,29 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep) bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr) { spinlock_t *ptl; + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; pgste_t pgste; pte_t *ptep; pte_t pte; bool dirty; - ptep = get_locked_pte(mm, addr, &ptl); + pgd = pgd_offset(mm, addr); + pud = pud_alloc(mm, pgd, addr); + if (!pud) + return false; + pmd = pmd_alloc(mm, pud, addr); + if (!pmd) + return false; + /* We can't run guests backed by huge pages, but userspace can + * still set them up and then try to migrate them without any + * migration support. 
+ */ + if (pmd_large(*pmd)) + return true; + + ptep = pte_alloc_map_lock(mm, pmd, addr, &ptl); if (unlikely(!ptep)) return false; diff --git a/arch/score/include/asm/mmu_context.h b/arch/score/include/asm/mmu_context.h index 2644577c96e844..073f95d350ded0 100644 --- a/arch/score/include/asm/mmu_context.h +++ b/arch/score/include/asm/mmu_context.h @@ -3,7 +3,9 @@ #include #include +#include #include + #include #include diff --git a/arch/score/include/asm/pgtable.h b/arch/score/include/asm/pgtable.h index 0553e5cd5985a0..46ff8fd678a75c 100644 --- a/arch/score/include/asm/pgtable.h +++ b/arch/score/include/asm/pgtable.h @@ -2,6 +2,7 @@ #define _ASM_SCORE_PGTABLE_H #include +#define __ARCH_USE_5LEVEL_HACK #include #include diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c index aae9480706c2c8..eb64d7a677cb95 100644 --- a/arch/score/kernel/process.c +++ b/arch/score/kernel/process.c @@ -28,6 +28,8 @@ #include #include #include +#include +#include void (*pm_power_off)(void); EXPORT_SYMBOL(pm_power_off); diff --git a/arch/score/kernel/ptrace.c b/arch/score/kernel/ptrace.c index 8b75e54816c19f..d8455e60bce06c 100644 --- a/arch/score/kernel/ptrace.c +++ b/arch/score/kernel/ptrace.c @@ -28,6 +28,7 @@ #include #include #include +#include #include diff --git a/arch/score/kernel/traps.c b/arch/score/kernel/traps.c index 569ac02f68dfe5..12daf45369b442 100644 --- a/arch/score/kernel/traps.c +++ b/arch/score/kernel/traps.c @@ -24,7 +24,11 @@ */ #include -#include +#include +#include +#include +#include +#include #include #include diff --git a/arch/score/mm/extable.c b/arch/score/mm/extable.c index ec871355fc2d60..6736a3ad628609 100644 --- a/arch/score/mm/extable.c +++ b/arch/score/mm/extable.c @@ -24,6 +24,8 @@ */ #include +#include +#include int fixup_exception(struct pt_regs *regs) { diff --git a/arch/sh/boards/mach-cayman/setup.c b/arch/sh/boards/mach-cayman/setup.c index 340fd40b381dc3..9c292c27e0d711 100644 --- a/arch/sh/boards/mach-cayman/setup.c +++ b/arch/sh/boards/mach-cayman/setup.c @@ -128,7 +128,6 @@ static int __init smsc_superio_setup(void) SMSC_SUPERIO_WRITE_INDEXED(1, SMSC_PRIMARY_INT_INDEX); SMSC_SUPERIO_WRITE_INDEXED(12, SMSC_SECONDARY_INT_INDEX); -#ifdef CONFIG_IDE /* * Only IDE1 exists on the Cayman */ @@ -158,7 +157,6 @@ static int __init smsc_superio_setup(void) SMSC_SUPERIO_WRITE_INDEXED(0x01, 0xc5); /* GP45 = IDE1_IRQ */ SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc6); /* GP46 = nIOROP */ SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc7); /* GP47 = nIOWOP */ -#endif /* Exit the configuration state */ outb(SMSC_EXIT_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR); diff --git a/arch/sh/drivers/heartbeat.c b/arch/sh/drivers/heartbeat.c index 49bace446a1ab8..c6d96049a0bb07 100644 --- a/arch/sh/drivers/heartbeat.c +++ b/arch/sh/drivers/heartbeat.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/sh/include/asm/fpu.h b/arch/sh/include/asm/fpu.h index 09fc2bc8a790af..50921c7cc3f024 100644 --- a/arch/sh/include/asm/fpu.h +++ b/arch/sh/include/asm/fpu.h @@ -3,6 +3,8 @@ #ifndef __ASSEMBLY__ +#include + struct task_struct; #ifdef CONFIG_SH_FPU diff --git a/arch/sh/include/asm/mmu_context.h b/arch/sh/include/asm/mmu_context.h index 35ffdd081d2655..eb6ac3c10c4487 100644 --- a/arch/sh/include/asm/mmu_context.h +++ b/arch/sh/include/asm/mmu_context.h @@ -11,6 +11,8 @@ #include #include #include +#include + #include #include diff --git a/arch/sh/include/asm/pgtable-2level.h b/arch/sh/include/asm/pgtable-2level.h index 19bd89db17e717..f75cf438725766 100644 --- 
a/arch/sh/include/asm/pgtable-2level.h +++ b/arch/sh/include/asm/pgtable-2level.h @@ -1,6 +1,7 @@ #ifndef __ASM_SH_PGTABLE_2LEVEL_H #define __ASM_SH_PGTABLE_2LEVEL_H +#define __ARCH_USE_5LEVEL_HACK #include /* diff --git a/arch/sh/include/asm/pgtable-3level.h b/arch/sh/include/asm/pgtable-3level.h index 249a985d96482e..9b1e776eca31be 100644 --- a/arch/sh/include/asm/pgtable-3level.h +++ b/arch/sh/include/asm/pgtable-3level.h @@ -1,6 +1,7 @@ #ifndef __ASM_SH_PGTABLE_3LEVEL_H #define __ASM_SH_PGTABLE_3LEVEL_H +#define __ARCH_USE_5LEVEL_HACK #include /* diff --git a/arch/sh/kernel/cpu/fpu.c b/arch/sh/kernel/cpu/fpu.c index 4e332244ea75c3..547c7347845983 100644 --- a/arch/sh/kernel/cpu/fpu.c +++ b/arch/sh/kernel/cpu/fpu.c @@ -1,8 +1,11 @@ -#include +#include +#include +#include #include #include #include #include +#include int init_fpu(struct task_struct *tsk) { diff --git a/arch/sh/kernel/cpu/sh2a/fpu.c b/arch/sh/kernel/cpu/sh2a/fpu.c index 98bbaa447c9340..352f894bece10a 100644 --- a/arch/sh/kernel/cpu/sh2a/fpu.c +++ b/arch/sh/kernel/cpu/sh2a/fpu.c @@ -9,7 +9,7 @@ * * FIXME! These routines can be optimized in big endian case. */ -#include +#include #include #include #include diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c index 69ab4d3c8d4149..95fd2dcb83da3b 100644 --- a/arch/sh/kernel/cpu/sh4/fpu.c +++ b/arch/sh/kernel/cpu/sh4/fpu.c @@ -10,8 +10,7 @@ * * FIXME! These routines have not been tested for big endian case. */ -#include -#include +#include #include #include #include diff --git a/arch/sh/kernel/disassemble.c b/arch/sh/kernel/disassemble.c index 64d5d8dded7c15..015fee58014b14 100644 --- a/arch/sh/kernel/disassemble.c +++ b/arch/sh/kernel/disassemble.c @@ -12,6 +12,8 @@ #include #include +#include + /* * Format of an instruction in memory. 
*/ diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c index 8dfe645bcc4b85..b564b1eae4aee6 100644 --- a/arch/sh/kernel/dumpstack.c +++ b/arch/sh/kernel/dumpstack.c @@ -11,6 +11,8 @@ #include #include #include +#include +#include #include #include #include diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c index 2197fc58418658..afe965712a6940 100644 --- a/arch/sh/kernel/hw_breakpoint.c +++ b/arch/sh/kernel/hw_breakpoint.c @@ -11,6 +11,7 @@ */ #include #include +#include #include #include #include diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c index adad46e41a1d93..4f04c6638a4d28 100644 --- a/arch/sh/kernel/kgdb.c +++ b/arch/sh/kernel/kgdb.c @@ -14,6 +14,8 @@ #include #include #include +#include + #include #include diff --git a/arch/sh/kernel/nmi_debug.c b/arch/sh/kernel/nmi_debug.c index ff0abbd1e6526a..730d928f0d1242 100644 --- a/arch/sh/kernel/nmi_debug.c +++ b/arch/sh/kernel/nmi_debug.c @@ -9,6 +9,7 @@ #include #include #include +#include #include enum nmi_action { diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c index 53bc6c4c84ecdf..f8a695a223dd36 100644 --- a/arch/sh/kernel/process.c +++ b/arch/sh/kernel/process.c @@ -1,10 +1,12 @@ #include #include #include -#include +#include +#include #include #include #include +#include struct kmem_cache *task_xstate_cachep = NULL; unsigned int xstate_size; diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c index 51741850a71540..2c7bdf8cb93426 100644 --- a/arch/sh/kernel/process_32.c +++ b/arch/sh/kernel/process_32.c @@ -15,6 +15,9 @@ */ #include #include +#include +#include +#include #include #include #include diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c index e0b271bffd6a53..ee2abe96f9f3a5 100644 --- a/arch/sh/kernel/process_64.c +++ b/arch/sh/kernel/process_64.c @@ -25,6 +25,9 @@ #include #include #include +#include +#include +#include #include #include #include diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c index 1aabfd356b35ff..5fc3ff606210c8 100644 --- a/arch/sh/kernel/ptrace_32.c +++ b/arch/sh/kernel/ptrace_32.c @@ -12,6 +12,7 @@ */ #include #include +#include #include #include #include diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c index c49d0d05a21517..1e0656d9e7afa6 100644 --- a/arch/sh/kernel/ptrace_64.c +++ b/arch/sh/kernel/ptrace_64.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c index 5128d3001ee588..08bce11badc6a8 100644 --- a/arch/sh/kernel/signal_32.c +++ b/arch/sh/kernel/signal_32.c @@ -9,6 +9,7 @@ * */ #include +#include #include #include #include diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c index edc4769b047eee..c483422ea4d075 100644 --- a/arch/sh/kernel/smp.c +++ b/arch/sh/kernel/smp.c @@ -20,7 +20,8 @@ #include #include #include -#include +#include +#include #include #include #include diff --git a/arch/sh/kernel/stacktrace.c b/arch/sh/kernel/stacktrace.c index bf989e063a0cdb..7a73d2763e1ba3 100644 --- a/arch/sh/kernel/stacktrace.c +++ b/arch/sh/kernel/stacktrace.c @@ -10,6 +10,7 @@ * for more details. 
*/ #include +#include #include #include #include diff --git a/arch/sh/kernel/sys_sh32.c b/arch/sh/kernel/sys_sh32.c index d5287d76809c02..a2e1231a90a306 100644 --- a/arch/sh/kernel/sys_sh32.c +++ b/arch/sh/kernel/sys_sh32.c @@ -1,5 +1,6 @@ #include #include +#include #include #include #include diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c index 9513fa7840aa99..b32d1c3a4655d3 100644 --- a/arch/sh/kernel/traps.c +++ b/arch/sh/kernel/traps.c @@ -4,10 +4,14 @@ #include #include #include +#include +#include #include #include #include #include +#include + #include #include /* print_modules */ #include diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c index ff639342a8bef9..57cff00cad1780 100644 --- a/arch/sh/kernel/traps_32.c +++ b/arch/sh/kernel/traps_32.c @@ -25,6 +25,8 @@ #include #include #include +#include + #include #include #include diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c index 00835edb6e20f8..014fb08cf133a4 100644 --- a/arch/sh/kernel/traps_64.c +++ b/arch/sh/kernel/traps_64.c @@ -10,6 +10,7 @@ * for more details. */ #include +#include #include #include #include diff --git a/arch/sh/math-emu/math.c b/arch/sh/math-emu/math.c index 5078cb809750f5..c86f4360c6cee5 100644 --- a/arch/sh/math-emu/math.c +++ b/arch/sh/math-emu/math.c @@ -10,7 +10,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/sh/mm/asids-debugfs.c b/arch/sh/mm/asids-debugfs.c index bf95fdaedd0cf1..e5539e0f8e3b11 100644 --- a/arch/sh/mm/asids-debugfs.c +++ b/arch/sh/mm/asids-debugfs.c @@ -20,6 +20,9 @@ #include #include #include +#include +#include + #include #include diff --git a/arch/sh/mm/extable_32.c b/arch/sh/mm/extable_32.c index 24a75d315dcbba..940e871bc8169c 100644 --- a/arch/sh/mm/extable_32.c +++ b/arch/sh/mm/extable_32.c @@ -7,6 +7,8 @@ #include #include +#include + int fixup_exception(struct pt_regs *regs) { const struct exception_table_entry *fixup; diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c index 9bf876780cef4b..6fd1bf7481c7d8 100644 --- a/arch/sh/mm/fault.c +++ b/arch/sh/mm/fault.c @@ -13,6 +13,7 @@ */ #include #include +#include #include #include #include diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c index 6777177807c26f..08e7af0be4a77b 100644 --- a/arch/sh/mm/mmap.c +++ b/arch/sh/mm/mmap.c @@ -9,6 +9,7 @@ */ #include #include +#include #include #include #include diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h index d0317993e9476f..22fede6eba1160 100644 --- a/arch/sparc/include/asm/mmu_context_64.h +++ b/arch/sparc/include/asm/mmu_context_64.h @@ -6,6 +6,8 @@ #ifndef __ASSEMBLY__ #include +#include + #include #include diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 7932a4a378176c..8a598528ec1f04 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -12,6 +12,7 @@ * the SpitFire page tables. */ +#include #include #include #include @@ -878,6 +879,9 @@ static inline unsigned long pud_pfn(pud_t pud) #define pte_offset_map pte_index #define pte_unmap(pte) do { } while (0) +/* We cannot include at this point yet: */ +extern struct mm_struct init_mm; + /* Actual page table PTE updates. 
*/ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig, int fullmm, diff --git a/arch/sparc/kernel/asm-offsets.c b/arch/sparc/kernel/asm-offsets.c index f76389a3234229..3f09e1c83f5849 100644 --- a/arch/sparc/kernel/asm-offsets.c +++ b/arch/sparc/kernel/asm-offsets.c @@ -11,6 +11,7 @@ */ #include +#include // #include #include diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c index f87a55d7709469..b542cc7c8d94d8 100644 --- a/arch/sparc/kernel/ds.c +++ b/arch/sparc/kernel/ds.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/sparc/kernel/led.c b/arch/sparc/kernel/led.c index 3ae36f36e7581f..44a3ed93c214c5 100644 --- a/arch/sparc/kernel/led.c +++ b/arch/sparc/kernel/led.c @@ -8,6 +8,7 @@ #include #include #include +#include #include diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c index b99d33797e1df0..db7acf27bea2e0 100644 --- a/arch/sparc/kernel/leon_smp.c +++ b/arch/sparc/kernel/leon_smp.c @@ -9,7 +9,7 @@ #include #include -#include +#include #include #include #include diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c index 48ffc3e7d1dd9a..b6dac8e980f071 100644 --- a/arch/sparc/kernel/process_32.c +++ b/arch/sparc/kernel/process_32.c @@ -14,6 +14,9 @@ #include #include #include +#include +#include +#include #include #include #include diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index d249ca10b20337..1badc493e62ee7 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c @@ -14,6 +14,9 @@ #include #include #include +#include +#include +#include #include #include #include diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c index 901063c1cf7eed..fc5124ccdb53c7 100644 --- a/arch/sparc/kernel/ptrace_64.c +++ b/arch/sparc/kernel/ptrace_64.c @@ -12,6 +12,7 @@ #include #include +#include #include #include #include @@ -350,7 +351,7 @@ static int genregs64_set(struct task_struct *target, } if (!ret) { - unsigned long y; + unsigned long y = regs->y; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &y, diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 8e3e13924594c2..b3bc0ac757cc11 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -5,7 +5,8 @@ #include #include -#include +#include +#include #include #include #include diff --git a/arch/sparc/kernel/stacktrace.c b/arch/sparc/kernel/stacktrace.c index e78386a0029f69..be4c14cccc05d1 100644 --- a/arch/sparc/kernel/stacktrace.c +++ b/arch/sparc/kernel/stacktrace.c @@ -1,4 +1,5 @@ #include +#include #include #include #include diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c index 7b55c50eabe55a..af93b50e3ce430 100644 --- a/arch/sparc/kernel/sun4d_smp.c +++ b/arch/sparc/kernel/sun4d_smp.c @@ -10,7 +10,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/sparc/kernel/sun4m_irq.c b/arch/sparc/kernel/sun4m_irq.c index da737c712fa89b..aa84da0b2d30d3 100644 --- a/arch/sparc/kernel/sun4m_irq.c +++ b/arch/sparc/kernel/sun4m_irq.c @@ -10,6 +10,7 @@ */ #include +#include #include #include diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c index 633c4cf6fdb0bf..5547fcb1d72df5 100644 --- a/arch/sparc/kernel/sun4m_smp.c +++ b/arch/sparc/kernel/sun4m_smp.c @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c 
index fb7b185ee94171..7aecb239626dde 100644 --- a/arch/sparc/kernel/sys_sparc_32.c +++ b/arch/sparc/kernel/sys_sparc_32.c @@ -7,7 +7,9 @@ #include #include -#include +#include +#include +#include #include #include #include diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index 884c70331345d8..ef4520efc8130c 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -7,7 +7,9 @@ #include #include -#include +#include +#include +#include #include #include #include diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c index 4808b6d234551b..d63fc613e7a9c7 100644 --- a/arch/sparc/kernel/sysfs.c +++ b/arch/sparc/kernel/sysfs.c @@ -106,7 +106,7 @@ static unsigned long run_on_cpu(unsigned long cpu, cpumask_t old_affinity; unsigned long ret; - cpumask_copy(&old_affinity, tsk_cpus_allowed(current)); + cpumask_copy(&old_affinity, &current->cpus_allowed); /* should return -EINVAL to userspace */ if (set_cpus_allowed_ptr(current, cpumask_of(cpu))) return 0; diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c index ecddac5a4c9628..466d4aed06c771 100644 --- a/arch/sparc/kernel/traps_32.c +++ b/arch/sparc/kernel/traps_32.c @@ -9,7 +9,9 @@ * I hate traps on the sparc, grrr... */ -#include /* for jiffies */ +#include +#include +#include #include #include #include diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index e022d7b0039045..196ee5eb4d489b 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -9,7 +9,8 @@ */ #include -#include +#include +#include #include #include #include diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c index d20d4e3fd129d6..8367dce5f41b5f 100644 --- a/arch/sparc/kernel/unaligned_32.c +++ b/arch/sparc/kernel/unaligned_32.c @@ -8,7 +8,7 @@ #include -#include +#include #include #include #include diff --git a/arch/sparc/kernel/viohs.c b/arch/sparc/kernel/viohs.c index 526fcb5d8ce95d..b30b30ab3ddde2 100644 --- a/arch/sparc/kernel/viohs.c +++ b/arch/sparc/kernel/viohs.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index 643c149a315154..b84c4dd14954f1 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index e98a3f2e8f0f48..323bc6b6e3ad0e 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -6,6 +6,7 @@ #include #include +#include #include #include #include diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c index 23479c3d39f022..0a04811f06b78c 100644 --- a/arch/sparc/mm/tsb.c +++ b/arch/sparc/mm/tsb.c @@ -6,6 +6,8 @@ #include #include #include +#include + #include #include #include diff --git a/arch/tile/include/asm/mmu_context.h b/arch/tile/include/asm/mmu_context.h index f67753db1f782c..45a4b4c424cfd5 100644 --- a/arch/tile/include/asm/mmu_context.h +++ b/arch/tile/include/asm/mmu_context.h @@ -16,6 +16,8 @@ #define _ASM_TILE_MMU_CONTEXT_H #include +#include + #include #include #include diff --git a/arch/tile/include/asm/pgtable_32.h b/arch/tile/include/asm/pgtable_32.h index d26a4227903683..5f8c615cb5e9bd 100644 --- a/arch/tile/include/asm/pgtable_32.h +++ b/arch/tile/include/asm/pgtable_32.h @@ -74,6 +74,7 @@ extern unsigned long VMALLOC_RESERVE /* = CONFIG_VMALLOC_RESERVE */; #define MAXMEM (_VMALLOC_START - PAGE_OFFSET) /* We have 
no pmd or pud since we are strictly a two-level page table */ +#define __ARCH_USE_5LEVEL_HACK #include static inline int pud_huge_page(pud_t pud) { return 0; } diff --git a/arch/tile/include/asm/pgtable_64.h b/arch/tile/include/asm/pgtable_64.h index e96cec52f6d8aa..96fe58b451188a 100644 --- a/arch/tile/include/asm/pgtable_64.h +++ b/arch/tile/include/asm/pgtable_64.h @@ -59,6 +59,7 @@ #ifndef __ASSEMBLY__ /* We have no pud since we are a three-level page table. */ +#define __ARCH_USE_5LEVEL_HACK #include /* diff --git a/arch/tile/include/asm/stack.h b/arch/tile/include/asm/stack.h index c3cb42615a9fa4..3573325e340b64 100644 --- a/arch/tile/include/asm/stack.h +++ b/arch/tile/include/asm/stack.h @@ -17,6 +17,8 @@ #include #include +#include + #include #include #include diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c index c667e104a0c251..0e863f1ee08c04 100644 --- a/arch/tile/kernel/compat_signal.c +++ b/arch/tile/kernel/compat_signal.c @@ -13,6 +13,7 @@ */ #include +#include #include #include #include diff --git a/arch/tile/kernel/kgdb.c b/arch/tile/kernel/kgdb.c index 9247d6b562f494..d4eb5fb2df9d79 100644 --- a/arch/tile/kernel/kgdb.c +++ b/arch/tile/kernel/kgdb.c @@ -19,6 +19,8 @@ #include #include #include +#include + #include static tile_bundle_bits singlestep_insn = TILEGX_BPT_BUNDLE | DIE_SSTEPBP; diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c index c84c54a1ac5501..f0a0e18e4dfbb8 100644 --- a/arch/tile/kernel/process.c +++ b/arch/tile/kernel/process.c @@ -13,6 +13,9 @@ */ #include +#include +#include +#include #include #include #include diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c index e279572824b15e..e1a078e6828e59 100644 --- a/arch/tile/kernel/ptrace.c +++ b/arch/tile/kernel/ptrace.c @@ -23,6 +23,8 @@ #include #include #include +#include + #include #include diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c index 87299a6cfec87d..f2bf557bb005f9 100644 --- a/arch/tile/kernel/signal.c +++ b/arch/tile/kernel/signal.c @@ -14,6 +14,8 @@ */ #include +#include +#include #include #include #include diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c index 53ce940a50169a..869c22e5756145 100644 --- a/arch/tile/kernel/smpboot.c +++ b/arch/tile/kernel/smpboot.c @@ -16,7 +16,8 @@ #include #include #include -#include +#include +#include #include #include #include diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c index 22bbbd3ff4a3a4..94ecbc6676e5d8 100644 --- a/arch/tile/kernel/stack.c +++ b/arch/tile/kernel/stack.c @@ -13,6 +13,8 @@ */ #include +#include +#include #include #include #include diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c index c9357012b1c892..5bd4e88c7c604a 100644 --- a/arch/tile/kernel/time.c +++ b/arch/tile/kernel/time.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c index 39f427bb0de2db..54804866f238a5 100644 --- a/arch/tile/kernel/traps.c +++ b/arch/tile/kernel/traps.c @@ -13,6 +13,7 @@ */ #include +#include #include #include #include diff --git a/arch/tile/kernel/unaligned.c b/arch/tile/kernel/unaligned.c index f229e979584e03..8149c38f67b6cc 100644 --- a/arch/tile/kernel/unaligned.c +++ b/arch/tile/kernel/unaligned.c @@ -17,6 +17,8 @@ #include #include #include +#include +#include #include #include #include diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c index 709f8e9ba3e967..f58fa06a2214aa 100644 --- a/arch/tile/mm/fault.c +++ 
b/arch/tile/mm/fault.c @@ -16,6 +16,9 @@ #include #include +#include +#include +#include #include #include #include diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c index 77ceaa343fcef1..cb10153b5c9fb4 100644 --- a/arch/tile/mm/hugetlbpage.c +++ b/arch/tile/mm/hugetlbpage.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/tile/mm/mmap.c b/arch/tile/mm/mmap.c index ef61c597898bc0..8ab28167c44b12 100644 --- a/arch/tile/mm/mmap.c +++ b/arch/tile/mm/mmap.c @@ -17,7 +17,8 @@ #include #include #include -#include +#include +#include #include #include diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c index 62087028a9ce1e..366e57f5e8d635 100644 --- a/arch/um/drivers/line.c +++ b/arch/um/drivers/line.c @@ -5,8 +5,9 @@ #include #include -#include +#include #include + #include "chan.h" #include #include diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c index 8a4c72af3bc0ab..af326fb6510dbf 100644 --- a/arch/um/drivers/mconsole_kern.c +++ b/arch/um/drivers/mconsole_kern.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/um/drivers/random.c b/arch/um/drivers/random.c index 57f03050c8505b..37c51a6be690c7 100644 --- a/arch/um/drivers/random.c +++ b/arch/um/drivers/random.c @@ -6,7 +6,7 @@ * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. */ -#include +#include #include #include #include diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h index 1a60e1328e2fa1..94ac2739918c62 100644 --- a/arch/um/include/asm/mmu_context.h +++ b/arch/um/include/asm/mmu_context.h @@ -7,6 +7,8 @@ #define __UM_MMU_CONTEXT_H #include +#include + #include extern void uml_setup_stubs(struct mm_struct *mm); diff --git a/arch/um/include/asm/pgtable-2level.h b/arch/um/include/asm/pgtable-2level.h index cfbe597524698c..179c0ea87a0c3b 100644 --- a/arch/um/include/asm/pgtable-2level.h +++ b/arch/um/include/asm/pgtable-2level.h @@ -8,6 +8,7 @@ #ifndef __UM_PGTABLE_2LEVEL_H #define __UM_PGTABLE_2LEVEL_H +#define __ARCH_USE_5LEVEL_HACK #include /* PGDIR_SHIFT determines what a third-level page table entry can map */ diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h index bae8523a162fd3..c4d876dfb9acd1 100644 --- a/arch/um/include/asm/pgtable-3level.h +++ b/arch/um/include/asm/pgtable-3level.h @@ -7,6 +7,7 @@ #ifndef __UM_PGTABLE_3LEVEL_H #define __UM_PGTABLE_3LEVEL_H +#define __ARCH_USE_5LEVEL_HACK #include /* PGDIR_SHIFT determines what a third-level page table entry can map */ diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c index 770ec07b6a6af0..a43d42bf0a8640 100644 --- a/arch/um/kernel/exec.c +++ b/arch/um/kernel/exec.c @@ -7,7 +7,9 @@ #include #include #include -#include +#include +#include +#include #include #include #include diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c index 078630d6448c07..a9bd618200429c 100644 --- a/arch/um/kernel/process.c +++ b/arch/um/kernel/process.c @@ -17,6 +17,9 @@ #include #include #include +#include +#include +#include #include #include #include diff --git a/arch/um/kernel/reboot.c b/arch/um/kernel/reboot.c index b60a9f8cda7550..71f3e9217cf2a7 100644 --- a/arch/um/kernel/reboot.c +++ b/arch/um/kernel/reboot.c @@ -3,7 +3,9 @@ * Licensed under the GPL */ -#include +#include +#include +#include #include #include #include diff --git a/arch/um/kernel/skas/mmu.c 
b/arch/um/kernel/skas/mmu.c index 3943e9d7d13d2d..7a1f2a936fd10b 100644 --- a/arch/um/kernel/skas/mmu.c +++ b/arch/um/kernel/skas/mmu.c @@ -5,8 +5,9 @@ */ #include -#include +#include #include + #include #include #include diff --git a/arch/um/kernel/skas/process.c b/arch/um/kernel/skas/process.c index 527fa5881915ce..d4dbf08722d68c 100644 --- a/arch/um/kernel/skas/process.c +++ b/arch/um/kernel/skas/process.c @@ -4,7 +4,10 @@ */ #include -#include +#include +#include +#include + #include #include #include diff --git a/arch/um/kernel/sysrq.c b/arch/um/kernel/sysrq.c index aa1b56f5ac6894..a76295f7ede9cd 100644 --- a/arch/um/kernel/sysrq.c +++ b/arch/um/kernel/sysrq.c @@ -11,6 +11,9 @@ #include #include #include +#include +#include + #include #include #include diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c index 3777b82759bda1..37508b190106db 100644 --- a/arch/um/kernel/tlb.c +++ b/arch/um/kernel/tlb.c @@ -5,7 +5,8 @@ #include #include -#include +#include + #include #include #include diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c index ad8f206ab5e851..59158871b9fcc3 100644 --- a/arch/um/kernel/trap.c +++ b/arch/um/kernel/trap.c @@ -4,10 +4,11 @@ */ #include -#include +#include #include #include #include +#include #include #include #include diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c index e8175a8aa22c7b..4b85acd4020c40 100644 --- a/arch/um/kernel/um_arch.c +++ b/arch/um/kernel/um_arch.c @@ -11,7 +11,9 @@ #include #include #include +#include #include + #include #include #include diff --git a/arch/unicore32/include/asm/pgtable.h b/arch/unicore32/include/asm/pgtable.h index 818d0f5598e324..a4f2bef37e7069 100644 --- a/arch/unicore32/include/asm/pgtable.h +++ b/arch/unicore32/include/asm/pgtable.h @@ -12,6 +12,7 @@ #ifndef __UNICORE_PGTABLE_H__ #define __UNICORE_PGTABLE_H__ +#define __ARCH_USE_5LEVEL_HACK #include #include diff --git a/arch/unicore32/kernel/fpu-ucf64.c b/arch/unicore32/kernel/fpu-ucf64.c index a53343a90ca294..12c8c9527b8e83 100644 --- a/arch/unicore32/kernel/fpu-ucf64.c +++ b/arch/unicore32/kernel/fpu-ucf64.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/unicore32/kernel/process.c b/arch/unicore32/kernel/process.c index d7c6b676b3a56a..d22c1dc7e39e9b 100644 --- a/arch/unicore32/kernel/process.c +++ b/arch/unicore32/kernel/process.c @@ -13,6 +13,9 @@ #include #include +#include +#include +#include #include #include #include diff --git a/arch/unicore32/kernel/ptrace.c b/arch/unicore32/kernel/ptrace.c index 9f07c08da050d8..a102c2b4f35804 100644 --- a/arch/unicore32/kernel/ptrace.c +++ b/arch/unicore32/kernel/ptrace.c @@ -15,6 +15,7 @@ #include #include #include +#include /* * this routine will get a word off of the processes privileged stack. 
diff --git a/arch/unicore32/kernel/stacktrace.c b/arch/unicore32/kernel/stacktrace.c index b34030bdabe3e0..9976e767d51c2e 100644 --- a/arch/unicore32/kernel/stacktrace.c +++ b/arch/unicore32/kernel/stacktrace.c @@ -11,6 +11,7 @@ */ #include #include +#include #include #include diff --git a/arch/unicore32/kernel/traps.c b/arch/unicore32/kernel/traps.c index c54e32410eade6..5f25b39f04d430 100644 --- a/arch/unicore32/kernel/traps.c +++ b/arch/unicore32/kernel/traps.c @@ -14,6 +14,9 @@ */ #include #include +#include +#include +#include #include #include #include diff --git a/arch/unicore32/mm/alignment.c b/arch/unicore32/mm/alignment.c index 24e836023e6cc4..3a7f6faa87940c 100644 --- a/arch/unicore32/mm/alignment.c +++ b/arch/unicore32/mm/alignment.c @@ -15,6 +15,7 @@ */ #include #include +#include #include #include #include diff --git a/arch/unicore32/mm/fault.c b/arch/unicore32/mm/fault.c index b656d216a8a85d..bbefcc46a45e41 100644 --- a/arch/unicore32/mm/fault.c +++ b/arch/unicore32/mm/fault.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 2d449337a36051..a94a4d10f2dfa4 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -120,10 +120,6 @@ else # -funit-at-a-time shrinks the kernel .text considerably # unfortunately it makes reading oopses harder. KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time) - - # this works around some issues with generating unwind tables in older gccs - # newer gccs do it by default - KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args) endif ifdef CONFIG_X86_X32 @@ -147,6 +143,37 @@ ifeq ($(CONFIG_KMEMCHECK),y) KBUILD_CFLAGS += $(call cc-option,-fno-builtin-memcpy) endif +# +# If the function graph tracer is used with mcount instead of fentry, +# '-maccumulate-outgoing-args' is needed to prevent a GCC bug +# (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=42109) +# +ifdef CONFIG_FUNCTION_GRAPH_TRACER + ifndef CONFIG_HAVE_FENTRY + ACCUMULATE_OUTGOING_ARGS := 1 + else + ifeq ($(call cc-option-yn, -mfentry), n) + ACCUMULATE_OUTGOING_ARGS := 1 + endif + endif +endif + +# +# Jump labels need '-maccumulate-outgoing-args' for gcc < 4.5.2 to prevent a +# GCC bug (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=46226). There's no way +# to test for this bug at compile-time because the test case needs to execute, +# which is a no-go for cross compilers. So check the GCC version instead. +# +ifdef CONFIG_JUMP_LABEL + ifneq ($(ACCUMULATE_OUTGOING_ARGS), 1) + ACCUMULATE_OUTGOING_ARGS = $(call cc-if-fullversion, -lt, 040502, 1) + endif +endif + +ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1) + KBUILD_CFLAGS += -maccumulate-outgoing-args +endif + # Stackpointer is addressed different for 32 bit and 64 bit x86 sp-$(CONFIG_X86_32) := esp sp-$(CONFIG_X86_64) := rsp diff --git a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu index 6647ed49c66c97..a45eb15b7cf290 100644 --- a/arch/x86/Makefile_32.cpu +++ b/arch/x86/Makefile_32.cpu @@ -45,24 +45,6 @@ cflags-$(CONFIG_MGEODE_LX) += $(call cc-option,-march=geode,-march=pentium-mmx) # cpu entries cflags-$(CONFIG_X86_GENERIC) += $(call tune,generic,$(call tune,i686)) -# Work around the pentium-mmx code generator madness of gcc4.4.x which -# does stack alignment by generating horrible code _before_ the mcount -# prologue (push %ebp, mov %esp, %ebp) which breaks the function graph -# tracer assumptions. 
For i686, generic, core2 this is set by the -# compiler anyway -ifeq ($(CONFIG_FUNCTION_GRAPH_TRACER), y) -ADD_ACCUMULATE_OUTGOING_ARGS := y -endif - -# Work around to a bug with asm goto with first implementations of it -# in gcc causing gcc to mess up the push and pop of the stack in some -# uses of asm goto. -ifeq ($(CONFIG_JUMP_LABEL), y) -ADD_ACCUMULATE_OUTGOING_ARGS := y -endif - -cflags-$(ADD_ACCUMULATE_OUTGOING_ARGS) += $(call cc-option,-maccumulate-outgoing-args) - # Bug fix for binutils: this option is required in order to keep # binutils from generating NOPL instructions against our will. ifneq ($(CONFIG_X86_P6_NOP),y) diff --git a/arch/x86/boot/compressed/error.c b/arch/x86/boot/compressed/error.c index 6248740b68b5a0..31922023de4928 100644 --- a/arch/x86/boot/compressed/error.c +++ b/arch/x86/boot/compressed/error.c @@ -4,6 +4,7 @@ * memcpy() and memmove() are defined for the compressed boot environment. */ #include "misc.h" +#include "error.h" void warn(char *m) { diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index 7ef4a099defcda..6205d3b81e6d11 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig @@ -176,6 +176,7 @@ CONFIG_E1000E=y CONFIG_SKY2=y CONFIG_FORCEDETH=y CONFIG_8139TOO=y +CONFIG_R8169=y CONFIG_FDDI=y CONFIG_INPUT_POLLDEV=y # CONFIG_INPUT_MOUSEDEV_PSAUX is not set diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c index b83c61cfd1546d..370c42c7f04683 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c @@ -9,6 +9,7 @@ #include #include +#include #include #include #include diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl index 2b361854254414..9ba050fe47f30e 100644 --- a/arch/x86/entry/syscalls/syscall_32.tbl +++ b/arch/x86/entry/syscalls/syscall_32.tbl @@ -389,3 +389,4 @@ 380 i386 pkey_mprotect sys_pkey_mprotect 381 i386 pkey_alloc sys_pkey_alloc 382 i386 pkey_free sys_pkey_free +383 i386 statx sys_statx diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl index e93ef0b38db8e1..5aef183e2f85c5 100644 --- a/arch/x86/entry/syscalls/syscall_64.tbl +++ b/arch/x86/entry/syscalls/syscall_64.tbl @@ -338,6 +338,7 @@ 329 common pkey_mprotect sys_pkey_mprotect 330 common pkey_alloc sys_pkey_alloc 331 common pkey_free sys_pkey_free +332 common statx sys_statx # # x32-specific system call numbers start at 512 to avoid cache impact diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c index 572cee3fccffc5..226ca70dc6bd43 100644 --- a/arch/x86/entry/vdso/vma.c +++ b/arch/x86/entry/vdso/vma.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c index 636c4b341f36a9..ce1d7534fa530a 100644 --- a/arch/x86/entry/vsyscall/vsyscall_64.c +++ b/arch/x86/entry/vsyscall/vsyscall_64.c @@ -27,6 +27,8 @@ #include #include +#include +#include #include #include diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c index afb222b63caeb0..c84584bb940280 100644 --- a/arch/x86/events/amd/core.c +++ b/arch/x86/events/amd/core.c @@ -604,7 +604,7 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, int idx, return &amd_f15_PMC20; } case AMD_EVENT_NB: - /* moved to perf_event_amd_uncore.c */ + /* moved to uncore.c */ return &emptyconstraint; default: return &emptyconstraint; diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c index 496e60391fac68..786fd875de9287 
100644 --- a/arch/x86/events/amd/ibs.c +++ b/arch/x86/events/amd/ibs.c @@ -12,6 +12,7 @@ #include #include #include +#include #include diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 1635c0c8df23a6..580b60f5ac83ce 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -20,7 +20,8 @@ #include #include #include -#include +#include +#include #include #include #include @@ -2100,8 +2101,8 @@ static int x86_pmu_event_init(struct perf_event *event) static void refresh_pce(void *ignored) { - if (current->mm) - load_mm_cr4(current->mm); + if (current->active_mm) + load_mm_cr4(current->active_mm); } static void x86_pmu_event_mapped(struct perf_event *event) @@ -2109,6 +2110,18 @@ static void x86_pmu_event_mapped(struct perf_event *event) if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED)) return; + /* + * This function relies on not being called concurrently in two + * tasks in the same mm. Otherwise one task could observe + * perf_rdpmc_allowed > 1 and return all the way back to + * userspace with CR4.PCE clear while another task is still + * doing on_each_cpu_mask() to propagate CR4.PCE. + * + * For now, this can't happen because all callers hold mmap_sem + * for write. If this changes, we'll need a different solution. + */ + lockdep_assert_held_exclusive(&current->mm->mmap_sem); + if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1) on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1); } @@ -2243,6 +2256,7 @@ void arch_perf_update_userpage(struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now) { struct cyc2ns_data *data; + u64 offset; userpg->cap_user_time = 0; userpg->cap_user_time_zero = 0; @@ -2250,11 +2264,13 @@ void arch_perf_update_userpage(struct perf_event *event, !!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED); userpg->pmc_width = x86_pmu.cntval_bits; - if (!sched_clock_stable()) + if (!using_native_sched_clock() || !sched_clock_stable()) return; data = cyc2ns_read_begin(); + offset = data->cyc2ns_offset + __sched_clock_offset; + /* * Internal timekeeping for enabled/running/stopped times * is always in the local_clock domain. @@ -2262,7 +2278,7 @@ void arch_perf_update_userpage(struct perf_event *event, userpg->cap_user_time = 1; userpg->time_mult = data->cyc2ns_mul; userpg->time_shift = data->cyc2ns_shift; - userpg->time_offset = data->cyc2ns_offset - now; + userpg->time_offset = offset - now; /* * cap_user_time_zero doesn't make sense when we're using a different @@ -2270,7 +2286,7 @@ void arch_perf_update_userpage(struct perf_event *event, */ if (!event->attr.use_clockid) { userpg->cap_user_time_zero = 1; - userpg->time_zero = data->cyc2ns_offset; + userpg->time_zero = offset; } cyc2ns_read_end(data); diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index aff4b5b69d4021..238ae3248ba559 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c @@ -1,5 +1,5 @@ /* - * perf_event_intel_cstate.c: support cstate residency counters + * Support cstate residency counters * * Copyright (C) 2015, Intel Corp. 
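As an aside, a simplified sketch of the enable-count pattern that x86_pmu_event_mapped() follows above (illustrative only, not part of the patch; the demo_* names, the mutex standing in for mmap_sem held for write, and the plain cpumask standing in for mm_cpumask() are all invented): the first mapper of an mm broadcasts the enable bit to every CPU currently running that mm, and callers must serialize so a later mapper cannot return to userspace before that broadcast has finished.

#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/smp.h>

struct demo_mm {
	atomic_t rdpmc_users;	/* plays the role of perf_rdpmc_allowed */
	struct mutex map_lock;	/* stands in for mmap_sem held for write */
	cpumask_t cpus;		/* stands in for mm_cpumask(mm) */
};

static void demo_enable_on_cpu(void *info)
{
	/* set the per-CPU enable bit, as refresh_pce() does for CR4.PCE */
}

static void demo_event_mapped(struct demo_mm *mm)
{
	lockdep_assert_held(&mm->map_lock);
	/* only the 0 -> 1 transition pays for the cross-CPU broadcast */
	if (atomic_inc_return(&mm->rdpmc_users) == 1)
		on_each_cpu_mask(&mm->cpus, demo_enable_on_cpu, NULL, 1);
}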
* Author: Kan Liang (kan.liang@intel.com) diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c index 22054ca4902651..9d05c7e67f6073 100644 --- a/arch/x86/events/intel/rapl.c +++ b/arch/x86/events/intel/rapl.c @@ -1,5 +1,5 @@ /* - * perf_event_intel_rapl.c: support Intel RAPL energy consumption counters + * Support Intel RAPL energy consumption counters * Copyright (C) 2013 Google, Inc., Stephane Eranian * * Intel RAPL interface is specified in the IA-32 Manual Vol3b diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h index ad986c1e29bccd..df5989f27b1b65 100644 --- a/arch/x86/events/intel/uncore.h +++ b/arch/x86/events/intel/uncore.h @@ -360,7 +360,7 @@ extern struct list_head pci2phy_map_head; extern struct pci_extra_dev *uncore_extra_pci_dev; extern struct event_constraint uncore_constraint_empty; -/* perf_event_intel_uncore_snb.c */ +/* uncore_snb.c */ int snb_uncore_pci_init(void); int ivb_uncore_pci_init(void); int hsw_uncore_pci_init(void); @@ -371,7 +371,7 @@ void nhm_uncore_cpu_init(void); void skl_uncore_cpu_init(void); int snb_pci2phy_map_init(int devid); -/* perf_event_intel_uncore_snbep.c */ +/* uncore_snbep.c */ int snbep_uncore_pci_init(void); void snbep_uncore_cpu_init(void); int ivbep_uncore_pci_init(void); @@ -385,5 +385,5 @@ void knl_uncore_cpu_init(void); int skx_uncore_pci_init(void); void skx_uncore_cpu_init(void); -/* perf_event_intel_uncore_nhmex.c */ +/* uncore_nhmex.c */ void nhmex_uncore_cpu_init(void); diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c index db64baf0e500b4..8bef70e7f3cc6d 100644 --- a/arch/x86/hyperv/hv_init.c +++ b/arch/x86/hyperv/hv_init.c @@ -158,13 +158,13 @@ void hyperv_init(void) clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100); return; } +register_msr_cs: #endif /* * For 32 bit guests just use the MSR based mechanism for reading * the partition counter. 
*/ -register_msr_cs: hyperv_cs = &hyperv_cs_msr; if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE) clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100); diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index 7c0a711989d2c0..8d0879f1d42cad 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c index 95c0b4ae09b010..724153797209e9 100644 --- a/arch/x86/ia32/ia32_signal.c +++ b/arch/x86/ia32/ia32_signal.c @@ -9,6 +9,7 @@ */ #include +#include #include #include #include diff --git a/arch/x86/include/asm/a.out-core.h b/arch/x86/include/asm/a.out-core.h index 7a15588e45d472..7d3ece8bfb616b 100644 --- a/arch/x86/include/asm/a.out-core.h +++ b/arch/x86/include/asm/a.out-core.h @@ -17,6 +17,8 @@ #include #include +#include + #include /* diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index eff8e36aaf7208..730ef65e83934f 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -2,7 +2,6 @@ #define _ASM_X86_APIC_H #include -#include #include #include diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 4e7772387c6e92..b04bb6dfed7f84 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -289,7 +289,8 @@ #define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */ #define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */ #define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */ -#define X86_FEATURE_RDPID (16*32+ 22) /* RDPID instruction */ +#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */ +#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ /* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */ #define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */ diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h index cb8f9149f6c852..1548ca92ad3f62 100644 --- a/arch/x86/include/asm/desc.h +++ b/arch/x86/include/asm/desc.h @@ -205,6 +205,8 @@ static inline void native_load_tr_desc(void) asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8)); } +DECLARE_PER_CPU(bool, __tss_limit_invalid); + static inline void force_reload_TR(void) { struct desc_struct *d = get_cpu_gdt_table(smp_processor_id()); @@ -220,18 +222,20 @@ static inline void force_reload_TR(void) write_gdt_entry(d, GDT_ENTRY_TSS, &tss, DESC_TSS); load_TR_desc(); + this_cpu_write(__tss_limit_invalid, false); } -DECLARE_PER_CPU(bool, need_tr_refresh); - -static inline void refresh_TR(void) +/* + * Call this if you need the TSS limit to be correct, which should be the case + * if and only if you have TIF_IO_BITMAP set or you're switching to a task + * with TIF_IO_BITMAP set. 
+ */ +static inline void refresh_tss_limit(void) { DEBUG_LOCKS_WARN_ON(preemptible()); - if (unlikely(this_cpu_read(need_tr_refresh))) { + if (unlikely(this_cpu_read(__tss_limit_invalid))) force_reload_TR(); - this_cpu_write(need_tr_refresh, false); - } } /* @@ -250,7 +254,7 @@ static inline void invalidate_tss_limit(void) if (unlikely(test_thread_flag(TIF_IO_BITMAP))) force_reload_TR(); else - this_cpu_write(need_tr_refresh, true); + this_cpu_write(__tss_limit_invalid, true); } static inline void native_load_gdt(const struct desc_ptr *dtr) diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h index 8167fdb67ae846..9814db42b79001 100644 --- a/arch/x86/include/asm/intel-family.h +++ b/arch/x86/include/asm/intel-family.h @@ -59,6 +59,7 @@ #define INTEL_FAM6_ATOM_MERRIFIELD 0x4A /* Tangier */ #define INTEL_FAM6_ATOM_MOOREFIELD 0x5A /* Anniedale */ #define INTEL_FAM6_ATOM_GOLDMONT 0x5C +#define INTEL_FAM6_ATOM_GEMINI_LAKE 0x7A #define INTEL_FAM6_ATOM_DENVERTON 0x5F /* Goldmont Microserver */ /* Xeon Phi */ diff --git a/arch/x86/include/asm/intel_rdt.h b/arch/x86/include/asm/intel_rdt.h index 95ce5c85b0096b..0d64397cee58e0 100644 --- a/arch/x86/include/asm/intel_rdt.h +++ b/arch/x86/include/asm/intel_rdt.h @@ -3,6 +3,7 @@ #ifdef CONFIG_INTEL_RDT_A +#include #include #include diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h index d74747b031ecd2..c4eda791f877b6 100644 --- a/arch/x86/include/asm/kvm_page_track.h +++ b/arch/x86/include/asm/kvm_page_track.h @@ -46,6 +46,7 @@ struct kvm_page_track_notifier_node { }; void kvm_page_track_init(struct kvm *kvm); +void kvm_page_track_cleanup(struct kvm *kvm); void kvm_page_track_free_memslot(struct kvm_memory_slot *free, struct kvm_memory_slot *dont); diff --git a/arch/x86/include/asm/mpx.h b/arch/x86/include/asm/mpx.h index 0b416d4cf73b69..a0d662be4c5b85 100644 --- a/arch/x86/include/asm/mpx.h +++ b/arch/x86/include/asm/mpx.h @@ -2,6 +2,8 @@ #define _ASM_X86_MPX_H #include +#include + #include #include diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 00293a94ffaf5a..d8b5f8ab8ef9e7 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -46,7 +46,7 @@ #define MSR_FSB_FREQ 0x000000cd #define MSR_PLATFORM_INFO 0x000000ce -#define MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2 +#define MSR_PKG_CST_CONFIG_CONTROL 0x000000e2 #define NHM_C3_AUTO_DEMOTE (1UL << 25) #define NHM_C1_AUTO_DEMOTE (1UL << 26) #define ATM_LNC_C6_AUTO_DEMOTE (1UL << 25) @@ -147,6 +147,7 @@ /* C-state Residency Counters */ #define MSR_PKG_C3_RESIDENCY 0x000003f8 #define MSR_PKG_C6_RESIDENCY 0x000003f9 +#define MSR_ATOM_PKG_C6_RESIDENCY 0x000003fa #define MSR_PKG_C7_RESIDENCY 0x000003fa #define MSR_CORE_C3_RESIDENCY 0x000003fc #define MSR_CORE_C6_RESIDENCY 0x000003fd @@ -203,10 +204,17 @@ #define MSR_PKG_BOTH_CORE_GFXE_C0_RES 0x0000065B #define MSR_CORE_C1_RES 0x00000660 +#define MSR_MODULE_C6_RES_MS 0x00000664 #define MSR_CC6_DEMOTION_POLICY_CONFIG 0x00000668 #define MSR_MC6_DEMOTION_POLICY_CONFIG 0x00000669 +#define MSR_ATOM_CORE_RATIOS 0x0000066a +#define MSR_ATOM_CORE_VIDS 0x0000066b +#define MSR_ATOM_CORE_TURBO_RATIOS 0x0000066c +#define MSR_ATOM_CORE_TURBO_VIDS 0x0000066d + + #define MSR_CORE_PERF_LIMIT_REASONS 0x00000690 #define MSR_GFX_PERF_LIMIT_REASONS 0x000006B0 #define MSR_RING_PERF_LIMIT_REASONS 0x000006B1 @@ -459,6 +467,7 @@ #define MSR_IA32_TEMPERATURE_TARGET 0x000001a2 +#define MSR_MISC_FEATURE_CONTROL 0x000001a4 #define MSR_MISC_PWR_MGMT 
0x000001aa #define MSR_IA32_ENERGY_PERF_BIAS 0x000001b0 diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h index f37f2d8a2989d0..bda3c27f0da06c 100644 --- a/arch/x86/include/asm/mwait.h +++ b/arch/x86/include/asm/mwait.h @@ -2,6 +2,7 @@ #define _ASM_X86_MWAIT_H #include +#include #include diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index 72277b1028a5f5..50d35e3185f553 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h @@ -121,12 +121,9 @@ static inline void native_pmd_clear(pmd_t *pmd) *(tmp + 1) = 0; } -#if !defined(CONFIG_SMP) || (defined(CONFIG_HIGHMEM64G) && \ - defined(CONFIG_PARAVIRT)) static inline void native_pud_clear(pud_t *pudp) { } -#endif static inline void pud_clear(pud_t *pudp) { diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 1cfb36b8c024ab..585ee0d42d18fc 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -62,7 +62,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page); # define set_pud(pudp, pud) native_set_pud(pudp, pud) #endif -#ifndef __PAGETABLE_PMD_FOLDED +#ifndef __PAGETABLE_PUD_FOLDED #define pud_clear(pud) native_pud_clear(pud) #endif diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index 8b4de22d64299e..62484333673d98 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -273,6 +273,8 @@ static inline pgdval_t pgd_flags(pgd_t pgd) } #if CONFIG_PGTABLE_LEVELS > 3 +#include + typedef struct { pudval_t pud; } pud_t; static inline pud_t native_make_pud(pmdval_t val) @@ -285,6 +287,7 @@ static inline pudval_t native_pud_val(pud_t pud) return pud.pud; } #else +#define __ARCH_USE_5LEVEL_HACK #include static inline pudval_t native_pud_val(pud_t pud) @@ -306,6 +309,7 @@ static inline pmdval_t native_pmd_val(pmd_t pmd) return pmd.pmd; } #else +#define __ARCH_USE_5LEVEL_HACK #include static inline pmdval_t native_pmd_val(pmd_t pmd) diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h index 34684adb6899ad..b3b09b98896d52 100644 --- a/arch/x86/include/asm/pkeys.h +++ b/arch/x86/include/asm/pkeys.h @@ -46,6 +46,15 @@ extern int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey, static inline bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey) { + /* + * "Allocated" pkeys are those that have been returned + * from pkey_alloc(). pkey 0 is special, and never + * returned from pkey_alloc(). + */ + if (pkey <= 0) + return false; + if (pkey >= arch_max_pkey()) + return false; return mm_pkey_allocation_map(mm) & (1U << pkey); } @@ -82,12 +91,6 @@ int mm_pkey_alloc(struct mm_struct *mm) static inline int mm_pkey_free(struct mm_struct *mm, int pkey) { - /* - * pkey 0 is special, always allocated and can never - * be freed. 
- */ - if (!pkey) - return -EINVAL; if (!mm_pkey_is_allocated(mm, pkey)) return -EINVAL; diff --git a/arch/x86/include/asm/purgatory.h b/arch/x86/include/asm/purgatory.h new file mode 100644 index 00000000000000..d7da2729903d72 --- /dev/null +++ b/arch/x86/include/asm/purgatory.h @@ -0,0 +1,20 @@ +#ifndef _ASM_X86_PURGATORY_H +#define _ASM_X86_PURGATORY_H + +#ifndef __ASSEMBLY__ +#include + +extern void purgatory(void); +/* + * These forward declarations serve two purposes: + * + * 1) Make sparse happy when checking arch/purgatory + * 2) Document that these are required to be global so the symbol + * lookup in kexec works + */ +extern unsigned long purgatory_backup_dest; +extern unsigned long purgatory_backup_src; +extern unsigned long purgatory_backup_sz; +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_PURGATORY_H */ diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h index a04eabd43d0662..27e9f9d769b892 100644 --- a/arch/x86/include/asm/timer.h +++ b/arch/x86/include/asm/timer.h @@ -12,6 +12,8 @@ extern int recalibrate_cpu_khz(void); extern int no_timer_check; +extern bool using_native_sched_clock(void); + /* * We use the full linear equation: f(x) = a + b*x, in order to allow * a continuous function in the face of dynamic freq changes. diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 6fa85944af83d8..fc5abff9b7fd63 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -188,7 +188,7 @@ static inline void __native_flush_tlb_single(unsigned long addr) static inline void __flush_tlb_all(void) { - if (static_cpu_has(X86_FEATURE_PGE)) + if (boot_cpu_has(X86_FEATURE_PGE)) __flush_tlb_global(); else __flush_tlb(); diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h index 72e8300b1e8a6a..9cffb44a3cf5df 100644 --- a/arch/x86/include/asm/uv/uv_hub.h +++ b/arch/x86/include/asm/uv/uv_hub.h @@ -485,15 +485,17 @@ static inline unsigned long uv_soc_phys_ram_to_gpa(unsigned long paddr) if (paddr < uv_hub_info->lowmem_remap_top) paddr |= uv_hub_info->lowmem_remap_base; - paddr |= uv_hub_info->gnode_upper; - if (m_val) + + if (m_val) { + paddr |= uv_hub_info->gnode_upper; paddr = ((paddr << uv_hub_info->m_shift) >> uv_hub_info->m_shift) | ((paddr >> uv_hub_info->m_val) << uv_hub_info->n_lshift); - else + } else { paddr |= uv_soc_phys_ram_to_nasid(paddr) << uv_hub_info->gpa_shift; + } return paddr; } diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h index 5138dacf8bb836..07244ea16765a6 100644 --- a/arch/x86/include/uapi/asm/bootparam.h +++ b/arch/x86/include/uapi/asm/bootparam.h @@ -58,7 +58,7 @@ struct setup_header { __u32 header; __u16 version; __u32 realmode_swtch; - __u16 start_sys; + __u16 start_sys_seg; __u16 kernel_version; __u8 type_of_loader; __u8 loadflags; diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index ae32838cac5fd2..b2879cc23db470 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -179,10 +179,15 @@ static int acpi_register_lapic(int id, u32 acpiid, u8 enabled) return -EINVAL; } + if (!enabled) { + ++disabled_cpus; + return -EINVAL; + } + if (boot_cpu_physical_apicid != -1U) ver = boot_cpu_apic_version; - cpu = __generic_processor_info(id, ver, enabled); + cpu = generic_processor_info(id, ver); if (cpu >= 0) early_per_cpu(x86_cpu_to_acpiid, cpu) = acpiid; @@ -710,7 +715,7 @@ static void __init acpi_set_irq_model_ioapic(void) #ifdef CONFIG_ACPI_HOTPLUG_CPU #include -int 
acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) +static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) { #ifdef CONFIG_ACPI_NUMA int nid; diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c index 82dfe32faaf41b..df083efe6ee007 100644 --- a/arch/x86/kernel/amd_gart_64.c +++ b/arch/x86/kernel/amd_gart_64.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 4261b3282ad99d..8ccb7ef512e05d 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1610,24 +1610,15 @@ static inline void try_to_enable_x2apic(int remap_mode) { } static inline void __x2apic_enable(void) { } #endif /* !CONFIG_X86_X2APIC */ -static int __init try_to_enable_IR(void) -{ -#ifdef CONFIG_X86_IO_APIC - if (!x2apic_enabled() && skip_ioapic_setup) { - pr_info("Not enabling interrupt remapping due to skipped IO-APIC setup\n"); - return -1; - } -#endif - return irq_remapping_enable(); -} - void __init enable_IR_x2apic(void) { unsigned long flags; int ret, ir_stat; - if (skip_ioapic_setup) + if (skip_ioapic_setup) { + pr_info("Not enabling interrupt remapping due to skipped IO-APIC setup\n"); return; + } ir_stat = irq_remapping_prepare(); if (ir_stat < 0 && !x2apic_supported()) @@ -1645,7 +1636,7 @@ void __init enable_IR_x2apic(void) /* If irq_remapping_prepare() succeeded, try to enable it */ if (ir_stat >= 0) - ir_stat = try_to_enable_IR(); + ir_stat = irq_remapping_enable(); /* ir_stat contains the remap mode or an error code */ try_to_enable_x2apic(ir_stat); @@ -2062,17 +2053,17 @@ static int allocate_logical_cpuid(int apicid) /* Allocate a new cpuid. */ if (nr_logical_cpuids >= nr_cpu_ids) { - WARN_ONCE(1, "Only %d processors supported." + WARN_ONCE(1, "APIC: NR_CPUS/possible_cpus limit of %i reached. " "Processor %d/0x%x and the rest are ignored.\n", - nr_cpu_ids - 1, nr_logical_cpuids, apicid); - return -1; + nr_cpu_ids, nr_logical_cpuids, apicid); + return -EINVAL; } cpuid_to_apicid[nr_logical_cpuids] = apicid; return nr_logical_cpuids++; } -int __generic_processor_info(int apicid, int version, bool enabled) +int generic_processor_info(int apicid, int version) { int cpu, max = nr_cpu_ids; bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid, @@ -2130,11 +2121,9 @@ int __generic_processor_info(int apicid, int version, bool enabled) if (num_processors >= nr_cpu_ids) { int thiscpu = max + disabled_cpus; - if (enabled) { - pr_warning("APIC: NR_CPUS/possible_cpus limit of %i " - "reached. Processor %d/0x%x ignored.\n", - max, thiscpu, apicid); - } + pr_warning("APIC: NR_CPUS/possible_cpus limit of %i " + "reached. 
Processor %d/0x%x ignored.\n", + max, thiscpu, apicid); disabled_cpus++; return -EINVAL; @@ -2186,23 +2175,13 @@ int __generic_processor_info(int apicid, int version, bool enabled) apic->x86_32_early_logical_apicid(cpu); #endif set_cpu_possible(cpu, true); - - if (enabled) { - num_processors++; - physid_set(apicid, phys_cpu_present_map); - set_cpu_present(cpu, true); - } else { - disabled_cpus++; - } + physid_set(apicid, phys_cpu_present_map); + set_cpu_present(cpu, true); + num_processors++; return cpu; } -int generic_processor_info(int apicid, int version) -{ - return __generic_processor_info(apicid, version, true); -} - int hard_smp_processor_id(void) { return read_apic_id(); diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index e9f8f8cdd57085..86f20cc0a65e22 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -1105,7 +1105,8 @@ void __init uv_init_hub_info(struct uv_hub_info_s *hi) node_id.v = uv_read_local_mmr(UVH_NODE_ID); uv_cpuid.gnode_shift = max_t(unsigned int, uv_cpuid.gnode_shift, mn.n_val); hi->gnode_extra = (node_id.s.node_id & ~((1 << uv_cpuid.gnode_shift) - 1)) >> 1; - hi->gnode_upper = (unsigned long)hi->gnode_extra << mn.m_val; + if (mn.m_val) + hi->gnode_upper = (u64)hi->gnode_extra << mn.m_val; if (uv_gp_table) { hi->global_mmr_base = uv_gp_table->mmr_base; diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 4a7080c84a5a54..5a414545e8a390 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c @@ -218,7 +218,8 @@ #include #include #include -#include +#include +#include #include #include #include diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 4e95b2e0d95fed..c36140d788fe21 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -5,6 +5,7 @@ #include #include +#include #include #include #include @@ -555,10 +556,6 @@ static void early_init_amd(struct cpuinfo_x86 *c) if (c->x86_power & (1 << 8)) { set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); - if (check_tsc_unstable()) - clear_sched_clock_stable(); - } else { - clear_sched_clock_stable(); } /* Bit 12 of 8000_0007 edx is accumulated power mechanism. 
*/ diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index 2c234a6d94c448..43955ee6715b18 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c @@ -1,5 +1,6 @@ #include +#include #include #include @@ -104,8 +105,6 @@ static void early_init_centaur(struct cpuinfo_x86 *c) #ifdef CONFIG_X86_64 set_cpu_cap(c, X86_FEATURE_SYSENTER32); #endif - - clear_sched_clock_stable(); } static void init_centaur(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index c64ca5929cb5e0..58094a1f9e9d30 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -7,7 +7,9 @@ #include #include #include -#include +#include +#include +#include #include #include #include @@ -86,7 +88,6 @@ static void default_init(struct cpuinfo_x86 *c) strcpy(c->x86_model_id, "386"); } #endif - clear_sched_clock_stable(); } static const struct cpu_dev default_cpu = { @@ -1075,8 +1076,6 @@ static void identify_cpu(struct cpuinfo_x86 *c) */ if (this_cpu->c_init) this_cpu->c_init(c); - else - clear_sched_clock_stable(); /* Disable the PN if appropriate */ squash_the_stupid_serial_number(c); diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index 47416f959a48e3..a70fd61095f8a7 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c @@ -10,6 +10,7 @@ #include #include #include +#include #include "cpu.h" @@ -184,7 +185,6 @@ static void early_init_cyrix(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_CYRIX_ARR); break; } - clear_sched_clock_stable(); } static void init_cyrix(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 017ecd3bb5536e..063197771b8d7b 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -161,10 +162,6 @@ static void early_init_intel(struct cpuinfo_x86 *c) if (c->x86_power & (1 << 8)) { set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); - if (check_tsc_unstable()) - clear_sched_clock_stable(); - } else { - clear_sched_clock_stable(); } /* Penwell and Cloverview have the TSC which doesn't sleep on S3 */ diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 0282b0df004a86..c55fb2cb2acca8 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c index 8af04afdfcb964..9ac2a5cdd9c206 100644 --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c +++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c @@ -25,9 +25,9 @@ #include #include #include -#include +#include +#include #include -#include #include #include @@ -727,7 +727,7 @@ void rdtgroup_kn_unlock(struct kernfs_node *kn) if (atomic_dec_and_test(&rdtgrp->waitcount) && (rdtgrp->flags & RDT_DELETED)) { kernfs_unbreak_active_protection(kn); - kernfs_put(kn); + kernfs_put(rdtgrp->kn); kfree(rdtgrp); } else { kernfs_unbreak_active_protection(kn); diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 8e9725c607ea6a..5accfbdee3f06f 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -54,6 +54,8 @@ static DEFINE_MUTEX(mce_chrdev_read_mutex); +static int mce_chrdev_open_count; /* #times opened */ + #define mce_log_get_idx_check(p) \ ({ \ 
RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \ @@ -598,6 +600,10 @@ static int mce_default_notifier(struct notifier_block *nb, unsigned long val, if (atomic_read(&num_notifiers) > 2) return NOTIFY_DONE; + /* Don't print when mcelog is running */ + if (mce_chrdev_open_count > 0) + return NOTIFY_DONE; + __print_mce(m); return NOTIFY_DONE; @@ -1828,7 +1834,6 @@ void mcheck_cpu_clear(struct cpuinfo_x86 *c) */ static DEFINE_SPINLOCK(mce_chrdev_state_lock); -static int mce_chrdev_open_count; /* #times opened */ static int mce_chrdev_open_exclu; /* already open exclusive? */ static int mce_chrdev_open(struct inode *inode, struct file *file) diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 524cc5780a7796..6e4a047e4b684b 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -60,7 +60,7 @@ static const char * const th_names[] = { "load_store", "insn_fetch", "combined_unit", - "", + "decode_unit", "northbridge", "execution_unit", }; diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c index c1ea5b99983935..d77d07ab310b43 100644 --- a/arch/x86/kernel/cpu/transmeta.c +++ b/arch/x86/kernel/cpu/transmeta.c @@ -1,5 +1,6 @@ #include #include +#include #include #include #include @@ -15,8 +16,6 @@ static void early_init_transmeta(struct cpuinfo_x86 *c) if (xlvl >= 0x80860001) c->x86_capability[CPUID_8086_0001_EDX] = cpuid_edx(0x80860001); } - - clear_sched_clock_stable(); } static void init_transmeta(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c index 891f4dad7b2c49..22403a28caf522 100644 --- a/arch/x86/kernel/cpu/vmware.c +++ b/arch/x86/kernel/cpu/vmware.c @@ -30,7 +30,6 @@ #include #include #include -#include #undef pr_fmt #define pr_fmt(fmt) "vmware: " fmt diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c index b2f7207ba86c4b..f9c324e08d8554 100644 --- a/arch/x86/kernel/doublefault.c +++ b/arch/x86/kernel/doublefault.c @@ -1,5 +1,6 @@ #include #include +#include #include #include diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 0cfd01d2754cc9..09d4ac0d2661fd 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -10,6 +10,8 @@ #include #include #include +#include +#include #include #include #include diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c index bb3b5b9a689921..b0b3a3df7c2080 100644 --- a/arch/x86/kernel/dumpstack_32.c +++ b/arch/x86/kernel/dumpstack_32.c @@ -2,6 +2,7 @@ * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs */ +#include #include #include #include diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index fac189efcc347e..a8b117e93b4620 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c @@ -2,6 +2,7 @@ * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs */ +#include #include #include #include diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c index 19bdd1bf81607c..c2f8dde3255ca7 100644 --- a/arch/x86/kernel/fpu/init.c +++ b/arch/x86/kernel/fpu/init.c @@ -7,6 +7,7 @@ #include #include +#include #include /* diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c index c114b132d12178..b188b16841e376 100644 --- a/arch/x86/kernel/fpu/regset.c +++ b/arch/x86/kernel/fpu/regset.c @@ -5,6 +5,7 @@ #include #include #include +#include /* * The 
xstateregs_active() routine is the same as the regset_fpregs_active() routine, diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 8639bb2ae05868..cbd73eb4217026 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -29,6 +29,12 @@ #include #include +#if defined(CONFIG_FUNCTION_GRAPH_TRACER) && \ + !defined(CC_USING_FENTRY) && \ + !defined(CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE) +# error The following combination is not supported: ((compiler missing -mfentry) || (CONFIG_X86_32 and !CONFIG_DYNAMIC_FTRACE)) && CONFIG_FUNCTION_GRAPH_TRACER && CONFIG_CC_OPTIMIZE_FOR_SIZE +#endif + #ifdef CONFIG_DYNAMIC_FTRACE int ftrace_arch_code_modify_prepare(void) @@ -535,7 +541,7 @@ static void run_sync(void) { int enable_irqs = irqs_disabled(); - /* We may be called with interrupts disbled (on bootup). */ + /* We may be called with interrupts disabled (on bootup). */ if (enable_irqs) local_irq_enable(); on_each_cpu(do_sync_core, NULL, 1); diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 54a2372f5dbb1e..b5785c197e5347 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -4,6 +4,7 @@ * Copyright (C) 2000 Andrea Arcangeli SuSE */ +#define DISABLE_BRANCH_PROFILING #include #include #include diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index dc6ba5bda9fc83..89ff7af2de508b 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -354,7 +354,7 @@ static int hpet_resume(struct clock_event_device *evt, int timer) irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq)); irq_domain_activate_irq(irq_get_irq_data(hdev->irq)); - disable_irq(hdev->irq); + disable_hardirq(hdev->irq); irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu)); enable_irq(hdev->irq); } diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c index b01bc851745048..9c3cf0944bce35 100644 --- a/arch/x86/kernel/ioport.c +++ b/arch/x86/kernel/ioport.c @@ -4,6 +4,7 @@ */ #include +#include #include #include #include @@ -47,8 +48,14 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on) t->io_bitmap_ptr = bitmap; set_thread_flag(TIF_IO_BITMAP); + /* + * Now that we have an IO bitmap, we need our TSS limit to be + * correct. It's fine if we are preempted after doing this: + * with TIF_IO_BITMAP set, context switches will keep our TSS + * limit correct. 
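+ *
+ * (__switch_to_xtra() calls refresh_tss_limit() whenever the incoming
+ * task has TIF_IO_BITMAP set, which is what makes this preemption
+ * window harmless.)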
+ */ preempt_disable(); - refresh_TR(); + refresh_tss_limit(); preempt_enable(); } diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index 6b0678a541e2c2..3be74fbdeff27f 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c index bdb83e431d8976..38b64587b31be5 100644 --- a/arch/x86/kernel/kdebugfs.c +++ b/arch/x86/kernel/kdebugfs.c @@ -167,7 +167,7 @@ static int __init boot_params_kdebugfs_init(void) struct dentry *dbp, *version, *data; int error = -ENOMEM; - dbp = debugfs_create_dir("boot_params", NULL); + dbp = debugfs_create_dir("boot_params", arch_debugfs_dir); if (!dbp) return -ENOMEM; diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h index c6ee63f927ab72..d688826e5736a1 100644 --- a/arch/x86/kernel/kprobes/common.h +++ b/arch/x86/kernel/kprobes/common.h @@ -67,7 +67,7 @@ #endif /* Ensure if the instruction can be boostable */ -extern int can_boost(kprobe_opcode_t *instruction); +extern int can_boost(kprobe_opcode_t *instruction, void *addr); /* Recover instruction if given address is probed */ extern unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr); diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 520b8dfe164026..993fa4fe4f6869 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -45,6 +45,7 @@ #include #include #include +#include #include #include #include @@ -166,12 +167,12 @@ NOKPROBE_SYMBOL(skip_prefixes); * Returns non-zero if opcode is boostable. * RIP relative instructions are adjusted at copying time in 64 bits mode */ -int can_boost(kprobe_opcode_t *opcodes) +int can_boost(kprobe_opcode_t *opcodes, void *addr) { kprobe_opcode_t opcode; kprobe_opcode_t *orig_opcodes = opcodes; - if (search_exception_tables((unsigned long)opcodes)) + if (search_exception_tables((unsigned long)addr)) return 0; /* Page fault may occur on this address. */ retry: @@ -416,7 +417,7 @@ static int arch_copy_kprobe(struct kprobe *p) * __copy_instruction can modify the displacement of the instruction, * but it doesn't affect boostable check. 
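+ * can_boost() is handed the original p->addr so that the exception-table
+ * lookup is done against the probed address rather than the insn slot copy.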
*/ - if (can_boost(p->ainsn.insn)) + if (can_boost(p->ainsn.insn, p->addr)) p->ainsn.boostable = 0; else p->ainsn.boostable = -1; diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index 3d1bee9d6a728f..3e7c6e5a08ffde 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -178,7 +178,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src) while (len < RELATIVEJUMP_SIZE) { ret = __copy_instruction(dest + len, src + len); - if (!ret || !can_boost(dest + len)) + if (!ret || !can_boost(dest + len, src + len)) return -EINVAL; len += ret; } diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index bae6ea6cfb9413..d88967659098b5 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index 307b1f4543de4b..857cdbd0286757 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c @@ -194,19 +194,22 @@ static int arch_update_purgatory(struct kimage *image) /* Setup copying of backup region */ if (image->type == KEXEC_TYPE_CRASH) { - ret = kexec_purgatory_get_set_symbol(image, "backup_dest", + ret = kexec_purgatory_get_set_symbol(image, + "purgatory_backup_dest", &image->arch.backup_load_addr, sizeof(image->arch.backup_load_addr), 0); if (ret) return ret; - ret = kexec_purgatory_get_set_symbol(image, "backup_src", + ret = kexec_purgatory_get_set_symbol(image, + "purgatory_backup_src", &image->arch.backup_src_start, sizeof(image->arch.backup_src_start), 0); if (ret) return ret; - ret = kexec_purgatory_get_set_symbol(image, "backup_sz", + ret = kexec_purgatory_get_set_symbol(image, + "purgatory_backup_sz", &image->arch.backup_src_sz, sizeof(image->arch.backup_src_sz), 0); if (ret) diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index bfe4d6c96fbd8f..a723ae9440ab25 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -20,6 +21,7 @@ #include #include #include +#include #if defined(CONFIG_EDAC) #include @@ -164,11 +166,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action) spin_lock_irqsave(&desc->lock, flags); /* - * most handlers of type NMI_UNKNOWN never return because - * they just assume the NMI is theirs. Just a sanity check - * to manage expectations + * Indicate if there are multiple registrations on the + * internal NMI handler call chains (SERR and IO_CHECK). 
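+ * NMI_UNKNOWN handlers may legitimately stack up, so only the SERR and
+ * IO_CHECK chains warn when a second handler is registered.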
*/ - WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head)); WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head)); WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head)); diff --git a/arch/x86/kernel/perf_regs.c b/arch/x86/kernel/perf_regs.c index da8cb987b97312..587d887f7f17b2 100644 --- a/arch/x86/kernel/perf_regs.c +++ b/arch/x86/kernel/perf_regs.c @@ -1,6 +1,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 7780efa635b911..f675915617110f 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -7,6 +7,10 @@ #include #include #include +#include +#include +#include +#include #include #include #include @@ -65,8 +69,8 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = { }; EXPORT_PER_CPU_SYMBOL(cpu_tss); -DEFINE_PER_CPU(bool, need_tr_refresh); -EXPORT_PER_CPU_SYMBOL_GPL(need_tr_refresh); +DEFINE_PER_CPU(bool, __tss_limit_invalid); +EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid); /* * this gets called so that we can store lazy state into memory and copy the @@ -218,7 +222,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, * Make sure that the TSS limit is correct for the CPU * to notice the IO bitmap. */ - refresh_TR(); + refresh_tss_limit(); } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) { /* * Clear any possible leftover bits: diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index a0ac3e81518ad8..4c818f8bc1352b 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -12,6 +12,8 @@ #include #include #include +#include +#include #include #include #include diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index a61e141b6891ed..d6b784a5520daf 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -17,6 +17,8 @@ #include #include #include +#include +#include #include #include #include diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 9cc7d5a330ef9d..2364b23ea3e52c 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -6,6 +6,7 @@ #include #include +#include #include #include #include diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c index 9e93fe5803b470..5c3f6d6a507833 100644 --- a/arch/x86/kernel/pvclock.c +++ b/arch/x86/kernel/pvclock.c @@ -21,6 +21,8 @@ #include #include #include +#include + #include #include diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index e244c19a2451aa..067f9813fd2cf7 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -223,6 +223,22 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { DMI_MATCH(DMI_BOARD_NAME, "P4S800"), }, }, + { /* Handle problems with rebooting on ASUS EeeBook X205TA */ + .callback = set_acpi_reboot, + .ident = "ASUS EeeBook X205TA", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X205TA"), + }, + }, + { /* Handle problems with rebooting on ASUS EeeBook X205TAW */ + .callback = set_acpi_reboot, + .ident = "ASUS EeeBook X205TAW", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X205TAW"), + }, + }, /* Certec */ { /* Handle problems with rebooting on Certec BPC600 */ diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 763af1d0de64d8..396c042e9d0ee5 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -10,6 +10,7 @@ #define 
pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include +#include #include #include #include diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index a0d38685f7dfdd..bd1f1ad3528420 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -45,6 +45,9 @@ #include #include #include +#include +#include +#include #include #include #include diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c index 0653788026e288..8e2b79b88e512f 100644 --- a/arch/x86/kernel/stacktrace.c +++ b/arch/x86/kernel/stacktrace.c @@ -4,6 +4,8 @@ * Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar */ #include +#include +#include #include #include #include diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c index a23ce84a3f6ccf..f07f83b3611b6d 100644 --- a/arch/x86/kernel/step.c +++ b/arch/x86/kernel/step.c @@ -2,6 +2,7 @@ * x86 single-step support code, common to 32-bit and 64-bit. */ #include +#include #include #include #include diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index a55ed63b9f91b0..50215a4b934744 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c @@ -1,5 +1,6 @@ #include #include +#include #include #include #include diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 1dc86ee60a0319..948443e115c147 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 2724dc82f992ef..714dfba6a1e713 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -2,6 +2,7 @@ #include #include +#include #include #include #include @@ -326,9 +327,16 @@ unsigned long long sched_clock(void) { return paravirt_sched_clock(); } + +bool using_native_sched_clock(void) +{ + return pv_time_ops.sched_clock == native_sched_clock; +} #else unsigned long long sched_clock(void) __attribute__((alias("native_sched_clock"))); + +bool using_native_sched_clock(void) { return true; } #endif int check_tsc_unstable(void) @@ -1111,8 +1119,10 @@ static void tsc_cs_mark_unstable(struct clocksource *cs) { if (tsc_unstable) return; + tsc_unstable = 1; - clear_sched_clock_stable(); + if (using_native_sched_clock()) + clear_sched_clock_stable(); disable_sched_clock_irqtime(); pr_info("Marking TSC unstable due to clocksource watchdog\n"); } @@ -1134,18 +1144,20 @@ static struct clocksource clocksource_tsc = { void mark_tsc_unstable(char *reason) { - if (!tsc_unstable) { - tsc_unstable = 1; + if (tsc_unstable) + return; + + tsc_unstable = 1; + if (using_native_sched_clock()) clear_sched_clock_stable(); - disable_sched_clock_irqtime(); - pr_info("Marking TSC unstable due to %s\n", reason); - /* Change only the rating, when not registered */ - if (clocksource_tsc.mult) - clocksource_mark_unstable(&clocksource_tsc); - else { - clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE; - clocksource_tsc.rating = 0; - } + disable_sched_clock_irqtime(); + pr_info("Marking TSC unstable due to %s\n", reason); + /* Change only the rating, when not registered */ + if (clocksource_tsc.mult) { + clocksource_mark_unstable(&clocksource_tsc); + } else { + clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE; + clocksource_tsc.rating = 0; } } @@ -1321,6 +1333,8 @@ static int __init init_tsc_clocksource(void) * the refined calibration and directly register it as a clocksource. 
*/ if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) { + if (boot_cpu_has(X86_FEATURE_ART)) + art_related_clocksource = &clocksource_tsc; clocksource_register_khz(&clocksource_tsc, tsc_khz); return 0; } diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c index 23d15565d02ad7..08339262b666e5 100644 --- a/arch/x86/kernel/unwind_frame.c +++ b/arch/x86/kernel/unwind_frame.c @@ -1,4 +1,6 @@ #include +#include +#include #include #include #include @@ -80,19 +82,43 @@ static size_t regs_size(struct pt_regs *regs) return sizeof(*regs); } +#ifdef CONFIG_X86_32 +#define GCC_REALIGN_WORDS 3 +#else +#define GCC_REALIGN_WORDS 1 +#endif + static bool is_last_task_frame(struct unwind_state *state) { - unsigned long bp = (unsigned long)state->bp; - unsigned long regs = (unsigned long)task_pt_regs(state->task); + unsigned long *last_bp = (unsigned long *)task_pt_regs(state->task) - 2; + unsigned long *aligned_bp = last_bp - GCC_REALIGN_WORDS; /* * We have to check for the last task frame at two different locations * because gcc can occasionally decide to realign the stack pointer and - * change the offset of the stack frame by a word in the prologue of a - * function called by head/entry code. + * change the offset of the stack frame in the prologue of a function + * called by head/entry code. Examples: + * + * : + * push %edi + * lea 0x8(%esp),%edi + * and $0xfffffff8,%esp + * pushl -0x4(%edi) + * push %ebp + * mov %esp,%ebp + * + * : + * lea 0x8(%rsp),%r10 + * and $0xfffffffffffffff0,%rsp + * pushq -0x8(%r10) + * push %rbp + * mov %rsp,%rbp + * + * Note that after aligning the stack, it pushes a duplicate copy of + * the return address before pushing the frame pointer. */ - return bp == regs - FRAME_HEADER_SIZE || - bp == regs - FRAME_HEADER_SIZE - sizeof(long); + return (state->bp == last_bp || + (state->bp == aligned_bp && *(aligned_bp+1) == *(last_bp+1))); } /* diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index 0442d98367aec5..23ee89ce59a940 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 1d155cc56629a7..efde6cc5087518 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -16,6 +16,8 @@ #include #include #include +#include + #include #include #include diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index f701d443072770..ebae57ac59024a 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -28,6 +28,8 @@ #include #include +#include + #include #include diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index 73ea24d4f119c8..047b17a2626961 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c @@ -657,6 +657,9 @@ void kvm_pic_destroy(struct kvm *kvm) { struct kvm_pic *vpic = kvm->arch.vpic; + if (!vpic) + return; + kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_master); kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_slave); kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_eclr); diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c index 6e219e5c07d27c..289270a6aecbb4 100644 --- a/arch/x86/kvm/ioapic.c +++ b/arch/x86/kvm/ioapic.c @@ -635,6 +635,9 @@ void kvm_ioapic_destroy(struct kvm *kvm) { struct kvm_ioapic *ioapic = kvm->arch.vioapic; + if (!ioapic) + return; + cancel_delayed_work_sync(&ioapic->eoi_inject); kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev); kvm->arch.vioapic = NULL; diff --git 
a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c index b96d3893f121c7..6825cd36d13b7c 100644 --- a/arch/x86/kvm/irq_comm.c +++ b/arch/x86/kvm/irq_comm.c @@ -23,6 +23,8 @@ #include #include #include +#include + #include #include diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 1cda35277278ab..ac7810513d0e95 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c index 4a1c13eaa51833..60168cdd05463e 100644 --- a/arch/x86/kvm/page_track.c +++ b/arch/x86/kvm/page_track.c @@ -14,6 +14,8 @@ */ #include +#include + #include #include @@ -158,6 +160,14 @@ bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn, return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]); } +void kvm_page_track_cleanup(struct kvm *kvm) +{ + struct kvm_page_track_notifier_head *head; + + head = &kvm->arch.track_notifier_head; + cleanup_srcu_struct(&head->track_srcu); +} + void kvm_page_track_init(struct kvm *kvm) { struct kvm_page_track_notifier_head *head; diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 06ce377dcbc9ff..026db42a86c323 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -113,12 +113,19 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, .config = config, }; + attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc); + if (in_tx) attr.config |= HSW_IN_TX; - if (in_tx_cp) + if (in_tx_cp) { + /* + * HSW_IN_TX_CHECKPOINTED is not supported with nonzero + * period. Just clear the sample period so at least + * allocating the counter doesn't fail. + */ + attr.sample_period = 0; attr.config |= HSW_IN_TX_CHECKPOINTED; - - attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc); + } event = perf_event_create_kernel_counter(&attr, -1, current, intr ? kvm_perf_overflow_intr : diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index d1efe2c62b3f8d..5fba70646c3279 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1379,6 +1379,9 @@ static void avic_vm_destroy(struct kvm *kvm) unsigned long flags; struct kvm_arch *vm_data = &kvm->arch; + if (!avic) + return; + avic_free_vm_id(vm_data->avic_vm_id); if (vm_data->avic_logical_id_table_page) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index ef4ba71dbb66a5..2ee00dbbbd5188 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1239,6 +1239,11 @@ static inline bool cpu_has_vmx_invvpid_global(void) return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT; } +static inline bool cpu_has_vmx_invvpid(void) +{ + return vmx_capability.vpid & VMX_VPID_INVVPID_BIT; +} + static inline bool cpu_has_vmx_ept(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & @@ -2053,7 +2058,6 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) static unsigned long segment_base(u16 selector) { struct desc_ptr *gdt = this_cpu_ptr(&host_gdt); - struct desc_struct *d; struct desc_struct *table; unsigned long v; @@ -2754,7 +2758,6 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx) SECONDARY_EXEC_RDTSCP | SECONDARY_EXEC_DESC | SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | - SECONDARY_EXEC_ENABLE_VPID | SECONDARY_EXEC_APIC_REGISTER_VIRT | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | SECONDARY_EXEC_WBINVD_EXITING | @@ -2782,10 +2785,12 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx) * though it is treated as global context. The alternative is * not failing the single-context invvpid, and it is worse. 
*/ - if (enable_vpid) + if (enable_vpid) { + vmx->nested.nested_vmx_secondary_ctls_high |= + SECONDARY_EXEC_ENABLE_VPID; vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT | VMX_VPID_EXTENT_SUPPORTED_MASK; - else + } else vmx->nested.nested_vmx_vpid_caps = 0; if (enable_unrestricted_guest) @@ -4025,6 +4030,12 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu) __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid); } +static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu) +{ + if (enable_ept) + vmx_flush_tlb(vcpu); +} + static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) { ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits; @@ -6518,8 +6529,10 @@ static __init int hardware_setup(void) if (boot_cpu_has(X86_FEATURE_NX)) kvm_enable_efer_bits(EFER_NX); - if (!cpu_has_vmx_vpid()) + if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() || + !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global())) enable_vpid = 0; + if (!cpu_has_vmx_shadow_vmcs()) enable_shadow_vmcs = 0; if (enable_shadow_vmcs) @@ -7259,9 +7272,8 @@ static int handle_vmoff(struct kvm_vcpu *vcpu) static int handle_vmclear(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); + u32 zero = 0; gpa_t vmptr; - struct vmcs12 *vmcs12; - struct page *page; if (!nested_vmx_check_permission(vcpu)) return 1; @@ -7272,22 +7284,9 @@ static int handle_vmclear(struct kvm_vcpu *vcpu) if (vmptr == vmx->nested.current_vmptr) nested_release_vmcs12(vmx); - page = nested_get_page(vcpu, vmptr); - if (page == NULL) { - /* - * For accurate processor emulation, VMCLEAR beyond available - * physical memory should do nothing at all. However, it is - * possible that a nested vmx bug, not a guest hypervisor bug, - * resulted in this case, so let's shut down before doing any - * more damage: - */ - kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); - return 1; - } - vmcs12 = kmap(page); - vmcs12->launch_state = 0; - kunmap(page); - nested_release_page(page); + kvm_vcpu_write_guest(vcpu, + vmptr + offsetof(struct vmcs12, launch_state), + &zero, sizeof(zero)); nested_free_vmcs02(vmx, vmptr); @@ -8516,7 +8515,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu) && kvm_vmx_exit_handlers[exit_reason]) return kvm_vmx_exit_handlers[exit_reason](vcpu); else { - WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason); + vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n", + exit_reason); kvm_queue_exception(vcpu, UD_VECTOR); return 1; } @@ -8562,6 +8562,7 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set) } else { sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; + vmx_flush_tlb_ept_only(vcpu); } vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control); @@ -8587,8 +8588,10 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa) */ if (!is_guest_mode(vcpu) || !nested_cpu_has2(get_vmcs12(&vmx->vcpu), - SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { vmcs_write64(APIC_ACCESS_ADDR, hpa); + vmx_flush_tlb_ept_only(vcpu); + } } static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr) @@ -9695,10 +9698,8 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu, return false; page = nested_get_page(vcpu, vmcs12->msr_bitmap); - if (!page) { - WARN_ON(1); + if (!page) return false; - } msr_bitmap_l1 = (unsigned long *)kmap(page); memset(msr_bitmap_l0, 0xff, PAGE_SIZE); @@ -9991,7 +9992,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, 
{ struct vcpu_vmx *vmx = to_vmx(vcpu); u32 exec_control; - bool nested_ept_enabled = false; vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); @@ -10138,8 +10138,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, vmcs12->guest_intr_status); } - nested_ept_enabled = (exec_control & SECONDARY_EXEC_ENABLE_EPT) != 0; - /* * Write an illegal value to APIC_ACCESS_ADDR. Later, * nested_get_vmcs12_pages will either fix it up or @@ -10272,6 +10270,9 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, if (nested_cpu_has_ept(vmcs12)) { kvm_mmu_unload(vcpu); nested_ept_init_mmu_context(vcpu); + } else if (nested_cpu_has2(vmcs12, + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { + vmx_flush_tlb_ept_only(vcpu); } /* @@ -10299,12 +10300,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, vmx_set_efer(vcpu, vcpu->arch.efer); /* Shadow page tables on either EPT or shadow page tables. */ - if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_ept_enabled, + if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12), entry_failure_code)) return 1; - kvm_mmu_reset_context(vcpu); - if (!enable_ept) vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested; @@ -10642,6 +10641,11 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) { struct vcpu_vmx *vmx = to_vmx(vcpu); + if (vcpu->arch.exception.pending || + vcpu->arch.nmi_injected || + vcpu->arch.interrupt.pending) + return -EBUSY; + if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) && vmx->nested.preemption_timer_expired) { if (vmx->nested.nested_run_pending) @@ -10651,8 +10655,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) } if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) { - if (vmx->nested.nested_run_pending || - vcpu->arch.interrupt.pending) + if (vmx->nested.nested_run_pending) return -EBUSY; nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, NMI_VECTOR | INTR_TYPE_NMI_INTR | @@ -11069,6 +11072,10 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, vmx->nested.change_vmcs01_virtual_x2apic_mode = false; vmx_set_virtual_x2apic_mode(vcpu, vcpu->arch.apic_base & X2APIC_ENABLE); + } else if (!nested_cpu_has_ept(vmcs12) && + nested_cpu_has2(vmcs12, + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { + vmx_flush_tlb_ept_only(vcpu); } /* This is needed for same reason as it was needed in prepare_vmcs02 */ @@ -11118,8 +11125,10 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, */ static void vmx_leave_nested(struct kvm_vcpu *vcpu) { - if (is_guest_mode(vcpu)) + if (is_guest_mode(vcpu)) { + to_vmx(vcpu)->nested.nested_run_pending = 0; nested_vmx_vmexit(vcpu, -1, 0, 0); + } free_nested(to_vmx(vcpu)); } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index b2a4b11274b04f..ccbd45ecd41a3f 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -54,6 +54,8 @@ #include #include #include +#include + #include #include @@ -8151,11 +8153,12 @@ void kvm_arch_destroy_vm(struct kvm *kvm) if (kvm_x86_ops->vm_destroy) kvm_x86_ops->vm_destroy(kvm); kvm_iommu_unmap_guest(kvm); - kfree(kvm->arch.vpic); - kfree(kvm->arch.vioapic); + kvm_pic_destroy(kvm); + kvm_ioapic_destroy(kvm); kvm_free_vcpus(kvm); kvfree(rcu_dereference_check(kvm->arch.apic_map, 1)); kvm_mmu_uninit_vm(kvm); + kvm_page_track_cleanup(kvm); } void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, @@ -8564,11 
+8567,11 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, { struct x86_exception fault; - trace_kvm_async_pf_ready(work->arch.token, work->gva); if (work->wakeup_all) work->arch.token = ~0; /* broadcast wakeup */ else kvm_del_async_pf_gfn(vcpu, work->arch.gfn); + trace_kvm_async_pf_ready(work->arch.token, work->gva); if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) && !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) { diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S index 779782f5832476..9a53a06e5a3efc 100644 --- a/arch/x86/lib/memcpy_64.S +++ b/arch/x86/lib/memcpy_64.S @@ -290,7 +290,7 @@ EXPORT_SYMBOL_GPL(memcpy_mcsafe_unrolled) _ASM_EXTABLE_FAULT(.L_copy_leading_bytes, .L_memcpy_mcsafe_fail) _ASM_EXTABLE_FAULT(.L_cache_w0, .L_memcpy_mcsafe_fail) _ASM_EXTABLE_FAULT(.L_cache_w1, .L_memcpy_mcsafe_fail) - _ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail) + _ASM_EXTABLE_FAULT(.L_cache_w2, .L_memcpy_mcsafe_fail) _ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail) _ASM_EXTABLE_FAULT(.L_cache_w4, .L_memcpy_mcsafe_fail) _ASM_EXTABLE_FAULT(.L_cache_w5, .L_memcpy_mcsafe_fail) diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c index 61a7e9ea9aa16d..35ea061010a1a5 100644 --- a/arch/x86/mm/extable.c +++ b/arch/x86/mm/extable.c @@ -1,5 +1,7 @@ #include #include +#include + #include #include diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index e3254ca0eec4ec..428e31763cb93e 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -4,6 +4,7 @@ * Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar */ #include /* test_thread_flag(), ... */ +#include /* task_stack_*(), ... */ #include /* oops_begin/end, ... */ #include /* search_exception_tables */ #include /* max_low_pfn */ diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c index 99c7805a96937c..1f3b6ef105cda5 100644 --- a/arch/x86/mm/gup.c +++ b/arch/x86/mm/gup.c @@ -106,32 +106,35 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { struct dev_pagemap *pgmap = NULL; - int nr_start = *nr; - pte_t *ptep; + int nr_start = *nr, ret = 0; + pte_t *ptep, *ptem; - ptep = pte_offset_map(&pmd, addr); + /* + * Keep the original mapped PTE value (ptem) around since we + * might increment ptep off the end of the page when finishing + * our loop iteration. 
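+ *
+ * (pte_unmap() at the end of this function is therefore called on ptem,
+ * which always points into the page that pte_offset_map() mapped.)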
+ */ + ptem = ptep = pte_offset_map(&pmd, addr); do { pte_t pte = gup_get_pte(ptep); struct page *page; /* Similar to the PMD case, NUMA hinting must take slow path */ - if (pte_protnone(pte)) { - pte_unmap(ptep); - return 0; - } + if (pte_protnone(pte)) + break; + + if (!pte_allows_gup(pte_val(pte), write)) + break; if (pte_devmap(pte)) { pgmap = get_dev_pagemap(pte_pfn(pte), pgmap); if (unlikely(!pgmap)) { undo_dev_pagemap(nr, nr_start, pages); - pte_unmap(ptep); - return 0; + break; } - } else if (!pte_allows_gup(pte_val(pte), write) || - pte_special(pte)) { - pte_unmap(ptep); - return 0; - } + } else if (pte_special(pte)) + break; + VM_BUG_ON(!pfn_valid(pte_pfn(pte))); page = pte_page(pte); get_page(page); @@ -141,9 +144,11 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, (*nr)++; } while (ptep++, addr += PAGE_SIZE, addr != end); - pte_unmap(ptep - 1); + if (addr == end) + ret = 1; + pte_unmap(ptem); - return 1; + return ret; } static inline void get_head_page_multiple(struct page *page, int nr) diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c index 2ae8584b44c73d..c5066a260803d4 100644 --- a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index 0493c17b8a516f..4c90cfdc128b83 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c @@ -1,9 +1,11 @@ +#define DISABLE_BRANCH_PROFILING #define pr_fmt(fmt) "kasan: " fmt #include #include #include #include #include +#include #include #include diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c index 887e5718271682..aed206475aa7c0 100644 --- a/arch/x86/mm/kaslr.c +++ b/arch/x86/mm/kaslr.c @@ -48,7 +48,7 @@ static const unsigned long vaddr_start = __PAGE_OFFSET_BASE; #if defined(CONFIG_X86_ESPFIX64) static const unsigned long vaddr_end = ESPFIX_BASE_ADDR; #elif defined(CONFIG_EFI) -static const unsigned long vaddr_end = EFI_VA_START; +static const unsigned long vaddr_end = EFI_VA_END; #else static const unsigned long vaddr_end = __START_KERNEL_map; #endif @@ -105,7 +105,7 @@ void __init kernel_randomize_memory(void) */ BUILD_BUG_ON(vaddr_start >= vaddr_end); BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) && - vaddr_end >= EFI_VA_START); + vaddr_end >= EFI_VA_END); BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) || IS_ENABLED(CONFIG_EFI)) && vaddr_end >= __START_KERNEL_map); diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index d2dc0438d654a8..7940166c799b78 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c @@ -28,7 +28,8 @@ #include #include #include -#include +#include +#include #include struct va_alignment __read_mostly va_align = { diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c index c98079684bdb29..cd44ae727df7f4 100644 --- a/arch/x86/mm/mpx.c +++ b/arch/x86/mm/mpx.c @@ -7,6 +7,7 @@ */ #include #include +#include #include #include @@ -589,7 +590,7 @@ static unsigned long mpx_bd_entry_to_bt_addr(struct mm_struct *mm, * we might run off the end of the bounds table if we are on * a 64-bit kernel and try to get 8 bytes. 
*/ -int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret, +static int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret, long __user *bd_entry_ptr) { u32 bd_entry_32; diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index 0cb52ae0a8f075..190e718694b172 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c @@ -735,6 +735,15 @@ void pcibios_disable_device (struct pci_dev *dev) pcibios_disable_irq(dev); } +#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC +void pcibios_release_device(struct pci_dev *dev) +{ + if (atomic_dec_return(&dev->enable_cnt) >= 0) + pcibios_disable_device(dev); + +} +#endif + int pci_ext_cfg_avail(void) { if (raw_pci_ext_ops) diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index e1fb269c87af7b..292ab0364a89af 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c @@ -234,23 +234,14 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) return 1; for_each_pci_msi_entry(msidesc, dev) { - __pci_read_msi_msg(msidesc, &msg); - pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) | - ((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff); - if (msg.data != XEN_PIRQ_MSI_DATA || - xen_irq_from_pirq(pirq) < 0) { - pirq = xen_allocate_pirq_msi(dev, msidesc); - if (pirq < 0) { - irq = -ENODEV; - goto error; - } - xen_msi_compose_msg(dev, pirq, &msg); - __pci_write_msi_msg(msidesc, &msg); - dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq); - } else { - dev_dbg(&dev->dev, - "xen: msi already bound to pirq=%d\n", pirq); + pirq = xen_allocate_pirq_msi(dev, msidesc); + if (pirq < 0) { + irq = -ENODEV; + goto error; } + xen_msi_compose_msg(dev, pirq, &msg); + __pci_write_msi_msg(msidesc, &msg); + dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq); irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, (type == PCI_CAP_ID_MSI) ? nvec : 1, (type == PCI_CAP_ID_MSIX) ? diff --git a/arch/x86/platform/intel-mid/device_libs/Makefile b/arch/x86/platform/intel-mid/device_libs/Makefile index a7dbec4dce2758..3dbde04febdcca 100644 --- a/arch/x86/platform/intel-mid/device_libs/Makefile +++ b/arch/x86/platform/intel-mid/device_libs/Makefile @@ -26,5 +26,6 @@ obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_pcal9555a.o obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o # MISC Devices obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o +obj-$(subst m,y,$(CONFIG_INTEL_MID_POWER_BUTTON)) += platform_mrfld_power_btn.o obj-$(subst m,y,$(CONFIG_RTC_DRV_CMOS)) += platform_mrfld_rtc.o obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_mrfld_wdt.o diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c new file mode 100644 index 00000000000000..a6c3705a28ad49 --- /dev/null +++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c @@ -0,0 +1,82 @@ +/* + * Intel Merrifield power button support + * + * (C) Copyright 2017 Intel Corporation + * + * Author: Andy Shevchenko + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. 
+ */ + +#include +#include +#include +#include + +#include +#include + +static struct resource mrfld_power_btn_resources[] = { + { + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device mrfld_power_btn_dev = { + .name = "msic_power_btn", + .id = PLATFORM_DEVID_NONE, + .num_resources = ARRAY_SIZE(mrfld_power_btn_resources), + .resource = mrfld_power_btn_resources, +}; + +static int mrfld_power_btn_scu_status_change(struct notifier_block *nb, + unsigned long code, void *data) +{ + if (code == SCU_DOWN) { + platform_device_unregister(&mrfld_power_btn_dev); + return 0; + } + + return platform_device_register(&mrfld_power_btn_dev); +} + +static struct notifier_block mrfld_power_btn_scu_notifier = { + .notifier_call = mrfld_power_btn_scu_status_change, +}; + +static int __init register_mrfld_power_btn(void) +{ + if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER) + return -ENODEV; + + /* + * We need to be sure that the SCU IPC is ready before + * PMIC power button device can be registered: + */ + intel_scu_notifier_add(&mrfld_power_btn_scu_notifier); + + return 0; +} +arch_initcall(register_mrfld_power_btn); + +static void __init *mrfld_power_btn_platform_data(void *info) +{ + struct resource *res = mrfld_power_btn_resources; + struct sfi_device_table_entry *pentry = info; + + res->start = res->end = pentry->irq; + return NULL; +} + +static const struct devs_id mrfld_power_btn_dev_id __initconst = { + .name = "bcove_power_btn", + .type = SFI_DEV_TYPE_IPC, + .delay = 1, + .msic = 1, + .get_platform_data = &mrfld_power_btn_platform_data, +}; + +sfi_device(mrfld_power_btn_dev_id); diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c index 86edd1e941eb07..9e304e2ea4f55c 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c @@ -19,7 +19,7 @@ #include #include -#define TANGIER_EXT_TIMER0_MSI 15 +#define TANGIER_EXT_TIMER0_MSI 12 static struct platform_device wdt_dev = { .name = "intel_mid_wdt", diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c index e793fe509971f4..e42978d4deafeb 100644 --- a/arch/x86/platform/intel-mid/mfld.c +++ b/arch/x86/platform/intel-mid/mfld.c @@ -17,16 +17,6 @@ #include "intel_mid_weak_decls.h" -static void penwell_arch_setup(void); -/* penwell arch ops */ -static struct intel_mid_ops penwell_ops = { - .arch_setup = penwell_arch_setup, -}; - -static void mfld_power_off(void) -{ -} - static unsigned long __init mfld_calibrate_tsc(void) { unsigned long fast_calibrate; @@ -63,9 +53,12 @@ static unsigned long __init mfld_calibrate_tsc(void) static void __init penwell_arch_setup(void) { x86_platform.calibrate_tsc = mfld_calibrate_tsc; - pm_power_off = mfld_power_off; } +static struct intel_mid_ops penwell_ops = { + .arch_setup = penwell_arch_setup, +}; + void *get_penwell_ops(void) { return &penwell_ops; diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index 766d4d3529a1d9..f25982cdff9006 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c @@ -1847,7 +1847,6 @@ static void pq_init(int node, int pnode) ops.write_payload_first(pnode, first); ops.write_payload_last(pnode, last); - ops.write_g_sw_ack(pnode, 0xffffUL); /* in effect, all msg_type's are set to MSG_NOOP */ memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE); diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c index 
9743d0ccfec69a..c34bd8233f7c81 100644 --- a/arch/x86/platform/uv/uv_nmi.c +++ b/arch/x86/platform/uv/uv_nmi.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile index 555b9fa0ad43cb..7dbdb780264df9 100644 --- a/arch/x86/purgatory/Makefile +++ b/arch/x86/purgatory/Makefile @@ -8,6 +8,7 @@ PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y)) LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib targets += purgatory.ro +KASAN_SANITIZE := n KCOV_INSTRUMENT := n # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That diff --git a/arch/x86/purgatory/purgatory.c b/arch/x86/purgatory/purgatory.c index 25e068ba338214..470edad96bb956 100644 --- a/arch/x86/purgatory/purgatory.c +++ b/arch/x86/purgatory/purgatory.c @@ -10,21 +10,19 @@ * Version 2. See the file COPYING for more details. */ +#include +#include + #include "sha256.h" #include "../boot/string.h" -struct sha_region { - unsigned long start; - unsigned long len; -}; - -unsigned long backup_dest = 0; -unsigned long backup_src = 0; -unsigned long backup_sz = 0; +unsigned long purgatory_backup_dest __section(.kexec-purgatory); +unsigned long purgatory_backup_src __section(.kexec-purgatory); +unsigned long purgatory_backup_sz __section(.kexec-purgatory); -u8 sha256_digest[SHA256_DIGEST_SIZE] = { 0 }; +u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE] __section(.kexec-purgatory); -struct sha_region sha_regions[16] = {}; +struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX] __section(.kexec-purgatory); /* * On x86, second kernel requries first 640K of memory to boot. Copy @@ -33,26 +31,28 @@ struct sha_region sha_regions[16] = {}; */ static int copy_backup_region(void) { - if (backup_dest) - memcpy((void *)backup_dest, (void *)backup_src, backup_sz); - + if (purgatory_backup_dest) { + memcpy((void *)purgatory_backup_dest, + (void *)purgatory_backup_src, purgatory_backup_sz); + } return 0; } -int verify_sha256_digest(void) +static int verify_sha256_digest(void) { - struct sha_region *ptr, *end; + struct kexec_sha_region *ptr, *end; u8 digest[SHA256_DIGEST_SIZE]; struct sha256_state sctx; sha256_init(&sctx); - end = &sha_regions[sizeof(sha_regions)/sizeof(sha_regions[0])]; - for (ptr = sha_regions; ptr < end; ptr++) + end = purgatory_sha_regions + ARRAY_SIZE(purgatory_sha_regions); + + for (ptr = purgatory_sha_regions; ptr < end; ptr++) sha256_update(&sctx, (uint8_t *)(ptr->start), ptr->len); sha256_final(&sctx, digest); - if (memcmp(digest, sha256_digest, sizeof(digest))) + if (memcmp(digest, purgatory_sha256_digest, sizeof(digest))) return 1; return 0; diff --git a/arch/x86/purgatory/setup-x86_64.S b/arch/x86/purgatory/setup-x86_64.S index fe3c91ba1bd0c6..dfae9b9e60b5ba 100644 --- a/arch/x86/purgatory/setup-x86_64.S +++ b/arch/x86/purgatory/setup-x86_64.S @@ -9,6 +9,7 @@ * This source code is licensed under the GNU General Public License, * Version 2. See the file COPYING for more details. 
*/ +#include .text .globl purgatory_start diff --git a/arch/x86/purgatory/sha256.h b/arch/x86/purgatory/sha256.h index bd15a4127735e5..2867d9825a57e5 100644 --- a/arch/x86/purgatory/sha256.h +++ b/arch/x86/purgatory/sha256.h @@ -10,7 +10,6 @@ #ifndef SHA256_H #define SHA256_H - #include #include diff --git a/arch/x86/um/syscalls_64.c b/arch/x86/um/syscalls_64.c index e6552275320bcc..10d907098c2614 100644 --- a/arch/x86/um/syscalls_64.c +++ b/arch/x86/um/syscalls_64.c @@ -6,6 +6,7 @@ */ #include +#include #include #include /* XXX This should get the constants from libc */ #include diff --git a/arch/x86/um/sysrq_32.c b/arch/x86/um/sysrq_32.c index 16ee0e450e3e39..f2383484840d3b 100644 --- a/arch/x86/um/sysrq_32.c +++ b/arch/x86/um/sysrq_32.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/x86/um/sysrq_64.c b/arch/x86/um/sysrq_64.c index 38b4e4abd0f836..903ad91b624f29 100644 --- a/arch/x86/um/sysrq_64.c +++ b/arch/x86/um/sysrq_64.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index f6740b5b173808..37cb5aad71de36 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -38,7 +38,7 @@ * * Jeremy Fitzhardinge , XenSource Inc, 2007 */ -#include +#include #include #include #include diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 0dee6f59ea8268..7ff2f1bfb7ec05 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile index e54189427b3151..7ee02fe4a63df7 100644 --- a/arch/xtensa/Makefile +++ b/arch/xtensa/Makefile @@ -93,11 +93,7 @@ endif boot := arch/xtensa/boot -all: zImage - -bzImage : zImage - -zImage: vmlinux +all Image zImage uImage: vmlinux $(Q)$(MAKE) $(build)=$(boot) $@ %.dtb: @@ -107,6 +103,8 @@ dtbs: scripts $(Q)$(MAKE) $(build)=$(boot)/dts define archhelp + @echo '* Image - Kernel ELF image with reset vector' @echo '* zImage - Compressed kernel image (arch/xtensa/boot/images/zImage.*)' + @echo '* uImage - U-Boot wrapped image' @echo ' dtbs - Build device tree blobs for enabled boards' endef diff --git a/arch/xtensa/boot/Makefile b/arch/xtensa/boot/Makefile index ca20a892021bb6..53e4178711e695 100644 --- a/arch/xtensa/boot/Makefile +++ b/arch/xtensa/boot/Makefile @@ -21,14 +21,17 @@ subdir-y := lib # Subdirs for the boot loader(s) -bootdir-$(CONFIG_XTENSA_PLATFORM_ISS) += boot-elf -bootdir-$(CONFIG_XTENSA_PLATFORM_XT2000) += boot-redboot boot-elf boot-uboot -bootdir-$(CONFIG_XTENSA_PLATFORM_XTFPGA) += boot-redboot boot-elf boot-uboot +boot-$(CONFIG_XTENSA_PLATFORM_ISS) += Image +boot-$(CONFIG_XTENSA_PLATFORM_XT2000) += Image zImage uImage +boot-$(CONFIG_XTENSA_PLATFORM_XTFPGA) += Image zImage uImage -zImage Image: $(bootdir-y) +all: $(boot-y) +Image: boot-elf +zImage: boot-redboot +uImage: $(obj)/uImage -$(bootdir-y): $(addprefix $(obj)/,$(subdir-y)) \ - $(addprefix $(obj)/,$(host-progs)) +boot-elf boot-redboot: $(addprefix $(obj)/,$(subdir-y)) \ + $(addprefix $(obj)/,$(host-progs)) $(Q)$(MAKE) $(build)=$(obj)/$@ $(MAKECMDGOALS) OBJCOPYFLAGS = --strip-all -R .comment -R .note.gnu.build-id -O binary @@ -41,4 +44,10 @@ vmlinux.bin.gz: vmlinux.bin FORCE boot-elf: vmlinux.bin boot-redboot: vmlinux.bin.gz -boot-uboot: vmlinux.bin.gz + +UIMAGE_LOADADDR = $(CONFIG_KERNEL_LOAD_ADDRESS) +UIMAGE_COMPRESSION = gzip + +$(obj)/uImage: vmlinux.bin.gz FORCE + $(call if_changed,uimage) + $(Q)$(kecho) ' Kernel: $@ is ready' diff 
--git a/arch/xtensa/boot/boot-elf/Makefile b/arch/xtensa/boot/boot-elf/Makefile index 89db089f5a1247..52147198135635 100644 --- a/arch/xtensa/boot/boot-elf/Makefile +++ b/arch/xtensa/boot/boot-elf/Makefile @@ -31,4 +31,4 @@ $(obj)/../Image.elf: $(obj)/Image.o $(obj)/boot.lds -o $@ $(obj)/Image.o $(Q)$(kecho) ' Kernel: $@ is ready' -zImage: $(obj)/../Image.elf +all Image: $(obj)/../Image.elf diff --git a/arch/xtensa/boot/boot-redboot/Makefile b/arch/xtensa/boot/boot-redboot/Makefile index 8be8b943698178..8632473ad319ea 100644 --- a/arch/xtensa/boot/boot-redboot/Makefile +++ b/arch/xtensa/boot/boot-redboot/Makefile @@ -32,4 +32,4 @@ $(obj)/../zImage.redboot: $(obj)/zImage.elf $(Q)$(OBJCOPY) -S -O binary $< $@ $(Q)$(kecho) ' Kernel: $@ is ready' -zImage: $(obj)/../zImage.redboot +all zImage: $(obj)/../zImage.redboot diff --git a/arch/xtensa/boot/boot-uboot/Makefile b/arch/xtensa/boot/boot-uboot/Makefile deleted file mode 100644 index 0f4c417b4196e9..00000000000000 --- a/arch/xtensa/boot/boot-uboot/Makefile +++ /dev/null @@ -1,14 +0,0 @@ -# -# This file is subject to the terms and conditions of the GNU General Public -# License. See the file "COPYING" in the main directory of this archive -# for more details. -# - -UIMAGE_LOADADDR = $(CONFIG_KERNEL_LOAD_ADDRESS) -UIMAGE_COMPRESSION = gzip - -$(obj)/../uImage: vmlinux.bin.gz FORCE - $(call if_changed,uimage) - $(Q)$(kecho) ' Kernel: $@ is ready' - -zImage: $(obj)/../uImage diff --git a/arch/xtensa/include/asm/mmu_context.h b/arch/xtensa/include/asm/mmu_context.h index 04c8ebdc45178c..f7e186dfc4e44e 100644 --- a/arch/xtensa/include/asm/mmu_context.h +++ b/arch/xtensa/include/asm/mmu_context.h @@ -17,6 +17,7 @@ #include #include +#include #include diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h index 976b1d70edbc0a..4ddbfd57a7c824 100644 --- a/arch/xtensa/include/asm/page.h +++ b/arch/xtensa/include/asm/page.h @@ -164,8 +164,21 @@ void copy_user_highpage(struct page *to, struct page *from, #define ARCH_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT) +#ifdef CONFIG_MMU +static inline unsigned long ___pa(unsigned long va) +{ + unsigned long off = va - PAGE_OFFSET; + + if (off >= XCHAL_KSEG_SIZE) + off -= XCHAL_KSEG_SIZE; + + return off + PHYS_OFFSET; +} +#define __pa(x) ___pa((unsigned long)(x)) +#else #define __pa(x) \ ((unsigned long) (x) - PAGE_OFFSET + PHYS_OFFSET) +#endif #define __va(x) \ ((void *)((unsigned long) (x) - PHYS_OFFSET + PAGE_OFFSET)) #define pfn_valid(pfn) \ diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h index 8aa0e0d9cbb21f..30dd5b2e4ad5af 100644 --- a/arch/xtensa/include/asm/pgtable.h +++ b/arch/xtensa/include/asm/pgtable.h @@ -11,6 +11,7 @@ #ifndef _XTENSA_PGTABLE_H #define _XTENSA_PGTABLE_H +#define __ARCH_USE_5LEVEL_HACK #include #include #include diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h index 77d41cc7a688ac..65d3da9db19beb 100644 --- a/arch/xtensa/include/asm/vectors.h +++ b/arch/xtensa/include/asm/vectors.h @@ -67,7 +67,11 @@ static inline unsigned long xtensa_get_kio_paddr(void) #endif /* CONFIG_MMU */ #define RESET_VECTOR1_VADDR (XCHAL_RESET_VECTOR1_VADDR) +#ifdef CONFIG_VECTORS_OFFSET #define VECBASE_VADDR (KERNELOFFSET - CONFIG_VECTORS_OFFSET) +#else +#define VECBASE_VADDR _vecbase +#endif #if defined(XCHAL_HAVE_VECBASE) && XCHAL_HAVE_VECBASE diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h index cd400af4a6b255..6be7eb27fd29d6 100644 --- a/arch/xtensa/include/uapi/asm/unistd.h +++ 
b/arch/xtensa/include/uapi/asm/unistd.h @@ -774,7 +774,10 @@ __SYSCALL(349, sys_pkey_alloc, 2) #define __NR_pkey_free 350 __SYSCALL(350, sys_pkey_free, 1) -#define __NR_syscall_count 351 +#define __NR_statx 351 +__SYSCALL(351, sys_statx, 5) + +#define __NR_syscall_count 352 /* * sysxtensa syscall handler diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c index 826d25104846f5..58f96d1230d4d5 100644 --- a/arch/xtensa/kernel/process.c +++ b/arch/xtensa/kernel/process.c @@ -17,6 +17,9 @@ #include #include +#include +#include +#include #include #include #include diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c index 32519b71d914b4..e0f583fed06a40 100644 --- a/arch/xtensa/kernel/ptrace.c +++ b/arch/xtensa/kernel/ptrace.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index 8fd4be610607c2..197e75b400b169 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c @@ -126,6 +126,8 @@ static int __init parse_tag_initrd(const bp_tag_t* tag) __tagtable(BP_TAG_INITRD, parse_tag_initrd); +#endif /* CONFIG_BLK_DEV_INITRD */ + #ifdef CONFIG_OF static int __init parse_tag_fdt(const bp_tag_t *tag) @@ -138,8 +140,6 @@ __tagtable(BP_TAG_FDT, parse_tag_fdt); #endif /* CONFIG_OF */ -#endif /* CONFIG_BLK_DEV_INITRD */ - static int __init parse_tag_cmdline(const bp_tag_t* tag) { strlcpy(command_line, (char *)(tag->data), COMMAND_LINE_SIZE); @@ -334,6 +334,7 @@ void __init setup_arch(char **cmdline_p) mem_reserve(__pa(&_stext), __pa(&_end)); +#ifdef CONFIG_VECTORS_OFFSET mem_reserve(__pa(&_WindowVectors_text_start), __pa(&_WindowVectors_text_end)); @@ -370,6 +371,8 @@ void __init setup_arch(char **cmdline_p) __pa(&_Level6InterruptVector_text_end)); #endif +#endif /* CONFIG_VECTORS_OFFSET */ + #ifdef CONFIG_SMP mem_reserve(__pa(&_SecondaryResetVector_text_start), __pa(&_SecondaryResetVector_text_end)); diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c index c41294745731ee..70a13194544384 100644 --- a/arch/xtensa/kernel/signal.c +++ b/arch/xtensa/kernel/signal.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c index fcea72019df798..932d64689bacbb 100644 --- a/arch/xtensa/kernel/smp.c +++ b/arch/xtensa/kernel/smp.c @@ -21,6 +21,9 @@ #include #include #include +#include +#include +#include #include #include #include diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c index d3fd100dffc9a0..06937928cb72dc 100644 --- a/arch/xtensa/kernel/syscall.c +++ b/arch/xtensa/kernel/syscall.c @@ -25,6 +25,7 @@ #include #include #include +#include #include typedef void (*syscall_t)(void); diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index 282bf721a4d685..bae697a06a9845 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c @@ -24,7 +24,9 @@ */ #include -#include +#include +#include +#include #include #include #include @@ -481,10 +483,8 @@ void show_regs(struct pt_regs * regs) static int show_trace_cb(struct stackframe *frame, void *data) { - if (kernel_text_address(frame->pc)) { - pr_cont(" [<%08lx>]", frame->pc); - print_symbol(" %s\n", frame->pc); - } + if (kernel_text_address(frame->pc)) + pr_cont(" [<%08lx>] %pB\n", frame->pc, (void *)frame->pc); return 0; } diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S index 31411fc82662c8..30d9fc21e0763c 100644 --- 
a/arch/xtensa/kernel/vmlinux.lds.S +++ b/arch/xtensa/kernel/vmlinux.lds.S @@ -59,6 +59,7 @@ jiffies = jiffies_64; * garbage.) */ +#ifdef CONFIG_VECTORS_OFFSET #define SECTION_VECTOR(sym, section, addr, max_prevsec_size, prevsec) \ section addr : AT((MIN(LOADADDR(prevsec) + max_prevsec_size, \ LOADADDR(prevsec) + SIZEOF(prevsec)) + 3) & ~ 3) \ @@ -68,6 +69,11 @@ jiffies = jiffies_64; *(section) \ sym ## _end = ABSOLUTE(.); \ } +#else +#define SECTION_VECTOR(section, addr) \ + . = addr; \ + *(section) +#endif /* * Mapping of input sections to output sections when linking. @@ -85,6 +91,37 @@ SECTIONS { /* The HEAD_TEXT section must be the first section! */ HEAD_TEXT + +#ifndef CONFIG_VECTORS_OFFSET + . = ALIGN(PAGE_SIZE); + _vecbase = .; + + SECTION_VECTOR (.WindowVectors.text, WINDOW_VECTORS_VADDR) +#if XCHAL_EXCM_LEVEL >= 2 + SECTION_VECTOR (.Level2InterruptVector.text, INTLEVEL2_VECTOR_VADDR) +#endif +#if XCHAL_EXCM_LEVEL >= 3 + SECTION_VECTOR (.Level3InterruptVector.text, INTLEVEL3_VECTOR_VADDR) +#endif +#if XCHAL_EXCM_LEVEL >= 4 + SECTION_VECTOR (.Level4InterruptVector.text, INTLEVEL4_VECTOR_VADDR) +#endif +#if XCHAL_EXCM_LEVEL >= 5 + SECTION_VECTOR (.Level5InterruptVector.text, INTLEVEL5_VECTOR_VADDR) +#endif +#if XCHAL_EXCM_LEVEL >= 6 + SECTION_VECTOR (.Level6InterruptVector.text, INTLEVEL6_VECTOR_VADDR) +#endif + SECTION_VECTOR (.DebugInterruptVector.literal, DEBUG_VECTOR_VADDR - 4) + SECTION_VECTOR (.DebugInterruptVector.text, DEBUG_VECTOR_VADDR) + SECTION_VECTOR (.KernelExceptionVector.literal, KERNEL_VECTOR_VADDR - 4) + SECTION_VECTOR (.KernelExceptionVector.text, KERNEL_VECTOR_VADDR) + SECTION_VECTOR (.UserExceptionVector.literal, USER_VECTOR_VADDR - 4) + SECTION_VECTOR (.UserExceptionVector.text, USER_VECTOR_VADDR) + SECTION_VECTOR (.DoubleExceptionVector.literal, DOUBLEEXC_VECTOR_VADDR - 48) + SECTION_VECTOR (.DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR) +#endif + TEXT_TEXT VMLINUX_SYMBOL(__sched_text_start) = .; *(.sched.literal .sched.text) @@ -132,6 +169,7 @@ SECTIONS . = ALIGN(16); __boot_reloc_table_start = ABSOLUTE(.); +#ifdef CONFIG_VECTORS_OFFSET RELOCATE_ENTRY(_WindowVectors_text, .WindowVectors.text); #if XCHAL_EXCM_LEVEL >= 2 @@ -164,6 +202,7 @@ SECTIONS .DoubleExceptionVector.text); RELOCATE_ENTRY(_DebugInterruptVector_text, .DebugInterruptVector.text); +#endif #if defined(CONFIG_SMP) RELOCATE_ENTRY(_SecondaryResetVector_text, .SecondaryResetVector.text); @@ -186,6 +225,7 @@ SECTIONS . = ALIGN(4); .dummy : { LONG(0) } +#ifdef CONFIG_VECTORS_OFFSET /* The vectors are relocated to the real position at startup time */ SECTION_VECTOR (_WindowVectors_text, @@ -277,6 +317,7 @@ SECTIONS . 
= (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3; +#endif #if defined(CONFIG_SMP) SECTION_VECTOR (_SecondaryResetVector_text, diff --git a/block/Kconfig b/block/Kconfig index a2a92e57a87db6..e9f780f815f5d7 100644 --- a/block/Kconfig +++ b/block/Kconfig @@ -189,4 +189,9 @@ config BLK_MQ_PCI depends on BLOCK && PCI default y +config BLK_MQ_VIRTIO + bool + depends on BLOCK && VIRTIO + default y + source block/Kconfig.iosched diff --git a/block/Makefile b/block/Makefile index 2ad7c304e3f507..081bb680789bc8 100644 --- a/block/Makefile +++ b/block/Makefile @@ -25,6 +25,7 @@ obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o t10-pi.o obj-$(CONFIG_BLK_MQ_PCI) += blk-mq-pci.o +obj-$(CONFIG_BLK_MQ_VIRTIO) += blk-mq-virtio.o obj-$(CONFIG_BLK_DEV_ZONED) += blk-zoned.o obj-$(CONFIG_BLK_WBT) += blk-wbt.o obj-$(CONFIG_BLK_DEBUG_FS) += blk-mq-debugfs.o diff --git a/block/bio.c b/block/bio.c index 5eec5e08417f6f..e75878f8b14af8 100644 --- a/block/bio.c +++ b/block/bio.c @@ -376,10 +376,14 @@ static void punt_bios_to_rescuer(struct bio_set *bs) bio_list_init(&punt); bio_list_init(&nopunt); - while ((bio = bio_list_pop(current->bio_list))) + while ((bio = bio_list_pop(&current->bio_list[0]))) bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); + current->bio_list[0] = nopunt; - *current->bio_list = nopunt; + bio_list_init(&nopunt); + while ((bio = bio_list_pop(&current->bio_list[1]))) + bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); + current->bio_list[1] = nopunt; spin_lock(&bs->rescue_lock); bio_list_merge(&bs->rescue_list, &punt); @@ -466,7 +470,9 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs) * we retry with the original gfp_flags. */ - if (current->bio_list && !bio_list_empty(current->bio_list)) + if (current->bio_list && + (!bio_list_empty(&current->bio_list[0]) || + !bio_list_empty(&current->bio_list[1]))) gfp_mask &= ~__GFP_DIRECT_RECLAIM; p = mempool_alloc(bs->bio_pool, gfp_mask); diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 295e98c2c8ccdf..bbe7ee00bd3d70 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include diff --git a/block/blk-core.c b/block/blk-core.c index b9e857f4afe85f..d772c221cc178b 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -578,9 +578,6 @@ void blk_cleanup_queue(struct request_queue *q) q->queue_lock = &q->__queue_lock; spin_unlock_irq(lock); - bdi_unregister(q->backing_dev_info); - put_disk_devt(q->disk_devt); - /* @q is and will stay empty, shutdown and put */ blk_put_queue(q); } @@ -1976,7 +1973,14 @@ generic_make_request_checks(struct bio *bio) */ blk_qc_t generic_make_request(struct bio *bio) { - struct bio_list bio_list_on_stack; + /* + * bio_list_on_stack[0] contains bios submitted by the current + * make_request_fn. + * bio_list_on_stack[1] contains bios that were submitted before + * the current make_request_fn, but that haven't been processed + * yet.
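The comment above introduces the two-level on-stack bio list: slot [0] collects bios generated by the current ->make_request_fn, slot [1] holds bios that were already pending before it, and newly generated bios are re-ordered so that requests for lower-level devices are drained before requests for the same level. A rough userspace sketch of that re-ordering step follows; plain arrays stand in for struct bio_list, and the queue numbers and sizes are invented for illustration only.

/*
 * Illustrative only: a "bio" is modelled as the queue number it targets,
 * and struct bio_list is replaced by fixed-size arrays.
 */
#include <stdio.h>

#define PENDING 4

struct fake_bio { int queue; };

int main(void)
{
	/* bios spawned by the current make_request_fn; queue 1 is "this" level */
	struct fake_bio spawned[PENDING] = { {1}, {2}, {1}, {2} };
	struct fake_bio lower[PENDING], same[PENDING], ordered[PENDING];
	int nl = 0, ns = 0, n = 0, i;

	for (i = 0; i < PENDING; i++) {
		if (spawned[i].queue == 1)
			same[ns++] = spawned[i];	/* same level */
		else
			lower[nl++] = spawned[i];	/* lower level */
	}

	/* lowest level first, then the same level; in the kernel the
	 * previously pending list (slot [1]) is merged after these two */
	for (i = 0; i < nl; i++)
		ordered[n++] = lower[i];
	for (i = 0; i < ns; i++)
		ordered[n++] = same[i];

	for (i = 0; i < n; i++)
		printf("dispatch bio for queue %d\n", ordered[i].queue);
	return 0;
}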
+ */ + struct bio_list bio_list_on_stack[2]; blk_qc_t ret = BLK_QC_T_NONE; if (!generic_make_request_checks(bio)) @@ -1993,7 +1997,7 @@ blk_qc_t generic_make_request(struct bio *bio) * should be added at the tail */ if (current->bio_list) { - bio_list_add(current->bio_list, bio); + bio_list_add(&current->bio_list[0], bio); goto out; } @@ -2012,23 +2016,39 @@ blk_qc_t generic_make_request(struct bio *bio) * bio_list, and call into ->make_request() again. */ BUG_ON(bio->bi_next); - bio_list_init(&bio_list_on_stack); - current->bio_list = &bio_list_on_stack; + bio_list_init(&bio_list_on_stack[0]); + current->bio_list = bio_list_on_stack; do { struct request_queue *q = bdev_get_queue(bio->bi_bdev); if (likely(blk_queue_enter(q, false) == 0)) { + struct bio_list lower, same; + + /* Create a fresh bio_list for all subordinate requests */ + bio_list_on_stack[1] = bio_list_on_stack[0]; + bio_list_init(&bio_list_on_stack[0]); ret = q->make_request_fn(q, bio); blk_queue_exit(q); - bio = bio_list_pop(current->bio_list); + /* sort new bios into those for a lower level + * and those for the same level + */ + bio_list_init(&lower); + bio_list_init(&same); + while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL) + if (q == bdev_get_queue(bio->bi_bdev)) + bio_list_add(&same, bio); + else + bio_list_add(&lower, bio); + /* now assemble so we handle the lowest level first */ + bio_list_merge(&bio_list_on_stack[0], &lower); + bio_list_merge(&bio_list_on_stack[0], &same); + bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]); } else { - struct bio *bio_next = bio_list_pop(current->bio_list); - bio_io_error(bio); - bio = bio_next; } + bio = bio_list_pop(&bio_list_on_stack[0]); } while (bio); current->bio_list = NULL; /* deactivate */ diff --git a/block/blk-ioc.c b/block/blk-ioc.c index b12f9c87b4c31c..63898d229cb90b 100644 --- a/block/blk-ioc.c +++ b/block/blk-ioc.c @@ -7,6 +7,7 @@ #include #include #include +#include #include "blk.h" @@ -36,8 +37,8 @@ static void icq_free_icq_rcu(struct rcu_head *head) } /* - * Exit an icq. Called with both ioc and q locked for sq, only ioc locked for - * mq. + * Exit an icq. Called with ioc locked for blk-mq, and with both ioc + * and queue locked for legacy. */ static void ioc_exit_icq(struct io_cq *icq) { @@ -54,7 +55,10 @@ static void ioc_exit_icq(struct io_cq *icq) icq->flags |= ICQ_EXITED; } -/* Release an icq. Called with both ioc and q locked. */ +/* + * Release an icq. Called with ioc locked for blk-mq, and with both ioc + * and queue locked for legacy. + */ static void ioc_destroy_icq(struct io_cq *icq) { struct io_context *ioc = icq->ioc; @@ -62,7 +66,6 @@ static void ioc_destroy_icq(struct io_cq *icq) struct elevator_type *et = q->elevator->type; lockdep_assert_held(&ioc->lock); - lockdep_assert_held(q->queue_lock); radix_tree_delete(&ioc->icq_tree, icq->q->id); hlist_del_init(&icq->ioc_node); @@ -222,24 +225,40 @@ void exit_io_context(struct task_struct *task) put_io_context_active(ioc); } +static void __ioc_clear_queue(struct list_head *icq_list) +{ + unsigned long flags; + + while (!list_empty(icq_list)) { + struct io_cq *icq = list_entry(icq_list->next, + struct io_cq, q_node); + struct io_context *ioc = icq->ioc; + + spin_lock_irqsave(&ioc->lock, flags); + ioc_destroy_icq(icq); + spin_unlock_irqrestore(&ioc->lock, flags); + } +} + /** * ioc_clear_queue - break any ioc association with the specified queue * @q: request_queue being cleared * - * Walk @q->icq_list and exit all io_cq's. Must be called with @q locked.
+ * Walk @q->icq_list and exit all io_cq's. */ void ioc_clear_queue(struct request_queue *q) { - lockdep_assert_held(q->queue_lock); + LIST_HEAD(icq_list); - while (!list_empty(&q->icq_list)) { - struct io_cq *icq = list_entry(q->icq_list.next, - struct io_cq, q_node); - struct io_context *ioc = icq->ioc; + spin_lock_irq(q->queue_lock); + list_splice_init(&q->icq_list, &icq_list); - spin_lock(&ioc->lock); - ioc_destroy_icq(icq); - spin_unlock(&ioc->lock); + if (q->mq_ops) { + spin_unlock_irq(q->queue_lock); + __ioc_clear_queue(&icq_list); + } else { + __ioc_clear_queue(&icq_list); + spin_unlock_irq(q->queue_lock); } } diff --git a/block/blk-map.c b/block/blk-map.c index 2f18c2a0be1b22..3b5cb863318f31 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -2,6 +2,7 @@ * Functions related to mapping data to requests */ #include +#include #include #include #include diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 98c7b061781e55..09af8ff18719a4 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -110,15 +110,14 @@ struct request *blk_mq_sched_get_request(struct request_queue *q, struct blk_mq_alloc_data *data) { struct elevator_queue *e = q->elevator; - struct blk_mq_hw_ctx *hctx; - struct blk_mq_ctx *ctx; struct request *rq; blk_queue_enter_live(q); - ctx = blk_mq_get_ctx(q); - hctx = blk_mq_map_queue(q, ctx->cpu); - - blk_mq_set_alloc_data(data, q, data->flags, ctx, hctx); + data->q = q; + if (likely(!data->ctx)) + data->ctx = blk_mq_get_ctx(q); + if (likely(!data->hctx)) + data->hctx = blk_mq_map_queue(q, data->ctx->cpu); if (e) { data->flags |= BLK_MQ_REQ_INTERNAL; @@ -135,8 +134,6 @@ struct request *blk_mq_sched_get_request(struct request_queue *q, rq = __blk_mq_alloc_request(data, op); } else { rq = __blk_mq_alloc_request(data, op); - if (rq) - data->hctx->tags->rqs[rq->tag] = rq; } if (rq) { @@ -454,7 +451,8 @@ int blk_mq_sched_setup(struct request_queue *q) */ ret = 0; queue_for_each_hw_ctx(q, hctx, i) { - hctx->sched_tags = blk_mq_alloc_rq_map(set, i, q->nr_requests, 0); + hctx->sched_tags = blk_mq_alloc_rq_map(set, i, + q->nr_requests, set->reserved_tags); if (!hctx->sched_tags) { ret = -ENOMEM; break; diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c index 295e69670c3934..d745ab81033afa 100644 --- a/block/blk-mq-sysfs.c +++ b/block/blk-mq-sysfs.c @@ -17,6 +17,15 @@ static void blk_mq_sysfs_release(struct kobject *kobj) { } +static void blk_mq_hw_sysfs_release(struct kobject *kobj) +{ + struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx, + kobj); + free_cpumask_var(hctx->cpumask); + kfree(hctx->ctxs); + kfree(hctx); +} + struct blk_mq_ctx_sysfs_entry { struct attribute attr; ssize_t (*show)(struct blk_mq_ctx *, char *); @@ -200,7 +209,7 @@ static struct kobj_type blk_mq_ctx_ktype = { static struct kobj_type blk_mq_hw_ktype = { .sysfs_ops = &blk_mq_hw_sysfs_ops, .default_attrs = default_hw_ctx_attrs, - .release = blk_mq_sysfs_release, + .release = blk_mq_hw_sysfs_release, }; static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx) @@ -242,24 +251,15 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx) static void __blk_mq_unregister_dev(struct device *dev, struct request_queue *q) { struct blk_mq_hw_ctx *hctx; - struct blk_mq_ctx *ctx; - int i, j; + int i; - queue_for_each_hw_ctx(q, hctx, i) { + queue_for_each_hw_ctx(q, hctx, i) blk_mq_unregister_hctx(hctx); - hctx_for_each_ctx(hctx, ctx, j) - kobject_put(&ctx->kobj); - - kobject_put(&hctx->kobj); - } - blk_mq_debugfs_unregister_hctxs(q); kobject_uevent(&q->mq_kobj, 
KOBJ_REMOVE); kobject_del(&q->mq_kobj); - kobject_put(&q->mq_kobj); - kobject_put(&dev->kobj); q->mq_sysfs_init_done = false; @@ -277,7 +277,19 @@ void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx) kobject_init(&hctx->kobj, &blk_mq_hw_ktype); } -static void blk_mq_sysfs_init(struct request_queue *q) +void blk_mq_sysfs_deinit(struct request_queue *q) +{ + struct blk_mq_ctx *ctx; + int cpu; + + for_each_possible_cpu(cpu) { + ctx = per_cpu_ptr(q->queue_ctx, cpu); + kobject_put(&ctx->kobj); + } + kobject_put(&q->mq_kobj); +} + +void blk_mq_sysfs_init(struct request_queue *q) { struct blk_mq_ctx *ctx; int cpu; @@ -297,8 +309,6 @@ int blk_mq_register_dev(struct device *dev, struct request_queue *q) blk_mq_disable_hotplug(); - blk_mq_sysfs_init(q); - ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq"); if (ret < 0) goto out; diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 54c84363c1b238..9d97bfc4d4657b 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -181,7 +181,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data) void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags, struct blk_mq_ctx *ctx, unsigned int tag) { - if (tag >= tags->nr_reserved_tags) { + if (!blk_mq_tag_is_reserved(tags, tag)) { const int real_tag = tag - tags->nr_reserved_tags; BUG_ON(real_tag >= tags->nr_tags); @@ -295,6 +295,9 @@ int blk_mq_reinit_tagset(struct blk_mq_tag_set *set) for (i = 0; i < set->nr_hw_queues; i++) { struct blk_mq_tags *tags = set->tags[i]; + if (!tags) + continue; + for (j = 0; j < tags->nr_tags; j++) { if (!tags->static_rqs[j]) continue; diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index 63497423c5cd32..5cb51e53cc0353 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -85,4 +85,10 @@ static inline void blk_mq_tag_set_rq(struct blk_mq_hw_ctx *hctx, hctx->tags->rqs[tag] = rq; } +static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags, + unsigned int tag) +{ + return tag < tags->nr_reserved_tags; +} + #endif diff --git a/block/blk-mq-virtio.c b/block/blk-mq-virtio.c new file mode 100644 index 00000000000000..c3afbca1129956 --- /dev/null +++ b/block/blk-mq-virtio.c @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2016 Christoph Hellwig. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ +#include +#include +#include +#include +#include +#include "blk-mq.h" + +/** + * blk_mq_virtio_map_queues - provide a default queue mapping for virtio device + * @set: tagset to provide the mapping for + * @vdev: virtio device associated with @set. + * @first_vec: first interrupt vector to use for queues (usually 0) + * + * This function assumes the virtio device @vdev has at least as many available + * interrupt vectors as @set has queues. It will then query the vector + * corresponding to each queue for its affinity mask and build a queue mapping + * that maps a queue to the CPUs that have irq affinity for the corresponding + * vector.
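The kernel-doc above describes the mapping blk_mq_virtio_map_queues() builds: every CPU that appears in a vector's affinity mask is pointed at the queue using that vector, and the helper falls back to blk_mq_map_queues() when no affinity information is available. Below is a small userspace model of that mapping loop; the affinity table, CPU count, and queue count are made up for illustration, whereas the real helper obtains the masks from the virtio transport's get_vq_affinity callback.

#include <stdio.h>

#define NR_CPUS   4
#define NR_QUEUES 2

int main(void)
{
	/* affinity[q][cpu] != 0: the interrupt vector of queue q targets that CPU */
	static const int affinity[NR_QUEUES][NR_CPUS] = {
		{ 1, 1, 0, 0 },		/* queue 0's vector -> CPUs 0 and 1 */
		{ 0, 0, 1, 1 },		/* queue 1's vector -> CPUs 2 and 3 */
	};
	unsigned int mq_map[NR_CPUS] = { 0 };
	unsigned int queue, cpu;

	for (queue = 0; queue < NR_QUEUES; queue++)
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			if (affinity[queue][cpu])
				mq_map[cpu] = queue;	/* this CPU submits on hw queue "queue" */

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %u -> hw queue %u\n", cpu, mq_map[cpu]);
	return 0;
}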
+ */ +int blk_mq_virtio_map_queues(struct blk_mq_tag_set *set, + struct virtio_device *vdev, int first_vec) +{ + const struct cpumask *mask; + unsigned int queue, cpu; + + if (!vdev->config->get_vq_affinity) + goto fallback; + + for (queue = 0; queue < set->nr_hw_queues; queue++) { + mask = vdev->config->get_vq_affinity(vdev, first_vec + queue); + if (!mask) + goto fallback; + + for_each_cpu(cpu, mask) + set->mq_map[cpu] = queue; + } + + return 0; +fallback: + return blk_mq_map_queues(set); +} +EXPORT_SYMBOL_GPL(blk_mq_virtio_map_queues); diff --git a/block/blk-mq.c b/block/blk-mq.c index 9e6b064e533979..6b6e7bc041dbf3 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -20,6 +20,8 @@ #include #include #include +#include +#include #include #include #include @@ -75,10 +77,20 @@ void blk_mq_freeze_queue_start(struct request_queue *q) } EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start); -static void blk_mq_freeze_queue_wait(struct request_queue *q) +void blk_mq_freeze_queue_wait(struct request_queue *q) { wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter)); } +EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait); + +int blk_mq_freeze_queue_wait_timeout(struct request_queue *q, + unsigned long timeout) +{ + return wait_event_timeout(q->mq_freeze_wq, + percpu_ref_is_zero(&q->q_usage_counter), + timeout); +} +EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout); /* * Guarantee no request is in use, so we can change any data structure of @@ -234,6 +246,7 @@ struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data, } rq->tag = tag; rq->internal_tag = -1; + data->hctx->tags->rqs[rq->tag] = rq; } blk_mq_rq_ctx_init(data->q, data->ctx, rq, op); @@ -273,10 +286,9 @@ EXPORT_SYMBOL(blk_mq_alloc_request); struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw, unsigned int flags, unsigned int hctx_idx) { - struct blk_mq_hw_ctx *hctx; - struct blk_mq_ctx *ctx; + struct blk_mq_alloc_data alloc_data = { .flags = flags }; struct request *rq; - struct blk_mq_alloc_data alloc_data; + unsigned int cpu; int ret; /* @@ -299,25 +311,23 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw, * Check if the hardware context is actually mapped to anything. * If not tell the caller that it should skip this queue. */ - hctx = q->queue_hw_ctx[hctx_idx]; - if (!blk_mq_hw_queue_mapped(hctx)) { - ret = -EXDEV; - goto out_queue_exit; - } - ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask)); - - blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx); - rq = __blk_mq_alloc_request(&alloc_data, rw); - if (!rq) { - ret = -EWOULDBLOCK; - goto out_queue_exit; + alloc_data.hctx = q->queue_hw_ctx[hctx_idx]; + if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) { + blk_queue_exit(q); + return ERR_PTR(-EXDEV); } + cpu = cpumask_first(alloc_data.hctx->cpumask); + alloc_data.ctx = __blk_mq_get_ctx(q, cpu); - return rq; + rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data); -out_queue_exit: + blk_mq_put_ctx(alloc_data.ctx); blk_queue_exit(q); - return ERR_PTR(ret); + + if (!rq) + return ERR_PTR(-EWOULDBLOCK); + + return rq; } EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx); @@ -687,17 +697,8 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, { struct blk_mq_timeout_data *data = priv; - if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) { - /* - * If a request wasn't started before the queue was - * marked dying, kill it here or it'll go unnoticed. 
- */ - if (unlikely(blk_queue_dying(rq->q))) { - rq->errors = -EIO; - blk_mq_end_request(rq, rq->errors); - } + if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) return; - } if (time_after_eq(jiffies, rq->deadline)) { if (!blk_mark_rq_complete(rq)) @@ -852,6 +853,9 @@ bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx, return true; } + if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag)) + data.flags |= BLK_MQ_REQ_RESERVED; + rq->tag = blk_mq_get_tag(&data); if (rq->tag >= 0) { if (blk_mq_tag_busy(data.hctx)) { @@ -865,12 +869,9 @@ bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx, return false; } -static void blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx, - struct request *rq) +static void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx, + struct request *rq) { - if (rq->tag == -1 || rq->internal_tag == -1) - return; - blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag); rq->tag = -1; @@ -880,6 +881,26 @@ static void blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx, } } +static void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx, + struct request *rq) +{ + if (rq->tag == -1 || rq->internal_tag == -1) + return; + + __blk_mq_put_driver_tag(hctx, rq); +} + +static void blk_mq_put_driver_tag(struct request *rq) +{ + struct blk_mq_hw_ctx *hctx; + + if (rq->tag == -1 || rq->internal_tag == -1) + return; + + hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu); + __blk_mq_put_driver_tag(hctx, rq); +} + /* * If we fail getting a driver tag because all the driver tags are already * assigned and on the dispatch list, BUT the first entry does not have a @@ -948,7 +969,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list) struct request *rq; LIST_HEAD(driver_list); struct list_head *dptr; - int queued, ret = BLK_MQ_RQ_QUEUE_OK; + int errors, queued, ret = BLK_MQ_RQ_QUEUE_OK; /* * Start off with dptr being NULL, so we start the first request @@ -959,7 +980,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list) /* * Now process all the entries, sending them to the driver. */ - queued = 0; + errors = queued = 0; while (!list_empty(list)) { struct blk_mq_queue_data bd; @@ -989,7 +1010,19 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list) bd.rq = rq; bd.list = dptr; - bd.last = list_empty(list); + + /* + * Flag last if we have no more requests, or if we have more + * but can't assign a driver tag to it. + */ + if (list_empty(list)) + bd.last = true; + else { + struct request *nxt; + + nxt = list_first_entry(list, struct request, queuelist); + bd.last = !blk_mq_get_driver_tag(nxt, NULL, false); + } ret = q->mq_ops->queue_rq(hctx, &bd); switch (ret) { @@ -997,13 +1030,14 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list) queued++; break; case BLK_MQ_RQ_QUEUE_BUSY: - blk_mq_put_driver_tag(hctx, rq); + blk_mq_put_driver_tag_hctx(hctx, rq); list_add(&rq->queuelist, list); __blk_mq_requeue_request(rq); break; default: pr_err("blk-mq: bad return on queue: %d\n", ret); case BLK_MQ_RQ_QUEUE_ERROR: + errors++; rq->errors = -EIO; blk_mq_end_request(rq, rq->errors); break; @@ -1027,6 +1061,13 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list) * that is where we will continue on next queue run. */ if (!list_empty(list)) { + /* + * If we got a driver tag for the next request already, + * free it again. 
+ */ + rq = list_first_entry(list, struct request, queuelist); + blk_mq_put_driver_tag(rq); + spin_lock(&hctx->lock); list_splice_init(list, &hctx->dispatch); spin_unlock(&hctx->lock); @@ -1048,7 +1089,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list) blk_mq_run_hw_queue(hctx, true); } - return queued != 0; + return (queued + errors) != 0; } static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) @@ -1385,7 +1426,8 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq) return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true); } -static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie) +static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie, + bool may_sleep) { struct request_queue *q = rq->q; struct blk_mq_queue_data bd = { @@ -1426,7 +1468,7 @@ static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie) } insert: - blk_mq_sched_insert_request(rq, false, true, true, false); + blk_mq_sched_insert_request(rq, false, true, false, may_sleep); } /* @@ -1520,11 +1562,11 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) { rcu_read_lock(); - blk_mq_try_issue_directly(old_rq, &cookie); + blk_mq_try_issue_directly(old_rq, &cookie, false); rcu_read_unlock(); } else { srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu); - blk_mq_try_issue_directly(old_rq, &cookie); + blk_mq_try_issue_directly(old_rq, &cookie, true); srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx); } goto done; @@ -1713,16 +1755,20 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, unsigned int reserved_tags) { struct blk_mq_tags *tags; + int node; - tags = blk_mq_init_tags(nr_tags, reserved_tags, - set->numa_node, + node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx); + if (node == NUMA_NO_NODE) + node = set->numa_node; + + tags = blk_mq_init_tags(nr_tags, reserved_tags, node, BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags)); if (!tags) return NULL; tags->rqs = kzalloc_node(nr_tags * sizeof(struct request *), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, - set->numa_node); + node); if (!tags->rqs) { blk_mq_free_tags(tags); return NULL; @@ -1730,7 +1776,7 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, tags->static_rqs = kzalloc_node(nr_tags * sizeof(struct request *), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, - set->numa_node); + node); if (!tags->static_rqs) { kfree(tags->rqs); blk_mq_free_tags(tags); @@ -1750,6 +1796,11 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, { unsigned int i, j, entries_per_page, max_order = 4; size_t rq_size, left; + int node; + + node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx); + if (node == NUMA_NO_NODE) + node = set->numa_node; INIT_LIST_HEAD(&tags->page_list); @@ -1771,7 +1822,7 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, this_order--; do { - page = alloc_pages_node(set->numa_node, + page = alloc_pages_node(node, GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO, this_order); if (page) @@ -1804,7 +1855,7 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, if (set->ops->init_request) { if (set->ops->init_request(set->driver_data, rq, hctx_idx, i, - set->numa_node)) { + node)) { tags->static_rqs[i] = NULL; goto fail; } @@ -1897,16 +1948,6 @@ static void blk_mq_exit_hw_queues(struct request_queue *q, } } -static void blk_mq_free_hw_queues(struct request_queue *q, - 
struct blk_mq_tag_set *set) -{ - struct blk_mq_hw_ctx *hctx; - unsigned int i; - - queue_for_each_hw_ctx(q, hctx, i) - free_cpumask_var(hctx->cpumask); -} - static int blk_mq_init_hctx(struct request_queue *q, struct blk_mq_tag_set *set, struct blk_mq_hw_ctx *hctx, unsigned hctx_idx) @@ -1987,7 +2028,6 @@ static void blk_mq_init_cpu_queues(struct request_queue *q, struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i); struct blk_mq_hw_ctx *hctx; - memset(__ctx, 0, sizeof(*__ctx)); __ctx->cpu = i; spin_lock_init(&__ctx->lock); INIT_LIST_HEAD(&__ctx->rq_list); @@ -2199,15 +2239,19 @@ void blk_mq_release(struct request_queue *q) queue_for_each_hw_ctx(q, hctx, i) { if (!hctx) continue; - kfree(hctx->ctxs); - kfree(hctx); + kobject_put(&hctx->kobj); } q->mq_map = NULL; kfree(q->queue_hw_ctx); - /* ctx kobj stays in queue_ctx */ + /* + * release .mq_kobj and sw queue's kobject now because + * both share lifetime with request queue. + */ + blk_mq_sysfs_deinit(q); + free_percpu(q->queue_ctx); } @@ -2272,10 +2316,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, if (hctx->tags) blk_mq_free_map_and_requests(set, j); blk_mq_exit_hctx(q, set, hctx, j); - free_cpumask_var(hctx->cpumask); kobject_put(&hctx->kobj); - kfree(hctx->ctxs); - kfree(hctx); hctxs[j] = NULL; } @@ -2294,6 +2335,9 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, if (!q->queue_ctx) goto err_exit; + /* init q->mq_kobj and sw queues' kobjects */ + blk_mq_sysfs_init(q); + q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)), GFP_KERNEL, set->numa_node); if (!q->queue_hw_ctx) @@ -2384,7 +2428,6 @@ void blk_mq_free_queue(struct request_queue *q) blk_mq_del_queue_tag_set(q); blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); - blk_mq_free_hw_queues(q, set); } /* Basically redo blk_mq_init_queue with queue frozen */ diff --git a/block/blk-mq.h b/block/blk-mq.h index 24b2256186f33f..b79f9a7d8cf620 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -77,6 +77,8 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, /* * sysfs helpers */ +extern void blk_mq_sysfs_init(struct request_queue *q); +extern void blk_mq_sysfs_deinit(struct request_queue *q); extern int blk_mq_sysfs_register(struct request_queue *q); extern void blk_mq_sysfs_unregister(struct request_queue *q); extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx); @@ -146,16 +148,6 @@ struct blk_mq_alloc_data { struct blk_mq_hw_ctx *hctx; }; -static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data, - struct request_queue *q, unsigned int flags, - struct blk_mq_ctx *ctx, struct blk_mq_hw_ctx *hctx) -{ - data->q = q; - data->flags = flags; - data->ctx = ctx; - data->hctx = hctx; -} - static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data) { if (data->flags & BLK_MQ_REQ_INTERNAL) diff --git a/block/blk-softirq.c b/block/blk-softirq.c index 06cf9807f49a3b..87b7df4851bffd 100644 --- a/block/blk-softirq.c +++ b/block/blk-softirq.c @@ -9,6 +9,7 @@ #include #include #include +#include #include "blk.h" diff --git a/block/blk-stat.c b/block/blk-stat.c index 9b43efb8933fb9..186fcb981e9b1d 100644 --- a/block/blk-stat.c +++ b/block/blk-stat.c @@ -30,11 +30,11 @@ static void blk_stat_flush_batch(struct blk_rq_stat *stat) static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src) { + blk_stat_flush_batch(src); + if (!src->nr_samples) return; - blk_stat_flush_batch(src); - dst->min = min(dst->min, src->min); dst->max = max(dst->max, 
src->max); diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 002af836aa87d8..c44b321335f3eb 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -815,9 +815,7 @@ static void blk_release_queue(struct kobject *kobj) blkcg_exit_queue(q); if (q->elevator) { - spin_lock_irq(q->queue_lock); ioc_clear_queue(q); - spin_unlock_irq(q->queue_lock); elevator_exit(q->elevator); } diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 13794477785985..440b95ee593c97 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -8,6 +8,7 @@ */ #include #include +#include #include #include #include diff --git a/block/elevator.c b/block/elevator.c index ac1c9f481a9895..01139f549b5be7 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -983,9 +983,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) if (old_registered) elv_unregister_queue(q); - spin_lock_irq(q->queue_lock); ioc_clear_queue(q); - spin_unlock_irq(q->queue_lock); } /* allocate, init and register new elevator */ diff --git a/block/genhd.c b/block/genhd.c index 2f444b87a5f244..a9c516a8b37dbc 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -572,20 +572,6 @@ static void register_disk(struct device *parent, struct gendisk *disk) disk_part_iter_exit(&piter); } -void put_disk_devt(struct disk_devt *disk_devt) -{ - if (disk_devt && atomic_dec_and_test(&disk_devt->count)) - disk_devt->release(disk_devt); -} -EXPORT_SYMBOL(put_disk_devt); - -void get_disk_devt(struct disk_devt *disk_devt) -{ - if (disk_devt) - atomic_inc(&disk_devt->count); -} -EXPORT_SYMBOL(get_disk_devt); - /** * device_add_disk - add partitioning information to kernel list * @parent: parent device for the disk @@ -626,13 +612,6 @@ void device_add_disk(struct device *parent, struct gendisk *disk) disk_alloc_events(disk); - /* - * Take a reference on the devt and assign it to queue since it - * must not be reallocated while the bdi is registered - */ - disk->queue->disk_devt = disk->disk_devt; - get_disk_devt(disk->disk_devt); - /* Register BDI before referencing it from bdev */ bdi = disk->queue->backing_dev_info; bdi_register_owner(bdi, disk_to_dev(disk)); @@ -681,7 +660,16 @@ void del_gendisk(struct gendisk *disk) disk->flags &= ~GENHD_FL_UP; sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi"); - blk_unregister_queue(disk); + if (disk->queue) { + /* + * Unregister bdi before releasing device numbers (as they can + * get reused and we'd get clashes in sysfs). 
+ */ + bdi_unregister(disk->queue->backing_dev_info); + blk_unregister_queue(disk); + } else { + WARN_ON(1); + } blk_unregister_region(disk_devt(disk), disk->minors); part_stat_set_all(&disk->part0, 0); diff --git a/block/ioprio.c b/block/ioprio.c index 3790669232ff50..0c47a00f92a852 100644 --- a/block/ioprio.c +++ b/block/ioprio.c @@ -23,8 +23,11 @@ #include #include #include +#include #include #include +#include +#include #include #include #include diff --git a/block/sed-opal.c b/block/sed-opal.c index 1e18dca360fc50..14035f826b5e35 100644 --- a/block/sed-opal.c +++ b/block/sed-opal.c @@ -1023,7 +1023,6 @@ static int finalize_and_send(struct opal_dev *dev, cont_fn cont) static int gen_key(struct opal_dev *dev, void *data) { - const u8 *method; u8 uid[OPAL_UID_LENGTH]; int err = 0; @@ -1031,7 +1030,6 @@ static int gen_key(struct opal_dev *dev, void *data) set_comid(dev, dev->comid); memcpy(uid, dev->prev_data, min(sizeof(uid), dev->prev_d_len)); - method = opalmethod[OPAL_GENKEY]; kfree(dev->prev_data); dev->prev_data = NULL; @@ -1669,7 +1667,6 @@ static int add_user_to_lr(struct opal_dev *dev, void *data) static int lock_unlock_locking_range(struct opal_dev *dev, void *data) { u8 lr_buffer[OPAL_UID_LENGTH]; - const u8 *method; struct opal_lock_unlock *lkul = data; u8 read_locked = 1, write_locked = 1; int err = 0; @@ -1677,7 +1674,6 @@ static int lock_unlock_locking_range(struct opal_dev *dev, void *data) clear_opal_cmd(dev); set_comid(dev, dev->comid); - method = opalmethod[OPAL_SET]; if (build_locking_range(lr_buffer, sizeof(lr_buffer), lkul->session.opal_key.lr) < 0) return -ERANGE; @@ -1733,14 +1729,12 @@ static int lock_unlock_locking_range_sum(struct opal_dev *dev, void *data) { u8 lr_buffer[OPAL_UID_LENGTH]; u8 read_locked = 1, write_locked = 1; - const u8 *method; struct opal_lock_unlock *lkul = data; int ret; clear_opal_cmd(dev); set_comid(dev, dev->comid); - method = opalmethod[OPAL_SET]; if (build_locking_range(lr_buffer, sizeof(lr_buffer), lkul->session.opal_key.lr) < 0) return -ERANGE; @@ -2133,7 +2127,7 @@ static int opal_add_user_to_lr(struct opal_dev *dev, pr_err("Locking state was not RO or RW\n"); return -EINVAL; } - if (lk_unlk->session.who < OPAL_USER1 && + if (lk_unlk->session.who < OPAL_USER1 || lk_unlk->session.who > OPAL_USER9) { pr_err("Authority was not within the range of users: %d\n", lk_unlk->session.who); @@ -2316,7 +2310,7 @@ static int opal_activate_user(struct opal_dev *dev, int ret; /* We can't activate Admin1 it's active as manufactured */ - if (opal_session->who < OPAL_USER1 && + if (opal_session->who < OPAL_USER1 || opal_session->who > OPAL_USER9) { pr_err("Who was not a valid user: %d\n", opal_session->who); return -EINVAL; diff --git a/crypto/af_alg.c b/crypto/af_alg.c index f5e18c2a48527b..690deca17c3528 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -266,7 +266,7 @@ static int alg_setsockopt(struct socket *sock, int level, int optname, return err; } -int af_alg_accept(struct sock *sk, struct socket *newsock) +int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern) { struct alg_sock *ask = alg_sk(sk); const struct af_alg_type *type; @@ -281,7 +281,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock) if (!type) goto unlock; - sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, 0); + sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, kern); err = -ENOMEM; if (!sk2) goto unlock; @@ -323,9 +323,10 @@ int af_alg_accept(struct sock *sk, struct socket *newsock) } EXPORT_SYMBOL_GPL(af_alg_accept); -static 
int alg_accept(struct socket *sock, struct socket *newsock, int flags) +static int alg_accept(struct socket *sock, struct socket *newsock, int flags, + bool kern) { - return af_alg_accept(sock->sk, newsock); + return af_alg_accept(sock->sk, newsock, kern); } static const struct proto_ops alg_proto_ops = { diff --git a/crypto/algboss.c b/crypto/algboss.c index ccb85e1798f230..960d8548171be5 100644 --- a/crypto/algboss.c +++ b/crypto/algboss.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 533265f110e029..5a805375865731 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index 54fc90e8339ce8..5e92bd275ef38e 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -239,7 +239,8 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, return err ?: len; } -static int hash_accept(struct socket *sock, struct socket *newsock, int flags) +static int hash_accept(struct socket *sock, struct socket *newsock, int flags, + bool kern) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); @@ -260,7 +261,7 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags) if (err) return err; - err = af_alg_accept(ask->parent, newsock); + err = af_alg_accept(ask->parent, newsock, kern); if (err) return err; @@ -378,7 +379,7 @@ static int hash_recvmsg_nokey(struct socket *sock, struct msghdr *msg, } static int hash_accept_nokey(struct socket *sock, struct socket *newsock, - int flags) + int flags, bool kern) { int err; @@ -386,7 +387,7 @@ static int hash_accept_nokey(struct socket *sock, struct socket *newsock, if (err) return err; - return hash_accept(sock, newsock, flags); + return hash_accept(sock, newsock, flags, kern); } static struct proto_ops algif_hash_ops_nokey = { diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index a9e79d8eff8774..43839b00fe6c42 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include diff --git a/crypto/api.c b/crypto/api.c index b16ce165328457..941cd4c6c7ecbb 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include #include "internal.h" diff --git a/crypto/ccm.c b/crypto/ccm.c index 442848807a52b1..1ce37ae0ce565a 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c @@ -45,6 +45,7 @@ struct crypto_rfc4309_req_ctx { struct crypto_ccm_req_priv_ctx { u8 odata[16]; + u8 idata[16]; u8 auth_tag[16]; u32 flags; struct scatterlist src[3]; @@ -183,8 +184,8 @@ static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain, AHASH_REQUEST_ON_STACK(ahreq, ctx->mac); unsigned int assoclen = req->assoclen; struct scatterlist sg[3]; - u8 odata[16]; - u8 idata[16]; + u8 *odata = pctx->odata; + u8 *idata = pctx->idata; int ilen, err; /* format control data for input */ diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c index f1bf3418d96830..727bd5c3569e3b 100644 --- a/crypto/crypto_engine.c +++ b/crypto/crypto_engine.c @@ -16,6 +16,7 @@ #include #include #include +#include #include "internal.h" #define CRYPTO_ENGINE_MAX_QLEN 10 diff --git a/crypto/lrw.c b/crypto/lrw.c index ecd8474018e3bd..3ea095adafd9af 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -286,8 +286,11 @@ static int init_crypt(struct 
skcipher_request *req, crypto_completion_t done) subreq->cryptlen = LRW_BUFFER_SIZE; if (req->cryptlen > LRW_BUFFER_SIZE) { - subreq->cryptlen = min(req->cryptlen, (unsigned)PAGE_SIZE); - rctx->ext = kmalloc(subreq->cryptlen, gfp); + unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE); + + rctx->ext = kmalloc(n, gfp); + if (rctx->ext) + subreq->cryptlen = n; } rctx->src = req->src; diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c index c207458d629933..4e64726588524f 100644 --- a/crypto/mcryptd.c +++ b/crypto/mcryptd.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 006ecc43435180..03f473116f7876 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -22691,7 +22691,7 @@ static struct aead_testvec aes_ccm_enc_tv_template[] = { "\x09\x75\x9a\x9b\x3c\x9b\x27\x39", .klen = 32, .iv = "\x03\xf9\xd9\x4e\x63\xb5\x3d\x9d" - "\x43\xf6\x1e\x50", + "\x43\xf6\x1e\x50\0\0\0\0", .assoc = "\x57\xf5\x6b\x8b\x57\x5c\x3d\x3b" "\x13\x02\x01\x0c\x83\x4c\x96\x35" "\x8e\xd6\x39\xcf\x7d\x14\x9b\x94" diff --git a/crypto/xts.c b/crypto/xts.c index 410a2e299085f1..c976bfac29da52 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -230,8 +230,11 @@ static int init_crypt(struct skcipher_request *req, crypto_completion_t done) subreq->cryptlen = XTS_BUFFER_SIZE; if (req->cryptlen > XTS_BUFFER_SIZE) { - subreq->cryptlen = min(req->cryptlen, (unsigned)PAGE_SIZE); - rctx->ext = kmalloc(subreq->cryptlen, gfp); + unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE); + + rctx->ext = kmalloc(n, gfp); + if (rctx->ext) + subreq->cryptlen = n; } rctx->src = req->src; @@ -463,6 +466,7 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) struct xts_instance_ctx *ctx; struct skcipher_alg *alg; const char *cipher_name; + u32 mask; int err; algt = crypto_get_attr_type(tb); @@ -483,18 +487,19 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) ctx = skcipher_instance_ctx(inst); crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst)); - err = crypto_grab_skcipher(&ctx->spawn, cipher_name, 0, - crypto_requires_sync(algt->type, - algt->mask)); + + mask = crypto_requires_off(algt->type, algt->mask, + CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC); + + err = crypto_grab_skcipher(&ctx->spawn, cipher_name, 0, mask); if (err == -ENOENT) { err = -ENAMETOOLONG; if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)", cipher_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; - err = crypto_grab_skcipher(&ctx->spawn, ctx->name, 0, - crypto_requires_sync(algt->type, - algt->mask)); + err = crypto_grab_skcipher(&ctx->spawn, ctx->name, 0, mask); } if (err) diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index a391bbc48105ae..d94f92f88ca1c9 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -2,7 +2,6 @@ # Makefile for the Linux ACPI interpreter # -ccflags-y := -Os ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT # diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c index eb76a4c10dbfb1..75443103128255 100644 --- a/drivers/acpi/acpi_pad.c +++ b/drivers/acpi/acpi_pad.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c index b4c1a6a51da482..03250e1f11039b 100644 --- a/drivers/acpi/acpi_platform.c +++ b/drivers/acpi/acpi_platform.c @@ -25,9 +25,11 @@ ACPI_MODULE_NAME("platform"); static const struct acpi_device_id forbidden_id_list[] = { - {"PNP0000", 0}, /* PIC */ - 
{"PNP0100", 0}, /* Timer */ - {"PNP0200", 0}, /* AT DMA Controller */ + {"PNP0000", 0}, /* PIC */ + {"PNP0100", 0}, /* Timer */ + {"PNP0200", 0}, /* AT DMA Controller */ + {"ACPI0009", 0}, /* IOxAPIC */ + {"ACPI000A", 0}, /* IOAPIC */ {"", 0}, }; diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index 4467a8089ab890..0143135b3abe37 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c @@ -182,11 +182,6 @@ int __weak arch_register_cpu(int cpu) void __weak arch_unregister_cpu(int cpu) {} -int __weak acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) -{ - return -ENODEV; -} - static int acpi_processor_hotadd_init(struct acpi_processor *pr) { unsigned long long sta; @@ -285,6 +280,13 @@ static int acpi_processor_get_info(struct acpi_device *device) pr->acpi_id = value; } + if (acpi_duplicate_processor_id(pr->acpi_id)) { + dev_err(&device->dev, + "Failed to get unique processor _UID (0x%x)\n", + pr->acpi_id); + return -ENODEV; + } + pr->phys_id = acpi_get_phys_id(pr->handle, device_declaration, pr->acpi_id); if (invalid_phys_cpuid(pr->phys_id)) @@ -585,7 +587,7 @@ static struct acpi_scan_handler processor_container_handler = { static int nr_unique_ids __initdata; /* The number of the duplicate processor IDs */ -static int nr_duplicate_ids __initdata; +static int nr_duplicate_ids; /* Used to store the unique processor IDs */ static int unique_processor_ids[] __initdata = { @@ -593,7 +595,7 @@ static int unique_processor_ids[] __initdata = { }; /* Used to store the duplicate processor IDs */ -static int duplicate_processor_ids[] __initdata = { +static int duplicate_processor_ids[] = { [0 ... NR_CPUS - 1] = -1, }; @@ -638,28 +640,53 @@ static acpi_status __init acpi_processor_ids_walk(acpi_handle handle, void **rv) { acpi_status status; + acpi_object_type acpi_type; + unsigned long long uid; union acpi_object object = { 0 }; struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; - status = acpi_evaluate_object(handle, NULL, NULL, &buffer); + status = acpi_get_type(handle, &acpi_type); if (ACPI_FAILURE(status)) - acpi_handle_info(handle, "Not get the processor object\n"); - else - processor_validated_ids_update(object.processor.proc_id); + return false; + + switch (acpi_type) { + case ACPI_TYPE_PROCESSOR: + status = acpi_evaluate_object(handle, NULL, NULL, &buffer); + if (ACPI_FAILURE(status)) + goto err; + uid = object.processor.proc_id; + break; + + case ACPI_TYPE_DEVICE: + status = acpi_evaluate_integer(handle, "_UID", NULL, &uid); + if (ACPI_FAILURE(status)) + goto err; + break; + default: + goto err; + } + + processor_validated_ids_update(uid); + return true; + +err: + acpi_handle_info(handle, "Invalid processor object\n"); + return false; - return AE_OK; } -static void __init acpi_processor_check_duplicates(void) +void __init acpi_processor_check_duplicates(void) { - /* Search all processor nodes in ACPI namespace */ + /* check the correctness for all processors in ACPI namespace */ acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, acpi_processor_ids_walk, NULL, NULL, NULL); + acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, acpi_processor_ids_walk, + NULL, NULL); } -bool __init acpi_processor_validate_proc_id(int proc_id) +bool acpi_duplicate_processor_id(int proc_id) { int i; diff --git a/drivers/acpi/apei/bert.c b/drivers/acpi/apei/bert.c index a05b5c0cf181a5..12771fcf0417df 100644 --- a/drivers/acpi/apei/bert.c +++ b/drivers/acpi/apei/bert.c @@ -97,6 +97,7 @@ static int __init 
bert_check_table(struct acpi_table_bert *bert_tab) static int __init bert_init(void) { + struct apei_resources bert_resources; struct acpi_bert_region *boot_error_region; struct acpi_table_bert *bert_tab; unsigned int region_len; @@ -127,13 +128,14 @@ static int __init bert_init(void) } region_len = bert_tab->region_length; - if (!request_mem_region(bert_tab->address, region_len, "APEI BERT")) { - pr_err("Can't request iomem region <%016llx-%016llx>.\n", - (unsigned long long)bert_tab->address, - (unsigned long long)bert_tab->address + region_len - 1); - return -EIO; - } - + apei_resources_init(&bert_resources); + rc = apei_resources_add(&bert_resources, bert_tab->address, + region_len, true); + if (rc) + return rc; + rc = apei_resources_request(&bert_resources, "APEI BERT"); + if (rc) + goto out_fini; boot_error_region = ioremap_cache(bert_tab->address, region_len); if (boot_error_region) { bert_print_all(boot_error_region, region_len); @@ -142,7 +144,9 @@ static int __init bert_init(void) rc = -ENOMEM; } - release_mem_region(bert_tab->address, region_len); + apei_resources_release(&bert_resources); +out_fini: + apei_resources_fini(&bert_resources); return rc; } diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index e53bef6cf53c62..79b3c9c5a3bc94 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -44,6 +44,7 @@ #include #include #include +#include #include #include @@ -1072,6 +1073,7 @@ static int ghes_remove(struct platform_device *ghes_dev) if (list_empty(&ghes_sci)) unregister_acpi_hed_notifier(&ghes_notifier_sci); mutex_unlock(&ghes_list_mutex); + synchronize_rcu(); break; case ACPI_HEST_NOTIFY_NMI: ghes_nmi_remove(ghes); diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 80cb5eb75b633d..34fbe027e73a26 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -1249,7 +1249,6 @@ static int __init acpi_init(void) acpi_wakeup_device_init(); acpi_debugger_init(); acpi_setup_sb_notify_handler(); - acpi_set_processor_mapping(); return 0; } diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 219b90bc092297..f15900132912a4 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -41,8 +41,10 @@ void acpi_gpe_apply_masked_gpes(void); void acpi_container_init(void); void acpi_memory_hotplug_init(void); #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC +void pci_ioapic_remove(struct acpi_pci_root *root); int acpi_ioapic_remove(struct acpi_pci_root *root); #else +static inline void pci_ioapic_remove(struct acpi_pci_root *root) { return; } static inline int acpi_ioapic_remove(struct acpi_pci_root *root) { return 0; } #endif #ifdef CONFIG_ACPI_DOCK diff --git a/drivers/acpi/ioapic.c b/drivers/acpi/ioapic.c index 6d7ce6e12aaa66..7e4fbf9a53a3cc 100644 --- a/drivers/acpi/ioapic.c +++ b/drivers/acpi/ioapic.c @@ -45,6 +45,12 @@ static acpi_status setup_res(struct acpi_resource *acpi_res, void *data) struct resource *res = data; struct resource_win win; + /* + * We might assign this to 'res' later, make sure all pointers are + * cleared before the resource is added to the global list + */ + memset(&win, 0, sizeof(win)); + res->flags = 0; if (acpi_dev_filter_resource_type(acpi_res, IORESOURCE_MEM)) return AE_OK; @@ -206,24 +212,34 @@ int acpi_ioapic_add(acpi_handle root_handle) return ACPI_SUCCESS(status) && ACPI_SUCCESS(retval) ? 
0 : -ENODEV; } -int acpi_ioapic_remove(struct acpi_pci_root *root) +void pci_ioapic_remove(struct acpi_pci_root *root) { - int retval = 0; struct acpi_pci_ioapic *ioapic, *tmp; mutex_lock(&ioapic_list_lock); list_for_each_entry_safe(ioapic, tmp, &ioapic_list, list) { if (root->device->handle != ioapic->root_handle) continue; - - if (acpi_unregister_ioapic(ioapic->handle, ioapic->gsi_base)) - retval = -EBUSY; - if (ioapic->pdev) { pci_release_region(ioapic->pdev, 0); pci_disable_device(ioapic->pdev); pci_dev_put(ioapic->pdev); } + } + mutex_unlock(&ioapic_list_lock); +} + +int acpi_ioapic_remove(struct acpi_pci_root *root) +{ + int retval = 0; + struct acpi_pci_ioapic *ioapic, *tmp; + + mutex_lock(&ioapic_list_lock); + list_for_each_entry_safe(ioapic, tmp, &ioapic_list, list) { + if (root->device->handle != ioapic->root_handle) + continue; + if (acpi_unregister_ioapic(ioapic->handle, ioapic->gsi_base)) + retval = -EBUSY; if (ioapic->res.flags && ioapic->res.parent) release_resource(&ioapic->res); list_del(&ioapic->list); diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index 7361d00818e2bb..662036bdc65eca 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -1603,7 +1603,7 @@ static size_t sizeof_nfit_set_info(int num_mappings) + num_mappings * sizeof(struct nfit_set_info_map); } -static int cmp_map(const void *m0, const void *m1) +static int cmp_map_compat(const void *m0, const void *m1) { const struct nfit_set_info_map *map0 = m0; const struct nfit_set_info_map *map1 = m1; @@ -1612,6 +1612,14 @@ static int cmp_map(const void *m0, const void *m1) sizeof(u64)); } +static int cmp_map(const void *m0, const void *m1) +{ + const struct nfit_set_info_map *map0 = m0; + const struct nfit_set_info_map *map1 = m1; + + return map0->region_offset - map1->region_offset; +} + /* Retrieve the nth entry referencing this spa */ static struct acpi_nfit_memory_map *memdev_from_spa( struct acpi_nfit_desc *acpi_desc, u16 range_index, int n) @@ -1667,6 +1675,12 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc, sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map), cmp_map, NULL); nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0); + + /* support namespaces created with the wrong sort order */ + sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map), + cmp_map_compat, NULL); + nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0); + ndr_desc->nd_set = nd_set; devm_kfree(dev, info); diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index bf601d4df8cfcb..919be0aa257876 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -648,12 +648,12 @@ static void acpi_pci_root_remove(struct acpi_device *device) pci_stop_root_bus(root->bus); - WARN_ON(acpi_ioapic_remove(root)); - + pci_ioapic_remove(root); device_set_run_wake(root->bus->bridge, false); pci_acpi_remove_bus_pm_notifier(device); pci_remove_root_bus(root->bus); + WARN_ON(acpi_ioapic_remove(root)); dmar_device_remove(device->handle); diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index 611a5585a9024a..b933061b6b607c 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c @@ -32,12 +32,12 @@ static struct acpi_table_madt *get_madt_table(void) } static int map_lapic_id(struct acpi_subtable_header *entry, - u32 acpi_id, phys_cpuid_t *apic_id, bool ignore_disabled) + u32 acpi_id, phys_cpuid_t *apic_id) { struct acpi_madt_local_apic *lapic = container_of(entry, struct 
acpi_madt_local_apic, header); - if (ignore_disabled && !(lapic->lapic_flags & ACPI_MADT_ENABLED)) + if (!(lapic->lapic_flags & ACPI_MADT_ENABLED)) return -ENODEV; if (lapic->processor_id != acpi_id) @@ -48,13 +48,12 @@ static int map_lapic_id(struct acpi_subtable_header *entry, } static int map_x2apic_id(struct acpi_subtable_header *entry, - int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id, - bool ignore_disabled) + int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id) { struct acpi_madt_local_x2apic *apic = container_of(entry, struct acpi_madt_local_x2apic, header); - if (ignore_disabled && !(apic->lapic_flags & ACPI_MADT_ENABLED)) + if (!(apic->lapic_flags & ACPI_MADT_ENABLED)) return -ENODEV; if (device_declaration && (apic->uid == acpi_id)) { @@ -66,13 +65,12 @@ static int map_x2apic_id(struct acpi_subtable_header *entry, } static int map_lsapic_id(struct acpi_subtable_header *entry, - int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id, - bool ignore_disabled) + int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id) { struct acpi_madt_local_sapic *lsapic = container_of(entry, struct acpi_madt_local_sapic, header); - if (ignore_disabled && !(lsapic->lapic_flags & ACPI_MADT_ENABLED)) + if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED)) return -ENODEV; if (device_declaration) { @@ -89,13 +87,12 @@ static int map_lsapic_id(struct acpi_subtable_header *entry, * Retrieve the ARM CPU physical identifier (MPIDR) */ static int map_gicc_mpidr(struct acpi_subtable_header *entry, - int device_declaration, u32 acpi_id, phys_cpuid_t *mpidr, - bool ignore_disabled) + int device_declaration, u32 acpi_id, phys_cpuid_t *mpidr) { struct acpi_madt_generic_interrupt *gicc = container_of(entry, struct acpi_madt_generic_interrupt, header); - if (ignore_disabled && !(gicc->flags & ACPI_MADT_ENABLED)) + if (!(gicc->flags & ACPI_MADT_ENABLED)) return -ENODEV; /* device_declaration means Device object in DSDT, in the @@ -112,7 +109,7 @@ static int map_gicc_mpidr(struct acpi_subtable_header *entry, } static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt, - int type, u32 acpi_id, bool ignore_disabled) + int type, u32 acpi_id) { unsigned long madt_end, entry; phys_cpuid_t phys_id = PHYS_CPUID_INVALID; /* CPU hardware ID */ @@ -130,20 +127,16 @@ static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt, struct acpi_subtable_header *header = (struct acpi_subtable_header *)entry; if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) { - if (!map_lapic_id(header, acpi_id, &phys_id, - ignore_disabled)) + if (!map_lapic_id(header, acpi_id, &phys_id)) break; } else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) { - if (!map_x2apic_id(header, type, acpi_id, &phys_id, - ignore_disabled)) + if (!map_x2apic_id(header, type, acpi_id, &phys_id)) break; } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) { - if (!map_lsapic_id(header, type, acpi_id, &phys_id, - ignore_disabled)) + if (!map_lsapic_id(header, type, acpi_id, &phys_id)) break; } else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT) { - if (!map_gicc_mpidr(header, type, acpi_id, &phys_id, - ignore_disabled)) + if (!map_gicc_mpidr(header, type, acpi_id, &phys_id)) break; } entry += header->length; @@ -161,15 +154,14 @@ phys_cpuid_t __init acpi_map_madt_entry(u32 acpi_id) if (!madt) return PHYS_CPUID_INVALID; - rv = map_madt_entry(madt, 1, acpi_id, true); + rv = map_madt_entry(madt, 1, acpi_id); acpi_put_table((struct acpi_table_header *)madt); return rv; } -static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, 
u32 acpi_id, - bool ignore_disabled) +static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id) { struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; @@ -190,38 +182,30 @@ static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id, header = (struct acpi_subtable_header *)obj->buffer.pointer; if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) - map_lapic_id(header, acpi_id, &phys_id, ignore_disabled); + map_lapic_id(header, acpi_id, &phys_id); else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) - map_lsapic_id(header, type, acpi_id, &phys_id, ignore_disabled); + map_lsapic_id(header, type, acpi_id, &phys_id); else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) - map_x2apic_id(header, type, acpi_id, &phys_id, ignore_disabled); + map_x2apic_id(header, type, acpi_id, &phys_id); else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT) - map_gicc_mpidr(header, type, acpi_id, &phys_id, - ignore_disabled); + map_gicc_mpidr(header, type, acpi_id, &phys_id); exit: kfree(buffer.pointer); return phys_id; } -static phys_cpuid_t __acpi_get_phys_id(acpi_handle handle, int type, - u32 acpi_id, bool ignore_disabled) +phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id) { phys_cpuid_t phys_id; - phys_id = map_mat_entry(handle, type, acpi_id, ignore_disabled); + phys_id = map_mat_entry(handle, type, acpi_id); if (invalid_phys_cpuid(phys_id)) - phys_id = map_madt_entry(get_madt_table(), type, acpi_id, - ignore_disabled); + phys_id = map_madt_entry(get_madt_table(), type, acpi_id); return phys_id; } -phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id) -{ - return __acpi_get_phys_id(handle, type, acpi_id, true); -} - int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id) { #ifdef CONFIG_SMP @@ -278,79 +262,6 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id) } EXPORT_SYMBOL_GPL(acpi_get_cpuid); -#ifdef CONFIG_ACPI_HOTPLUG_CPU -static bool __init -map_processor(acpi_handle handle, phys_cpuid_t *phys_id, int *cpuid) -{ - int type, id; - u32 acpi_id; - acpi_status status; - acpi_object_type acpi_type; - unsigned long long tmp; - union acpi_object object = { 0 }; - struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; - - status = acpi_get_type(handle, &acpi_type); - if (ACPI_FAILURE(status)) - return false; - - switch (acpi_type) { - case ACPI_TYPE_PROCESSOR: - status = acpi_evaluate_object(handle, NULL, NULL, &buffer); - if (ACPI_FAILURE(status)) - return false; - acpi_id = object.processor.proc_id; - - /* validate the acpi_id */ - if(acpi_processor_validate_proc_id(acpi_id)) - return false; - break; - case ACPI_TYPE_DEVICE: - status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp); - if (ACPI_FAILURE(status)) - return false; - acpi_id = tmp; - break; - default: - return false; - } - - type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0; - - *phys_id = __acpi_get_phys_id(handle, type, acpi_id, false); - id = acpi_map_cpuid(*phys_id, acpi_id); - - if (id < 0) - return false; - *cpuid = id; - return true; -} - -static acpi_status __init -set_processor_node_mapping(acpi_handle handle, u32 lvl, void *context, - void **rv) -{ - phys_cpuid_t phys_id; - int cpu_id; - - if (!map_processor(handle, &phys_id, &cpu_id)) - return AE_ERROR; - - acpi_map_cpu2node(handle, cpu_id, phys_id); - return AE_OK; -} - -void __init acpi_set_processor_mapping(void) -{ - /* Set persistent cpu <-> node mapping for all processors. 
*/ - acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, - ACPI_UINT32_MAX, set_processor_node_mapping, - NULL, NULL, NULL); -} -#else -void __init acpi_set_processor_mapping(void) {} -#endif /* CONFIG_ACPI_HOTPLUG_CPU */ - #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC static int get_ioapic_id(struct acpi_subtable_header *entry, u32 gsi_base, u64 *phys_addr, int *ioapic_id) diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c index 01c94669a2b0ad..3afa8c1fa12702 100644 --- a/drivers/acpi/spcr.c +++ b/drivers/acpi/spcr.c @@ -30,7 +30,7 @@ static bool qdf2400_erratum_44_present(struct acpi_table_header *h) return true; if (!memcmp(h->oem_table_id, "QDF2400 ", ACPI_OEM_TABLE_ID_SIZE) && - h->oem_revision == 0) + h->oem_revision == 1) return true; return false; diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 2bbcdc6fdfeec9..aae4d8d4be361b 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -31,7 +31,8 @@ #include #include #include -#include +#include +#include #include #include #include diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c index 85d833289f28f8..4c96f3ac4976d9 100644 --- a/drivers/ata/ahci_qoriq.c +++ b/drivers/ata/ahci_qoriq.c @@ -177,7 +177,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv) case AHCI_LS1043A: if (!qpriv->ecc_addr) return -EINVAL; - writel(ECC_DIS_ARMV8_CH2, qpriv->ecc_addr); + writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2, + qpriv->ecc_addr); writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); if (qpriv->is_dmacoherent) @@ -194,7 +195,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv) case AHCI_LS1046A: if (!qpriv->ecc_addr) return -EINVAL; - writel(ECC_DIS_ARMV8_CH2, qpriv->ecc_addr); + writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2, + qpriv->ecc_addr); writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); if (qpriv->is_dmacoherent) diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 12d3a66600a3f7..1ac70744ae7b4b 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -600,6 +600,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg) u8 args[4], *argbuf = NULL, *sensebuf = NULL; int argsize = 0; enum dma_data_direction data_dir; + struct scsi_sense_hdr sshdr; int cmd_result; if (arg == NULL) @@ -648,7 +649,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg) /* Good values for timeout and retries? Values below from scsi_ioctl_send_command() for default case... */ cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize, - sensebuf, (10*HZ), 5, 0, NULL); + sensebuf, &sshdr, (10*HZ), 5, 0, 0, NULL); if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */ u8 *desc = sensebuf + 8; @@ -657,9 +658,6 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg) /* If we set cc then ATA pass-through will cause a * check condition even if no error. Filter that. 
*/ if (cmd_result & SAM_STAT_CHECK_CONDITION) { - struct scsi_sense_hdr sshdr; - scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, - &sshdr); if (sshdr.sense_key == RECOVERED_ERROR && sshdr.asc == 0 && sshdr.ascq == 0x1d) cmd_result &= ~SAM_STAT_CHECK_CONDITION; @@ -707,6 +705,7 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg) int rc = 0; u8 scsi_cmd[MAX_COMMAND_SIZE]; u8 args[7], *sensebuf = NULL; + struct scsi_sense_hdr sshdr; int cmd_result; if (arg == NULL) @@ -734,7 +733,7 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg) /* Good values for timeout and retries? Values below from scsi_ioctl_send_command() for default case... */ cmd_result = scsi_execute(scsidev, scsi_cmd, DMA_NONE, NULL, 0, - sensebuf, (10*HZ), 5, 0, NULL); + sensebuf, &sshdr, (10*HZ), 5, 0, 0, NULL); if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */ u8 *desc = sensebuf + 8; @@ -743,9 +742,6 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg) /* If we set cc then ATA pass-through will cause a * check condition even if no error. Filter that. */ if (cmd_result & SAM_STAT_CHECK_CONDITION) { - struct scsi_sense_hdr sshdr; - scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, - &sshdr); if (sshdr.sense_key == RECOVERED_ERROR && sshdr.asc == 0 && sshdr.ascq == 0x1d) cmd_result &= ~SAM_STAT_CHECK_CONDITION; diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 2bd92dca3e6204..274d6d7193d7ca 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -1482,7 +1482,6 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) break; default: - WARN_ON_ONCE(1); return AC_ERR_SYSTEM; } diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c index 46698232e6bff0..19e6e539a061b9 100644 --- a/drivers/ata/libata-transport.c +++ b/drivers/ata/libata-transport.c @@ -224,7 +224,6 @@ static DECLARE_TRANSPORT_CLASS(ata_port_class, static void ata_tport_release(struct device *dev) { - put_device(dev->parent); } /** @@ -284,7 +283,7 @@ int ata_tport_add(struct device *parent, device_initialize(dev); dev->type = &ata_port_type; - dev->parent = get_device(parent); + dev->parent = parent; dev->release = ata_tport_release; dev_set_name(dev, "ata%d", ap->print_id); transport_setup_device(dev); @@ -348,7 +347,6 @@ static DECLARE_TRANSPORT_CLASS(ata_link_class, static void ata_tlink_release(struct device *dev) { - put_device(dev->parent); } /** @@ -410,7 +408,7 @@ int ata_tlink_add(struct ata_link *link) int error; device_initialize(dev); - dev->parent = get_device(&ap->tdev); + dev->parent = &ap->tdev; dev->release = ata_tlink_release; if (ata_is_host_link(link)) dev_set_name(dev, "link%d", ap->print_id); @@ -589,7 +587,6 @@ static DECLARE_TRANSPORT_CLASS(ata_dev_class, static void ata_tdev_release(struct device *dev) { - put_device(dev->parent); } /** @@ -662,7 +659,7 @@ static int ata_tdev_add(struct ata_device *ata_dev) int error; device_initialize(dev); - dev->parent = get_device(&link->tdev); + dev->parent = &link->tdev; dev->release = ata_tdev_release; if (ata_is_host_link(link)) dev_set_name(dev, "dev%d.%d", ap->print_id,ata_dev->devno); diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c index 2bf1ef1c3c786e..0f18480b33b55f 100644 --- a/drivers/atm/horizon.c +++ b/drivers/atm/horizon.c @@ -27,6 +27,7 @@ #include #include +#include #include #include #include diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c index bf43b5d2aafcaf..83f1439e57fd8c 100644 --- 
a/drivers/auxdisplay/img-ascii-lcd.c +++ b/drivers/auxdisplay/img-ascii-lcd.c @@ -218,6 +218,7 @@ static const struct of_device_id img_ascii_lcd_matches[] = { { .compatible = "img,boston-lcd", .data = &boston_config }, { .compatible = "mti,malta-lcd", .data = &malta_config }, { .compatible = "mti,sead3-lcd", .data = &sead3_config }, + { /* sentinel */ } }; /** diff --git a/drivers/base/core.c b/drivers/base/core.c index 3050e6f994031f..6bb60fb6a30b7b 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include "base.h" @@ -638,11 +639,6 @@ int lock_device_hotplug_sysfs(void) return restart_syscall(); } -void assert_held_device_hotplug(void) -{ - lockdep_assert_held(&device_hotplug_lock); -} - #ifdef CONFIG_BLOCK static inline int device_is_not_partition(struct device *dev) { diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c index 44a74cf1372c6e..d2fb9c8ed2057b 100644 --- a/drivers/base/devtmpfs.c +++ b/drivers/base/devtmpfs.c @@ -309,7 +309,8 @@ static int handle_remove(const char *nodename, struct device *dev) if (d_really_is_positive(dentry)) { struct kstat stat; struct path p = {.mnt = parent.mnt, .dentry = dentry}; - err = vfs_getattr(&p, &stat); + err = vfs_getattr(&p, &stat, STATX_TYPE | STATX_MODE, + AT_STATX_SYNC_AS_STAT); if (!err && dev_mynode(dev, d_inode(dentry), &stat)) { struct iattr newattrs; /* diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 3a75fb1b4126f0..e697dec9d25bf5 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -273,6 +273,93 @@ static void genpd_queue_power_off_work(struct generic_pm_domain *genpd) queue_work(pm_wq, &genpd->power_off_work); } +/** + * genpd_power_off - Remove power from a given PM domain. + * @genpd: PM domain to power down. + * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the + * RPM status of the releated device is in an intermediate state, not yet turned + * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not + * be RPM_SUSPENDED, while it tries to power off the PM domain. + * + * If all of the @genpd's devices have been suspended and all of its subdomains + * have been powered down, remove power from @genpd. + */ +static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on, + unsigned int depth) +{ + struct pm_domain_data *pdd; + struct gpd_link *link; + unsigned int not_suspended = 0; + + /* + * Do not try to power off the domain in the following situations: + * (1) The domain is already in the "power off" state. + * (2) System suspend is in progress. + */ + if (genpd->status == GPD_STATE_POWER_OFF + || genpd->prepared_count > 0) + return 0; + + if (atomic_read(&genpd->sd_count) > 0) + return -EBUSY; + + list_for_each_entry(pdd, &genpd->dev_list, list_node) { + enum pm_qos_flags_status stat; + + stat = dev_pm_qos_flags(pdd->dev, + PM_QOS_FLAG_NO_POWER_OFF + | PM_QOS_FLAG_REMOTE_WAKEUP); + if (stat > PM_QOS_FLAGS_NONE) + return -EBUSY; + + /* + * Do not allow PM domain to be powered off, when an IRQ safe + * device is part of a non-IRQ safe domain. 
+ */ + if (!pm_runtime_suspended(pdd->dev) || + irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd)) + not_suspended++; + } + + if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on)) + return -EBUSY; + + if (genpd->gov && genpd->gov->power_down_ok) { + if (!genpd->gov->power_down_ok(&genpd->domain)) + return -EAGAIN; + } + + if (genpd->power_off) { + int ret; + + if (atomic_read(&genpd->sd_count) > 0) + return -EBUSY; + + /* + * If sd_count > 0 at this point, one of the subdomains hasn't + * managed to call genpd_power_on() for the master yet after + * incrementing it. In that case genpd_power_on() will wait + * for us to drop the lock, so we can call .power_off() and let + * the genpd_power_on() restore power for us (this shouldn't + * happen very often). + */ + ret = _genpd_power_off(genpd, true); + if (ret) + return ret; + } + + genpd->status = GPD_STATE_POWER_OFF; + + list_for_each_entry(link, &genpd->slave_links, slave_node) { + genpd_sd_counter_dec(link->master); + genpd_lock_nested(link->master, depth + 1); + genpd_power_off(link->master, false, depth + 1); + genpd_unlock(link->master); + } + + return 0; +} + /** * genpd_power_on - Restore power to a given PM domain and its masters. * @genpd: PM domain to power up. @@ -321,7 +408,9 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth) &genpd->slave_links, slave_node) { genpd_sd_counter_dec(link->master); - genpd_queue_power_off_work(link->master); + genpd_lock_nested(link->master, depth + 1); + genpd_power_off(link->master, false, depth + 1); + genpd_unlock(link->master); } return ret; @@ -367,87 +456,6 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb, return NOTIFY_DONE; } -/** - * genpd_power_off - Remove power from a given PM domain. - * @genpd: PM domain to power down. - * @is_async: PM domain is powered down from a scheduled work - * - * If all of the @genpd's devices have been suspended and all of its subdomains - * have been powered down, remove power from @genpd. - */ -static int genpd_power_off(struct generic_pm_domain *genpd, bool is_async) -{ - struct pm_domain_data *pdd; - struct gpd_link *link; - unsigned int not_suspended = 0; - - /* - * Do not try to power off the domain in the following situations: - * (1) The domain is already in the "power off" state. - * (2) System suspend is in progress. - */ - if (genpd->status == GPD_STATE_POWER_OFF - || genpd->prepared_count > 0) - return 0; - - if (atomic_read(&genpd->sd_count) > 0) - return -EBUSY; - - list_for_each_entry(pdd, &genpd->dev_list, list_node) { - enum pm_qos_flags_status stat; - - stat = dev_pm_qos_flags(pdd->dev, - PM_QOS_FLAG_NO_POWER_OFF - | PM_QOS_FLAG_REMOTE_WAKEUP); - if (stat > PM_QOS_FLAGS_NONE) - return -EBUSY; - - /* - * Do not allow PM domain to be powered off, when an IRQ safe - * device is part of a non-IRQ safe domain. - */ - if (!pm_runtime_suspended(pdd->dev) || - irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd)) - not_suspended++; - } - - if (not_suspended > 1 || (not_suspended == 1 && is_async)) - return -EBUSY; - - if (genpd->gov && genpd->gov->power_down_ok) { - if (!genpd->gov->power_down_ok(&genpd->domain)) - return -EAGAIN; - } - - if (genpd->power_off) { - int ret; - - if (atomic_read(&genpd->sd_count) > 0) - return -EBUSY; - - /* - * If sd_count > 0 at this point, one of the subdomains hasn't - * managed to call genpd_power_on() for the master yet after - * incrementing it. 
In that case genpd_power_on() will wait - * for us to drop the lock, so we can call .power_off() and let - * the genpd_power_on() restore power for us (this shouldn't - * happen very often). - */ - ret = _genpd_power_off(genpd, true); - if (ret) - return ret; - } - - genpd->status = GPD_STATE_POWER_OFF; - - list_for_each_entry(link, &genpd->slave_links, slave_node) { - genpd_sd_counter_dec(link->master); - genpd_queue_power_off_work(link->master); - } - - return 0; -} - /** * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0. * @work: Work structure used for scheduling the execution of this function. @@ -459,7 +467,7 @@ static void genpd_power_off_work_fn(struct work_struct *work) genpd = container_of(work, struct generic_pm_domain, power_off_work); genpd_lock(genpd); - genpd_power_off(genpd, true); + genpd_power_off(genpd, false, 0); genpd_unlock(genpd); } @@ -578,7 +586,7 @@ static int genpd_runtime_suspend(struct device *dev) return 0; genpd_lock(genpd); - genpd_power_off(genpd, false); + genpd_power_off(genpd, true, 0); genpd_unlock(genpd); return 0; @@ -658,7 +666,7 @@ static int genpd_runtime_resume(struct device *dev) if (!pm_runtime_is_irq_safe(dev) || (pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) { genpd_lock(genpd); - genpd_power_off(genpd, 0); + genpd_power_off(genpd, true, 0); genpd_unlock(genpd); } diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 249e0304597f5b..9faee1c893e53c 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c index 91ec3232d63004..dae61720b31402 100644 --- a/drivers/base/power/opp/core.c +++ b/drivers/base/power/opp/core.c @@ -231,7 +231,8 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) * The caller needs to ensure that opp_table (and hence the regulator) * isn't freed, while we are executing this routine. */ - for (i = 0; reg = regulators[i], i < count; i++) { + for (i = 0; i < count; i++) { + reg = regulators[i]; ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max); if (ret > 0) latency_ns += ret * 1000; diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index d888d9869b6a52..f850daeffba441 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -17,12 +17,9 @@ * * This QoS design is best effort based. Dependents register their QoS needs. * Watchers register to keep track of the current QoS needs of the system. - * Watchers can register different types of notification callbacks: - * . a per-device notification callback using the dev_pm_qos_*_notifier API. - * The notification chain data is stored in the per-device constraint - * data struct. - * . a system-wide notification callback using the dev_pm_qos_*_global_notifier - * API. The notification chain data is stored in a static variable. + * Watchers can register a per-device notification callback using the + * dev_pm_qos_*_notifier API. The notification chain data is stored in the + * per-device constraint data struct. * * Note about the per-device constraint data struct allocation: * . The per-device constraints data struct ptr is tored into the device @@ -49,8 +46,6 @@ static DEFINE_MUTEX(dev_pm_qos_mtx); static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx); -static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); - /** * __dev_pm_qos_flags - Check PM QoS flags for a given device. * @dev: Device to check the PM QoS flags for. 
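The drivers/base/power/domain.c hunks above replace the queued, asynchronous master power-off with a synchronous walk: once a domain reaches GPD_STATE_POWER_OFF, each master's subdomain count is dropped and the master is retried immediately under a lock nested at depth + 1. A small user-space toy model of that propagation, with illustrative names (toy_domain, toy_power_off) that are not part of the kernel API:

/*
 * Toy model of the propagation pattern in genpd_power_off() above -- not
 * kernel code: a domain powers off only when it has no active devices and
 * no powered subdomains, then drops its master's subdomain count and
 * retries the master at depth + 1.
 */
#include <stdio.h>

struct toy_domain {
	const char *name;
	int active_devices;		/* devices not yet runtime-suspended */
	int sd_count;			/* subdomains still powered on */
	int powered_on;
	struct toy_domain *master;	/* single master for simplicity */
};

static void toy_power_off(struct toy_domain *d, unsigned int depth)
{
	if (!d->powered_on || d->active_devices > 0 || d->sd_count > 0)
		return;

	printf("%*spowering off %s\n", (int)(depth * 2), "", d->name);
	d->powered_on = 0;

	if (d->master) {
		d->master->sd_count--;
		/* the kernel takes the master's lock nested at depth + 1 here */
		toy_power_off(d->master, depth + 1);
	}
}

int main(void)
{
	struct toy_domain parent = { "parent", 0, 1, 1, NULL };
	struct toy_domain child  = { "child", 0, 0, 1, &parent };

	toy_power_off(&child, 0);	/* child first, then its master */
	return 0;
}

The depth argument in the real code only feeds the nested-lock annotation; the recursion terminates because the master graph is kept acyclic, so every step moves toward a root domain with no master.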
@@ -108,8 +103,7 @@ s32 __dev_pm_qos_read_value(struct device *dev) { lockdep_assert_held(&dev->power.lock); - return IS_ERR_OR_NULL(dev->power.qos) ? - 0 : pm_qos_read_value(&dev->power.qos->resume_latency); + return dev_pm_qos_raw_read_value(dev); } /** @@ -135,8 +129,7 @@ s32 dev_pm_qos_read_value(struct device *dev) * @value: Value to assign to the QoS request. * * Internal function to update the constraints list using the PM QoS core - * code and if needed call the per-device and the global notification - * callbacks + * code and if needed call the per-device callbacks. */ static int apply_constraint(struct dev_pm_qos_request *req, enum pm_qos_req_action action, s32 value) @@ -148,12 +141,6 @@ static int apply_constraint(struct dev_pm_qos_request *req, case DEV_PM_QOS_RESUME_LATENCY: ret = pm_qos_update_target(&qos->resume_latency, &req->data.pnode, action, value); - if (ret) { - value = pm_qos_read_value(&qos->resume_latency); - blocking_notifier_call_chain(&dev_pm_notifiers, - (unsigned long)value, - req); - } break; case DEV_PM_QOS_LATENCY_TOLERANCE: ret = pm_qos_update_target(&qos->latency_tolerance, @@ -535,36 +522,6 @@ int dev_pm_qos_remove_notifier(struct device *dev, } EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier); -/** - * dev_pm_qos_add_global_notifier - sets notification entry for changes to - * target value of the PM QoS constraints for any device - * - * @notifier: notifier block managed by caller. - * - * Will register the notifier into a notification chain that gets called - * upon changes to the target value for any device. - */ -int dev_pm_qos_add_global_notifier(struct notifier_block *notifier) -{ - return blocking_notifier_chain_register(&dev_pm_notifiers, notifier); -} -EXPORT_SYMBOL_GPL(dev_pm_qos_add_global_notifier); - -/** - * dev_pm_qos_remove_global_notifier - deletes notification for changes to - * target value of PM QoS constraints for any device - * - * @notifier: notifier block to be removed. - * - * Will remove the notifier from the notification chain that gets called - * upon changes to the target value for any device. - */ -int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier) -{ - return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier); -} -EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier); - /** * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor. * @dev: Device whose ancestor to add the request for. diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index a14fac6a01d316..7bcf80fa9adad4 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -7,7 +7,7 @@ * This file is released under the GPLv2. 
*/ -#include +#include #include #include #include diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index f546f8f107b06a..1368549704893c 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -8,7 +8,7 @@ #include #include -#include +#include #include #include #include diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 27d613795653bd..8e1a4554951c0d 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -348,7 +348,7 @@ static void cciss_unmap_sg_chain_block(ctlr_info_t *h, CommandList_struct *c) pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE); } -static void cciss_map_sg_chain_block(ctlr_info_t *h, CommandList_struct *c, +static int cciss_map_sg_chain_block(ctlr_info_t *h, CommandList_struct *c, SGDescriptor_struct *chain_block, int len) { SGDescriptor_struct *chain_sg; @@ -359,8 +359,16 @@ static void cciss_map_sg_chain_block(ctlr_info_t *h, CommandList_struct *c, chain_sg->Len = len; temp64.val = pci_map_single(h->pdev, chain_block, len, PCI_DMA_TODEVICE); + if (dma_mapping_error(&h->pdev->dev, temp64.val)) { + dev_warn(&h->pdev->dev, + "%s: error mapping chain block for DMA\n", + __func__); + return -1; + } chain_sg->Addr.lower = temp64.val32.lower; chain_sg->Addr.upper = temp64.val32.upper; + + return 0; } #include "cciss_scsi.c" /* For SCSI tape support */ @@ -3369,15 +3377,31 @@ static void do_cciss_request(struct request_queue *q) temp64.val = (__u64) pci_map_page(h->pdev, sg_page(&tmp_sg[i]), tmp_sg[i].offset, tmp_sg[i].length, dir); + if (dma_mapping_error(&h->pdev->dev, temp64.val)) { + dev_warn(&h->pdev->dev, + "%s: error mapping page for DMA\n", __func__); + creq->errors = make_status_bytes(SAM_STAT_GOOD, + 0, DRIVER_OK, + DID_SOFT_ERROR); + cmd_free(h, c); + return; + } curr_sg[sg_index].Addr.lower = temp64.val32.lower; curr_sg[sg_index].Addr.upper = temp64.val32.upper; curr_sg[sg_index].Ext = 0; /* we are not chaining */ ++sg_index; } - if (chained) - cciss_map_sg_chain_block(h, c, h->cmd_sg_list[c->cmdindex], + if (chained) { + if (cciss_map_sg_chain_block(h, c, h->cmd_sg_list[c->cmdindex], (seg - (h->max_cmd_sgentries - 1)) * - sizeof(SGDescriptor_struct)); + sizeof(SGDescriptor_struct))) { + creq->errors = make_status_bytes(SAM_STAT_GOOD, + 0, DRIVER_OK, + DID_SOFT_ERROR); + cmd_free(h, c); + return; + } + } /* track how many SG entries we are using */ if (seg > h->maxSG) diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 4cb8f21ff4eff2..724d1c50fc5283 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -30,7 +30,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 116509852a34da..92c60cbd04ee8c 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -52,6 +52,7 @@ #define __KERNEL_SYSCALLS__ #include #include +#include #include #include "drbd_int.h" @@ -1846,7 +1847,7 @@ int drbd_send_out_of_sync(struct drbd_peer_device *peer_device, struct drbd_requ int drbd_send(struct drbd_connection *connection, struct socket *sock, void *buf, size_t size, unsigned msg_flags) { - struct kvec iov; + struct kvec iov = {.iov_base = buf, .iov_len = size}; struct msghdr msg; int rv, sent = 0; @@ -1855,15 +1856,14 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock, /* THINK if (signal_pending) return ... ? 
*/ - iov.iov_base = buf; - iov.iov_len = size; - msg.msg_name = NULL; msg.msg_namelen = 0; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = msg_flags | MSG_NOSIGNAL; + iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, size); + if (sock == connection->data.socket) { rcu_read_lock(); connection->ko_count = rcu_dereference(connection->net_conf)->ko_count; @@ -1871,7 +1871,7 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock, drbd_update_congested(connection); } do { - rv = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); + rv = sock_sendmsg(sock, &msg); if (rv == -EAGAIN) { if (we_should_drop_the_connection(connection, sock)) break; @@ -1885,8 +1885,6 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock, if (rv < 0) break; sent += rv; - iov.iov_base += rv; - iov.iov_len -= rv; } while (sent < size); if (sock == connection->data.socket) diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index c7728dd77230a8..aa6bf9692effec 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -36,6 +36,8 @@ #include #include #include +#include +#include #include #define __KERNEL_SYSCALLS__ #include diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index c6755c9a0aeab4..3bff33f21435ce 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -25,7 +25,7 @@ #include #include -#include +#include #include #include #include diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 4b52a16903298c..0ecb6461ed81e2 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -501,9 +501,9 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd, cmd->iocb.ki_flags = IOCB_DIRECT; if (rw == WRITE) - ret = file->f_op->write_iter(&cmd->iocb, &iter); + ret = call_write_iter(file, &cmd->iocb, &iter); else - ret = file->f_op->read_iter(&cmd->iocb, &iter); + ret = call_read_iter(file, &cmd->iocb, &iter); if (ret != -EIOCBQUEUED) cmd->iocb.ki_complete(&cmd->iocb, ret, 0); @@ -1142,13 +1142,6 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) (info->lo_flags & LO_FLAGS_AUTOCLEAR)) lo->lo_flags ^= LO_FLAGS_AUTOCLEAR; - if ((info->lo_flags & LO_FLAGS_PARTSCAN) && - !(lo->lo_flags & LO_FLAGS_PARTSCAN)) { - lo->lo_flags |= LO_FLAGS_PARTSCAN; - lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN; - loop_reread_partitions(lo, lo->lo_device); - } - lo->lo_encrypt_key_size = info->lo_encrypt_key_size; lo->lo_init[0] = info->lo_init[0]; lo->lo_init[1] = info->lo_init[1]; @@ -1163,6 +1156,14 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) exit: blk_mq_unfreeze_queue(lo->lo_queue); + + if (!err && (info->lo_flags & LO_FLAGS_PARTSCAN) && + !(lo->lo_flags & LO_FLAGS_PARTSCAN)) { + lo->lo_flags |= LO_FLAGS_PARTSCAN; + lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN; + loop_reread_partitions(lo, lo->lo_device); + } + return err; } @@ -1175,7 +1176,8 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info) if (lo->lo_state != Lo_bound) return -ENXIO; - error = vfs_getattr(&file->f_path, &stat); + error = vfs_getattr(&file->f_path, &stat, + STATX_INO, AT_STATX_SYNC_AS_STAT); if (error) return error; memset(info, 0, sizeof(*info)); diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 0bf2b21a62cb77..d8a23561b4cb4b 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -47,6 +47,8 @@ static DEFINE_MUTEX(nbd_index_mutex); struct nbd_sock { struct socket *sock; struct 
mutex tx_lock; + struct request *pending; + int sent; }; #define NBD_TIMEDOUT 0 @@ -124,7 +126,8 @@ static const char *nbdcmd_to_ascii(int cmd) static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev) { - bd_set_size(bdev, 0); + if (bdev->bd_openers <= 1) + bd_set_size(bdev, 0); set_capacity(nbd->disk, 0); kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE); @@ -190,7 +193,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n"); set_bit(NBD_TIMEDOUT, &nbd->runtime_flags); - req->errors++; + req->errors = -EIO; mutex_lock(&nbd->config_lock); sock_shutdown(nbd); @@ -201,13 +204,12 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, /* * Send or receive packet. */ -static int sock_xmit(struct nbd_device *nbd, int index, int send, void *buf, - int size, int msg_flags) +static int sock_xmit(struct nbd_device *nbd, int index, int send, + struct iov_iter *iter, int msg_flags, int *sent) { struct socket *sock = nbd->socks[index]->sock; int result; struct msghdr msg; - struct kvec iov; unsigned long pflags = current->flags; if (unlikely(!sock)) { @@ -217,11 +219,11 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send, void *buf, return -EINVAL; } + msg.msg_iter = *iter; + current->flags |= PF_MEMALLOC; do { sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC; - iov.iov_base = buf; - iov.iov_len = size; msg.msg_name = NULL; msg.msg_namelen = 0; msg.msg_control = NULL; @@ -229,46 +231,40 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send, void *buf, msg.msg_flags = msg_flags | MSG_NOSIGNAL; if (send) - result = kernel_sendmsg(sock, &msg, &iov, 1, size); + result = sock_sendmsg(sock, &msg); else - result = kernel_recvmsg(sock, &msg, &iov, 1, size, - msg.msg_flags); + result = sock_recvmsg(sock, &msg, msg.msg_flags); if (result <= 0) { if (result == 0) result = -EPIPE; /* short read */ break; } - size -= result; - buf += result; - } while (size > 0); + if (sent) + *sent += result; + } while (msg_data_left(&msg)); tsk_restore_flags(current, pflags, PF_MEMALLOC); return result; } -static inline int sock_send_bvec(struct nbd_device *nbd, int index, - struct bio_vec *bvec, int flags) -{ - int result; - void *kaddr = kmap(bvec->bv_page); - result = sock_xmit(nbd, index, 1, kaddr + bvec->bv_offset, - bvec->bv_len, flags); - kunmap(bvec->bv_page); - return result; -} - /* always call with the tx_lock held */ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) { struct request *req = blk_mq_rq_from_pdu(cmd); + struct nbd_sock *nsock = nbd->socks[index]; int result; - struct nbd_request request; + struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)}; + struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)}; + struct iov_iter from; unsigned long size = blk_rq_bytes(req); struct bio *bio; u32 type; u32 tag = blk_mq_unique_tag(req); + int sent = nsock->sent, skip = 0; + + iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); switch (req_op(req)) { case REQ_OP_DISCARD: @@ -294,8 +290,17 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) return -EIO; } - memset(&request, 0, sizeof(request)); - request.magic = htonl(NBD_REQUEST_MAGIC); + /* We did a partial send previously, and we at least sent the whole + * request struct, so just go and send the rest of the pages in the + * request. 
+ */ + if (sent) { + if (sent >= sizeof(request)) { + skip = sent - sizeof(request); + goto send_pages; + } + iov_iter_advance(&from, sent); + } request.type = htonl(type); if (type != NBD_CMD_FLUSH) { request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9); @@ -306,16 +311,28 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n", cmd, nbdcmd_to_ascii(type), (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req)); - result = sock_xmit(nbd, index, 1, &request, sizeof(request), - (type == NBD_CMD_WRITE) ? MSG_MORE : 0); + result = sock_xmit(nbd, index, 1, &from, + (type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent); if (result <= 0) { + if (result == -ERESTARTSYS) { + /* If we havne't sent anything we can just return BUSY, + * however if we have sent something we need to make + * sure we only allow this req to be sent until we are + * completely done. + */ + if (sent) { + nsock->pending = req; + nsock->sent = sent; + } + return BLK_MQ_RQ_QUEUE_BUSY; + } dev_err_ratelimited(disk_to_dev(nbd->disk), "Send control failed (result %d)\n", result); return -EIO; } - +send_pages: if (type != NBD_CMD_WRITE) - return 0; + goto out; bio = req->bio; while (bio) { @@ -329,8 +346,27 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n", cmd, bvec.bv_len); - result = sock_send_bvec(nbd, index, &bvec, flags); + iov_iter_bvec(&from, ITER_BVEC | WRITE, + &bvec, 1, bvec.bv_len); + if (skip) { + if (skip >= iov_iter_count(&from)) { + skip -= iov_iter_count(&from); + continue; + } + iov_iter_advance(&from, skip); + skip = 0; + } + result = sock_xmit(nbd, index, 1, &from, flags, &sent); if (result <= 0) { + if (result == -ERESTARTSYS) { + /* We've already sent the header, we + * have no choice but to set pending and + * return BUSY. 
+ */ + nsock->pending = req; + nsock->sent = sent; + return BLK_MQ_RQ_QUEUE_BUSY; + } dev_err(disk_to_dev(nbd->disk), "Send data failed (result %d)\n", result); @@ -347,20 +383,12 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) } bio = next; } +out: + nsock->pending = NULL; + nsock->sent = 0; return 0; } -static inline int sock_recv_bvec(struct nbd_device *nbd, int index, - struct bio_vec *bvec) -{ - int result; - void *kaddr = kmap(bvec->bv_page); - result = sock_xmit(nbd, index, 0, kaddr + bvec->bv_offset, - bvec->bv_len, MSG_WAITALL); - kunmap(bvec->bv_page); - return result; -} - /* NULL returned = something went wrong, inform userspace */ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) { @@ -370,9 +398,12 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) struct request *req = NULL; u16 hwq; u32 tag; + struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)}; + struct iov_iter to; reply.magic = 0; - result = sock_xmit(nbd, index, 0, &reply, sizeof(reply), MSG_WAITALL); + iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply)); + result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL); if (result <= 0) { if (!test_bit(NBD_DISCONNECTED, &nbd->runtime_flags) && !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags)) @@ -402,7 +433,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) if (ntohl(reply.error)) { dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n", ntohl(reply.error)); - req->errors++; + req->errors = -EIO; return cmd; } @@ -412,11 +443,13 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) struct bio_vec bvec; rq_for_each_segment(bvec, req, iter) { - result = sock_recv_bvec(nbd, index, &bvec); + iov_iter_bvec(&to, ITER_BVEC | READ, + &bvec, 1, bvec.bv_len); + result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL); if (result <= 0) { dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n", result); - req->errors++; + req->errors = -EIO; return cmd; } dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n", @@ -486,7 +519,7 @@ static void nbd_clear_req(struct request *req, void *data, bool reserved) if (!blk_mq_request_started(req)) return; cmd = blk_mq_rq_to_pdu(req); - req->errors++; + req->errors = -EIO; nbd_end_request(cmd); } @@ -499,22 +532,23 @@ static void nbd_clear_que(struct nbd_device *nbd) } -static void nbd_handle_cmd(struct nbd_cmd *cmd, int index) +static int nbd_handle_cmd(struct nbd_cmd *cmd, int index) { struct request *req = blk_mq_rq_from_pdu(cmd); struct nbd_device *nbd = cmd->nbd; struct nbd_sock *nsock; + int ret; if (index >= nbd->num_connections) { dev_err_ratelimited(disk_to_dev(nbd->disk), "Attempted send on invalid socket\n"); - goto error_out; + return -EINVAL; } if (test_bit(NBD_DISCONNECTED, &nbd->runtime_flags)) { dev_err_ratelimited(disk_to_dev(nbd->disk), "Attempted send on closed socket\n"); - goto error_out; + return -EINVAL; } req->errors = 0; @@ -525,29 +559,30 @@ static void nbd_handle_cmd(struct nbd_cmd *cmd, int index) mutex_unlock(&nsock->tx_lock); dev_err_ratelimited(disk_to_dev(nbd->disk), "Attempted send on closed socket\n"); - goto error_out; + return -EINVAL; } - if (nbd_send_cmd(nbd, cmd, index) != 0) { - dev_err_ratelimited(disk_to_dev(nbd->disk), - "Request send failed\n"); - req->errors++; - nbd_end_request(cmd); + /* Handle the case that we have a pending request that was partially + * transmitted that _has_ to be serviced first. 
We need to call requeue + * here so that it gets put _after_ the request that is already on the + * dispatch list. + */ + if (unlikely(nsock->pending && nsock->pending != req)) { + blk_mq_requeue_request(req, true); + ret = 0; + goto out; } - + ret = nbd_send_cmd(nbd, cmd, index); +out: mutex_unlock(&nsock->tx_lock); - - return; - -error_out: - req->errors++; - nbd_end_request(cmd); + return ret; } static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) { struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq); + int ret; /* * Since we look at the bio's to send the request over the network we @@ -560,10 +595,20 @@ static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx, */ init_completion(&cmd->send_complete); blk_mq_start_request(bd->rq); - nbd_handle_cmd(cmd, hctx->queue_num); + + /* We can be called directly from the user space process, which means we + * could possibly have signals pending so our sendmsg will fail. In + * this case we need to return that we are busy, otherwise error out as + * appropriate. + */ + ret = nbd_handle_cmd(cmd, hctx->queue_num); + if (ret < 0) + ret = BLK_MQ_RQ_QUEUE_ERROR; + if (!ret) + ret = BLK_MQ_RQ_QUEUE_OK; complete(&cmd->send_complete); - return BLK_MQ_RQ_QUEUE_OK; + return ret; } static int nbd_add_socket(struct nbd_device *nbd, struct block_device *bdev, @@ -598,6 +643,8 @@ static int nbd_add_socket(struct nbd_device *nbd, struct block_device *bdev, mutex_init(&nsock->tx_lock); nsock->sock = sock; + nsock->pending = NULL; + nsock->sent = 0; socks[nbd->num_connections++] = nsock; if (max_part) @@ -619,6 +666,8 @@ static void nbd_reset(struct nbd_device *nbd) static void nbd_bdev_reset(struct block_device *bdev) { + if (bdev->bd_openers > 1) + return; set_device_ro(bdev, false); bdev->bd_inode->i_size = 0; if (max_part > 0) { @@ -641,14 +690,17 @@ static void nbd_parse_flags(struct nbd_device *nbd, struct block_device *bdev) static void send_disconnects(struct nbd_device *nbd) { - struct nbd_request request = {}; + struct nbd_request request = { + .magic = htonl(NBD_REQUEST_MAGIC), + .type = htonl(NBD_CMD_DISC), + }; + struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)}; + struct iov_iter from; int i, ret; - request.magic = htonl(NBD_REQUEST_MAGIC); - request.type = htonl(NBD_CMD_DISC); - for (i = 0; i < nbd->num_connections; i++) { - ret = sock_xmit(nbd, i, 1, &request, sizeof(request), 0); + iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); + ret = sock_xmit(nbd, i, 1, &from, 0, NULL); if (ret <= 0) dev_err(disk_to_dev(nbd->disk), "Send disconnect failed %d\n", ret); @@ -679,7 +731,8 @@ static int nbd_clear_sock(struct nbd_device *nbd, struct block_device *bdev) { sock_shutdown(nbd); nbd_clear_que(nbd); - kill_bdev(bdev); + + __invalidate_device(bdev, true); nbd_bdev_reset(bdev); /* * We want to give the run thread a chance to wait for everybody @@ -689,8 +742,10 @@ static int nbd_clear_sock(struct nbd_device *nbd, struct block_device *bdev) nbd->num_connections) { int i; - for (i = 0; i < nbd->num_connections; i++) + for (i = 0; i < nbd->num_connections; i++) { + sockfd_put(nbd->socks[i]->sock); kfree(nbd->socks[i]); + } kfree(nbd->socks); nbd->socks = NULL; nbd->num_connections = 0; @@ -793,7 +848,10 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, nbd_size_set(nbd, bdev, nbd->blksize, arg); return 0; case NBD_SET_TIMEOUT: - nbd->tag_set.timeout = arg * HZ; + if (arg) { + nbd->tag_set.timeout = arg * HZ; + blk_queue_rq_timeout(nbd->disk->queue, arg * HZ); + } return 0; 
case NBD_SET_FLAGS: diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c index 10aed84244f518..939641d6e2625e 100644 --- a/drivers/block/paride/pcd.c +++ b/drivers/block/paride/pcd.c @@ -50,7 +50,7 @@ the slower the port i/o. In some cases, setting this to zero will speed up the device. (default -1) - major You may use this parameter to overide the + major You may use this parameter to override the default major number (46) that this driver will use. Be sure to change the device name as well. diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c index 644ba0888bd41b..9cfd2e06a64917 100644 --- a/drivers/block/paride/pd.c +++ b/drivers/block/paride/pd.c @@ -61,7 +61,7 @@ first drive found. - major You may use this parameter to overide the + major You may use this parameter to override the default major number (45) that this driver will use. Be sure to change the device name as well. diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c index ed93e8badf5684..14c5d32f5d8bc0 100644 --- a/drivers/block/paride/pf.c +++ b/drivers/block/paride/pf.c @@ -59,7 +59,7 @@ the slower the port i/o. In some cases, setting this to zero will speed up the device. (default -1) - major You may use this parameter to overide the + major You may use this parameter to override the default major number (47) that this driver will use. Be sure to change the device name as well. diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c index 5db955fe3a9490..3b5882bfb7364e 100644 --- a/drivers/block/paride/pg.c +++ b/drivers/block/paride/pg.c @@ -84,7 +84,7 @@ the slower the port i/o. In some cases, setting this to zero will speed up the device. (default -1) - major You may use this parameter to overide the + major You may use this parameter to override the default major number (97) that this driver will use. Be sure to change the device name as well. diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c index 61fc6824299ac1..e815312a00add6 100644 --- a/drivers/block/paride/pt.c +++ b/drivers/block/paride/pt.c @@ -61,7 +61,7 @@ the slower the port i/o. In some cases, setting this to zero will speed up the device. (default -1) - major You may use this parameter to overide the + major You may use this parameter to override the default major number (96) that this driver will use. Be sure to change the device name as well. 
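The rbd change that follows switches the feature-bit definitions from plain int shifts to 1ULL shifts. The usual rationale: `1 << n` is computed as int, so once a bit index reaches 31 the value overflows int (undefined behaviour, in practice sign-extending) when it lands in a 64-bit mask such as RBD_FEATURES_ALL; bit 7 is harmless today, but the ULL form keeps the definitions safe as the feature space grows. A standalone userspace demonstration (illustrative, not from the patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long bad  = 1 << 31;	/* int shift: UB, sign-extends in practice */
		unsigned long long good = 1ULL << 31;	/* 64-bit shift: well defined */

		printf("%llx\n%llx\n", bad, good);	/* ffffffff80000000 vs 80000000 */
		return 0;
	}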
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 4d680772379828..517838b659646d 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -120,10 +120,11 @@ static int atomic_dec_return_safe(atomic_t *v) /* Feature bits */ -#define RBD_FEATURE_LAYERING (1<<0) -#define RBD_FEATURE_STRIPINGV2 (1<<1) -#define RBD_FEATURE_EXCLUSIVE_LOCK (1<<2) -#define RBD_FEATURE_DATA_POOL (1<<7) +#define RBD_FEATURE_LAYERING (1ULL<<0) +#define RBD_FEATURE_STRIPINGV2 (1ULL<<1) +#define RBD_FEATURE_EXCLUSIVE_LOCK (1ULL<<2) +#define RBD_FEATURE_DATA_POOL (1ULL<<7) + #define RBD_FEATURES_ALL (RBD_FEATURE_LAYERING | \ RBD_FEATURE_STRIPINGV2 | \ RBD_FEATURE_EXCLUSIVE_LOCK | \ @@ -499,16 +500,23 @@ static bool rbd_is_lock_owner(struct rbd_device *rbd_dev) return is_lock_owner; } +static ssize_t rbd_supported_features_show(struct bus_type *bus, char *buf) +{ + return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED); +} + static BUS_ATTR(add, S_IWUSR, NULL, rbd_add); static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove); static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major); static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major); +static BUS_ATTR(supported_features, S_IRUGO, rbd_supported_features_show, NULL); static struct attribute *rbd_bus_attrs[] = { &bus_attr_add.attr, &bus_attr_remove.attr, &bus_attr_add_single_major.attr, &bus_attr_remove_single_major.attr, + &bus_attr_supported_features.attr, NULL, }; diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index aabd8e9d3035c8..61b3ffa4f45897 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c @@ -20,7 +20,7 @@ #include #include -#include +#include #include #include #include diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 024b473524c096..1d4c9f8bc1e16e 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -12,6 +13,7 @@ #include #include #include +#include #include #define PART_BITS 4 @@ -426,6 +428,7 @@ static int init_vq(struct virtio_blk *vblk) struct virtqueue **vqs; unsigned short num_vqs; struct virtio_device *vdev = vblk->vdev; + struct irq_affinity desc = { 0, }; err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ, struct virtio_blk_config, num_queues, @@ -452,7 +455,8 @@ static int init_vq(struct virtio_blk *vblk) } /* Discover virtqueues and write information to configuration. 
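The virtio_blk hunk this comment sits in extends find_vqs() with a struct irq_affinity argument, letting the transport spread MSI-X vectors across CPUs; callers that do not care (virtio_console, further down) simply pass NULL. A minimal sketch of the new call pattern, assuming this tree's signature:

	struct irq_affinity desc = { 0, };	/* no pre-/post-vectors reserved */

	err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names,
				     &desc);
	if (err)
		goto out;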
*/ - err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names); + err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names, + &desc); if (err) goto out; @@ -586,10 +590,18 @@ static int virtblk_init_request(void *data, struct request *rq, return 0; } +static int virtblk_map_queues(struct blk_mq_tag_set *set) +{ + struct virtio_blk *vblk = set->driver_data; + + return blk_mq_virtio_map_queues(set, vblk->vdev, 0); +} + static struct blk_mq_ops virtio_mq_ops = { .queue_rq = virtio_queue_rq, .complete = virtblk_request_done, .init_request = virtblk_init_request, + .map_queues = virtblk_map_queues, }; static unsigned int virtblk_queue_depth; diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index e27d89a36c3417..dceb5edd1e5455 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -1189,6 +1189,8 @@ static int zram_add(void) blk_queue_io_min(zram->disk->queue, PAGE_SIZE); blk_queue_io_opt(zram->disk->queue, PAGE_SIZE); zram->disk->queue->limits.discard_granularity = PAGE_SIZE; + zram->disk->queue->limits.max_sectors = SECTORS_PER_PAGE; + zram->disk->queue->limits.chunk_sectors = 0; blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX); /* * zram_bio_discard() will clear all logical blocks if logical block diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index c2c14a12713b56..08e054507d0bcd 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -344,7 +344,8 @@ config BT_WILINK config BT_QCOMSMD tristate "Qualcomm SMD based HCI support" - depends on (QCOM_SMD && QCOM_WCNSS_CTRL) || COMPILE_TEST + depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n) + depends on QCOM_WCNSS_CTRL || (COMPILE_TEST && QCOM_WCNSS_CTRL=n) select BT_QCA help Qualcomm SMD based HCI driver. 
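The BT_QCOMSMD Kconfig hunk just above uses a standard idiom for compile-test coverage: `depends on FOO || (COMPILE_TEST && FOO=n)`. It preserves the usual rule that a built-in driver may not depend on a modular subsystem (no =y driver against an =m dependency), while still allowing the driver to build against the subsystem's inline stubs when that subsystem is disabled entirely. Schematically, for one of the symbols involved:

	config BT_QCOMSMD
		tristate "Qualcomm SMD based HCI support"
		depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n)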
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c index e5c62dcf2c11cb..e770ad97747235 100644 --- a/drivers/char/applicom.c +++ b/drivers/char/applicom.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index 20b32bb8c2aff5..8bdc38d81adf91 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c index 4a99ac756f0815..9959c762da2f8e 100644 --- a/drivers/char/hw_random/amd-rng.c +++ b/drivers/char/hw_random/amd-rng.c @@ -55,6 +55,7 @@ MODULE_DEVICE_TABLE(pci, pci_tbl); struct amd768_priv { void __iomem *iobase; struct pci_dev *pcidev; + u32 pmbase; }; static int amd_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) @@ -148,33 +149,58 @@ static int __init mod_init(void) if (pmbase == 0) return -EIO; - priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; - if (!devm_request_region(&pdev->dev, pmbase + PMBASE_OFFSET, - PMBASE_SIZE, DRV_NAME)) { + if (!request_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE, DRV_NAME)) { dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n", pmbase + 0xF0); - return -EBUSY; + err = -EBUSY; + goto out; } - priv->iobase = devm_ioport_map(&pdev->dev, pmbase + PMBASE_OFFSET, - PMBASE_SIZE); + priv->iobase = ioport_map(pmbase + PMBASE_OFFSET, PMBASE_SIZE); if (!priv->iobase) { pr_err(DRV_NAME "Cannot map ioport\n"); - return -ENOMEM; + err = -EINVAL; + goto err_iomap; } amd_rng.priv = (unsigned long)priv; + priv->pmbase = pmbase; priv->pcidev = pdev; pr_info(DRV_NAME " detected\n"); - return devm_hwrng_register(&pdev->dev, &amd_rng); + err = hwrng_register(&amd_rng); + if (err) { + pr_err(DRV_NAME " registering failed (%d)\n", err); + goto err_hwrng; + } + return 0; + +err_hwrng: + ioport_unmap(priv->iobase); +err_iomap: + release_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE); +out: + kfree(priv); + return err; } static void __exit mod_exit(void) { + struct amd768_priv *priv; + + priv = (struct amd768_priv *)amd_rng.priv; + + hwrng_unregister(&amd_rng); + + ioport_unmap(priv->iobase); + + release_region(priv->pmbase + PMBASE_OFFSET, PMBASE_SIZE); + + kfree(priv); } module_init(mod_init); diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 5c654b5d4adf0c..503a41dfa1936b 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c index e7a2459420291b..e1d421a36a138d 100644 --- a/drivers/char/hw_random/geode-rng.c +++ b/drivers/char/hw_random/geode-rng.c @@ -31,6 +31,9 @@ #include #include + +#define PFX KBUILD_MODNAME ": " + #define GEODE_RNG_DATA_REG 0x50 #define GEODE_RNG_STATUS_REG 0x54 @@ -82,6 +85,7 @@ static struct hwrng geode_rng = { static int __init mod_init(void) { + int err = -ENODEV; struct pci_dev *pdev = NULL; const struct pci_device_id *ent; void __iomem *mem; @@ -89,27 +93,43 @@ static int __init mod_init(void) for_each_pci_dev(pdev) { ent = pci_match_id(pci_tbl, pdev); - if (ent) { - rng_base = pci_resource_start(pdev, 0); - if (rng_base == 0) - return -ENODEV; - - mem = devm_ioremap(&pdev->dev, rng_base, 0x58); - if (!mem) - return -ENOMEM; - geode_rng.priv = (unsigned long)mem; 
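The amd-rng conversion above moves away from the devm_* managed API, plausibly because mod_init() merely walks the PCI bus with for_each_pci_dev() and never binds to the device, so devm would tie the I/O region and mapping lifetimes to a struct device this module does not own. The replacement is the conventional goto error ladder, releasing resources in reverse order of acquisition:

	err = hwrng_register(&amd_rng);
	if (err)
		goto err_hwrng;
	return 0;

err_hwrng:
	ioport_unmap(priv->iobase);
err_iomap:
	release_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE);
out:
	kfree(priv);
	return err;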
- - pr_info("AMD Geode RNG detected\n"); - return devm_hwrng_register(&pdev->dev, &geode_rng); - } + if (ent) + goto found; } - /* Device not found. */ - return -ENODEV; + goto out; + +found: + rng_base = pci_resource_start(pdev, 0); + if (rng_base == 0) + goto out; + err = -ENOMEM; + mem = ioremap(rng_base, 0x58); + if (!mem) + goto out; + geode_rng.priv = (unsigned long)mem; + + pr_info("AMD Geode RNG detected\n"); + err = hwrng_register(&geode_rng); + if (err) { + pr_err(PFX "RNG registering failed (%d)\n", + err); + goto err_unmap; + } +out: + return err; + +err_unmap: + iounmap(mem); + goto out; } static void __exit mod_exit(void) { + void __iomem *mem = (void __iomem *)geode_rng.priv; + + hwrng_unregister(&geode_rng); + iounmap(mem); } module_init(mod_init); diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c index 3ad86fdf954e96..b1ad12552b566a 100644 --- a/drivers/char/hw_random/omap-rng.c +++ b/drivers/char/hw_random/omap-rng.c @@ -397,9 +397,8 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv, irq, err); return err; } - omap_rng_write(priv, RNG_INTMASK_REG, RNG_SHUTDOWN_OFLO_MASK); - priv->clk = of_clk_get(pdev->dev.of_node, 0); + priv->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER) return -EPROBE_DEFER; if (!IS_ERR(priv->clk)) { @@ -408,6 +407,19 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv, dev_err(&pdev->dev, "unable to enable the clk, " "err = %d\n", err); } + + /* + * On OMAP4, enabling the shutdown_oflo interrupt is + * done in the interrupt mask register. There is no + * such register on EIP76, and it's enabled by the + * same bit in the control register + */ + if (priv->pdata->regs[RNG_INTMASK_REG]) + omap_rng_write(priv, RNG_INTMASK_REG, + RNG_SHUTDOWN_OFLO_MASK); + else + omap_rng_write(priv, RNG_CONTROL_REG, + RNG_SHUTDOWN_OFLO_MASK); } return 0; } diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index 30b9e83bf1bfc6..5ca24d9b101b92 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c @@ -53,6 +53,7 @@ #include #include #include +#include #ifdef CONFIG_X86 /* diff --git a/drivers/char/lp.c b/drivers/char/lp.c index 5b674277065674..565e4cf04a0215 100644 --- a/drivers/char/lp.c +++ b/drivers/char/lp.c @@ -117,7 +117,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/char/nwbutton.c b/drivers/char/nwbutton.c index a5b1eb276c0bf9..e6d0d271c58c83 100644 --- a/drivers/char/nwbutton.c +++ b/drivers/char/nwbutton.c @@ -6,7 +6,7 @@ #include #include -#include +#include #include #include #include diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c index 87885d146dbb02..3e73bcdf9e658d 100644 --- a/drivers/char/ppdev.c +++ b/drivers/char/ppdev.c @@ -58,7 +58,7 @@ #include #include -#include +#include #include #include #include @@ -84,11 +84,14 @@ struct pp_struct { struct ieee1284_info state; struct ieee1284_info saved_state; long default_inactivity; + int index; }; /* should we use PARDEVICE_MAX here? 
*/ static struct device *devices[PARPORT_MAX]; +static DEFINE_IDA(ida_index); + /* pp_struct.flags bitfields */ #define PP_CLAIMED (1<<0) #define PP_EXCL (1<<1) @@ -290,7 +293,7 @@ static int register_device(int minor, struct pp_struct *pp) struct pardevice *pdev = NULL; char *name; struct pardev_cb ppdev_cb; - int rc = 0; + int rc = 0, index; name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor); if (name == NULL) @@ -303,20 +306,23 @@ static int register_device(int minor, struct pp_struct *pp) goto err; } + index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL); memset(&ppdev_cb, 0, sizeof(ppdev_cb)); ppdev_cb.irq_func = pp_irq; ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0; ppdev_cb.private = pp; - pdev = parport_register_dev_model(port, name, &ppdev_cb, minor); + pdev = parport_register_dev_model(port, name, &ppdev_cb, index); parport_put_port(port); if (!pdev) { pr_warn("%s: failed to register device!\n", name); rc = -ENXIO; + ida_simple_remove(&ida_index, index); goto err; } pp->pdev = pdev; + pp->index = index; dev_dbg(&pdev->dev, "registered pardevice\n"); err: kfree(name); @@ -755,6 +761,7 @@ static int pp_release(struct inode *inode, struct file *file) if (pp->pdev) { parport_unregister_device(pp->pdev); + ida_simple_remove(&ida_index, pp->index); pp->pdev = NULL; pr_debug(CHRDEV "%x: unregistered pardevice\n", minor); } diff --git a/drivers/char/random.c b/drivers/char/random.c index 1ef26403bcc83f..0ab0249189072b 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -312,13 +312,6 @@ static int random_read_wakeup_bits = 64; */ static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS; -/* - * The minimum number of seconds between urandom pool reseeding. We - * do this to limit the amount of entropy that can be drained from the - * input pool even if there are heavy demands on /dev/urandom. - */ -static int random_min_urandom_seed = 60; - /* * Originally, we used a primitive polynomial of degree .poolwords * over GF(2). The taps for various sizes are defined below. 
They @@ -409,7 +402,6 @@ static struct poolinfo { */ static DECLARE_WAIT_QUEUE_HEAD(random_read_wait); static DECLARE_WAIT_QUEUE_HEAD(random_write_wait); -static DECLARE_WAIT_QUEUE_HEAD(urandom_init_wait); static struct fasync_struct *fasync; static DEFINE_SPINLOCK(random_ready_list_lock); @@ -467,7 +459,6 @@ struct entropy_store { int entropy_count; int entropy_total; unsigned int initialized:1; - unsigned int limit:1; unsigned int last_data_init:1; __u8 last_data[EXTRACT_SIZE]; }; @@ -485,7 +476,6 @@ static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy; static struct entropy_store input_pool = { .poolinfo = &poolinfo_table[0], .name = "input", - .limit = 1, .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock), .pool = input_pool_data }; @@ -493,7 +483,6 @@ static struct entropy_store input_pool = { static struct entropy_store blocking_pool = { .poolinfo = &poolinfo_table[1], .name = "blocking", - .limit = 1, .pull = &input_pool, .lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock), .pool = blocking_pool_data, @@ -855,13 +844,6 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r) spin_unlock_irqrestore(&primary_crng.lock, flags); } -static inline void maybe_reseed_primary_crng(void) -{ - if (crng_init > 2 && - time_after(jiffies, primary_crng.init_time + CRNG_RESEED_INTERVAL)) - crng_reseed(&primary_crng, &input_pool); -} - static inline void crng_wait_ready(void) { wait_event_interruptible(crng_init_wait, crng_ready()); @@ -1220,15 +1202,6 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes) r->entropy_count > r->poolinfo->poolfracbits) return; - if (r->limit == 0 && random_min_urandom_seed) { - unsigned long now = jiffies; - - if (time_before(now, - r->last_pulled + random_min_urandom_seed * HZ)) - return; - r->last_pulled = now; - } - _xfer_secondary_pool(r, nbytes); } @@ -1236,8 +1209,6 @@ static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes) { __u32 tmp[OUTPUT_POOL_WORDS]; - /* For /dev/random's pool, always leave two wakeups' worth */ - int rsvd_bytes = r->limit ? 
0 : random_read_wakeup_bits / 4; int bytes = nbytes; /* pull at least as much as a wakeup */ @@ -1248,7 +1219,7 @@ static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes) trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8, ENTROPY_BITS(r), ENTROPY_BITS(r->pull)); bytes = extract_entropy(r->pull, tmp, bytes, - random_read_wakeup_bits / 8, rsvd_bytes); + random_read_wakeup_bits / 8, 0); mix_pool_bytes(r, tmp, bytes); credit_entropy_bits(r, bytes*8); } @@ -1276,7 +1247,7 @@ static void push_to_pool(struct work_struct *work) static size_t account(struct entropy_store *r, size_t nbytes, int min, int reserved) { - int entropy_count, orig; + int entropy_count, orig, have_bytes; size_t ibytes, nfrac; BUG_ON(r->entropy_count > r->poolinfo->poolfracbits); @@ -1285,14 +1256,12 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min, retry: entropy_count = orig = ACCESS_ONCE(r->entropy_count); ibytes = nbytes; - /* If limited, never pull more than available */ - if (r->limit) { - int have_bytes = entropy_count >> (ENTROPY_SHIFT + 3); + /* never pull more than available */ + have_bytes = entropy_count >> (ENTROPY_SHIFT + 3); - if ((have_bytes -= reserved) < 0) - have_bytes = 0; - ibytes = min_t(size_t, ibytes, have_bytes); - } + if ((have_bytes -= reserved) < 0) + have_bytes = 0; + ibytes = min_t(size_t, ibytes, have_bytes); if (ibytes < min) ibytes = 0; @@ -1912,6 +1881,7 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, static int min_read_thresh = 8, min_write_thresh; static int max_read_thresh = OUTPUT_POOL_WORDS * 32; static int max_write_thresh = INPUT_POOL_WORDS * 32; +static int random_min_urandom_seed = 60; static char sysctl_bootid[16]; /* @@ -2042,63 +2012,64 @@ struct ctl_table random_table[] = { }; #endif /* CONFIG_SYSCTL */ -static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned; - -int random_int_secret_init(void) -{ - get_random_bytes(random_int_secret, sizeof(random_int_secret)); - return 0; -} - -static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash) - __aligned(sizeof(unsigned long)); +struct batched_entropy { + union { + u64 entropy_u64[CHACHA20_BLOCK_SIZE / sizeof(u64)]; + u32 entropy_u32[CHACHA20_BLOCK_SIZE / sizeof(u32)]; + }; + unsigned int position; +}; /* - * Get a random word for internal kernel use only. Similar to urandom but - * with the goal of minimal entropy pool depletion. As a result, the random - * value is not cryptographically secure but for several uses the cost of - * depleting entropy is too high + * Get a random word for internal kernel use only. The quality of the random + * number is either as good as RDRAND or as good as /dev/urandom, with the + * goal of being quite fast and not depleting entropy. 
*/ -unsigned int get_random_int(void) +static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64); +u64 get_random_u64(void) { - __u32 *hash; - unsigned int ret; + u64 ret; + struct batched_entropy *batch; - if (arch_get_random_int(&ret)) +#if BITS_PER_LONG == 64 + if (arch_get_random_long((unsigned long *)&ret)) return ret; +#else + if (arch_get_random_long((unsigned long *)&ret) && + arch_get_random_long((unsigned long *)&ret + 1)) + return ret; +#endif - hash = get_cpu_var(get_random_int_hash); - - hash[0] += current->pid + jiffies + random_get_entropy(); - md5_transform(hash, random_int_secret); - ret = hash[0]; - put_cpu_var(get_random_int_hash); - + batch = &get_cpu_var(batched_entropy_u64); + if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) { + extract_crng((u8 *)batch->entropy_u64); + batch->position = 0; + } + ret = batch->entropy_u64[batch->position++]; + put_cpu_var(batched_entropy_u64); return ret; } -EXPORT_SYMBOL(get_random_int); +EXPORT_SYMBOL(get_random_u64); -/* - * Same as get_random_int(), but returns unsigned long. - */ -unsigned long get_random_long(void) +static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32); +u32 get_random_u32(void) { - __u32 *hash; - unsigned long ret; + u32 ret; + struct batched_entropy *batch; - if (arch_get_random_long(&ret)) + if (arch_get_random_int(&ret)) return ret; - hash = get_cpu_var(get_random_int_hash); - - hash[0] += current->pid + jiffies + random_get_entropy(); - md5_transform(hash, random_int_secret); - ret = *(unsigned long *)hash; - put_cpu_var(get_random_int_hash); - + batch = &get_cpu_var(batched_entropy_u32); + if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) { + extract_crng((u8 *)batch->entropy_u32); + batch->position = 0; + } + ret = batch->entropy_u32[batch->position++]; + put_cpu_var(batched_entropy_u32); return ret; } -EXPORT_SYMBOL(get_random_long); +EXPORT_SYMBOL(get_random_u32); /** * randomize_page - Generate a random, page aligned address diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c index 35259961cc38f7..974d48927b0776 100644 --- a/drivers/char/rtc.c +++ b/drivers/char/rtc.c @@ -74,7 +74,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c index ec07f0e99732ec..6aa32679fd58ea 100644 --- a/drivers/char/snsc.c +++ b/drivers/char/snsc.c @@ -16,7 +16,7 @@ */ #include -#include +#include #include #include #include diff --git a/drivers/char/snsc_event.c b/drivers/char/snsc_event.c index 59bcefd6ec7c8b..e452673dff6612 100644 --- a/drivers/char/snsc_event.c +++ b/drivers/char/snsc_event.c @@ -16,7 +16,7 @@ */ #include -#include +#include #include #include #include diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 17857beb489294..e9b7e0b3cabe60 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -1136,6 +1136,8 @@ static int put_chars(u32 vtermno, const char *buf, int count) { struct port *port; struct scatterlist sg[1]; + void *data; + int ret; if (unlikely(early_put_chars)) return early_put_chars(vtermno, buf, count); @@ -1144,8 +1146,14 @@ static int put_chars(u32 vtermno, const char *buf, int count) if (!port) return -EPIPE; - sg_init_one(sg, buf, count); - return __send_to_port(port, sg, 1, count, (void *)buf, false); + data = kmemdup(buf, count, GFP_ATOMIC); + if (!data) + return -ENOMEM; + + sg_init_one(sg, data, count); + ret = __send_to_port(port, sg, 1, count, data, false); + kfree(data); + return ret; } /* @@ -1939,7 +1947,7 @@ 
static int init_vqs(struct ports_device *portdev) /* Find the queues. */ err = portdev->vdev->config->find_vqs(portdev->vdev, nr_queues, vqs, io_callbacks, - (const char **)io_names); + (const char **)io_names, NULL); if (err) goto free; diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 0fb39fe217d17a..67201f67a14af7 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -2502,7 +2502,7 @@ struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id, clk->core = hw->core; clk->dev_id = dev_id; - clk->con_id = con_id; + clk->con_id = kstrdup_const(con_id, GFP_KERNEL); clk->max_rate = ULONG_MAX; clk_prepare_lock(); @@ -2518,6 +2518,7 @@ void __clk_free_clk(struct clk *clk) hlist_del(&clk->clks_node); clk_prepare_unlock(); + kfree_const(clk->con_id); kfree(clk); } diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c index 9d9af446bafc94..1c1ec137a3cc72 100644 --- a/drivers/clk/meson/gxbb.c +++ b/drivers/clk/meson/gxbb.c @@ -564,6 +564,46 @@ static struct clk_gate gxbb_clk81 = { }, }; +static struct clk_mux gxbb_sar_adc_clk_sel = { + .reg = (void *)HHI_SAR_CLK_CNTL, + .mask = 0x3, + .shift = 9, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "sar_adc_clk_sel", + .ops = &clk_mux_ops, + /* NOTE: The datasheet doesn't list the parents for bit 10 */ + .parent_names = (const char *[]){ "xtal", "clk81", }, + .num_parents = 2, + }, +}; + +static struct clk_divider gxbb_sar_adc_clk_div = { + .reg = (void *)HHI_SAR_CLK_CNTL, + .shift = 0, + .width = 8, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "sar_adc_clk_div", + .ops = &clk_divider_ops, + .parent_names = (const char *[]){ "sar_adc_clk_sel" }, + .num_parents = 1, + }, +}; + +static struct clk_gate gxbb_sar_adc_clk = { + .reg = (void *)HHI_SAR_CLK_CNTL, + .bit_idx = 8, + .lock = &clk_lock, + .hw.init = &(struct clk_init_data){ + .name = "sar_adc_clk", + .ops = &clk_gate_ops, + .parent_names = (const char *[]){ "sar_adc_clk_div" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + /* Everything Else (EE) domain gates */ static MESON_GATE(gxbb_ddr, HHI_GCLK_MPEG0, 0); static MESON_GATE(gxbb_dos, HHI_GCLK_MPEG0, 1); @@ -754,6 +794,9 @@ static struct clk_hw_onecell_data gxbb_hw_onecell_data = { [CLKID_SD_EMMC_A] = &gxbb_emmc_a.hw, [CLKID_SD_EMMC_B] = &gxbb_emmc_b.hw, [CLKID_SD_EMMC_C] = &gxbb_emmc_c.hw, + [CLKID_SAR_ADC_CLK] = &gxbb_sar_adc_clk.hw, + [CLKID_SAR_ADC_SEL] = &gxbb_sar_adc_clk_sel.hw, + [CLKID_SAR_ADC_DIV] = &gxbb_sar_adc_clk_div.hw, }, .num = NR_CLKS, }; @@ -856,6 +899,7 @@ static struct clk_gate *gxbb_clk_gates[] = { &gxbb_emmc_a, &gxbb_emmc_b, &gxbb_emmc_c, + &gxbb_sar_adc_clk, }; static int gxbb_clkc_probe(struct platform_device *pdev) @@ -888,6 +932,10 @@ static int gxbb_clkc_probe(struct platform_device *pdev) gxbb_mpeg_clk_sel.reg = clk_base + (u64)gxbb_mpeg_clk_sel.reg; gxbb_mpeg_clk_div.reg = clk_base + (u64)gxbb_mpeg_clk_div.reg; + /* Populate the base address for the SAR ADC clks */ + gxbb_sar_adc_clk_sel.reg = clk_base + (u64)gxbb_sar_adc_clk_sel.reg; + gxbb_sar_adc_clk_div.reg = clk_base + (u64)gxbb_sar_adc_clk_div.reg; + /* Populate base address for gates */ for (i = 0; i < ARRAY_SIZE(gxbb_clk_gates); i++) gxbb_clk_gates[i]->reg = clk_base + diff --git a/drivers/clk/meson/gxbb.h b/drivers/clk/meson/gxbb.h index 0252939ba58f3e..8ee2022ce5d563 100644 --- a/drivers/clk/meson/gxbb.h +++ b/drivers/clk/meson/gxbb.h @@ -191,7 +191,7 @@ #define CLKID_PERIPHS 20 #define CLKID_SPICC 21 /* CLKID_I2C */ -#define CLKID_SAR_ADC 23 +/* #define CLKID_SAR_ADC 
*/ #define CLKID_SMART_CARD 24 #define CLKID_RNG0 25 #define CLKID_UART0 26 @@ -204,7 +204,7 @@ #define CLKID_ASSIST_MISC 33 /* CLKID_SPI */ #define CLKID_I2S_SPDIF 35 -#define CLKID_ETH 36 +/* CLKID_ETH */ #define CLKID_DEMUX 37 #define CLKID_AIU_GLUE 38 #define CLKID_IEC958 39 @@ -231,13 +231,13 @@ #define CLKID_AHB_DATA_BUS 60 #define CLKID_AHB_CTRL_BUS 61 #define CLKID_HDMI_INTR_SYNC 62 -#define CLKID_HDMI_PCLK 63 +/* CLKID_HDMI_PCLK */ /* CLKID_USB1_DDR_BRIDGE */ /* CLKID_USB0_DDR_BRIDGE */ #define CLKID_MMC_PCLK 66 #define CLKID_DVIN 67 #define CLKID_UART2 68 -#define CLKID_SANA 69 +/* #define CLKID_SANA */ #define CLKID_VPU_INTR 70 #define CLKID_SEC_AHB_AHB3_BRIDGE 71 #define CLKID_CLK81_A53 72 @@ -245,7 +245,7 @@ #define CLKID_VCLK2_VENCI1 74 #define CLKID_VCLK2_VENCP0 75 #define CLKID_VCLK2_VENCP1 76 -#define CLKID_GCLK_VENCI_INT0 77 +/* CLKID_GCLK_VENCI_INT0 */ #define CLKID_GCLK_VENCI_INT 78 #define CLKID_DAC_CLK 79 #define CLKID_AOCLK_GATE 80 @@ -265,8 +265,11 @@ /* CLKID_SD_EMMC_A */ /* CLKID_SD_EMMC_B */ /* CLKID_SD_EMMC_C */ +/* CLKID_SAR_ADC_CLK */ +/* CLKID_SAR_ADC_SEL */ +#define CLKID_SAR_ADC_DIV 99 -#define NR_CLKS 97 +#define NR_CLKS 100 /* include the CLKIDs that have been made part of the stable DT binding */ #include diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c index 924f560dcf80e8..00d4150e33c374 100644 --- a/drivers/clk/rockchip/clk-rk3036.c +++ b/drivers/clk/rockchip/clk-rk3036.c @@ -127,7 +127,7 @@ PNAME(mux_ddrphy_p) = { "dpll_ddr", "gpll_ddr" }; PNAME(mux_pll_src_3plls_p) = { "apll", "dpll", "gpll" }; PNAME(mux_timer_p) = { "xin24m", "pclk_peri_src" }; -PNAME(mux_pll_src_apll_dpll_gpll_usb480m_p) = { "apll", "dpll", "gpll" "usb480m" }; +PNAME(mux_pll_src_apll_dpll_gpll_usb480m_p) = { "apll", "dpll", "gpll", "usb480m" }; PNAME(mux_mmc_src_p) = { "apll", "dpll", "gpll", "xin24m" }; PNAME(mux_i2s_pre_p) = { "i2s_src", "i2s_frac", "ext_i2s", "xin12m" }; @@ -450,6 +450,13 @@ static void __init rk3036_clk_init(struct device_node *np) return; } + /* + * Make uart_pll_clk a child of the gpll, as all other sources are + * not that usable / stable. 
+ */ + writel_relaxed(HIWORD_UPDATE(0x2, 0x3, 10), + reg_base + RK2928_CLKSEL_CON(13)); + ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS); if (IS_ERR(ctx)) { pr_err("%s: rockchip clk init failed\n", __func__); diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig index 695bbf9ef428f9..72109d2cf41b29 100644 --- a/drivers/clk/sunxi-ng/Kconfig +++ b/drivers/clk/sunxi-ng/Kconfig @@ -80,6 +80,7 @@ config SUN6I_A31_CCU select SUNXI_CCU_DIV select SUNXI_CCU_NK select SUNXI_CCU_NKM + select SUNXI_CCU_NKMP select SUNXI_CCU_NM select SUNXI_CCU_MP select SUNXI_CCU_PHASE diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c index e3c084cc6da55e..f54114c607df76 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c @@ -566,7 +566,7 @@ static SUNXI_CCU_M_WITH_GATE(gpu_clk, "gpu", "pll-gpu", 0x1a0, 0, 3, BIT(31), CLK_SET_RATE_PARENT); /* Fixed Factor clocks */ -static CLK_FIXED_FACTOR(osc12M_clk, "osc12M", "osc24M", 1, 2, 0); +static CLK_FIXED_FACTOR(osc12M_clk, "osc12M", "osc24M", 2, 1, 0); /* We hardcode the divider to 4 for now */ static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio", diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c index 4c9a920ff4ab7c..89e68d29bf456a 100644 --- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c +++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c @@ -608,7 +608,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", lcd_ch1_parents, 0x150, 0, 4, 24, 2, BIT(31), CLK_SET_RATE_PARENT); -static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", 0x150, BIT(31), 0); +static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", 0x150, BIT(30), 0); static SUNXI_CCU_GATE(ps_clk, "ps", "lcd1-ch1", 0x140, BIT(31), 0); diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c index 22c2ca7a2a221c..b583f186a804df 100644 --- a/drivers/clk/sunxi-ng/ccu_mp.c +++ b/drivers/clk/sunxi-ng/ccu_mp.c @@ -85,6 +85,10 @@ static unsigned long ccu_mp_recalc_rate(struct clk_hw *hw, unsigned int m, p; u32 reg; + /* Adjust parent_rate according to pre-dividers */ + ccu_mux_helper_adjust_parent_for_prediv(&cmp->common, &cmp->mux, + -1, &parent_rate); + reg = readl(cmp->common.base + cmp->common.reg); m = reg >> cmp->m.shift; @@ -117,6 +121,10 @@ static int ccu_mp_set_rate(struct clk_hw *hw, unsigned long rate, unsigned int m, p; u32 reg; + /* Adjust parent_rate according to pre-dividers */ + ccu_mux_helper_adjust_parent_for_prediv(&cmp->common, &cmp->mux, + -1, &parent_rate); + max_m = cmp->m.max ?: 1 << cmp->m.width; max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1); diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c index a2b40a0001577d..488055ed944f2b 100644 --- a/drivers/clk/sunxi-ng/ccu_nkmp.c +++ b/drivers/clk/sunxi-ng/ccu_nkmp.c @@ -107,7 +107,7 @@ static unsigned long ccu_nkmp_recalc_rate(struct clk_hw *hw, p = reg >> nkmp->p.shift; p &= (1 << nkmp->p.width) - 1; - return parent_rate * n * k >> p / m; + return (parent_rate * n * k >> p) / m; } static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate, diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index 93aa1364376ac8..7a8a4117f123d6 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include diff --git a/drivers/clocksource/clkevt-probe.c b/drivers/clocksource/clkevt-probe.c index 8c30fec86094df..eb89b502acbdfd 100644 --- 
a/drivers/clocksource/clkevt-probe.c +++ b/drivers/clocksource/clkevt-probe.c @@ -17,7 +17,7 @@ #include #include -#include +#include extern struct of_device_id __clkevt_of_table[]; diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/pxa_timer.c index 9cae38eebec2c7..1c24de215c142a 100644 --- a/drivers/clocksource/pxa_timer.c +++ b/drivers/clocksource/pxa_timer.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c index 745844ee973e1d..d4ca9962a7595a 100644 --- a/drivers/clocksource/tcb_clksrc.c +++ b/drivers/clocksource/tcb_clksrc.c @@ -10,7 +10,6 @@ #include #include #include -#include /* @@ -57,14 +56,9 @@ static u64 tc_get_cycles(struct clocksource *cs) return (upper << 16) | lower; } -static u32 tc_get_cv32(void) -{ - return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV)); -} - static u64 tc_get_cycles32(struct clocksource *cs) { - return tc_get_cv32(); + return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV)); } static struct clocksource clksrc = { @@ -75,11 +69,6 @@ static struct clocksource clksrc = { .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; -static u64 notrace tc_read_sched_clock(void) -{ - return tc_get_cv32(); -} - #ifdef CONFIG_GENERIC_CLOCKEVENTS struct tc_clkevt_device { @@ -350,9 +339,6 @@ static int __init tcb_clksrc_init(void) clksrc.read = tc_get_cycles32; /* setup ony channel 0 */ tcb_setup_single_chan(tc, best_divisor_idx); - - /* register sched_clock on chips with single 32 bit counter */ - sched_clock_register(tc_read_sched_clock, 32, divided_rate); } else { /* tclib will give us three clocks no matter what the * underlying platform supports. diff --git a/drivers/clocksource/timer-digicolor.c b/drivers/clocksource/timer-digicolor.c index 10318cc99c0e8f..e9f50d28936290 100644 --- a/drivers/clocksource/timer-digicolor.c +++ b/drivers/clocksource/timer-digicolor.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index a475432818642f..bc96d423781aa8 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -680,9 +680,11 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy, char *buf) { unsigned int cur_freq = __cpufreq_get(policy); - if (!cur_freq) - return sprintf(buf, ""); - return sprintf(buf, "%u\n", cur_freq); + + if (cur_freq) + return sprintf(buf, "%u\n", cur_freq); + + return sprintf(buf, "\n"); } /** @@ -916,11 +918,19 @@ static struct kobj_type ktype_cpufreq = { .release = cpufreq_sysfs_release, }; -static int add_cpu_dev_symlink(struct cpufreq_policy *policy, - struct device *dev) +static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu) { + struct device *dev = get_cpu_device(cpu); + + if (!dev) + return; + + if (cpumask_test_and_set_cpu(cpu, policy->real_cpus)) + return; + dev_dbg(dev, "%s: Adding symlink\n", __func__); - return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"); + if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq")) + dev_err(dev, "cpufreq symlink creation failed\n"); } static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, @@ -1178,10 +1188,13 @@ static int cpufreq_online(unsigned int cpu) policy->user_policy.min = policy->min; policy->user_policy.max = policy->max; - write_lock_irqsave(&cpufreq_driver_lock, flags); - for_each_cpu(j, policy->related_cpus) + for_each_cpu(j, policy->related_cpus) { per_cpu(cpufreq_cpu_data, j) = policy; - 
write_unlock_irqrestore(&cpufreq_driver_lock, flags); + add_cpu_dev_symlink(policy, j); + } + } else { + policy->min = policy->user_policy.min; + policy->max = policy->user_policy.max; } if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { @@ -1270,13 +1283,15 @@ static int cpufreq_online(unsigned int cpu) if (cpufreq_driver->exit) cpufreq_driver->exit(policy); + + for_each_cpu(j, policy->real_cpus) + remove_cpu_dev_symlink(policy, get_cpu_device(j)); + out_free_policy: cpufreq_policy_free(policy); return ret; } -static int cpufreq_offline(unsigned int cpu); - /** * cpufreq_add_dev - the cpufreq interface for a CPU device. * @dev: CPU device. @@ -1298,16 +1313,10 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) /* Create sysfs link on CPU registration */ policy = per_cpu(cpufreq_cpu_data, cpu); - if (!policy || cpumask_test_and_set_cpu(cpu, policy->real_cpus)) - return 0; - - ret = add_cpu_dev_symlink(policy, dev); - if (ret) { - cpumask_clear_cpu(cpu, policy->real_cpus); - cpufreq_offline(cpu); - } + if (policy) + add_cpu_dev_symlink(policy, cpu); - return ret; + return 0; } static int cpufreq_offline(unsigned int cpu) @@ -2532,4 +2541,5 @@ static int __init cpufreq_core_init(void) return 0; } +module_param(off, int, 0444); core_initcall(cpufreq_core_init); diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 631bd2c86c5e6e..47e24b5384b379 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -18,7 +18,6 @@ #include #include -#include #include #include "cpufreq_governor.h" diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h index f5717ca070cc39..0236ec2cd654b3 100644 --- a/drivers/cpufreq/cpufreq_governor.h +++ b/drivers/cpufreq/cpufreq_governor.h @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 4a017e8952962c..3937acf7e026cd 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -16,6 +16,7 @@ #include #include #include +#include #include "cpufreq_ondemand.h" diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index eb0f7fb7168589..283491f742d3d7 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include #include @@ -39,11 +39,6 @@ #define INTEL_CPUFREQ_TRANSITION_LATENCY 20000 -#define ATOM_RATIOS 0x66a -#define ATOM_VIDS 0x66b -#define ATOM_TURBO_RATIOS 0x66c -#define ATOM_TURBO_VIDS 0x66d - #ifdef CONFIG_ACPI #include #include @@ -89,6 +84,11 @@ static inline u64 div_ext_fp(u64 x, u64 y) return div64_u64(x << EXT_FRAC_BITS, y); } +static inline int32_t percent_ext_fp(int percent) +{ + return div_ext_fp(percent, 100); +} + /** * struct sample - Store performance sample * @core_avg_perf: Ratio of APERF/MPERF which is the actual average @@ -364,37 +364,16 @@ static bool driver_registered __read_mostly; static bool acpi_ppc; #endif -static struct perf_limits performance_limits = { - .no_turbo = 0, - .turbo_disabled = 0, - .max_perf_pct = 100, - .max_perf = int_ext_tofp(1), - .min_perf_pct = 100, - .min_perf = int_ext_tofp(1), - .max_policy_pct = 100, - .max_sysfs_pct = 100, - .min_policy_pct = 0, - .min_sysfs_pct = 0, -}; +static struct perf_limits global; -static struct perf_limits powersave_limits = { - .no_turbo = 0, - .turbo_disabled = 0, - .max_perf_pct = 
100, - .max_perf = int_ext_tofp(1), - .min_perf_pct = 0, - .min_perf = 0, - .max_policy_pct = 100, - .max_sysfs_pct = 100, - .min_policy_pct = 0, - .min_sysfs_pct = 0, -}; - -#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE -static struct perf_limits *limits = &performance_limits; -#else -static struct perf_limits *limits = &powersave_limits; -#endif +static void intel_pstate_init_limits(struct perf_limits *limits) +{ + memset(limits, 0, sizeof(*limits)); + limits->max_perf_pct = 100; + limits->max_perf = int_ext_tofp(1); + limits->max_policy_pct = 100; + limits->max_sysfs_pct = 100; +} static DEFINE_MUTEX(intel_pstate_driver_lock); static DEFINE_MUTEX(intel_pstate_limits_lock); @@ -518,7 +497,7 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) * correct max turbo frequency based on the turbo state. * Also need to convert to MHz as _PSS freq is in MHz. */ - if (!limits->turbo_disabled) + if (!global.turbo_disabled) cpu->acpi_perf_data.states[0].core_frequency = policy->cpuinfo.max_freq / 1000; cpu->valid_pss_table = true; @@ -637,7 +616,7 @@ static inline void update_turbo_state(void) cpu = all_cpu_data[0]; rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); - limits->turbo_disabled = + global.turbo_disabled = (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE || cpu->pstate.max_pstate == cpu->pstate.turbo_pstate); } @@ -861,12 +840,11 @@ static struct freq_attr *hwp_cpufreq_attrs[] = { static void intel_pstate_hwp_set(struct cpufreq_policy *policy) { - int min, hw_min, max, hw_max, cpu, range, adj_range; - struct perf_limits *perf_limits = limits; + int min, hw_min, max, hw_max, cpu; + struct perf_limits *perf_limits = &global; u64 value, cap; for_each_cpu(cpu, policy->cpus) { - int max_perf_pct, min_perf_pct; struct cpudata *cpu_data = all_cpu_data[cpu]; s16 epp; @@ -875,24 +853,22 @@ static void intel_pstate_hwp_set(struct cpufreq_policy *policy) rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap); hw_min = HWP_LOWEST_PERF(cap); - if (limits->no_turbo) + if (global.no_turbo) hw_max = HWP_GUARANTEED_PERF(cap); else hw_max = HWP_HIGHEST_PERF(cap); - range = hw_max - hw_min; - max_perf_pct = perf_limits->max_perf_pct; - min_perf_pct = perf_limits->min_perf_pct; + max = fp_ext_toint(hw_max * perf_limits->max_perf); + if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) + min = max; + else + min = fp_ext_toint(hw_max * perf_limits->min_perf); rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value); - adj_range = min_perf_pct * range / 100; - min = hw_min + adj_range; + value &= ~HWP_MIN_PERF(~0L); value |= HWP_MIN_PERF(min); - adj_range = max_perf_pct * range / 100; - max = hw_min + adj_range; - value &= ~HWP_MAX_PERF(~0L); value |= HWP_MAX_PERF(max); @@ -996,6 +972,7 @@ static void intel_pstate_update_policies(void) static int pid_param_set(void *data, u64 val) { *(u32 *)data = val; + pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC; intel_pstate_reset_all_pid(); return 0; } @@ -1067,7 +1044,7 @@ static void intel_pstate_debug_hide_params(void) static ssize_t show_##file_name \ (struct kobject *kobj, struct attribute *attr, char *buf) \ { \ - return sprintf(buf, "%u\n", limits->object); \ + return sprintf(buf, "%u\n", global.object); \ } static ssize_t intel_pstate_show_status(char *buf); @@ -1158,10 +1135,10 @@ static ssize_t show_no_turbo(struct kobject *kobj, } update_turbo_state(); - if (limits->turbo_disabled) - ret = sprintf(buf, "%u\n", limits->turbo_disabled); + if (global.turbo_disabled) + ret = sprintf(buf, "%u\n", global.turbo_disabled); else - ret = sprintf(buf, 
"%u\n", limits->no_turbo); + ret = sprintf(buf, "%u\n", global.no_turbo); mutex_unlock(&intel_pstate_driver_lock); @@ -1188,14 +1165,14 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, mutex_lock(&intel_pstate_limits_lock); update_turbo_state(); - if (limits->turbo_disabled) { + if (global.turbo_disabled) { pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); mutex_unlock(&intel_pstate_limits_lock); mutex_unlock(&intel_pstate_driver_lock); return -EPERM; } - limits->no_turbo = clamp_t(int, input, 0, 1); + global.no_turbo = clamp_t(int, input, 0, 1); mutex_unlock(&intel_pstate_limits_lock); @@ -1225,14 +1202,11 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b, mutex_lock(&intel_pstate_limits_lock); - limits->max_sysfs_pct = clamp_t(int, input, 0 , 100); - limits->max_perf_pct = min(limits->max_policy_pct, - limits->max_sysfs_pct); - limits->max_perf_pct = max(limits->min_policy_pct, - limits->max_perf_pct); - limits->max_perf_pct = max(limits->min_perf_pct, - limits->max_perf_pct); - limits->max_perf = div_ext_fp(limits->max_perf_pct, 100); + global.max_sysfs_pct = clamp_t(int, input, 0 , 100); + global.max_perf_pct = min(global.max_policy_pct, global.max_sysfs_pct); + global.max_perf_pct = max(global.min_policy_pct, global.max_perf_pct); + global.max_perf_pct = max(global.min_perf_pct, global.max_perf_pct); + global.max_perf = percent_ext_fp(global.max_perf_pct); mutex_unlock(&intel_pstate_limits_lock); @@ -1262,14 +1236,11 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b, mutex_lock(&intel_pstate_limits_lock); - limits->min_sysfs_pct = clamp_t(int, input, 0 , 100); - limits->min_perf_pct = max(limits->min_policy_pct, - limits->min_sysfs_pct); - limits->min_perf_pct = min(limits->max_policy_pct, - limits->min_perf_pct); - limits->min_perf_pct = min(limits->max_perf_pct, - limits->min_perf_pct); - limits->min_perf = div_ext_fp(limits->min_perf_pct, 100); + global.min_sysfs_pct = clamp_t(int, input, 0 , 100); + global.min_perf_pct = max(global.min_policy_pct, global.min_sysfs_pct); + global.min_perf_pct = min(global.max_policy_pct, global.min_perf_pct); + global.min_perf_pct = min(global.max_perf_pct, global.min_perf_pct); + global.min_perf = percent_ext_fp(global.min_perf_pct); mutex_unlock(&intel_pstate_limits_lock); @@ -1367,7 +1338,7 @@ static int atom_get_min_pstate(void) { u64 value; - rdmsrl(ATOM_RATIOS, value); + rdmsrl(MSR_ATOM_CORE_RATIOS, value); return (value >> 8) & 0x7F; } @@ -1375,7 +1346,7 @@ static int atom_get_max_pstate(void) { u64 value; - rdmsrl(ATOM_RATIOS, value); + rdmsrl(MSR_ATOM_CORE_RATIOS, value); return (value >> 16) & 0x7F; } @@ -1383,7 +1354,7 @@ static int atom_get_turbo_pstate(void) { u64 value; - rdmsrl(ATOM_TURBO_RATIOS, value); + rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value); return value & 0x7F; } @@ -1394,7 +1365,7 @@ static u64 atom_get_val(struct cpudata *cpudata, int pstate) u32 vid; val = (u64)pstate << 8; - if (limits->no_turbo && !limits->turbo_disabled) + if (global.no_turbo && !global.turbo_disabled) val |= (u64)1 << 32; vid_fp = cpudata->vid.min + mul_fp( @@ -1445,7 +1416,7 @@ static void atom_get_vid(struct cpudata *cpudata) { u64 value; - rdmsrl(ATOM_VIDS, value); + rdmsrl(MSR_ATOM_CORE_VIDS, value); cpudata->vid.min = int_tofp((value >> 8) & 0x7f); cpudata->vid.max = int_tofp((value >> 16) & 0x7f); cpudata->vid.ratio = div_fp( @@ -1453,7 +1424,7 @@ static void atom_get_vid(struct cpudata *cpudata) int_tofp(cpudata->pstate.max_pstate - 
cpudata->pstate.min_pstate)); - rdmsrl(ATOM_TURBO_VIDS, value); + rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value); cpudata->vid.turbo = value & 0x7f; } @@ -1564,7 +1535,7 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate) u64 val; val = (u64)pstate << 8; - if (limits->no_turbo && !limits->turbo_disabled) + if (global.no_turbo && !global.turbo_disabled) val |= (u64)1 << 32; return val; @@ -1690,9 +1661,9 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max) int max_perf = cpu->pstate.turbo_pstate; int max_perf_adj; int min_perf; - struct perf_limits *perf_limits = limits; + struct perf_limits *perf_limits = &global; - if (limits->no_turbo || limits->turbo_disabled) + if (global.no_turbo || global.turbo_disabled) max_perf = cpu->pstate.max_pstate; if (per_cpu_limits) @@ -1827,7 +1798,7 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu) sample->busy_scaled = busy_frac * 100; - target = limits->no_turbo || limits->turbo_disabled ? + target = global.no_turbo || global.turbo_disabled ? cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; target += target >> 2; target = mul_fp(target, busy_frac); @@ -1891,13 +1862,11 @@ static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate) intel_pstate_get_min_max(cpu, &min_perf, &max_perf); pstate = clamp_t(int, pstate, min_perf, max_perf); - trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu); return pstate; } static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate) { - pstate = intel_pstate_prepare_request(cpu, pstate); if (pstate == cpu->pstate.current_pstate) return; @@ -1917,6 +1886,8 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) update_turbo_state(); + target_pstate = intel_pstate_prepare_request(cpu, target_pstate); + trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu); intel_pstate_update_pstate(cpu, target_pstate); sample = &cpu->sample; @@ -2084,53 +2055,37 @@ static void intel_pstate_clear_update_util_hook(unsigned int cpu) synchronize_sched(); } -static void intel_pstate_set_performance_limits(struct perf_limits *limits) -{ - limits->no_turbo = 0; - limits->turbo_disabled = 0; - limits->max_perf_pct = 100; - limits->max_perf = int_ext_tofp(1); - limits->min_perf_pct = 100; - limits->min_perf = int_ext_tofp(1); - limits->max_policy_pct = 100; - limits->max_sysfs_pct = 100; - limits->min_policy_pct = 0; - limits->min_sysfs_pct = 0; -} - static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy, struct perf_limits *limits) { + int32_t max_policy_perf, min_policy_perf; - limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100, - policy->cpuinfo.max_freq); - limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100); + max_policy_perf = div_ext_fp(policy->max, policy->cpuinfo.max_freq); + max_policy_perf = clamp_t(int32_t, max_policy_perf, 0, int_ext_tofp(1)); if (policy->max == policy->min) { - limits->min_policy_pct = limits->max_policy_pct; + min_policy_perf = max_policy_perf; } else { - limits->min_policy_pct = DIV_ROUND_UP(policy->min * 100, - policy->cpuinfo.max_freq); - limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, - 0, 100); + min_policy_perf = div_ext_fp(policy->min, + policy->cpuinfo.max_freq); + min_policy_perf = clamp_t(int32_t, min_policy_perf, + 0, max_policy_perf); } - /* Normalize user input to [min_policy_pct, max_policy_pct] */ - limits->min_perf_pct = max(limits->min_policy_pct, - limits->min_sysfs_pct); - limits->min_perf_pct = 
min(limits->max_policy_pct, - limits->min_perf_pct); - limits->max_perf_pct = min(limits->max_policy_pct, - limits->max_sysfs_pct); - limits->max_perf_pct = max(limits->min_policy_pct, - limits->max_perf_pct); - - /* Make sure min_perf_pct <= max_perf_pct */ - limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct); - - limits->min_perf = div_ext_fp(limits->min_perf_pct, 100); - limits->max_perf = div_ext_fp(limits->max_perf_pct, 100); + /* Normalize user input to [min_perf, max_perf] */ + limits->min_perf = max(min_policy_perf, + percent_ext_fp(limits->min_sysfs_pct)); + limits->min_perf = min(limits->min_perf, max_policy_perf); + limits->max_perf = min(max_policy_perf, + percent_ext_fp(limits->max_sysfs_pct)); + limits->max_perf = max(min_policy_perf, limits->max_perf); + + /* Make sure min_perf <= max_perf */ + limits->min_perf = min(limits->min_perf, limits->max_perf); + limits->max_perf = round_up(limits->max_perf, EXT_FRAC_BITS); limits->min_perf = round_up(limits->min_perf, EXT_FRAC_BITS); + limits->max_perf_pct = fp_ext_toint(limits->max_perf * 100); + limits->min_perf_pct = fp_ext_toint(limits->min_perf * 100); pr_debug("cpu:%d max_perf_pct:%d min_perf_pct:%d\n", policy->cpu, limits->max_perf_pct, limits->min_perf_pct); @@ -2139,7 +2094,7 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy, static int intel_pstate_set_policy(struct cpufreq_policy *policy) { struct cpudata *cpu; - struct perf_limits *perf_limits = NULL; + struct perf_limits *perf_limits = &global; if (!policy->cpuinfo.max_freq) return -ENODEV; @@ -2162,28 +2117,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) mutex_lock(&intel_pstate_limits_lock); - if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) { - if (!perf_limits) { - limits = &performance_limits; - perf_limits = limits; - } - if (policy->max >= policy->cpuinfo.max_freq && - !limits->no_turbo) { - pr_debug("set performance\n"); - intel_pstate_set_performance_limits(perf_limits); - goto out; - } - } else { - pr_debug("set powersave\n"); - if (!perf_limits) { - limits = &powersave_limits; - perf_limits = limits; - } - - } - intel_pstate_update_perf_limits(policy, perf_limits); - out: + if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) { /* * NOHZ_FULL CPUs need this as the governor callback may not @@ -2205,16 +2140,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) static int intel_pstate_verify_policy(struct cpufreq_policy *policy) { struct cpudata *cpu = all_cpu_data[policy->cpu]; - struct perf_limits *perf_limits; - - if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) - perf_limits = &performance_limits; - else - perf_limits = &powersave_limits; update_turbo_state(); - policy->cpuinfo.max_freq = perf_limits->turbo_disabled || - perf_limits->no_turbo ? + policy->cpuinfo.max_freq = global.turbo_disabled || global.no_turbo ? 
cpu->pstate.max_freq : cpu->pstate.turbo_freq; @@ -2229,9 +2157,9 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy) unsigned int max_freq, min_freq; max_freq = policy->cpuinfo.max_freq * - limits->max_sysfs_pct / 100; + global.max_sysfs_pct / 100; min_freq = policy->cpuinfo.max_freq * - limits->min_sysfs_pct / 100; + global.min_sysfs_pct / 100; cpufreq_verify_within_limits(policy, min_freq, max_freq); } @@ -2274,13 +2202,8 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy) cpu = all_cpu_data[policy->cpu]; - /* - * We need sane value in the cpu->perf_limits, so inherit from global - * perf_limits limits, which are seeded with values based on the - * CONFIG_CPU_FREQ_DEFAULT_GOV_*, during boot up. - */ if (per_cpu_limits) - memcpy(cpu->perf_limits, limits, sizeof(struct perf_limits)); + intel_pstate_init_limits(cpu->perf_limits); policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling; policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling; @@ -2288,7 +2211,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy) /* cpuinfo and default policy values */ policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling; update_turbo_state(); - policy->cpuinfo.max_freq = limits->turbo_disabled ? + policy->cpuinfo.max_freq = global.turbo_disabled ? cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; policy->cpuinfo.max_freq *= cpu->pstate.scaling; @@ -2308,7 +2231,7 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy) return ret; policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; - if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100) + if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE)) policy->policy = CPUFREQ_POLICY_PERFORMANCE; else policy->policy = CPUFREQ_POLICY_POWERSAVE; @@ -2332,46 +2255,16 @@ static struct cpufreq_driver intel_pstate = { static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy) { struct cpudata *cpu = all_cpu_data[policy->cpu]; - struct perf_limits *perf_limits = limits; update_turbo_state(); - policy->cpuinfo.max_freq = limits->turbo_disabled ? + policy->cpuinfo.max_freq = global.no_turbo || global.turbo_disabled ? cpu->pstate.max_freq : cpu->pstate.turbo_freq; cpufreq_verify_within_cpu_limits(policy); - if (per_cpu_limits) - perf_limits = cpu->perf_limits; - - mutex_lock(&intel_pstate_limits_lock); - - intel_pstate_update_perf_limits(policy, perf_limits); - - mutex_unlock(&intel_pstate_limits_lock); - return 0; } -static unsigned int intel_cpufreq_turbo_update(struct cpudata *cpu, - struct cpufreq_policy *policy, - unsigned int target_freq) -{ - unsigned int max_freq; - - update_turbo_state(); - - max_freq = limits->no_turbo || limits->turbo_disabled ? 
- cpu->pstate.max_freq : cpu->pstate.turbo_freq; - policy->cpuinfo.max_freq = max_freq; - if (policy->max > max_freq) - policy->max = max_freq; - - if (target_freq > max_freq) - target_freq = max_freq; - - return target_freq; -} - static int intel_cpufreq_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) @@ -2380,8 +2273,10 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy, struct cpufreq_freqs freqs; int target_pstate; + update_turbo_state(); + freqs.old = policy->cur; - freqs.new = intel_cpufreq_turbo_update(cpu, policy, target_freq); + freqs.new = target_freq; cpufreq_freq_transition_begin(policy, &freqs); switch (relation) { @@ -2401,6 +2296,7 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy, wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, target_pstate)); } + freqs.new = target_pstate * cpu->pstate.scaling; cpufreq_freq_transition_end(policy, &freqs, false); return 0; @@ -2412,10 +2308,12 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy, struct cpudata *cpu = all_cpu_data[policy->cpu]; int target_pstate; - target_freq = intel_cpufreq_turbo_update(cpu, policy, target_freq); + update_turbo_state(); + target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling); + target_pstate = intel_pstate_prepare_request(cpu, target_pstate); intel_pstate_update_pstate(cpu, target_pstate); - return target_freq; + return target_pstate * cpu->pstate.scaling; } static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy) @@ -2466,6 +2364,8 @@ static int intel_pstate_register_driver(void) { int ret; + intel_pstate_init_limits(&global); + ret = cpufreq_register_driver(intel_pstate_driver); if (ret) { intel_pstate_driver_cleanup(); diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c index a6fefac8afe49a..bfec1bcd3835f3 100644 --- a/drivers/cpufreq/qoriq-cpufreq.c +++ b/drivers/cpufreq/qoriq-cpufreq.c @@ -23,10 +23,6 @@ #include #include -#if !defined(CONFIG_ARM) -#include /* for get_hard_smp_processor_id() in UP configs */ -#endif - /** * struct cpu_data * @pclk: the parent clock of cpu diff --git a/drivers/cpufreq/sparc-us2e-cpufreq.c b/drivers/cpufreq/sparc-us2e-cpufreq.c index b73feeb666f9f9..35ddb6da93aaf8 100644 --- a/drivers/cpufreq/sparc-us2e-cpufreq.c +++ b/drivers/cpufreq/sparc-us2e-cpufreq.c @@ -234,7 +234,7 @@ static unsigned int us2e_freq_get(unsigned int cpu) cpumask_t cpus_allowed; unsigned long clock_tick, estar; - cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); + cpumask_copy(&cpus_allowed, ¤t->cpus_allowed); set_cpus_allowed_ptr(current, cpumask_of(cpu)); clock_tick = sparc64_get_clock_tick(cpu) / 1000; @@ -252,7 +252,7 @@ static int us2e_freq_target(struct cpufreq_policy *policy, unsigned int index) unsigned long clock_tick, divisor, old_divisor, estar; cpumask_t cpus_allowed; - cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); + cpumask_copy(&cpus_allowed, ¤t->cpus_allowed); set_cpus_allowed_ptr(current, cpumask_of(cpu)); new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000; diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c index 9bb42ba50efaf9..a8d86a449ca11f 100644 --- a/drivers/cpufreq/sparc-us3-cpufreq.c +++ b/drivers/cpufreq/sparc-us3-cpufreq.c @@ -82,7 +82,7 @@ static unsigned int us3_freq_get(unsigned int cpu) unsigned long reg; unsigned int ret; - cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); + cpumask_copy(&cpus_allowed, ¤t->cpus_allowed); 
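The sparc-us2e/us3 hunks replace the removed tsk_cpus_allowed() wrapper with a direct read of current->cpus_allowed. The surrounding code is the classic pin-then-restore affinity pattern for running on the CPU whose registers must be touched; a condensed sketch:

	cpumask_t cpus_allowed;

	cpumask_copy(&cpus_allowed, &current->cpus_allowed);	/* save old mask */
	set_cpus_allowed_ptr(current, cpumask_of(cpu));		/* pin to target CPU */

	/* ... read or program that CPU's clock registers ... */

	set_cpus_allowed_ptr(current, &cpus_allowed);		/* restore */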
set_cpus_allowed_ptr(current, cpumask_of(cpu)); reg = read_safari_cfg(); @@ -99,7 +99,7 @@ static int us3_freq_target(struct cpufreq_policy *policy, unsigned int index) unsigned long new_bits, new_freq, reg; cpumask_t cpus_allowed; - cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current)); + cpumask_copy(&cpus_allowed, ¤t->cpus_allowed); set_cpus_allowed_ptr(current, cpumask_of(cpu)); new_freq = sparc64_get_clock_tick(cpu) / 1000; diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c index 370593006f5f76..cda8f62d555b57 100644 --- a/drivers/cpuidle/cpuidle-powernv.c +++ b/drivers/cpuidle/cpuidle-powernv.c @@ -175,6 +175,24 @@ static int powernv_cpuidle_driver_init(void) drv->state_count += 1; } + /* + * On the PowerNV platform cpu_present may be less than cpu_possible in + * cases when firmware detects the CPU, but it is not available to the + * OS. If CONFIG_HOTPLUG_CPU=n, then such CPUs are not hotplugable at + * run time and hence cpu_devices are not created for those CPUs by the + * generic topology_init(). + * + * drv->cpumask defaults to cpu_possible_mask in + * __cpuidle_driver_init(). This breaks cpuidle on PowerNV where + * cpu_devices are not created for CPUs in cpu_possible_mask that + * cannot be hot-added later at run time. + * + * Trying cpuidle_register_device() on a CPU without a cpu_device is + * incorrect, so pass a correct CPU mask to the generic cpuidle driver. + */ + + drv->cpumask = (struct cpumask *)cpu_present_mask; + return 0; } diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 62810ff3b00f33..548b90be768548 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c index ab264d39323368..e53fb861beb045 100644 --- a/drivers/cpuidle/driver.c +++ b/drivers/cpuidle/driver.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 8d6d25c38c020e..b2330fd69e3464 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -18,6 +18,8 @@ #include #include #include +#include +#include #include #include @@ -287,7 +289,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) unsigned int interactivity_req; unsigned int expected_interval; unsigned long nr_iowaiters, cpu_load; - int resume_latency = dev_pm_qos_read_value(device); + int resume_latency = dev_pm_qos_raw_read_value(device); if (data->needs_update) { menu_update(drv, dev); diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index c5adc8c9ac43af..ae948b1da93a37 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c @@ -615,6 +615,18 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev) struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu); int error; + /* + * Return if cpu_device is not setup for this CPU. + * + * This could happen if the arch did not set up cpu_device + * since this CPU is not in cpu_present mask and the + * driver did not send a correct CPU mask during registration. 
+ * Without this check we would end up passing bogus + * value for &cpu_dev->kobj in kobject_init_and_add() + */ + if (!cpu_dev) + return -ENODEV; + kdev = kzalloc(sizeof(*kdev), GFP_KERNEL); if (!kdev) return -ENOMEM; diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 0b49dbc423e244..473d31288ad86e 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -459,6 +459,7 @@ config CRYPTO_DEV_ATMEL_AES config CRYPTO_DEV_ATMEL_TDES tristate "Support for Atmel DES/TDES hw accelerator" + depends on HAS_DMA depends on ARCH_AT91 || COMPILE_TEST select CRYPTO_DES select CRYPTO_BLKCIPHER @@ -472,6 +473,7 @@ config CRYPTO_DEV_ATMEL_TDES config CRYPTO_DEV_ATMEL_SHA tristate "Support for Atmel SHA hw accelerator" + depends on HAS_DMA depends on ARCH_AT91 || COMPILE_TEST select CRYPTO_HASH help @@ -583,6 +585,7 @@ config CRYPTO_DEV_ROCKCHIP config CRYPTO_DEV_MEDIATEK tristate "MediaTek's EIP97 Cryptographic Engine driver" + depends on HAS_DMA depends on (ARM && ARCH_MEDIATEK) || COMPILE_TEST select CRYPTO_AES select CRYPTO_AEAD diff --git a/drivers/crypto/cavium/cpt/cptvf_main.c b/drivers/crypto/cavium/cpt/cptvf_main.c index aac2966ff8d92d..6ffc740c7431d2 100644 --- a/drivers/crypto/cavium/cpt/cptvf_main.c +++ b/drivers/crypto/cavium/cpt/cptvf_main.c @@ -242,6 +242,7 @@ static int alloc_command_queues(struct cpt_vf *cptvf, if (!curr->head) { dev_err(&pdev->dev, "Command Q (%d) chunk (%d) allocation failed\n", i, queue->nchunks); + kfree(curr); goto cmd_qfail; } @@ -815,8 +816,10 @@ static void cptvf_remove(struct pci_dev *pdev) { struct cpt_vf *cptvf = pci_get_drvdata(pdev); - if (!cptvf) + if (!cptvf) { dev_err(&pdev->dev, "Invalid CPT-VF device\n"); + return; + } /* Convey DOWN to PF */ if (cptvf_send_vf_down(cptvf)) { diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c index 7f57f30f88636c..169e66231bcf15 100644 --- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c +++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c @@ -330,8 +330,8 @@ void do_post_process(struct cpt_vf *cptvf, struct cpt_info_buffer *info) { struct pci_dev *pdev = cptvf->pdev; - if (!info || !cptvf) { - dev_err(&pdev->dev, "Input params are incorrect for post processing\n"); + if (!info) { + dev_err(&pdev->dev, "incorrect cpt_info_buffer for post processing\n"); return; } diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 41cc853f8569cf..fc08b4ed69d936 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -1015,6 +1015,7 @@ const struct ccp_vdata ccpv5a = { const struct ccp_vdata ccpv5b = { .version = CCP_VERSION(5, 0), + .dma_chan_attr = DMA_PRIVATE, .setup = ccp5other_config, .perform = &ccp5_actions, .bar = 2, diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index 511ab042b5e793..92d1c6959f08b8 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c @@ -283,11 +283,14 @@ EXPORT_SYMBOL_GPL(ccp_version); */ int ccp_enqueue_cmd(struct ccp_cmd *cmd) { - struct ccp_device *ccp = ccp_get_device(); + struct ccp_device *ccp; unsigned long flags; unsigned int i; int ret; + /* Some commands might need to be sent to a specific device */ + ccp = cmd->ccp ? 
cmd->ccp : ccp_get_device(); + if (!ccp) return -ENODEV; diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 2b5c01fade05a5..aa36f3f8186056 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -179,6 +179,10 @@ /* ------------------------ General CCP Defines ------------------------ */ +#define CCP_DMA_DFLT 0x0 +#define CCP_DMA_PRIV 0x1 +#define CCP_DMA_PUB 0x2 + #define CCP_DMAPOOL_MAX_SIZE 64 #define CCP_DMAPOOL_ALIGN BIT(5) @@ -636,6 +640,7 @@ struct ccp_actions { /* Structure to hold CCP version-specific values */ struct ccp_vdata { const unsigned int version; + const unsigned int dma_chan_attr; void (*setup)(struct ccp_device *); const struct ccp_actions *perform; const unsigned int bar; diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c index e5d9278f401974..e00be01fbf5a03 100644 --- a/drivers/crypto/ccp/ccp-dmaengine.c +++ b/drivers/crypto/ccp/ccp-dmaengine.c @@ -10,6 +10,7 @@ * published by the Free Software Foundation. */ +#include #include #include #include @@ -25,6 +26,37 @@ (mask == 0) ? 64 : fls64(mask); \ }) +/* The CCP as a DMA provider can be configured for public or private + * channels. Default is specified in the vdata for the device (PCI ID). + * This module parameter will override for all channels on all devices: + * dma_chan_attr = 0x2 to force all channels public + * = 0x1 to force all channels private + * = 0x0 to defer to the vdata setting + * = any other value: warning, revert to 0x0 + */ +static unsigned int dma_chan_attr = CCP_DMA_DFLT; +module_param(dma_chan_attr, uint, 0444); +MODULE_PARM_DESC(dma_chan_attr, "Set DMA channel visibility: 0 (default) = device defaults, 1 = make private, 2 = make public"); + +unsigned int ccp_get_dma_chan_attr(struct ccp_device *ccp) +{ + switch (dma_chan_attr) { + case CCP_DMA_DFLT: + return ccp->vdata->dma_chan_attr; + + case CCP_DMA_PRIV: + return DMA_PRIVATE; + + case CCP_DMA_PUB: + return 0; + + default: + dev_info_once(ccp->dev, "Invalid value for dma_chan_attr: %d\n", + dma_chan_attr); + return ccp->vdata->dma_chan_attr; + } +} + static void ccp_free_cmd_resources(struct ccp_device *ccp, struct list_head *list) { @@ -390,6 +422,7 @@ static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan, goto err; ccp_cmd = &cmd->ccp_cmd; + ccp_cmd->ccp = chan->ccp; ccp_pt = &ccp_cmd->u.passthru_nomap; ccp_cmd->flags = CCP_CMD_MAY_BACKLOG; ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP; @@ -674,6 +707,15 @@ int ccp_dmaengine_register(struct ccp_device *ccp) dma_cap_set(DMA_SG, dma_dev->cap_mask); dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask); + /* The DMA channels for this device can be set to public or private, + * and overridden by the module parameter dma_chan_attr. 
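 *
 * As an illustration (an editorial example, assuming the provider is built
 * as the "ccp" module), the override could be given at module load time or
 * on the kernel command line, e.g.:
 *
 *	modprobe ccp dma_chan_attr=1	// force all channels private
 *	ccp.dma_chan_attr=2		// boot parameter, force all public
 *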
+ * Default: according to the value in vdata (dma_chan_attr=0) + * dma_chan_attr=0x1: all channels private (override vdata) + * dma_chan_attr=0x2: all channels public (override vdata) + */ + if (ccp_get_dma_chan_attr(ccp) == DMA_PRIVATE) + dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask); + INIT_LIST_HEAD(&dma_dev->channels); for (i = 0; i < ccp->cmd_q_count; i++) { chan = ccp->ccp_dma_chan + i; diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c index dce1af0ce85ce8..1b9da3dc799b05 100644 --- a/drivers/crypto/s5p-sss.c +++ b/drivers/crypto/s5p-sss.c @@ -270,7 +270,7 @@ static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg, scatterwalk_done(&walk, out, 0); } -static void s5p_aes_complete(struct s5p_aes_dev *dev, int err) +static void s5p_sg_done(struct s5p_aes_dev *dev) { if (dev->sg_dst_cpy) { dev_dbg(dev->dev, @@ -281,8 +281,11 @@ static void s5p_aes_complete(struct s5p_aes_dev *dev, int err) } s5p_free_sg_cpy(dev, &dev->sg_src_cpy); s5p_free_sg_cpy(dev, &dev->sg_dst_cpy); +} - /* holding a lock outside */ +/* Calls the completion. Cannot be called with dev->lock hold. */ +static void s5p_aes_complete(struct s5p_aes_dev *dev, int err) +{ dev->req->base.complete(&dev->req->base, err); dev->busy = false; } @@ -368,51 +371,44 @@ static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg) } /* - * Returns true if new transmitting (output) data is ready and its - * address+length have to be written to device (by calling - * s5p_set_dma_outdata()). False otherwise. + * Returns -ERRNO on error (mapping of new data failed). + * On success returns: + * - 0 if there is no more data, + * - 1 if new transmitting (output) data is ready and its address+length + * have to be written to device (by calling s5p_set_dma_outdata()). */ -static bool s5p_aes_tx(struct s5p_aes_dev *dev) +static int s5p_aes_tx(struct s5p_aes_dev *dev) { - int err = 0; - bool ret = false; + int ret = 0; s5p_unset_outdata(dev); if (!sg_is_last(dev->sg_dst)) { - err = s5p_set_outdata(dev, sg_next(dev->sg_dst)); - if (err) - s5p_aes_complete(dev, err); - else - ret = true; - } else { - s5p_aes_complete(dev, err); - - dev->busy = true; - tasklet_schedule(&dev->tasklet); + ret = s5p_set_outdata(dev, sg_next(dev->sg_dst)); + if (!ret) + ret = 1; } return ret; } /* - * Returns true if new receiving (input) data is ready and its - * address+length have to be written to device (by calling - * s5p_set_dma_indata()). False otherwise. + * Returns -ERRNO on error (mapping of new data failed). + * On success returns: + * - 0 if there is no more data, + * - 1 if new receiving (input) data is ready and its address+length + * have to be written to device (by calling s5p_set_dma_indata()). 
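 *
 * Callers are expected to treat the result as a tri-state value; as a
 * rough sketch (the interrupt handler below does effectively this):
 *
 *	ret = s5p_aes_rx(dev);
 *	if (ret < 0)
 *		goto error;			// unmap and complete with ret
 *	else if (ret == 1)
 *		s5p_set_dma_indata(dev, dev->sg_src);
 *	// ret == 0: no more input data to map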
*/ -static bool s5p_aes_rx(struct s5p_aes_dev *dev) +static int s5p_aes_rx(struct s5p_aes_dev *dev/*, bool *set_dma*/) { - int err; - bool ret = false; + int ret = 0; s5p_unset_indata(dev); if (!sg_is_last(dev->sg_src)) { - err = s5p_set_indata(dev, sg_next(dev->sg_src)); - if (err) - s5p_aes_complete(dev, err); - else - ret = true; + ret = s5p_set_indata(dev, sg_next(dev->sg_src)); + if (!ret) + ret = 1; } return ret; @@ -422,33 +418,73 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id) { struct platform_device *pdev = dev_id; struct s5p_aes_dev *dev = platform_get_drvdata(pdev); - bool set_dma_tx = false; - bool set_dma_rx = false; + int err_dma_tx = 0; + int err_dma_rx = 0; + bool tx_end = false; unsigned long flags; uint32_t status; + int err; spin_lock_irqsave(&dev->lock, flags); + /* + * Handle rx or tx interrupt. If there is still data (scatterlist did not + * reach end), then map next scatterlist entry. + * In case of such mapping error, s5p_aes_complete() should be called. + * + * If there is no more data in tx scatter list, call s5p_aes_complete() + * and schedule new tasklet. + */ status = SSS_READ(dev, FCINTSTAT); if (status & SSS_FCINTSTAT_BRDMAINT) - set_dma_rx = s5p_aes_rx(dev); - if (status & SSS_FCINTSTAT_BTDMAINT) - set_dma_tx = s5p_aes_tx(dev); + err_dma_rx = s5p_aes_rx(dev); + + if (status & SSS_FCINTSTAT_BTDMAINT) { + if (sg_is_last(dev->sg_dst)) + tx_end = true; + err_dma_tx = s5p_aes_tx(dev); + } SSS_WRITE(dev, FCINTPEND, status); - /* - * Writing length of DMA block (either receiving or transmitting) - * will start the operation immediately, so this should be done - * at the end (even after clearing pending interrupts to not miss the - * interrupt). - */ - if (set_dma_tx) - s5p_set_dma_outdata(dev, dev->sg_dst); - if (set_dma_rx) - s5p_set_dma_indata(dev, dev->sg_src); + if (err_dma_rx < 0) { + err = err_dma_rx; + goto error; + } + if (err_dma_tx < 0) { + err = err_dma_tx; + goto error; + } + + if (tx_end) { + s5p_sg_done(dev); + + spin_unlock_irqrestore(&dev->lock, flags); + + s5p_aes_complete(dev, 0); + dev->busy = true; + tasklet_schedule(&dev->tasklet); + } else { + /* + * Writing length of DMA block (either receiving or + * transmitting) will start the operation immediately, so this + * should be done at the end (even after clearing pending + * interrupts to not miss the interrupt). 
+ */ + if (err_dma_tx == 1) + s5p_set_dma_outdata(dev, dev->sg_dst); + if (err_dma_rx == 1) + s5p_set_dma_indata(dev, dev->sg_src); + spin_unlock_irqrestore(&dev->lock, flags); + } + + return IRQ_HANDLED; + +error: + s5p_sg_done(dev); spin_unlock_irqrestore(&dev->lock, flags); + s5p_aes_complete(dev, err); return IRQ_HANDLED; } @@ -597,8 +633,9 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode) s5p_unset_indata(dev); indata_error: - s5p_aes_complete(dev, err); + s5p_sg_done(dev); spin_unlock_irqrestore(&dev->lock, flags); + s5p_aes_complete(dev, err); } static void s5p_tasklet_cb(unsigned long data) @@ -805,8 +842,9 @@ static int s5p_aes_probe(struct platform_device *pdev) dev_warn(dev, "feed control interrupt is not available.\n"); goto err_irq; } - err = devm_request_irq(dev, pdata->irq_fc, s5p_aes_interrupt, - IRQF_SHARED, pdev->name, pdev); + err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL, + s5p_aes_interrupt, IRQF_ONESHOT, + pdev->name, pdev); if (err < 0) { dev_warn(dev, "feed control interrupt is not available.\n"); goto err_irq; diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c index 43a0c8a26ab0c5..00a16ab601cb07 100644 --- a/drivers/crypto/ux500/cryp/cryp.c +++ b/drivers/crypto/ux500/cryp/cryp.c @@ -82,7 +82,7 @@ void cryp_activity(struct cryp_device_data *device_data, void cryp_flush_inoutfifo(struct cryp_device_data *device_data) { /* - * We always need to disble the hardware before trying to flush the + * We always need to disable the hardware before trying to flush the * FIFO. This is something that isn't written in the design * specification, but we have been informed by the hardware designers * that this must be done. diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c index b5b153317376eb..21472e427f6fe7 100644 --- a/drivers/crypto/virtio/virtio_crypto_core.c +++ b/drivers/crypto/virtio/virtio_crypto_core.c @@ -120,7 +120,7 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi) } ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, - names); + names, NULL); if (ret) goto err_find; diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c index 94ad5c0adbcbd3..72a26eb4e95466 100644 --- a/drivers/crypto/vmx/aes_cbc.c +++ b/drivers/crypto/vmx/aes_cbc.c @@ -27,11 +27,12 @@ #include #include #include +#include #include "aesp8-ppc.h" struct p8_aes_cbc_ctx { - struct crypto_blkcipher *fallback; + struct crypto_skcipher *fallback; struct aes_key enc_key; struct aes_key dec_key; }; @@ -39,7 +40,7 @@ struct p8_aes_cbc_ctx { static int p8_aes_cbc_init(struct crypto_tfm *tfm) { const char *alg; - struct crypto_blkcipher *fallback; + struct crypto_skcipher *fallback; struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); if (!(alg = crypto_tfm_alg_name(tfm))) { @@ -47,8 +48,9 @@ static int p8_aes_cbc_init(struct crypto_tfm *tfm) return -ENOENT; } - fallback = - crypto_alloc_blkcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK); + fallback = crypto_alloc_skcipher(alg, 0, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(fallback)) { printk(KERN_ERR "Failed to allocate transformation for '%s': %ld\n", @@ -56,11 +58,12 @@ static int p8_aes_cbc_init(struct crypto_tfm *tfm) return PTR_ERR(fallback); } printk(KERN_INFO "Using '%s' as fallback implementation.\n", - crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback)); + crypto_skcipher_driver_name(fallback)); + - crypto_blkcipher_set_flags( + crypto_skcipher_set_flags( fallback, - 
crypto_blkcipher_get_flags((struct crypto_blkcipher *)tfm)); + crypto_skcipher_get_flags((struct crypto_skcipher *)tfm)); ctx->fallback = fallback; return 0; @@ -71,7 +74,7 @@ static void p8_aes_cbc_exit(struct crypto_tfm *tfm) struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); if (ctx->fallback) { - crypto_free_blkcipher(ctx->fallback); + crypto_free_skcipher(ctx->fallback); ctx->fallback = NULL; } } @@ -91,7 +94,7 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key, pagefault_enable(); preempt_enable(); - ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen); + ret += crypto_skcipher_setkey(ctx->fallback, key, keylen); return ret; } @@ -103,15 +106,14 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc, struct blkcipher_walk walk; struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); - struct blkcipher_desc fallback_desc = { - .tfm = ctx->fallback, - .info = desc->info, - .flags = desc->flags - }; if (in_interrupt()) { - ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, - nbytes); + SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback); + skcipher_request_set_tfm(req, ctx->fallback); + skcipher_request_set_callback(req, desc->flags, NULL, NULL); + skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); + ret = crypto_skcipher_encrypt(req); + skcipher_request_zero(req); } else { preempt_disable(); pagefault_disable(); @@ -144,15 +146,14 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc, struct blkcipher_walk walk; struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); - struct blkcipher_desc fallback_desc = { - .tfm = ctx->fallback, - .info = desc->info, - .flags = desc->flags - }; if (in_interrupt()) { - ret = crypto_blkcipher_decrypt(&fallback_desc, dst, src, - nbytes); + SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback); + skcipher_request_set_tfm(req, ctx->fallback); + skcipher_request_set_callback(req, desc->flags, NULL, NULL); + skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); + ret = crypto_skcipher_decrypt(req); + skcipher_request_zero(req); } else { preempt_disable(); pagefault_disable(); diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c index 24353ec336c5bc..6adc9290557a4a 100644 --- a/drivers/crypto/vmx/aes_xts.c +++ b/drivers/crypto/vmx/aes_xts.c @@ -28,11 +28,12 @@ #include #include #include +#include #include "aesp8-ppc.h" struct p8_aes_xts_ctx { - struct crypto_blkcipher *fallback; + struct crypto_skcipher *fallback; struct aes_key enc_key; struct aes_key dec_key; struct aes_key tweak_key; @@ -41,7 +42,7 @@ struct p8_aes_xts_ctx { static int p8_aes_xts_init(struct crypto_tfm *tfm) { const char *alg; - struct crypto_blkcipher *fallback; + struct crypto_skcipher *fallback; struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm); if (!(alg = crypto_tfm_alg_name(tfm))) { @@ -49,8 +50,8 @@ static int p8_aes_xts_init(struct crypto_tfm *tfm) return -ENOENT; } - fallback = - crypto_alloc_blkcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK); + fallback = crypto_alloc_skcipher(alg, 0, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(fallback)) { printk(KERN_ERR "Failed to allocate transformation for '%s': %ld\n", @@ -58,11 +59,11 @@ static int p8_aes_xts_init(struct crypto_tfm *tfm) return PTR_ERR(fallback); } printk(KERN_INFO "Using '%s' as fallback implementation.\n", - crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback)); + crypto_skcipher_driver_name(fallback)); - crypto_blkcipher_set_flags( + crypto_skcipher_set_flags( fallback, - 
crypto_blkcipher_get_flags((struct crypto_blkcipher *)tfm)); + crypto_skcipher_get_flags((struct crypto_skcipher *)tfm)); ctx->fallback = fallback; return 0; @@ -73,7 +74,7 @@ static void p8_aes_xts_exit(struct crypto_tfm *tfm) struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm); if (ctx->fallback) { - crypto_free_blkcipher(ctx->fallback); + crypto_free_skcipher(ctx->fallback); ctx->fallback = NULL; } } @@ -98,7 +99,7 @@ static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key, pagefault_enable(); preempt_enable(); - ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen); + ret += crypto_skcipher_setkey(ctx->fallback, key, keylen); return ret; } @@ -113,15 +114,14 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc, struct blkcipher_walk walk; struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); - struct blkcipher_desc fallback_desc = { - .tfm = ctx->fallback, - .info = desc->info, - .flags = desc->flags - }; if (in_interrupt()) { - ret = enc ? crypto_blkcipher_encrypt(&fallback_desc, dst, src, nbytes) : - crypto_blkcipher_decrypt(&fallback_desc, dst, src, nbytes); + SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback); + skcipher_request_set_tfm(req, ctx->fallback); + skcipher_request_set_callback(req, desc->flags, NULL, NULL); + skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); + ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req); + skcipher_request_zero(req); } else { preempt_disable(); pagefault_disable(); diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c index b75c77254fdb56..80c6db279ae10c 100644 --- a/drivers/dax/dax.c +++ b/drivers/dax/dax.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -426,6 +427,7 @@ static int __dax_dev_pte_fault(struct dax_dev *dax_dev, struct vm_fault *vmf) int rc = VM_FAULT_SIGBUS; phys_addr_t phys; pfn_t pfn; + unsigned int fault_size = PAGE_SIZE; if (check_vma(dax_dev, vmf->vma, __func__)) return VM_FAULT_SIGBUS; @@ -436,9 +438,12 @@ static int __dax_dev_pte_fault(struct dax_dev *dax_dev, struct vm_fault *vmf) return VM_FAULT_SIGBUS; } + if (fault_size != dax_region->align) + return VM_FAULT_SIGBUS; + phys = pgoff_to_phys(dax_dev, vmf->pgoff, PAGE_SIZE); if (phys == -1) { - dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__, + dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__, vmf->pgoff); return VM_FAULT_SIGBUS; } @@ -463,6 +468,7 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf) phys_addr_t phys; pgoff_t pgoff; pfn_t pfn; + unsigned int fault_size = PMD_SIZE; if (check_vma(dax_dev, vmf->vma, __func__)) return VM_FAULT_SIGBUS; @@ -479,10 +485,20 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf) return VM_FAULT_SIGBUS; } + if (fault_size < dax_region->align) + return VM_FAULT_SIGBUS; + else if (fault_size > dax_region->align) + return VM_FAULT_FALLBACK; + + /* if we are outside of the VMA */ + if (pmd_addr < vmf->vma->vm_start || + (pmd_addr + PMD_SIZE) > vmf->vma->vm_end) + return VM_FAULT_SIGBUS; + pgoff = linear_page_index(vmf->vma, pmd_addr); phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE); if (phys == -1) { - dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__, + dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__, pgoff); return VM_FAULT_SIGBUS; } @@ -502,6 +518,8 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf) phys_addr_t phys; pgoff_t pgoff; pfn_t pfn; + unsigned int fault_size = PUD_SIZE; + if (check_vma(dax_dev, 
vmf->vma, __func__)) return VM_FAULT_SIGBUS; @@ -518,10 +536,20 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf) return VM_FAULT_SIGBUS; } + if (fault_size < dax_region->align) + return VM_FAULT_SIGBUS; + else if (fault_size > dax_region->align) + return VM_FAULT_FALLBACK; + + /* if we are outside of the VMA */ + if (pud_addr < vmf->vma->vm_start || + (pud_addr + PUD_SIZE) > vmf->vma->vm_end) + return VM_FAULT_SIGBUS; + pgoff = linear_page_index(vmf->vma, pud_addr); phys = pgoff_to_phys(dax_dev, pgoff, PUD_SIZE); if (phys == -1) { - dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__, + dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__, pgoff); return VM_FAULT_SIGBUS; } diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c index d1f1f456f5c48f..d195d617076d6c 100644 --- a/drivers/dma-buf/dma-fence.c +++ b/drivers/dma-buf/dma-fence.c @@ -22,6 +22,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index e18dc596cf2447..6204cc32d09c50 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c @@ -251,8 +251,11 @@ static void bcm2835_dma_create_cb_set_length( */ /* have we filled in period_length yet? */ - if (*total_len + control_block->length < period_len) + if (*total_len + control_block->length < period_len) { + /* update number of bytes in this period so far */ + *total_len += control_block->length; return; + } /* calculate the length that remains to reach period_length */ control_block->length = period_len - *total_len; diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 24e0221fd66d1f..d9118ec2302541 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -1108,12 +1108,14 @@ static struct dmaengine_unmap_pool *__get_unmap_pool(int nr) switch (order) { case 0 ... 1: return &unmap_pool[0]; +#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID) case 2 ... 4: return &unmap_pool[1]; case 5 ... 7: return &unmap_pool[2]; case 8: return &unmap_pool[3]; +#endif default: BUG(); return NULL; diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index c9297605058c1a..54d581d407aa72 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 82d85cce81f815..4773f286723414 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -43,6 +43,7 @@ config EDAC_LEGACY_SYSFS config EDAC_DEBUG bool "Debugging" + select DEBUG_FS help This turns on debugging information for the entire EDAC subsystem. You do so by inserting edac_module with "edac_debug_level=x." Valid @@ -259,6 +260,15 @@ config EDAC_SKX Support for error detection and correction the Intel Skylake server Integrated Memory Controllers. +config EDAC_PND2 + tristate "Intel Pondicherry2" + depends on EDAC_MM_EDAC && PCI && X86_64 && X86_MCE_INTEL + help + Support for error detection and correction on the Intel + Pondicherry2 Integrated Memory Controller. This SoC IP is + first used on the Apollo Lake platform and Denverton + micro-server but may appear on others in the future. 
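For reference, a minimal configuration fragment that builds the new driver as
a module would combine the dependencies listed above with the new tristate
option itself (an illustrative sketch, not taken from any shipped defconfig):

	CONFIG_X86_64=y
	CONFIG_PCI=y
	CONFIG_X86_MCE_INTEL=y
	CONFIG_EDAC=y
	CONFIG_EDAC_MM_EDAC=y
	CONFIG_EDAC_PND2=m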
+ config EDAC_MPC85XX tristate "Freescale MPC83xx / MPC85xx" depends on EDAC_MM_EDAC && FSL_SOC diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index 88e472e8b9a918..587107e909967d 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile @@ -32,6 +32,7 @@ obj-$(CONFIG_EDAC_I7300) += i7300_edac.o obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o obj-$(CONFIG_EDAC_SBRIDGE) += sb_edac.o obj-$(CONFIG_EDAC_SKX) += skx_edac.o +obj-$(CONFIG_EDAC_PND2) += pnd2_edac.o obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o obj-$(CONFIG_EDAC_E752X) += e752x_edac.o obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c index 1670d27bcac82d..f683919981b067 100644 --- a/drivers/edac/i5000_edac.c +++ b/drivers/edac/i5000_edac.c @@ -1293,7 +1293,7 @@ static int i5000_init_csrows(struct mem_ctl_info *mci) dimm->mtype = MEM_FB_DDR2; /* ask what device type on this row */ - if (MTR_DRAM_WIDTH(mtr)) + if (MTR_DRAM_WIDTH(mtr) == 8) dimm->dtype = DEV_X8; else dimm->dtype = DEV_X4; diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c index abf6ef22e22060..37a9ba71da449b 100644 --- a/drivers/edac/i5400_edac.c +++ b/drivers/edac/i5400_edac.c @@ -1207,13 +1207,14 @@ static int i5400_init_dimms(struct mem_ctl_info *mci) dimm->nr_pages = size_mb << 8; dimm->grain = 8; - dimm->dtype = MTR_DRAM_WIDTH(mtr) ? DEV_X8 : DEV_X4; + dimm->dtype = MTR_DRAM_WIDTH(mtr) == 8 ? + DEV_X8 : DEV_X4; dimm->mtype = MEM_FB_DDR2; /* * The eccc mechanism is SDDC (aka SECC), with * is similar to Chipkill. */ - dimm->edac_mode = MTR_DRAM_WIDTH(mtr) ? + dimm->edac_mode = MTR_DRAM_WIDTH(mtr) == 8 ? EDAC_S8ECD8ED : EDAC_S4ECD4ED; ndimms++; } diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c new file mode 100644 index 00000000000000..928e0dba41fc23 --- /dev/null +++ b/drivers/edac/pnd2_edac.c @@ -0,0 +1,1546 @@ +/* + * Driver for Pondicherry2 memory controller. + * + * Copyright (c) 2016, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * [Derived from sb_edac.c] + * + * Translation of system physical addresses to DIMM addresses + * is a two stage process: + * + * First the Pondicherry 2 memory controller handles slice and channel interleaving + * in "sys2pmi()". This is (almost) completley common between platforms. + * + * Then a platform specific dunit (DIMM unit) completes the process to provide DIMM, + * rank, bank, row and column using the appropriate "dunit_ops" functions/parameters. 
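 *
 * Sketched end to end, using the names defined later in this file (an
 * editorial summary, not additional documentation from the driver author):
 *
 *	sys2pmi(sysaddr)                   ->  (pmiidx, pmiaddr)
 *	ops->pmi2mem(mci, pmiaddr, pmiidx, &daddr, msg)
 *	                                   ->  DIMM, rank, bank, row, column
 *
 * where "ops" points at the platform dunit_ops, i.e. apl_pmi2mem() or
 * dnv_pmi2mem(), and pmiaddr/pmiidx are shifted by platform-specific
 * amounts between the two stages.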
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "edac_mc.h" +#include "edac_module.h" +#include "pnd2_edac.h" + +#define APL_NUM_CHANNELS 4 +#define DNV_NUM_CHANNELS 2 +#define DNV_MAX_DIMMS 2 /* Max DIMMs per channel */ + +enum type { + APL, + DNV, /* All requests go to PMI CH0 on each slice (CH1 disabled) */ +}; + +struct dram_addr { + int chan; + int dimm; + int rank; + int bank; + int row; + int col; +}; + +struct pnd2_pvt { + int dimm_geom[APL_NUM_CHANNELS]; + u64 tolm, tohm; +}; + +/* + * System address space is divided into multiple regions with + * different interleave rules in each. The as0/as1 regions + * have no interleaving at all. The as2 region is interleaved + * between two channels. The mot region is magic and may overlap + * other regions, with its interleave rules taking precedence. + * Addresses not in any of these regions are interleaved across + * all four channels. + */ +static struct region { + u64 base; + u64 limit; + u8 enabled; +} mot, as0, as1, as2; + +static struct dunit_ops { + char *name; + enum type type; + int pmiaddr_shift; + int pmiidx_shift; + int channels; + int dimms_per_channel; + int (*rd_reg)(int port, int off, int op, void *data, size_t sz, char *name); + int (*get_registers)(void); + int (*check_ecc)(void); + void (*mk_region)(char *name, struct region *rp, void *asym); + void (*get_dimm_config)(struct mem_ctl_info *mci); + int (*pmi2mem)(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx, + struct dram_addr *daddr, char *msg); +} *ops; + +static struct mem_ctl_info *pnd2_mci; + +#define PND2_MSG_SIZE 256 + +/* Debug macros */ +#define pnd2_printk(level, fmt, arg...) \ + edac_printk(level, "pnd2", fmt, ##arg) + +#define pnd2_mc_printk(mci, level, fmt, arg...) 
\ + edac_mc_chipset_printk(mci, level, "pnd2", fmt, ##arg) + +#define MOT_CHAN_INTLV_BIT_1SLC_2CH 12 +#define MOT_CHAN_INTLV_BIT_2SLC_2CH 13 +#define SELECTOR_DISABLED (-1) +#define _4GB (1ul << 32) + +#define PMI_ADDRESS_WIDTH 31 +#define PND_MAX_PHYS_BIT 39 + +#define APL_ASYMSHIFT 28 +#define DNV_ASYMSHIFT 31 +#define CH_HASH_MASK_LSB 6 +#define SLICE_HASH_MASK_LSB 6 +#define MOT_SLC_INTLV_BIT 12 +#define LOG2_PMI_ADDR_GRANULARITY 5 +#define MOT_SHIFT 24 + +#define GET_BITFIELD(v, lo, hi) (((v) & GENMASK_ULL(hi, lo)) >> (lo)) +#define U64_LSHIFT(val, s) ((u64)(val) << (s)) + +#ifdef CONFIG_X86_INTEL_SBI_APL +#include "linux/platform_data/sbi_apl.h" +int sbi_send(int port, int off, int op, u32 *data) +{ + struct sbi_apl_message sbi_arg; + int ret, read = 0; + + memset(&sbi_arg, 0, sizeof(sbi_arg)); + + if (op == 0 || op == 4 || op == 6) + read = 1; + else + sbi_arg.data = *data; + + sbi_arg.opcode = op; + sbi_arg.port_address = port; + sbi_arg.register_offset = off; + ret = sbi_apl_commit(&sbi_arg); + if (ret || sbi_arg.status) + edac_dbg(2, "sbi_send status=%d ret=%d data=%x\n", + sbi_arg.status, ret, sbi_arg.data); + + if (ret == 0) + ret = sbi_arg.status; + + if (ret == 0 && read) + *data = sbi_arg.data; + + return ret; +} +#else +int sbi_send(int port, int off, int op, u32 *data) +{ + return -EUNATCH; +} +#endif + +static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name) +{ + int ret = 0; + + edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op); + switch (sz) { + case 8: + ret = sbi_send(port, off + 4, op, (u32 *)(data + 4)); + case 4: + ret = sbi_send(port, off, op, (u32 *)data); + pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name, + sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret); + break; + } + + return ret; +} + +static u64 get_mem_ctrl_hub_base_addr(void) +{ + struct b_cr_mchbar_lo_pci lo; + struct b_cr_mchbar_hi_pci hi; + struct pci_dev *pdev; + + pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL); + if (pdev) { + pci_read_config_dword(pdev, 0x48, (u32 *)&lo); + pci_read_config_dword(pdev, 0x4c, (u32 *)&hi); + pci_dev_put(pdev); + } else { + return 0; + } + + if (!lo.enable) { + edac_dbg(2, "MMIO via memory controller hub base address is disabled!\n"); + return 0; + } + + return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15); +} + +static u64 get_sideband_reg_base_addr(void) +{ + struct pci_dev *pdev; + u32 hi, lo; + + pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x19dd, NULL); + if (pdev) { + pci_read_config_dword(pdev, 0x10, &lo); + pci_read_config_dword(pdev, 0x14, &hi); + pci_dev_put(pdev); + return (U64_LSHIFT(hi, 32) | U64_LSHIFT(lo, 0)); + } else { + return 0xfd000000; + } +} + +static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name) +{ + struct pci_dev *pdev; + char *base; + u64 addr; + + if (op == 4) { + pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL); + if (!pdev) + return -ENODEV; + + pci_read_config_dword(pdev, off, data); + pci_dev_put(pdev); + } else { + /* MMIO via memory controller hub base address */ + if (op == 0 && port == 0x4c) { + addr = get_mem_ctrl_hub_base_addr(); + if (!addr) + return -ENODEV; + } else { + /* MMIO via sideband register base address */ + addr = get_sideband_reg_base_addr(); + if (!addr) + return -ENODEV; + addr += (port << 16); + } + + base = ioremap((resource_size_t)addr, 0x10000); + if (!base) + return -ENODEV; + + if (sz == 8) + *(u32 *)(data + 4) = *(u32 *)(base + off + 4); + *(u32 *)data = *(u32 *)(base + off); + + iounmap(base); + } + + 
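	/*
	 * Editorial aside: for the sideband (non PCI-config) path above, each
	 * port gets a 64 KiB register window mapped at base + (port << 16).
	 * With the fallback base of 0xfd000000, reading offset 0x24 of port
	 * 0x10 would go through the mapping at
	 *
	 *	0xfd000000 + (0x10 << 16) + 0x24 = 0xfd100024
	 *
	 * (port and offset values chosen only for illustration).
	 */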
edac_dbg(2, "Read %s=%.8x_%.8x\n", name, + (sz == 8) ? *(u32 *)(data + 4) : 0, *(u32 *)data); + + return 0; +} + +#define RD_REGP(regp, regname, port) \ + ops->rd_reg(port, \ + regname##_offset, \ + regname##_r_opcode, \ + regp, sizeof(struct regname), \ + #regname) + +#define RD_REG(regp, regname) \ + ops->rd_reg(regname ## _port, \ + regname##_offset, \ + regname##_r_opcode, \ + regp, sizeof(struct regname), \ + #regname) + +static u64 top_lm, top_hm; +static bool two_slices; +static bool two_channels; /* Both PMI channels in one slice enabled */ + +static u8 sym_chan_mask; +static u8 asym_chan_mask; +static u8 chan_mask; + +static int slice_selector = -1; +static int chan_selector = -1; +static u64 slice_hash_mask; +static u64 chan_hash_mask; + +static void mk_region(char *name, struct region *rp, u64 base, u64 limit) +{ + rp->enabled = 1; + rp->base = base; + rp->limit = limit; + edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, limit); +} + +static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask) +{ + if (mask == 0) { + pr_info(FW_BUG "MOT mask cannot be zero\n"); + return; + } + if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) { + pr_info(FW_BUG "MOT mask not power of two\n"); + return; + } + if (base & ~mask) { + pr_info(FW_BUG "MOT region base/mask alignment error\n"); + return; + } + rp->base = base; + rp->limit = (base | ~mask) & GENMASK_ULL(PND_MAX_PHYS_BIT, 0); + rp->enabled = 1; + edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, rp->limit); +} + +static bool in_region(struct region *rp, u64 addr) +{ + if (!rp->enabled) + return false; + + return rp->base <= addr && addr <= rp->limit; +} + +static int gen_sym_mask(struct b_cr_slice_channel_hash *p) +{ + int mask = 0; + + if (!p->slice_0_mem_disabled) + mask |= p->sym_slice0_channel_enabled; + + if (!p->slice_1_disabled) + mask |= p->sym_slice1_channel_enabled << 2; + + if (p->ch_1_disabled || p->enable_pmi_dual_data_mode) + mask &= 0x5; + + return mask; +} + +static int gen_asym_mask(struct b_cr_slice_channel_hash *p, + struct b_cr_asym_mem_region0_mchbar *as0, + struct b_cr_asym_mem_region1_mchbar *as1, + struct b_cr_asym_2way_mem_region_mchbar *as2way) +{ + const int intlv[] = { 0x5, 0xA, 0x3, 0xC }; + int mask = 0; + + if (as2way->asym_2way_interleave_enable) + mask = intlv[as2way->asym_2way_intlv_mode]; + if (as0->slice0_asym_enable) + mask |= (1 << as0->slice0_asym_channel_select); + if (as1->slice1_asym_enable) + mask |= (4 << as1->slice1_asym_channel_select); + if (p->slice_0_mem_disabled) + mask &= 0xc; + if (p->slice_1_disabled) + mask &= 0x3; + if (p->ch_1_disabled || p->enable_pmi_dual_data_mode) + mask &= 0x5; + + return mask; +} + +static struct b_cr_tolud_pci tolud; +static struct b_cr_touud_lo_pci touud_lo; +static struct b_cr_touud_hi_pci touud_hi; +static struct b_cr_asym_mem_region0_mchbar asym0; +static struct b_cr_asym_mem_region1_mchbar asym1; +static struct b_cr_asym_2way_mem_region_mchbar asym_2way; +static struct b_cr_mot_out_base_mchbar mot_base; +static struct b_cr_mot_out_mask_mchbar mot_mask; +static struct b_cr_slice_channel_hash chash; + +/* Apollo Lake dunit */ +/* + * Validated on board with just two DIMMs in the [0] and [2] positions + * in this array. Other port number matches documentation, but caution + * advised. 
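 *
 * (Aside on the MOT window handled by mk_region_mask() further up: a
 * base/mask pair such as base = 0x7f000000, mask = GENMASK_ULL(39, 24)
 * describes the 16 MiB region [0x7f000000, 0x7fffffff], because the limit
 * is computed as (base | ~mask) clipped to the 40 supported physical
 * address bits.  The values are illustrative only.)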
+ */ +static const int apl_dports[APL_NUM_CHANNELS] = { 0x18, 0x10, 0x11, 0x19 }; +static struct d_cr_drp0 drp0[APL_NUM_CHANNELS]; + +/* Denverton dunit */ +static const int dnv_dports[DNV_NUM_CHANNELS] = { 0x10, 0x12 }; +static struct d_cr_dsch dsch; +static struct d_cr_ecc_ctrl ecc_ctrl[DNV_NUM_CHANNELS]; +static struct d_cr_drp drp[DNV_NUM_CHANNELS]; +static struct d_cr_dmap dmap[DNV_NUM_CHANNELS]; +static struct d_cr_dmap1 dmap1[DNV_NUM_CHANNELS]; +static struct d_cr_dmap2 dmap2[DNV_NUM_CHANNELS]; +static struct d_cr_dmap3 dmap3[DNV_NUM_CHANNELS]; +static struct d_cr_dmap4 dmap4[DNV_NUM_CHANNELS]; +static struct d_cr_dmap5 dmap5[DNV_NUM_CHANNELS]; + +static void apl_mk_region(char *name, struct region *rp, void *asym) +{ + struct b_cr_asym_mem_region0_mchbar *a = asym; + + mk_region(name, rp, + U64_LSHIFT(a->slice0_asym_base, APL_ASYMSHIFT), + U64_LSHIFT(a->slice0_asym_limit, APL_ASYMSHIFT) + + GENMASK_ULL(APL_ASYMSHIFT - 1, 0)); +} + +static void dnv_mk_region(char *name, struct region *rp, void *asym) +{ + struct b_cr_asym_mem_region_denverton *a = asym; + + mk_region(name, rp, + U64_LSHIFT(a->slice_asym_base, DNV_ASYMSHIFT), + U64_LSHIFT(a->slice_asym_limit, DNV_ASYMSHIFT) + + GENMASK_ULL(DNV_ASYMSHIFT - 1, 0)); +} + +static int apl_get_registers(void) +{ + int i; + + if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar)) + return -ENODEV; + + for (i = 0; i < APL_NUM_CHANNELS; i++) + if (RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i])) + return -ENODEV; + + return 0; +} + +static int dnv_get_registers(void) +{ + int i; + + if (RD_REG(&dsch, d_cr_dsch)) + return -ENODEV; + + for (i = 0; i < DNV_NUM_CHANNELS; i++) + if (RD_REGP(&ecc_ctrl[i], d_cr_ecc_ctrl, dnv_dports[i]) || + RD_REGP(&drp[i], d_cr_drp, dnv_dports[i]) || + RD_REGP(&dmap[i], d_cr_dmap, dnv_dports[i]) || + RD_REGP(&dmap1[i], d_cr_dmap1, dnv_dports[i]) || + RD_REGP(&dmap2[i], d_cr_dmap2, dnv_dports[i]) || + RD_REGP(&dmap3[i], d_cr_dmap3, dnv_dports[i]) || + RD_REGP(&dmap4[i], d_cr_dmap4, dnv_dports[i]) || + RD_REGP(&dmap5[i], d_cr_dmap5, dnv_dports[i])) + return -ENODEV; + + return 0; +} + +/* + * Read all the h/w config registers once here (they don't + * change at run time. Figure out which address ranges have + * which interleave characteristics. 
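 *
 * For the asymmetric regions this boils down to a base/limit pair taken
 * from the *_asym_base/*_asym_limit register fields.  As an illustration
 * (register values invented for the example), on Apollo Lake with its
 * 28-bit shift a slice0_asym_base of 0x4 and slice0_asym_limit of 0x7
 * describe
 *
 *	[0x4 << 28, (0x7 << 28) + 0x0fffffff] = [0x40000000, 0x7fffffff]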
+ */ +static int get_registers(void) +{ + const int intlv[] = { 10, 11, 12, 12 }; + + if (RD_REG(&tolud, b_cr_tolud_pci) || + RD_REG(&touud_lo, b_cr_touud_lo_pci) || + RD_REG(&touud_hi, b_cr_touud_hi_pci) || + RD_REG(&asym0, b_cr_asym_mem_region0_mchbar) || + RD_REG(&asym1, b_cr_asym_mem_region1_mchbar) || + RD_REG(&mot_base, b_cr_mot_out_base_mchbar) || + RD_REG(&mot_mask, b_cr_mot_out_mask_mchbar) || + RD_REG(&chash, b_cr_slice_channel_hash)) + return -ENODEV; + + if (ops->get_registers()) + return -ENODEV; + + if (ops->type == DNV) { + /* PMI channel idx (always 0) for asymmetric region */ + asym0.slice0_asym_channel_select = 0; + asym1.slice1_asym_channel_select = 0; + /* PMI channel bitmap (always 1) for symmetric region */ + chash.sym_slice0_channel_enabled = 0x1; + chash.sym_slice1_channel_enabled = 0x1; + } + + if (asym0.slice0_asym_enable) + ops->mk_region("as0", &as0, &asym0); + + if (asym1.slice1_asym_enable) + ops->mk_region("as1", &as1, &asym1); + + if (asym_2way.asym_2way_interleave_enable) { + mk_region("as2way", &as2, + U64_LSHIFT(asym_2way.asym_2way_base, APL_ASYMSHIFT), + U64_LSHIFT(asym_2way.asym_2way_limit, APL_ASYMSHIFT) + + GENMASK_ULL(APL_ASYMSHIFT - 1, 0)); + } + + if (mot_base.imr_en) { + mk_region_mask("mot", &mot, + U64_LSHIFT(mot_base.mot_out_base, MOT_SHIFT), + U64_LSHIFT(mot_mask.mot_out_mask, MOT_SHIFT)); + } + + top_lm = U64_LSHIFT(tolud.tolud, 20); + top_hm = U64_LSHIFT(touud_hi.touud, 32) | U64_LSHIFT(touud_lo.touud, 20); + + two_slices = !chash.slice_1_disabled && + !chash.slice_0_mem_disabled && + (chash.sym_slice0_channel_enabled != 0) && + (chash.sym_slice1_channel_enabled != 0); + two_channels = !chash.ch_1_disabled && + !chash.enable_pmi_dual_data_mode && + ((chash.sym_slice0_channel_enabled == 3) || + (chash.sym_slice1_channel_enabled == 3)); + + sym_chan_mask = gen_sym_mask(&chash); + asym_chan_mask = gen_asym_mask(&chash, &asym0, &asym1, &asym_2way); + chan_mask = sym_chan_mask | asym_chan_mask; + + if (two_slices && !two_channels) { + if (chash.hvm_mode) + slice_selector = 29; + else + slice_selector = intlv[chash.interleave_mode]; + } else if (!two_slices && two_channels) { + if (chash.hvm_mode) + chan_selector = 29; + else + chan_selector = intlv[chash.interleave_mode]; + } else if (two_slices && two_channels) { + if (chash.hvm_mode) { + slice_selector = 29; + chan_selector = 30; + } else { + slice_selector = intlv[chash.interleave_mode]; + chan_selector = intlv[chash.interleave_mode] + 1; + } + } + + if (two_slices) { + if (!chash.hvm_mode) + slice_hash_mask = chash.slice_hash_mask << SLICE_HASH_MASK_LSB; + if (!two_channels) + slice_hash_mask |= BIT_ULL(slice_selector); + } + + if (two_channels) { + if (!chash.hvm_mode) + chan_hash_mask = chash.ch_hash_mask << CH_HASH_MASK_LSB; + if (!two_slices) + chan_hash_mask |= BIT_ULL(chan_selector); + } + + return 0; +} + +/* Get a contiguous memory address (remove the MMIO gap) */ +static u64 remove_mmio_gap(u64 sys) +{ + return (sys < _4GB) ? 
sys : sys - (_4GB - top_lm); +} + +/* Squeeze out one address bit, shift upper part down to fill gap */ +static void remove_addr_bit(u64 *addr, int bitidx) +{ + u64 mask; + + if (bitidx == -1) + return; + + mask = (1ull << bitidx) - 1; + *addr = ((*addr >> 1) & ~mask) | (*addr & mask); +} + +/* XOR all the bits from addr specified in mask */ +static int hash_by_mask(u64 addr, u64 mask) +{ + u64 result = addr & mask; + + result = (result >> 32) ^ result; + result = (result >> 16) ^ result; + result = (result >> 8) ^ result; + result = (result >> 4) ^ result; + result = (result >> 2) ^ result; + result = (result >> 1) ^ result; + + return (int)result & 1; +} + +/* + * First stage decode. Take the system address and figure out which + * second stage will deal with it based on interleave modes. + */ +static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg) +{ + u64 contig_addr, contig_base, contig_offset, contig_base_adj; + int mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH : + MOT_CHAN_INTLV_BIT_1SLC_2CH; + int slice_intlv_bit_rm = SELECTOR_DISABLED; + int chan_intlv_bit_rm = SELECTOR_DISABLED; + /* Determine if address is in the MOT region. */ + bool mot_hit = in_region(&mot, addr); + /* Calculate the number of symmetric regions enabled. */ + int sym_channels = hweight8(sym_chan_mask); + + /* + * The amount we need to shift the asym base can be determined by the + * number of enabled symmetric channels. + * NOTE: This can only work because symmetric memory is not supposed + * to do a 3-way interleave. + */ + int sym_chan_shift = sym_channels >> 1; + + /* Give up if address is out of range, or in MMIO gap */ + if (addr >= (1ul << PND_MAX_PHYS_BIT) || + (addr >= top_lm && addr < _4GB) || addr >= top_hm) { + snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr); + return -EINVAL; + } + + /* Get a contiguous memory address (remove the MMIO gap) */ + contig_addr = remove_mmio_gap(addr); + + if (in_region(&as0, addr)) { + *pmiidx = asym0.slice0_asym_channel_select; + + contig_base = remove_mmio_gap(as0.base); + contig_offset = contig_addr - contig_base; + contig_base_adj = (contig_base >> sym_chan_shift) * + ((chash.sym_slice0_channel_enabled >> (*pmiidx & 1)) & 1); + contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull); + } else if (in_region(&as1, addr)) { + *pmiidx = 2u + asym1.slice1_asym_channel_select; + + contig_base = remove_mmio_gap(as1.base); + contig_offset = contig_addr - contig_base; + contig_base_adj = (contig_base >> sym_chan_shift) * + ((chash.sym_slice1_channel_enabled >> (*pmiidx & 1)) & 1); + contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull); + } else if (in_region(&as2, addr) && (asym_2way.asym_2way_intlv_mode == 0x3ul)) { + bool channel1; + + mot_intlv_bit = MOT_CHAN_INTLV_BIT_1SLC_2CH; + *pmiidx = (asym_2way.asym_2way_intlv_mode & 1) << 1; + channel1 = mot_hit ? ((bool)((addr >> mot_intlv_bit) & 1)) : + hash_by_mask(contig_addr, chan_hash_mask); + *pmiidx |= (u32)channel1; + + contig_base = remove_mmio_gap(as2.base); + chan_intlv_bit_rm = mot_hit ? mot_intlv_bit : chan_selector; + contig_offset = contig_addr - contig_base; + remove_addr_bit(&contig_offset, chan_intlv_bit_rm); + contig_addr = (contig_base >> sym_chan_shift) + contig_offset; + } else { + /* Otherwise we're in normal, boring symmetric mode. 
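 *
 * (Illustration of the two helpers used on this path: hash_by_mask()
 * XORs together the address bits selected by the hash mask to pick a
 * slice/channel, and remove_addr_bit() squeezes the used interleave bit
 * back out of the address -- e.g. removing bit 4 from 0x1234 yields
 * 0x0914, keeping the low nibble and shifting everything above bit 4
 * down by one.)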
*/ + *pmiidx = 0u; + + if (two_slices) { + bool slice1; + + if (mot_hit) { + slice_intlv_bit_rm = MOT_SLC_INTLV_BIT; + slice1 = (addr >> MOT_SLC_INTLV_BIT) & 1; + } else { + slice_intlv_bit_rm = slice_selector; + slice1 = hash_by_mask(addr, slice_hash_mask); + } + + *pmiidx = (u32)slice1 << 1; + } + + if (two_channels) { + bool channel1; + + mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH : + MOT_CHAN_INTLV_BIT_1SLC_2CH; + + if (mot_hit) { + chan_intlv_bit_rm = mot_intlv_bit; + channel1 = (addr >> mot_intlv_bit) & 1; + } else { + chan_intlv_bit_rm = chan_selector; + channel1 = hash_by_mask(contig_addr, chan_hash_mask); + } + + *pmiidx |= (u32)channel1; + } + } + + /* Remove the chan_selector bit first */ + remove_addr_bit(&contig_addr, chan_intlv_bit_rm); + /* Remove the slice bit (we remove it second because it must be lower */ + remove_addr_bit(&contig_addr, slice_intlv_bit_rm); + *pmiaddr = contig_addr; + + return 0; +} + +/* Translate PMI address to memory (rank, row, bank, column) */ +#define C(n) (0x10 | (n)) /* column */ +#define B(n) (0x20 | (n)) /* bank */ +#define R(n) (0x40 | (n)) /* row */ +#define RS (0x80) /* rank */ + +/* addrdec values */ +#define AMAP_1KB 0 +#define AMAP_2KB 1 +#define AMAP_4KB 2 +#define AMAP_RSVD 3 + +/* dden values */ +#define DEN_4Gb 0 +#define DEN_8Gb 2 + +/* dwid values */ +#define X8 0 +#define X16 1 + +static struct dimm_geometry { + u8 addrdec; + u8 dden; + u8 dwid; + u8 rowbits, colbits; + u16 bits[PMI_ADDRESS_WIDTH]; +} dimms[] = { + { + .addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X16, + .rowbits = 15, .colbits = 10, + .bits = { + C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0), + R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9), + R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14), + 0, 0, 0, 0 + } + }, + { + .addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X8, + .rowbits = 16, .colbits = 10, + .bits = { + C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0), + R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9), + R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14), + R(15), 0, 0, 0 + } + }, + { + .addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X16, + .rowbits = 16, .colbits = 10, + .bits = { + C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0), + R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9), + R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14), + R(15), 0, 0, 0 + } + }, + { + .addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X8, + .rowbits = 16, .colbits = 11, + .bits = { + C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0), + R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9), + R(10), C(7), C(8), C(9), R(11), RS, C(11), R(12), R(13), + R(14), R(15), 0, 0 + } + }, + { + .addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X16, + .rowbits = 15, .colbits = 10, + .bits = { + C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2), + R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), + R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14), + 0, 0, 0, 0 + } + }, + { + .addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X8, + .rowbits = 16, .colbits = 10, + .bits = { + C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2), + R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), + R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14), + R(15), 0, 0, 0 + } + }, + { + .addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X16, + .rowbits = 16, .colbits = 10, + .bits = { + C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2), + R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), + R(9), R(10), C(8), C(9), R(11), RS, R(12), 
R(13), R(14), + R(15), 0, 0, 0 + } + }, + { + .addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X8, + .rowbits = 16, .colbits = 11, + .bits = { + C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2), + R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), + R(9), R(10), C(8), C(9), R(11), RS, C(11), R(12), R(13), + R(14), R(15), 0, 0 + } + }, + { + .addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X16, + .rowbits = 15, .colbits = 10, + .bits = { + C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1), + B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), + R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14), + 0, 0, 0, 0 + } + }, + { + .addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X8, + .rowbits = 16, .colbits = 10, + .bits = { + C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1), + B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), + R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14), + R(15), 0, 0, 0 + } + }, + { + .addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X16, + .rowbits = 16, .colbits = 10, + .bits = { + C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1), + B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), + R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14), + R(15), 0, 0, 0 + } + }, + { + .addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X8, + .rowbits = 16, .colbits = 11, + .bits = { + C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1), + B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), + R(8), R(9), R(10), C(9), R(11), RS, C(11), R(12), R(13), + R(14), R(15), 0, 0 + } + } +}; + +static int bank_hash(u64 pmiaddr, int idx, int shft) +{ + int bhash = 0; + + switch (idx) { + case 0: + bhash ^= ((pmiaddr >> (12 + shft)) ^ (pmiaddr >> (9 + shft))) & 1; + break; + case 1: + bhash ^= (((pmiaddr >> (10 + shft)) ^ (pmiaddr >> (8 + shft))) & 1) << 1; + bhash ^= ((pmiaddr >> 22) & 1) << 1; + break; + case 2: + bhash ^= (((pmiaddr >> (13 + shft)) ^ (pmiaddr >> (11 + shft))) & 1) << 2; + break; + } + + return bhash; +} + +static int rank_hash(u64 pmiaddr) +{ + return ((pmiaddr >> 16) ^ (pmiaddr >> 10)) & 1; +} + +/* Second stage decode. Compute rank, bank, row & column. */ +static int apl_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx, + struct dram_addr *daddr, char *msg) +{ + struct d_cr_drp0 *cr_drp0 = &drp0[pmiidx]; + struct pnd2_pvt *pvt = mci->pvt_info; + int g = pvt->dimm_geom[pmiidx]; + struct dimm_geometry *d = &dimms[g]; + int column = 0, bank = 0, row = 0, rank = 0; + int i, idx, type, skiprs = 0; + + for (i = 0; i < PMI_ADDRESS_WIDTH; i++) { + int bit = (pmiaddr >> i) & 1; + + if (i + skiprs >= PMI_ADDRESS_WIDTH) { + snprintf(msg, PND2_MSG_SIZE, "Bad dimm_geometry[] table\n"); + return -EINVAL; + } + + type = d->bits[i + skiprs] & ~0xf; + idx = d->bits[i + skiprs] & 0xf; + + /* + * On single rank DIMMs ignore the rank select bit + * and shift remainder of "bits[]" down one place. 
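 *
 * For example, in the dimm_geometry tables above an entry sequence of
 * ..., R(11), RS, R(12), R(13), ... means that on a single-rank channel
 * the PMI bit that would have picked the rank supplies R(12) instead,
 * and every following PMI bit moves on to the next table entry.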
+ */ + if (type == RS && (cr_drp0->rken0 + cr_drp0->rken1) == 1) { + skiprs = 1; + type = d->bits[i + skiprs] & ~0xf; + idx = d->bits[i + skiprs] & 0xf; + } + + switch (type) { + case C(0): + column |= (bit << idx); + break; + case B(0): + bank |= (bit << idx); + if (cr_drp0->bahen) + bank ^= bank_hash(pmiaddr, idx, d->addrdec); + break; + case R(0): + row |= (bit << idx); + break; + case RS: + rank = bit; + if (cr_drp0->rsien) + rank ^= rank_hash(pmiaddr); + break; + default: + if (bit) { + snprintf(msg, PND2_MSG_SIZE, "Bad translation\n"); + return -EINVAL; + } + goto done; + } + } + +done: + daddr->col = column; + daddr->bank = bank; + daddr->row = row; + daddr->rank = rank; + daddr->dimm = 0; + + return 0; +} + +/* Pluck bit "in" from pmiaddr and return value shifted to bit "out" */ +#define dnv_get_bit(pmi, in, out) ((int)(((pmi) >> (in)) & 1u) << (out)) + +static int dnv_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx, + struct dram_addr *daddr, char *msg) +{ + /* Rank 0 or 1 */ + daddr->rank = dnv_get_bit(pmiaddr, dmap[pmiidx].rs0 + 13, 0); + /* Rank 2 or 3 */ + daddr->rank |= dnv_get_bit(pmiaddr, dmap[pmiidx].rs1 + 13, 1); + + /* + * Normally ranks 0,1 are DIMM0, and 2,3 are DIMM1, but we + * flip them if DIMM1 is larger than DIMM0. + */ + daddr->dimm = (daddr->rank >= 2) ^ drp[pmiidx].dimmflip; + + daddr->bank = dnv_get_bit(pmiaddr, dmap[pmiidx].ba0 + 6, 0); + daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].ba1 + 6, 1); + daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg0 + 6, 2); + if (dsch.ddr4en) + daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg1 + 6, 3); + if (dmap1[pmiidx].bxor) { + if (dsch.ddr4en) { + daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 0); + daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 1); + if (dsch.chan_width == 0) + /* 64/72 bit dram channel width */ + daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2); + else + /* 32/40 bit dram channel width */ + daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2); + daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 3); + } else { + daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 0); + daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 1); + if (dsch.chan_width == 0) + daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2); + else + daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2); + } + } + + daddr->row = dnv_get_bit(pmiaddr, dmap2[pmiidx].row0 + 6, 0); + daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row1 + 6, 1); + daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 2); + daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row3 + 6, 3); + daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row4 + 6, 4); + daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row5 + 6, 5); + daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 6); + daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 7); + daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row8 + 6, 8); + daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row9 + 6, 9); + daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row10 + 6, 10); + daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row11 + 6, 11); + daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row12 + 6, 12); + daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row13 + 6, 13); + if (dmap4[pmiidx].row14 != 31) + daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row14 + 6, 14); + if (dmap4[pmiidx].row15 != 31) + daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row15 + 6, 15); + if 
(dmap4[pmiidx].row16 != 31) + daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row16 + 6, 16); + if (dmap4[pmiidx].row17 != 31) + daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row17 + 6, 17); + + daddr->col = dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 3); + daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 4); + daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca5 + 6, 5); + daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca6 + 6, 6); + daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca7 + 6, 7); + daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca8 + 6, 8); + daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca9 + 6, 9); + if (!dsch.ddr4en && dmap1[pmiidx].ca11 != 0x3f) + daddr->col |= dnv_get_bit(pmiaddr, dmap1[pmiidx].ca11 + 13, 11); + + return 0; +} + +static int check_channel(int ch) +{ + if (drp0[ch].dramtype != 0) { + pnd2_printk(KERN_INFO, "Unsupported DIMM in channel %d\n", ch); + return 1; + } else if (drp0[ch].eccen == 0) { + pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch); + return 1; + } + return 0; +} + +static int apl_check_ecc_active(void) +{ + int i, ret = 0; + + /* Check dramtype and ECC mode for each present DIMM */ + for (i = 0; i < APL_NUM_CHANNELS; i++) + if (chan_mask & BIT(i)) + ret += check_channel(i); + return ret ? -EINVAL : 0; +} + +#define DIMMS_PRESENT(d) ((d)->rken0 + (d)->rken1 + (d)->rken2 + (d)->rken3) + +static int check_unit(int ch) +{ + struct d_cr_drp *d = &drp[ch]; + + if (DIMMS_PRESENT(d) && !ecc_ctrl[ch].eccen) { + pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch); + return 1; + } + return 0; +} + +static int dnv_check_ecc_active(void) +{ + int i, ret = 0; + + for (i = 0; i < DNV_NUM_CHANNELS; i++) + ret += check_unit(i); + return ret ? -EINVAL : 0; +} + +static int get_memory_error_data(struct mem_ctl_info *mci, u64 addr, + struct dram_addr *daddr, char *msg) +{ + u64 pmiaddr; + u32 pmiidx; + int ret; + + ret = sys2pmi(addr, &pmiidx, &pmiaddr, msg); + if (ret) + return ret; + + pmiaddr >>= ops->pmiaddr_shift; + /* pmi channel idx to dimm channel idx */ + pmiidx >>= ops->pmiidx_shift; + daddr->chan = pmiidx; + + ret = ops->pmi2mem(mci, pmiaddr, pmiidx, daddr, msg); + if (ret) + return ret; + + edac_dbg(0, "SysAddr=%llx PmiAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n", + addr, pmiaddr, daddr->chan, daddr->dimm, daddr->rank, daddr->bank, daddr->row, daddr->col); + + return 0; +} + +static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m, + struct dram_addr *daddr) +{ + enum hw_event_mc_err_type tp_event; + char *optype, msg[PND2_MSG_SIZE]; + bool ripv = m->mcgstatus & MCG_STATUS_RIPV; + bool overflow = m->status & MCI_STATUS_OVER; + bool uc_err = m->status & MCI_STATUS_UC; + bool recov = m->status & MCI_STATUS_S; + u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52); + u32 mscod = GET_BITFIELD(m->status, 16, 31); + u32 errcode = GET_BITFIELD(m->status, 0, 15); + u32 optypenum = GET_BITFIELD(m->status, 4, 6); + int rc; + + tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) : + HW_EVENT_ERR_CORRECTED; + + /* + * According to Table 15-9 of the Intel Architecture spec vol 3A, + * memory errors should fit in this mask: + * 000f 0000 1mmm cccc (binary) + * where: + * f = Correction Report Filtering Bit.
If 1, subsequent errors + * won't be shown + * mmm = error type + * cccc = channel + * If the mask doesn't match, report an error to the parsing logic + */ + if (!((errcode & 0xef80) == 0x80)) { + optype = "Can't parse: it is not a mem"; + } else { + switch (optypenum) { + case 0: + optype = "generic undef request error"; + break; + case 1: + optype = "memory read error"; + break; + case 2: + optype = "memory write error"; + break; + case 3: + optype = "addr/cmd error"; + break; + case 4: + optype = "memory scrubbing error"; + break; + default: + optype = "reserved"; + break; + } + } + + /* Only decode errors with a valid address (ADDRV) */ + if (!(m->status & MCI_STATUS_ADDRV)) + return; + + rc = get_memory_error_data(mci, m->addr, daddr, msg); + if (rc) + goto address_error; + + snprintf(msg, sizeof(msg), + "%s%s err_code:%04x:%04x channel:%d DIMM:%d rank:%d row:%d bank:%d col:%d", + overflow ? " OVERFLOW" : "", (uc_err && recov) ? " recoverable" : "", mscod, + errcode, daddr->chan, daddr->dimm, daddr->rank, daddr->row, daddr->bank, daddr->col); + + edac_dbg(0, "%s\n", msg); + + /* Call the helper to output message */ + edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT, + m->addr & ~PAGE_MASK, 0, daddr->chan, daddr->dimm, -1, optype, msg); + + return; + +address_error: + edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, -1, -1, -1, msg, ""); +} + +static void apl_get_dimm_config(struct mem_ctl_info *mci) +{ + struct pnd2_pvt *pvt = mci->pvt_info; + struct dimm_info *dimm; + struct d_cr_drp0 *d; + u64 capacity; + int i, g; + + for (i = 0; i < APL_NUM_CHANNELS; i++) { + if (!(chan_mask & BIT(i))) + continue; + + dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, 0, 0); + if (!dimm) { + edac_dbg(0, "No allocated DIMM for channel %d\n", i); + continue; + } + + d = &drp0[i]; + for (g = 0; g < ARRAY_SIZE(dimms); g++) + if (dimms[g].addrdec == d->addrdec && + dimms[g].dden == d->dden && + dimms[g].dwid == d->dwid) + break; + + if (g == ARRAY_SIZE(dimms)) { + edac_dbg(0, "Channel %d: unrecognized DIMM\n", i); + continue; + } + + pvt->dimm_geom[i] = g; + capacity = (d->rken0 + d->rken1) * 8 * (1ul << dimms[g].rowbits) * + (1ul << dimms[g].colbits); + edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3)); + dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3)); + dimm->grain = 32; + dimm->dtype = (d->dwid == 0) ?
DEV_X8 : DEV_X16; + dimm->mtype = MEM_DDR3; + dimm->edac_mode = EDAC_SECDED; + snprintf(dimm->label, sizeof(dimm->label), "Slice#%d_Chan#%d", i / 2, i % 2); + } +} + +static const int dnv_dtypes[] = { + DEV_X8, DEV_X4, DEV_X16, DEV_UNKNOWN +}; + +static void dnv_get_dimm_config(struct mem_ctl_info *mci) +{ + int i, j, ranks_of_dimm[DNV_MAX_DIMMS], banks, rowbits, colbits, memtype; + struct dimm_info *dimm; + struct d_cr_drp *d; + u64 capacity; + + if (dsch.ddr4en) { + memtype = MEM_DDR4; + banks = 16; + colbits = 10; + } else { + memtype = MEM_DDR3; + banks = 8; + } + + for (i = 0; i < DNV_NUM_CHANNELS; i++) { + if (dmap4[i].row14 == 31) + rowbits = 14; + else if (dmap4[i].row15 == 31) + rowbits = 15; + else if (dmap4[i].row16 == 31) + rowbits = 16; + else if (dmap4[i].row17 == 31) + rowbits = 17; + else + rowbits = 18; + + if (memtype == MEM_DDR3) { + if (dmap1[i].ca11 != 0x3f) + colbits = 12; + else + colbits = 10; + } + + d = &drp[i]; + /* DIMM0 is present if rank0 and/or rank1 is enabled */ + ranks_of_dimm[0] = d->rken0 + d->rken1; + /* DIMM1 is present if rank2 and/or rank3 is enabled */ + ranks_of_dimm[1] = d->rken2 + d->rken3; + + for (j = 0; j < DNV_MAX_DIMMS; j++) { + if (!ranks_of_dimm[j]) + continue; + + dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0); + if (!dimm) { + edac_dbg(0, "No allocated DIMM for channel %d DIMM %d\n", i, j); + continue; + } + + capacity = ranks_of_dimm[j] * banks * (1ul << rowbits) * (1ul << colbits); + edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i, j, capacity >> (20 - 3)); + dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3)); + dimm->grain = 32; + dimm->dtype = dnv_dtypes[j ? d->dimmdwid0 : d->dimmdwid1]; + dimm->mtype = memtype; + dimm->edac_mode = EDAC_SECDED; + snprintf(dimm->label, sizeof(dimm->label), "Chan#%d_DIMM#%d", i, j); + } + } +} + +static int pnd2_register_mci(struct mem_ctl_info **ppmci) +{ + struct edac_mc_layer layers[2]; + struct mem_ctl_info *mci; + struct pnd2_pvt *pvt; + int rc; + + rc = ops->check_ecc(); + if (rc < 0) + return rc; + + /* Allocate a new MC control structure */ + layers[0].type = EDAC_MC_LAYER_CHANNEL; + layers[0].size = ops->channels; + layers[0].is_virt_csrow = false; + layers[1].type = EDAC_MC_LAYER_SLOT; + layers[1].size = ops->dimms_per_channel; + layers[1].is_virt_csrow = true; + mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt)); + if (!mci) + return -ENOMEM; + + pvt = mci->pvt_info; + memset(pvt, 0, sizeof(*pvt)); + + mci->mod_name = "pnd2_edac.c"; + mci->dev_name = ops->name; + mci->ctl_name = "Pondicherry2"; + + /* Get dimm basic config and the memory layout */ + ops->get_dimm_config(mci); + + if (edac_mc_add_mc(mci)) { + edac_dbg(0, "MC: failed edac_mc_add_mc()\n"); + edac_mc_free(mci); + return -EINVAL; + } + + *ppmci = mci; + + return 0; +} + +static void pnd2_unregister_mci(struct mem_ctl_info *mci) +{ + if (unlikely(!mci || !mci->pvt_info)) { + pnd2_printk(KERN_ERR, "Couldn't find mci handler\n"); + return; + } + + /* Remove MC sysfs nodes */ + edac_mc_del_mc(NULL); + edac_dbg(1, "%s: free mci struct\n", mci->ctl_name); + edac_mc_free(mci); +} + +/* + * Callback function registered with core kernel mce code. + * Called once for each logged error. 
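+ * It bails out unless EDAC reporting is enabled and the MCA status + * identifies a memory-controller error, logs the raw machine-check + * fields, then hands the address to pnd2_mce_output_error() for + * translation and EDAC accounting.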
+ */ +static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, void *data) +{ + struct mce *mce = (struct mce *)data; + struct mem_ctl_info *mci; + struct dram_addr daddr; + char *type; + + if (get_edac_report_status() == EDAC_REPORTING_DISABLED) + return NOTIFY_DONE; + + mci = pnd2_mci; + if (!mci) + return NOTIFY_DONE; + + /* + * Just let mcelog handle it if the error is + * outside the memory controller. A memory error + * is indicated by bit 7 = 1 and bits 8-11,13-15 = 0. + * Bit 12 has a special meaning. + */ + if ((mce->status & 0xefff) >> 7 != 1) + return NOTIFY_DONE; + + if (mce->mcgstatus & MCG_STATUS_MCIP) + type = "Exception"; + else + type = "Event"; + + pnd2_mc_printk(mci, KERN_INFO, "HANDLING MCE MEMORY ERROR\n"); + pnd2_mc_printk(mci, KERN_INFO, "CPU %u: Machine Check %s: %llx Bank %u: %llx\n", + mce->extcpu, type, mce->mcgstatus, mce->bank, mce->status); + pnd2_mc_printk(mci, KERN_INFO, "TSC %llx ", mce->tsc); + pnd2_mc_printk(mci, KERN_INFO, "ADDR %llx ", mce->addr); + pnd2_mc_printk(mci, KERN_INFO, "MISC %llx ", mce->misc); + pnd2_mc_printk(mci, KERN_INFO, "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n", + mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid); + + pnd2_mce_output_error(mci, mce, &daddr); + + /* Advise mcelog that the error was handled */ + return NOTIFY_STOP; +} + +static struct notifier_block pnd2_mce_dec = { + .notifier_call = pnd2_mce_check_error, +}; + +#ifdef CONFIG_EDAC_DEBUG +/* + * Write an address to this file to exercise the address decode + * logic in this driver. + */ +static u64 pnd2_fake_addr; +#define PND2_BLOB_SIZE 1024 +static char pnd2_result[PND2_BLOB_SIZE]; +static struct dentry *pnd2_test; +static struct debugfs_blob_wrapper pnd2_blob = { + .data = pnd2_result, + .size = 0 +}; + +static int debugfs_u64_set(void *data, u64 val) +{ + struct dram_addr daddr; + struct mce m; + + *(u64 *)data = val; + m.mcgstatus = 0; + /* ADDRV + MemRd + Unknown channel */ + m.status = MCI_STATUS_ADDRV + 0x9f; + m.addr = val; + pnd2_mce_output_error(pnd2_mci, &m, &daddr); + snprintf(pnd2_blob.data, PND2_BLOB_SIZE, + "SysAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n", + m.addr, daddr.chan, daddr.dimm, daddr.rank, daddr.bank, daddr.row, daddr.col); + pnd2_blob.size = strlen(pnd2_blob.data); + + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n"); + +static void setup_pnd2_debug(void) +{ + pnd2_test = edac_debugfs_create_dir("pnd2_test"); + edac_debugfs_create_file("pnd2_debug_addr", 0200, pnd2_test, + &pnd2_fake_addr, &fops_u64_wo); + debugfs_create_blob("pnd2_debug_results", 0400, pnd2_test, &pnd2_blob); +} + +static void teardown_pnd2_debug(void) +{ + debugfs_remove_recursive(pnd2_test); +} +#else +static void setup_pnd2_debug(void) {} +static void teardown_pnd2_debug(void) {} +#endif /* CONFIG_EDAC_DEBUG */ + + +static int pnd2_probe(void) +{ + int rc; + + edac_dbg(2, "\n"); + rc = get_registers(); + if (rc) + return rc; + + return pnd2_register_mci(&pnd2_mci); +} + +static void pnd2_remove(void) +{ + edac_dbg(0, "\n"); + pnd2_unregister_mci(pnd2_mci); +} + +static struct dunit_ops apl_ops = { + .name = "pnd2/apl", + .type = APL, + .pmiaddr_shift = LOG2_PMI_ADDR_GRANULARITY, + .pmiidx_shift = 0, + .channels = APL_NUM_CHANNELS, + .dimms_per_channel = 1, + .rd_reg = apl_rd_reg, + .get_registers = apl_get_registers, + .check_ecc = apl_check_ecc_active, + .mk_region = apl_mk_region, + .get_dimm_config = apl_get_dimm_config, + .pmi2mem = apl_pmi2mem, +}; + +static struct
dunit_ops dnv_ops = { + .name = "pnd2/dnv", + .type = DNV, + .pmiaddr_shift = 0, + .pmiidx_shift = 1, + .channels = DNV_NUM_CHANNELS, + .dimms_per_channel = 2, + .rd_reg = dnv_rd_reg, + .get_registers = dnv_get_registers, + .check_ecc = dnv_check_ecc_active, + .mk_region = dnv_mk_region, + .get_dimm_config = dnv_get_dimm_config, + .pmi2mem = dnv_pmi2mem, +}; + +static const struct x86_cpu_id pnd2_cpuids[] = { + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, 0, (kernel_ulong_t)&apl_ops }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON, 0, (kernel_ulong_t)&dnv_ops }, + { } +}; +MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids); + +static int __init pnd2_init(void) +{ + const struct x86_cpu_id *id; + int rc; + + edac_dbg(2, "\n"); + + id = x86_match_cpu(pnd2_cpuids); + if (!id) + return -ENODEV; + + ops = (struct dunit_ops *)id->driver_data; + + /* Ensure that the OPSTATE is set correctly for POLL or NMI */ + opstate_init(); + + rc = pnd2_probe(); + if (rc < 0) { + pnd2_printk(KERN_ERR, "Failed to register device with error %d.\n", rc); + return rc; + } + + if (!pnd2_mci) + return -ENODEV; + + mce_register_decode_chain(&pnd2_mce_dec); + setup_pnd2_debug(); + + return 0; +} + +static void __exit pnd2_exit(void) +{ + edac_dbg(2, "\n"); + teardown_pnd2_debug(); + mce_unregister_decode_chain(&pnd2_mce_dec); + pnd2_remove(); +} + +module_init(pnd2_init); +module_exit(pnd2_exit); + +module_param(edac_op_state, int, 0444); +MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Tony Luck"); +MODULE_DESCRIPTION("MC Driver for Intel SoC using Pondicherry memory controller"); diff --git a/drivers/edac/pnd2_edac.h b/drivers/edac/pnd2_edac.h new file mode 100644 index 00000000000000..61b6e79492bb11 --- /dev/null +++ b/drivers/edac/pnd2_edac.h @@ -0,0 +1,301 @@ +/* + * Register bitfield descriptions for Pondicherry2 memory controller. + * + * Copyright (c) 2016, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef _PND2_REGS_H +#define _PND2_REGS_H + +struct b_cr_touud_lo_pci { + u32 lock : 1; + u32 reserved_1 : 19; + u32 touud : 12; +}; + +#define b_cr_touud_lo_pci_port 0x4c +#define b_cr_touud_lo_pci_offset 0xa8 +#define b_cr_touud_lo_pci_r_opcode 0x04 + +struct b_cr_touud_hi_pci { + u32 touud : 7; + u32 reserved_0 : 25; +}; + +#define b_cr_touud_hi_pci_port 0x4c +#define b_cr_touud_hi_pci_offset 0xac +#define b_cr_touud_hi_pci_r_opcode 0x04 + +struct b_cr_tolud_pci { + u32 lock : 1; + u32 reserved_0 : 19; + u32 tolud : 12; +}; + +#define b_cr_tolud_pci_port 0x4c +#define b_cr_tolud_pci_offset 0xbc +#define b_cr_tolud_pci_r_opcode 0x04 + +struct b_cr_mchbar_lo_pci { + u32 enable : 1; + u32 pad_3_1 : 3; + u32 pad_14_4: 11; + u32 base: 17; +}; + +struct b_cr_mchbar_hi_pci { + u32 base : 7; + u32 pad_31_7 : 25; +}; + +/* Symmetric region */ +struct b_cr_slice_channel_hash { + u64 slice_1_disabled : 1; + u64 hvm_mode : 1; + u64 interleave_mode : 2; + u64 slice_0_mem_disabled : 1; + u64 reserved_0 : 1; + u64 slice_hash_mask : 14; + u64 reserved_1 : 11; + u64 enable_pmi_dual_data_mode : 1; + u64 ch_1_disabled : 1; + u64 reserved_2 : 1; + u64 sym_slice0_channel_enabled : 2; + u64 sym_slice1_channel_enabled : 2; + u64 ch_hash_mask : 14; + u64 reserved_3 : 11; + u64 lock : 1; +}; + +#define b_cr_slice_channel_hash_port 0x4c +#define b_cr_slice_channel_hash_offset 0x4c58 +#define b_cr_slice_channel_hash_r_opcode 0x06 + +struct b_cr_mot_out_base_mchbar { + u32 reserved_0 : 14; + u32 mot_out_base : 15; + u32 reserved_1 : 1; + u32 tr_en : 1; + u32 imr_en : 1; +}; + +#define b_cr_mot_out_base_mchbar_port 0x4c +#define b_cr_mot_out_base_mchbar_offset 0x6af0 +#define b_cr_mot_out_base_mchbar_r_opcode 0x00 + +struct b_cr_mot_out_mask_mchbar { + u32 reserved_0 : 14; + u32 mot_out_mask : 15; + u32 reserved_1 : 1; + u32 ia_iwb_en : 1; + u32 gt_iwb_en : 1; +}; + +#define b_cr_mot_out_mask_mchbar_port 0x4c +#define b_cr_mot_out_mask_mchbar_offset 0x6af4 +#define b_cr_mot_out_mask_mchbar_r_opcode 0x00 + +struct b_cr_asym_mem_region0_mchbar { + u32 pad : 4; + u32 slice0_asym_base : 11; + u32 pad_18_15 : 4; + u32 slice0_asym_limit : 11; + u32 slice0_asym_channel_select : 1; + u32 slice0_asym_enable : 1; +}; + +#define b_cr_asym_mem_region0_mchbar_port 0x4c +#define b_cr_asym_mem_region0_mchbar_offset 0x6e40 +#define b_cr_asym_mem_region0_mchbar_r_opcode 0x00 + +struct b_cr_asym_mem_region1_mchbar { + u32 pad : 4; + u32 slice1_asym_base : 11; + u32 pad_18_15 : 4; + u32 slice1_asym_limit : 11; + u32 slice1_asym_channel_select : 1; + u32 slice1_asym_enable : 1; +}; + +#define b_cr_asym_mem_region1_mchbar_port 0x4c +#define b_cr_asym_mem_region1_mchbar_offset 0x6e44 +#define b_cr_asym_mem_region1_mchbar_r_opcode 0x00 + +/* Some bit fields moved in above two structs on Denverton */ +struct b_cr_asym_mem_region_denverton { + u32 pad : 4; + u32 slice_asym_base : 8; + u32 pad_19_12 : 8; + u32 slice_asym_limit : 8; + u32 pad_28_30 : 3; + u32 slice_asym_enable : 1; +}; + +struct b_cr_asym_2way_mem_region_mchbar { + u32 pad : 2; + u32 asym_2way_intlv_mode : 2; + u32 asym_2way_base : 11; + u32 pad_16_15 : 2; + u32 asym_2way_limit : 11; + u32 pad_30_28 : 3; + u32 asym_2way_interleave_enable : 1; +}; + +#define b_cr_asym_2way_mem_region_mchbar_port 0x4c +#define b_cr_asym_2way_mem_region_mchbar_offset 0x6e50 +#define b_cr_asym_2way_mem_region_mchbar_r_opcode 0x00 + +/* Apollo Lake d-unit */ + +struct d_cr_drp0 { + u32 rken0 : 1; + u32 rken1 : 1; + u32 ddmen : 1; + u32 rsvd3 : 1; + u32 dwid : 2; + u32 dden : 3; + u32 
rsvd13_9 : 5; + u32 rsien : 1; + u32 bahen : 1; + u32 rsvd18_16 : 3; + u32 caswizzle : 2; + u32 eccen : 1; + u32 dramtype : 3; + u32 blmode : 3; + u32 addrdec : 2; + u32 dramdevice_pr : 2; +}; + +#define d_cr_drp0_offset 0x1400 +#define d_cr_drp0_r_opcode 0x00 + +/* Denverton d-unit */ + +struct d_cr_dsch { + u32 ch0en : 1; + u32 ch1en : 1; + u32 ddr4en : 1; + u32 coldwake : 1; + u32 newbypdis : 1; + u32 chan_width : 1; + u32 rsvd6_6 : 1; + u32 ooodis : 1; + u32 rsvd18_8 : 11; + u32 ic : 1; + u32 rsvd31_20 : 12; +}; + +#define d_cr_dsch_port 0x16 +#define d_cr_dsch_offset 0x0 +#define d_cr_dsch_r_opcode 0x0 + +struct d_cr_ecc_ctrl { + u32 eccen : 1; + u32 rsvd31_1 : 31; +}; + +#define d_cr_ecc_ctrl_offset 0x180 +#define d_cr_ecc_ctrl_r_opcode 0x0 + +struct d_cr_drp { + u32 rken0 : 1; + u32 rken1 : 1; + u32 rken2 : 1; + u32 rken3 : 1; + u32 dimmdwid0 : 2; + u32 dimmdden0 : 2; + u32 dimmdwid1 : 2; + u32 dimmdden1 : 2; + u32 rsvd15_12 : 4; + u32 dimmflip : 1; + u32 rsvd31_17 : 15; +}; + +#define d_cr_drp_offset 0x158 +#define d_cr_drp_r_opcode 0x0 + +struct d_cr_dmap { + u32 ba0 : 5; + u32 ba1 : 5; + u32 bg0 : 5; /* if ddr3, ba2 = bg0 */ + u32 bg1 : 5; /* if ddr3, ba3 = bg1 */ + u32 rs0 : 5; + u32 rs1 : 5; + u32 rsvd : 2; +}; + +#define d_cr_dmap_offset 0x174 +#define d_cr_dmap_r_opcode 0x0 + +struct d_cr_dmap1 { + u32 ca11 : 6; + u32 bxor : 1; + u32 rsvd : 25; +}; + +#define d_cr_dmap1_offset 0xb4 +#define d_cr_dmap1_r_opcode 0x0 + +struct d_cr_dmap2 { + u32 row0 : 5; + u32 row1 : 5; + u32 row2 : 5; + u32 row3 : 5; + u32 row4 : 5; + u32 row5 : 5; + u32 rsvd : 2; +}; + +#define d_cr_dmap2_offset 0x148 +#define d_cr_dmap2_r_opcode 0x0 + +struct d_cr_dmap3 { + u32 row6 : 5; + u32 row7 : 5; + u32 row8 : 5; + u32 row9 : 5; + u32 row10 : 5; + u32 row11 : 5; + u32 rsvd : 2; +}; + +#define d_cr_dmap3_offset 0x14c +#define d_cr_dmap3_r_opcode 0x0 + +struct d_cr_dmap4 { + u32 row12 : 5; + u32 row13 : 5; + u32 row14 : 5; + u32 row15 : 5; + u32 row16 : 5; + u32 row17 : 5; + u32 rsvd : 2; +}; + +#define d_cr_dmap4_offset 0x150 +#define d_cr_dmap4_r_opcode 0x0 + +struct d_cr_dmap5 { + u32 ca3 : 4; + u32 ca4 : 4; + u32 ca5 : 4; + u32 ca6 : 4; + u32 ca7 : 4; + u32 ca8 : 4; + u32 ca9 : 4; + u32 rsvd : 4; +}; + +#define d_cr_dmap5_offset 0x154 +#define d_cr_dmap5_r_opcode 0x0 + +#endif /* _PND2_REGS_H */ diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c index 6c270d9d304a8e..669246056812e8 100644 --- a/drivers/edac/xgene_edac.c +++ b/drivers/edac/xgene_edac.c @@ -1596,7 +1596,7 @@ static void xgene_edac_pa_report(struct edac_device_ctl_info *edac_dev) reg = readl(ctx->dev_csr + IOBPATRANSERRINTSTS); if (!reg) goto chk_iob_axi0; - dev_err(edac_dev->dev, "IOB procesing agent (PA) transaction error\n"); + dev_err(edac_dev->dev, "IOB processing agent (PA) transaction error\n"); if (reg & IOBPA_RDATA_CORRUPT_MASK) dev_err(edac_dev->dev, "IOB PA read data RAM error\n"); if (reg & IOBPA_M_RDATA_CORRUPT_MASK) diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig index 96bbae579c0b01..fc09c76248b416 100644 --- a/drivers/extcon/Kconfig +++ b/drivers/extcon/Kconfig @@ -44,7 +44,7 @@ config EXTCON_GPIO config EXTCON_INTEL_INT3496 tristate "Intel INT3496 ACPI device extcon driver" - depends on GPIOLIB && ACPI + depends on GPIOLIB && ACPI && (X86 || COMPILE_TEST) help Say Y here to enable extcon support for USB OTG ports controlled by an Intel INT3496 ACPI device. 
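The next diff converts the INT3496 driver from index-based GPIO lookups to named lookups backed by an ACPI mapping table. A condensed sketch of that mechanism follows; the calls are the ones the patch itself uses, but example_probe, example_gpios and the _CRS indices are illustrative stand-ins, not the driver's actual code:

	#include <linux/acpi.h>
	#include <linux/gpio/consumer.h>
	#include <linux/platform_device.h>

	/* _CRS entry 0, line 0, active high -- indices are illustrative */
	static const struct acpi_gpio_params id_gpios = { 0, 0, false };

	static const struct acpi_gpio_mapping example_gpios[] = {
		{ "id-gpios", &id_gpios, 1 },
		{ }
	};

	static int example_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		struct gpio_desc *gpio_usb_id;
		int ret;

		/* Register the name -> _CRS mapping with the GPIO ACPI core */
		ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(dev), example_gpios);
		if (ret)
			return ret;

		/* Look the line up by name instead of a hard-coded _CRS index */
		gpio_usb_id = devm_gpiod_get(dev, "id", GPIOD_IN);
		if (IS_ERR(gpio_usb_id)) {
			acpi_dev_remove_driver_gpios(ACPI_COMPANION(dev));
			return PTR_ERR(gpio_usb_id);
		}

		return 0;
	}

A mapping registered this way must be dropped again when the device goes away, which is why the patch also adds acpi_dev_remove_driver_gpios() to the remove path.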
diff --git a/drivers/extcon/extcon-intel-int3496.c b/drivers/extcon/extcon-intel-int3496.c index a3131b036de681..9d17984bbbd49a 100644 --- a/drivers/extcon/extcon-intel-int3496.c +++ b/drivers/extcon/extcon-intel-int3496.c @@ -45,6 +45,17 @@ static const unsigned int int3496_cable[] = { EXTCON_NONE, }; +static const struct acpi_gpio_params id_gpios = { INT3496_GPIO_USB_ID, 0, false }; +static const struct acpi_gpio_params vbus_gpios = { INT3496_GPIO_VBUS_EN, 0, false }; +static const struct acpi_gpio_params mux_gpios = { INT3496_GPIO_USB_MUX, 0, false }; + +static const struct acpi_gpio_mapping acpi_int3496_default_gpios[] = { + { "id-gpios", &id_gpios, 1 }, + { "vbus-gpios", &vbus_gpios, 1 }, + { "mux-gpios", &mux_gpios, 1 }, + { }, +}; + static void int3496_do_usb_id(struct work_struct *work) { struct int3496_data *data = @@ -83,6 +94,13 @@ static int int3496_probe(struct platform_device *pdev) struct int3496_data *data; int ret; + ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(dev), + acpi_int3496_default_gpios); + if (ret) { + dev_err(dev, "can't add GPIO ACPI mapping\n"); + return ret; + } + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; @@ -90,30 +108,27 @@ static int int3496_probe(struct platform_device *pdev) data->dev = dev; INIT_DELAYED_WORK(&data->work, int3496_do_usb_id); - data->gpio_usb_id = devm_gpiod_get_index(dev, "id", - INT3496_GPIO_USB_ID, - GPIOD_IN); + data->gpio_usb_id = devm_gpiod_get(dev, "id", GPIOD_IN); if (IS_ERR(data->gpio_usb_id)) { ret = PTR_ERR(data->gpio_usb_id); dev_err(dev, "can't request USB ID GPIO: %d\n", ret); return ret; + } else if (gpiod_get_direction(data->gpio_usb_id) != GPIOF_DIR_IN) { + dev_warn(dev, FW_BUG "USB ID GPIO not in input mode, fixing\n"); + gpiod_direction_input(data->gpio_usb_id); } data->usb_id_irq = gpiod_to_irq(data->gpio_usb_id); - if (data->usb_id_irq <= 0) { + if (data->usb_id_irq < 0) { dev_err(dev, "can't get USB ID IRQ: %d\n", data->usb_id_irq); - return -EINVAL; + return data->usb_id_irq; } - data->gpio_vbus_en = devm_gpiod_get_index(dev, "vbus en", - INT3496_GPIO_VBUS_EN, - GPIOD_ASIS); + data->gpio_vbus_en = devm_gpiod_get(dev, "vbus", GPIOD_ASIS); if (IS_ERR(data->gpio_vbus_en)) dev_info(dev, "can't request VBUS EN GPIO\n"); - data->gpio_usb_mux = devm_gpiod_get_index(dev, "usb mux", - INT3496_GPIO_USB_MUX, - GPIOD_ASIS); + data->gpio_usb_mux = devm_gpiod_get(dev, "mux", GPIOD_ASIS); if (IS_ERR(data->gpio_usb_mux)) dev_info(dev, "can't request USB MUX GPIO\n"); @@ -154,6 +169,8 @@ static int int3496_remove(struct platform_device *pdev) devm_free_irq(&pdev->dev, data->usb_id_irq, data); cancel_delayed_work_sync(&data->work); + acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pdev->dev)); + return 0; } diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c index 349dc3e1e52e0a..974c5a31a00598 100644 --- a/drivers/firmware/efi/arm-runtime.c +++ b/drivers/firmware/efi/arm-runtime.c @@ -65,6 +65,7 @@ static bool __init efi_virtmap_init(void) bool systab_found; efi_mm.pgd = pgd_alloc(&efi_mm); + mm_init_cpumask(&efi_mm); init_new_context(NULL, &efi_mm); systab_found = false; diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index e7d404059b7316..b372aad3b449c3 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -389,7 +389,6 @@ int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md) return 0; } } - pr_err_once("requested map not found.\n"); return -ENOENT; } diff --git a/drivers/firmware/efi/esrt.c 
b/drivers/firmware/efi/esrt.c index 08b026864d4e7d..8554d7aec31c64 100644 --- a/drivers/firmware/efi/esrt.c +++ b/drivers/firmware/efi/esrt.c @@ -254,7 +254,7 @@ void __init efi_esrt_init(void) rc = efi_mem_desc_lookup(efi.esrt, &md); if (rc < 0) { - pr_err("ESRT header is not in the memory map.\n"); + pr_warn("ESRT header is not in the memory map.\n"); return; } diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c index 6def402bf5691f..5da36e56b36a1c 100644 --- a/drivers/firmware/efi/libstub/secureboot.c +++ b/drivers/firmware/efi/libstub/secureboot.c @@ -45,6 +45,8 @@ enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table_arg) size = sizeof(secboot); status = get_efi_var(efi_SecureBoot_name, &efi_variable_guid, NULL, &size, &secboot); + if (status == EFI_NOT_FOUND) + return efi_secureboot_mode_disabled; if (status != EFI_SUCCESS) goto out_efi_err; @@ -78,7 +80,5 @@ enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table_arg) out_efi_err: pr_efi_err(sys_table_arg, "Could not determine UEFI Secure Boot status.\n"); - if (status == EFI_NOT_FOUND) - return efi_secureboot_mode_disabled; return efi_secureboot_mode_unknown; } diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c index 29d58feaf67535..6523ce96286597 100644 --- a/drivers/firmware/psci_checker.c +++ b/drivers/firmware/psci_checker.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c index 4ff02d310868b6..84e4c9a58a0c74 100644 --- a/drivers/firmware/tegra/bpmp.c +++ b/drivers/firmware/tegra/bpmp.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include diff --git a/drivers/gpio/gpio-altera-a10sr.c b/drivers/gpio/gpio-altera-a10sr.c index 9e1a138fed5337..16a8951b2beda3 100644 --- a/drivers/gpio/gpio-altera-a10sr.c +++ b/drivers/gpio/gpio-altera-a10sr.c @@ -96,7 +96,7 @@ static int altr_a10sr_gpio_probe(struct platform_device *pdev) gpio->regmap = a10sr->regmap; gpio->gp = altr_a10sr_gc; - + gpio->gp.parent = pdev->dev.parent; gpio->gp.of_node = pdev->dev.of_node; ret = devm_gpiochip_add_data(&pdev->dev, &gpio->gp, gpio); diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c index 5bddbd507ca9f1..3fe6a21e05a571 100644 --- a/drivers/gpio/gpio-altera.c +++ b/drivers/gpio/gpio-altera.c @@ -90,21 +90,18 @@ static int altera_gpio_irq_set_type(struct irq_data *d, altera_gc = gpiochip_get_data(irq_data_get_irq_chip_data(d)); - if (type == IRQ_TYPE_NONE) + if (type == IRQ_TYPE_NONE) { + irq_set_handler_locked(d, handle_bad_irq); return 0; - if (type == IRQ_TYPE_LEVEL_HIGH && - altera_gc->interrupt_trigger == IRQ_TYPE_LEVEL_HIGH) - return 0; - if (type == IRQ_TYPE_EDGE_RISING && - altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_RISING) - return 0; - if (type == IRQ_TYPE_EDGE_FALLING && - altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_FALLING) - return 0; - if (type == IRQ_TYPE_EDGE_BOTH && - altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_BOTH) + } + if (type == altera_gc->interrupt_trigger) { + if (type == IRQ_TYPE_LEVEL_HIGH) + irq_set_handler_locked(d, handle_level_irq); + else + irq_set_handler_locked(d, handle_simple_irq); return 0; - + } + irq_set_handler_locked(d, handle_bad_irq); return -EINVAL; } @@ -230,7 +227,6 @@ static void altera_gpio_irq_edge_handler(struct irq_desc *desc) chained_irq_exit(chip, desc); } - static void altera_gpio_irq_leveL_high_handler(struct irq_desc *desc) { struct 
altera_gpio_chip *altera_gc; @@ -310,7 +306,7 @@ static int altera_gpio_probe(struct platform_device *pdev) altera_gc->interrupt_trigger = reg; ret = gpiochip_irqchip_add(&altera_gc->mmchip.gc, &altera_irq_chip, 0, - handle_simple_irq, IRQ_TYPE_NONE); + handle_bad_irq, IRQ_TYPE_NONE); if (ret) { dev_err(&pdev->dev, "could not add irqchip\n"); diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c index bdb692345428cc..2a57d024481db8 100644 --- a/drivers/gpio/gpio-mcp23s08.c +++ b/drivers/gpio/gpio-mcp23s08.c @@ -270,8 +270,10 @@ mcp23s08_direction_output(struct gpio_chip *chip, unsigned offset, int value) static irqreturn_t mcp23s08_irq(int irq, void *data) { struct mcp23s08 *mcp = data; - int intcap, intf, i; + int intcap, intf, i, gpio, gpio_orig, intcap_mask; unsigned int child_irq; + bool intf_set, intcap_changed, gpio_bit_changed, + defval_changed, gpio_set; mutex_lock(&mcp->lock); if (mcp_read(mcp, MCP_INTF, &intf) < 0) { @@ -287,14 +289,67 @@ static irqreturn_t mcp23s08_irq(int irq, void *data) } mcp->cache[MCP_INTCAP] = intcap; + + /* This clears the interrupt (configurable on S18) */ + if (mcp_read(mcp, MCP_GPIO, &gpio) < 0) { + mutex_unlock(&mcp->lock); + return IRQ_HANDLED; + } + gpio_orig = mcp->cache[MCP_GPIO]; + mcp->cache[MCP_GPIO] = gpio; mutex_unlock(&mcp->lock); + if (mcp->cache[MCP_INTF] == 0) { + /* There is no interrupt pending */ + return IRQ_HANDLED; + } + + dev_dbg(mcp->chip.parent, + "intcap 0x%04X intf 0x%04X gpio_orig 0x%04X gpio 0x%04X\n", + intcap, intf, gpio_orig, gpio); for (i = 0; i < mcp->chip.ngpio; i++) { - if ((BIT(i) & mcp->cache[MCP_INTF]) && - ((BIT(i) & intcap & mcp->irq_rise) || - (mcp->irq_fall & ~intcap & BIT(i)) || - (BIT(i) & mcp->cache[MCP_INTCON]))) { + /* We must check all of the inputs on the chip, + * otherwise we may not notice a change on >=2 pins. + * + * On at least the mcp23s17, INTCAP is only updated + * one byte at a time (INTCAPA and INTCAPB are + * not written to at the same time - only on a per-bank + * basis). + * + * INTF only contains the single bit that caused the + * interrupt per-bank. On the mcp23s17, there is + * INTFA and INTFB. If two pins are changed on the A + * side at the same time, INTF will only have one bit + * set. If one pin on the A side and one pin on the B + * side are changed at the same time, INTF will have + * two bits set. Thus, INTF can't be the only check + * to see if the input has changed.
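+ * Concretely: if two A-side pins rise in the same window, INTF may + * flag only one of them, but comparing the cached GPIO value with + * the fresh read still shows the other pin changed, so both child + * interrupts get handled by the checks below.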
+ */ + + intf_set = BIT(i) & mcp->cache[MCP_INTF]; + if (i < 8 && intf_set) + intcap_mask = 0x00FF; + else if (i >= 8 && intf_set) + intcap_mask = 0xFF00; + else + intcap_mask = 0x00; + + intcap_changed = (intcap_mask & + (BIT(i) & mcp->cache[MCP_INTCAP])) != + (intcap_mask & (BIT(i) & gpio_orig)); + gpio_set = BIT(i) & mcp->cache[MCP_GPIO]; + gpio_bit_changed = (BIT(i) & gpio_orig) != + (BIT(i) & mcp->cache[MCP_GPIO]); + defval_changed = (BIT(i) & mcp->cache[MCP_INTCON]) && + ((BIT(i) & mcp->cache[MCP_GPIO]) != + (BIT(i) & mcp->cache[MCP_DEFVAL])); + + if (((gpio_bit_changed || intcap_changed) && + (BIT(i) & mcp->irq_rise) && gpio_set) || + ((gpio_bit_changed || intcap_changed) && + (BIT(i) & mcp->irq_fall) && !gpio_set) || + defval_changed) { child_irq = irq_find_mapping(mcp->chip.irqdomain, i); handle_nested_irq(child_irq); } diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c index 06dac72cb69c0c..d9933868921387 100644 --- a/drivers/gpio/gpio-mockup.c +++ b/drivers/gpio/gpio-mockup.c @@ -197,7 +197,7 @@ static ssize_t gpio_mockup_event_write(struct file *file, struct seq_file *sfile; struct gpio_desc *desc; struct gpio_chip *gc; - int status, val; + int val; char buf; sfile = file->private_data; @@ -206,9 +206,8 @@ static ssize_t gpio_mockup_event_write(struct file *file, chip = priv->chip; gc = &chip->gc; - status = copy_from_user(&buf, usr_buf, 1); - if (status) - return status; + if (copy_from_user(&buf, usr_buf, 1)) + return -EFAULT; if (buf == '0') val = 0; diff --git a/drivers/gpio/gpio-xgene.c b/drivers/gpio/gpio-xgene.c index 40a8881c2ce882..f1c6ec17b90a83 100644 --- a/drivers/gpio/gpio-xgene.c +++ b/drivers/gpio/gpio-xgene.c @@ -42,9 +42,7 @@ struct xgene_gpio { struct gpio_chip chip; void __iomem *base; spinlock_t lock; -#ifdef CONFIG_PM u32 set_dr_val[XGENE_MAX_GPIO_BANKS]; -#endif }; static int xgene_gpio_get(struct gpio_chip *gc, unsigned int offset) @@ -138,8 +136,7 @@ static int xgene_gpio_dir_out(struct gpio_chip *gc, return 0; } -#ifdef CONFIG_PM -static int xgene_gpio_suspend(struct device *dev) +static __maybe_unused int xgene_gpio_suspend(struct device *dev) { struct xgene_gpio *gpio = dev_get_drvdata(dev); unsigned long bank_offset; @@ -152,7 +149,7 @@ static int xgene_gpio_suspend(struct device *dev) return 0; } -static int xgene_gpio_resume(struct device *dev) +static __maybe_unused int xgene_gpio_resume(struct device *dev) { struct xgene_gpio *gpio = dev_get_drvdata(dev); unsigned long bank_offset; @@ -166,10 +163,6 @@ static int xgene_gpio_resume(struct device *dev) } static SIMPLE_DEV_PM_OPS(xgene_gpio_pm, xgene_gpio_suspend, xgene_gpio_resume); -#define XGENE_GPIO_PM_OPS (&xgene_gpio_pm) -#else -#define XGENE_GPIO_PM_OPS NULL -#endif static int xgene_gpio_probe(struct platform_device *pdev) { @@ -241,7 +234,7 @@ static struct platform_driver xgene_gpio_driver = { .name = "xgene-gpio", .of_match_table = xgene_gpio_of_match, .acpi_match_table = ACPI_PTR(xgene_gpio_acpi_match), - .pm = XGENE_GPIO_PM_OPS, + .pm = &xgene_gpio_pm, }, .probe = xgene_gpio_probe, }; diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 9b37a3692b3fee..2bd683e2be022d 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -266,6 +266,9 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, goto fail_free_event; } + if (agpio->wake_capable == ACPI_WAKE_CAPABLE) + enable_irq_wake(irq); + list_add_tail(&event->node, &acpi_gpio->events); return AE_OK; @@ -339,6 +342,9 @@ void 
acpi_gpiochip_free_interrupts(struct gpio_chip *chip) list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) { struct gpio_desc *desc; + if (irqd_is_wakeup_set(irq_get_irq_data(event->irq))) + disable_irq_wake(event->irq); + free_irq(event->irq, event); desc = event->desc; if (WARN_ON(IS_ERR(desc))) @@ -571,8 +577,10 @@ struct gpio_desc *acpi_find_gpio(struct device *dev, } desc = acpi_get_gpiod_by_index(adev, propname, idx, &info); - if (!IS_ERR(desc) || (PTR_ERR(desc) == -EPROBE_DEFER)) + if (!IS_ERR(desc)) break; + if (PTR_ERR(desc) == -EPROBE_DEFER) + return ERR_CAST(desc); } /* Then from plain _CRS GPIOs */ diff --git a/drivers/gpu/drm/amd/acp/Makefile b/drivers/gpu/drm/amd/acp/Makefile index 8363cb57915b0b..8a08e81ee90d57 100644 --- a/drivers/gpu/drm/amd/acp/Makefile +++ b/drivers/gpu/drm/amd/acp/Makefile @@ -3,6 +3,4 @@ # of AMDSOC/AMDGPU drm driver. # It provides the HW control for ACP related functionalities. -subdir-ccflags-y += -I$(AMDACPPATH)/ -I$(AMDACPPATH)/include - AMD_ACP_FILES := $(AMDACPPATH)/acp_hw.o diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index d2d0f60ff36d1f..99424cb8020bdf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -240,6 +240,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) for (; i >= 0; i--) drm_free_large(p->chunks[i].kdata); kfree(p->chunks); + p->chunks = NULL; + p->nchunks = 0; put_ctx: amdgpu_ctx_put(p->ctx); free_chunk: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 6abb238b25c97e..de0cf3315484c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -475,7 +475,7 @@ static int amdgpu_wb_init(struct amdgpu_device *adev) int r; if (adev->wb.wb_obj == NULL) { - r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * 4, + r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, &adev->wb.wb_obj, &adev->wb.gpu_addr, (void **)&adev->wb.wb); @@ -488,7 +488,7 @@ static int amdgpu_wb_init(struct amdgpu_device *adev) memset(&adev->wb.used, 0, sizeof(adev->wb.used)); /* clear wb memory */ - memset((char *)adev->wb.wb, 0, AMDGPU_GPU_PAGE_SIZE); + memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t)); } return 0; @@ -2094,8 +2094,11 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) } r = amdgpu_late_init(adev); - if (r) + if (r) { + if (fbcon) + console_unlock(); return r; + } /* pin cursors */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { @@ -2587,7 +2590,7 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf, use_bank = 0; } - *pos &= 0x3FFFF; + *pos &= (1UL << 22) - 1; if (use_bank) { if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) || @@ -2663,7 +2666,7 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf, use_bank = 0; } - *pos &= 0x3FFFF; + *pos &= (1UL << 22) - 1; if (use_bank) { if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) || diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 75fc376ba73587..b76cd699eb0d73 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -59,9 +59,10 @@ * - 3.7.0 - Add support for VCE clock list packet * - 3.8.0 - Add support raster config init in the kernel * - 3.9.0 - Add support for memory 
query info about VRAM and GTT. + * - 3.10.0 - Add support for new fences ioctl, new gem ioctl flags */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 9 +#define KMS_DRIVER_MINOR 10 #define KMS_DRIVER_PATCHLEVEL 0 int amdgpu_vram_limit = 0; @@ -420,6 +421,7 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x6985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, + {0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, {0, 0, 0} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 51d75946338460..106cf83c2e6b46 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -202,6 +202,27 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, bool kernel = false; int r; + /* reject invalid gem flags */ + if (args->in.domain_flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | + AMDGPU_GEM_CREATE_NO_CPU_ACCESS | + AMDGPU_GEM_CREATE_CPU_GTT_USWC | + AMDGPU_GEM_CREATE_VRAM_CLEARED | + AMDGPU_GEM_CREATE_SHADOW | + AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) { + r = -EINVAL; + goto error_unlock; + } + /* reject invalid gem domains */ + if (args->in.domains & ~(AMDGPU_GEM_DOMAIN_CPU | + AMDGPU_GEM_DOMAIN_GTT | + AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GDS | + AMDGPU_GEM_DOMAIN_GWS | + AMDGPU_GEM_DOMAIN_OA)) { + r = -EINVAL; + goto error_unlock; + } + /* create a gem object to contain this object in */ if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) { diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 31375bdde6f176..011800f621c6ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -788,7 +788,7 @@ static int sdma_v3_0_start(struct amdgpu_device *adev) } } - /* disble sdma engine before programing it */ + /* disable sdma engine before programming it */ sdma_v3_0_ctx_switch_enable(adev, false); sdma_v3_0_enable(adev, false); diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index f55e45b52fbce2..c5dec210d52999 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c @@ -3464,6 +3464,16 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, (adev->pdev->device == 0x6667)) { max_sclk = 75000; } + } else if (adev->asic_type == CHIP_OLAND) { + if ((adev->pdev->revision == 0xC7) || + (adev->pdev->revision == 0x80) || + (adev->pdev->revision == 0x81) || + (adev->pdev->revision == 0x83) || + (adev->pdev->revision == 0x87) || + (adev->pdev->device == 0x6604) || + (adev->pdev->device == 0x6605)) { + max_sclk = 75000; + } } if (rps->vce_active) { diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 50bdb24ef8d6e9..4a785d6acfb9af 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -1051,7 +1051,7 @@ static int vi_common_early_init(void *handle) /* rev0 hardware requires workarounds to support PG */ adev->pg_flags = 0; if (adev->rev_id != 0x00) { - adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | + adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG | AMD_PG_SUPPORT_GFX_PIPELINE | AMD_PG_SUPPORT_CP | diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index 6a3470f849989a..d1ce83d73a877b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index d83de985e88cf8..6acc4313363e1f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -23,6 +23,8 @@ #include #include +#include + #include "kfd_priv.h" #include "kfd_mqd_manager.h" #include "cik_regs.h" diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c index fa32c32fa1c2bc..a9b9882a9a7723 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c @@ -23,6 +23,8 @@ #include #include +#include + #include "kfd_priv.h" #include "kfd_mqd_manager.h" #include "vi_structs.h" diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index ca5f2aa7232da7..84d1ffd1eef950 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c index 8cf71f3c6d0ea4..261b828ad59086 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c @@ -178,7 +178,7 @@ int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) if (bgate) { cgs_set_powergating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_UNGATE); + AMD_PG_STATE_GATE); cgs_set_clockgating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_VCE, AMD_CG_STATE_GATE); diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 1bf83ed113b3cc..16f96563cd2b8b 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include "gpu_scheduler.h" diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c index 08e6a71f5d05f4..294b53697334cc 100644 --- a/drivers/gpu/drm/arm/malidp_crtc.c +++ b/drivers/gpu/drm/arm/malidp_crtc.c @@ -63,8 +63,7 @@ static void malidp_crtc_enable(struct drm_crtc *crtc) clk_prepare_enable(hwdev->pxlclk); - /* mclk needs to be set to the same or higher rate than pxlclk */ - clk_set_rate(hwdev->mclk, crtc->state->adjusted_mode.crtc_clock * 1000); + /* We rely on firmware to set mclk to a sensible level. 
*/ clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000); hwdev->modeset(hwdev, &vm); diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c index 488aedf5b58d54..9f5513006eeef8 100644 --- a/drivers/gpu/drm/arm/malidp_hw.c +++ b/drivers/gpu/drm/arm/malidp_hw.c @@ -83,7 +83,7 @@ static const struct malidp_layer malidp550_layers[] = { { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE, MALIDP_DE_LV_STRIDE0 }, { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE, MALIDP_DE_LG_STRIDE }, { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE, MALIDP_DE_LV_STRIDE0 }, - { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, 0 }, + { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, MALIDP550_DE_LS_R1_STRIDE }, }; #define MALIDP_DE_DEFAULT_PREFETCH_START 5 diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c index 414aada10fe5e7..d5aec082294cbd 100644 --- a/drivers/gpu/drm/arm/malidp_planes.c +++ b/drivers/gpu/drm/arm/malidp_planes.c @@ -37,6 +37,8 @@ #define LAYER_V_VAL(x) (((x) & 0x1fff) << 16) #define MALIDP_LAYER_COMP_SIZE 0x010 #define MALIDP_LAYER_OFFSET 0x014 +#define MALIDP550_LS_ENABLE 0x01c +#define MALIDP550_LS_R1_IN_SIZE 0x020 /* * This 4-entry look-up-table is used to determine the full 8-bit alpha value @@ -242,6 +244,11 @@ static void malidp_de_plane_update(struct drm_plane *plane, LAYER_V_VAL(plane->state->crtc_y), mp->layer->base + MALIDP_LAYER_OFFSET); + if (mp->layer->id == DE_SMART) + malidp_hw_write(mp->hwdev, + LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h), + mp->layer->base + MALIDP550_LS_R1_IN_SIZE); + /* first clear the rotation bits */ val = malidp_hw_read(mp->hwdev, mp->layer->base + MALIDP_LAYER_CONTROL); val &= ~LAYER_ROT_MASK; @@ -330,9 +337,16 @@ int malidp_de_planes_init(struct drm_device *drm) plane->hwdev = malidp->dev; plane->layer = &map->layers[i]; - /* Skip the features which the SMART layer doesn't have */ - if (id == DE_SMART) + if (id == DE_SMART) { + /* + * Enable the first rectangle in the SMART layer to be + * able to use it as a drm plane. + */ + malidp_hw_write(malidp->dev, 1, + plane->layer->base + MALIDP550_LS_ENABLE); + /* Skip the features which the SMART layer doesn't have. 
*/ continue; + } drm_plane_create_rotation_property(&plane->base, DRM_ROTATE_0, flags); malidp_hw_write(malidp->dev, MALIDP_ALPHA_LUT, diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h index aff6d4a84e998c..b816067a65c572 100644 --- a/drivers/gpu/drm/arm/malidp_regs.h +++ b/drivers/gpu/drm/arm/malidp_regs.h @@ -84,6 +84,7 @@ /* Stride register offsets relative to Lx_BASE */ #define MALIDP_DE_LG_STRIDE 0x18 #define MALIDP_DE_LV_STRIDE0 0x18 +#define MALIDP550_DE_LS_R1_STRIDE 0x28 /* macros to set values into registers */ #define MALIDP_DE_H_FRONTPORCH(x) (((x) & 0xfff) << 0) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index c8baab9bee0d05..ba58f1b11d1e16 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -148,6 +148,9 @@ static const struct edid_quirk { /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */ { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC }, + + /* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/ + { "ETR", 13896, EDID_QUIRK_FORCE_8BPC }, }; /* diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index f6d4d9700734e6..324a688b3f3013 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -1260,9 +1260,9 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var, * to KMS, hence fail if different settings are requested. */ if (var->bits_per_pixel != fb->format->cpp[0] * 8 || - var->xres != fb->width || var->yres != fb->height || - var->xres_virtual != fb->width || var->yres_virtual != fb->height) { - DRM_DEBUG("fb userspace requested width/height/bpp different than current fb " + var->xres > fb->width || var->yres > fb->height || + var->xres_virtual > fb->width || var->yres_virtual > fb->height) { + DRM_DEBUG("fb requested width/height/bpp can't fit in current fb " "request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n", var->xres, var->yres, var->bits_per_pixel, var->xres_virtual, var->yres_virtual, diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c index 32d43f86a8f20f..96bb6badb818d1 100644 --- a/drivers/gpu/drm/drm_lock.c +++ b/drivers/gpu/drm/drm_lock.c @@ -34,6 +34,8 @@ */ #include +#include + #include #include "drm_legacy.h" #include "drm_internal.h" diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index e78f1406885d10..fd56f92f3469a0 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -16,6 +16,8 @@ #include #include +#include +#include #include "etnaviv_drv.h" #include "etnaviv_gem.h" diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index 130d7d517a19a1..da48819ff2e655 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -1311,6 +1311,8 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu, goto out_pm_put; } + mutex_lock(&gpu->lock); + fence = etnaviv_gpu_fence_alloc(gpu); if (!fence) { event_free(gpu, event); @@ -1318,8 +1320,6 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu, goto out_pm_put; } - mutex_lock(&gpu->lock); - gpu->event[event].fence = fence; submit->fence = fence->seqno; gpu->active_fence = submit->fence; diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index 0fd6f7a18364a6..c0e8d3302292c9 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -68,6 +68,8 @@ struct decon_context { unsigned long flags; unsigned 
long out_type; int first_win; + spinlock_t vblank_lock; + u32 frame_id; }; static const uint32_t decon_formats[] = { @@ -103,7 +105,7 @@ static int decon_enable_vblank(struct exynos_drm_crtc *crtc) if (ctx->out_type & IFTYPE_I80) val |= VIDINTCON0_FRAMEDONE; else - val |= VIDINTCON0_INTFRMEN; + val |= VIDINTCON0_INTFRMEN | VIDINTCON0_FRAMESEL_FP; writel(val, ctx->addr + DECON_VIDINTCON0); } @@ -122,14 +124,56 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc) writel(0, ctx->addr + DECON_VIDINTCON0); } +/* return number of starts/ends of frame transmissions since reset */ +static u32 decon_get_frame_count(struct decon_context *ctx, bool end) +{ + u32 frm, pfrm, status, cnt = 2; + + /* To get a consistent result, repeat the read until the frame id is + * stable. Usually the loop will be executed once; in the rare case + * that it runs right at frame change time, a second pass is needed. + */ + frm = readl(ctx->addr + DECON_CRFMID); + do { + status = readl(ctx->addr + DECON_VIDCON1); + pfrm = frm; + frm = readl(ctx->addr + DECON_CRFMID); + } while (frm != pfrm && --cnt); + + /* CRFMID is incremented on BPORCH in case of I80 and on VSYNC in case + * of RGB; this should be taken into account. + */ + if (!frm) + return 0; + + switch (status & (VIDCON1_VSTATUS_MASK | VIDCON1_I80_ACTIVE)) { + case VIDCON1_VSTATUS_VS: + if (!(ctx->out_type & IFTYPE_I80)) + --frm; + break; + case VIDCON1_VSTATUS_BP: + --frm; + break; + case VIDCON1_I80_ACTIVE: + case VIDCON1_VSTATUS_AC: + if (end) + --frm; + break; + default: + break; + } + + return frm; +} + static void decon_setup_trigger(struct decon_context *ctx) { if (!(ctx->out_type & (IFTYPE_I80 | I80_HW_TRG))) return; if (!(ctx->out_type & I80_HW_TRG)) { - writel(TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN - | TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN, + writel(TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F | + TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN, ctx->addr + DECON_TRIGCON); return; } @@ -365,11 +409,14 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc, static void decon_atomic_flush(struct exynos_drm_crtc *crtc) { struct decon_context *ctx = crtc->ctx; + unsigned long flags; int i; if (test_bit(BIT_SUSPENDED, &ctx->flags)) return; + spin_lock_irqsave(&ctx->vblank_lock, flags); + for (i = ctx->first_win; i < WINDOWS_NR; i++) decon_shadow_protect_win(ctx, i, false); @@ -378,11 +425,18 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc) if (ctx->out_type & IFTYPE_I80) set_bit(BIT_WIN_UPDATED, &ctx->flags); + + ctx->frame_id = decon_get_frame_count(ctx, true); + + exynos_crtc_handle_event(crtc); + + spin_unlock_irqrestore(&ctx->vblank_lock, flags); } static void decon_swreset(struct decon_context *ctx) { unsigned int tries; + unsigned long flags; writel(0, ctx->addr + DECON_VIDCON0); for (tries = 2000; tries; --tries) { @@ -400,6 +454,10 @@ static void decon_swreset(struct decon_context *ctx) WARN(tries == 0, "failed to software reset DECON\n"); + spin_lock_irqsave(&ctx->vblank_lock, flags); + ctx->frame_id = 0; + spin_unlock_irqrestore(&ctx->vblank_lock, flags); + if (!(ctx->out_type & IFTYPE_HDMI)) return; @@ -578,6 +636,24 @@ static const struct component_ops decon_component_ops = { .unbind = decon_unbind, }; +static void decon_handle_vblank(struct decon_context *ctx) +{ + u32 frm; + + spin_lock(&ctx->vblank_lock); + + frm = decon_get_frame_count(ctx, true); + + if (frm != ctx->frame_id) { + /* handle only if incremented, take care of wrap-around */ + if ((s32)(frm - ctx->frame_id) > 0) + drm_crtc_handle_vblank(&ctx->crtc->base); +
ctx->frame_id = frm; + } + + spin_unlock(&ctx->vblank_lock); +} + static irqreturn_t decon_irq_handler(int irq, void *dev_id) { struct decon_context *ctx = dev_id; @@ -598,7 +674,7 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id) (VIDOUT_INTERLACE_EN_F | VIDOUT_INTERLACE_FIELD_F)) return IRQ_HANDLED; } - drm_crtc_handle_vblank(&ctx->crtc->base); + decon_handle_vblank(ctx); } out: @@ -671,6 +747,7 @@ static int exynos5433_decon_probe(struct platform_device *pdev) __set_bit(BIT_SUSPENDED, &ctx->flags); ctx->dev = dev; ctx->out_type = (unsigned long)of_device_get_match_data(dev); + spin_lock_init(&ctx->vblank_lock); if (ctx->out_type & IFTYPE_HDMI) { ctx->first_win = 1; @@ -678,7 +755,7 @@ static int exynos5433_decon_probe(struct platform_device *pdev) ctx->out_type |= IFTYPE_I80; } - if (ctx->out_type | I80_HW_TRG) { + if (ctx->out_type & I80_HW_TRG) { ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node, "samsung,disp-sysreg"); if (IS_ERR(ctx->sysreg)) { diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index f9ab19e205e243..48811806fa2727 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c @@ -526,6 +526,7 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc) for (i = 0; i < WINDOWS_NR; i++) decon_shadow_protect_win(ctx, i, false); + exynos_crtc_handle_event(crtc); } static void decon_init(struct decon_context *ctx) diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index 5367b6664fe37d..c65f4509932c56 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -85,16 +85,28 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) { struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); - struct drm_pending_vblank_event *event; - unsigned long flags; if (exynos_crtc->ops->atomic_flush) exynos_crtc->ops->atomic_flush(exynos_crtc); +} + +static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = { + .enable = exynos_drm_crtc_enable, + .disable = exynos_drm_crtc_disable, + .mode_set_nofb = exynos_drm_crtc_mode_set_nofb, + .atomic_check = exynos_crtc_atomic_check, + .atomic_begin = exynos_crtc_atomic_begin, + .atomic_flush = exynos_crtc_atomic_flush, +}; + +void exynos_crtc_handle_event(struct exynos_drm_crtc *exynos_crtc) +{ + struct drm_crtc *crtc = &exynos_crtc->base; + struct drm_pending_vblank_event *event = crtc->state->event; + unsigned long flags; - event = crtc->state->event; if (event) { crtc->state->event = NULL; - spin_lock_irqsave(&crtc->dev->event_lock, flags); if (drm_crtc_vblank_get(crtc) == 0) drm_crtc_arm_vblank_event(crtc, event); @@ -105,15 +117,6 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc, } -static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = { - .enable = exynos_drm_crtc_enable, - .disable = exynos_drm_crtc_disable, - .mode_set_nofb = exynos_drm_crtc_mode_set_nofb, - .atomic_check = exynos_crtc_atomic_check, - .atomic_begin = exynos_crtc_atomic_begin, - .atomic_flush = exynos_crtc_atomic_flush, -}; - static void exynos_drm_crtc_destroy(struct drm_crtc *crtc) { struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h index 6a581a8af4650f..abd5d6ceac0c2f 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h @@ 
-40,4 +40,6 @@ int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev, */ void exynos_drm_crtc_te_handler(struct drm_crtc *crtc); +void exynos_crtc_handle_event(struct exynos_drm_crtc *exynos_crtc); + #endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index 812e2ec0761d0b..d7ef26370e67c5 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -86,7 +86,7 @@ #define DSIM_SYNC_INFORM (1 << 27) #define DSIM_EOT_DISABLE (1 << 28) #define DSIM_MFLUSH_VS (1 << 29) -/* This flag is valid only for exynos3250/3472/4415/5260/5430 */ +/* This flag is valid only for exynos3250/3472/5260/5430 */ #define DSIM_CLKLANE_STOP (1 << 30) /* DSIM_ESCMODE */ @@ -473,17 +473,6 @@ static const struct exynos_dsi_driver_data exynos4_dsi_driver_data = { .reg_values = reg_values, }; -static const struct exynos_dsi_driver_data exynos4415_dsi_driver_data = { - .reg_ofs = exynos_reg_ofs, - .plltmr_reg = 0x58, - .has_clklane_stop = 1, - .num_clks = 2, - .max_freq = 1000, - .wait_for_reset = 1, - .num_bits_resol = 11, - .reg_values = reg_values, -}; - static const struct exynos_dsi_driver_data exynos5_dsi_driver_data = { .reg_ofs = exynos_reg_ofs, .plltmr_reg = 0x58, @@ -521,8 +510,6 @@ static const struct of_device_id exynos_dsi_of_match[] = { .data = &exynos3_dsi_driver_data }, { .compatible = "samsung,exynos4210-mipi-dsi", .data = &exynos4_dsi_driver_data }, - { .compatible = "samsung,exynos4415-mipi-dsi", - .data = &exynos4415_dsi_driver_data }, { .compatible = "samsung,exynos5410-mipi-dsi", .data = &exynos5_dsi_driver_data }, { .compatible = "samsung,exynos5422-mipi-dsi", @@ -979,7 +966,7 @@ static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi, bool first = !xfer->tx_done; u32 reg; - dev_dbg(dev, "< xfer %p: tx len %u, done %u, rx len %u, done %u\n", + dev_dbg(dev, "< xfer %pK: tx len %u, done %u, rx len %u, done %u\n", xfer, length, xfer->tx_done, xfer->rx_len, xfer->rx_done); if (length > DSI_TX_FIFO_SIZE) @@ -1177,7 +1164,7 @@ static bool exynos_dsi_transfer_finish(struct exynos_dsi *dsi) spin_unlock_irqrestore(&dsi->transfer_lock, flags); dev_dbg(dsi->dev, - "> xfer %p, tx_len %zu, tx_done %u, rx_len %u, rx_done %u\n", + "> xfer %pK, tx_len %zu, tx_done %u, rx_len %u, rx_done %u\n", xfer, xfer->packet.payload_length, xfer->tx_done, xfer->rx_len, xfer->rx_done); @@ -1348,9 +1335,12 @@ static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi) int te_gpio_irq; dsi->te_gpio = of_get_named_gpio(dsi->panel_node, "te-gpios", 0); + if (dsi->te_gpio == -ENOENT) + return 0; + if (!gpio_is_valid(dsi->te_gpio)) { - dev_err(dsi->dev, "no te-gpios specified\n"); ret = dsi->te_gpio; + dev_err(dsi->dev, "cannot get te-gpios, %d\n", ret); goto out; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index 95871577015d8a..5b18b5c5fdf255 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -1695,7 +1695,7 @@ static int fimc_probe(struct platform_device *pdev) goto err_put_clk; } - DRM_DEBUG_KMS("id[%d]ippdrv[%p]\n", ctx->id, ippdrv); + DRM_DEBUG_KMS("id[%d]ippdrv[%pK]\n", ctx->id, ippdrv); spin_lock_init(&ctx->lock); platform_set_drvdata(pdev, ctx); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index a9fa444c6053c0..3f04d72c448d38 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -71,10 +71,10 @@ #define TRIGCON 
0x1A4 #define TRGMODE_ENABLE (1 << 0) #define SWTRGCMD_ENABLE (1 << 1) -/* Exynos3250, 3472, 4415, 5260 5410, 5420 and 5422 only supported. */ +/* Exynos3250, 3472, 5260 5410, 5420 and 5422 only supported. */ #define HWTRGEN_ENABLE (1 << 3) #define HWTRGMASK_ENABLE (1 << 4) -/* Exynos3250, 3472, 4415, 5260, 5420 and 5422 only supported. */ +/* Exynos3250, 3472, 5260, 5420 and 5422 only supported. */ #define HWTRIGEN_PER_ENABLE (1 << 31) /* display mode change control register except exynos4 */ @@ -138,18 +138,6 @@ static struct fimd_driver_data exynos4_fimd_driver_data = { .has_vtsel = 1, }; -static struct fimd_driver_data exynos4415_fimd_driver_data = { - .timing_base = 0x20000, - .lcdblk_offset = 0x210, - .lcdblk_vt_shift = 10, - .lcdblk_bypass_shift = 1, - .trg_type = I80_HW_TRG, - .has_shadowcon = 1, - .has_vidoutcon = 1, - .has_vtsel = 1, - .has_trigger_per_te = 1, -}; - static struct fimd_driver_data exynos5_fimd_driver_data = { .timing_base = 0x20000, .lcdblk_offset = 0x214, @@ -210,8 +198,6 @@ static const struct of_device_id fimd_driver_dt_match[] = { .data = &exynos3_fimd_driver_data }, { .compatible = "samsung,exynos4210-fimd", .data = &exynos4_fimd_driver_data }, - { .compatible = "samsung,exynos4415-fimd", - .data = &exynos4415_fimd_driver_data }, { .compatible = "samsung,exynos5250-fimd", .data = &exynos5_fimd_driver_data }, { .compatible = "samsung,exynos5420-fimd", @@ -257,7 +243,7 @@ static int fimd_enable_vblank(struct exynos_drm_crtc *crtc) val |= VIDINTCON0_INT_FRAME; val &= ~VIDINTCON0_FRAMESEL0_MASK; - val |= VIDINTCON0_FRAMESEL0_VSYNC; + val |= VIDINTCON0_FRAMESEL0_FRONTPORCH; val &= ~VIDINTCON0_FRAMESEL1_MASK; val |= VIDINTCON0_FRAMESEL1_NONE; } @@ -723,6 +709,8 @@ static void fimd_atomic_flush(struct exynos_drm_crtc *crtc) for (i = 0; i < WINDOWS_NR; i++) fimd_shadow_protect_win(ctx, i, false); + + exynos_crtc_handle_event(crtc); } static void fimd_update_plane(struct exynos_drm_crtc *crtc, diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index 4c28f7ffcc4dd1..55a1579d11b3d7 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -218,7 +218,7 @@ static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev, return ERR_PTR(ret); } - DRM_DEBUG_KMS("created file object = %p\n", obj->filp); + DRM_DEBUG_KMS("created file object = %pK\n", obj->filp); return exynos_gem; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index bef57987759d2c..0506b2b17ac1c4 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -1723,7 +1723,7 @@ static int gsc_probe(struct platform_device *pdev) return ret; } - DRM_DEBUG_KMS("id[%d]ippdrv[%p]\n", ctx->id, ippdrv); + DRM_DEBUG_KMS("id[%d]ippdrv[%pK]\n", ctx->id, ippdrv); mutex_init(&ctx->lock); platform_set_drvdata(pdev, ctx); diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c index 9c84ee76f18adc..3edda18cc2d2d6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c +++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c @@ -208,7 +208,7 @@ static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id) * e.g PAUSE state, queue buf, command control. 
*/ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { - DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n", count++, ippdrv); + DRM_DEBUG_KMS("count[%d]ippdrv[%pK]\n", count++, ippdrv); mutex_lock(&ippdrv->cmd_lock); list_for_each_entry(c_node, &ippdrv->cmd_list, list) { @@ -388,7 +388,7 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data, } property->prop_id = ret; - DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[%p]\n", + DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[%pK]\n", property->prop_id, property->cmd, ippdrv); /* stored property information and ippdrv in private data */ @@ -518,7 +518,7 @@ static int ipp_put_mem_node(struct drm_device *drm_dev, { int i; - DRM_DEBUG_KMS("node[%p]\n", m_node); + DRM_DEBUG_KMS("node[%pK]\n", m_node); if (!m_node) { DRM_ERROR("invalid dequeue node.\n"); @@ -562,7 +562,7 @@ static struct drm_exynos_ipp_mem_node m_node->buf_id = qbuf->buf_id; INIT_LIST_HEAD(&m_node->list); - DRM_DEBUG_KMS("m_node[%p]ops_id[%d]\n", m_node, qbuf->ops_id); + DRM_DEBUG_KMS("m_node[%pK]ops_id[%d]\n", m_node, qbuf->ops_id); DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id); for_each_ipp_planar(i) { @@ -659,7 +659,7 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node, mutex_lock(&c_node->event_lock); list_for_each_entry_safe(e, te, &c_node->event_list, base.link) { - DRM_DEBUG_KMS("count[%d]e[%p]\n", count++, e); + DRM_DEBUG_KMS("count[%d]e[%pK]\n", count++, e); /* * qbuf == NULL condition means all event deletion. @@ -750,7 +750,7 @@ static struct drm_exynos_ipp_mem_node /* find memory node from memory list */ list_for_each_entry(m_node, head, list) { - DRM_DEBUG_KMS("count[%d]m_node[%p]\n", count++, m_node); + DRM_DEBUG_KMS("count[%d]m_node[%pK]\n", count++, m_node); /* compare buffer id */ if (m_node->buf_id == qbuf->buf_id) @@ -767,7 +767,7 @@ static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv, struct exynos_drm_ipp_ops *ops = NULL; int ret = 0; - DRM_DEBUG_KMS("node[%p]\n", m_node); + DRM_DEBUG_KMS("node[%pK]\n", m_node); if (!m_node) { DRM_ERROR("invalid queue node.\n"); @@ -1232,7 +1232,7 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv, m_node = list_first_entry(head, struct drm_exynos_ipp_mem_node, list); - DRM_DEBUG_KMS("m_node[%p]\n", m_node); + DRM_DEBUG_KMS("m_node[%pK]\n", m_node); ret = ipp_set_mem_node(ippdrv, c_node, m_node); if (ret) { @@ -1601,7 +1601,7 @@ static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev) } ippdrv->prop_list.ipp_id = ret; - DRM_DEBUG_KMS("count[%d]ippdrv[%p]ipp_id[%d]\n", + DRM_DEBUG_KMS("count[%d]ippdrv[%pK]ipp_id[%d]\n", count++, ippdrv, ret); /* store parent device for node */ @@ -1659,7 +1659,7 @@ static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev, file_priv->ipp_dev = dev; - DRM_DEBUG_KMS("done priv[%p]\n", dev); + DRM_DEBUG_KMS("done priv[%pK]\n", dev); return 0; } @@ -1676,7 +1676,7 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev, mutex_lock(&ippdrv->cmd_lock); list_for_each_entry_safe(c_node, tc_node, &ippdrv->cmd_list, list) { - DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n", + DRM_DEBUG_KMS("count[%d]ippdrv[%pK]\n", count++, ippdrv); if (c_node->filp == file) { diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c index 6591e406084c16..79282a820ecce1 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c @@ -748,7 +748,7 @@ static int rotator_probe(struct platform_device 
*pdev) goto err_ippdrv_register; } - DRM_DEBUG_KMS("ippdrv[%p]\n", ippdrv); + DRM_DEBUG_KMS("ippdrv[%pK]\n", ippdrv); platform_set_drvdata(pdev, rot); diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index 57fe514d5c5bf9..5d9a62a87eec75 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -170,6 +170,7 @@ static const struct exynos_drm_crtc_ops vidi_crtc_ops = { .enable_vblank = vidi_enable_vblank, .disable_vblank = vidi_disable_vblank, .update_plane = vidi_update_plane, + .atomic_flush = exynos_crtc_handle_event, }; static void vidi_fake_vblank_timer(unsigned long arg) diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 72143ac1052526..25edb635a19762 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -1012,6 +1012,7 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc) return; mixer_vsync_set_update(mixer_ctx, true); + exynos_crtc_handle_event(crtc); } static void mixer_enable(struct exynos_drm_crtc *crtc) diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c index 3b6caaca975135..325618d969feed 100644 --- a/drivers/gpu/drm/i915/gvt/aperture_gm.c +++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c @@ -242,7 +242,7 @@ static int alloc_resource(struct intel_vgpu *vgpu, const char *item; if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) { - gvt_err("Invalid vGPU creation params\n"); + gvt_vgpu_err("Invalid vGPU creation params\n"); return -EINVAL; } @@ -285,9 +285,9 @@ static int alloc_resource(struct intel_vgpu *vgpu, return 0; no_enough_resource: - gvt_err("vgpu%d: fail to allocate resource %s\n", vgpu->id, item); - gvt_err("vgpu%d: request %luMB avail %luMB max %luMB taken %luMB\n", - vgpu->id, BYTES_TO_MB(request), BYTES_TO_MB(avail), + gvt_vgpu_err("fail to allocate resource %s\n", item); + gvt_vgpu_err("request %luMB avail %luMB max %luMB taken %luMB\n", + BYTES_TO_MB(request), BYTES_TO_MB(avail), BYTES_TO_MB(max), BYTES_TO_MB(taken)); return -ENOSPC; } diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c index 4a6a2ed65732e1..b7d7721e72fadd 100644 --- a/drivers/gpu/drm/i915/gvt/cfg_space.c +++ b/drivers/gpu/drm/i915/gvt/cfg_space.c @@ -41,6 +41,54 @@ enum { INTEL_GVT_PCI_BAR_MAX, }; +/* bitmap for writable bits (RW or RW1C bits, but cannot co-exist in one + * byte) byte by byte in standard pci configuration space. (not the full + * 256 bytes.) + */ +static const u8 pci_cfg_space_rw_bmp[PCI_INTERRUPT_LINE + 4] = { + [PCI_COMMAND] = 0xff, 0x07, + [PCI_STATUS] = 0x00, 0xf9, /* the only one RW1C byte */ + [PCI_CACHE_LINE_SIZE] = 0xff, + [PCI_BASE_ADDRESS_0 ... PCI_CARDBUS_CIS - 1] = 0xff, + [PCI_ROM_ADDRESS] = 0x01, 0xf8, 0xff, 0xff, + [PCI_INTERRUPT_LINE] = 0xff, +}; + +/** + * vgpu_pci_cfg_mem_write - write virtual cfg space memory + * + * Use this function to write virtual cfg space memory. + * For standard cfg space, only RW bits can be changed, + * and we emulate the RW1C behavior of the PCI_STATUS register. 
+ */ +static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off, + u8 *src, unsigned int bytes) +{ + u8 *cfg_base = vgpu_cfg_space(vgpu); + u8 mask, new, old; + int i = 0; + + for (; i < bytes && (off + i < sizeof(pci_cfg_space_rw_bmp)); i++) { + mask = pci_cfg_space_rw_bmp[off + i]; + old = cfg_base[off + i]; + new = src[i] & mask; + + /** + * The PCI_STATUS high byte has RW1C bits, here + * emulates clear by writing 1 for these bits. + * Writing a 0b to RW1C bits has no effect. + */ + if (off + i == PCI_STATUS + 1) + new = (~new & old) & mask; + + cfg_base[off + i] = (old & ~mask) | new; + } + + /* For other configuration space directly copy as it is. */ + if (i < bytes) + memcpy(cfg_base + off + i, src + i, bytes - i); +} + /** * intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read * @@ -123,7 +171,7 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu, u8 changed = old ^ new; int ret; - memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes); + vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes); if (!(changed & PCI_COMMAND_MEMORY)) return 0; @@ -237,6 +285,9 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset, { int ret; + if (vgpu->failsafe) + return 0; + if (WARN_ON(bytes > 4)) return -EINVAL; @@ -274,10 +325,10 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset, if (ret) return ret; - memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes); + vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes); break; default: - memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes); + vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes); break; } return 0; diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index b9c8e2407682fc..2b92cc8a7d1aa5 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -668,7 +668,7 @@ static inline void print_opcode(u32 cmd, int ring_id) if (d_info == NULL) return; - gvt_err("opcode=0x%x %s sub_ops:", + gvt_dbg_cmd("opcode=0x%x %s sub_ops:", cmd >> (32 - d_info->op_len), d_info->name); for (i = 0; i < d_info->nr_sub_op; i++) @@ -693,23 +693,23 @@ static void parser_exec_state_dump(struct parser_exec_state *s) int cnt = 0; int i; - gvt_err(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)" + gvt_dbg_cmd(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)" " ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id, s->ring_id, s->ring_start, s->ring_start + s->ring_size, s->ring_head, s->ring_tail); - gvt_err(" %s %s ip_gma(%08lx) ", + gvt_dbg_cmd(" %s %s ip_gma(%08lx) ", s->buf_type == RING_BUFFER_INSTRUCTION ? "RING_BUFFER" : "BATCH_BUFFER", s->buf_addr_type == GTT_BUFFER ? 
"GTT" : "PPGTT", s->ip_gma); if (s->ip_va == NULL) { - gvt_err(" ip_va(NULL)"); + gvt_dbg_cmd(" ip_va(NULL)"); return; } - gvt_err(" ip_va=%p: %08x %08x %08x %08x\n", + gvt_dbg_cmd(" ip_va=%p: %08x %08x %08x %08x\n", s->ip_va, cmd_val(s, 0), cmd_val(s, 1), cmd_val(s, 2), cmd_val(s, 3)); @@ -817,6 +817,25 @@ static bool is_shadowed_mmio(unsigned int offset) return ret; } +static inline bool is_force_nonpriv_mmio(unsigned int offset) +{ + return (offset >= 0x24d0 && offset < 0x2500); +} + +static int force_nonpriv_reg_handler(struct parser_exec_state *s, + unsigned int offset, unsigned int index) +{ + struct intel_gvt *gvt = s->vgpu->gvt; + unsigned int data = cmd_val(s, index + 1); + + if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data)) { + gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n", + offset, data); + return -EINVAL; + } + return 0; +} + static int cmd_reg_handler(struct parser_exec_state *s, unsigned int offset, unsigned int index, char *cmd) { @@ -824,23 +843,26 @@ static int cmd_reg_handler(struct parser_exec_state *s, struct intel_gvt *gvt = vgpu->gvt; if (offset + 4 > gvt->device_info.mmio_size) { - gvt_err("%s access to (%x) outside of MMIO range\n", + gvt_vgpu_err("%s access to (%x) outside of MMIO range\n", cmd, offset); return -EINVAL; } if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) { - gvt_err("vgpu%d: %s access to non-render register (%x)\n", - s->vgpu->id, cmd, offset); + gvt_vgpu_err("%s access to non-render register (%x)\n", + cmd, offset); return 0; } if (is_shadowed_mmio(offset)) { - gvt_err("vgpu%d: found access of shadowed MMIO %x\n", - s->vgpu->id, offset); + gvt_vgpu_err("found access of shadowed MMIO %x\n", offset); return 0; } + if (is_force_nonpriv_mmio(offset) && + force_nonpriv_reg_handler(s, offset, index)) + return -EINVAL; + if (offset == i915_mmio_reg_offset(DERRMR) || offset == i915_mmio_reg_offset(FORCEWAKE_MT)) { /* Writing to HW VGT_PVINFO_PAGE offset will be discarded */ @@ -1008,7 +1030,7 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s) ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl"); else if (post_sync == 1) { /* check ggtt*/ - if ((cmd_val(s, 2) & (1 << 2))) { + if ((cmd_val(s, 1) & PIPE_CONTROL_GLOBAL_GTT_IVB)) { gma = cmd_val(s, 2) & GENMASK(31, 3); if (gmadr_bytes == 8) gma |= (cmd_gma_hi(s, 3)) << 32; @@ -1129,6 +1151,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s, struct mi_display_flip_command_info *info) { struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv; + struct intel_vgpu *vgpu = s->vgpu; u32 dword0 = cmd_val(s, 0); u32 dword1 = cmd_val(s, 1); u32 dword2 = cmd_val(s, 2); @@ -1167,7 +1190,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s, break; default: - gvt_err("unknown plane code %d\n", plane); + gvt_vgpu_err("unknown plane code %d\n", plane); return -EINVAL; } @@ -1274,25 +1297,26 @@ static int update_plane_mmio_from_mi_display_flip( static int cmd_handler_mi_display_flip(struct parser_exec_state *s) { struct mi_display_flip_command_info info; + struct intel_vgpu *vgpu = s->vgpu; int ret; int i; int len = cmd_length(s); ret = decode_mi_display_flip(s, &info); if (ret) { - gvt_err("fail to decode MI display flip command\n"); + gvt_vgpu_err("fail to decode MI display flip command\n"); return ret; } ret = check_mi_display_flip(s, &info); if (ret) { - gvt_err("invalid MI display flip command\n"); + gvt_vgpu_err("invalid MI display flip command\n"); return ret; } ret = update_plane_mmio_from_mi_display_flip(s, &info); if (ret) { - gvt_err("fail to 
update plane mmio\n"); + gvt_vgpu_err("fail to update plane mmio\n"); return ret; } @@ -1350,7 +1374,8 @@ static inline int cmd_address_audit(struct parser_exec_state *s, int ret; if (op_size > max_surface_size) { - gvt_err("command address audit fail name %s\n", s->info->name); + gvt_vgpu_err("command address audit fail name %s\n", + s->info->name); return -EINVAL; } @@ -1367,7 +1392,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s, } return 0; err: - gvt_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n", + gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n", s->info->name, guest_gma, op_size); pr_err("cmd dump: "); @@ -1412,8 +1437,10 @@ static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s) static inline int unexpected_cmd(struct parser_exec_state *s) { - gvt_err("vgpu%d: Unexpected %s in command buffer!\n", - s->vgpu->id, s->info->name); + struct intel_vgpu *vgpu = s->vgpu; + + gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name); + return -EINVAL; } @@ -1516,7 +1543,7 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm, while (gma != end_gma) { gpa = intel_vgpu_gma_to_gpa(mm, gma); if (gpa == INTEL_GVT_INVALID_ADDR) { - gvt_err("invalid gma address: %lx\n", gma); + gvt_vgpu_err("invalid gma address: %lx\n", gma); return -EFAULT; } @@ -1557,6 +1584,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s) uint32_t bb_size = 0; uint32_t cmd_len = 0; bool met_bb_end = false; + struct intel_vgpu *vgpu = s->vgpu; u32 cmd; /* get the start gm address of the batch buffer */ @@ -1565,7 +1593,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s) info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); if (info == NULL) { - gvt_err("unknown cmd 0x%x, opcode=0x%x\n", + gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n", cmd, get_opcode(cmd, s->ring_id)); return -EINVAL; } @@ -1574,7 +1602,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s) gma, gma + 4, &cmd); info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); if (info == NULL) { - gvt_err("unknown cmd 0x%x, opcode=0x%x\n", + gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n", cmd, get_opcode(cmd, s->ring_id)); return -EINVAL; } @@ -1599,6 +1627,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s) static int perform_bb_shadow(struct parser_exec_state *s) { struct intel_shadow_bb_entry *entry_obj; + struct intel_vgpu *vgpu = s->vgpu; unsigned long gma = 0; uint32_t bb_size; void *dst = NULL; @@ -1633,7 +1662,7 @@ static int perform_bb_shadow(struct parser_exec_state *s) ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false); if (ret) { - gvt_err("failed to set shadow batch to CPU\n"); + gvt_vgpu_err("failed to set shadow batch to CPU\n"); goto unmap_src; } @@ -1645,7 +1674,7 @@ static int perform_bb_shadow(struct parser_exec_state *s) gma, gma + bb_size, dst); if (ret) { - gvt_err("fail to copy guest ring buffer\n"); + gvt_vgpu_err("fail to copy guest ring buffer\n"); goto unmap_src; } @@ -1676,15 +1705,16 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s) { bool second_level; int ret = 0; + struct intel_vgpu *vgpu = s->vgpu; if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) { - gvt_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n"); + gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n"); return -EINVAL; } second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1; if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) { - gvt_err("Jumping to 2nd level BB from RB is 
not allowed\n"); + gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n"); return -EINVAL; } @@ -1702,7 +1732,7 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s) if (batch_buffer_needs_scan(s)) { ret = perform_bb_shadow(s); if (ret < 0) - gvt_err("invalid shadow batch buffer\n"); + gvt_vgpu_err("invalid shadow batch buffer\n"); } else { /* emulate a batch buffer end to do return right */ ret = cmd_handler_mi_batch_buffer_end(s); @@ -2429,6 +2459,7 @@ static int cmd_parser_exec(struct parser_exec_state *s) int ret = 0; cycles_t t0, t1, t2; struct parser_exec_state s_before_advance_custom; + struct intel_vgpu *vgpu = s->vgpu; t0 = get_cycles(); @@ -2436,7 +2467,7 @@ static int cmd_parser_exec(struct parser_exec_state *s) info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); if (info == NULL) { - gvt_err("unknown cmd 0x%x, opcode=0x%x\n", + gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n", cmd, get_opcode(cmd, s->ring_id)); return -EINVAL; } @@ -2452,7 +2483,7 @@ static int cmd_parser_exec(struct parser_exec_state *s) if (info->handler) { ret = info->handler(s); if (ret < 0) { - gvt_err("%s handler error\n", info->name); + gvt_vgpu_err("%s handler error\n", info->name); return ret; } } @@ -2463,7 +2494,7 @@ static int cmd_parser_exec(struct parser_exec_state *s) if (!(info->flag & F_IP_ADVANCE_CUSTOM)) { ret = cmd_advance_default(s); if (ret) { - gvt_err("%s IP advance error\n", info->name); + gvt_vgpu_err("%s IP advance error\n", info->name); return ret; } } @@ -2486,6 +2517,7 @@ static int command_scan(struct parser_exec_state *s, unsigned long gma_head, gma_tail, gma_bottom; int ret = 0; + struct intel_vgpu *vgpu = s->vgpu; gma_head = rb_start + rb_head; gma_tail = rb_start + rb_tail; @@ -2497,7 +2529,7 @@ static int command_scan(struct parser_exec_state *s, if (s->buf_type == RING_BUFFER_INSTRUCTION) { if (!(s->ip_gma >= rb_start) || !(s->ip_gma < gma_bottom)) { - gvt_err("ip_gma %lx out of ring scope." + gvt_vgpu_err("ip_gma %lx out of ring scope." "(base:0x%lx, bottom: 0x%lx)\n", s->ip_gma, rb_start, gma_bottom); @@ -2505,7 +2537,7 @@ static int command_scan(struct parser_exec_state *s, return -EINVAL; } if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) { - gvt_err("ip_gma %lx out of range." + gvt_vgpu_err("ip_gma %lx out of range." 
"base 0x%lx head 0x%lx tail 0x%lx\n", s->ip_gma, rb_start, rb_head, rb_tail); @@ -2515,7 +2547,7 @@ static int command_scan(struct parser_exec_state *s, } ret = cmd_parser_exec(s); if (ret) { - gvt_err("cmd parser error\n"); + gvt_vgpu_err("cmd parser error\n"); parser_exec_state_dump(s); break; } @@ -2639,7 +2671,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload) gma_head, gma_top, workload->shadow_ring_buffer_va); if (ret) { - gvt_err("fail to copy guest ring buffer\n"); + gvt_vgpu_err("fail to copy guest ring buffer\n"); return ret; } copy_len = gma_top - gma_head; @@ -2651,7 +2683,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload) gma_head, gma_tail, workload->shadow_ring_buffer_va + copy_len); if (ret) { - gvt_err("fail to copy guest ring buffer\n"); + gvt_vgpu_err("fail to copy guest ring buffer\n"); return ret; } ring->tail += workload->rb_len; @@ -2662,16 +2694,17 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload) int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) { int ret; + struct intel_vgpu *vgpu = workload->vgpu; ret = shadow_workload_ring_buffer(workload); if (ret) { - gvt_err("fail to shadow workload ring_buffer\n"); + gvt_vgpu_err("fail to shadow workload ring_buffer\n"); return ret; } ret = scan_workload(workload); if (ret) { - gvt_err("scan workload error\n"); + gvt_vgpu_err("scan workload error\n"); return ret; } return 0; @@ -2681,6 +2714,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx) { int ctx_size = wa_ctx->indirect_ctx.size; unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma; + struct intel_vgpu *vgpu = wa_ctx->workload->vgpu; struct drm_i915_gem_object *obj; int ret = 0; void *map; @@ -2694,14 +2728,14 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx) /* get the va of the shadow batch buffer */ map = i915_gem_object_pin_map(obj, I915_MAP_WB); if (IS_ERR(map)) { - gvt_err("failed to vmap shadow indirect ctx\n"); + gvt_vgpu_err("failed to vmap shadow indirect ctx\n"); ret = PTR_ERR(map); goto put_obj; } ret = i915_gem_object_set_to_cpu_domain(obj, false); if (ret) { - gvt_err("failed to set shadow indirect ctx to CPU\n"); + gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n"); goto unmap_src; } @@ -2710,7 +2744,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx) guest_gma, guest_gma + ctx_size, map); if (ret) { - gvt_err("fail to copy guest indirect ctx\n"); + gvt_vgpu_err("fail to copy guest indirect ctx\n"); goto unmap_src; } @@ -2744,13 +2778,14 @@ static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) { int ret; + struct intel_vgpu *vgpu = wa_ctx->workload->vgpu; if (wa_ctx->indirect_ctx.size == 0) return 0; ret = shadow_indirect_ctx(wa_ctx); if (ret) { - gvt_err("fail to shadow indirect ctx\n"); + gvt_vgpu_err("fail to shadow indirect ctx\n"); return ret; } @@ -2758,7 +2793,7 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) ret = scan_wa_ctx(wa_ctx); if (ret) { - gvt_err("scan wa ctx error\n"); + gvt_vgpu_err("scan wa ctx error\n"); return ret; } diff --git a/drivers/gpu/drm/i915/gvt/debug.h b/drivers/gpu/drm/i915/gvt/debug.h index 68cba7bd980af8..b0cff4dc268479 100644 --- a/drivers/gpu/drm/i915/gvt/debug.h +++ b/drivers/gpu/drm/i915/gvt/debug.h @@ -27,6 +27,14 @@ #define gvt_err(fmt, args...) \ DRM_ERROR("gvt: "fmt, ##args) +#define gvt_vgpu_err(fmt, args...) 
\ +do { \ + if (IS_ERR_OR_NULL(vgpu)) \ + DRM_DEBUG_DRIVER("gvt: "fmt, ##args); \ + else \ + DRM_DEBUG_DRIVER("gvt: vgpu %d: "fmt, vgpu->id, ##args);\ +} while (0) + #define gvt_dbg_core(fmt, args...) \ DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args) diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 6d8fde880c3993..5419ae6ec6339c 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -83,44 +83,80 @@ static int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe) return 0; } +static unsigned char virtual_dp_monitor_edid[GVT_EDID_NUM][EDID_SIZE] = { + { +/* EDID with 1024x768 as its resolution */ + /*Header*/ + 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, + /* Vendor & Product Identification */ + 0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17, + /* Version & Revision */ + 0x01, 0x04, + /* Basic Display Parameters & Features */ + 0xa5, 0x34, 0x20, 0x78, 0x23, + /* Color Characteristics */ + 0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54, + /* Established Timings: maximum resolution is 1024x768 */ + 0x21, 0x08, 0x00, + /* Standard Timings. All invalid */ + 0x00, 0xc0, 0x00, 0xc0, 0x00, 0x40, 0x00, 0x80, 0x00, 0x00, + 0x00, 0x40, 0x00, 0x00, 0x00, 0x01, + /* 18 Byte Data Blocks 1: invalid */ + 0x00, 0x00, 0x80, 0xa0, 0x70, 0xb0, + 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a, + /* 18 Byte Data Blocks 2: invalid */ + 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + /* 18 Byte Data Blocks 3: invalid */ + 0x00, 0x00, 0x00, 0xfc, 0x00, 0x48, + 0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20, + /* 18 Byte Data Blocks 4: invalid */ + 0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30, + 0x44, 0x58, 0x51, 0x0a, 0x20, 0x20, + /* Extension Block Count */ + 0x00, + /* Checksum */ + 0xef, + }, + { /* EDID with 1920x1200 as its resolution */ -static unsigned char virtual_dp_monitor_edid[] = { - /*Header*/ - 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, - /* Vendor & Product Identification */ - 0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17, - /* Version & Revision */ - 0x01, 0x04, - /* Basic Display Parameters & Features */ - 0xa5, 0x34, 0x20, 0x78, 0x23, - /* Color Characteristics */ - 0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54, - /* Established Timings: maximum resolution is 1024x768 */ - 0x21, 0x08, 0x00, - /* - * Standard Timings. 
- * below new resolutions can be supported: - * 1920x1080, 1280x720, 1280x960, 1280x1024, - * 1440x900, 1600x1200, 1680x1050 - */ - 0xd1, 0xc0, 0x81, 0xc0, 0x81, 0x40, 0x81, 0x80, 0x95, 0x00, - 0xa9, 0x40, 0xb3, 0x00, 0x01, 0x01, - /* 18 Byte Data Blocks 1: max resolution is 1920x1200 */ - 0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0, - 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a, - /* 18 Byte Data Blocks 2: invalid */ - 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - /* 18 Byte Data Blocks 3: invalid */ - 0x00, 0x00, 0x00, 0xfc, 0x00, 0x48, - 0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20, - /* 18 Byte Data Blocks 4: invalid */ - 0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30, - 0x44, 0x58, 0x51, 0x0a, 0x20, 0x20, - /* Extension Block Count */ - 0x00, - /* Checksum */ - 0x45, + /*Header*/ + 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, + /* Vendor & Product Identification */ + 0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17, + /* Version & Revision */ + 0x01, 0x04, + /* Basic Display Parameters & Features */ + 0xa5, 0x34, 0x20, 0x78, 0x23, + /* Color Characteristics */ + 0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54, + /* Established Timings: maximum resolution is 1024x768 */ + 0x21, 0x08, 0x00, + /* + * Standard Timings. + * below new resolutions can be supported: + * 1920x1080, 1280x720, 1280x960, 1280x1024, + * 1440x900, 1600x1200, 1680x1050 + */ + 0xd1, 0xc0, 0x81, 0xc0, 0x81, 0x40, 0x81, 0x80, 0x95, 0x00, + 0xa9, 0x40, 0xb3, 0x00, 0x01, 0x01, + /* 18 Byte Data Blocks 1: max resolution is 1920x1200 */ + 0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0, + 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a, + /* 18 Byte Data Blocks 2: invalid */ + 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + /* 18 Byte Data Blocks 3: invalid */ + 0x00, 0x00, 0x00, 0xfc, 0x00, 0x48, + 0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20, + /* 18 Byte Data Blocks 4: invalid */ + 0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30, + 0x44, 0x58, 0x51, 0x0a, 0x20, 0x20, + /* Extension Block Count */ + 0x00, + /* Checksum */ + 0x45, + }, }; #define DPCD_HEADER_SIZE 0xb @@ -140,14 +176,20 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT | SDE_PORTE_HOTPLUG_SPT); - if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) { vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT; + vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED; + } - if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) { vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT; + vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED; + } - if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) { vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT; + vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED; + } if (IS_SKYLAKE(dev_priv) && intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) { @@ -160,6 +202,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) GEN8_PORT_DP_A_HOTPLUG; else vgpu_vreg(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT; + + vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED; } } @@ -175,10 +219,13 @@ static void 
clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num) } static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num, - int type) + int type, unsigned int resolution) { struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num); + if (WARN_ON(resolution >= GVT_EDID_NUM)) + return -EINVAL; + port->edid = kzalloc(sizeof(*(port->edid)), GFP_KERNEL); if (!port->edid) return -ENOMEM; @@ -189,7 +236,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num, return -ENOMEM; } - memcpy(port->edid->edid_block, virtual_dp_monitor_edid, + memcpy(port->edid->edid_block, virtual_dp_monitor_edid[resolution], EDID_SIZE); port->edid->data_valid = true; @@ -322,16 +369,18 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu) * Zero on success, negative error code if failed. * */ -int intel_vgpu_init_display(struct intel_vgpu *vgpu) +int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution) { struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; intel_vgpu_init_i2c_edid(vgpu); if (IS_SKYLAKE(dev_priv)) - return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D); + return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D, + resolution); else - return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B); + return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B, + resolution); } /** diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h index 8b234ea961f67b..d73de22102e2b7 100644 --- a/drivers/gpu/drm/i915/gvt/display.h +++ b/drivers/gpu/drm/i915/gvt/display.h @@ -154,10 +154,28 @@ struct intel_vgpu_port { int type; }; +enum intel_vgpu_edid { + GVT_EDID_1024_768, + GVT_EDID_1920_1200, + GVT_EDID_NUM, +}; + +static inline char *vgpu_edid_str(enum intel_vgpu_edid id) +{ + switch (id) { + case GVT_EDID_1024_768: + return "1024x768"; + case GVT_EDID_1920_1200: + return "1920x1200"; + default: + return ""; + } +} + void intel_gvt_emulate_vblank(struct intel_gvt *gvt); void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt); -int intel_vgpu_init_display(struct intel_vgpu *vgpu); +int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution); void intel_vgpu_reset_display(struct intel_vgpu *vgpu); void intel_vgpu_clean_display(struct intel_vgpu *vgpu); diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c index bda85dff7b2a99..42cd09ec63fa7c 100644 --- a/drivers/gpu/drm/i915/gvt/edid.c +++ b/drivers/gpu/drm/i915/gvt/edid.c @@ -52,16 +52,16 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu) unsigned char chr = 0; if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) { - gvt_err("Driver tries to read EDID without proper sequence!\n"); + gvt_vgpu_err("Driver tries to read EDID without proper sequence!\n"); return 0; } if (edid->current_edid_read >= EDID_SIZE) { - gvt_err("edid_get_byte() exceeds the size of EDID!\n"); + gvt_vgpu_err("edid_get_byte() exceeds the size of EDID!\n"); return 0; } if (!edid->edid_available) { - gvt_err("Reading EDID but EDID is not available!\n"); + gvt_vgpu_err("Reading EDID but EDID is not available!\n"); return 0; } @@ -72,7 +72,7 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu) chr = edid_data->edid_block[edid->current_edid_read]; edid->current_edid_read++; } else { - gvt_err("No EDID available during the reading?\n"); + gvt_vgpu_err("No EDID available during the reading?\n"); } return chr; } @@ -223,7 +223,7 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE; 
break; default: - gvt_err("Unknown/reserved GMBUS cycle detected!\n"); + gvt_vgpu_err("Unknown/reserved GMBUS cycle detected!\n"); break; } /* @@ -292,8 +292,7 @@ static int gmbus3_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, */ } else { memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes); - gvt_err("vgpu%d: warning: gmbus3 read with nothing returned\n", - vgpu->id); + gvt_vgpu_err("warning: gmbus3 read with nothing returned\n"); } return 0; } @@ -496,7 +495,8 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu, unsigned char val = edid_get_byte(vgpu); aux_data_for_write = (val << 16); - } + } else + aux_data_for_write = (0xff << 16); } /* write the return value in AUX_CH_DATA reg which includes: * ACK of I2C_WRITE diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index 46eb9fd3c03f6b..f1f426a97aa9d4 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -172,6 +172,7 @@ static int emulate_execlist_ctx_schedule_out( struct intel_vgpu_execlist *execlist, struct execlist_ctx_descriptor_format *ctx) { + struct intel_vgpu *vgpu = execlist->vgpu; struct intel_vgpu_execlist_slot *running = execlist->running_slot; struct intel_vgpu_execlist_slot *pending = execlist->pending_slot; struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0]; @@ -183,7 +184,7 @@ static int emulate_execlist_ctx_schedule_out( gvt_dbg_el("schedule out context id %x\n", ctx->context_id); if (WARN_ON(!same_context(ctx, execlist->running_context))) { - gvt_err("schedule out context is not running context," + gvt_vgpu_err("schedule out context is not running context," "ctx id %x running ctx id %x\n", ctx->context_id, execlist->running_context->context_id); @@ -254,7 +255,7 @@ static struct intel_vgpu_execlist_slot *get_next_execlist_slot( status.udw = vgpu_vreg(vgpu, status_reg + 4); if (status.execlist_queue_full) { - gvt_err("virtual execlist slots are full\n"); + gvt_vgpu_err("virtual execlist slots are full\n"); return NULL; } @@ -270,11 +271,12 @@ static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist, struct execlist_ctx_descriptor_format *ctx0, *ctx1; struct execlist_context_status_format status; + struct intel_vgpu *vgpu = execlist->vgpu; gvt_dbg_el("emulate schedule-in\n"); if (!slot) { - gvt_err("no available execlist slot\n"); + gvt_vgpu_err("no available execlist slot\n"); return -EINVAL; } @@ -375,7 +377,6 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0); if (IS_ERR(vma)) { - gvt_err("Cannot pin\n"); return; } @@ -428,7 +429,6 @@ static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL, 0, CACHELINE_BYTES, 0); if (IS_ERR(vma)) { - gvt_err("Cannot pin indirect ctx obj\n"); return; } @@ -561,6 +561,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload) { struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc; struct intel_vgpu_mm *mm; + struct intel_vgpu *vgpu = workload->vgpu; int page_table_level; u32 pdp[8]; @@ -569,7 +570,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload) } else if (desc->addressing_mode == 3) { /* legacy 64 bit */ page_table_level = 4; } else { - gvt_err("Advanced Context mode(SVM) is not supported!\n"); + gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n"); return -EINVAL; } @@ -583,7 +584,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload) mm = 
intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT, pdp, page_table_level, 0); if (IS_ERR(mm)) { - gvt_err("fail to create mm object.\n"); + gvt_vgpu_err("fail to create mm object.\n"); return PTR_ERR(mm); } } @@ -609,7 +610,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id, ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, (u32)((desc->lrca + 1) << GTT_PAGE_SHIFT)); if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) { - gvt_err("invalid guest context LRCA: %x\n", desc->lrca); + gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca); return -EINVAL; } @@ -724,8 +725,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id) continue; if (!desc[i]->privilege_access) { - gvt_err("vgpu%d: unexpected GGTT elsp submission\n", - vgpu->id); + gvt_vgpu_err("unexpected GGTT elsp submission\n"); return -EINVAL; } @@ -735,15 +735,13 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id) } if (!valid_desc_bitmap) { - gvt_err("vgpu%d: no valid desc in a elsp submission\n", - vgpu->id); + gvt_vgpu_err("no valid desc in a elsp submission\n"); return -EINVAL; } if (!test_bit(0, (void *)&valid_desc_bitmap) && test_bit(1, (void *)&valid_desc_bitmap)) { - gvt_err("vgpu%d: weird elsp submission, desc 0 is not valid\n", - vgpu->id); + gvt_vgpu_err("weird elsp submission, desc 0 is not valid\n"); return -EINVAL; } @@ -752,8 +750,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id) ret = submit_context(vgpu, ring_id, &valid_desc[i], emulate_schedule_in); if (ret) { - gvt_err("vgpu%d: fail to schedule workload\n", - vgpu->id); + gvt_vgpu_err("fail to schedule workload\n"); return ret; } emulate_schedule_in = false; diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c index 1cb29b2d7dc638..933a7c211a1c29 100644 --- a/drivers/gpu/drm/i915/gvt/firmware.c +++ b/drivers/gpu/drm/i915/gvt/firmware.c @@ -80,7 +80,7 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt) int ret; size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1; - firmware = vmalloc(size); + firmware = vzalloc(size); if (!firmware) return -ENOMEM; diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 28c92346db0e4e..b832bea64e0367 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -49,8 +49,8 @@ bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size) { if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size && !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) { - gvt_err("vgpu%d: invalid range gmadr 0x%llx size 0x%x\n", - vgpu->id, addr, size); + gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n", + addr, size); return false; } return true; @@ -430,7 +430,7 @@ static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p, mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn); if (mfn == INTEL_GVT_INVALID_ADDR) { - gvt_err("fail to translate gfn: 0x%lx\n", gfn); + gvt_vgpu_err("fail to translate gfn: 0x%lx\n", gfn); return -ENXIO; } @@ -611,7 +611,7 @@ static inline int init_shadow_page(struct intel_vgpu *vgpu, daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL); if (dma_mapping_error(kdev, daddr)) { - gvt_err("fail to map dma addr\n"); + gvt_vgpu_err("fail to map dma addr\n"); return -EINVAL; } @@ -735,7 +735,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_shadow_page( if (reclaim_one_mm(vgpu->gvt)) goto retry; - gvt_err("fail to allocate ppgtt shadow page\n"); + gvt_vgpu_err("fail to allocate ppgtt shadow 
page\n"); return ERR_PTR(-ENOMEM); } @@ -750,14 +750,14 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_shadow_page( */ ret = init_shadow_page(vgpu, &spt->shadow_page, type); if (ret) { - gvt_err("fail to initialize shadow page for spt\n"); + gvt_vgpu_err("fail to initialize shadow page for spt\n"); goto err; } ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page, gfn, ppgtt_write_protection_handler, NULL); if (ret) { - gvt_err("fail to initialize guest page for spt\n"); + gvt_vgpu_err("fail to initialize guest page for spt\n"); goto err; } @@ -776,8 +776,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page( if (p) return shadow_page_to_ppgtt_spt(p); - gvt_err("vgpu%d: fail to find ppgtt shadow page: 0x%lx\n", - vgpu->id, mfn); + gvt_vgpu_err("fail to find ppgtt shadow page: 0x%lx\n", mfn); return NULL; } @@ -827,8 +826,8 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu, } s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e)); if (!s) { - gvt_err("vgpu%d: fail to find shadow page: mfn: 0x%lx\n", - vgpu->id, ops->get_pfn(e)); + gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n", + ops->get_pfn(e)); return -ENXIO; } return ppgtt_invalidate_shadow_page(s); @@ -836,6 +835,7 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu, static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt) { + struct intel_vgpu *vgpu = spt->vgpu; struct intel_gvt_gtt_entry e; unsigned long index; int ret; @@ -854,7 +854,7 @@ static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt) for_each_present_shadow_entry(spt, &e, index) { if (!gtt_type_is_pt(get_next_pt_type(e.type))) { - gvt_err("GVT doesn't support pse bit for now\n"); + gvt_vgpu_err("GVT doesn't support pse bit for now\n"); return -EINVAL; } ret = ppgtt_invalidate_shadow_page_by_shadow_entry( @@ -868,8 +868,8 @@ static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt) ppgtt_free_shadow_page(spt); return 0; fail: - gvt_err("vgpu%d: fail: shadow page %p shadow entry 0x%llx type %d\n", - spt->vgpu->id, spt, e.val64, e.type); + gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n", + spt, e.val64, e.type); return ret; } @@ -914,8 +914,8 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry( } return s; fail: - gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n", - vgpu->id, s, we->val64, we->type); + gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n", + s, we->val64, we->type); return ERR_PTR(ret); } @@ -953,7 +953,7 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt) for_each_present_guest_entry(spt, &ge, i) { if (!gtt_type_is_pt(get_next_pt_type(ge.type))) { - gvt_err("GVT doesn't support pse bit now\n"); + gvt_vgpu_err("GVT doesn't support pse bit now\n"); ret = -EINVAL; goto fail; } @@ -969,8 +969,8 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt) } return 0; fail: - gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n", - vgpu->id, spt, ge.val64, ge.type); + gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n", + spt, ge.val64, ge.type); return ret; } @@ -999,7 +999,7 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt, struct intel_vgpu_ppgtt_spt *s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e)); if (!s) { - gvt_err("fail to find guest page\n"); + gvt_vgpu_err("fail to find guest page\n"); ret = -ENXIO; goto fail; } @@ -1011,8 +1011,8 @@ static int 
ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt, ppgtt_set_shadow_entry(spt, &e, index); return 0; fail: - gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n", - vgpu->id, spt, e.val64, e.type); + gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n", + spt, e.val64, e.type); return ret; } @@ -1046,8 +1046,8 @@ static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt, } return 0; fail: - gvt_err("vgpu%d: fail: spt %p guest entry 0x%llx type %d\n", vgpu->id, - spt, we->val64, we->type); + gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n", + spt, we->val64, we->type); return ret; } @@ -1250,8 +1250,8 @@ static int ppgtt_handle_guest_write_page_table( } return 0; fail: - gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d.\n", - vgpu->id, spt, we->val64, we->type); + gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n", + spt, we->val64, we->type); return ret; } @@ -1493,7 +1493,7 @@ static int shadow_mm(struct intel_vgpu_mm *mm) spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge); if (IS_ERR(spt)) { - gvt_err("fail to populate guest root pointer\n"); + gvt_vgpu_err("fail to populate guest root pointer\n"); ret = PTR_ERR(spt); goto fail; } @@ -1566,7 +1566,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu, ret = gtt->mm_alloc_page_table(mm); if (ret) { - gvt_err("fail to allocate page table for mm\n"); + gvt_vgpu_err("fail to allocate page table for mm\n"); goto fail; } @@ -1584,7 +1584,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu, } return mm; fail: - gvt_err("fail to create mm\n"); + gvt_vgpu_err("fail to create mm\n"); if (mm) intel_gvt_mm_unreference(mm); return ERR_PTR(ret); @@ -1760,7 +1760,7 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma) mm->page_table_level, gma, gpa); return gpa; err: - gvt_err("invalid mm type: %d gma %lx\n", mm->type, gma); + gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma); return INTEL_GVT_INVALID_ADDR; } @@ -1825,11 +1825,8 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, gma = g_gtt_index << GTT_PAGE_SHIFT; /* the VM may configure the whole GM space when ballooning is used */ - if (WARN_ONCE(!vgpu_gmadr_is_valid(vgpu, gma), - "vgpu%d: found oob ggtt write, offset %x\n", - vgpu->id, off)) { + if (!vgpu_gmadr_is_valid(vgpu, gma)) return 0; - } ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index); @@ -1839,13 +1836,16 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, if (ops->test_present(&e)) { ret = gtt_entry_p2m(vgpu, &e, &m); if (ret) { - gvt_err("vgpu%d: fail to translate guest gtt entry\n", - vgpu->id); - return ret; + gvt_vgpu_err("fail to translate guest gtt entry\n"); + /* guest driver may read/write the entry when partially + * updating it; in this situation p2m will fail, so + * set the shadow entry to point to a scratch page + */ + ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn); } } else { m = e; - m.val64 = 0; + ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn); } ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index); @@ -1896,14 +1896,14 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, scratch_pt = (void *)get_zeroed_page(GFP_KERNEL); if (!scratch_pt) { - gvt_err("fail to allocate scratch page\n"); + gvt_vgpu_err("fail to allocate scratch page\n"); return -ENOMEM; } daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0, 4096, PCI_DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, daddr)) { - gvt_err("fail 
to dmamap scratch_pt\n"); + gvt_vgpu_err("fail to dmamap scratch_pt\n"); __free_page(virt_to_page(scratch_pt)); return -ENOMEM; } @@ -2006,7 +2006,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu) ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT, NULL, 1, 0); if (IS_ERR(ggtt_mm)) { - gvt_err("fail to create mm for ggtt.\n"); + gvt_vgpu_err("fail to create mm for ggtt.\n"); return PTR_ERR(ggtt_mm); } @@ -2015,6 +2015,22 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu) return create_scratch_page_tree(vgpu); } +static void intel_vgpu_free_mm(struct intel_vgpu *vgpu, int type) +{ + struct list_head *pos, *n; + struct intel_vgpu_mm *mm; + + list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) { + mm = container_of(pos, struct intel_vgpu_mm, list); + if (mm->type == type) { + vgpu->gvt->gtt.mm_free_page_table(mm); + list_del(&mm->list); + list_del(&mm->lru_list); + kfree(mm); + } + } +} + /** * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virulization * @vgpu: a vGPU @@ -2027,19 +2043,11 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu) */ void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu) { - struct list_head *pos, *n; - struct intel_vgpu_mm *mm; - ppgtt_free_all_shadow_page(vgpu); release_scratch_page_tree(vgpu); - list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) { - mm = container_of(pos, struct intel_vgpu_mm, list); - vgpu->gvt->gtt.mm_free_page_table(mm); - list_del(&mm->list); - list_del(&mm->lru_list); - kfree(mm); - } + intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT); + intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_GGTT); } static void clean_spt_oos(struct intel_gvt *gvt) @@ -2071,7 +2079,6 @@ static int setup_spt_oos(struct intel_gvt *gvt) for (i = 0; i < preallocated_oos_pages; i++) { oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL); if (!oos_page) { - gvt_err("fail to pre-allocate oos page\n"); ret = -ENOMEM; goto fail; } @@ -2161,7 +2168,7 @@ int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu, mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT, pdp, page_table_level, 0); if (IS_ERR(mm)) { - gvt_err("fail to create mm\n"); + gvt_vgpu_err("fail to create mm\n"); return PTR_ERR(mm); } } @@ -2191,7 +2198,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu, mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp); if (!mm) { - gvt_err("fail to find ppgtt instance.\n"); + gvt_vgpu_err("fail to find ppgtt instance.\n"); return -EINVAL; } intel_gvt_mm_unreference(mm); @@ -2322,6 +2329,13 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr) int i; ppgtt_free_all_shadow_page(vgpu); + + /* Shadow pages are only created when there is no page + * table tracking data, so remove page tracking data after + * removing the shadow pages. 
+ */ + intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT); + if (!dmlr) return; diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index e227caf5859ebd..6dfc48b63b718b 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -143,6 +143,8 @@ struct intel_vgpu { int id; unsigned long handle; /* vGPU handle used by hypervisor MPT modules */ bool active; + bool pv_notified; + bool failsafe; bool resetting; void *sched_data; @@ -160,7 +162,6 @@ struct intel_vgpu { atomic_t running_workload_num; DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES); struct i915_gem_context *shadow_ctx; - struct notifier_block shadow_ctx_notifier_block; #if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT) struct { @@ -203,18 +204,18 @@ struct intel_gvt_firmware { }; struct intel_gvt_opregion { - void __iomem *opregion_va; + void *opregion_va; u32 opregion_pa; }; #define NR_MAX_INTEL_VGPU_TYPES 20 struct intel_vgpu_type { char name[16]; - unsigned int max_instance; unsigned int avail_instance; unsigned int low_gm_size; unsigned int high_gm_size; unsigned int fence; + enum intel_vgpu_edid resolution; }; struct intel_gvt { @@ -231,6 +232,7 @@ struct intel_gvt { struct intel_gvt_gtt gtt; struct intel_gvt_opregion opregion; struct intel_gvt_workload_scheduler scheduler; + struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES]; DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS); struct intel_vgpu_type *types; unsigned int num_types; @@ -317,6 +319,7 @@ struct intel_vgpu_creation_params { __u64 low_gm_sz; /* in MB */ __u64 high_gm_sz; /* in MB */ __u64 fence_sz; + __u64 resolution; __s32 primary; __u64 vgpu_id; }; @@ -449,6 +452,11 @@ struct intel_gvt_ops { }; +enum { + GVT_FAILSAFE_UNSUPPORTED_GUEST, + GVT_FAILSAFE_INSUFFICIENT_RESOURCE, +}; + #include "mpt.h" #endif diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 1d450627ff6540..6da9ae1618e35e 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -121,6 +121,7 @@ static int new_mmio_info(struct intel_gvt *gvt, info->size = size; info->length = (i + 4) < end ? 4 : (end - i); info->addr_mask = addr_mask; + info->ro_mask = ro_mask; info->device = device; info->read = read ? read : intel_vgpu_default_mmio_read; info->write = write ? write : intel_vgpu_default_mmio_write; @@ -150,15 +151,42 @@ static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg) #define fence_num_to_offset(num) \ (num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0))) + +static void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason) +{ + switch (reason) { + case GVT_FAILSAFE_UNSUPPORTED_GUEST: + pr_err("Detected your guest driver doesn't support GVT-g.\n"); + break; + case GVT_FAILSAFE_INSUFFICIENT_RESOURCE: + pr_err("Graphics resource is not enough for the guest\n"); + default: + break; + } + pr_err("Now vgpu %d will enter failsafe mode.\n", vgpu->id); + vgpu->failsafe = true; +} + static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu, unsigned int fence_num, void *p_data, unsigned int bytes) { if (fence_num >= vgpu_fence_sz(vgpu)) { - gvt_err("vgpu%d: found oob fence register access\n", - vgpu->id); - gvt_err("vgpu%d: total fence num %d access fence num %d\n", - vgpu->id, vgpu_fence_sz(vgpu), fence_num); + + /* When guest access oob fence regs without access + * pv_info first, we treat guest not supporting GVT, + * and we will let vgpu enter failsafe mode. 
+ */ + if (!vgpu->pv_notified) + enter_failsafe_mode(vgpu, + GVT_FAILSAFE_UNSUPPORTED_GUEST); + + if (!vgpu->mmio.disable_warn_untrack) { + gvt_vgpu_err("found oob fence register access\n"); + gvt_vgpu_err("total fence %d, access fence %d\n", + vgpu_fence_sz(vgpu), fence_num); + } memset(p_data, 0, bytes); + return -EINVAL; } return 0; } @@ -219,7 +247,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu, break; default: /*should not hit here*/ - gvt_err("invalid forcewake offset 0x%x\n", offset); + gvt_vgpu_err("invalid forcewake offset 0x%x\n", offset); return -EINVAL; } } else { @@ -369,6 +397,74 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, return 0; } +/* ascendingly sorted */ +static i915_reg_t force_nonpriv_white_list[] = { + GEN9_CS_DEBUG_MODE1, //_MMIO(0x20ec) + GEN9_CTX_PREEMPT_REG,//_MMIO(0x2248) + GEN8_CS_CHICKEN1,//_MMIO(0x2580) + _MMIO(0x2690), + _MMIO(0x2694), + _MMIO(0x2698), + _MMIO(0x4de0), + _MMIO(0x4de4), + _MMIO(0x4dfc), + GEN7_COMMON_SLICE_CHICKEN1,//_MMIO(0x7010) + _MMIO(0x7014), + HDC_CHICKEN0,//_MMIO(0x7300) + GEN8_HDC_CHICKEN1,//_MMIO(0x7304) + _MMIO(0x7700), + _MMIO(0x7704), + _MMIO(0x7708), + _MMIO(0x770c), + _MMIO(0xb110), + GEN8_L3SQCREG4,//_MMIO(0xb118) + _MMIO(0xe100), + _MMIO(0xe18c), + _MMIO(0xe48c), + _MMIO(0xe5f4), +}; + +/* a simple bsearch */ +static inline bool in_whitelist(unsigned int reg) +{ + int left = 0, right = ARRAY_SIZE(force_nonpriv_white_list); + i915_reg_t *array = force_nonpriv_white_list; + + while (left < right) { + int mid = (left + right)/2; + + if (reg > array[mid].reg) + left = mid + 1; + else if (reg < array[mid].reg) + right = mid; + else + return true; + } + return false; +} + +static int force_nonpriv_write(struct intel_vgpu *vgpu, + unsigned int offset, void *p_data, unsigned int bytes) +{ + u32 reg_nonpriv = *(u32 *)p_data; + int ret = -EINVAL; + + if ((bytes != 4) || ((offset & (bytes - 1)) != 0)) { + gvt_err("vgpu(%d) Invalid FORCE_NONPRIV offset %x(%dB)\n", + vgpu->id, offset, bytes); + return ret; + } + + if (in_whitelist(reg_nonpriv)) { + ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data, + bytes); + } else { + gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x\n", + vgpu->id, reg_nonpriv); + } + return ret; +} + static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { @@ -432,7 +528,7 @@ static int check_fdi_rx_train_status(struct intel_vgpu *vgpu, fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2; fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK; } else { - gvt_err("Invalid train pattern %d\n", train_pattern); + gvt_vgpu_err("Invalid train pattern %d\n", train_pattern); return -EINVAL; } @@ -490,7 +586,7 @@ static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu, else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX) index = FDI_RX_IMR_TO_PIPE(offset); else { - gvt_err("Unsupport registers %x\n", offset); + gvt_vgpu_err("Unsupport registers %x\n", offset); return -EINVAL; } @@ -720,7 +816,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu, u32 data; if (!dpy_is_valid_port(port_index)) { - gvt_err("GVT(%d): Unsupported DP port access!\n", vgpu->id); + gvt_vgpu_err("Unsupported DP port access!\n"); return 0; } @@ -874,6 +970,14 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu, return 0; } +static int mbctl_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes) +{ + *(u32 *)p_data &= (~GEN6_MBCTL_ENABLE_BOOT_FETCH); + write_vreg(vgpu, offset, p_data, bytes); + return 0; 
+} + static int vga_control_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { @@ -918,8 +1022,7 @@ static void write_virtual_sbi_register(struct intel_vgpu *vgpu, if (i == num) { if (num == SBI_REG_MAX) { - gvt_err("vgpu%d: SBI caching meets maximum limits\n", - vgpu->id); + gvt_vgpu_err("SBI caching meets maximum limits\n"); return; } display->sbi.number++; @@ -999,8 +1102,9 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, break; } if (invalid_read) - gvt_err("invalid pvinfo read: [%x:%x] = %x\n", + gvt_vgpu_err("invalid pvinfo read: [%x:%x] = %x\n", offset, bytes, *(u32 *)p_data); + vgpu->pv_notified = true; return 0; } @@ -1026,7 +1130,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification) case 1: /* Remove this in guest driver. */ break; default: - gvt_err("Invalid PV notification %d\n", notification); + gvt_vgpu_err("Invalid PV notification %d\n", notification); } return ret; } @@ -1039,7 +1143,7 @@ static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready) char vmid_str[20]; char display_ready_str[20]; - snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d\n", ready); + snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d", ready); env[0] = display_ready_str; snprintf(vmid_str, 20, "VMID=%d", vgpu->id); @@ -1078,8 +1182,11 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, case _vgtif_reg(execlist_context_descriptor_lo): case _vgtif_reg(execlist_context_descriptor_hi): break; + case _vgtif_reg(rsv5[0])..._vgtif_reg(rsv5[3]): + enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE); + break; default: - gvt_err("invalid pvinfo write offset %x bytes %x data %x\n", + gvt_vgpu_err("invalid pvinfo write offset %x bytes %x data %x\n", offset, bytes, data); break; } @@ -1203,26 +1310,37 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset, u32 *data0 = &vgpu_vreg(vgpu, GEN6_PCODE_DATA); switch (cmd) { - case 0x6: - /** - * "Read memory latency" command on gen9. - * Below memory latency values are read - * from skylake platform. - */ - if (!*data0) - *data0 = 0x1e1a1100; - else - *data0 = 0x61514b3d; + case GEN9_PCODE_READ_MEM_LATENCY: + if (IS_SKYLAKE(vgpu->gvt->dev_priv)) { + /** + * "Read memory latency" command on gen9. + * Below memory latency values are read + * from skylake platform. + */ + if (!*data0) + *data0 = 0x1e1a1100; + else + *data0 = 0x61514b3d; + } break; - case 0x5: + case SKL_PCODE_CDCLK_CONTROL: + if (IS_SKYLAKE(vgpu->gvt->dev_priv)) + *data0 = SKL_CDCLK_READY_FOR_CHANGE; + break; + case GEN6_PCODE_READ_RC6VIDS: *data0 |= 0x1; break; } gvt_dbg_core("VM(%d) write %x to mailbox, return data0 %x\n", vgpu->id, value, *data0); - - value &= ~(1 << 31); + /** + * PCODE_READY clear means ready for pcode read/write, + * PCODE_ERROR_MASK clear means no error happened. In GVT-g we + * always emulate as pcode read/write success and ready for access + * anytime, since we don't touch real physical registers here. 
+ */ + value &= ~(GEN6_PCODE_READY | GEN6_PCODE_ERROR_MASK); return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes); } @@ -1302,7 +1420,8 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, if (execlist->elsp_dwords.index == 3) { ret = intel_vgpu_submit_execlist(vgpu, ring_id); if(ret) - gvt_err("fail submit workload on ring %d\n", ring_id); + gvt_vgpu_err("fail submit workload on ring %d\n", + ring_id); } ++execlist->elsp_dwords.index; @@ -1318,6 +1437,17 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, bool enable_execlist; write_vreg(vgpu, offset, p_data, bytes); + + /* when PPGTT mode enabled, we will check if guest has called + * pvinfo, if not, we will treat this guest as non-gvtg-aware + * guest, and stop emulating its cfg space, mmio, gtt, etc. + */ + if (((data & _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)) || + (data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))) + && !vgpu->pv_notified) { + enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST); + return 0; + } if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)) || (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) { enable_execlist = !!(data & GFX_RUN_LIST_ENABLE); @@ -1400,6 +1530,9 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu, #define MMIO_GM(reg, d, r, w) \ MMIO_F(reg, 4, F_GMADR, 0xFFFFF000, 0, d, r, w) +#define MMIO_GM_RDR(reg, d, r, w) \ + MMIO_F(reg, 4, F_GMADR | F_CMD_ACCESS, 0xFFFFF000, 0, d, r, w) + #define MMIO_RO(reg, d, f, rm, r, w) \ MMIO_F(reg, 4, F_RO | f, 0, rm, d, r, w) @@ -1419,6 +1552,9 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu, #define MMIO_RING_GM(prefix, d, r, w) \ MMIO_RING_F(prefix, 4, F_GMADR, 0xFFFF0000, 0, d, r, w) +#define MMIO_RING_GM_RDR(prefix, d, r, w) \ + MMIO_RING_F(prefix, 4, F_GMADR | F_CMD_ACCESS, 0xFFFF0000, 0, d, r, w) + #define MMIO_RING_RO(prefix, d, f, rm, r, w) \ MMIO_RING_F(prefix, 4, F_RO | f, 0, rm, d, r, w) @@ -1427,73 +1563,81 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) struct drm_i915_private *dev_priv = gvt->dev_priv; int ret; - MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler); + MMIO_RING_DFH(RING_IMR, D_ALL, F_CMD_ACCESS, NULL, + intel_vgpu_reg_imr_handler); MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler); MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler); MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler); MMIO_D(SDEISR, D_ALL); - MMIO_RING_D(RING_HWSTAM, D_ALL); + MMIO_RING_DFH(RING_HWSTAM, D_ALL, F_CMD_ACCESS, NULL, NULL); - MMIO_GM(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL); - MMIO_GM(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL); - MMIO_GM(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL); - MMIO_GM(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL); + MMIO_GM_RDR(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL); + MMIO_GM_RDR(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL); + MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL); + MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL); #define RING_REG(base) (base + 0x28) - MMIO_RING_D(RING_REG, D_ALL); + MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL); #undef RING_REG #define RING_REG(base) (base + 0x134) - MMIO_RING_D(RING_REG, D_ALL); + MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL); #undef RING_REG - MMIO_GM(0x2148, D_ALL, NULL, NULL); - MMIO_GM(CCID, D_ALL, NULL, NULL); - MMIO_GM(0x12198, D_ALL, NULL, NULL); + MMIO_GM_RDR(0x2148, D_ALL, NULL, NULL); + MMIO_GM_RDR(CCID, D_ALL, NULL, NULL); + MMIO_GM_RDR(0x12198, D_ALL, NULL, NULL); MMIO_D(GEN7_CXT_SIZE, D_ALL); - MMIO_RING_D(RING_TAIL, D_ALL); - 
MMIO_RING_D(RING_HEAD, D_ALL); - MMIO_RING_D(RING_CTL, D_ALL); - MMIO_RING_D(RING_ACTHD, D_ALL); - MMIO_RING_GM(RING_START, D_ALL, NULL, NULL); + MMIO_RING_DFH(RING_TAIL, D_ALL, F_CMD_ACCESS, NULL, NULL); + MMIO_RING_DFH(RING_HEAD, D_ALL, F_CMD_ACCESS, NULL, NULL); + MMIO_RING_DFH(RING_CTL, D_ALL, F_CMD_ACCESS, NULL, NULL); + MMIO_RING_DFH(RING_ACTHD, D_ALL, F_CMD_ACCESS, NULL, NULL); + MMIO_RING_GM_RDR(RING_START, D_ALL, NULL, NULL); /* RING MODE */ #define RING_REG(base) (base + 0x29c) - MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK, NULL, ring_mode_mmio_write); + MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, + ring_mode_mmio_write); #undef RING_REG - MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK, NULL, NULL); - MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK, NULL, NULL); + MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, + NULL, NULL); + MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK | F_CMD_ACCESS, + NULL, NULL); MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS, ring_timestamp_mmio_read, NULL); MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS, ring_timestamp_mmio_read, NULL); - MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK, NULL, NULL); - MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK, NULL, NULL); + MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK | F_CMD_ACCESS, + NULL, NULL); MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); - - MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK, NULL, NULL); - MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK, NULL, NULL); - MMIO_DFH(0x2088, D_ALL, F_MODE_MASK, NULL, NULL); - MMIO_DFH(0x20e4, D_ALL, F_MODE_MASK, NULL, NULL); - MMIO_DFH(0x2470, D_ALL, F_MODE_MASK, NULL, NULL); - MMIO_D(GAM_ECOCHK, D_ALL); - MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK, NULL, NULL); + MMIO_DFH(CACHE_MODE_0, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0x2124, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); + + MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0x2088, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0x20e4, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0x2470, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, + NULL, NULL); MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); - MMIO_D(0x9030, D_ALL); - MMIO_D(0x20a0, D_ALL); - MMIO_D(0x2420, D_ALL); - MMIO_D(0x2430, D_ALL); - MMIO_D(0x2434, D_ALL); - MMIO_D(0x2438, D_ALL); - MMIO_D(0x243c, D_ALL); - MMIO_DFH(0x7018, D_ALL, F_MODE_MASK, NULL, NULL); + MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0x2430, D_ALL, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0x2434, D_ALL, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0x2438, D_ALL, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0x243c, D_ALL, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0x7018, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(0xe100, D_ALL, F_MODE_MASK, NULL, NULL); + MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); /* display */ MMIO_F(0x60220, 0x20, 0, 0, 0, D_ALL, NULL, NULL); @@ -2022,8 +2166,8 @@ static int 
init_generic_mmio_info(struct intel_gvt *gvt) MMIO_D(FORCEWAKE_ACK, D_ALL); MMIO_D(GEN6_GT_CORE_STATUS, D_ALL); MMIO_D(GEN6_GT_THREAD_STATUS_REG, D_ALL); - MMIO_D(GTFIFODBG, D_ALL); - MMIO_D(GTFIFOCTL, D_ALL); + MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write); MMIO_DH(FORCEWAKE_ACK_HSW, D_HSW | D_BDW, NULL, NULL); MMIO_D(ECOBUS, D_ALL); @@ -2080,7 +2224,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_F(0x4f000, 0x90, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_SKL); + MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_BDW); MMIO_D(GEN6_PCODE_DATA, D_ALL); MMIO_D(0x13812c, D_ALL); MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL); @@ -2102,7 +2246,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_D(0x7180, D_ALL); MMIO_D(0x7408, D_ALL); MMIO_D(0x7c00, D_ALL); - MMIO_D(GEN6_MBCTL, D_ALL); + MMIO_DH(GEN6_MBCTL, D_ALL, NULL, mbctl_write); MMIO_D(0x911c, D_ALL); MMIO_D(0x9120, D_ALL); MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL); @@ -2159,36 +2303,35 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_D(0x1a054, D_ALL); MMIO_D(0x44070, D_ALL); - - MMIO_D(0x215c, D_HSW_PLUS); + MMIO_DFH(0x215c, D_HSW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(0x2178, D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(0x217c, D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(0x12178, D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL); - MMIO_F(0x2290, 8, 0, 0, 0, D_HSW_PLUS, NULL, NULL); - MMIO_D(GEN7_OACONTROL, D_HSW); + MMIO_F(0x2290, 8, F_CMD_ACCESS, 0, 0, D_HSW_PLUS, NULL, NULL); + MMIO_DFH(GEN7_OACONTROL, D_HSW, F_CMD_ACCESS, NULL, NULL); MMIO_D(0x2b00, D_BDW_PLUS); MMIO_D(0x2360, D_BDW_PLUS); - MMIO_F(0x5200, 32, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(0x5240, 32, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(0x5280, 16, 0, 0, 0, D_ALL, NULL, NULL); + MMIO_F(0x5200, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); + MMIO_F(0x5240, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); + MMIO_F(0x5280, 16, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); MMIO_DFH(0x1c17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(0x1c178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); - MMIO_D(BCS_SWCTRL, D_ALL); - - MMIO_F(HS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(DS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(IA_VERTICES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(IA_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(VS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(GS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(GS_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(CL_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(CL_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(PS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(PS_DEPTH_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); + MMIO_DFH(BCS_SWCTRL, D_ALL, F_CMD_ACCESS, NULL, NULL); + + MMIO_F(HS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); + MMIO_F(DS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); + MMIO_F(IA_VERTICES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); + MMIO_F(IA_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); + MMIO_F(VS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); + MMIO_F(GS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); + MMIO_F(GS_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); + MMIO_F(CL_INVOCATION_COUNT, 8, 
F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); + MMIO_F(CL_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); + MMIO_F(PS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); + MMIO_F(PS_DEPTH_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); MMIO_DH(0x4260, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler); MMIO_DH(0x4264, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler); MMIO_DH(0x4268, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler); @@ -2196,6 +2339,17 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DH(0x4270, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler); MMIO_DFH(0x4094, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(ARB_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); + MMIO_RING_GM_RDR(RING_BBADDR, D_ALL, NULL, NULL); + MMIO_DFH(0x2220, D_ALL, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0x12220, D_ALL, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0x22220, D_ALL, F_CMD_ACCESS, NULL, NULL); + MMIO_RING_DFH(RING_SYNC_1, D_ALL, F_CMD_ACCESS, NULL, NULL); + MMIO_RING_DFH(RING_SYNC_0, D_ALL, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0x22178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0x1a178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0x1a17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0x2217c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); return 0; } @@ -2204,7 +2358,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) struct drm_i915_private *dev_priv = gvt->dev_priv; int ret; - MMIO_DH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, + MMIO_DFH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, NULL, intel_vgpu_reg_imr_handler); MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); @@ -2269,24 +2423,31 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL, intel_vgpu_reg_master_irq_handler); - MMIO_D(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS); - MMIO_D(0x1c134, D_BDW_PLUS); - - MMIO_D(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS); - MMIO_D(RING_HEAD(GEN8_BSD2_RING_BASE), D_BDW_PLUS); - MMIO_GM(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL); - MMIO_D(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS); - MMIO_D(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS); - MMIO_D(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS); - MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK, NULL, ring_mode_mmio_write); - MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK, - NULL, NULL); - MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK, - NULL, NULL); + MMIO_DFH(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS, + F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0x1c134, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); + + MMIO_DFH(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, + NULL, NULL); + MMIO_DFH(RING_HEAD(GEN8_BSD2_RING_BASE), D_BDW_PLUS, + F_CMD_ACCESS, NULL, NULL); + MMIO_GM_RDR(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL); + MMIO_DFH(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, + NULL, NULL); + MMIO_DFH(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS, + F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS, + F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, + ring_mode_mmio_write); + MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS, + F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS, + F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(RING_TIMESTAMP(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, ring_timestamp_mmio_read, 
NULL); - MMIO_RING_D(RING_ACTHD_UDW, D_BDW_PLUS); + MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); #define RING_REG(base) (base + 0xd0) MMIO_RING_F(RING_REG, 4, F_RO, 0, @@ -2303,13 +2464,16 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) #undef RING_REG #define RING_REG(base) (base + 0x234) - MMIO_RING_F(RING_REG, 8, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL); - MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO, 0, ~0LL, D_BDW_PLUS, NULL, NULL); + MMIO_RING_F(RING_REG, 8, F_RO | F_CMD_ACCESS, 0, ~0, D_BDW_PLUS, + NULL, NULL); + MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO | F_CMD_ACCESS, 0, + ~0LL, D_BDW_PLUS, NULL, NULL); #undef RING_REG #define RING_REG(base) (base + 0x244) - MMIO_RING_D(RING_REG, D_BDW_PLUS); - MMIO_D(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS); + MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, + NULL, NULL); #undef RING_REG #define RING_REG(base) (base + 0x370) @@ -2331,6 +2495,8 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) MMIO_D(GEN7_MISCCPCTL, D_BDW_PLUS); MMIO_D(0x1c054, D_BDW_PLUS); + MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write); + MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS); MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS); @@ -2341,14 +2507,14 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL); #undef RING_REG - MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL); - MMIO_GM(0x1c080, D_BDW_PLUS, NULL, NULL); + MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL); + MMIO_GM_RDR(RING_HWS_PGA(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL); MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); - MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW); - MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW); - MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW); + MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW_PLUS); + MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW_PLUS); + MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW_PLUS); MMIO_D(WM_MISC, D_BDW); MMIO_D(BDW_EDP_PSR_BASE, D_BDW); @@ -2362,27 +2528,31 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) MMIO_D(GEN8_EU_DISABLE1, D_BDW_PLUS); MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS); - MMIO_D(0xfdc, D_BDW); - MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); - MMIO_D(GEN7_ROW_CHICKEN2, D_BDW_PLUS); - MMIO_D(GEN8_UCGCTL6, D_BDW_PLUS); + MMIO_D(0xfdc, D_BDW_PLUS); + MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, + NULL, NULL); + MMIO_DFH(GEN7_ROW_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, + NULL, NULL); + MMIO_DFH(GEN8_UCGCTL6, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); - MMIO_D(0xb1f0, D_BDW); - MMIO_D(0xb1c0, D_BDW); + MMIO_DFH(0xb1f0, D_BDW, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0xb1c0, D_BDW, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); - MMIO_D(0xb100, D_BDW); - MMIO_D(0xb10c, D_BDW); + MMIO_DFH(0xb100, D_BDW, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0xb10c, D_BDW, F_CMD_ACCESS, NULL, NULL); MMIO_D(0xb110, D_BDW); - MMIO_DFH(0x24d0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(0x24d4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(0x24d8, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(0x24dc, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_F(0x24d0, 48, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, + NULL, force_nonpriv_write); - MMIO_D(0x83a4, D_BDW); + MMIO_D(0x22040, D_BDW_PLUS); + MMIO_D(0x44484, D_BDW_PLUS); + MMIO_D(0x4448c, D_BDW_PLUS); + + MMIO_DFH(0x83a4, 
D_BDW, F_CMD_ACCESS, NULL, NULL); MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS); - MMIO_D(0x8430, D_BDW); + MMIO_DFH(0x8430, D_BDW, F_CMD_ACCESS, NULL, NULL); MMIO_D(0x110000, D_BDW_PLUS); @@ -2394,10 +2564,19 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK, NULL, NULL); - - MMIO_D(0x2248, D_BDW); - + MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); + + MMIO_DFH(0x2248, D_BDW, F_CMD_ACCESS, NULL, NULL); + + MMIO_DFH(0xe220, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0xe230, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0xe240, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0xe260, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0xe270, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0xe280, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0xe2a0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0xe2b0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0xe2c0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); return 0; } @@ -2420,7 +2599,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_D(HSW_PWR_WELL_BIOS, D_SKL); MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL, NULL, skl_power_well_ctl_write); - MMIO_DH(GEN6_PCODE_MAILBOX, D_SKL, NULL, mailbox_write); MMIO_D(0xa210, D_SKL_PLUS); MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS); MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS); @@ -2578,16 +2756,16 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL); MMIO_D(0xd08, D_SKL); - MMIO_D(0x20e0, D_SKL); - MMIO_D(0x20ec, D_SKL); + MMIO_DFH(0x20e0, D_SKL, F_MODE_MASK, NULL, NULL); + MMIO_DFH(0x20ec, D_SKL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); /* TRTT */ - MMIO_D(0x4de0, D_SKL); - MMIO_D(0x4de4, D_SKL); - MMIO_D(0x4de8, D_SKL); - MMIO_D(0x4dec, D_SKL); - MMIO_D(0x4df0, D_SKL); - MMIO_DH(0x4df4, D_SKL, NULL, gen9_trtte_write); + MMIO_DFH(0x4de0, D_SKL, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0x4de4, D_SKL, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0x4de8, D_SKL, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0x4dec, D_SKL, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0x4df0, D_SKL, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0x4df4, D_SKL, F_CMD_ACCESS, NULL, gen9_trtte_write); MMIO_DH(0x4dfc, D_SKL, NULL, gen9_trtt_chicken_write); MMIO_D(0x45008, D_SKL); @@ -2611,7 +2789,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_D(0x65f08, D_SKL); MMIO_D(0x320f0, D_SKL); - MMIO_D(_REG_VCS2_EXCC, D_SKL); + MMIO_DFH(_REG_VCS2_EXCC, D_SKL, F_CMD_ACCESS, NULL, NULL); MMIO_D(0x70034, D_SKL); MMIO_D(0x71034, D_SKL); MMIO_D(0x72034, D_SKL); @@ -2624,6 +2802,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL); MMIO_D(0x44500, D_SKL); + MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL, F_MODE_MASK | F_CMD_ACCESS, + NULL, NULL); return 0; } @@ -2813,3 +2994,20 @@ int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, write_vreg(vgpu, offset, p_data, bytes); return 0; } + +/** + * intel_gvt_in_force_nonpriv_whitelist - if a mmio is in whitelist to be + * force-nopriv register + * + * @gvt: a GVT device + * @offset: register offset + * + * Returns: + * True if the register is in force-nonpriv whitelist; + * False if outside; + */ +bool 
intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt, + unsigned int offset) +{ + return in_whitelist(offset); +} diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 0f7f5d97f5829d..d641214578a7dc 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -96,10 +96,10 @@ static int gvt_dma_map_iova(struct intel_vgpu *vgpu, kvm_pfn_t pfn, struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; dma_addr_t daddr; - page = pfn_to_page(pfn); - if (is_error_page(page)) + if (unlikely(!pfn_valid(pfn))) return -EFAULT; + page = pfn_to_page(pfn); daddr = dma_map_page(dev, page, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, daddr)) @@ -295,10 +295,10 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev, return 0; return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n" - "fence: %d\n", - BYTES_TO_MB(type->low_gm_size), - BYTES_TO_MB(type->high_gm_size), - type->fence); + "fence: %d\nresolution: %s\n", + BYTES_TO_MB(type->low_gm_size), + BYTES_TO_MB(type->high_gm_size), + type->fence, vgpu_edid_str(type->resolution)); } static MDEV_TYPE_ATTR_RO(available_instances); @@ -426,7 +426,7 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info, static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) { - struct intel_vgpu *vgpu; + struct intel_vgpu *vgpu = NULL; struct intel_vgpu_type *type; struct device *pdev; void *gvt; @@ -437,7 +437,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj)); if (!type) { - gvt_err("failed to find type %s to create\n", + gvt_vgpu_err("failed to find type %s to create\n", kobject_name(kobj)); ret = -EINVAL; goto out; @@ -446,7 +446,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) vgpu = intel_gvt_ops->vgpu_create(gvt, type); if (IS_ERR_OR_NULL(vgpu)) { ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu); - gvt_err("failed to create intel vgpu: %d\n", ret); + gvt_vgpu_err("failed to create intel vgpu: %d\n", ret); goto out; } @@ -526,7 +526,8 @@ static int intel_vgpu_open(struct mdev_device *mdev) ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events, &vgpu->vdev.iommu_notifier); if (ret != 0) { - gvt_err("vfio_register_notifier for iommu failed: %d\n", ret); + gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n", + ret); goto out; } @@ -534,7 +535,8 @@ static int intel_vgpu_open(struct mdev_device *mdev) ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events, &vgpu->vdev.group_notifier); if (ret != 0) { - gvt_err("vfio_register_notifier for group failed: %d\n", ret); + gvt_vgpu_err("vfio_register_notifier for group failed: %d\n", + ret); goto undo_iommu; } @@ -635,7 +637,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf, if (index >= VFIO_PCI_NUM_REGIONS) { - gvt_err("invalid index: %u\n", index); + gvt_vgpu_err("invalid index: %u\n", index); return -EINVAL; } @@ -669,7 +671,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf, case VFIO_PCI_VGA_REGION_INDEX: case VFIO_PCI_ROM_REGION_INDEX: default: - gvt_err("unsupported region: %u\n", index); + gvt_vgpu_err("unsupported region: %u\n", index); } return ret == 0 ? 
count : ret; @@ -861,7 +863,7 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu, trigger = eventfd_ctx_fdget(fd); if (IS_ERR(trigger)) { - gvt_err("eventfd_ctx_fdget failed\n"); + gvt_vgpu_err("eventfd_ctx_fdget failed\n"); return PTR_ERR(trigger); } vgpu->vdev.msi_trigger = trigger; @@ -1120,7 +1122,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, ret = vfio_set_irqs_validate_and_prepare(&hdr, max, VFIO_PCI_NUM_IRQS, &data_size); if (ret) { - gvt_err("intel:vfio_set_irqs_validate_and_prepare failed\n"); + gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n"); return -EINVAL; } if (data_size) { @@ -1310,7 +1312,7 @@ static int kvmgt_guest_init(struct mdev_device *mdev) kvm = vgpu->vdev.kvm; if (!kvm || kvm->mm != current->mm) { - gvt_err("KVM is required to use Intel vGPU\n"); + gvt_vgpu_err("KVM is required to use Intel vGPU\n"); return -ESRCH; } @@ -1324,6 +1326,7 @@ static int kvmgt_guest_init(struct mdev_device *mdev) vgpu->handle = (unsigned long)info; info->vgpu = vgpu; info->kvm = kvm; + kvm_get_kvm(info->kvm); kvmgt_protect_table_init(info); gvt_cache_init(vgpu); @@ -1337,12 +1340,15 @@ static int kvmgt_guest_init(struct mdev_device *mdev) static bool kvmgt_guest_exit(struct kvmgt_guest_info *info) { + struct intel_vgpu *vgpu = info->vgpu; + if (!info) { - gvt_err("kvmgt_guest_info invalid\n"); + gvt_vgpu_err("kvmgt_guest_info invalid\n"); return false; } kvm_page_track_unregister_notifier(info->kvm, &info->track_node); + kvm_put_kvm(info->kvm); kvmgt_protect_table_destroy(info); gvt_cache_destroy(info->vgpu); vfree(info); @@ -1383,12 +1389,14 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) unsigned long iova, pfn; struct kvmgt_guest_info *info; struct device *dev; + struct intel_vgpu *vgpu; int rc; if (!handle_valid(handle)) return INTEL_GVT_INVALID_ADDR; info = (struct kvmgt_guest_info *)handle; + vgpu = info->vgpu; iova = gvt_cache_find(info->vgpu, gfn); if (iova != INTEL_GVT_INVALID_ADDR) return iova; @@ -1397,13 +1405,14 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) dev = mdev_dev(info->vgpu->vdev.mdev); rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn); if (rc != 1) { - gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc); + gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", + gfn, rc); return INTEL_GVT_INVALID_ADDR; } /* transfer to host iova for GFX to use DMA */ rc = gvt_dma_map_iova(info->vgpu, pfn, &iova); if (rc) { - gvt_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn); + gvt_vgpu_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn); vfio_unpin_pages(dev, &gfn, 1); return INTEL_GVT_INVALID_ADDR; } @@ -1417,7 +1426,7 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa, { struct kvmgt_guest_info *info; struct kvm *kvm; - int ret; + int idx, ret; bool kthread = current->mm == NULL; if (!handle_valid(handle)) @@ -1429,8 +1438,10 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa, if (kthread) use_mm(kvm->mm); + idx = srcu_read_lock(&kvm->srcu); ret = write ? 
kvm_write_guest(kvm, gpa, buf, len) : kvm_read_guest(kvm, gpa, buf, len); + srcu_read_unlock(&kvm->srcu, idx); if (kthread) unuse_mm(kvm->mm); diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index 4df078bc5d042b..1ba3bdb0934166 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c @@ -57,6 +57,58 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa) (reg >= gvt->device_info.gtt_start_offset \ && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt)) +static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa, + void *p_data, unsigned int bytes, bool read) +{ + struct intel_gvt *gvt = NULL; + void *pt = NULL; + unsigned int offset = 0; + + if (!vgpu || !p_data) + return; + + gvt = vgpu->gvt; + mutex_lock(&gvt->lock); + offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa); + if (reg_is_mmio(gvt, offset)) { + if (read) + intel_vgpu_default_mmio_read(vgpu, offset, p_data, + bytes); + else + intel_vgpu_default_mmio_write(vgpu, offset, p_data, + bytes); + } else if (reg_is_gtt(gvt, offset) && + vgpu->gtt.ggtt_mm->virtual_page_table) { + offset -= gvt->device_info.gtt_start_offset; + pt = vgpu->gtt.ggtt_mm->virtual_page_table + offset; + if (read) + memcpy(p_data, pt, bytes); + else + memcpy(pt, p_data, bytes); + + } else if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) { + struct intel_vgpu_guest_page *gp; + + /* Since we enter the failsafe mode early during guest boot, + * guest may not have chance to set up its ppgtt table, so + * there should not be any wp pages for guest. Keep the wp + * related code here in case we need to handle it in furture. + */ + gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT); + if (gp) { + /* remove write protection to prevent furture traps */ + intel_vgpu_clean_guest_page(vgpu, gp); + if (read) + intel_gvt_hypervisor_read_gpa(vgpu, pa, + p_data, bytes); + else + intel_gvt_hypervisor_write_gpa(vgpu, pa, + p_data, bytes); + } + } + mutex_unlock(&gvt->lock); +} + /** * intel_vgpu_emulate_mmio_read - emulate MMIO read * @vgpu: a vGPU @@ -75,6 +127,11 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, unsigned int offset = 0; int ret = -EINVAL; + + if (vgpu->failsafe) { + failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true); + return 0; + } mutex_lock(&gvt->lock); if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) { @@ -85,10 +142,10 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes); if (ret) { - gvt_err("vgpu%d: guest page read error %d, " + gvt_vgpu_err("guest page read error %d, " "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n", - vgpu->id, ret, - gp->gfn, pa, *(u32 *)p_data, bytes); + ret, gp->gfn, pa, *(u32 *)p_data, + bytes); } mutex_unlock(&gvt->lock); return ret; @@ -143,14 +200,13 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); if (!vgpu->mmio.disable_warn_untrack) { - gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n", - vgpu->id, offset, bytes, *(u32 *)p_data); + gvt_vgpu_err("read untracked MMIO %x(%dB) val %x\n", + offset, bytes, *(u32 *)p_data); if (offset == 0x206c) { - gvt_err("------------------------------------------\n"); - gvt_err("vgpu%d: likely triggers a gfx reset\n", - vgpu->id); - gvt_err("------------------------------------------\n"); + gvt_vgpu_err("------------------------------------------\n"); + gvt_vgpu_err("likely triggers a 
gfx reset\n"); + gvt_vgpu_err("------------------------------------------\n"); vgpu->mmio.disable_warn_untrack = true; } } @@ -163,8 +219,8 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, mutex_unlock(&gvt->lock); return 0; err: - gvt_err("vgpu%d: fail to emulate MMIO read %08x len %d\n", - vgpu->id, offset, bytes); + gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n", + offset, bytes); mutex_unlock(&gvt->lock); return ret; } @@ -188,6 +244,11 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa, u32 old_vreg = 0, old_sreg = 0; int ret = -EINVAL; + if (vgpu->failsafe) { + failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, false); + return 0; + } + mutex_lock(&gvt->lock); if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) { @@ -197,10 +258,11 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa, if (gp) { ret = gp->handler(gp, pa, p_data, bytes); if (ret) { - gvt_err("vgpu%d: guest page write error %d, " - "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n", - vgpu->id, ret, - gp->gfn, pa, *(u32 *)p_data, bytes); + gvt_err("guest page write error %d, " + "gfn 0x%lx, pa 0x%llx, " + "var 0x%x, len %d\n", + ret, gp->gfn, pa, + *(u32 *)p_data, bytes); } mutex_unlock(&gvt->lock); return ret; @@ -236,7 +298,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa, mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4)); if (!mmio && !vgpu->mmio.disable_warn_untrack) - gvt_err("vgpu%d: write untracked MMIO %x len %d val %x\n", + gvt_dbg_mmio("vgpu%d: write untracked MMIO %x len %d val %x\n", vgpu->id, offset, bytes, *(u32 *)p_data); if (!intel_gvt_mmio_is_unalign(gvt, offset)) { @@ -267,8 +329,8 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa, /* all register bits are RO. 
*/ if (ro_mask == ~(u64)0) { - gvt_err("vgpu%d: try to write RO reg %x\n", - vgpu->id, offset); + gvt_vgpu_err("try to write RO reg %x\n", + offset); ret = 0; goto out; } @@ -298,8 +360,8 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa, mutex_unlock(&gvt->lock); return 0; err: - gvt_err("vgpu%d: fail to emulate MMIO write %08x len %d\n", - vgpu->id, offset, bytes); + gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset, + bytes); mutex_unlock(&gvt->lock); return ret; } @@ -322,6 +384,8 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu) /* set the bit 0:2(Core C-State ) to C0 */ vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0; + + vgpu->mmio.disable_warn_untrack = false; } /** diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h index 3bc620f56f351e..a3a027025cd0a4 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.h +++ b/drivers/gpu/drm/i915/gvt/mmio.h @@ -107,4 +107,7 @@ int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes); int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes); + +bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt, + unsigned int offset); #endif diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c index d9fb41ab71198c..311799136d7f6e 100644 --- a/drivers/gpu/drm/i915/gvt/opregion.c +++ b/drivers/gpu/drm/i915/gvt/opregion.c @@ -27,7 +27,6 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa) { - void __iomem *host_va = vgpu->gvt->opregion.opregion_va; u8 *buf; int i; @@ -43,8 +42,8 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa) if (!vgpu_opregion(vgpu)->va) return -ENOMEM; - memcpy_fromio(vgpu_opregion(vgpu)->va, host_va, - INTEL_GVT_OPREGION_SIZE); + memcpy(vgpu_opregion(vgpu)->va, vgpu->gvt->opregion.opregion_va, + INTEL_GVT_OPREGION_SIZE); for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i; @@ -68,14 +67,15 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map) mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va + i * PAGE_SIZE); if (mfn == INTEL_GVT_INVALID_ADDR) { - gvt_err("fail to get MFN from VA\n"); + gvt_vgpu_err("fail to get MFN from VA\n"); return -EINVAL; } ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, vgpu_opregion(vgpu)->gfn[i], mfn, 1, map); if (ret) { - gvt_err("fail to map GFN to MFN, errno: %d\n", ret); + gvt_vgpu_err("fail to map GFN to MFN, errno: %d\n", + ret); return ret; } } @@ -288,7 +288,7 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci) parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM; if (!(swsci & SWSCI_SCI_SELECT)) { - gvt_err("vgpu%d: requesting SMI service\n", vgpu->id); + gvt_vgpu_err("requesting SMI service\n"); return 0; } /* ignore non 0->1 trasitions */ @@ -301,9 +301,8 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci) func = GVT_OPREGION_FUNC(*scic); subfunc = GVT_OPREGION_SUBFUNC(*scic); if (!querying_capabilities(*scic)) { - gvt_err("vgpu%d: requesting runtime service: func \"%s\"," + gvt_vgpu_err("requesting runtime service: func \"%s\"," " subfunc \"%s\"\n", - vgpu->id, opregion_func_name(func), opregion_subfunc_name(subfunc)); /* diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c index 2b3a642284b6da..0beb83563b0870 100644 --- a/drivers/gpu/drm/i915/gvt/render.c +++ b/drivers/gpu/drm/i915/gvt/render.c @@ -53,6 
+53,14 @@ static struct render_mmio gen8_render_mmio_list[] = { {RCS, _MMIO(0x24d4), 0, false}, {RCS, _MMIO(0x24d8), 0, false}, {RCS, _MMIO(0x24dc), 0, false}, + {RCS, _MMIO(0x24e0), 0, false}, + {RCS, _MMIO(0x24e4), 0, false}, + {RCS, _MMIO(0x24e8), 0, false}, + {RCS, _MMIO(0x24ec), 0, false}, + {RCS, _MMIO(0x24f0), 0, false}, + {RCS, _MMIO(0x24f4), 0, false}, + {RCS, _MMIO(0x24f8), 0, false}, + {RCS, _MMIO(0x24fc), 0, false}, {RCS, _MMIO(0x7004), 0xffff, true}, {RCS, _MMIO(0x7008), 0xffff, true}, {RCS, _MMIO(0x7000), 0xffff, true}, @@ -76,6 +84,14 @@ static struct render_mmio gen9_render_mmio_list[] = { {RCS, _MMIO(0x24d4), 0, false}, {RCS, _MMIO(0x24d8), 0, false}, {RCS, _MMIO(0x24dc), 0, false}, + {RCS, _MMIO(0x24e0), 0, false}, + {RCS, _MMIO(0x24e4), 0, false}, + {RCS, _MMIO(0x24e8), 0, false}, + {RCS, _MMIO(0x24ec), 0, false}, + {RCS, _MMIO(0x24f0), 0, false}, + {RCS, _MMIO(0x24f4), 0, false}, + {RCS, _MMIO(0x24f8), 0, false}, + {RCS, _MMIO(0x24fc), 0, false}, {RCS, _MMIO(0x7004), 0xffff, true}, {RCS, _MMIO(0x7008), 0xffff, true}, {RCS, _MMIO(0x7000), 0xffff, true}, @@ -151,7 +167,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id) I915_WRITE_FW(reg, 0x1); if (wait_for_atomic((I915_READ_FW(reg) == 0), 50)) - gvt_err("timeout in invalidate ring (%d) tlb\n", ring_id); + gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id); else vgpu_vreg(vgpu, regs[ring_id]) = 0; @@ -191,7 +207,7 @@ static void load_mocs(struct intel_vgpu *vgpu, int ring_id) l3_offset.reg = 0xb020; for (i = 0; i < 32; i++) { gen9_render_mocs_L3[i] = I915_READ(l3_offset); - I915_WRITE(l3_offset, vgpu_vreg(vgpu, offset)); + I915_WRITE(l3_offset, vgpu_vreg(vgpu, l3_offset)); POSTING_READ(l3_offset); l3_offset.reg += 4; } diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index 06c9584ac5f033..34b9acdf34791c 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c @@ -101,7 +101,7 @@ struct tbs_sched_data { struct list_head runq_head; }; -#define GVT_DEFAULT_TIME_SLICE (1 * HZ / 1000) +#define GVT_DEFAULT_TIME_SLICE (msecs_to_jiffies(1)) static void tbs_sched_func(struct work_struct *work) { @@ -223,7 +223,7 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu) return; list_add_tail(&vgpu_data->list, &sched_data->runq_head); - schedule_delayed_work(&sched_data->work, sched_data->period); + schedule_delayed_work(&sched_data->work, 0); } static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu) diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index d6b6d0efdd1aee..a44782412f2c99 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -84,7 +84,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) (u32)((workload->ctx_desc.lrca + i) << GTT_PAGE_SHIFT)); if (context_gpa == INTEL_GVT_INVALID_ADDR) { - gvt_err("Invalid guest context descriptor\n"); + gvt_vgpu_err("Invalid guest context descriptor\n"); return -EINVAL; } @@ -127,18 +127,24 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) return 0; } +static inline bool is_gvt_request(struct drm_i915_gem_request *req) +{ + return i915_gem_context_force_single_submission(req->ctx); +} + static int shadow_context_status_change(struct notifier_block *nb, unsigned long action, void *data) { - struct intel_vgpu *vgpu = container_of(nb, - struct intel_vgpu, shadow_ctx_notifier_block); - struct drm_i915_gem_request *req = - 
(struct drm_i915_gem_request *)data; - struct intel_gvt_workload_scheduler *scheduler = - &vgpu->gvt->scheduler; + struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data; + struct intel_gvt *gvt = container_of(nb, struct intel_gvt, + shadow_ctx_notifier_block[req->engine->id]); + struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; struct intel_vgpu_workload *workload = scheduler->current_workload[req->engine->id]; + if (!is_gvt_request(req) || unlikely(!workload)) + return NOTIFY_OK; + switch (action) { case INTEL_CONTEXT_SCHEDULE_IN: intel_gvt_load_render_mmio(workload->vgpu, @@ -148,6 +154,15 @@ static int shadow_context_status_change(struct notifier_block *nb, case INTEL_CONTEXT_SCHEDULE_OUT: intel_gvt_restore_render_mmio(workload->vgpu, workload->ring_id); + /* If the status is -EINPROGRESS means this workload + * doesn't meet any issue during dispatching so when + * get the SCHEDULE_OUT set the status to be zero for + * good. If the status is NOT -EINPROGRESS means there + * is something wrong happened during dispatching and + * the status should not be set to zero + */ + if (workload->status == -EINPROGRESS) + workload->status = 0; atomic_set(&workload->shadow_ctx_active, 0); break; default: @@ -163,7 +178,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) int ring_id = workload->ring_id; struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx; struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; + struct intel_engine_cs *engine = dev_priv->engine[ring_id]; struct drm_i915_gem_request *rq; + struct intel_vgpu *vgpu = workload->vgpu; int ret; gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n", @@ -175,9 +192,24 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) mutex_lock(&dev_priv->drm.struct_mutex); + /* pin shadow context by gvt even the shadow context will be pinned + * when i915 alloc request. That is because gvt will update the guest + * context from shadow context when workload is completed, and at that + * moment, i915 may already unpined the shadow context to make the + * shadow_ctx pages invalid. So gvt need to pin itself. After update + * the guest context, gvt can unpin the shadow_ctx safely. 
+ */ + ret = engine->context_pin(engine, shadow_ctx); + if (ret) { + gvt_vgpu_err("fail to pin shadow context\n"); + workload->status = ret; + mutex_unlock(&dev_priv->drm.struct_mutex); + return ret; + } + rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx); if (IS_ERR(rq)) { - gvt_err("fail to allocate gem request\n"); + gvt_vgpu_err("fail to allocate gem request\n"); ret = PTR_ERR(rq); goto out; } @@ -190,9 +222,12 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) if (ret) goto out; - ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx); - if (ret) - goto out; + if ((workload->ring_id == RCS) && + (workload->wa_ctx.indirect_ctx.size != 0)) { + ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx); + if (ret) + goto out; + } ret = populate_shadow_context(workload); if (ret) @@ -215,6 +250,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) if (!IS_ERR_OR_NULL(rq)) i915_add_request_no_flush(rq); + else + engine->context_unpin(engine, shadow_ctx); + mutex_unlock(&dev_priv->drm.struct_mutex); return ret; } @@ -310,7 +348,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload) (u32)((workload->ctx_desc.lrca + i) << GTT_PAGE_SHIFT)); if (context_gpa == INTEL_GVT_INVALID_ADDR) { - gvt_err("invalid guest context descriptor\n"); + gvt_vgpu_err("invalid guest context descriptor\n"); return; } @@ -359,15 +397,31 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) workload = scheduler->current_workload[ring_id]; vgpu = workload->vgpu; - if (!workload->status && !vgpu->resetting) { + /* For the workload w/ request, needs to wait for the context + * switch to make sure request is completed. + * For the workload w/o request, directly complete the workload. 
+ */ + if (workload->req) { + struct drm_i915_private *dev_priv = + workload->vgpu->gvt->dev_priv; + struct intel_engine_cs *engine = + dev_priv->engine[workload->ring_id]; wait_event(workload->shadow_ctx_status_wq, !atomic_read(&workload->shadow_ctx_active)); - update_guest_context(workload); + i915_gem_request_put(fetch_and_zero(&workload->req)); + + if (!workload->status && !vgpu->resetting) { + update_guest_context(workload); - for_each_set_bit(event, workload->pending_events, - INTEL_GVT_EVENT_MAX) - intel_vgpu_trigger_virtual_event(vgpu, event); + for_each_set_bit(event, workload->pending_events, + INTEL_GVT_EVENT_MAX) + intel_vgpu_trigger_virtual_event(vgpu, event); + } + mutex_lock(&dev_priv->drm.struct_mutex); + /* unpin shadow ctx as the shadow_ctx update is done */ + engine->context_unpin(engine, workload->vgpu->shadow_ctx); + mutex_unlock(&dev_priv->drm.struct_mutex); } gvt_dbg_sched("ring id %d complete workload %p status %d\n", @@ -397,7 +451,7 @@ static int workload_thread(void *priv) int ring_id = p->ring_id; struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; struct intel_vgpu_workload *workload = NULL; - long lret; + struct intel_vgpu *vgpu = NULL; int ret; bool need_force_wake = IS_SKYLAKE(gvt->dev_priv); DEFINE_WAIT_FUNC(wait, woken_wake_function); @@ -440,29 +494,19 @@ static int workload_thread(void *priv) mutex_unlock(&gvt->lock); if (ret) { - gvt_err("fail to dispatch workload, skip\n"); + vgpu = workload->vgpu; + gvt_vgpu_err("fail to dispatch workload, skip\n"); goto complete; } gvt_dbg_sched("ring id %d wait workload %p\n", workload->ring_id, workload); - - lret = i915_wait_request(workload->req, - 0, MAX_SCHEDULE_TIMEOUT); - if (lret < 0) { - workload->status = lret; - gvt_err("fail to wait workload, skip\n"); - } else { - workload->status = 0; - } + i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT); complete: gvt_dbg_sched("will complete workload %p, status: %d\n", workload, workload->status); - if (workload->req) - i915_gem_request_put(fetch_and_zero(&workload->req)); - complete_current_workload(gvt, ring_id); if (need_force_wake) @@ -493,15 +537,16 @@ void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu) void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt) { struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; - int i; + struct intel_engine_cs *engine; + enum intel_engine_id i; gvt_dbg_core("clean workload scheduler\n"); - for (i = 0; i < I915_NUM_ENGINES; i++) { - if (scheduler->thread[i]) { - kthread_stop(scheduler->thread[i]); - scheduler->thread[i] = NULL; - } + for_each_engine(engine, gvt->dev_priv, i) { + atomic_notifier_chain_unregister( + &engine->context_status_notifier, + &gvt->shadow_ctx_notifier_block[i]); + kthread_stop(scheduler->thread[i]); } } @@ -509,18 +554,15 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt) { struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; struct workload_thread_param *param = NULL; + struct intel_engine_cs *engine; + enum intel_engine_id i; int ret; - int i; gvt_dbg_core("init workload scheduler\n"); init_waitqueue_head(&scheduler->workload_complete_wq); - for (i = 0; i < I915_NUM_ENGINES; i++) { - /* check ring mask at init time */ - if (!HAS_ENGINE(gvt->dev_priv, i)) - continue; - + for_each_engine(engine, gvt->dev_priv, i) { init_waitqueue_head(&scheduler->waitq[i]); param = kzalloc(sizeof(*param), GFP_KERNEL); @@ -539,6 +581,11 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt) ret = PTR_ERR(scheduler->thread[i]); goto err; } + 
+ gvt->shadow_ctx_notifier_block[i].notifier_call = + shadow_context_status_change; + atomic_notifier_chain_register(&engine->context_status_notifier, + &gvt->shadow_ctx_notifier_block[i]); } return 0; err: @@ -550,9 +597,6 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt) void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu) { - atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier, - &vgpu->shadow_ctx_notifier_block); - i915_gem_context_put_unlocked(vgpu->shadow_ctx); } @@ -567,10 +611,5 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu) vgpu->shadow_ctx->engine[RCS].initialised = true; - vgpu->shadow_ctx_notifier_block.notifier_call = - shadow_context_status_change; - - atomic_notifier_chain_register(&vgpu->shadow_ctx->status_notifier, - &vgpu->shadow_ctx_notifier_block); return 0; } diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 95a97aa0051e78..41cfa5ccae84ce 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -64,6 +64,20 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu) WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE); } +static struct { + unsigned int low_mm; + unsigned int high_mm; + unsigned int fence; + enum intel_vgpu_edid edid; + char *name; +} vgpu_types[] = { +/* Fixed vGPU type table */ + { MB_TO_BYTES(64), MB_TO_BYTES(512), 4, GVT_EDID_1024_768, "8" }, + { MB_TO_BYTES(128), MB_TO_BYTES(512), 4, GVT_EDID_1920_1200, "4" }, + { MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, GVT_EDID_1920_1200, "2" }, + { MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, GVT_EDID_1920_1200, "1" }, +}; + /** * intel_gvt_init_vgpu_types - initialize vGPU type list * @gvt : GVT device @@ -78,9 +92,8 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) unsigned int min_low; /* vGPU type name is defined as GVTg_Vx_y which contains - * physical GPU generation type and 'y' means maximum vGPU - * instances user can create on one physical GPU for this - * type. + * physical GPU generation type (e.g V4 as BDW server, V5 as + * SKL server). * * Depend on physical SKU resource, might see vGPU types like * GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. 
We can create @@ -92,7 +105,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) */ low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE; high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE; - num_types = 4; + num_types = sizeof(vgpu_types) / sizeof(vgpu_types[0]); gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type), GFP_KERNEL); @@ -101,28 +114,29 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) min_low = MB_TO_BYTES(32); for (i = 0; i < num_types; ++i) { - if (low_avail / min_low == 0) + if (low_avail / vgpu_types[i].low_mm == 0) break; - gvt->types[i].low_gm_size = min_low; - gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U)); - gvt->types[i].fence = 4; - gvt->types[i].max_instance = min(low_avail / min_low, - high_avail / gvt->types[i].high_gm_size); - gvt->types[i].avail_instance = gvt->types[i].max_instance; + + gvt->types[i].low_gm_size = vgpu_types[i].low_mm; + gvt->types[i].high_gm_size = vgpu_types[i].high_mm; + gvt->types[i].fence = vgpu_types[i].fence; + gvt->types[i].resolution = vgpu_types[i].edid; + gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm, + high_avail / vgpu_types[i].high_mm); if (IS_GEN8(gvt->dev_priv)) - sprintf(gvt->types[i].name, "GVTg_V4_%u", - gvt->types[i].max_instance); + sprintf(gvt->types[i].name, "GVTg_V4_%s", + vgpu_types[i].name); else if (IS_GEN9(gvt->dev_priv)) - sprintf(gvt->types[i].name, "GVTg_V5_%u", - gvt->types[i].max_instance); + sprintf(gvt->types[i].name, "GVTg_V5_%s", + vgpu_types[i].name); - min_low <<= 1; - gvt_dbg_core("type[%d]: %s max %u avail %u low %u high %u fence %u\n", - i, gvt->types[i].name, gvt->types[i].max_instance, + gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u res %s\n", + i, gvt->types[i].name, gvt->types[i].avail_instance, gvt->types[i].low_gm_size, - gvt->types[i].high_gm_size, gvt->types[i].fence); + gvt->types[i].high_gm_size, gvt->types[i].fence, + vgpu_edid_str(gvt->types[i].resolution)); } gvt->num_types = i; @@ -138,7 +152,7 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt) { int i; unsigned int low_gm_avail, high_gm_avail, fence_avail; - unsigned int low_gm_min, high_gm_min, fence_min, total_min; + unsigned int low_gm_min, high_gm_min, fence_min; /* Need to depend on maxium hw resource size but keep on * static config for now. 
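The vgpu.c hunks above replace the old scheme, in which each type's instance count was derived from a doubling min_low, with a fixed vgpu_types[] table: every entry carries its low/high GM cost, fence count, default EDID resolution and a name suffix, and avail_instance becomes the smaller of how many times the remaining low and high GM can cover that cost. The snippet below is a minimal standalone sketch of that calculation and is not part of the patch; the local MB_TO_BYTES macro, the struct layout and the example aperture/hidden-memory sizes are stand-ins chosen purely for illustration.

#include <stdio.h>

#define MB_TO_BYTES(mb)	((unsigned long long)(mb) << 20)

/* Mirrors the shape of the fixed type table added above (illustrative only). */
struct example_vgpu_type {
	unsigned long long low_mm;	/* low (mappable) GM cost per instance */
	unsigned long long high_mm;	/* high (hidden) GM cost per instance */
	unsigned int fence;
	const char *name;		/* suffix used in GVTg_V4_%s / GVTg_V5_%s */
};

static const struct example_vgpu_type types[] = {
	{ MB_TO_BYTES(64),  MB_TO_BYTES(512),  4, "8" },
	{ MB_TO_BYTES(128), MB_TO_BYTES(512),  4, "4" },
	{ MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, "2" },
	{ MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, "1" },
};

int main(void)
{
	/* Made-up example of what is left after the host reservation. */
	unsigned long long low_avail  = MB_TO_BYTES(512);
	unsigned long long high_avail = MB_TO_BYTES(4096);
	unsigned int i;

	for (i = 0; i < sizeof(types) / sizeof(types[0]); i++) {
		unsigned long long by_low  = low_avail / types[i].low_mm;
		unsigned long long by_high = high_avail / types[i].high_mm;

		/* avail_instance = min(low_avail / low_mm, high_avail / high_mm) */
		printf("GVTg_V4_%s: avail_instance = %llu\n", types[i].name,
		       by_low < by_high ? by_low : by_high);
	}
	return 0;
}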
@@ -154,12 +168,11 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt) low_gm_min = low_gm_avail / gvt->types[i].low_gm_size; high_gm_min = high_gm_avail / gvt->types[i].high_gm_size; fence_min = fence_avail / gvt->types[i].fence; - total_min = min(min(low_gm_min, high_gm_min), fence_min); - gvt->types[i].avail_instance = min(gvt->types[i].max_instance, - total_min); + gvt->types[i].avail_instance = min(min(low_gm_min, high_gm_min), + fence_min); - gvt_dbg_core("update type[%d]: %s max %u avail %u low %u high %u fence %u\n", - i, gvt->types[i].name, gvt->types[i].max_instance, + gvt_dbg_core("update type[%d]: %s avail %u low %u high %u fence %u\n", + i, gvt->types[i].name, gvt->types[i].avail_instance, gvt->types[i].low_gm_size, gvt->types[i].high_gm_size, gvt->types[i].fence); } @@ -248,7 +261,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, if (ret) goto out_detach_hypervisor_vgpu; - ret = intel_vgpu_init_display(vgpu); + ret = intel_vgpu_init_display(vgpu, param->resolution); if (ret) goto out_clean_gtt; @@ -312,6 +325,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, param.low_gm_sz = type->low_gm_size; param.high_gm_sz = type->high_gm_size; param.fence_sz = type->fence; + param.resolution = type->resolution; /* XXX current param based on MB */ param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz); @@ -387,8 +401,12 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, populate_pvinfo_page(vgpu); intel_vgpu_reset_display(vgpu); - if (dmlr) + if (dmlr) { intel_vgpu_reset_cfg_space(vgpu); + /* only reset the failsafe mode when dmlr reset */ + vgpu->failsafe = false; + vgpu->pv_notified = false; + } } vgpu->resetting = false; diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index e703556eba999a..1c75402a59c137 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -248,6 +248,7 @@ static int i915_getparam(struct drm_device *dev, void *data, case I915_PARAM_IRQ_ACTIVE: case I915_PARAM_ALLOW_BATCHBUFFER: case I915_PARAM_LAST_DISPATCH: + case I915_PARAM_HAS_EXEC_CONSTANTS: /* Reject all old ums/dri params. */ return -ENODEV; case I915_PARAM_CHIPSET_ID: @@ -274,9 +275,6 @@ static int i915_getparam(struct drm_device *dev, void *data, case I915_PARAM_HAS_BSD2: value = !!dev_priv->engine[VCS2]; break; - case I915_PARAM_HAS_EXEC_CONSTANTS: - value = INTEL_GEN(dev_priv) >= 4; - break; case I915_PARAM_HAS_LLC: value = HAS_LLC(dev_priv); break; @@ -1788,7 +1786,7 @@ void i915_reset(struct drm_i915_private *dev_priv) goto error; } - i915_gem_reset_finish(dev_priv); + i915_gem_reset(dev_priv); intel_overlay_reset(dev_priv); /* Ok, now get things going again... */ @@ -1814,6 +1812,7 @@ void i915_reset(struct drm_i915_private *dev_priv) i915_queue_hangcheck(dev_priv); wakeup: + i915_gem_reset_finish(dev_priv); enable_irq(dev_priv->drm.irq); wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS); return; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 0a4b42d313912c..1e53c31b6826ec 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -293,6 +293,7 @@ enum plane_id { PLANE_PRIMARY, PLANE_SPRITE0, PLANE_SPRITE1, + PLANE_SPRITE2, PLANE_CURSOR, I915_MAX_PLANES, }; @@ -1324,7 +1325,7 @@ struct intel_gen6_power_mgmt { unsigned boosts; /* manual wa residency calculations */ - struct intel_rps_ei up_ei, down_ei; + struct intel_rps_ei ei; /* * Protects RPS/RC6 register access and PCU communication. 
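[Editor's illustration, not part of the patch] A user-visible consequence of the i915_getparam() hunk above is that I915_PARAM_HAS_EXEC_CONSTANTS now falls into the "old ums/dri params" bucket and reports -ENODEV, and (per the execbuffer hunk later in this series) any I915_EXEC_CONSTANTS_* flag is rejected with -EINVAL. A minimal userspace-style probe is sketched below; the device node path is an assumption and error handling is deliberately thin.

#include <stdio.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

int main(void)
{
	/* Assumed device path; adjust for the system under test. */
	int fd = open("/dev/dri/card0", O_RDWR);
	int value = 0, ret;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_EXEC_CONSTANTS,
		.value = &value,
	};

	if (fd < 0) {
		perror("open");
		return 1;
	}

	ret = ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (ret == 0 && value) {
		printf("kernel still advertises EXEC_CONSTANTS\n");
	} else if (ret < 0 && errno == ENODEV) {
		/* With this patch the parameter is rejected, so userspace
		 * must not set I915_EXEC_CONSTANTS_* in execbuffer flags. */
		printf("EXEC_CONSTANTS removed; avoid the execbuffer flags\n");
	} else {
		printf("EXEC_CONSTANTS not usable (ret=%d, errno=%d)\n",
		       ret, errno);
	}

	close(fd);
	return 0;
}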
@@ -2063,8 +2064,6 @@ struct drm_i915_private { const struct intel_device_info info; - int relative_constants_mode; - void __iomem *regs; struct intel_uncore uncore; @@ -3341,6 +3340,7 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error) } int i915_gem_reset_prepare(struct drm_i915_private *dev_priv); +void i915_gem_reset(struct drm_i915_private *dev_priv); void i915_gem_reset_finish(struct drm_i915_private *dev_priv); void i915_gem_set_wedged(struct drm_i915_private *dev_priv); void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 6908123162d17c..67b1fc5a03313b 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1434,6 +1434,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, trace_i915_gem_object_pwrite(obj, args->offset, args->size); + ret = -ENODEV; + if (obj->ops->pwrite) + ret = obj->ops->pwrite(obj, args); + if (ret != -ENODEV) + goto err; + ret = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL, @@ -2119,6 +2125,7 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj) */ shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1); obj->mm.madv = __I915_MADV_PURGED; + obj->mm.pages = ERR_PTR(-EFAULT); } /* Try to discard unwanted pages */ @@ -2218,7 +2225,9 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, __i915_gem_object_reset_page_iter(obj); - obj->ops->put_pages(obj, pages); + if (!IS_ERR(pages)) + obj->ops->put_pages(obj, pages); + unlock: mutex_unlock(&obj->mm.lock); } @@ -2437,7 +2446,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj) if (err) return err; - if (unlikely(!obj->mm.pages)) { + if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) { err = ____i915_gem_object_get_pages(obj); if (err) goto unlock; @@ -2515,7 +2524,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, pinned = true; if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) { - if (unlikely(!obj->mm.pages)) { + if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) { ret = ____i915_gem_object_get_pages(obj); if (ret) goto err_unlock; @@ -2563,6 +2572,75 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, goto out_unlock; } +static int +i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj, + const struct drm_i915_gem_pwrite *arg) +{ + struct address_space *mapping = obj->base.filp->f_mapping; + char __user *user_data = u64_to_user_ptr(arg->data_ptr); + u64 remain, offset; + unsigned int pg; + + /* Before we instantiate/pin the backing store for our use, we + * can prepopulate the shmemfs filp efficiently using a write into + * the pagecache. We avoid the penalty of instantiating all the + * pages, important if the user is just writing to a few and never + * uses the object on the GPU, and using a direct write into shmemfs + * allows it to avoid the cost of retrieving a page (either swapin + * or clearing-before-use) before it is overwritten. + */ + if (READ_ONCE(obj->mm.pages)) + return -ENODEV; + + /* Before the pages are instantiated the object is treated as being + * in the CPU domain. The pages will be clflushed as required before + * use, and we can freely write into the pages directly. If userspace + * races pwrite with any other operation; corruption will ensue - + * that is userspace's prerogative! 
+ */ + + remain = arg->size; + offset = arg->offset; + pg = offset_in_page(offset); + + do { + unsigned int len, unwritten; + struct page *page; + void *data, *vaddr; + int err; + + len = PAGE_SIZE - pg; + if (len > remain) + len = remain; + + err = pagecache_write_begin(obj->base.filp, mapping, + offset, len, 0, + &page, &data); + if (err < 0) + return err; + + vaddr = kmap(page); + unwritten = copy_from_user(vaddr + pg, user_data, len); + kunmap(page); + + err = pagecache_write_end(obj->base.filp, mapping, + offset, len, len - unwritten, + page, data); + if (err < 0) + return err; + + if (unwritten) + return -EFAULT; + + remain -= len; + user_data += len; + offset += len; + pg = 0; + } while (remain); + + return 0; +} + static bool ban_context(const struct i915_gem_context *ctx) { return (i915_gem_context_is_bannable(ctx) && @@ -2641,7 +2719,16 @@ int i915_gem_reset_prepare(struct drm_i915_private *dev_priv) for_each_engine(engine, dev_priv, id) { struct drm_i915_gem_request *request; + /* Prevent request submission to the hardware until we have + * completed the reset in i915_gem_reset_finish(). If a request + * is completed by one engine, it may then queue a request + * to a second via its engine->irq_tasklet *just* as we are + * calling engine->init_hw() and also writing the ELSP. + * Turning off the engine->irq_tasklet until the reset is over + * prevents the race. + */ tasklet_kill(&engine->irq_tasklet); + tasklet_disable(&engine->irq_tasklet); if (engine_stalled(engine)) { request = i915_gem_find_active_request(engine); @@ -2756,7 +2843,7 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine) engine->reset_hw(engine, request); } -void i915_gem_reset_finish(struct drm_i915_private *dev_priv) +void i915_gem_reset(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; enum intel_engine_id id; @@ -2778,6 +2865,17 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv) } } +void i915_gem_reset_finish(struct drm_i915_private *dev_priv) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + + lockdep_assert_held(&dev_priv->drm.struct_mutex); + + for_each_engine(engine, dev_priv, id) + tasklet_enable(&engine->irq_tasklet); +} + static void nop_submit_request(struct drm_i915_gem_request *request) { dma_fence_set_error(&request->fence, -EIO); @@ -3029,6 +3127,16 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start)); if (args->timeout_ns < 0) args->timeout_ns = 0; + + /* + * Apparently ktime isn't accurate enough and occasionally has a + * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch + * things up to make the test happy. We allow up to 1 jiffy. + * + * This is a regression from the timespec->ktime conversion. 
+ */ + if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns)) + args->timeout_ns = 0; } i915_gem_object_put(obj); @@ -3974,8 +4082,11 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, static const struct drm_i915_gem_object_ops i915_gem_object_ops = { .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_IS_SHRINKABLE, + .get_pages = i915_gem_object_get_pages_gtt, .put_pages = i915_gem_object_put_pages_gtt, + + .pwrite = i915_gem_object_pwrite_gtt, }; struct drm_i915_gem_object * @@ -4583,8 +4694,6 @@ i915_gem_load_init(struct drm_i915_private *dev_priv) init_waitqueue_head(&dev_priv->gpu_error.wait_queue); init_waitqueue_head(&dev_priv->gpu_error.reset_queue); - dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL; - init_waitqueue_head(&dev_priv->pending_flip_queue); dev_priv->mm.interruptible = true; diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 17f90c6182081c..e2d83b6d376b03 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -311,7 +311,6 @@ __create_hw_context(struct drm_i915_private *dev_priv, ctx->ring_size = 4 * PAGE_SIZE; ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) << GEN8_CTX_ADDRESSING_MODE_SHIFT; - ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier); /* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not * present or not in use we still need a small bias as ring wraparound diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h index 0ac750b90f3d33..e9c008fe14b1d7 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.h +++ b/drivers/gpu/drm/i915/i915_gem_context.h @@ -160,9 +160,6 @@ struct i915_gem_context { /** desc_template: invariant fields for the HW context descriptor */ u32 desc_template; - /** status_notifier: list of callbacks for context-switch changes */ - struct atomic_notifier_head status_notifier; - /** guilty_count: How many times this context has caused a GPU hang. */ unsigned int guilty_count; /** diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c index d037adcda6f20b..29bb8011dbc4a0 100644 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c @@ -141,7 +141,7 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct * if (!obj->base.filp) return -ENODEV; - ret = obj->base.filp->f_op->mmap(obj->base.filp, vma); + ret = call_mmap(obj->base.filp, vma); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index c181b1bb3d2c9e..3be2503aa042c0 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -293,12 +293,12 @@ int i915_gem_evict_for_node(struct i915_address_space *vm, * those as well to make room for our guard pages. 
*/ if (check_color) { - if (vma->node.start + vma->node.size == node->start) { - if (vma->node.color == node->color) + if (node->start + node->size == target->start) { + if (node->color == target->color) continue; } - if (vma->node.start == node->start + node->size) { - if (vma->node.color == node->color) + if (node->start == target->start + target->size) { + if (node->color == target->color) continue; } } diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index d02cfaefe1c84e..30e0675fd7dab7 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -1408,10 +1408,7 @@ execbuf_submit(struct i915_execbuffer_params *params, struct drm_i915_gem_execbuffer2 *args, struct list_head *vmas) { - struct drm_i915_private *dev_priv = params->request->i915; u64 exec_start, exec_len; - int instp_mode; - u32 instp_mask; int ret; ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas); @@ -1422,56 +1419,11 @@ execbuf_submit(struct i915_execbuffer_params *params, if (ret) return ret; - instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK; - instp_mask = I915_EXEC_CONSTANTS_MASK; - switch (instp_mode) { - case I915_EXEC_CONSTANTS_REL_GENERAL: - case I915_EXEC_CONSTANTS_ABSOLUTE: - case I915_EXEC_CONSTANTS_REL_SURFACE: - if (instp_mode != 0 && params->engine->id != RCS) { - DRM_DEBUG("non-0 rel constants mode on non-RCS\n"); - return -EINVAL; - } - - if (instp_mode != dev_priv->relative_constants_mode) { - if (INTEL_INFO(dev_priv)->gen < 4) { - DRM_DEBUG("no rel constants on pre-gen4\n"); - return -EINVAL; - } - - if (INTEL_INFO(dev_priv)->gen > 5 && - instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) { - DRM_DEBUG("rel surface constants mode invalid on gen5+\n"); - return -EINVAL; - } - - /* The HW changed the meaning on this bit on gen6 */ - if (INTEL_INFO(dev_priv)->gen >= 6) - instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE; - } - break; - default: - DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode); + if (args->flags & I915_EXEC_CONSTANTS_MASK) { + DRM_DEBUG("I915_EXEC_CONSTANTS_* unsupported\n"); return -EINVAL; } - if (params->engine->id == RCS && - instp_mode != dev_priv->relative_constants_mode) { - struct intel_ring *ring = params->request->ring; - - ret = intel_ring_begin(params->request, 4); - if (ret) - return ret; - - intel_ring_emit(ring, MI_NOOP); - intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit_reg(ring, INSTPM); - intel_ring_emit(ring, instp_mask << 16 | instp_mode); - intel_ring_advance(ring); - - dev_priv->relative_constants_mode = instp_mode; - } - if (args->flags & I915_EXEC_GEN7_SOL_RESET) { ret = i915_reset_gen7_sol_offsets(params->request); if (ret) diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h index bf90b07163d126..76b80a0be79767 100644 --- a/drivers/gpu/drm/i915/i915_gem_object.h +++ b/drivers/gpu/drm/i915/i915_gem_object.h @@ -54,6 +54,9 @@ struct drm_i915_gem_object_ops { struct sg_table *(*get_pages)(struct drm_i915_gem_object *); void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *); + int (*pwrite)(struct drm_i915_gem_object *, + const struct drm_i915_gem_pwrite *); + int (*dmabuf_export)(struct drm_i915_gem_object *); void (*release)(struct drm_i915_gem_object *); }; diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index f31deeb727039f..e7c3c0318ff60f 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ 
b/drivers/gpu/drm/i915/i915_gem_request.c @@ -24,6 +24,9 @@ #include #include +#include +#include +#include #include "i915_drv.h" diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index 401006b4c6a36b..d5d2b4c6ed382d 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c @@ -263,7 +263,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv) I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_ACTIVE); - rcu_barrier(); /* wait until our RCU delayed slab frees are completed */ + synchronize_rcu(); /* wait for our earlier RCU delayed slab frees */ return freed; } diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 0115989e324a20..22b46398831e09 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -31,6 +31,7 @@ #include #include #include +#include struct i915_mm_struct { struct mm_struct *mm; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index e6ffef2f707a01..b6c886ac901bd7 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1046,68 +1046,51 @@ static void vlv_c0_read(struct drm_i915_private *dev_priv, ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); } -static bool vlv_c0_above(struct drm_i915_private *dev_priv, - const struct intel_rps_ei *old, - const struct intel_rps_ei *now, - int threshold) -{ - u64 time, c0; - unsigned int mul = 100; - - if (old->cz_clock == 0) - return false; - - if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH) - mul <<= 8; - - time = now->cz_clock - old->cz_clock; - time *= threshold * dev_priv->czclk_freq; - - /* Workload can be split between render + media, e.g. SwapBuffers - * being blitted in X after being rendered in mesa. To account for - * this we need to combine both engines into our activity counter. - */ - c0 = now->render_c0 - old->render_c0; - c0 += now->media_c0 - old->media_c0; - c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC; - - return c0 >= time; -} - void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) { - vlv_c0_read(dev_priv, &dev_priv->rps.down_ei); - dev_priv->rps.up_ei = dev_priv->rps.down_ei; + memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei)); } static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) { + const struct intel_rps_ei *prev = &dev_priv->rps.ei; struct intel_rps_ei now; u32 events = 0; - if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0) + if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0) return 0; vlv_c0_read(dev_priv, &now); if (now.cz_clock == 0) return 0; - if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) { - if (!vlv_c0_above(dev_priv, - &dev_priv->rps.down_ei, &now, - dev_priv->rps.down_threshold)) - events |= GEN6_PM_RP_DOWN_THRESHOLD; - dev_priv->rps.down_ei = now; - } + if (prev->cz_clock) { + u64 time, c0; + unsigned int mul; + + mul = VLV_CZ_CLOCK_TO_MILLI_SEC * 100; /* scale to threshold% */ + if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH) + mul <<= 8; - if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) { - if (vlv_c0_above(dev_priv, - &dev_priv->rps.up_ei, &now, - dev_priv->rps.up_threshold)) - events |= GEN6_PM_RP_UP_THRESHOLD; - dev_priv->rps.up_ei = now; + time = now.cz_clock - prev->cz_clock; + time *= dev_priv->czclk_freq; + + /* Workload can be split between render + media, + * e.g. SwapBuffers being blitted in X after being rendered in + * mesa. 
To account for this we need to combine both engines + * into our activity counter. + */ + c0 = now.render_c0 - prev->render_c0; + c0 += now.media_c0 - prev->media_c0; + c0 *= mul; + + if (c0 > time * dev_priv->rps.up_threshold) + events = GEN6_PM_RP_UP_THRESHOLD; + else if (c0 < time * dev_priv->rps.down_threshold) + events = GEN6_PM_RP_DOWN_THRESHOLD; } + dev_priv->rps.ei = now; return events; } @@ -4228,7 +4211,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv) /* Let's track the enabled rps events */ if (IS_VALLEYVIEW(dev_priv)) /* WaGsvRC0ResidencyMethod:vlv */ - dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED; + dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; else dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; @@ -4266,6 +4249,16 @@ void intel_irq_init(struct drm_i915_private *dev_priv) if (!IS_GEN2(dev_priv)) dev->vblank_disable_immediate = true; + /* Most platforms treat the display irq block as an always-on + * power domain. vlv/chv can disable it at runtime and need + * special care to avoid writing any of the display block registers + * outside of the power domain. We defer setting up the display irqs + * in this case to the runtime pm. + */ + dev_priv->display_irqs_enabled = true; + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + dev_priv->display_irqs_enabled = false; + dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 155906e848120a..df20e9bc1c0f3d 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -512,10 +512,36 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) return ret; } +static void +i915_vma_remove(struct i915_vma *vma) +{ + struct drm_i915_gem_object *obj = vma->obj; + + GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); + GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)); + + drm_mm_remove_node(&vma->node); + list_move_tail(&vma->vm_link, &vma->vm->unbound_list); + + /* Since the unbound list is global, only move to that list if + * no more VMAs exist. + */ + if (--obj->bind_count == 0) + list_move_tail(&obj->global_link, + &to_i915(obj->base.dev)->mm.unbound_list); + + /* And finally now the object is completely decoupled from this vma, + * we can drop its hold on the backing storage and allow it to be + * reaped by the shrinker. 
+ */ + i915_gem_object_unpin_pages(obj); + GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count); +} + int __i915_vma_do_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) { - unsigned int bound = vma->flags; + const unsigned int bound = vma->flags; int ret; lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); @@ -524,18 +550,18 @@ int __i915_vma_do_pin(struct i915_vma *vma, if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) { ret = -EBUSY; - goto err; + goto err_unpin; } if ((bound & I915_VMA_BIND_MASK) == 0) { ret = i915_vma_insert(vma, size, alignment, flags); if (ret) - goto err; + goto err_unpin; } ret = i915_vma_bind(vma, vma->obj->cache_level, flags); if (ret) - goto err; + goto err_remove; if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND) __i915_vma_set_map_and_fenceable(vma); @@ -544,7 +570,12 @@ int __i915_vma_do_pin(struct i915_vma *vma, GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags)); return 0; -err: +err_remove: + if ((bound & I915_VMA_BIND_MASK) == 0) { + GEM_BUG_ON(vma->pages); + i915_vma_remove(vma); + } +err_unpin: __i915_vma_unpin(vma); return ret; } @@ -657,9 +688,6 @@ int i915_vma_unbind(struct i915_vma *vma) } vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND); - drm_mm_remove_node(&vma->node); - list_move_tail(&vma->vm_link, &vma->vm->unbound_list); - if (vma->pages != obj->mm.pages) { GEM_BUG_ON(!vma->pages); sg_free_table(vma->pages); @@ -667,18 +695,7 @@ int i915_vma_unbind(struct i915_vma *vma) } vma->pages = NULL; - /* Since the unbound list is global, only move to that list if - * no more VMAs exist. */ - if (--obj->bind_count == 0) - list_move_tail(&obj->global_link, - &to_i915(obj->base.dev)->mm.unbound_list); - - /* And finally now the object is completely decoupled from this vma, - * we can drop its hold on the backing storage and allow it to be - * reaped by the shrinker. - */ - i915_gem_object_unpin_pages(obj); - GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count); + i915_vma_remove(vma); destroy: if (unlikely(i915_vma_is_closed(vma))) diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c index fcfa423d08bdfb..7044e9a6abf7a5 100644 --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c @@ -23,6 +23,7 @@ */ #include +#include #include "i915_drv.h" diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index 0085bc745f6aa5..de219b71fb76ec 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -35,7 +35,6 @@ */ #define I915_CSR_GLK "i915/glk_dmc_ver1_01.bin" -MODULE_FIRMWARE(I915_CSR_GLK); #define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 1) #define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin" diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 01341670738fbb..ed1f4f272b4fb3 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3669,10 +3669,6 @@ static void intel_update_pipe_config(struct intel_crtc *crtc, /* drm_atomic_helper_update_legacy_modeset_state might not be called. 
*/ crtc->base.mode = crtc->base.state->mode; - DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n", - old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h, - pipe_config->pipe_src_w, pipe_config->pipe_src_h); - /* * Update pipe size and adjust fitter if needed: the reason for this is * that in compute_mode_changes we check the native mode (not the pfit @@ -4796,23 +4792,17 @@ static void skylake_pfit_enable(struct intel_crtc *crtc) struct intel_crtc_scaler_state *scaler_state = &crtc->config->scaler_state; - DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config); - if (crtc->config->pch_pfit.enabled) { int id; - if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) { - DRM_ERROR("Requesting pfit without getting a scaler first\n"); + if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) return; - } id = scaler_state->scaler_id; I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN | PS_FILTER_MEDIUM | scaler_state->scalers[id].mode); I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos); I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size); - - DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id); } } @@ -14379,6 +14369,24 @@ static void skl_update_crtcs(struct drm_atomic_state *state, } while (progress); } +static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv) +{ + struct intel_atomic_state *state, *next; + struct llist_node *freed; + + freed = llist_del_all(&dev_priv->atomic_helper.free_list); + llist_for_each_entry_safe(state, next, freed, freed) + drm_atomic_state_put(&state->base); +} + +static void intel_atomic_helper_free_state_worker(struct work_struct *work) +{ + struct drm_i915_private *dev_priv = + container_of(work, typeof(*dev_priv), atomic_helper.free_work); + + intel_atomic_helper_free_state(dev_priv); +} + static void intel_atomic_commit_tail(struct drm_atomic_state *state) { struct drm_device *dev = state->dev; @@ -14545,6 +14553,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) * can happen also when the device is completely off. 
*/ intel_uncore_arm_unclaimed_mmio_detection(dev_priv); + + intel_atomic_helper_free_state(dev_priv); } static void intel_atomic_commit_work(struct work_struct *work) @@ -14946,17 +14956,19 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc, to_intel_atomic_state(old_crtc_state->state); bool modeset = needs_modeset(crtc->state); + if (!modeset && + (intel_cstate->base.color_mgmt_changed || + intel_cstate->update_pipe)) { + intel_color_set_csc(crtc->state); + intel_color_load_luts(crtc->state); + } + /* Perform vblank evasion around commit operation */ intel_pipe_update_start(intel_crtc); if (modeset) goto out; - if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) { - intel_color_set_csc(crtc->state); - intel_color_load_luts(crtc->state); - } - if (intel_cstate->update_pipe) intel_update_pipe_config(intel_crtc, old_intel_cstate); else if (INTEL_GEN(dev_priv) >= 9) @@ -16599,18 +16611,6 @@ static void sanitize_watermarks(struct drm_device *dev) drm_modeset_acquire_fini(&ctx); } -static void intel_atomic_helper_free_state(struct work_struct *work) -{ - struct drm_i915_private *dev_priv = - container_of(work, typeof(*dev_priv), atomic_helper.free_work); - struct intel_atomic_state *state, *next; - struct llist_node *freed; - - freed = llist_del_all(&dev_priv->atomic_helper.free_list); - llist_for_each_entry_safe(state, next, freed, freed) - drm_atomic_state_put(&state->base); -} - int intel_modeset_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); @@ -16631,7 +16631,7 @@ int intel_modeset_init(struct drm_device *dev) dev->mode_config.funcs = &intel_mode_funcs; INIT_WORK(&dev_priv->atomic_helper.free_work, - intel_atomic_helper_free_state); + intel_atomic_helper_free_state_worker); intel_init_quirks(dev); @@ -16696,12 +16696,11 @@ int intel_modeset_init(struct drm_device *dev) } } - intel_update_czclk(dev_priv); - intel_update_cdclk(dev_priv); - dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq; - intel_shared_dpll_init(dev); + intel_update_czclk(dev_priv); + intel_modeset_init_hw(dev); + if (dev_priv->max_cdclk_freq == 0) intel_update_max_cdclk(dev_priv); @@ -17258,8 +17257,6 @@ void intel_modeset_gem_init(struct drm_device *dev) intel_init_gt_powersave(dev_priv); - intel_modeset_init_hw(dev); - intel_setup_overlay(dev_priv); } diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index b9cde116dab34c..344f238b283f3b 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -28,6 +28,7 @@ #include #include #include +#include #include #include "i915_drv.h" #include diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 371acf109e3432..ab1be5c80ea596 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -105,6 +105,8 @@ intel_engine_setup(struct drm_i915_private *dev_priv, /* Nothing to do here, execute in order of dependencies */ engine->schedule = NULL; + ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier); + dev_priv->engine[id] = engine; return 0; } diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 1b8ba2e7753957..2d449fb5d1d2b0 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -357,14 +357,13 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, bool *enabled, int width, int height) { struct drm_i915_private *dev_priv = to_i915(fb_helper->dev); - unsigned long 
conn_configured, mask; + unsigned long conn_configured, conn_seq, mask; unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG); int i, j; bool *save_enabled; bool fallback = true; int num_connectors_enabled = 0; int num_connectors_detected = 0; - int pass = 0; save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL); if (!save_enabled) @@ -374,6 +373,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, mask = BIT(count) - 1; conn_configured = 0; retry: + conn_seq = conn_configured; for (i = 0; i < count; i++) { struct drm_fb_helper_connector *fb_conn; struct drm_connector *connector; @@ -387,7 +387,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, if (conn_configured & BIT(i)) continue; - if (pass == 0 && !connector->has_tile) + if (conn_seq == 0 && !connector->has_tile) continue; if (connector->status == connector_status_connected) @@ -498,10 +498,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, conn_configured |= BIT(i); } - if ((conn_configured & mask) != mask) { - pass++; + if ((conn_configured & mask) != mask && conn_configured != conn_seq) goto retry; - } /* * If the BIOS didn't enable everything it could, fall back to have the diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index d23c0fcff7516a..8c04eca84351cb 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -77,6 +77,11 @@ int intel_gvt_init(struct drm_i915_private *dev_priv) goto bail; } + if (!i915.enable_execlists) { + DRM_INFO("GPU guest virtualisation [GVT-g] disabled due to disabled execlist submission [i915.enable_execlists module parameter]\n"); + goto bail; + } + /* * We're not in host or fail to find a MPT module, disable GVT-g */ diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index ebae2bd839189c..24b2fa5b62824d 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -1298,16 +1298,34 @@ intel_hdmi_mode_valid(struct drm_connector *connector, static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state) { - struct drm_device *dev = crtc_state->base.crtc->dev; + struct drm_i915_private *dev_priv = + to_i915(crtc_state->base.crtc->dev); + struct drm_atomic_state *state = crtc_state->base.state; + struct drm_connector_state *connector_state; + struct drm_connector *connector; + int i; - if (HAS_GMCH_DISPLAY(to_i915(dev))) + if (HAS_GMCH_DISPLAY(dev_priv)) return false; /* * HDMI 12bpc affects the clocks, so it's only possible * when not cloning with other encoder types. 
*/ - return crtc_state->output_types == 1 << INTEL_OUTPUT_HDMI; + if (crtc_state->output_types != 1 << INTEL_OUTPUT_HDMI) + return false; + + for_each_connector_in_state(state, connector, connector_state, i) { + const struct drm_display_info *info = &connector->display_info; + + if (connector_state->crtc != crtc_state->base.crtc) + continue; + + if ((info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36) == 0) + return false; + } + + return true; } bool intel_hdmi_compute_config(struct intel_encoder *encoder, diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c index b62e3f8ad415f6..54208bef7a8356 100644 --- a/drivers/gpu/drm/i915/intel_hotplug.c +++ b/drivers/gpu/drm/i915/intel_hotplug.c @@ -219,7 +219,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) } } } - if (dev_priv->display.hpd_irq_setup) + if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup) dev_priv->display.hpd_irq_setup(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); @@ -425,7 +425,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, } } - if (storm_detected) + if (storm_detected && dev_priv->display_irqs_enabled) dev_priv->display.hpd_irq_setup(dev_priv); spin_unlock(&dev_priv->irq_lock); @@ -471,10 +471,12 @@ void intel_hpd_init(struct drm_i915_private *dev_priv) * Interrupt setup is already guaranteed to be single-threaded, this is * just to make the assert_spin_locked checks happy. */ - spin_lock_irq(&dev_priv->irq_lock); - if (dev_priv->display.hpd_irq_setup) - dev_priv->display.hpd_irq_setup(dev_priv); - spin_unlock_irq(&dev_priv->irq_lock); + if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup) { + spin_lock_irq(&dev_priv->irq_lock); + if (dev_priv->display_irqs_enabled) + dev_priv->display.hpd_irq_setup(dev_priv); + spin_unlock_irq(&dev_priv->irq_lock); + } } static void i915_hpd_poll_init_work(struct work_struct *work) diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index ebf8023d21e6fb..471af3b480adc3 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -345,7 +345,8 @@ execlists_context_status_change(struct drm_i915_gem_request *rq, if (!IS_ENABLED(CONFIG_DRM_I915_GVT)) return; - atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq); + atomic_notifier_call_chain(&rq->engine->context_status_notifier, + status, rq); } static void diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 249623d45be0ca..6a29784d2b4137 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4891,6 +4891,12 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) break; } + /* When byt can survive without system hang with dynamic + * sw freq adjustments, this restriction can be lifted. 
+ */ + if (IS_VALLEYVIEW(dev_priv)) + goto skip_hw_write; + I915_WRITE(GEN6_RP_UP_EI, GT_INTERVAL_FROM_US(dev_priv, ei_up)); I915_WRITE(GEN6_RP_UP_THRESHOLD, @@ -4911,6 +4917,7 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) GEN6_RP_UP_BUSY_AVG | GEN6_RP_DOWN_IDLE_AVG); +skip_hw_write: dev_priv->rps.power = new_power; dev_priv->rps.up_threshold = threshold_up; dev_priv->rps.down_threshold = threshold_down; @@ -4921,8 +4928,9 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val) { u32 mask = 0; + /* We use UP_EI_EXPIRED interupts for both up/down in manual mode */ if (val > dev_priv->rps.min_freq_softlimit) - mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT; + mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT; if (val < dev_priv->rps.max_freq_softlimit) mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD; @@ -5032,7 +5040,7 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv) { mutex_lock(&dev_priv->rps.hw_lock); if (dev_priv->rps.enabled) { - if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) + if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED) gen6_rps_reset_ei(dev_priv); I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq)); @@ -7916,10 +7924,10 @@ static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox, * @timeout_base_ms: timeout for polling with preemption enabled * * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE - * reports an error or an overall timeout of @timeout_base_ms+10 ms expires. + * reports an error or an overall timeout of @timeout_base_ms+50 ms expires. * The request is acknowledged once the PCODE reply dword equals @reply after * applying @reply_mask. Polling is first attempted with preemption enabled - * for @timeout_base_ms and if this times out for another 10 ms with + * for @timeout_base_ms and if this times out for another 50 ms with * preemption disabled. * * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some @@ -7955,14 +7963,15 @@ int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request, * worst case) _and_ PCODE was busy for some reason even after a * (queued) request and @timeout_base_ms delay. As a workaround retry * the poll with preemption disabled to maximize the number of - * requests. Increase the timeout from @timeout_base_ms to 10ms to + * requests. Increase the timeout from @timeout_base_ms to 50ms to * account for interrupts that could reduce the number of these - * requests. + * requests, and for any quirks of the PCODE firmware that delays + * the request completion. 
*/ DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n"); WARN_ON_ONCE(timeout_base_ms > 3); preempt_disable(); - ret = wait_for_atomic(COND, 10); + ret = wait_for_atomic(COND, 50); preempt_enable(); out: diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 91bc4abf5d3e57..6c5f9958197d55 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -2024,6 +2024,8 @@ static int intel_ring_context_pin(struct intel_engine_cs *engine, ret = context_pin(ctx, flags); if (ret) goto error; + + ce->state->obj->mm.dirty = true; } /* The kernel context is only used as a placeholder for flushing the diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 79c2b8d72322cf..13dccb18cd43ed 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -403,6 +403,9 @@ struct intel_engine_cs { */ struct i915_gem_context *legacy_active_context; + /* status_notifier: list of callbacks for context-switch changes */ + struct atomic_notifier_head context_status_notifier; + struct intel_engine_hangcheck hangcheck; bool needs_cmd_parser; diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 9ef54688872a86..9481ca9a3ae7e0 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -254,9 +254,6 @@ skl_update_plane(struct drm_plane *drm_plane, int scaler_id = plane_state->scaler_id; const struct intel_scaler *scaler; - DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", - plane_id, PS_PLANE_SEL(plane_id)); - scaler = &crtc_state->scaler_state.scalers[scaler_id]; I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index abe08885a5ba4e..b7ff592b14f5e0 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -119,6 +119,8 @@ fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_doma for_each_fw_domain_masked(d, fw_domains, dev_priv) fw_domain_wait_ack(d); + + dev_priv->uncore.fw_domains_active |= fw_domains; } static void @@ -130,6 +132,8 @@ fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_doma fw_domain_put(d); fw_domain_posting_read(d); } + + dev_priv->uncore.fw_domains_active &= ~fw_domains; } static void @@ -240,10 +244,8 @@ intel_uncore_fw_release_timer(struct hrtimer *timer) if (WARN_ON(domain->wake_count == 0)) domain->wake_count++; - if (--domain->wake_count == 0) { + if (--domain->wake_count == 0) dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask); - dev_priv->uncore.fw_domains_active &= ~domain->mask; - } spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); @@ -454,10 +456,8 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, fw_domains &= ~domain->mask; } - if (fw_domains) { + if (fw_domains) dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains); - dev_priv->uncore.fw_domains_active |= fw_domains; - } } /** @@ -968,7 +968,6 @@ static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv, fw_domain_arm_timer(domain); dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains); - dev_priv->uncore.fw_domains_active |= fw_domains; } static inline void __force_wake_auto(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 4414cf73735d26..36602ac7e24835 
100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -534,7 +534,7 @@ static void a5xx_destroy(struct msm_gpu *gpu) } if (a5xx_gpu->gpmu_bo) { - if (a5xx_gpu->gpmu_bo) + if (a5xx_gpu->gpmu_iova) msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id); drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo); } @@ -860,7 +860,9 @@ static const struct adreno_gpu_funcs funcs = { .idle = a5xx_idle, .irq = a5xx_irq, .destroy = a5xx_destroy, +#ifdef CONFIG_DEBUG_FS .show = a5xx_show, +#endif }, .get_timestamp = a5xx_get_timestamp, }; diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index c9bd1e6225f4f9..5ae65426b4e559 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -418,18 +418,27 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, return 0; } -void adreno_gpu_cleanup(struct adreno_gpu *gpu) +void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu) { - if (gpu->memptrs_bo) { - if (gpu->memptrs) - msm_gem_put_vaddr(gpu->memptrs_bo); + struct msm_gpu *gpu = &adreno_gpu->base; + + if (adreno_gpu->memptrs_bo) { + if (adreno_gpu->memptrs) + msm_gem_put_vaddr(adreno_gpu->memptrs_bo); + + if (adreno_gpu->memptrs_iova) + msm_gem_put_iova(adreno_gpu->memptrs_bo, gpu->id); + + drm_gem_object_unreference_unlocked(adreno_gpu->memptrs_bo); + } + release_firmware(adreno_gpu->pm4); + release_firmware(adreno_gpu->pfp); - if (gpu->memptrs_iova) - msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id); + msm_gpu_cleanup(gpu); - drm_gem_object_unreference_unlocked(gpu->memptrs_bo); + if (gpu->aspace) { + gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu, + iommu_ports, ARRAY_SIZE(iommu_ports)); + msm_gem_address_space_destroy(gpu->aspace); } - release_firmware(gpu->pm4); - release_firmware(gpu->pfp); - msm_gpu_cleanup(&gpu->base); } diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index 921270ea6059de..a879ffa534b4d8 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -171,7 +171,7 @@ dsi_mgr_phy_enable(int id, } } } else { - msm_dsi_host_reset_phy(mdsi->host); + msm_dsi_host_reset_phy(msm_dsi->host); ret = enable_phy(msm_dsi, src_pll_id, &shared_timings[id]); if (ret) return ret; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c index a54d3bb5baad9c..8177e8511afd8c 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c @@ -18,13 +18,6 @@ #include #include "hdmi.h" - -/* Supported HDMI Audio channels */ -#define MSM_HDMI_AUDIO_CHANNEL_2 0 -#define MSM_HDMI_AUDIO_CHANNEL_4 1 -#define MSM_HDMI_AUDIO_CHANNEL_6 2 -#define MSM_HDMI_AUDIO_CHANNEL_8 3 - /* maps MSM_HDMI_AUDIO_CHANNEL_n consts used by audio driver to # of channels: */ static int nchannels[] = { 2, 4, 6, 8 }; diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h index 611da7a660c942..238901987e00b0 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h @@ -18,7 +18,8 @@ #ifndef __MDP5_PIPE_H__ #define __MDP5_PIPE_H__ -#define SSPP_MAX (SSPP_RGB3 + 1) /* TODO: Add 
SSPP_MAX in mdp5.xml.h */ +/* TODO: Add SSPP_MAX in mdp5.xml.h */ +#define SSPP_MAX (SSPP_CURSOR1 + 1) /* represents a hw pipe, which is dynamically assigned to a plane */ struct mdp5_hw_pipe { diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 59811f29607de6..68e509b3b9e4d0 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -812,6 +812,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, size = PAGE_ALIGN(size); + /* Disallow zero sized objects as they make the underlying + * infrastructure grumpy + */ + if (size == 0) + return ERR_PTR(-EINVAL); + ret = msm_gem_new_impl(dev, size, flags, NULL, &obj); if (ret) goto fail; diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 99e05aacbee181..af5b6ba4095b06 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -706,9 +706,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) msm_ringbuffer_destroy(gpu->rb); } - if (gpu->aspace) - msm_gem_address_space_destroy(gpu->aspace); - if (gpu->fctx) msm_fence_context_free(gpu->fctx); } diff --git a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c index e10a4eda4078ba..1144e0c9e8942d 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c @@ -65,13 +65,11 @@ static int mxsfb_set_pixel_fmt(struct mxsfb_drm_private *mxsfb) switch (format) { case DRM_FORMAT_RGB565: dev_dbg(drm->dev, "Setting up RGB565 mode\n"); - ctrl |= CTRL_SET_BUS_WIDTH(STMLCDIF_16BIT); ctrl |= CTRL_SET_WORD_LENGTH(0); ctrl1 |= CTRL1_SET_BYTE_PACKAGING(0xf); break; case DRM_FORMAT_XRGB8888: dev_dbg(drm->dev, "Setting up XRGB8888 mode\n"); - ctrl |= CTRL_SET_BUS_WIDTH(STMLCDIF_24BIT); ctrl |= CTRL_SET_WORD_LENGTH(3); /* Do not use packed pixels = one pixel per word instead. */ ctrl1 |= CTRL1_SET_BYTE_PACKAGING(0x7); @@ -87,6 +85,36 @@ static int mxsfb_set_pixel_fmt(struct mxsfb_drm_private *mxsfb) return 0; } +static void mxsfb_set_bus_fmt(struct mxsfb_drm_private *mxsfb) +{ + struct drm_crtc *crtc = &mxsfb->pipe.crtc; + struct drm_device *drm = crtc->dev; + u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24; + u32 reg; + + reg = readl(mxsfb->base + LCDC_CTRL); + + if (mxsfb->connector.display_info.num_bus_formats) + bus_format = mxsfb->connector.display_info.bus_formats[0]; + + reg &= ~CTRL_BUS_WIDTH_MASK; + switch (bus_format) { + case MEDIA_BUS_FMT_RGB565_1X16: + reg |= CTRL_SET_BUS_WIDTH(STMLCDIF_16BIT); + break; + case MEDIA_BUS_FMT_RGB666_1X18: + reg |= CTRL_SET_BUS_WIDTH(STMLCDIF_18BIT); + break; + case MEDIA_BUS_FMT_RGB888_1X24: + reg |= CTRL_SET_BUS_WIDTH(STMLCDIF_24BIT); + break; + default: + dev_err(drm->dev, "Unknown media bus format %d\n", bus_format); + break; + } + writel(reg, mxsfb->base + LCDC_CTRL); +} + static void mxsfb_enable_controller(struct mxsfb_drm_private *mxsfb) { u32 reg; @@ -168,13 +196,22 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb) vdctrl0 |= VDCTRL0_HSYNC_ACT_HIGH; if (m->flags & DRM_MODE_FLAG_PVSYNC) vdctrl0 |= VDCTRL0_VSYNC_ACT_HIGH; - if (bus_flags & DRM_BUS_FLAG_DE_HIGH) + /* Make sure Data Enable is high active by default */ + if (!(bus_flags & DRM_BUS_FLAG_DE_LOW)) vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH; - if (bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE) + /* + * DRM_BUS_FLAG_PIXDATA_ defines are controller centric, + * controllers VDCTRL0_DOTCLK is display centric. 
+ * Drive on positive edge -> display samples on falling edge + * DRM_BUS_FLAG_PIXDATA_POSEDGE -> VDCTRL0_DOTCLK_ACT_FALLING + */ + if (bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE) vdctrl0 |= VDCTRL0_DOTCLK_ACT_FALLING; writel(vdctrl0, mxsfb->base + LCDC_VDCTRL0); + mxsfb_set_bus_fmt(mxsfb); + /* Frame length in lines. */ writel(m->crtc_vtotal, mxsfb->base + LCDC_VDCTRL1); @@ -184,8 +221,8 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb) VDCTRL2_SET_HSYNC_PERIOD(m->crtc_htotal), mxsfb->base + LCDC_VDCTRL2); - writel(SET_HOR_WAIT_CNT(m->crtc_hblank_end - m->crtc_hsync_end) | - SET_VERT_WAIT_CNT(m->crtc_vblank_end - m->crtc_vsync_end), + writel(SET_HOR_WAIT_CNT(m->crtc_htotal - m->crtc_hsync_start) | + SET_VERT_WAIT_CNT(m->crtc_vtotal - m->crtc_vsync_start), mxsfb->base + LCDC_VDCTRL3); writel(SET_DOTCLK_H_VALID_DATA_CNT(m->hdisplay), diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c index cdfbe0284635de..ff6d6a6f842e5a 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c @@ -102,14 +102,18 @@ static void mxsfb_pipe_enable(struct drm_simple_display_pipe *pipe, { struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe); + drm_panel_prepare(mxsfb->panel); mxsfb_crtc_enable(mxsfb); + drm_panel_enable(mxsfb->panel); } static void mxsfb_pipe_disable(struct drm_simple_display_pipe *pipe) { struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe); + drm_panel_disable(mxsfb->panel); mxsfb_crtc_disable(mxsfb); + drm_panel_unprepare(mxsfb->panel); } static void mxsfb_pipe_update(struct drm_simple_display_pipe *pipe, diff --git a/drivers/gpu/drm/mxsfb/mxsfb_out.c b/drivers/gpu/drm/mxsfb/mxsfb_out.c index fa8d173994071d..b8e81422d4e26f 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_out.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_out.c @@ -112,6 +112,7 @@ static int mxsfb_attach_endpoint(struct drm_device *drm, int mxsfb_create_output(struct drm_device *drm) { + struct mxsfb_drm_private *mxsfb = drm->dev_private; struct device_node *ep_np = NULL; struct of_endpoint ep; int ret; @@ -127,5 +128,8 @@ int mxsfb_create_output(struct drm_device *drm) } } + if (!mxsfb->panel) + return -EPROBE_DEFER; + return 0; } diff --git a/drivers/gpu/drm/mxsfb/mxsfb_regs.h b/drivers/gpu/drm/mxsfb/mxsfb_regs.h index 31d62cd0d3d78a..66a6ba9ec533ff 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_regs.h +++ b/drivers/gpu/drm/mxsfb/mxsfb_regs.h @@ -44,6 +44,7 @@ #define CTRL_DATA_SELECT (1 << 16) #define CTRL_SET_BUS_WIDTH(x) (((x) & 0x3) << 10) #define CTRL_GET_BUS_WIDTH(x) (((x) >> 10) & 0x3) +#define CTRL_BUS_WIDTH_MASK (0x3 << 10) #define CTRL_SET_WORD_LENGTH(x) (((x) & 0x3) << 8) #define CTRL_GET_WORD_LENGTH(x) (((x) >> 8) & 0x3) #define CTRL_MASTER (1 << 5) diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c index a2bb855a2851f7..ac5800c72cb48e 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c index af267c35d813cc..ee5883f59be5a1 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c +++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c @@ -147,9 +147,6 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer, struct drm_gem_object *obj = buffer->priv; int ret = 0; - if (WARN_ON(!obj->filp)) - return -EINVAL; - ret = 
drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 684f1703aa5c71..aaa3e80fecb425 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -213,8 +213,8 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo, rbo->placement.num_busy_placement = 0; for (i = 0; i < rbo->placement.num_placement; i++) { if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) { - if (rbo->placements[0].fpfn < fpfn) - rbo->placements[0].fpfn = fpfn; + if (rbo->placements[i].fpfn < fpfn) + rbo->placements[i].fpfn = fpfn; } else { rbo->placement.busy_placement = &rbo->placements[i]; diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index d12b8978142f69..c7af9fdd20c729 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -2984,6 +2984,16 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, (rdev->pdev->device == 0x6667)) { max_sclk = 75000; } + } else if (rdev->family == CHIP_OLAND) { + if ((rdev->pdev->revision == 0xC7) || + (rdev->pdev->revision == 0x80) || + (rdev->pdev->revision == 0x81) || + (rdev->pdev->revision == 0x83) || + (rdev->pdev->revision == 0x87) || + (rdev->pdev->device == 0x6604) || + (rdev->pdev->device == 0x6605)) { + max_sclk = 75000; + } } if (rps->vce_active) { diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c index b5bfbe50bd8716..b0ff304ce3dc4a 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c @@ -32,6 +32,10 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc) { const struct drm_display_mode *mode = &crtc->crtc.state->adjusted_mode; struct rcar_du_device *rcdu = crtc->group->dev; + struct vsp1_du_lif_config cfg = { + .width = mode->hdisplay, + .height = mode->vdisplay, + }; struct rcar_du_plane_state state = { .state = { .crtc = &crtc->crtc, @@ -66,12 +70,12 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc) */ crtc->group->need_restart = true; - vsp1_du_setup_lif(crtc->vsp->vsp, mode->hdisplay, mode->vdisplay); + vsp1_du_setup_lif(crtc->vsp->vsp, &cfg); } void rcar_du_vsp_disable(struct rcar_du_crtc *crtc) { - vsp1_du_setup_lif(crtc->vsp->vsp, 0, 0); + vsp1_du_setup_lif(crtc->vsp->vsp, NULL); } void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc) diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c index f80bf9385e412d..d745e8b50fb864 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c @@ -464,6 +464,7 @@ static void tilcdc_crtc_enable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); + unsigned long flags; WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); mutex_lock(&tilcdc_crtc->enable_lock); @@ -484,7 +485,17 @@ static void tilcdc_crtc_enable(struct drm_crtc *crtc) tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG, LCDC_PALETTE_LOAD_MODE(DATA_ONLY), LCDC_PALETTE_LOAD_MODE_MASK); + + /* There is no real chance for a race here as the time stamp + * is taken before the raster DMA is started. The spin-lock is + * taken to have a memory barrier after taking the time-stamp + * and to avoid a context switch between taking the stamp and + * enabling the raster. 
+ */ + spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags); + tilcdc_crtc->last_vblank = ktime_get(); tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE); + spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags); drm_crtc_vblank_on(crtc); @@ -539,7 +550,6 @@ static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown) } drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq); - tilcdc_crtc->last_vblank = 0; tilcdc_crtc->enabled = false; mutex_unlock(&tilcdc_crtc->enable_lock); @@ -602,7 +612,6 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc, { struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); struct drm_device *dev = crtc->dev; - unsigned long flags; WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); @@ -614,28 +623,30 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc, drm_framebuffer_reference(fb); crtc->primary->fb = fb; + tilcdc_crtc->event = event; - spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags); + mutex_lock(&tilcdc_crtc->enable_lock); - if (crtc->hwmode.vrefresh && ktime_to_ns(tilcdc_crtc->last_vblank)) { + if (tilcdc_crtc->enabled) { + unsigned long flags; ktime_t next_vblank; s64 tdiff; - next_vblank = ktime_add_us(tilcdc_crtc->last_vblank, - 1000000 / crtc->hwmode.vrefresh); + spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags); + next_vblank = ktime_add_us(tilcdc_crtc->last_vblank, + 1000000 / crtc->hwmode.vrefresh); tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get())); if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US) tilcdc_crtc->next_fb = fb; - } - - if (tilcdc_crtc->next_fb != fb) - set_scanout(crtc, fb); + else + set_scanout(crtc, fb); - tilcdc_crtc->event = event; + spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags); + } - spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags); + mutex_unlock(&tilcdc_crtc->enable_lock); return 0; } @@ -1036,5 +1047,5 @@ int tilcdc_crtc_create(struct drm_device *dev) fail: tilcdc_crtc_destroy(crtc); - return -ENOMEM; + return ret; } diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c index f154fb1929bd18..913f4318cdc03a 100644 --- a/drivers/gpu/drm/ttm/ttm_lock.c +++ b/drivers/gpu/drm/ttm/ttm_lock.c @@ -33,7 +33,7 @@ #include #include #include -#include +#include #include #define TTM_WRITE_LOCK_PENDING (1 << 0) diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c index fdb451e3ec0118..26a7ad0f478978 100644 --- a/drivers/gpu/drm/ttm/ttm_object.c +++ b/drivers/gpu/drm/ttm/ttm_object.c @@ -179,7 +179,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile, if (unlikely(ret != 0)) goto out_err0; - ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL); + ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false); if (unlikely(ret != 0)) goto out_err1; @@ -318,7 +318,8 @@ EXPORT_SYMBOL(ttm_ref_object_exists); int ttm_ref_object_add(struct ttm_object_file *tfile, struct ttm_base_object *base, - enum ttm_ref_type ref_type, bool *existed) + enum ttm_ref_type ref_type, bool *existed, + bool require_existed) { struct drm_open_hash *ht = &tfile->ref_hash[ref_type]; struct ttm_ref_object *ref; @@ -345,6 +346,9 @@ int ttm_ref_object_add(struct ttm_object_file *tfile, } rcu_read_unlock(); + if (require_existed) + return -EPERM; + ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref), false, false); if (unlikely(ret != 0)) @@ -449,10 +453,10 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile) ttm_ref_object_release(&ref->kref); } + spin_unlock(&tfile->lock); for (i = 0; i < TTM_REF_NUM; ++i) drm_ht_remove(&tfile->ref_hash[i]); - spin_unlock(&tfile->lock); 
ttm_object_file_unref(&tfile); } EXPORT_SYMBOL(ttm_object_file_release); @@ -529,9 +533,7 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev) *p_tdev = NULL; - spin_lock(&tdev->object_lock); drm_ht_remove(&tdev->object_hash); - spin_unlock(&tdev->object_lock); kfree(tdev); } @@ -635,7 +637,7 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile, prime = (struct ttm_prime_object *) dma_buf->priv; base = &prime->base; *handle = base->hash.key; - ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL); + ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false); dma_buf_put(dma_buf); diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 0c06844af4455d..9fcf05ca492b0c 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -846,6 +846,17 @@ static void vc4_crtc_destroy_state(struct drm_crtc *crtc, drm_atomic_helper_crtc_destroy_state(crtc, state); } +static void +vc4_crtc_reset(struct drm_crtc *crtc) +{ + if (crtc->state) + __drm_atomic_helper_crtc_destroy_state(crtc->state); + + crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL); + if (crtc->state) + crtc->state->crtc = crtc; +} + static const struct drm_crtc_funcs vc4_crtc_funcs = { .set_config = drm_atomic_helper_set_config, .destroy = vc4_crtc_destroy, @@ -853,7 +864,7 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = { .set_property = NULL, .cursor_set = NULL, /* handled by drm_mode_cursor_universal */ .cursor_move = NULL, /* handled by drm_mode_cursor_universal */ - .reset = drm_atomic_helper_crtc_reset, + .reset = vc4_crtc_reset, .atomic_duplicate_state = vc4_crtc_duplicate_state, .atomic_destroy_state = vc4_crtc_destroy_state, .gamma_set = vc4_crtc_gamma_set, diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index ab3016982466c3..1eef98c3331dfc 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c @@ -26,6 +26,7 @@ #include #include #include +#include #include "uapi/drm/vc4_drm.h" #include "vc4_drv.h" diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c index 7ccbb03e98de5d..a1f42d125e6e84 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.c +++ b/drivers/gpu/drm/vgem/vgem_drv.c @@ -288,7 +288,7 @@ static int vgem_prime_mmap(struct drm_gem_object *obj, if (!obj->filp) return -ENODEV; - ret = obj->filp->f_op->mmap(obj->filp, vma); + ret = call_mmap(obj->filp, vma); if (ret) return ret; diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c index 30f989a0cafca0..491866865c3397 100644 --- a/drivers/gpu/drm/virtio/virtgpu_kms.c +++ b/drivers/gpu/drm/virtio/virtgpu_kms.c @@ -176,7 +176,7 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags) #endif ret = vgdev->vdev->config->find_vqs(vgdev->vdev, 2, vqs, - callbacks, names); + callbacks, names, NULL); if (ret) { DRM_ERROR("failed to find virt queues\n"); goto err_vqs; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index 6541dd8b82dc07..6b2708b4eafe84 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -538,7 +538,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman, struct vmw_fence_obj **p_fence) { struct vmw_fence_obj *fence; - int ret; + int ret; fence = kzalloc(sizeof(*fence), GFP_KERNEL); if (unlikely(fence == NULL)) @@ -701,6 +701,41 @@ void vmw_fence_fifo_up(struct vmw_fence_manager *fman) } +/** + * vmw_fence_obj_lookup - Look up a user-space fence object + * + * 
@tfile: A struct ttm_object_file identifying the caller. + * @handle: A handle identifying the fence object. + * @return: A struct vmw_user_fence base ttm object on success or + * an error pointer on failure. + * + * The fence object is looked up and type-checked. The caller needs + * to have opened the fence object first, but since that happens on + * creation and fence objects aren't shareable, that's not an + * issue currently. + */ +static struct ttm_base_object * +vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle) +{ + struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle); + + if (!base) { + pr_err("Invalid fence object handle 0x%08lx.\n", + (unsigned long)handle); + return ERR_PTR(-EINVAL); + } + + if (base->refcount_release != vmw_user_fence_base_release) { + pr_err("Invalid fence object handle 0x%08lx.\n", + (unsigned long)handle); + ttm_base_object_unref(&base); + return ERR_PTR(-EINVAL); + } + + return base; +} + + int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { @@ -726,13 +761,9 @@ int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data, arg->kernel_cookie = jiffies + wait_timeout; } - base = ttm_base_object_lookup(tfile, arg->handle); - if (unlikely(base == NULL)) { - printk(KERN_ERR "Wait invalid fence object handle " - "0x%08lx.\n", - (unsigned long)arg->handle); - return -EINVAL; - } + base = vmw_fence_obj_lookup(tfile, arg->handle); + if (IS_ERR(base)) + return PTR_ERR(base); fence = &(container_of(base, struct vmw_user_fence, base)->fence); @@ -771,13 +802,9 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data, struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct vmw_private *dev_priv = vmw_priv(dev); - base = ttm_base_object_lookup(tfile, arg->handle); - if (unlikely(base == NULL)) { - printk(KERN_ERR "Fence signaled invalid fence object handle " - "0x%08lx.\n", - (unsigned long)arg->handle); - return -EINVAL; - } + base = vmw_fence_obj_lookup(tfile, arg->handle); + if (IS_ERR(base)) + return PTR_ERR(base); fence = &(container_of(base, struct vmw_user_fence, base)->fence); fman = fman_from_fence(fence); @@ -1024,6 +1051,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data, (struct drm_vmw_fence_event_arg *) data; struct vmw_fence_obj *fence = NULL; struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); + struct ttm_object_file *tfile = vmw_fp->tfile; struct drm_vmw_fence_rep __user *user_fence_rep = (struct drm_vmw_fence_rep __user *)(unsigned long) arg->fence_rep; @@ -1037,24 +1065,18 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data, */ if (arg->handle) { struct ttm_base_object *base = - ttm_base_object_lookup_for_ref(dev_priv->tdev, - arg->handle); - - if (unlikely(base == NULL)) { - DRM_ERROR("Fence event invalid fence object handle " - "0x%08lx.\n", - (unsigned long)arg->handle); - return -EINVAL; - } + vmw_fence_obj_lookup(tfile, arg->handle); + + if (IS_ERR(base)) + return PTR_ERR(base); + fence = &(container_of(base, struct vmw_user_fence, base)->fence); (void) vmw_fence_obj_reference(fence); if (user_fence_rep != NULL) { - bool existed; - ret = ttm_ref_object_add(vmw_fp->tfile, base, - TTM_REF_USAGE, &existed); + TTM_REF_USAGE, NULL, false); if (unlikely(ret != 0)) { DRM_ERROR("Failed to reference a fence " "object.\n"); @@ -1097,8 +1119,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data, return 0; out_no_create: if (user_fence_rep != NULL) - ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, - handle, 
TTM_REF_USAGE); + ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE); out_no_ref_obj: vmw_fence_obj_unreference(&fence); return ret; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index b8c6a03c8c54df..5ec24fd801cd2b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c @@ -114,8 +114,6 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, param->value = dev_priv->has_dx; break; default: - DRM_ERROR("Illegal vmwgfx get param request: %d\n", - param->param); return -EINVAL; } @@ -186,7 +184,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS); struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); - if (unlikely(arg->pad64 != 0)) { + if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) { DRM_ERROR("Illegal GET_3D_CAP argument.\n"); return -EINVAL; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 65b3f036963671..bf23153d4f5551 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -589,7 +589,7 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo, return ret; ret = ttm_ref_object_add(tfile, &user_bo->prime.base, - TTM_REF_SYNCCPU_WRITE, &existed); + TTM_REF_SYNCCPU_WRITE, &existed, false); if (ret != 0 || existed) ttm_bo_synccpu_write_release(&user_bo->dma.base); @@ -773,7 +773,7 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, *handle = user_bo->prime.base.hash.key; return ttm_ref_object_add(tfile, &user_bo->prime.base, - TTM_REF_USAGE, NULL); + TTM_REF_USAGE, NULL, false); } /* diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index b445ce9b975786..05fa092c942bee 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -713,11 +713,14 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, 128; num_sizes = 0; - for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) + for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) { + if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS) + return -EINVAL; num_sizes += req->mip_levels[i]; + } - if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * - DRM_VMW_MAX_MIP_LEVELS) + if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS || + num_sizes == 0) return -EINVAL; size = vmw_user_surface_size + 128 + @@ -891,17 +894,16 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv, uint32_t handle; struct ttm_base_object *base; int ret; + bool require_exist = false; if (handle_type == DRM_VMW_HANDLE_PRIME) { ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle); if (unlikely(ret != 0)) return ret; } else { - if (unlikely(drm_is_render_client(file_priv))) { - DRM_ERROR("Render client refused legacy " - "surface reference.\n"); - return -EACCES; - } + if (unlikely(drm_is_render_client(file_priv))) + require_exist = true; + if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) { DRM_ERROR("Locked master refused legacy " "surface reference.\n"); @@ -929,17 +931,14 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv, /* * Make sure the surface creator has the same - * authenticating master. + * authenticating master, or is already registered with us. 
*/ if (drm_is_primary_client(file_priv) && - user_srf->master != file_priv->master) { - DRM_ERROR("Trying to reference surface outside of" - " master domain.\n"); - ret = -EACCES; - goto out_bad_resource; - } + user_srf->master != file_priv->master) + require_exist = true; - ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL); + ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, + require_exist); if (unlikely(ret != 0)) { DRM_ERROR("Could not add a reference to a surface.\n"); goto out_bad_resource; diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index 0f5b2dd2450758..92f1452dad57f6 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c @@ -41,7 +41,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 1aeb80e5242461..8c54cb8f5d6d10 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -175,11 +175,11 @@ config HID_CHERRY Support for Cherry Cymotion keyboard. config HID_CHICONY - tristate "Chicony Tactical pad" + tristate "Chicony devices" depends on HID default !EXPERT ---help--- - Support for Chicony Tactical pad. + Support for Chicony Tactical pad and special keys on Chicony keyboards. config HID_CORSAIR tristate "Corsair devices" @@ -190,6 +190,7 @@ config HID_CORSAIR Supported devices: - Vengeance K90 + - Scimitar PRO RGB config HID_PRODIKEYS tristate "Prodikeys PC-MIDI Keyboard support" diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c index bc3cec199feefd..f04ed9aabc3f9f 100644 --- a/drivers/hid/hid-chicony.c +++ b/drivers/hid/hid-chicony.c @@ -86,6 +86,7 @@ static const struct hid_device_id ch_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) }, { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) }, { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) }, + { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) }, { } }; MODULE_DEVICE_TABLE(hid, ch_devices); diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index e9e87d33744691..63ec1993eaaa90 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1870,6 +1870,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) }, { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) }, { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) }, + { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) }, { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) }, { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) }, { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) }, @@ -1910,6 +1911,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) }, { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) }, { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) }, + { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) }, { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) }, { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) }, @@ -2110,6 +2112,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { 
HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) }, { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) }, { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) }, { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) }, { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) }, { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) }, diff --git a/drivers/hid/hid-corsair.c b/drivers/hid/hid-corsair.c index c0303f61c26a94..9ba5d98a118042 100644 --- a/drivers/hid/hid-corsair.c +++ b/drivers/hid/hid-corsair.c @@ -3,8 +3,10 @@ * * Supported devices: * - Vengeance K90 Keyboard + * - Scimitar PRO RGB Gaming Mouse * * Copyright (c) 2015 Clement Vuchener + * Copyright (c) 2017 Oscar Campos */ /* @@ -670,10 +672,51 @@ static int corsair_input_mapping(struct hid_device *dev, return 0; } +/* + * The report descriptor of the Corsair Scimitar RGB Pro gaming mouse is + * not parseable, as it defines two consecutive Logical Minimum items for + * the Usage Page (Consumer) in rdesc bytes 75 and 77, byte 77 being 0x16 + * where it should obviously be 0x26 for a Logical Maximum of 16 bits. This + * prevents proper parsing of the report descriptor due to the Logical + * Minimum being larger than the Logical Maximum. + * + * This driver fixes the report descriptor for: + * - USB ID b1c:1b3e, sold as Scimitar RGB Pro Gaming mouse + */ + +static __u8 *corsair_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc, + unsigned int *rsize) +{ + struct usb_interface *intf = to_usb_interface(hdev->dev.parent); + + if (intf->cur_altsetting->desc.bInterfaceNumber == 1) { + /* + * The Corsair Scimitar RGB Pro report descriptor is broken and + * defines two different Logical Minimum items for the Consumer + * Application.
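+ * In HID short items, a prefix of 0x15 or 0x16 introduces a Logical + * Minimum carrying one or two bytes of data, and 0x25 or 0x26 the + * matching Logical Maximum, so the stray 0x16 is parsed as a second, + * conflicting minimum.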
Byte 77 should be 0x26, defining a 16-bit + * integer for the Logical Maximum, but it is 0x16 + * (Logical Minimum) instead. + */ + switch (hdev->product) { + case USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB: + if (*rsize >= 172 && rdesc[75] == 0x15 && rdesc[77] == 0x16 + && rdesc[78] == 0xff && rdesc[79] == 0x0f) { + hid_info(hdev, "Fixing up report descriptor\n"); + rdesc[77] = 0x26; + } + break; + } + + } + return rdesc; +} + static const struct hid_device_id corsair_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90), .driver_data = CORSAIR_USE_K90_MACRO | CORSAIR_USE_K90_BACKLIGHT }, + { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, + USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) }, {} }; @@ -686,10 +729,14 @@ static struct hid_driver corsair_driver = { .event = corsair_event, .remove = corsair_remove, .input_mapping = corsair_input_mapping, + .report_fixup = corsair_mouse_report_fixup, }; module_hid_driver(corsair_driver); MODULE_LICENSE("GPL"); +/* Original K90 driver author */ MODULE_AUTHOR("Clement Vuchener"); +/* Scimitar PRO RGB driver author */ +MODULE_AUTHOR("Oscar Campos"); MODULE_DESCRIPTION("HID driver for Corsair devices"); diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index acfb522a432ae5..c6c9c51c806f0d 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -30,7 +30,7 @@ #include #include -#include +#include #include #include #include diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 86c95d30ac801f..4e2648c86c8c56 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -278,6 +278,9 @@ #define USB_DEVICE_ID_CORSAIR_K70RGB 0x1b13 #define USB_DEVICE_ID_CORSAIR_STRAFE 0x1b15 #define USB_DEVICE_ID_CORSAIR_K65RGB 0x1b17 +#define USB_DEVICE_ID_CORSAIR_K70RGB_RAPIDFIRE 0x1b38 +#define USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE 0x1b39 +#define USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB 0x1b3e #define USB_VENDOR_ID_CREATIVELABS 0x041e #define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51 0x322c @@ -557,6 +560,7 @@ #define USB_VENDOR_ID_JESS 0x0c45 #define USB_DEVICE_ID_JESS_YUREX 0x1010 +#define USB_DEVICE_ID_JESS_ZEN_AIO_KBD 0x5112 #define USB_VENDOR_ID_JESS2 0x0f30 #define USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD 0x0111 @@ -1078,6 +1082,7 @@ #define USB_VENDOR_ID_XIN_MO 0x16c0 #define USB_DEVICE_ID_XIN_MO_DUAL_ARCADE 0x05e1 +#define USB_DEVICE_ID_THT_2P_ARCADE 0x75e1 #define USB_VENDOR_ID_XIROKU 0x1477 #define USB_DEVICE_ID_XIROKU_SPX 0x1006 diff --git a/drivers/hid/hid-roccat.c b/drivers/hid/hid-roccat.c index 76d06cf87b2ac3..fb77dec720a465 100644 --- a/drivers/hid/hid-roccat.c +++ b/drivers/hid/hid-roccat.c @@ -25,7 +25,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c index f405b07d038165..740996f9bdd49d 100644 --- a/drivers/hid/hid-sony.c +++ b/drivers/hid/hid-sony.c @@ -2632,6 +2632,8 @@ static int sony_input_configured(struct hid_device *hdev, sony_leds_remove(sc); if (sc->quirks & SONY_BATTERY_SUPPORT) sony_battery_remove(sc); + if (sc->touchpad) + sony_unregister_touchpad(sc); sony_cancel_work_sync(sc); kfree(sc->output_report_dmabuf); sony_remove_dev_list(sc); diff --git a/drivers/hid/hid-xinmo.c b/drivers/hid/hid-xinmo.c index 7df5227a7e61d6..9ad7731d2e10da 100644 --- a/drivers/hid/hid-xinmo.c +++ b/drivers/hid/hid-xinmo.c @@ -46,6 +46,7 @@ static int xinmo_event(struct hid_device *hdev, struct hid_field *field, static const struct hid_device_id xinmo_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) }, { } }; diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index f0e2757cb9094d..ec530454e6f687 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c @@ -33,7 +33,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index d6847a66444652..a69a3c88ab29f5 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -80,6 +80,9 @@ static const struct hid_blacklist { { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, + { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, + { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, + { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET }, { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT }, diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index 700145b1508894..774bd701dae0b8 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c @@ -27,6 +27,7 @@ #include #include +#include #include #include #include diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index be8f7e2a026f42..e2666ef84dc1ca 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -2165,6 +2165,14 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless) wacom_update_name(wacom, wireless ? 
" (WL)" : ""); + /* pen only Bamboo neither support touch nor pad */ + if ((features->type == BAMBOO_PEN) && + ((features->device_type & WACOM_DEVICETYPE_TOUCH) || + (features->device_type & WACOM_DEVICETYPE_PAD))) { + error = -ENODEV; + goto fail; + } + error = wacom_add_shared_data(hdev); if (error) goto fail; @@ -2208,14 +2216,8 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless) /* touch only Bamboo doesn't support pen */ if ((features->type == BAMBOO_TOUCH) && (features->device_type & WACOM_DEVICETYPE_PEN)) { - error = -ENODEV; - goto fail_quirks; - } - - /* pen only Bamboo neither support touch nor pad */ - if ((features->type == BAMBOO_PEN) && - ((features->device_type & WACOM_DEVICETYPE_TOUCH) || - (features->device_type & WACOM_DEVICETYPE_PAD))) { + cancel_delayed_work_sync(&wacom->init_work); + _wacom_query_tablet_data(wacom); error = -ENODEV; goto fail_quirks; } @@ -2579,7 +2581,9 @@ static void wacom_remove(struct hid_device *hdev) /* make sure we don't trigger the LEDs */ wacom_led_groups_release(wacom); - wacom_release_resources(wacom); + + if (wacom->wacom_wac.features.type != REMOTE) + wacom_release_resources(wacom); hid_set_drvdata(hdev, NULL); } diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 4aa3de9f1163b3..94250c293be2a1 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -1959,8 +1959,10 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev, input_set_capability(input, EV_KEY, BTN_TOOL_BRUSH); input_set_capability(input, EV_KEY, BTN_TOOL_PENCIL); input_set_capability(input, EV_KEY, BTN_TOOL_AIRBRUSH); - input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE); - input_set_capability(input, EV_KEY, BTN_TOOL_LENS); + if (!(features->device_type & WACOM_DEVICETYPE_DIRECT)) { + input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE); + input_set_capability(input, EV_KEY, BTN_TOOL_LENS); + } break; case WACOM_HID_WD_FINGERWHEEL: wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0); @@ -4197,10 +4199,10 @@ static const struct wacom_features wacom_features_0x343 = WACOM_DTU_OFFSET, WACOM_DTU_OFFSET }; static const struct wacom_features wacom_features_0x360 = { "Wacom Intuos Pro M", 44800, 29600, 8191, 63, - INTUOSP2_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 9, .touch_max = 10 }; + INTUOSP2_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 10 }; static const struct wacom_features wacom_features_0x361 = { "Wacom Intuos Pro L", 62200, 43200, 8191, 63, - INTUOSP2_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 9, .touch_max = 10 }; + INTUOSP2_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 10 }; static const struct wacom_features wacom_features_HID_ANY_ID = { "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID }; diff --git a/drivers/hsi/clients/cmt_speech.c b/drivers/hsi/clients/cmt_speech.c index 7175e6bedf2185..727f968ac1cbb9 100644 --- a/drivers/hsi/clients/cmt_speech.c +++ b/drivers/hsi/clients/cmt_speech.c @@ -31,7 +31,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index 81a80c82f1bd2b..321b8833fa6f35 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -502,12 +502,15 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle) wait_for_completion(&info->waitevent); - if (channel->rescind) { - ret = -ENODEV; - goto post_msg_err; - } - post_msg_err: + /* + * If the channel has been rescinded; + * we will be awakened by the rescind + * handler; set the error 
code to zero so we don't leak memory. + */ + if (channel->rescind) + ret = 0; + spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); list_del(&info->msglistentry); spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); @@ -530,20 +533,18 @@ static int vmbus_close_internal(struct vmbus_channel *channel) int ret; /* - * vmbus_on_event(), running in the tasklet, can race + * vmbus_on_event(), running in the per-channel tasklet, can race * with vmbus_close_internal() in the case of SMP guest, e.g., when * the former is accessing channel->inbound.ring_buffer, the latter - * could be freeing the ring_buffer pages. - * - * To resolve the race, we can serialize them by disabling the - * tasklet when the latter is running here. + * could be freeing the ring_buffer pages, so here we must stop it + * first. */ - hv_event_tasklet_disable(channel); + tasklet_disable(&channel->callback_event); /* * In case a device driver's probe() fails (e.g., * util_probe() -> vmbus_open() returns -ENOMEM) and the device is - * rescinded later (e.g., we dynamically disble an Integrated Service + * rescinded later (e.g., we dynamically disable an Integrated Service * in Hyper-V Manager), the driver's remove() invokes vmbus_close(): * here we should skip most of the below cleanup work. */ @@ -605,8 +606,6 @@ static int vmbus_close_internal(struct vmbus_channel *channel) get_order(channel->ringbuffer_pagecount * PAGE_SIZE)); out: - hv_event_tasklet_enable(channel); - return ret; } diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index f33465d78a0256..fbcb0635230828 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -350,7 +350,8 @@ static struct vmbus_channel *alloc_channel(void) static void free_channel(struct vmbus_channel *channel) { tasklet_kill(&channel->callback_event); - kfree(channel); + + kfree_rcu(channel, rcu); } static void percpu_channel_enq(void *arg) @@ -359,14 +360,14 @@ static void percpu_channel_enq(void *arg) struct hv_per_cpu_context *hv_cpu = this_cpu_ptr(hv_context.cpu_context); - list_add_tail(&channel->percpu_list, &hv_cpu->chan_list); + list_add_tail_rcu(&channel->percpu_list, &hv_cpu->chan_list); } static void percpu_channel_deq(void *arg) { struct vmbus_channel *channel = arg; - list_del(&channel->percpu_list); + list_del_rcu(&channel->percpu_list); } @@ -381,19 +382,6 @@ static void vmbus_release_relid(u32 relid) true); } -void hv_event_tasklet_disable(struct vmbus_channel *channel) -{ - tasklet_disable(&channel->callback_event); -} - -void hv_event_tasklet_enable(struct vmbus_channel *channel) -{ - tasklet_enable(&channel->callback_event); - - /* In case there is any pending event */ - tasklet_schedule(&channel->callback_event); -} - void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid) { unsigned long flags; @@ -402,7 +390,6 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid) BUG_ON(!channel->rescind); BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex)); - hv_event_tasklet_disable(channel); if (channel->target_cpu != get_cpu()) { put_cpu(); smp_call_function_single(channel->target_cpu, @@ -411,7 +398,6 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid) percpu_channel_deq(channel); put_cpu(); } - hv_event_tasklet_enable(channel); if (channel->primary_channel == NULL) { list_del(&channel->listentry); @@ -505,7 +491,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) init_vp_index(newchannel, dev_type); - 
hv_event_tasklet_disable(newchannel); if (newchannel->target_cpu != get_cpu()) { put_cpu(); smp_call_function_single(newchannel->target_cpu, @@ -515,7 +500,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) percpu_channel_enq(newchannel); put_cpu(); } - hv_event_tasklet_enable(newchannel); /* * This state is used to indicate a successful open @@ -565,7 +549,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) list_del(&newchannel->listentry); mutex_unlock(&vmbus_connection.channel_mutex); - hv_event_tasklet_disable(newchannel); if (newchannel->target_cpu != get_cpu()) { put_cpu(); smp_call_function_single(newchannel->target_cpu, @@ -574,7 +557,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) percpu_channel_deq(newchannel); put_cpu(); } - hv_event_tasklet_enable(newchannel); vmbus_release_relid(newchannel->offermsg.child_relid); @@ -814,6 +796,7 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr) /* Allocate the channel object and save this offer. */ newchannel = alloc_channel(); if (!newchannel) { + vmbus_release_relid(offer->child_relid); pr_err("Unable to allocate channel object\n"); return; } diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c index 9aee6014339dff..a5596a642ed06b 100644 --- a/drivers/hv/hv_fcopy.c +++ b/drivers/hv/hv_fcopy.c @@ -71,7 +71,6 @@ static DECLARE_WORK(fcopy_send_work, fcopy_send_data); static const char fcopy_devname[] = "vmbus/hv_fcopy"; static u8 *recv_buffer; static struct hvutil_transport *hvt; -static struct completion release_event; /* * This state maintains the version number registered by the daemon. */ @@ -331,7 +330,6 @@ static void fcopy_on_reset(void) if (cancel_delayed_work_sync(&fcopy_timeout_work)) fcopy_respond_to_host(HV_E_FAIL); - complete(&release_event); } int hv_fcopy_init(struct hv_util_service *srv) @@ -339,7 +337,6 @@ int hv_fcopy_init(struct hv_util_service *srv) recv_buffer = srv->recv_buffer; fcopy_transaction.recv_channel = srv->channel; - init_completion(&release_event); /* * When this driver loads, the user level daemon that * processes the host requests may not yet be running. @@ -361,5 +358,4 @@ void hv_fcopy_deinit(void) fcopy_transaction.state = HVUTIL_DEVICE_DYING; cancel_delayed_work_sync(&fcopy_timeout_work); hvutil_transport_destroy(hvt); - wait_for_completion(&release_event); } diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c index de263712e247c2..a1adfe2cfb3424 100644 --- a/drivers/hv/hv_kvp.c +++ b/drivers/hv/hv_kvp.c @@ -101,7 +101,6 @@ static DECLARE_WORK(kvp_sendkey_work, kvp_send_key); static const char kvp_devname[] = "vmbus/hv_kvp"; static u8 *recv_buffer; static struct hvutil_transport *hvt; -static struct completion release_event; /* * Register the kernel component with the user-level daemon. * As part of this registration, pass the LIC version number. @@ -714,7 +713,6 @@ static void kvp_on_reset(void) if (cancel_delayed_work_sync(&kvp_timeout_work)) kvp_respond_to_host(NULL, HV_E_FAIL); kvp_transaction.state = HVUTIL_DEVICE_INIT; - complete(&release_event); } int @@ -723,7 +721,6 @@ hv_kvp_init(struct hv_util_service *srv) recv_buffer = srv->recv_buffer; kvp_transaction.recv_channel = srv->channel; - init_completion(&release_event); /* * When this driver loads, the user level daemon that * processes the host requests may not yet be running. 
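The hv_fcopy and hv_kvp hunks above, together with the hv_snapshot hunk below, drop the per-service release_event completion; the hv_utils_transport changes that follow move this synchronization into the transport itself, where hvt_op_release() completes hvt->release and hvutil_transport_destroy() waits on it before freeing the transport. A minimal userspace sketch of that completion handshake, with pthreads standing in for the kernel's completion API (an illustrative analogue, not kernel code):

/* completion_sketch.c: build with cc completion_sketch.c -lpthread */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

static void init_completion(struct completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	c->done = 0;
}

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

static struct completion release;

/* models the daemon closing the char device, as in hvt_op_release() */
static void *fd_release_thread(void *arg)
{
	sleep(1);		/* daemon exits some time later */
	complete(&release);
	return NULL;
}

int main(void)
{
	pthread_t t;

	init_completion(&release);	/* as in hvutil_transport_init() */
	pthread_create(&t, NULL, fd_release_thread, NULL);
	/* as in hvutil_transport_destroy(): do not free until released */
	wait_for_completion(&release);
	printf("transport can be freed safely now\n");
	pthread_join(&t, NULL);
	return 0;
}

The invariant is the same in both settings: the shared structure is freed only after the release side has signalled, which is why the per-service waits can go once the transport performs the wait itself.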
@@ -747,5 +744,4 @@ void hv_kvp_deinit(void) cancel_delayed_work_sync(&kvp_timeout_work); cancel_work_sync(&kvp_sendkey_work); hvutil_transport_destroy(hvt); - wait_for_completion(&release_event); } diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c index bcc03f0748d61c..e659d1b94a5794 100644 --- a/drivers/hv/hv_snapshot.c +++ b/drivers/hv/hv_snapshot.c @@ -79,7 +79,6 @@ static int dm_reg_value; static const char vss_devname[] = "vmbus/hv_vss"; static __u8 *recv_buffer; static struct hvutil_transport *hvt; -static struct completion release_event; static void vss_timeout_func(struct work_struct *dummy); static void vss_handle_request(struct work_struct *dummy); @@ -361,13 +360,11 @@ static void vss_on_reset(void) if (cancel_delayed_work_sync(&vss_timeout_work)) vss_respond_to_host(HV_E_FAIL); vss_transaction.state = HVUTIL_DEVICE_INIT; - complete(&release_event); } int hv_vss_init(struct hv_util_service *srv) { - init_completion(&release_event); if (vmbus_proto_version < VERSION_WIN8_1) { pr_warn("Integration service 'Backup (volume snapshot)'" " not supported on this host version.\n"); @@ -400,5 +397,4 @@ void hv_vss_deinit(void) cancel_delayed_work_sync(&vss_timeout_work); cancel_work_sync(&vss_handle_request_work); hvutil_transport_destroy(hvt); - wait_for_completion(&release_event); } diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c index 3042eaa13062bb..186b10083c552b 100644 --- a/drivers/hv/hv_util.c +++ b/drivers/hv/hv_util.c @@ -590,6 +590,8 @@ static int hv_timesync_init(struct hv_util_service *srv) if (!hyperv_cs) return -ENODEV; + spin_lock_init(&host_ts.lock); + INIT_WORK(&wrk.work, hv_set_host_time); /* diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c index c235a951526711..4402a71e23f7f7 100644 --- a/drivers/hv/hv_utils_transport.c +++ b/drivers/hv/hv_utils_transport.c @@ -182,10 +182,11 @@ static int hvt_op_release(struct inode *inode, struct file *file) * connects back. 
*/ hvt_reset(hvt); - mutex_unlock(&hvt->lock); if (mode_old == HVUTIL_TRANSPORT_DESTROY) - hvt_transport_free(hvt); + complete(&hvt->release); + + mutex_unlock(&hvt->lock); return 0; } @@ -304,6 +305,7 @@ struct hvutil_transport *hvutil_transport_init(const char *name, init_waitqueue_head(&hvt->outmsg_q); mutex_init(&hvt->lock); + init_completion(&hvt->release); spin_lock(&hvt_list_lock); list_add(&hvt->list, &hvt_list); @@ -351,6 +353,8 @@ void hvutil_transport_destroy(struct hvutil_transport *hvt) if (hvt->cn_id.idx > 0 && hvt->cn_id.val > 0) cn_del_callback(&hvt->cn_id); - if (mode_old != HVUTIL_TRANSPORT_CHARDEV) - hvt_transport_free(hvt); + if (mode_old == HVUTIL_TRANSPORT_CHARDEV) + wait_for_completion(&hvt->release); + + hvt_transport_free(hvt); } diff --git a/drivers/hv/hv_utils_transport.h b/drivers/hv/hv_utils_transport.h index d98f5225c3e693..79afb626e16689 100644 --- a/drivers/hv/hv_utils_transport.h +++ b/drivers/hv/hv_utils_transport.h @@ -41,6 +41,7 @@ struct hvutil_transport { int outmsg_len; /* its length */ wait_queue_head_t outmsg_q; /* poll/read wait queue */ struct mutex lock; /* protects struct members */ + struct completion release; /* synchronize with fd release */ }; struct hvutil_transport *hvutil_transport_init(const char *name, diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index f7f6b9144b07c0..8370b9dc6037c1 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -34,6 +34,8 @@ #include #include #include +#include + #include #include #include @@ -937,8 +939,10 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu) if (relid == 0) continue; + rcu_read_lock(); + /* Find channel based on relid */ - list_for_each_entry(channel, &hv_cpu->chan_list, percpu_list) { + list_for_each_entry_rcu(channel, &hv_cpu->chan_list, percpu_list) { if (channel->offermsg.child_relid != relid) continue; @@ -954,6 +958,8 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu) tasklet_schedule(&channel->callback_event); } } + + rcu_read_unlock(); } } diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c index cccef87963e050..975c43d446f859 100644 --- a/drivers/hwmon/asus_atk0110.c +++ b/drivers/hwmon/asus_atk0110.c @@ -646,6 +646,9 @@ static int atk_read_value(struct atk_sensor_data *sensor, u64 *value) else err = atk_read_value_new(sensor, value); + if (err) + return err; + sensor->is_valid = true; sensor->last_updated = jiffies; sensor->cached_value = *value; diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c index efb01c247e2d90..4dfc7238313ebd 100644 --- a/drivers/hwmon/it87.c +++ b/drivers/hwmon/it87.c @@ -3198,7 +3198,7 @@ static int __init sm_it87_init(void) { int sioaddr[2] = { REG_2E, REG_4E }; struct it87_sio_data sio_data; - unsigned short isa_address; + unsigned short isa_address[2]; bool found = false; int i, err; @@ -3208,15 +3208,29 @@ static int __init sm_it87_init(void) for (i = 0; i < ARRAY_SIZE(sioaddr); i++) { memset(&sio_data, 0, sizeof(struct it87_sio_data)); - isa_address = 0; - err = it87_find(sioaddr[i], &isa_address, &sio_data); - if (err || isa_address == 0) + isa_address[i] = 0; + err = it87_find(sioaddr[i], &isa_address[i], &sio_data); + if (err || isa_address[i] == 0) continue; + /* + * Don't register second chip if its ISA address matches + * the first chip's ISA address. 
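+ * (Both SIO probe addresses can report the same chip, and registering + * it twice would create two platform devices claiming the same ISA + * region.)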
+ */ + if (i && isa_address[i] == isa_address[0]) + break; - err = it87_device_add(i, isa_address, &sio_data); + err = it87_device_add(i, isa_address[i], &sio_data); if (err) goto exit_dev_unregister; + found = true; + + /* + * IT8705F may respond on both SIO addresses. + * Stop probing after finding one. + */ + if (sio_data.type == it87) + break; } if (!found) { diff --git a/drivers/hwmon/max31790.c b/drivers/hwmon/max31790.c index c1b9275978f9d9..281491cca5103a 100644 --- a/drivers/hwmon/max31790.c +++ b/drivers/hwmon/max31790.c @@ -311,7 +311,7 @@ static int max31790_write_pwm(struct device *dev, u32 attr, int channel, data->pwm[channel] = val << 8; err = i2c_smbus_write_word_swapped(client, MAX31790_REG_PWMOUT(channel), - val); + data->pwm[channel]); break; case hwmon_pwm_enable: fan_config = data->fan_config[channel]; diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c index cdd9b3b26195aa..7563eceeaaeaa3 100644 --- a/drivers/hwtracing/intel_th/core.c +++ b/drivers/hwtracing/intel_th/core.c @@ -221,8 +221,10 @@ static int intel_th_output_activate(struct intel_th_device *thdev) else intel_th_trace_enable(thdev); - if (ret) + if (ret) { pm_runtime_put(&thdev->dev); + module_put(thdrv->driver.owner); + } return ret; } diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index 0bba3842336e6d..590cf90dd21a61 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c @@ -85,6 +85,16 @@ static const struct pci_device_id intel_th_pci_id_table[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6), .driver_data = (kernel_ulong_t)0, }, + { + /* Denverton */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x19e1), + .driver_data = (kernel_ulong_t)0, + }, + { + /* Gemini Lake */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e), + .driver_data = (kernel_ulong_t)0, + }, { 0 }, }; diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c index 0652281662a8b3..78792b4d6437c7 100644 --- a/drivers/i2c/busses/i2c-brcmstb.c +++ b/drivers/i2c/busses/i2c-brcmstb.c @@ -465,6 +465,7 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter, u8 *tmp_buf; int len = 0; int xfersz = brcmstb_i2c_get_xfersz(dev); + u32 cond, cond_per_msg; if (dev->is_suspended) return -EBUSY; @@ -481,10 +482,11 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter, pmsg->buf ? 
pmsg->buf[0] : '0', pmsg->len); if (i < (num - 1) && (msgs[i + 1].flags & I2C_M_NOSTART)) - brcmstb_set_i2c_start_stop(dev, ~(COND_START_STOP)); + cond = ~COND_START_STOP; else - brcmstb_set_i2c_start_stop(dev, - COND_RESTART | COND_NOSTOP); + cond = COND_RESTART | COND_NOSTOP; + + brcmstb_set_i2c_start_stop(dev, cond); /* Send slave address */ if (!(pmsg->flags & I2C_M_NOSTART)) { @@ -497,13 +499,24 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter, } } + cond_per_msg = cond; + /* Perform data transfer */ while (len) { bytes_to_xfer = min(len, xfersz); - if (len <= xfersz && i == (num - 1)) - brcmstb_set_i2c_start_stop(dev, - ~(COND_START_STOP)); + if (len <= xfersz) { + if (i == (num - 1)) + cond_per_msg = cond_per_msg & + ~(COND_RESTART | COND_NOSTOP); + else + cond_per_msg = cond; + } else { + cond_per_msg = (cond_per_msg & ~COND_RESTART) | + COND_NOSTOP; + } + + brcmstb_set_i2c_start_stop(dev, cond_per_msg); rc = brcmstb_i2c_xfer_bsc_data(dev, tmp_buf, bytes_to_xfer, pmsg); @@ -512,6 +525,8 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter, len -= bytes_to_xfer; tmp_buf += bytes_to_xfer; + + cond_per_msg = COND_NOSTART | COND_NOSTOP; } } diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index c1db3a5a340f59..d9aaf1790e0eff 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h @@ -88,6 +88,7 @@ struct dw_i2c_dev { void __iomem *base; struct completion cmd_complete; struct clk *clk; + struct reset_control *rst; u32 (*get_clk_rate_khz) (struct dw_i2c_dev *dev); struct dw_pci_controller *controller; int cmd_err; diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 6ce4313231257f..79c4b4ea053969 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -38,6 +38,7 @@ #include #include #include +#include #include #include #include @@ -199,6 +200,14 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) dev->irq = irq; platform_set_drvdata(pdev, dev); + dev->rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL); + if (IS_ERR(dev->rst)) { + if (PTR_ERR(dev->rst) == -EPROBE_DEFER) + return -EPROBE_DEFER; + } else { + reset_control_deassert(dev->rst); + } + if (pdata) { dev->clk_freq = pdata->i2c_scl_freq; } else { @@ -235,12 +244,13 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) && dev->clk_freq != 1000000 && dev->clk_freq != 3400000) { dev_err(&pdev->dev, "Only 100kHz, 400kHz, 1MHz and 3.4MHz supported"); - return -EINVAL; + r = -EINVAL; + goto exit_reset; } r = i2c_dw_eval_lock_support(dev); if (r) - return r; + goto exit_reset; dev->functionality = I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY; @@ -286,10 +296,18 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) } r = i2c_dw_probe(dev); - if (r && !dev->pm_runtime_disabled) - pm_runtime_disable(&pdev->dev); + if (r) + goto exit_probe; return r; + +exit_probe: + if (!dev->pm_runtime_disabled) + pm_runtime_disable(&pdev->dev); +exit_reset: + if (!IS_ERR_OR_NULL(dev->rst)) + reset_control_assert(dev->rst); + return r; } static int dw_i2c_plat_remove(struct platform_device *pdev) @@ -306,6 +324,8 @@ static int dw_i2c_plat_remove(struct platform_device *pdev) pm_runtime_put_sync(&pdev->dev); if (!dev->pm_runtime_disabled) pm_runtime_disable(&pdev->dev); + if (!IS_ERR_OR_NULL(dev->rst)) + reset_control_assert(dev->rst); return 0; } diff --git a/drivers/i2c/busses/i2c-exynos5.c 
b/drivers/i2c/busses/i2c-exynos5.c index cbd93ce0661f22..736a8247210173 100644 --- a/drivers/i2c/busses/i2c-exynos5.c +++ b/drivers/i2c/busses/i2c-exynos5.c @@ -457,7 +457,6 @@ static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id) int_status = readl(i2c->regs + HSI2C_INT_STATUS); writel(int_status, i2c->regs + HSI2C_INT_STATUS); - trans_status = readl(i2c->regs + HSI2C_TRANS_STATUS); /* handle interrupt related to the transfer status */ if (i2c->variant->hw == HSI2C_EXYNOS7) { @@ -482,11 +481,13 @@ static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id) goto stop; } + trans_status = readl(i2c->regs + HSI2C_TRANS_STATUS); if ((trans_status & HSI2C_MASTER_ST_MASK) == HSI2C_MASTER_ST_LOSE) { i2c->state = -EAGAIN; goto stop; } } else if (int_status & HSI2C_INT_I2C) { + trans_status = readl(i2c->regs + HSI2C_TRANS_STATUS); if (trans_status & HSI2C_NO_DEV_ACK) { dev_dbg(i2c->dev, "No ACK from device\n"); i2c->state = -ENXIO; diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c index 412b91d255ad1d..961c5f42d956f1 100644 --- a/drivers/i2c/busses/i2c-ibm_iic.c +++ b/drivers/i2c/busses/i2c-ibm_iic.c @@ -37,6 +37,8 @@ #include #include #include +#include + #include #include #include diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c index 2aa61bbbd307b9..73b97c71a484ee 100644 --- a/drivers/i2c/busses/i2c-meson.c +++ b/drivers/i2c/busses/i2c-meson.c @@ -175,7 +175,7 @@ static void meson_i2c_put_data(struct meson_i2c *i2c, char *buf, int len) wdata1 |= *buf++ << ((i - 4) * 8); writel(wdata0, i2c->regs + REG_TOK_WDATA0); - writel(wdata0, i2c->regs + REG_TOK_WDATA1); + writel(wdata1, i2c->regs + REG_TOK_WDATA1); dev_dbg(i2c->dev, "%s: data %08x %08x len %d\n", __func__, wdata0, wdata1, len); diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c index 565a49a0c56410..96caf378b1dc00 100644 --- a/drivers/i2c/busses/i2c-mpc.c +++ b/drivers/i2c/busses/i2c-mpc.c @@ -15,7 +15,7 @@ #include #include -#include +#include #include #include #include diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c index 4a7d9bc2142ba3..45d61714c81bd2 100644 --- a/drivers/i2c/busses/i2c-mt65xx.c +++ b/drivers/i2c/busses/i2c-mt65xx.c @@ -172,14 +172,6 @@ static const struct i2c_adapter_quirks mt6577_i2c_quirks = { .max_comb_2nd_msg_len = 31, }; -static const struct i2c_adapter_quirks mt8173_i2c_quirks = { - .max_num_msgs = 65535, - .max_write_len = 65535, - .max_read_len = 65535, - .max_comb_1st_msg_len = 65535, - .max_comb_2nd_msg_len = 65535, -}; - static const struct mtk_i2c_compatible mt6577_compat = { .quirks = &mt6577_i2c_quirks, .pmic_i2c = 0, @@ -199,7 +191,6 @@ static const struct mtk_i2c_compatible mt6589_compat = { }; static const struct mtk_i2c_compatible mt8173_compat = { - .quirks = &mt8173_i2c_quirks, .pmic_i2c = 0, .dcm = 1, .auto_restart = 1, diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c index 8f11d347b3ec48..c811af4c8d817b 100644 --- a/drivers/i2c/busses/i2c-riic.c +++ b/drivers/i2c/busses/i2c-riic.c @@ -218,8 +218,12 @@ static irqreturn_t riic_tend_isr(int irq, void *data) } if (riic->is_last || riic->err) { - riic_clear_set_bit(riic, 0, ICIER_SPIE, RIIC_ICIER); + riic_clear_set_bit(riic, ICIER_TEIE, ICIER_SPIE, RIIC_ICIER); writeb(ICCR2_SP, riic->base + RIIC_ICCR2); + } else { + /* Transfer is complete, but do not send STOP */ + riic_clear_set_bit(riic, ICIER_TEIE, 0, RIIC_ICIER); + complete(&riic->msg_done); } return IRQ_HANDLED; diff --git a/drivers/i2c/i2c-mux.c 
b/drivers/i2c/i2c-mux.c index 83768e85a919cb..2178266bca7948 100644 --- a/drivers/i2c/i2c-mux.c +++ b/drivers/i2c/i2c-mux.c @@ -429,6 +429,7 @@ void i2c_mux_del_adapters(struct i2c_mux_core *muxc) while (muxc->num_adapters) { struct i2c_adapter *adap = muxc->adapter[--muxc->num_adapters]; struct i2c_mux_priv *priv = adap->algo_data; + struct device_node *np = adap->dev.of_node; muxc->adapter[muxc->num_adapters] = NULL; @@ -438,6 +439,7 @@ void i2c_mux_del_adapters(struct i2c_mux_core *muxc) sysfs_remove_link(&priv->adap.dev.kobj, "mux_device"); i2c_del_adapter(adap); + of_node_put(np); kfree(priv); } } diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c index dfc1c0e37c4022..ad31d21da3165f 100644 --- a/drivers/i2c/muxes/i2c-mux-pca954x.c +++ b/drivers/i2c/muxes/i2c-mux-pca954x.c @@ -35,7 +35,6 @@ * warranty of any kind, whether express or implied. */ -#include #include #include #include @@ -117,6 +116,10 @@ static const struct chip_desc chips[] = { .has_irq = 1, .muxtype = pca954x_isswi, }, + [pca_9546] = { + .nchans = 4, + .muxtype = pca954x_isswi, + }, [pca_9547] = { .nchans = 8, .enable = 0x8, @@ -134,28 +137,13 @@ static const struct i2c_device_id pca954x_id[] = { { "pca9543", pca_9543 }, { "pca9544", pca_9544 }, { "pca9545", pca_9545 }, - { "pca9546", pca_9545 }, + { "pca9546", pca_9546 }, { "pca9547", pca_9547 }, { "pca9548", pca_9548 }, { } }; MODULE_DEVICE_TABLE(i2c, pca954x_id); -#ifdef CONFIG_ACPI -static const struct acpi_device_id pca954x_acpi_ids[] = { - { .id = "PCA9540", .driver_data = pca_9540 }, - { .id = "PCA9542", .driver_data = pca_9542 }, - { .id = "PCA9543", .driver_data = pca_9543 }, - { .id = "PCA9544", .driver_data = pca_9544 }, - { .id = "PCA9545", .driver_data = pca_9545 }, - { .id = "PCA9546", .driver_data = pca_9545 }, - { .id = "PCA9547", .driver_data = pca_9547 }, - { .id = "PCA9548", .driver_data = pca_9548 }, - { } -}; -MODULE_DEVICE_TABLE(acpi, pca954x_acpi_ids); -#endif - #ifdef CONFIG_OF static const struct of_device_id pca954x_of_match[] = { { .compatible = "nxp,pca9540", .data = &chips[pca_9540] }, @@ -393,17 +381,8 @@ static int pca954x_probe(struct i2c_client *client, match = of_match_device(of_match_ptr(pca954x_of_match), &client->dev); if (match) data->chip = of_device_get_match_data(&client->dev); - else if (id) + else data->chip = &chips[id->driver_data]; - else { - const struct acpi_device_id *acpi_id; - - acpi_id = acpi_match_device(ACPI_PTR(pca954x_acpi_ids), - &client->dev); - if (!acpi_id) - return -ENODEV; - data->chip = &chips[acpi_id->driver_data]; - } data->last_chan = 0; /* force the first selection */ @@ -492,7 +471,6 @@ static struct i2c_driver pca954x_driver = { .name = "pca954x", .pm = &pca954x_pm, .of_match_table = of_match_ptr(pca954x_of_match), - .acpi_match_table = ACPI_PTR(pca954x_acpi_ids), }, .probe = pca954x_probe, .remove = pca954x_remove, diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index aef00511ca8646..74f1b7dc03f73c 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c index 247b9faccce171..4c0007cb74e378 100644 --- a/drivers/ide/ide-taskfile.c +++ b/drivers/ide/ide-taskfile.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 7d8ea3d5fda656..5805b041dd0fc1 100644 --- a/drivers/idle/intel_idle.c +++ 
b/drivers/idle/intel_idle.c @@ -125,7 +125,7 @@ static struct cpuidle_state *cpuidle_state_table; */ static struct cpuidle_state nehalem_cstates[] = { { - .name = "C1-NHM", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 3, @@ -133,7 +133,7 @@ static struct cpuidle_state nehalem_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C1E-NHM", + .name = "C1E", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01), .exit_latency = 10, @@ -141,7 +141,7 @@ static struct cpuidle_state nehalem_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C3-NHM", + .name = "C3", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 20, @@ -149,7 +149,7 @@ static struct cpuidle_state nehalem_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-NHM", + .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 200, @@ -162,7 +162,7 @@ static struct cpuidle_state nehalem_cstates[] = { static struct cpuidle_state snb_cstates[] = { { - .name = "C1-SNB", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 2, @@ -170,7 +170,7 @@ static struct cpuidle_state snb_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C1E-SNB", + .name = "C1E", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01), .exit_latency = 10, @@ -178,7 +178,7 @@ static struct cpuidle_state snb_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C3-SNB", + .name = "C3", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 80, @@ -186,7 +186,7 @@ static struct cpuidle_state snb_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-SNB", + .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 104, @@ -194,7 +194,7 @@ static struct cpuidle_state snb_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C7-SNB", + .name = "C7", .desc = "MWAIT 0x30", .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 109, @@ -207,7 +207,7 @@ static struct cpuidle_state snb_cstates[] = { static struct cpuidle_state byt_cstates[] = { { - .name = "C1-BYT", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 1, @@ -215,7 +215,7 @@ static struct cpuidle_state byt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6N-BYT", + .name = "C6N", .desc = "MWAIT 0x58", .flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 300, @@ -223,7 +223,7 @@ static struct cpuidle_state byt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6S-BYT", + .name = "C6S", .desc = "MWAIT 0x52", .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 500, @@ -231,7 +231,7 @@ static struct cpuidle_state byt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C7-BYT", + .name = "C7", .desc = "MWAIT 0x60", .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 1200, @@ -239,7 +239,7 @@ static struct cpuidle_state byt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C7S-BYT", + .name = "C7S", .desc = "MWAIT 0x64", .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 10000, @@ -252,7 +252,7 @@ static struct 
cpuidle_state byt_cstates[] = { static struct cpuidle_state cht_cstates[] = { { - .name = "C1-CHT", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 1, @@ -260,7 +260,7 @@ static struct cpuidle_state cht_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6N-CHT", + .name = "C6N", .desc = "MWAIT 0x58", .flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 80, @@ -268,7 +268,7 @@ static struct cpuidle_state cht_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6S-CHT", + .name = "C6S", .desc = "MWAIT 0x52", .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 200, @@ -276,7 +276,7 @@ static struct cpuidle_state cht_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C7-CHT", + .name = "C7", .desc = "MWAIT 0x60", .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 1200, @@ -284,7 +284,7 @@ static struct cpuidle_state cht_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C7S-CHT", + .name = "C7S", .desc = "MWAIT 0x64", .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 10000, @@ -297,7 +297,7 @@ static struct cpuidle_state cht_cstates[] = { static struct cpuidle_state ivb_cstates[] = { { - .name = "C1-IVB", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 1, @@ -305,7 +305,7 @@ static struct cpuidle_state ivb_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C1E-IVB", + .name = "C1E", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01), .exit_latency = 10, @@ -313,7 +313,7 @@ static struct cpuidle_state ivb_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C3-IVB", + .name = "C3", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 59, @@ -321,7 +321,7 @@ static struct cpuidle_state ivb_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-IVB", + .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 80, @@ -329,7 +329,7 @@ static struct cpuidle_state ivb_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C7-IVB", + .name = "C7", .desc = "MWAIT 0x30", .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 87, @@ -342,7 +342,7 @@ static struct cpuidle_state ivb_cstates[] = { static struct cpuidle_state ivt_cstates[] = { { - .name = "C1-IVT", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 1, @@ -350,7 +350,7 @@ static struct cpuidle_state ivt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C1E-IVT", + .name = "C1E", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01), .exit_latency = 10, @@ -358,7 +358,7 @@ static struct cpuidle_state ivt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C3-IVT", + .name = "C3", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 59, @@ -366,7 +366,7 @@ static struct cpuidle_state ivt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-IVT", + .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 82, @@ -379,7 +379,7 @@ static struct cpuidle_state ivt_cstates[] = { static struct cpuidle_state ivt_cstates_4s[] = { { - .name = 
"C1-IVT-4S", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 1, @@ -387,7 +387,7 @@ static struct cpuidle_state ivt_cstates_4s[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C1E-IVT-4S", + .name = "C1E", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01), .exit_latency = 10, @@ -395,7 +395,7 @@ static struct cpuidle_state ivt_cstates_4s[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C3-IVT-4S", + .name = "C3", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 59, @@ -403,7 +403,7 @@ static struct cpuidle_state ivt_cstates_4s[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-IVT-4S", + .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 84, @@ -416,7 +416,7 @@ static struct cpuidle_state ivt_cstates_4s[] = { static struct cpuidle_state ivt_cstates_8s[] = { { - .name = "C1-IVT-8S", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 1, @@ -424,7 +424,7 @@ static struct cpuidle_state ivt_cstates_8s[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C1E-IVT-8S", + .name = "C1E", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01), .exit_latency = 10, @@ -432,7 +432,7 @@ static struct cpuidle_state ivt_cstates_8s[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C3-IVT-8S", + .name = "C3", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 59, @@ -440,7 +440,7 @@ static struct cpuidle_state ivt_cstates_8s[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-IVT-8S", + .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 88, @@ -453,7 +453,7 @@ static struct cpuidle_state ivt_cstates_8s[] = { static struct cpuidle_state hsw_cstates[] = { { - .name = "C1-HSW", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 2, @@ -461,7 +461,7 @@ static struct cpuidle_state hsw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C1E-HSW", + .name = "C1E", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01), .exit_latency = 10, @@ -469,7 +469,7 @@ static struct cpuidle_state hsw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C3-HSW", + .name = "C3", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 33, @@ -477,7 +477,7 @@ static struct cpuidle_state hsw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-HSW", + .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 133, @@ -485,7 +485,7 @@ static struct cpuidle_state hsw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C7s-HSW", + .name = "C7s", .desc = "MWAIT 0x32", .flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 166, @@ -493,7 +493,7 @@ static struct cpuidle_state hsw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C8-HSW", + .name = "C8", .desc = "MWAIT 0x40", .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 300, @@ -501,7 +501,7 @@ static struct cpuidle_state hsw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C9-HSW", + .name = "C9", .desc = "MWAIT 0x50", .flags = 
MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 600, @@ -509,7 +509,7 @@ static struct cpuidle_state hsw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C10-HSW", + .name = "C10", .desc = "MWAIT 0x60", .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 2600, @@ -521,7 +521,7 @@ static struct cpuidle_state hsw_cstates[] = { }; static struct cpuidle_state bdw_cstates[] = { { - .name = "C1-BDW", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 2, @@ -529,7 +529,7 @@ static struct cpuidle_state bdw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C1E-BDW", + .name = "C1E", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01), .exit_latency = 10, @@ -537,7 +537,7 @@ static struct cpuidle_state bdw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C3-BDW", + .name = "C3", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 40, @@ -545,7 +545,7 @@ static struct cpuidle_state bdw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-BDW", + .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 133, @@ -553,7 +553,7 @@ static struct cpuidle_state bdw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C7s-BDW", + .name = "C7s", .desc = "MWAIT 0x32", .flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 166, @@ -561,7 +561,7 @@ static struct cpuidle_state bdw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C8-BDW", + .name = "C8", .desc = "MWAIT 0x40", .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 300, @@ -569,7 +569,7 @@ static struct cpuidle_state bdw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C9-BDW", + .name = "C9", .desc = "MWAIT 0x50", .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 600, @@ -577,7 +577,7 @@ static struct cpuidle_state bdw_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C10-BDW", + .name = "C10", .desc = "MWAIT 0x60", .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 2600, @@ -590,7 +590,7 @@ static struct cpuidle_state bdw_cstates[] = { static struct cpuidle_state skl_cstates[] = { { - .name = "C1-SKL", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 2, @@ -598,7 +598,7 @@ static struct cpuidle_state skl_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C1E-SKL", + .name = "C1E", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01), .exit_latency = 10, @@ -606,7 +606,7 @@ static struct cpuidle_state skl_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C3-SKL", + .name = "C3", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 70, @@ -614,7 +614,7 @@ static struct cpuidle_state skl_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-SKL", + .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 85, @@ -622,7 +622,7 @@ static struct cpuidle_state skl_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C7s-SKL", + .name = "C7s", .desc = "MWAIT 0x33", .flags = MWAIT2flg(0x33) | 
CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 124, @@ -630,7 +630,7 @@ static struct cpuidle_state skl_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C8-SKL", + .name = "C8", .desc = "MWAIT 0x40", .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 200, @@ -638,7 +638,7 @@ static struct cpuidle_state skl_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C9-SKL", + .name = "C9", .desc = "MWAIT 0x50", .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 480, @@ -646,7 +646,7 @@ static struct cpuidle_state skl_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C10-SKL", + .name = "C10", .desc = "MWAIT 0x60", .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 890, @@ -659,7 +659,7 @@ static struct cpuidle_state skl_cstates[] = { static struct cpuidle_state skx_cstates[] = { { - .name = "C1-SKX", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 2, @@ -667,7 +667,7 @@ static struct cpuidle_state skx_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C1E-SKX", + .name = "C1E", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01), .exit_latency = 10, @@ -675,7 +675,7 @@ static struct cpuidle_state skx_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-SKX", + .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 133, @@ -688,7 +688,7 @@ static struct cpuidle_state skx_cstates[] = { static struct cpuidle_state atom_cstates[] = { { - .name = "C1E-ATM", + .name = "C1E", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 10, @@ -696,7 +696,7 @@ static struct cpuidle_state atom_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C2-ATM", + .name = "C2", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10), .exit_latency = 20, @@ -704,7 +704,7 @@ static struct cpuidle_state atom_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C4-ATM", + .name = "C4", .desc = "MWAIT 0x30", .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 100, @@ -712,7 +712,7 @@ static struct cpuidle_state atom_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-ATM", + .name = "C6", .desc = "MWAIT 0x52", .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 140, @@ -724,7 +724,7 @@ static struct cpuidle_state atom_cstates[] = { }; static struct cpuidle_state tangier_cstates[] = { { - .name = "C1-TNG", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 1, @@ -732,7 +732,7 @@ static struct cpuidle_state tangier_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C4-TNG", + .name = "C4", .desc = "MWAIT 0x30", .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 100, @@ -740,7 +740,7 @@ static struct cpuidle_state tangier_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-TNG", + .name = "C6", .desc = "MWAIT 0x52", .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 140, @@ -748,7 +748,7 @@ static struct cpuidle_state tangier_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C7-TNG", + .name = "C7", .desc = "MWAIT 0x60", .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 1200, @@ -756,7 +756,7 @@ 
static struct cpuidle_state tangier_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C9-TNG", + .name = "C9", .desc = "MWAIT 0x64", .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 10000, @@ -768,7 +768,7 @@ static struct cpuidle_state tangier_cstates[] = { }; static struct cpuidle_state avn_cstates[] = { { - .name = "C1-AVN", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 2, @@ -776,7 +776,7 @@ static struct cpuidle_state avn_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-AVN", + .name = "C6", .desc = "MWAIT 0x51", .flags = MWAIT2flg(0x51) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 15, @@ -788,7 +788,7 @@ static struct cpuidle_state avn_cstates[] = { }; static struct cpuidle_state knl_cstates[] = { { - .name = "C1-KNL", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 1, @@ -796,7 +796,7 @@ static struct cpuidle_state knl_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze }, { - .name = "C6-KNL", + .name = "C6", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 120, @@ -809,7 +809,7 @@ static struct cpuidle_state knl_cstates[] = { static struct cpuidle_state bxt_cstates[] = { { - .name = "C1-BXT", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 2, @@ -817,7 +817,7 @@ static struct cpuidle_state bxt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C1E-BXT", + .name = "C1E", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01), .exit_latency = 10, @@ -825,7 +825,7 @@ static struct cpuidle_state bxt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C6-BXT", + .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 133, @@ -833,7 +833,7 @@ static struct cpuidle_state bxt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C7s-BXT", + .name = "C7s", .desc = "MWAIT 0x31", .flags = MWAIT2flg(0x31) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 155, @@ -841,7 +841,7 @@ static struct cpuidle_state bxt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C8-BXT", + .name = "C8", .desc = "MWAIT 0x40", .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 1000, @@ -849,7 +849,7 @@ static struct cpuidle_state bxt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C9-BXT", + .name = "C9", .desc = "MWAIT 0x50", .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 2000, @@ -857,7 +857,7 @@ static struct cpuidle_state bxt_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C10-BXT", + .name = "C10", .desc = "MWAIT 0x60", .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 10000, @@ -870,7 +870,7 @@ static struct cpuidle_state bxt_cstates[] = { static struct cpuidle_state dnv_cstates[] = { { - .name = "C1-DNV", + .name = "C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00), .exit_latency = 2, @@ -878,7 +878,7 @@ static struct cpuidle_state dnv_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name = "C1E-DNV", + .name = "C1E", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01), .exit_latency = 10, @@ -886,7 +886,7 @@ static struct cpuidle_state dnv_cstates[] = { .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { - .name 
= "C6-DNV", + .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 50, @@ -961,9 +961,9 @@ static void auto_demotion_disable(void) { unsigned long long msr_bits; - rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits); + rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); msr_bits &= ~(icpu->auto_demotion_disable_flags); - wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits); + wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); } static void c1e_promotion_disable(void) { @@ -1273,7 +1273,7 @@ static void sklh_idle_state_table_update(void) if ((mwait_substates & (0xF << 28)) == 0) return; - rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr); + rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr); /* PC10 is not enabled in PKG C-state limit */ if ((msr & 0xF) != 8) diff --git a/drivers/iio/adc/rcar-gyroadc.c b/drivers/iio/adc/rcar-gyroadc.c index 0c44f72c32a859..018ed360e717cd 100644 --- a/drivers/iio/adc/rcar-gyroadc.c +++ b/drivers/iio/adc/rcar-gyroadc.c @@ -336,7 +336,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev) struct device_node *child; struct regulator *vref; unsigned int reg; - unsigned int adcmode, childmode; + unsigned int adcmode = -1, childmode; unsigned int sample_width; unsigned int num_channels; int ret, first = 1; @@ -366,6 +366,8 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev) channels = rcar_gyroadc_iio_channels_3; num_channels = ARRAY_SIZE(rcar_gyroadc_iio_channels_3); break; + default: + return -EINVAL; } /* diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c index ad9dec30bb304f..4282ceca3d8f9f 100644 --- a/drivers/iio/adc/ti_am335x_adc.c +++ b/drivers/iio/adc/ti_am335x_adc.c @@ -169,7 +169,9 @@ static irqreturn_t tiadc_irq_h(int irq, void *private) { struct iio_dev *indio_dev = private; struct tiadc_device *adc_dev = iio_priv(indio_dev); - unsigned int status, config; + unsigned int status, config, adc_fsm; + unsigned short count = 0; + status = tiadc_readl(adc_dev, REG_IRQSTATUS); /* @@ -183,6 +185,15 @@ static irqreturn_t tiadc_irq_h(int irq, void *private) tiadc_writel(adc_dev, REG_CTRL, config); tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1OVRRUN | IRQENB_FIFO1UNDRFLW | IRQENB_FIFO1THRES); + + /* wait for idle state. 
+ * ADC needs to finish the current conversion + * before disabling the module + */ + do { + adc_fsm = tiadc_readl(adc_dev, REG_ADCFSM); + } while (adc_fsm != 0x10 && count++ < 100); + tiadc_writel(adc_dev, REG_CTRL, (config | CNTRLREG_TSCSSENB)); return IRQ_HANDLED; } else if (status & IRQENB_FIFO1THRES) { diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c index 0a6beb3d99cbc7..56cf5907a5f010 100644 --- a/drivers/iio/adc/xilinx-xadc-core.c +++ b/drivers/iio/adc/xilinx-xadc-core.c @@ -1208,7 +1208,7 @@ static int xadc_probe(struct platform_device *pdev) ret = xadc->ops->setup(pdev, indio_dev, irq); if (ret) - goto err_free_samplerate_trigger; + goto err_clk_disable_unprepare; ret = request_irq(irq, xadc->ops->interrupt_handler, 0, dev_name(&pdev->dev), indio_dev); @@ -1268,6 +1268,8 @@ static int xadc_probe(struct platform_device *pdev) err_free_irq: free_irq(irq, indio_dev); +err_clk_disable_unprepare: + clk_disable_unprepare(xadc->clk); err_free_samplerate_trigger: if (xadc->ops->flags & XADC_FLAGS_BUFFERED) iio_trigger_free(xadc->samplerate_trigger); @@ -1277,8 +1279,6 @@ static int xadc_probe(struct platform_device *pdev) err_triggered_buffer_cleanup: if (xadc->ops->flags & XADC_FLAGS_BUFFERED) iio_triggered_buffer_cleanup(indio_dev); -err_clk_disable_unprepare: - clk_disable_unprepare(xadc->clk); err_device_free: kfree(indio_dev->channels); diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c index a3cce3a3830079..ecf592d69043ae 100644 --- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c +++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c @@ -51,8 +51,6 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state) st->report_state.report_id, st->report_state.index, HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM); - - poll_value = hid_sensor_read_poll_value(st); } else { int val; @@ -89,7 +87,9 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state) sensor_hub_get_feature(st->hsdev, st->power_state.report_id, st->power_state.index, sizeof(state_val), &state_val); - if (state && poll_value) + if (state) + poll_value = hid_sensor_read_poll_value(st); + if (poll_value > 0) msleep_interruptible(poll_value * 2); return 0; diff --git a/drivers/iio/counter/104-quad-8.c b/drivers/iio/counter/104-quad-8.c index a5913e97945eb6..f9b8fc9ae13fc7 100644 --- a/drivers/iio/counter/104-quad-8.c +++ b/drivers/iio/counter/104-quad-8.c @@ -76,7 +76,7 @@ static int quad8_read_raw(struct iio_dev *indio_dev, return IIO_VAL_INT; } - flags = inb(base_offset); + flags = inb(base_offset + 1); borrow = flags & BIT(0); carry = !!(flags & BIT(1)); diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c index 78532ce0744979..81b572d7699a89 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c @@ -193,8 +193,8 @@ int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark) if (err < 0) goto out; - fifo_watermark = ((data & ~ST_LSM6DSX_FIFO_TH_MASK) << 8) | - (fifo_watermark & ST_LSM6DSX_FIFO_TH_MASK); + fifo_watermark = ((data << 8) & ~ST_LSM6DSX_FIFO_TH_MASK) | + (fifo_watermark & ST_LSM6DSX_FIFO_TH_MASK); wdata = cpu_to_le16(fifo_watermark); err = hw->tf->write(hw->dev, ST_LSM6DSX_REG_FIFO_THL_ADDR, diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index 4972986f645583..d2b465140a6bdc 100644 --- 
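An illustrative aside, not part of the patch: the xilinx-xadc hunk above repoints the ops->setup() failure path at a new err_clk_disable_unprepare label, so a late probe failure still disables the clock and the error labels stay in reverse order of acquisition. The following stand-alone C sketch, with invented helper names, shows that goto-unwind idiom:

#include <stdio.h>

/* Stub "resources"; purely illustrative. */
static int enable_clk(void)   { puts("clk on");  return 0; }
static void disable_clk(void) { puts("clk off"); }
static int request_irq_(void) { puts("irq on");  return 0; }
static void free_irq_(void)   { puts("irq off"); }
static int register_dev(void) { return -1; /* simulate a late failure */ }

static int probe(void)
{
        int ret;

        ret = enable_clk();
        if (ret)
                return ret;

        ret = request_irq_();
        if (ret)
                goto err_disable_clk;

        ret = register_dev();
        if (ret)
                goto err_free_irq;   /* unwind everything acquired so far */

        return 0;

err_free_irq:                        /* labels run in reverse order of acquisition */
        free_irq_();
err_disable_clk:
        disable_clk();
        return ret;
}

int main(void)
{
        return probe() ? 1 : 0;
}

Jumping to the label that matches the last successful acquisition releases exactly what is held; putting a release at the wrong point in the chain, as the old xadc_probe() did, either leaks a resource or frees one that was never taken.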
a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include "iio_core.h" diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c index 6dd8cbd7ce9531..e13370dc9b1cb4 100644 --- a/drivers/iio/magnetometer/ak8974.c +++ b/drivers/iio/magnetometer/ak8974.c @@ -763,7 +763,7 @@ static int ak8974_probe(struct i2c_client *i2c, return ret; } -static int __exit ak8974_remove(struct i2c_client *i2c) +static int ak8974_remove(struct i2c_client *i2c) { struct iio_dev *indio_dev = i2c_get_clientdata(i2c); struct ak8974 *ak8974 = iio_priv(indio_dev); @@ -845,7 +845,7 @@ static struct i2c_driver ak8974_driver = { .of_match_table = of_match_ptr(ak8974_of_match), }, .probe = ak8974_probe, - .remove = __exit_p(ak8974_remove), + .remove = ak8974_remove, .id_table = ak8974_id, }; module_i2c_driver(ak8974_driver); diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c index e95510117a6dd7..f2ae75fa3128b9 100644 --- a/drivers/infiniband/core/cq.c +++ b/drivers/infiniband/core/cq.c @@ -29,7 +29,13 @@ static int __ib_process_cq(struct ib_cq *cq, int budget) { int i, n, completed = 0; - while ((n = ib_poll_cq(cq, IB_POLL_BATCH, cq->wc)) > 0) { + /* + * budget might be (-1) if the caller does not + * want to bound this call, thus we need unsigned + * minimum here. + */ + while ((n = ib_poll_cq(cq, min_t(u32, IB_POLL_BATCH, + budget - completed), cq->wc)) > 0) { for (i = 0; i < n; i++) { struct ib_wc *wc = &cq->wc[i]; @@ -196,7 +202,7 @@ void ib_free_cq(struct ib_cq *cq) irq_poll_disable(&cq->iop); break; case IB_POLL_WORKQUEUE: - flush_work(&cq->work); + cancel_work_sync(&cq->work); break; default: WARN_ON_ONCE(1); diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 593d2ce6ec7cec..7c9e34d679d325 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -336,12 +336,26 @@ int ib_register_device(struct ib_device *device, struct device *parent = device->dev.parent; WARN_ON_ONCE(!parent); - if (!device->dev.dma_ops) - device->dev.dma_ops = parent->dma_ops; - if (!device->dev.dma_mask) - device->dev.dma_mask = parent->dma_mask; - if (!device->dev.coherent_dma_mask) - device->dev.coherent_dma_mask = parent->coherent_dma_mask; + WARN_ON_ONCE(device->dma_device); + if (device->dev.dma_ops) { + /* + * The caller provided custom DMA operations. Copy the + * DMA-related fields that are used by e.g. dma_alloc_coherent() + * into device->dev. + */ + device->dma_device = &device->dev; + if (!device->dev.dma_mask) + device->dev.dma_mask = parent->dma_mask; + if (!device->dev.coherent_dma_mask) + device->dev.coherent_dma_mask = + parent->coherent_dma_mask; + } else { + /* + * The caller did not provide custom DMA operations. Use the + * DMA mapping operations of the parent device. 
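Another illustrative aside, not part of the patch: the __ib_process_cq hunk above caps each ib_poll_cq() call at min_t(u32, IB_POLL_BATCH, budget - completed), while still treating a budget of -1 as "no limit". The unsigned minimum is what makes both cases work: cast to u32, -1 minus the completions so far becomes a huge value, so only IB_POLL_BATCH bounds the batch. A small runnable sketch of that arithmetic, with the batch constant's value chosen purely for illustration:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the kernel's IB_POLL_BATCH; the exact value is not the point. */
#define IB_POLL_BATCH 16u

/* Mirror of the new bound: min_t(u32, IB_POLL_BATCH, budget - completed). */
static uint32_t poll_bound(int budget, int completed)
{
        uint32_t remaining = (uint32_t)(budget - completed);

        return remaining < IB_POLL_BATCH ? remaining : IB_POLL_BATCH;
}

int main(void)
{
        /* Unbounded caller passes -1: it wraps to a huge u32, so the batch
         * size stays IB_POLL_BATCH. */
        printf("%u\n", poll_bound(-1, 0));   /* prints 16 */

        /* Bounded caller: never ask for more completions than remain. */
        printf("%u\n", poll_bound(8, 5));    /* prints 3 */
        return 0;
}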
+ */
+ device->dma_device = parent;
+ }
 mutex_lock(&device_mutex);
@@ -1015,8 +1029,7 @@ static int __init ib_core_init(void)
 return -ENOMEM;
 ib_comp_wq = alloc_workqueue("ib-comp-wq",
- WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
- WQ_UNBOUND_MAX_ACTIVE);
+ WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
 if (!ib_comp_wq) {
 ret = -ENOMEM;
 goto err;
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 446b56a5260b73..27f155d2df8da6 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -34,7 +34,8 @@
 #include
 #include
-#include
+#include
+#include
 #include
 #include
 #include
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index f2fc0431512def..cb2742b548bbed 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -32,6 +32,8 @@
 #include
 #include
+#include
+#include
 #include
 #include
 #include
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 318ec5267bdfe1..86ecd3ea6a4bd1 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -37,7 +37,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index d19662f635b1cc..5846c47c8d55e8 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -37,7 +37,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 7a3d906b36710f..e2cd2cd3b28a88 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -576,7 +576,7 @@ int hfi1_get_proc_affinity(int node)
 struct hfi1_affinity_node *entry;
 cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask;
 const struct cpumask *node_mask,
- *proc_mask = tsk_cpus_allowed(current);
+ *proc_mask = &current->cpus_allowed;
 struct hfi1_affinity_node_list *affinity = &node_affinity;
 struct cpu_mask_set *set = &affinity->proc;
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 3b19c16a9e4578..f78c739b330a45 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -48,6 +48,7 @@
 #include
 #include
 #include
+#include
 #include
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 1d81cac1fa6c83..5cde1ecda0fea8 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -856,7 +856,7 @@ struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
 {
 struct sdma_rht_node *rht_node;
 struct sdma_engine *sde = NULL;
- const struct cpumask *current_mask = tsk_cpus_allowed(current);
+ const struct cpumask *current_mask = &current->cpus_allowed;
 unsigned long cpu_id;
 /*
diff --git a/drivers/infiniband/hw/hfi1/user_pages.c b/drivers/infiniband/hw/hfi1/user_pages.c
index 20f4ddcac3b0f1..68295a12b77188 100644
--- a/drivers/infiniband/hw/hfi1/user_pages.c
+++ b/drivers/infiniband/hw/hfi1/user_pages.c
@@ -46,7 +46,7 @@
 */
 #include
-#include
+#include
 #include
 #include
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 0f5d43d1f5fc30..70c3e9e795082b 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -160,6 +160,9 @@ int

i40iw_inetaddr_event(struct notifier_block *notifier, return NOTIFY_DONE; iwdev = &hdl->device; + if (iwdev->init_state < INET_NOTIFIER) + return NOTIFY_DONE; + netdev = iwdev->ldev->netdev; upper_dev = netdev_master_upper_dev_get(netdev); if (netdev != event_netdev) @@ -214,6 +217,9 @@ int i40iw_inet6addr_event(struct notifier_block *notifier, return NOTIFY_DONE; iwdev = &hdl->device; + if (iwdev->init_state < INET_NOTIFIER) + return NOTIFY_DONE; + netdev = iwdev->ldev->netdev; if (netdev != event_netdev) return NOTIFY_DONE; @@ -260,6 +266,8 @@ int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void * if (!iwhdl) return NOTIFY_DONE; iwdev = &iwhdl->device; + if (iwdev->init_state < INET_NOTIFIER) + return NOTIFY_DONE; p = (__be32 *)neigh->primary_key; i40iw_copy_ip_ntohl(local_ipaddr, p); if (neigh->nud_state & NUD_VALID) { diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 88608906ce2503..fba94df28cf1b1 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -39,6 +39,9 @@ #include #include #include +#include +#include + #include #include #include diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 5b3355268725b8..4dc0a8785fe0d2 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -41,6 +41,8 @@ #include #endif #include +#include +#include #include #include #include diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index bc9fb144e57b8e..c52edeafd616a3 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -372,7 +372,7 @@ static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id, return 0; } -static u8 ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id, +static int ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id, bool dpp_pool) { int status; diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 12c4208fd7013b..af9f596bb68b29 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -7068,7 +7068,7 @@ static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start, unsigned long flags; while (wait) { - unsigned long shadow; + unsigned long shadow = 0; int cstart, previ = -1; /* diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c index 75f08624ac052a..ce83ba9a12eff6 100644 --- a/drivers/infiniband/hw/qib/qib_user_pages.c +++ b/drivers/infiniband/hw/qib/qib_user_pages.c @@ -32,6 +32,7 @@ */ #include +#include #include #include "qib.h" diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c index 1ccee6ea5bc309..c49db7c33979c9 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom.c +++ b/drivers/infiniband/hw/usnic/usnic_uiom.c @@ -34,7 +34,8 @@ #include #include -#include +#include +#include #include #include #include diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h index 3cd96c1b95029d..9fbe22d3467b22 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h @@ -69,6 +69,9 @@ */ #define PCI_DEVICE_ID_VMWARE_PVRDMA 0x0820 +#define PVRDMA_NUM_RING_PAGES 4 +#define PVRDMA_QP_NUM_HEADER_PAGES 1 + struct pvrdma_dev; struct pvrdma_page_dir { diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h 
b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h index e69d6f3cae32b5..09078ccfaec719 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h @@ -132,7 +132,7 @@ enum pvrdma_pci_resource { enum pvrdma_device_ctl { PVRDMA_DEVICE_CTL_ACTIVATE, /* Activate device. */ - PVRDMA_DEVICE_CTL_QUIESCE, /* Quiesce device. */ + PVRDMA_DEVICE_CTL_UNQUIESCE, /* Unquiesce device. */ PVRDMA_DEVICE_CTL_RESET, /* Reset device. */ }; diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c index 100bea5c42ffb7..34ebc7615411d9 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c @@ -56,7 +56,7 @@ #include "pvrdma.h" #define DRV_NAME "vmw_pvrdma" -#define DRV_VERSION "1.0.0.0-k" +#define DRV_VERSION "1.0.1.0-k" static DEFINE_MUTEX(pvrdma_device_list_lock); static LIST_HEAD(pvrdma_device_list); @@ -660,7 +660,16 @@ static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev, pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR); break; case NETDEV_UP: - pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE); + pvrdma_write_reg(dev, PVRDMA_REG_CTL, + PVRDMA_DEVICE_CTL_UNQUIESCE); + + mb(); + + if (pvrdma_read_reg(dev, PVRDMA_REG_ERR)) + dev_err(&dev->pdev->dev, + "failed to activate device during link up\n"); + else + pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE); break; default: dev_dbg(&dev->pdev->dev, "ignore netdevice event %ld on %s\n", @@ -858,7 +867,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev, dev->dsr->resp_slot_dma = (u64)slot_dma; /* Async event ring */ - dev->dsr->async_ring_pages.num_pages = 4; + dev->dsr->async_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES; ret = pvrdma_page_dir_init(dev, &dev->async_pdir, dev->dsr->async_ring_pages.num_pages, true); if (ret) @@ -867,7 +876,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev, dev->dsr->async_ring_pages.pdir_dma = dev->async_pdir.dir_dma; /* CQ notification ring */ - dev->dsr->cq_ring_pages.num_pages = 4; + dev->dsr->cq_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES; ret = pvrdma_page_dir_init(dev, &dev->cq_pdir, dev->dsr->cq_ring_pages.num_pages, true); if (ret) diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c index dbbfd35e7da7ad..30062aad3af1a2 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c @@ -170,8 +170,9 @@ static int pvrdma_set_sq_size(struct pvrdma_dev *dev, struct ib_qp_cap *req_cap, sizeof(struct pvrdma_sge) * qp->sq.max_sg); /* Note: one extra page for the header. */ - qp->npages_send = 1 + (qp->sq.wqe_cnt * qp->sq.wqe_size + - PAGE_SIZE - 1) / PAGE_SIZE; + qp->npages_send = PVRDMA_QP_NUM_HEADER_PAGES + + (qp->sq.wqe_cnt * qp->sq.wqe_size + PAGE_SIZE - 1) / + PAGE_SIZE; return 0; } @@ -288,7 +289,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd, qp->npages = qp->npages_send + qp->npages_recv; /* Skip header page. */ - qp->sq.offset = PAGE_SIZE; + qp->sq.offset = PVRDMA_QP_NUM_HEADER_PAGES * PAGE_SIZE; /* Recv queue pages are after send pages. 
*/ qp->rq.offset = qp->npages_send * PAGE_SIZE; @@ -341,7 +342,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd, cmd->qp_type = ib_qp_type_to_pvrdma(init_attr->qp_type); cmd->access_flags = IB_ACCESS_LOCAL_WRITE; cmd->total_chunks = qp->npages; - cmd->send_chunks = qp->npages_send - 1; + cmd->send_chunks = qp->npages_send - PVRDMA_QP_NUM_HEADER_PAGES; cmd->pdir_dma = qp->pdir.dir_dma; dev_dbg(&dev->pdev->dev, "create queuepair with %d, %d, %d, %d\n", @@ -554,13 +555,13 @@ int pvrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, return ret; } -static inline void *get_sq_wqe(struct pvrdma_qp *qp, int n) +static inline void *get_sq_wqe(struct pvrdma_qp *qp, unsigned int n) { return pvrdma_page_dir_get_ptr(&qp->pdir, qp->sq.offset + n * qp->sq.wqe_size); } -static inline void *get_rq_wqe(struct pvrdma_qp *qp, int n) +static inline void *get_rq_wqe(struct pvrdma_qp *qp, unsigned int n) { return pvrdma_page_dir_get_ptr(&qp->pdir, qp->rq.offset + n * qp->rq.wqe_size); @@ -598,9 +599,7 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, unsigned long flags; struct pvrdma_sq_wqe_hdr *wqe_hdr; struct pvrdma_sge *sge; - int i, index; - int nreq; - int ret; + int i, ret; /* * In states lower than RTS, we can fail immediately. In other states, @@ -613,9 +612,8 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, spin_lock_irqsave(&qp->sq.lock, flags); - index = pvrdma_idx(&qp->sq.ring->prod_tail, qp->sq.wqe_cnt); - for (nreq = 0; wr; nreq++, wr = wr->next) { - unsigned int tail; + while (wr) { + unsigned int tail = 0; if (unlikely(!pvrdma_idx_ring_has_space( qp->sq.ring, qp->sq.wqe_cnt, &tail))) { @@ -680,7 +678,7 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, } } - wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, index); + wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, tail); memset(wqe_hdr, 0, sizeof(*wqe_hdr)); wqe_hdr->wr_id = wr->wr_id; wqe_hdr->num_sge = wr->num_sge; @@ -771,12 +769,11 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, /* Make sure wqe is written before index update */ smp_wmb(); - index++; - if (unlikely(index >= qp->sq.wqe_cnt)) - index = 0; /* Update shared sq ring */ pvrdma_idx_ring_inc(&qp->sq.ring->prod_tail, qp->sq.wqe_cnt); + + wr = wr->next; } ret = 0; @@ -806,7 +803,6 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, struct pvrdma_qp *qp = to_vqp(ibqp); struct pvrdma_rq_wqe_hdr *wqe_hdr; struct pvrdma_sge *sge; - int index, nreq; int ret = 0; int i; @@ -821,9 +817,8 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, spin_lock_irqsave(&qp->rq.lock, flags); - index = pvrdma_idx(&qp->rq.ring->prod_tail, qp->rq.wqe_cnt); - for (nreq = 0; wr; nreq++, wr = wr->next) { - unsigned int tail; + while (wr) { + unsigned int tail = 0; if (unlikely(wr->num_sge > qp->rq.max_sg || wr->num_sge < 0)) { @@ -843,7 +838,7 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, goto out; } - wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, index); + wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, tail); wqe_hdr->wr_id = wr->wr_id; wqe_hdr->num_sge = wr->num_sge; wqe_hdr->total_len = 0; @@ -859,12 +854,11 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, /* Make sure wqe is written before index update */ smp_wmb(); - index++; - if (unlikely(index >= qp->rq.wqe_cnt)) - index = 0; /* Update shared rq ring */ pvrdma_idx_ring_inc(&qp->rq.ring->prod_tail, qp->rq.wqe_cnt); + + wr = wr->next; } spin_unlock_irqrestore(&qp->rq.lock, flags); diff 
--git a/drivers/infiniband/sw/rdmavt/mmap.c b/drivers/infiniband/sw/rdmavt/mmap.c index e202b8142759f5..6b712eecbd37d9 100644 --- a/drivers/infiniband/sw/rdmavt/mmap.c +++ b/drivers/infiniband/sw/rdmavt/mmap.c @@ -170,9 +170,9 @@ struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, spin_lock_irq(&rdi->mmap_offset_lock); if (rdi->mmap_offset == 0) - rdi->mmap_offset = PAGE_SIZE; + rdi->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA); ip->offset = rdi->mmap_offset; - rdi->mmap_offset += size; + rdi->mmap_offset += ALIGN(size, SHMLBA); spin_unlock_irq(&rdi->mmap_offset_lock); INIT_LIST_HEAD(&ip->pending_mmaps); diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig index 7d1ac27ed2516d..6332dedc11e8a3 100644 --- a/drivers/infiniband/sw/rxe/Kconfig +++ b/drivers/infiniband/sw/rxe/Kconfig @@ -22,4 +22,4 @@ config RDMA_RXE To configure and work with soft-RoCE driver please use the following wiki page under "configure Soft-RoCE (RXE)" section: - https://github.com/SoftRoCE/rxe-dev/wiki/rxe-dev:-Home + https://github.com/linux-rdma/rdma-core/blob/master/Documentation/rxe.md diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c index c572a4c09359c9..bd812e00988ed3 100644 --- a/drivers/infiniband/sw/rxe/rxe_mmap.c +++ b/drivers/infiniband/sw/rxe/rxe_mmap.c @@ -156,10 +156,10 @@ struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe, spin_lock_bh(&rxe->mmap_offset_lock); if (rxe->mmap_offset == 0) - rxe->mmap_offset = PAGE_SIZE; + rxe->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA); ip->info.offset = rxe->mmap_offset; - rxe->mmap_offset += size; + rxe->mmap_offset += ALIGN(size, SHMLBA); spin_unlock_bh(&rxe->mmap_offset_lock); diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c index dbfde0dc6ff7e7..9f95f50b290904 100644 --- a/drivers/infiniband/sw/rxe/rxe_req.c +++ b/drivers/infiniband/sw/rxe/rxe_req.c @@ -729,11 +729,11 @@ int rxe_requester(void *arg) ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb); if (ret) { qp->need_req_skb = 1; - kfree_skb(skb); rollback_state(wqe, qp, &rollback_wqe, rollback_psn); if (ret == -EAGAIN) { + kfree_skb(skb); rxe_run_task(&qp->req.task, 1); goto exit; } diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index d404a8aba7afca..c9dd385ce62e2c 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c @@ -813,18 +813,17 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt) WARN_ON_ONCE(1); } - /* We successfully processed this new request. */ - qp->resp.msn++; - /* next expected psn, read handles this separately */ qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK; qp->resp.opcode = pkt->opcode; qp->resp.status = IB_WC_SUCCESS; - if (pkt->mask & RXE_COMP_MASK) + if (pkt->mask & RXE_COMP_MASK) { + /* We successfully processed this new request. 
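Aside on the rdmavt and rxe mmap hunks above, not part of the patch: the per-object mmap offsets are now rounded up to SHMLBA instead of PAGE_SIZE. SHMLBA is the architecture's shared-mapping alignment; on machines with virtually indexed caches it is larger than a page, and keeping the offsets SHMLBA-aligned lets user mappings of the same queue memory be placed at cache-colour-compatible addresses. A minimal sketch of the rounding arithmetic; the 16 KiB SHMLBA value below is made up, the real one comes from the architecture's shmparam definition:

#include <stdio.h>

/* Same rounding the kernel's ALIGN() does (power-of-two alignment only). */
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
        unsigned long page_size = 4096;
        unsigned long shmlba    = 16384;  /* hypothetical: 4 pages */

        /* First object starts at a SHMLBA-aligned, non-zero offset ... */
        unsigned long offset = ALIGN_UP(page_size, shmlba);

        /* ... and each object advances the cursor by a SHMLBA-aligned
         * amount, so every mmap cookie stays SHMLBA-aligned. */
        offset += ALIGN_UP(9000, shmlba);

        printf("first=%lu next=%lu\n", ALIGN_UP(page_size, shmlba), offset);
        return 0;
}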
*/ + qp->resp.msn++; return RESPST_COMPLETE; - else if (qp_type(qp) == IB_QPT_RC) + } else if (qp_type(qp) == IB_QPT_RC) return RESPST_ACKNOWLEDGE; else return RESPST_CLEANUP; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index a6d6c617b59736..0cdf2b7f272f3a 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -38,6 +38,7 @@ #include #include #include +#include #include "ipoib.h" diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c index deedb6fc1b05c5..3e10e3dac2e7f5 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c @@ -31,6 +31,7 @@ */ #include +#include #include #include diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 9d0b22ad58c157..c1ae4aeae2f90e 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -430,6 +430,7 @@ struct iser_fr_desc { struct list_head list; struct iser_reg_resources rsc; struct iser_pi_context *pi_ctx; + struct list_head all_list; }; /** @@ -443,6 +444,7 @@ struct iser_fr_pool { struct list_head list; spinlock_t lock; int size; + struct list_head all_list; }; /** diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 30b622f2ab7382..c538a38c91ce95 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -362,6 +362,7 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn, int i, ret; INIT_LIST_HEAD(&fr_pool->list); + INIT_LIST_HEAD(&fr_pool->all_list); spin_lock_init(&fr_pool->lock); fr_pool->size = 0; for (i = 0; i < cmds_max; i++) { @@ -373,6 +374,7 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn, } list_add_tail(&desc->list, &fr_pool->list); + list_add_tail(&desc->all_list, &fr_pool->all_list); fr_pool->size++; } @@ -392,13 +394,13 @@ void iser_free_fastreg_pool(struct ib_conn *ib_conn) struct iser_fr_desc *desc, *tmp; int i = 0; - if (list_empty(&fr_pool->list)) + if (list_empty(&fr_pool->all_list)) return; iser_info("freeing conn %p fr pool\n", ib_conn); - list_for_each_entry_safe(desc, tmp, &fr_pool->list, list) { - list_del(&desc->list); + list_for_each_entry_safe(desc, tmp, &fr_pool->all_list, all_list) { + list_del(&desc->all_list); iser_free_reg_res(&desc->rsc); if (desc->pi_ctx) iser_free_pi_ctx(desc->pi_ctx); diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c index d96aa27dfcdc97..db64adfbe1aff0 100644 --- a/drivers/input/joystick/iforce/iforce-usb.c +++ b/drivers/input/joystick/iforce/iforce-usb.c @@ -141,6 +141,9 @@ static int iforce_usb_probe(struct usb_interface *intf, interface = intf->cur_altsetting; + if (interface->desc.bNumEndpoints < 2) + return -ENODEV; + epirq = &interface->endpoint[0].desc; epout = &interface->endpoint[1].desc; diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c index 9cc6d057c302a1..23c191a2a0715a 100644 --- a/drivers/input/misc/cm109.c +++ b/drivers/input/misc/cm109.c @@ -700,6 +700,10 @@ static int cm109_usb_probe(struct usb_interface *intf, int error = -ENOMEM; interface = intf->cur_altsetting; + + if (interface->desc.bNumEndpoints < 1) + return -ENODEV; + endpoint = &interface->endpoint[0].desc; if (!usb_endpoint_is_int_in(endpoint)) diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c index 9c0ea36913b4a9..f4e8fbec6a942a 100644 --- 
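Aside, not part of the patch: the iforce and cm109 hunks above, and several more probe functions later in this series (ims-pcu, yealink, hanwang, kbtab, sur40, bas-gigaset), add the same defensive check: validate bNumEndpoints before indexing the endpoint array, so a malformed or malicious USB device cannot drive the probe routine past the end of its descriptors. A simplified stand-alone sketch of the pattern; the structures here are invented stand-ins, not the real usb_interface layout:

/* Simplified stand-ins for the USB core structures. */
struct endpoint_desc {
        unsigned char bEndpointAddress;
};

struct altsetting {
        unsigned char bNumEndpoints;
        struct endpoint_desc endpoint[16];
};

#define MY_ENODEV 19

/* Probe-time guard: refuse the device instead of trusting its descriptor
 * to contain as many endpoints as the driver expects. */
static int probe_guard(const struct altsetting *alt, unsigned int needed)
{
        if (alt->bNumEndpoints < needed)
                return -MY_ENODEV;

        /* Only now is alt->endpoint[needed - 1] known to be in bounds. */
        return 0;
}

int main(void)
{
        struct altsetting short_desc = { .bNumEndpoints = 1 };

        /* A device advertising one endpoint is rejected by a driver that
         * needs two, instead of being dereferenced out of bounds. */
        return probe_guard(&short_desc, 2) ? 0 : 1;
}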
a/drivers/input/misc/ims-pcu.c +++ b/drivers/input/misc/ims-pcu.c @@ -1667,6 +1667,10 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc return -EINVAL; alt = pcu->ctrl_intf->cur_altsetting; + + if (alt->desc.bNumEndpoints < 1) + return -ENODEV; + pcu->ep_ctrl = &alt->endpoint[0].desc; pcu->max_ctrl_size = usb_endpoint_maxp(pcu->ep_ctrl); diff --git a/drivers/input/misc/yealink.c b/drivers/input/misc/yealink.c index 79c964c075f140..6e7ff9561d9261 100644 --- a/drivers/input/misc/yealink.c +++ b/drivers/input/misc/yealink.c @@ -875,6 +875,10 @@ static int usb_probe(struct usb_interface *intf, const struct usb_device_id *id) int ret, pipe, i; interface = intf->cur_altsetting; + + if (interface->desc.bNumEndpoints < 1) + return -ENODEV; + endpoint = &interface->endpoint[0].desc; if (!usb_endpoint_is_int_in(endpoint)) return -ENODEV; diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 72b28ebfe36003..f210e19ddba66b 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -1282,10 +1282,8 @@ static int alps_decode_ss4_v2(struct alps_fields *f, /* handle buttons */ if (pkt_id == SS4_PACKET_ID_STICK) { f->ts_left = !!(SS4_BTN_V2(p) & 0x01); - if (!(priv->flags & ALPS_BUTTONPAD)) { - f->ts_right = !!(SS4_BTN_V2(p) & 0x02); - f->ts_middle = !!(SS4_BTN_V2(p) & 0x04); - } + f->ts_right = !!(SS4_BTN_V2(p) & 0x02); + f->ts_middle = !!(SS4_BTN_V2(p) & 0x04); } else { f->left = !!(SS4_BTN_V2(p) & 0x01); if (!(priv->flags & ALPS_BUTTONPAD)) { @@ -2462,14 +2460,34 @@ static int alps_update_device_area_ss4_v2(unsigned char otp[][4], int num_y_electrode; int x_pitch, y_pitch, x_phys, y_phys; - num_x_electrode = SS4_NUMSENSOR_XOFFSET + (otp[1][0] & 0x0F); - num_y_electrode = SS4_NUMSENSOR_YOFFSET + ((otp[1][0] >> 4) & 0x0F); + if (IS_SS4PLUS_DEV(priv->dev_id)) { + num_x_electrode = + SS4PLUS_NUMSENSOR_XOFFSET + (otp[0][2] & 0x0F); + num_y_electrode = + SS4PLUS_NUMSENSOR_YOFFSET + ((otp[0][2] >> 4) & 0x0F); + + priv->x_max = + (num_x_electrode - 1) * SS4PLUS_COUNT_PER_ELECTRODE; + priv->y_max = + (num_y_electrode - 1) * SS4PLUS_COUNT_PER_ELECTRODE; - priv->x_max = (num_x_electrode - 1) * SS4_COUNT_PER_ELECTRODE; - priv->y_max = (num_y_electrode - 1) * SS4_COUNT_PER_ELECTRODE; + x_pitch = (otp[0][1] & 0x0F) + SS4PLUS_MIN_PITCH_MM; + y_pitch = ((otp[0][1] >> 4) & 0x0F) + SS4PLUS_MIN_PITCH_MM; - x_pitch = ((otp[1][2] >> 2) & 0x07) + SS4_MIN_PITCH_MM; - y_pitch = ((otp[1][2] >> 5) & 0x07) + SS4_MIN_PITCH_MM; + } else { + num_x_electrode = + SS4_NUMSENSOR_XOFFSET + (otp[1][0] & 0x0F); + num_y_electrode = + SS4_NUMSENSOR_YOFFSET + ((otp[1][0] >> 4) & 0x0F); + + priv->x_max = + (num_x_electrode - 1) * SS4_COUNT_PER_ELECTRODE; + priv->y_max = + (num_y_electrode - 1) * SS4_COUNT_PER_ELECTRODE; + + x_pitch = ((otp[1][2] >> 2) & 0x07) + SS4_MIN_PITCH_MM; + y_pitch = ((otp[1][2] >> 5) & 0x07) + SS4_MIN_PITCH_MM; + } x_phys = x_pitch * (num_x_electrode - 1); /* In 0.1 mm units */ y_phys = y_pitch * (num_y_electrode - 1); /* In 0.1 mm units */ @@ -2485,7 +2503,10 @@ static int alps_update_btn_info_ss4_v2(unsigned char otp[][4], { unsigned char is_btnless; - is_btnless = (otp[1][1] >> 3) & 0x01; + if (IS_SS4PLUS_DEV(priv->dev_id)) + is_btnless = (otp[1][0] >> 1) & 0x01; + else + is_btnless = (otp[1][1] >> 3) & 0x01; if (is_btnless) priv->flags |= ALPS_BUTTONPAD; @@ -2493,6 +2514,21 @@ static int alps_update_btn_info_ss4_v2(unsigned char otp[][4], return 0; } +static int alps_update_dual_info_ss4_v2(unsigned char otp[][4], + struct alps_data *priv) +{ + 
bool is_dual = false; + + if (IS_SS4PLUS_DEV(priv->dev_id)) + is_dual = (otp[0][0] >> 4) & 0x01; + + if (is_dual) + priv->flags |= ALPS_DUALPOINT | + ALPS_DUALPOINT_WITH_PRESSURE; + + return 0; +} + static int alps_set_defaults_ss4_v2(struct psmouse *psmouse, struct alps_data *priv) { @@ -2508,6 +2544,8 @@ static int alps_set_defaults_ss4_v2(struct psmouse *psmouse, alps_update_btn_info_ss4_v2(otp, priv); + alps_update_dual_info_ss4_v2(otp, priv); + return 0; } @@ -2753,10 +2791,6 @@ static int alps_set_protocol(struct psmouse *psmouse, if (alps_set_defaults_ss4_v2(psmouse, priv)) return -EIO; - if (priv->fw_ver[1] == 0x1) - priv->flags |= ALPS_DUALPOINT | - ALPS_DUALPOINT_WITH_PRESSURE; - break; } @@ -2827,10 +2861,7 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv) ec[2] >= 0x90 && ec[2] <= 0x9d) { protocol = &alps_v3_protocol_data; } else if (e7[0] == 0x73 && e7[1] == 0x03 && - e7[2] == 0x14 && ec[1] == 0x02) { - protocol = &alps_v8_protocol_data; - } else if (e7[0] == 0x73 && e7[1] == 0x03 && - e7[2] == 0x28 && ec[1] == 0x01) { + (e7[2] == 0x14 || e7[2] == 0x28)) { protocol = &alps_v8_protocol_data; } else { psmouse_dbg(psmouse, @@ -2840,7 +2871,8 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv) } if (priv) { - /* Save the Firmware version */ + /* Save Device ID and Firmware version */ + memcpy(priv->dev_id, e7, 3); memcpy(priv->fw_ver, ec, 3); error = alps_set_protocol(psmouse, priv, protocol); if (error) diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h index 6d279aa27cb9a1..4334f2805d93c7 100644 --- a/drivers/input/mouse/alps.h +++ b/drivers/input/mouse/alps.h @@ -54,6 +54,16 @@ enum SS4_PACKET_ID { #define SS4_MASK_NORMAL_BUTTONS 0x07 +#define SS4PLUS_COUNT_PER_ELECTRODE 128 +#define SS4PLUS_NUMSENSOR_XOFFSET 16 +#define SS4PLUS_NUMSENSOR_YOFFSET 5 +#define SS4PLUS_MIN_PITCH_MM 37 + +#define IS_SS4PLUS_DEV(_b) (((_b[0]) == 0x73) && \ + ((_b[1]) == 0x03) && \ + ((_b[2]) == 0x28) \ + ) + #define SS4_IS_IDLE_V2(_b) (((_b[0]) == 0x18) && \ ((_b[1]) == 0x10) && \ ((_b[2]) == 0x00) && \ @@ -283,6 +293,7 @@ struct alps_data { int addr_command; u16 proto_version; u8 byte0, mask0; + u8 dev_id[3]; u8 fw_ver[3]; int flags; int x_max; diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index 352050e9031dc3..d5ab9ddef3e37e 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -218,17 +218,19 @@ static int elan_query_product(struct elan_tp_data *data) static int elan_check_ASUS_special_fw(struct elan_tp_data *data) { - if (data->ic_type != 0x0E) - return false; - - switch (data->product_id) { - case 0x05 ... 0x07: - case 0x09: - case 0x13: + if (data->ic_type == 0x0E) { + switch (data->product_id) { + case 0x05 ... 0x07: + case 0x09: + case 0x13: + return true; + } + } else if (data->ic_type == 0x08 && data->product_id == 0x26) { + /* ASUS EeeBook X205TA */ return true; - default: - return false; } + + return false; } static int __elan_initialize(struct elan_tp_data *data) diff --git a/drivers/input/rmi4/rmi_f30.c b/drivers/input/rmi4/rmi_f30.c index 1986786133824d..34dfee555b201b 100644 --- a/drivers/input/rmi4/rmi_f30.c +++ b/drivers/input/rmi4/rmi_f30.c @@ -170,6 +170,10 @@ static int rmi_f30_config(struct rmi_function *fn) rmi_get_platform_data(fn->rmi_dev); int error; + /* can happen if f30_data.disable is set */ + if (!f30) + return 0; + if (pdata->f30_data.trackstick_buttons) { /* Try [re-]establish link to F03. 
*/ f30->f03 = rmi_find_function(fn->rmi_dev, 0x03); diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 05afd16ea9c9ef..312bd6ca919806 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -119,6 +119,13 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "DL760"), }, }, + { + /* Dell Embedded Box PC 3000 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Embedded Box PC 3000"), + }, + }, { /* OQO Model 01 */ .matches = { @@ -513,6 +520,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"), }, }, + { + /* TUXEDO BU1406 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Notebook"), + DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"), + }, + }, { } }; diff --git a/drivers/input/tablet/hanwang.c b/drivers/input/tablet/hanwang.c index cd852059b99e81..df4bea96d7ed7d 100644 --- a/drivers/input/tablet/hanwang.c +++ b/drivers/input/tablet/hanwang.c @@ -340,6 +340,9 @@ static int hanwang_probe(struct usb_interface *intf, const struct usb_device_id int error; int i; + if (intf->cur_altsetting->desc.bNumEndpoints < 1) + return -ENODEV; + hanwang = kzalloc(sizeof(struct hanwang), GFP_KERNEL); input_dev = input_allocate_device(); if (!hanwang || !input_dev) { diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c index e850d7e8afbc4d..4d9d64908b595f 100644 --- a/drivers/input/tablet/kbtab.c +++ b/drivers/input/tablet/kbtab.c @@ -122,6 +122,9 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i struct input_dev *input_dev; int error = -ENOMEM; + if (intf->cur_altsetting->desc.bNumEndpoints < 1) + return -ENODEV; + kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL); input_dev = input_allocate_device(); if (!kbtab || !input_dev) diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c index aefb6e11f88a08..4c0eecae065c11 100644 --- a/drivers/input/touchscreen/sur40.c +++ b/drivers/input/touchscreen/sur40.c @@ -527,6 +527,9 @@ static int sur40_probe(struct usb_interface *interface, if (iface_desc->desc.bInterfaceClass != 0xFF) return -ENODEV; + if (iface_desc->desc.bNumEndpoints < 5) + return -ENODEV; + /* Use endpoint #4 (0x86). 
 */
 endpoint = &iface_desc->endpoint[4].desc;
 if (endpoint->bEndpointAddress != TOUCH_ENDPOINT)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 98940d1392cb0c..b17536d6e69bdb 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3202,7 +3202,7 @@ static void amd_iommu_get_resv_regions(struct device *dev,
 region = iommu_alloc_resv_region(MSI_RANGE_START,
 MSI_RANGE_END - MSI_RANGE_START + 1,
- 0, IOMMU_RESV_RESERVED);
+ 0, IOMMU_RESV_MSI);
 if (!region)
 return;
 list_add_tail(&region->list, head);
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index f8ed8c95b68537..063343909b0d12 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -22,6 +22,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 5806a6acc94ecd..591bb96047c976 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1888,7 +1888,7 @@ static void arm_smmu_get_resv_regions(struct device *dev,
 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
- prot, IOMMU_RESV_MSI);
+ prot, IOMMU_RESV_SW_MSI);
 if (!region)
 return;
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index abf6496843a617..b493c99e17f74d 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1608,7 +1608,7 @@ static void arm_smmu_get_resv_regions(struct device *dev,
 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
- prot, IOMMU_RESV_MSI);
+ prot, IOMMU_RESV_SW_MSI);
 if (!region)
 return;
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index a7e0821c9967e4..c01bfcdb238316 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -512,7 +512,13 @@ static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
 spin_lock_irqsave(&data->lock, flags);
 if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
 clk_enable(data->clk_master);
- __sysmmu_tlb_invalidate_entry(data, iova, 1);
+ if (sysmmu_block(data)) {
+ if (data->version >= MAKE_MMU_VER(5, 0))
+ __sysmmu_tlb_invalidate(data);
+ else
+ __sysmmu_tlb_invalidate_entry(data, iova, 1);
+ sysmmu_unblock(data);
+ }
 clk_disable(data->clk_master);
 }
 spin_unlock_irqrestore(&data->lock, flags);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 238ad3447712d2..d412a313a37232 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -916,7 +916,7 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
 * which we used for the IOMMU lookup. Strictly speaking
 * we could do this for all PCI devices; we only need to
 * get the BDF# from the scope table for ACPI matches.
 */
- if (pdev->is_virtfn)
+ if (pdev && pdev->is_virtfn)
 goto got_pdev;
 *bus = drhd->devices[i].bus;
@@ -5249,7 +5249,7 @@ static void intel_iommu_get_resv_regions(struct device *device,
 reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
 IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
- 0, IOMMU_RESV_RESERVED);
+ 0, IOMMU_RESV_MSI);
 if (!reg)
 return;
 list_add_tail(&reg->list, head);
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 51f2b228723f2c..23c427602c55ba 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -16,6 +16,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 1c049e2e12bf0d..8d6ca28c3e1f14 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -422,8 +422,12 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
 pte |= ARM_V7S_ATTR_NS_TABLE;
 __arm_v7s_set_pte(ptep, pte, 1, cfg);
- } else {
+ } else if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) {
 cptep = iopte_deref(pte, lvl);
+ } else {
+ /* We require an unmap first */
+ WARN_ON(!selftest_running);
+ return -EEXIST;
 }
 /* Rinse, repeat */
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index feacc54bec683b..f9bc6ebb8140b0 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -335,8 +335,12 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
 if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
 pte |= ARM_LPAE_PTE_NSTABLE;
 __arm_lpae_set_pte(ptep, pte, cfg);
- } else {
+ } else if (!iopte_leaf(pte, lvl)) {
 cptep = iopte_deref(pte, data);
+ } else {
+ /* We require an unmap first */
+ WARN_ON(!selftest_running);
+ return -EEXIST;
 }
 /* Rinse, repeat */
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 8ea14f41a979fd..3b67144dead2e3 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -72,6 +72,7 @@ static const char * const iommu_group_resv_type_string[] = {
 [IOMMU_RESV_DIRECT] = "direct",
 [IOMMU_RESV_RESERVED] = "reserved",
 [IOMMU_RESV_MSI] = "msi",
+ [IOMMU_RESV_SW_MSI] = "msi",
 };
 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
@@ -1743,8 +1744,8 @@ void iommu_put_resv_regions(struct device *dev, struct list_head *list)
 }
 struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
- size_t length,
- int prot, int type)
+ size_t length, int prot,
+ enum iommu_resv_type type)
 {
 struct iommu_resv_region *region;
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 125528f39e92c2..8162121bb1bcd7 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -262,6 +262,7 @@ config IRQ_MXS
 config MVEBU_ODMI
 bool
+ select GENERIC_MSI_IRQ_DOMAIN
 config MVEBU_PIC
 bool
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
index 1eef56a89b1fbf..f96601268f7194 100644
--- a/drivers/irqchip/irq-crossbar.c
+++ b/drivers/irqchip/irq-crossbar.c
@@ -198,7 +198,8 @@ static const struct irq_domain_ops crossbar_domain_ops = {
 static int __init crossbar_of_init(struct device_node *node)
 {
- int i, size, max = 0, reserved = 0, entry;
+ u32 max = 0, entry, reg_size;
+ int i, size, reserved = 0;
 const __be32 *irqsr;
 int ret = -ENOMEM;
@@ -275,9 +276,9 @@ static int __init crossbar_of_init(struct device_node *node)
 if (!cb->register_offsets)
 goto err_irq_map;
- of_property_read_u32(node, "ti,reg-size", &size);
+ of_property_read_u32(node, "ti,reg-size", &reg_size);
- switch (size) {
+ switch 
(reg_size) { case 1: cb->write = crossbar_writeb; break; @@ -303,7 +304,7 @@ static int __init crossbar_of_init(struct device_node *node) continue; cb->register_offsets[i] = reserved; - reserved += size; + reserved += reg_size; } of_property_read_u32(node, "ti,irqs-safe-map", &cb->safe_map); diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 23201004fd7a68..f77f840d2b5f79 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -1601,6 +1601,14 @@ static void __maybe_unused its_enable_quirk_cavium_23144(void *data) its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; } +static void __maybe_unused its_enable_quirk_qdf2400_e0065(void *data) +{ + struct its_node *its = data; + + /* On QDF2400, the size of the ITE is 16Bytes */ + its->ite_size = 16; +} + static const struct gic_quirk its_quirks[] = { #ifdef CONFIG_CAVIUM_ERRATUM_22375 { @@ -1617,6 +1625,14 @@ static const struct gic_quirk its_quirks[] = { .mask = 0xffff0fff, .init = its_enable_quirk_cavium_23144, }, +#endif +#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065 + { + .desc = "ITS: QDF2400 erratum 0065", + .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */ + .mask = 0xffffffff, + .init = its_enable_quirk_qdf2400_e0065, + }, #endif { } diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index 11d12bccc4e7f1..cd20df12d63d98 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -991,8 +991,12 @@ static void __init gic_map_single_int(struct device_node *node, static void __init gic_map_interrupts(struct device_node *node) { + gic_map_single_int(node, GIC_LOCAL_INT_WD); + gic_map_single_int(node, GIC_LOCAL_INT_COMPARE); gic_map_single_int(node, GIC_LOCAL_INT_TIMER); gic_map_single_int(node, GIC_LOCAL_INT_PERFCTR); + gic_map_single_int(node, GIC_LOCAL_INT_SWINT0); + gic_map_single_int(node, GIC_LOCAL_INT_SWINT1); gic_map_single_int(node, GIC_LOCAL_INT_FDC); } diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c index 49d0f70c2baee3..9ca691d6c13b4d 100644 --- a/drivers/isdn/capi/kcapi.c +++ b/drivers/isdn/capi/kcapi.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include #include @@ -1032,6 +1032,7 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data) sizeof(avmb1_carddef)))) return -EFAULT; cdef.cardtype = AVM_CARDTYPE_B1; + cdef.cardnr = 0; } else { if ((retval = copy_from_user(&cdef, data, sizeof(avmb1_extcarddef)))) diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c index 11e13c56126fba..2da3ff650e1d55 100644 --- a/drivers/isdn/gigaset/bas-gigaset.c +++ b/drivers/isdn/gigaset/bas-gigaset.c @@ -2317,6 +2317,9 @@ static int gigaset_probe(struct usb_interface *interface, return -ENODEV; } + if (hostif->desc.bNumEndpoints < 1) + return -ENODEV; + dev_info(&udev->dev, "%s: Device matched (Vendor: 0x%x, Product: 0x%x)\n", __func__, le16_to_cpu(udev->descriptor.idVendor), diff --git a/drivers/isdn/hisax/st5481_b.c b/drivers/isdn/hisax/st5481_b.c index 409849165838fb..f64a36007800cf 100644 --- a/drivers/isdn/hisax/st5481_b.c +++ b/drivers/isdn/hisax/st5481_b.c @@ -239,7 +239,7 @@ static void st5481B_mode(struct st5481_bcs *bcs, int mode) } } } else { - // Disble B channel interrupts + // Disable B channel interrupts st5481_usb_device_ctrl_msg(adapter, FFMSK_B1+(bcs->channel * 2), 0, NULL, NULL); // Disable B channel FIFOs diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c index 63eaa0a9f8a18e..1b169559a240b0 100644 --- 
a/drivers/isdn/i4l/isdn_tty.c +++ b/drivers/isdn/i4l/isdn_tty.c @@ -15,6 +15,7 @@ #include #include #include +#include #include "isdn_common.h" #include "isdn_tty.h" #ifdef CONFIG_ISDN_AUDIO diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c index 67c21876c35f1a..6ceca7db62ad42 100644 --- a/drivers/isdn/mISDN/l1oip_core.c +++ b/drivers/isdn/mISDN/l1oip_core.c @@ -234,6 +234,8 @@ #include #include #include +#include + #include #include "core.h" #include "l1oip.h" diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c index b324474c0c12ee..8b7faea2ddf88b 100644 --- a/drivers/isdn/mISDN/stack.c +++ b/drivers/isdn/mISDN/stack.c @@ -19,6 +19,9 @@ #include #include #include +#include +#include + #include "core.h" static u_int *debug; diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c index 9438d7ec33080a..b1e135fc1fb504 100644 --- a/drivers/isdn/mISDN/timerdev.c +++ b/drivers/isdn/mISDN/timerdev.c @@ -25,6 +25,8 @@ #include #include #include +#include + #include "core.h" static DEFINE_MUTEX(mISDN_mutex); diff --git a/drivers/leds/trigger/ledtrig-heartbeat.c b/drivers/leds/trigger/ledtrig-heartbeat.c index e6f2f8b9f09ad4..afa3b40992140b 100644 --- a/drivers/leds/trigger/ledtrig-heartbeat.c +++ b/drivers/leds/trigger/ledtrig-heartbeat.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c index ac219045daf7c0..395ed1961dbfb6 100644 --- a/drivers/lguest/core.c +++ b/drivers/lguest/core.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c index 30c60687d277c4..1a6787bc9386a6 100644 --- a/drivers/lguest/lguest_user.c +++ b/drivers/lguest/lguest_user.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c index 152414e6378a75..fee939efc4fc6b 100644 --- a/drivers/macintosh/adb.c +++ b/drivers/macintosh/adb.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c index 3f041b1870335a..f757cef293f868 100644 --- a/drivers/macintosh/macio_asic.c +++ b/drivers/macintosh/macio_asic.c @@ -392,6 +392,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip, * To get all the fields, copy all archdata */ dev->ofdev.dev.archdata = chip->lbus.pdev->dev.archdata; + dev->ofdev.dev.dma_ops = chip->lbus.pdev->dev.dma_ops; #endif /* CONFIG_PCI */ #ifdef DEBUG diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c index 227869159ac081..1ac66421877a7a 100644 --- a/drivers/macintosh/smu.c +++ b/drivers/macintosh/smu.c @@ -39,6 +39,7 @@ #include #include #include +#include #include #include diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c index 43b8db2b54451f..cce99f72e4ae6d 100644 --- a/drivers/macintosh/via-pmu.c +++ b/drivers/macintosh/via-pmu.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c index 9c79f8019d2a5f..97fb956bb6e04a 100644 --- a/drivers/mailbox/mailbox-test.c +++ b/drivers/mailbox/mailbox-test.c @@ -21,6 +21,7 @@ #include #include #include +#include #define MBOX_MAX_SIG_LEN 8 #define MBOX_MAX_MSG_LEN 128 diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index 
646fe85261c17b..18526d44688de2 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c @@ -11,6 +11,7 @@ #include "bset.h" #include +#include #include #include diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index a43eedd5804dd8..450d0e848ae436 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -32,6 +32,9 @@ #include #include #include +#include +#include + #include /* diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h index 9b2fe2d3e3a941..1ec84ca8114674 100644 --- a/drivers/md/bcache/closure.h +++ b/drivers/md/bcache/closure.h @@ -3,6 +3,7 @@ #include #include +#include #include /* diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index b3ff57d61ddea7..f90f1361698080 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -13,6 +13,7 @@ #include #include +#include static const char * const cache_replacement_policies[] = { "lru", diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c index dde6172f3f105d..8c3a938f4bf068 100644 --- a/drivers/md/bcache/util.c +++ b/drivers/md/bcache/util.c @@ -12,6 +12,7 @@ #include #include #include +#include #include "util.h" diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h index cf2cbc211d8388..5d13930f0f22fc 100644 --- a/drivers/md/bcache/util.h +++ b/drivers/md/bcache/util.h @@ -4,8 +4,8 @@ #include #include -#include #include +#include #include #include #include diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 69e1ae59cab8b9..6ac2e48b923544 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -13,6 +13,7 @@ #include #include +#include #include /* Rate limiting */ diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index d36d427a9efbf3..df4859f6ac6ad6 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 1cb2ca9dfae36d..389a3637ffcc63 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1536,7 +1536,7 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string down_read(&key->sem); - ukp = user_key_payload(key); + ukp = user_key_payload_locked(key); if (!ukp) { up_read(&key->sem); key_put(key); diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index a5a9b17f0f7fcc..4da6fc6b1ffd33 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 5c9e95d66f3b64..f8564d63982f43 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -101,6 +101,8 @@ struct raid_dev { #define CTR_FLAG_RAID10_USE_NEAR_SETS (1 << __CTR_FLAG_RAID10_USE_NEAR_SETS) #define CTR_FLAG_JOURNAL_DEV (1 << __CTR_FLAG_JOURNAL_DEV) +#define RESUME_STAY_FROZEN_FLAGS (CTR_FLAG_DELTA_DISKS | CTR_FLAG_DATA_OFFSET) + /* * Definitions of various constructor flags to * be used in checks of valid / invalid flags @@ -3462,9 +3464,11 @@ static int raid_message(struct dm_target *ti, unsigned int argc, char **argv) else if (!strcasecmp(argv[0], "recover")) set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); else { - if (!strcasecmp(argv[0], "check")) + if (!strcasecmp(argv[0], "check")) { set_bit(MD_RECOVERY_CHECK, &mddev->recovery); - else if (!strcasecmp(argv[0], "repair")) { + set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); + set_bit(MD_RECOVERY_SYNC, 
&mddev->recovery); + } else if (!strcasecmp(argv[0], "repair")) { set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); set_bit(MD_RECOVERY_SYNC, &mddev->recovery); } else @@ -3771,7 +3775,15 @@ static void raid_resume(struct dm_target *ti) mddev->ro = 0; mddev->in_sync = 0; - clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + /* + * Keep the RAID set frozen if reshape/rebuild flags are set. + * The RAID set is unfrozen once the next table load/resume, + * which clears the reshape/rebuild flags, occurs. + * This ensures that the constructor for the inactive table + * retrieves an up-to-date reshape_position. + */ + if (!(rs->ctr_flags & RESUME_STAY_FROZEN_FLAGS)) + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); if (mddev->suspended) mddev_resume(mddev); @@ -3779,7 +3791,7 @@ static void raid_resume(struct dm_target *ti) static struct target_type raid_target = { .name = "raid", - .version = {1, 10, 0}, + .version = {1, 10, 1}, .module = THIS_MODULE, .ctr = raid_ctr, .dtr = raid_dtr, diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 9f37d7fc2786ca..dfb75979e4555d 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -988,26 +989,29 @@ static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule) struct dm_offload *o = container_of(cb, struct dm_offload, cb); struct bio_list list; struct bio *bio; + int i; INIT_LIST_HEAD(&o->cb.list); if (unlikely(!current->bio_list)) return; - list = *current->bio_list; - bio_list_init(current->bio_list); - - while ((bio = bio_list_pop(&list))) { - struct bio_set *bs = bio->bi_pool; - if (unlikely(!bs) || bs == fs_bio_set) { - bio_list_add(current->bio_list, bio); - continue; + for (i = 0; i < 2; i++) { + list = current->bio_list[i]; + bio_list_init(&current->bio_list[i]); + + while ((bio = bio_list_pop(&list))) { + struct bio_set *bs = bio->bi_pool; + if (unlikely(!bs) || bs == fs_bio_set) { + bio_list_add(&current->bio_list[i], bio); + continue; + } + + spin_lock(&bs->rescue_lock); + bio_list_add(&bs->rescue_list, bio); + queue_work(bs->rescue_workqueue, &bs->rescue_work); + spin_unlock(&bs->rescue_lock); } - - spin_lock(&bs->rescue_lock); - bio_list_add(&bs->rescue_list, bio); - queue_work(bs->rescue_workqueue, &bs->rescue_work); - spin_unlock(&bs->rescue_lock); } } diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 2b13117fb918cb..321ecac2302780 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -777,7 +777,6 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots) bm_lockres->flags |= DLM_LKF_NOQUEUE; ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW); if (ret == -EAGAIN) { - memset(bm_lockres->lksb.sb_lvbptr, '\0', LVB_SIZE); s = read_resync_info(mddev, bm_lockres); if (s) { pr_info("%s:%d Resync[%llu..%llu] in progress on %d\n", @@ -974,6 +973,7 @@ static int leave(struct mddev *mddev) lockres_free(cinfo->bitmap_lockres); unlock_all_bitmaps(mddev); dlm_release_lockspace(cinfo->lockspace, 2); + kfree(cinfo); return 0; } diff --git a/drivers/md/md.c b/drivers/md/md.c index 985374f20e2e3f..f6ae1d67bcd02c 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -44,6 +44,7 @@ */ +#include #include #include #include @@ -439,14 +440,6 @@ void md_flush_request(struct mddev *mddev, struct bio *bio) } EXPORT_SYMBOL(md_flush_request); -void md_unplug(struct blk_plug_cb *cb, bool from_schedule) -{ - struct mddev *mddev = cb->data; - md_wakeup_thread(mddev->thread); - kfree(cb); -} -EXPORT_SYMBOL(md_unplug); - static inline struct 
mddev *mddev_get(struct mddev *mddev) { atomic_inc(&mddev->active); @@ -1886,7 +1879,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) } sb = page_address(rdev->sb_page); sb->data_size = cpu_to_le64(num_sectors); - sb->super_offset = rdev->sb_start; + sb->super_offset = cpu_to_le64(rdev->sb_start); sb->sb_csum = calc_sb_1_csum(sb); do { md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, @@ -2294,7 +2287,7 @@ static bool does_sb_need_changing(struct mddev *mddev) /* Check if any mddev parameters have changed */ if ((mddev->dev_sectors != le64_to_cpu(sb->size)) || (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) || - (mddev->layout != le64_to_cpu(sb->layout)) || + (mddev->layout != le32_to_cpu(sb->layout)) || (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) || (mddev->chunk_sectors != le32_to_cpu(sb->chunksize))) return true; @@ -6457,11 +6450,10 @@ static int set_array_info(struct mddev *mddev, mdu_array_info_t *info) mddev->layout = info->layout; mddev->chunk_sectors = info->chunk_size >> 9; - mddev->max_disks = MD_SB_DISKS; - if (mddev->persistent) { - mddev->flags = 0; - mddev->sb_flags = 0; + mddev->max_disks = MD_SB_DISKS; + mddev->flags = 0; + mddev->sb_flags = 0; } set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); @@ -6532,8 +6524,12 @@ static int update_size(struct mddev *mddev, sector_t num_sectors) return -ENOSPC; } rv = mddev->pers->resize(mddev, num_sectors); - if (!rv) - revalidate_disk(mddev->gendisk); + if (!rv) { + if (mddev->queue) { + set_capacity(mddev->gendisk, mddev->array_sectors); + revalidate_disk(mddev->gendisk); + } + } return rv; } diff --git a/drivers/md/md.h b/drivers/md/md.h index b8859cbf84b618..dde8ecb760c871 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -676,16 +676,10 @@ extern void mddev_resume(struct mddev *mddev); extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, struct mddev *mddev); -extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule); extern void md_reload_sb(struct mddev *mddev, int raid_disk); extern void md_update_sb(struct mddev *mddev, int force); extern void md_kick_rdev_from_array(struct md_rdev * rdev); struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr); -static inline int mddev_check_plugged(struct mddev *mddev) -{ - return !!blk_check_plugged(md_unplug, mddev, - sizeof(struct blk_plug_cb)); -} static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev) { diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c index 0863905dee028c..8589e0a140686e 100644 --- a/drivers/md/persistent-data/dm-block-manager.c +++ b/drivers/md/persistent-data/dm-block-manager.c @@ -13,6 +13,7 @@ #include #include #include +#include #define DM_MSG_PREFIX "block manager" diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 7453d94eeed700..a34f58772022c9 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -37,7 +37,10 @@ #include #include #include +#include + #include + #include "md.h" #include "raid1.h" #include "bitmap.h" @@ -1024,7 +1027,7 @@ static int get_unqueued_pending(struct r1conf *conf) static void freeze_array(struct r1conf *conf, int extra) { /* Stop sync I/O and normal I/O and wait for everything to - * go quite. + * go quiet. * This is called in two situations: * 1) management command handlers (reshape, remove disk, quiesce). * 2) one normal I/O request failed. 
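The md.c hunks above wrap the on-disk superblock fields in byte-order conversions: super_offset is now stored through cpu_to_le64() and layout is compared through le32_to_cpu() rather than le64_to_cpu(). Both fixes follow the same rule: the v1 superblock members hold little-endian values regardless of host endianness, so raw assignments or comparisons are only correct on little-endian CPUs. The stand-alone sketch below merely illustrates that rule; the helpers are hand-rolled stand-ins for the kernel's byteorder API and the struct is invented for the example.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hand-rolled stand-ins for cpu_to_le32()/le32_to_cpu(). */
static uint32_t to_le32(uint32_t v)
{
	uint8_t b[4] = { v & 0xff, (v >> 8) & 0xff, (v >> 16) & 0xff, v >> 24 };
	uint32_t le;

	memcpy(&le, b, sizeof(le));
	return le;
}

static uint32_t from_le32(uint32_t le)
{
	uint8_t b[4];

	memcpy(b, &le, sizeof(b));
	return b[0] | b[1] << 8 | (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

/* Invented example of an on-disk structure whose fields are little-endian. */
struct ondisk_sb {
	uint32_t layout;
};

int main(void)
{
	struct ondisk_sb sb = { .layout = to_le32(2) };
	uint32_t wanted = 2;

	/* Raw comparison is only right on little-endian hosts. */
	printf("raw compare:       %s\n", wanted == sb.layout ? "match" : "MISMATCH");

	/* Converting the on-disk field first is right on every host. */
	printf("converted compare: %s\n", wanted == from_le32(sb.layout) ? "match" : "MISMATCH");
	return 0;
}

On a little-endian machine both lines print "match", which is exactly why mistakes like these tend to go unnoticed until the code runs on a big-endian host.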
@@ -1584,9 +1587,30 @@ static void raid1_make_request(struct mddev *mddev, struct bio *bio) split = bio; } - if (bio_data_dir(split) == READ) + if (bio_data_dir(split) == READ) { raid1_read_request(mddev, split); - else + + /* + * If a bio is split, the first part of bio will + * pass barrier but the bio is queued in + * current->bio_list (see generic_make_request). If + * there is a raise_barrier() called here, the second + * part of bio can't pass barrier. But since the first + * part bio isn't dispatched to underlying disks yet, + * the barrier is never released, hence raise_barrier + * will always wait. We have a deadlock. + * Note, this only happens in read path. For write + * path, the first part of bio is dispatched in a + * schedule() call (because of blk plug) or offloaded + * to raid1d. + * Quitting from the function immediately can change + * the bio order queued in bio_list and avoid the deadlock. + */ + if (split != bio) { + generic_make_request(bio); + break; + } + } else raid1_write_request(mddev, split); } while (split != bio); } @@ -3243,8 +3267,6 @@ static int raid1_resize(struct mddev *mddev, sector_t sectors) return ret; } md_set_array_sectors(mddev, newsize); - set_capacity(mddev->gendisk, mddev->array_sectors); - revalidate_disk(mddev->gendisk); if (sectors > mddev->dev_sectors && mddev->recovery_cp > mddev->dev_sectors) { mddev->recovery_cp = mddev->dev_sectors; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 063c43d83b72c2..e89a8d78a9ed53 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -974,7 +974,8 @@ static void wait_barrier(struct r10conf *conf) !conf->barrier || (atomic_read(&conf->nr_pending) && current->bio_list && - !bio_list_empty(current->bio_list)), + (!bio_list_empty(&current->bio_list[0]) || + !bio_list_empty(&current->bio_list[1]))), conf->resync_lock); conf->nr_waiting--; if (!conf->nr_waiting) @@ -1477,11 +1478,24 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio, mbio->bi_bdev = (void*)rdev; atomic_inc(&r10_bio->remaining); + + cb = blk_check_plugged(raid10_unplug, mddev, + sizeof(*plug)); + if (cb) + plug = container_of(cb, struct raid10_plug_cb, + cb); + else + plug = NULL; spin_lock_irqsave(&conf->device_lock, flags); - bio_list_add(&conf->pending_bio_list, mbio); - conf->pending_count++; + if (plug) { + bio_list_add(&plug->pending, mbio); + plug->pending_cnt++; + } else { + bio_list_add(&conf->pending_bio_list, mbio); + conf->pending_count++; + } spin_unlock_irqrestore(&conf->device_lock, flags); - if (!mddev_check_plugged(mddev)) + if (!plug) md_wakeup_thread(mddev->thread); } } @@ -1571,7 +1585,25 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio) split = bio; } + /* + * If a bio is split, the first part of bio will pass + * barrier but the bio is queued in current->bio_list (see + * generic_make_request). If there is a raise_barrier() called + * here, the second part of bio can't pass barrier. But since + * the first part bio isn't dispatched to underlying disks + * yet, the barrier is never released, hence raise_barrier will + * always wait. We have a deadlock. + * Note, this only happens in read path. For write path, the + * first part of bio is dispatched in a schedule() call + * (because of blk plug) or offloaded to raid10d. + * Quitting from the function immediately can change the bio + * order queued in bio_list and avoid the deadlock. 
+ */ __make_request(mddev, split); + if (split != bio && bio_data_dir(bio) == READ) { + generic_make_request(bio); + break; + } } while (split != bio); /* In case raid10d snuck in to freeze_array */ @@ -3943,10 +3975,6 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors) return ret; } md_set_array_sectors(mddev, size); - if (mddev->queue) { - set_capacity(mddev->gendisk, mddev->array_sectors); - revalidate_disk(mddev->gendisk); - } if (sectors > mddev->dev_sectors && mddev->recovery_cp > oldsize) { mddev->recovery_cp = oldsize; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 2ce23b01dbb21d..ed5cd705b985f1 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -55,6 +55,8 @@ #include #include #include +#include + #include #include "md.h" @@ -1399,7 +1401,8 @@ static int set_syndrome_sources(struct page **srcs, (test_bit(R5_Wantdrain, &dev->flags) || test_bit(R5_InJournal, &dev->flags))) || (srctype == SYNDROME_SRC_WRITTEN && - dev->written)) { + (dev->written || + test_bit(R5_InJournal, &dev->flags)))) { if (test_bit(R5_InJournal, &dev->flags)) srcs[slot] = sh->dev[i].orig_page; else @@ -7603,8 +7606,6 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors) return ret; } md_set_array_sectors(mddev, newsize); - set_capacity(mddev->gendisk, mddev->array_sectors); - revalidate_disk(mddev->gendisk); if (sectors > mddev->dev_sectors && mddev->recovery_cp > mddev->dev_sectors) { mddev->recovery_cp = mddev->dev_sectors; diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c index 000d737ad8271f..8d65028c7a74ec 100644 --- a/drivers/media/dvb-core/dvb_ca_en50221.c +++ b/drivers/media/dvb-core/dvb_ca_en50221.c @@ -34,7 +34,7 @@ #include #include #include -#include +#include #include #include "dvb_ca_en50221.h" diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c index 4eac71e50c5f95..6628f80d184fd4 100644 --- a/drivers/media/dvb-core/dvb_demux.c +++ b/drivers/media/dvb-core/dvb_demux.c @@ -19,7 +19,7 @@ #define pr_fmt(fmt) "dvb_demux: " fmt -#include +#include #include #include #include diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c index 85ae3669aa668b..e3fff8f64d37d8 100644 --- a/drivers/media/dvb-core/dvb_frontend.c +++ b/drivers/media/dvb-core/dvb_frontend.c @@ -29,7 +29,7 @@ #include #include -#include +#include #include #include #include diff --git a/drivers/media/dvb-frontends/drx39xyj/drx_driver.h b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h index 7a681d8202c7ee..4442e478db72a2 100644 --- a/drivers/media/dvb-frontends/drx39xyj/drx_driver.h +++ b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h @@ -256,8 +256,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner, * * The actual DAP implementation may be restricted to only one of the modes. * A compiler warning or error will be generated if the DAP implementation -* overides or cannot handle the mode defined below. -* +* overrides or cannot handle the mode defined below. */ #ifndef DRXDAP_SINGLE_MASTER #define DRXDAP_SINGLE_MASTER 1 @@ -272,7 +271,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner, * * This maximum size may be restricted by the actual DAP implementation. * A compiler warning or error will be generated if the DAP implementation -* overides or cannot handle the chunksize defined below. +* overrides or cannot handle the chunksize defined below. 
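The raid1_make_request()/raid10_make_request() hunks above describe a read-path deadlock: once bio_split() has peeled off the first fragment, the remainder must not be handled in the same loop pass, because it would sit on current->bio_list behind work that the array barrier is waiting for. The toy program below only models that control flow in plain user-space C; the types and helper functions are invented for the sketch, and bio_split()/generic_make_request() are merely mimicked, not used.

#include <stdio.h>

/* Invented stand-in for struct bio: a range of sectors. */
struct toy_bio {
	int start;
	int sectors;
};

#define MAX_PER_PASS 4	/* stand-in for the per-device split limit */

static void issue(const struct toy_bio *b)
{
	printf("issue    [%d, %d)\n", b->start, b->start + b->sectors);
}

/* Models generic_make_request(bio): hand the rest back to the submitter. */
static void resubmit(const struct toy_bio *b)
{
	printf("resubmit [%d, %d) on a fresh pass\n", b->start, b->start + b->sectors);
}

static void make_request(struct toy_bio *bio)
{
	struct toy_bio first;
	struct toy_bio *split;

	do {
		if (bio->sectors > MAX_PER_PASS) {
			/* Models bio_split(): peel off the leading chunk. */
			first = (struct toy_bio){ bio->start, MAX_PER_PASS };
			bio->start += MAX_PER_PASS;
			bio->sectors -= MAX_PER_PASS;
			split = &first;
		} else {
			split = bio;
		}

		issue(split);

		if (split != bio) {
			/*
			 * The fix: requeue the remainder and return instead of
			 * looping on it, so it is not stuck behind a barrier
			 * that only drops once this request has been issued.
			 */
			resubmit(bio);
			break;
		}
	} while (split != bio);
}

int main(void)
{
	struct toy_bio bio = { 0, 10 };

	make_request(&bio);
	return 0;
}

In the real drivers this early return is taken only for reads; writes are already handed off to the plug callback or the raid1d/raid10d thread, as the added comments note.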
* * Beware that the DAP uses DRXDAP_MAX_WCHUNKSIZE to create a temporary data * buffer. Do not undefine or choose too large, unless your system is able to @@ -292,8 +291,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner, * * This maximum size may be restricted by the actual DAP implementation. * A compiler warning or error will be generated if the DAP implementation -* overides or cannot handle the chunksize defined below. -* +* overrides or cannot handle the chunksize defined below. */ #ifndef DRXDAP_MAX_RCHUNKSIZE #define DRXDAP_MAX_RCHUNKSIZE 60 diff --git a/drivers/media/pci/cx18/cx18-driver.h b/drivers/media/pci/cx18/cx18-driver.h index fef3c736fcba68..7be2088c45fe6b 100644 --- a/drivers/media/pci/cx18/cx18-driver.h +++ b/drivers/media/pci/cx18/cx18-driver.h @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c index ab2ae53618e829..e73c153285f0d5 100644 --- a/drivers/media/pci/ivtv/ivtv-driver.c +++ b/drivers/media/pci/ivtv/ivtv-driver.c @@ -59,6 +59,7 @@ #include #include #include "tuner-xc2028.h" +#include /* If you have already X v4l cards, then set this to X. This way the device numbers stay matched. Example: you have a WinTV card diff --git a/drivers/media/pci/ivtv/ivtv-driver.h b/drivers/media/pci/ivtv/ivtv-driver.h index cde452e3074679..d27c5c2c07ea1e 100644 --- a/drivers/media/pci/ivtv/ivtv-driver.h +++ b/drivers/media/pci/ivtv/ivtv-driver.h @@ -38,37 +38,38 @@ * using information provided by Jiun-Kuei Jung @ AVerMedia. */ -#include +#include +#include #include -#include +#include #include +#include +#include +#include #include #include -#include -#include -#include -#include -#include #include -#include -#include +#include #include -#include #include -#include +#include +#include #include -#include #include -#include +#include -#include -#include -#include +#include +#include #include +#include #include #include #include -#include +#include +#include +#include + +#include /* Memory layout */ #define IVTV_ENCODER_OFFSET 0x00000000 diff --git a/drivers/media/pci/pt1/pt1.c b/drivers/media/pci/pt1/pt1.c index da1eebd2016f5c..3219d2f3271ed9 100644 --- a/drivers/media/pci/pt1/pt1.c +++ b/drivers/media/pci/pt1/pt1.c @@ -18,6 +18,7 @@ */ #include +#include #include #include #include diff --git a/drivers/media/pci/pt3/pt3.c b/drivers/media/pci/pt3/pt3.c index 77f4d15f322b2d..e8b5d099215774 100644 --- a/drivers/media/pci/pt3/pt3.c +++ b/drivers/media/pci/pt3/pt3.c @@ -21,6 +21,7 @@ #include #include #include +#include #include "dmxdev.h" #include "dvbdev.h" diff --git a/drivers/media/pci/solo6x10/solo6x10-i2c.c b/drivers/media/pci/solo6x10/solo6x10-i2c.c index c908672b2c4015..e83bb79f93497e 100644 --- a/drivers/media/pci/solo6x10/solo6x10-i2c.c +++ b/drivers/media/pci/solo6x10/solo6x10-i2c.c @@ -27,6 +27,7 @@ * thread context, ACK the interrupt, and move on. 
-- BenC */ #include +#include #include "solo6x10.h" diff --git a/drivers/media/pci/zoran/zoran_device.c b/drivers/media/pci/zoran/zoran_device.c index 671907a6e6b631..40adceebca7e4e 100644 --- a/drivers/media/pci/zoran/zoran_device.c +++ b/drivers/media/pci/zoran/zoran_device.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include diff --git a/drivers/media/platform/coda/imx-vdoa.c b/drivers/media/platform/coda/imx-vdoa.c index 67fd8ffa60a418..669a4c82f1ffa4 100644 --- a/drivers/media/platform/coda/imx-vdoa.c +++ b/drivers/media/platform/coda/imx-vdoa.c @@ -321,7 +321,7 @@ static const struct of_device_id vdoa_dt_ids[] = { }; MODULE_DEVICE_TABLE(of, vdoa_dt_ids); -static const struct platform_driver vdoa_driver = { +static struct platform_driver vdoa_driver = { .probe = vdoa_probe, .remove = vdoa_remove, .driver = { diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c index cbb03768f5d735..0f0c389f889713 100644 --- a/drivers/media/platform/exynos-gsc/gsc-core.c +++ b/drivers/media/platform/exynos-gsc/gsc-core.c @@ -861,9 +861,7 @@ int gsc_prepare_addr(struct gsc_ctx *ctx, struct vb2_buffer *vb, if ((frame->fmt->pixelformat == V4L2_PIX_FMT_VYUY) || (frame->fmt->pixelformat == V4L2_PIX_FMT_YVYU) || - (frame->fmt->pixelformat == V4L2_PIX_FMT_NV61) || (frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420) || - (frame->fmt->pixelformat == V4L2_PIX_FMT_NV21) || (frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420M)) swap(addr->cb, addr->cr); diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c index 823608112d89c1..7918b928f0589b 100644 --- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c +++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c @@ -632,8 +632,8 @@ static int bdisp_open(struct file *file) error_ctrls: bdisp_ctrls_delete(ctx); -error_fh: v4l2_fh_del(&ctx->fh); +error_fh: v4l2_fh_exit(&ctx->fh); bdisp_hw_free_nodes(ctx); mem_ctx: diff --git a/drivers/media/platform/vivid/vivid-radio-rx.c b/drivers/media/platform/vivid/vivid-radio-rx.c index f99092ca8f5c40..47c36c26096b20 100644 --- a/drivers/media/platform/vivid/vivid-radio-rx.c +++ b/drivers/media/platform/vivid/vivid-radio-rx.c @@ -22,6 +22,8 @@ #include #include #include +#include + #include #include #include diff --git a/drivers/media/platform/vivid/vivid-radio-tx.c b/drivers/media/platform/vivid/vivid-radio-tx.c index 8c59d4f53200a1..0e8025b7b4dde4 100644 --- a/drivers/media/platform/vivid/vivid-radio-tx.c +++ b/drivers/media/platform/vivid/vivid-radio-tx.c @@ -19,6 +19,7 @@ #include #include +#include #include #include #include diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c index b4b583f7137a54..b4c0f10fc3b0f1 100644 --- a/drivers/media/platform/vsp1/vsp1_drm.c +++ b/drivers/media/platform/vsp1/vsp1_drm.c @@ -54,12 +54,11 @@ EXPORT_SYMBOL_GPL(vsp1_du_init); /** * vsp1_du_setup_lif - Setup the output part of the VSP pipeline * @dev: the VSP device - * @width: output frame width in pixels - * @height: output frame height in pixels + * @cfg: the LIF configuration * - * Configure the output part of VSP DRM pipeline for the given frame @width and - * @height. This sets up formats on the BRU source pad, the WPF0 sink and source - * pads, and the LIF sink pad. + * Configure the output part of VSP DRM pipeline for the given frame @cfg.width + * and @cfg.height. This sets up formats on the BRU source pad, the WPF0 sink + * and source pads, and the LIF sink pad. 
* * As the media bus code on the BRU source pad is conditioned by the * configuration of the BRU sink 0 pad, we also set up the formats on all BRU @@ -69,8 +68,7 @@ EXPORT_SYMBOL_GPL(vsp1_du_init); * * Return 0 on success or a negative error code on failure. */ -int vsp1_du_setup_lif(struct device *dev, unsigned int width, - unsigned int height) +int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg) { struct vsp1_device *vsp1 = dev_get_drvdata(dev); struct vsp1_pipeline *pipe = &vsp1->drm->pipe; @@ -79,11 +77,8 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width, unsigned int i; int ret; - dev_dbg(vsp1->dev, "%s: configuring LIF with format %ux%u\n", - __func__, width, height); - - if (width == 0 || height == 0) { - /* Zero width or height means the CRTC is being disabled, stop + if (!cfg) { + /* NULL configuration means the CRTC is being disabled, stop * the pipeline and turn the light off. */ ret = vsp1_pipeline_stop(pipe); @@ -108,6 +103,9 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width, return 0; } + dev_dbg(vsp1->dev, "%s: configuring LIF with format %ux%u\n", + __func__, cfg->width, cfg->height); + /* Configure the format at the BRU sinks and propagate it through the * pipeline. */ @@ -117,8 +115,8 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width, for (i = 0; i < bru->entity.source_pad; ++i) { format.pad = i; - format.format.width = width; - format.format.height = height; + format.format.width = cfg->width; + format.format.height = cfg->height; format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32; format.format.field = V4L2_FIELD_NONE; @@ -133,8 +131,8 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width, } format.pad = bru->entity.source_pad; - format.format.width = width; - format.format.height = height; + format.format.width = cfg->width; + format.format.height = cfg->height; format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32; format.format.field = V4L2_FIELD_NONE; @@ -180,7 +178,8 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width, /* Verify that the format at the output of the pipeline matches the * requested frame size and media bus code. 
*/ - if (format.format.width != width || format.format.height != height || + if (format.format.width != cfg->width || + format.format.height != cfg->height || format.format.code != MEDIA_BUS_FMT_ARGB8888_1X32) { dev_dbg(vsp1->dev, "%s: format mismatch\n", __func__); return -EPIPE; diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c index a54ca531d8ef85..1688893a65bb57 100644 --- a/drivers/media/rc/lirc_dev.c +++ b/drivers/media/rc/lirc_dev.c @@ -19,7 +19,7 @@ #include #include -#include +#include #include #include #include @@ -436,6 +436,8 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file) return -ERESTARTSYS; ir = irctls[iminor(inode)]; + mutex_unlock(&lirc_dev_lock); + if (!ir) { retval = -ENODEV; goto error; @@ -476,8 +478,6 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file) } error: - mutex_unlock(&lirc_dev_lock); - nonseekable_open(inode, file); return retval; diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c index b109f8246b968d..ec4b25bd2ec299 100644 --- a/drivers/media/rc/nuvoton-cir.c +++ b/drivers/media/rc/nuvoton-cir.c @@ -176,12 +176,13 @@ static void nvt_write_wakeup_codes(struct rc_dev *dev, { u8 tolerance, config; struct nvt_dev *nvt = dev->priv; + unsigned long flags; int i; /* hardcode the tolerance to 10% */ tolerance = DIV_ROUND_UP(count, 10); - spin_lock(&nvt->lock); + spin_lock_irqsave(&nvt->lock, flags); nvt_clear_cir_wake_fifo(nvt); nvt_cir_wake_reg_write(nvt, count, CIR_WAKE_FIFO_CMP_DEEP); @@ -203,7 +204,7 @@ static void nvt_write_wakeup_codes(struct rc_dev *dev, nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON); - spin_unlock(&nvt->lock); + spin_unlock_irqrestore(&nvt->lock, flags); } static ssize_t wakeup_data_show(struct device *dev, diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c index 2424946740e64f..d84533699668d2 100644 --- a/drivers/media/rc/rc-main.c +++ b/drivers/media/rc/rc-main.c @@ -1663,6 +1663,7 @@ static int rc_setup_rx_device(struct rc_dev *dev) { int rc; struct rc_map *rc_map; + u64 rc_type; if (!dev->map_name) return -EINVAL; @@ -1677,15 +1678,18 @@ static int rc_setup_rx_device(struct rc_dev *dev) if (rc) return rc; - if (dev->change_protocol) { - u64 rc_type = (1ll << rc_map->rc_type); + rc_type = BIT_ULL(rc_map->rc_type); + if (dev->change_protocol) { rc = dev->change_protocol(dev, &rc_type); if (rc < 0) goto out_table; dev->enabled_protocols = rc_type; } + if (dev->driver_type == RC_DRIVER_IR_RAW) + ir_raw_load_modules(&rc_type); + set_bit(EV_KEY, dev->input_dev->evbit); set_bit(EV_REP, dev->input_dev->evbit); set_bit(EV_MSC, dev->input_dev->evbit); @@ -1777,12 +1781,6 @@ int rc_register_device(struct rc_dev *dev) dev->input_name ?: "Unspecified device", path ?: "N/A"); kfree(path); - if (dev->driver_type != RC_DRIVER_IR_RAW_TX) { - rc = rc_setup_rx_device(dev); - if (rc) - goto out_dev; - } - if (dev->driver_type == RC_DRIVER_IR_RAW || dev->driver_type == RC_DRIVER_IR_RAW_TX) { if (!raw_init) { @@ -1791,7 +1789,13 @@ int rc_register_device(struct rc_dev *dev) } rc = ir_raw_event_register(dev); if (rc < 0) - goto out_rx; + goto out_dev; + } + + if (dev->driver_type != RC_DRIVER_IR_RAW_TX) { + rc = rc_setup_rx_device(dev); + if (rc) + goto out_raw; } /* Allow the RC sysfs nodes to be accessible */ @@ -1803,8 +1807,8 @@ int rc_register_device(struct rc_dev *dev) return 0; -out_rx: - rc_free_rx_device(dev); +out_raw: + ir_raw_event_unregister(dev); out_dev: device_del(&dev->dev); out_unlock: diff --git a/drivers/media/rc/serial_ir.c 
b/drivers/media/rc/serial_ir.c index 923fb2299553cb..41b54e40176c23 100644 --- a/drivers/media/rc/serial_ir.c +++ b/drivers/media/rc/serial_ir.c @@ -487,10 +487,69 @@ static void serial_ir_timeout(unsigned long arg) ir_raw_event_handle(serial_ir.rcdev); } +/* Needed by serial_ir_probe() */ +static int serial_ir_tx(struct rc_dev *dev, unsigned int *txbuf, + unsigned int count); +static int serial_ir_tx_duty_cycle(struct rc_dev *dev, u32 cycle); +static int serial_ir_tx_carrier(struct rc_dev *dev, u32 carrier); +static int serial_ir_open(struct rc_dev *rcdev); +static void serial_ir_close(struct rc_dev *rcdev); + static int serial_ir_probe(struct platform_device *dev) { + struct rc_dev *rcdev; int i, nlow, nhigh, result; + rcdev = devm_rc_allocate_device(&dev->dev, RC_DRIVER_IR_RAW); + if (!rcdev) + return -ENOMEM; + + if (hardware[type].send_pulse && hardware[type].send_space) + rcdev->tx_ir = serial_ir_tx; + if (hardware[type].set_send_carrier) + rcdev->s_tx_carrier = serial_ir_tx_carrier; + if (hardware[type].set_duty_cycle) + rcdev->s_tx_duty_cycle = serial_ir_tx_duty_cycle; + + switch (type) { + case IR_HOMEBREW: + rcdev->input_name = "Serial IR type home-brew"; + break; + case IR_IRDEO: + rcdev->input_name = "Serial IR type IRdeo"; + break; + case IR_IRDEO_REMOTE: + rcdev->input_name = "Serial IR type IRdeo remote"; + break; + case IR_ANIMAX: + rcdev->input_name = "Serial IR type AnimaX"; + break; + case IR_IGOR: + rcdev->input_name = "Serial IR type IgorPlug"; + break; + } + + rcdev->input_phys = KBUILD_MODNAME "/input0"; + rcdev->input_id.bustype = BUS_HOST; + rcdev->input_id.vendor = 0x0001; + rcdev->input_id.product = 0x0001; + rcdev->input_id.version = 0x0100; + rcdev->open = serial_ir_open; + rcdev->close = serial_ir_close; + rcdev->dev.parent = &serial_ir.pdev->dev; + rcdev->allowed_protocols = RC_BIT_ALL_IR_DECODER; + rcdev->driver_name = KBUILD_MODNAME; + rcdev->map_name = RC_MAP_RC6_MCE; + rcdev->min_timeout = 1; + rcdev->timeout = IR_DEFAULT_TIMEOUT; + rcdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT; + rcdev->rx_resolution = 250000; + + serial_ir.rcdev = rcdev; + + setup_timer(&serial_ir.timeout_timer, serial_ir_timeout, + (unsigned long)&serial_ir); + result = devm_request_irq(&dev->dev, irq, serial_ir_irq_handler, share_irq ? IRQF_SHARED : 0, KBUILD_MODNAME, &hardware); @@ -516,9 +575,6 @@ static int serial_ir_probe(struct platform_device *dev) return -EBUSY; } - setup_timer(&serial_ir.timeout_timer, serial_ir_timeout, - (unsigned long)&serial_ir); - result = hardware_init_port(); if (result < 0) return result; @@ -552,7 +608,8 @@ static int serial_ir_probe(struct platform_device *dev) sense ? 
"low" : "high"); dev_dbg(&dev->dev, "Interrupt %d, port %04x obtained\n", irq, io); - return 0; + + return devm_rc_register_device(&dev->dev, rcdev); } static int serial_ir_open(struct rc_dev *rcdev) @@ -723,7 +780,6 @@ static void serial_ir_exit(void) static int __init serial_ir_init_module(void) { - struct rc_dev *rcdev; int result; switch (type) { @@ -754,63 +810,9 @@ static int __init serial_ir_init_module(void) sense = !!sense; result = serial_ir_init(); - if (result) - return result; - - rcdev = devm_rc_allocate_device(&serial_ir.pdev->dev, RC_DRIVER_IR_RAW); - if (!rcdev) { - result = -ENOMEM; - goto serial_cleanup; - } - - if (hardware[type].send_pulse && hardware[type].send_space) - rcdev->tx_ir = serial_ir_tx; - if (hardware[type].set_send_carrier) - rcdev->s_tx_carrier = serial_ir_tx_carrier; - if (hardware[type].set_duty_cycle) - rcdev->s_tx_duty_cycle = serial_ir_tx_duty_cycle; - - switch (type) { - case IR_HOMEBREW: - rcdev->input_name = "Serial IR type home-brew"; - break; - case IR_IRDEO: - rcdev->input_name = "Serial IR type IRdeo"; - break; - case IR_IRDEO_REMOTE: - rcdev->input_name = "Serial IR type IRdeo remote"; - break; - case IR_ANIMAX: - rcdev->input_name = "Serial IR type AnimaX"; - break; - case IR_IGOR: - rcdev->input_name = "Serial IR type IgorPlug"; - break; - } - - rcdev->input_phys = KBUILD_MODNAME "/input0"; - rcdev->input_id.bustype = BUS_HOST; - rcdev->input_id.vendor = 0x0001; - rcdev->input_id.product = 0x0001; - rcdev->input_id.version = 0x0100; - rcdev->open = serial_ir_open; - rcdev->close = serial_ir_close; - rcdev->dev.parent = &serial_ir.pdev->dev; - rcdev->allowed_protocols = RC_BIT_ALL_IR_DECODER; - rcdev->driver_name = KBUILD_MODNAME; - rcdev->map_name = RC_MAP_RC6_MCE; - rcdev->min_timeout = 1; - rcdev->timeout = IR_DEFAULT_TIMEOUT; - rcdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT; - rcdev->rx_resolution = 250000; - - serial_ir.rcdev = rcdev; - - result = rc_register_device(rcdev); - if (!result) return 0; -serial_cleanup: + serial_ir_exit(); return result; } @@ -818,7 +820,6 @@ static int __init serial_ir_init_module(void) static void __exit serial_ir_exit_module(void) { del_timer_sync(&serial_ir.timeout_timer); - rc_unregister_device(serial_ir.rcdev); serial_ir_exit(); } diff --git a/drivers/media/usb/cpia2/cpia2_core.c b/drivers/media/usb/cpia2/cpia2_core.c index 431dd0b4b33236..b1d13444ff301e 100644 --- a/drivers/media/usb/cpia2/cpia2_core.c +++ b/drivers/media/usb/cpia2/cpia2_core.c @@ -32,6 +32,7 @@ #include #include #include +#include #define FIRMWARE "cpia2/stv0672_vp4.bin" MODULE_FIRMWARE(FIRMWARE); diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c index ab9866024ec798..04033efe7ad539 100644 --- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c +++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c @@ -36,16 +36,18 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type) { struct hexline *hx; - u8 reset; - int ret,pos=0; + u8 *buf; + int ret, pos = 0; + u16 cpu_cs_register = cypress[type].cpu_cs_register; - hx = kmalloc(sizeof(*hx), GFP_KERNEL); - if (!hx) + buf = kmalloc(sizeof(*hx), GFP_KERNEL); + if (!buf) return -ENOMEM; + hx = (struct hexline *)buf; /* stop the CPU */ - reset = 1; - if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1) + buf[0] = 1; + if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1) err("could not stop the 
USB controller CPU."); while ((ret = dvb_usb_get_hexline(fw, hx, &pos)) > 0) { @@ -61,21 +63,21 @@ int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw } if (ret < 0) { err("firmware download failed at %d with %d",pos,ret); - kfree(hx); + kfree(buf); return ret; } if (ret == 0) { /* restart the CPU */ - reset = 0; - if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) { + buf[0] = 0; + if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1) { err("could not restart the USB controller CPU."); ret = -EINVAL; } } else ret = -EIO; - kfree(hx); + kfree(buf); return ret; } diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c index 6ca502d834b4f2..4f42d57f81d954 100644 --- a/drivers/media/usb/dvb-usb/dw2102.c +++ b/drivers/media/usb/dvb-usb/dw2102.c @@ -68,6 +68,7 @@ struct dw2102_state { u8 initialized; u8 last_lock; + u8 data[MAX_XFER_SIZE + 4]; struct i2c_client *i2c_client_demod; struct i2c_client *i2c_client_tuner; @@ -661,62 +662,72 @@ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); - u8 obuf[0x40], ibuf[0x40]; + struct dw2102_state *state; if (!d) return -ENODEV; + + state = d->priv; + if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; + if (mutex_lock_interruptible(&d->data_mutex) < 0) { + mutex_unlock(&d->i2c_mutex); + return -EAGAIN; + } switch (num) { case 1: switch (msg[0].addr) { case SU3000_STREAM_CTRL: - obuf[0] = msg[0].buf[0] + 0x36; - obuf[1] = 3; - obuf[2] = 0; - if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 0, 0) < 0) + state->data[0] = msg[0].buf[0] + 0x36; + state->data[1] = 3; + state->data[2] = 0; + if (dvb_usb_generic_rw(d, state->data, 3, + state->data, 0, 0) < 0) err("i2c transfer failed."); break; case DW2102_RC_QUERY: - obuf[0] = 0x10; - if (dvb_usb_generic_rw(d, obuf, 1, ibuf, 2, 0) < 0) + state->data[0] = 0x10; + if (dvb_usb_generic_rw(d, state->data, 1, + state->data, 2, 0) < 0) err("i2c transfer failed."); - msg[0].buf[1] = ibuf[0]; - msg[0].buf[0] = ibuf[1]; + msg[0].buf[1] = state->data[0]; + msg[0].buf[0] = state->data[1]; break; default: /* always i2c write*/ - obuf[0] = 0x08; - obuf[1] = msg[0].addr; - obuf[2] = msg[0].len; + state->data[0] = 0x08; + state->data[1] = msg[0].addr; + state->data[2] = msg[0].len; - memcpy(&obuf[3], msg[0].buf, msg[0].len); + memcpy(&state->data[3], msg[0].buf, msg[0].len); - if (dvb_usb_generic_rw(d, obuf, msg[0].len + 3, - ibuf, 1, 0) < 0) + if (dvb_usb_generic_rw(d, state->data, msg[0].len + 3, + state->data, 1, 0) < 0) err("i2c transfer failed."); } break; case 2: /* always i2c read */ - obuf[0] = 0x09; - obuf[1] = msg[0].len; - obuf[2] = msg[1].len; - obuf[3] = msg[0].addr; - memcpy(&obuf[4], msg[0].buf, msg[0].len); - - if (dvb_usb_generic_rw(d, obuf, msg[0].len + 4, - ibuf, msg[1].len + 1, 0) < 0) + state->data[0] = 0x09; + state->data[1] = msg[0].len; + state->data[2] = msg[1].len; + state->data[3] = msg[0].addr; + memcpy(&state->data[4], msg[0].buf, msg[0].len); + + if (dvb_usb_generic_rw(d, state->data, msg[0].len + 4, + state->data, msg[1].len + 1, 0) < 0) err("i2c transfer failed."); - memcpy(msg[1].buf, &ibuf[1], msg[1].len); + memcpy(msg[1].buf, &state->data[1], msg[1].len); break; default: warn("more than 2 i2c messages at a time is not handled yet."); break; } + mutex_unlock(&d->data_mutex); mutex_unlock(&d->i2c_mutex); return num; } @@ -844,17 +855,23 @@ static int su3000_streaming_ctrl(struct dvb_usb_adapter *adap, 
int onoff) static int su3000_power_ctrl(struct dvb_usb_device *d, int i) { struct dw2102_state *state = (struct dw2102_state *)d->priv; - u8 obuf[] = {0xde, 0}; + int ret = 0; info("%s: %d, initialized %d", __func__, i, state->initialized); if (i && !state->initialized) { + mutex_lock(&d->data_mutex); + + state->data[0] = 0xde; + state->data[1] = 0; + state->initialized = 1; /* reset board */ - return dvb_usb_generic_rw(d, obuf, 2, NULL, 0, 0); + ret = dvb_usb_generic_rw(d, state->data, 2, NULL, 0, 0); + mutex_unlock(&d->data_mutex); } - return 0; + return ret; } static int su3000_read_mac_address(struct dvb_usb_device *d, u8 mac[6]) @@ -1309,49 +1326,57 @@ static int prof_7500_frontend_attach(struct dvb_usb_adapter *d) return 0; } -static int su3000_frontend_attach(struct dvb_usb_adapter *d) +static int su3000_frontend_attach(struct dvb_usb_adapter *adap) { - u8 obuf[3] = { 0xe, 0x80, 0 }; - u8 ibuf[] = { 0 }; + struct dvb_usb_device *d = adap->dev; + struct dw2102_state *state = d->priv; + + mutex_lock(&d->data_mutex); + + state->data[0] = 0xe; + state->data[1] = 0x80; + state->data[2] = 0; - if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) + if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) err("command 0x0e transfer failed."); - obuf[0] = 0xe; - obuf[1] = 0x02; - obuf[2] = 1; + state->data[0] = 0xe; + state->data[1] = 0x02; + state->data[2] = 1; - if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) + if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) err("command 0x0e transfer failed."); msleep(300); - obuf[0] = 0xe; - obuf[1] = 0x83; - obuf[2] = 0; + state->data[0] = 0xe; + state->data[1] = 0x83; + state->data[2] = 0; - if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) + if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) err("command 0x0e transfer failed."); - obuf[0] = 0xe; - obuf[1] = 0x83; - obuf[2] = 1; + state->data[0] = 0xe; + state->data[1] = 0x83; + state->data[2] = 1; - if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) + if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) err("command 0x0e transfer failed."); - obuf[0] = 0x51; + state->data[0] = 0x51; - if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0) + if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0) err("command 0x51 transfer failed."); - d->fe_adap[0].fe = dvb_attach(ds3000_attach, &su3000_ds3000_config, - &d->dev->i2c_adap); - if (d->fe_adap[0].fe == NULL) + mutex_unlock(&d->data_mutex); + + adap->fe_adap[0].fe = dvb_attach(ds3000_attach, &su3000_ds3000_config, + &d->i2c_adap); + if (adap->fe_adap[0].fe == NULL) return -EIO; - if (dvb_attach(ts2020_attach, d->fe_adap[0].fe, + if (dvb_attach(ts2020_attach, adap->fe_adap[0].fe, &dw2104_ts2020_config, - &d->dev->i2c_adap)) { + &d->i2c_adap)) { info("Attached DS3000/TS2020!"); return 0; } @@ -1360,47 +1385,55 @@ static int su3000_frontend_attach(struct dvb_usb_adapter *d) return -EIO; } -static int t220_frontend_attach(struct dvb_usb_adapter *d) +static int t220_frontend_attach(struct dvb_usb_adapter *adap) { - u8 obuf[3] = { 0xe, 0x87, 0 }; - u8 ibuf[] = { 0 }; + struct dvb_usb_device *d = adap->dev; + struct dw2102_state *state = d->priv; + + mutex_lock(&d->data_mutex); - if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) + state->data[0] = 0xe; + state->data[1] = 0x87; + state->data[2] = 0x0; + + if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) err("command 0x0e transfer failed."); - obuf[0] = 0xe; - obuf[1] = 0x86; - obuf[2] = 1; + 
state->data[0] = 0xe; + state->data[1] = 0x86; + state->data[2] = 1; - if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) + if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) err("command 0x0e transfer failed."); - obuf[0] = 0xe; - obuf[1] = 0x80; - obuf[2] = 0; + state->data[0] = 0xe; + state->data[1] = 0x80; + state->data[2] = 0; - if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) + if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) err("command 0x0e transfer failed."); msleep(50); - obuf[0] = 0xe; - obuf[1] = 0x80; - obuf[2] = 1; + state->data[0] = 0xe; + state->data[1] = 0x80; + state->data[2] = 1; - if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) + if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) err("command 0x0e transfer failed."); - obuf[0] = 0x51; + state->data[0] = 0x51; - if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0) + if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0) err("command 0x51 transfer failed."); - d->fe_adap[0].fe = dvb_attach(cxd2820r_attach, &cxd2820r_config, - &d->dev->i2c_adap, NULL); - if (d->fe_adap[0].fe != NULL) { - if (dvb_attach(tda18271_attach, d->fe_adap[0].fe, 0x60, - &d->dev->i2c_adap, &tda18271_config)) { + mutex_unlock(&d->data_mutex); + + adap->fe_adap[0].fe = dvb_attach(cxd2820r_attach, &cxd2820r_config, + &d->i2c_adap, NULL); + if (adap->fe_adap[0].fe != NULL) { + if (dvb_attach(tda18271_attach, adap->fe_adap[0].fe, 0x60, + &d->i2c_adap, &tda18271_config)) { info("Attached TDA18271HD/CXD2820R!"); return 0; } @@ -1410,23 +1443,30 @@ static int t220_frontend_attach(struct dvb_usb_adapter *d) return -EIO; } -static int m88rs2000_frontend_attach(struct dvb_usb_adapter *d) +static int m88rs2000_frontend_attach(struct dvb_usb_adapter *adap) { - u8 obuf[] = { 0x51 }; - u8 ibuf[] = { 0 }; + struct dvb_usb_device *d = adap->dev; + struct dw2102_state *state = d->priv; + + mutex_lock(&d->data_mutex); - if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0) + state->data[0] = 0x51; + + if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0) err("command 0x51 transfer failed."); - d->fe_adap[0].fe = dvb_attach(m88rs2000_attach, &s421_m88rs2000_config, - &d->dev->i2c_adap); + mutex_unlock(&d->data_mutex); - if (d->fe_adap[0].fe == NULL) + adap->fe_adap[0].fe = dvb_attach(m88rs2000_attach, + &s421_m88rs2000_config, + &d->i2c_adap); + + if (adap->fe_adap[0].fe == NULL) return -EIO; - if (dvb_attach(ts2020_attach, d->fe_adap[0].fe, + if (dvb_attach(ts2020_attach, adap->fe_adap[0].fe, &dw2104_ts2020_config, - &d->dev->i2c_adap)) { + &d->i2c_adap)) { info("Attached RS2000/TS2020!"); return 0; } @@ -1439,44 +1479,50 @@ static int tt_s2_4600_frontend_attach(struct dvb_usb_adapter *adap) { struct dvb_usb_device *d = adap->dev; struct dw2102_state *state = d->priv; - u8 obuf[3] = { 0xe, 0x80, 0 }; - u8 ibuf[] = { 0 }; struct i2c_adapter *i2c_adapter; struct i2c_client *client; struct i2c_board_info board_info; struct m88ds3103_platform_data m88ds3103_pdata = {}; struct ts2020_config ts2020_config = {}; - if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0) + mutex_lock(&d->data_mutex); + + state->data[0] = 0xe; + state->data[1] = 0x80; + state->data[2] = 0x0; + + if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) err("command 0x0e transfer failed."); - obuf[0] = 0xe; - obuf[1] = 0x02; - obuf[2] = 1; + state->data[0] = 0xe; + state->data[1] = 0x02; + state->data[2] = 1; - if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0) + if (dvb_usb_generic_rw(d, 
state->data, 3, state->data, 1, 0) < 0) err("command 0x0e transfer failed."); msleep(300); - obuf[0] = 0xe; - obuf[1] = 0x83; - obuf[2] = 0; + state->data[0] = 0xe; + state->data[1] = 0x83; + state->data[2] = 0; - if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0) + if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) err("command 0x0e transfer failed."); - obuf[0] = 0xe; - obuf[1] = 0x83; - obuf[2] = 1; + state->data[0] = 0xe; + state->data[1] = 0x83; + state->data[2] = 1; - if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0) + if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) err("command 0x0e transfer failed."); - obuf[0] = 0x51; + state->data[0] = 0x51; - if (dvb_usb_generic_rw(d, obuf, 1, ibuf, 1, 0) < 0) + if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0) err("command 0x51 transfer failed."); + mutex_unlock(&d->data_mutex); + /* attach demod */ m88ds3103_pdata.clk = 27000000; m88ds3103_pdata.i2c_wr_max = 33; diff --git a/drivers/media/usb/gspca/cpia1.c b/drivers/media/usb/gspca/cpia1.c index 23d3285f182a59..e91d00762e94bf 100644 --- a/drivers/media/usb/gspca/cpia1.c +++ b/drivers/media/usb/gspca/cpia1.c @@ -27,6 +27,8 @@ #define MODULE_NAME "cpia1" #include +#include + #include "gspca.h" MODULE_AUTHOR("Hans de Goede "); diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c index 36bd904946bd34..0b5c43f7e020da 100644 --- a/drivers/media/v4l2-core/videobuf-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf-dma-sg.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c index 5457c361ad5864..bf0fe0137dfed2 100644 --- a/drivers/memory/omap-gpmc.c +++ b/drivers/memory/omap-gpmc.c @@ -1947,9 +1947,7 @@ static int gpmc_probe_onenand_child(struct platform_device *pdev, if (!of_property_read_u32(child, "dma-channel", &val)) gpmc_onenand_data->dma_channel = val; - gpmc_onenand_init(gpmc_onenand_data); - - return 0; + return gpmc_onenand_init(gpmc_onenand_data); } #else static int gpmc_probe_onenand_child(struct platform_device *pdev, diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c index 377e650a2a1dc3..2fa015c0556186 100644 --- a/drivers/misc/cxl/fault.c +++ b/drivers/misc/cxl/fault.c @@ -8,7 +8,8 @@ */ #include -#include +#include +#include #include #include #include diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c index 859959f19f1072..e7139c76f96122 100644 --- a/drivers/misc/cxl/file.c +++ b/drivers/misc/cxl/file.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c index cc1706a92aceb6..b0b6ed31918ef2 100644 --- a/drivers/misc/cxl/main.c +++ b/drivers/misc/cxl/main.c @@ -19,6 +19,8 @@ #include #include #include +#include + #include #include diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c index 09505f432eda62..7ae710585267a5 100644 --- a/drivers/misc/cxl/native.c +++ b/drivers/misc/cxl/native.c @@ -9,6 +9,7 @@ #include #include +#include #include #include #include diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index 91f645992c9416..b27ea98b781f77 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -1792,15 +1792,14 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev, /* If we're permanently dead, give up. 
*/ if (state == pci_channel_io_perm_failure) { - /* Tell the AFU drivers; but we don't care what they - * say, we're going away. - */ for (i = 0; i < adapter->slices; i++) { afu = adapter->afu[i]; - /* Only participate in EEH if we are on a virtual PHB */ - if (afu->phb == NULL) - return PCI_ERS_RESULT_NONE; - cxl_vphb_error_detected(afu, state); + /* + * Tell the AFU drivers; but we don't care what they + * say, we're going away. + */ + if (afu->phb != NULL) + cxl_vphb_error_detected(afu, state); } return PCI_ERS_RESULT_DISCONNECT; } diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c index 3d1d55157e5f3b..2fad790db3bf05 100644 --- a/drivers/misc/eeprom/eeprom.c +++ b/drivers/misc/eeprom/eeprom.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c index cb290b8ca0c812..dd4617764f147d 100644 --- a/drivers/misc/genwqe/card_dev.c +++ b/drivers/misc/genwqe/card_dev.c @@ -29,7 +29,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/misc/ibmasm/r_heartbeat.c b/drivers/misc/ibmasm/r_heartbeat.c index 232034f5da486f..5c7dd26db716f4 100644 --- a/drivers/misc/ibmasm/r_heartbeat.c +++ b/drivers/misc/ibmasm/r_heartbeat.c @@ -20,7 +20,7 @@ * */ -#include +#include #include "ibmasm.h" #include "dot_command.h" diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c index 99635dd9dbac7b..fc7efedbc4be25 100644 --- a/drivers/misc/kgdbts.c +++ b/drivers/misc/kgdbts.c @@ -103,6 +103,8 @@ #include #include #include +#include + #include #define v1printk(a...) do { \ diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c index fb8705fc3aca7c..e389b0b5278d56 100644 --- a/drivers/misc/lis3lv02d/lis3lv02d.c +++ b/drivers/misc/lis3lv02d/lis3lv02d.c @@ -23,6 +23,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include +#include #include #include #include diff --git a/drivers/misc/lkdtm_heap.c b/drivers/misc/lkdtm_heap.c index 0f1581664c1c78..ffb6aeac07b3ec 100644 --- a/drivers/misc/lkdtm_heap.c +++ b/drivers/misc/lkdtm_heap.c @@ -4,6 +4,7 @@ */ #include "lkdtm.h" #include +#include /* * This tries to stay within the next largest power-of-2 kmalloc cache diff --git a/drivers/misc/lkdtm_usercopy.c b/drivers/misc/lkdtm_usercopy.c index 1dd611423d8be4..df6ac985fbb586 100644 --- a/drivers/misc/lkdtm_usercopy.c +++ b/drivers/misc/lkdtm_usercopy.c @@ -5,6 +5,7 @@ #include "lkdtm.h" #include #include +#include #include #include #include diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c index 3600c9993a9830..29f2daed37e07b 100644 --- a/drivers/misc/mei/bus-fixup.c +++ b/drivers/misc/mei/bus-fixup.c @@ -112,11 +112,9 @@ struct mkhi_msg { static int mei_osver(struct mei_cl_device *cldev) { - int ret; const size_t size = sizeof(struct mkhi_msg_hdr) + sizeof(struct mkhi_fwcaps) + sizeof(struct mei_os_ver); - size_t length = 8; char buf[size]; struct mkhi_msg *req; struct mkhi_fwcaps *fwcaps; @@ -137,15 +135,7 @@ static int mei_osver(struct mei_cl_device *cldev) os_ver = (struct mei_os_ver *)fwcaps->data; os_ver->os_type = OSTYPE_LINUX; - ret = __mei_cl_send(cldev->cl, buf, size, mode); - if (ret < 0) - return ret; - - ret = __mei_cl_recv(cldev->cl, buf, length, 0); - if (ret < 0) - return ret; - - return 0; + return __mei_cl_send(cldev->cl, buf, size, mode); } static void mei_mkhi_fix(struct mei_cl_device *cldev) @@ -160,7 +150,7 @@ static void mei_mkhi_fix(struct mei_cl_device *cldev) return; ret = 
mei_osver(cldev); - if (ret) + if (ret < 0) dev_err(&cldev->dev, "OS version command failed %d\n", ret); mei_cldev_disable(cldev); diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index cb3e9e0ca0497d..df5f78ae3d2534 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index 68fe37b5bc52fb..d3e3372424d616 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -14,7 +14,7 @@ * */ -#include +#include #include #include #include diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c index cfb1cdf176fa90..13c55b8f926186 100644 --- a/drivers/misc/mei/init.c +++ b/drivers/misc/mei/init.c @@ -124,8 +124,6 @@ int mei_reset(struct mei_device *dev) mei_clear_interrupts(dev); - mei_synchronize_irq(dev); - /* we're already in reset, cancel the init timer * if the reset was called due the hbm protocol error * we need to call it before hw start @@ -304,6 +302,9 @@ static void mei_reset_work(struct work_struct *work) container_of(work, struct mei_device, reset_work); int ret; + mei_clear_interrupts(dev); + mei_synchronize_irq(dev); + mutex_lock(&dev->device_lock); ret = mei_reset(dev); @@ -328,6 +329,9 @@ void mei_stop(struct mei_device *dev) mei_cancel_work(dev); + mei_clear_interrupts(dev); + mei_synchronize_irq(dev); + mutex_lock(&dev->device_lock); dev->dev_state = MEI_DEV_POWER_DOWN; diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index 9d0b7050c79a36..bf816449cd405e 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c @@ -26,7 +26,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/misc/mic/cosm/cosm_scif_server.c b/drivers/misc/mic/cosm/cosm_scif_server.c index 5696df4326b5c5..85f7d09cc65fd1 100644 --- a/drivers/misc/mic/cosm/cosm_scif_server.c +++ b/drivers/misc/mic/cosm/cosm_scif_server.c @@ -19,6 +19,8 @@ * */ #include +#include + #include "cosm_main.h" /* diff --git a/drivers/misc/mic/cosm_client/cosm_scif_client.c b/drivers/misc/mic/cosm_client/cosm_scif_client.c index 03e98bf1ac1553..aa530fcceaa995 100644 --- a/drivers/misc/mic/cosm_client/cosm_scif_client.c +++ b/drivers/misc/mic/cosm_client/cosm_scif_client.c @@ -22,6 +22,8 @@ #include #include #include +#include + #include "../cosm/cosm_main.h" #define COSM_SCIF_MAX_RETRIES 10 diff --git a/drivers/misc/mic/scif/scif_main.h b/drivers/misc/mic/scif/scif_main.h index a08f0b600a9e22..0e5eff9ad08063 100644 --- a/drivers/misc/mic/scif/scif_main.h +++ b/drivers/misc/mic/scif/scif_main.h @@ -18,7 +18,7 @@ #ifndef SCIF_MAIN_H #define SCIF_MAIN_H -#include +#include #include #include #include diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c index f806a4471eb913..329727e00e9703 100644 --- a/drivers/misc/mic/scif/scif_rma.c +++ b/drivers/misc/mic/scif/scif_rma.c @@ -17,6 +17,9 @@ */ #include #include +#include +#include + #include "scif_main.h" #include "scif_map.h" diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c index 1a2b67f3183d50..c2e29d7f0de888 100644 --- a/drivers/misc/mic/vop/vop_main.c +++ b/drivers/misc/mic/vop/vop_main.c @@ -374,7 +374,7 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev, static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char * const names[]) + const char * const names[], struct irq_affinity 
*desc) { struct _vop_vdev *vdev = to_vopvdev(dev); struct vop_device *vpdev = vdev->vpdev; diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c index 6fb773dbcd0c32..93be82fc338ad8 100644 --- a/drivers/misc/sgi-gru/grufault.c +++ b/drivers/misc/sgi-gru/grufault.c @@ -219,15 +219,20 @@ static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr, int write, unsigned long *paddr, int *pageshift) { pgd_t *pgdp; - pmd_t *pmdp; + p4d_t *p4dp; pud_t *pudp; + pmd_t *pmdp; pte_t pte; pgdp = pgd_offset(vma->vm_mm, vaddr); if (unlikely(pgd_none(*pgdp))) goto err; - pudp = pud_offset(pgdp, vaddr); + p4dp = p4d_offset(pgdp, vaddr); + if (unlikely(p4d_none(*p4dp))) + goto err; + + pudp = pud_offset(p4dp, vaddr); if (unlikely(pud_none(*pudp))) goto err; diff --git a/drivers/misc/vexpress-syscfg.c b/drivers/misc/vexpress-syscfg.c index c344483fa7d65a..2cde80c7bb934e 100644 --- a/drivers/misc/vexpress-syscfg.c +++ b/drivers/misc/vexpress-syscfg.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c index f35f0c8606b9ad..21d0fa592145c0 100644 --- a/drivers/misc/vmw_vmci/vmci_context.c +++ b/drivers/misc/vmw_vmci/vmci_context.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include "vmci_queue_pair.h" diff --git a/drivers/misc/vmw_vmci/vmci_event.c b/drivers/misc/vmw_vmci/vmci_event.c index 8449516d6ac648..84258a48029d41 100644 --- a/drivers/misc/vmw_vmci/vmci_event.c +++ b/drivers/misc/vmw_vmci/vmci_event.c @@ -19,6 +19,7 @@ #include #include #include +#include #include "vmci_driver.h" #include "vmci_event.h" diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c index 9d659542a335b4..dad5abee656ef5 100644 --- a/drivers/misc/vmw_vmci/vmci_guest.c +++ b/drivers/misc/vmw_vmci/vmci_guest.c @@ -566,10 +566,10 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, */ error = pci_alloc_irq_vectors(pdev, VMCI_MAX_INTRS, VMCI_MAX_INTRS, PCI_IRQ_MSIX); - if (error) { + if (error < 0) { error = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY); - if (error) + if (error < 0) goto err_remove_bitmap; } else { vmci_dev->exclusive_vectors = true; diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c index ec090105eb4be7..8a16a26e9658f8 100644 --- a/drivers/misc/vmw_vmci/vmci_host.c +++ b/drivers/misc/vmw_vmci/vmci_host.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/misc/vmw_vmci/vmci_resource.c b/drivers/misc/vmw_vmci/vmci_resource.c index 9a53a30de445cf..1ab6e8737a5f09 100644 --- a/drivers/misc/vmw_vmci/vmci_resource.c +++ b/drivers/misc/vmw_vmci/vmci_resource.c @@ -17,6 +17,7 @@ #include #include #include +#include #include "vmci_resource.h" #include "vmci_driver.h" diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 1621fa08e20692..ff3da960c47361 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -1560,11 +1560,8 @@ static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card, struct mmc_blk_request *brq, struct request *req, bool old_req_pending) { - struct mmc_queue_req *mq_rq; bool req_pending; - mq_rq = container_of(brq, struct mmc_queue_req, brq); - /* * If this is an SD card and we're writing, we can first * mark the known good sectors as ok. 
@@ -1701,7 +1698,8 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req) case MMC_BLK_CMD_ERR: req_pending = mmc_blk_rw_cmd_err(md, card, brq, old_req, req_pending); if (mmc_blk_reset(md, card->host, type)) { - mmc_blk_rw_cmd_abort(card, old_req); + if (req_pending) + mmc_blk_rw_cmd_abort(card, old_req); mmc_blk_rw_try_restart(mq, new_req); return; } @@ -1817,6 +1815,7 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) mmc_blk_issue_flush(mq, req); } else { mmc_blk_issue_rw_rq(mq, req); + card->host->context_info.is_waiting_last_req = false; } out: diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 7fd722868875f3..b502601df22815 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -1730,7 +1730,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, err = mmc_select_hs400(card); if (err) goto free_card; - } else { + } else if (!mmc_card_hs400es(card)) { /* Select the desired bus width optionally */ err = mmc_select_bus_width(card); if (err > 0 && mmc_card_hs(card)) { diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c index d29faf2addfe51..6d4b72080d5124 100644 --- a/drivers/mmc/core/sdio_irq.c +++ b/drivers/mmc/core/sdio_irq.c @@ -15,6 +15,7 @@ #include #include +#include #include #include #include diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index 8e32580c12b520..b235d8da0602a8 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -580,7 +580,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz) } } sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD | MSDC_CFG_CKDIV, - (mode << 8) | (div % 0xff)); + (mode << 8) | div); sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN); while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB)) cpu_relax(); @@ -1559,7 +1559,7 @@ static int msdc_drv_probe(struct platform_device *pdev) host->src_clk_freq = clk_get_rate(host->src_clk); /* Set host parameters to mmc */ mmc->ops = &mt_msdc_ops; - mmc->f_min = host->src_clk_freq / (4 * 255); + mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 255); mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23; /* MMC core transfer sizes tunable parameters */ diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c index 410a55b1c25fe5..1cfd7f90033944 100644 --- a/drivers/mmc/host/sdhci-of-arasan.c +++ b/drivers/mmc/host/sdhci-of-arasan.c @@ -28,13 +28,9 @@ #include "sdhci-pltfm.h" #include -#define SDHCI_ARASAN_CLK_CTRL_OFFSET 0x2c #define SDHCI_ARASAN_VENDOR_REGISTER 0x78 #define VENDOR_ENHANCED_STROBE BIT(0) -#define CLK_CTRL_TIMEOUT_SHIFT 16 -#define CLK_CTRL_TIMEOUT_MASK (0xf << CLK_CTRL_TIMEOUT_SHIFT) -#define CLK_CTRL_TIMEOUT_MIN_EXP 13 #define PHY_CLK_TOO_SLOW_HZ 400000 @@ -163,15 +159,15 @@ static int sdhci_arasan_syscon_write(struct sdhci_host *host, static unsigned int sdhci_arasan_get_timeout_clock(struct sdhci_host *host) { - u32 div; unsigned long freq; struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - div = readl(host->ioaddr + SDHCI_ARASAN_CLK_CTRL_OFFSET); - div = (div & CLK_CTRL_TIMEOUT_MASK) >> CLK_CTRL_TIMEOUT_SHIFT; + /* SDHCI timeout clock is in kHz */ + freq = DIV_ROUND_UP(clk_get_rate(pltfm_host->clk), 1000); - freq = clk_get_rate(pltfm_host->clk); - freq /= 1 << (CLK_CTRL_TIMEOUT_MIN_EXP + div); + /* or in MHz */ + if (host->caps & SDHCI_TIMEOUT_CLK_UNIT) + freq = DIV_ROUND_UP(freq, 1000); return freq; } diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c index 
2f9ad213377a2c..d5430ed02a6789 100644 --- a/drivers/mmc/host/sdhci-of-at91.c +++ b/drivers/mmc/host/sdhci-of-at91.c @@ -29,6 +29,8 @@ #include "sdhci-pltfm.h" +#define SDMMC_MC1R 0x204 +#define SDMMC_MC1R_DDR BIT(3) #define SDMMC_CACR 0x230 #define SDMMC_CACR_CAPWREN BIT(0) #define SDMMC_CACR_KEY (0x46 << 8) @@ -85,11 +87,37 @@ static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock) sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); } +/* + * In this specific implementation of the SDHCI controller, the power register + * needs to have a valid voltage set even when the power supply is managed by + * an external regulator. + */ +static void sdhci_at91_set_power(struct sdhci_host *host, unsigned char mode, + unsigned short vdd) +{ + if (!IS_ERR(host->mmc->supply.vmmc)) { + struct mmc_host *mmc = host->mmc; + + spin_unlock_irq(&host->lock); + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); + spin_lock_irq(&host->lock); + } + sdhci_set_power_noreg(host, mode, vdd); +} + +void sdhci_at91_set_uhs_signaling(struct sdhci_host *host, unsigned int timing) +{ + if (timing == MMC_TIMING_MMC_DDR52) + sdhci_writeb(host, SDMMC_MC1R_DDR, SDMMC_MC1R); + sdhci_set_uhs_signaling(host, timing); +} + static const struct sdhci_ops sdhci_at91_sama5d2_ops = { .set_clock = sdhci_at91_set_clock, .set_bus_width = sdhci_set_bus_width, .reset = sdhci_reset, - .set_uhs_signaling = sdhci_set_uhs_signaling, + .set_uhs_signaling = sdhci_at91_set_uhs_signaling, + .set_power = sdhci_at91_set_power, }; static const struct sdhci_pltfm_data soc_data_sama5d2 = { diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 982b3e34942614..86560d590786f3 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -451,6 +451,8 @@ static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode, if (mode == MMC_POWER_OFF) return; + spin_unlock_irq(&host->lock); + /* * Bus power might not enable after D3 -> D0 transition due to the * present state not yet having propagated. Retry for up to 2ms. 
@@ -463,6 +465,8 @@ static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode, reg |= SDHCI_POWER_ON; sdhci_writeb(host, reg, SDHCI_POWER_CONTROL); } + + spin_lock_irq(&host->lock); } static const struct sdhci_ops sdhci_intel_byt_ops = { diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 6fdd7a70f229b8..63bc33a54d0dd8 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -1362,7 +1362,9 @@ void sdhci_enable_clk(struct sdhci_host *host, u16 clk) return; } timeout--; - mdelay(1); + spin_unlock_irq(&host->lock); + usleep_range(900, 1100); + spin_lock_irq(&host->lock); } clk |= SDHCI_CLOCK_CARD_EN; @@ -1828,6 +1830,9 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) struct sdhci_host *host = mmc_priv(mmc); unsigned long flags; + if (enable) + pm_runtime_get_noresume(host->mmc->parent); + spin_lock_irqsave(&host->lock, flags); if (enable) host->flags |= SDHCI_SDIO_IRQ_ENABLED; @@ -1836,6 +1841,9 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) sdhci_enable_sdio_irq_nolock(host, enable); spin_unlock_irqrestore(&host->lock, flags); + + if (!enable) + pm_runtime_put_noidle(host->mmc->parent); } static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c index d2c386f09d69f4..1d843357422e8a 100644 --- a/drivers/mmc/host/ushc.c +++ b/drivers/mmc/host/ushc.c @@ -426,6 +426,9 @@ static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id struct ushc_data *ushc; int ret; + if (intf->cur_altsetting->desc.bNumEndpoints < 1) + return -ENODEV; + mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev); if (mmc == NULL) return -ENOMEM; diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c index 6c062b8251d238..d52139635b67c6 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c @@ -20,6 +20,7 @@ */ #include #include +#include #include #include #include diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 1492c12906f6bd..b0524f8accb620 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index 1ae872bfc3ba5b..747645c74134de 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -186,7 +186,7 @@ static inline int write_enable(struct spi_nor *nor) } /* - * Send write disble instruction to the chip. + * Send write disable instruction to the chip. 
*/ static inline int write_disable(struct spi_nor *nor) { diff --git a/drivers/mtd/tests/mtd_test.h b/drivers/mtd/tests/mtd_test.h index 4b7bee17c924bb..04afd0e7074f12 100644 --- a/drivers/mtd/tests/mtd_test.h +++ b/drivers/mtd/tests/mtd_test.h @@ -1,5 +1,5 @@ #include -#include +#include static inline int mtdtest_relax(void) { diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index 85d54f37e28ff2..77513195f50e39 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -1159,7 +1159,7 @@ static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev) if (err) return ERR_PTR(err); - err = vfs_getattr(&path, &stat); + err = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT); path_put(&path); if (err) return ERR_PTR(err); diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c index 88b1897aeb40f7..d4b2e874449869 100644 --- a/drivers/mtd/ubi/kapi.c +++ b/drivers/mtd/ubi/kapi.c @@ -314,7 +314,7 @@ struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode) if (error) return ERR_PTR(error); - error = vfs_getattr(&path, &stat); + error = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT); path_put(&path); if (error) return ERR_PTR(error); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 6321f12630c8c5..8a4ba8b88e52f9 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -4179,6 +4179,7 @@ void bond_setup(struct net_device *bond_dev) /* Initialize the device entry points */ ether_setup(bond_dev); + bond_dev->max_mtu = ETH_MAX_MTU; bond_dev->netdev_ops = &bond_netdev_ops; bond_dev->ethtool_ops = &bond_ethtool_ops; diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 577e57cad1dc44..1bcbb8913e1715 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -16,6 +16,8 @@ #include #include #include +#include + #include static int bond_option_active_slave_set(struct bonding *bond, diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index e23c3ed737deef..770623a0cc01c3 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c index b306210b02b7b4..bc0eb47ecceea7 100644 --- a/drivers/net/caif/caif_virtio.c +++ b/drivers/net/caif/caif_virtio.c @@ -679,7 +679,8 @@ static int cfv_probe(struct virtio_device *vdev) goto err; /* Get the TX virtio ring. This is a "guest side vring". 
*/ - err = vdev->config->find_vqs(vdev, 1, &cfv->vq_tx, &vq_cbs, &names); + err = vdev->config->find_vqs(vdev, 1, &cfv->vq_tx, &vq_cbs, &names, + NULL); if (err) goto err; diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index ea57fed375c634..13f0f219d8aa83 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -196,7 +196,7 @@ #define FLEXCAN_QUIRK_BROKEN_ERR_STATE BIT(1) /* [TR]WRN_INT not connected */ #define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2) /* Disable RX FIFO Global mask */ #define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3) /* Enable EACEN and RRS bit in ctrl2 */ -#define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disble Memory error detection */ +#define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disable Memory error detection */ #define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5) /* Use timestamp based offloading */ /* Structure of the message buffer */ diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c index 4063215c9b54b0..aac58ce6e371a8 100644 --- a/drivers/net/can/softing/softing_fw.c +++ b/drivers/net/can/softing/softing_fw.c @@ -17,7 +17,7 @@ */ #include -#include +#include #include #include diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 77e3cc06a30c8c..300349fe8dc049 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -258,7 +258,7 @@ static int gs_cmd_reset(struct gs_usb *gsusb, struct gs_can *gsdev) rc = usb_control_msg(interface_to_usbdev(intf), usb_sndctrlpipe(interface_to_usbdev(intf), 0), GS_USB_BREQ_MODE, - USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, gsdev->channel, 0, dm, @@ -432,7 +432,7 @@ static int gs_usb_set_bittiming(struct net_device *netdev) rc = usb_control_msg(interface_to_usbdev(intf), usb_sndctrlpipe(interface_to_usbdev(intf), 0), GS_USB_BREQ_BITTIMING, - USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, dev->channel, 0, dbt, @@ -546,7 +546,6 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, hf, urb->transfer_dma); - if (rc == -ENODEV) { netif_device_detach(netdev); } else { @@ -804,7 +803,7 @@ static struct gs_can *gs_make_candev(unsigned int channel, rc = usb_control_msg(interface_to_usbdev(intf), usb_rcvctrlpipe(interface_to_usbdev(intf), 0), GS_USB_BREQ_BT_CONST, - USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, channel, 0, bt_const, @@ -908,57 +907,72 @@ static int gs_usb_probe(struct usb_interface *intf, struct gs_usb *dev; int rc = -ENOMEM; unsigned int icount, i; - struct gs_host_config hconf = { - .byte_order = 0x0000beef, - }; - struct gs_device_config dconf; + struct gs_host_config *hconf; + struct gs_device_config *dconf; + + hconf = kmalloc(sizeof(*hconf), GFP_KERNEL); + if (!hconf) + return -ENOMEM; + + hconf->byte_order = 0x0000beef; /* send host config */ rc = usb_control_msg(interface_to_usbdev(intf), usb_sndctrlpipe(interface_to_usbdev(intf), 0), GS_USB_BREQ_HOST_FORMAT, - USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 1, intf->altsetting[0].desc.bInterfaceNumber, - &hconf, - sizeof(hconf), + hconf, + sizeof(*hconf), 1000); + kfree(hconf); + if (rc < 0) { dev_err(&intf->dev, "Couldn't send data format (err=%d)\n", rc); return rc; } + dconf = kmalloc(sizeof(*dconf), GFP_KERNEL); + if (!dconf) + return -ENOMEM; + /* read device config */ rc = usb_control_msg(interface_to_usbdev(intf), 
usb_rcvctrlpipe(interface_to_usbdev(intf), 0), GS_USB_BREQ_DEVICE_CONFIG, - USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 1, intf->altsetting[0].desc.bInterfaceNumber, - &dconf, - sizeof(dconf), + dconf, + sizeof(*dconf), 1000); if (rc < 0) { dev_err(&intf->dev, "Couldn't get device config: (err=%d)\n", rc); + kfree(dconf); return rc; } - icount = dconf.icount + 1; + icount = dconf->icount + 1; dev_info(&intf->dev, "Configuring for %d interfaces\n", icount); if (icount > GS_MAX_INTF) { dev_err(&intf->dev, "Driver cannot handle more that %d CAN interfaces\n", GS_MAX_INTF); + kfree(dconf); return -EINVAL; } dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (!dev) + if (!dev) { + kfree(dconf); return -ENOMEM; + } + init_usb_anchor(&dev->rx_submitted); atomic_set(&dev->active_channels, 0); @@ -967,7 +981,7 @@ static int gs_usb_probe(struct usb_interface *intf, dev->udev = interface_to_usbdev(intf); for (i = 0; i < icount; i++) { - dev->canch[i] = gs_make_candev(i, intf, &dconf); + dev->canch[i] = gs_make_candev(i, intf, dconf); if (IS_ERR_OR_NULL(dev->canch[i])) { /* save error code to return later */ rc = PTR_ERR(dev->canch[i]); @@ -978,12 +992,15 @@ static int gs_usb_probe(struct usb_interface *intf, gs_destroy_candev(dev->canch[i]); usb_kill_anchored_urbs(&dev->rx_submitted); + kfree(dconf); kfree(dev); return rc; } dev->canch[i]->parent = dev; } + kfree(dconf); + return 0; } diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c index 108a30e1509756..d000cb62d6ae8c 100644 --- a/drivers/net/can/usb/usb_8dev.c +++ b/drivers/net/can/usb/usb_8dev.c @@ -951,8 +951,8 @@ static int usb_8dev_probe(struct usb_interface *intf, for (i = 0; i < MAX_TX_URBS; i++) priv->tx_contexts[i].echo_index = MAX_TX_URBS; - priv->cmd_msg_buffer = kzalloc(sizeof(struct usb_8dev_cmd_msg), - GFP_KERNEL); + priv->cmd_msg_buffer = devm_kzalloc(&intf->dev, sizeof(struct usb_8dev_cmd_msg), + GFP_KERNEL); if (!priv->cmd_msg_buffer) goto cleanup_candev; @@ -966,7 +966,7 @@ static int usb_8dev_probe(struct usb_interface *intf, if (err) { netdev_err(netdev, "couldn't register CAN device: %d\n", err); - goto cleanup_cmd_msg_buffer; + goto cleanup_candev; } err = usb_8dev_cmd_version(priv, &version); @@ -987,9 +987,6 @@ static int usb_8dev_probe(struct usb_interface *intf, cleanup_unregister_candev: unregister_netdev(priv->netdev); -cleanup_cmd_msg_buffer: - kfree(priv->cmd_msg_buffer); - cleanup_candev: free_candev(netdev); diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c index 76e5fc7adff519..6c98901f1b8970 100644 --- a/drivers/net/ethernet/amd/declance.c +++ b/drivers/net/ethernet/amd/declance.c @@ -1276,18 +1276,6 @@ static int dec_lance_probe(struct device *bdev, const int type) return ret; } -static void __exit dec_lance_remove(struct device *bdev) -{ - struct net_device *dev = dev_get_drvdata(bdev); - resource_size_t start, len; - - unregister_netdev(dev); - start = to_tc_dev(bdev)->resource.start; - len = to_tc_dev(bdev)->resource.end - start + 1; - release_mem_region(start, len); - free_netdev(dev); -} - /* Find all the lance cards on the system and initialize them */ static int __init dec_lance_platform_probe(void) { @@ -1320,7 +1308,7 @@ static void __exit dec_lance_platform_remove(void) #ifdef CONFIG_TC static int dec_lance_tc_probe(struct device *dev); -static int __exit dec_lance_tc_remove(struct device *dev); +static int dec_lance_tc_remove(struct device *dev); static const struct tc_device_id 
dec_lance_tc_table[] = { { "DEC ", "PMAD-AA " }, @@ -1334,7 +1322,7 @@ static struct tc_driver dec_lance_tc_driver = { .name = "declance", .bus = &tc_bus_type, .probe = dec_lance_tc_probe, - .remove = __exit_p(dec_lance_tc_remove), + .remove = dec_lance_tc_remove, }, }; @@ -1346,7 +1334,19 @@ static int dec_lance_tc_probe(struct device *dev) return status; } -static int __exit dec_lance_tc_remove(struct device *dev) +static void dec_lance_remove(struct device *bdev) +{ + struct net_device *dev = dev_get_drvdata(bdev); + resource_size_t start, len; + + unregister_netdev(dev); + start = to_tc_dev(bdev)->resource.start; + len = to_tc_dev(bdev)->resource.end - start + 1; + release_mem_region(start, len); + free_netdev(dev); +} + +static int dec_lance_tc_remove(struct device *dev) { put_device(dev); dec_lance_remove(dev); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index 8a280e7d66bddc..127adbeefb105c 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -984,29 +984,29 @@ #define XP_ECC_CNT1_DESC_DED_WIDTH 8 #define XP_ECC_CNT1_DESC_SEC_INDEX 0 #define XP_ECC_CNT1_DESC_SEC_WIDTH 8 -#define XP_ECC_IER_DESC_DED_INDEX 0 +#define XP_ECC_IER_DESC_DED_INDEX 5 #define XP_ECC_IER_DESC_DED_WIDTH 1 -#define XP_ECC_IER_DESC_SEC_INDEX 1 +#define XP_ECC_IER_DESC_SEC_INDEX 4 #define XP_ECC_IER_DESC_SEC_WIDTH 1 -#define XP_ECC_IER_RX_DED_INDEX 2 +#define XP_ECC_IER_RX_DED_INDEX 3 #define XP_ECC_IER_RX_DED_WIDTH 1 -#define XP_ECC_IER_RX_SEC_INDEX 3 +#define XP_ECC_IER_RX_SEC_INDEX 2 #define XP_ECC_IER_RX_SEC_WIDTH 1 -#define XP_ECC_IER_TX_DED_INDEX 4 +#define XP_ECC_IER_TX_DED_INDEX 1 #define XP_ECC_IER_TX_DED_WIDTH 1 -#define XP_ECC_IER_TX_SEC_INDEX 5 +#define XP_ECC_IER_TX_SEC_INDEX 0 #define XP_ECC_IER_TX_SEC_WIDTH 1 -#define XP_ECC_ISR_DESC_DED_INDEX 0 +#define XP_ECC_ISR_DESC_DED_INDEX 5 #define XP_ECC_ISR_DESC_DED_WIDTH 1 -#define XP_ECC_ISR_DESC_SEC_INDEX 1 +#define XP_ECC_ISR_DESC_SEC_INDEX 4 #define XP_ECC_ISR_DESC_SEC_WIDTH 1 -#define XP_ECC_ISR_RX_DED_INDEX 2 +#define XP_ECC_ISR_RX_DED_INDEX 3 #define XP_ECC_ISR_RX_DED_WIDTH 1 -#define XP_ECC_ISR_RX_SEC_INDEX 3 +#define XP_ECC_ISR_RX_SEC_INDEX 2 #define XP_ECC_ISR_RX_SEC_WIDTH 1 -#define XP_ECC_ISR_TX_DED_INDEX 4 +#define XP_ECC_ISR_TX_DED_INDEX 1 #define XP_ECC_ISR_TX_DED_WIDTH 1 -#define XP_ECC_ISR_TX_SEC_INDEX 5 +#define XP_ECC_ISR_TX_SEC_INDEX 0 #define XP_ECC_ISR_TX_SEC_WIDTH 1 #define XP_I2C_MUTEX_BUSY_INDEX 31 #define XP_I2C_MUTEX_BUSY_WIDTH 1 @@ -1148,8 +1148,8 @@ #define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH 1 #define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 1 #define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1 -#define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX 2 -#define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_LAST_INDEX 2 +#define RX_PACKET_ATTRIBUTES_LAST_WIDTH 1 #define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX 3 #define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH 1 #define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX 4 @@ -1158,6 +1158,8 @@ #define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1 #define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6 #define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_FIRST_INDEX 7 +#define RX_PACKET_ATTRIBUTES_FIRST_WIDTH 1 #define RX_NORMAL_DESC0_OVT_INDEX 0 #define RX_NORMAL_DESC0_OVT_WIDTH 16 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index a7d16db5c4b21d..24a687ce438818 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c 
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -1323,7 +1323,7 @@ static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, static int xgbe_set_ext_mii_mode(struct xgbe_prv_data *pdata, unsigned int port, enum xgbe_mdio_mode mode) { - unsigned int reg_val = 0; + unsigned int reg_val = XGMAC_IOREAD(pdata, MAC_MDIOCL22R); switch (mode) { case XGBE_MDIO_MODE_CL22: @@ -1896,10 +1896,15 @@ static int xgbe_dev_read(struct xgbe_channel *channel) /* Get the header length */ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) { + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + FIRST, 1); rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2, RX_NORMAL_DESC2, HL); if (rdata->rx.hdr_len) pdata->ext_stats.rx_split_header_packets++; + } else { + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + FIRST, 0); } /* Get the RSS hash */ @@ -1922,19 +1927,16 @@ static int xgbe_dev_read(struct xgbe_channel *channel) } } - /* Get the packet length */ - rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL); - - if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) { - /* Not all the data has been transferred for this packet */ - XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, - INCOMPLETE, 1); + /* Not all the data has been transferred for this packet */ + if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) return 0; - } /* This is the last of the data for this packet */ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, - INCOMPLETE, 0); + LAST, 1); + + /* Get the packet length */ + rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL); /* Set checksum done indicator as appropriate */ if (netdev->features & NETIF_F_RXCSUM) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 3aa457c8ca21d3..a713abd9d03e63 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1131,12 +1131,12 @@ static void xgbe_stop(struct xgbe_prv_data *pdata) hw_if->disable_tx(pdata); hw_if->disable_rx(pdata); + phy_if->phy_stop(pdata); + xgbe_free_irqs(pdata); xgbe_napi_disable(pdata, 1); - phy_if->phy_stop(pdata); - hw_if->exit(pdata); channel = pdata->channel; @@ -1971,13 +1971,12 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata, { struct sk_buff *skb; u8 *packet; - unsigned int copy_len; skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len); if (!skb) return NULL; - /* Start with the header buffer which may contain just the header + /* Pull in the header buffer which may contain just the header * or the header plus data */ dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base, @@ -1986,30 +1985,49 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata, packet = page_address(rdata->rx.hdr.pa.pages) + rdata->rx.hdr.pa.pages_offset; - copy_len = (rdata->rx.hdr_len) ? 
rdata->rx.hdr_len : len; - copy_len = min(rdata->rx.hdr.dma_len, copy_len); - skb_copy_to_linear_data(skb, packet, copy_len); - skb_put(skb, copy_len); - - len -= copy_len; - if (len) { - /* Add the remaining data as a frag */ - dma_sync_single_range_for_cpu(pdata->dev, - rdata->rx.buf.dma_base, - rdata->rx.buf.dma_off, - rdata->rx.buf.dma_len, - DMA_FROM_DEVICE); - - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - rdata->rx.buf.pa.pages, - rdata->rx.buf.pa.pages_offset, - len, rdata->rx.buf.dma_len); - rdata->rx.buf.pa.pages = NULL; - } + skb_copy_to_linear_data(skb, packet, len); + skb_put(skb, len); return skb; } +static unsigned int xgbe_rx_buf1_len(struct xgbe_ring_data *rdata, + struct xgbe_packet_data *packet) +{ + /* Always zero if not the first descriptor */ + if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST)) + return 0; + + /* First descriptor with split header, return header length */ + if (rdata->rx.hdr_len) + return rdata->rx.hdr_len; + + /* First descriptor but not the last descriptor and no split header, + * so the full buffer was used + */ + if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST)) + return rdata->rx.hdr.dma_len; + + /* First descriptor and last descriptor and no split header, so + * calculate how much of the buffer was used + */ + return min_t(unsigned int, rdata->rx.hdr.dma_len, rdata->rx.len); +} + +static unsigned int xgbe_rx_buf2_len(struct xgbe_ring_data *rdata, + struct xgbe_packet_data *packet, + unsigned int len) +{ + /* Always the full buffer if not the last descriptor */ + if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST)) + return rdata->rx.buf.dma_len; + + /* Last descriptor so calculate how much of the buffer was used + * for the last bit of data + */ + return rdata->rx.len - len; +} + static int xgbe_tx_poll(struct xgbe_channel *channel) { struct xgbe_prv_data *pdata = channel->pdata; @@ -2092,8 +2110,8 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) struct napi_struct *napi; struct sk_buff *skb; struct skb_shared_hwtstamps *hwtstamps; - unsigned int incomplete, error, context_next, context; - unsigned int len, rdesc_len, max_len; + unsigned int last, error, context_next, context; + unsigned int len, buf1_len, buf2_len, max_len; unsigned int received = 0; int packet_count = 0; @@ -2103,7 +2121,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) if (!ring) return 0; - incomplete = 0; + last = 0; context_next = 0; napi = (pdata->per_channel_irq) ? 
&channel->napi : &pdata->napi; @@ -2137,9 +2155,8 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) received++; ring->cur++; - incomplete = XGMAC_GET_BITS(packet->attributes, - RX_PACKET_ATTRIBUTES, - INCOMPLETE); + last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + LAST); context_next = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT_NEXT); @@ -2148,7 +2165,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) CONTEXT); /* Earlier error, just drain the remaining data */ - if ((incomplete || context_next) && error) + if ((!last || context_next) && error) goto read_again; if (error || packet->errors) { @@ -2160,16 +2177,22 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) } if (!context) { - /* Length is cumulative, get this descriptor's length */ - rdesc_len = rdata->rx.len - len; - len += rdesc_len; + /* Get the data length in the descriptor buffers */ + buf1_len = xgbe_rx_buf1_len(rdata, packet); + len += buf1_len; + buf2_len = xgbe_rx_buf2_len(rdata, packet, len); + len += buf2_len; - if (rdesc_len && !skb) { + if (!skb) { skb = xgbe_create_skb(pdata, napi, rdata, - rdesc_len); - if (!skb) + buf1_len); + if (!skb) { error = 1; - } else if (rdesc_len) { + goto skip_data; + } + } + + if (buf2_len) { dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.buf.dma_base, rdata->rx.buf.dma_off, @@ -2179,13 +2202,14 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rdata->rx.buf.pa.pages, rdata->rx.buf.pa.pages_offset, - rdesc_len, + buf2_len, rdata->rx.buf.dma_len); rdata->rx.buf.pa.pages = NULL; } } - if (incomplete || context_next) +skip_data: + if (!last || context_next) goto read_again; if (!skb) @@ -2243,7 +2267,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) } /* Check if we need to save state before leaving */ - if (received && (incomplete || context_next)) { + if (received && (!last || context_next)) { rdata = XGBE_GET_DESC_DATA(ring, ring->cur); rdata->state_saved = 1; rdata->state.skb = skb; @@ -2272,10 +2296,7 @@ static int xgbe_one_poll(struct napi_struct *napi, int budget) processed = xgbe_rx_poll(channel, budget); /* If we processed everything, we are done */ - if (processed < budget) { - /* Turn off polling */ - napi_complete_done(napi, processed); - + if ((processed < budget) && napi_complete_done(napi, processed)) { /* Enable Tx and Rx interrupts */ if (pdata->channel_irq_mode) xgbe_enable_rx_tx_int(pdata, channel); @@ -2317,10 +2338,7 @@ static int xgbe_all_poll(struct napi_struct *napi, int budget) } while ((processed < budget) && (processed != last_processed)); /* If we processed everything, we are done */ - if (processed < budget) { - /* Turn off polling */ - napi_complete_done(napi, processed); - + if ((processed < budget) && napi_complete_done(napi, processed)) { /* Enable Tx and Rx interrupts */ xgbe_enable_rx_tx_ints(pdata); } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index 9d8c953083b4ef..e707c49cc55a78 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -716,6 +716,8 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata) pdata->phy.duplex = DUPLEX_UNKNOWN; pdata->phy.autoneg = AUTONEG_ENABLE; pdata->phy.advertising = pdata->phy.supported; + + return; } pdata->phy.advertising &= ~ADVERTISED_Autoneg; @@ -875,6 +877,16 @@ static int xgbe_phy_find_phy_device(struct 
xgbe_prv_data *pdata) !phy_data->sfp_phy_avail) return 0; + /* Set the proper MDIO mode for the PHY */ + ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->mdio_addr, + phy_data->phydev_mode); + if (ret) { + netdev_err(pdata->netdev, + "mdio port/clause not compatible (%u/%u)\n", + phy_data->mdio_addr, phy_data->phydev_mode); + return ret; + } + /* Create and connect to the PHY device */ phydev = get_phy_device(phy_data->mii, phy_data->mdio_addr, (phy_data->phydev_mode == XGBE_MDIO_MODE_CL45)); @@ -2722,6 +2734,18 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata) if (ret) return ret; + /* Set the proper MDIO mode for the re-driver */ + if (phy_data->redrv && !phy_data->redrv_if) { + ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->redrv_addr, + XGBE_MDIO_MODE_CL22); + if (ret) { + netdev_err(pdata->netdev, + "redriver mdio port not compatible (%u)\n", + phy_data->redrv_addr); + return ret; + } + } + /* Start in highest supported mode */ xgbe_phy_set_mode(pdata, phy_data->start_mode); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index e536301acfdec9..b3568c453b1451 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -1749,6 +1749,12 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) pdata->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(pdata->clk)) { + /* Abort if the clock is defined but couldn't be retrieved. + * Always abort if the clock is missing on DT system as + * the driver can't cope with this case. + */ + if (PTR_ERR(pdata->clk) != -ENOENT || dev->of_node) + return PTR_ERR(pdata->clk); /* Firmware may have set up the clock already. */ dev_info(dev, "clocks have been setup already\n"); } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index dad63623be6a93..5d6c40d86775dd 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -98,11 +98,7 @@ static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu) if (err < 0) goto err_exit; - - if (netif_running(ndev)) { - aq_ndev_close(ndev); - aq_ndev_open(ndev); - } + ndev->mtu = new_mtu; err_exit: return err; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index ee78444bfb8851..cdb02991f249c6 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -487,6 +487,9 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self, dx_buff->mss = skb_shinfo(skb)->gso_size; dx_buff->is_txc = 1U; + dx_buff->is_ipv6 = + (ip_hdr(skb)->version == 6) ? 1U : 0U; + dx = aq_ring_next_dx(ring, dx); dx_buff = &ring->buff_ring[dx]; ++ret; @@ -510,10 +513,22 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self, if (skb->ip_summed == CHECKSUM_PARTIAL) { dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ? 1U : 0U; - dx_buff->is_tcp_cso = - (ip_hdr(skb)->protocol == IPPROTO_TCP) ? 1U : 0U; - dx_buff->is_udp_cso = - (ip_hdr(skb)->protocol == IPPROTO_UDP) ? 1U : 0U; + + if (ip_hdr(skb)->version == 4) { + dx_buff->is_tcp_cso = + (ip_hdr(skb)->protocol == IPPROTO_TCP) ? + 1U : 0U; + dx_buff->is_udp_cso = + (ip_hdr(skb)->protocol == IPPROTO_UDP) ? + 1U : 0U; + } else if (ip_hdr(skb)->version == 6) { + dx_buff->is_tcp_cso = + (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP) ?
+ 1U : 0U; + dx_buff->is_udp_cso = + (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP) ? + 1U : 0U; + } } for (; nr_frags--; ++frag_count) { diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index 581de71a958a35..4c6c882c6a1c42 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -213,9 +213,9 @@ void aq_pci_func_free_irqs(struct aq_pci_func_s *self) if (!((1U << i) & self->msix_entry_mask)) continue; - free_irq(pci_irq_vector(pdev, i), self->aq_vec[i]); if (pdev->msix_enabled) irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL); + free_irq(pci_irq_vector(pdev, i), self->aq_vec[i]); self->msix_entry_mask &= ~(1U << i); } } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 0358e6072d45ab..3a8a4aa13687ff 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -101,6 +101,7 @@ int aq_ring_init(struct aq_ring_s *self) self->hw_head = 0; self->sw_head = 0; self->sw_tail = 0; + spin_lock_init(&self->header.lock); return 0; } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h index 2572546450685d..eecd6d1c4d731a 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h @@ -58,7 +58,8 @@ struct __packed aq_ring_buff_s { u8 len_l2; u8 len_l3; u8 len_l4; - u8 rsvd2; + u8 is_ipv6:1; + u8 rsvd2:7; u32 len_pkt; }; }; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index a2b746a2dd50b8..4ee15ff06a448b 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -433,6 +433,9 @@ static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self, buff->len_l3 + buff->len_l2); is_gso = true; + + if (buff->is_ipv6) + txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_IPV6; } else { buff_pa_len = buff->len; @@ -458,6 +461,7 @@ static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self, if (unlikely(buff->is_eop)) { txd->ctl |= HW_ATL_A0_TXD_CTL_EOP; txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_WB; + is_gso = false; } } diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h index 1093ea18823a32..0592a0330cf0d6 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h @@ -137,6 +137,7 @@ static struct aq_hw_caps_s hw_atl_a0_hw_caps_ = { .tx_rings = HW_ATL_A0_TX_RINGS, .rx_rings = HW_ATL_A0_RX_RINGS, .hw_features = NETIF_F_HW_CSUM | + NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_SG | NETIF_F_TSO, diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index cab2931dab9ac3..42150708191dbf 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -471,6 +471,9 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self, buff->len_l3 + buff->len_l2); is_gso = true; + + if (buff->is_ipv6) + txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPV6; } else { buff_pa_len = buff->len; @@ -496,6 +499,7 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self, if (unlikely(buff->is_eop)) { 
txd->ctl |= HW_ATL_B0_TXD_CTL_EOP; txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_WB; + is_gso = false; } } diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h index 8bdee3ddd5a0bd..f3957e9303405c 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h @@ -188,6 +188,7 @@ static struct aq_hw_caps_s hw_atl_b0_hw_caps_ = { .tx_rings = HW_ATL_B0_TX_RINGS, .rx_rings = HW_ATL_B0_RX_RINGS, .hw_features = NETIF_F_HW_CSUM | + NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_SG | NETIF_F_TSO | diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c index 7b1af950f312f3..da1b8b225eb9d3 100644 --- a/drivers/net/ethernet/broadcom/bgmac-platform.c +++ b/drivers/net/ethernet/broadcom/bgmac-platform.c @@ -51,8 +51,7 @@ static void platform_bgmac_idm_write(struct bgmac *bgmac, u16 offset, u32 value) static bool platform_bgmac_clk_enabled(struct bgmac *bgmac) { - if ((bgmac_idm_read(bgmac, BCMA_IOCTL) & - (BCMA_IOCTL_CLK | BCMA_IOCTL_FGC)) != BCMA_IOCTL_CLK) + if ((bgmac_idm_read(bgmac, BCMA_IOCTL) & BGMAC_CLK_EN) != BGMAC_CLK_EN) return false; if (bgmac_idm_read(bgmac, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET) return false; @@ -61,15 +60,25 @@ static bool platform_bgmac_clk_enabled(struct bgmac *bgmac) static void platform_bgmac_clk_enable(struct bgmac *bgmac, u32 flags) { - bgmac_idm_write(bgmac, BCMA_IOCTL, - (BCMA_IOCTL_CLK | BCMA_IOCTL_FGC | flags)); - bgmac_idm_read(bgmac, BCMA_IOCTL); + u32 val; - bgmac_idm_write(bgmac, BCMA_RESET_CTL, 0); - bgmac_idm_read(bgmac, BCMA_RESET_CTL); - udelay(1); + /* The Reset Control register only contains a single bit to show if the + * controller is currently in reset. Do a sanity check here, just in + * case the bootloader happened to leave the device in reset. 
+ */ + val = bgmac_idm_read(bgmac, BCMA_RESET_CTL); + if (val) { + bgmac_idm_write(bgmac, BCMA_RESET_CTL, 0); + bgmac_idm_read(bgmac, BCMA_RESET_CTL); + udelay(1); + } - bgmac_idm_write(bgmac, BCMA_IOCTL, (BCMA_IOCTL_CLK | flags)); + val = bgmac_idm_read(bgmac, BCMA_IOCTL); + /* Some bits of BCMA_IOCTL set by HW/ATF and should not change */ + val |= flags & ~(BGMAC_AWCACHE | BGMAC_ARCACHE | BGMAC_AWUSER | + BGMAC_ARUSER); + val |= BGMAC_CLK_EN; + bgmac_idm_write(bgmac, BCMA_IOCTL, val); bgmac_idm_read(bgmac, BCMA_IOCTL); udelay(1); } diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 415046750bb449..fd66fca00e0177 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -1223,12 +1223,16 @@ static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb, static int bgmac_set_mac_address(struct net_device *net_dev, void *addr) { struct bgmac *bgmac = netdev_priv(net_dev); + struct sockaddr *sa = addr; int ret; ret = eth_prepare_mac_addr_change(net_dev, addr); if (ret < 0) return ret; - bgmac_write_mac_address(bgmac, (u8 *)addr); + + ether_addr_copy(net_dev->dev_addr, sa->sa_data); + bgmac_write_mac_address(bgmac, net_dev->dev_addr); + eth_commit_mac_addr_change(net_dev, addr); return 0; } diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h index 248727dc62f22c..6d1c6ff1ed963e 100644 --- a/drivers/net/ethernet/broadcom/bgmac.h +++ b/drivers/net/ethernet/broadcom/bgmac.h @@ -213,6 +213,22 @@ /* BCMA GMAC core specific IO Control (BCMA_IOCTL) flags */ #define BGMAC_BCMA_IOCTL_SW_CLKEN 0x00000004 /* PHY Clock Enable */ #define BGMAC_BCMA_IOCTL_SW_RESET 0x00000008 /* PHY Reset */ +/* The IOCTL values appear to be different in NS, NSP, and NS2, and do not match + * the values directly above + */ +#define BGMAC_CLK_EN BIT(0) +#define BGMAC_RESERVED_0 BIT(1) +#define BGMAC_SOURCE_SYNC_MODE_EN BIT(2) +#define BGMAC_DEST_SYNC_MODE_EN BIT(3) +#define BGMAC_TX_CLK_OUT_INVERT_EN BIT(4) +#define BGMAC_DIRECT_GMII_MODE BIT(5) +#define BGMAC_CLK_250_SEL BIT(6) +#define BGMAC_AWCACHE (0xf << 7) +#define BGMAC_RESERVED_1 (0x1f << 11) +#define BGMAC_ARCACHE (0xf << 16) +#define BGMAC_AWUSER (0x3f << 20) +#define BGMAC_ARUSER (0x3f << 26) +#define BGMAC_RESERVED BIT(31) /* BCMA GMAC core specific IO status (BCMA_IOST) flags */ #define BGMAC_BCMA_IOST_ATTACHED 0x00000800 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 0a23034bbe3ff8..352beff796ae5b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -2277,7 +2277,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \ GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC)) -#define HW_INTERRUT_ASSERT_SET_0 \ +#define HW_INTERRUPT_ASSERT_SET_0 \ (AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \ AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \ AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \ @@ -2290,7 +2290,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR |\ AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR |\ AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR) -#define HW_INTERRUT_ASSERT_SET_1 \ +#define HW_INTERRUPT_ASSERT_SET_1 \ (AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \ AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \ AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT | \ @@ -2318,7 +2318,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, 
AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \ AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR |\ AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR) -#define HW_INTERRUT_ASSERT_SET_2 \ +#define HW_INTERRUPT_ASSERT_SET_2 \ (AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \ AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \ AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT | \ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index d8d06fdfc42b9d..a851f95c307a33 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -4166,14 +4166,14 @@ static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) bnx2x_release_phy_lock(bp); } - if (attn & HW_INTERRUT_ASSERT_SET_0) { + if (attn & HW_INTERRUPT_ASSERT_SET_0) { val = REG_RD(bp, reg_offset); - val &= ~(attn & HW_INTERRUT_ASSERT_SET_0); + val &= ~(attn & HW_INTERRUPT_ASSERT_SET_0); REG_WR(bp, reg_offset, val); BNX2X_ERR("FATAL HW block attention set0 0x%x\n", - (u32)(attn & HW_INTERRUT_ASSERT_SET_0)); + (u32)(attn & HW_INTERRUPT_ASSERT_SET_0)); bnx2x_panic(); } } @@ -4191,7 +4191,7 @@ static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn) BNX2X_ERR("FATAL error from DORQ\n"); } - if (attn & HW_INTERRUT_ASSERT_SET_1) { + if (attn & HW_INTERRUPT_ASSERT_SET_1) { int port = BP_PORT(bp); int reg_offset; @@ -4200,11 +4200,11 @@ static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn) MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); val = REG_RD(bp, reg_offset); - val &= ~(attn & HW_INTERRUT_ASSERT_SET_1); + val &= ~(attn & HW_INTERRUPT_ASSERT_SET_1); REG_WR(bp, reg_offset, val); BNX2X_ERR("FATAL HW block attention set1 0x%x\n", - (u32)(attn & HW_INTERRUT_ASSERT_SET_1)); + (u32)(attn & HW_INTERRUPT_ASSERT_SET_1)); bnx2x_panic(); } } @@ -4235,7 +4235,7 @@ static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) } } - if (attn & HW_INTERRUT_ASSERT_SET_2) { + if (attn & HW_INTERRUPT_ASSERT_SET_2) { int port = BP_PORT(bp); int reg_offset; @@ -4244,11 +4244,11 @@ static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2); val = REG_RD(bp, reg_offset); - val &= ~(attn & HW_INTERRUT_ASSERT_SET_2); + val &= ~(attn & HW_INTERRUPT_ASSERT_SET_2); REG_WR(bp, reg_offset, val); BNX2X_ERR("FATAL HW block attention set2 0x%x\n", - (u32)(attn & HW_INTERRUT_ASSERT_SET_2)); + (u32)(attn & HW_INTERRUPT_ASSERT_SET_2)); bnx2x_panic(); } } @@ -13292,17 +13292,15 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA; - /* VF with OLD Hypervisor or old PF do not support filtering */ if (IS_PF(bp)) { if (chip_is_e1x) bp->accept_any_vlan = true; else dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; -#ifdef CONFIG_BNX2X_SRIOV - } else if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) { - dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; -#endif } + /* For VF we'll know whether to enable VLAN filtering after + * getting a response to CHANNEL_TLV_ACQUIRE from PF. 
+ */ dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX; dev->features |= NETIF_F_HIGHDMA; @@ -13738,7 +13736,7 @@ static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) if (!netif_running(bp->dev)) { DP(BNX2X_MSG_PTP, "PTP adjfreq called while the interface is down\n"); - return -EFAULT; + return -ENETDOWN; } if (ppb < 0) { @@ -13797,6 +13795,12 @@ static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) { struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); + if (!netif_running(bp->dev)) { + DP(BNX2X_MSG_PTP, + "PTP adjtime called while the interface is down\n"); + return -ENETDOWN; + } + DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta); timecounter_adjtime(&bp->timecounter, delta); @@ -13809,6 +13813,12 @@ static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); u64 ns; + if (!netif_running(bp->dev)) { + DP(BNX2X_MSG_PTP, + "PTP gettime called while the interface is down\n"); + return -ENETDOWN; + } + ns = timecounter_read(&bp->timecounter); DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns); @@ -13824,6 +13834,12 @@ static int bnx2x_ptp_settime(struct ptp_clock_info *ptp, struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); u64 ns; + if (!netif_running(bp->dev)) { + DP(BNX2X_MSG_PTP, + "PTP settime called while the interface is down\n"); + return -ENETDOWN; + } + ns = timespec64_to_ns(ts); DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns); @@ -13991,6 +14007,14 @@ static int bnx2x_init_one(struct pci_dev *pdev, rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count); if (rc) goto init_one_freemem; + +#ifdef CONFIG_BNX2X_SRIOV + /* VF with OLD Hypervisor or old PF do not support filtering */ + if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) { + dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + } +#endif } /* Enable SRIOV if capability found in configuration space */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 6fad22adbbb9e7..bdfd53b46bc568 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -434,7 +434,9 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp, /* Add/Remove the filter */ rc = bnx2x_config_vlan_mac(bp, &ramrod); - if (rc && rc != -EEXIST) { + if (rc == -EEXIST) + return 0; + if (rc) { BNX2X_ERR("Failed to %s %s\n", filter->add ? "add" : "delete", (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ? 
@@ -444,6 +446,8 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp, return rc; } + filter->applied = true; + return 0; } @@ -469,8 +473,10 @@ int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf, /* Rollback if needed */ if (i != filters->count) { BNX2X_ERR("Managed only %d/%d filters - rolling back\n", - i, filters->count + 1); + i, filters->count); while (--i >= 0) { + if (!filters->filters[i].applied) + continue; filters->filters[i].add = !filters->filters[i].add; bnx2x_vf_mac_vlan_config(bp, vf, qid, &filters->filters[i], @@ -1899,7 +1905,8 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) continue; } - DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid); + DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS), + "add addresses for vf %d\n", vf->abs_vfid); for_each_vfq(vf, j) { struct bnx2x_vf_queue *rxq = vfq_get(vf, j); @@ -1920,11 +1927,12 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) cpu_to_le32(U64_HI(q_stats_addr)); cur_query_entry->address.lo = cpu_to_le32(U64_LO(q_stats_addr)); - DP(BNX2X_MSG_IOV, - "added address %x %x for vf %d queue %d client %d\n", - cur_query_entry->address.hi, - cur_query_entry->address.lo, cur_query_entry->funcID, - j, cur_query_entry->index); + DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS), + "added address %x %x for vf %d queue %d client %d\n", + cur_query_entry->address.hi, + cur_query_entry->address.lo, + cur_query_entry->funcID, + j, cur_query_entry->index); cur_query_entry++; cur_data_offset += sizeof(struct per_queue_stats); stats_count++; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index 7a6d406f4c1117..888d0b6632e86f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -114,6 +114,7 @@ struct bnx2x_vf_mac_vlan_filter { (BNX2X_VF_FILTER_MAC | BNX2X_VF_FILTER_VLAN) /*shortcut*/ bool add; + bool applied; u8 *mac; u16 vid; }; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index bfae300cf25ff8..76a4668c50fe98 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -868,7 +868,7 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev) struct bnx2x *bp = netdev_priv(dev); struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters; struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; - int rc, i = 0; + int rc = 0, i = 0; struct netdev_hw_addr *ha; if (bp->state != BNX2X_STATE_OPEN) { @@ -883,6 +883,15 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev) /* Get Rx mode requested */ DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags); + /* We support PFVF_MAX_MULTICAST_PER_VF mcast addresses tops */ + if (netdev_mc_count(dev) > PFVF_MAX_MULTICAST_PER_VF) { + DP(NETIF_MSG_IFUP, + "VF supports not more than %d multicast MAC addresses\n", + PFVF_MAX_MULTICAST_PER_VF); + rc = -EINVAL; + goto out; + } + netdev_for_each_mc_addr(ha, dev) { DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n", bnx2x_mc_addr(ha)); @@ -890,16 +899,6 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev) i++; } - /* We support four PFVF_MAX_MULTICAST_PER_VF mcast - * addresses tops - */ - if (i >= PFVF_MAX_MULTICAST_PER_VF) { - DP(NETIF_MSG_IFUP, - "VF supports not more than %d multicast MAC addresses\n", - PFVF_MAX_MULTICAST_PER_VF); - return -EINVAL; - } - req->n_multicast = i; req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED; req->vf_qid = 0; @@ -924,7 +923,7 @@ int 
bnx2x_vfpf_set_mcast(struct net_device *dev) out: bnx2x_vfpf_finalize(bp, &req->first_tlv); - return 0; + return rc; } /* request pf to add a vlan for the vf */ @@ -1778,6 +1777,23 @@ static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf) goto op_err; } + /* build vlan list */ + fl = NULL; + + rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl, + VFPF_VLAN_FILTER); + if (rc) + goto op_err; + + if (fl) { + /* set vlan list */ + rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl, + msg->vf_qid, + false); + if (rc) + goto op_err; + } + } if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 235733e91c791b..1f1e54ba0ecb31 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1983,20 +1983,25 @@ static void bnxt_free_rx_skbs(struct bnxt *bp) for (j = 0; j < max_idx; j++) { struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j]; + dma_addr_t mapping = rx_buf->mapping; void *data = rx_buf->data; if (!data) continue; - dma_unmap_single(&pdev->dev, rx_buf->mapping, - bp->rx_buf_use_size, bp->rx_dir); - rx_buf->data = NULL; - if (BNXT_RX_PAGE_MODE(bp)) + if (BNXT_RX_PAGE_MODE(bp)) { + mapping -= bp->rx_dma_offset; + dma_unmap_page(&pdev->dev, mapping, + PAGE_SIZE, bp->rx_dir); __free_page(data); - else + } else { + dma_unmap_single(&pdev->dev, mapping, + bp->rx_buf_use_size, + bp->rx_dir); kfree(data); + } } for (j = 0; j < max_agg_idx; j++) { @@ -2455,6 +2460,18 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) return 0; } +static void bnxt_init_cp_rings(struct bnxt *bp) +{ + int i; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; + struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; + + ring->fw_ring_id = INVALID_HW_RING_ID; + } +} + static int bnxt_init_rx_rings(struct bnxt *bp) { int i, rc = 0; @@ -4465,6 +4482,10 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp) vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK; } #endif + if (BNXT_PF(bp) && (le16_to_cpu(resp->flags) & + FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)) + bp->flags |= BNXT_FLAG_FW_LLDP_AGENT; + switch (resp->port_partition_type) { case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0: case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5: @@ -4728,7 +4749,7 @@ static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags); if (rc) { netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n", - rc, i); + i, rc); return rc; } } @@ -5002,6 +5023,7 @@ static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init) static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) { + bnxt_init_cp_rings(bp); bnxt_init_rx_rings(bp); bnxt_init_tx_rings(bp); bnxt_init_ring_grps(bp, irq_re_init); @@ -5507,8 +5529,9 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) & PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK; } - link_info->support_auto_speeds = - le16_to_cpu(resp->supported_speeds_auto_mode); + if (resp->supported_speeds_auto_mode) + link_info->support_auto_speeds = + le16_to_cpu(resp->supported_speeds_auto_mode); hwrm_phy_qcaps_exit: mutex_unlock(&bp->hwrm_cmd_lock); @@ -6495,8 +6518,14 @@ static void bnxt_reset_task(struct bnxt *bp, bool silent) if (!silent) bnxt_dbg_dump_states(bp); if (netif_running(bp->dev)) { + int rc; + + if (!silent) + bnxt_ulp_stop(bp); bnxt_close_nic(bp, false, false); - bnxt_open_nic(bp, 
false, false); + rc = bnxt_open_nic(bp, false, false); + if (!silent && !rc) + bnxt_ulp_start(bp); } } @@ -7444,6 +7473,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) goto init_err_pci_clean; + rc = bnxt_hwrm_func_reset(bp); + if (rc) + goto init_err_pci_clean; + bnxt_hwrm_fw_set_time(bp); dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | @@ -7554,10 +7587,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) goto init_err_pci_clean; - rc = bnxt_hwrm_func_reset(bp); - if (rc) - goto init_err_pci_clean; - rc = bnxt_init_int_mode(bp); if (rc) goto init_err_pci_clean; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index faf26a2f726b80..c7a5b84a5cb20e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -993,6 +993,7 @@ struct bnxt { BNXT_FLAG_ROCEV2_CAP) #define BNXT_FLAG_NO_AGG_RINGS 0x20000 #define BNXT_FLAG_RX_PAGE_MODE 0x40000 + #define BNXT_FLAG_FW_LLDP_AGENT 0x80000 #define BNXT_FLAG_CHIP_NITRO_A0 0x1000000 #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index fdf2d8caf7bfaa..03532061d211b1 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c @@ -474,7 +474,7 @@ void bnxt_dcb_init(struct bnxt *bp) return; bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE; - if (BNXT_PF(bp)) + if (BNXT_PF(bp) && !(bp->flags & BNXT_FLAG_FW_LLDP_AGENT)) bp->dcbx_cap |= DCB_CAP_DCBX_HOST; else bp->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index f92896835d2a4c..365895ed3c3e24 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1,7 +1,7 @@ /* * Broadcom GENET (Gigabit Ethernet) controller driver * - * Copyright (c) 2014 Broadcom Corporation + * Copyright (c) 2014-2017 Broadcom * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -450,6 +450,22 @@ static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv, genet_dma_ring_regs[r]); } +static int bcmgenet_begin(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + /* Turn on the clock */ + return clk_prepare_enable(priv->clk); +} + +static void bcmgenet_complete(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + /* Turn off the clock */ + clk_disable_unprepare(priv->clk); +} + static int bcmgenet_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { @@ -778,8 +794,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = { STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes), /* Misc UniMAC counters */ STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, - UMAC_RBUF_OVFL_CNT), - STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT), + UMAC_RBUF_OVFL_CNT_V1), + STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, + UMAC_RBUF_ERR_CNT_V1), STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT), STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed), @@ -821,6 +838,45 @@ static void bcmgenet_get_strings(struct net_device *dev, u32 stringset, } } 
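The GENET hunks above wire clk_prepare_enable()/clk_disable_unprepare() into the ethtool_ops .begin and .complete callbacks (bcmgenet_begin()/bcmgenet_complete(), hooked up further down), so the block is guaranteed to be clocked for the whole duration of any ethtool request. A minimal sketch of that pattern follows; it is illustrative only, and the foo_* names and the bare-bones private struct are placeholders, not the driver's real code:

#include <linux/clk.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Illustrative private data; a real driver keeps far more state here. */
struct foo_priv {
	struct clk *clk;
};

/* The ethtool core calls .begin before dispatching any ethtool op. */
static int foo_ethtool_begin(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* Make sure the register file is clocked before it is touched. */
	return clk_prepare_enable(priv->clk);
}

/* The core calls .complete once the operation has finished. */
static void foo_ethtool_complete(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	clk_disable_unprepare(priv->clk);
}

static const struct ethtool_ops foo_ethtool_ops = {
	.begin		= foo_ethtool_begin,
	.complete	= foo_ethtool_complete,
	/* get_strings, get_ethtool_stats, ... omitted */
};

Because the core brackets every ethtool operation with these two hooks, they are a convenient single place to gate clocking instead of sprinkling clock handling through each individual handler.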
+static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset) +{ + u16 new_offset; + u32 val; + + switch (offset) { + case UMAC_RBUF_OVFL_CNT_V1: + if (GENET_IS_V2(priv)) + new_offset = RBUF_OVFL_CNT_V2; + else + new_offset = RBUF_OVFL_CNT_V3PLUS; + + val = bcmgenet_rbuf_readl(priv, new_offset); + /* clear if overflowed */ + if (val == ~0) + bcmgenet_rbuf_writel(priv, 0, new_offset); + break; + case UMAC_RBUF_ERR_CNT_V1: + if (GENET_IS_V2(priv)) + new_offset = RBUF_ERR_CNT_V2; + else + new_offset = RBUF_ERR_CNT_V3PLUS; + + val = bcmgenet_rbuf_readl(priv, new_offset); + /* clear if overflowed */ + if (val == ~0) + bcmgenet_rbuf_writel(priv, 0, new_offset); + break; + default: + val = bcmgenet_umac_readl(priv, offset); + /* clear if overflowed */ + if (val == ~0) + bcmgenet_umac_writel(priv, 0, offset); + break; + } + + return val; +} + static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv) { int i, j = 0; @@ -836,19 +892,28 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv) case BCMGENET_STAT_NETDEV: case BCMGENET_STAT_SOFT: continue; - case BCMGENET_STAT_MIB_RX: - case BCMGENET_STAT_MIB_TX: case BCMGENET_STAT_RUNT: - if (s->type != BCMGENET_STAT_MIB_RX) - offset = BCMGENET_STAT_OFFSET; + offset += BCMGENET_STAT_OFFSET; + /* fall through */ + case BCMGENET_STAT_MIB_TX: + offset += BCMGENET_STAT_OFFSET; + /* fall through */ + case BCMGENET_STAT_MIB_RX: val = bcmgenet_umac_readl(priv, UMAC_MIB_START + j + offset); + offset = 0; /* Reset Offset */ break; case BCMGENET_STAT_MISC: - val = bcmgenet_umac_readl(priv, s->reg_offset); - /* clear if overflowed */ - if (val == ~0) - bcmgenet_umac_writel(priv, 0, s->reg_offset); + if (GENET_IS_V1(priv)) { + val = bcmgenet_umac_readl(priv, s->reg_offset); + /* clear if overflowed */ + if (val == ~0) + bcmgenet_umac_writel(priv, 0, + s->reg_offset); + } else { + val = bcmgenet_update_stat_misc(priv, + s->reg_offset); + } break; } @@ -973,6 +1038,8 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e) /* standard ethtool support functions. 
*/ static const struct ethtool_ops bcmgenet_ethtool_ops = { + .begin = bcmgenet_begin, + .complete = bcmgenet_complete, .get_strings = bcmgenet_get_strings, .get_sset_count = bcmgenet_get_sset_count, .get_ethtool_stats = bcmgenet_get_ethtool_stats, @@ -1167,7 +1234,6 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, struct bcmgenet_priv *priv = netdev_priv(dev); struct device *kdev = &priv->pdev->dev; struct enet_cb *tx_cb_ptr; - struct netdev_queue *txq; unsigned int pkts_compl = 0; unsigned int bytes_compl = 0; unsigned int c_index; @@ -1219,13 +1285,8 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, dev->stats.tx_packets += pkts_compl; dev->stats.tx_bytes += bytes_compl; - txq = netdev_get_tx_queue(dev, ring->queue); - netdev_tx_completed_queue(txq, pkts_compl, bytes_compl); - - if (ring->free_bds > (MAX_SKB_FRAGS + 1)) { - if (netif_tx_queue_stopped(txq)) - netif_tx_wake_queue(txq); - } + netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue), + pkts_compl, bytes_compl); return pkts_compl; } @@ -1248,8 +1309,16 @@ static int bcmgenet_tx_poll(struct napi_struct *napi, int budget) struct bcmgenet_tx_ring *ring = container_of(napi, struct bcmgenet_tx_ring, napi); unsigned int work_done = 0; + struct netdev_queue *txq; + unsigned long flags; - work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring); + spin_lock_irqsave(&ring->lock, flags); + work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring); + if (ring->free_bds > (MAX_SKB_FRAGS + 1)) { + txq = netdev_get_tx_queue(ring->priv->dev, ring->queue); + netif_tx_wake_queue(txq); + } + spin_unlock_irqrestore(&ring->lock, flags); if (work_done == 0) { napi_complete(napi); @@ -2457,24 +2526,28 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv) /* Interrupt bottom half */ static void bcmgenet_irq_task(struct work_struct *work) { + unsigned long flags; + unsigned int status; struct bcmgenet_priv *priv = container_of( work, struct bcmgenet_priv, bcmgenet_irq_work); netif_dbg(priv, intr, priv->dev, "%s\n", __func__); - if (priv->irq0_stat & UMAC_IRQ_MPD_R) { - priv->irq0_stat &= ~UMAC_IRQ_MPD_R; + spin_lock_irqsave(&priv->lock, flags); + status = priv->irq0_stat; + priv->irq0_stat = 0; + spin_unlock_irqrestore(&priv->lock, flags); + + if (status & UMAC_IRQ_MPD_R) { netif_dbg(priv, wol, priv->dev, "magic packet detected, waking up\n"); bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC); } /* Link UP/DOWN event */ - if (priv->irq0_stat & UMAC_IRQ_LINK_EVENT) { + if (status & UMAC_IRQ_LINK_EVENT) phy_mac_interrupt(priv->phydev, - !!(priv->irq0_stat & UMAC_IRQ_LINK_UP)); - priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT; - } + !!(status & UMAC_IRQ_LINK_UP)); } /* bcmgenet_isr1: handle Rx and Tx priority queues */ @@ -2483,22 +2556,21 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) struct bcmgenet_priv *priv = dev_id; struct bcmgenet_rx_ring *rx_ring; struct bcmgenet_tx_ring *tx_ring; - unsigned int index; + unsigned int index, status; - /* Save irq status for bottom-half processing. 
*/ - priv->irq1_stat = - bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) & + /* Read irq status */ + status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) & ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); /* clear interrupts */ - bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR); + bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR); netif_dbg(priv, intr, priv->dev, - "%s: IRQ=0x%x\n", __func__, priv->irq1_stat); + "%s: IRQ=0x%x\n", __func__, status); /* Check Rx priority queue interrupts */ for (index = 0; index < priv->hw_params->rx_queues; index++) { - if (!(priv->irq1_stat & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index))) + if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index))) continue; rx_ring = &priv->rx_rings[index]; @@ -2511,7 +2583,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) /* Check Tx priority queue interrupts */ for (index = 0; index < priv->hw_params->tx_queues; index++) { - if (!(priv->irq1_stat & BIT(index))) + if (!(status & BIT(index))) continue; tx_ring = &priv->tx_rings[index]; @@ -2531,19 +2603,20 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) struct bcmgenet_priv *priv = dev_id; struct bcmgenet_rx_ring *rx_ring; struct bcmgenet_tx_ring *tx_ring; + unsigned int status; + unsigned long flags; - /* Save irq status for bottom-half processing. */ - priv->irq0_stat = - bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) & + /* Read irq status */ + status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) & ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); /* clear interrupts */ - bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR); + bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR); netif_dbg(priv, intr, priv->dev, - "IRQ=0x%x\n", priv->irq0_stat); + "IRQ=0x%x\n", status); - if (priv->irq0_stat & UMAC_IRQ_RXDMA_DONE) { + if (status & UMAC_IRQ_RXDMA_DONE) { rx_ring = &priv->rx_rings[DESC_INDEX]; if (likely(napi_schedule_prep(&rx_ring->napi))) { @@ -2552,7 +2625,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) } } - if (priv->irq0_stat & UMAC_IRQ_TXDMA_DONE) { + if (status & UMAC_IRQ_TXDMA_DONE) { tx_ring = &priv->tx_rings[DESC_INDEX]; if (likely(napi_schedule_prep(&tx_ring->napi))) { @@ -2561,22 +2634,23 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) } } - if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R | - UMAC_IRQ_PHY_DET_F | - UMAC_IRQ_LINK_EVENT | - UMAC_IRQ_HFB_SM | - UMAC_IRQ_HFB_MM | - UMAC_IRQ_MPD_R)) { - /* all other interested interrupts handled in bottom half */ - schedule_work(&priv->bcmgenet_irq_work); - } - if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && - priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) { - priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR); + status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) { wake_up(&priv->wq); } + /* all other interested interrupts handled in bottom half */ + status &= (UMAC_IRQ_LINK_EVENT | + UMAC_IRQ_MPD_R); + if (status) { + /* Save irq status for bottom-half processing. 
*/ + spin_lock_irqsave(&priv->lock, flags); + priv->irq0_stat |= status; + spin_unlock_irqrestore(&priv->lock, flags); + + schedule_work(&priv->bcmgenet_irq_work); + } + return IRQ_HANDLED; } @@ -2801,6 +2875,8 @@ static int bcmgenet_open(struct net_device *dev) err_fini_dma: bcmgenet_fini_dma(priv); err_clk_disable: + if (priv->internal_phy) + bcmgenet_power_down(priv, GENET_POWER_PASSIVE); clk_disable_unprepare(priv->clk); return ret; } @@ -3177,6 +3253,12 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) */ gphy_rev = reg & 0xffff; + /* This is reserved so should require special treatment */ + if (gphy_rev == 0 || gphy_rev == 0x01ff) { + pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev); + return; + } + /* This is the good old scheme, just GPHY major, no minor nor patch */ if ((gphy_rev & 0xf0) != 0) priv->gphy_rev = gphy_rev << 8; @@ -3185,12 +3267,6 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) else if ((gphy_rev & 0xff00) != 0) priv->gphy_rev = gphy_rev; - /* This is reserved so should require special treatment */ - else if (gphy_rev == 0 || gphy_rev == 0x01ff) { - pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev); - return; - } - #ifdef CONFIG_PHYS_ADDR_T_64BIT if (!(params->flags & GENET_HAS_40BITS)) pr_warn("GENET does not support 40-bits PA\n"); @@ -3233,6 +3309,7 @@ static int bcmgenet_probe(struct platform_device *pdev) const void *macaddr; struct resource *r; int err = -EIO; + const char *phy_mode_str; /* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */ dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, @@ -3276,6 +3353,8 @@ static int bcmgenet_probe(struct platform_device *pdev) goto err; } + spin_lock_init(&priv->lock); + SET_NETDEV_DEV(dev, &pdev->dev); dev_set_drvdata(&pdev->dev, dev); ether_addr_copy(dev->dev_addr, macaddr); @@ -3338,6 +3417,13 @@ static int bcmgenet_probe(struct platform_device *pdev) priv->clk_eee = NULL; } + /* If this is an internal GPHY, power it on now, before UniMAC is + * brought out of reset as absolutely no UniMAC activity is allowed + */ + if (dn && !of_property_read_string(dn, "phy-mode", &phy_mode_str) && + !strcasecmp(phy_mode_str, "internal")) + bcmgenet_power_up(priv, GENET_POWER_PASSIVE); + err = reset_umac(priv); if (err) goto err_clk_disable; @@ -3395,7 +3481,8 @@ static int bcmgenet_suspend(struct device *d) bcmgenet_netif_stop(dev); - phy_suspend(priv->phydev); + if (!device_may_wakeup(d)) + phy_suspend(priv->phydev); netif_device_detach(dev); @@ -3492,7 +3579,8 @@ static int bcmgenet_resume(struct device *d) netif_device_attach(dev); - phy_resume(priv->phydev); + if (!device_may_wakeup(d)) + phy_resume(priv->phydev); if (priv->eee.eee_enabled) bcmgenet_eee_enable_set(dev, true); @@ -3502,6 +3590,8 @@ static int bcmgenet_resume(struct device *d) return 0; out_clk_disable: + if (priv->internal_phy) + bcmgenet_power_down(priv, GENET_POWER_PASSIVE); clk_disable_unprepare(priv->clk); return ret; } diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index 1e2dc34d331a49..db7f289d65ae2a 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014 Broadcom Corporation + * Copyright (c) 2014-2017 Broadcom * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -214,7 +214,9 @@ struct bcmgenet_mib_counters { #define MDIO_REG_SHIFT 16 
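The interrupt rework above stops caching the raw INTRL2 status in priv->irq0_stat/irq1_stat from the hard handlers: bcmgenet_isr0()/bcmgenet_isr1() now read and acknowledge the status locally, schedule NAPI or wake MDIO waiters directly, and only the link-event and magic-packet bits are accumulated into irq0_stat under the new priv->lock before the work item is scheduled; bcmgenet_irq_task() then snapshots and clears that word under the same lock. A minimal sketch of this top-half/bottom-half handshake, with hypothetical foo_* names and example bit definitions standing in for the real register layout:

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#define FOO_IRQ_LINK_EVENT	BIT(4)	/* example bits, not the real layout */
#define FOO_IRQ_WOL		BIT(7)

struct foo_priv {
	spinlock_t lock;		/* protects irq_stat */
	unsigned int irq_stat;		/* bits deferred to the work item */
	struct work_struct irq_work;
};

/* Stand-in for reading the interrupt status register, masking it and
 * writing the acknowledged bits back to the clear register.
 */
static unsigned int foo_read_and_ack_status(struct foo_priv *priv)
{
	return 0;
}

static irqreturn_t foo_isr(int irq, void *dev_id)
{
	struct foo_priv *priv = dev_id;
	unsigned int status;
	unsigned long flags;

	status = foo_read_and_ack_status(priv);

	/* Fast-path bits (Rx/Tx done) would be handled right here by
	 * scheduling NAPI; only the slow-path bits are deferred.
	 */
	status &= FOO_IRQ_LINK_EVENT | FOO_IRQ_WOL;
	if (status) {
		spin_lock_irqsave(&priv->lock, flags);
		priv->irq_stat |= status;
		spin_unlock_irqrestore(&priv->lock, flags);
		schedule_work(&priv->irq_work);
	}

	return IRQ_HANDLED;
}

static void foo_irq_task(struct work_struct *work)
{
	struct foo_priv *priv = container_of(work, struct foo_priv, irq_work);
	unsigned int status;
	unsigned long flags;

	/* Snapshot and clear the accumulated bits atomically so a
	 * concurrent interrupt can neither be lost nor handled twice.
	 */
	spin_lock_irqsave(&priv->lock, flags);
	status = priv->irq_stat;
	priv->irq_stat = 0;
	spin_unlock_irqrestore(&priv->lock, flags);

	if (status & FOO_IRQ_WOL)
		pr_debug("magic packet seen, power-up path runs here\n");
	if (status & FOO_IRQ_LINK_EVENT)
		pr_debug("link event, phy_mac_interrupt() would run here\n");
}

The point of the snapshot-and-clear is that the hard handler only ever ORs bits in and the work item only ever consumes them, so neither side has to hold the lock while talking to the hardware.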
#define MDIO_REG_MASK 0x1F -#define UMAC_RBUF_OVFL_CNT 0x61C +#define UMAC_RBUF_OVFL_CNT_V1 0x61C +#define RBUF_OVFL_CNT_V2 0x80 +#define RBUF_OVFL_CNT_V3PLUS 0x94 #define UMAC_MPD_CTRL 0x620 #define MPD_EN (1 << 0) @@ -224,7 +226,9 @@ struct bcmgenet_mib_counters { #define UMAC_MPD_PW_MS 0x624 #define UMAC_MPD_PW_LS 0x628 -#define UMAC_RBUF_ERR_CNT 0x634 +#define UMAC_RBUF_ERR_CNT_V1 0x634 +#define RBUF_ERR_CNT_V2 0x84 +#define RBUF_ERR_CNT_V3PLUS 0x98 #define UMAC_MDF_ERR_CNT 0x638 #define UMAC_MDF_CTRL 0x650 #define UMAC_MDF_ADDR 0x654 @@ -619,11 +623,13 @@ struct bcmgenet_priv { struct work_struct bcmgenet_irq_work; int irq0; int irq1; - unsigned int irq0_stat; - unsigned int irq1_stat; int wol_irq; bool wol_irq_disabled; + /* shared status */ + spinlock_t lock; + unsigned int irq0_stat; + /* HW descriptors/checksum variables */ bool desc_64b_en; bool desc_rxchk_en; diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index e87607621e62a0..2f9281936f0e43 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -220,20 +220,6 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable) udelay(60); } -static void bcmgenet_internal_phy_setup(struct net_device *dev) -{ - struct bcmgenet_priv *priv = netdev_priv(dev); - u32 reg; - - /* Power up PHY */ - bcmgenet_phy_power_set(dev, true); - /* enable APD */ - reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); - reg |= EXT_PWR_DN_EN_LD; - bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); - bcmgenet_mii_reset(dev); -} - static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv) { u32 reg; @@ -281,7 +267,6 @@ int bcmgenet_mii_config(struct net_device *dev) if (priv->internal_phy) { phy_name = "internal PHY"; - bcmgenet_internal_phy_setup(dev); } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { phy_name = "MoCA"; bcmgenet_moca_phy_setup(priv); diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index 89d4feba1a9aea..55c8e25b43d9ad 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -2617,7 +2617,7 @@ static int sbmac_probe(struct platform_device *pldev) return err; } -static int __exit sbmac_remove(struct platform_device *pldev) +static int sbmac_remove(struct platform_device *pldev) { struct net_device *dev = platform_get_drvdata(pldev); struct sbmac_softc *sc = netdev_priv(dev); @@ -2634,7 +2634,7 @@ static int __exit sbmac_remove(struct platform_device *pldev) static struct platform_driver sbmac_driver = { .probe = sbmac_probe, - .remove = __exit_p(sbmac_remove), + .remove = sbmac_remove, .driver = { .name = sbmac_string, }, diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index a448177990fe42..30d1eb9ebec9af 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index 9e59663a6eadb0..0f6811860ad51d 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c @@ -1930,13 +1930,13 @@ static void bfa_ioc_send_enable(struct bfa_ioc *ioc) { struct bfi_ioc_ctrl_req enable_req; - struct timeval tv; bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ, bfa_ioc_portid(ioc)); enable_req.clscode = htons(ioc->clscode); - 
do_gettimeofday(&tv); - enable_req.tv_sec = ntohl(tv.tv_sec); + enable_req.rsvd = htons(0); + /* overflow in 2106 */ + enable_req.tv_sec = ntohl(ktime_get_real_seconds()); bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req)); } @@ -1947,6 +1947,10 @@ bfa_ioc_send_disable(struct bfa_ioc *ioc) bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ, bfa_ioc_portid(ioc)); + disable_req.clscode = htons(ioc->clscode); + disable_req.rsvd = htons(0); + /* overflow in 2106 */ + disable_req.tv_sec = ntohl(ktime_get_real_seconds()); bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req)); } diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c index 05c1c1dd7751bd..cebfe3bd086e36 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c +++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c @@ -325,7 +325,7 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf, return PTR_ERR(kern_buf); rc = sscanf(kern_buf, "%x:%x", &addr, &len); - if (rc < 2) { + if (rc < 2 || len > UINT_MAX >> 2) { netdev_warn(bnad->netdev, "failed to read user buffer\n"); kfree(kern_buf); return -EINVAL; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index be9c0e3f5ade7d..92f46b1375c325 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -152,7 +152,7 @@ struct octnic_gather { */ struct octeon_sg_entry *sg; - u64 sg_dma_ptr; + dma_addr_t sg_dma_ptr; }; struct handshake { @@ -734,6 +734,9 @@ static void delete_glists(struct lio *lio) struct octnic_gather *g; int i; + kfree(lio->glist_lock); + lio->glist_lock = NULL; + if (!lio->glist) return; @@ -741,23 +744,26 @@ static void delete_glists(struct lio *lio) do { g = (struct octnic_gather *) list_delete_head(&lio->glist[i]); - if (g) { - if (g->sg) { - dma_unmap_single(&lio->oct_dev-> - pci_dev->dev, - g->sg_dma_ptr, - g->sg_size, - DMA_TO_DEVICE); - kfree((void *)((unsigned long)g->sg - - g->adjust)); - } + if (g) kfree(g); - } } while (g); + + if (lio->glists_virt_base && lio->glists_virt_base[i]) { + lio_dma_free(lio->oct_dev, + lio->glist_entry_size * lio->tx_qsize, + lio->glists_virt_base[i], + lio->glists_dma_base[i]); + } } - kfree((void *)lio->glist); - kfree((void *)lio->glist_lock); + kfree(lio->glists_virt_base); + lio->glists_virt_base = NULL; + + kfree(lio->glists_dma_base); + lio->glists_dma_base = NULL; + + kfree(lio->glist); + lio->glist = NULL; } /** @@ -772,13 +778,30 @@ static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs) lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock), GFP_KERNEL); if (!lio->glist_lock) - return 1; + return -ENOMEM; lio->glist = kcalloc(num_iqs, sizeof(*lio->glist), GFP_KERNEL); if (!lio->glist) { - kfree((void *)lio->glist_lock); - return 1; + kfree(lio->glist_lock); + lio->glist_lock = NULL; + return -ENOMEM; + } + + lio->glist_entry_size = + ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE); + + /* allocate memory to store virtual and dma base address of + * per glist consistent memory + */ + lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base), + GFP_KERNEL); + lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base), + GFP_KERNEL); + + if (!lio->glists_virt_base || !lio->glists_dma_base) { + delete_glists(lio); + return -ENOMEM; } for (i = 0; i < num_iqs; i++) { @@ -788,6 +811,16 @@ static int setup_glists(struct 
octeon_device *oct, struct lio *lio, int num_iqs) INIT_LIST_HEAD(&lio->glist[i]); + lio->glists_virt_base[i] = + lio_dma_alloc(oct, + lio->glist_entry_size * lio->tx_qsize, + &lio->glists_dma_base[i]); + + if (!lio->glists_virt_base[i]) { + delete_glists(lio); + return -ENOMEM; + } + for (j = 0; j < lio->tx_qsize; j++) { g = kzalloc_node(sizeof(*g), GFP_KERNEL, numa_node); @@ -796,43 +829,18 @@ static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs) if (!g) break; - g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * - OCT_SG_ENTRY_SIZE); + g->sg = lio->glists_virt_base[i] + + (j * lio->glist_entry_size); - g->sg = kmalloc_node(g->sg_size + 8, - GFP_KERNEL, numa_node); - if (!g->sg) - g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL); - if (!g->sg) { - kfree(g); - break; - } - - /* The gather component should be aligned on 64-bit - * boundary - */ - if (((unsigned long)g->sg) & 7) { - g->adjust = 8 - (((unsigned long)g->sg) & 7); - g->sg = (struct octeon_sg_entry *) - ((unsigned long)g->sg + g->adjust); - } - g->sg_dma_ptr = dma_map_single(&oct->pci_dev->dev, - g->sg, g->sg_size, - DMA_TO_DEVICE); - if (dma_mapping_error(&oct->pci_dev->dev, - g->sg_dma_ptr)) { - kfree((void *)((unsigned long)g->sg - - g->adjust)); - kfree(g); - break; - } + g->sg_dma_ptr = lio->glists_dma_base[i] + + (j * lio->glist_entry_size); list_add_tail(&g->list, &lio->glist[i]); } if (j != lio->tx_qsize) { delete_glists(lio); - return 1; + return -ENOMEM; } } @@ -1885,9 +1893,6 @@ static void free_netsgbuf(void *buf) i++; } - dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev, - g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE); - iq = skb_iq(lio, skb); spin_lock(&lio->glist_lock[iq]); list_add_tail(&g->list, &lio->glist[iq]); @@ -1933,9 +1938,6 @@ static void free_netsgbuf_with_resp(void *buf) i++; } - dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev, - g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE); - iq = skb_iq(lio, skb); spin_lock(&lio->glist_lock[iq]); @@ -3273,8 +3275,6 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) i++; } - dma_sync_single_for_device(&oct->pci_dev->dev, g->sg_dma_ptr, - g->sg_size, DMA_TO_DEVICE); dptr = g->sg_dma_ptr; if (OCTEON_CN23XX_PF(oct)) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 9d5e03502c76cb..7b83be4ce1fe0c 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -108,6 +108,8 @@ struct octnic_gather { * received from the IP layer. 
*/ struct octeon_sg_entry *sg; + + dma_addr_t sg_dma_ptr; }; struct octeon_device_priv { @@ -490,6 +492,9 @@ static void delete_glists(struct lio *lio) struct octnic_gather *g; int i; + kfree(lio->glist_lock); + lio->glist_lock = NULL; + if (!lio->glist) return; @@ -497,17 +502,26 @@ static void delete_glists(struct lio *lio) do { g = (struct octnic_gather *) list_delete_head(&lio->glist[i]); - if (g) { - if (g->sg) - kfree((void *)((unsigned long)g->sg - - g->adjust)); + if (g) kfree(g); - } } while (g); + + if (lio->glists_virt_base && lio->glists_virt_base[i]) { + lio_dma_free(lio->oct_dev, + lio->glist_entry_size * lio->tx_qsize, + lio->glists_virt_base[i], + lio->glists_dma_base[i]); + } } + kfree(lio->glists_virt_base); + lio->glists_virt_base = NULL; + + kfree(lio->glists_dma_base); + lio->glists_dma_base = NULL; + kfree(lio->glist); - kfree(lio->glist_lock); + lio->glist = NULL; } /** @@ -522,13 +536,30 @@ static int setup_glists(struct lio *lio, int num_iqs) lio->glist_lock = kzalloc(sizeof(*lio->glist_lock) * num_iqs, GFP_KERNEL); if (!lio->glist_lock) - return 1; + return -ENOMEM; lio->glist = kzalloc(sizeof(*lio->glist) * num_iqs, GFP_KERNEL); if (!lio->glist) { kfree(lio->glist_lock); - return 1; + lio->glist_lock = NULL; + return -ENOMEM; + } + + lio->glist_entry_size = + ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE); + + /* allocate memory to store virtual and dma base address of + * per glist consistent memory + */ + lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base), + GFP_KERNEL); + lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base), + GFP_KERNEL); + + if (!lio->glists_virt_base || !lio->glists_dma_base) { + delete_glists(lio); + return -ENOMEM; } for (i = 0; i < num_iqs; i++) { @@ -536,34 +567,33 @@ static int setup_glists(struct lio *lio, int num_iqs) INIT_LIST_HEAD(&lio->glist[i]); + lio->glists_virt_base[i] = + lio_dma_alloc(lio->oct_dev, + lio->glist_entry_size * lio->tx_qsize, + &lio->glists_dma_base[i]); + + if (!lio->glists_virt_base[i]) { + delete_glists(lio); + return -ENOMEM; + } + for (j = 0; j < lio->tx_qsize; j++) { g = kzalloc(sizeof(*g), GFP_KERNEL); if (!g) break; - g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * - OCT_SG_ENTRY_SIZE); + g->sg = lio->glists_virt_base[i] + + (j * lio->glist_entry_size); - g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL); - if (!g->sg) { - kfree(g); - break; - } + g->sg_dma_ptr = lio->glists_dma_base[i] + + (j * lio->glist_entry_size); - /* The gather component should be aligned on 64-bit - * boundary - */ - if (((unsigned long)g->sg) & 7) { - g->adjust = 8 - (((unsigned long)g->sg) & 7); - g->sg = (struct octeon_sg_entry *) - ((unsigned long)g->sg + g->adjust); - } list_add_tail(&g->list, &lio->glist[i]); } if (j != lio->tx_qsize) { delete_glists(lio); - return 1; + return -ENOMEM; } } @@ -1324,10 +1354,6 @@ static void free_netsgbuf(void *buf) i++; } - dma_unmap_single(&lio->oct_dev->pci_dev->dev, - finfo->dptr, g->sg_size, - DMA_TO_DEVICE); - iq = skb_iq(lio, skb); spin_lock(&lio->glist_lock[iq]); @@ -1374,10 +1400,6 @@ static void free_netsgbuf_with_resp(void *buf) i++; } - dma_unmap_single(&lio->oct_dev->pci_dev->dev, - finfo->dptr, g->sg_size, - DMA_TO_DEVICE); - iq = skb_iq(lio, skb); spin_lock(&lio->glist_lock[iq]); @@ -2382,23 +2404,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) i++; } - dptr = dma_map_single(&oct->pci_dev->dev, - g->sg, g->sg_size, - DMA_TO_DEVICE); - if (dma_mapping_error(&oct->pci_dev->dev, dptr)) { - 
dev_err(&oct->pci_dev->dev, "%s DMA mapping error 4\n", - __func__); - dma_unmap_single(&oct->pci_dev->dev, g->sg[0].ptr[0], - skb->len - skb->data_len, - DMA_TO_DEVICE); - for (j = 1; j <= frags; j++) { - frag = &skb_shinfo(skb)->frags[j - 1]; - dma_unmap_page(&oct->pci_dev->dev, - g->sg[j >> 2].ptr[j & 3], - frag->size, DMA_TO_DEVICE); - } - return NETDEV_TX_BUSY; - } + dptr = g->sg_dma_ptr; ndata.cmd.cmd3.dptr = dptr; finfo->dptr = dptr; diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h index b3dc2e9651a8e2..d29ebc531151f0 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_config.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h @@ -71,17 +71,17 @@ #define CN23XX_MAX_RINGS_PER_VF 8 #define CN23XX_MAX_INPUT_QUEUES CN23XX_MAX_RINGS_PER_PF -#define CN23XX_MAX_IQ_DESCRIPTORS 2048 +#define CN23XX_MAX_IQ_DESCRIPTORS 512 #define CN23XX_DB_MIN 1 #define CN23XX_DB_MAX 8 #define CN23XX_DB_TIMEOUT 1 #define CN23XX_MAX_OUTPUT_QUEUES CN23XX_MAX_RINGS_PER_PF -#define CN23XX_MAX_OQ_DESCRIPTORS 2048 +#define CN23XX_MAX_OQ_DESCRIPTORS 512 #define CN23XX_OQ_BUF_SIZE 1536 #define CN23XX_OQ_PKTSPER_INTR 128 /*#define CAVIUM_ONLY_CN23XX_RX_PERF*/ -#define CN23XX_OQ_REFIL_THRESHOLD 128 +#define CN23XX_OQ_REFIL_THRESHOLD 16 #define CN23XX_OQ_INTR_PKT 64 #define CN23XX_OQ_INTR_TIME 100 diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c index 0be87d119a979e..79f809479af6e7 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c @@ -155,11 +155,6 @@ octeon_droq_destroy_ring_buffers(struct octeon_device *oct, recv_buffer_destroy(droq->recv_buf_list[i].buffer, pg_info); - if (droq->desc_ring && droq->desc_ring[i].info_ptr) - lio_unmap_ring_info(oct->pci_dev, - (u64)droq-> - desc_ring[i].info_ptr, - OCT_DROQ_INFO_SIZE); droq->recv_buf_list[i].buffer = NULL; } @@ -211,10 +206,7 @@ int octeon_delete_droq(struct octeon_device *oct, u32 q_no) vfree(droq->recv_buf_list); if (droq->info_base_addr) - cnnic_free_aligned_dma(oct->pci_dev, droq->info_list, - droq->info_alloc_size, - droq->info_base_addr, - droq->info_list_dma); + lio_free_info_buffer(oct, droq); if (droq->desc_ring) lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE), @@ -294,12 +286,7 @@ int octeon_init_droq(struct octeon_device *oct, dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no, droq->max_count); - droq->info_list = - cnnic_numa_alloc_aligned_dma((droq->max_count * - OCT_DROQ_INFO_SIZE), - &droq->info_alloc_size, - &droq->info_base_addr, - numa_node); + droq->info_list = lio_alloc_info_buffer(oct, droq); if (!droq->info_list) { dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n"); lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE), diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h index e62074090681d3..6982c0af5eccb7 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h @@ -325,10 +325,10 @@ struct octeon_droq { size_t desc_ring_dma; /** Info ptr list are allocated at this virtual address. */ - size_t info_base_addr; + void *info_base_addr; /** DMA mapped address of the info list */ - size_t info_list_dma; + dma_addr_t info_list_dma; /** Allocated size of info list. 
*/ u32 info_alloc_size; diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h index 8cd38914816665..bed9ef17bc26b4 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h @@ -23,6 +23,8 @@ #ifndef _OCTEON_MAIN_H_ #define _OCTEON_MAIN_H_ +#include + #if BITS_PER_LONG == 32 #define CVM_CAST64(v) ((long long)(v)) #elif BITS_PER_LONG == 64 @@ -138,48 +140,6 @@ static inline int octeon_map_pci_barx(struct octeon_device *oct, return 1; } -static inline void * -cnnic_numa_alloc_aligned_dma(u32 size, - u32 *alloc_size, - size_t *orig_ptr, - int numa_node) -{ - int retries = 0; - void *ptr = NULL; - -#define OCTEON_MAX_ALLOC_RETRIES 1 - do { - struct page *page = NULL; - - page = alloc_pages_node(numa_node, - GFP_KERNEL, - get_order(size)); - if (!page) - page = alloc_pages(GFP_KERNEL, - get_order(size)); - ptr = (void *)page_address(page); - if ((unsigned long)ptr & 0x07) { - __free_pages(page, get_order(size)); - ptr = NULL; - /* Increment the size required if the first - * attempt failed. - */ - if (!retries) - size += 7; - } - retries++; - } while ((retries <= OCTEON_MAX_ALLOC_RETRIES) && !ptr); - - *alloc_size = size; - *orig_ptr = (unsigned long)ptr; - if ((unsigned long)ptr & 0x07) - ptr = (void *)(((unsigned long)ptr + 7) & ~(7UL)); - return ptr; -} - -#define cnnic_free_aligned_dma(pci_dev, ptr, size, orig_ptr, dma_addr) \ - free_pages(orig_ptr, get_order(size)) - static inline int sleep_cond(wait_queue_head_t *wait_queue, int *condition) { diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h index 6bb89419006eb5..eef2a1e8a7e3f9 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -62,6 +62,9 @@ struct lio { /** Array of gather component linked lists */ struct list_head *glist; + void **glists_virt_base; + dma_addr_t *glists_dma_base; + u32 glist_entry_size; /** Pointer to the NIC properties for the Octeon device this network * interface is associated with. 
@@ -344,6 +347,29 @@ static inline void tx_buffer_free(void *buffer) #define lio_dma_free(oct, size, virt_addr, dma_addr) \ dma_free_coherent(&(oct)->pci_dev->dev, size, virt_addr, dma_addr) +static inline void * +lio_alloc_info_buffer(struct octeon_device *oct, + struct octeon_droq *droq) +{ + void *virt_ptr; + + virt_ptr = lio_dma_alloc(oct, (droq->max_count * OCT_DROQ_INFO_SIZE), + &droq->info_list_dma); + if (virt_ptr) { + droq->info_alloc_size = droq->max_count * OCT_DROQ_INFO_SIZE; + droq->info_base_addr = virt_ptr; + } + + return virt_ptr; +} + +static inline void lio_free_info_buffer(struct octeon_device *oct, + struct octeon_droq *droq) +{ + lio_dma_free(oct, droq->info_alloc_size, droq->info_base_addr, + droq->info_list_dma); +} + static inline void *get_rbd(struct sk_buff *skb) { @@ -359,22 +385,7 @@ void *get_rbd(struct sk_buff *skb) static inline u64 lio_map_ring_info(struct octeon_droq *droq, u32 i) { - dma_addr_t dma_addr; - struct octeon_device *oct = droq->oct_dev; - - dma_addr = dma_map_single(&oct->pci_dev->dev, &droq->info_list[i], - OCT_DROQ_INFO_SIZE, DMA_FROM_DEVICE); - - WARN_ON(dma_mapping_error(&oct->pci_dev->dev, dma_addr)); - - return (u64)dma_addr; -} - -static inline void -lio_unmap_ring_info(struct pci_dev *pci_dev, - u64 info_ptr, u32 size) -{ - dma_unmap_single(&pci_dev->dev, info_ptr, size, DMA_FROM_DEVICE); + return droq->info_list_dma + (i * sizeof(struct octeon_droq_info)); } static inline u64 diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h index e739c715356283..2269ff562d9562 100644 --- a/drivers/net/ethernet/cavium/thunder/nic.h +++ b/drivers/net/ethernet/cavium/thunder/nic.h @@ -269,6 +269,7 @@ struct nicvf { #define MAX_QUEUES_PER_QSET 8 struct queue_set *qs; struct nicvf_cq_poll *napi[8]; + void *iommu_domain; u8 vf_id; u8 sqs_id; bool sqs_mode; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 6feaa24bcfd42b..24017588f53171 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -16,6 +16,7 @@ #include #include #include +#include #include "nic_reg.h" #include "nic.h" @@ -525,7 +526,12 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev, /* Get actual TSO descriptors and free them */ tso_sqe = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2); + nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2, + tso_sqe->subdesc_cnt); nicvf_put_sq_desc(sq, tso_sqe->subdesc_cnt + 1); + } else { + nicvf_unmap_sndq_buffers(nic, sq, cqe_tx->sqe_ptr, + hdr->subdesc_cnt); } nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); prefetch(skb); @@ -576,6 +582,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev, { struct sk_buff *skb; struct nicvf *nic = netdev_priv(netdev); + struct nicvf *snic = nic; int err = 0; int rq_idx; @@ -592,7 +599,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev, if (err && !cqe_rx->rb_cnt) return; - skb = nicvf_get_rcv_skb(nic, cqe_rx); + skb = nicvf_get_rcv_skb(snic, cqe_rx); if (!skb) { netdev_dbg(nic->netdev, "Packet not received\n"); return; @@ -1643,6 +1650,9 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (!pass1_silicon(nic->pdev)) nic->hw_tso = true; + /* Get iommu domain for iova to physical addr conversion */ + nic->iommu_domain = iommu_get_domain_for_dev(dev); + pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid); if (sdevid == 0xA134) nic->t88 = true; diff --git 
a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index ac0390be3b126e..f13289f0d2386d 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -18,6 +19,16 @@ #include "q_struct.h" #include "nicvf_queues.h" +#define NICVF_PAGE_ORDER ((PAGE_SIZE <= 4096) ? PAGE_ALLOC_COSTLY_ORDER : 0) + +static inline u64 nicvf_iova_to_phys(struct nicvf *nic, dma_addr_t dma_addr) +{ + /* Translation is installed only when IOMMU is present */ + if (nic->iommu_domain) + return iommu_iova_to_phys(nic->iommu_domain, dma_addr); + return dma_addr; +} + static void nicvf_get_page(struct nicvf *nic) { if (!nic->rb_pageref || !nic->rb_page) @@ -87,7 +98,7 @@ static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem) static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp, u32 buf_len, u64 **rbuf) { - int order = (PAGE_SIZE <= 4096) ? PAGE_ALLOC_COSTLY_ORDER : 0; + int order = NICVF_PAGE_ORDER; /* Check if request can be accomodated in previous allocated page */ if (nic->rb_page && @@ -97,22 +108,27 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp, } nicvf_get_page(nic); - nic->rb_page = NULL; /* Allocate a new page */ + nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, + order); if (!nic->rb_page) { - nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, - order); - if (!nic->rb_page) { - this_cpu_inc(nic->pnicvf->drv_stats-> - rcv_buffer_alloc_failures); - return -ENOMEM; - } - nic->rb_page_offset = 0; + this_cpu_inc(nic->pnicvf->drv_stats->rcv_buffer_alloc_failures); + return -ENOMEM; } - + nic->rb_page_offset = 0; ret: - *rbuf = (u64 *)((u64)page_address(nic->rb_page) + nic->rb_page_offset); + /* HW will ensure data coherency, CPU sync not required */ + *rbuf = (u64 *)((u64)dma_map_page_attrs(&nic->pdev->dev, nic->rb_page, + nic->rb_page_offset, buf_len, + DMA_FROM_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC)); + if (dma_mapping_error(&nic->pdev->dev, (dma_addr_t)*rbuf)) { + if (!nic->rb_page_offset) + __free_pages(nic->rb_page, order); + nic->rb_page = NULL; + return -ENOMEM; + } nic->rb_page_offset += buf_len; return 0; @@ -158,16 +174,21 @@ static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr, rbdr->dma_size = buf_size; rbdr->enable = true; rbdr->thresh = RBDR_THRESH; + rbdr->head = 0; + rbdr->tail = 0; nic->rb_page = NULL; for (idx = 0; idx < ring_len; idx++) { err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN, &rbuf); - if (err) + if (err) { + /* To free already allocated and mapped ones */ + rbdr->tail = idx - 1; return err; + } desc = GET_RBDR_DESC(rbdr, idx); - desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN; + desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN; } nicvf_get_page(nic); @@ -179,7 +200,7 @@ static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr, static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr) { int head, tail; - u64 buf_addr; + u64 buf_addr, phys_addr; struct rbdr_entry_t *desc; if (!rbdr) @@ -192,18 +213,26 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr) head = rbdr->head; tail = rbdr->tail; - /* Free SKBs */ + /* Release page references */ while (head != tail) { desc = GET_RBDR_DESC(rbdr, head); - buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN; - put_page(virt_to_page(phys_to_virt(buf_addr))); + buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN; + 
phys_addr = nicvf_iova_to_phys(nic, buf_addr); + dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN, + DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); + if (phys_addr) + put_page(virt_to_page(phys_to_virt(phys_addr))); head++; head &= (rbdr->dmem.q_len - 1); } - /* Free SKB of tail desc */ + /* Release buffer of tail desc */ desc = GET_RBDR_DESC(rbdr, tail); - buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN; - put_page(virt_to_page(phys_to_virt(buf_addr))); + buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN; + phys_addr = nicvf_iova_to_phys(nic, buf_addr); + dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN, + DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); + if (phys_addr) + put_page(virt_to_page(phys_to_virt(phys_addr))); /* Free RBDR ring */ nicvf_free_q_desc_mem(nic, &rbdr->dmem); @@ -250,7 +279,7 @@ static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp) break; desc = GET_RBDR_DESC(rbdr, tail); - desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN; + desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN; refill_rb_cnt--; new_rb++; } @@ -361,9 +390,29 @@ static int nicvf_init_snd_queue(struct nicvf *nic, return 0; } +void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq, + int hdr_sqe, u8 subdesc_cnt) +{ + u8 idx; + struct sq_gather_subdesc *gather; + + /* Unmap DMA mapped skb data buffers */ + for (idx = 0; idx < subdesc_cnt; idx++) { + hdr_sqe++; + hdr_sqe &= (sq->dmem.q_len - 1); + gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, hdr_sqe); + /* HW will ensure data coherency, CPU sync not required */ + dma_unmap_page_attrs(&nic->pdev->dev, gather->addr, + gather->size, DMA_TO_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); + } +} + static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq) { struct sk_buff *skb; + struct sq_hdr_subdesc *hdr; + struct sq_hdr_subdesc *tso_sqe; if (!sq) return; @@ -379,8 +428,22 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq) smp_rmb(); while (sq->head != sq->tail) { skb = (struct sk_buff *)sq->skbuff[sq->head]; - if (skb) - dev_kfree_skb_any(skb); + if (!skb) + goto next; + hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head); + /* Check for dummy descriptor used for HW TSO offload on 88xx */ + if (hdr->dont_send) { + /* Get actual TSO descriptors and unmap them */ + tso_sqe = + (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2); + nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2, + tso_sqe->subdesc_cnt); + } else { + nicvf_unmap_sndq_buffers(nic, sq, sq->head, + hdr->subdesc_cnt); + } + dev_kfree_skb_any(skb); +next: sq->head++; sq->head &= (sq->dmem.q_len - 1); } @@ -559,9 +622,11 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs, nicvf_send_msg_to_pf(nic, &mbx); if (!nic->sqs_mode && (qidx == 0)) { - /* Enable checking L3/L4 length and TCP/UDP checksums */ + /* Enable checking L3/L4 length and TCP/UDP checksums + * Also allow IPv6 pkts with zero UDP checksum. 
+ */ nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, - (BIT(24) | BIT(23) | BIT(21))); + (BIT(24) | BIT(23) | BIT(21) | BIT(20))); nicvf_config_vlan_stripping(nic, nic->netdev->features); } @@ -882,6 +947,14 @@ static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt) return qentry; } +/* Rollback to previous tail pointer when descriptors not used */ +static inline void nicvf_rollback_sq_desc(struct snd_queue *sq, + int qentry, int desc_cnt) +{ + sq->tail = qentry; + atomic_add(desc_cnt, &sq->free_cnt); +} + /* Free descriptor back to SQ for future use */ void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt) { @@ -1207,8 +1280,9 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq, struct sk_buff *skb, u8 sq_num) { int i, size; - int subdesc_cnt, tso_sqe = 0; + int subdesc_cnt, hdr_sqe = 0; int qentry; + u64 dma_addr; subdesc_cnt = nicvf_sq_subdesc_required(nic, skb); if (subdesc_cnt > atomic_read(&sq->free_cnt)) @@ -1223,12 +1297,21 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq, /* Add SQ header subdesc */ nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1, skb, skb->len); - tso_sqe = qentry; + hdr_sqe = qentry; /* Add SQ gather subdescs */ qentry = nicvf_get_nxt_sqentry(sq, qentry); size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len; - nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data)); + /* HW will ensure data coherency, CPU sync not required */ + dma_addr = dma_map_page_attrs(&nic->pdev->dev, virt_to_page(skb->data), + offset_in_page(skb->data), size, + DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); + if (dma_mapping_error(&nic->pdev->dev, dma_addr)) { + nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt); + return 0; + } + + nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr); /* Check for scattered buffer */ if (!skb_is_nonlinear(skb)) @@ -1241,15 +1324,26 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq, qentry = nicvf_get_nxt_sqentry(sq, qentry); size = skb_frag_size(frag); - nicvf_sq_add_gather_subdesc(sq, qentry, size, - virt_to_phys( - skb_frag_address(frag))); + dma_addr = dma_map_page_attrs(&nic->pdev->dev, + skb_frag_page(frag), + frag->page_offset, size, + DMA_TO_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); + if (dma_mapping_error(&nic->pdev->dev, dma_addr)) { + /* Free entire chain of mapped buffers + * here 'i' = frags mapped + above mapped skb->data + */ + nicvf_unmap_sndq_buffers(nic, sq, hdr_sqe, i); + nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt); + return 0; + } + nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr); } doorbell: if (nic->t88 && skb_shinfo(skb)->gso_size) { qentry = nicvf_get_nxt_sqentry(sq, qentry); - nicvf_sq_add_cqe_subdesc(sq, qentry, tso_sqe, skb); + nicvf_sq_add_cqe_subdesc(sq, qentry, hdr_sqe, skb); } nicvf_sq_doorbell(nic, skb, sq_num, subdesc_cnt); @@ -1282,6 +1376,7 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx) int offset; u16 *rb_lens = NULL; u64 *rb_ptrs = NULL; + u64 phys_addr; rb_lens = (void *)cqe_rx + (3 * sizeof(u64)); /* Except 88xx pass1 on all other chips CQE_RX2_S is added to @@ -1296,15 +1391,23 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx) else rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64)); - netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n", - __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz); - for (frag = 0; frag < cqe_rx->rb_cnt; frag++) { payload_len = rb_lens[frag_num(frag)]; + phys_addr = nicvf_iova_to_phys(nic, *rb_ptrs); + if 
(!phys_addr) { + if (skb) + dev_kfree_skb_any(skb); + return NULL; + } + if (!frag) { /* First fragment */ + dma_unmap_page_attrs(&nic->pdev->dev, + *rb_ptrs - cqe_rx->align_pad, + RCV_FRAG_LEN, DMA_FROM_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); skb = nicvf_rb_ptr_to_skb(nic, - *rb_ptrs - cqe_rx->align_pad, + phys_addr - cqe_rx->align_pad, payload_len); if (!skb) return NULL; @@ -1312,8 +1415,11 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx) skb_put(skb, payload_len); } else { /* Add fragments */ - page = virt_to_page(phys_to_virt(*rb_ptrs)); - offset = phys_to_virt(*rb_ptrs) - page_address(page); + dma_unmap_page_attrs(&nic->pdev->dev, *rb_ptrs, + RCV_FRAG_LEN, DMA_FROM_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); + page = virt_to_page(phys_to_virt(phys_addr)); + offset = phys_to_virt(phys_addr) - page_address(page); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, payload_len, RCV_FRAG_LEN); } diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h index 5cb84da99a2de5..10cb4b84625b14 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h @@ -87,7 +87,7 @@ #define RCV_BUF_COUNT (1ULL << (RBDR_SIZE + 13)) #define MAX_RCV_BUF_COUNT (1ULL << (RBDR_SIZE6 + 13)) #define RBDR_THRESH (RCV_BUF_COUNT / 2) -#define DMA_BUFFER_LEN 2048 /* In multiples of 128bytes */ +#define DMA_BUFFER_LEN 1536 /* In multiples of 128bytes */ #define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) @@ -301,6 +301,8 @@ struct queue_set { #define CQ_ERR_MASK (CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT) +void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq, + int hdr_sqe, u8 subdesc_cnt); void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features); int nicvf_set_qset_resources(struct nicvf *nic); diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 4c8e8cf730bbc2..64a1095e4d1495 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -123,14 +123,44 @@ static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero) return 1; } +static int max_bgx_per_node; +static void set_max_bgx_per_node(struct pci_dev *pdev) +{ + u16 sdevid; + + if (max_bgx_per_node) + return; + + pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid); + switch (sdevid) { + case PCI_SUBSYS_DEVID_81XX_BGX: + max_bgx_per_node = MAX_BGX_PER_CN81XX; + break; + case PCI_SUBSYS_DEVID_83XX_BGX: + max_bgx_per_node = MAX_BGX_PER_CN83XX; + break; + case PCI_SUBSYS_DEVID_88XX_BGX: + default: + max_bgx_per_node = MAX_BGX_PER_CN88XX; + break; + } +} + +static struct bgx *get_bgx(int node, int bgx_idx) +{ + int idx = (node * max_bgx_per_node) + bgx_idx; + + return bgx_vnic[idx]; +} + /* Return number of BGX present in HW */ unsigned bgx_get_map(int node) { int i; unsigned map = 0; - for (i = 0; i < MAX_BGX_PER_NODE; i++) { - if (bgx_vnic[(node * MAX_BGX_PER_NODE) + i]) + for (i = 0; i < max_bgx_per_node; i++) { + if (bgx_vnic[(node * max_bgx_per_node) + i]) map |= (1 << i); } @@ -143,7 +173,7 @@ int bgx_get_lmac_count(int node, int bgx_idx) { struct bgx *bgx; - bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; + bgx = get_bgx(node, bgx_idx); if (bgx) return bgx->lmac_count; @@ -158,7 +188,7 @@ void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void 
*status) struct bgx *bgx; struct lmac *lmac; - bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; + bgx = get_bgx(node, bgx_idx); if (!bgx) return; @@ -172,7 +202,7 @@ EXPORT_SYMBOL(bgx_get_lmac_link_state); const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid) { - struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; + struct bgx *bgx = get_bgx(node, bgx_idx); if (bgx) return bgx->lmac[lmacid].mac; @@ -183,7 +213,7 @@ EXPORT_SYMBOL(bgx_get_lmac_mac); void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac) { - struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; + struct bgx *bgx = get_bgx(node, bgx_idx); if (!bgx) return; @@ -194,7 +224,7 @@ EXPORT_SYMBOL(bgx_set_lmac_mac); void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable) { - struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; + struct bgx *bgx = get_bgx(node, bgx_idx); struct lmac *lmac; u64 cfg; @@ -217,7 +247,7 @@ EXPORT_SYMBOL(bgx_lmac_rx_tx_enable); void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause) { struct pfc *pfc = (struct pfc *)pause; - struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; + struct bgx *bgx = get_bgx(node, bgx_idx); struct lmac *lmac; u64 cfg; @@ -237,7 +267,7 @@ EXPORT_SYMBOL(bgx_lmac_get_pfc); void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause) { struct pfc *pfc = (struct pfc *)pause; - struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; + struct bgx *bgx = get_bgx(node, bgx_idx); struct lmac *lmac; u64 cfg; @@ -369,7 +399,7 @@ u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx) { struct bgx *bgx; - bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; + bgx = get_bgx(node, bgx_idx); if (!bgx) return 0; @@ -383,7 +413,7 @@ u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx) { struct bgx *bgx; - bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; + bgx = get_bgx(node, bgx_idx); if (!bgx) return 0; @@ -411,7 +441,7 @@ void bgx_lmac_internal_loopback(int node, int bgx_idx, struct lmac *lmac; u64 cfg; - bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; + bgx = get_bgx(node, bgx_idx); if (!bgx) return; @@ -1011,12 +1041,6 @@ static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid) dev_info(dev, "%s: 40G_KR4\n", (char *)str); break; case BGX_MODE_QSGMII: - if ((lmacid == 0) && - (bgx_get_lane2sds_cfg(bgx, lmac) != lmacid)) - return; - if ((lmacid == 2) && - (bgx_get_lane2sds_cfg(bgx, lmac) == lmacid)) - return; dev_info(dev, "%s: QSGMII\n", (char *)str); break; case BGX_MODE_RGMII: @@ -1334,11 +1358,13 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_release_regions; } + set_max_bgx_per_node(pdev); + pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid); if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) { bgx->bgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK; - bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_NODE; + bgx->bgx_id += nic_get_node_id(pdev) * max_bgx_per_node; bgx->max_lmac = MAX_LMAC_PER_BGX; bgx_vnic[bgx->bgx_id] = bgx; } else { diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h index a60f189429bb65..c5080f2cead5d0 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h @@ -22,7 +22,6 @@ #define MAX_BGX_PER_CN88XX 2 #define MAX_BGX_PER_CN81XX 3 /* 2 BGXs + 1 RGX */ #define MAX_BGX_PER_CN83XX 4 -#define MAX_BGX_PER_NODE 4 #define 
MAX_LMAC_PER_BGX 4 #define MAX_BGX_CHANS_PER_LMAC 16 #define MAX_DMAC_PER_LMAC 8 diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index 5043b64805f0b2..8098c93cd16e68 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -1364,6 +1364,10 @@ struct cpl_tx_data { #define TX_FORCE_S 13 #define TX_FORCE_V(x) ((x) << TX_FORCE_S) +#define T6_TX_FORCE_S 20 +#define T6_TX_FORCE_V(x) ((x) << T6_TX_FORCE_S) +#define T6_TX_FORCE_F T6_TX_FORCE_V(1U) + enum { ULP_TX_MEM_READ = 2, ULP_TX_MEM_WRITE = 3, diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h index 5fdaa16426c50e..fa376444e57c56 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h @@ -37,7 +37,7 @@ #define T4FW_VERSION_MAJOR 0x01 #define T4FW_VERSION_MINOR 0x10 -#define T4FW_VERSION_MICRO 0x1A +#define T4FW_VERSION_MICRO 0x21 #define T4FW_VERSION_BUILD 0x00 #define T4FW_MIN_VERSION_MAJOR 0x01 @@ -46,7 +46,7 @@ #define T5FW_VERSION_MAJOR 0x01 #define T5FW_VERSION_MINOR 0x10 -#define T5FW_VERSION_MICRO 0x1A +#define T5FW_VERSION_MICRO 0x21 #define T5FW_VERSION_BUILD 0x00 #define T5FW_MIN_VERSION_MAJOR 0x00 @@ -55,7 +55,7 @@ #define T6FW_VERSION_MAJOR 0x01 #define T6FW_VERSION_MINOR 0x10 -#define T6FW_VERSION_MICRO 0x1A +#define T6FW_VERSION_MICRO 0x21 #define T6FW_VERSION_BUILD 0x00 #define T6FW_MIN_VERSION_MAJOR 0x00 diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h index e995a1a3840a66..a91ad766cef009 100644 --- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h +++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h @@ -59,7 +59,7 @@ struct cxgbi_pagepod_hdr { #define PPOD_PAGES_MAX 4 struct cxgbi_pagepod { struct cxgbi_pagepod_hdr hdr; - u64 addr[PPOD_PAGES_MAX + 1]; + __be64 addr[PPOD_PAGES_MAX + 1]; }; /* ddp tag format diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 30e855004c5759..02dd5246dfae9a 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -4939,8 +4939,9 @@ static int __be_cmd_set_logical_link_config(struct be_adapter *adapter, int link_state, int version, u8 domain) { - struct be_mcc_wrb *wrb; struct be_cmd_req_set_ll_link *req; + struct be_mcc_wrb *wrb; + u32 link_config = 0; int status; mutex_lock(&adapter->mcc_lock); @@ -4962,10 +4963,12 @@ __be_cmd_set_logical_link_config(struct be_adapter *adapter, if (link_state == IFLA_VF_LINK_STATE_ENABLE || link_state == IFLA_VF_LINK_STATE_AUTO) - req->link_config |= PLINK_ENABLE; + link_config |= PLINK_ENABLE; if (link_state == IFLA_VF_LINK_STATE_AUTO) - req->link_config |= PLINK_TRACK; + link_config |= PLINK_TRACK; + + req->link_config = cpu_to_le32(link_config); status = be_mcc_notify_wait(adapter); err: diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c index 992ebe973d25bf..f819843e2bae73 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.c +++ b/drivers/net/ethernet/ezchip/nps_enet.c @@ -189,11 +189,9 @@ static int nps_enet_poll(struct napi_struct *napi, int budget) nps_enet_tx_handler(ndev); work_done = nps_enet_rx_handler(ndev); - if (work_done < budget) { + if ((work_done < budget) && napi_complete_done(napi, work_done)) { u32 buf_int_enable_value = 0; - napi_complete_done(napi, work_done); - /* set tx_done and 
rx_rdy bits */ buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT; buf_int_enable_value |= NPS_ENET_ENABLE << TX_DONE_SHIFT; diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 262587240c86e5..ade6b3e4ed1326 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -28,8 +28,10 @@ #include #include #include +#include #include #include +#include #include #include @@ -1456,7 +1458,7 @@ static int ftgmac100_probe(struct platform_device *pdev) return err; } -static int __exit ftgmac100_remove(struct platform_device *pdev) +static int ftgmac100_remove(struct platform_device *pdev) { struct net_device *netdev; struct ftgmac100 *priv; @@ -1483,7 +1485,7 @@ MODULE_DEVICE_TABLE(of, ftgmac100_of_match); static struct platform_driver ftgmac100_driver = { .probe = ftgmac100_probe, - .remove = __exit_p(ftgmac100_remove), + .remove = ftgmac100_remove, .driver = { .name = DRV_NAME, .of_match_table = ftgmac100_of_match, diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c index c0ddbbe6c22689..6ac336b546e6c2 100644 --- a/drivers/net/ethernet/faraday/ftmac100.c +++ b/drivers/net/ethernet/faraday/ftmac100.c @@ -1156,7 +1156,7 @@ static int ftmac100_probe(struct platform_device *pdev) return err; } -static int __exit ftmac100_remove(struct platform_device *pdev) +static int ftmac100_remove(struct platform_device *pdev) { struct net_device *netdev; struct ftmac100 *priv; @@ -1176,7 +1176,7 @@ static int __exit ftmac100_remove(struct platform_device *pdev) static struct platform_driver ftmac100_driver = { .probe = ftmac100_probe, - .remove = __exit_p(ftmac100_remove), + .remove = ftmac100_remove, .driver = { .name = DRV_NAME, }, diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 3239d27143b935..bdd8cdd732fb58 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -82,9 +82,12 @@ void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status) else *link_status = 0; - ret = mac_cb->dsaf_dev->misc_op->get_sfp_prsnt(mac_cb, &sfp_prsnt); - if (!ret) - *link_status = *link_status && sfp_prsnt; + if (mac_cb->media_type == HNAE_MEDIA_TYPE_FIBER) { + ret = mac_cb->dsaf_dev->misc_op->get_sfp_prsnt(mac_cb, + &sfp_prsnt); + if (!ret) + *link_status = *link_status && sfp_prsnt; + } mac_cb->link = *link_status; } @@ -855,7 +858,7 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb) of_node_put(np); np = of_parse_phandle(to_of_node(mac_cb->fw_port), - "serdes-syscon", 0); + "serdes-syscon", 0); syscon = syscon_node_to_regmap(np); of_node_put(np); if (IS_ERR_OR_NULL(syscon)) { diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 90dbda7926144a..403ea9db6dbd15 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -1519,6 +1519,7 @@ static void hns_dsaf_set_mac_key( mac_key->high.bits.mac_3 = addr[3]; mac_key->low.bits.mac_4 = addr[4]; mac_key->low.bits.mac_5 = addr[5]; + mac_key->low.bits.port_vlan = 0; dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_VLAN_M, DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id); dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M, @@ -2924,10 +2925,11 @@ void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev, /* find the tcam entry index for promisc 
*/ entry_index = dsaf_promisc_tcam_entry(port); + memset(&tbl_tcam_data, 0, sizeof(tbl_tcam_data)); + memset(&tbl_tcam_mask, 0, sizeof(tbl_tcam_mask)); + /* config key mask */ if (enable) { - memset(&tbl_tcam_data, 0, sizeof(tbl_tcam_data)); - memset(&tbl_tcam_mask, 0, sizeof(tbl_tcam_mask)); dsaf_set_field(tbl_tcam_data.low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M, DSAF_TBL_TCAM_KEY_PORT_S, port); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index a2c22d084ce90c..e13aa064a8e943 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c @@ -461,6 +461,32 @@ int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt) return 0; } +int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt) +{ + union acpi_object *obj; + union acpi_object obj_args, argv4; + + obj_args.integer.type = ACPI_TYPE_INTEGER; + obj_args.integer.value = mac_cb->mac_id; + + argv4.type = ACPI_TYPE_PACKAGE, + argv4.package.count = 1, + argv4.package.elements = &obj_args, + + obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev), + hns_dsaf_acpi_dsm_uuid, 0, + HNS_OP_GET_SFP_STAT_FUNC, &argv4); + + if (!obj || obj->type != ACPI_TYPE_INTEGER) + return -ENODEV; + + *sfp_prsnt = obj->integer.value; + + ACPI_FREE(obj); + + return 0; +} + /** * hns_mac_config_sds_loopback - set loop back for serdes * @mac_cb: mac control block @@ -592,7 +618,7 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev) misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst_acpi; misc_op->get_phy_if = hns_mac_get_phy_if_acpi; - misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt; + misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt_acpi; misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback_acpi; } else { diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 275c2e2349ad92..c44036d5761a4c 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -2589,8 +2589,6 @@ static int emac_dt_mdio_probe(struct emac_instance *dev) static int emac_dt_phy_connect(struct emac_instance *dev, struct device_node *phy_handle) { - int res; - dev->phy.def = devm_kzalloc(&dev->ofdev->dev, sizeof(*dev->phy.def), GFP_KERNEL); if (!dev->phy.def) @@ -2617,7 +2615,7 @@ static int emac_dt_phy_probe(struct emac_instance *dev) { struct device_node *np = dev->ofdev->dev.of_node; struct device_node *phy_handle; - int res = 0; + int res = 1; phy_handle = of_parse_phandle(np, "phy-handle", 0); @@ -2714,13 +2712,24 @@ static int emac_init_phy(struct emac_instance *dev) if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) { int res = emac_dt_phy_probe(dev); - mutex_unlock(&emac_phy_map_lock); - if (!res) + switch (res) { + case 1: + /* No phy-handle property configured. + * Continue with the existing phy probe + * and setup code. 
+ */ + break; + + case 0: + mutex_unlock(&emac_phy_map_lock); goto init_phy; - dev_err(&dev->ofdev->dev, "failed to attach dt phy (%d).\n", - res); - return res; + default: + mutex_unlock(&emac_phy_map_lock); + dev_err(&dev->ofdev->dev, "failed to attach dt phy (%d).\n", + res); + return res; + } } if (dev->phy_address != 0xffffffff) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 9198e6bd5160f9..b23d6545f83562 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -404,7 +404,7 @@ static int ibmvnic_open(struct net_device *netdev) send_map_query(adapter); for (i = 0; i < rxadd_subcrqs; i++) { init_rx_pool(adapter, &adapter->rx_pool[i], - IBMVNIC_BUFFS_PER_POOL, i, + adapter->req_rx_add_entries_per_subcrq, i, be64_to_cpu(size_array[i]), 1); if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) { dev_err(dev, "Couldn't alloc rx pool\n"); @@ -419,23 +419,23 @@ static int ibmvnic_open(struct net_device *netdev) for (i = 0; i < tx_subcrqs; i++) { tx_pool = &adapter->tx_pool[i]; tx_pool->tx_buff = - kcalloc(adapter->max_tx_entries_per_subcrq, + kcalloc(adapter->req_tx_entries_per_subcrq, sizeof(struct ibmvnic_tx_buff), GFP_KERNEL); if (!tx_pool->tx_buff) goto tx_pool_alloc_failed; if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff, - adapter->max_tx_entries_per_subcrq * + adapter->req_tx_entries_per_subcrq * adapter->req_mtu)) goto tx_ltb_alloc_failed; tx_pool->free_map = - kcalloc(adapter->max_tx_entries_per_subcrq, + kcalloc(adapter->req_tx_entries_per_subcrq, sizeof(int), GFP_KERNEL); if (!tx_pool->free_map) goto tx_fm_alloc_failed; - for (j = 0; j < adapter->max_tx_entries_per_subcrq; j++) + for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++) tx_pool->free_map[j] = j; tx_pool->consumer_index = 0; @@ -705,6 +705,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req; struct device *dev = &adapter->vdev->dev; struct ibmvnic_tx_buff *tx_buff = NULL; + struct ibmvnic_sub_crq_queue *tx_scrq; struct ibmvnic_tx_pool *tx_pool; unsigned int tx_send_failed = 0; unsigned int tx_map_failed = 0; @@ -724,6 +725,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) int ret = 0; tx_pool = &adapter->tx_pool[queue_num]; + tx_scrq = adapter->tx_scrq[queue_num]; txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb)); handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + be32_to_cpu(adapter->login_rsp_buf-> @@ -744,7 +746,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_pool->consumer_index = (tx_pool->consumer_index + 1) % - adapter->max_tx_entries_per_subcrq; + adapter->req_tx_entries_per_subcrq; tx_buff = &tx_pool->tx_buff[index]; tx_buff->skb = skb; @@ -817,7 +819,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) if (tx_pool->consumer_index == 0) tx_pool->consumer_index = - adapter->max_tx_entries_per_subcrq - 1; + adapter->req_tx_entries_per_subcrq - 1; else tx_pool->consumer_index--; @@ -826,6 +828,14 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) ret = NETDEV_TX_BUSY; goto out; } + + atomic_inc(&tx_scrq->used); + + if (atomic_read(&tx_scrq->used) >= adapter->req_tx_entries_per_subcrq) { + netdev_info(netdev, "Stopping queue %d\n", queue_num); + netif_stop_subqueue(netdev, queue_num); + } + tx_packets++; tx_bytes += skb->len; txq->trans_start = jiffies; @@ -1213,6 +1223,7 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct 
ibmvnic_adapter scrq->adapter = adapter; scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs); scrq->cur = 0; + atomic_set(&scrq->used, 0); scrq->rx_skb_top = NULL; spin_lock_init(&scrq->lock); @@ -1246,6 +1257,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter) release_sub_crq_queue(adapter, adapter->tx_scrq[i]); } + kfree(adapter->tx_scrq); adapter->tx_scrq = NULL; } @@ -1258,6 +1270,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter) release_sub_crq_queue(adapter, adapter->rx_scrq[i]); } + kfree(adapter->rx_scrq); adapter->rx_scrq = NULL; } } @@ -1355,14 +1368,28 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, DMA_TO_DEVICE); } - if (txbuff->last_frag) + if (txbuff->last_frag) { + atomic_dec(&scrq->used); + + if (atomic_read(&scrq->used) <= + (adapter->req_tx_entries_per_subcrq / 2) && + netif_subqueue_stopped(adapter->netdev, + txbuff->skb)) { + netif_wake_subqueue(adapter->netdev, + scrq->pool_index); + netdev_dbg(adapter->netdev, + "Started queue %d\n", + scrq->pool_index); + } + dev_kfree_skb_any(txbuff->skb); + } adapter->tx_pool[pool].free_map[adapter->tx_pool[pool]. producer_index] = index; adapter->tx_pool[pool].producer_index = (adapter->tx_pool[pool].producer_index + 1) % - adapter->max_tx_entries_per_subcrq; + adapter->req_tx_entries_per_subcrq; } /* remove tx_comp scrq*/ next->tx_comp.first = 0; diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 422824f1f42a8a..1993b42666f73d 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -863,6 +863,7 @@ struct ibmvnic_sub_crq_queue { spinlock_t lock; struct sk_buff *rx_skb_top; struct ibmvnic_adapter *adapter; + atomic_t used; }; struct ibmvnic_long_term_buff { diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 2175cced402f7f..e9af89ad039c6f 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -6274,8 +6274,8 @@ static int e1000e_pm_freeze(struct device *dev) /* Quiesce the device without resetting the hardware */ e1000e_down(adapter, false); e1000_free_irq(adapter); - e1000e_reset_interrupt_capability(adapter); } + e1000e_reset_interrupt_capability(adapter); /* Allow time for pending master requests to run */ e1000e_disable_pcie_master(&adapter->hw); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index e8a8351c8ea998..82a95cc2c8ee38 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -4438,8 +4438,12 @@ static void i40e_napi_enable_all(struct i40e_vsi *vsi) if (!vsi->netdev) return; - for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) - napi_enable(&vsi->q_vectors[q_idx]->napi); + for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { + struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx]; + + if (q_vector->rx.ring || q_vector->tx.ring) + napi_enable(&q_vector->napi); + } } /** @@ -4453,8 +4457,12 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi) if (!vsi->netdev) return; - for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) - napi_disable(&vsi->q_vectors[q_idx]->napi); + for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { + struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx]; + + if (q_vector->rx.ring || q_vector->tx.ring) + napi_disable(&q_vector->napi); + } } /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 
a2cc43d2888801..b1ecc2627a5aee 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -96,7 +96,7 @@ #define IXGBE_MAX_FRAME_BUILD_SKB \ (SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K) - IXGBE_SKB_PAD) #else -#define IGB_MAX_FRAME_BUILD_SKB IXGBE_RXBUFFER_2K +#define IXGBE_MAX_FRAME_BUILD_SKB IXGBE_RXBUFFER_2K #endif /* @@ -929,6 +929,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct ixgbe_adapter *adapter, struct ixgbe_ring *tx_ring); u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter); +void ixgbe_store_key(struct ixgbe_adapter *adapter); void ixgbe_store_reta(struct ixgbe_adapter *adapter); s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index a7574c7b12af06..90fa5bf23d1b5f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -2998,8 +2998,10 @@ static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir, } /* Fill out the rss hash key */ - if (key) + if (key) { memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev)); + ixgbe_store_key(adapter); + } ixgbe_store_reta(adapter); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 060cdce8058f9b..a7a430a7be2cd9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -3473,6 +3473,21 @@ u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter) return 512; } +/** + * ixgbe_store_key - Write the RSS key to HW + * @adapter: device handle + * + * Write the RSS key stored in adapter.rss_key to HW. 
+ */ +void ixgbe_store_key(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < 10; i++) + IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]); +} + /** * ixgbe_store_reta - Write the RETA table to HW * @adapter: device handle @@ -3538,7 +3553,6 @@ static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter) static void ixgbe_setup_reta(struct ixgbe_adapter *adapter) { - struct ixgbe_hw *hw = &adapter->hw; u32 i, j; u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter); u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; @@ -3551,8 +3565,7 @@ static void ixgbe_setup_reta(struct ixgbe_adapter *adapter) rss_i = 4; /* Fill out hash function seeds */ - for (i = 0; i < 10; i++) - IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]); + ixgbe_store_key(adapter); /* Fill out redirection table */ memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl)); @@ -3959,7 +3972,8 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); - if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) + if ((max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) || + (max_frame > IXGBE_MAX_FRAME_BUILD_SKB)) set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); #endif } diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index e8c105164931f3..0e0fa703056595 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -2305,6 +2305,17 @@ static int sync_toggles(struct mlx4_dev *dev) rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)); if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) { /* PCI might be offline */ + + /* If device removal has been requested, + * do not continue retrying. + */ + if (dev->persist->interface_state & + MLX4_INTERFACE_STATE_NOWAIT) { + mlx4_warn(dev, + "communication channel is offline\n"); + return -EIO; + } + msleep(100); wr_toggle = swab32(readl(&priv->mfunc.comm-> slave_write)); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 21377c315083b6..703205475524d6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -1940,6 +1940,14 @@ static int mlx4_comm_check_offline(struct mlx4_dev *dev) (u32)(1 << COMM_CHAN_OFFLINE_OFFSET)); if (!offline_bit) return 0; + + /* If device removal has been requested, + * do not continue retrying. + */ + if (dev->persist->interface_state & + MLX4_INTERFACE_STATE_NOWAIT) + break; + /* There are cases as part of AER/Reset flow that PF needs * around 100 msec to load. 
We therefore sleep for 100 msec * to allow other tasks to make use of that CPU during this @@ -3955,6 +3963,9 @@ static void mlx4_remove_one(struct pci_dev *pdev) struct devlink *devlink = priv_to_devlink(priv); int active_vfs = 0; + if (mlx4_is_slave(dev)) + persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT; + mutex_lock(&persist->interface_state_mutex); persist->interface_state |= MLX4_INTERFACE_STATE_DELETION; mutex_unlock(&persist->interface_state_mutex); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index ddb4ca4ff930a7..117170014e8897 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -14,6 +14,7 @@ config MLX5_CORE config MLX5_CORE_EN bool "Mellanox Technologies ConnectX-4 Ethernet support" depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE + depends on IPV6=y || IPV6=n || MLX5_CORE=m imply PTP_1588_CLOCK default n ---help--- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index caa837e5e2b991..a380353a78c2d3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -361,6 +361,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_QUERY_VPORT_COUNTER: case MLX5_CMD_OP_ALLOC_Q_COUNTER: case MLX5_CMD_OP_QUERY_Q_COUNTER: + case MLX5_CMD_OP_SET_RATE_LIMIT: + case MLX5_CMD_OP_QUERY_RATE_LIMIT: case MLX5_CMD_OP_ALLOC_PD: case MLX5_CMD_OP_ALLOC_UAR: case MLX5_CMD_OP_CONFIG_INT_MODERATION: @@ -497,6 +499,8 @@ const char *mlx5_command_str(int command) MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER); MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER); MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER); + MLX5_COMMAND_STR_CASE(SET_RATE_LIMIT); + MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT); MLX5_COMMAND_STR_CASE(ALLOC_PD); MLX5_COMMAND_STR_CASE(DEALLOC_PD); MLX5_COMMAND_STR_CASE(ALLOC_UAR); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index f6a6ded204f61c..dc52053128bc75 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -928,10 +928,6 @@ void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv); int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev); void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev); u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout); -void mlx5e_add_vxlan_port(struct net_device *netdev, - struct udp_tunnel_info *ti); -void mlx5e_del_vxlan_port(struct net_device *netdev, - struct udp_tunnel_info *ti); int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev, void *sp); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index 0523ed47f597c7..8fa23f6a1f67f6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -302,6 +302,9 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode) struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5e_dcbx *dcbx = &priv->dcbx; + if (mode & DCB_CAP_DCBX_LLD_MANAGED) + return 1; + if ((!mode) && MLX5_CAP_GEN(priv->mdev, dcbx)) { if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_AUTO) return 0; @@ -315,13 +318,10 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode) return 1; } - if 
(mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev))) + if (!(mode & DCB_CAP_DCBX_HOST)) return 1; - if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || - !(mode & DCB_CAP_DCBX_VER_CEE) || - !(mode & DCB_CAP_DCBX_VER_IEEE) || - !(mode & DCB_CAP_DCBX_HOST)) + if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev))) return 1; return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 8ef64c4db2c21a..66c133757a5ee8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3100,8 +3100,8 @@ static int mlx5e_get_vf_stats(struct net_device *dev, vf_stats); } -void mlx5e_add_vxlan_port(struct net_device *netdev, - struct udp_tunnel_info *ti) +static void mlx5e_add_vxlan_port(struct net_device *netdev, + struct udp_tunnel_info *ti) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -3114,8 +3114,8 @@ void mlx5e_add_vxlan_port(struct net_device *netdev, mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1); } -void mlx5e_del_vxlan_port(struct net_device *netdev, - struct udp_tunnel_info *ti) +static void mlx5e_del_vxlan_port(struct net_device *netdev, + struct udp_tunnel_info *ti) { struct mlx5e_priv *priv = netdev_priv(netdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 2c864574a9d5fa..f621373bd7a564 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -393,8 +393,6 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = { .ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name, .ndo_setup_tc = mlx5e_rep_ndo_setup_tc, .ndo_get_stats64 = mlx5e_rep_get_stats, - .ndo_udp_tunnel_add = mlx5e_add_vxlan_port, - .ndo_udp_tunnel_del = mlx5e_del_vxlan_port, .ndo_has_offload_stats = mlx5e_has_offload_stats, .ndo_get_offload_stats = mlx5e_get_offload_stats, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 3d371688fbbbf3..bafcb349a50c6d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -601,6 +601,10 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, if (lro_num_seg > 1) { mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt); skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg); + /* Subtract one since we already counted this as one + * "regular" packet in mlx5e_complete_rx_cqe() + */ + rq->stats.packets += lro_num_seg - 1; rq->stats.lro_packets++; rq->stats.lro_bytes += cqe_bcnt; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c index 31e3cb7ee5feeb..5621dcfda4f186 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c @@ -204,9 +204,6 @@ mlx5e_test_loopback_validate(struct sk_buff *skb, struct iphdr *iph; /* We are only going to peek, no need to clone the SKB */ - if (skb->protocol != htons(ETH_P_IP)) - goto out; - if (MLX5E_TEST_PKT_SIZE - ETH_HLEN > skb_headlen(skb)) goto out; @@ -249,7 +246,7 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv, lbtp->loopback_ok = false; init_completion(&lbtp->comp); - lbtp->pt.type = htons(ETH_P_ALL); + lbtp->pt.type = htons(ETH_P_IP); lbtp->pt.func = mlx5e_test_loopback_validate; lbtp->pt.dev = priv->netdev; lbtp->pt.af_packet_priv = lbtp; diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 44406a5ec15d96..fade7233dac525 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -48,9 +48,14 @@ #include "eswitch.h" #include "vxlan.h" +enum { + MLX5E_TC_FLOW_ESWITCH = BIT(0), +}; + struct mlx5e_tc_flow { struct rhash_head node; u64 cookie; + u8 flags; struct mlx5_flow_handle *rule; struct list_head encap; /* flows sharing the same encap */ struct mlx5_esw_flow_attr *attr; @@ -128,6 +133,23 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, return rule; } +static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow) +{ + struct mlx5_fc *counter = NULL; + + if (!IS_ERR(flow->rule)) { + counter = mlx5_flow_rule_counter(flow->rule); + mlx5_del_flow_rules(flow->rule); + mlx5_fc_destroy(priv->mdev, counter); + } + + if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) { + mlx5_destroy_flow_table(priv->fs.tc.t); + priv->fs.tc.t = NULL; + } +} + static struct mlx5_flow_handle * mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, @@ -144,7 +166,24 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, } static void mlx5e_detach_encap(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow) { + struct mlx5e_tc_flow *flow); + +static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + + mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->attr); + + mlx5_eswitch_del_vlan_action(esw, flow->attr); + + if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) + mlx5e_detach_encap(priv, flow); +} + +static void mlx5e_detach_encap(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow) +{ struct list_head *next = flow->encap.next; list_del(&flow->encap); @@ -168,25 +207,10 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv, static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow) { - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct mlx5_fc *counter = NULL; - - if (!IS_ERR(flow->rule)) { - counter = mlx5_flow_rule_counter(flow->rule); - mlx5_del_flow_rules(flow->rule); - mlx5_fc_destroy(priv->mdev, counter); - } - - if (esw && esw->mode == SRIOV_OFFLOADS) { - mlx5_eswitch_del_vlan_action(esw, flow->attr); - if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) - mlx5e_detach_encap(priv, flow); - } - - if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) { - mlx5_destroy_flow_table(priv->fs.tc.t); - priv->fs.tc.t = NULL; - } + if (flow->flags & MLX5E_TC_FLOW_ESWITCH) + mlx5e_tc_del_fdb_flow(priv, flow); + else + mlx5e_tc_del_nic_flow(priv, flow); } static void parse_vxlan_attr(struct mlx5_flow_spec *spec, @@ -243,12 +267,15 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, skb_flow_dissector_target(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS, f->mask); + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw); + struct mlx5e_priv *up_priv = netdev_priv(up_dev); /* Full udp dst port must be given */ if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst))) goto vxlan_match_offload_err; - if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) && + if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) && MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) parse_vxlan_attr(spec, f); else { @@ -598,6 +625,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, } static int 
parse_cls_flower(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, struct mlx5_flow_spec *spec, struct tc_cls_flower_offload *f) { @@ -609,7 +637,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv, err = __parse_cls_flower(priv, spec, f, &min_inline); - if (!err && esw->mode == SRIOV_OFFLOADS && + if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH) && rep->vport != FDB_UPLINK_VPORT) { if (min_inline > esw->offloads.inline_mode) { netdev_warn(priv->netdev, @@ -970,6 +998,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, struct mlx5_esw_flow_attr *attr) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw); + struct mlx5e_priv *up_priv = netdev_priv(up_dev); unsigned short family = ip_tunnel_info_af(tun_info); struct ip_tunnel_key *key = &tun_info->key; struct mlx5_encap_entry *e; @@ -990,7 +1020,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, return -EOPNOTSUPP; } - if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) && + if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) && MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) { tunnel_type = MLX5_HEADER_TYPE_VXLAN; } else { @@ -1106,14 +1136,16 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, } if (is_tcf_vlan(a)) { - if (tcf_vlan_action(a) == VLAN_F_POP) { + if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) { attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; - } else if (tcf_vlan_action(a) == VLAN_F_PUSH) { + } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) { if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q)) return -EOPNOTSUPP; attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; attr->vlan = tcf_vlan_push_vid(a); + } else { /* action is TCA_VLAN_ACT_MODIFY */ + return -EOPNOTSUPP; } continue; } @@ -1132,23 +1164,19 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol, struct tc_cls_flower_offload *f) { struct mlx5e_tc_table *tc = &priv->fs.tc; - int err = 0; - bool fdb_flow = false; + int err, attr_size = 0; u32 flow_tag, action; struct mlx5e_tc_flow *flow; struct mlx5_flow_spec *spec; struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + u8 flow_flags = 0; - if (esw && esw->mode == SRIOV_OFFLOADS) - fdb_flow = true; - - if (fdb_flow) - flow = kzalloc(sizeof(*flow) + - sizeof(struct mlx5_esw_flow_attr), - GFP_KERNEL); - else - flow = kzalloc(sizeof(*flow), GFP_KERNEL); + if (esw && esw->mode == SRIOV_OFFLOADS) { + flow_flags = MLX5E_TC_FLOW_ESWITCH; + attr_size = sizeof(struct mlx5_esw_flow_attr); + } + flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL); spec = mlx5_vzalloc(sizeof(*spec)); if (!spec || !flow) { err = -ENOMEM; @@ -1156,12 +1184,13 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol, } flow->cookie = f->cookie; + flow->flags = flow_flags; - err = parse_cls_flower(priv, spec, f); + err = parse_cls_flower(priv, flow, spec, f); if (err < 0) goto err_free; - if (fdb_flow) { + if (flow->flags & MLX5E_TC_FLOW_ESWITCH) { flow->attr = (struct mlx5_esw_flow_attr *)(flow + 1); err = parse_tc_fdb_actions(priv, f->exts, flow); if (err < 0) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index f193128bac4b8c..57f5e2d7ebd1a9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -274,15 +274,18 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) sq->stats.tso_bytes += skb->len - ihs; } + sq->stats.packets += 
skb_shinfo(skb)->gso_segs; num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs; } else { bf = sq->bf_budget && !skb->xmit_more && !skb_shinfo(skb)->nr_frags; ihs = mlx5e_get_inline_hdr_size(sq, skb, bf); + sq->stats.packets++; num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); } + sq->stats.bytes += num_bytes; wi->num_bytes = num_bytes; ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; @@ -381,8 +384,6 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) if (bf) sq->bf_budget--; - sq->stats.packets++; - sq->stats.bytes += num_bytes; return NETDEV_TX_OK; dma_unmap_wqe_err: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 5b78883d565413..ad329b1680b455 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -209,6 +209,7 @@ struct mlx5_esw_offload { struct mlx5_eswitch_rep *vport_reps; DECLARE_HASHTABLE(encap_tbl, 8); u8 inline_mode; + u64 num_flows; }; struct mlx5_eswitch { @@ -271,6 +272,11 @@ struct mlx5_flow_handle * mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec, struct mlx5_esw_flow_attr *attr); +void +mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw, + struct mlx5_flow_handle *rule, + struct mlx5_esw_flow_attr *attr); + struct mlx5_flow_handle * mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 4f5b0d47d5f382..307ec6c5fd3b62 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -93,10 +93,27 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, spec, &flow_act, dest, i); if (IS_ERR(rule)) mlx5_fc_destroy(esw->dev, counter); + else + esw->offloads.num_flows++; return rule; } +void +mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw, + struct mlx5_flow_handle *rule, + struct mlx5_esw_flow_attr *attr) +{ + struct mlx5_fc *counter = NULL; + + if (!IS_ERR(rule)) { + counter = mlx5_flow_rule_counter(rule); + mlx5_del_flow_rules(rule); + mlx5_fc_destroy(esw->dev, counter); + esw->offloads.num_flows--; + } +} + static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val) { struct mlx5_eswitch_rep *rep; @@ -908,6 +925,11 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode) MLX5_CAP_INLINE_MODE_VPORT_CONTEXT) return -EOPNOTSUPP; + if (esw->offloads.num_flows > 0) { + esw_warn(dev, "Can't set inline mode when flows are configured\n"); + return -EOPNOTSUPP; + } + err = esw_inline_mode_from_devlink(mode, &mlx5_mode); if (err) goto out; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 2478516a61e2ea..ded27bb9a3b604 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1136,7 +1136,7 @@ static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft, u32 *match_criteria) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); - struct list_head *prev = ft->node.children.prev; + struct list_head *prev = &ft->node.children; unsigned int candidate_index = 0; struct mlx5_flow_group *fg; void *match_criteria_addr; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c index 
55957246c0e844..b5d5519542e873 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c @@ -294,7 +294,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev, struct netdev_notifier_changeupper_info *info) { struct net_device *upper = info->upper_dev, *ndev_tmp; - struct netdev_lag_upper_info *lag_upper_info; + struct netdev_lag_upper_info *lag_upper_info = NULL; bool is_bonded; int bond_status = 0; int num_slaves = 0; @@ -303,7 +303,8 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev, if (!netif_is_lag_master(upper)) return 0; - lag_upper_info = info->upper_info; + if (info->linking) + lag_upper_info = info->upper_info; /* The event may still be of interest if the slave does not belong to * us, but is enslaved to a master which has one or more of our netdevs diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index c4242a4e81309f..60154a175bd386 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -87,7 +87,7 @@ static struct mlx5_profile profile[] = { [2] = { .mask = MLX5_PROF_MASK_QP_SIZE | MLX5_PROF_MASK_MR_CACHE, - .log_max_qp = 17, + .log_max_qp = 18, .mr_cache[0] = { .size = 500, .limit = 250 @@ -1352,6 +1352,7 @@ static int init_one(struct pci_dev *pdev, if (err) goto clean_load; + pci_save_state(pdev); return 0; clean_load: @@ -1407,9 +1408,8 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev, mlx5_enter_error_state(dev); mlx5_unload_one(dev, priv, false); - /* In case of kernel call save the pci state and drain the health wq */ + /* In case of kernel call drain the health wq */ if (state) { - pci_save_state(pdev); mlx5_drain_health_wq(dev); mlx5_pci_disable_device(dev); } @@ -1461,6 +1461,7 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev) pci_set_master(pdev); pci_restore_state(pdev); + pci_save_state(pdev); if (wait_vital(pdev)) { dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__); diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 0899e2d310e262..d9616daf8a7056 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -769,7 +769,7 @@ static inline void mlxsw_reg_spvid_pack(char *payload, u8 local_port, u16 pvid) #define MLXSW_REG_SPVM_ID 0x200F #define MLXSW_REG_SPVM_BASE_LEN 0x04 /* base length, without records */ #define MLXSW_REG_SPVM_REC_LEN 0x04 /* record length */ -#define MLXSW_REG_SPVM_REC_MAX_COUNT 256 +#define MLXSW_REG_SPVM_REC_MAX_COUNT 255 #define MLXSW_REG_SPVM_LEN (MLXSW_REG_SPVM_BASE_LEN + \ MLXSW_REG_SPVM_REC_LEN * MLXSW_REG_SPVM_REC_MAX_COUNT) @@ -1702,7 +1702,7 @@ static inline void mlxsw_reg_sfmr_pack(char *payload, #define MLXSW_REG_SPVMLR_ID 0x2020 #define MLXSW_REG_SPVMLR_BASE_LEN 0x04 /* base length, without records */ #define MLXSW_REG_SPVMLR_REC_LEN 0x04 /* record length */ -#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 256 +#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 255 #define MLXSW_REG_SPVMLR_LEN (MLXSW_REG_SPVMLR_BASE_LEN + \ MLXSW_REG_SPVMLR_REC_LEN * \ MLXSW_REG_SPVMLR_REC_MAX_COUNT) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 22ab429253778d..ae6cccc666e461 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -303,11 +303,11 @@ void 
mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev, ingress, MLXSW_SP_ACL_PROFILE_FLOWER); - if (WARN_ON(IS_ERR(ruleset))) + if (IS_ERR(ruleset)) return; rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie); - if (!WARN_ON(!rule)) { + if (rule) { mlxsw_sp_acl_rule_del(mlxsw_sp, rule); mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index d7ac22d7f94029..bd8de6b9be718f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -441,30 +441,40 @@ static int mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr, struct mlxsw_sp_prefix_usage *req_prefix_usage) { - struct mlxsw_sp_lpm_tree *lpm_tree; + struct mlxsw_sp_lpm_tree *lpm_tree = vr->lpm_tree; + struct mlxsw_sp_lpm_tree *new_tree; + int err; - if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, - &vr->lpm_tree->prefix_usage)) + if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, &lpm_tree->prefix_usage)) return 0; - lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage, + new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage, vr->proto, false); - if (IS_ERR(lpm_tree)) { + if (IS_ERR(new_tree)) { /* We failed to get a tree according to the required * prefix usage. However, the current tree might be still good * for us if our requirement is subset of the prefixes used * in the tree. */ if (mlxsw_sp_prefix_usage_subset(req_prefix_usage, - &vr->lpm_tree->prefix_usage)) + &lpm_tree->prefix_usage)) return 0; - return PTR_ERR(lpm_tree); + return PTR_ERR(new_tree); } - mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr); - mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree); + /* Prevent packet loss by overwriting existing binding */ + vr->lpm_tree = new_tree; + err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr); + if (err) + goto err_tree_bind; + mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree); + + return 0; + +err_tree_bind: vr->lpm_tree = lpm_tree; - return mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr); + mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree); + return err; } static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 06c9f4100cb9bd..6ad44be08b3307 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -25,6 +25,7 @@ #include #include #include +#include #include "moxart_ether.h" @@ -278,6 +279,13 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget) return rx; } +static int moxart_tx_queue_space(struct net_device *ndev) +{ + struct moxart_mac_priv_t *priv = netdev_priv(ndev); + + return CIRC_SPACE(priv->tx_head, priv->tx_tail, TX_DESC_NUM); +} + static void moxart_tx_finished(struct net_device *ndev) { struct moxart_mac_priv_t *priv = netdev_priv(ndev); @@ -297,6 +305,9 @@ static void moxart_tx_finished(struct net_device *ndev) tx_tail = TX_NEXT(tx_tail); } priv->tx_tail = tx_tail; + if (netif_queue_stopped(ndev) && + moxart_tx_queue_space(ndev) >= TX_WAKE_THRESHOLD) + netif_wake_queue(ndev); } static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id) @@ -324,13 +335,18 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) struct moxart_mac_priv_t *priv = netdev_priv(ndev); void *desc; unsigned int len; - unsigned int tx_head = priv->tx_head; + unsigned int 
tx_head; u32 txdes1; int ret = NETDEV_TX_BUSY; + spin_lock_irq(&priv->txlock); + + tx_head = priv->tx_head; desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head); - spin_lock_irq(&priv->txlock); + if (moxart_tx_queue_space(ndev) == 1) + netif_stop_queue(ndev); + if (moxart_desc_read(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) { net_dbg_ratelimited("no TX space for packet\n"); priv->stats.tx_dropped++; diff --git a/drivers/net/ethernet/moxa/moxart_ether.h b/drivers/net/ethernet/moxa/moxart_ether.h index 93a9563ac7c673..afc32ec998c043 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.h +++ b/drivers/net/ethernet/moxa/moxart_ether.h @@ -59,6 +59,7 @@ #define TX_NEXT(N) (((N) + 1) & (TX_DESC_NUM_MASK)) #define TX_BUF_SIZE 1600 #define TX_BUF_SIZE_MAX (TX_DESC1_BUF_SIZE_MASK+1) +#define TX_WAKE_THRESHOLD 16 #define RX_DESC_NUM 64 #define RX_DESC_NUM_MASK (RX_DESC_NUM-1) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 074259cc8e066d..a41377e26c07d0 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1498,7 +1498,7 @@ nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, txbuf->real_len = pkt_len; dma_sync_single_for_device(&nn->pdev->dev, rxbuf->dma_addr + pkt_off, - pkt_len, DMA_TO_DEVICE); + pkt_len, DMA_BIDIRECTIONAL); /* Build TX descriptor */ txd = &tx_ring->txds[wr_idx]; @@ -1611,7 +1611,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) dma_sync_single_for_cpu(&nn->pdev->dev, rxbuf->dma_addr + pkt_off, - pkt_len, DMA_FROM_DEVICE); + pkt_len, DMA_BIDIRECTIONAL); act = nfp_net_run_xdp(xdp_prog, rxbuf->frag + data_off, pkt_len); switch (act) { @@ -2198,7 +2198,8 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn) nfp_net_write_mac_addr(nn); nn_writel(nn, NFP_NET_CFG_MTU, nn->netdev->mtu); - nn_writel(nn, NFP_NET_CFG_FLBUFSZ, nn->fl_bufsz); + nn_writel(nn, NFP_NET_CFG_FLBUFSZ, + nn->fl_bufsz - NFP_NET_RX_BUF_NON_DATA); /* Enable device */ new_ctrl |= NFP_NET_CFG_CTRL_ENABLE; @@ -3274,9 +3275,10 @@ void nfp_net_netdev_clean(struct net_device *netdev) { struct nfp_net *nn = netdev_priv(netdev); + unregister_netdev(nn->netdev); + if (nn->xdp_prog) bpf_prog_put(nn->xdp_prog); if (nn->bpf_offload_xdp) nfp_net_xdp_offload(nn, NULL); - unregister_netdev(nn->netdev); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index d42d03df751acb..7e3a6fed3da6d9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -422,8 +422,9 @@ static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn, u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val; u32 cxt_size = CONN_CXT_SIZE(p_hwfn); u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size; + u32 align = elems_per_page * DQ_RANGE_ALIGN; - p_conn->cid_count = roundup(p_conn->cid_count, elems_per_page); + p_conn->cid_count = roundup(p_conn->cid_count, align); } } diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index e2a081ceaf520c..e518f914eab13f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -2389,9 +2389,8 @@ qed_chain_alloc_sanity_check(struct qed_dev *cdev, * size/capacity fields are of a u32 type. 
*/ if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 && - chain_size > 0x10000) || - (cnt_type == QED_CHAIN_CNT_TYPE_U32 && - chain_size > 0x100000000ULL)) { + chain_size > ((u32)U16_MAX + 1)) || + (cnt_type == QED_CHAIN_CNT_TYPE_U32 && chain_size > U32_MAX)) { DP_NOTICE(cdev, "The actual chain size (0x%llx) is larger than the maximal possible value\n", chain_size); diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c index 3a44d6b395fac9..098766f7fe88a6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c @@ -190,6 +190,9 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn, p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring; p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring; p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring; + p_init->ooo_enable = p_params->ooo_enable; + p_init->ll2_rx_queue_id = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + + p_params->ll2_ooo_queue_id; p_init->func_params.log_page_size = p_params->log_page_size; val = p_params->num_tasks; p_init->func_params.num_tasks = cpu_to_le16(val); @@ -786,6 +789,23 @@ static void qed_iscsi_release_connection(struct qed_hwfn *p_hwfn, spin_unlock_bh(&p_hwfn->p_iscsi_info->lock); } +void qed_iscsi_free_connection(struct qed_hwfn *p_hwfn, + struct qed_iscsi_conn *p_conn) +{ + qed_chain_free(p_hwfn->cdev, &p_conn->xhq); + qed_chain_free(p_hwfn->cdev, &p_conn->uhq); + qed_chain_free(p_hwfn->cdev, &p_conn->r2tq); + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(struct tcp_upload_params), + p_conn->tcp_upload_params_virt_addr, + p_conn->tcp_upload_params_phys_addr); + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(struct scsi_terminate_extra_params), + p_conn->queue_cnts_virt_addr, + p_conn->queue_cnts_phys_addr); + kfree(p_conn); +} + struct qed_iscsi_info *qed_iscsi_alloc(struct qed_hwfn *p_hwfn) { struct qed_iscsi_info *p_iscsi_info; @@ -807,6 +827,17 @@ void qed_iscsi_setup(struct qed_hwfn *p_hwfn, void qed_iscsi_free(struct qed_hwfn *p_hwfn, struct qed_iscsi_info *p_iscsi_info) { + struct qed_iscsi_conn *p_conn = NULL; + + while (!list_empty(&p_hwfn->p_iscsi_info->free_list)) { + p_conn = list_first_entry(&p_hwfn->p_iscsi_info->free_list, + struct qed_iscsi_conn, list_entry); + if (p_conn) { + list_del(&p_conn->list_entry); + qed_iscsi_free_connection(p_hwfn, p_conn); + } + } + kfree(p_iscsi_info); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 9a0b9af10a572f..0d3cef409c96d0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -211,6 +211,8 @@ static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn, /* If need to reuse or there's no replacement buffer, repost this */ if (rc) goto out_post; + dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr, + cdev->ll2->rx_size, DMA_FROM_DEVICE); skb = build_skb(buffer->data, 0); if (!skb) { @@ -474,7 +476,7 @@ qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn, static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_conn, union core_rx_cqe_union *p_cqe, - unsigned long lock_flags, + unsigned long *p_lock_flags, bool b_last_cqe) { struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; @@ -495,10 +497,10 @@ static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn, "Mismatch between active_descq and the LL2 Rx chain\n"); list_add_tail(&p_pkt->list_entry, &p_rx->free_descq); - 
spin_unlock_irqrestore(&p_rx->lock, lock_flags); + spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags); qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id, p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe); - spin_lock_irqsave(&p_rx->lock, lock_flags); + spin_lock_irqsave(&p_rx->lock, *p_lock_flags); return 0; } @@ -538,7 +540,8 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie) break; case CORE_RX_CQE_TYPE_REGULAR: rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn, - cqe, flags, b_last_cqe); + cqe, &flags, + b_last_cqe); break; default: rc = -EIO; @@ -968,7 +971,7 @@ static int qed_ll2_start_ooo(struct qed_dev *cdev, { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id; - struct qed_ll2_conn ll2_info; + struct qed_ll2_conn ll2_info = { 0 }; int rc; ll2_info.conn_type = QED_LL2_TYPE_ISCSI_OOO; diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c index 7d731c6cb8923d..378afce58b3f0a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c @@ -159,6 +159,8 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn) if (!p_ooo_info->ooo_history.p_cqes) goto no_history_mem; + p_ooo_info->ooo_history.num_of_cqes = QED_MAX_NUM_OOO_HISTORY_ENTRIES; + return p_ooo_info; no_history_mem: diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h index 6d31f92ef2b634..84ac50f92c9c51 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge.h +++ b/drivers/net/ethernet/qlogic/qlge/qlge.h @@ -1162,8 +1162,8 @@ struct ob_mac_tso_iocb_rsp { struct ib_mac_iocb_rsp { u8 opcode; /* 0x20 */ u8 flags1; -#define IB_MAC_IOCB_RSP_OI 0x01 /* Overide intr delay */ -#define IB_MAC_IOCB_RSP_I 0x02 /* Disble Intr Generation */ +#define IB_MAC_IOCB_RSP_OI 0x01 /* Override intr delay */ +#define IB_MAC_IOCB_RSP_I 0x02 /* Disable Intr Generation */ #define IB_MAC_CSUM_ERR_MASK 0x1c /* A mask to use for csum errs */ #define IB_MAC_IOCB_RSP_TE 0x04 /* Checksum error */ #define IB_MAC_IOCB_RSP_NU 0x08 /* No checksum rcvd */ diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c index 7cd76b6b5cb9f6..2ae85245478087 100644 --- a/drivers/net/ethernet/rocker/rocker_ofdpa.c +++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c @@ -2216,18 +2216,15 @@ static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port, { bool want[OFDPA_CTRL_MAX] = { 0, }; bool prev_ctrls[OFDPA_CTRL_MAX]; - u8 uninitialized_var(prev_state); + u8 prev_state; int err; int i; - if (switchdev_trans_ph_prepare(trans)) { - memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls)); - prev_state = ofdpa_port->stp_state; - } - - if (ofdpa_port->stp_state == state) + prev_state = ofdpa_port->stp_state; + if (prev_state == state) return 0; + memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls)); ofdpa_port->stp_state = state; switch (state) { diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c index ed34196028b8e8..70347720fdf98a 100644 --- a/drivers/net/ethernet/seeq/sgiseeq.c +++ b/drivers/net/ethernet/seeq/sgiseeq.c @@ -807,7 +807,7 @@ static int sgiseeq_probe(struct platform_device *pdev) return err; } -static int __exit sgiseeq_remove(struct platform_device *pdev) +static int sgiseeq_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct sgiseeq_private *sp = netdev_priv(dev); @@ -822,7 +822,7 @@ static int __exit sgiseeq_remove(struct 
platform_device *pdev) static struct platform_driver sgiseeq_driver = { .probe = sgiseeq_probe, - .remove = __exit_p(sgiseeq_remove), + .remove = sgiseeq_remove, .driver = { .name = "sgiseeq", } diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 92e1c6d8b2937e..c60c2d4c646a89 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -828,9 +828,7 @@ static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) static int efx_ef10_link_piobufs(struct efx_nic *efx) { struct efx_ef10_nic_data *nic_data = efx->nic_data; - _MCDI_DECLARE_BUF(inbuf, - max(MC_CMD_LINK_PIOBUF_IN_LEN, - MC_CMD_UNLINK_PIOBUF_IN_LEN)); + MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_PIOBUF_IN_LEN); struct efx_channel *channel; struct efx_tx_queue *tx_queue; unsigned int offset, index; @@ -839,8 +837,6 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx) BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0); BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0); - memset(inbuf, 0, sizeof(inbuf)); - /* Link a buffer to each VI in the write-combining mapping */ for (index = 0; index < nic_data->n_piobufs; ++index) { MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE, @@ -920,6 +916,10 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx) return 0; fail: + /* inbuf was defined for MC_CMD_LINK_PIOBUF. We can use the same + * buffer for MC_CMD_UNLINK_PIOBUF because it's shorter. + */ + BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_IN_LEN < MC_CMD_UNLINK_PIOBUF_IN_LEN); while (index--) { MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE, nic_data->pio_write_vi_base + index); @@ -2183,7 +2183,7 @@ static int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, /* Modify IPv4 header if needed. */ ip->tot_len = 0; ip->check = 0; - ipv4_id = ip->id; + ipv4_id = ntohs(ip->id); } else { /* Modify IPv6 header if needed. 
*/ struct ipv6hdr *ipv6 = ipv6_hdr(skb); diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 334bcc6df6b2ba..50d28261b6b9ea 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -2404,7 +2404,7 @@ static void efx_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *t tnl.type = (u16)efx_tunnel_type; tnl.port = ti->port; - if (efx->type->udp_tnl_add_port) + if (efx->type->udp_tnl_del_port) (void)efx->type->udp_tnl_del_port(efx, tnl); } diff --git a/drivers/net/ethernet/sfc/falcon/falcon.c b/drivers/net/ethernet/sfc/falcon/falcon.c index c6ff0cc5ef1839..93c713c1f627a7 100644 --- a/drivers/net/ethernet/sfc/falcon/falcon.c +++ b/drivers/net/ethernet/sfc/falcon/falcon.c @@ -16,6 +16,8 @@ #include #include #include +#include + #include "net_driver.h" #include "bitfield.h" #include "efx.h" diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c index 69d2d30e5ef13b..ea55abd62ec709 100644 --- a/drivers/net/ethernet/sgi/meth.c +++ b/drivers/net/ethernet/sgi/meth.c @@ -854,7 +854,7 @@ static int meth_probe(struct platform_device *pdev) return 0; } -static int __exit meth_remove(struct platform_device *pdev) +static int meth_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); @@ -866,7 +866,7 @@ static int __exit meth_remove(struct platform_device *pdev) static struct platform_driver meth_driver = { .probe = meth_probe, - .remove = __exit_p(meth_remove), + .remove = meth_remove, .driver = { .name = "meth", } diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 65077c77082a2f..91e9bd7159ab37 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -1535,32 +1535,33 @@ static int smc_close(struct net_device *dev) * Ethtool support */ static int -smc_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd) +smc_ethtool_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct smc_local *lp = netdev_priv(dev); int ret; - cmd->maxtxpkt = 1; - cmd->maxrxpkt = 1; - if (lp->phy_type != 0) { spin_lock_irq(&lp->lock); - ret = mii_ethtool_gset(&lp->mii, cmd); + ret = mii_ethtool_get_link_ksettings(&lp->mii, cmd); spin_unlock_irq(&lp->lock); } else { - cmd->supported = SUPPORTED_10baseT_Half | + u32 supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_TP | SUPPORTED_AUI; if (lp->ctl_rspeed == 10) - ethtool_cmd_speed_set(cmd, SPEED_10); + cmd->base.speed = SPEED_10; else if (lp->ctl_rspeed == 100) - ethtool_cmd_speed_set(cmd, SPEED_100); + cmd->base.speed = SPEED_100; + + cmd->base.autoneg = AUTONEG_DISABLE; + cmd->base.port = 0; + cmd->base.duplex = lp->tcr_cur_mode & TCR_SWFDUP ? + DUPLEX_FULL : DUPLEX_HALF; - cmd->autoneg = AUTONEG_DISABLE; - cmd->transceiver = XCVR_INTERNAL; - cmd->port = 0; - cmd->duplex = lp->tcr_cur_mode & TCR_SWFDUP ? 
DUPLEX_FULL : DUPLEX_HALF; + ethtool_convert_legacy_u32_to_link_mode( + cmd->link_modes.supported, supported); ret = 0; } @@ -1569,24 +1570,26 @@ smc_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd) } static int -smc_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) +smc_ethtool_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct smc_local *lp = netdev_priv(dev); int ret; if (lp->phy_type != 0) { spin_lock_irq(&lp->lock); - ret = mii_ethtool_sset(&lp->mii, cmd); + ret = mii_ethtool_set_link_ksettings(&lp->mii, cmd); spin_unlock_irq(&lp->lock); } else { - if (cmd->autoneg != AUTONEG_DISABLE || - cmd->speed != SPEED_10 || - (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) || - (cmd->port != PORT_TP && cmd->port != PORT_AUI)) + if (cmd->base.autoneg != AUTONEG_DISABLE || + cmd->base.speed != SPEED_10 || + (cmd->base.duplex != DUPLEX_HALF && + cmd->base.duplex != DUPLEX_FULL) || + (cmd->base.port != PORT_TP && cmd->base.port != PORT_AUI)) return -EINVAL; -// lp->port = cmd->port; - lp->ctl_rfduplx = cmd->duplex == DUPLEX_FULL; +// lp->port = cmd->base.port; + lp->ctl_rfduplx = cmd->base.duplex == DUPLEX_FULL; // if (netif_running(dev)) // smc_set_port(dev); @@ -1744,8 +1747,6 @@ static int smc_ethtool_seteeprom(struct net_device *dev, static const struct ethtool_ops smc_ethtool_ops = { - .get_settings = smc_ethtool_getsettings, - .set_settings = smc_ethtool_setsettings, .get_drvinfo = smc_ethtool_getdrvinfo, .get_msglevel = smc_ethtool_getmsglevel, @@ -1755,6 +1756,8 @@ static const struct ethtool_ops smc_ethtool_ops = { .get_eeprom_len = smc_ethtool_geteeprom_len, .get_eeprom = smc_ethtool_geteeprom, .set_eeprom = smc_ethtool_seteeprom, + .get_link_ksettings = smc_ethtool_get_link_ksettings, + .set_link_ksettings = smc_ethtool_set_link_ksettings, }; static const struct net_device_ops smc_netdev_ops = { diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index 296c8efd0038c8..9e631952b86f3d 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -74,15 +74,21 @@ config TI_CPSW will be called cpsw. config TI_CPTS - tristate "TI Common Platform Time Sync (CPTS) Support" + bool "TI Common Platform Time Sync (CPTS) Support" depends on TI_CPSW || TI_KEYSTONE_NETCP - imply PTP_1588_CLOCK + depends on PTP_1588_CLOCK ---help--- This driver supports the Common Platform Time Sync unit of the CPSW Ethernet Switch and Keystone 2 1g/10g Switch Subsystem. The unit can time stamp PTP UDP/IPv4 and Layer 2 packets, and the driver offers a PTP Hardware Clock. 
+config TI_CPTS_MOD + tristate + depends on TI_CPTS + default y if TI_CPSW=y || TI_KEYSTONE_NETCP=y + default m + config TI_KEYSTONE_NETCP tristate "TI Keystone NETCP Core Support" select TI_CPSW_ALE diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile index 1e7c10bf87132c..10e6b0ce51baf3 100644 --- a/drivers/net/ethernet/ti/Makefile +++ b/drivers/net/ethernet/ti/Makefile @@ -12,7 +12,7 @@ obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o obj-$(CONFIG_TI_CPSW_ALE) += cpsw_ale.o -obj-$(CONFIG_TI_CPTS) += cpts.o +obj-$(CONFIG_TI_CPTS_MOD) += cpts.o obj-$(CONFIG_TI_CPSW) += ti_cpsw.o ti_cpsw-y := cpsw.o diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 9f3d9c67e3fe0f..fa674a8bda0c8f 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1267,6 +1267,7 @@ static void soft_reset_slave(struct cpsw_slave *slave) static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) { u32 slave_port; + struct phy_device *phy; struct cpsw_common *cpsw = priv->cpsw; soft_reset_slave(slave); @@ -1300,27 +1301,28 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) 1 << slave_port, 0, 0, ALE_MCAST_FWD_2); if (slave->data->phy_node) { - slave->phy = of_phy_connect(priv->ndev, slave->data->phy_node, + phy = of_phy_connect(priv->ndev, slave->data->phy_node, &cpsw_adjust_link, 0, slave->data->phy_if); - if (!slave->phy) { + if (!phy) { dev_err(priv->dev, "phy \"%s\" not found on slave %d\n", slave->data->phy_node->full_name, slave->slave_num); return; } } else { - slave->phy = phy_connect(priv->ndev, slave->data->phy_id, + phy = phy_connect(priv->ndev, slave->data->phy_id, &cpsw_adjust_link, slave->data->phy_if); - if (IS_ERR(slave->phy)) { + if (IS_ERR(phy)) { dev_err(priv->dev, "phy \"%s\" not found on slave %d, err %ld\n", slave->data->phy_id, slave->slave_num, - PTR_ERR(slave->phy)); - slave->phy = NULL; + PTR_ERR(phy)); return; } } + slave->phy = phy; + phy_attached_info(slave->phy); phy_start(slave->phy); @@ -1817,6 +1819,8 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev) } cpsw_intr_enable(cpsw); + netif_trans_update(ndev); + netif_tx_wake_all_queues(ndev); } static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c index b75d9cdcfb0c41..ae48c809bac9fe 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c @@ -45,6 +45,8 @@ MODULE_DESCRIPTION("FUJITSU Extended Socket Network Device Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); +#define ACPI_MOTHERBOARD_RESOURCE_HID "PNP0C02" + static int fjes_request_irq(struct fjes_adapter *); static void fjes_free_irq(struct fjes_adapter *); @@ -78,7 +80,7 @@ static void fjes_rx_irq(struct fjes_adapter *, int); static int fjes_poll(struct napi_struct *, int); static const struct acpi_device_id fjes_acpi_ids[] = { - {"PNP0C02", 0}, + {ACPI_MOTHERBOARD_RESOURCE_HID, 0}, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids); @@ -115,18 +117,17 @@ static struct resource fjes_resource[] = { }, }; -static int fjes_acpi_add(struct acpi_device *device) +static bool is_extended_socket_device(struct acpi_device *device) { struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; char str_buf[sizeof(FJES_ACPI_SYMBOL) + 1]; - struct platform_device *plat_dev; union acpi_object *str; acpi_status status; int result; status = 
acpi_evaluate_object(device->handle, "_STR", NULL, &buffer); if (ACPI_FAILURE(status)) - return -ENODEV; + return false; str = buffer.pointer; result = utf16s_to_utf8s((wchar_t *)str->string.pointer, @@ -136,10 +137,42 @@ static int fjes_acpi_add(struct acpi_device *device) if (strncmp(FJES_ACPI_SYMBOL, str_buf, strlen(FJES_ACPI_SYMBOL)) != 0) { kfree(buffer.pointer); - return -ENODEV; + return false; } kfree(buffer.pointer); + return true; +} + +static int acpi_check_extended_socket_status(struct acpi_device *device) +{ + unsigned long long sta; + acpi_status status; + + status = acpi_evaluate_integer(device->handle, "_STA", NULL, &sta); + if (ACPI_FAILURE(status)) + return -ENODEV; + + if (!((sta & ACPI_STA_DEVICE_PRESENT) && + (sta & ACPI_STA_DEVICE_ENABLED) && + (sta & ACPI_STA_DEVICE_UI) && + (sta & ACPI_STA_DEVICE_FUNCTIONING))) + return -ENODEV; + + return 0; +} + +static int fjes_acpi_add(struct acpi_device *device) +{ + struct platform_device *plat_dev; + acpi_status status; + + if (!is_extended_socket_device(device)) + return -ENODEV; + + if (acpi_check_extended_socket_status(device)) + return -ENODEV; + status = acpi_walk_resources(device->handle, METHOD_NAME__CRS, fjes_get_acpi_resource, fjes_resource); if (ACPI_FAILURE(status)) @@ -1316,7 +1349,7 @@ static void fjes_netdev_setup(struct net_device *netdev) netdev->min_mtu = fjes_support_mtu[0]; netdev->max_mtu = fjes_support_mtu[3]; netdev->flags |= IFF_BROADCAST; - netdev->features |= NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_FILTER; + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; } static void fjes_irq_watch_task(struct work_struct *work) @@ -1473,11 +1506,44 @@ static void fjes_watch_unshare_task(struct work_struct *work) } } +static acpi_status +acpi_find_extended_socket_device(acpi_handle obj_handle, u32 level, + void *context, void **return_value) +{ + struct acpi_device *device; + bool *found = context; + int result; + + result = acpi_bus_get_device(obj_handle, &device); + if (result) + return AE_OK; + + if (strcmp(acpi_device_hid(device), ACPI_MOTHERBOARD_RESOURCE_HID)) + return AE_OK; + + if (!is_extended_socket_device(device)) + return AE_OK; + + if (acpi_check_extended_socket_status(device)) + return AE_OK; + + *found = true; + return AE_CTRL_TERMINATE; +} + /* fjes_init_module - Driver Registration Routine */ static int __init fjes_init_module(void) { + bool found = false; int result; + acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, + acpi_find_extended_socket_device, NULL, &found, + NULL); + + if (!found) + return -ENODEV; + pr_info("%s - version %s - %s\n", fjes_driver_string, fjes_driver_version, fjes_copyright); diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 45301cb98bc1c2..7074b40ebd7f8e 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -881,12 +881,14 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) info = &geneve->info; } + rcu_read_lock(); #if IS_ENABLED(CONFIG_IPV6) if (info->mode & IP_TUNNEL_INFO_IPV6) err = geneve6_xmit_skb(skb, dev, geneve, info); else #endif err = geneve_xmit_skb(skb, dev, geneve, info); + rcu_read_unlock(); if (likely(!err)) return NETDEV_TX_OK; diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index d3e73ac158aee6..f9f3dba7a58800 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -700,6 +700,8 @@ struct net_device_context { u32 tx_checksum_mask; + u32 tx_send_table[VRSS_SEND_TAB_SIZE]; + /* Ethtool settings */ u8 duplex; u32 speed; @@ 
-757,7 +759,6 @@ struct netvsc_device { struct nvsp_message revoke_packet; - u32 send_table[VRSS_SEND_TAB_SIZE]; u32 max_chn; u32 num_chn; spinlock_t sc_lock; /* Protects num_sc_offered variable */ diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index d35ebd993b3852..8dd0b87703288c 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -1136,15 +1136,11 @@ static void netvsc_receive(struct net_device *ndev, static void netvsc_send_table(struct hv_device *hdev, struct nvsp_message *nvmsg) { - struct netvsc_device *nvscdev; struct net_device *ndev = hv_get_drvdata(hdev); + struct net_device_context *net_device_ctx = netdev_priv(ndev); int i; u32 count, *tab; - nvscdev = get_outbound_net_device(hdev); - if (!nvscdev) - return; - count = nvmsg->msg.v5_msg.send_table.count; if (count != VRSS_SEND_TAB_SIZE) { netdev_err(ndev, "Received wrong send-table size:%u\n", count); @@ -1155,7 +1151,7 @@ static void netvsc_send_table(struct hv_device *hdev, nvmsg->msg.v5_msg.send_table.offset); for (i = 0; i < count; i++) - nvscdev->send_table[i] = tab[i]; + net_device_ctx->tx_send_table[i] = tab[i]; } static void netvsc_send_vf(struct net_device_context *net_device_ctx, @@ -1235,8 +1231,11 @@ void netvsc_channel_cb(void *context) return; net_device = net_device_to_netvsc_device(ndev); - if (unlikely(net_device->destroy) && - netvsc_channel_idle(net_device, q_idx)) + if (unlikely(!net_device)) + return; + + if (unlikely(net_device->destroy && + netvsc_channel_idle(net_device, q_idx))) return; /* commit_rd_index() -> hv_signal_on_read() needs this. */ diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 2d3cdb026a9959..5ede87f30463e8 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -206,17 +206,15 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback) { struct net_device_context *net_device_ctx = netdev_priv(ndev); - struct netvsc_device *nvsc_dev = net_device_ctx->nvdev; + unsigned int num_tx_queues = ndev->real_num_tx_queues; struct sock *sk = skb->sk; int q_idx = sk_tx_queue_get(sk); - if (q_idx < 0 || skb->ooo_okay || - q_idx >= ndev->real_num_tx_queues) { + if (q_idx < 0 || skb->ooo_okay || q_idx >= num_tx_queues) { u16 hash = __skb_tx_hash(ndev, skb, VRSS_SEND_TAB_SIZE); int new_idx; - new_idx = nvsc_dev->send_table[hash] - % nvsc_dev->num_chn; + new_idx = net_device_ctx->tx_send_table[hash] % num_tx_queues; if (q_idx != new_idx && sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache)) @@ -225,9 +223,6 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, q_idx = new_idx; } - if (unlikely(!nvsc_dev->chan_table[q_idx].channel)) - q_idx = 0; - return q_idx; } @@ -859,15 +854,22 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) if (ret) goto out; + memset(&device_info, 0, sizeof(device_info)); + device_info.ring_size = ring_size; + device_info.num_chn = nvdev->num_chn; + device_info.max_num_vrss_chns = nvdev->num_chn; + ndevctx->start_remove = true; rndis_filter_device_remove(hdev, nvdev); + /* 'nvdev' has been freed in rndis_filter_device_remove() -> + * netvsc_device_remove () -> free_netvsc_device(). + * We mustn't access it before it's re-created in + * rndis_filter_device_add() -> netvsc_device_add(). 
+ */ + ndev->mtu = mtu; - memset(&device_info, 0, sizeof(device_info)); - device_info.ring_size = ring_size; - device_info.num_chn = nvdev->num_chn; - device_info.max_num_vrss_chns = nvdev->num_chn; rndis_filter_device_add(hdev, &device_info); out: diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c index 6e8f616be48eff..1dba16bc7f8d77 100644 --- a/drivers/net/irda/pxaficp_ir.c +++ b/drivers/net/irda/pxaficp_ir.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c index 42da094b68ddfa..7ee51487953155 100644 --- a/drivers/net/irda/stir4200.c +++ b/drivers/net/irda/stir4200.c @@ -40,6 +40,7 @@ #include #include +#include #include #include #include diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c index ffedad2a360afb..15b92008625163 100644 --- a/drivers/net/irda/vlsi_ir.c +++ b/drivers/net/irda/vlsi_ir.c @@ -418,8 +418,9 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr memset(rd, 0, sizeof(*rd)); rd->hw = hwmap + i; rd->buf = kmalloc(len, GFP_KERNEL|GFP_DMA); - if (rd->buf == NULL || - !(busaddr = pci_map_single(pdev, rd->buf, len, dir))) { + if (rd->buf) + busaddr = pci_map_single(pdev, rd->buf, len, dir); + if (rd->buf == NULL || pci_dma_mapping_error(pdev, busaddr)) { if (rd->buf) { net_err_ratelimited("%s: failed to create PCI-MAP for %p\n", __func__, rd->buf); @@ -430,8 +431,7 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr rd = r->rd + j; busaddr = rd_get_addr(rd); rd_set_addr_status(rd, 0, 0); - if (busaddr) - pci_unmap_single(pdev, busaddr, len, dir); + pci_unmap_single(pdev, busaddr, len, dir); kfree(rd->buf); rd->buf = NULL; } diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index a4bfc10b61ddd7..da85057680d657 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -9,7 +9,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index f9d0fa315a4762..272b051a019975 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -1883,17 +1883,6 @@ static int m88e1510_probe(struct phy_device *phydev) return m88e1510_hwmon_probe(phydev); } -static void marvell_remove(struct phy_device *phydev) -{ -#ifdef CONFIG_HWMON - - struct marvell_priv *priv = phydev->priv; - - if (priv && priv->hwmon_dev) - hwmon_device_unregister(priv->hwmon_dev); -#endif -} - static struct phy_driver marvell_drivers[] = { { .phy_id = MARVELL_PHY_ID_88E1101, @@ -1974,7 +1963,6 @@ static struct phy_driver marvell_drivers[] = { .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, .probe = &m88e1121_probe, - .remove = &marvell_remove, .config_init = &m88e1121_config_init, .config_aneg = &m88e1121_config_aneg, .read_status = &marvell_read_status, @@ -2087,7 +2075,6 @@ static struct phy_driver marvell_drivers[] = { .features = PHY_GBIT_FEATURES | SUPPORTED_FIBRE, .flags = PHY_HAS_INTERRUPT, .probe = &m88e1510_probe, - .remove = &marvell_remove, .config_init = &m88e1510_config_init, .config_aneg = &m88e1510_config_aneg, .read_status = &marvell_read_status, @@ -2109,7 +2096,6 @@ static struct phy_driver marvell_drivers[] = { .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, .probe = m88e1510_probe, - .remove = &marvell_remove, .config_init = &marvell_config_init, .config_aneg = &m88e1510_config_aneg, .read_status = &marvell_read_status, @@ -2127,7 +2113,6 @@ static struct phy_driver 
marvell_drivers[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1545", .probe = m88e1510_probe, - .remove = &marvell_remove, .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = &marvell_config_init, diff --git a/drivers/net/phy/mdio-boardinfo.c b/drivers/net/phy/mdio-boardinfo.c index 6b988f77da08fc..61941e29daae85 100644 --- a/drivers/net/phy/mdio-boardinfo.c +++ b/drivers/net/phy/mdio-boardinfo.c @@ -84,3 +84,4 @@ int mdiobus_register_board_info(const struct mdio_board_info *info, return 0; } +EXPORT_SYMBOL(mdiobus_register_board_info); diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 1be69d8bc90948..a2bfc82e95d70b 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -681,7 +681,7 @@ void phy_stop_machine(struct phy_device *phydev) cancel_delayed_work_sync(&phydev->state_queue); mutex_lock(&phydev->lock); - if (phydev->state > PHY_UP) + if (phydev->state > PHY_UP && phydev->state != PHY_HALTED) phydev->state = PHY_UP; mutex_unlock(&phydev->lock); } diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index daec6555f3b108..5198ccfa347f8b 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1864,7 +1864,7 @@ static struct phy_driver genphy_driver[] = { .phy_id = 0xffffffff, .phy_id_mask = 0xffffffff, .name = "Generic PHY", - .soft_reset = genphy_soft_reset, + .soft_reset = genphy_no_soft_reset, .config_init = genphy_config_init, .features = PHY_GBIT_FEATURES | SUPPORTED_MII | SUPPORTED_AUI | SUPPORTED_FIBRE | diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c index 93ffedfa299412..1e2d4f1179da31 100644 --- a/drivers/net/phy/spi_ks8995.c +++ b/drivers/net/phy/spi_ks8995.c @@ -491,13 +491,14 @@ static int ks8995_probe(struct spi_device *spi) if (err) return err; - ks->regs_attr.size = ks->chip->regs_size; memcpy(&ks->regs_attr, &ks8995_registers_attr, sizeof(ks->regs_attr)); + ks->regs_attr.size = ks->chip->regs_size; err = ks8995_reset(ks); if (err) return err; + sysfs_attr_init(&ks->regs_attr.attr); err = sysfs_create_bin_file(&spi->dev.kobj, &ks->regs_attr); if (err) { dev_err(&spi->dev, "unable to create sysfs file, err=%d\n", diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index a411b43a69eb43..f9c0e62716eaa7 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -24,6 +24,7 @@ #include #include +#include #include #include #include diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c index 08db4d687533c7..1da31dc47f8638 100644 --- a/drivers/net/slip/slip.c +++ b/drivers/net/slip/slip.c @@ -66,7 +66,7 @@ #include #include -#include +#include #include #include #include diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 35b55a2fa1a154..4d4173d25dd0af 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 4a24b5d15f5a5d..1b52520715aec6 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -2072,6 +2072,7 @@ static int team_dev_type_check_change(struct net_device *dev, static void team_setup(struct net_device *dev) { ether_setup(dev); + dev->max_mtu = ETH_MAX_MTU; dev->netdev_ops = &team_netdev_ops; dev->ethtool_ops = &team_ethtool_ops; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 30863e378925b3..cc88cd7856f5e5 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -44,6 +44,7 @@ #include #include 
#include +#include #include #include #include @@ -821,7 +822,18 @@ static void tun_net_uninit(struct net_device *dev) /* Net device open. */ static int tun_net_open(struct net_device *dev) { + struct tun_struct *tun = netdev_priv(dev); + int i; + netif_tx_start_all_queues(dev); + + for (i = 0; i < tun->numqueues; i++) { + struct tun_file *tfile; + + tfile = rtnl_dereference(tun->tfiles[i]); + tfile->socket.sk->sk_write_space(tfile->socket.sk); + } + return 0; } @@ -1102,9 +1114,10 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait) if (!skb_array_empty(&tfile->tx_array)) mask |= POLLIN | POLLRDNORM; - if (sock_writeable(sk) || - (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) && - sock_writeable(sk))) + if (tun->dev->flags & IFF_UP && + (sock_writeable(sk) || + (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) && + sock_writeable(sk)))) mask |= POLLOUT | POLLWRNORM; if (tun->dev->reg_state != NETREG_REGISTERED) @@ -1918,6 +1931,8 @@ static int set_offload(struct tun_struct *tun, unsigned long arg) return -EINVAL; tun->set_features = features; + tun->dev->wanted_features &= ~TUN_USER_FEATURES; + tun->dev->wanted_features |= features; netdev_update_features(tun->dev); return 0; @@ -2569,7 +2584,6 @@ static int __init tun_init(void) int ret = 0; pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION); - pr_info("%s\n", DRV_COPYRIGHT); ret = rtnl_link_register(&tun_link_ops); if (ret) { diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index 6e98ede997d3f0..0dd510604118bc 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -346,7 +346,7 @@ static int ax88772_reset(struct usbnet *dev) if (ret < 0) goto out; - asix_write_medium_mode(dev, AX88772_MEDIUM_DEFAULT, 0); + ret = asix_write_medium_mode(dev, AX88772_MEDIUM_DEFAULT, 0); if (ret < 0) goto out; diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index f5552aaaa77a59..f3ae88fdf332e8 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -532,6 +532,7 @@ static const struct driver_info wwan_info = { #define LENOVO_VENDOR_ID 0x17ef #define NVIDIA_VENDOR_ID 0x0955 #define HP_VENDOR_ID 0x03f0 +#define MICROSOFT_VENDOR_ID 0x045e static const struct usb_device_id products[] = { /* BLACKLIST !! @@ -761,6 +762,20 @@ static const struct usb_device_id products[] = { .driver_info = 0, }, +/* Microsoft Surface 2 dock (based on Realtek RTL8152) */ +{ + USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07ab, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), + .driver_info = 0, +}, + +/* Microsoft Surface 3 dock (based on Realtek RTL8153) */ +{ + USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07c6, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), + .driver_info = 0, +}, + /* WHITELIST!!! * * CDC Ether uses two interfaces, not necessarily consecutive. 
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index e7b516342678d5..4f2e8141dbe2e5 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -52,7 +52,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include +#include #include #include #include diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 24d5272cdce510..156f7f85e4860d 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -11,6 +11,7 @@ */ #include +#include #include #include #include @@ -579,6 +580,10 @@ static const struct usb_device_id products[] = { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69), .driver_info = (unsigned long)&qmi_wwan_info, }, + { /* Motorola Mapphone devices with MDM6600 */ + USB_VENDOR_AND_INTERFACE_INFO(0x22b8, USB_CLASS_VENDOR_SPEC, 0xfb, 0xff), + .driver_info = (unsigned long)&qmi_wwan_info, + }, /* 2. Combined interface devices matching on class+protocol */ { /* Huawei E367 and possibly others in "Windows mode" */ @@ -924,6 +929,8 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ {QMI_FIXED_INTF(0x413c, 0x81b1, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */ {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ + {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */ + {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */ {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 986243c932ccd6..07f788c49d573f 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -32,7 +32,7 @@ #define NETNEXT_VERSION "08" /* Information for net */ -#define NET_VERSION "8" +#define NET_VERSION "9" #define DRIVER_VERSION "v1." NETNEXT_VERSION "." 
NET_VERSION #define DRIVER_AUTHOR "Realtek linux nic maintainers " @@ -501,6 +501,8 @@ enum rtl_register_content { #define RTL8153_RMS RTL8153_MAX_PACKET #define RTL8152_TX_TIMEOUT (5 * HZ) #define RTL8152_NAPI_WEIGHT 64 +#define rx_reserved_size(x) ((x) + VLAN_ETH_HLEN + CRC_SIZE + \ + sizeof(struct rx_desc) + RX_ALIGN) /* rtl8152 flags */ enum rtl8152_flags { @@ -515,6 +517,7 @@ enum rtl8152_flags { /* Define these values to match your device */ #define VENDOR_ID_REALTEK 0x0bda +#define VENDOR_ID_MICROSOFT 0x045e #define VENDOR_ID_SAMSUNG 0x04e8 #define VENDOR_ID_LENOVO 0x17ef #define VENDOR_ID_NVIDIA 0x0955 @@ -1292,6 +1295,7 @@ static void intr_callback(struct urb *urb) } } else { if (netif_carrier_ok(tp->netdev)) { + netif_stop_queue(tp->netdev); set_bit(RTL8152_LINK_CHG, &tp->flags); schedule_delayed_work(&tp->schedule, 0); } @@ -1362,6 +1366,7 @@ static int alloc_all_mem(struct r8152 *tp) spin_lock_init(&tp->rx_lock); spin_lock_init(&tp->tx_lock); INIT_LIST_HEAD(&tp->tx_free); + INIT_LIST_HEAD(&tp->rx_done); skb_queue_head_init(&tp->tx_queue); skb_queue_head_init(&tp->rx_queue); @@ -2252,8 +2257,7 @@ static void r8153_set_rx_early_timeout(struct r8152 *tp) static void r8153_set_rx_early_size(struct r8152 *tp) { - u32 mtu = tp->netdev->mtu; - u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 8; + u32 ocp_data = (agg_buf_sz - rx_reserved_size(tp->netdev->mtu)) / 4; ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data); } @@ -2898,7 +2902,8 @@ static void r8153_first_init(struct r8152 *tp) rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX); - ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8153_RMS); + ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + CRC_SIZE; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, ocp_data); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_JUMBO); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0); @@ -2950,7 +2955,8 @@ static void r8153_enter_oob(struct r8152 *tp) usleep_range(1000, 2000); } - ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8153_RMS); + ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + CRC_SIZE; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG); ocp_data &= ~TEREDO_WAKE_MASK; @@ -3165,6 +3171,9 @@ static void set_carrier(struct r8152 *tp) napi_enable(&tp->napi); netif_wake_queue(netdev); netif_info(tp, link, netdev, "carrier on\n"); + } else if (netif_queue_stopped(netdev) && + skb_queue_len(&tp->tx_queue) < tp->tx_qlen) { + netif_wake_queue(netdev); } } else { if (netif_carrier_ok(netdev)) { @@ -3698,8 +3707,18 @@ static int rtl8152_resume(struct usb_interface *intf) tp->rtl_ops.autosuspend_en(tp, false); napi_disable(&tp->napi); set_bit(WORK_ENABLE, &tp->flags); - if (netif_carrier_ok(tp->netdev)) - rtl_start_rx(tp); + + if (netif_carrier_ok(tp->netdev)) { + if (rtl8152_get_speed(tp) & LINK_STATUS) { + rtl_start_rx(tp); + } else { + netif_carrier_off(tp->netdev); + tp->rtl_ops.disable(tp); + netif_info(tp, link, tp->netdev, + "linking down\n"); + } + } + napi_enable(&tp->napi); clear_bit(SELECTIVE_SUSPEND, &tp->flags); smp_mb__after_atomic(); @@ -4200,8 +4219,14 @@ static int rtl8152_change_mtu(struct net_device *dev, int new_mtu) dev->mtu = new_mtu; - if (netif_running(dev) && netif_carrier_ok(dev)) - r8153_set_rx_early_size(tp); + if (netif_running(dev)) { + u32 rms = new_mtu + VLAN_ETH_HLEN + CRC_SIZE; + + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, rms); + + if (netif_carrier_ok(dev)) + r8153_set_rx_early_size(tp); + } mutex_unlock(&tp->control); @@ 
-4497,6 +4522,8 @@ static void rtl8152_disconnect(struct usb_interface *intf) static struct usb_device_id rtl8152_table[] = { {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)}, {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)}, + {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab)}, + {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6)}, {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)}, {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)}, {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062)}, diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index bf95016f442ace..ea9890d619670e 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -51,7 +51,7 @@ module_param(gso, bool, 0444); * at once, the weight is chosen so that the EWMA will be insensitive to short- * term, transient changes in packet size. */ -DECLARE_EWMA(pkt_len, 1, 64) +DECLARE_EWMA(pkt_len, 0, 64) /* With mergeable buffers we align buffer address and use the low bits to * encode its true size. Buffer size is up to 1 page so we need to align to @@ -2080,7 +2080,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi) } ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, - names); + names, NULL); if (ret) goto err_find; diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 22379da6340077..d6988db1930d6b 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -340,6 +340,7 @@ static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev) static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev) { + int len = skb->len; netdev_tx_t ret = is_ip_tx_frame(skb, dev); if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { @@ -347,7 +348,7 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev) u64_stats_update_begin(&dstats->syncp); dstats->tx_pkts++; - dstats->tx_bytes += skb->len; + dstats->tx_bytes += len; u64_stats_update_end(&dstats->syncp); } else { this_cpu_inc(dev->dstats->tx_drps); @@ -461,8 +462,10 @@ static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf) } if (rt6_local) { - if (rt6_local->rt6i_idev) + if (rt6_local->rt6i_idev) { in6_dev_put(rt6_local->rt6i_idev); + rt6_local->rt6i_idev = NULL; + } dst = &rt6_local->dst; dev_put(dst->dev); diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index b7911994112aeb..bdb6ae16d4a85b 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2105,6 +2105,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min, vxlan->cfg.port_max, true); + rcu_read_lock(); if (dst->sa.sa_family == AF_INET) { struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock); struct rtable *rt; @@ -2127,7 +2128,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, dst_port, vni, &rt->dst, rt->rt_flags); if (err) - return; + goto out_unlock; } else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) { df = htons(IP_DF); } @@ -2166,7 +2167,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, dst_port, vni, ndst, rt6i_flags); if (err) - return; + goto out_unlock; } tos = ip_tunnel_ecn_encap(tos, old_iph, skb); @@ -2183,6 +2184,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, label, src_port, dst_port, !udp_sum); #endif } +out_unlock: + rcu_read_unlock(); return; drop: @@ -2191,6 +2194,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, return; tx_error: + rcu_read_unlock(); if (err == -ELOOP) dev->stats.collisions++; 
else if (err == -ENETUNREACH) @@ -2972,6 +2976,44 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, return 0; } +static int __vxlan_dev_create(struct net *net, struct net_device *dev, + struct vxlan_config *conf) +{ + struct vxlan_net *vn = net_generic(net, vxlan_net_id); + struct vxlan_dev *vxlan = netdev_priv(dev); + int err; + + err = vxlan_dev_configure(net, dev, conf, false); + if (err) + return err; + + dev->ethtool_ops = &vxlan_ethtool_ops; + + /* create an fdb entry for a valid default destination */ + if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) { + err = vxlan_fdb_create(vxlan, all_zeros_mac, + &vxlan->default_dst.remote_ip, + NUD_REACHABLE | NUD_PERMANENT, + NLM_F_EXCL | NLM_F_CREATE, + vxlan->cfg.dst_port, + vxlan->default_dst.remote_vni, + vxlan->default_dst.remote_vni, + vxlan->default_dst.remote_ifindex, + NTF_SELF); + if (err) + return err; + } + + err = register_netdevice(dev); + if (err) { + vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni); + return err; + } + + list_add(&vxlan->next, &vn->vxlan_list); + return 0; +} + static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[], struct net_device *dev, struct vxlan_config *conf, bool changelink) @@ -3168,8 +3210,6 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[], static int vxlan_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { - struct vxlan_net *vn = net_generic(src_net, vxlan_net_id); - struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_config conf; int err; @@ -3177,36 +3217,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev, if (err) return err; - err = vxlan_dev_configure(src_net, dev, &conf, false); - if (err) - return err; - - dev->ethtool_ops = &vxlan_ethtool_ops; - - /* create an fdb entry for a valid default destination */ - if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) { - err = vxlan_fdb_create(vxlan, all_zeros_mac, - &vxlan->default_dst.remote_ip, - NUD_REACHABLE | NUD_PERMANENT, - NLM_F_EXCL | NLM_F_CREATE, - vxlan->cfg.dst_port, - vxlan->default_dst.remote_vni, - vxlan->default_dst.remote_vni, - vxlan->default_dst.remote_ifindex, - NTF_SELF); - if (err) - return err; - } - - err = register_netdevice(dev); - if (err) { - vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni); - return err; - } - - list_add(&vxlan->next, &vn->vxlan_list); - - return 0; + return __vxlan_dev_create(src_net, dev, &conf); } static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], @@ -3436,7 +3447,7 @@ struct net_device *vxlan_dev_create(struct net *net, const char *name, if (IS_ERR(dev)) return dev; - err = vxlan_dev_configure(net, dev, conf, false); + err = __vxlan_dev_create(net, dev, conf); if (err < 0) { free_netdev(dev); return ERR_PTR(err); diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c index 087eb266601fc8..4ca71bca39acfc 100644 --- a/drivers/net/wan/cosa.c +++ b/drivers/net/wan/cosa.c @@ -78,7 +78,7 @@ #include #include -#include +#include #include #include #include diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index a5045b5279d70a..6742ae60566045 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -381,8 +381,8 @@ static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev) /* set bd status and length */ bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S; - iowrite16be(bd_status, &bd->status); iowrite16be(skb->len, &bd->length); + 
iowrite16be(bd_status, &bd->status); /* Move to next BD in the ring */ if (!(bd_status & T_W_S)) @@ -457,7 +457,7 @@ static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit) struct sk_buff *skb; hdlc_device *hdlc = dev_to_hdlc(dev); struct qe_bd *bd; - u32 bd_status; + u16 bd_status; u16 length, howmany = 0; u8 *bdbuffer; int i; diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c index e7f5910a65191f..f8eb66ef2944ea 100644 --- a/drivers/net/wimax/i2400m/usb.c +++ b/drivers/net/wimax/i2400m/usb.c @@ -467,6 +467,9 @@ int i2400mu_probe(struct usb_interface *iface, struct i2400mu *i2400mu; struct usb_device *usb_dev = interface_to_usbdev(iface); + if (iface->cur_altsetting->desc.bNumEndpoints < 4) + return -ENODEV; + if (usb_dev->speed != USB_SPEED_HIGH) dev_err(dev, "device not connected as high speed\n"); diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index dd902b43f8f775..0a8e29e9a0ebc7 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -18,6 +18,8 @@ #include #include #include +#include +#include #include #include "core.h" @@ -711,6 +713,72 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar) return 0; } +static void ath10k_core_check_bdfext(const struct dmi_header *hdr, void *data) +{ + struct ath10k *ar = data; + const char *bdf_ext; + const char *magic = ATH10K_SMBIOS_BDF_EXT_MAGIC; + u8 bdf_enabled; + int i; + + if (hdr->type != ATH10K_SMBIOS_BDF_EXT_TYPE) + return; + + if (hdr->length != ATH10K_SMBIOS_BDF_EXT_LENGTH) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "wrong smbios bdf ext type length (%d).\n", + hdr->length); + return; + } + + bdf_enabled = *((u8 *)hdr + ATH10K_SMBIOS_BDF_EXT_OFFSET); + if (!bdf_enabled) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, "bdf variant name not found.\n"); + return; + } + + /* Only one string exists (per spec) */ + bdf_ext = (char *)hdr + hdr->length; + + if (memcmp(bdf_ext, magic, strlen(magic)) != 0) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "bdf variant magic does not match.\n"); + return; + } + + for (i = 0; i < strlen(bdf_ext); i++) { + if (!isascii(bdf_ext[i]) || !isprint(bdf_ext[i])) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "bdf variant name contains non ascii chars.\n"); + return; + } + } + + /* Copy extension name without magic suffix */ + if (strscpy(ar->id.bdf_ext, bdf_ext + strlen(magic), + sizeof(ar->id.bdf_ext)) < 0) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "bdf variant string is longer than the buffer can accommodate (variant: %s)\n", + bdf_ext); + return; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "found and validated bdf variant smbios_type 0x%x bdf %s\n", + ATH10K_SMBIOS_BDF_EXT_TYPE, bdf_ext); +} + +static int ath10k_core_check_smbios(struct ath10k *ar) +{ + ar->id.bdf_ext[0] = '\0'; + dmi_walk(ath10k_core_check_bdfext, ar); + + if (ar->id.bdf_ext[0] == '\0') + return -ENODATA; + + return 0; +} + static int ath10k_download_and_run_otp(struct ath10k *ar) { u32 result, address = ar->hw_params.patch_load_addr; @@ -1020,6 +1088,23 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar, case ATH10K_BD_IE_BOARD: ret = ath10k_core_parse_bd_ie_board(ar, data, ie_len, boardname); + if (ret == -ENOENT && ar->id.bdf_ext[0] != '\0') { + /* try default bdf if variant was not found */ + char *s, *v = ",variant="; + char boardname2[100]; + + strlcpy(boardname2, boardname, + sizeof(boardname2)); + + s = strstr(boardname2, v); + if (s) + *s = '\0'; /* strip ",variant=%s" */ + + ret = 
ath10k_core_parse_bd_ie_board(ar, data, + ie_len, + boardname2); + } + if (ret == -ENOENT) /* no match found, continue */ break; @@ -1057,6 +1142,9 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar, static int ath10k_core_create_board_name(struct ath10k *ar, char *name, size_t name_len) { + /* strlen(',variant=') + strlen(ar->id.bdf_ext) */ + char variant[9 + ATH10K_SMBIOS_BDF_EXT_STR_LENGTH] = { 0 }; + if (ar->id.bmi_ids_valid) { scnprintf(name, name_len, "bus=%s,bmi-chip-id=%d,bmi-board-id=%d", @@ -1066,12 +1154,15 @@ static int ath10k_core_create_board_name(struct ath10k *ar, char *name, goto out; } + if (ar->id.bdf_ext[0] != '\0') + scnprintf(variant, sizeof(variant), ",variant=%s", + ar->id.bdf_ext); + scnprintf(name, name_len, - "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x", + "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x%s", ath10k_bus_str(ar->hif.bus), ar->id.vendor, ar->id.device, - ar->id.subsystem_vendor, ar->id.subsystem_device); - + ar->id.subsystem_vendor, ar->id.subsystem_device, variant); out: ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using board name '%s'\n", name); @@ -2128,6 +2219,10 @@ static int ath10k_core_probe_fw(struct ath10k *ar) goto err_free_firmware_files; } + ret = ath10k_core_check_smbios(ar); + if (ret) + ath10k_dbg(ar, ATH10K_DBG_BOOT, "bdf variant name not set.\n"); + ret = ath10k_core_fetch_board_file(ar); if (ret) { ath10k_err(ar, "failed to fetch board file: %d\n", ret); diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 757242ef52ac14..88d14be7fcceb4 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -69,6 +69,23 @@ #define ATH10K_NAPI_BUDGET 64 #define ATH10K_NAPI_QUOTA_LIMIT 60 +/* SMBIOS type containing Board Data File Name Extension */ +#define ATH10K_SMBIOS_BDF_EXT_TYPE 0xF8 + +/* SMBIOS type structure length (excluding strings-set) */ +#define ATH10K_SMBIOS_BDF_EXT_LENGTH 0x9 + +/* Offset pointing to Board Data File Name Extension */ +#define ATH10K_SMBIOS_BDF_EXT_OFFSET 0x8 + +/* Board Data File Name Extension string length. 
+ * String format: BDF__\0 + */ +#define ATH10K_SMBIOS_BDF_EXT_STR_LENGTH 0x20 + +/* The magic used by QCA spec */ +#define ATH10K_SMBIOS_BDF_EXT_MAGIC "BDF_" + struct ath10k; enum ath10k_bus { @@ -798,6 +815,8 @@ struct ath10k { bool bmi_ids_valid; u8 bmi_board_id; u8 bmi_chip_id; + + char bdf_ext[ATH10K_SMBIOS_BDF_EXT_STR_LENGTH]; } id; int fw_api; diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c index 33fb26833cd0a6..d9f37ee4bfdd3e 100644 --- a/drivers/net/wireless/ath/ath10k/hw.c +++ b/drivers/net/wireless/ath/ath10k/hw.c @@ -51,7 +51,7 @@ const struct ath10k_hw_regs qca6174_regs = { .rtc_soc_base_address = 0x00000800, .rtc_wmac_base_address = 0x00001000, .soc_core_base_address = 0x0003a000, - .wlan_mac_base_address = 0x00020000, + .wlan_mac_base_address = 0x00010000, .ce_wrapper_base_address = 0x00034000, .ce0_base_address = 0x00034400, .ce1_base_address = 0x00034800, diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h index 67fedb61fcc02d..979800c6f57fba 100644 --- a/drivers/net/wireless/ath/ath5k/ath5k.h +++ b/drivers/net/wireless/ath/ath5k/ath5k.h @@ -1252,7 +1252,7 @@ struct ath5k_statistics { #define ATH5K_TXQ_LEN_MAX (ATH_TXBUF / 4) /* bufs per queue */ #define ATH5K_TXQ_LEN_LOW (ATH5K_TXQ_LEN_MAX / 2) /* low mark */ -DECLARE_EWMA(beacon_rssi, 1024, 8) +DECLARE_EWMA(beacon_rssi, 10, 8) /* Driver state associated with an instance of a device */ struct ath5k_hw { diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c index b7fe0af4cb2400..363b30a549c2b6 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -20,6 +20,7 @@ #include #include #include +#include #include "core.h" #include "cfg80211.h" diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c index e97ab2b916630e..cdafebb9c936b4 100644 --- a/drivers/net/wireless/broadcom/b43legacy/main.c +++ b/drivers/net/wireless/broadcom/b43legacy/main.c @@ -36,7 +36,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c index de19c7c92bc6c0..85d949e03f79f7 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c @@ -2238,14 +2238,16 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev) struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy); struct brcmf_p2p_info *p2p = &cfg->p2p; struct brcmf_cfg80211_vif *vif; + enum nl80211_iftype iftype; bool wait_for_disable = false; int err; brcmf_dbg(TRACE, "delete P2P vif\n"); vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev); + iftype = vif->wdev.iftype; brcmf_cfg80211_arm_vif_event(cfg, vif); - switch (vif->wdev.iftype) { + switch (iftype) { case NL80211_IFTYPE_P2P_CLIENT: if (test_bit(BRCMF_VIF_STATUS_DISCONNECTING, &vif->sme_state)) wait_for_disable = true; @@ -2275,7 +2277,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev) BRCMF_P2P_DISABLE_TIMEOUT); err = 0; - if (vif->wdev.iftype != NL80211_IFTYPE_P2P_DEVICE) { + if (iftype != NL80211_IFTYPE_P2P_DEVICE) { brcmf_vif_clear_mgmt_ies(vif); err = brcmf_p2p_release_p2p_if(vif); } @@ -2291,7 +2293,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev) brcmf_remove_interface(vif->ifp, true); brcmf_cfg80211_arm_vif_event(cfg, NULL); - 
if (vif->wdev.iftype != NL80211_IFTYPE_P2P_DEVICE) + if (iftype != NL80211_IFTYPE_P2P_DEVICE) p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = NULL; return err; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index c5744b45ec8fbc..65689469c5a12e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index a260cd5032005b..077bfd8f4c0cd8 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -1056,6 +1056,8 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm, if (ret) return ret; + if (count == 0) + return 0; iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, buf, (count - 1), NULL); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 99132ea16ede08..c5734e1a02d27e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -216,7 +216,8 @@ u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif) qmask |= BIT(vif->hw_queue[ac]); } - if (vif->type == NL80211_IFTYPE_AP) + if (vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_ADHOC) qmask |= BIT(vif->cab_queue); return qmask; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index d37b1695c64eac..486dcceed17a4f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -2319,7 +2319,7 @@ iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - /* Called when we need to transmit (a) frame(s) from agg queue */ + /* Called when we need to transmit (a) frame(s) from agg or dqa queue */ iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames, tids, more_data, true); @@ -2338,7 +2338,8 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; - if (tid_data->state != IWL_AGG_ON && + if (!iwl_mvm_is_dqa_supported(mvm) && + tid_data->state != IWL_AGG_ON && tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA) continue; @@ -2400,7 +2401,7 @@ void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) return; rcu_read_lock(); - sta = mvm->fw_id_to_mac_id[notif->sta_id]; + sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]); if (WARN_ON(IS_ERR_OR_NULL(sta))) { rcu_read_unlock(); return; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index bd1dcc863d8f33..9d28db7f56aa24 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -1806,7 +1806,8 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) iwl_mvm_get_wd_timeout(mvm, vif, false, false); int queue; - if (vif->type == NL80211_IFTYPE_AP) + if (vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_ADHOC) queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE; @@ -1837,7 +1838,8 @@ int 
iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) * enabled-cab_queue to the mask) */ if (iwl_mvm_is_dqa_supported(mvm) && - vif->type == NL80211_IFTYPE_AP) { + (vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_ADHOC)) { struct iwl_trans_txq_scd_cfg cfg = { .fifo = IWL_MVM_TX_FIFO_MCAST, .sta_id = mvmvif->bcast_sta.sta_id, @@ -1862,7 +1864,8 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); - if (vif->type == NL80211_IFTYPE_AP) + if (vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_ADHOC) iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue, IWL_MAX_TID_COUNT, 0); @@ -3135,7 +3138,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, struct ieee80211_sta *sta, enum ieee80211_frame_release_type reason, u16 cnt, u16 tids, bool more_data, - bool agg) + bool single_sta_queue) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_add_sta_cmd cmd = { @@ -3155,14 +3158,14 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]); - /* If we're releasing frames from aggregation queues then check if the - * all queues combined that we're releasing frames from have + /* If we're releasing frames from aggregation or dqa queues then check + * if all the queues that we're releasing frames from, combined, have: * - more frames than the service period, in which case more_data * needs to be set * - fewer than 'cnt' frames, in which case we need to adjust the * firmware command (but do that unconditionally) */ - if (agg) { + if (single_sta_queue) { int remaining = cnt; int sleep_tx_count; @@ -3172,7 +3175,8 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, u16 n_queued; tid_data = &mvmsta->tid_data[tid]; - if (WARN(tid_data->state != IWL_AGG_ON && + if (WARN(!iwl_mvm_is_dqa_supported(mvm) && + tid_data->state != IWL_AGG_ON && tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA, "TID %d state is %d\n", tid, tid_data->state)) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 4be34f902278c8..1927ce6077984f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -547,7 +547,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, struct ieee80211_sta *sta, enum ieee80211_frame_release_type reason, u16 cnt, u16 tids, bool more_data, - bool agg); + bool single_sta_queue); int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, bool drain); void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index dd2b4a30081993..1ba0a6f5550366 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,6 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -505,6 +506,7 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm, switch (info->control.vif->type) { case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_ADHOC: /* * Handle legacy hostapd as well, where station may be added * only after assoc. Take care of the case where we send a @@ -516,7 +518,8 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm, if (info->hw_queue == info->control.vif->cab_queue) return info->hw_queue; - WARN_ONCE(1, "fc=0x%02x", le16_to_cpu(fc)); + WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC, + "fc=0x%02x", le16_to_cpu(fc)); return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; case NL80211_IFTYPE_P2P_DEVICE: if (ieee80211_is_mgmt(fc)) @@ -583,7 +586,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) iwl_mvm_vif_from_mac80211(info.control.vif); if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE || - info.control.vif->type == NL80211_IFTYPE_AP) { + info.control.vif->type == NL80211_IFTYPE_AP || + info.control.vif->type == NL80211_IFTYPE_ADHOC) { sta_id = mvmvif->bcast_sta.sta_id; queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, hdr->frame_control); @@ -628,8 +632,10 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) * values. * Note that we don't need to make sure it isn't agg'd, since we're * TXing non-sta + * For DQA mode - we shouldn't increase it though */ - atomic_inc(&mvm->pending_frames[sta_id]); + if (!iwl_mvm_is_dqa_supported(mvm)) + atomic_inc(&mvm->pending_frames[sta_id]); return 0; } @@ -1005,11 +1011,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, spin_unlock(&mvmsta->lock); - /* Increase pending frames count if this isn't AMPDU */ - if ((iwl_mvm_is_dqa_supported(mvm) && - mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_ON && - mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_STARTING) || - (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu)) + /* Increase pending frames count if this isn't AMPDU or DQA queue */ + if (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu) atomic_inc(&mvm->pending_frames[mvmsta->sta_id]); return 0; @@ -1079,12 +1082,13 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm, lockdep_assert_held(&mvmsta->lock); if ((tid_data->state == IWL_AGG_ON || - tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) && + tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA || + iwl_mvm_is_dqa_supported(mvm)) && iwl_mvm_tid_queued(tid_data) == 0) { /* - * Now that this aggregation queue is empty tell mac80211 so it - * knows we no longer have frames buffered for the station on - * this TID (for the TIM bitmap calculation.) + * Now that this aggregation or DQA queue is empty tell + * mac80211 so it knows we no longer have frames buffered for + * the station on this TID (for the TIM bitmap calculation.) 
*/ ieee80211_sta_set_buffered(sta, tid, false); } @@ -1257,7 +1261,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, u8 skb_freed = 0; u16 next_reclaimed, seq_ctl; bool is_ndp = false; - bool txq_agg = false; /* Is this TXQ aggregated */ __skb_queue_head_init(&skbs); @@ -1283,6 +1286,10 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, info->flags |= IEEE80211_TX_STAT_ACK; break; case TX_STATUS_FAIL_DEST_PS: + /* In DQA, the FW should have stopped the queue and not + * return this status + */ + WARN_ON(iwl_mvm_is_dqa_supported(mvm)); info->flags |= IEEE80211_TX_STAT_TX_FILTERED; break; default: @@ -1387,15 +1394,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, bool send_eosp_ndp = false; spin_lock_bh(&mvmsta->lock); - if (iwl_mvm_is_dqa_supported(mvm)) { - enum iwl_mvm_agg_state state; - - state = mvmsta->tid_data[tid].state; - txq_agg = (state == IWL_AGG_ON || - state == IWL_EMPTYING_HW_QUEUE_DELBA); - } else { - txq_agg = txq_id >= mvm->first_agg_queue; - } if (!is_ndp) { tid_data->next_reclaimed = next_reclaimed; @@ -1452,11 +1450,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, * If the txq is not an AMPDU queue, there is no chance we freed * several skbs. Check that out... */ - if (txq_agg) + if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue) goto out; /* We can't free more than one frame at once on a shared queue */ - WARN_ON(!iwl_mvm_is_dqa_supported(mvm) && (skb_freed > 1)); + WARN_ON(skb_freed > 1); /* If we have still frames for this STA nothing to do here */ if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c index 544ef7adde7d2a..04dfd040a6502a 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_hw.c +++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c @@ -43,7 +43,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c index a5656bc0e6aaa7..b2c6b065b54293 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c +++ b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c @@ -2,7 +2,7 @@ #include #include -#include +#include #include #include #include diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 0889fc81ce9e47..50c219fb1a52b9 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -3056,6 +3056,7 @@ static int hwsim_register_received_nl(struct sk_buff *skb_2, static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) { struct hwsim_new_radio_params param = { 0 }; + const char *hwname = NULL; param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG]; param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE]; @@ -3069,8 +3070,14 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) if (info->attrs[HWSIM_ATTR_NO_VIF]) param.no_vif = true; - if (info->attrs[HWSIM_ATTR_RADIO_NAME]) - param.hwname = nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]); + if (info->attrs[HWSIM_ATTR_RADIO_NAME]) { + hwname = kasprintf(GFP_KERNEL, "%.*s", + nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]), + (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME])); + if (!hwname) + return -ENOMEM; + param.hwname = hwname; + } if (info->attrs[HWSIM_ATTR_USE_CHANCTX]) param.use_chanctx = true; @@ -3098,11 +3105,15 @@ static int 
hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info) s64 idx = -1; const char *hwname = NULL; - if (info->attrs[HWSIM_ATTR_RADIO_ID]) + if (info->attrs[HWSIM_ATTR_RADIO_ID]) { idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]); - else if (info->attrs[HWSIM_ATTR_RADIO_NAME]) - hwname = (void *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]); - else + } else if (info->attrs[HWSIM_ATTR_RADIO_NAME]) { + hwname = kasprintf(GFP_KERNEL, "%.*s", + nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]), + (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME])); + if (!hwname) + return -ENOMEM; + } else return -EINVAL; spin_lock_bh(&hwsim_radio_lock); @@ -3111,7 +3122,8 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info) if (data->idx != idx) continue; } else { - if (strcmp(hwname, wiphy_name(data->hw->wiphy))) + if (!hwname || + strcmp(hwname, wiphy_name(data->hw->wiphy))) continue; } @@ -3122,10 +3134,12 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info) spin_unlock_bh(&hwsim_radio_lock); mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), info); + kfree(hwname); return 0; } spin_unlock_bh(&hwsim_radio_lock); + kfree(hwname); return -ENODEV; } diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 5ebca1d0cfc750..b62e03d11c2e27 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -57,8 +57,8 @@ MODULE_PARM_DESC(mfg_mode, "manufacturing mode enable:1, disable:0"); * In case of any errors during initialization, this function also ensures * proper cleanup before exiting. */ -static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops, - void **padapter) +static int mwifiex_register(void *card, struct device *dev, + struct mwifiex_if_ops *if_ops, void **padapter) { struct mwifiex_adapter *adapter; int i; @@ -68,6 +68,7 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops, return -ENOMEM; *padapter = adapter; + adapter->dev = dev; adapter->card = card; /* Save interface specific operations in adapter */ @@ -1568,12 +1569,11 @@ mwifiex_add_card(void *card, struct completion *fw_done, { struct mwifiex_adapter *adapter; - if (mwifiex_register(card, if_ops, (void **)&adapter)) { + if (mwifiex_register(card, dev, if_ops, (void **)&adapter)) { pr_err("%s: software init failed\n", __func__); goto err_init_sw; } - adapter->dev = dev; mwifiex_probe_of(adapter); adapter->iface_type = iface_type; @@ -1718,6 +1718,9 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter) wiphy_unregister(adapter->wiphy); wiphy_free(adapter->wiphy); + if (adapter->irq_wakeup >= 0) + device_init_wakeup(adapter->dev, false); + /* Unregister device */ mwifiex_dbg(adapter, INFO, "info: unregister device\n"); diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index a0d918094889df..b8c990d10d6ecb 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -2739,6 +2739,21 @@ static void mwifiex_pcie_device_dump(struct mwifiex_adapter *adapter) schedule_work(&card->work); } +static void mwifiex_pcie_free_buffers(struct mwifiex_adapter *adapter) +{ + struct pcie_service_card *card = adapter->card; + const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; + + if (reg->sleep_cookie) + mwifiex_pcie_delete_sleep_cookie_buf(adapter); + + mwifiex_pcie_delete_cmdrsp_buf(adapter); + mwifiex_pcie_delete_evtbd_ring(adapter); + 
mwifiex_pcie_delete_rxbd_ring(adapter); + mwifiex_pcie_delete_txbd_ring(adapter); + card->cmdrsp_buf = NULL; +} + /* * This function initializes the PCI-E host memory space, WCB rings, etc. * @@ -2850,13 +2865,6 @@ static int mwifiex_init_pcie(struct mwifiex_adapter *adapter) /* * This function cleans up the allocated card buffers. - * - * The following are freed by this function - - * - TXBD ring buffers - * - RXBD ring buffers - * - Event BD ring buffers - * - Command response ring buffer - * - Sleep cookie buffer */ static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter) { @@ -2875,6 +2883,8 @@ static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter) "Failed to write driver not-ready signature\n"); } + mwifiex_pcie_free_buffers(adapter); + if (pdev) { pci_iounmap(pdev, card->pci_mmap); pci_iounmap(pdev, card->pci_mmap1); @@ -3126,10 +3136,7 @@ static void mwifiex_pcie_up_dev(struct mwifiex_adapter *adapter) pci_iounmap(pdev, card->pci_mmap1); } -/* This function cleans up the PCI-E host memory space. - * Some code is extracted from mwifiex_unregister_dev() - * - */ +/* This function cleans up the PCI-E host memory space. */ static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; @@ -3140,14 +3147,7 @@ static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter) adapter->seq_num = 0; - if (reg->sleep_cookie) - mwifiex_pcie_delete_sleep_cookie_buf(adapter); - - mwifiex_pcie_delete_cmdrsp_buf(adapter); - mwifiex_pcie_delete_evtbd_ring(adapter); - mwifiex_pcie_delete_rxbd_ring(adapter); - mwifiex_pcie_delete_txbd_ring(adapter); - card->cmdrsp_buf = NULL; + mwifiex_pcie_free_buffers(adapter); } static struct mwifiex_if_ops pcie_ops = { diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h index 26869b3bef45ff..340787894c694a 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h @@ -257,7 +257,7 @@ struct link_qual { int tx_failed; }; -DECLARE_EWMA(rssi, 1024, 8) +DECLARE_EWMA(rssi, 10, 8) /* * Antenna settings about the currently active link. diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index caea350f05aac7..bdc379178e8795 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -1742,12 +1742,14 @@ void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, u8 tag, u8 len, u8 *val) unsigned long flags; struct rtl_c2hcmd *c2hcmd; - c2hcmd = kmalloc(sizeof(*c2hcmd), GFP_KERNEL); + c2hcmd = kmalloc(sizeof(*c2hcmd), + in_interrupt() ? GFP_ATOMIC : GFP_KERNEL); if (!c2hcmd) goto label_err; - c2hcmd->val = kmalloc(len, GFP_KERNEL); + c2hcmd->val = kmalloc(len, + in_interrupt() ? 
GFP_ATOMIC : GFP_KERNEL); if (!c2hcmd->val) goto label_err2; diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c index e8c5dddc54ba27..3c4c58b9fe76ed 100644 --- a/drivers/net/xen-netback/hash.c +++ b/drivers/net/xen-netback/hash.c @@ -39,7 +39,7 @@ static void xenvif_add_hash(struct xenvif *vif, const u8 *tag, unsigned long flags; bool found; - new = kmalloc(sizeof(*entry), GFP_KERNEL); + new = kmalloc(sizeof(*entry), GFP_ATOMIC); if (!new) return; diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index a2d326760a7274..8397f6c9245158 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -31,6 +31,7 @@ #include "common.h" #include +#include #include #include #include @@ -164,13 +165,17 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct xenvif *vif = netdev_priv(dev); struct xenvif_queue *queue = NULL; - unsigned int num_queues = vif->num_queues; + unsigned int num_queues; u16 index; struct xenvif_rx_cb *cb; BUG_ON(skb->dev != dev); - /* Drop the packet if queues are not set up */ + /* Drop the packet if queues are not set up. + * This handler should be called inside an RCU read section + * so we don't need to enter it here explicitly. + */ + num_queues = READ_ONCE(vif->num_queues); if (num_queues < 1) goto drop; @@ -221,18 +226,18 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev) { struct xenvif *vif = netdev_priv(dev); struct xenvif_queue *queue = NULL; + unsigned int num_queues; u64 rx_bytes = 0; u64 rx_packets = 0; u64 tx_bytes = 0; u64 tx_packets = 0; unsigned int index; - spin_lock(&vif->lock); - if (vif->queues == NULL) - goto out; + rcu_read_lock(); + num_queues = READ_ONCE(vif->num_queues); /* Aggregate tx and rx stats from each queue */ - for (index = 0; index < vif->num_queues; ++index) { + for (index = 0; index < num_queues; ++index) { queue = &vif->queues[index]; rx_bytes += queue->stats.rx_bytes; rx_packets += queue->stats.rx_packets; @@ -240,8 +245,7 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev) tx_packets += queue->stats.tx_packets; } -out: - spin_unlock(&vif->lock); + rcu_read_unlock(); vif->dev->stats.rx_bytes = rx_bytes; vif->dev->stats.rx_packets = rx_packets; @@ -377,10 +381,13 @@ static void xenvif_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 * data) { struct xenvif *vif = netdev_priv(dev); - unsigned int num_queues = vif->num_queues; + unsigned int num_queues; int i; unsigned int queue_index; + rcu_read_lock(); + num_queues = READ_ONCE(vif->num_queues); + for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) { unsigned long accum = 0; for (queue_index = 0; queue_index < num_queues; ++queue_index) { @@ -389,6 +396,8 @@ static void xenvif_get_ethtool_stats(struct net_device *dev, } data[i] = accum; } + + rcu_read_unlock(); } static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data) diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index f9bcf4a665bcae..602d408fa25e98 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -214,7 +214,7 @@ static void xenvif_fatal_tx_err(struct xenvif *vif) netdev_err(vif->dev, "fatal error; disabling device\n"); vif->disabled = true; /* Disable the vif from queue 0's kthread */ - if (vif->queues) + if (vif->num_queues) xenvif_kick_thread(&vif->queues[0]); } diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c 
index bb854f92f5a5cd..a56d3eab35dd65 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -492,24 +492,31 @@ static int backend_create_xenvif(struct backend_info *be) static void backend_disconnect(struct backend_info *be) { - if (be->vif) { + struct xenvif *vif = be->vif; + + if (vif) { + unsigned int num_queues = vif->num_queues; unsigned int queue_index; - xen_unregister_watchers(be->vif); + xen_unregister_watchers(vif); #ifdef CONFIG_DEBUG_FS - xenvif_debugfs_delif(be->vif); + xenvif_debugfs_delif(vif); #endif /* CONFIG_DEBUG_FS */ - xenvif_disconnect_data(be->vif); - for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index) - xenvif_deinit_queue(&be->vif->queues[queue_index]); + xenvif_disconnect_data(vif); + + /* At this point some of the handlers may still be active + * so we need to have additional synchronization here. + */ + vif->num_queues = 0; + synchronize_net(); + + for (queue_index = 0; queue_index < num_queues; ++queue_index) + xenvif_deinit_queue(&vif->queues[queue_index]); - spin_lock(&be->vif->lock); - vfree(be->vif->queues); - be->vif->num_queues = 0; - be->vif->queues = NULL; - spin_unlock(&be->vif->lock); + vfree(vif->queues); + vif->queues = NULL; - xenvif_disconnect_ctrl(be->vif); + xenvif_disconnect_ctrl(vif); } } diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index ce3e8dfa10ad5c..1b481a5fb9667d 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -1700,6 +1700,7 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id) struct device *create_namespace_pmem(struct nd_region *nd_region, struct nd_namespace_label *nd_label) { + u64 altcookie = nd_region_interleave_set_altcookie(nd_region); u64 cookie = nd_region_interleave_set_cookie(nd_region); struct nd_label_ent *label_ent; struct nd_namespace_pmem *nspm; @@ -1718,7 +1719,11 @@ struct device *create_namespace_pmem(struct nd_region *nd_region, if (__le64_to_cpu(nd_label->isetcookie) != cookie) { dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n", nd_label->uuid); - return ERR_PTR(-EAGAIN); + if (__le64_to_cpu(nd_label->isetcookie) != altcookie) + return ERR_PTR(-EAGAIN); + + dev_dbg(&nd_region->dev, "valid altcookie in label: %pUb\n", + nd_label->uuid); } nspm = kzalloc(sizeof(*nspm), GFP_KERNEL); @@ -1733,9 +1738,14 @@ struct device *create_namespace_pmem(struct nd_region *nd_region, res->name = dev_name(&nd_region->dev); res->flags = IORESOURCE_MEM; - for (i = 0; i < nd_region->ndr_mappings; i++) - if (!has_uuid_at_pos(nd_region, nd_label->uuid, cookie, i)) - break; + for (i = 0; i < nd_region->ndr_mappings; i++) { + if (has_uuid_at_pos(nd_region, nd_label->uuid, cookie, i)) + continue; + if (has_uuid_at_pos(nd_region, nd_label->uuid, altcookie, i)) + continue; + break; + } + if (i < nd_region->ndr_mappings) { struct nvdimm_drvdata *ndd = to_ndd(&nd_region->mapping[i]); diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index 35dd75057e1697..2a99c83aa19f08 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -328,6 +328,7 @@ struct nd_region *to_nd_region(struct device *dev); int nd_region_to_nstype(struct nd_region *nd_region); int nd_region_register_namespaces(struct nd_region *nd_region, int *err); u64 nd_region_interleave_set_cookie(struct nd_region *nd_region); +u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region); void nvdimm_bus_lock(struct device *dev); void nvdimm_bus_unlock(struct device *dev); bool is_nvdimm_bus_locked(struct device *dev); 
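The xen-netback hunks above (interface.c, netback.c, xenbus.c) convert the data-path queue checks to lockless readers: xenvif_start_xmit() and the stats/ethtool handlers sample vif->num_queues once with READ_ONCE() inside an RCU read section, while backend_disconnect() publishes num_queues = 0 and waits with synchronize_net() before freeing the array. A condensed sketch of that reader/writer pairing follows; my_vif, my_xmit and my_teardown are illustrative names, not the driver's own code:

#include <linux/compiler.h>	/* READ_ONCE() */
#include <linux/errno.h>
#include <linux/netdevice.h>	/* synchronize_net() */
#include <linux/vmalloc.h>	/* vfree() */

struct my_queue;

struct my_vif {
	unsigned int num_queues;
	struct my_queue *queues;
};

/* Reader: TX and stats handlers run under (at least) an RCU read lock. */
static int my_xmit(struct my_vif *vif)
{
	unsigned int num_queues = READ_ONCE(vif->num_queues);

	if (num_queues < 1)
		return -ENODEV;	/* queues torn down or not yet set up */
	/* ... pick a queue in [0, num_queues) and transmit ... */
	return 0;
}

/* Writer: unpublish the queues, wait out the readers, then free. */
static void my_teardown(struct my_vif *vif)
{
	vif->num_queues = 0;	/* readers now see "no queues" */
	synchronize_net();	/* wait for in-flight RCU readers */
	vfree(vif->queues);
	vif->queues = NULL;
}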
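The nvdimm changes above, together with the nd_region_interleave_set_altcookie() helper added in region_devs.c just below, let a PMEM namespace label validate against either the current interleave-set cookie or a legacy alternate cookie. The acceptance rule amounts to roughly this sketch (label_cookie_ok is an illustrative name, not a driver function):

#include <linux/types.h>

/*
 * A label is usable if its cookie matches the current interleave-set
 * cookie, or, failing that, the alternate cookie kept for labels
 * written by older firmware/tooling.
 */
static bool label_cookie_ok(u64 isetcookie, u64 cookie, u64 altcookie)
{
	if (isetcookie == cookie)
		return true;		/* current format */
	return isetcookie == altcookie;	/* legacy fallback */
}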
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index 7cd705f3247c34..b7cb5066d9613e 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -505,6 +505,15 @@ u64 nd_region_interleave_set_cookie(struct nd_region *nd_region) return 0; } +u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region) +{ + struct nd_interleave_set *nd_set = nd_region->nd_set; + + if (nd_set) + return nd_set->altcookie; + return 0; +} + void nd_mapping_free_labels(struct nd_mapping *nd_mapping) { struct nd_label_ent *label_ent, *e; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 25ec4e58522058..9b3b57fef446dc 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -2344,6 +2344,53 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl) } EXPORT_SYMBOL_GPL(nvme_kill_queues); +void nvme_unfreeze(struct nvme_ctrl *ctrl) +{ + struct nvme_ns *ns; + + mutex_lock(&ctrl->namespaces_mutex); + list_for_each_entry(ns, &ctrl->namespaces, list) + blk_mq_unfreeze_queue(ns->queue); + mutex_unlock(&ctrl->namespaces_mutex); +} +EXPORT_SYMBOL_GPL(nvme_unfreeze); + +void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) +{ + struct nvme_ns *ns; + + mutex_lock(&ctrl->namespaces_mutex); + list_for_each_entry(ns, &ctrl->namespaces, list) { + timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout); + if (timeout <= 0) + break; + } + mutex_unlock(&ctrl->namespaces_mutex); +} +EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout); + +void nvme_wait_freeze(struct nvme_ctrl *ctrl) +{ + struct nvme_ns *ns; + + mutex_lock(&ctrl->namespaces_mutex); + list_for_each_entry(ns, &ctrl->namespaces, list) + blk_mq_freeze_queue_wait(ns->queue); + mutex_unlock(&ctrl->namespaces_mutex); +} +EXPORT_SYMBOL_GPL(nvme_wait_freeze); + +void nvme_start_freeze(struct nvme_ctrl *ctrl) +{ + struct nvme_ns *ns; + + mutex_lock(&ctrl->namespaces_mutex); + list_for_each_entry(ns, &ctrl->namespaces, list) + blk_mq_freeze_queue_start(ns->queue); + mutex_unlock(&ctrl->namespaces_mutex); +} +EXPORT_SYMBOL_GPL(nvme_start_freeze); + void nvme_stop_queues(struct nvme_ctrl *ctrl) { struct nvme_ns *ns; diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index a3da1e90b99dbf..2aa20e3e5675bf 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -294,6 +294,10 @@ void nvme_queue_async_events(struct nvme_ctrl *ctrl); void nvme_stop_queues(struct nvme_ctrl *ctrl); void nvme_start_queues(struct nvme_ctrl *ctrl); void nvme_kill_queues(struct nvme_ctrl *ctrl); +void nvme_unfreeze(struct nvme_ctrl *ctrl); +void nvme_wait_freeze(struct nvme_ctrl *ctrl); +void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout); +void nvme_start_freeze(struct nvme_ctrl *ctrl); #define NVME_QID_ANY -1 struct request *nvme_alloc_request(struct request_queue *q, diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 57a1af52b06e66..26a5fd05fe88aa 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1038,9 +1038,10 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq, } static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, - int depth) + int depth, int node) { - struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL); + struct nvme_queue *nvmeq = kzalloc_node(sizeof(*nvmeq), GFP_KERNEL, + node); if (!nvmeq) return NULL; @@ -1217,7 +1218,8 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) nvmeq = dev->queues[0]; if (!nvmeq) { - nvmeq = nvme_alloc_queue(dev, 0, 
NVME_AQ_DEPTH); + nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH, + dev_to_node(dev->dev)); if (!nvmeq) return -ENOMEM; } @@ -1309,7 +1311,9 @@ static int nvme_create_io_queues(struct nvme_dev *dev) int ret = 0; for (i = dev->queue_count; i <= dev->max_qid; i++) { - if (!nvme_alloc_queue(dev, i, dev->q_depth)) { + /* vector == qid - 1, match nvme_create_queue */ + if (!nvme_alloc_queue(dev, i, dev->q_depth, + pci_irq_get_node(to_pci_dev(dev->dev), i - 1))) { ret = -ENOMEM; break; } @@ -1671,21 +1675,34 @@ static void nvme_pci_disable(struct nvme_dev *dev) static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) { int i, queues; - u32 csts = -1; + bool dead = true; + struct pci_dev *pdev = to_pci_dev(dev->dev); del_timer_sync(&dev->watchdog_timer); mutex_lock(&dev->shutdown_lock); - if (pci_is_enabled(to_pci_dev(dev->dev))) { - nvme_stop_queues(&dev->ctrl); - csts = readl(dev->bar + NVME_REG_CSTS); + if (pci_is_enabled(pdev)) { + u32 csts = readl(dev->bar + NVME_REG_CSTS); + + if (dev->ctrl.state == NVME_CTRL_LIVE) + nvme_start_freeze(&dev->ctrl); + dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY) || + pdev->error_state != pci_channel_io_normal); } + /* + * Give the controller a chance to complete all entered requests if + * doing a safe shutdown. + */ + if (!dead && shutdown) + nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT); + nvme_stop_queues(&dev->ctrl); + queues = dev->online_queues - 1; for (i = dev->queue_count - 1; i > 0; i--) nvme_suspend_queue(dev->queues[i]); - if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) { + if (dead) { /* A device might become IO incapable very soon during * probe, before the admin queue is configured. Thus, * queue_count can be 0 here. @@ -1700,6 +1717,14 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl); blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl); + + /* + * The driver will not be starting up queues again if shutting down so + * must flush all entered requests to their failed completion to avoid + * deadlocking blk-mq hot-cpu notifier. 
+ */ + if (shutdown) + nvme_start_queues(&dev->ctrl); mutex_unlock(&dev->shutdown_lock); } @@ -1822,7 +1847,9 @@ static void nvme_reset_work(struct work_struct *work) nvme_remove_namespaces(&dev->ctrl); } else { nvme_start_queues(&dev->ctrl); + nvme_wait_freeze(&dev->ctrl); nvme_dev_add(dev); + nvme_unfreeze(&dev->ctrl); } if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) { diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 779f516e7a4ec4..47a479f26e5d7d 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -343,8 +343,6 @@ static int __nvme_rdma_init_request(struct nvme_rdma_ctrl *ctrl, struct ib_device *ibdev = dev->dev; int ret; - BUG_ON(queue_idx >= ctrl->queue_count); - ret = nvme_rdma_alloc_qe(ibdev, &req->sqe, sizeof(struct nvme_command), DMA_TO_DEVICE); if (ret) @@ -652,8 +650,22 @@ static int nvme_rdma_connect_io_queues(struct nvme_rdma_ctrl *ctrl) static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl) { + struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; + unsigned int nr_io_queues; int i, ret; + nr_io_queues = min(opts->nr_io_queues, num_online_cpus()); + ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); + if (ret) + return ret; + + ctrl->queue_count = nr_io_queues + 1; + if (ctrl->queue_count < 2) + return 0; + + dev_info(ctrl->ctrl.device, + "creating %d I/O queues.\n", nr_io_queues); + for (i = 1; i < ctrl->queue_count; i++) { ret = nvme_rdma_init_queue(ctrl, i, ctrl->ctrl.opts->queue_size); @@ -1791,20 +1803,8 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = { static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl) { - struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; int ret; - ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues); - if (ret) - return ret; - - ctrl->queue_count = opts->nr_io_queues + 1; - if (ctrl->queue_count < 2) - return 0; - - dev_info(ctrl->ctrl.device, - "creating %d I/O queues.\n", opts->nr_io_queues); - ret = nvme_rdma_init_io_queues(ctrl); if (ret) return ret; diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 94e524fea5687b..a7bcff45f4376d 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -13,6 +13,8 @@ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include +#include + #include #include #include "nvmet.h" diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 5267ce20c12d48..798653b329b28c 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -14,6 +14,8 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include #include +#include + #include "nvmet.h" static struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX]; @@ -423,6 +425,13 @@ void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, ctrl->sqs[qid] = sq; } +static void nvmet_confirm_sq(struct percpu_ref *ref) +{ + struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref); + + complete(&sq->confirm_done); +} + void nvmet_sq_destroy(struct nvmet_sq *sq) { /* @@ -431,7 +440,8 @@ void nvmet_sq_destroy(struct nvmet_sq *sq) */ if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq) nvmet_async_events_free(sq->ctrl); - percpu_ref_kill(&sq->ref); + percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq); + wait_for_completion(&sq->confirm_done); wait_for_completion(&sq->free_done); percpu_ref_exit(&sq->ref); @@ -459,6 +469,7 @@ int nvmet_sq_init(struct nvmet_sq *sq) return ret; } init_completion(&sq->free_done); + init_completion(&sq->confirm_done); return 0; } diff --git 
a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index d1f06e7768ff1d..22f7bc6bac7fa7 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -223,8 +223,6 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx) static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl, struct nvme_loop_iod *iod, unsigned int queue_idx) { - BUG_ON(queue_idx >= ctrl->queue_count); - iod->req.cmd = &iod->cmd; iod->req.rsp = &iod->rsp; iod->queue = &ctrl->queues[queue_idx]; @@ -288,9 +286,9 @@ static struct blk_mq_ops nvme_loop_admin_mq_ops = { static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl) { + nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); blk_cleanup_queue(ctrl->ctrl.admin_q); blk_mq_free_tag_set(&ctrl->admin_tag_set); - nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); } static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl) @@ -314,6 +312,43 @@ static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl) kfree(ctrl); } +static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl) +{ + int i; + + for (i = 1; i < ctrl->queue_count; i++) + nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); +} + +static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl) +{ + struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; + unsigned int nr_io_queues; + int ret, i; + + nr_io_queues = min(opts->nr_io_queues, num_online_cpus()); + ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); + if (ret || !nr_io_queues) + return ret; + + dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues); + + for (i = 1; i <= nr_io_queues; i++) { + ctrl->queues[i].ctrl = ctrl; + ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq); + if (ret) + goto out_destroy_queues; + + ctrl->queue_count++; + } + + return 0; + +out_destroy_queues: + nvme_loop_destroy_io_queues(ctrl); + return ret; +} + static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) { int error; @@ -385,17 +420,13 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl) { - int i; - nvme_stop_keep_alive(&ctrl->ctrl); if (ctrl->queue_count > 1) { nvme_stop_queues(&ctrl->ctrl); blk_mq_tagset_busy_iter(&ctrl->tag_set, nvme_cancel_request, &ctrl->ctrl); - - for (i = 1; i < ctrl->queue_count; i++) - nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); + nvme_loop_destroy_io_queues(ctrl); } if (ctrl->ctrl.state == NVME_CTRL_LIVE) @@ -467,19 +498,14 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work) if (ret) goto out_disable; - for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) { - ctrl->queues[i].ctrl = ctrl; - ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq); - if (ret) - goto out_free_queues; - - ctrl->queue_count++; - } + ret = nvme_loop_init_io_queues(ctrl); + if (ret) + goto out_destroy_admin; - for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) { + for (i = 1; i < ctrl->queue_count; i++) { ret = nvmf_connect_io_queue(&ctrl->ctrl, i); if (ret) - goto out_free_queues; + goto out_destroy_io; } changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); @@ -492,9 +518,9 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work) return; -out_free_queues: - for (i = 1; i < ctrl->queue_count; i++) - nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); +out_destroy_io: + nvme_loop_destroy_io_queues(ctrl); +out_destroy_admin: nvme_loop_destroy_admin_queue(ctrl); out_disable: dev_warn(ctrl->ctrl.device, "Removing after reset failure\n"); @@ -533,25 +559,12 @@ static const struct nvme_ctrl_ops 
nvme_loop_ctrl_ops = { static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl) { - struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; int ret, i; - ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues); - if (ret || !opts->nr_io_queues) + ret = nvme_loop_init_io_queues(ctrl); + if (ret) return ret; - dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", - opts->nr_io_queues); - - for (i = 1; i <= opts->nr_io_queues; i++) { - ctrl->queues[i].ctrl = ctrl; - ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq); - if (ret) - goto out_destroy_queues; - - ctrl->queue_count++; - } - memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); ctrl->tag_set.ops = &nvme_loop_mq_ops; ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; @@ -575,7 +588,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl) goto out_free_tagset; } - for (i = 1; i <= opts->nr_io_queues; i++) { + for (i = 1; i < ctrl->queue_count; i++) { ret = nvmf_connect_io_queue(&ctrl->ctrl, i); if (ret) goto out_cleanup_connect_q; @@ -588,8 +601,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl) out_free_tagset: blk_mq_free_tag_set(&ctrl->tag_set); out_destroy_queues: - for (i = 1; i < ctrl->queue_count; i++) - nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); + nvme_loop_destroy_io_queues(ctrl); return ret; } diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 1370eee0a3c0f6..f7ff15f17ca97d 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -73,6 +73,7 @@ struct nvmet_sq { u16 qid; u16 size; struct completion free_done; + struct completion confirm_done; }; /** diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 9aa1da3778b3ac..ecc4fe86256124 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -703,11 +703,6 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue, { u16 status; - cmd->queue = queue; - cmd->n_rdma = 0; - cmd->req.port = queue->port; - - ib_dma_sync_single_for_cpu(queue->dev->device, cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length, DMA_FROM_DEVICE); @@ -760,9 +755,12 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc) cmd->queue = queue; rsp = nvmet_rdma_get_rsp(queue); + rsp->queue = queue; rsp->cmd = cmd; rsp->flags = 0; rsp->req.cmd = cmd->nvme_cmd; + rsp->req.port = queue->port; + rsp->n_rdma = 0; if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) { unsigned long flags; diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c index 642478d35e99a5..ac27f3d3fbb42b 100644 --- a/drivers/oprofile/buffer_sync.c +++ b/drivers/oprofile/buffer_sync.c @@ -31,6 +31,8 @@ #include #include #include +#include +#include #include #include "oprofile_stats.h" diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c index 0581461c3a67be..eda2633a393d56 100644 --- a/drivers/oprofile/cpu_buffer.c +++ b/drivers/oprofile/cpu_buffer.c @@ -23,6 +23,8 @@ #include #include +#include + #include "event_buffer.h" #include "cpu_buffer.h" #include "buffer_sync.h" diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c index 67935fbbbcabf7..32888f2bd1a977 100644 --- a/drivers/oprofile/event_buffer.c +++ b/drivers/oprofile/event_buffer.c @@ -14,7 +14,7 @@ #include #include -#include +#include #include #include #include diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index aeb073b5fe1606..e32ca2ef9e5404 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c @@ -1539,7 +1539,7 @@ static 
int __init ccio_probe(struct parisc_device *dev) ioc = kzalloc(sizeof(struct ioc), GFP_KERNEL); if (ioc == NULL) { printk(KERN_ERR MODULE_NAME ": memory allocation failure\n"); - return 1; + return -ENOMEM; } ioc->name = dev->id.hversion == U2_IOA_RUNWAY ? "U2" : "UTurn"; @@ -1554,6 +1554,10 @@ static int __init ccio_probe(struct parisc_device *dev) ioc->hw_path = dev->hw_path; ioc->ioc_regs = ioremap_nocache(dev->hpa.start, 4096); + if (!ioc->ioc_regs) { + kfree(ioc); + return -ENOMEM; + } ccio_ioc_init(ioc); ccio_init_resources(ioc); hppa_dma_ops = &ccio_ops; diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c index 103095bbe8c09b..7e2f6d5a6aaf39 100644 --- a/drivers/parisc/eisa.c +++ b/drivers/parisc/eisa.c @@ -14,16 +14,16 @@ * Wax ASIC also includes a PS/2 and RS-232 controller, but those are * dealt with elsewhere; this file is concerned only with the EISA portions * of Wax. - * - * + * + * * HINT: * ----- * To allow an ISA card to work properly in the EISA slot you need to - * set an edge trigger level. This may be done on the palo command line - * by adding the kernel parameter "eisa_irq_edge=n,n2,[...]]", with + * set an edge trigger level. This may be done on the palo command line + * by adding the kernel parameter "eisa_irq_edge=n,n2,[...]]", with * n and n2 as the irq levels you want to use. - * - * Example: "eisa_irq_edge=10,11" allows ISA cards to operate at + * + * Example: "eisa_irq_edge=10,11" allows ISA cards to operate at * irq levels 10 and 11. */ @@ -46,9 +46,9 @@ #include #if 0 -#define EISA_DBG(msg, arg... ) printk(KERN_DEBUG "eisa: " msg , ## arg ) +#define EISA_DBG(msg, arg...) printk(KERN_DEBUG "eisa: " msg, ## arg) #else -#define EISA_DBG(msg, arg... ) +#define EISA_DBG(msg, arg...) #endif #define SNAKES_EEPROM_BASE_ADDR 0xF0810400 @@ -108,7 +108,7 @@ void eisa_out8(unsigned char data, unsigned short port) void eisa_out16(unsigned short data, unsigned short port) { - if (EISA_bus) + if (EISA_bus) gsc_writew(cpu_to_le16(data), eisa_permute(port)); } @@ -135,9 +135,9 @@ static int master_mask; static int slave_mask; /* the trig level can be set with the - * eisa_irq_edge=n,n,n commandline parameter - * We should really read this from the EEPROM - * in the furure. + * eisa_irq_edge=n,n,n commandline parameter + * We should really read this from the EEPROM + * in the future. 
*/ /* irq 13,8,2,1,0 must be edge */ static unsigned int eisa_irq_level __read_mostly; /* default to edge triggered */ @@ -170,7 +170,7 @@ static void eisa_unmask_irq(struct irq_data *d) unsigned int irq = d->irq; unsigned long flags; EISA_DBG("enable irq %d\n", irq); - + spin_lock_irqsave(&eisa_irq_lock, flags); if (irq & 8) { slave_mask &= ~(1 << (irq&7)); @@ -194,7 +194,7 @@ static irqreturn_t eisa_irq(int wax_irq, void *intr_dev) { int irq = gsc_readb(0xfc01f000); /* EISA supports 16 irqs */ unsigned long flags; - + spin_lock_irqsave(&eisa_irq_lock, flags); /* read IRR command */ eisa_out8(0x0a, 0x20); @@ -202,31 +202,31 @@ static irqreturn_t eisa_irq(int wax_irq, void *intr_dev) EISA_DBG("irq IAR %02x 8259-1 irr %02x 8259-2 irr %02x\n", irq, eisa_in8(0x20), eisa_in8(0xa0)); - + /* read ISR command */ eisa_out8(0x0a, 0x20); eisa_out8(0x0a, 0xa0); EISA_DBG("irq 8259-1 isr %02x imr %02x 8259-2 isr %02x imr %02x\n", eisa_in8(0x20), eisa_in8(0x21), eisa_in8(0xa0), eisa_in8(0xa1)); - + irq &= 0xf; - + /* mask irq and write eoi */ if (irq & 8) { slave_mask |= (1 << (irq&7)); eisa_out8(slave_mask, 0xa1); eisa_out8(0x60 | (irq&7),0xa0);/* 'Specific EOI' to slave */ - eisa_out8(0x62,0x20); /* 'Specific EOI' to master-IRQ2 */ - + eisa_out8(0x62, 0x20); /* 'Specific EOI' to master-IRQ2 */ + } else { master_mask |= (1 << (irq&7)); eisa_out8(master_mask, 0x21); - eisa_out8(0x60|irq,0x20); /* 'Specific EOI' to master */ + eisa_out8(0x60|irq, 0x20); /* 'Specific EOI' to master */ } spin_unlock_irqrestore(&eisa_irq_lock, flags); generic_handle_irq(irq); - + spin_lock_irqsave(&eisa_irq_lock, flags); /* unmask */ if (irq & 8) { @@ -254,44 +254,44 @@ static struct irqaction irq2_action = { static void init_eisa_pic(void) { unsigned long flags; - + spin_lock_irqsave(&eisa_irq_lock, flags); eisa_out8(0xff, 0x21); /* mask during init */ eisa_out8(0xff, 0xa1); /* mask during init */ - + /* master pic */ - eisa_out8(0x11,0x20); /* ICW1 */ - eisa_out8(0x00,0x21); /* ICW2 */ - eisa_out8(0x04,0x21); /* ICW3 */ - eisa_out8(0x01,0x21); /* ICW4 */ - eisa_out8(0x40,0x20); /* OCW2 */ - + eisa_out8(0x11, 0x20); /* ICW1 */ + eisa_out8(0x00, 0x21); /* ICW2 */ + eisa_out8(0x04, 0x21); /* ICW3 */ + eisa_out8(0x01, 0x21); /* ICW4 */ + eisa_out8(0x40, 0x20); /* OCW2 */ + /* slave pic */ - eisa_out8(0x11,0xa0); /* ICW1 */ - eisa_out8(0x08,0xa1); /* ICW2 */ - eisa_out8(0x02,0xa1); /* ICW3 */ - eisa_out8(0x01,0xa1); /* ICW4 */ - eisa_out8(0x40,0xa0); /* OCW2 */ - + eisa_out8(0x11, 0xa0); /* ICW1 */ + eisa_out8(0x08, 0xa1); /* ICW2 */ + eisa_out8(0x02, 0xa1); /* ICW3 */ + eisa_out8(0x01, 0xa1); /* ICW4 */ + eisa_out8(0x40, 0xa0); /* OCW2 */ + udelay(100); - - slave_mask = 0xff; - master_mask = 0xfb; + + slave_mask = 0xff; + master_mask = 0xfb; eisa_out8(slave_mask, 0xa1); /* OCW1 */ eisa_out8(master_mask, 0x21); /* OCW1 */ - + /* setup trig level */ EISA_DBG("EISA edge/level %04x\n", eisa_irq_level); - + eisa_out8(eisa_irq_level&0xff, 0x4d0); /* Set all irq's to edge */ - eisa_out8((eisa_irq_level >> 8) & 0xff, 0x4d1); - + eisa_out8((eisa_irq_level >> 8) & 0xff, 0x4d1); + EISA_DBG("pic0 mask %02x\n", eisa_in8(0x21)); EISA_DBG("pic1 mask %02x\n", eisa_in8(0xa1)); EISA_DBG("pic0 edge/level %02x\n", eisa_in8(0x4d0)); EISA_DBG("pic1 edge/level %02x\n", eisa_in8(0x4d1)); - + spin_unlock_irqrestore(&eisa_irq_lock, flags); } @@ -305,7 +305,7 @@ static int __init eisa_probe(struct parisc_device *dev) char *name = is_mongoose(dev) ? 
"Mongoose" : "Wax"; - printk(KERN_INFO "%s EISA Adapter found at 0x%08lx\n", + printk(KERN_INFO "%s EISA Adapter found at 0x%08lx\n", name, (unsigned long)dev->hpa.start); eisa_dev.hba.dev = dev; @@ -334,16 +334,16 @@ static int __init eisa_probe(struct parisc_device *dev) result = request_irq(dev->irq, eisa_irq, IRQF_SHARED, "EISA", &eisa_dev); if (result) { printk(KERN_ERR "EISA: request_irq failed!\n"); - return result; + goto error_release; } - + /* Reserve IRQ2 */ setup_irq(2, &irq2_action); for (i = 0; i < 16; i++) { irq_set_chip_and_handler(i, &eisa_interrupt_type, handle_simple_irq); } - + EISA_bus = 1; if (dev->num_addrs) { @@ -358,6 +358,11 @@ static int __init eisa_probe(struct parisc_device *dev) } } eisa_eeprom_addr = ioremap_nocache(eisa_dev.eeprom_addr, HPEE_MAX_LENGTH); + if (!eisa_eeprom_addr) { + result = -ENOMEM; + printk(KERN_ERR "EISA: ioremap_nocache failed!\n"); + goto error_free_irq; + } result = eisa_enumerator(eisa_dev.eeprom_addr, &eisa_dev.hba.io_space, &eisa_dev.hba.lmmio_space); init_eisa_pic(); @@ -372,11 +377,20 @@ static int __init eisa_probe(struct parisc_device *dev) eisa_dev.root.dma_mask = 0xffffffff; /* wild guess */ if (eisa_root_register (&eisa_dev.root)) { printk(KERN_ERR "EISA: Failed to register EISA root\n"); - return -1; + result = -ENOMEM; + goto error_iounmap; } } - + return 0; + +error_iounmap: + iounmap(eisa_eeprom_addr); +error_free_irq: + free_irq(dev->irq, &eisa_dev); +error_release: + release_resource(&eisa_dev.hba.io_space); + return result; } static const struct parisc_device_id eisa_tbl[] = { @@ -404,7 +418,7 @@ void eisa_make_irq_level(int num) { if (eisa_irq_configured& (1< 15 || val < 0) { printk(KERN_ERR "eisa: EISA irq value are 0-15\n"); continue; } - if (val == 2) { + if (val == 2) { val = 9; } eisa_make_irq_edge(val); /* clear the corresponding bit */ EISA_DBG("setting IRQ %d to edge-triggered mode\n", val); - + if ((cur = strchr(cur, ','))) { cur++; } else { diff --git a/drivers/parisc/power.c b/drivers/parisc/power.c index ef31b77404ef8e..e2a3112f1c98ef 100644 --- a/drivers/parisc/power.c +++ b/drivers/parisc/power.c @@ -39,7 +39,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/parport/daisy.c b/drivers/parport/daisy.c index d998d0ed2bec55..46eb15fb57fff4 100644 --- a/drivers/parport/daisy.c +++ b/drivers/parport/daisy.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c index f9fd4b33a5463d..74cc6dd982d2aa 100644 --- a/drivers/parport/ieee1284.c +++ b/drivers/parport/ieee1284.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #undef DEBUG /* undef me for production */ diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c index 75071605d22fc6..a959224d011bce 100644 --- a/drivers/parport/ieee1284_ops.c +++ b/drivers/parport/ieee1284_ops.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #undef DEBUG /* undef me for production */ diff --git a/drivers/parport/parport_ip32.c b/drivers/parport/parport_ip32.c index 30e981be14c237..dcbeeb220dda7d 100644 --- a/drivers/parport/parport_ip32.c +++ b/drivers/parport/parport_ip32.c @@ -102,7 +102,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c index 3e56e7deab8e8d..9d42dfe65d448a 100644 --- a/drivers/parport/parport_pc.c +++ b/drivers/parport/parport_pc.c @@ -44,7 +44,7 @@ #include 
#include -#include +#include #include #include #include diff --git a/drivers/parport/share.c b/drivers/parport/share.c index 3308427ed9f7a0..5dc53d420ca8ca 100644 --- a/drivers/parport/share.c +++ b/drivers/parport/share.c @@ -27,7 +27,7 @@ #include #include #include -#include +#include #include #include @@ -939,8 +939,10 @@ parport_register_dev_model(struct parport *port, const char *name, * pardevice fields. -arca */ port->ops->init_state(par_dev, par_dev->state); - port->proc_device = par_dev; - parport_device_proc_register(par_dev); + if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) { + port->proc_device = par_dev; + parport_device_proc_register(par_dev); + } return par_dev; diff --git a/drivers/pci/access.c b/drivers/pci/access.c index b9dd37c8c9ce1e..8b7382705bf27e 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -1,7 +1,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/pci/dwc/pci-exynos.c b/drivers/pci/dwc/pci-exynos.c index 001c91a945aa64..44f774c12fb25e 100644 --- a/drivers/pci/dwc/pci-exynos.c +++ b/drivers/pci/dwc/pci-exynos.c @@ -132,10 +132,6 @@ static int exynos5440_pcie_get_mem_resources(struct platform_device *pdev, struct device *dev = pci->dev; struct resource *res; - /* If using the PHY framework, doesn't need to get other resource */ - if (ep->using_phy) - return 0; - ep->mem_res = devm_kzalloc(dev, sizeof(*ep->mem_res), GFP_KERNEL); if (!ep->mem_res) return -ENOMEM; @@ -145,6 +141,10 @@ static int exynos5440_pcie_get_mem_resources(struct platform_device *pdev, if (IS_ERR(ep->mem_res->elbi_base)) return PTR_ERR(ep->mem_res->elbi_base); + /* If using the PHY framework, doesn't need to get other resource */ + if (ep->using_phy) + return 0; + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); ep->mem_res->phy_base = devm_ioremap_resource(dev, res); if (IS_ERR(ep->mem_res->phy_base)) @@ -668,6 +668,7 @@ static int __init exynos_pcie_probe(struct platform_device *pdev) pci->dev = dev; pci->ops = &dw_pcie_ops; + ep->pci = pci; ep->ops = (const struct exynos_pcie_ops *) of_device_get_match_data(dev); diff --git a/drivers/pci/dwc/pci-imx6.c b/drivers/pci/dwc/pci-imx6.c index 3ab6761db9e8ad..801e46cd266d79 100644 --- a/drivers/pci/dwc/pci-imx6.c +++ b/drivers/pci/dwc/pci-imx6.c @@ -605,6 +605,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev) pci->dev = dev; pci->ops = &dw_pcie_ops; + imx6_pcie->pci = pci; imx6_pcie->variant = (enum imx6_pcie_variants)of_device_get_match_data(dev); diff --git a/drivers/pci/dwc/pci-keystone.c b/drivers/pci/dwc/pci-keystone.c index 8dc66409182da0..fcc9723bad6e01 100644 --- a/drivers/pci/dwc/pci-keystone.c +++ b/drivers/pci/dwc/pci-keystone.c @@ -401,6 +401,8 @@ static int __init ks_pcie_probe(struct platform_device *pdev) pci->dev = dev; pci->ops = &dw_pcie_ops; + ks_pcie->pci = pci; + /* initialize SerDes Phy if present */ phy = devm_phy_get(dev, "pcie-phy"); if (PTR_ERR_OR_ZERO(phy) == -EPROBE_DEFER) diff --git a/drivers/pci/dwc/pci-layerscape.c b/drivers/pci/dwc/pci-layerscape.c index 175c09e3a93261..c32e392a0ae6f8 100644 --- a/drivers/pci/dwc/pci-layerscape.c +++ b/drivers/pci/dwc/pci-layerscape.c @@ -280,6 +280,8 @@ static int __init ls_pcie_probe(struct platform_device *pdev) pci->dev = dev; pci->ops = pcie->drvdata->dw_pcie_ops; + pcie->pci = pci; + dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); pci->dbi_base = devm_ioremap_resource(dev, dbi_base); if (IS_ERR(pci->dbi_base)) diff --git 
a/drivers/pci/dwc/pcie-armada8k.c b/drivers/pci/dwc/pcie-armada8k.c index 66bac6fbfa9f1c..f110e3b24a26dc 100644 --- a/drivers/pci/dwc/pcie-armada8k.c +++ b/drivers/pci/dwc/pcie-armada8k.c @@ -220,6 +220,8 @@ static int armada8k_pcie_probe(struct platform_device *pdev) pci->dev = dev; pci->ops = &dw_pcie_ops; + pcie->pci = pci; + pcie->clk = devm_clk_get(dev, NULL); if (IS_ERR(pcie->clk)) return PTR_ERR(pcie->clk); diff --git a/drivers/pci/dwc/pcie-artpec6.c b/drivers/pci/dwc/pcie-artpec6.c index 59ecc9e664362a..fcd3ef84588355 100644 --- a/drivers/pci/dwc/pcie-artpec6.c +++ b/drivers/pci/dwc/pcie-artpec6.c @@ -253,6 +253,8 @@ static int artpec6_pcie_probe(struct platform_device *pdev) pci->dev = dev; + artpec6_pcie->pci = pci; + dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); pci->dbi_base = devm_ioremap_resource(dev, dbi_base); if (IS_ERR(pci->dbi_base)) diff --git a/drivers/pci/dwc/pcie-designware-plat.c b/drivers/pci/dwc/pcie-designware-plat.c index 65250f63515cf0..b6c832ba39dd69 100644 --- a/drivers/pci/dwc/pcie-designware-plat.c +++ b/drivers/pci/dwc/pcie-designware-plat.c @@ -104,6 +104,8 @@ static int dw_plat_pcie_probe(struct platform_device *pdev) pci->dev = dev; + dw_plat_pcie->pci = pci; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); pci->dbi_base = devm_ioremap_resource(dev, res); if (IS_ERR(pci->dbi_base)) diff --git a/drivers/pci/dwc/pcie-hisi.c b/drivers/pci/dwc/pcie-hisi.c index e3e4fedd9f68d6..fd66a3199db77d 100644 --- a/drivers/pci/dwc/pcie-hisi.c +++ b/drivers/pci/dwc/pcie-hisi.c @@ -284,6 +284,8 @@ static int hisi_pcie_probe(struct platform_device *pdev) driver = dev->driver; + hisi_pcie->pci = pci; + hisi_pcie->soc_ops = of_device_get_match_data(dev); hisi_pcie->subctrl = diff --git a/drivers/pci/dwc/pcie-qcom.c b/drivers/pci/dwc/pcie-qcom.c index e36abe0d9d6f03..67eb7f5926ddd2 100644 --- a/drivers/pci/dwc/pcie-qcom.c +++ b/drivers/pci/dwc/pcie-qcom.c @@ -686,6 +686,8 @@ static int qcom_pcie_probe(struct platform_device *pdev) pci->ops = &dw_pcie_ops; pp = &pci->pp; + pcie->pci = pci; + pcie->ops = (struct qcom_pcie_ops *)of_device_get_match_data(dev); pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW); diff --git a/drivers/pci/dwc/pcie-spear13xx.c b/drivers/pci/dwc/pcie-spear13xx.c index 348f9c5e0433e2..eaa4ea8e2ea4ea 100644 --- a/drivers/pci/dwc/pcie-spear13xx.c +++ b/drivers/pci/dwc/pcie-spear13xx.c @@ -247,6 +247,8 @@ static int spear13xx_pcie_probe(struct platform_device *pdev) pci->dev = dev; pci->ops = &dw_pcie_ops; + spear13xx_pcie->pci = pci; + spear13xx_pcie->phy = devm_phy_get(dev, "pcie-phy"); if (IS_ERR(spear13xx_pcie->phy)) { ret = PTR_ERR(spear13xx_pcie->phy); diff --git a/drivers/pci/host/pci-thunder-pem.c b/drivers/pci/host/pci-thunder-pem.c index 52b5bdccf5f0c2..b89c373555c553 100644 --- a/drivers/pci/host/pci-thunder-pem.c +++ b/drivers/pci/host/pci-thunder-pem.c @@ -14,6 +14,7 @@ * Copyright (C) 2015 - 2016 Cavium, Inc. 
*/ +#include #include #include #include @@ -334,6 +335,50 @@ static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg, #if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) +#define PEM_RES_BASE 0x87e0c0000000UL +#define PEM_NODE_MASK GENMASK(45, 44) +#define PEM_INDX_MASK GENMASK(26, 24) +#define PEM_MIN_DOM_IN_NODE 4 +#define PEM_MAX_DOM_IN_NODE 10 + +static void thunder_pem_reserve_range(struct device *dev, int seg, + struct resource *r) +{ + resource_size_t start = r->start, end = r->end; + struct resource *res; + const char *regionid; + + regionid = kasprintf(GFP_KERNEL, "PEM RC:%d", seg); + if (!regionid) + return; + + res = request_mem_region(start, end - start + 1, regionid); + if (res) + res->flags &= ~IORESOURCE_BUSY; + else + kfree(regionid); + + dev_info(dev, "%pR %s reserved\n", r, + res ? "has been" : "could not be"); +} + +static void thunder_pem_legacy_fw(struct acpi_pci_root *root, + struct resource *res_pem) +{ + int node = acpi_get_node(root->device->handle); + int index; + + if (node == NUMA_NO_NODE) + node = 0; + + index = root->segment - PEM_MIN_DOM_IN_NODE; + index -= node * PEM_MAX_DOM_IN_NODE; + res_pem->start = PEM_RES_BASE | FIELD_PREP(PEM_NODE_MASK, node) | + FIELD_PREP(PEM_INDX_MASK, index); + res_pem->end = res_pem->start + SZ_16M - 1; + res_pem->flags = IORESOURCE_MEM; +} + static int thunder_pem_acpi_init(struct pci_config_window *cfg) { struct device *dev = cfg->parent; @@ -346,10 +391,17 @@ static int thunder_pem_acpi_init(struct pci_config_window *cfg) if (!res_pem) return -ENOMEM; - ret = acpi_get_rc_resources(dev, "THRX0002", root->segment, res_pem); + ret = acpi_get_rc_resources(dev, "CAVA02B", root->segment, res_pem); + + /* + * If we fail to gather resources it means that we run with old + * FW where we need to calculate PEM-specific resources manually. + */ if (ret) { - dev_err(dev, "can't get rc base address\n"); - return ret; + thunder_pem_legacy_fw(root, res_pem); + /* Reserve PEM-specific resources and PCI configuration space */ + thunder_pem_reserve_range(dev, root->segment, res_pem); + thunder_pem_reserve_range(dev, root->segment, &cfg->res); } return thunder_pem_init(dev, cfg, res_pem); diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c index 5043b5f00ed833..75ec5cea26f6e1 100644 --- a/drivers/pci/host/pcie-altera.c +++ b/drivers/pci/host/pcie-altera.c @@ -57,10 +57,14 @@ #define TLP_WRITE_TAG 0x10 #define RP_DEVFN 0 #define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn)) -#define TLP_CFG_DW0(pcie, bus) \ +#define TLP_CFGRD_DW0(pcie, bus) \ ((((bus == pcie->root_bus_nr) ? TLP_FMTTYPE_CFGRD0 \ : TLP_FMTTYPE_CFGRD1) << 24) | \ TLP_PAYLOAD_SIZE) +#define TLP_CFGWR_DW0(pcie, bus) \ + ((((bus == pcie->root_bus_nr) ? 
TLP_FMTTYPE_CFGWR0 \ + : TLP_FMTTYPE_CFGWR1) << 24) | \ + TLP_PAYLOAD_SIZE) #define TLP_CFG_DW1(pcie, tag, be) \ (((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be)) #define TLP_CFG_DW2(bus, devfn, offset) \ @@ -222,7 +226,7 @@ static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn, { u32 headers[TLP_HDR_SIZE]; - headers[0] = TLP_CFG_DW0(pcie, bus); + headers[0] = TLP_CFGRD_DW0(pcie, bus); headers[1] = TLP_CFG_DW1(pcie, TLP_READ_TAG, byte_en); headers[2] = TLP_CFG_DW2(bus, devfn, where); @@ -237,7 +241,7 @@ static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn, u32 headers[TLP_HDR_SIZE]; int ret; - headers[0] = TLP_CFG_DW0(pcie, bus); + headers[0] = TLP_CFGWR_DW0(pcie, bus); headers[1] = TLP_CFG_DW1(pcie, TLP_WRITE_TAG, byte_en); headers[2] = TLP_CFG_DW2(bus, devfn, where); diff --git a/drivers/pci/host/pcie-iproc-bcma.c b/drivers/pci/host/pcie-iproc-bcma.c index bd4c9ec25edc22..384c27e664fec8 100644 --- a/drivers/pci/host/pcie-iproc-bcma.c +++ b/drivers/pci/host/pcie-iproc-bcma.c @@ -44,8 +44,7 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev) { struct device *dev = &bdev->dev; struct iproc_pcie *pcie; - LIST_HEAD(res); - struct resource res_mem; + LIST_HEAD(resources); int ret; pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); @@ -63,22 +62,23 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev) pcie->base_addr = bdev->addr; - res_mem.start = bdev->addr_s[0]; - res_mem.end = bdev->addr_s[0] + SZ_128M - 1; - res_mem.name = "PCIe MEM space"; - res_mem.flags = IORESOURCE_MEM; - pci_add_resource(&res, &res_mem); + pcie->mem.start = bdev->addr_s[0]; + pcie->mem.end = bdev->addr_s[0] + SZ_128M - 1; + pcie->mem.name = "PCIe MEM space"; + pcie->mem.flags = IORESOURCE_MEM; + pci_add_resource(&resources, &pcie->mem); pcie->map_irq = iproc_pcie_bcma_map_irq; - ret = iproc_pcie_setup(pcie, &res); - if (ret) + ret = iproc_pcie_setup(pcie, &resources); + if (ret) { dev_err(dev, "PCIe controller setup failed\n"); - - pci_free_resource_list(&res); + pci_free_resource_list(&resources); + return ret; + } bcma_set_drvdata(bdev, pcie); - return ret; + return 0; } static void iproc_pcie_bcma_remove(struct bcma_device *bdev) diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c index f4909bb0b2ad15..8c6a327ca6cdf8 100644 --- a/drivers/pci/host/pcie-iproc-platform.c +++ b/drivers/pci/host/pcie-iproc-platform.c @@ -51,7 +51,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev) struct device_node *np = dev->of_node; struct resource reg; resource_size_t iobase = 0; - LIST_HEAD(res); + LIST_HEAD(resources); int ret; pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); @@ -96,10 +96,10 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev) pcie->phy = NULL; } - ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &iobase); + ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &resources, + &iobase); if (ret) { - dev_err(dev, - "unable to get PCI host bridge resources\n"); + dev_err(dev, "unable to get PCI host bridge resources\n"); return ret; } @@ -112,14 +112,15 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev) pcie->map_irq = of_irq_parse_and_map_pci; } - ret = iproc_pcie_setup(pcie, &res); - if (ret) + ret = iproc_pcie_setup(pcie, &resources); + if (ret) { dev_err(dev, "PCIe controller setup failed\n"); - - pci_free_resource_list(&res); + pci_free_resource_list(&resources); + return ret; + } platform_set_drvdata(pdev, pcie); - return ret; + 
return 0;
 }
 static int iproc_pcie_pltfm_remove(struct platform_device *pdev)
diff --git a/drivers/pci/host/pcie-iproc.h b/drivers/pci/host/pcie-iproc.h
index 04fed8e907f12b..0bbe2ea44f3e15 100644
--- a/drivers/pci/host/pcie-iproc.h
+++ b/drivers/pci/host/pcie-iproc.h
@@ -90,6 +90,7 @@ struct iproc_pcie {
 #ifdef CONFIG_ARM
 	struct pci_sys_data sysdata;
 #endif
+	struct resource mem;
 	struct pci_bus *root_bus;
 	struct phy *phy;
 	int (*map_irq)(const struct pci_dev *, u8, u8);
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index 7ec8a8f72c698a..95f689f5392014 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -27,6 +27,7 @@
 #include
 #include
+#include <linux/sched/signal.h>
 #include
 #include
 #include
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h
index 9103a7b9f3b996..48c8a066a6b78f 100644
--- a/drivers/pci/hotplug/cpqphp.h
+++ b/drivers/pci/hotplug/cpqphp.h
@@ -32,7 +32,7 @@
 #include <asm/io.h>		/* for read? and write? functions */
 #include <linux/delay.h>	/* for delays */
 #include
-#include <linux/sched.h>	/* for signal_pending() */
+#include <linux/sched/signal.h>	/* for signal_pending() */
 #define MY_NAME	"cpqphp"
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 37d70b5ad22f99..06109d40c4ac92 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -33,7 +33,7 @@
 #include
 #include
 #include
-#include <linux/sched.h>	/* signal_pending() */
+#include <linux/sched/signal.h>	/* signal_pending() */
 #include
 #include
 #include
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index 4da8fc601467be..70c7ea6af0344d 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -33,7 +33,7 @@
 #include
 #include
 #include
-#include <linux/sched.h>	/* signal_pending(), struct timer_list */
+#include <linux/sched/signal.h>	/* signal_pending(), struct timer_list */
 #include
 #include
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 980eaf58828180..d571bc33068651 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -1298,6 +1298,22 @@ const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr)
 }
 EXPORT_SYMBOL(pci_irq_get_affinity);
+/**
+ * pci_irq_get_node - return the numa node of a particular msi vector
+ * @pdev: PCI device to operate on
+ * @vec: device-relative interrupt vector index (0-based).
+ */ +int pci_irq_get_node(struct pci_dev *pdev, int vec) +{ + const struct cpumask *mask; + + mask = pci_irq_get_affinity(pdev, vec); + if (mask) + return local_memory_node(cpu_to_node(cpumask_first(mask))); + return dev_to_node(&pdev->dev); +} +EXPORT_SYMBOL(pci_irq_get_node); + struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc) { return to_pci_dev(desc->dev); diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 973472c23d8904..1dfa10cc566beb 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -478,7 +478,7 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link, static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) { - struct pci_dev *child, *parent = link->pdev; + struct pci_dev *child = link->downstream, *parent = link->pdev; struct pci_bus *linkbus = parent->subordinate; struct aspm_register_info upreg, dwreg; @@ -491,9 +491,7 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) /* Get upstream/downstream components' register state */ pcie_get_aspm_reg(parent, &upreg); - child = pci_function_0(linkbus); pcie_get_aspm_reg(child, &dwreg); - link->downstream = child; /* * If ASPM not supported, don't mess with the clocks and link, @@ -800,6 +798,7 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev) INIT_LIST_HEAD(&link->children); INIT_LIST_HEAD(&link->link); link->pdev = pdev; + link->downstream = pci_function_0(pdev->subordinate); /* * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index f754453fe754e9..673683660b5c70 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -2174,6 +2174,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID, quirk_blacklist_vpd); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd); /* * For Broadcom 5706, 5708, 5709 rev. A nics, any read beyond the diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 6d9335865880e1..9612b84bc3e008 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index dc5277ad1b5a7a..005cadb7a3f8e9 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig @@ -449,6 +449,7 @@ config PHY_QCOM_UFS config PHY_QCOM_USB_HS tristate "Qualcomm USB HS PHY module" depends on USB_ULPI_BUS + depends on EXTCON || !EXTCON # if EXTCON=m, this cannot be built-in select GENERIC_PHY help Support for the USB high-speed ULPI compliant phy on Qualcomm @@ -510,12 +511,4 @@ config PHY_MESON8B_USB2 and GXBB SoCs. If unsure, say N. -config PHY_NSP_USB3 - tristate "Broadcom NorthStar plus USB3 PHY driver" - depends on OF && (ARCH_BCM_NSP || COMPILE_TEST) - select GENERIC_PHY - default ARCH_BCM_NSP - help - Enable this to support the Broadcom Northstar plus USB3 PHY. - If unsure, say N. 
endmenu diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile index e7b0feb1e125a5..dd8f3b5d2918cd 100644 --- a/drivers/phy/Makefile +++ b/drivers/phy/Makefile @@ -62,4 +62,3 @@ obj-$(CONFIG_PHY_CYGNUS_PCIE) += phy-bcm-cygnus-pcie.o obj-$(CONFIG_ARCH_TEGRA) += tegra/ obj-$(CONFIG_PHY_NS2_PCIE) += phy-bcm-ns2-pcie.o obj-$(CONFIG_PHY_MESON8B_USB2) += phy-meson8b-usb2.o -obj-$(CONFIG_PHY_NSP_USB3) += phy-bcm-nsp-usb3.o diff --git a/drivers/phy/phy-bcm-nsp-usb3.c b/drivers/phy/phy-bcm-nsp-usb3.c deleted file mode 100644 index 49024eaa55459b..00000000000000 --- a/drivers/phy/phy-bcm-nsp-usb3.c +++ /dev/null @@ -1,177 +0,0 @@ -/* - * Copyright (C) 2016 Broadcom - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define NSP_USB3_RST_CTRL_OFFSET 0x3f8 - -/* mdio reg access */ -#define NSP_USB3_PHY_BASE_ADDR_REG 0x1f - -#define NSP_USB3_PHY_PLL30_BLOCK 0x8000 -#define NSP_USB3_PLL_CONTROL 0x01 -#define NSP_USB3_PLLA_CONTROL0 0x0a -#define NSP_USB3_PLLA_CONTROL1 0x0b - -#define NSP_USB3_PHY_TX_PMD_BLOCK 0x8040 -#define NSP_USB3_TX_PMD_CONTROL1 0x01 - -#define NSP_USB3_PHY_PIPE_BLOCK 0x8060 -#define NSP_USB3_LFPS_CMP 0x02 -#define NSP_USB3_LFPS_DEGLITCH 0x03 - -struct nsp_usb3_phy { - struct regmap *usb3_ctrl; - struct phy *phy; - struct mdio_device *mdiodev; -}; - -static int nsp_usb3_phy_init(struct phy *phy) -{ - struct nsp_usb3_phy *iphy = phy_get_drvdata(phy); - struct mii_bus *bus = iphy->mdiodev->bus; - int addr = iphy->mdiodev->addr; - u32 data; - int rc; - - rc = regmap_read(iphy->usb3_ctrl, 0, &data); - if (rc) - return rc; - data |= 1; - rc = regmap_write(iphy->usb3_ctrl, 0, data); - if (rc) - return rc; - - rc = regmap_write(iphy->usb3_ctrl, NSP_USB3_RST_CTRL_OFFSET, 1); - if (rc) - return rc; - - rc = mdiobus_write(bus, addr, NSP_USB3_PHY_BASE_ADDR_REG, - NSP_USB3_PHY_PLL30_BLOCK); - if (rc) - return rc; - - rc = mdiobus_write(bus, addr, NSP_USB3_PLL_CONTROL, 0x1000); - if (rc) - return rc; - - rc = mdiobus_write(bus, addr, NSP_USB3_PLLA_CONTROL0, 0x6400); - if (rc) - return rc; - - rc = mdiobus_write(bus, addr, NSP_USB3_PLLA_CONTROL1, 0xc000); - if (rc) - return rc; - - rc = mdiobus_write(bus, addr, NSP_USB3_PLLA_CONTROL1, 0x8000); - if (rc) - return rc; - - rc = regmap_write(iphy->usb3_ctrl, NSP_USB3_RST_CTRL_OFFSET, 0); - if (rc) - return rc; - - rc = mdiobus_write(bus, addr, NSP_USB3_PLL_CONTROL, 0x9000); - if (rc) - return rc; - - rc = mdiobus_write(bus, addr, NSP_USB3_PHY_BASE_ADDR_REG, - NSP_USB3_PHY_PIPE_BLOCK); - if (rc) - return rc; - - rc = mdiobus_write(bus, addr, NSP_USB3_LFPS_CMP, 0xf30d); - if (rc) - return rc; - - rc = mdiobus_write(bus, addr, NSP_USB3_LFPS_DEGLITCH, 0x6302); - if (rc) - return rc; - - rc = mdiobus_write(bus, addr, NSP_USB3_PHY_BASE_ADDR_REG, - NSP_USB3_PHY_TX_PMD_BLOCK); - if (rc) - return rc; - - rc = mdiobus_write(bus, addr, NSP_USB3_TX_PMD_CONTROL1, 0x1003); - - return rc; -} - -static struct phy_ops nsp_usb3_phy_ops = { - .init = nsp_usb3_phy_init, - .owner = THIS_MODULE, -}; - -static int nsp_usb3_phy_probe(struct mdio_device *mdiodev) -{ - 
struct device *dev = &mdiodev->dev; - struct phy_provider *provider; - struct nsp_usb3_phy *iphy; - - iphy = devm_kzalloc(dev, sizeof(*iphy), GFP_KERNEL); - if (!iphy) - return -ENOMEM; - iphy->mdiodev = mdiodev; - - iphy->usb3_ctrl = syscon_regmap_lookup_by_phandle(dev->of_node, - "usb3-ctrl-syscon"); - if (IS_ERR(iphy->usb3_ctrl)) - return PTR_ERR(iphy->usb3_ctrl); - - iphy->phy = devm_phy_create(dev, dev->of_node, &nsp_usb3_phy_ops); - if (IS_ERR(iphy->phy)) { - dev_err(dev, "failed to create PHY\n"); - return PTR_ERR(iphy->phy); - } - - phy_set_drvdata(iphy->phy, iphy); - - provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); - if (IS_ERR(provider)) { - dev_err(dev, "could not register PHY provider\n"); - return PTR_ERR(provider); - } - - return 0; -} - -static const struct of_device_id nsp_usb3_phy_of_match[] = { - {.compatible = "brcm,nsp-usb3-phy",}, - { /* sentinel */ } -}; - -static struct mdio_driver nsp_usb3_phy_driver = { - .mdiodrv = { - .driver = { - .name = "nsp-usb3-phy", - .of_match_table = nsp_usb3_phy_of_match, - }, - }, - .probe = nsp_usb3_phy_probe, -}; - -mdio_module_driver(nsp_usb3_phy_driver); - -MODULE_DESCRIPTION("Broadcom NSP USB3 PHY driver"); -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Yendapally Reddy Dhananjaya Reddy blk_base = devm_ioremap_resource(dev, res); - if (IS_ERR(exynos_phy->phy_base)) - return PTR_ERR(exynos_phy->phy_base); + if (IS_ERR(exynos_phy->blk_base)) + return PTR_ERR(exynos_phy->blk_base); exynos_phy->drv_data = drv_data; diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c index 7671424d46cbe0..31a3a98d067caa 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c +++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c @@ -667,11 +667,11 @@ static const char * const uart_ao_b_groups[] = { }; static const char * const i2c_ao_groups[] = { - "i2c_sdk_ao", "i2c_sda_ao", + "i2c_sck_ao", "i2c_sda_ao", }; static const char * const i2c_slave_ao_groups[] = { - "i2c_slave_sdk_ao", "i2c_slave_sda_ao", + "i2c_slave_sck_ao", "i2c_slave_sda_ao", }; static const char * const remote_input_ao_groups[] = { diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c index 676efcc032d261..3ae8066bc1279c 100644 --- a/drivers/pinctrl/pinctrl-st.c +++ b/drivers/pinctrl/pinctrl-st.c @@ -1285,6 +1285,22 @@ static void st_gpio_irq_unmask(struct irq_data *d) writel(BIT(d->hwirq), bank->base + REG_PIO_SET_PMASK); } +static int st_gpio_irq_request_resources(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + + st_gpio_direction_input(gc, d->hwirq); + + return gpiochip_lock_as_irq(gc, d->hwirq); +} + +static void st_gpio_irq_release_resources(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + + gpiochip_unlock_as_irq(gc, d->hwirq); +} + static int st_gpio_irq_set_type(struct irq_data *d, unsigned type) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); @@ -1438,12 +1454,14 @@ static struct gpio_chip st_gpio_template = { }; static struct irq_chip st_gpio_irqchip = { - .name = "GPIO", - .irq_disable = st_gpio_irq_mask, - .irq_mask = st_gpio_irq_mask, - .irq_unmask = st_gpio_irq_unmask, - .irq_set_type = st_gpio_irq_set_type, - .flags = IRQCHIP_SKIP_SET_WAKE, + .name = "GPIO", + .irq_request_resources = st_gpio_irq_request_resources, + .irq_release_resources = st_gpio_irq_release_resources, + .irq_disable = st_gpio_irq_mask, + .irq_mask = st_gpio_irq_mask, + .irq_unmask = st_gpio_irq_unmask, + .irq_set_type = st_gpio_irq_set_type, + .flags = 
IRQCHIP_SKIP_SET_WAKE, }; static int st_gpiolib_register_bank(struct st_pinctrl *info, diff --git a/drivers/pinctrl/qcom/pinctrl-ipq4019.c b/drivers/pinctrl/qcom/pinctrl-ipq4019.c index b68ae424cee247..743d1f458205fa 100644 --- a/drivers/pinctrl/qcom/pinctrl-ipq4019.c +++ b/drivers/pinctrl/qcom/pinctrl-ipq4019.c @@ -405,6 +405,36 @@ static const struct msm_pingroup ipq4019_groups[] = { PINGROUP(67, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), PINGROUP(68, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), PINGROUP(69, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(70, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(71, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(72, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(73, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(74, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(75, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(76, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(77, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(78, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(79, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(80, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(81, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(82, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(83, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(84, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(85, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(86, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(87, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(88, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(89, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(90, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(91, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(92, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(93, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(94, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(95, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(96, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(97, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(98, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(99, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), }; static const struct msm_pinctrl_soc_data ipq4019_pinctrl = { diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index f8e9e1c2b2f6f4..273badd925611a 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -422,6 +422,20 @@ static int msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, in return 0; } +static int msm_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) +{ + struct msm_pinctrl *pctrl = gpiochip_get_data(chip); + const struct msm_pingroup *g; + u32 val; + + g = &pctrl->soc->groups[offset]; + + val = readl(pctrl->regs + g->ctl_reg); + + /* 0 = output, 1 = input */ + return val & BIT(g->oe_bit) ? 
0 : 1; +} + static int msm_gpio_get(struct gpio_chip *chip, unsigned offset) { const struct msm_pingroup *g; @@ -510,6 +524,7 @@ static void msm_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip) static struct gpio_chip msm_gpio_template = { .direction_input = msm_gpio_direction_input, .direction_output = msm_gpio_direction_output, + .get_direction = msm_gpio_get_direction, .get = msm_gpio_get, .set = msm_gpio_set, .request = gpiochip_generic_request, @@ -594,10 +609,6 @@ static void msm_gpio_irq_unmask(struct irq_data *d) raw_spin_lock_irqsave(&pctrl->lock, flags); - val = readl(pctrl->regs + g->intr_status_reg); - val &= ~BIT(g->intr_status_bit); - writel(val, pctrl->regs + g->intr_status_reg); - val = readl(pctrl->regs + g->intr_cfg_reg); val |= BIT(g->intr_enable_bit); writel(val, pctrl->regs + g->intr_cfg_reg); diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c index f9ddba7decc185..d7aa22cff480ed 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.c +++ b/drivers/pinctrl/samsung/pinctrl-samsung.c @@ -988,9 +988,16 @@ samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d, for (i = 0; i < ctrl->nr_ext_resources + 1; i++) { res = platform_get_resource(pdev, IORESOURCE_MEM, i); - virt_base[i] = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(virt_base[i])) - return ERR_CAST(virt_base[i]); + if (!res) { + dev_err(&pdev->dev, "failed to get mem%d resource\n", i); + return ERR_PTR(-EINVAL); + } + virt_base[i] = devm_ioremap(&pdev->dev, res->start, + resource_size(res)); + if (!virt_base[i]) { + dev_err(&pdev->dev, "failed to ioremap %pR\n", res); + return ERR_PTR(-EIO); + } } bank = d->pin_banks; diff --git a/drivers/pinctrl/ti/Kconfig b/drivers/pinctrl/ti/Kconfig index 815a88673d3819..542077069391b9 100644 --- a/drivers/pinctrl/ti/Kconfig +++ b/drivers/pinctrl/ti/Kconfig @@ -1,6 +1,6 @@ config PINCTRL_TI_IODELAY tristate "TI IODelay Module pinconf driver" - depends on OF + depends on OF && (SOC_DRA7XX || COMPILE_TEST) select GENERIC_PINCTRL_GROUPS select GENERIC_PINMUX_FUNCTIONS select GENERIC_PINCONF diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c index 77a0236ee781dd..83f8864fa76ac5 100644 --- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c +++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c @@ -390,22 +390,22 @@ static const struct pinctrl_pin_desc uniphier_ld11_pins[] = { UNIPHIER_PINCTRL_PIN(140, "AO1D0", 140, 140, UNIPHIER_PIN_DRV_1BIT, 140, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(141, "TCON0", 141, + UNIPHIER_PINCTRL_PIN(141, "AO1D1", 141, 141, UNIPHIER_PIN_DRV_1BIT, 141, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(142, "TCON1", 142, + UNIPHIER_PINCTRL_PIN(142, "AO1D2", 142, 142, UNIPHIER_PIN_DRV_1BIT, 142, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(143, "TCON2", 143, + UNIPHIER_PINCTRL_PIN(143, "XIRQ9", 143, 143, UNIPHIER_PIN_DRV_1BIT, 143, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(144, "TCON3", 144, + UNIPHIER_PINCTRL_PIN(144, "XIRQ10", 144, 144, UNIPHIER_PIN_DRV_1BIT, 144, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(145, "TCON4", 145, + UNIPHIER_PINCTRL_PIN(145, "XIRQ11", 145, 145, UNIPHIER_PIN_DRV_1BIT, 145, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(146, "TCON5", 146, + UNIPHIER_PINCTRL_PIN(146, "XIRQ13", 146, 146, UNIPHIER_PIN_DRV_1BIT, 146, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(147, "PWMA", 147, diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index 
5be4783e40d4c9..dea98ffb6f606a 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -103,15 +103,6 @@ static struct quirk_entry quirk_asus_x200ca = { .wapf = 2, }; -static struct quirk_entry quirk_no_rfkill = { - .no_rfkill = true, -}; - -static struct quirk_entry quirk_no_rfkill_wapf4 = { - .wapf = 4, - .no_rfkill = true, -}; - static struct quirk_entry quirk_asus_ux303ub = { .wmi_backlight_native = true, }; @@ -194,7 +185,7 @@ static const struct dmi_system_id asus_quirks[] = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_MATCH(DMI_PRODUCT_NAME, "X456UA"), }, - .driver_data = &quirk_no_rfkill_wapf4, + .driver_data = &quirk_asus_wapf4, }, { .callback = dmi_matched, @@ -203,7 +194,7 @@ static const struct dmi_system_id asus_quirks[] = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_MATCH(DMI_PRODUCT_NAME, "X456UF"), }, - .driver_data = &quirk_no_rfkill_wapf4, + .driver_data = &quirk_asus_wapf4, }, { .callback = dmi_matched, @@ -367,42 +358,6 @@ static const struct dmi_system_id asus_quirks[] = { }, .driver_data = &quirk_asus_x200ca, }, - { - .callback = dmi_matched, - .ident = "ASUSTeK COMPUTER INC. X555UB", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "X555UB"), - }, - .driver_data = &quirk_no_rfkill, - }, - { - .callback = dmi_matched, - .ident = "ASUSTeK COMPUTER INC. N552VW", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "N552VW"), - }, - .driver_data = &quirk_no_rfkill, - }, - { - .callback = dmi_matched, - .ident = "ASUSTeK COMPUTER INC. U303LB", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "U303LB"), - }, - .driver_data = &quirk_no_rfkill, - }, - { - .callback = dmi_matched, - .ident = "ASUSTeK COMPUTER INC. Z550MA", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "Z550MA"), - }, - .driver_data = &quirk_no_rfkill, - }, { .callback = dmi_matched, .ident = "ASUSTeK COMPUTER INC. 
UX303UB", diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 43cb680adbb420..8fe5890bf539f4 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -159,6 +159,8 @@ MODULE_LICENSE("GPL"); #define USB_INTEL_XUSB2PR 0xD0 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31 +static const char * const ashs_ids[] = { "ATK4001", "ATK4002", NULL }; + struct bios_args { u32 arg0; u32 arg1; @@ -2051,6 +2053,16 @@ static int asus_wmi_fan_init(struct asus_wmi *asus) return 0; } +static bool ashs_present(void) +{ + int i = 0; + while (ashs_ids[i]) { + if (acpi_dev_found(ashs_ids[i++])) + return true; + } + return false; +} + /* * WMI Driver */ @@ -2095,7 +2107,11 @@ static int asus_wmi_add(struct platform_device *pdev) if (err) goto fail_leds; - if (!asus->driver->quirks->no_rfkill) { + asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WLAN, &result); + if (result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT)) + asus->driver->wlan_ctrl_by_user = 1; + + if (!(asus->driver->wlan_ctrl_by_user && ashs_present())) { err = asus_wmi_rfkill_init(asus); if (err) goto fail_rfkill; @@ -2134,10 +2150,6 @@ static int asus_wmi_add(struct platform_device *pdev) if (err) goto fail_debugfs; - asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WLAN, &result); - if (result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT)) - asus->driver->wlan_ctrl_by_user = 1; - return 0; fail_debugfs: diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h index fdff626c3b51b0..c9589d9342bbf8 100644 --- a/drivers/platform/x86/asus-wmi.h +++ b/drivers/platform/x86/asus-wmi.h @@ -39,7 +39,6 @@ struct key_entry; struct asus_wmi; struct quirk_entry { - bool no_rfkill; bool hotplug_wireless; bool scalar_panel_brightness; bool store_backlight_power; diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c index 2b218b1d13e55d..e12cc3504d4879 100644 --- a/drivers/platform/x86/fujitsu-laptop.c +++ b/drivers/platform/x86/fujitsu-laptop.c @@ -78,18 +78,18 @@ #define FUJITSU_LCD_N_LEVELS 8 -#define ACPI_FUJITSU_CLASS "fujitsu" -#define ACPI_FUJITSU_HID "FUJ02B1" -#define ACPI_FUJITSU_DRIVER_NAME "Fujitsu laptop FUJ02B1 ACPI brightness driver" -#define ACPI_FUJITSU_DEVICE_NAME "Fujitsu FUJ02B1" -#define ACPI_FUJITSU_HOTKEY_HID "FUJ02E3" -#define ACPI_FUJITSU_HOTKEY_DRIVER_NAME "Fujitsu laptop FUJ02E3 ACPI hotkeys driver" -#define ACPI_FUJITSU_HOTKEY_DEVICE_NAME "Fujitsu FUJ02E3" +#define ACPI_FUJITSU_CLASS "fujitsu" +#define ACPI_FUJITSU_BL_HID "FUJ02B1" +#define ACPI_FUJITSU_BL_DRIVER_NAME "Fujitsu laptop FUJ02B1 ACPI brightness driver" +#define ACPI_FUJITSU_BL_DEVICE_NAME "Fujitsu FUJ02B1" +#define ACPI_FUJITSU_LAPTOP_HID "FUJ02E3" +#define ACPI_FUJITSU_LAPTOP_DRIVER_NAME "Fujitsu laptop FUJ02E3 ACPI hotkeys driver" +#define ACPI_FUJITSU_LAPTOP_DEVICE_NAME "Fujitsu FUJ02E3" #define ACPI_FUJITSU_NOTIFY_CODE1 0x80 /* FUNC interface - command values */ -#define FUNC_RFKILL 0x1000 +#define FUNC_FLAGS 0x1000 #define FUNC_LEDS 0x1001 #define FUNC_BUTTONS 0x1002 #define FUNC_BACKLIGHT 0x1004 @@ -97,6 +97,11 @@ /* FUNC interface - responses */ #define UNSUPPORTED_CMD 0x80000000 +/* FUNC interface - status flags */ +#define FLAG_RFKILL 0x020 +#define FLAG_LID 0x100 +#define FLAG_DOCK 0x200 + #if IS_ENABLED(CONFIG_LEDS_CLASS) /* FUNC interface - LED control */ #define FUNC_LED_OFF 0x1 @@ -136,7 +141,7 @@ #endif /* Device controlling the backlight and associated keys */ -struct fujitsu_t { +struct fujitsu_bl { acpi_handle acpi_handle; 
struct acpi_device *dev; struct input_dev *input; @@ -150,12 +155,12 @@ struct fujitsu_t { unsigned int brightness_level; }; -static struct fujitsu_t *fujitsu; +static struct fujitsu_bl *fujitsu_bl; static int use_alt_lcd_levels = -1; static int disable_brightness_adjust = -1; -/* Device used to access other hotkeys on the laptop */ -struct fujitsu_hotkey_t { +/* Device used to access hotkeys and other features on the laptop */ +struct fujitsu_laptop { acpi_handle acpi_handle; struct acpi_device *dev; struct input_dev *input; @@ -163,17 +168,15 @@ struct fujitsu_hotkey_t { struct platform_device *pf_device; struct kfifo fifo; spinlock_t fifo_lock; - int rfkill_supported; - int rfkill_state; + int flags_supported; + int flags_state; int logolamp_registered; int kblamps_registered; int radio_led_registered; int eco_led_registered; }; -static struct fujitsu_hotkey_t *fujitsu_hotkey; - -static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event); +static struct fujitsu_laptop *fujitsu_laptop; #if IS_ENABLED(CONFIG_LEDS_CLASS) static enum led_brightness logolamp_get(struct led_classdev *cdev); @@ -222,8 +225,6 @@ static struct led_classdev eco_led = { static u32 dbg_level = 0x03; #endif -static void acpi_fujitsu_notify(struct acpi_device *device, u32 event); - /* Fujitsu ACPI interface function */ static int call_fext_func(int cmd, int arg0, int arg1, int arg2) @@ -239,7 +240,7 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2) unsigned long long value; acpi_handle handle = NULL; - status = acpi_get_handle(fujitsu_hotkey->acpi_handle, "FUNC", &handle); + status = acpi_get_handle(fujitsu_laptop->acpi_handle, "FUNC", &handle); if (ACPI_FAILURE(status)) { vdbg_printk(FUJLAPTOP_DBG_ERROR, "FUNC interface is not present\n"); @@ -300,9 +301,9 @@ static int radio_led_set(struct led_classdev *cdev, enum led_brightness brightness) { if (brightness >= LED_FULL) - return call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, RADIO_LED_ON); + return call_fext_func(FUNC_FLAGS, 0x5, RADIO_LED_ON, RADIO_LED_ON); else - return call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, 0x0); + return call_fext_func(FUNC_FLAGS, 0x5, RADIO_LED_ON, 0x0); } static int eco_led_set(struct led_classdev *cdev, @@ -346,7 +347,7 @@ static enum led_brightness radio_led_get(struct led_classdev *cdev) { enum led_brightness brightness = LED_OFF; - if (call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0) & RADIO_LED_ON) + if (call_fext_func(FUNC_FLAGS, 0x4, 0x0, 0x0) & RADIO_LED_ON) brightness = LED_FULL; return brightness; @@ -373,10 +374,10 @@ static int set_lcd_level(int level) vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBLL [%d]\n", level); - if (level < 0 || level >= fujitsu->max_brightness) + if (level < 0 || level >= fujitsu_bl->max_brightness) return -EINVAL; - status = acpi_get_handle(fujitsu->acpi_handle, "SBLL", &handle); + status = acpi_get_handle(fujitsu_bl->acpi_handle, "SBLL", &handle); if (ACPI_FAILURE(status)) { vdbg_printk(FUJLAPTOP_DBG_ERROR, "SBLL not present\n"); return -ENODEV; @@ -398,10 +399,10 @@ static int set_lcd_level_alt(int level) vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBL2 [%d]\n", level); - if (level < 0 || level >= fujitsu->max_brightness) + if (level < 0 || level >= fujitsu_bl->max_brightness) return -EINVAL; - status = acpi_get_handle(fujitsu->acpi_handle, "SBL2", &handle); + status = acpi_get_handle(fujitsu_bl->acpi_handle, "SBL2", &handle); if (ACPI_FAILURE(status)) { vdbg_printk(FUJLAPTOP_DBG_ERROR, "SBL2 not present\n"); return -ENODEV; @@ -421,19 +422,19 @@ 
static int get_lcd_level(void) vdbg_printk(FUJLAPTOP_DBG_TRACE, "get lcd level via GBLL\n"); - status = - acpi_evaluate_integer(fujitsu->acpi_handle, "GBLL", NULL, &state); + status = acpi_evaluate_integer(fujitsu_bl->acpi_handle, "GBLL", NULL, + &state); if (ACPI_FAILURE(status)) return 0; - fujitsu->brightness_level = state & 0x0fffffff; + fujitsu_bl->brightness_level = state & 0x0fffffff; if (state & 0x80000000) - fujitsu->brightness_changed = 1; + fujitsu_bl->brightness_changed = 1; else - fujitsu->brightness_changed = 0; + fujitsu_bl->brightness_changed = 0; - return fujitsu->brightness_level; + return fujitsu_bl->brightness_level; } static int get_max_brightness(void) @@ -443,14 +444,14 @@ static int get_max_brightness(void) vdbg_printk(FUJLAPTOP_DBG_TRACE, "get max lcd level via RBLL\n"); - status = - acpi_evaluate_integer(fujitsu->acpi_handle, "RBLL", NULL, &state); + status = acpi_evaluate_integer(fujitsu_bl->acpi_handle, "RBLL", NULL, + &state); if (ACPI_FAILURE(status)) return -1; - fujitsu->max_brightness = state; + fujitsu_bl->max_brightness = state; - return fujitsu->max_brightness; + return fujitsu_bl->max_brightness; } /* Backlight device stuff */ @@ -483,7 +484,7 @@ static int bl_update_status(struct backlight_device *b) return ret; } -static const struct backlight_ops fujitsubl_ops = { +static const struct backlight_ops fujitsu_bl_ops = { .get_brightness = bl_get_brightness, .update_status = bl_update_status, }; @@ -511,7 +512,7 @@ show_brightness_changed(struct device *dev, int ret; - ret = fujitsu->brightness_changed; + ret = fujitsu_bl->brightness_changed; if (ret < 0) return ret; @@ -539,7 +540,7 @@ static ssize_t store_lcd_level(struct device *dev, int level, ret; if (sscanf(buf, "%i", &level) != 1 - || (level < 0 || level >= fujitsu->max_brightness)) + || (level < 0 || level >= fujitsu_bl->max_brightness)) return -EINVAL; if (use_alt_lcd_levels) @@ -567,9 +568,9 @@ static ssize_t show_lid_state(struct device *dev, struct device_attribute *attr, char *buf) { - if (!(fujitsu_hotkey->rfkill_supported & 0x100)) + if (!(fujitsu_laptop->flags_supported & FLAG_LID)) return sprintf(buf, "unknown\n"); - if (fujitsu_hotkey->rfkill_state & 0x100) + if (fujitsu_laptop->flags_state & FLAG_LID) return sprintf(buf, "open\n"); else return sprintf(buf, "closed\n"); @@ -579,9 +580,9 @@ static ssize_t show_dock_state(struct device *dev, struct device_attribute *attr, char *buf) { - if (!(fujitsu_hotkey->rfkill_supported & 0x200)) + if (!(fujitsu_laptop->flags_supported & FLAG_DOCK)) return sprintf(buf, "unknown\n"); - if (fujitsu_hotkey->rfkill_state & 0x200) + if (fujitsu_laptop->flags_state & FLAG_DOCK) return sprintf(buf, "docked\n"); else return sprintf(buf, "undocked\n"); @@ -591,9 +592,9 @@ static ssize_t show_radios_state(struct device *dev, struct device_attribute *attr, char *buf) { - if (!(fujitsu_hotkey->rfkill_supported & 0x20)) + if (!(fujitsu_laptop->flags_supported & FLAG_RFKILL)) return sprintf(buf, "unknown\n"); - if (fujitsu_hotkey->rfkill_state & 0x20) + if (fujitsu_laptop->flags_state & FLAG_RFKILL) return sprintf(buf, "on\n"); else return sprintf(buf, "killed\n"); @@ -607,7 +608,7 @@ static DEVICE_ATTR(lid, 0444, show_lid_state, ignore_store); static DEVICE_ATTR(dock, 0444, show_dock_state, ignore_store); static DEVICE_ATTR(radios, 0444, show_radios_state, ignore_store); -static struct attribute *fujitsupf_attributes[] = { +static struct attribute *fujitsu_pf_attributes[] = { &dev_attr_brightness_changed.attr, &dev_attr_max_brightness.attr, 
&dev_attr_lcd_level.attr, @@ -617,11 +618,11 @@ static struct attribute *fujitsupf_attributes[] = { NULL }; -static struct attribute_group fujitsupf_attribute_group = { - .attrs = fujitsupf_attributes +static struct attribute_group fujitsu_pf_attribute_group = { + .attrs = fujitsu_pf_attributes }; -static struct platform_driver fujitsupf_driver = { +static struct platform_driver fujitsu_pf_driver = { .driver = { .name = "fujitsu-laptop", } @@ -630,39 +631,30 @@ static struct platform_driver fujitsupf_driver = { static void __init dmi_check_cb_common(const struct dmi_system_id *id) { pr_info("Identified laptop model '%s'\n", id->ident); - if (use_alt_lcd_levels == -1) { - if (acpi_has_method(NULL, - "\\_SB.PCI0.LPCB.FJEX.SBL2")) - use_alt_lcd_levels = 1; - else - use_alt_lcd_levels = 0; - vdbg_printk(FUJLAPTOP_DBG_TRACE, "auto-detected usealt as " - "%i\n", use_alt_lcd_levels); - } } static int __init dmi_check_cb_s6410(const struct dmi_system_id *id) { dmi_check_cb_common(id); - fujitsu->keycode1 = KEY_SCREENLOCK; /* "Lock" */ - fujitsu->keycode2 = KEY_HELP; /* "Mobility Center" */ + fujitsu_bl->keycode1 = KEY_SCREENLOCK; /* "Lock" */ + fujitsu_bl->keycode2 = KEY_HELP; /* "Mobility Center" */ return 1; } static int __init dmi_check_cb_s6420(const struct dmi_system_id *id) { dmi_check_cb_common(id); - fujitsu->keycode1 = KEY_SCREENLOCK; /* "Lock" */ - fujitsu->keycode2 = KEY_HELP; /* "Mobility Center" */ + fujitsu_bl->keycode1 = KEY_SCREENLOCK; /* "Lock" */ + fujitsu_bl->keycode2 = KEY_HELP; /* "Mobility Center" */ return 1; } static int __init dmi_check_cb_p8010(const struct dmi_system_id *id) { dmi_check_cb_common(id); - fujitsu->keycode1 = KEY_HELP; /* "Support" */ - fujitsu->keycode3 = KEY_SWITCHVIDEOMODE; /* "Presentation" */ - fujitsu->keycode4 = KEY_WWW; /* "Internet" */ + fujitsu_bl->keycode1 = KEY_HELP; /* "Support" */ + fujitsu_bl->keycode3 = KEY_SWITCHVIDEOMODE; /* "Presentation" */ + fujitsu_bl->keycode4 = KEY_WWW; /* "Internet" */ return 1; } @@ -693,7 +685,7 @@ static const struct dmi_system_id fujitsu_dmi_table[] __initconst = { /* ACPI device for LCD brightness control */ -static int acpi_fujitsu_add(struct acpi_device *device) +static int acpi_fujitsu_bl_add(struct acpi_device *device) { int state = 0; struct input_dev *input; @@ -702,22 +694,22 @@ static int acpi_fujitsu_add(struct acpi_device *device) if (!device) return -EINVAL; - fujitsu->acpi_handle = device->handle; - sprintf(acpi_device_name(device), "%s", ACPI_FUJITSU_DEVICE_NAME); + fujitsu_bl->acpi_handle = device->handle; + sprintf(acpi_device_name(device), "%s", ACPI_FUJITSU_BL_DEVICE_NAME); sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS); - device->driver_data = fujitsu; + device->driver_data = fujitsu_bl; - fujitsu->input = input = input_allocate_device(); + fujitsu_bl->input = input = input_allocate_device(); if (!input) { error = -ENOMEM; goto err_stop; } - snprintf(fujitsu->phys, sizeof(fujitsu->phys), + snprintf(fujitsu_bl->phys, sizeof(fujitsu_bl->phys), "%s/video/input0", acpi_device_hid(device)); input->name = acpi_device_name(device); - input->phys = fujitsu->phys; + input->phys = fujitsu_bl->phys; input->id.bustype = BUS_HOST; input->id.product = 0x06; input->dev.parent = &device->dev; @@ -730,7 +722,7 @@ static int acpi_fujitsu_add(struct acpi_device *device) if (error) goto err_free_input_dev; - error = acpi_bus_update_power(fujitsu->acpi_handle, &state); + error = acpi_bus_update_power(fujitsu_bl->acpi_handle, &state); if (error) { pr_err("Error reading power state\n"); goto 
err_unregister_input_dev; @@ -740,7 +732,7 @@ static int acpi_fujitsu_add(struct acpi_device *device) acpi_device_name(device), acpi_device_bid(device), !device->power.state ? "on" : "off"); - fujitsu->dev = device; + fujitsu_bl->dev = device; if (acpi_has_method(device->handle, METHOD_NAME__INI)) { vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n"); @@ -750,6 +742,15 @@ static int acpi_fujitsu_add(struct acpi_device *device) pr_err("_INI Method failed\n"); } + if (use_alt_lcd_levels == -1) { + if (acpi_has_method(NULL, "\\_SB.PCI0.LPCB.FJEX.SBL2")) + use_alt_lcd_levels = 1; + else + use_alt_lcd_levels = 0; + vdbg_printk(FUJLAPTOP_DBG_TRACE, "auto-detected usealt as %i\n", + use_alt_lcd_levels); + } + /* do config (detect defaults) */ use_alt_lcd_levels = use_alt_lcd_levels == 1 ? 1 : 0; disable_brightness_adjust = disable_brightness_adjust == 1 ? 1 : 0; @@ -758,7 +759,7 @@ static int acpi_fujitsu_add(struct acpi_device *device) use_alt_lcd_levels, disable_brightness_adjust); if (get_max_brightness() <= 0) - fujitsu->max_brightness = FUJITSU_LCD_N_LEVELS; + fujitsu_bl->max_brightness = FUJITSU_LCD_N_LEVELS; get_lcd_level(); return 0; @@ -772,38 +773,38 @@ static int acpi_fujitsu_add(struct acpi_device *device) return error; } -static int acpi_fujitsu_remove(struct acpi_device *device) +static int acpi_fujitsu_bl_remove(struct acpi_device *device) { - struct fujitsu_t *fujitsu = acpi_driver_data(device); - struct input_dev *input = fujitsu->input; + struct fujitsu_bl *fujitsu_bl = acpi_driver_data(device); + struct input_dev *input = fujitsu_bl->input; input_unregister_device(input); - fujitsu->acpi_handle = NULL; + fujitsu_bl->acpi_handle = NULL; return 0; } /* Brightness notify */ -static void acpi_fujitsu_notify(struct acpi_device *device, u32 event) +static void acpi_fujitsu_bl_notify(struct acpi_device *device, u32 event) { struct input_dev *input; int keycode; int oldb, newb; - input = fujitsu->input; + input = fujitsu_bl->input; switch (event) { case ACPI_FUJITSU_NOTIFY_CODE1: keycode = 0; - oldb = fujitsu->brightness_level; + oldb = fujitsu_bl->brightness_level; get_lcd_level(); - newb = fujitsu->brightness_level; + newb = fujitsu_bl->brightness_level; vdbg_printk(FUJLAPTOP_DBG_TRACE, "brightness button event [%i -> %i (%i)]\n", - oldb, newb, fujitsu->brightness_changed); + oldb, newb, fujitsu_bl->brightness_changed); if (oldb < newb) { if (disable_brightness_adjust != 1) { @@ -840,7 +841,7 @@ static void acpi_fujitsu_notify(struct acpi_device *device, u32 event) /* ACPI device for hotkey handling */ -static int acpi_fujitsu_hotkey_add(struct acpi_device *device) +static int acpi_fujitsu_laptop_add(struct acpi_device *device) { int result = 0; int state = 0; @@ -851,42 +852,42 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device) if (!device) return -EINVAL; - fujitsu_hotkey->acpi_handle = device->handle; + fujitsu_laptop->acpi_handle = device->handle; sprintf(acpi_device_name(device), "%s", - ACPI_FUJITSU_HOTKEY_DEVICE_NAME); + ACPI_FUJITSU_LAPTOP_DEVICE_NAME); sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS); - device->driver_data = fujitsu_hotkey; + device->driver_data = fujitsu_laptop; /* kfifo */ - spin_lock_init(&fujitsu_hotkey->fifo_lock); - error = kfifo_alloc(&fujitsu_hotkey->fifo, RINGBUFFERSIZE * sizeof(int), + spin_lock_init(&fujitsu_laptop->fifo_lock); + error = kfifo_alloc(&fujitsu_laptop->fifo, RINGBUFFERSIZE * sizeof(int), GFP_KERNEL); if (error) { pr_err("kfifo_alloc failed\n"); goto err_stop; } - fujitsu_hotkey->input = input = 
input_allocate_device(); + fujitsu_laptop->input = input = input_allocate_device(); if (!input) { error = -ENOMEM; goto err_free_fifo; } - snprintf(fujitsu_hotkey->phys, sizeof(fujitsu_hotkey->phys), + snprintf(fujitsu_laptop->phys, sizeof(fujitsu_laptop->phys), "%s/video/input0", acpi_device_hid(device)); input->name = acpi_device_name(device); - input->phys = fujitsu_hotkey->phys; + input->phys = fujitsu_laptop->phys; input->id.bustype = BUS_HOST; input->id.product = 0x06; input->dev.parent = &device->dev; set_bit(EV_KEY, input->evbit); - set_bit(fujitsu->keycode1, input->keybit); - set_bit(fujitsu->keycode2, input->keybit); - set_bit(fujitsu->keycode3, input->keybit); - set_bit(fujitsu->keycode4, input->keybit); - set_bit(fujitsu->keycode5, input->keybit); + set_bit(fujitsu_bl->keycode1, input->keybit); + set_bit(fujitsu_bl->keycode2, input->keybit); + set_bit(fujitsu_bl->keycode3, input->keybit); + set_bit(fujitsu_bl->keycode4, input->keybit); + set_bit(fujitsu_bl->keycode5, input->keybit); set_bit(KEY_TOUCHPAD_TOGGLE, input->keybit); set_bit(KEY_UNKNOWN, input->keybit); @@ -894,7 +895,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device) if (error) goto err_free_input_dev; - error = acpi_bus_update_power(fujitsu_hotkey->acpi_handle, &state); + error = acpi_bus_update_power(fujitsu_laptop->acpi_handle, &state); if (error) { pr_err("Error reading power state\n"); goto err_unregister_input_dev; @@ -904,7 +905,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device) acpi_device_name(device), acpi_device_bid(device), !device->power.state ? "on" : "off"); - fujitsu_hotkey->dev = device; + fujitsu_laptop->dev = device; if (acpi_has_method(device->handle, METHOD_NAME__INI)) { vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n"); @@ -920,27 +921,27 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device) ; /* No action, result is discarded */ vdbg_printk(FUJLAPTOP_DBG_INFO, "Discarded %i ringbuffer entries\n", i); - fujitsu_hotkey->rfkill_supported = - call_fext_func(FUNC_RFKILL, 0x0, 0x0, 0x0); + fujitsu_laptop->flags_supported = + call_fext_func(FUNC_FLAGS, 0x0, 0x0, 0x0); /* Make sure our bitmask of supported functions is cleared if the RFKILL function block is not implemented, like on the S7020. 
*/ - if (fujitsu_hotkey->rfkill_supported == UNSUPPORTED_CMD) - fujitsu_hotkey->rfkill_supported = 0; + if (fujitsu_laptop->flags_supported == UNSUPPORTED_CMD) + fujitsu_laptop->flags_supported = 0; - if (fujitsu_hotkey->rfkill_supported) - fujitsu_hotkey->rfkill_state = - call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0); + if (fujitsu_laptop->flags_supported) + fujitsu_laptop->flags_state = + call_fext_func(FUNC_FLAGS, 0x4, 0x0, 0x0); /* Suspect this is a keymap of the application panel, print it */ pr_info("BTNI: [0x%x]\n", call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0)); #if IS_ENABLED(CONFIG_LEDS_CLASS) if (call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & LOGOLAMP_POWERON) { - result = led_classdev_register(&fujitsu->pf_device->dev, + result = led_classdev_register(&fujitsu_bl->pf_device->dev, &logolamp_led); if (result == 0) { - fujitsu_hotkey->logolamp_registered = 1; + fujitsu_laptop->logolamp_registered = 1; } else { pr_err("Could not register LED handler for logo lamp, error %i\n", result); @@ -949,10 +950,10 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device) if ((call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & KEYBOARD_LAMPS) && (call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0) == 0x0)) { - result = led_classdev_register(&fujitsu->pf_device->dev, + result = led_classdev_register(&fujitsu_bl->pf_device->dev, &kblamps_led); if (result == 0) { - fujitsu_hotkey->kblamps_registered = 1; + fujitsu_laptop->kblamps_registered = 1; } else { pr_err("Could not register LED handler for keyboard lamps, error %i\n", result); @@ -966,10 +967,10 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device) * that an RF LED is present. */ if (call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0) & BIT(24)) { - result = led_classdev_register(&fujitsu->pf_device->dev, + result = led_classdev_register(&fujitsu_bl->pf_device->dev, &radio_led); if (result == 0) { - fujitsu_hotkey->radio_led_registered = 1; + fujitsu_laptop->radio_led_registered = 1; } else { pr_err("Could not register LED handler for radio LED, error %i\n", result); @@ -983,10 +984,10 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device) */ if ((call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & BIT(14)) && (call_fext_func(FUNC_LEDS, 0x2, ECO_LED, 0x0) != UNSUPPORTED_CMD)) { - result = led_classdev_register(&fujitsu->pf_device->dev, + result = led_classdev_register(&fujitsu_bl->pf_device->dev, &eco_led); if (result == 0) { - fujitsu_hotkey->eco_led_registered = 1; + fujitsu_laptop->eco_led_registered = 1; } else { pr_err("Could not register LED handler for eco LED, error %i\n", result); @@ -1002,47 +1003,47 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device) err_free_input_dev: input_free_device(input); err_free_fifo: - kfifo_free(&fujitsu_hotkey->fifo); + kfifo_free(&fujitsu_laptop->fifo); err_stop: return error; } -static int acpi_fujitsu_hotkey_remove(struct acpi_device *device) +static int acpi_fujitsu_laptop_remove(struct acpi_device *device) { - struct fujitsu_hotkey_t *fujitsu_hotkey = acpi_driver_data(device); - struct input_dev *input = fujitsu_hotkey->input; + struct fujitsu_laptop *fujitsu_laptop = acpi_driver_data(device); + struct input_dev *input = fujitsu_laptop->input; #if IS_ENABLED(CONFIG_LEDS_CLASS) - if (fujitsu_hotkey->logolamp_registered) + if (fujitsu_laptop->logolamp_registered) led_classdev_unregister(&logolamp_led); - if (fujitsu_hotkey->kblamps_registered) + if (fujitsu_laptop->kblamps_registered) led_classdev_unregister(&kblamps_led); - if (fujitsu_hotkey->radio_led_registered) + if 
(fujitsu_laptop->radio_led_registered) led_classdev_unregister(&radio_led); - if (fujitsu_hotkey->eco_led_registered) + if (fujitsu_laptop->eco_led_registered) led_classdev_unregister(&eco_led); #endif input_unregister_device(input); - kfifo_free(&fujitsu_hotkey->fifo); + kfifo_free(&fujitsu_laptop->fifo); - fujitsu_hotkey->acpi_handle = NULL; + fujitsu_laptop->acpi_handle = NULL; return 0; } -static void acpi_fujitsu_hotkey_press(int keycode) +static void acpi_fujitsu_laptop_press(int keycode) { - struct input_dev *input = fujitsu_hotkey->input; + struct input_dev *input = fujitsu_laptop->input; int status; - status = kfifo_in_locked(&fujitsu_hotkey->fifo, + status = kfifo_in_locked(&fujitsu_laptop->fifo, (unsigned char *)&keycode, sizeof(keycode), - &fujitsu_hotkey->fifo_lock); + &fujitsu_laptop->fifo_lock); if (status != sizeof(keycode)) { vdbg_printk(FUJLAPTOP_DBG_WARN, "Could not push keycode [0x%x]\n", keycode); @@ -1054,16 +1055,16 @@ static void acpi_fujitsu_hotkey_press(int keycode) "Push keycode into ringbuffer [%d]\n", keycode); } -static void acpi_fujitsu_hotkey_release(void) +static void acpi_fujitsu_laptop_release(void) { - struct input_dev *input = fujitsu_hotkey->input; + struct input_dev *input = fujitsu_laptop->input; int keycode, status; while (true) { - status = kfifo_out_locked(&fujitsu_hotkey->fifo, + status = kfifo_out_locked(&fujitsu_laptop->fifo, (unsigned char *)&keycode, sizeof(keycode), - &fujitsu_hotkey->fifo_lock); + &fujitsu_laptop->fifo_lock); if (status != sizeof(keycode)) return; input_report_key(input, keycode, 0); @@ -1073,14 +1074,14 @@ static void acpi_fujitsu_hotkey_release(void) } } -static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event) +static void acpi_fujitsu_laptop_notify(struct acpi_device *device, u32 event) { struct input_dev *input; int keycode; unsigned int irb = 1; int i; - input = fujitsu_hotkey->input; + input = fujitsu_laptop->input; if (event != ACPI_FUJITSU_NOTIFY_CODE1) { keycode = KEY_UNKNOWN; @@ -1093,9 +1094,9 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event) return; } - if (fujitsu_hotkey->rfkill_supported) - fujitsu_hotkey->rfkill_state = - call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0); + if (fujitsu_laptop->flags_supported) + fujitsu_laptop->flags_state = + call_fext_func(FUNC_FLAGS, 0x4, 0x0, 0x0); i = 0; while ((irb = @@ -1103,19 +1104,19 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event) && (i++) < MAX_HOTKEY_RINGBUFFER_SIZE) { switch (irb & 0x4ff) { case KEY1_CODE: - keycode = fujitsu->keycode1; + keycode = fujitsu_bl->keycode1; break; case KEY2_CODE: - keycode = fujitsu->keycode2; + keycode = fujitsu_bl->keycode2; break; case KEY3_CODE: - keycode = fujitsu->keycode3; + keycode = fujitsu_bl->keycode3; break; case KEY4_CODE: - keycode = fujitsu->keycode4; + keycode = fujitsu_bl->keycode4; break; case KEY5_CODE: - keycode = fujitsu->keycode5; + keycode = fujitsu_bl->keycode5; break; case 0: keycode = 0; @@ -1128,17 +1129,17 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event) } if (keycode > 0) - acpi_fujitsu_hotkey_press(keycode); + acpi_fujitsu_laptop_press(keycode); else if (keycode == 0) - acpi_fujitsu_hotkey_release(); + acpi_fujitsu_laptop_release(); } /* On some models (first seen on the Skylake-based Lifebook * E736/E746/E756), the touchpad toggle hotkey (Fn+F4) is - * handled in software; its state is queried using FUNC_RFKILL + * handled in software; its state is queried using FUNC_FLAGS */ - if 
((fujitsu_hotkey->rfkill_supported & BIT(26)) && - (call_fext_func(FUNC_RFKILL, 0x1, 0x0, 0x0) & BIT(26))) { + if ((fujitsu_laptop->flags_supported & BIT(26)) && + (call_fext_func(FUNC_FLAGS, 0x1, 0x0, 0x0) & BIT(26))) { keycode = KEY_TOUCHPAD_TOGGLE; input_report_key(input, keycode, 1); input_sync(input); @@ -1150,83 +1151,81 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event) /* Initialization */ -static const struct acpi_device_id fujitsu_device_ids[] = { - {ACPI_FUJITSU_HID, 0}, +static const struct acpi_device_id fujitsu_bl_device_ids[] = { + {ACPI_FUJITSU_BL_HID, 0}, {"", 0}, }; -static struct acpi_driver acpi_fujitsu_driver = { - .name = ACPI_FUJITSU_DRIVER_NAME, +static struct acpi_driver acpi_fujitsu_bl_driver = { + .name = ACPI_FUJITSU_BL_DRIVER_NAME, .class = ACPI_FUJITSU_CLASS, - .ids = fujitsu_device_ids, + .ids = fujitsu_bl_device_ids, .ops = { - .add = acpi_fujitsu_add, - .remove = acpi_fujitsu_remove, - .notify = acpi_fujitsu_notify, + .add = acpi_fujitsu_bl_add, + .remove = acpi_fujitsu_bl_remove, + .notify = acpi_fujitsu_bl_notify, }, }; -static const struct acpi_device_id fujitsu_hotkey_device_ids[] = { - {ACPI_FUJITSU_HOTKEY_HID, 0}, +static const struct acpi_device_id fujitsu_laptop_device_ids[] = { + {ACPI_FUJITSU_LAPTOP_HID, 0}, {"", 0}, }; -static struct acpi_driver acpi_fujitsu_hotkey_driver = { - .name = ACPI_FUJITSU_HOTKEY_DRIVER_NAME, +static struct acpi_driver acpi_fujitsu_laptop_driver = { + .name = ACPI_FUJITSU_LAPTOP_DRIVER_NAME, .class = ACPI_FUJITSU_CLASS, - .ids = fujitsu_hotkey_device_ids, + .ids = fujitsu_laptop_device_ids, .ops = { - .add = acpi_fujitsu_hotkey_add, - .remove = acpi_fujitsu_hotkey_remove, - .notify = acpi_fujitsu_hotkey_notify, + .add = acpi_fujitsu_laptop_add, + .remove = acpi_fujitsu_laptop_remove, + .notify = acpi_fujitsu_laptop_notify, }, }; static const struct acpi_device_id fujitsu_ids[] __used = { - {ACPI_FUJITSU_HID, 0}, - {ACPI_FUJITSU_HOTKEY_HID, 0}, + {ACPI_FUJITSU_BL_HID, 0}, + {ACPI_FUJITSU_LAPTOP_HID, 0}, {"", 0} }; MODULE_DEVICE_TABLE(acpi, fujitsu_ids); static int __init fujitsu_init(void) { - int ret, result, max_brightness; + int ret, max_brightness; if (acpi_disabled) return -ENODEV; - fujitsu = kzalloc(sizeof(struct fujitsu_t), GFP_KERNEL); - if (!fujitsu) + fujitsu_bl = kzalloc(sizeof(struct fujitsu_bl), GFP_KERNEL); + if (!fujitsu_bl) return -ENOMEM; - fujitsu->keycode1 = KEY_PROG1; - fujitsu->keycode2 = KEY_PROG2; - fujitsu->keycode3 = KEY_PROG3; - fujitsu->keycode4 = KEY_PROG4; - fujitsu->keycode5 = KEY_RFKILL; + fujitsu_bl->keycode1 = KEY_PROG1; + fujitsu_bl->keycode2 = KEY_PROG2; + fujitsu_bl->keycode3 = KEY_PROG3; + fujitsu_bl->keycode4 = KEY_PROG4; + fujitsu_bl->keycode5 = KEY_RFKILL; dmi_check_system(fujitsu_dmi_table); - result = acpi_bus_register_driver(&acpi_fujitsu_driver); - if (result < 0) { - ret = -ENODEV; + ret = acpi_bus_register_driver(&acpi_fujitsu_bl_driver); + if (ret) goto fail_acpi; - } /* Register platform stuff */ - fujitsu->pf_device = platform_device_alloc("fujitsu-laptop", -1); - if (!fujitsu->pf_device) { + fujitsu_bl->pf_device = platform_device_alloc("fujitsu-laptop", -1); + if (!fujitsu_bl->pf_device) { ret = -ENOMEM; goto fail_platform_driver; } - ret = platform_device_add(fujitsu->pf_device); + ret = platform_device_add(fujitsu_bl->pf_device); if (ret) goto fail_platform_device1; ret = - sysfs_create_group(&fujitsu->pf_device->dev.kobj, - &fujitsupf_attribute_group); + sysfs_create_group(&fujitsu_bl->pf_device->dev.kobj, + &fujitsu_pf_attribute_group); 
if (ret) goto fail_platform_device2; @@ -1236,90 +1235,88 @@ static int __init fujitsu_init(void) struct backlight_properties props; memset(&props, 0, sizeof(struct backlight_properties)); - max_brightness = fujitsu->max_brightness; + max_brightness = fujitsu_bl->max_brightness; props.type = BACKLIGHT_PLATFORM; props.max_brightness = max_brightness - 1; - fujitsu->bl_device = backlight_device_register("fujitsu-laptop", - NULL, NULL, - &fujitsubl_ops, - &props); - if (IS_ERR(fujitsu->bl_device)) { - ret = PTR_ERR(fujitsu->bl_device); - fujitsu->bl_device = NULL; + fujitsu_bl->bl_device = backlight_device_register("fujitsu-laptop", + NULL, NULL, + &fujitsu_bl_ops, + &props); + if (IS_ERR(fujitsu_bl->bl_device)) { + ret = PTR_ERR(fujitsu_bl->bl_device); + fujitsu_bl->bl_device = NULL; goto fail_sysfs_group; } - fujitsu->bl_device->props.brightness = fujitsu->brightness_level; + fujitsu_bl->bl_device->props.brightness = fujitsu_bl->brightness_level; } - ret = platform_driver_register(&fujitsupf_driver); + ret = platform_driver_register(&fujitsu_pf_driver); if (ret) goto fail_backlight; - /* Register hotkey driver */ + /* Register laptop driver */ - fujitsu_hotkey = kzalloc(sizeof(struct fujitsu_hotkey_t), GFP_KERNEL); - if (!fujitsu_hotkey) { + fujitsu_laptop = kzalloc(sizeof(struct fujitsu_laptop), GFP_KERNEL); + if (!fujitsu_laptop) { ret = -ENOMEM; - goto fail_hotkey; + goto fail_laptop; } - result = acpi_bus_register_driver(&acpi_fujitsu_hotkey_driver); - if (result < 0) { - ret = -ENODEV; - goto fail_hotkey1; - } + ret = acpi_bus_register_driver(&acpi_fujitsu_laptop_driver); + if (ret) + goto fail_laptop1; /* Sync backlight power status (needs FUJ02E3 device, hence deferred) */ if (acpi_video_get_backlight_type() == acpi_backlight_vendor) { if (call_fext_func(FUNC_BACKLIGHT, 0x2, 0x4, 0x0) == 3) - fujitsu->bl_device->props.power = FB_BLANK_POWERDOWN; + fujitsu_bl->bl_device->props.power = FB_BLANK_POWERDOWN; else - fujitsu->bl_device->props.power = FB_BLANK_UNBLANK; + fujitsu_bl->bl_device->props.power = FB_BLANK_UNBLANK; } pr_info("driver " FUJITSU_DRIVER_VERSION " successfully loaded\n"); return 0; -fail_hotkey1: - kfree(fujitsu_hotkey); -fail_hotkey: - platform_driver_unregister(&fujitsupf_driver); +fail_laptop1: + kfree(fujitsu_laptop); +fail_laptop: + platform_driver_unregister(&fujitsu_pf_driver); fail_backlight: - backlight_device_unregister(fujitsu->bl_device); + backlight_device_unregister(fujitsu_bl->bl_device); fail_sysfs_group: - sysfs_remove_group(&fujitsu->pf_device->dev.kobj, - &fujitsupf_attribute_group); + sysfs_remove_group(&fujitsu_bl->pf_device->dev.kobj, + &fujitsu_pf_attribute_group); fail_platform_device2: - platform_device_del(fujitsu->pf_device); + platform_device_del(fujitsu_bl->pf_device); fail_platform_device1: - platform_device_put(fujitsu->pf_device); + platform_device_put(fujitsu_bl->pf_device); fail_platform_driver: - acpi_bus_unregister_driver(&acpi_fujitsu_driver); + acpi_bus_unregister_driver(&acpi_fujitsu_bl_driver); fail_acpi: - kfree(fujitsu); + kfree(fujitsu_bl); return ret; } static void __exit fujitsu_cleanup(void) { - acpi_bus_unregister_driver(&acpi_fujitsu_hotkey_driver); + acpi_bus_unregister_driver(&acpi_fujitsu_laptop_driver); - kfree(fujitsu_hotkey); + kfree(fujitsu_laptop); - platform_driver_unregister(&fujitsupf_driver); + platform_driver_unregister(&fujitsu_pf_driver); - backlight_device_unregister(fujitsu->bl_device); + backlight_device_unregister(fujitsu_bl->bl_device); - sysfs_remove_group(&fujitsu->pf_device->dev.kobj, - 
&fujitsupf_attribute_group); + sysfs_remove_group(&fujitsu_bl->pf_device->dev.kobj, + &fujitsu_pf_attribute_group); - platform_device_unregister(fujitsu->pf_device); + platform_device_unregister(fujitsu_bl->pf_device); - acpi_bus_unregister_driver(&acpi_fujitsu_driver); + acpi_bus_unregister_driver(&acpi_fujitsu_bl_driver); - kfree(fujitsu); + kfree(fujitsu_bl); pr_info("driver unloaded\n"); } @@ -1341,7 +1338,3 @@ MODULE_AUTHOR("Jonathan Woithe, Peter Gruber, Tony Vroon"); MODULE_DESCRIPTION("Fujitsu laptop extras support"); MODULE_VERSION(FUJITSU_DRIVER_VERSION); MODULE_LICENSE("GPL"); - -MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1D3:*:cvrS6410:*"); -MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1E6:*:cvrS6420:*"); -MODULE_ALIAS("dmi:*:svnFUJITSU:*:pvr:rvnFUJITSU:rnFJNB19C:*:cvrS7020:*"); diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c index 55663b3d72823b..58dcee562d6417 100644 --- a/drivers/platform/x86/intel_ips.c +++ b/drivers/platform/x86/intel_ips.c @@ -68,6 +68,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/ps3/ps3-sys-manager.c b/drivers/ps3/ps3-sys-manager.c index f2ab435954f6b9..73e496a7211344 100644 --- a/drivers/ps3/ps3-sys-manager.c +++ b/drivers/ps3/ps3-sys-manager.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include diff --git a/drivers/ptp/ptp_kvm.c b/drivers/ptp/ptp_kvm.c index 09b4df74291e26..bb865695d7a62d 100644 --- a/drivers/ptp/ptp_kvm.c +++ b/drivers/ptp/ptp_kvm.c @@ -193,10 +193,7 @@ static int __init ptp_kvm_init(void) kvm_ptp_clock.ptp_clock = ptp_clock_register(&kvm_ptp_clock.caps, NULL); - if (IS_ERR(kvm_ptp_clock.ptp_clock)) - return PTR_ERR(kvm_ptp_clock.ptp_clock); - - return 0; + return PTR_ERR_OR_ZERO(kvm_ptp_clock.ptp_clock); } module_init(ptp_kvm_init); diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c index 9d19b9a62011b3..315a4be8dc1e64 100644 --- a/drivers/rapidio/devices/tsi721.c +++ b/drivers/rapidio/devices/tsi721.c @@ -37,8 +37,8 @@ #include "tsi721.h" #ifdef DEBUG -u32 dbg_level; -module_param(dbg_level, uint, S_IWUSR | S_IRUGO); +u32 tsi_dbg_level; +module_param_named(dbg_level, tsi_dbg_level, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)"); #endif diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h index 5941437cbdd164..957eadc5815095 100644 --- a/drivers/rapidio/devices/tsi721.h +++ b/drivers/rapidio/devices/tsi721.h @@ -40,11 +40,11 @@ enum { }; #ifdef DEBUG -extern u32 dbg_level; +extern u32 tsi_dbg_level; #define tsi_debug(level, dev, fmt, arg...) 
\ do { \ - if (DBG_##level & dbg_level) \ + if (DBG_##level & tsi_dbg_level) \ dev_dbg(dev, "%s: " fmt "\n", __func__, ##arg); \ } while (0) #else diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig index 65f86bc24c07c7..1dc43fc5f65f38 100644 --- a/drivers/remoteproc/Kconfig +++ b/drivers/remoteproc/Kconfig @@ -76,7 +76,7 @@ config QCOM_ADSP_PIL depends on OF && ARCH_QCOM depends on REMOTEPROC depends on QCOM_SMEM - depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n) + depends on RPMSG_QCOM_SMD || QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n && RPMSG_QCOM_SMD=n) select MFD_SYSCON select QCOM_MDT_LOADER select QCOM_RPROC_COMMON @@ -93,7 +93,7 @@ config QCOM_Q6V5_PIL depends on OF && ARCH_QCOM depends on QCOM_SMEM depends on REMOTEPROC - depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n) + depends on RPMSG_QCOM_SMD || QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n && RPMSG_QCOM_SMD=n) select MFD_SYSCON select QCOM_RPROC_COMMON select QCOM_SCM @@ -104,7 +104,7 @@ config QCOM_Q6V5_PIL config QCOM_WCNSS_PIL tristate "Qualcomm WCNSS Peripheral Image Loader" depends on OF && ARCH_QCOM - depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n) + depends on RPMSG_QCOM_SMD || QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n && RPMSG_QCOM_SMD=n) depends on QCOM_SMEM depends on REMOTEPROC select QCOM_MDT_LOADER diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c index 364411fb77343f..0142cc3f0c91c6 100644 --- a/drivers/remoteproc/remoteproc_virtio.c +++ b/drivers/remoteproc/remoteproc_virtio.c @@ -137,7 +137,8 @@ static void rproc_virtio_del_vqs(struct virtio_device *vdev) static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char * const names[]) + const char * const names[], + struct irq_affinity *desc) { int i, ret; diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c index 3090b0d3072f1e..5e66e081027e56 100644 --- a/drivers/rpmsg/virtio_rpmsg_bus.c +++ b/drivers/rpmsg/virtio_rpmsg_bus.c @@ -869,7 +869,7 @@ static int rpmsg_probe(struct virtio_device *vdev) init_waitqueue_head(&vrp->sendq); /* We expect two virtqueues, rx and tx (and in this order) */ - err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, names); + err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, names, NULL); if (err) goto free_vrp; diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c index a6d9434addf6f7..6dc8f29697abfe 100644 --- a/drivers/rtc/rtc-dev.c +++ b/drivers/rtc/rtc-dev.c @@ -15,7 +15,7 @@ #include #include -#include +#include #include "rtc-core.h" static dev_t rtc_devt; diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c index 85eca1cef06305..c4518168fd02c9 100644 --- a/drivers/s390/char/fs3270.c +++ b/drivers/s390/char/fs3270.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c index 82c913318b73be..ba0e4f93503db5 100644 --- a/drivers/s390/char/keyboard.c +++ b/drivers/s390/char/keyboard.c @@ -7,7 +7,7 @@ */ #include -#include +#include #include #include diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index de6fccc1312485..1b350665c82332 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -29,7 +29,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 79823ee9c1007d..b8006ea9099cd9 100644 --- a/drivers/s390/cio/device.c +++ 
b/drivers/s390/cio/device.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c index 8ad98a902a91f1..c61164f4528e12 100644 --- a/drivers/s390/cio/qdio_thinint.c +++ b/drivers/s390/cio/qdio_thinint.c @@ -8,6 +8,8 @@ #include #include #include +#include + #include #include #include diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c index 40f1136f556889..058db724b5a28a 100644 --- a/drivers/s390/crypto/pkey_api.c +++ b/drivers/s390/crypto/pkey_api.c @@ -572,6 +572,12 @@ int pkey_sec2protkey(u16 cardnr, u16 domain, rc = -EIO; goto out; } + if (prepcblk->ccp_rscode != 0) { + DEBUG_WARN( + "pkey_sec2protkey unwrap secure key warning, card response %d/%d\n", + (int) prepcblk->ccp_rtcode, + (int) prepcblk->ccp_rscode); + } /* process response cprb param block */ prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX); @@ -761,9 +767,10 @@ static int query_crypto_facility(u16 cardnr, u16 domain, } /* - * Fetch just the mkvp value via query_crypto_facility from adapter. + * Fetch the current and old mkvp values via + * query_crypto_facility from adapter. */ -static int fetch_mkvp(u16 cardnr, u16 domain, u64 *mkvp) +static int fetch_mkvp(u16 cardnr, u16 domain, u64 mkvp[2]) { int rc, found = 0; size_t rlen, vlen; @@ -779,9 +786,10 @@ static int fetch_mkvp(u16 cardnr, u16 domain, u64 *mkvp) rc = query_crypto_facility(cardnr, domain, "STATICSA", rarray, &rlen, varray, &vlen); if (rc == 0 && rlen > 8*8 && vlen > 184+8) { - if (rarray[64] == '2') { + if (rarray[8*8] == '2') { /* current master key state is valid */ - *mkvp = *((u64 *)(varray + 184)); + mkvp[0] = *((u64 *)(varray + 184)); + mkvp[1] = *((u64 *)(varray + 172)); found = 1; } } @@ -796,14 +804,14 @@ struct mkvp_info { struct list_head list; u16 cardnr; u16 domain; - u64 mkvp; + u64 mkvp[2]; }; /* a list with mkvp_info entries */ static LIST_HEAD(mkvp_list); static DEFINE_SPINLOCK(mkvp_list_lock); -static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 *mkvp) +static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 mkvp[2]) { int rc = -ENOENT; struct mkvp_info *ptr; @@ -812,7 +820,7 @@ static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 *mkvp) list_for_each_entry(ptr, &mkvp_list, list) { if (ptr->cardnr == cardnr && ptr->domain == domain) { - *mkvp = ptr->mkvp; + memcpy(mkvp, ptr->mkvp, 2 * sizeof(u64)); rc = 0; break; } @@ -822,7 +830,7 @@ static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 *mkvp) return rc; } -static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp) +static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp[2]) { int found = 0; struct mkvp_info *ptr; @@ -831,7 +839,7 @@ static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp) list_for_each_entry(ptr, &mkvp_list, list) { if (ptr->cardnr == cardnr && ptr->domain == domain) { - ptr->mkvp = mkvp; + memcpy(ptr->mkvp, mkvp, 2 * sizeof(u64)); found = 1; break; } @@ -844,7 +852,7 @@ static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp) } ptr->cardnr = cardnr; ptr->domain = domain; - ptr->mkvp = mkvp; + memcpy(ptr->mkvp, mkvp, 2 * sizeof(u64)); list_add(&ptr->list, &mkvp_list); } spin_unlock_bh(&mkvp_list_lock); @@ -888,8 +896,8 @@ int pkey_findcard(const struct pkey_seckey *seckey, struct secaeskeytoken *t = (struct secaeskeytoken *) seckey; struct zcrypt_device_matrix *device_matrix; u16 card, dom; - u64 mkvp; - int i, rc; + u64 mkvp[2]; + int i, rc, oi = -1; /* mkvp must not be zero */ if (t->mkvp == 0) @@ 
-910,14 +918,14 @@ int pkey_findcard(const struct pkey_seckey *seckey, device_matrix->device[i].functions & 0x04) { /* an enabled CCA Coprocessor card */ /* try cached mkvp */ - if (mkvp_cache_fetch(card, dom, &mkvp) == 0 && - t->mkvp == mkvp) { + if (mkvp_cache_fetch(card, dom, mkvp) == 0 && + t->mkvp == mkvp[0]) { if (!verify) break; /* verify: fetch mkvp from adapter */ - if (fetch_mkvp(card, dom, &mkvp) == 0) { + if (fetch_mkvp(card, dom, mkvp) == 0) { mkvp_cache_update(card, dom, mkvp); - if (t->mkvp == mkvp) + if (t->mkvp == mkvp[0]) break; } } @@ -936,14 +944,21 @@ int pkey_findcard(const struct pkey_seckey *seckey, card = AP_QID_CARD(device_matrix->device[i].qid); dom = AP_QID_QUEUE(device_matrix->device[i].qid); /* fresh fetch mkvp from adapter */ - if (fetch_mkvp(card, dom, &mkvp) == 0) { + if (fetch_mkvp(card, dom, mkvp) == 0) { mkvp_cache_update(card, dom, mkvp); - if (t->mkvp == mkvp) + if (t->mkvp == mkvp[0]) break; + if (t->mkvp == mkvp[1] && oi < 0) + oi = i; } } + if (i >= MAX_ZDEV_ENTRIES && oi >= 0) { + /* old mkvp matched, use this card then */ + card = AP_QID_CARD(device_matrix->device[oi].qid); + dom = AP_QID_QUEUE(device_matrix->device[oi].qid); + } } - if (i < MAX_ZDEV_ENTRIES) { + if (i < MAX_ZDEV_ENTRIES || oi >= 0) { if (pcardnr) *pcardnr = card; if (pdomain) diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index e7addea8741b79..d9561e39c3b237 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -961,7 +961,8 @@ int qeth_bridgeport_query_ports(struct qeth_card *card, int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role); int qeth_bridgeport_an_set(struct qeth_card *card, int enable); int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int); -int qeth_get_elements_no(struct qeth_card *, struct sk_buff *, int); +int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb, + int extra_elems, int data_offset); int qeth_get_elements_for_frags(struct sk_buff *); int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *, struct sk_buff *, struct qeth_hdr *, int, int, int); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 315d8a2db7c066..9a5f99ccb122ba 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -3837,6 +3837,7 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags); * @card: qeth card structure, to check max. elems. * @skb: SKB address * @extra_elems: extra elems needed, to check against max. + * @data_offset: range starts at skb->data + data_offset * * Returns the number of pages, and thus QDIO buffer elements, needed to cover * skb data, including linear part and fragments. Checks if the result plus @@ -3844,10 +3845,10 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags); * Note: extra_elems is not included in the returned result. 
*/ int qeth_get_elements_no(struct qeth_card *card, - struct sk_buff *skb, int extra_elems) + struct sk_buff *skb, int extra_elems, int data_offset) { int elements = qeth_get_elements_for_range( - (addr_t)skb->data, + (addr_t)skb->data + data_offset, (addr_t)skb->data + skb_headlen(skb)) + qeth_get_elements_for_frags(skb); diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index bea48330761899..af4e6a639fecf2 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -849,7 +849,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) * chaining we can not send long frag lists */ if ((card->info.type != QETH_CARD_TYPE_IQD) && - !qeth_get_elements_no(card, new_skb, 0)) { + !qeth_get_elements_no(card, new_skb, 0, 0)) { int lin_rc = skb_linearize(new_skb); if (card->options.performance_stats) { @@ -894,7 +894,8 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) } } - elements = qeth_get_elements_no(card, new_skb, elements_needed); + elements = qeth_get_elements_no(card, new_skb, elements_needed, + (data_offset > 0) ? data_offset : 0); if (!elements) { if (data_offset >= 0) kmem_cache_free(qeth_core_header_cache, hdr); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 06d0addcc058dc..653f0fb76573ab 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2609,17 +2609,13 @@ static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card, char daddr[16]; struct af_iucv_trans_hdr *iucv_hdr; - skb_pull(skb, 14); - card->dev->header_ops->create(skb, card->dev, 0, - card->dev->dev_addr, card->dev->dev_addr, - card->dev->addr_len); - skb_pull(skb, 14); - iucv_hdr = (struct af_iucv_trans_hdr *)skb->data; memset(hdr, 0, sizeof(struct qeth_hdr)); hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3; hdr->hdr.l3.ext_flags = 0; - hdr->hdr.l3.length = skb->len; + hdr->hdr.l3.length = skb->len - ETH_HLEN; hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST; + + iucv_hdr = (struct af_iucv_trans_hdr *) (skb->data + ETH_HLEN); memset(daddr, 0, sizeof(daddr)); daddr[0] = 0xfe; daddr[1] = 0x80; @@ -2823,10 +2819,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) if ((card->info.type == QETH_CARD_TYPE_IQD) && !skb_is_nonlinear(skb)) { new_skb = skb; - if (new_skb->protocol == ETH_P_AF_IUCV) - data_offset = 0; - else - data_offset = ETH_HLEN; + data_offset = ETH_HLEN; hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); if (!hdr) goto tx_drop; @@ -2867,7 +2860,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) */ if ((card->info.type != QETH_CARD_TYPE_IQD) && ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) || - (!use_tso && !qeth_get_elements_no(card, new_skb, 0)))) { + (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0)))) { int lin_rc = skb_linearize(new_skb); if (card->options.performance_stats) { @@ -2909,7 +2902,8 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) elements = use_tso ? qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) : - qeth_get_elements_no(card, new_skb, hdr_elements); + qeth_get_elements_no(card, new_skb, hdr_elements, + (data_offset > 0) ? 
data_offset : 0); if (!elements) { if (data_offset >= 0) kmem_cache_free(qeth_core_header_cache, hdr); diff --git a/drivers/s390/virtio/kvm_virtio.c b/drivers/s390/virtio/kvm_virtio.c index 5e5c11f37b2420..2ce0b3eb2efebc 100644 --- a/drivers/s390/virtio/kvm_virtio.c +++ b/drivers/s390/virtio/kvm_virtio.c @@ -255,7 +255,8 @@ static void kvm_del_vqs(struct virtio_device *vdev) static int kvm_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char * const names[]) + const char * const names[], + struct irq_affinity *desc) { struct kvm_device *kdev = to_kvmdev(vdev); int i; diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index 648373cde4a137..0ed209f3d8b0c5 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c @@ -628,7 +628,8 @@ static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev, static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char * const names[]) + const char * const names[], + struct irq_affinity *desc) { struct virtio_ccw_device *vcdev = to_vc_device(vdev); unsigned long *indicatorp = NULL; diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index d4023bf1e739d2..3c52867dfe28e3 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -1235,20 +1235,21 @@ config SCSI_QLOGICPTI source "drivers/scsi/qla2xxx/Kconfig" source "drivers/scsi/qla4xxx/Kconfig" source "drivers/scsi/qedi/Kconfig" +source "drivers/scsi/qedf/Kconfig" config SCSI_LPFC tristate "Emulex LightPulse Fibre Channel Support" depends on PCI && SCSI depends on SCSI_FC_ATTRS select CRC_T10DIF - help + ---help--- This lpfc driver supports the Emulex LightPulse Family of Fibre Channel PCI host adapters. config SCSI_LPFC_DEBUG_FS bool "Emulex LightPulse Fibre Channel debugfs Support" depends on SCSI_LPFC && DEBUG_FS - help + ---help--- This makes debugging information from the lpfc driver available via the debugfs filesystem. @@ -1478,7 +1479,7 @@ config ATARI_SCSI config MAC_SCSI tristate "Macintosh NCR5380 SCSI" - depends on MAC && SCSI=y + depends on MAC && SCSI select SCSI_SPI_ATTRS help This is the NCR 5380 SCSI controller included on most of the 68030 diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 736b77414a4baa..fc2855565a51fd 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -41,6 +41,7 @@ obj-$(CONFIG_FCOE) += fcoe/ obj-$(CONFIG_FCOE_FNIC) += fnic/ obj-$(CONFIG_SCSI_SNIC) += snic/ obj-$(CONFIG_SCSI_BNX2X_FCOE) += libfc/ fcoe/ bnx2fc/ +obj-$(CONFIG_QEDF) += qedf/ obj-$(CONFIG_ISCSI_TCP) += libiscsi.o libiscsi_tcp.o iscsi_tcp.o obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o obj-$(CONFIG_ISCSI_BOOT_SYSFS) += iscsi_boot_sysfs.o diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 907f1e80665b1c..e3e93def722b04 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -294,6 +294,10 @@ MODULE_PARM_DESC(aif_timeout, "The duration of time in seconds to wait for" "deregistering them. 
This is typically adjusted for heavily burdened" " systems."); +int aac_fib_dump; +module_param(aac_fib_dump, int, 0644); +MODULE_PARM_DESC(aac_fib_dump, "Dump controller fibs prior to IOP_RESET 0=off, 1=on"); + int numacb = -1; module_param(numacb, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control" @@ -311,7 +315,7 @@ module_param(update_interval, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(update_interval, "Interval in seconds between time sync" " updates issued to adapter."); -int check_interval = 24 * 60 * 60; +int check_interval = 60; module_param(check_interval, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(check_interval, "Interval in seconds between adapter health" " checks."); @@ -483,7 +487,7 @@ int aac_get_containers(struct aac_dev *dev) if (status >= 0) { dresp = (struct aac_get_container_count_resp *)fib_data(fibptr); maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries); - if (fibptr->dev->supplement_adapter_info.SupportedOptions2 & + if (fibptr->dev->supplement_adapter_info.supported_options2 & AAC_OPTION_SUPPORTED_240_VOLUMES) { maximum_num_containers = le32_to_cpu(dresp->MaxSimpleVolumes); @@ -639,13 +643,16 @@ static void _aac_probe_container2(void * context, struct fib * fibptr) fsa_dev_ptr = fibptr->dev->fsa_dev; if (fsa_dev_ptr) { struct aac_mount * dresp = (struct aac_mount *) fib_data(fibptr); + __le32 sup_options2; + fsa_dev_ptr += scmd_id(scsicmd); + sup_options2 = + fibptr->dev->supplement_adapter_info.supported_options2; if ((le32_to_cpu(dresp->status) == ST_OK) && (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) && (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) { - if (!(fibptr->dev->supplement_adapter_info.SupportedOptions2 & - AAC_OPTION_VARIABLE_BLOCK_SIZE)) { + if (!(sup_options2 & AAC_OPTION_VARIABLE_BLOCK_SIZE)) { dresp->mnt[0].fileinfo.bdevinfo.block_size = 0x200; fsa_dev_ptr->block_size = 0x200; } else { @@ -688,7 +695,7 @@ static void _aac_probe_container1(void * context, struct fib * fibptr) int status; dresp = (struct aac_mount *) fib_data(fibptr); - if (!(fibptr->dev->supplement_adapter_info.SupportedOptions2 & + if (!(fibptr->dev->supplement_adapter_info.supported_options2 & AAC_OPTION_VARIABLE_BLOCK_SIZE)) dresp->mnt[0].capacityhigh = 0; if ((le32_to_cpu(dresp->status) != ST_OK) || @@ -705,7 +712,7 @@ static void _aac_probe_container1(void * context, struct fib * fibptr) dinfo = (struct aac_query_mount *)fib_data(fibptr); - if (fibptr->dev->supplement_adapter_info.SupportedOptions2 & + if (fibptr->dev->supplement_adapter_info.supported_options2 & AAC_OPTION_VARIABLE_BLOCK_SIZE) dinfo->command = cpu_to_le32(VM_NameServeAllBlk); else @@ -745,7 +752,7 @@ static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(stru dinfo = (struct aac_query_mount *)fib_data(fibptr); - if (fibptr->dev->supplement_adapter_info.SupportedOptions2 & + if (fibptr->dev->supplement_adapter_info.supported_options2 & AAC_OPTION_VARIABLE_BLOCK_SIZE) dinfo->command = cpu_to_le32(VM_NameServeAllBlk); else @@ -896,12 +903,14 @@ char * get_container_type(unsigned tindex) static void setinqstr(struct aac_dev *dev, void *data, int tindex) { struct scsi_inq *str; + struct aac_supplement_adapter_info *sup_adap_info; + sup_adap_info = &dev->supplement_adapter_info; str = (struct scsi_inq *)(data); /* cast data to scsi inq block */ memset(str, ' ', sizeof(*str)); - if (dev->supplement_adapter_info.AdapterTypeText[0]) { - char * cp = dev->supplement_adapter_info.AdapterTypeText; + if (sup_adap_info->adapter_type_text[0]) 
{ + char *cp = sup_adap_info->adapter_type_text; int c; if ((cp[0] == 'A') && (cp[1] == 'O') && (cp[2] == 'C')) inqstrcpy("SMC", str->vid); @@ -911,8 +920,7 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex) ++cp; c = *cp; *cp = '\0'; - inqstrcpy (dev->supplement_adapter_info.AdapterTypeText, - str->vid); + inqstrcpy(sup_adap_info->adapter_type_text, str->vid); *cp = c; while (*cp && *cp != ' ') ++cp; @@ -1675,8 +1683,8 @@ int aac_issue_bmic_identify(struct aac_dev *dev, u32 bus, u32 target) if (!identify_resp) goto fib_free_ptr; - vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.VirtDeviceBus); - vid = (u32)le16_to_cpu(dev->supplement_adapter_info.VirtDeviceTarget); + vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_bus); + vid = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_target); aac_fib_init(fibptr); @@ -1815,9 +1823,9 @@ int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr, int rescan) } vbus = (u32) le16_to_cpu( - dev->supplement_adapter_info.VirtDeviceBus); + dev->supplement_adapter_info.virt_device_bus); vid = (u32) le16_to_cpu( - dev->supplement_adapter_info.VirtDeviceTarget); + dev->supplement_adapter_info.virt_device_target); aac_fib_init(fibptr); @@ -1893,7 +1901,7 @@ int aac_get_adapter_info(struct aac_dev* dev) } memcpy(&dev->adapter_info, info, sizeof(*info)); - dev->supplement_adapter_info.VirtDeviceBus = 0xffff; + dev->supplement_adapter_info.virt_device_bus = 0xffff; if (dev->adapter_info.options & AAC_OPT_SUPPLEMENT_ADAPTER_INFO) { struct aac_supplement_adapter_info * sinfo; @@ -1961,7 +1969,7 @@ int aac_get_adapter_info(struct aac_dev* dev) } if (!dev->sync_mode && dev->sa_firmware && - dev->supplement_adapter_info.VirtDeviceBus != 0xffff) { + dev->supplement_adapter_info.virt_device_bus != 0xffff) { /* Thor SA Firmware -> CISS_REPORT_PHYSICAL_LUNS */ rcode = aac_report_phys_luns(dev, fibptr, AAC_INIT); } @@ -1976,8 +1984,8 @@ int aac_get_adapter_info(struct aac_dev* dev) (tmp>>16)&0xff, tmp&0xff, le32_to_cpu(dev->adapter_info.kernelbuild), - (int)sizeof(dev->supplement_adapter_info.BuildDate), - dev->supplement_adapter_info.BuildDate); + (int)sizeof(dev->supplement_adapter_info.build_date), + dev->supplement_adapter_info.build_date); tmp = le32_to_cpu(dev->adapter_info.monitorrev); printk(KERN_INFO "%s%d: monitor %d.%d-%d[%d]\n", dev->name, dev->id, @@ -1993,14 +2001,15 @@ int aac_get_adapter_info(struct aac_dev* dev) shost_to_class(dev->scsi_host_ptr), buffer)) printk(KERN_INFO "%s%d: serial %s", dev->name, dev->id, buffer); - if (dev->supplement_adapter_info.VpdInfo.Tsid[0]) { + if (dev->supplement_adapter_info.vpd_info.tsid[0]) { printk(KERN_INFO "%s%d: TSID %.*s\n", dev->name, dev->id, - (int)sizeof(dev->supplement_adapter_info.VpdInfo.Tsid), - dev->supplement_adapter_info.VpdInfo.Tsid); + (int)sizeof(dev->supplement_adapter_info + .vpd_info.tsid), + dev->supplement_adapter_info.vpd_info.tsid); } if (!aac_check_reset || ((aac_check_reset == 1) && - (dev->supplement_adapter_info.SupportedOptions2 & + (dev->supplement_adapter_info.supported_options2 & AAC_OPTION_IGNORE_RESET))) { printk(KERN_INFO "%s%d: Reset Adapter Ignored\n", dev->name, dev->id); @@ -2008,7 +2017,7 @@ int aac_get_adapter_info(struct aac_dev* dev) } dev->cache_protected = 0; - dev->jbod = ((dev->supplement_adapter_info.FeatureBits & + dev->jbod = ((dev->supplement_adapter_info.feature_bits & AAC_FEATURE_JBOD) != 0); dev->nondasd_support = 0; dev->raid_scsi_mode = 0; @@ -2631,7 +2640,7 @@ static int aac_start_stop(struct scsi_cmnd 
*scsicmd) struct scsi_device *sdev = scsicmd->device; struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata; - if (!(aac->supplement_adapter_info.SupportedOptions2 & + if (!(aac->supplement_adapter_info.supported_options2 & AAC_OPTION_POWER_MANAGEMENT)) { scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index f2344971e3cbe3..d036a806f31c47 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -97,7 +97,7 @@ enum { #define PMC_GLOBAL_INT_BIT0 0x00000001 #ifndef AAC_DRIVER_BUILD -# define AAC_DRIVER_BUILD 50740 +# define AAC_DRIVER_BUILD 50792 # define AAC_DRIVER_BRANCH "-custom" #endif #define MAXIMUM_NUM_CONTAINERS 32 @@ -1380,57 +1380,57 @@ struct aac_adapter_info struct aac_supplement_adapter_info { - u8 AdapterTypeText[17+1]; - u8 Pad[2]; - __le32 FlashMemoryByteSize; - __le32 FlashImageId; - __le32 MaxNumberPorts; - __le32 Version; - __le32 FeatureBits; - u8 SlotNumber; - u8 ReservedPad0[3]; - u8 BuildDate[12]; - __le32 CurrentNumberPorts; + u8 adapter_type_text[17+1]; + u8 pad[2]; + __le32 flash_memory_byte_size; + __le32 flash_image_id; + __le32 max_number_ports; + __le32 version; + __le32 feature_bits; + u8 slot_number; + u8 reserved_pad0[3]; + u8 build_date[12]; + __le32 current_number_ports; struct { - u8 AssemblyPn[8]; - u8 FruPn[8]; - u8 BatteryFruPn[8]; - u8 EcVersionString[8]; - u8 Tsid[12]; - } VpdInfo; - __le32 FlashFirmwareRevision; - __le32 FlashFirmwareBuild; - __le32 RaidTypeMorphOptions; - __le32 FlashFirmwareBootRevision; - __le32 FlashFirmwareBootBuild; - u8 MfgPcbaSerialNo[12]; - u8 MfgWWNName[8]; - __le32 SupportedOptions2; - __le32 StructExpansion; + u8 assembly_pn[8]; + u8 fru_pn[8]; + u8 battery_fru_pn[8]; + u8 ec_version_string[8]; + u8 tsid[12]; + } vpd_info; + __le32 flash_firmware_revision; + __le32 flash_firmware_build; + __le32 raid_type_morph_options; + __le32 flash_firmware_boot_revision; + __le32 flash_firmware_boot_build; + u8 mfg_pcba_serial_no[12]; + u8 mfg_wwn_name[8]; + __le32 supported_options2; + __le32 struct_expansion; /* StructExpansion == 1 */ - __le32 FeatureBits3; - __le32 SupportedPerformanceModes; - u8 HostBusType; /* uses HOST_BUS_TYPE_xxx defines */ - u8 HostBusWidth; /* actual width in bits or links */ - u16 HostBusSpeed; /* actual bus speed/link rate in MHz */ - u8 MaxRRCDrives; /* max. number of ITP-RRC drives/pool */ - u8 MaxDiskXtasks; /* max. possible num of DiskX Tasks */ - - u8 CpldVerLoaded; - u8 CpldVerInFlash; - - __le64 MaxRRCCapacity; - __le32 CompiledMaxHistLogLevel; - u8 CustomBoardName[12]; - u16 SupportedCntlrMode; /* identify supported controller mode */ - u16 ReservedForFuture16; - __le32 SupportedOptions3; /* reserved for future options */ - - __le16 VirtDeviceBus; /* virt. SCSI device for Thor */ - __le16 VirtDeviceTarget; - __le16 VirtDeviceLUN; - __le16 Unused; - __le32 ReservedForFutureGrowth[68]; + __le32 feature_bits3; + __le32 supported_performance_modes; + u8 host_bus_type; /* uses HOST_BUS_TYPE_xxx defines */ + u8 host_bus_width; /* actual width in bits or links */ + u16 host_bus_speed; /* actual bus speed/link rate in MHz */ + u8 max_rrc_drives; /* max. number of ITP-RRC drives/pool */ + u8 max_disk_xtasks; /* max. 
possible num of DiskX Tasks */ + + u8 cpld_ver_loaded; + u8 cpld_ver_in_flash; + + __le64 max_rrc_capacity; + __le32 compiled_max_hist_log_level; + u8 custom_board_name[12]; + u16 supported_cntlr_mode; /* identify supported controller mode */ + u16 reserved_for_future16; + __le32 supported_options3; /* reserved for future options */ + + __le16 virt_device_bus; /* virt. SCSI device for Thor */ + __le16 virt_device_target; + __le16 virt_device_lun; + __le16 unused; + __le32 reserved_for_future_growth[68]; }; #define AAC_FEATURE_FALCON cpu_to_le32(0x00000010) @@ -1444,6 +1444,10 @@ struct aac_supplement_adapter_info #define AAC_OPTION_VARIABLE_BLOCK_SIZE cpu_to_le32(0x00040000) /* 240 simple volume support */ #define AAC_OPTION_SUPPORTED_240_VOLUMES cpu_to_le32(0x10000000) +/* + * Supports FIB dump sync command send prior to IOP_RESET + */ +#define AAC_OPTION_SUPPORTED3_IOP_RESET_FIB_DUMP cpu_to_le32(0x00004000) #define AAC_SIS_VERSION_V3 3 #define AAC_SIS_SLOT_UNKNOWN 0xFF @@ -2483,6 +2487,7 @@ struct aac_hba_info { #define GET_DRIVER_BUFFER_PROPERTIES 0x00000023 #define RCV_TEMP_READINGS 0x00000025 #define GET_COMM_PREFERRED_SETTINGS 0x00000026 +#define IOP_RESET_FW_FIB_DUMP 0x00000034 #define IOP_RESET 0x00001000 #define IOP_RESET_ALWAYS 0x00001001 #define RE_INIT_ADAPTER 0x000000ee @@ -2639,6 +2644,7 @@ void aac_hba_callback(void *context, struct fib *fibptr); #define fib_data(fibctx) ((void *)(fibctx)->hw_fib_va->data) struct aac_dev *aac_init_adapter(struct aac_dev *dev); void aac_src_access_devreg(struct aac_dev *dev, int mode); +void aac_set_intx_mode(struct aac_dev *dev); int aac_get_config_status(struct aac_dev *dev, int commit_flag); int aac_get_containers(struct aac_dev *dev); int aac_scsi_cmd(struct scsi_cmnd *cmd); @@ -2685,4 +2691,5 @@ extern int aac_commit; extern int update_interval; extern int check_interval; extern int aac_check_reset; +extern int aac_fib_dump; #endif diff --git a/drivers/scsi/aacraid/commctrl.c index 614842a9eb07fe..f6afd50579c038 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -580,7 +580,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) goto cleanup; } - chn = aac_logical_to_phys(user_srbcmd->channel); + chn = user_srbcmd->channel; if (chn < AAC_MAX_BUSES && user_srbcmd->id < AAC_MAX_TARGETS && dev->hba_map[chn][user_srbcmd->id].devtype == AAC_DEVTYPE_NATIVE_RAW) { diff --git a/drivers/scsi/aacraid/comminit.c index 40bfc57b68493a..35607005f7e1fb 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c @@ -330,7 +330,7 @@ int aac_send_shutdown(struct aac_dev * dev) dev->pdev->device == PMC_DEVICE_S8 || dev->pdev->device == PMC_DEVICE_S9) && dev->msi_enabled) - aac_src_access_devreg(dev, AAC_ENABLE_INTX); + aac_set_intx_mode(dev); return status; }
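The commsup.c hunks just below keep the fib pool's DMA bookkeeping in one place: aac_fib_map_free() now frees with the ALIGN32 - 1 bytes of slack included in the allocation size, because aac_fib_setup() rounds the bus address up to a 32-byte boundary for the PMC hardware. That round-up is the usual power-of-two mask trick; a standalone, compilable sketch:

#include <stdint.h>
#include <stdio.h>

#define ALIGN32 32u

/* Power-of-two round-up: add (align - 1), then clear the low bits. */
static uint64_t align_up(uint64_t addr, uint64_t align)
{
	return (addr + (align - 1)) & ~(align - 1);
}

int main(void)
{
	/* 0x1004 rounds up to 0x1020; an already-aligned 0x1020 is unchanged. */
	printf("%#llx %#llx\n",
	       (unsigned long long)align_up(0x1004, ALIGN32),
	       (unsigned long long)align_up(0x1020, ALIGN32));
	return 0;
}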
diff --git a/drivers/scsi/aacraid/commsup.c index 969727b67cdd14..c8172f16cf33cd 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -95,12 +95,20 @@ static int fib_map_alloc(struct aac_dev *dev) void aac_fib_map_free(struct aac_dev *dev) { - if (dev->hw_fib_va && dev->max_cmd_size) { - pci_free_consistent(dev->pdev, - (dev->max_cmd_size * - (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)), - dev->hw_fib_va, dev->hw_fib_pa); - } + size_t alloc_size; + size_t fib_size; + int num_fibs; + + if (!dev->hw_fib_va || !dev->max_cmd_size) + return; + + num_fibs = dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB; + fib_size = dev->max_fib_size + sizeof(struct aac_fib_xporthdr); + alloc_size = fib_size * num_fibs + ALIGN32 - 1; + + pci_free_consistent(dev->pdev, alloc_size, dev->hw_fib_va, + dev->hw_fib_pa); + dev->hw_fib_va = NULL; dev->hw_fib_pa = 0; } @@ -153,22 +161,20 @@ int aac_fib_setup(struct aac_dev * dev) if (i<0) return -ENOMEM; - /* 32 byte alignment for PMC */ - hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1); - dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va + - (hw_fib_pa - dev->hw_fib_pa)); - dev->hw_fib_pa = hw_fib_pa; memset(dev->hw_fib_va, 0, (dev->max_cmd_size + sizeof(struct aac_fib_xporthdr)) * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)); + /* 32 byte alignment for PMC */ + hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1); + hw_fib = (struct hw_fib *)((unsigned char *)dev->hw_fib_va + + (hw_fib_pa - dev->hw_fib_pa)); + /* add Xport header */ - dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va + + hw_fib = (struct hw_fib *)((unsigned char *)hw_fib + sizeof(struct aac_fib_xporthdr)); - dev->hw_fib_pa += sizeof(struct aac_fib_xporthdr); + hw_fib_pa += sizeof(struct aac_fib_xporthdr); - hw_fib = dev->hw_fib_va; - hw_fib_pa = dev->hw_fib_pa; /* * Initialise the fibs */ @@ -461,6 +467,35 @@ int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw return 0; } +#ifdef CONFIG_EEH +static inline int aac_check_eeh_failure(struct aac_dev *dev) +{ + /* Check for an EEH failure for the given + * device node. Function eeh_dev_check_failure() + * returns 0 if there has not been an EEH error + * otherwise returns a non-zero value. + * + * Need to be called before any PCI operation, + * i.e., before aac_adapter_check_health() + */ + struct eeh_dev *edev = pci_dev_to_eeh_dev(dev->pdev); + + if (eeh_dev_check_failure(edev)) { + /* The EEH mechanisms will handle this + * error and reset the device if + * necessary. + */ + return 1; + } + return 0; +} +#else +static inline int aac_check_eeh_failure(struct aac_dev *dev) +{ + return 0; +} +#endif +
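The CONFIG_EEH stub above follows the usual pattern for optional platform facilities: when EEH is configured out, the guard compiles down to return 0, so aac_fib_send() and aac_hba_send() can call it unconditionally before touching the adapter. A kernel-context sketch of the same guard for a hypothetical driver (the mydrv names are invented; eeh_dev_check_failure() and pci_dev_to_eeh_dev() are the powerpc EEH calls used above):

#include <linux/pci.h>

/* Hypothetical driver state; only the pdev member matters here. */
struct mydrv { struct pci_dev *pdev; };

#ifdef CONFIG_EEH
/* Ask the EEH core whether the slot has already failed before touching
 * its registers; non-zero means EEH will recover the device, so callers
 * should bail out early instead of issuing more PCI accesses.
 */
static inline int mydrv_slot_failed(struct mydrv *d)
{
	return eeh_dev_check_failure(pci_dev_to_eeh_dev(d->pdev)) ? 1 : 0;
}
#else
/* Non-EEH platforms: the guard costs nothing and callers stay unchanged. */
static inline int mydrv_slot_failed(struct mydrv *d) { return 0; }
#endif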
/* * Define the highest level of host to adapter communication routines. * These routines will support host to adapter FS communication. These @@ -496,9 +531,12 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, unsigned long mflags = 0; unsigned long sflags = 0; - if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned))) return -EBUSY; + + if (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)) + return -EINVAL; + /* * There are 5 cases with the wait and response requested flags. * The only invalid cases are if the caller requests to wait and @@ -662,6 +700,10 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, } return -ETIMEDOUT; } + + if (aac_check_eeh_failure(dev)) + return -EFAULT; + if ((blink = aac_adapter_check_health(dev)) > 0) { if (wait == -1) { printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n" @@ -755,7 +797,12 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback, FIB_COUNTER_INCREMENT(aac_config.NativeSent); if (wait) { + spin_unlock_irqrestore(&fibptr->event_lock, flags); + + if (aac_check_eeh_failure(dev)) + return -EFAULT; + /* Only set for first known interruptible command */ if (down_interruptible(&fibptr->event_wait)) { fibptr->done = 2; @@ -1590,11 +1637,29 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type) command->SCp.phase = AAC_OWNER_ERROR_HANDLER; command->scsi_done(command); } + /* + * Any Device that was already marked offline needs to be cleaned up + */ + __shost_for_each_device(dev, host) { + if (!scsi_device_online(dev)) { + sdev_printk(KERN_INFO, dev, "Removing offline device\n"); + scsi_remove_device(dev); + scsi_device_put(dev); + } + } retval = 0; out: aac->in_reset = 0; scsi_unblock_requests(host); + /* + * Issue bus rescan to catch any configuration that might have + * occurred + */ + if (!retval) { + dev_info(&aac->pdev->dev, "Issuing bus rescan\n"); + scsi_scan_host(host); + } if (jafo) { spin_lock_irq(host->host_lock); } @@ -1815,7 +1880,7 @@ int aac_check_health(struct aac_dev * aac) printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED); if (!aac_check_reset || ((aac_check_reset == 1) && - (aac->supplement_adapter_info.SupportedOptions2 & + (aac->supplement_adapter_info.supported_options2 & AAC_OPTION_IGNORE_RESET))) goto out; host = aac->scsi_host_ptr; @@ -1843,9 +1908,6 @@ static void aac_resolve_luns(struct aac_dev *dev) for (bus = 0; bus < AAC_MAX_BUSES; bus++) { for (target = 0; target < AAC_MAX_TARGETS; target++) { - if (aac_phys_to_logical(bus) == ENCLOSURE_CHANNEL) - continue; - if (bus == CONTAINER_CHANNEL) channel = CONTAINER_CHANNEL; else @@ -1857,7 +1919,7 @@ static void aac_resolve_luns(struct aac_dev *dev) sdev = scsi_device_lookup(dev->scsi_host_ptr, channel, target, 0); - if (!sdev && devtype) + if (!sdev && new_devtype) scsi_add_device(dev->scsi_host_ptr, channel, target, 0); else if (sdev && new_devtype != devtype) @@ -1994,7 +2056,6 @@ static int fillup_pools(struct aac_dev *dev, struct hw_fib **hw_fib_pool, { struct hw_fib **hw_fib_p; struct fib **fib_p; - int rcode = 1; hw_fib_p = hw_fib_pool; fib_p = fib_pool; @@ -2012,11 +2073,11 @@ static int fillup_pools(struct aac_dev *dev, struct hw_fib **hw_fib_pool, } } + /* + * Get the actual number of allocated fibs + */ num = hw_fib_p - hw_fib_pool; - if (!num) - rcode = 0; - - return rcode; + return num; }
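fillup_pools() above now returns the number of fibs actually allocated (the hw_fib_p - hw_fib_pool pointer difference) instead of a 0/1 rcode, and its caller further below simply tests the count. The same partial-fill idiom as a standalone sketch:

#include <stdlib.h>

/* Fill up to n slots, stopping at the first allocation failure.
 * Returns the number of slots filled, i.e. the pointer-difference count;
 * 0 means nothing usable was allocated and the caller should bail out.
 */
static size_t fill_pool(void **pool, size_t n, size_t item_size)
{
	void **p = pool;

	while (p < pool + n && (*p = malloc(item_size)) != NULL)
		p++;
	return p - pool;
}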
static void wakeup_fibctx_threads(struct aac_dev *dev, @@ -2124,7 +2185,6 @@ static void aac_process_events(struct aac_dev *dev) struct fib *fib; unsigned long flags; spinlock_t *t_lock; - unsigned int rcode; t_lock = dev->queues->queue[HostNormCmdQueue].lock; spin_lock_irqsave(t_lock, flags); @@ -2150,7 +2210,7 @@ static void aac_process_events(struct aac_dev *dev) /* Thor AIF */ aac_handle_sa_aif(dev, fib); aac_fib_adapter_complete(fib, (u16)sizeof(u32)); - continue; + goto free_fib; } /* * We will process the FIB here or pass it to a @@ -2207,8 +2267,8 @@ static void aac_process_events(struct aac_dev *dev) * Fill up fib pointer pools with actual fibs * and hw_fibs */ - rcode = fillup_pools(dev, hw_fib_pool, fib_pool, num); - if (!rcode) + num = fillup_pools(dev, hw_fib_pool, fib_pool, num); + if (!num) goto free_mem; /* @@ -2264,8 +2324,8 @@ static int aac_send_wellness_command(struct aac_dev *dev, char *wellness_str, aac_fib_init(fibptr); - vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.VirtDeviceBus); - vid = (u32)le16_to_cpu(dev->supplement_adapter_info.VirtDeviceTarget); + vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_bus); + vid = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_target); srbcmd = (struct aac_srb *)fib_data(fibptr); @@ -2434,7 +2494,7 @@ int aac_command_thread(void *data) /* Don't even try to talk to adapter if it's sick */ ret = aac_check_health(dev); - if (!dev->queues) + if (ret || !dev->queues) break; next_check_jiffies = jiffies + ((long)(unsigned)check_interval) @@ -2446,8 +2506,7 @@ && (now.tv_usec > (1000000 / HZ))) difference = (((1000000 - now.tv_usec) * HZ) + 500000) / 1000000; - else if (ret == 0) { - + else { if (now.tv_usec > 500000) ++now.tv_sec; @@ -2458,9 +2517,6 @@ ret = aac_send_hosttime(dev, &now); difference = (long)(unsigned)update_interval*HZ; - } else { - /* retry shortly */ - difference = 10 * HZ; } next_jiffies = jiffies + difference; if (time_before(next_check_jiffies,next_jiffies))
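The aac_command_thread() hunks above align the periodic host-time sync to wall-clock second boundaries by converting the microseconds left in the current second into jiffies, rounding to nearest. The conversion in isolation (plain C; hz stands in for the kernel's HZ):

/* Convert "microseconds remaining in this second" into timer ticks,
 * rounding to nearest: adding half the divisor (500000) before the
 * divide avoids always truncating downward.
 */
static long usecs_left_to_ticks(long tv_usec, long hz)
{
	return (((1000000 - tv_usec) * hz) + 500000) / 1000000;
}

For example, with hz = 250 and 700000 microseconds remaining, this yields 175 ticks, i.e. 0.7 s.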
PAGE_SIZE, "%s\n", - aac_drivers[dev->cardtype].vname); + aac_drivers[dev->cardtype].vname); return len; } @@ -1078,7 +1080,7 @@ static ssize_t aac_show_flags(struct device *cdev, "SAI_READ_CAPACITY_16\n"); if (dev->jbod) len += snprintf(buf + len, PAGE_SIZE - len, "SUPPORTED_JBOD\n"); - if (dev->supplement_adapter_info.SupportedOptions2 & + if (dev->supplement_adapter_info.supported_options2 & AAC_OPTION_POWER_MANAGEMENT) len += snprintf(buf + len, PAGE_SIZE - len, "SUPPORTED_POWER_MANAGEMENT\n"); @@ -1129,6 +1131,13 @@ static ssize_t aac_show_bios_version(struct device *device, return len; } +static ssize_t aac_show_driver_version(struct device *device, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%s\n", aac_driver_version); +} + static ssize_t aac_show_serial_number(struct device *device, struct device_attribute *attr, char *buf) { @@ -1139,12 +1148,12 @@ static ssize_t aac_show_serial_number(struct device *device, len = snprintf(buf, 16, "%06X\n", le32_to_cpu(dev->adapter_info.serial[0])); if (len && - !memcmp(&dev->supplement_adapter_info.MfgPcbaSerialNo[ - sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo)-len], + !memcmp(&dev->supplement_adapter_info.mfg_pcba_serial_no[ + sizeof(dev->supplement_adapter_info.mfg_pcba_serial_no)-len], buf, len-1)) len = snprintf(buf, 16, "%.*s\n", - (int)sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo), - dev->supplement_adapter_info.MfgPcbaSerialNo); + (int)sizeof(dev->supplement_adapter_info.mfg_pcba_serial_no), + dev->supplement_adapter_info.mfg_pcba_serial_no); return min(len, 16); } @@ -1239,6 +1248,13 @@ static struct device_attribute aac_bios_version = { }, .show = aac_show_bios_version, }; +static struct device_attribute aac_lld_version = { + .attr = { + .name = "driver_version", + .mode = 0444, + }, + .show = aac_show_driver_version, +}; static struct device_attribute aac_serial_number = { .attr = { .name = "serial_number", @@ -1276,6 +1292,7 @@ static struct device_attribute *aac_attrs[] = { &aac_kernel_version, &aac_monitor_version, &aac_bios_version, + &aac_lld_version, &aac_serial_number, &aac_max_channel, &aac_max_id, diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c index 0e69a80c327583..5d19c31e3bbac5 100644 --- a/drivers/scsi/aacraid/rx.c +++ b/drivers/scsi/aacraid/rx.c @@ -475,7 +475,7 @@ static int aac_rx_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type) { u32 var = 0; - if (!(dev->supplement_adapter_info.SupportedOptions2 & + if (!(dev->supplement_adapter_info.supported_options2 & AAC_OPTION_MU_RESET) || (bled >= 0) || (bled == -2)) { if (bled) printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n", diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index 8e4e2ddbafd744..7b0410e0f56948 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c @@ -436,17 +436,24 @@ static int aac_src_check_health(struct aac_dev *dev) { u32 status = src_readl(dev, MUnit.OMR); + /* + * Check to see if the board panic'd. + */ + if (unlikely(status & KERNEL_PANIC)) + goto err_blink; + /* * Check to see if the board failed any self tests. */ if (unlikely(status & SELF_TEST_FAILED)) - return -1; + goto err_out; /* - * Check to see if the board panic'd. + * Check to see if the board failed any self tests. */ - if (unlikely(status & KERNEL_PANIC)) - return (status >> 16) & 0xFF; + if (unlikely(status & MONITOR_PANIC)) + goto err_out; + /* * Wait for the adapter to be up and running. 
@@ -456,6 +463,12 @@ static int aac_src_check_health(struct aac_dev *dev) * Everything is OK */ return 0; + +err_out: + return -1; + +err_blink: + return (status >> 16) & 0xFF; } static inline u32 aac_get_vector(struct aac_dev *dev) @@ -657,7 +670,7 @@ static int aac_srcv_ioremap(struct aac_dev *dev, u32 size) return 0; } -static void aac_set_intx_mode(struct aac_dev *dev) +void aac_set_intx_mode(struct aac_dev *dev) { if (dev->msi_enabled) { aac_src_access_devreg(dev, AAC_ENABLE_INTX); @@ -666,10 +679,27 @@ } } +static void aac_dump_fw_fib_iop_reset(struct aac_dev *dev) +{ + __le32 supported_options3; + + if (!aac_fib_dump) + return; + + supported_options3 = dev->supplement_adapter_info.supported_options3; + if (!(supported_options3 & AAC_OPTION_SUPPORTED3_IOP_RESET_FIB_DUMP)) + return; + + aac_adapter_sync_cmd(dev, IOP_RESET_FW_FIB_DUMP, + 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL); +} + static void aac_send_iop_reset(struct aac_dev *dev, int bled) { u32 var, reset_mask; + aac_dump_fw_fib_iop_reset(dev); + bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, 0, 0, 0, 0, 0, 0, &var, &reset_mask, NULL, NULL, NULL); @@ -684,7 +714,7 @@ static void aac_send_iop_reset(struct aac_dev *dev, int bled) aac_set_intx_mode(dev); - if (!bled && (dev->supplement_adapter_info.SupportedOptions2 & + if (!bled && (dev->supplement_adapter_info.supported_options2 & AAC_OPTION_DOORBELL_RESET)) { src_writel(dev, MUnit.IDR, reset_mask); } else { @@ -714,6 +744,12 @@ static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type) pr_err("%s%d: adapter kernel panic'd %x.\n", dev->name, dev->id, bled); + /* + * When there is a BlinkLED, IOP_RESET has no effect + */ + if (bled >= 2 && dev->sa_firmware && reset_type & HW_IOP_RESET) + reset_type &= ~HW_IOP_RESET; + dev->a_ops.adapter_enable_int = aac_src_disable_interrupt; switch (reset_type) { diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c index 109e2c99e6c162..95d8f25cbccab7 100644 --- a/drivers/scsi/aic7xxx/aic79xx_core.c +++ b/drivers/scsi/aic7xxx/aic79xx_core.c @@ -6278,7 +6278,7 @@ ahd_reset(struct ahd_softc *ahd, int reinit) * does not disable its parity logic prior to * the start of the reset. This may cause a * parity error to be detected and thus a - * spurious SERR or PERR assertion. Disble + * spurious SERR or PERR assertion. Disable * PERR and SERR responses during the CHIPRST. */ mod_cmd = cmd & ~(PCIM_CMD_PERRESPEN|PCIM_CMD_SERRESPEN);
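aac_dump_fw_fib_iop_reset() above is doubly gated: the user must set the aac_fib_dump module parameter and the adapter must advertise the capability in supported_options3 before the dump command is sent ahead of IOP_RESET. The same opt-in-twice pattern as a standalone sketch (names and bit value are hypothetical):

#include <stdbool.h>

static int dump_enabled;               /* module-parameter-style flag, 0 = off */

#define CAP_PRERESET_DUMP 0x00004000u  /* hypothetical capability bit */

struct ctrl { unsigned int caps; };    /* capability word reported by firmware */

/* Issue the optional command only when both the user and the firmware
 * opted in; either gate alone is not enough.
 */
static bool want_prereset_dump(const struct ctrl *c)
{
	return dump_enabled && (c->caps & CAP_PRERESET_DUMP);
}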
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h index fdd4eb4e41b21c..4fc8ed5fe067e1 100644 --- a/drivers/scsi/bnx2fc/bnx2fc.h +++ b/drivers/scsi/bnx2fc/bnx2fc.h @@ -39,7 +39,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/scsi/bnx2i/bnx2i.h index ed7f3228e2349c..89ef1a1678d192 100644 --- a/drivers/scsi/bnx2i/bnx2i.h +++ b/drivers/scsi/bnx2i/bnx2i.h @@ -25,7 +25,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/scsi/cxlflash/main.c index 7069639e92bc40..3061d8045382e4 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -2259,6 +2259,8 @@ static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS, 0ULL }; static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS, CXLFLASH_NOTIFY_SHUTDOWN }; +static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS, + CXLFLASH_NOTIFY_SHUTDOWN }; /* * PCI device binding table @@ -2268,6 +2270,8 @@ static struct pci_device_id cxlflash_pci_table[] = { PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals}, {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals}, + {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals}, {} }; diff --git a/drivers/scsi/cxlflash/main.h index e43545c86bcf9f..0be2261e631224 100644 --- a/drivers/scsi/cxlflash/main.h +++ b/drivers/scsi/cxlflash/main.h @@ -25,6 +25,7 @@ #define PCI_DEVICE_ID_IBM_CORSA 0x04F0 #define PCI_DEVICE_ID_IBM_FLASH_GT 0x0600 +#define PCI_DEVICE_ID_IBM_BRIARD 0x0624 /* Since there is only one target, make it 0 */ #define CXLFLASH_TARGET 0 diff --git a/drivers/scsi/cxlflash/superpipe.c index ef5bf55f08a4c6..b46fd2f4562857 100644 --- a/drivers/scsi/cxlflash/superpipe.c +++ b/drivers/scsi/cxlflash/superpipe.c @@ -305,6 +305,7 @@ static int read_cap16(struct scsi_device *sdev, struct llun_info *lli) struct cxlflash_cfg *cfg = shost_priv(sdev->host); struct device *dev = &cfg->dev->dev; struct glun_info *gli = lli->parent; + struct scsi_sense_hdr sshdr; u8 *cmd_buf = NULL; u8 *scsi_cmd = NULL; u8 *sense_buf = NULL; @@ -332,7 +333,8 @@ static int read_cap16(struct scsi_device *sdev, struct llun_info *lli) /* Drop the ioctl read semaphore across lengthy call */ up_read(&cfg->ioctl_rwsem); result = scsi_execute(sdev, scsi_cmd, DMA_FROM_DEVICE, cmd_buf, - CMD_BUFSIZE, sense_buf, to, CMD_RETRIES, 0, NULL); + CMD_BUFSIZE, sense_buf, &sshdr, to, CMD_RETRIES, + 0, 0, NULL); down_read(&cfg->ioctl_rwsem); rc = check_state(cfg); if (rc) { @@ -345,10 +347,6 @@ static int read_cap16(struct scsi_device *sdev, struct llun_info *lli) if (driver_byte(result) == DRIVER_SENSE) { result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */ if (result & SAM_STAT_CHECK_CONDITION) { - struct scsi_sense_hdr sshdr; - - scsi_normalize_sense(sense_buf, SCSI_SENSE_BUFFERSIZE, - &sshdr); switch (sshdr.sense_key) { case NO_SENSE: case RECOVERED_ERROR: diff --git a/drivers/scsi/cxlflash/vlun.c index 8fcc804dbef9d9..7aa06ef229fd79 100644 --- a/drivers/scsi/cxlflash/vlun.c +++ b/drivers/scsi/cxlflash/vlun.c @@ -453,8 +453,8 @@ static int write_same16(struct scsi_device *sdev, /* Drop the ioctl read semaphore across lengthy call */ up_read(&cfg->ioctl_rwsem); result = scsi_execute(sdev, scsi_cmd, DMA_TO_DEVICE, cmd_buf, - CMD_BUFSIZE, sense_buf, to, CMD_RETRIES, - 0, NULL); + CMD_BUFSIZE, sense_buf, NULL, to, + CMD_RETRIES, 0, 0, NULL); down_read(&cfg->ioctl_rwsem); rc = check_state(cfg); if (rc) {
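The cxlflash and device-handler conversions in this series are the same mechanical change: scsi_execute_req_flags() callers move to scsi_execute() and pass an optional struct scsi_sense_hdr for the midlayer to decode, instead of open-coding scsi_normalize_sense(). A kernel-context sketch of the new call shape, mirroring the argument order used above (the TUR wrapper and its timeout/retry constants are invented):

#include <scsi/scsi.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_device.h>

#define TUR_TIMEOUT (30 * HZ)   /* hypothetical */
#define TUR_RETRIES 3           /* hypothetical */

static int issue_tur(struct scsi_device *sdev)
{
	unsigned char cdb[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	struct scsi_sense_hdr sshdr;
	int res;

	/* Raw sense buffer argument is NULL; the midlayer decodes the
	 * sense data directly into sshdr for us.
	 */
	res = scsi_execute(sdev, cdb, DMA_NONE, NULL, 0, NULL, &sshdr,
			   TUR_TIMEOUT, TUR_RETRIES, 0, 0, NULL);
	if (res && scsi_sense_valid(&sshdr))
		return sshdr.sense_key;   /* let the caller interpret it */
	return res;
}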
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c index d704752b63329f..c01b47e5b55a89 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -113,7 +113,7 @@ struct alua_queue_data { #define ALUA_POLICY_SWITCH_ALL 1 static void alua_rtpg_work(struct work_struct *work); -static void alua_rtpg_queue(struct alua_port_group *pg, +static bool alua_rtpg_queue(struct alua_port_group *pg, struct scsi_device *sdev, struct alua_queue_data *qdata, bool force); static void alua_check(struct scsi_device *sdev, bool force); @@ -151,11 +151,9 @@ static int submit_rtpg(struct scsi_device *sdev, unsigned char *buff, cdb[1] = MI_REPORT_TARGET_PGS; put_unaligned_be32(bufflen, &cdb[6]); - return scsi_execute_req_flags(sdev, cdb, DMA_FROM_DEVICE, - buff, bufflen, sshdr, - ALUA_FAILOVER_TIMEOUT * HZ, - ALUA_FAILOVER_RETRIES, NULL, - req_flags, 0); + return scsi_execute(sdev, cdb, DMA_FROM_DEVICE, buff, bufflen, NULL, + sshdr, ALUA_FAILOVER_TIMEOUT * HZ, + ALUA_FAILOVER_RETRIES, req_flags, 0, NULL); } /* @@ -185,11 +183,9 @@ static int submit_stpg(struct scsi_device *sdev, int group_id, cdb[1] = MO_SET_TARGET_PGS; put_unaligned_be32(stpg_len, &cdb[6]); - return scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE, - stpg_data, stpg_len, - sshdr, ALUA_FAILOVER_TIMEOUT * HZ, - ALUA_FAILOVER_RETRIES, NULL, - req_flags, 0); + return scsi_execute(sdev, cdb, DMA_TO_DEVICE, stpg_data, stpg_len, NULL, + sshdr, ALUA_FAILOVER_TIMEOUT * HZ, + ALUA_FAILOVER_RETRIES, req_flags, 0, NULL); } static struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size, @@ -866,7 +862,13 @@ static void alua_rtpg_work(struct work_struct *work) kref_put(&pg->kref, release_port_group); } -static void alua_rtpg_queue(struct alua_port_group *pg, +/** + * alua_rtpg_queue() - cause RTPG to be submitted asynchronously + * + * Returns true if and only if alua_rtpg_work() will be called asynchronously. + * That function is responsible for calling @qdata->fn().
+ */ +static bool alua_rtpg_queue(struct alua_port_group *pg, struct scsi_device *sdev, struct alua_queue_data *qdata, bool force) { @@ -874,8 +876,8 @@ static void alua_rtpg_queue(struct alua_port_group *pg, unsigned long flags; struct workqueue_struct *alua_wq = kaluad_wq; - if (!pg) - return; + if (WARN_ON_ONCE(!pg) || scsi_device_get(sdev)) + return false; spin_lock_irqsave(&pg->lock, flags); if (qdata) { @@ -888,14 +890,12 @@ static void alua_rtpg_queue(struct alua_port_group *pg, pg->flags |= ALUA_PG_RUN_RTPG; kref_get(&pg->kref); pg->rtpg_sdev = sdev; - scsi_device_get(sdev); start_queue = 1; } else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) { pg->flags |= ALUA_PG_RUN_RTPG; /* Do not queue if the worker is already running */ if (!(pg->flags & ALUA_PG_RUNNING)) { kref_get(&pg->kref); - sdev = NULL; start_queue = 1; } } @@ -904,13 +904,17 @@ static void alua_rtpg_queue(struct alua_port_group *pg, alua_wq = kaluad_sync_wq; spin_unlock_irqrestore(&pg->lock, flags); - if (start_queue && - !queue_delayed_work(alua_wq, &pg->rtpg_work, - msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) { - if (sdev) - scsi_device_put(sdev); - kref_put(&pg->kref, release_port_group); + if (start_queue) { + if (queue_delayed_work(alua_wq, &pg->rtpg_work, + msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) + sdev = NULL; + else + kref_put(&pg->kref, release_port_group); } + if (sdev) + scsi_device_put(sdev); + + return true; } /* @@ -1011,11 +1015,13 @@ static int alua_activate(struct scsi_device *sdev, mutex_unlock(&h->init_mutex); goto out; } - fn = NULL; rcu_read_unlock(); mutex_unlock(&h->init_mutex); - alua_rtpg_queue(pg, sdev, qdata, true); + if (alua_rtpg_queue(pg, sdev, qdata, true)) + fn = NULL; + else + err = SCSI_DH_DEV_OFFLINED; kref_put(&pg->kref, release_port_group); out: if (fn) diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c index 4a7679f6c73da0..8654e940e1a809 100644 --- a/drivers/scsi/device_handler/scsi_dh_emc.c +++ b/drivers/scsi/device_handler/scsi_dh_emc.c @@ -276,10 +276,9 @@ static int send_trespass_cmd(struct scsi_device *sdev, BUG_ON((len > CLARIION_BUFFER_SIZE)); memcpy(csdev->buffer, page22, len); - err = scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE, - csdev->buffer, len, &sshdr, - CLARIION_TIMEOUT * HZ, CLARIION_RETRIES, - NULL, req_flags, 0); + err = scsi_execute(sdev, cdb, DMA_TO_DEVICE, csdev->buffer, len, NULL, + &sshdr, CLARIION_TIMEOUT * HZ, CLARIION_RETRIES, + req_flags, 0, NULL); if (err) { if (scsi_sense_valid(&sshdr)) res = trespass_endio(sdev, &sshdr); @@ -358,7 +357,7 @@ static int clariion_prep_fn(struct scsi_device *sdev, struct request *req) static int clariion_std_inquiry(struct scsi_device *sdev, struct clariion_dh_data *csdev) { - int err; + int err = SCSI_DH_OK; char *sp_model; sp_model = parse_sp_model(sdev, sdev->inquiry); diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c index be43c940636df6..62d314e07d1125 100644 --- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c +++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c @@ -100,9 +100,8 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h) REQ_FAILFAST_DRIVER; retry: - res = scsi_execute_req_flags(sdev, cmd, DMA_NONE, NULL, 0, &sshdr, - HP_SW_TIMEOUT, HP_SW_RETRIES, - NULL, req_flags, 0); + res = scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, NULL, &sshdr, + HP_SW_TIMEOUT, HP_SW_RETRIES, req_flags, 0, NULL); if (res) { if (scsi_sense_valid(&sshdr)) ret = tur_done(sdev, h, &sshdr); @@ -139,9 +138,8 @@ static 
int hp_sw_start_stop(struct hp_sw_dh_data *h) REQ_FAILFAST_DRIVER; retry: - res = scsi_execute_req_flags(sdev, cmd, DMA_NONE, NULL, 0, &sshdr, - HP_SW_TIMEOUT, HP_SW_RETRIES, - NULL, req_flags, 0); + res = scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, NULL, &sshdr, + HP_SW_TIMEOUT, HP_SW_RETRIES, req_flags, 0, NULL); if (res) { if (!scsi_sense_valid(&sshdr)) { sdev_printk(KERN_WARNING, sdev, diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index b64eaae8533d99..3cbab8710e5813 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -555,10 +555,9 @@ static void send_mode_select(struct work_struct *work) (char *) h->ctlr->array_name, h->ctlr->index, (retry_cnt == RDAC_RETRY_COUNT) ? "queueing" : "retrying"); - if (scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE, - &h->ctlr->mode_select, data_size, &sshdr, - RDAC_TIMEOUT * HZ, - RDAC_RETRIES, NULL, req_flags, 0)) { + if (scsi_execute(sdev, cdb, DMA_TO_DEVICE, &h->ctlr->mode_select, + data_size, NULL, &sshdr, RDAC_TIMEOUT * HZ, + RDAC_RETRIES, req_flags, 0, NULL)) { err = mode_select_handle_sense(sdev, &sshdr); if (err == SCSI_DH_RETRY && retry_cnt--) goto retry; diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 524a0c755ed7e7..9d659aaace15d0 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -2956,7 +2956,7 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr, /* fill_cmd can't fail here, no data buffer to map. */ (void) fill_cmd(c, reset_type, h, NULL, 0, 0, scsi3addr, TYPE_MSG); - rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT); + rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); if (rc) { dev_warn(&h->pdev->dev, "Failed to send reset command\n"); goto out; @@ -3714,7 +3714,7 @@ static int hpsa_get_volume_status(struct ctlr_info *h, * # (integer code indicating one of several NOT READY states * describing why a volume is to be kept offline) */ -static int hpsa_volume_offline(struct ctlr_info *h, +static unsigned char hpsa_volume_offline(struct ctlr_info *h, unsigned char scsi3addr[]) { struct CommandList *c; @@ -3735,7 +3735,7 @@ static int hpsa_volume_offline(struct ctlr_info *h, DEFAULT_TIMEOUT); if (rc) { cmd_free(h, c); - return 0; + return HPSA_VPD_LV_STATUS_UNSUPPORTED; } sense = c->err_info->SenseInfo; if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo)) @@ -3746,19 +3746,13 @@ static int hpsa_volume_offline(struct ctlr_info *h, cmd_status = c->err_info->CommandStatus; scsi_status = c->err_info->ScsiStatus; cmd_free(h, c); - /* Is the volume 'not ready'? */ - if (cmd_status != CMD_TARGET_STATUS || - scsi_status != SAM_STAT_CHECK_CONDITION || - sense_key != NOT_READY || - asc != ASC_LUN_NOT_READY) { - return 0; - } /* Determine the reason for not ready state */ ldstat = hpsa_get_volume_status(h, scsi3addr); /* Keep volume offline in certain cases: */ switch (ldstat) { + case HPSA_LV_FAILED: case HPSA_LV_UNDERGOING_ERASE: case HPSA_LV_NOT_AVAILABLE: case HPSA_LV_UNDERGOING_RPI: @@ -3780,7 +3774,7 @@ static int hpsa_volume_offline(struct ctlr_info *h, default: break; } - return 0; + return HPSA_LV_OK; } /* @@ -3853,10 +3847,10 @@ static int hpsa_update_device_info(struct ctlr_info *h, /* Do an inquiry to the device to see what it is. 
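
In the hpsa hunks above, hpsa_volume_offline() stops flattening the volume state into a 0/1 answer and instead returns the HPSA_LV_* status byte itself (HPSA_VPD_LV_STATUS_UNSUPPORTED when the query cannot even be issued), so callers can single out HPSA_LV_FAILED devices. The same status-propagation style, reduced to a standalone example with made-up values:

#include <stdio.h>

/* Illustrative codes only; the real values are defined in hpsa_cmd.h. */
enum { LV_OK = 0x00, LV_FAILED = 0x01, LV_STATUS_UNSUPPORTED = 0xff };

/* Return the status byte instead of a yes/no answer. */
static unsigned char volume_offline(int cmd_ok, unsigned char ldstat)
{
        if (!cmd_ok)
                return LV_STATUS_UNSUPPORTED;   /* could not query the LV */
        return ldstat;                          /* pass the state through */
}

int main(void)
{
        unsigned char st = volume_offline(1, LV_FAILED);

        if (st == LV_FAILED)
                printf("LV failed (0x%02x), device would be skipped\n", st);
        return 0;
}
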
*/ if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff, (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) { - /* Inquiry failed (msg printed already) */ dev_err(&h->pdev->dev, - "hpsa_update_device_info: inquiry failed\n"); - rc = -EIO; + "%s: inquiry failed, device will be skipped.\n", + __func__); + rc = HPSA_INQUIRY_FAILED; goto bail_out; } @@ -3885,15 +3879,20 @@ static int hpsa_update_device_info(struct ctlr_info *h, if ((this_device->devtype == TYPE_DISK || this_device->devtype == TYPE_ZBC) && is_logical_dev_addr_mode(scsi3addr)) { - int volume_offline; + unsigned char volume_offline; hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level); if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC) hpsa_get_ioaccel_status(h, scsi3addr, this_device); volume_offline = hpsa_volume_offline(h, scsi3addr); - if (volume_offline < 0 || volume_offline > 0xff) - volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED; - this_device->volume_offline = volume_offline & 0xff; + this_device->volume_offline = volume_offline; + if (volume_offline == HPSA_LV_FAILED) { + rc = HPSA_LV_FAILED; + dev_err(&h->pdev->dev, + "%s: LV failed, device will be skipped.\n", + __func__); + goto bail_out; + } } else { this_device->raid_level = RAID_UNKNOWN; this_device->offload_config = 0; @@ -4379,8 +4378,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h) goto out; } if (rc) { - dev_warn(&h->pdev->dev, - "Inquiry failed, skipping device.\n"); + h->drv_req_rescan = 1; continue; } @@ -5558,7 +5556,7 @@ static void hpsa_scan_complete(struct ctlr_info *h) spin_lock_irqsave(&h->scan_lock, flags); h->scan_finished = 1; - wake_up_all(&h->scan_wait_queue); + wake_up(&h->scan_wait_queue); spin_unlock_irqrestore(&h->scan_lock, flags); } @@ -5576,11 +5574,23 @@ static void hpsa_scan_start(struct Scsi_Host *sh) if (unlikely(lockup_detected(h))) return hpsa_scan_complete(h); + /* + * If a scan is already waiting to run, no need to add another + */ + spin_lock_irqsave(&h->scan_lock, flags); + if (h->scan_waiting) { + spin_unlock_irqrestore(&h->scan_lock, flags); + return; + } + + spin_unlock_irqrestore(&h->scan_lock, flags); + /* wait until any scan already in progress is finished. 
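
The new scan_waiting flag above keeps rescan requests from piling up behind a running scan: one scan may run, at most one more may wait, and any further request returns immediately because the waiting scan will pick up the latest state anyway. A single-threaded sketch of that coalescing rule (no locking shown; the driver does this under scan_lock):

#include <stdbool.h>
#include <stdio.h>

struct scan_state { bool running; bool waiting; };

/* Returns true if the caller should go on to run or wait for a scan. */
static bool scan_request(struct scan_state *s)
{
        if (s->waiting)
                return false;      /* a rescan is already queued behind us */
        if (s->running)
                s->waiting = true; /* remember exactly one pending rescan */
        else
                s->running = true; /* idle: start scanning right away */
        return true;
}

int main(void)
{
        struct scan_state s = { false, false };

        printf("%d\n", scan_request(&s));   /* starts a scan    -> 1 */
        printf("%d\n", scan_request(&s));   /* queues a rescan  -> 1 */
        printf("%d\n", scan_request(&s));   /* coalesced, skip  -> 0 */
        return 0;
}
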
*/ while (1) { spin_lock_irqsave(&h->scan_lock, flags); if (h->scan_finished) break; + h->scan_waiting = 1; spin_unlock_irqrestore(&h->scan_lock, flags); wait_event(h->scan_wait_queue, h->scan_finished); /* Note: We don't need to worry about a race between this @@ -5590,6 +5600,7 @@ static void hpsa_scan_start(struct Scsi_Host *sh) */ } h->scan_finished = 0; /* mark scan as in progress */ + h->scan_waiting = 0; spin_unlock_irqrestore(&h->scan_lock, flags); if (unlikely(lockup_detected(h))) @@ -8792,6 +8803,7 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) init_waitqueue_head(&h->event_sync_wait_queue); mutex_init(&h->reset_mutex); h->scan_finished = 1; /* no scan currently in progress */ + h->scan_waiting = 0; pci_set_drvdata(pdev, h); h->ndevices = 0; diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index bf6cdc1066544f..6f04f2ad412530 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h @@ -201,6 +201,7 @@ struct ctlr_info { dma_addr_t errinfo_pool_dhandle; unsigned long *cmd_pool_bits; int scan_finished; + u8 scan_waiting : 1; spinlock_t scan_lock; wait_queue_head_t scan_wait_queue; diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h index a584cdf0705846..5961705eef7675 100644 --- a/drivers/scsi/hpsa_cmd.h +++ b/drivers/scsi/hpsa_cmd.h @@ -156,6 +156,7 @@ #define CFGTBL_BusType_Fibre2G 0x00000200l /* VPD Inquiry types */ +#define HPSA_INQUIRY_FAILED 0x02 #define HPSA_VPD_SUPPORTED_PAGES 0x00 #define HPSA_VPD_LV_DEVICE_ID 0x83 #define HPSA_VPD_LV_DEVICE_GEOMETRY 0xC1 @@ -166,6 +167,7 @@ /* Logical volume states */ #define HPSA_VPD_LV_STATUS_UNSUPPORTED 0xff #define HPSA_LV_OK 0x0 +#define HPSA_LV_FAILED 0x01 #define HPSA_LV_NOT_AVAILABLE 0x0b #define HPSA_LV_UNDERGOING_ERASE 0x0F #define HPSA_LV_UNDERGOING_RPI 0x12 diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 6103231104dadb..fd501f8dbb1107 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -36,6 +36,8 @@ #include #include #include +#include + #include #include diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index c991f3b822f885..b44c3136eb5181 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -65,6 +65,8 @@ #include #include #include +#include + #include #include diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 834d1212b6d506..894b1e3ebd56f4 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -560,8 +561,12 @@ static void iscsi_complete_task(struct iscsi_task *task, int state) WARN_ON_ONCE(task->state == ISCSI_TASK_FREE); task->state = state; - if (!list_empty(&task->running)) + spin_lock_bh(&conn->taskqueuelock); + if (!list_empty(&task->running)) { + pr_debug_once("%s while task on list", __func__); list_del_init(&task->running); + } + spin_unlock_bh(&conn->taskqueuelock); if (conn->task == task) conn->task = NULL; @@ -783,7 +788,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, if (session->tt->xmit_task(task)) goto free_task; } else { + spin_lock_bh(&conn->taskqueuelock); list_add_tail(&task->running, &conn->mgmtqueue); + spin_unlock_bh(&conn->taskqueuelock); iscsi_conn_queue_work(conn); } @@ -1474,8 +1481,10 @@ void iscsi_requeue_task(struct iscsi_task *task) * this may be on the requeue list already if the xmit_task callout * is handling the r2ts while we are adding new ones */ + 
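
The libiscsi changes above introduce conn->taskqueuelock so that every test or manipulation of task->running (the linkage into mgmtqueue, cmdqueue and requeue) happens under one dedicated lock rather than relying on the session locks alone. A userspace analogue of that rule using a pthread mutex; the names here are illustrative, not libiscsi's API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t taskqueue_lock = PTHREAD_MUTEX_INITIALIZER;

struct task { bool queued; };

/* Mirror of iscsi_complete_task(): the "am I on a queue?" check and the
 * removal must be one atomic step under the queue lock. */
static void complete_task(struct task *t)
{
        pthread_mutex_lock(&taskqueue_lock);
        if (t->queued)
                t->queued = false;        /* list_del_init() equivalent */
        pthread_mutex_unlock(&taskqueue_lock);
}

/* Mirror of iscsi_requeue_task(): only requeue if not already queued. */
static void requeue_task(struct task *t)
{
        pthread_mutex_lock(&taskqueue_lock);
        if (!t->queued)
                t->queued = true;         /* list_add_tail() equivalent */
        pthread_mutex_unlock(&taskqueue_lock);
}

int main(void)
{
        struct task t = { .queued = true };

        complete_task(&t);
        requeue_task(&t);
        printf("queued=%d\n", t.queued);
        return 0;
}
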
spin_lock_bh(&conn->taskqueuelock); if (list_empty(&task->running)) list_add_tail(&task->running, &conn->requeue); + spin_unlock_bh(&conn->taskqueuelock); iscsi_conn_queue_work(conn); } EXPORT_SYMBOL_GPL(iscsi_requeue_task); @@ -1512,22 +1521,26 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) * only have one nop-out as a ping from us and targets should not * overflow us with nop-ins */ + spin_lock_bh(&conn->taskqueuelock); check_mgmt: while (!list_empty(&conn->mgmtqueue)) { conn->task = list_entry(conn->mgmtqueue.next, struct iscsi_task, running); list_del_init(&conn->task->running); + spin_unlock_bh(&conn->taskqueuelock); if (iscsi_prep_mgmt_task(conn, conn->task)) { /* regular RX path uses back_lock */ spin_lock_bh(&conn->session->back_lock); __iscsi_put_task(conn->task); spin_unlock_bh(&conn->session->back_lock); conn->task = NULL; + spin_lock_bh(&conn->taskqueuelock); continue; } rc = iscsi_xmit_task(conn); if (rc) goto done; + spin_lock_bh(&conn->taskqueuelock); } /* process pending command queue */ @@ -1535,19 +1548,24 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task, running); list_del_init(&conn->task->running); + spin_unlock_bh(&conn->taskqueuelock); if (conn->session->state == ISCSI_STATE_LOGGING_OUT) { fail_scsi_task(conn->task, DID_IMM_RETRY); + spin_lock_bh(&conn->taskqueuelock); continue; } rc = iscsi_prep_scsi_cmd_pdu(conn->task); if (rc) { if (rc == -ENOMEM || rc == -EACCES) { + spin_lock_bh(&conn->taskqueuelock); list_add_tail(&conn->task->running, &conn->cmdqueue); conn->task = NULL; + spin_unlock_bh(&conn->taskqueuelock); goto done; } else fail_scsi_task(conn->task, DID_ABORT); + spin_lock_bh(&conn->taskqueuelock); continue; } rc = iscsi_xmit_task(conn); @@ -1558,6 +1576,7 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) * we need to check the mgmt queue for nops that need to * be sent to aviod starvation */ + spin_lock_bh(&conn->taskqueuelock); if (!list_empty(&conn->mgmtqueue)) goto check_mgmt; } @@ -1577,12 +1596,15 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) conn->task = task; list_del_init(&conn->task->running); conn->task->state = ISCSI_TASK_RUNNING; + spin_unlock_bh(&conn->taskqueuelock); rc = iscsi_xmit_task(conn); if (rc) goto done; + spin_lock_bh(&conn->taskqueuelock); if (!list_empty(&conn->mgmtqueue)) goto check_mgmt; } + spin_unlock_bh(&conn->taskqueuelock); spin_unlock_bh(&conn->session->frwd_lock); return -ENODATA; @@ -1738,7 +1760,9 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc) goto prepd_reject; } } else { + spin_lock_bh(&conn->taskqueuelock); list_add_tail(&task->running, &conn->cmdqueue); + spin_unlock_bh(&conn->taskqueuelock); iscsi_conn_queue_work(conn); } @@ -2896,6 +2920,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size, INIT_LIST_HEAD(&conn->mgmtqueue); INIT_LIST_HEAD(&conn->cmdqueue); INIT_LIST_HEAD(&conn->requeue); + spin_lock_init(&conn->taskqueuelock); INIT_WORK(&conn->xmitwork, iscsi_xmitworker); /* allocate login_task used for the login/text sequences */ diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 763f012fdeca00..87f5e694dbedd8 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -221,7 +221,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) task->num_scatter = qc->n_elem; } else { for_each_sg(qc->sg, sg, qc->n_elem, si) - xfer += sg->length; + xfer += sg_dma_len(sg); task->total_xfer_len = xfer; task->num_scatter = si; 
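
The one-line libsas change just above matters because dma_map_sg() may coalesce scatter/gather entries: once mapped, the length the hardware will actually move is sg_dma_len(sg), which can differ from the CPU-side sg->length, so the total transfer length has to be summed from the mapped lengths. A toy illustration of the two views drifting apart (the numbers are made up):

#include <stdio.h>

struct seg { unsigned int length; unsigned int dma_len; };

int main(void)
{
        /* Three 4 KiB buffers that an IOMMU merged into one 12 KiB mapping:
         * only one mapped entry remains, and its CPU-side length field
         * still describes just the first buffer. */
        struct seg mapped[] = { { 4096, 12288 } };
        unsigned int cpu_sum = 0, dma_sum = 0;

        for (unsigned int i = 0; i < sizeof(mapped) / sizeof(mapped[0]); i++) {
                cpu_sum += mapped[i].length;     /* old code: 4096  (wrong) */
                dma_sum += mapped[i].dma_len;    /* new code: 12288 (right) */
        }
        printf("cpu view %u, dma view %u\n", cpu_sum, dma_sum);
        return 0;
}
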
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile index e2516ba8ebfa91..cb6aa802c48e48 100644 --- a/drivers/scsi/lpfc/Makefile +++ b/drivers/scsi/lpfc/Makefile @@ -1,9 +1,11 @@ #/******************************************************************* # * This file is part of the Emulex Linux Device Driver for * # * Fibre Channel Host Bus Adapters. * +# * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * +# * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * # * Copyright (C) 2004-2012 Emulex. All rights reserved. * # * EMULEX and SLI are trademarks of Emulex. * -# * www.emulex.com * +# * www.broadcom.com * # * * # * This program is free software; you can redistribute it and/or * # * modify it under the terms of version 2 of the GNU General * @@ -28,6 +30,7 @@ endif obj-$(CONFIG_SCSI_LPFC) := lpfc.o -lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \ - lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsi.o lpfc_attr.o \ - lpfc_vport.o lpfc_debugfs.o lpfc_bsg.o +lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o \ + lpfc_hbadisc.o lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o \ + lpfc_scsi.o lpfc_attr.o lpfc_vport.o lpfc_debugfs.o lpfc_bsg.o \ + lpfc_nvme.o lpfc_nvmet.o diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 6593b073c52483..257bbdd0f0b83a 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * @@ -20,6 +22,7 @@ *******************************************************************/ #include +#include #if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS) #define CONFIG_SCSI_LPFC_DEBUG_FS @@ -53,6 +56,7 @@ struct lpfc_sli2_slim; #define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */ #define LPFC_MAX_SGL_SEG_CNT 512 /* SGL element count per scsi cmnd */ #define LPFC_MAX_BPL_SEG_CNT 4096 /* BPL element count per scsi cmnd */ +#define LPFC_MIN_NVME_SEG_CNT 254 #define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */ #define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */ @@ -95,12 +99,13 @@ struct lpfc_sli2_slim; #define FC_MAX_ADPTMSG 64 #define MAX_HBAEVT 32 +#define MAX_HBAS_NO_RESET 16 /* Number of MSI-X vectors the driver uses */ #define LPFC_MSIX_VECTORS 2 /* lpfc wait event data ready flag */ -#define LPFC_DATA_READY (1<<0) +#define LPFC_DATA_READY 0 /* bit 0 */ /* queue dump line buffer size */ #define LPFC_LBUF_SZ 128 @@ -114,6 +119,20 @@ enum lpfc_polling_flags { DISABLE_FCP_RING_INT = 0x2 }; +struct perf_prof { + uint16_t cmd_cpu[40]; + uint16_t rsp_cpu[40]; + uint16_t qh_cpu[40]; + uint16_t wqidx[40]; +}; + +/* + * Provide for FC4 TYPE x28 - NVME. The + * bit mask for FCP and NVME is 0x8 identically + * because they are 32 bit positions distance. + */ +#define LPFC_FC4_TYPE_BITMASK 0x00000100 + /* Provide DMA memory definitions the driver uses per port instance. 
*/ struct lpfc_dmabuf { struct list_head list; @@ -131,10 +150,24 @@ struct lpfc_dma_pool { struct hbq_dmabuf { struct lpfc_dmabuf hbuf; struct lpfc_dmabuf dbuf; - uint32_t size; + uint16_t total_size; + uint16_t bytes_recv; uint32_t tag; struct lpfc_cq_event cq_event; unsigned long time_stamp; + void *context; +}; + +struct rqb_dmabuf { + struct lpfc_dmabuf hbuf; + struct lpfc_dmabuf dbuf; + uint16_t total_size; + uint16_t bytes_recv; + void *context; + struct lpfc_iocbq *iocbq; + struct lpfc_sglq *sglq; + struct lpfc_queue *hrq; /* ptr to associated Header RQ */ + struct lpfc_queue *drq; /* ptr to associated Data RQ */ }; /* Priority bit. Set value to exceed low water mark in lpfc_mem. */ @@ -367,7 +400,8 @@ struct lpfc_vport { int32_t stopped; /* HBA has not been restarted since last ERATT */ uint8_t fc_linkspeed; /* Link speed after last READ_LA */ - uint32_t num_disc_nodes; /*in addition to hba_state */ + uint32_t num_disc_nodes; /* in addition to hba_state */ + uint32_t gidft_inp; /* cnt of outstanding GID_FTs */ uint32_t fc_nlp_cnt; /* outstanding NODELIST requests */ uint32_t fc_rscn_id_cnt; /* count of RSCNs payloads in list */ @@ -420,7 +454,6 @@ struct lpfc_vport { uint32_t cfg_max_scsicmpl_time; uint32_t cfg_tgt_queue_depth; uint32_t cfg_first_burst_size; - uint32_t dev_loss_tmo_changed; struct fc_vport *fc_vport; @@ -428,6 +461,9 @@ struct lpfc_vport { #ifdef CONFIG_SCSI_LPFC_DEBUG_FS struct dentry *debug_disc_trc; struct dentry *debug_nodelist; + struct dentry *debug_nvmestat; + struct dentry *debug_nvmektime; + struct dentry *debug_cpucheck; struct dentry *vport_debugfs_root; struct lpfc_debugfs_trc *disc_trc; atomic_t disc_trc_cnt; @@ -442,6 +478,11 @@ struct lpfc_vport { uint16_t fdmi_num_disc; uint32_t fdmi_hba_mask; uint32_t fdmi_port_mask; + + /* There is a single nvme instance per vport. */ + struct nvme_fc_local_port *localport; + uint8_t nvmei_support; /* driver supports NVME Initiator */ + uint32_t last_fcp_wqidx; }; struct hbq_s { @@ -459,10 +500,9 @@ struct hbq_s { struct hbq_dmabuf *); }; -#define LPFC_MAX_HBQS 4 /* this matches the position in the lpfc_hbq_defs array */ #define LPFC_ELS_HBQ 0 -#define LPFC_EXTRA_HBQ 1 +#define LPFC_MAX_HBQS 1 enum hba_temp_state { HBA_NORMAL_TEMP, @@ -652,6 +692,9 @@ struct lpfc_hba { * Firmware supports Forced Link Speed * capability */ +#define HBA_NVME_IOQ_FLUSH 0x80000 /* NVME IO queues flushed. */ +#define NVME_XRI_ABORT_EVENT 0x100000 + uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/ struct lpfc_dmabuf slim2p; @@ -700,6 +743,9 @@ struct lpfc_hba { uint8_t wwpn[8]; uint32_t RandomData[7]; uint8_t fcp_embed_io; + uint8_t nvme_support; /* Firmware supports NVME */ + uint8_t nvmet_support; /* driver supports NVMET */ +#define LPFC_NVMET_MAX_PORTS 32 uint8_t mds_diags_support; /* HBA Config Parameters */ @@ -725,6 +771,14 @@ struct lpfc_hba { uint32_t cfg_fcp_imax; uint32_t cfg_fcp_cpu_map; uint32_t cfg_fcp_io_channel; + uint32_t cfg_suppress_rsp; + uint32_t cfg_nvme_oas; + uint32_t cfg_nvme_io_channel; + uint32_t cfg_nvmet_mrq; + uint32_t cfg_nvmet_mrq_post; + uint32_t cfg_enable_nvmet; + uint32_t cfg_nvme_enable_fb; + uint32_t cfg_nvmet_fb_size; uint32_t cfg_total_seg_cnt; uint32_t cfg_sg_seg_cnt; uint32_t cfg_sg_dma_buf_size; @@ -770,6 +824,13 @@ struct lpfc_hba { #define LPFC_FDMI_SUPPORT 1 /* FDMI supported? 
*/ uint32_t cfg_enable_SmartSAN; uint32_t cfg_enable_mds_diags; + uint32_t cfg_enable_fc4_type; + uint32_t cfg_xri_split; +#define LPFC_ENABLE_FCP 1 +#define LPFC_ENABLE_NVME 2 +#define LPFC_ENABLE_BOTH 3 + uint32_t io_channel_irqs; /* number of irqs for io channels */ + struct nvmet_fc_target_port *targetport; lpfc_vpd_t vpd; /* vital product data */ struct pci_dev *pcidev; @@ -784,11 +845,11 @@ struct lpfc_hba { unsigned long data_flags; uint32_t hbq_in_use; /* HBQs in use flag */ - struct list_head rb_pend_list; /* Received buffers to be processed */ uint32_t hbq_count; /* Count of configured HBQs */ struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */ - atomic_t fcp_qidx; /* next work queue to post work to */ + atomic_t fcp_qidx; /* next FCP WQ (RR Policy) */ + atomic_t nvme_qidx; /* next NVME WQ (RR Policy) */ phys_addr_t pci_bar0_map; /* Physical address for PCI BAR0 */ phys_addr_t pci_bar1_map; /* Physical address for PCI BAR1 */ @@ -843,9 +904,17 @@ struct lpfc_hba { /* * stat counters */ - uint64_t fc4InputRequests; - uint64_t fc4OutputRequests; - uint64_t fc4ControlRequests; + uint64_t fc4ScsiInputRequests; + uint64_t fc4ScsiOutputRequests; + uint64_t fc4ScsiControlRequests; + uint64_t fc4ScsiIoCmpls; + uint64_t fc4NvmeInputRequests; + uint64_t fc4NvmeOutputRequests; + uint64_t fc4NvmeControlRequests; + uint64_t fc4NvmeIoCmpls; + uint64_t fc4NvmeLsRequests; + uint64_t fc4NvmeLsCmpls; + uint64_t bg_guard_err_cnt; uint64_t bg_apptag_err_cnt; uint64_t bg_reftag_err_cnt; @@ -856,17 +925,23 @@ struct lpfc_hba { struct list_head lpfc_scsi_buf_list_get; struct list_head lpfc_scsi_buf_list_put; uint32_t total_scsi_bufs; + spinlock_t nvme_buf_list_get_lock; /* NVME buf alloc list lock */ + spinlock_t nvme_buf_list_put_lock; /* NVME buf free list lock */ + struct list_head lpfc_nvme_buf_list_get; + struct list_head lpfc_nvme_buf_list_put; + uint32_t total_nvme_bufs; struct list_head lpfc_iocb_list; uint32_t total_iocbq_bufs; struct list_head active_rrq_list; spinlock_t hbalock; /* pci_mem_pools */ - struct pci_pool *lpfc_scsi_dma_buf_pool; + struct pci_pool *lpfc_sg_dma_buf_pool; struct pci_pool *lpfc_mbuf_pool; struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */ struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */ struct pci_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */ + struct pci_pool *txrdy_payload_pool; struct lpfc_dma_pool lpfc_mbuf_safety_pool; mempool_t *mbox_mem_pool; @@ -878,8 +953,6 @@ struct lpfc_hba { enum intr_type_t intr_type; uint32_t intr_mode; #define LPFC_INTR_ERROR 0xFFFFFFFF - struct msix_entry msix_entries[LPFC_MSIX_VECTORS]; - struct list_head port_list; struct lpfc_vport *pport; /* physical lpfc_vport pointer */ uint16_t max_vpi; /* Maximum virtual nports */ @@ -925,6 +998,12 @@ struct lpfc_hba { struct dentry *debug_readApp; /* inject read app_tag errors */ struct dentry *debug_readRef; /* inject read ref_tag errors */ + struct dentry *debug_nvmeio_trc; + struct lpfc_debugfs_nvmeio_trc *nvmeio_trc; + atomic_t nvmeio_trc_cnt; + uint32_t nvmeio_trc_size; + uint32_t nvmeio_trc_output_idx; + /* T10 DIF error injection */ uint32_t lpfc_injerr_wgrd_cnt; uint32_t lpfc_injerr_wapp_cnt; @@ -950,7 +1029,9 @@ struct lpfc_hba { struct dentry *idiag_ctl_acc; struct dentry *idiag_mbx_acc; struct dentry *idiag_ext_acc; + uint8_t lpfc_idiag_last_eq; #endif + uint16_t nvmeio_trc_on; /* Used for deferred freeing of ELS data buffers */ struct list_head elsbuf; @@ -1023,6 +1104,53 @@ struct lpfc_hba { #define LPFC_TRANSGRESSION_LOW_RXPOWER 
0x4000 uint16_t sfp_alarm; uint16_t sfp_warning; + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS +#define LPFC_CHECK_CPU_CNT 32 + uint32_t cpucheck_rcv_io[LPFC_CHECK_CPU_CNT]; + uint32_t cpucheck_xmt_io[LPFC_CHECK_CPU_CNT]; + uint32_t cpucheck_cmpl_io[LPFC_CHECK_CPU_CNT]; + uint32_t cpucheck_ccmpl_io[LPFC_CHECK_CPU_CNT]; + uint16_t cpucheck_on; +#define LPFC_CHECK_OFF 0 +#define LPFC_CHECK_NVME_IO 1 +#define LPFC_CHECK_NVMET_RCV 2 +#define LPFC_CHECK_NVMET_IO 4 + uint16_t ktime_on; + uint64_t ktime_data_samples; + uint64_t ktime_status_samples; + uint64_t ktime_last_cmd; + uint64_t ktime_seg1_total; + uint64_t ktime_seg1_min; + uint64_t ktime_seg1_max; + uint64_t ktime_seg2_total; + uint64_t ktime_seg2_min; + uint64_t ktime_seg2_max; + uint64_t ktime_seg3_total; + uint64_t ktime_seg3_min; + uint64_t ktime_seg3_max; + uint64_t ktime_seg4_total; + uint64_t ktime_seg4_min; + uint64_t ktime_seg4_max; + uint64_t ktime_seg5_total; + uint64_t ktime_seg5_min; + uint64_t ktime_seg5_max; + uint64_t ktime_seg6_total; + uint64_t ktime_seg6_min; + uint64_t ktime_seg6_max; + uint64_t ktime_seg7_total; + uint64_t ktime_seg7_min; + uint64_t ktime_seg7_max; + uint64_t ktime_seg8_total; + uint64_t ktime_seg8_min; + uint64_t ktime_seg8_max; + uint64_t ktime_seg9_total; + uint64_t ktime_seg9_min; + uint64_t ktime_seg9_max; + uint64_t ktime_seg10_total; + uint64_t ktime_seg10_min; + uint64_t ktime_seg10_max; +#endif }; static inline struct Scsi_Host * @@ -1093,3 +1221,11 @@ lpfc_sli_read_hs(struct lpfc_hba *phba) return 0; } + +static inline struct lpfc_sli_ring * +lpfc_phba_elsring(struct lpfc_hba *phba) +{ + if (phba->sli_rev == LPFC_SLI_REV4) + return phba->sli4_hba.els_wq->pring; + return &phba->sli.sli3_ring[LPFC_ELS_RING]; +} diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 03cb05abc821aa..22819afbaef5c4 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * @@ -35,14 +37,18 @@ #include #include +#include + #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" -#include "lpfc_scsi.h" #include "lpfc.h" +#include "lpfc_scsi.h" +#include "lpfc_nvme.h" +#include "lpfc_nvmet.h" #include "lpfc_logmsg.h" #include "lpfc_version.h" #include "lpfc_compat.h" @@ -50,9 +56,13 @@ #include "lpfc_vport.h" #include "lpfc_attr.h" -#define LPFC_DEF_DEVLOSS_TMO 30 -#define LPFC_MIN_DEVLOSS_TMO 1 -#define LPFC_MAX_DEVLOSS_TMO 255 +#define LPFC_DEF_DEVLOSS_TMO 30 +#define LPFC_MIN_DEVLOSS_TMO 1 +#define LPFC_MAX_DEVLOSS_TMO 255 + +#define LPFC_DEF_MRQ_POST 256 +#define LPFC_MIN_MRQ_POST 32 +#define LPFC_MAX_MRQ_POST 512 /* * Write key size should be multiple of 4. 
If write key is changed @@ -129,6 +139,211 @@ lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr, return snprintf(buf, PAGE_SIZE, "0\n"); } +static ssize_t +lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = shost_priv(shost); + struct lpfc_hba *phba = vport->phba; + struct lpfc_nvmet_tgtport *tgtp; + struct nvme_fc_local_port *localport; + struct lpfc_nvme_lport *lport; + struct lpfc_nvme_rport *rport; + struct nvme_fc_remote_port *nrport; + char *statep; + int len = 0; + + if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) { + len += snprintf(buf, PAGE_SIZE, "NVME Disabled\n"); + return len; + } + if (phba->nvmet_support) { + if (!phba->targetport) { + len = snprintf(buf, PAGE_SIZE, + "NVME Target: x%llx is not allocated\n", + wwn_to_u64(vport->fc_portname.u.wwn)); + return len; + } + /* Port state is only one of two values for now. */ + if (phba->targetport->port_id) + statep = "REGISTERED"; + else + statep = "INIT"; + len += snprintf(buf + len, PAGE_SIZE - len, + "NVME Target: Enabled State %s\n", + statep); + len += snprintf(buf + len, PAGE_SIZE - len, + "%s%d WWPN x%llx WWNN x%llx DID x%06x\n", + "NVME Target: lpfc", + phba->brd_no, + wwn_to_u64(vport->fc_portname.u.wwn), + wwn_to_u64(vport->fc_nodename.u.wwn), + phba->targetport->port_id); + + len += snprintf(buf + len, PAGE_SIZE, + "\nNVME Target: Statistics\n"); + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + len += snprintf(buf+len, PAGE_SIZE-len, + "LS: Rcv %08x Drop %08x Abort %08x\n", + atomic_read(&tgtp->rcv_ls_req_in), + atomic_read(&tgtp->rcv_ls_req_drop), + atomic_read(&tgtp->xmt_ls_abort)); + if (atomic_read(&tgtp->rcv_ls_req_in) != + atomic_read(&tgtp->rcv_ls_req_out)) { + len += snprintf(buf+len, PAGE_SIZE-len, + "Rcv LS: in %08x != out %08x\n", + atomic_read(&tgtp->rcv_ls_req_in), + atomic_read(&tgtp->rcv_ls_req_out)); + } + + len += snprintf(buf+len, PAGE_SIZE-len, + "LS: Xmt %08x Drop %08x Cmpl %08x Err %08x\n", + atomic_read(&tgtp->xmt_ls_rsp), + atomic_read(&tgtp->xmt_ls_drop), + atomic_read(&tgtp->xmt_ls_rsp_cmpl), + atomic_read(&tgtp->xmt_ls_rsp_error)); + + len += snprintf(buf+len, PAGE_SIZE-len, + "FCP: Rcv %08x Drop %08x\n", + atomic_read(&tgtp->rcv_fcp_cmd_in), + atomic_read(&tgtp->rcv_fcp_cmd_drop)); + + if (atomic_read(&tgtp->rcv_fcp_cmd_in) != + atomic_read(&tgtp->rcv_fcp_cmd_out)) { + len += snprintf(buf+len, PAGE_SIZE-len, + "Rcv FCP: in %08x != out %08x\n", + atomic_read(&tgtp->rcv_fcp_cmd_in), + atomic_read(&tgtp->rcv_fcp_cmd_out)); + } + + len += snprintf(buf+len, PAGE_SIZE-len, + "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x\n", + atomic_read(&tgtp->xmt_fcp_read), + atomic_read(&tgtp->xmt_fcp_read_rsp), + atomic_read(&tgtp->xmt_fcp_write), + atomic_read(&tgtp->xmt_fcp_rsp)); + + len += snprintf(buf+len, PAGE_SIZE-len, + "FCP Rsp: abort %08x drop %08x\n", + atomic_read(&tgtp->xmt_fcp_abort), + atomic_read(&tgtp->xmt_fcp_drop)); + + len += snprintf(buf+len, PAGE_SIZE-len, + "FCP Rsp Cmpl: %08x err %08x drop %08x\n", + atomic_read(&tgtp->xmt_fcp_rsp_cmpl), + atomic_read(&tgtp->xmt_fcp_rsp_error), + atomic_read(&tgtp->xmt_fcp_rsp_drop)); + + len += snprintf(buf+len, PAGE_SIZE-len, + "ABORT: Xmt %08x Err %08x Cmpl %08x", + atomic_read(&tgtp->xmt_abort_rsp), + atomic_read(&tgtp->xmt_abort_rsp_error), + atomic_read(&tgtp->xmt_abort_cmpl)); + + len += snprintf(buf+len, PAGE_SIZE-len, "\n"); + return len; + } + + localport = vport->localport; + if 
(!localport) { + len = snprintf(buf, PAGE_SIZE, + "NVME Initiator x%llx is not allocated\n", + wwn_to_u64(vport->fc_portname.u.wwn)); + return len; + } + len = snprintf(buf, PAGE_SIZE, "NVME Initiator Enabled\n"); + + spin_lock_irq(shost->host_lock); + lport = (struct lpfc_nvme_lport *)localport->private; + + /* Port state is only one of two values for now. */ + if (localport->port_id) + statep = "ONLINE"; + else + statep = "UNKNOWN "; + + len += snprintf(buf + len, PAGE_SIZE - len, + "%s%d WWPN x%llx WWNN x%llx DID x%06x %s\n", + "NVME LPORT lpfc", + phba->brd_no, + wwn_to_u64(vport->fc_portname.u.wwn), + wwn_to_u64(vport->fc_nodename.u.wwn), + localport->port_id, statep); + + list_for_each_entry(rport, &lport->rport_list, list) { + /* local short-hand pointer. */ + nrport = rport->remoteport; + + /* Port state is only one of two values for now. */ + switch (nrport->port_state) { + case FC_OBJSTATE_ONLINE: + statep = "ONLINE"; + break; + case FC_OBJSTATE_UNKNOWN: + statep = "UNKNOWN "; + break; + default: + statep = "UNSUPPORTED"; + break; + } + + /* Tab in to show lport ownership. */ + len += snprintf(buf + len, PAGE_SIZE - len, + "NVME RPORT "); + if (phba->brd_no >= 10) + len += snprintf(buf + len, PAGE_SIZE - len, " "); + + len += snprintf(buf + len, PAGE_SIZE - len, "WWPN x%llx ", + nrport->port_name); + len += snprintf(buf + len, PAGE_SIZE - len, "WWNN x%llx ", + nrport->node_name); + len += snprintf(buf + len, PAGE_SIZE - len, "DID x%06x ", + nrport->port_id); + + switch (nrport->port_role) { + case FC_PORT_ROLE_NVME_INITIATOR: + len += snprintf(buf + len, PAGE_SIZE - len, + "INITIATOR "); + break; + case FC_PORT_ROLE_NVME_TARGET: + len += snprintf(buf + len, PAGE_SIZE - len, + "TARGET "); + break; + case FC_PORT_ROLE_NVME_DISCOVERY: + len += snprintf(buf + len, PAGE_SIZE - len, + "DISCOVERY "); + break; + default: + len += snprintf(buf + len, PAGE_SIZE - len, + "UNKNOWN_ROLE x%x", + nrport->port_role); + break; + } + len += snprintf(buf + len, PAGE_SIZE - len, "%s ", statep); + /* Terminate the string. */ + len += snprintf(buf + len, PAGE_SIZE - len, "\n"); + } + spin_unlock_irq(shost->host_lock); + + len += snprintf(buf + len, PAGE_SIZE, "\nNVME Statistics\n"); + len += snprintf(buf+len, PAGE_SIZE-len, + "LS: Xmt %016llx Cmpl %016llx\n", + phba->fc4NvmeLsRequests, + phba->fc4NvmeLsCmpls); + + len += snprintf(buf+len, PAGE_SIZE-len, + "FCP: Rd %016llx Wr %016llx IO %016llx\n", + phba->fc4NvmeInputRequests, + phba->fc4NvmeOutputRequests, + phba->fc4NvmeControlRequests); + + len += snprintf(buf+len, PAGE_SIZE-len, + " Cmpl %016llx\n", phba->fc4NvmeIoCmpls); + + return len; +} + static ssize_t lpfc_bg_info_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -675,6 +890,28 @@ lpfc_issue_lip(struct Scsi_Host *shost) return 0; } +int +lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *q, spinlock_t *lock) +{ + int cnt = 0; + + spin_lock_irq(lock); + while (!list_empty(q)) { + spin_unlock_irq(lock); + msleep(20); + if (cnt++ > 250) { /* 5 secs */ + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "0466 %s %s\n", + "Outstanding IO when ", + "bringing Adapter offline\n"); + return 0; + } + spin_lock_irq(lock); + } + spin_unlock_irq(lock); + return 1; +} + /** * lpfc_do_offline - Issues a mailbox command to bring the link down * @phba: lpfc_hba pointer. 
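
lpfc_nvme_info_show() above builds its whole report by appending to one page-sized buffer, carrying the running length forward so each snprintf() only sees the space that is left. The core idiom, reduced to a small standalone program (BUF_SIZE stands in for PAGE_SIZE, and the strings are illustrative):

#include <stdio.h>

#define BUF_SIZE 256            /* stands in for PAGE_SIZE */

int main(void)
{
        char buf[BUF_SIZE];
        int len = 0;

        /* Each call appends at buf + len and is limited to the remainder. */
        len += snprintf(buf + len, BUF_SIZE - len, "NVME Initiator Enabled\n");
        len += snprintf(buf + len, BUF_SIZE - len,
                        "LS: Xmt %016llx Cmpl %016llx\n", 7ULL, 7ULL);

        /* Note: snprintf() returns the length it wanted to write, so after a
         * truncation len can exceed BUF_SIZE; the driver relies on the page
         * being large enough for its report. */
        fputs(buf, stdout);
        return 0;
}
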
@@ -694,10 +931,10 @@ static int lpfc_do_offline(struct lpfc_hba *phba, uint32_t type) { struct completion online_compl; + struct lpfc_queue *qp = NULL; struct lpfc_sli_ring *pring; struct lpfc_sli *psli; int status = 0; - int cnt = 0; int i; int rc; @@ -717,20 +954,24 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type) /* Wait a little for things to settle down, but not * long enough for dev loss timeout to expire. */ - for (i = 0; i < psli->num_rings; i++) { - pring = &psli->ring[i]; - while (!list_empty(&pring->txcmplq)) { - msleep(10); - if (cnt++ > 500) { /* 5 secs */ - lpfc_printf_log(phba, - KERN_WARNING, LOG_INIT, - "0466 Outstanding IO when " - "bringing Adapter offline\n"); - break; - } + if (phba->sli_rev != LPFC_SLI_REV4) { + for (i = 0; i < psli->num_rings; i++) { + pring = &psli->sli3_ring[i]; + if (!lpfc_emptyq_wait(phba, &pring->txcmplq, + &phba->hbalock)) + goto out; + } + } else { + list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { + pring = qp->pring; + if (!pring) + continue; + if (!lpfc_emptyq_wait(phba, &pring->txcmplq, + &pring->ring_lock)) + goto out; } } - +out: init_completion(&online_compl); rc = lpfc_workq_post_event(phba, &status, &online_compl, type); if (rc == 0) @@ -1945,6 +2186,7 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \ } +static DEVICE_ATTR(nvme_info, 0444, lpfc_nvme_info_show, NULL); static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL); static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL); static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL); @@ -2751,6 +2993,13 @@ lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR(lpfc_xlane_lun, S_IRUGO | S_IWUSR, lpfc_oas_lun_show, lpfc_oas_lun_store); +int lpfc_enable_nvmet_cnt; +unsigned long long lpfc_enable_nvmet[LPFC_NVMET_MAX_PORTS] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; +module_param_array(lpfc_enable_nvmet, ullong, &lpfc_enable_nvmet_cnt, 0444); +MODULE_PARM_DESC(lpfc_enable_nvmet, "Enable HBA port(s) WWPN as a NVME Target"); + static int lpfc_poll = 0; module_param(lpfc_poll, int, S_IRUGO); MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:" @@ -2761,6 +3010,12 @@ MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:" static DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR, lpfc_poll_show, lpfc_poll_store); +int lpfc_no_hba_reset_cnt; +unsigned long lpfc_no_hba_reset[MAX_HBAS_NO_RESET] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; +module_param_array(lpfc_no_hba_reset, ulong, &lpfc_no_hba_reset_cnt, 0444); +MODULE_PARM_DESC(lpfc_no_hba_reset, "WWPN of HBAs that should not be reset"); + LPFC_ATTR(sli_mode, 0, 0, 3, "SLI mode selector:" " 0 - auto (SLI-3 if supported)," @@ -2816,9 +3071,9 @@ lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba; + struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba); - return snprintf(buf, PAGE_SIZE, "%d\n", - phba->sli.ring[LPFC_ELS_RING].txq_max); + return snprintf(buf, PAGE_SIZE, "%d\n", pring->txq_max); } static DEVICE_ATTR(txq_hw, S_IRUGO, @@ -2829,9 +3084,9 @@ lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr, { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba; + struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba); - 
return snprintf(buf, PAGE_SIZE, "%d\n", - phba->sli.ring[LPFC_ELS_RING].txcmplq_max); + return snprintf(buf, PAGE_SIZE, "%d\n", pring->txcmplq_max); } static DEVICE_ATTR(txcmplq_hw, S_IRUGO, @@ -3029,6 +3284,59 @@ lpfc_vport_param_store(devloss_tmo) static DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR, lpfc_devloss_tmo_show, lpfc_devloss_tmo_store); +/* + * lpfc_suppress_rsp: Enable suppress rsp feature is firmware supports it + * lpfc_suppress_rsp = 0 Disable + * lpfc_suppress_rsp = 1 Enable (default) + * + */ +LPFC_ATTR_R(suppress_rsp, 1, 0, 1, + "Enable suppress rsp feature is firmware supports it"); + +/* + * lpfc_nvmet_mrq: Specify number of RQ pairs for processing NVMET cmds + * lpfc_nvmet_mrq = 1 use a single RQ pair + * lpfc_nvmet_mrq >= 2 use specified RQ pairs for MRQ + * + */ +LPFC_ATTR_R(nvmet_mrq, + 1, 1, 16, + "Specify number of RQ pairs for processing NVMET cmds"); + +/* + * lpfc_nvmet_mrq_post: Specify number buffers to post on every MRQ + * + */ +LPFC_ATTR_R(nvmet_mrq_post, LPFC_DEF_MRQ_POST, + LPFC_MIN_MRQ_POST, LPFC_MAX_MRQ_POST, + "Specify number of buffers to post on every MRQ"); + +/* + * lpfc_enable_fc4_type: Defines what FC4 types are supported. + * Supported Values: 1 - register just FCP + * 3 - register both FCP and NVME + * Supported values are [1,3]. Default value is 1 + */ +LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_FCP, + LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH, + "Define fc4 type to register with fabric."); + +/* + * lpfc_xri_split: Defines the division of XRI resources between SCSI and NVME + * This parameter is only used if: + * lpfc_enable_fc4_type is 3 - register both FCP and NVME and + * port is not configured for NVMET. + * + * ELS/CT always get 10% of XRIs, up to a maximum of 250 + * The remaining XRIs get split up based on lpfc_xri_split per port: + * + * Supported Values are in percentages + * the xri_split value is the percentage the SCSI port will get. The remaining + * percentage will go to NVME. + */ +LPFC_ATTR_R(xri_split, 50, 10, 90, + "Division of XRI resources between SCSI and NVME"); + /* # lpfc_log_verbose: Only turn this flag on if you are willing to risk being # deluged with LOTS of information. @@ -4143,13 +4451,15 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr, /* * Value range for the HBA is [5000,5000000] * The value for each EQ depends on how many EQs are configured. + * Allow value == 0 */ - if (val < LPFC_MIN_IMAX || val > LPFC_MAX_IMAX) + if (val && (val < LPFC_MIN_IMAX || val > LPFC_MAX_IMAX)) return -EINVAL; phba->cfg_fcp_imax = (uint32_t)val; - for (i = 0; i < phba->cfg_fcp_io_channel; i += LPFC_MAX_EQ_DELAY) - lpfc_modify_fcp_eq_delay(phba, i); + + for (i = 0; i < phba->io_channel_irqs; i += LPFC_MAX_EQ_DELAY_EQID_CNT) + lpfc_modify_hba_eq_delay(phba, i); return strlen(buf); } @@ -4187,7 +4497,8 @@ lpfc_fcp_imax_init(struct lpfc_hba *phba, int val) return 0; } - if (val >= LPFC_MIN_IMAX && val <= LPFC_MAX_IMAX) { + if ((val >= LPFC_MIN_IMAX && val <= LPFC_MAX_IMAX) || + (val == 0)) { phba->cfg_fcp_imax = val; return 0; } @@ -4376,6 +4687,32 @@ LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1, LPFC_VPORT_ATTR_RW(first_burst_size, 0, 0, 65536, "First burst size for Targets that support first burst"); +/* +* lpfc_nvmet_fb_size: NVME Target mode supported first burst size. +* When the driver is configured as an NVME target, this value is +* communicated to the NVME initiator in the PRLI response. 
It is +* used only when the lpfc_nvme_enable_fb and lpfc_nvmet_support +* parameters are set and the target is sending the PRLI RSP. +* Parameter supported on physical port only - no NPIV support. +* Value range is [0,65536]. Default value is 0. +*/ +LPFC_ATTR_RW(nvmet_fb_size, 0, 0, 65536, + "NVME Target mode first burst size in 512B increments."); + +/* + * lpfc_nvme_enable_fb: Enable NVME first burst on I and T functions. + * For the Initiator (I), enabling this parameter means that an NVMET + * PRLI response with FBA enabled and an FB_SIZE set to a nonzero value will be + * processed by the initiator for subsequent NVME FCP IO. For the target + * function (T), enabling this parameter qualifies the lpfc_nvmet_fb_size + * driver parameter as the target function's first burst size returned to the + * initiator in the target's NVME PRLI response. Parameter supported on physical + * port only - no NPIV support. + * Value range is [0,1]. Default value is 0 (disabled). + */ +LPFC_ATTR_RW(nvme_enable_fb, 0, 0, 1, + "Enable First Burst feature on I and T functions."); + /* # lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue # depth. Default value is 0. When the value of this parameter is zero the @@ -4423,17 +4760,25 @@ static DEVICE_ATTR(lpfc_max_scsicmpl_time, S_IRUGO | S_IWUSR, LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support"); /* -# lpfc_fcp_io_sched: Determine scheduling algrithmn for issuing FCP cmds -# range is [0,1]. Default value is 0. -# For [0], FCP commands are issued to Work Queues ina round robin fashion. -# For [1], FCP commands are issued to a Work Queue associated with the -# current CPU. -# It would be set to 1 by the driver if it's able to set up cpu affinity -# for FCP I/Os through Work Queue associated with the current CPU. Otherwise, -# roundrobin scheduling of FCP I/Os through WQs will be used. -*/ -LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algorithm for " - "issuing commands [0] - Round Robin, [1] - Current CPU"); + * lpfc_io_sched: Determine scheduling algrithmn for issuing FCP cmds + * range is [0,1]. Default value is 0. + * For [0], FCP commands are issued to Work Queues ina round robin fashion. + * For [1], FCP commands are issued to a Work Queue associated with the + * current CPU. + * + * LPFC_FCP_SCHED_ROUND_ROBIN == 0 + * LPFC_FCP_SCHED_BY_CPU == 1 + * + * The driver dynamically sets this to 1 (BY_CPU) if it's able to set up cpu + * affinity for FCP/NVME I/Os through Work Queues associated with the current + * CPU. Otherwise, the default 0 (Round Robin) scheduling of FCP/NVME I/Os + * through WQs will be used. + */ +LPFC_ATTR_RW(fcp_io_sched, LPFC_FCP_SCHED_ROUND_ROBIN, + LPFC_FCP_SCHED_ROUND_ROBIN, + LPFC_FCP_SCHED_BY_CPU, + "Determine scheduling algorithm for " + "issuing commands [0] - Round Robin, [1] - Current CPU"); /* # lpfc_fcp2_no_tgt_reset: Determine bus reset behavior @@ -4560,14 +4905,53 @@ LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or " "MSI-X (2), if possible"); /* -# lpfc_fcp_io_channel: Set the number of FCP EQ/CQ/WQ IO channels -# -# Value range is [1,7]. Default value is 4. -*/ -LPFC_ATTR_R(fcp_io_channel, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN, - LPFC_FCP_IO_CHAN_MAX, + * lpfc_nvme_oas: Use the oas bit when sending NVME/NVMET IOs + * + * 0 = NVME OAS disabled + * 1 = NVME OAS enabled + * + * Value range is [0,1]. Default value is 0. 
+ */ +LPFC_ATTR_RW(nvme_oas, 0, 0, 1, + "Use OAS bit on NVME IOs"); + +/* + * lpfc_fcp_io_channel: Set the number of FCP IO channels the driver + * will advertise it supports to the SCSI layer. This also will map to + * the number of WQs the driver will create. + * + * 0 = Configure the number of io channels to the number of active CPUs. + * 1,32 = Manually specify how many io channels to use. + * + * Value range is [0,32]. Default value is 4. + */ +LPFC_ATTR_R(fcp_io_channel, + LPFC_FCP_IO_CHAN_DEF, + LPFC_HBA_IO_CHAN_MIN, LPFC_HBA_IO_CHAN_MAX, "Set the number of FCP I/O channels"); +/* + * lpfc_nvme_io_channel: Set the number of IO hardware queues the driver + * will advertise it supports to the NVME layer. This also will map to + * the number of WQs the driver will create. + * + * This module parameter is valid when lpfc_enable_fc4_type is set + * to support NVME. + * + * The NVME Layer will try to create this many, plus 1 administrative + * hardware queue. The administrative queue will always map to WQ 0 + * A hardware IO queue maps (qidx) to a specific driver WQ. + * + * 0 = Configure the number of io channels to the number of active CPUs. + * 1,32 = Manually specify how many io channels to use. + * + * Value range is [0,32]. Default value is 0. + */ +LPFC_ATTR_R(nvme_io_channel, + LPFC_NVME_IO_CHAN_DEF, + LPFC_HBA_IO_CHAN_MIN, LPFC_HBA_IO_CHAN_MAX, + "Set the number of NVME I/O channels"); + /* # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. # 0 = HBA resets disabled @@ -4692,6 +5076,7 @@ LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT, LPFC_ATTR_R(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics"); struct device_attribute *lpfc_hba_attrs[] = { + &dev_attr_nvme_info, &dev_attr_bg_info, &dev_attr_bg_guard_err, &dev_attr_bg_apptag_err, @@ -4718,6 +5103,8 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_peer_port_login, &dev_attr_lpfc_nodev_tmo, &dev_attr_lpfc_devloss_tmo, + &dev_attr_lpfc_enable_fc4_type, + &dev_attr_lpfc_xri_split, &dev_attr_lpfc_fcp_class, &dev_attr_lpfc_use_adisc, &dev_attr_lpfc_first_burst_size, @@ -4752,9 +5139,16 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_poll_tmo, &dev_attr_lpfc_task_mgmt_tmo, &dev_attr_lpfc_use_msi, + &dev_attr_lpfc_nvme_oas, &dev_attr_lpfc_fcp_imax, &dev_attr_lpfc_fcp_cpu_map, &dev_attr_lpfc_fcp_io_channel, + &dev_attr_lpfc_suppress_rsp, + &dev_attr_lpfc_nvme_io_channel, + &dev_attr_lpfc_nvmet_mrq, + &dev_attr_lpfc_nvmet_mrq_post, + &dev_attr_lpfc_nvme_enable_fb, + &dev_attr_lpfc_nvmet_fb_size, &dev_attr_lpfc_enable_bg, &dev_attr_lpfc_soft_wwnn, &dev_attr_lpfc_soft_wwpn, @@ -5764,15 +6158,17 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_fdmi_on_init(phba, lpfc_fdmi_on); lpfc_enable_SmartSAN_init(phba, lpfc_enable_SmartSAN); lpfc_use_msi_init(phba, lpfc_use_msi); + lpfc_nvme_oas_init(phba, lpfc_nvme_oas); lpfc_fcp_imax_init(phba, lpfc_fcp_imax); lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map); - lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel); lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); + lpfc_EnableXLane_init(phba, lpfc_EnableXLane); if (phba->sli_rev != LPFC_SLI_REV4) phba->cfg_EnableXLane = 0; lpfc_XLanePriority_init(phba, lpfc_XLanePriority); + memset(phba->cfg_oas_tgt_wwpn, 0, (8 * sizeof(uint8_t))); memset(phba->cfg_oas_vpt_wwpn, 0, (8 * sizeof(uint8_t))); phba->cfg_oas_lun_state = 0; @@ -5786,9 +6182,48 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) phba->cfg_poll = 0; else 
phba->cfg_poll = lpfc_poll; + lpfc_suppress_rsp_init(phba, lpfc_suppress_rsp); + + lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type); + lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq); + lpfc_nvmet_mrq_post_init(phba, lpfc_nvmet_mrq_post); + + /* Initialize first burst. Target vs Initiator are different. */ + lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb); + lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size); + lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel); + lpfc_nvme_io_channel_init(phba, lpfc_nvme_io_channel); + + if (phba->sli_rev != LPFC_SLI_REV4) { + /* NVME only supported on SLI4 */ + phba->nvmet_support = 0; + phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP; + } else { + /* We MUST have FCP support */ + if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) + phba->cfg_enable_fc4_type |= LPFC_ENABLE_FCP; + } + + /* A value of 0 means use the number of CPUs found in the system */ + if (phba->cfg_fcp_io_channel == 0) + phba->cfg_fcp_io_channel = phba->sli4_hba.num_present_cpu; + if (phba->cfg_nvme_io_channel == 0) + phba->cfg_nvme_io_channel = phba->sli4_hba.num_present_cpu; + + if (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME) + phba->cfg_fcp_io_channel = 0; + + if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) + phba->cfg_nvme_io_channel = 0; + + if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel) + phba->io_channel_irqs = phba->cfg_fcp_io_channel; + else + phba->io_channel_irqs = phba->cfg_nvme_io_channel; phba->cfg_soft_wwnn = 0L; phba->cfg_soft_wwpn = 0L; + lpfc_xri_split_init(phba, lpfc_xri_split); lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); lpfc_hba_log_verbose_init(phba, lpfc_log_verbose); @@ -5804,6 +6239,60 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) return; } +/** + * lpfc_nvme_mod_param_dep - Adjust module parameter value based on + * dependencies between protocols and roles. + * @phba: lpfc_hba pointer. + **/ +void +lpfc_nvme_mod_param_dep(struct lpfc_hba *phba) +{ + if (phba->cfg_nvme_io_channel > phba->sli4_hba.num_present_cpu) + phba->cfg_nvme_io_channel = phba->sli4_hba.num_present_cpu; + + if (phba->cfg_fcp_io_channel > phba->sli4_hba.num_present_cpu) + phba->cfg_fcp_io_channel = phba->sli4_hba.num_present_cpu; + + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME && + phba->nvmet_support) { + phba->cfg_enable_fc4_type &= ~LPFC_ENABLE_FCP; + phba->cfg_fcp_io_channel = 0; + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, + "6013 %s x%x fb_size x%x, fb_max x%x\n", + "NVME Target PRLI ACC enable_fb ", + phba->cfg_nvme_enable_fb, + phba->cfg_nvmet_fb_size, + LPFC_NVMET_FB_SZ_MAX); + + if (phba->cfg_nvme_enable_fb == 0) + phba->cfg_nvmet_fb_size = 0; + else { + if (phba->cfg_nvmet_fb_size > LPFC_NVMET_FB_SZ_MAX) + phba->cfg_nvmet_fb_size = LPFC_NVMET_FB_SZ_MAX; + } + + /* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */ + if (phba->cfg_nvmet_mrq > phba->cfg_nvme_io_channel) { + phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel; + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, + "6018 Adjust lpfc_nvmet_mrq to %d\n", + phba->cfg_nvmet_mrq); + } + } else { + /* Not NVME Target mode. Turn off Target parameters. 
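
The configuration logic above reduces to a few rules: a channel count of 0 means "one per present CPU", a single-protocol setup zeroes the other protocol's channels, and the IRQ count is the larger of the two. A condensed, self-contained rendering of that decision (simplified; the real code also caps the counts against the CPU count again in lpfc_nvme_mod_param_dep()):

#include <stdio.h>

enum { ENABLE_FCP = 1, ENABLE_NVME = 2, ENABLE_BOTH = 3 };

static void pick_channels(int fc4_type, int cpus, int fcp, int nvme)
{
        if (fcp == 0)
                fcp = cpus;                     /* 0 means "one per CPU" */
        if (nvme == 0)
                nvme = cpus;
        if (fc4_type == ENABLE_NVME)
                fcp = 0;                        /* FCP not registered */
        if (fc4_type == ENABLE_FCP)
                nvme = 0;                       /* NVME not registered */
        printf("fcp=%d nvme=%d irqs=%d\n", fcp, nvme, fcp > nvme ? fcp : nvme);
}

int main(void)
{
        pick_channels(ENABLE_BOTH, 8, 0, 0);    /* -> fcp=8 nvme=8 irqs=8 */
        pick_channels(ENABLE_FCP, 8, 4, 0);     /* -> fcp=4 nvme=0 irqs=4 */
        return 0;
}
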
*/ + phba->nvmet_support = 0; + phba->cfg_nvmet_mrq = 0; + phba->cfg_nvmet_mrq_post = 0; + phba->cfg_nvmet_fb_size = 0; + } + + if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel) + phba->io_channel_irqs = phba->cfg_fcp_io_channel; + else + phba->io_channel_irqs = phba->cfg_nvme_io_channel; +} + /** * lpfc_get_vport_cfgparam - Used during port create, init the vport structure * @vport: lpfc_vport pointer. diff --git a/drivers/scsi/lpfc/lpfc_attr.h b/drivers/scsi/lpfc/lpfc_attr.h index b2bd28e965faf2..d56dafcdd563b6 100644 --- a/drivers/scsi/lpfc/lpfc_attr.h +++ b/drivers/scsi/lpfc/lpfc_attr.h @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 7dca4d6a888346..18157d2840a3b0 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2009-2015 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * @@ -1704,6 +1706,7 @@ lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba) struct lpfc_vport **vports; struct Scsi_Host *shost; struct lpfc_sli *psli; + struct lpfc_queue *qp = NULL; struct lpfc_sli_ring *pring; int i = 0; @@ -1711,9 +1714,6 @@ lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba) if (!psli) return -ENODEV; - pring = &psli->ring[LPFC_FCP_RING]; - if (!pring) - return -ENODEV; if ((phba->link_state == LPFC_HBA_ERROR) || (psli->sli_flag & LPFC_BLOCK_MGMT_IO) || @@ -1732,10 +1732,18 @@ lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba) scsi_block_requests(shost); } - while (!list_empty(&pring->txcmplq)) { - if (i++ > 500) /* wait up to 5 seconds */ + if (phba->sli_rev != LPFC_SLI_REV4) { + pring = &psli->sli3_ring[LPFC_FCP_RING]; + lpfc_emptyq_wait(phba, &pring->txcmplq, &phba->hbalock); + return 0; + } + list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { + pring = qp->pring; + if (!pring || (pring->ringno != LPFC_FCP_RING)) + continue; + if (!lpfc_emptyq_wait(phba, &pring->txcmplq, + &pring->ring_lock)) break; - msleep(10); } return 0; } @@ -2703,7 +2711,7 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi, * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffers * @phba: Pointer to HBA context object * - * This function allocates BSG_MBOX_SIZE (4KB) page size dma buffer and. + * This function allocates BSG_MBOX_SIZE (4KB) page size dma buffer and * returns the pointer to the buffer. 
**/ static struct lpfc_dmabuf * @@ -2875,8 +2883,7 @@ diag_cmd_data_alloc(struct lpfc_hba *phba, static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri, size_t len) { - struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; + struct lpfc_sli_ring *pring; struct lpfc_iocbq *cmdiocbq; IOCB_t *cmd = NULL; struct list_head head, *curr, *next; @@ -2890,6 +2897,8 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri, int iocb_stat; int i = 0; + pring = lpfc_phba_elsring(phba); + cmdiocbq = lpfc_sli_get_iocbq(phba); rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); if (rxbmp != NULL) { @@ -5403,13 +5412,15 @@ lpfc_bsg_timeout(struct bsg_job *job) struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *cmdiocb; - struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; + struct lpfc_sli_ring *pring; struct bsg_job_data *dd_data; unsigned long flags; int rc = 0; LIST_HEAD(completions); struct lpfc_iocbq *check_iocb, *next_iocb; + pring = lpfc_phba_elsring(phba); + /* if job's driver data is NULL, the command completed or is in the * the process of completing. In this case, return status to request * so the timeout is retried. This avoids double completion issues diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h index f2247aa4fa1736..e7d95a4e8042fd 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.h +++ b/drivers/scsi/lpfc/lpfc_bsg.h @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2010-2015 Emulex. All rights reserved. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * + * Copyright (C) 2010-2015 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * diff --git a/drivers/scsi/lpfc/lpfc_compat.h b/drivers/scsi/lpfc/lpfc_compat.h index c88e556ea62e54..6b32b0ae750666 100644 --- a/drivers/scsi/lpfc/lpfc_compat.h +++ b/drivers/scsi/lpfc/lpfc_compat.h @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 309643a2c55c7d..54e6ac42fbcd42 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. 
* * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * @@ -21,6 +23,7 @@ typedef int (*node_filter)(struct lpfc_nodelist *, void *); struct fc_rport; +struct fc_frame_header; void lpfc_down_link(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_sli_read_link_ste(struct lpfc_hba *); void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t, uint16_t); @@ -167,6 +170,8 @@ void lpfc_hb_timeout_handler(struct lpfc_hba *); void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, struct lpfc_iocbq *); int lpfc_ct_handle_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *); +int lpfc_issue_gidft(struct lpfc_vport *vport); +int lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *iocbq); int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t); int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int, uint32_t); void lpfc_fdmi_num_disc_check(struct lpfc_vport *); @@ -186,6 +191,8 @@ void lpfc_unblock_mgmt_io(struct lpfc_hba *); void lpfc_offline_prep(struct lpfc_hba *, int); void lpfc_offline(struct lpfc_hba *); void lpfc_reset_hba(struct lpfc_hba *); +int lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *hd, + spinlock_t *slock); int lpfc_fof_queue_create(struct lpfc_hba *); int lpfc_fof_queue_setup(struct lpfc_hba *); @@ -193,7 +200,11 @@ int lpfc_fof_queue_destroy(struct lpfc_hba *); irqreturn_t lpfc_sli4_fof_intr_handler(int, void *); int lpfc_sli_setup(struct lpfc_hba *); -int lpfc_sli_queue_setup(struct lpfc_hba *); +int lpfc_sli4_setup(struct lpfc_hba *phba); +void lpfc_sli_queue_init(struct lpfc_hba *phba); +void lpfc_sli4_queue_init(struct lpfc_hba *phba); +struct lpfc_sli_ring *lpfc_sli4_calc_ring(struct lpfc_hba *phba, + struct lpfc_iocbq *iocbq); void lpfc_handle_eratt(struct lpfc_hba *); void lpfc_handle_latt(struct lpfc_hba *); @@ -220,6 +231,7 @@ void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t); void lpfc_init_vpi(struct lpfc_hba *, struct lpfcMboxq *, uint16_t); void lpfc_unreg_vfi(struct lpfcMboxq *, struct lpfc_vport *); void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *); +void lpfc_reg_fcfi_mrq(struct lpfc_hba *phba, struct lpfcMboxq *mbox, int mode); void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t); void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *); int lpfc_check_pending_fcoe_event(struct lpfc_hba *, uint8_t); @@ -231,8 +243,15 @@ struct hbq_dmabuf *lpfc_els_hbq_alloc(struct lpfc_hba *); void lpfc_els_hbq_free(struct lpfc_hba *, struct hbq_dmabuf *); struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *); void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *); +struct rqb_dmabuf *lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba); +void lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab); void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *, uint16_t); +int lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, + struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe); +int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hq, + struct lpfc_queue *dq, int count); +int lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hq); void lpfc_unregister_fcf(struct lpfc_hba *); void lpfc_unregister_fcf_rescan(struct lpfc_hba *); void lpfc_unregister_unused_fcf(struct lpfc_hba *); @@ -287,6 +306,11 @@ void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); void 
lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *, LPFC_MBOXQ_t *); int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t, struct lpfc_iocbq *, uint32_t); +int lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t rnum, + struct lpfc_iocbq *iocbq); +struct lpfc_sglq *__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xri); +struct lpfc_sglq *__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, + struct lpfc_iocbq *piocbq); void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t); void lpfc_sli_bemem_bcopy(void *, void *, uint32_t); void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *); @@ -336,8 +360,13 @@ void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *); void *lpfc_mbuf_alloc(struct lpfc_hba *, int, dma_addr_t *); void __lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t); void lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t); +void *lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int flags, + dma_addr_t *handle); +void lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virtp, dma_addr_t dma); void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *); +void lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp); + /* Function prototypes. */ const char* lpfc_info(struct Scsi_Host *); int lpfc_scan_finished(struct Scsi_Host *, unsigned long); @@ -355,7 +384,8 @@ void lpfc_free_sysfs_attr(struct lpfc_vport *); extern struct device_attribute *lpfc_hba_attrs[]; extern struct device_attribute *lpfc_vport_attrs[]; extern struct scsi_host_template lpfc_template; -extern struct scsi_host_template lpfc_template_s3; +extern struct scsi_host_template lpfc_template_no_hr; +extern struct scsi_host_template lpfc_template_nvme; extern struct scsi_host_template lpfc_vport_template; extern struct fc_function_template lpfc_transport_functions; extern struct fc_function_template lpfc_vport_transport_functions; @@ -375,9 +405,11 @@ void lpfc_host_attrib_init(struct Scsi_Host *); extern void lpfc_debugfs_initialize(struct lpfc_vport *); extern void lpfc_debugfs_terminate(struct lpfc_vport *); extern void lpfc_debugfs_disc_trc(struct lpfc_vport *, int, char *, uint32_t, - uint32_t, uint32_t); + uint32_t, uint32_t); extern void lpfc_debugfs_slow_ring_trc(struct lpfc_hba *, char *, uint32_t, - uint32_t, uint32_t); + uint32_t, uint32_t); +extern void lpfc_debugfs_nvme_trc(struct lpfc_hba *phba, char *fmt, + uint16_t data1, uint16_t data2, uint32_t data3); extern struct lpfc_hbq_init *lpfc_hbq_defs[]; /* SLI4 if_type 2 externs. */ @@ -471,7 +503,10 @@ int lpfc_issue_unreg_vfi(struct lpfc_vport *); int lpfc_selective_reset(struct lpfc_hba *); int lpfc_sli4_read_config(struct lpfc_hba *); void lpfc_sli4_node_prep(struct lpfc_hba *); -int lpfc_sli4_xri_sgl_update(struct lpfc_hba *); +int lpfc_sli4_els_sgl_update(struct lpfc_hba *phba); +int lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba); +int lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba); +int lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba); void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *); uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *); int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t); @@ -496,3 +531,28 @@ bool lpfc_find_next_oas_lun(struct lpfc_hba *, struct lpfc_name *, uint32_t *, uint32_t *); int lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox); void lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb); + +/* NVME interfaces. 
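 * The prototypes that follow bind the driver to the NVME initiator side
 * (localport create/destroy/update and remote port registration) and to
 * the NVMET targetport, plus the unsolicited LS/FCP receive handlers.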
*/ +void lpfc_nvme_unregister_port(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp); +int lpfc_nvme_register_port(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp); +int lpfc_nvme_create_localport(struct lpfc_vport *vport); +void lpfc_nvme_destroy_localport(struct lpfc_vport *vport); +void lpfc_nvme_update_localport(struct lpfc_vport *vport); +int lpfc_nvmet_create_targetport(struct lpfc_hba *phba); +int lpfc_nvmet_update_targetport(struct lpfc_hba *phba); +void lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba); +void lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, + struct lpfc_sli_ring *pring, struct lpfc_iocbq *piocb); +void lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba, + struct lpfc_sli_ring *pring, + struct rqb_dmabuf *nvmebuf, uint64_t isr_ts); +void lpfc_nvme_mod_param_dep(struct lpfc_hba *phba); +void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, + struct lpfc_iocbq *cmdiocb, + struct lpfc_wcqe_complete *abts_cmpl); +extern int lpfc_enable_nvmet_cnt; +extern unsigned long long lpfc_enable_nvmet[]; +extern int lpfc_no_hba_reset_cnt; +extern unsigned long lpfc_no_hba_reset[]; diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 4ac03b16d17f56..d3e9af983015cc 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * @@ -40,8 +42,9 @@ #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" -#include "lpfc_scsi.h" #include "lpfc.h" +#include "lpfc_scsi.h" +#include "lpfc_nvme.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" #include "lpfc_version.h" @@ -453,8 +456,90 @@ lpfc_find_vport_by_did(struct lpfc_hba *phba, uint32_t did) { return NULL; } +static void +lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type) +{ + struct lpfc_nodelist *ndlp; + + if ((vport->port_type != LPFC_NPIV_PORT) || + !(vport->ct_flags & FC_CT_RFF_ID) || !vport->cfg_restrict_login) { + + ndlp = lpfc_setup_disc_node(vport, Did); + + if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, + "Parse GID_FTrsp: did:x%x flg:x%x x%x", + Did, ndlp->nlp_flag, vport->fc_flag); + + /* By default, the driver expects to support FCP FC4 */ + if (fc4_type == FC_TYPE_FCP) + ndlp->nlp_fc4_type |= NLP_FC4_FCP; + + if (fc4_type == FC_TYPE_NVME) + ndlp->nlp_fc4_type |= NLP_FC4_NVME; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "0238 Process x%06x NameServer Rsp " + "Data: x%x x%x x%x x%x\n", Did, + ndlp->nlp_flag, ndlp->nlp_fc4_type, + vport->fc_flag, + vport->fc_rscn_id_cnt); + } else { + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, + "Skip1 GID_FTrsp: did:x%x flg:x%x cnt:%d", + Did, vport->fc_flag, vport->fc_rscn_id_cnt); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "0239 Skip x%06x NameServer Rsp " + "Data: x%x x%x\n", Did, + vport->fc_flag, + vport->fc_rscn_id_cnt); + } + } else { + if (!(vport->fc_flag & FC_RSCN_MODE) || + lpfc_rscn_payload_check(vport, Did)) { + 
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, + "Query GID_FTrsp: did:x%x flg:x%x cnt:%d", + Did, vport->fc_flag, vport->fc_rscn_id_cnt); + + /* + * This NPortID was previously a FCP target, + * Don't even bother to send GFF_ID. + */ + ndlp = lpfc_findnode_did(vport, Did); + if (ndlp && NLP_CHK_NODE_ACT(ndlp)) + ndlp->nlp_fc4_type = fc4_type; + + if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { + ndlp->nlp_fc4_type = fc4_type; + + if (ndlp->nlp_type & NLP_FCP_TARGET) + lpfc_setup_disc_node(vport, Did); + + else if (lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID, + 0, Did) == 0) + vport->num_disc_nodes++; + + else + lpfc_setup_disc_node(vport, Did); + } + } else { + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, + "Skip2 GID_FTrsp: did:x%x flg:x%x cnt:%d", + Did, vport->fc_flag, vport->fc_rscn_id_cnt); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "0245 Skip x%06x NameServer Rsp " + "Data: x%x x%x\n", Did, + vport->fc_flag, + vport->fc_rscn_id_cnt); + } + } +} + static int -lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size) +lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint8_t fc4_type, + uint32_t Size) { struct lpfc_hba *phba = vport->phba; struct lpfc_sli_ct_request *Response = @@ -499,97 +584,12 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size) */ if ((Did != vport->fc_myDID) && ((lpfc_find_vport_by_did(phba, Did) == NULL) || - vport->cfg_peer_port_login)) { - if ((vport->port_type != LPFC_NPIV_PORT) || - (!(vport->ct_flags & FC_CT_RFF_ID)) || - (!vport->cfg_restrict_login)) { - ndlp = lpfc_setup_disc_node(vport, Did); - if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { - lpfc_debugfs_disc_trc(vport, - LPFC_DISC_TRC_CT, - "Parse GID_FTrsp: " - "did:x%x flg:x%x x%x", - Did, ndlp->nlp_flag, - vport->fc_flag); - - lpfc_printf_vlog(vport, - KERN_INFO, - LOG_DISCOVERY, - "0238 Process " - "x%x NameServer Rsp" - "Data: x%x x%x x%x\n", - Did, ndlp->nlp_flag, - vport->fc_flag, - vport->fc_rscn_id_cnt); - } else { - lpfc_debugfs_disc_trc(vport, - LPFC_DISC_TRC_CT, - "Skip1 GID_FTrsp: " - "did:x%x flg:x%x cnt:%d", - Did, vport->fc_flag, - vport->fc_rscn_id_cnt); - - lpfc_printf_vlog(vport, - KERN_INFO, - LOG_DISCOVERY, - "0239 Skip x%x " - "NameServer Rsp Data: " - "x%x x%x\n", - Did, vport->fc_flag, - vport->fc_rscn_id_cnt); - } - - } else { - if (!(vport->fc_flag & FC_RSCN_MODE) || - (lpfc_rscn_payload_check(vport, Did))) { - lpfc_debugfs_disc_trc(vport, - LPFC_DISC_TRC_CT, - "Query GID_FTrsp: " - "did:x%x flg:x%x cnt:%d", - Did, vport->fc_flag, - vport->fc_rscn_id_cnt); - - /* This NPortID was previously - * a FCP target, * Don't even - * bother to send GFF_ID. 
- */ - ndlp = lpfc_findnode_did(vport, - Did); - if (ndlp && - NLP_CHK_NODE_ACT(ndlp) - && (ndlp->nlp_type & - NLP_FCP_TARGET)) - lpfc_setup_disc_node - (vport, Did); - else if (lpfc_ns_cmd(vport, - SLI_CTNS_GFF_ID, - 0, Did) == 0) - vport->num_disc_nodes++; - else - lpfc_setup_disc_node - (vport, Did); - } - else { - lpfc_debugfs_disc_trc(vport, - LPFC_DISC_TRC_CT, - "Skip2 GID_FTrsp: " - "did:x%x flg:x%x cnt:%d", - Did, vport->fc_flag, - vport->fc_rscn_id_cnt); - - lpfc_printf_vlog(vport, - KERN_INFO, - LOG_DISCOVERY, - "0245 Skip x%x " - "NameServer Rsp Data: " - "x%x x%x\n", - Did, vport->fc_flag, - vport->fc_rscn_id_cnt); - } - } - } + vport->cfg_peer_port_login)) + lpfc_prep_node_fc4type(vport, Did, fc4_type); + if (CTentry & (cpu_to_be32(SLI_CT_LAST_ENTRY))) goto nsout1; + Cnt -= sizeof(uint32_t); } ctptr = NULL; @@ -609,16 +609,18 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct Scsi_Host *shost = lpfc_shost_from_vport(vport); IOCB_t *irsp; struct lpfc_dmabuf *outp; + struct lpfc_dmabuf *inp; struct lpfc_sli_ct_request *CTrsp; + struct lpfc_sli_ct_request *CTreq; struct lpfc_nodelist *ndlp; - int rc; + int rc, type; /* First save ndlp, before we overwrite it */ ndlp = cmdiocb->context_un.ndlp; /* we pass cmdiocb to state machine which needs rspiocb as well */ cmdiocb->context_un.rsp_iocb = rspiocb; - + inp = (struct lpfc_dmabuf *) cmdiocb->context1; outp = (struct lpfc_dmabuf *) cmdiocb->context2; irsp = &rspiocb->iocb; @@ -656,9 +658,14 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, IOERR_NO_RESOURCES) vport->fc_ns_retry++; + type = lpfc_get_gidft_type(vport, cmdiocb); + if (type == 0) + goto out; + /* CT command is being retried */ + vport->gidft_inp--; rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, - vport->fc_ns_retry, 0); + vport->fc_ns_retry, type); if (rc == 0) goto out; } @@ -670,13 +677,18 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, irsp->ulpStatus, vport->fc_ns_retry); } else { /* Good status, continue checking */ + CTreq = (struct lpfc_sli_ct_request *) inp->virt; CTrsp = (struct lpfc_sli_ct_request *) outp->virt; if (CTrsp->CommandResponse.bits.CmdRsp == cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, - "0208 NameServer Rsp Data: x%x\n", - vport->fc_flag); - lpfc_ns_rsp(vport, outp, + "0208 NameServer Rsp Data: x%x x%x\n", + vport->fc_flag, + CTreq->un.gid.Fc4Type); + + lpfc_ns_rsp(vport, + outp, + CTreq->un.gid.Fc4Type, (uint32_t) (irsp->un.genreq64.bdl.bdeSize)); } else if (CTrsp->CommandResponse.bits.CmdRsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) { @@ -731,9 +743,11 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, (uint32_t) CTrsp->ReasonCode, (uint32_t) CTrsp->Explanation); } + vport->gidft_inp--; } /* Link up / RSCN discovery */ - if (vport->num_disc_nodes == 0) { + if ((vport->num_disc_nodes == 0) && + (vport->gidft_inp == 0)) { /* * The driver has cycled through all Nports in the RSCN payload. 
* Complete the handling by cleaning up and marking the @@ -881,6 +895,60 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, return; } +static void +lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_vport *vport = cmdiocb->vport; + IOCB_t *irsp = &rspiocb->iocb; + struct lpfc_dmabuf *inp = (struct lpfc_dmabuf *)cmdiocb->context1; + struct lpfc_dmabuf *outp = (struct lpfc_dmabuf *)cmdiocb->context2; + struct lpfc_sli_ct_request *CTrsp; + int did; + struct lpfc_nodelist *ndlp; + uint32_t fc4_data_0, fc4_data_1; + + did = ((struct lpfc_sli_ct_request *)inp->virt)->un.gft.PortId; + did = be32_to_cpu(did); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, + "GFT_ID cmpl: status:x%x/x%x did:x%x", + irsp->ulpStatus, irsp->un.ulpWord[4], did); + + if (irsp->ulpStatus == IOSTAT_SUCCESS) { + /* Good status, continue checking */ + CTrsp = (struct lpfc_sli_ct_request *)outp->virt; + fc4_data_0 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[0]); + fc4_data_1 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[1]); + lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, + "3062 DID x%06x GFT Wd0 x%08x Wd1 x%08x\n", + did, fc4_data_0, fc4_data_1); + + ndlp = lpfc_findnode_did(vport, did); + if (ndlp) { + /* The bitmask value for FCP and NVME FCP types is + * the same because they are 32 bits distant from + * each other in word0 and word0. + */ + if (fc4_data_0 & LPFC_FC4_TYPE_BITMASK) + ndlp->nlp_fc4_type |= NLP_FC4_FCP; + if (fc4_data_1 & LPFC_FC4_TYPE_BITMASK) + ndlp->nlp_fc4_type |= NLP_FC4_NVME; + lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, + "3064 Setting ndlp %p, DID x%06x with " + "FC4 x%08x, Data: x%08x x%08x\n", + ndlp, did, ndlp->nlp_fc4_type, + FC_TYPE_FCP, FC_TYPE_NVME); + ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; + } + lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); + lpfc_issue_els_prli(vport, ndlp, 0); + } else + lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, + "3065 GFT_ID failed x%08x\n", irsp->ulpStatus); + + lpfc_ct_free_iocb(phba, cmdiocb); +} static void lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, @@ -1071,31 +1139,27 @@ lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, return; } +/* + * Although the symbolic port name is thought to be an integer + * as of January 18, 2016, leave it as a string until more of + * the record state becomes defined. + */ int lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol, size_t size) { int n; - uint8_t *wwn = vport->phba->wwpn; - n = snprintf(symbol, size, - "Emulex PPN-%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", - wwn[0], wwn[1], wwn[2], wwn[3], - wwn[4], wwn[5], wwn[6], wwn[7]); - - if (vport->port_type == LPFC_PHYSICAL_PORT) - return n; - - if (n < size) - n += snprintf(symbol + n, size - n, " VPort-%d", vport->vpi); - - if (n < size && - strlen(vport->fc_vport->symbolic_name)) - n += snprintf(symbol + n, size - n, " VName-%s", - vport->fc_vport->symbolic_name); + /* + * Use the lpfc board number as the Symbolic Port + * Name object. NPIV is not in play so this integer + * value is sufficient and unique per FC-ID. 
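 * For example, an adapter probed as lpfc board 3 would simply register
 * the symbolic port name "3" (board number chosen here only to
 * illustrate the format).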
+ */ + n = snprintf(symbol, size, "%d", vport->phba->brd_no); return n; } + int lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol, size_t size) @@ -1106,24 +1170,26 @@ lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol, lpfc_decode_firmware_rev(vport->phba, fwrev, 0); n = snprintf(symbol, size, "Emulex %s", vport->phba->ModelName); - if (size < n) return n; - n += snprintf(symbol + n, size - n, " FV%s", fwrev); + n += snprintf(symbol + n, size - n, " FV%s", fwrev); if (size < n) return n; - n += snprintf(symbol + n, size - n, " DV%s", lpfc_release_version); + n += snprintf(symbol + n, size - n, " DV%s.", + lpfc_release_version); if (size < n) return n; - n += snprintf(symbol + n, size - n, " HN:%s", init_utsname()->nodename); - /* Note :- OS name is "Linux" */ + n += snprintf(symbol + n, size - n, " HN:%s.", + init_utsname()->nodename); if (size < n) return n; - n += snprintf(symbol + n, size - n, " OS:%s", init_utsname()->sysname); + /* Note :- OS name is "Linux" */ + n += snprintf(symbol + n, size - n, " OS:%s\n", + init_utsname()->sysname); return n; } @@ -1147,6 +1213,27 @@ lpfc_find_map_node(struct lpfc_vport *vport) return cnt; } +/* + * This routine will return the FC4 Type associated with the CT + * GID_FT command. + */ +int +lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb) +{ + struct lpfc_sli_ct_request *CtReq; + struct lpfc_dmabuf *mp; + uint32_t type; + + mp = cmdiocb->context1; + if (mp == NULL) + return 0; + CtReq = (struct lpfc_sli_ct_request *)mp->virt; + type = (uint32_t)CtReq->un.gid.Fc4Type; + if ((type != SLI_CTPT_FCP) && (type != SLI_CTPT_NVME)) + return 0; + return type; +} + /* * lpfc_ns_cmd * Description: @@ -1207,8 +1294,9 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, /* NameServer Req */ lpfc_printf_vlog(vport, KERN_INFO ,LOG_DISCOVERY, - "0236 NameServer Req Data: x%x x%x x%x\n", - cmdcode, vport->fc_flag, vport->fc_rscn_id_cnt); + "0236 NameServer Req Data: x%x x%x x%x x%x\n", + cmdcode, vport->fc_flag, vport->fc_rscn_id_cnt, + context); bpl = (struct ulp_bde64 *) bmp->virt; memset(bpl, 0, sizeof(struct ulp_bde64)); @@ -1219,6 +1307,8 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, bpl->tus.f.bdeSize = GID_REQUEST_SZ; else if (cmdcode == SLI_CTNS_GFF_ID) bpl->tus.f.bdeSize = GFF_REQUEST_SZ; + else if (cmdcode == SLI_CTNS_GFT_ID) + bpl->tus.f.bdeSize = GFT_REQUEST_SZ; else if (cmdcode == SLI_CTNS_RFT_ID) bpl->tus.f.bdeSize = RFT_REQUEST_SZ; else if (cmdcode == SLI_CTNS_RNN_ID) @@ -1246,7 +1336,8 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, case SLI_CTNS_GID_FT: CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(SLI_CTNS_GID_FT); - CtReq->un.gid.Fc4Type = SLI_CTPT_FCP; + CtReq->un.gid.Fc4Type = context; + if (vport->port_state < LPFC_NS_QRY) vport->port_state = LPFC_NS_QRY; lpfc_set_disctmo(vport); @@ -1261,12 +1352,32 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, cmpl = lpfc_cmpl_ct_cmd_gff_id; break; + case SLI_CTNS_GFT_ID: + CtReq->CommandResponse.bits.CmdRsp = + cpu_to_be16(SLI_CTNS_GFT_ID); + CtReq->un.gft.PortId = cpu_to_be32(context); + cmpl = lpfc_cmpl_ct_cmd_gft_id; + break; + case SLI_CTNS_RFT_ID: vport->ct_flags &= ~FC_CT_RFT_ID; CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(SLI_CTNS_RFT_ID); CtReq->un.rft.PortId = cpu_to_be32(vport->fc_myDID); - CtReq->un.rft.fcpReg = 1; + + /* Register FC4 FCP type if enabled. 
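 * cfg_enable_fc4_type selects FCP, NVME or both. The RFT_ID payload is a
 * bitmap with one bit per FC-4 TYPE value, so FCP (type 0x08) is set in
 * word 0 and NVME (type 0x28) falls in word 1 of that bitmap.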
*/ + if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) + CtReq->un.rft.fcpReg = 1; + + /* Register NVME type if enabled. Defined LE and swapped. + * rsvd[0] is used as word1 because of the hard-coded + * word0 usage in the ct_request data structure. + */ + if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) + CtReq->un.rft.rsvd[0] = cpu_to_be32(0x00000100); + cmpl = lpfc_cmpl_ct_cmd_rft_id; break; @@ -1316,7 +1427,31 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, cpu_to_be16(SLI_CTNS_RFF_ID); CtReq->un.rff.PortId = cpu_to_be32(vport->fc_myDID); CtReq->un.rff.fbits = FC4_FEATURE_INIT; - CtReq->un.rff.type_code = FC_TYPE_FCP; + + /* The driver always supports FC_TYPE_FCP. However, the + * caller can specify NVME (type x28) as well. But only + * these that FC4 type is supported. + */ + if (((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) && + (context == FC_TYPE_NVME)) { + if ((vport == phba->pport) && phba->nvmet_support) { + CtReq->un.rff.fbits = (FC4_FEATURE_TARGET | + FC4_FEATURE_NVME_DISC); + lpfc_nvmet_update_targetport(phba); + } else { + lpfc_nvme_update_localport(vport); + } + CtReq->un.rff.type_code = context; + + } else if (((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) && + (context == FC_TYPE_FCP)) + CtReq->un.rff.type_code = context; + + else + goto ns_cmd_free_bmpvirt; + cmpl = lpfc_cmpl_ct_cmd_rff_id; break; } @@ -1337,6 +1472,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, */ lpfc_nlp_put(ndlp); +ns_cmd_free_bmpvirt: lpfc_mbuf_free(phba, bmp->virt, bmp->phys); ns_cmd_free_bmp: kfree(bmp); diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index caa7a7b0ec53b3..913eed822cb8ed 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2007-2015 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* - * www.emulex.com * + * www.broadcom.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * @@ -34,6 +36,9 @@ #include #include #include +#include + +#include #include "lpfc_hw4.h" #include "lpfc_hw.h" @@ -41,8 +46,10 @@ #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" -#include "lpfc_scsi.h" #include "lpfc.h" +#include "lpfc_scsi.h" +#include "lpfc_nvme.h" +#include "lpfc_nvmet.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" #include "lpfc_vport.h" @@ -99,6 +106,12 @@ module_param(lpfc_debugfs_max_slow_ring_trc, int, S_IRUGO); MODULE_PARM_DESC(lpfc_debugfs_max_slow_ring_trc, "Set debugfs slow ring trace depth"); +/* This MUST be a power of 2 */ +static int lpfc_debugfs_max_nvmeio_trc; +module_param(lpfc_debugfs_max_nvmeio_trc, int, 0444); +MODULE_PARM_DESC(lpfc_debugfs_max_nvmeio_trc, + "Set debugfs NVME IO trace depth"); + static int lpfc_debugfs_mask_disc_trc; module_param(lpfc_debugfs_mask_disc_trc, int, S_IRUGO); MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc, @@ -484,20 +497,23 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size) off += (8 * sizeof(uint32_t)); } - for (i = 0; i < 4; i++) { - pgpp = &phba->port_gp[i]; - pring = &psli->ring[i]; - len += snprintf(buf+len, size-len, - "Ring %d: CMD GetInx:%d (Max:%d Next:%d " - "Local:%d flg:x%x) RSP PutInx:%d Max:%d\n", - i, pgpp->cmdGetInx, pring->sli.sli3.numCiocb, - pring->sli.sli3.next_cmdidx, - pring->sli.sli3.local_getidx, - pring->flag, pgpp->rspPutInx, - pring->sli.sli3.numRiocb); - } - if (phba->sli_rev <= LPFC_SLI_REV3) { + for (i = 0; i < 4; i++) { + pgpp = &phba->port_gp[i]; + pring = &psli->sli3_ring[i]; + len += snprintf(buf+len, size-len, + "Ring %d: CMD GetInx:%d " + "(Max:%d Next:%d " + "Local:%d flg:x%x) " + "RSP PutInx:%d Max:%d\n", + i, pgpp->cmdGetInx, + pring->sli.sli3.numCiocb, + pring->sli.sli3.next_cmdidx, + pring->sli.sli3.local_getidx, + pring->flag, pgpp->rspPutInx, + pring->sli.sli3.numRiocb); + } + word0 = readl(phba->HAregaddr); word1 = readl(phba->CAregaddr); word2 = readl(phba->HSregaddr); @@ -530,11 +546,18 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) int len = 0; int cnt; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ndlp; - unsigned char *statep, *name; + unsigned char *statep; + struct nvme_fc_local_port *localport; + struct lpfc_nvme_lport *lport; + struct lpfc_nvme_rport *rport; + struct lpfc_nvmet_tgtport *tgtp; + struct nvme_fc_remote_port *nrport; cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE); + len += snprintf(buf+len, size-len, "\nFCP Nodelist Entries ...\n"); spin_lock_irq(shost->host_lock); list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { if (!cnt) { @@ -574,36 +597,32 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) default: statep = "UNKNOWN"; } - len += snprintf(buf+len, size-len, "%s DID:x%06x ", - statep, ndlp->nlp_DID); - name = (unsigned char *)&ndlp->nlp_portname; - len += snprintf(buf+len, size-len, - "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x ", - *name, *(name+1), *(name+2), *(name+3), - *(name+4), *(name+5), *(name+6), *(name+7)); - name = (unsigned char *)&ndlp->nlp_nodename; - len += snprintf(buf+len, size-len, - "WWNN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x ", - *name, *(name+1), *(name+2), *(name+3), - *(name+4), *(name+5), *(name+6), *(name+7)); + len += snprintf(buf+len, size-len, "%s DID:x%06x ", + 
statep, ndlp->nlp_DID); + len += snprintf(buf+len, size-len, + "WWPN x%llx ", + wwn_to_u64(ndlp->nlp_portname.u.wwn)); + len += snprintf(buf+len, size-len, + "WWNN x%llx ", + wwn_to_u64(ndlp->nlp_nodename.u.wwn)); if (ndlp->nlp_flag & NLP_RPI_REGISTERED) - len += snprintf(buf+len, size-len, "RPI:%03d ", - ndlp->nlp_rpi); + len += snprintf(buf+len, size-len, "RPI:%03d ", + ndlp->nlp_rpi); else - len += snprintf(buf+len, size-len, "RPI:none "); + len += snprintf(buf+len, size-len, "RPI:none "); len += snprintf(buf+len, size-len, "flag:x%08x ", ndlp->nlp_flag); if (!ndlp->nlp_type) - len += snprintf(buf+len, size-len, "UNKNOWN_TYPE "); + len += snprintf(buf+len, size-len, "UNKNOWN_TYPE "); if (ndlp->nlp_type & NLP_FC_NODE) - len += snprintf(buf+len, size-len, "FC_NODE "); + len += snprintf(buf+len, size-len, "FC_NODE "); if (ndlp->nlp_type & NLP_FABRIC) - len += snprintf(buf+len, size-len, "FABRIC "); + len += snprintf(buf+len, size-len, "FABRIC "); if (ndlp->nlp_type & NLP_FCP_TARGET) - len += snprintf(buf+len, size-len, "FCP_TGT sid:%d ", + len += snprintf(buf+len, size-len, "FCP_TGT sid:%d ", ndlp->nlp_sid); if (ndlp->nlp_type & NLP_FCP_INITIATOR) - len += snprintf(buf+len, size-len, "FCP_INITIATOR "); + len += snprintf(buf+len, size-len, "FCP_INITIATOR "); len += snprintf(buf+len, size-len, "usgmap:%x ", ndlp->nlp_usg_map); len += snprintf(buf+len, size-len, "refcnt:%x", @@ -611,8 +630,592 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) len += snprintf(buf+len, size-len, "\n"); } spin_unlock_irq(shost->host_lock); + + if (phba->nvmet_support && phba->targetport && (vport == phba->pport)) { + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + len += snprintf(buf + len, size - len, + "\nNVME Targetport Entry ...\n"); + + /* Port state is only one of two values for now. */ + if (phba->targetport->port_id) + statep = "REGISTERED"; + else + statep = "INIT"; + len += snprintf(buf + len, size - len, + "TGT WWNN x%llx WWPN x%llx State %s\n", + wwn_to_u64(vport->fc_nodename.u.wwn), + wwn_to_u64(vport->fc_portname.u.wwn), + statep); + len += snprintf(buf + len, size - len, + " Targetport DID x%06x\n", + phba->targetport->port_id); + goto out_exit; + } + + len += snprintf(buf + len, size - len, + "\nNVME Lport/Rport Entries ...\n"); + + localport = vport->localport; + if (!localport) + goto out_exit; + + spin_lock_irq(shost->host_lock); + lport = (struct lpfc_nvme_lport *)localport->private; + + /* Port state is only one of two values for now. */ + if (localport->port_id) + statep = "ONLINE"; + else + statep = "UNKNOWN "; + + len += snprintf(buf + len, size - len, + "Lport DID x%06x PortState %s\n", + localport->port_id, statep); + + len += snprintf(buf + len, size - len, "\tRport List:\n"); + list_for_each_entry(rport, &lport->rport_list, list) { + /* local short-hand pointer. */ + nrport = rport->remoteport; + + /* Port state is only one of two values for now. */ + switch (nrport->port_state) { + case FC_OBJSTATE_ONLINE: + statep = "ONLINE"; + break; + case FC_OBJSTATE_UNKNOWN: + statep = "UNKNOWN "; + break; + default: + statep = "UNSUPPORTED"; + break; + } + + /* Tab in to show lport ownership. 
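 * Each remote port found on this lport is then listed with its object
 * state, FC-ID, WWPN/WWNN and NVME port role.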
*/ + len += snprintf(buf + len, size - len, + "\t%s Port ID:x%06x ", + statep, nrport->port_id); + len += snprintf(buf + len, size - len, "WWPN x%llx ", + nrport->port_name); + len += snprintf(buf + len, size - len, "WWNN x%llx ", + nrport->node_name); + switch (nrport->port_role) { + case FC_PORT_ROLE_NVME_INITIATOR: + len += snprintf(buf + len, size - len, + "NVME INITIATOR "); + break; + case FC_PORT_ROLE_NVME_TARGET: + len += snprintf(buf + len, size - len, + "NVME TARGET "); + break; + case FC_PORT_ROLE_NVME_DISCOVERY: + len += snprintf(buf + len, size - len, + "NVME DISCOVERY "); + break; + default: + len += snprintf(buf + len, size - len, + "UNKNOWN ROLE x%x", + nrport->port_role); + break; + } + + /* Terminate the string. */ + len += snprintf(buf + len, size - len, "\n"); + } + + spin_unlock_irq(shost->host_lock); + out_exit: + return len; +} + +/** + * lpfc_debugfs_nvmestat_data - Dump target node list to a buffer + * @vport: The vport to gather target node info from. + * @buf: The buffer to dump log into. + * @size: The maximum amount of data to process. + * + * Description: + * This routine dumps the NVME statistics associated with @vport + * + * Return Value: + * This routine returns the amount of bytes that were dumped into @buf and will + * not exceed @size. + **/ +static int +lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_nvmet_tgtport *tgtp; + int len = 0; + + if (phba->nvmet_support) { + if (!phba->targetport) + return len; + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + len += snprintf(buf+len, size-len, + "\nNVME Targetport Statistics\n"); + + len += snprintf(buf+len, size-len, + "LS: Rcv %08x Drop %08x Abort %08x\n", + atomic_read(&tgtp->rcv_ls_req_in), + atomic_read(&tgtp->rcv_ls_req_drop), + atomic_read(&tgtp->xmt_ls_abort)); + if (atomic_read(&tgtp->rcv_ls_req_in) != + atomic_read(&tgtp->rcv_ls_req_out)) { + len += snprintf(buf+len, size-len, + "Rcv LS: in %08x != out %08x\n", + atomic_read(&tgtp->rcv_ls_req_in), + atomic_read(&tgtp->rcv_ls_req_out)); + } + + len += snprintf(buf+len, size-len, + "LS: Xmt %08x Drop %08x Cmpl %08x Err %08x\n", + atomic_read(&tgtp->xmt_ls_rsp), + atomic_read(&tgtp->xmt_ls_drop), + atomic_read(&tgtp->xmt_ls_rsp_cmpl), + atomic_read(&tgtp->xmt_ls_rsp_error)); + + len += snprintf(buf+len, size-len, + "FCP: Rcv %08x Drop %08x\n", + atomic_read(&tgtp->rcv_fcp_cmd_in), + atomic_read(&tgtp->rcv_fcp_cmd_drop)); + + if (atomic_read(&tgtp->rcv_fcp_cmd_in) != + atomic_read(&tgtp->rcv_fcp_cmd_out)) { + len += snprintf(buf+len, size-len, + "Rcv FCP: in %08x != out %08x\n", + atomic_read(&tgtp->rcv_fcp_cmd_in), + atomic_read(&tgtp->rcv_fcp_cmd_out)); + } + + len += snprintf(buf+len, size-len, + "FCP Rsp: read %08x readrsp %08x write %08x rsp %08x\n", + atomic_read(&tgtp->xmt_fcp_read), + atomic_read(&tgtp->xmt_fcp_read_rsp), + atomic_read(&tgtp->xmt_fcp_write), + atomic_read(&tgtp->xmt_fcp_rsp)); + + len += snprintf(buf+len, size-len, + "FCP Rsp: abort %08x drop %08x\n", + atomic_read(&tgtp->xmt_fcp_abort), + atomic_read(&tgtp->xmt_fcp_drop)); + + len += snprintf(buf+len, size-len, + "FCP Rsp Cmpl: %08x err %08x drop %08x\n", + atomic_read(&tgtp->xmt_fcp_rsp_cmpl), + atomic_read(&tgtp->xmt_fcp_rsp_error), + atomic_read(&tgtp->xmt_fcp_rsp_drop)); + + len += snprintf(buf+len, size-len, + "ABORT: Xmt %08x Err %08x Cmpl %08x", + atomic_read(&tgtp->xmt_abort_rsp), + atomic_read(&tgtp->xmt_abort_rsp_error), + atomic_read(&tgtp->xmt_abort_cmpl)); + + len += 
snprintf(buf+len, size-len, "\n"); + } else { + if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) + return len; + + len += snprintf(buf + len, size - len, + "\nNVME Lport Statistics\n"); + + len += snprintf(buf + len, size - len, + "LS: Xmt %016llx Cmpl %016llx\n", + phba->fc4NvmeLsRequests, + phba->fc4NvmeLsCmpls); + + len += snprintf(buf + len, size - len, + "FCP: Rd %016llx Wr %016llx IO %016llx\n", + phba->fc4NvmeInputRequests, + phba->fc4NvmeOutputRequests, + phba->fc4NvmeControlRequests); + + len += snprintf(buf + len, size - len, + " Cmpl %016llx\n", phba->fc4NvmeIoCmpls); + } + + return len; +} + + +/** + * lpfc_debugfs_nvmektime_data - Dump target node list to a buffer + * @vport: The vport to gather target node info from. + * @buf: The buffer to dump log into. + * @size: The maximum amount of data to process. + * + * Description: + * This routine dumps the NVME statistics associated with @vport + * + * Return Value: + * This routine returns the amount of bytes that were dumped into @buf and will + * not exceed @size. + **/ +static int +lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size) +{ + struct lpfc_hba *phba = vport->phba; + int len = 0; + + if (phba->nvmet_support == 0) { + /* NVME Initiator */ + len += snprintf(buf + len, PAGE_SIZE - len, + "ktime %s: Total Samples: %lld\n", + (phba->ktime_on ? "Enabled" : "Disabled"), + phba->ktime_data_samples); + if (phba->ktime_data_samples == 0) + return len; + + len += snprintf( + buf + len, PAGE_SIZE - len, + "Segment 1: Last NVME Cmd cmpl " + "done -to- Start of next NVME cnd (in driver)\n"); + len += snprintf( + buf + len, PAGE_SIZE - len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg1_total, + phba->ktime_data_samples), + phba->ktime_seg1_min, + phba->ktime_seg1_max); + len += snprintf( + buf + len, PAGE_SIZE - len, + "Segment 2: Driver start of NVME cmd " + "-to- Firmware WQ doorbell\n"); + len += snprintf( + buf + len, PAGE_SIZE - len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg2_total, + phba->ktime_data_samples), + phba->ktime_seg2_min, + phba->ktime_seg2_max); + len += snprintf( + buf + len, PAGE_SIZE - len, + "Segment 3: Firmware WQ doorbell -to- " + "MSI-X ISR cmpl\n"); + len += snprintf( + buf + len, PAGE_SIZE - len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg3_total, + phba->ktime_data_samples), + phba->ktime_seg3_min, + phba->ktime_seg3_max); + len += snprintf( + buf + len, PAGE_SIZE - len, + "Segment 4: MSI-X ISR cmpl -to- " + "NVME cmpl done\n"); + len += snprintf( + buf + len, PAGE_SIZE - len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg4_total, + phba->ktime_data_samples), + phba->ktime_seg4_min, + phba->ktime_seg4_max); + len += snprintf( + buf + len, PAGE_SIZE - len, + "Total IO avg time: %08lld\n", + div_u64(phba->ktime_seg1_total + + phba->ktime_seg2_total + + phba->ktime_seg3_total + + phba->ktime_seg4_total, + phba->ktime_data_samples)); + return len; + } + + /* NVME Target */ + len += snprintf(buf + len, PAGE_SIZE-len, + "ktime %s: Total Samples: %lld %lld\n", + (phba->ktime_on ? 
"Enabled" : "Disabled"), + phba->ktime_data_samples, + phba->ktime_status_samples); + if (phba->ktime_data_samples == 0) + return len; + + len += snprintf(buf + len, PAGE_SIZE-len, + "Segment 1: MSI-X ISR Rcv cmd -to- " + "cmd pass to NVME Layer\n"); + len += snprintf(buf + len, PAGE_SIZE-len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg1_total, + phba->ktime_data_samples), + phba->ktime_seg1_min, + phba->ktime_seg1_max); + len += snprintf(buf + len, PAGE_SIZE-len, + "Segment 2: cmd pass to NVME Layer- " + "-to- Driver rcv cmd OP (action)\n"); + len += snprintf(buf + len, PAGE_SIZE-len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg2_total, + phba->ktime_data_samples), + phba->ktime_seg2_min, + phba->ktime_seg2_max); + len += snprintf(buf + len, PAGE_SIZE-len, + "Segment 3: Driver rcv cmd OP -to- " + "Firmware WQ doorbell: cmd\n"); + len += snprintf(buf + len, PAGE_SIZE-len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg3_total, + phba->ktime_data_samples), + phba->ktime_seg3_min, + phba->ktime_seg3_max); + len += snprintf(buf + len, PAGE_SIZE-len, + "Segment 4: Firmware WQ doorbell: cmd " + "-to- MSI-X ISR for cmd cmpl\n"); + len += snprintf(buf + len, PAGE_SIZE-len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg4_total, + phba->ktime_data_samples), + phba->ktime_seg4_min, + phba->ktime_seg4_max); + len += snprintf(buf + len, PAGE_SIZE-len, + "Segment 5: MSI-X ISR for cmd cmpl " + "-to- NVME layer passed cmd done\n"); + len += snprintf(buf + len, PAGE_SIZE-len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg5_total, + phba->ktime_data_samples), + phba->ktime_seg5_min, + phba->ktime_seg5_max); + + if (phba->ktime_status_samples == 0) { + len += snprintf(buf + len, PAGE_SIZE-len, + "Total: cmd received by MSI-X ISR " + "-to- cmd completed on wire\n"); + len += snprintf(buf + len, PAGE_SIZE-len, + "avg:%08lld min:%08lld " + "max %08lld\n", + div_u64(phba->ktime_seg10_total, + phba->ktime_data_samples), + phba->ktime_seg10_min, + phba->ktime_seg10_max); + return len; + } + + len += snprintf(buf + len, PAGE_SIZE-len, + "Segment 6: NVME layer passed cmd done " + "-to- Driver rcv rsp status OP\n"); + len += snprintf(buf + len, PAGE_SIZE-len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg6_total, + phba->ktime_status_samples), + phba->ktime_seg6_min, + phba->ktime_seg6_max); + len += snprintf(buf + len, PAGE_SIZE-len, + "Segment 7: Driver rcv rsp status OP " + "-to- Firmware WQ doorbell: status\n"); + len += snprintf(buf + len, PAGE_SIZE-len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg7_total, + phba->ktime_status_samples), + phba->ktime_seg7_min, + phba->ktime_seg7_max); + len += snprintf(buf + len, PAGE_SIZE-len, + "Segment 8: Firmware WQ doorbell: status" + " -to- MSI-X ISR for status cmpl\n"); + len += snprintf(buf + len, PAGE_SIZE-len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg8_total, + phba->ktime_status_samples), + phba->ktime_seg8_min, + phba->ktime_seg8_max); + len += snprintf(buf + len, PAGE_SIZE-len, + "Segment 9: MSI-X ISR for status cmpl " + "-to- NVME layer passed status done\n"); + len += snprintf(buf + len, PAGE_SIZE-len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg9_total, + phba->ktime_status_samples), + phba->ktime_seg9_min, + phba->ktime_seg9_max); + len += snprintf(buf + len, PAGE_SIZE-len, + "Total: cmd received by MSI-X ISR -to- " + "cmd completed on wire\n"); + len += snprintf(buf + len, 
PAGE_SIZE-len, + "avg:%08lld min:%08lld max %08lld\n", + div_u64(phba->ktime_seg10_total, + phba->ktime_status_samples), + phba->ktime_seg10_min, + phba->ktime_seg10_max); + return len; +} + +/** + * lpfc_debugfs_nvmeio_trc_data - Dump NVME IO trace list to a buffer + * @phba: The phba to gather target node info from. + * @buf: The buffer to dump log into. + * @size: The maximum amount of data to process. + * + * Description: + * This routine dumps the NVME IO trace associated with @phba + * + * Return Value: + * This routine returns the amount of bytes that were dumped into @buf and will + * not exceed @size. + **/ +static int +lpfc_debugfs_nvmeio_trc_data(struct lpfc_hba *phba, char *buf, int size) +{ + struct lpfc_debugfs_nvmeio_trc *dtp; + int i, state, index, skip; + int len = 0; + + state = phba->nvmeio_trc_on; + + index = (atomic_read(&phba->nvmeio_trc_cnt) + 1) & + (phba->nvmeio_trc_size - 1); + skip = phba->nvmeio_trc_output_idx; + + len += snprintf(buf + len, size - len, + "%s IO Trace %s: next_idx %d skip %d size %d\n", + (phba->nvmet_support ? "NVME" : "NVMET"), + (state ? "Enabled" : "Disabled"), + index, skip, phba->nvmeio_trc_size); + + if (!phba->nvmeio_trc || state) + return len; + + /* trace MUST bhe off to continue */ + + for (i = index; i < phba->nvmeio_trc_size; i++) { + if (skip) { + skip--; + continue; + } + dtp = phba->nvmeio_trc + i; + phba->nvmeio_trc_output_idx++; + + if (!dtp->fmt) + continue; + + len += snprintf(buf + len, size - len, dtp->fmt, + dtp->data1, dtp->data2, dtp->data3); + + if (phba->nvmeio_trc_output_idx >= phba->nvmeio_trc_size) { + phba->nvmeio_trc_output_idx = 0; + len += snprintf(buf + len, size - len, + "Trace Complete\n"); + goto out; + } + + if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ)) { + len += snprintf(buf + len, size - len, + "Trace Continue (%d of %d)\n", + phba->nvmeio_trc_output_idx, + phba->nvmeio_trc_size); + goto out; + } + } + for (i = 0; i < index; i++) { + if (skip) { + skip--; + continue; + } + dtp = phba->nvmeio_trc + i; + phba->nvmeio_trc_output_idx++; + + if (!dtp->fmt) + continue; + + len += snprintf(buf + len, size - len, dtp->fmt, + dtp->data1, dtp->data2, dtp->data3); + + if (phba->nvmeio_trc_output_idx >= phba->nvmeio_trc_size) { + phba->nvmeio_trc_output_idx = 0; + len += snprintf(buf + len, size - len, + "Trace Complete\n"); + goto out; + } + + if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ)) { + len += snprintf(buf + len, size - len, + "Trace Continue (%d of %d)\n", + phba->nvmeio_trc_output_idx, + phba->nvmeio_trc_size); + goto out; + } + } + + len += snprintf(buf + len, size - len, + "Trace Done\n"); +out: return len; } + +/** + * lpfc_debugfs_cpucheck_data - Dump target node list to a buffer + * @vport: The vport to gather target node info from. + * @buf: The buffer to dump log into. + * @size: The maximum amount of data to process. + * + * Description: + * This routine dumps the NVME statistics associated with @vport + * + * Return Value: + * This routine returns the amount of bytes that were dumped into @buf and will + * not exceed @size. + **/ +static int +lpfc_debugfs_cpucheck_data(struct lpfc_vport *vport, char *buf, int size) +{ + struct lpfc_hba *phba = vport->phba; + int i; + int len = 0; + uint32_t tot_xmt = 0; + uint32_t tot_rcv = 0; + uint32_t tot_cmpl = 0; + uint32_t tot_ccmpl = 0; + + if (phba->nvmet_support == 0) { + /* NVME Initiator */ + len += snprintf(buf + len, PAGE_SIZE - len, + "CPUcheck %s\n", + (phba->cpucheck_on & LPFC_CHECK_NVME_IO ? 
+ "Enabled" : "Disabled")); + for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { + if (i >= LPFC_CHECK_CPU_CNT) + break; + len += snprintf(buf + len, PAGE_SIZE - len, + "%02d: xmit x%08x cmpl x%08x\n", + i, phba->cpucheck_xmt_io[i], + phba->cpucheck_cmpl_io[i]); + tot_xmt += phba->cpucheck_xmt_io[i]; + tot_cmpl += phba->cpucheck_cmpl_io[i]; + } + len += snprintf(buf + len, PAGE_SIZE - len, + "tot:xmit x%08x cmpl x%08x\n", + tot_xmt, tot_cmpl); + return len; + } + + /* NVME Target */ + len += snprintf(buf + len, PAGE_SIZE - len, + "CPUcheck %s ", + (phba->cpucheck_on & LPFC_CHECK_NVMET_IO ? + "IO Enabled - " : "IO Disabled - ")); + len += snprintf(buf + len, PAGE_SIZE - len, + "%s\n", + (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV ? + "Rcv Enabled\n" : "Rcv Disabled\n")); + for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { + if (i >= LPFC_CHECK_CPU_CNT) + break; + len += snprintf(buf + len, PAGE_SIZE - len, + "%02d: xmit x%08x ccmpl x%08x " + "cmpl x%08x rcv x%08x\n", + i, phba->cpucheck_xmt_io[i], + phba->cpucheck_ccmpl_io[i], + phba->cpucheck_cmpl_io[i], + phba->cpucheck_rcv_io[i]); + tot_xmt += phba->cpucheck_xmt_io[i]; + tot_rcv += phba->cpucheck_rcv_io[i]; + tot_cmpl += phba->cpucheck_cmpl_io[i]; + tot_ccmpl += phba->cpucheck_ccmpl_io[i]; + } + len += snprintf(buf + len, PAGE_SIZE - len, + "tot:xmit x%08x ccmpl x%08x cmpl x%08x rcv x%08x\n", + tot_xmt, tot_ccmpl, tot_cmpl, tot_rcv); + return len; +} + #endif /** @@ -697,6 +1300,40 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt, return; } +/** + * lpfc_debugfs_nvme_trc - Store NVME/NVMET trace log + * @phba: The phba to associate this trace string with for retrieval. + * @fmt: Format string to be displayed when dumping the log. + * @data1: 1st data parameter to be applied to @fmt. + * @data2: 2nd data parameter to be applied to @fmt. + * @data3: 3rd data parameter to be applied to @fmt. + * + * Description: + * This routine is used by the driver code to add a debugfs log entry to the + * nvme trace buffer associated with @phba. @fmt, @data1, @data2, and + * @data3 are used like printf when displaying the log. 
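 *
 * A typical call site (the argument names here are hypothetical and only
 * illustrate the printf-style usage) might look like:
 *
 *	lpfc_debugfs_nvme_trc(phba, "FCP cmpl: xri x%x sta x%x did x%x\n",
 *			      oxid, status, did);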
+ **/ +inline void +lpfc_debugfs_nvme_trc(struct lpfc_hba *phba, char *fmt, + uint16_t data1, uint16_t data2, uint32_t data3) +{ +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + struct lpfc_debugfs_nvmeio_trc *dtp; + int index; + + if (!phba->nvmeio_trc_on || !phba->nvmeio_trc) + return; + + index = atomic_inc_return(&phba->nvmeio_trc_cnt) & + (phba->nvmeio_trc_size - 1); + dtp = phba->nvmeio_trc + index; + dtp->fmt = fmt; + dtp->data1 = data1; + dtp->data2 = data2; + dtp->data3 = data3; +#endif +} + #ifdef CONFIG_SCSI_LPFC_DEBUG_FS /** * lpfc_debugfs_disc_trc_open - Open the discovery trace log @@ -938,7 +1575,7 @@ lpfc_debugfs_dumpData_open(struct inode *inode, struct file *file) goto out; /* Round to page boundary */ - printk(KERN_ERR "9059 BLKGRD: %s: _dump_buf_data=0x%p\n", + pr_err("9059 BLKGRD: %s: _dump_buf_data=0x%p\n", __func__, _dump_buf_data); debug->buffer = _dump_buf_data; if (!debug->buffer) { @@ -968,8 +1605,8 @@ lpfc_debugfs_dumpDif_open(struct inode *inode, struct file *file) goto out; /* Round to page boundary */ - printk(KERN_ERR "9060 BLKGRD: %s: _dump_buf_dif=0x%p file=%pD\n", - __func__, _dump_buf_dif, file); + pr_err("9060 BLKGRD: %s: _dump_buf_dif=0x%p file=%pD\n", + __func__, _dump_buf_dif, file); debug->buffer = _dump_buf_dif; if (!debug->buffer) { kfree(debug); @@ -1229,6 +1866,422 @@ lpfc_debugfs_dumpDataDif_release(struct inode *inode, struct file *file) return 0; } + +static int +lpfc_debugfs_nvmestat_open(struct inode *inode, struct file *file) +{ + struct lpfc_vport *vport = inode->i_private; + struct lpfc_debug *debug; + int rc = -ENOMEM; + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + /* Round to page boundary */ + debug->buffer = kmalloc(LPFC_NVMESTAT_SIZE, GFP_KERNEL); + if (!debug->buffer) { + kfree(debug); + goto out; + } + + debug->len = lpfc_debugfs_nvmestat_data(vport, debug->buffer, + LPFC_NVMESTAT_SIZE); + + debug->i_private = inode->i_private; + file->private_data = debug; + + rc = 0; +out: + return rc; +} + +static ssize_t +lpfc_debugfs_nvmestat_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private; + struct lpfc_hba *phba = vport->phba; + struct lpfc_nvmet_tgtport *tgtp; + char mybuf[64]; + char *pbuf; + + if (!phba->targetport) + return -ENXIO; + + if (nbytes > 64) + nbytes = 64; + + /* Protect copy from user */ + if (!access_ok(VERIFY_READ, buf, nbytes)) + return -EFAULT; + + memset(mybuf, 0, sizeof(mybuf)); + + if (copy_from_user(mybuf, buf, nbytes)) + return -EFAULT; + pbuf = &mybuf[0]; + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + if ((strncmp(pbuf, "reset", strlen("reset")) == 0) || + (strncmp(pbuf, "zero", strlen("zero")) == 0)) { + atomic_set(&tgtp->rcv_ls_req_in, 0); + atomic_set(&tgtp->rcv_ls_req_out, 0); + atomic_set(&tgtp->rcv_ls_req_drop, 0); + atomic_set(&tgtp->xmt_ls_abort, 0); + atomic_set(&tgtp->xmt_ls_rsp, 0); + atomic_set(&tgtp->xmt_ls_drop, 0); + atomic_set(&tgtp->xmt_ls_rsp_error, 0); + atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0); + + atomic_set(&tgtp->rcv_fcp_cmd_in, 0); + atomic_set(&tgtp->rcv_fcp_cmd_out, 0); + atomic_set(&tgtp->rcv_fcp_cmd_drop, 0); + atomic_set(&tgtp->xmt_fcp_abort, 0); + atomic_set(&tgtp->xmt_fcp_drop, 0); + atomic_set(&tgtp->xmt_fcp_read_rsp, 0); + atomic_set(&tgtp->xmt_fcp_read, 0); + atomic_set(&tgtp->xmt_fcp_write, 0); + atomic_set(&tgtp->xmt_fcp_rsp, 0); + atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0); + 
atomic_set(&tgtp->xmt_fcp_rsp_error, 0); + atomic_set(&tgtp->xmt_fcp_rsp_drop, 0); + + atomic_set(&tgtp->xmt_abort_rsp, 0); + atomic_set(&tgtp->xmt_abort_rsp_error, 0); + atomic_set(&tgtp->xmt_abort_cmpl, 0); + } + return nbytes; +} + +static int +lpfc_debugfs_nvmektime_open(struct inode *inode, struct file *file) +{ + struct lpfc_vport *vport = inode->i_private; + struct lpfc_debug *debug; + int rc = -ENOMEM; + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + /* Round to page boundary */ + debug->buffer = kmalloc(LPFC_NVMEKTIME_SIZE, GFP_KERNEL); + if (!debug->buffer) { + kfree(debug); + goto out; + } + + debug->len = lpfc_debugfs_nvmektime_data(vport, debug->buffer, + LPFC_NVMEKTIME_SIZE); + + debug->i_private = inode->i_private; + file->private_data = debug; + + rc = 0; +out: + return rc; +} + +static ssize_t +lpfc_debugfs_nvmektime_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private; + struct lpfc_hba *phba = vport->phba; + char mybuf[64]; + char *pbuf; + + if (nbytes > 64) + nbytes = 64; + + /* Protect copy from user */ + if (!access_ok(VERIFY_READ, buf, nbytes)) + return -EFAULT; + + memset(mybuf, 0, sizeof(mybuf)); + + if (copy_from_user(mybuf, buf, nbytes)) + return -EFAULT; + pbuf = &mybuf[0]; + + if ((strncmp(pbuf, "on", sizeof("on") - 1) == 0)) { + phba->ktime_data_samples = 0; + phba->ktime_status_samples = 0; + phba->ktime_seg1_total = 0; + phba->ktime_seg1_max = 0; + phba->ktime_seg1_min = 0xffffffff; + phba->ktime_seg2_total = 0; + phba->ktime_seg2_max = 0; + phba->ktime_seg2_min = 0xffffffff; + phba->ktime_seg3_total = 0; + phba->ktime_seg3_max = 0; + phba->ktime_seg3_min = 0xffffffff; + phba->ktime_seg4_total = 0; + phba->ktime_seg4_max = 0; + phba->ktime_seg4_min = 0xffffffff; + phba->ktime_seg5_total = 0; + phba->ktime_seg5_max = 0; + phba->ktime_seg5_min = 0xffffffff; + phba->ktime_seg6_total = 0; + phba->ktime_seg6_max = 0; + phba->ktime_seg6_min = 0xffffffff; + phba->ktime_seg7_total = 0; + phba->ktime_seg7_max = 0; + phba->ktime_seg7_min = 0xffffffff; + phba->ktime_seg8_total = 0; + phba->ktime_seg8_max = 0; + phba->ktime_seg8_min = 0xffffffff; + phba->ktime_seg9_total = 0; + phba->ktime_seg9_max = 0; + phba->ktime_seg9_min = 0xffffffff; + phba->ktime_seg10_total = 0; + phba->ktime_seg10_max = 0; + phba->ktime_seg10_min = 0xffffffff; + + phba->ktime_on = 1; + return strlen(pbuf); + } else if ((strncmp(pbuf, "off", + sizeof("off") - 1) == 0)) { + phba->ktime_on = 0; + return strlen(pbuf); + } else if ((strncmp(pbuf, "zero", + sizeof("zero") - 1) == 0)) { + phba->ktime_data_samples = 0; + phba->ktime_status_samples = 0; + phba->ktime_seg1_total = 0; + phba->ktime_seg1_max = 0; + phba->ktime_seg1_min = 0xffffffff; + phba->ktime_seg2_total = 0; + phba->ktime_seg2_max = 0; + phba->ktime_seg2_min = 0xffffffff; + phba->ktime_seg3_total = 0; + phba->ktime_seg3_max = 0; + phba->ktime_seg3_min = 0xffffffff; + phba->ktime_seg4_total = 0; + phba->ktime_seg4_max = 0; + phba->ktime_seg4_min = 0xffffffff; + phba->ktime_seg5_total = 0; + phba->ktime_seg5_max = 0; + phba->ktime_seg5_min = 0xffffffff; + phba->ktime_seg6_total = 0; + phba->ktime_seg6_max = 0; + phba->ktime_seg6_min = 0xffffffff; + phba->ktime_seg7_total = 0; + phba->ktime_seg7_max = 0; + phba->ktime_seg7_min = 0xffffffff; + phba->ktime_seg8_total = 0; + phba->ktime_seg8_max = 0; + phba->ktime_seg8_min = 0xffffffff; + phba->ktime_seg9_total = 0; + 
phba->ktime_seg9_max = 0; + phba->ktime_seg9_min = 0xffffffff; + phba->ktime_seg10_total = 0; + phba->ktime_seg10_max = 0; + phba->ktime_seg10_min = 0xffffffff; + return strlen(pbuf); + } + return -EINVAL; +} + +static int +lpfc_debugfs_nvmeio_trc_open(struct inode *inode, struct file *file) +{ + struct lpfc_hba *phba = inode->i_private; + struct lpfc_debug *debug; + int rc = -ENOMEM; + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + /* Round to page boundary */ + debug->buffer = kmalloc(LPFC_NVMEIO_TRC_SIZE, GFP_KERNEL); + if (!debug->buffer) { + kfree(debug); + goto out; + } + + debug->len = lpfc_debugfs_nvmeio_trc_data(phba, debug->buffer, + LPFC_NVMEIO_TRC_SIZE); + + debug->i_private = inode->i_private; + file->private_data = debug; + + rc = 0; +out: + return rc; +} + +static ssize_t +lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + int i; + unsigned long sz; + char mybuf[64]; + char *pbuf; + + if (nbytes > 64) + nbytes = 64; + + /* Protect copy from user */ + if (!access_ok(VERIFY_READ, buf, nbytes)) + return -EFAULT; + + memset(mybuf, 0, sizeof(mybuf)); + + if (copy_from_user(mybuf, buf, nbytes)) + return -EFAULT; + pbuf = &mybuf[0]; + + if ((strncmp(pbuf, "off", sizeof("off") - 1) == 0)) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0570 nvmeio_trc_off\n"); + phba->nvmeio_trc_output_idx = 0; + phba->nvmeio_trc_on = 0; + return strlen(pbuf); + } else if ((strncmp(pbuf, "on", sizeof("on") - 1) == 0)) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0571 nvmeio_trc_on\n"); + phba->nvmeio_trc_output_idx = 0; + phba->nvmeio_trc_on = 1; + return strlen(pbuf); + } + + /* We must be off to allocate the trace buffer */ + if (phba->nvmeio_trc_on != 0) + return -EINVAL; + + /* If not on or off, the parameter is the trace buffer size */ + i = kstrtoul(pbuf, 0, &sz); + if (i) + return -EINVAL; + phba->nvmeio_trc_size = (uint32_t)sz; + + /* It must be a power of 2 - round down */ + i = 0; + while (sz > 1) { + sz = sz >> 1; + i++; + } + sz = (1 << i); + if (phba->nvmeio_trc_size != sz) + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0572 nvmeio_trc_size changed to %ld\n", + sz); + phba->nvmeio_trc_size = (uint32_t)sz; + + /* If one previously exists, free it */ + kfree(phba->nvmeio_trc); + + /* Allocate new trace buffer and initialize */ + phba->nvmeio_trc = kmalloc((sizeof(struct lpfc_debugfs_nvmeio_trc) * + sz), GFP_KERNEL); + if (!phba->nvmeio_trc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0573 Cannot create debugfs " + "nvmeio_trc buffer\n"); + return -ENOMEM; + } + memset(phba->nvmeio_trc, 0, + (sizeof(struct lpfc_debugfs_nvmeio_trc) * sz)); + atomic_set(&phba->nvmeio_trc_cnt, 0); + phba->nvmeio_trc_on = 0; + phba->nvmeio_trc_output_idx = 0; + + return strlen(pbuf); +} + +static int +lpfc_debugfs_cpucheck_open(struct inode *inode, struct file *file) +{ + struct lpfc_vport *vport = inode->i_private; + struct lpfc_debug *debug; + int rc = -ENOMEM; + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + /* Round to page boundary */ + debug->buffer = kmalloc(LPFC_CPUCHECK_SIZE, GFP_KERNEL); + if (!debug->buffer) { + kfree(debug); + goto out; + } + + debug->len = lpfc_debugfs_cpucheck_data(vport, debug->buffer, + LPFC_NVMEKTIME_SIZE); + + debug->i_private = inode->i_private; + file->private_data = debug; + + rc = 0; +out: + return rc; +} + +static ssize_t 
+lpfc_debugfs_cpucheck_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private; + struct lpfc_hba *phba = vport->phba; + char mybuf[64]; + char *pbuf; + int i; + + if (nbytes > 64) + nbytes = 64; + + /* Protect copy from user */ + if (!access_ok(VERIFY_READ, buf, nbytes)) + return -EFAULT; + + memset(mybuf, 0, sizeof(mybuf)); + + if (copy_from_user(mybuf, buf, nbytes)) + return -EFAULT; + pbuf = &mybuf[0]; + + if ((strncmp(pbuf, "on", sizeof("on") - 1) == 0)) { + if (phba->nvmet_support) + phba->cpucheck_on |= LPFC_CHECK_NVMET_IO; + else + phba->cpucheck_on |= LPFC_CHECK_NVME_IO; + return strlen(pbuf); + } else if ((strncmp(pbuf, "rcv", + sizeof("rcv") - 1) == 0)) { + if (phba->nvmet_support) + phba->cpucheck_on |= LPFC_CHECK_NVMET_RCV; + else + return -EINVAL; + return strlen(pbuf); + } else if ((strncmp(pbuf, "off", + sizeof("off") - 1) == 0)) { + phba->cpucheck_on = LPFC_CHECK_OFF; + return strlen(pbuf); + } else if ((strncmp(pbuf, "zero", + sizeof("zero") - 1) == 0)) { + for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { + if (i >= LPFC_CHECK_CPU_CNT) + break; + phba->cpucheck_rcv_io[i] = 0; + phba->cpucheck_xmt_io[i] = 0; + phba->cpucheck_cmpl_io[i] = 0; + phba->cpucheck_ccmpl_io[i] = 0; + } + return strlen(pbuf); + } + return -EINVAL; +} + /* * --------------------------------- * iDiag debugfs file access methods @@ -1968,10 +3021,207 @@ lpfc_idiag_baracc_write(struct file *file, const char __user *buf, /* All other opecodes are illegal for now */ goto error_out; - return nbytes; -error_out: - memset(&idiag, 0, sizeof(idiag)); - return -EINVAL; + return nbytes; +error_out: + memset(&idiag, 0, sizeof(idiag)); + return -EINVAL; +} + +static int +__lpfc_idiag_print_wq(struct lpfc_queue *qp, char *wqtype, + char *pbuffer, int len) +{ + if (!qp) + return len; + + len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "\t\t%s WQ info: ", wqtype); + len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "AssocCQID[%04d]: WQ-STAT[oflow:x%x posted:x%llx]\n", + qp->assoc_qid, qp->q_cnt_1, + (unsigned long long)qp->q_cnt_4); + len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "\t\tWQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], " + "HOST-IDX[%04d], PORT-IDX[%04d]", + qp->queue_id, qp->entry_count, + qp->entry_size, qp->host_index, + qp->hba_index); + len += snprintf(pbuffer + len, + LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); + return len; +} + +static int +lpfc_idiag_wqs_for_cq(struct lpfc_hba *phba, char *wqtype, char *pbuffer, + int *len, int max_cnt, int cq_id) +{ + struct lpfc_queue *qp; + int qidx; + + for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) { + qp = phba->sli4_hba.fcp_wq[qidx]; + if (qp->assoc_qid != cq_id) + continue; + *len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len); + if (*len >= max_cnt) + return 1; + } + for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) { + qp = phba->sli4_hba.nvme_wq[qidx]; + if (qp->assoc_qid != cq_id) + continue; + *len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len); + if (*len >= max_cnt) + return 1; + } + return 0; +} + +static int +__lpfc_idiag_print_cq(struct lpfc_queue *qp, char *cqtype, + char *pbuffer, int len) +{ + if (!qp) + return len; + + len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "\t%s CQ info: ", cqtype); + len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "AssocEQID[%02d]: CQ STAT[max:x%x relw:x%x " 
+ "xabt:x%x wq:x%llx]\n", + qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2, + qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); + len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "\tCQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], " + "HOST-IDX[%04d], PORT-IDX[%04d]", + qp->queue_id, qp->entry_count, + qp->entry_size, qp->host_index, + qp->hba_index); + + len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); + + return len; +} + +static int +__lpfc_idiag_print_rqpair(struct lpfc_queue *qp, struct lpfc_queue *datqp, + char *rqtype, char *pbuffer, int len) +{ + if (!qp || !datqp) + return len; + + len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "\t\t%s RQ info: ", rqtype); + len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "AssocCQID[%02d]: RQ-STAT[nopost:x%x nobuf:x%x " + "trunc:x%x rcv:x%llx]\n", + qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2, + qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); + len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "\t\tHQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], " + "HOST-IDX[%04d], PORT-IDX[%04d]\n", + qp->queue_id, qp->entry_count, qp->entry_size, + qp->host_index, qp->hba_index); + len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "\t\tDQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], " + "HOST-IDX[%04d], PORT-IDX[%04d]\n", + datqp->queue_id, datqp->entry_count, + datqp->entry_size, datqp->host_index, + datqp->hba_index); + len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); + + return len; +} + +static int +lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer, + int *len, int max_cnt, int eqidx, int eq_id) +{ + struct lpfc_queue *qp; + int qidx, rc; + + for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) { + qp = phba->sli4_hba.fcp_cq[qidx]; + if (qp->assoc_qid != eq_id) + continue; + + *len = __lpfc_idiag_print_cq(qp, "FCP", pbuffer, *len); + + /* Reset max counter */ + qp->CQ_max_cqe = 0; + + if (*len >= max_cnt) + return 1; + + rc = lpfc_idiag_wqs_for_cq(phba, "FCP", pbuffer, len, + max_cnt, qp->queue_id); + if (rc) + return 1; + } + + for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) { + qp = phba->sli4_hba.nvme_cq[qidx]; + if (qp->assoc_qid != eq_id) + continue; + + *len = __lpfc_idiag_print_cq(qp, "NVME", pbuffer, *len); + + /* Reset max counter */ + qp->CQ_max_cqe = 0; + + if (*len >= max_cnt) + return 1; + + rc = lpfc_idiag_wqs_for_cq(phba, "NVME", pbuffer, len, + max_cnt, qp->queue_id); + if (rc) + return 1; + } + + if (eqidx < phba->cfg_nvmet_mrq) { + /* NVMET CQset */ + qp = phba->sli4_hba.nvmet_cqset[eqidx]; + *len = __lpfc_idiag_print_cq(qp, "NVMET CQset", pbuffer, *len); + + /* Reset max counter */ + qp->CQ_max_cqe = 0; + + if (*len >= max_cnt) + return 1; + + /* RQ header */ + qp = phba->sli4_hba.nvmet_mrq_hdr[eqidx]; + *len = __lpfc_idiag_print_rqpair(qp, + phba->sli4_hba.nvmet_mrq_data[eqidx], + "NVMET MRQ", pbuffer, *len); + + if (*len >= max_cnt) + return 1; + } + + return 0; +} + +static int +__lpfc_idiag_print_eq(struct lpfc_queue *qp, char *eqtype, + char *pbuffer, int len) +{ + if (!qp) + return len; + + len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "\n%s EQ info: EQ-STAT[max:x%x noE:x%x " + "bs:x%x proc:x%llx]\n", + eqtype, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3, + (unsigned long long)qp->q_cnt_4); + len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "EQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], " + "HOST-IDX[%04d], PORT-IDX[%04d]", + qp->queue_id, qp->entry_count, qp->entry_size, + 
qp->host_index, qp->hba_index); + len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); + + return len; } /** @@ -1984,6 +3234,9 @@ lpfc_idiag_baracc_write(struct file *file, const char __user *buf, * Description: * This routine reads data from the @phba SLI4 PCI function queue information, * and copies to user @buf. + * This routine only returns 1 EQs worth of information. It remembers the last + * EQ read and jumps to the next EQ. Thus subsequent calls to queInfo will + * retrieve all EQs allocated for the phba. * * Returns: * This function returns the amount of data that was read (this could be less @@ -1995,19 +3248,16 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes, { struct lpfc_debug *debug = file->private_data; struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; - int len = 0; char *pbuffer; - int x, cnt; - int max_cnt; + int max_cnt, rc, x, len = 0; struct lpfc_queue *qp = NULL; - if (!debug->buffer) debug->buffer = kmalloc(LPFC_QUE_INFO_GET_BUF_SIZE, GFP_KERNEL); if (!debug->buffer) return 0; pbuffer = debug->buffer; - max_cnt = LPFC_QUE_INFO_GET_BUF_SIZE - 128; + max_cnt = LPFC_QUE_INFO_GET_BUF_SIZE - 256; if (*ppos) return 0; @@ -2015,375 +3265,134 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes, spin_lock_irq(&phba->hbalock); /* Fast-path event queue */ - if (phba->sli4_hba.hba_eq && phba->cfg_fcp_io_channel) { - cnt = phba->cfg_fcp_io_channel; + if (phba->sli4_hba.hba_eq && phba->io_channel_irqs) { - for (x = 0; x < cnt; x++) { + x = phba->lpfc_idiag_last_eq; + if (phba->cfg_fof && (x >= phba->io_channel_irqs)) { + phba->lpfc_idiag_last_eq = 0; + goto fof; + } + phba->lpfc_idiag_last_eq++; + if (phba->lpfc_idiag_last_eq >= phba->io_channel_irqs) + if (phba->cfg_fof == 0) + phba->lpfc_idiag_last_eq = 0; - /* Fast-path EQ */ - qp = phba->sli4_hba.hba_eq[x]; - if (!qp) - goto proc_cq; + len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + "EQ %d out of %d HBA EQs\n", + x, phba->io_channel_irqs); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\nHBA EQ info: " - "EQ-STAT[max:x%x noE:x%x " - "bs:x%x proc:x%llx]\n", - qp->q_cnt_1, qp->q_cnt_2, - qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); + /* Fast-path EQ */ + qp = phba->sli4_hba.hba_eq[x]; + if (!qp) + goto out; - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "EQID[%02d], " - "QE-CNT[%04d], QE-SIZE[%04d], " - "HOST-IDX[%04d], PORT-IDX[%04d]", - qp->queue_id, - qp->entry_count, - qp->entry_size, - qp->host_index, - qp->hba_index); - - - /* Reset max counter */ - qp->EQ_max_eqe = 0; + len = __lpfc_idiag_print_eq(qp, "HBA", pbuffer, len); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); - if (len >= max_cnt) - goto too_big; -proc_cq: - /* Fast-path FCP CQ */ - qp = phba->sli4_hba.fcp_cq[x]; - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\tFCP CQ info: "); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "AssocEQID[%02d]: " - "CQ STAT[max:x%x relw:x%x " - "xabt:x%x wq:x%llx]\n", - qp->assoc_qid, - qp->q_cnt_1, qp->q_cnt_2, - qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\tCQID[%02d], " - "QE-CNT[%04d], QE-SIZE[%04d], " - "HOST-IDX[%04d], PORT-IDX[%04d]", - qp->queue_id, qp->entry_count, - qp->entry_size, qp->host_index, - qp->hba_index); + /* Reset max counter */ + qp->EQ_max_eqe = 0; + if (len >= max_cnt) + goto too_big; - /* Reset max counter */ - qp->CQ_max_cqe 
= 0; + /* will dump both fcp and nvme cqs/wqs for the eq */ + rc = lpfc_idiag_cqs_for_eq(phba, pbuffer, &len, + max_cnt, x, qp->queue_id); + if (rc) + goto too_big; - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); - if (len >= max_cnt) - goto too_big; + /* Only EQ 0 has slow path CQs configured */ + if (x) + goto out; - /* Fast-path FCP WQ */ - qp = phba->sli4_hba.fcp_wq[x]; + /* Slow-path mailbox CQ */ + qp = phba->sli4_hba.mbx_cq; + len = __lpfc_idiag_print_cq(qp, "MBX", pbuffer, len); + if (len >= max_cnt) + goto too_big; - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\t\tFCP WQ info: "); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "AssocCQID[%02d]: " - "WQ-STAT[oflow:x%x posted:x%llx]\n", - qp->assoc_qid, - qp->q_cnt_1, (unsigned long long)qp->q_cnt_4); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\t\tWQID[%02d], " - "QE-CNT[%04d], QE-SIZE[%04d], " - "HOST-IDX[%04d], PORT-IDX[%04d]", - qp->queue_id, - qp->entry_count, - qp->entry_size, - qp->host_index, - qp->hba_index); - - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); - if (len >= max_cnt) - goto too_big; - - if (x) - continue; - - /* Only EQ 0 has slow path CQs configured */ - - /* Slow-path mailbox CQ */ - qp = phba->sli4_hba.mbx_cq; - if (qp) { - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\tMBX CQ info: "); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "AssocEQID[%02d]: " - "CQ-STAT[mbox:x%x relw:x%x " - "xabt:x%x wq:x%llx]\n", - qp->assoc_qid, - qp->q_cnt_1, qp->q_cnt_2, - qp->q_cnt_3, - (unsigned long long)qp->q_cnt_4); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\tCQID[%02d], " - "QE-CNT[%04d], QE-SIZE[%04d], " - "HOST-IDX[%04d], PORT-IDX[%04d]", - qp->queue_id, qp->entry_count, - qp->entry_size, qp->host_index, - qp->hba_index); - - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); - if (len >= max_cnt) - goto too_big; - } + /* Slow-path MBOX MQ */ + qp = phba->sli4_hba.mbx_wq; + len = __lpfc_idiag_print_wq(qp, "MBX", pbuffer, len); + if (len >= max_cnt) + goto too_big; - /* Slow-path MBOX MQ */ - qp = phba->sli4_hba.mbx_wq; - if (qp) { - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\t\tMBX MQ info: "); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "AssocCQID[%02d]:\n", - phba->sli4_hba.mbx_wq->assoc_qid); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\t\tWQID[%02d], " - "QE-CNT[%04d], QE-SIZE[%04d], " - "HOST-IDX[%04d], PORT-IDX[%04d]", - qp->queue_id, qp->entry_count, - qp->entry_size, qp->host_index, - qp->hba_index); - - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); - if (len >= max_cnt) - goto too_big; - } + /* Slow-path ELS response CQ */ + qp = phba->sli4_hba.els_cq; + len = __lpfc_idiag_print_cq(qp, "ELS", pbuffer, len); + /* Reset max counter */ + if (qp) + qp->CQ_max_cqe = 0; + if (len >= max_cnt) + goto too_big; - /* Slow-path ELS response CQ */ - qp = phba->sli4_hba.els_cq; - if (qp) { - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\tELS CQ info: "); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "AssocEQID[%02d]: " - "CQ-STAT[max:x%x relw:x%x " - "xabt:x%x wq:x%llx]\n", - qp->assoc_qid, - qp->q_cnt_1, qp->q_cnt_2, - qp->q_cnt_3, - (unsigned long long)qp->q_cnt_4); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\tCQID [%02d], " - "QE-CNT[%04d], QE-SIZE[%04d], " - 
"HOST-IDX[%04d], PORT-IDX[%04d]", - qp->queue_id, qp->entry_count, - qp->entry_size, qp->host_index, - qp->hba_index); - - /* Reset max counter */ - qp->CQ_max_cqe = 0; - - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); - if (len >= max_cnt) - goto too_big; - } + /* Slow-path ELS WQ */ + qp = phba->sli4_hba.els_wq; + len = __lpfc_idiag_print_wq(qp, "ELS", pbuffer, len); + if (len >= max_cnt) + goto too_big; - /* Slow-path ELS WQ */ - qp = phba->sli4_hba.els_wq; - if (qp) { - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\t\tELS WQ info: "); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "AssocCQID[%02d]: " - " WQ-STAT[oflow:x%x " - "posted:x%llx]\n", - qp->assoc_qid, - qp->q_cnt_1, - (unsigned long long)qp->q_cnt_4); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\t\tWQID[%02d], " - "QE-CNT[%04d], QE-SIZE[%04d], " - "HOST-IDX[%04d], PORT-IDX[%04d]", - qp->queue_id, qp->entry_count, - qp->entry_size, qp->host_index, - qp->hba_index); - - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); - if (len >= max_cnt) - goto too_big; - } + /* Slow-path NVME LS response CQ */ + qp = phba->sli4_hba.nvmels_cq; + len = __lpfc_idiag_print_cq(qp, "NVME LS", + pbuffer, len); + /* Reset max counter */ + if (qp) + qp->CQ_max_cqe = 0; + if (len >= max_cnt) + goto too_big; - if (phba->sli4_hba.hdr_rq && phba->sli4_hba.dat_rq) { - /* Slow-path RQ header */ - qp = phba->sli4_hba.hdr_rq; + /* Slow-path NVME LS WQ */ + qp = phba->sli4_hba.nvmels_wq; + len = __lpfc_idiag_print_wq(qp, "NVME LS", + pbuffer, len); + if (len >= max_cnt) + goto too_big; - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\t\tRQ info: "); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "AssocCQID[%02d]: " - "RQ-STAT[nopost:x%x nobuf:x%x " - "trunc:x%x rcv:x%llx]\n", - qp->assoc_qid, - qp->q_cnt_1, qp->q_cnt_2, - qp->q_cnt_3, - (unsigned long long)qp->q_cnt_4); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\t\tHQID[%02d], " - "QE-CNT[%04d], QE-SIZE[%04d], " - "HOST-IDX[%04d], PORT-IDX[%04d]\n", - qp->queue_id, - qp->entry_count, - qp->entry_size, - qp->host_index, - qp->hba_index); - - /* Slow-path RQ data */ - qp = phba->sli4_hba.dat_rq; - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\t\tDQID[%02d], " - "QE-CNT[%04d], QE-SIZE[%04d], " - "HOST-IDX[%04d], PORT-IDX[%04d]\n", - qp->queue_id, - qp->entry_count, - qp->entry_size, - qp->host_index, - qp->hba_index); - - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); - } - } + qp = phba->sli4_hba.hdr_rq; + len = __lpfc_idiag_print_rqpair(qp, phba->sli4_hba.dat_rq, + "RQpair", pbuffer, len); + if (len >= max_cnt) + goto too_big; + + goto out; } +fof: if (phba->cfg_fof) { /* FOF EQ */ qp = phba->sli4_hba.fof_eq; - if (!qp) - goto out; - - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\nFOF EQ info: " - "EQ-STAT[max:x%x noE:x%x " - "bs:x%x proc:x%llx]\n", - qp->q_cnt_1, qp->q_cnt_2, - qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); - - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "EQID[%02d], " - "QE-CNT[%04d], QE-SIZE[%04d], " - "HOST-IDX[%04d], PORT-IDX[%04d]", - qp->queue_id, - qp->entry_count, - qp->entry_size, - qp->host_index, - qp->hba_index); + len = __lpfc_idiag_print_eq(qp, "FOF", pbuffer, len); /* Reset max counter */ - qp->EQ_max_eqe = 0; + if (qp) + qp->EQ_max_eqe = 0; - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); if (len 
>= max_cnt) goto too_big; - } - - if (phba->cfg_fof) { /* OAS CQ */ qp = phba->sli4_hba.oas_cq; - if (qp) { - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\tOAS CQ info: "); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "AssocEQID[%02d]: " - "CQ STAT[max:x%x relw:x%x " - "xabt:x%x wq:x%llx]\n", - qp->assoc_qid, - qp->q_cnt_1, qp->q_cnt_2, - qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\tCQID[%02d], " - "QE-CNT[%04d], QE-SIZE[%04d], " - "HOST-IDX[%04d], PORT-IDX[%04d]", - qp->queue_id, qp->entry_count, - qp->entry_size, qp->host_index, - qp->hba_index); - - /* Reset max counter */ + len = __lpfc_idiag_print_cq(qp, "OAS", pbuffer, len); + /* Reset max counter */ + if (qp) qp->CQ_max_cqe = 0; - - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); - if (len >= max_cnt) - goto too_big; - } + if (len >= max_cnt) + goto too_big; /* OAS WQ */ qp = phba->sli4_hba.oas_wq; - if (qp) { - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\t\tOAS WQ info: "); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "AssocCQID[%02d]: " - "WQ-STAT[oflow:x%x posted:x%llx]\n", - qp->assoc_qid, - qp->q_cnt_1, (unsigned long long)qp->q_cnt_4); - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\t\tWQID[%02d], " - "QE-CNT[%04d], QE-SIZE[%04d], " - "HOST-IDX[%04d], PORT-IDX[%04d]", - qp->queue_id, - qp->entry_count, - qp->entry_size, - qp->host_index, - qp->hba_index); - - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); - if (len >= max_cnt) - goto too_big; - } + len = __lpfc_idiag_print_wq(qp, "OAS", pbuffer, len); + if (len >= max_cnt) + goto too_big; } -out: + spin_unlock_irq(&phba->hbalock); return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); too_big: - len += snprintf(pbuffer+len, - LPFC_QUE_INFO_GET_BUF_SIZE-len, "Truncated ...\n"); + len += snprintf(pbuffer + len, + LPFC_QUE_INFO_GET_BUF_SIZE - len, "Truncated ...\n"); +out: spin_unlock_irq(&phba->hbalock); return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); } @@ -2559,7 +3568,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; uint32_t qidx, quetp, queid, index, count, offset, value; uint32_t *pentry; - struct lpfc_queue *pque; + struct lpfc_queue *pque, *qp; int rc; /* This is a user write operation */ @@ -2595,19 +3604,15 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, case LPFC_IDIAG_EQ: /* HBA event queue */ if (phba->sli4_hba.hba_eq) { - for (qidx = 0; qidx < phba->cfg_fcp_io_channel; - qidx++) { - if (phba->sli4_hba.hba_eq[qidx] && - phba->sli4_hba.hba_eq[qidx]->queue_id == - queid) { + for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) { + qp = phba->sli4_hba.hba_eq[qidx]; + if (qp && qp->queue_id == queid) { /* Sanity check */ - rc = lpfc_idiag_que_param_check( - phba->sli4_hba.hba_eq[qidx], + rc = lpfc_idiag_que_param_check(qp, index, count); if (rc) goto error_out; - idiag.ptr_private = - phba->sli4_hba.hba_eq[qidx]; + idiag.ptr_private = qp; goto pass_check; } } @@ -2637,24 +3642,51 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, idiag.ptr_private = phba->sli4_hba.els_cq; goto pass_check; } + /* NVME LS complete queue */ + if (phba->sli4_hba.nvmels_cq && + phba->sli4_hba.nvmels_cq->queue_id == queid) { + /* Sanity check */ + rc = lpfc_idiag_que_param_check( + phba->sli4_hba.nvmels_cq, index, count); 
+ if (rc) + goto error_out; + idiag.ptr_private = phba->sli4_hba.nvmels_cq; + goto pass_check; + } /* FCP complete queue */ if (phba->sli4_hba.fcp_cq) { + for (qidx = 0; qidx < phba->cfg_fcp_io_channel; + qidx++) { + qp = phba->sli4_hba.fcp_cq[qidx]; + if (qp && qp->queue_id == queid) { + /* Sanity check */ + rc = lpfc_idiag_que_param_check( + qp, index, count); + if (rc) + goto error_out; + idiag.ptr_private = qp; + goto pass_check; + } + } + } + /* NVME complete queue */ + if (phba->sli4_hba.nvme_cq) { qidx = 0; do { - if (phba->sli4_hba.fcp_cq[qidx] && - phba->sli4_hba.fcp_cq[qidx]->queue_id == + if (phba->sli4_hba.nvme_cq[qidx] && + phba->sli4_hba.nvme_cq[qidx]->queue_id == queid) { /* Sanity check */ rc = lpfc_idiag_que_param_check( - phba->sli4_hba.fcp_cq[qidx], + phba->sli4_hba.nvme_cq[qidx], index, count); if (rc) goto error_out; idiag.ptr_private = - phba->sli4_hba.fcp_cq[qidx]; + phba->sli4_hba.nvme_cq[qidx]; goto pass_check; } - } while (++qidx < phba->cfg_fcp_io_channel); + } while (++qidx < phba->cfg_nvme_io_channel); } goto error_out; break; @@ -2684,22 +3716,66 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, idiag.ptr_private = phba->sli4_hba.els_wq; goto pass_check; } + /* NVME LS work queue */ + if (phba->sli4_hba.nvmels_wq && + phba->sli4_hba.nvmels_wq->queue_id == queid) { + /* Sanity check */ + rc = lpfc_idiag_que_param_check( + phba->sli4_hba.nvmels_wq, index, count); + if (rc) + goto error_out; + idiag.ptr_private = phba->sli4_hba.nvmels_wq; + goto pass_check; + } /* FCP work queue */ if (phba->sli4_hba.fcp_wq) { for (qidx = 0; qidx < phba->cfg_fcp_io_channel; + qidx++) { + qp = phba->sli4_hba.fcp_wq[qidx]; + if (qp && qp->queue_id == queid) { + /* Sanity check */ + rc = lpfc_idiag_que_param_check( + qp, index, count); + if (rc) + goto error_out; + idiag.ptr_private = qp; + goto pass_check; + } + } + } + /* NVME work queue */ + if (phba->sli4_hba.nvme_wq) { + for (qidx = 0; qidx < phba->cfg_nvme_io_channel; + qidx++) { + qp = phba->sli4_hba.nvme_wq[qidx]; + if (qp && qp->queue_id == queid) { + /* Sanity check */ + rc = lpfc_idiag_que_param_check( + qp, index, count); + if (rc) + goto error_out; + idiag.ptr_private = qp; + goto pass_check; + } + } + } + + /* NVME work queues */ + if (phba->sli4_hba.nvme_wq) { + for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) { - if (!phba->sli4_hba.fcp_wq[qidx]) + if (!phba->sli4_hba.nvme_wq[qidx]) continue; - if (phba->sli4_hba.fcp_wq[qidx]->queue_id == + if (phba->sli4_hba.nvme_wq[qidx]->queue_id == queid) { /* Sanity check */ rc = lpfc_idiag_que_param_check( - phba->sli4_hba.fcp_wq[qidx], + phba->sli4_hba.nvme_wq[qidx], index, count); if (rc) goto error_out; idiag.ptr_private = - phba->sli4_hba.fcp_wq[qidx]; + phba->sli4_hba.nvme_wq[qidx]; goto pass_check; } } @@ -3687,6 +4763,46 @@ static const struct file_operations lpfc_debugfs_op_dumpHostSlim = { .release = lpfc_debugfs_release, }; +#undef lpfc_debugfs_op_nvmestat +static const struct file_operations lpfc_debugfs_op_nvmestat = { + .owner = THIS_MODULE, + .open = lpfc_debugfs_nvmestat_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_debugfs_read, + .write = lpfc_debugfs_nvmestat_write, + .release = lpfc_debugfs_release, +}; + +#undef lpfc_debugfs_op_nvmektime +static const struct file_operations lpfc_debugfs_op_nvmektime = { + .owner = THIS_MODULE, + .open = lpfc_debugfs_nvmektime_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_debugfs_read, + .write = lpfc_debugfs_nvmektime_write, + .release = lpfc_debugfs_release, +}; + +#undef 
lpfc_debugfs_op_nvmeio_trc +static const struct file_operations lpfc_debugfs_op_nvmeio_trc = { + .owner = THIS_MODULE, + .open = lpfc_debugfs_nvmeio_trc_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_debugfs_read, + .write = lpfc_debugfs_nvmeio_trc_write, + .release = lpfc_debugfs_release, +}; + +#undef lpfc_debugfs_op_cpucheck +static const struct file_operations lpfc_debugfs_op_cpucheck = { + .owner = THIS_MODULE, + .open = lpfc_debugfs_cpucheck_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_debugfs_read, + .write = lpfc_debugfs_cpucheck_write, + .release = lpfc_debugfs_release, +}; + #undef lpfc_debugfs_op_dumpData static const struct file_operations lpfc_debugfs_op_dumpData = { .owner = THIS_MODULE, @@ -3853,7 +4969,7 @@ lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *phba, enum nemb_type nemb_tp, if ((mbox_tp == mbox_rd) && (dma_tp == dma_mbox)) { if (*mbx_dump_map & LPFC_BSG_DMP_MBX_RD_MBX) { do_dump |= LPFC_BSG_DMP_MBX_RD_MBX; - printk(KERN_ERR "\nRead mbox command (x%x), " + pr_err("\nRead mbox command (x%x), " "nemb:0x%x, extbuf_cnt:%d:\n", sta_tp, nemb_tp, ext_buf); } @@ -3861,7 +4977,7 @@ lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *phba, enum nemb_type nemb_tp, if ((mbox_tp == mbox_rd) && (dma_tp == dma_ebuf)) { if (*mbx_dump_map & LPFC_BSG_DMP_MBX_RD_BUF) { do_dump |= LPFC_BSG_DMP_MBX_RD_BUF; - printk(KERN_ERR "\nRead mbox buffer (x%x), " + pr_err("\nRead mbox buffer (x%x), " "nemb:0x%x, extbuf_seq:%d:\n", sta_tp, nemb_tp, ext_buf); } @@ -3869,7 +4985,7 @@ lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *phba, enum nemb_type nemb_tp, if ((mbox_tp == mbox_wr) && (dma_tp == dma_mbox)) { if (*mbx_dump_map & LPFC_BSG_DMP_MBX_WR_MBX) { do_dump |= LPFC_BSG_DMP_MBX_WR_MBX; - printk(KERN_ERR "\nWrite mbox command (x%x), " + pr_err("\nWrite mbox command (x%x), " "nemb:0x%x, extbuf_cnt:%d:\n", sta_tp, nemb_tp, ext_buf); } @@ -3877,7 +4993,7 @@ lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *phba, enum nemb_type nemb_tp, if ((mbox_tp == mbox_wr) && (dma_tp == dma_ebuf)) { if (*mbx_dump_map & LPFC_BSG_DMP_MBX_WR_BUF) { do_dump |= LPFC_BSG_DMP_MBX_WR_BUF; - printk(KERN_ERR "\nWrite mbox buffer (x%x), " + pr_err("\nWrite mbox buffer (x%x), " "nemb:0x%x, extbuf_seq:%d:\n", sta_tp, nemb_tp, ext_buf); } @@ -3889,7 +5005,7 @@ lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *phba, enum nemb_type nemb_tp, for (i = 0; i < *mbx_word_cnt; i++) { if (!(i % 8)) { if (i != 0) - printk(KERN_ERR "%s\n", line_buf); + pr_err("%s\n", line_buf); len = 0; len += snprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len, @@ -3900,7 +5016,7 @@ lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *phba, enum nemb_type nemb_tp, pword++; } if ((i - 1) % 8) - printk(KERN_ERR "%s\n", line_buf); + pr_err("%s\n", line_buf); (*mbx_dump_cnt)--; } @@ -3949,13 +5065,13 @@ lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *phba, MAILBOX_t *pmbox) /* dump buffer content */ if (*mbx_dump_map & LPFC_MBX_DMP_MBX_WORD) { - printk(KERN_ERR "Mailbox command:0x%x dump by word:\n", + pr_err("Mailbox command:0x%x dump by word:\n", pmbox->mbxCommand); pword = (uint32_t *)pmbox; for (i = 0; i < *mbx_word_cnt; i++) { if (!(i % 8)) { if (i != 0) - printk(KERN_ERR "%s\n", line_buf); + pr_err("%s\n", line_buf); len = 0; memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ); len += snprintf(line_buf+len, @@ -3968,17 +5084,17 @@ lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *phba, MAILBOX_t *pmbox) pword++; } if ((i - 1) % 8) - printk(KERN_ERR "%s\n", line_buf); - printk(KERN_ERR "\n"); + pr_err("%s\n", line_buf); + pr_err("\n"); } if 
(*mbx_dump_map & LPFC_MBX_DMP_MBX_BYTE) { - printk(KERN_ERR "Mailbox command:0x%x dump by byte:\n", + pr_err("Mailbox command:0x%x dump by byte:\n", pmbox->mbxCommand); pbyte = (uint8_t *)pmbox; for (i = 0; i < *mbx_word_cnt; i++) { if (!(i % 8)) { if (i != 0) - printk(KERN_ERR "%s\n", line_buf); + pr_err("%s\n", line_buf); len = 0; memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ); len += snprintf(line_buf+len, @@ -3996,8 +5112,8 @@ lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *phba, MAILBOX_t *pmbox) LPFC_MBX_ACC_LBUF_SZ-len, " "); } if ((i - 1) % 8) - printk(KERN_ERR "%s\n", line_buf); - printk(KERN_ERR "\n"); + pr_err("%s\n", line_buf); + pr_err("\n"); } (*mbx_dump_cnt)--; @@ -4240,8 +5356,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) i++; } lpfc_debugfs_max_slow_ring_trc = (1 << i); - printk(KERN_ERR - "lpfc_debugfs_max_disc_trc changed to " + pr_err("lpfc_debugfs_max_disc_trc changed to " "%d\n", lpfc_debugfs_max_disc_trc); } } @@ -4273,6 +5388,61 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) (sizeof(struct lpfc_debugfs_trc) * lpfc_debugfs_max_slow_ring_trc)); } + + snprintf(name, sizeof(name), "nvmeio_trc"); + phba->debug_nvmeio_trc = + debugfs_create_file(name, 0644, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_nvmeio_trc); + if (!phba->debug_nvmeio_trc) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0574 No create debugfs nvmeio_trc\n"); + goto debug_failed; + } + + atomic_set(&phba->nvmeio_trc_cnt, 0); + if (lpfc_debugfs_max_nvmeio_trc) { + num = lpfc_debugfs_max_nvmeio_trc - 1; + if (num & lpfc_debugfs_max_disc_trc) { + /* Change to be a power of 2 */ + num = lpfc_debugfs_max_nvmeio_trc; + i = 0; + while (num > 1) { + num = num >> 1; + i++; + } + lpfc_debugfs_max_nvmeio_trc = (1 << i); + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0575 lpfc_debugfs_max_nvmeio_trc " + "changed to %d\n", + lpfc_debugfs_max_nvmeio_trc); + } + phba->nvmeio_trc_size = lpfc_debugfs_max_nvmeio_trc; + + /* Allocate trace buffer and initialize */ + phba->nvmeio_trc = kmalloc( + (sizeof(struct lpfc_debugfs_nvmeio_trc) * + phba->nvmeio_trc_size), GFP_KERNEL); + + if (!phba->nvmeio_trc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0576 Cannot create debugfs " + "nvmeio_trc buffer\n"); + goto nvmeio_off; + } + memset(phba->nvmeio_trc, 0, + (sizeof(struct lpfc_debugfs_nvmeio_trc) * + phba->nvmeio_trc_size)); + phba->nvmeio_trc_on = 1; + phba->nvmeio_trc_output_idx = 0; + phba->nvmeio_trc = NULL; + } else { +nvmeio_off: + phba->nvmeio_trc_size = 0; + phba->nvmeio_trc_on = 0; + phba->nvmeio_trc_output_idx = 0; + phba->nvmeio_trc = NULL; + } } snprintf(name, sizeof(name), "vport%d", vport->vpi); @@ -4298,8 +5468,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) i++; } lpfc_debugfs_max_disc_trc = (1 << i); - printk(KERN_ERR - "lpfc_debugfs_max_disc_trc changed to %d\n", + pr_err("lpfc_debugfs_max_disc_trc changed to %d\n", lpfc_debugfs_max_disc_trc); } } @@ -4338,6 +5507,39 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) goto debug_failed; } + snprintf(name, sizeof(name), "nvmestat"); + vport->debug_nvmestat = + debugfs_create_file(name, 0644, + vport->vport_debugfs_root, + vport, &lpfc_debugfs_op_nvmestat); + if (!vport->debug_nvmestat) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0811 Cannot create debugfs nvmestat\n"); + goto debug_failed; + } + + snprintf(name, sizeof(name), "nvmektime"); + vport->debug_nvmektime = + debugfs_create_file(name, 0644, + vport->vport_debugfs_root, + vport, &lpfc_debugfs_op_nvmektime); + if (!vport->debug_nvmektime) { + 
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0815 Cannot create debugfs nvmektime\n"); + goto debug_failed; + } + + snprintf(name, sizeof(name), "cpucheck"); + vport->debug_cpucheck = + debugfs_create_file(name, 0644, + vport->vport_debugfs_root, + vport, &lpfc_debugfs_op_cpucheck); + if (!vport->debug_cpucheck) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0819 Cannot create debugfs cpucheck\n"); + goto debug_failed; + } + /* * The following section is for additional directories/files for the * physical port. @@ -4502,140 +5704,126 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport) kfree(vport->disc_trc); vport->disc_trc = NULL; } - if (vport->debug_disc_trc) { - debugfs_remove(vport->debug_disc_trc); /* discovery_trace */ - vport->debug_disc_trc = NULL; - } - if (vport->debug_nodelist) { - debugfs_remove(vport->debug_nodelist); /* nodelist */ - vport->debug_nodelist = NULL; - } + + debugfs_remove(vport->debug_disc_trc); /* discovery_trace */ + vport->debug_disc_trc = NULL; + + debugfs_remove(vport->debug_nodelist); /* nodelist */ + vport->debug_nodelist = NULL; + + debugfs_remove(vport->debug_nvmestat); /* nvmestat */ + vport->debug_nvmestat = NULL; + + debugfs_remove(vport->debug_nvmektime); /* nvmektime */ + vport->debug_nvmektime = NULL; + + debugfs_remove(vport->debug_cpucheck); /* cpucheck */ + vport->debug_cpucheck = NULL; + if (vport->vport_debugfs_root) { debugfs_remove(vport->vport_debugfs_root); /* vportX */ vport->vport_debugfs_root = NULL; atomic_dec(&phba->debugfs_vport_count); } + if (atomic_read(&phba->debugfs_vport_count) == 0) { - if (phba->debug_hbqinfo) { - debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */ - phba->debug_hbqinfo = NULL; - } - if (phba->debug_dumpHBASlim) { - debugfs_remove(phba->debug_dumpHBASlim); /* HBASlim */ - phba->debug_dumpHBASlim = NULL; - } - if (phba->debug_dumpHostSlim) { - debugfs_remove(phba->debug_dumpHostSlim); /* HostSlim */ - phba->debug_dumpHostSlim = NULL; - } - if (phba->debug_dumpData) { - debugfs_remove(phba->debug_dumpData); /* dumpData */ - phba->debug_dumpData = NULL; - } + debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */ + phba->debug_hbqinfo = NULL; - if (phba->debug_dumpDif) { - debugfs_remove(phba->debug_dumpDif); /* dumpDif */ - phba->debug_dumpDif = NULL; - } - if (phba->debug_InjErrLBA) { - debugfs_remove(phba->debug_InjErrLBA); /* InjErrLBA */ - phba->debug_InjErrLBA = NULL; - } - if (phba->debug_InjErrNPortID) { /* InjErrNPortID */ - debugfs_remove(phba->debug_InjErrNPortID); - phba->debug_InjErrNPortID = NULL; - } - if (phba->debug_InjErrWWPN) { - debugfs_remove(phba->debug_InjErrWWPN); /* InjErrWWPN */ - phba->debug_InjErrWWPN = NULL; - } - if (phba->debug_writeGuard) { - debugfs_remove(phba->debug_writeGuard); /* writeGuard */ - phba->debug_writeGuard = NULL; - } - if (phba->debug_writeApp) { - debugfs_remove(phba->debug_writeApp); /* writeApp */ - phba->debug_writeApp = NULL; - } - if (phba->debug_writeRef) { - debugfs_remove(phba->debug_writeRef); /* writeRef */ - phba->debug_writeRef = NULL; - } - if (phba->debug_readGuard) { - debugfs_remove(phba->debug_readGuard); /* readGuard */ - phba->debug_readGuard = NULL; - } - if (phba->debug_readApp) { - debugfs_remove(phba->debug_readApp); /* readApp */ - phba->debug_readApp = NULL; - } - if (phba->debug_readRef) { - debugfs_remove(phba->debug_readRef); /* readRef */ - phba->debug_readRef = NULL; - } + debugfs_remove(phba->debug_dumpHBASlim); /* HBASlim */ + phba->debug_dumpHBASlim = NULL; + + debugfs_remove(phba->debug_dumpHostSlim); /* HostSlim */ + 
phba->debug_dumpHostSlim = NULL; + + debugfs_remove(phba->debug_dumpData); /* dumpData */ + phba->debug_dumpData = NULL; + + debugfs_remove(phba->debug_dumpDif); /* dumpDif */ + phba->debug_dumpDif = NULL; + + debugfs_remove(phba->debug_InjErrLBA); /* InjErrLBA */ + phba->debug_InjErrLBA = NULL; + + debugfs_remove(phba->debug_InjErrNPortID); + phba->debug_InjErrNPortID = NULL; + + debugfs_remove(phba->debug_InjErrWWPN); /* InjErrWWPN */ + phba->debug_InjErrWWPN = NULL; + + debugfs_remove(phba->debug_writeGuard); /* writeGuard */ + phba->debug_writeGuard = NULL; + + debugfs_remove(phba->debug_writeApp); /* writeApp */ + phba->debug_writeApp = NULL; + + debugfs_remove(phba->debug_writeRef); /* writeRef */ + phba->debug_writeRef = NULL; + + debugfs_remove(phba->debug_readGuard); /* readGuard */ + phba->debug_readGuard = NULL; + + debugfs_remove(phba->debug_readApp); /* readApp */ + phba->debug_readApp = NULL; + + debugfs_remove(phba->debug_readRef); /* readRef */ + phba->debug_readRef = NULL; if (phba->slow_ring_trc) { kfree(phba->slow_ring_trc); phba->slow_ring_trc = NULL; } - if (phba->debug_slow_ring_trc) { - /* slow_ring_trace */ - debugfs_remove(phba->debug_slow_ring_trc); - phba->debug_slow_ring_trc = NULL; - } + + /* slow_ring_trace */ + debugfs_remove(phba->debug_slow_ring_trc); + phba->debug_slow_ring_trc = NULL; + + debugfs_remove(phba->debug_nvmeio_trc); + phba->debug_nvmeio_trc = NULL; + + kfree(phba->nvmeio_trc); + phba->nvmeio_trc = NULL; /* * iDiag release */ if (phba->sli_rev == LPFC_SLI_REV4) { - if (phba->idiag_ext_acc) { - /* iDiag extAcc */ - debugfs_remove(phba->idiag_ext_acc); - phba->idiag_ext_acc = NULL; - } - if (phba->idiag_mbx_acc) { - /* iDiag mbxAcc */ - debugfs_remove(phba->idiag_mbx_acc); - phba->idiag_mbx_acc = NULL; - } - if (phba->idiag_ctl_acc) { - /* iDiag ctlAcc */ - debugfs_remove(phba->idiag_ctl_acc); - phba->idiag_ctl_acc = NULL; - } - if (phba->idiag_drb_acc) { - /* iDiag drbAcc */ - debugfs_remove(phba->idiag_drb_acc); - phba->idiag_drb_acc = NULL; - } - if (phba->idiag_que_acc) { - /* iDiag queAcc */ - debugfs_remove(phba->idiag_que_acc); - phba->idiag_que_acc = NULL; - } - if (phba->idiag_que_info) { - /* iDiag queInfo */ - debugfs_remove(phba->idiag_que_info); - phba->idiag_que_info = NULL; - } - if (phba->idiag_bar_acc) { - /* iDiag barAcc */ - debugfs_remove(phba->idiag_bar_acc); - phba->idiag_bar_acc = NULL; - } - if (phba->idiag_pci_cfg) { - /* iDiag pciCfg */ - debugfs_remove(phba->idiag_pci_cfg); - phba->idiag_pci_cfg = NULL; - } + /* iDiag extAcc */ + debugfs_remove(phba->idiag_ext_acc); + phba->idiag_ext_acc = NULL; + + /* iDiag mbxAcc */ + debugfs_remove(phba->idiag_mbx_acc); + phba->idiag_mbx_acc = NULL; + + /* iDiag ctlAcc */ + debugfs_remove(phba->idiag_ctl_acc); + phba->idiag_ctl_acc = NULL; + + /* iDiag drbAcc */ + debugfs_remove(phba->idiag_drb_acc); + phba->idiag_drb_acc = NULL; + + /* iDiag queAcc */ + debugfs_remove(phba->idiag_que_acc); + phba->idiag_que_acc = NULL; + + /* iDiag queInfo */ + debugfs_remove(phba->idiag_que_info); + phba->idiag_que_info = NULL; + + /* iDiag barAcc */ + debugfs_remove(phba->idiag_bar_acc); + phba->idiag_bar_acc = NULL; + + /* iDiag pciCfg */ + debugfs_remove(phba->idiag_pci_cfg); + phba->idiag_pci_cfg = NULL; /* Finally remove the iDiag debugfs root */ - if (phba->idiag_root) { - /* iDiag root */ - debugfs_remove(phba->idiag_root); - phba->idiag_root = NULL; - } + debugfs_remove(phba->idiag_root); + phba->idiag_root = NULL; } if (phba->hba_debugfs_root) { @@ -4644,10 +5832,8 @@ 
lpfc_debugfs_terminate(struct lpfc_vport *vport) atomic_dec(&lpfc_debugfs_hba_count); } - if (atomic_read(&lpfc_debugfs_hba_count) == 0) { - debugfs_remove(lpfc_debugfs_root); /* lpfc */ - lpfc_debugfs_root = NULL; - } + debugfs_remove(lpfc_debugfs_root); /* lpfc */ + lpfc_debugfs_root = NULL; } #endif return; @@ -4668,31 +5854,39 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport) void lpfc_debug_dump_all_queues(struct lpfc_hba *phba) { - int fcp_wqidx; + int idx; /* * Dump Work Queues (WQs) */ - lpfc_debug_dump_mbx_wq(phba); - lpfc_debug_dump_els_wq(phba); + lpfc_debug_dump_wq(phba, DUMP_MBX, 0); + lpfc_debug_dump_wq(phba, DUMP_ELS, 0); + lpfc_debug_dump_wq(phba, DUMP_NVMELS, 0); - for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) - lpfc_debug_dump_fcp_wq(phba, fcp_wqidx); + for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) + lpfc_debug_dump_wq(phba, DUMP_FCP, idx); + + for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++) + lpfc_debug_dump_wq(phba, DUMP_NVME, idx); lpfc_debug_dump_hdr_rq(phba); lpfc_debug_dump_dat_rq(phba); /* * Dump Complete Queues (CQs) */ - lpfc_debug_dump_mbx_cq(phba); - lpfc_debug_dump_els_cq(phba); + lpfc_debug_dump_cq(phba, DUMP_MBX, 0); + lpfc_debug_dump_cq(phba, DUMP_ELS, 0); + lpfc_debug_dump_cq(phba, DUMP_NVMELS, 0); + + for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) + lpfc_debug_dump_cq(phba, DUMP_FCP, idx); - for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) - lpfc_debug_dump_fcp_cq(phba, fcp_wqidx); + for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++) + lpfc_debug_dump_cq(phba, DUMP_NVME, idx); /* * Dump Event Queues (EQs) */ - for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) - lpfc_debug_dump_hba_eq(phba, fcp_wqidx); + for (idx = 0; idx < phba->io_channel_irqs; idx++) + lpfc_debug_dump_hba_eq(phba, idx); } diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h index 8b2b6a3bfc25b5..7b7d314af0e087 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.h +++ b/drivers/scsi/lpfc/lpfc_debugfs.h @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2007-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * @@ -42,6 +44,14 @@ /* hbqinfo output buffer size */ #define LPFC_HBQINFO_SIZE 8192 +/* nvmestat output buffer size */ +#define LPFC_NVMESTAT_SIZE 8192 +#define LPFC_NVMEKTIME_SIZE 8192 +#define LPFC_CPUCHECK_SIZE 8192 +#define LPFC_NVMEIO_TRC_SIZE 8192 + +#define LPFC_DEBUG_OUT_LINE_SZ 80 + /* * For SLI4 iDiag debugfs diagnostics tool */ @@ -188,6 +198,12 @@ #define SIZE_U16 sizeof(uint16_t) #define SIZE_U32 sizeof(uint32_t) +#define lpfc_nvmeio_data(phba, fmt, arg...) 
\ + { \ + if (phba->nvmeio_trc_on) \ + lpfc_debugfs_nvme_trc(phba, fmt, ##arg); \ + } + struct lpfc_debug { char *i_private; char op; @@ -206,6 +222,13 @@ struct lpfc_debugfs_trc { unsigned long jif; }; +struct lpfc_debugfs_nvmeio_trc { + char *fmt; + uint16_t data1; + uint16_t data2; + uint32_t data3; +}; + struct lpfc_idiag_offset { uint32_t last_rd; }; @@ -252,8 +275,22 @@ struct lpfc_idiag { struct lpfc_idiag_offset offset; void *ptr_private; }; + +#else + +#define lpfc_nvmeio_data(phba, fmt, arg...) \ + no_printk(fmt, ##arg) + #endif +enum { + DUMP_FCP, + DUMP_NVME, + DUMP_MBX, + DUMP_ELS, + DUMP_NVMELS, +}; + /* Mask for discovery_trace */ #define LPFC_DISC_TRC_ELS_CMD 0x1 /* Trace ELS commands */ #define LPFC_DISC_TRC_ELS_RSP 0x2 /* Trace ELS response */ @@ -358,58 +395,111 @@ lpfc_debug_dump_q(struct lpfc_queue *q) } /** - * lpfc_debug_dump_fcp_wq - dump all entries from a fcp work queue + * lpfc_debug_dump_wq - dump all entries from the fcp or nvme work queue * @phba: Pointer to HBA context object. - * @fcp_wqidx: Index to a FCP work queue. + * @wqidx: Index to a FCP or NVME work queue. * - * This function dumps all entries from a FCP work queue specified by the - * @fcp_wqidx. + * This function dumps all entries from a FCP or NVME work queue specified + * by the wqidx. **/ static inline void -lpfc_debug_dump_fcp_wq(struct lpfc_hba *phba, int fcp_wqidx) +lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx) { - /* sanity check */ - if (fcp_wqidx >= phba->cfg_fcp_io_channel) + struct lpfc_queue *wq; + char *qtypestr; + + if (qtype == DUMP_FCP) { + wq = phba->sli4_hba.fcp_wq[wqidx]; + qtypestr = "FCP"; + } else if (qtype == DUMP_NVME) { + wq = phba->sli4_hba.nvme_wq[wqidx]; + qtypestr = "NVME"; + } else if (qtype == DUMP_MBX) { + wq = phba->sli4_hba.mbx_wq; + qtypestr = "MBX"; + } else if (qtype == DUMP_ELS) { + wq = phba->sli4_hba.els_wq; + qtypestr = "ELS"; + } else if (qtype == DUMP_NVMELS) { + wq = phba->sli4_hba.nvmels_wq; + qtypestr = "NVMELS"; + } else return; - printk(KERN_ERR "FCP WQ: WQ[Idx:%d|Qid:%d]\n", - fcp_wqidx, phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id); - lpfc_debug_dump_q(phba->sli4_hba.fcp_wq[fcp_wqidx]); + if (qtype == DUMP_FCP || qtype == DUMP_NVME) + pr_err("%s WQ: WQ[Idx:%d|Qid:%d]\n", + qtypestr, wqidx, wq->queue_id); + else + pr_err("%s WQ: WQ[Qid:%d]\n", + qtypestr, wq->queue_id); + + lpfc_debug_dump_q(wq); } /** - * lpfc_debug_dump_fcp_cq - dump all entries from a fcp work queue's cmpl queue + * lpfc_debug_dump_cq - dump all entries from a fcp or nvme work queue's + * cmpl queue * @phba: Pointer to HBA context object. - * @fcp_wqidx: Index to a FCP work queue. + * @wqidx: Index to a FCP work queue. * - * This function dumps all entries from a FCP complete queue which is - * associated to the FCP work queue specified by the @fcp_wqidx. + * This function dumps all entries from a FCP or NVME completion queue + * which is associated to the work queue specified by the @wqidx. 
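 *
 * A minimal illustrative call (mirroring how lpfc_debug_dump_all_queues
 * uses this helper elsewhere in this patch; index 0 assumes at least one
 * NVME io channel is configured):
 *
 *	lpfc_debug_dump_cq(phba, DUMP_NVME, 0);
 *
 * @qtype is one of the DUMP_FCP/DUMP_NVME/DUMP_MBX/DUMP_ELS/DUMP_NVMELS
 * values introduced above; for the single-instance MBX, ELS and NVMELS
 * queues the @wqidx argument is ignored.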
**/ static inline void -lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx) +lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx) { - int fcp_cqidx, fcp_cqid; - - /* sanity check */ - if (fcp_wqidx >= phba->cfg_fcp_io_channel) + struct lpfc_queue *wq, *cq, *eq; + char *qtypestr; + int eqidx; + + /* fcp/nvme wq and cq are 1:1, thus same indexes */ + + if (qtype == DUMP_FCP) { + wq = phba->sli4_hba.fcp_wq[wqidx]; + cq = phba->sli4_hba.fcp_cq[wqidx]; + qtypestr = "FCP"; + } else if (qtype == DUMP_NVME) { + wq = phba->sli4_hba.nvme_wq[wqidx]; + cq = phba->sli4_hba.nvme_cq[wqidx]; + qtypestr = "NVME"; + } else if (qtype == DUMP_MBX) { + wq = phba->sli4_hba.mbx_wq; + cq = phba->sli4_hba.mbx_cq; + qtypestr = "MBX"; + } else if (qtype == DUMP_ELS) { + wq = phba->sli4_hba.els_wq; + cq = phba->sli4_hba.els_cq; + qtypestr = "ELS"; + } else if (qtype == DUMP_NVMELS) { + wq = phba->sli4_hba.nvmels_wq; + cq = phba->sli4_hba.nvmels_cq; + qtypestr = "NVMELS"; + } else return; - fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid; - for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) - if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid) + for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++) { + eq = phba->sli4_hba.hba_eq[eqidx]; + if (cq->assoc_qid == eq->queue_id) break; - if (phba->intr_type == MSIX) { - if (fcp_cqidx >= phba->cfg_fcp_io_channel) - return; - } else { - if (fcp_cqidx > 0) - return; + } + if (eqidx == phba->io_channel_irqs) { + pr_err("Couldn't find EQ for CQ. Using EQ[0]\n"); + eqidx = 0; + eq = phba->sli4_hba.hba_eq[0]; } - printk(KERN_ERR "FCP CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]:\n", - fcp_wqidx, phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, - fcp_cqidx, fcp_cqid); - lpfc_debug_dump_q(phba->sli4_hba.fcp_cq[fcp_cqidx]); + if (qtype == DUMP_FCP || qtype == DUMP_NVME) + pr_err("%s CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]" + "->EQ[Idx:%d|Qid:%d]:\n", + qtypestr, wqidx, wq->queue_id, wqidx, cq->queue_id, + eqidx, eq->queue_id); + else + pr_err("%s CQ: WQ[Qid:%d]->CQ[Qid:%d]" + "->EQ[Idx:%d|Qid:%d]:\n", + qtypestr, wq->queue_id, cq->queue_id, + eqidx, eq->queue_id); + + lpfc_debug_dump_q(cq); } /** @@ -421,64 +511,15 @@ lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx) * associated to the FCP work queue specified by the @fcp_wqidx. **/ static inline void -lpfc_debug_dump_hba_eq(struct lpfc_hba *phba, int fcp_wqidx) +lpfc_debug_dump_hba_eq(struct lpfc_hba *phba, int qidx) { - struct lpfc_queue *qdesc; - int fcp_eqidx, fcp_eqid; - int fcp_cqidx, fcp_cqid; + struct lpfc_queue *qp; - /* sanity check */ - if (fcp_wqidx >= phba->cfg_fcp_io_channel) - return; - fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid; - for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) - if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid) - break; - if (phba->intr_type == MSIX) { - if (fcp_cqidx >= phba->cfg_fcp_io_channel) - return; - } else { - if (fcp_cqidx > 0) - return; - } - - fcp_eqidx = fcp_cqidx; - fcp_eqid = phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id; - qdesc = phba->sli4_hba.hba_eq[fcp_eqidx]; - - printk(KERN_ERR "FCP EQ: WQ[Idx:%d|Qid:%d]->CQ[Idx:%d|Qid:%d]->" - "EQ[Idx:%d|Qid:%d]\n", - fcp_wqidx, phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, - fcp_cqidx, fcp_cqid, fcp_eqidx, fcp_eqid); - lpfc_debug_dump_q(qdesc); -} + qp = phba->sli4_hba.hba_eq[qidx]; -/** - * lpfc_debug_dump_els_wq - dump all entries from the els work queue - * @phba: Pointer to HBA context object. 
- * - * This function dumps all entries from the ELS work queue. - **/ -static inline void -lpfc_debug_dump_els_wq(struct lpfc_hba *phba) -{ - printk(KERN_ERR "ELS WQ: WQ[Qid:%d]:\n", - phba->sli4_hba.els_wq->queue_id); - lpfc_debug_dump_q(phba->sli4_hba.els_wq); -} + pr_err("EQ[Idx:%d|Qid:%d]\n", qidx, qp->queue_id); -/** - * lpfc_debug_dump_mbx_wq - dump all entries from the mbox work queue - * @phba: Pointer to HBA context object. - * - * This function dumps all entries from the MBOX work queue. - **/ -static inline void -lpfc_debug_dump_mbx_wq(struct lpfc_hba *phba) -{ - printk(KERN_ERR "MBX WQ: WQ[Qid:%d]\n", - phba->sli4_hba.mbx_wq->queue_id); - lpfc_debug_dump_q(phba->sli4_hba.mbx_wq); + lpfc_debug_dump_q(qp); } /** @@ -509,36 +550,6 @@ lpfc_debug_dump_hdr_rq(struct lpfc_hba *phba) lpfc_debug_dump_q(phba->sli4_hba.hdr_rq); } -/** - * lpfc_debug_dump_els_cq - dump all entries from the els complete queue - * @phba: Pointer to HBA context object. - * - * This function dumps all entries from the els complete queue. - **/ -static inline void -lpfc_debug_dump_els_cq(struct lpfc_hba *phba) -{ - printk(KERN_ERR "ELS CQ: WQ[Qid:%d]->CQ[Qid:%d]\n", - phba->sli4_hba.els_wq->queue_id, - phba->sli4_hba.els_cq->queue_id); - lpfc_debug_dump_q(phba->sli4_hba.els_cq); -} - -/** - * lpfc_debug_dump_mbx_cq - dump all entries from the mbox complete queue - * @phba: Pointer to HBA context object. - * - * This function dumps all entries from the mbox complete queue. - **/ -static inline void -lpfc_debug_dump_mbx_cq(struct lpfc_hba *phba) -{ - printk(KERN_ERR "MBX CQ: WQ[Qid:%d]->CQ[Qid:%d]\n", - phba->sli4_hba.mbx_wq->queue_id, - phba->sli4_hba.mbx_cq->queue_id); - lpfc_debug_dump_q(phba->sli4_hba.mbx_cq); -} - /** * lpfc_debug_dump_wq_by_id - dump all entries from a work queue by queue id * @phba: Pointer to HBA context object. 
@@ -556,14 +567,29 @@ lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid) if (phba->sli4_hba.fcp_wq[wq_idx]->queue_id == qid) break; if (wq_idx < phba->cfg_fcp_io_channel) { - printk(KERN_ERR "FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid); + pr_err("FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid); lpfc_debug_dump_q(phba->sli4_hba.fcp_wq[wq_idx]); return; } + for (wq_idx = 0; wq_idx < phba->cfg_nvme_io_channel; wq_idx++) + if (phba->sli4_hba.nvme_wq[wq_idx]->queue_id == qid) + break; + if (wq_idx < phba->cfg_nvme_io_channel) { + pr_err("NVME WQ[Idx:%d|Qid:%d]\n", wq_idx, qid); + lpfc_debug_dump_q(phba->sli4_hba.nvme_wq[wq_idx]); + return; + } + if (phba->sli4_hba.els_wq->queue_id == qid) { - printk(KERN_ERR "ELS WQ[Qid:%d]\n", qid); + pr_err("ELS WQ[Qid:%d]\n", qid); lpfc_debug_dump_q(phba->sli4_hba.els_wq); + return; + } + + if (phba->sli4_hba.nvmels_wq->queue_id == qid) { + pr_err("NVME LS WQ[Qid:%d]\n", qid); + lpfc_debug_dump_q(phba->sli4_hba.nvmels_wq); } } @@ -617,27 +643,42 @@ lpfc_debug_dump_rq_by_id(struct lpfc_hba *phba, int qid) static inline void lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid) { - int cq_idx = 0; + int cq_idx; - do { + for (cq_idx = 0; cq_idx < phba->cfg_fcp_io_channel; cq_idx++) if (phba->sli4_hba.fcp_cq[cq_idx]->queue_id == qid) break; - } while (++cq_idx < phba->cfg_fcp_io_channel); if (cq_idx < phba->cfg_fcp_io_channel) { - printk(KERN_ERR "FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid); + pr_err("FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid); lpfc_debug_dump_q(phba->sli4_hba.fcp_cq[cq_idx]); return; } + for (cq_idx = 0; cq_idx < phba->cfg_nvme_io_channel; cq_idx++) + if (phba->sli4_hba.nvme_cq[cq_idx]->queue_id == qid) + break; + + if (cq_idx < phba->cfg_nvme_io_channel) { + pr_err("NVME CQ[Idx:%d|Qid:%d]\n", cq_idx, qid); + lpfc_debug_dump_q(phba->sli4_hba.nvme_cq[cq_idx]); + return; + } + if (phba->sli4_hba.els_cq->queue_id == qid) { - printk(KERN_ERR "ELS CQ[Qid:%d]\n", qid); + pr_err("ELS CQ[Qid:%d]\n", qid); lpfc_debug_dump_q(phba->sli4_hba.els_cq); return; } + if (phba->sli4_hba.nvmels_cq->queue_id == qid) { + pr_err("NVME LS CQ[Qid:%d]\n", qid); + lpfc_debug_dump_q(phba->sli4_hba.nvmels_cq); + return; + } + if (phba->sli4_hba.mbx_cq->queue_id == qid) { - printk(KERN_ERR "MBX CQ[Qid:%d]\n", qid); + pr_err("MBX CQ[Qid:%d]\n", qid); lpfc_debug_dump_q(phba->sli4_hba.mbx_cq); } } @@ -655,17 +696,15 @@ lpfc_debug_dump_eq_by_id(struct lpfc_hba *phba, int qid) { int eq_idx; - for (eq_idx = 0; eq_idx < phba->cfg_fcp_io_channel; eq_idx++) { + for (eq_idx = 0; eq_idx < phba->io_channel_irqs; eq_idx++) if (phba->sli4_hba.hba_eq[eq_idx]->queue_id == qid) break; - } - if (eq_idx < phba->cfg_fcp_io_channel) { + if (eq_idx < phba->io_channel_irqs) { printk(KERN_ERR "FCP EQ[Idx:%d|Qid:%d]\n", eq_idx, qid); lpfc_debug_dump_q(phba->sli4_hba.hba_eq[eq_idx]); return; } - } void lpfc_debug_dump_all_queues(struct lpfc_hba *); diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index 361f5b3d9d936b..f4ff99d95db343 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2013 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* - * www.emulex.com * + * www.broadcom.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * @@ -86,6 +88,17 @@ struct lpfc_nodelist { #define NLP_FABRIC 0x4 /* entry rep a Fabric entity */ #define NLP_FCP_TARGET 0x8 /* entry is an FCP target */ #define NLP_FCP_INITIATOR 0x10 /* entry is an FCP Initiator */ +#define NLP_NVME_TARGET 0x20 /* entry is a NVME Target */ +#define NLP_NVME_INITIATOR 0x40 /* entry is a NVME Initiator */ + + uint16_t nlp_fc4_type; /* FC types node supports. */ + /* Assigned from GID_FF, only + * FCP (0x8) and NVME (0x28) + * supported. + */ +#define NLP_FC4_NONE 0x0 +#define NLP_FC4_FCP 0x1 /* FC4 Type FCP (value x8)) */ +#define NLP_FC4_NVME 0x2 /* FC4 TYPE NVME (value x28) */ uint16_t nlp_rpi; uint16_t nlp_state; /* state transition indicator */ @@ -107,8 +120,8 @@ struct lpfc_nodelist { struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */ struct lpfc_hba *phba; - struct fc_rport *rport; /* Corresponding FC transport - port structure */ + struct fc_rport *rport; /* scsi_transport_fc port structure */ + struct lpfc_nvme_rport *nrport; /* nvme transport rport struct. */ struct lpfc_vport *vport; struct lpfc_work_evt els_retry_evt; struct lpfc_work_evt dev_loss_evt; @@ -118,6 +131,10 @@ struct lpfc_nodelist { unsigned long last_change_time; unsigned long *active_rrqs_xri_bitmap; struct lpfc_scsicmd_bkt *lat_data; /* Latency data */ + uint32_t fc4_prli_sent; + uint32_t upcall_flags; + uint32_t nvme_fb_size; /* NVME target's supported byte cnt */ +#define NVME_FB_BIT_SHIFT 9 /* PRLI Rsp first burst in 512B units. */ }; struct lpfc_node_rrq { struct list_head list; @@ -133,6 +150,7 @@ struct lpfc_node_rrq { /* Defines for nlp_flag (uint32) */ #define NLP_IGNR_REG_CMPL 0x00000001 /* Rcvd rscn before we cmpl reg login */ #define NLP_REG_LOGIN_SEND 0x00000002 /* sent reglogin to adapter */ +#define NLP_SUPPRESS_RSP 0x00000010 /* Remote NPort supports suppress rsp */ #define NLP_PLOGI_SND 0x00000020 /* sent PLOGI request for this entry */ #define NLP_PRLI_SND 0x00000040 /* sent PRLI request for this entry */ #define NLP_ADISC_SND 0x00000080 /* sent ADISC request for this entry */ diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 3a1f1a2a2b559a..a5ca37e45fb682 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* - * www.emulex.com * + * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * @@ -29,7 +31,6 @@ #include #include - #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" @@ -1323,7 +1324,7 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba) "0201 Abort outstanding I/O on NPort x%x\n", Fabric_DID); - pring = &phba->sli.ring[LPFC_ELS_RING]; + pring = lpfc_phba_elsring(phba); /* * Check the txcmplq for an iocb that matches the nport the driver is @@ -1513,7 +1514,7 @@ static struct lpfc_nodelist * lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, struct lpfc_nodelist *ndlp) { - struct lpfc_vport *vport = ndlp->vport; + struct lpfc_vport *vport = ndlp->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_nodelist *new_ndlp; struct lpfc_rport_data *rdata; @@ -1868,10 +1869,12 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* PLOGI completes to NPort */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "0102 PLOGI completes to NPort x%x " + "0102 PLOGI completes to NPort x%06x " "Data: x%x x%x x%x x%x x%x\n", - ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], - irsp->ulpTimeout, disc, vport->num_disc_nodes); + ndlp->nlp_DID, ndlp->nlp_fc4_type, + irsp->ulpStatus, irsp->un.ulpWord[4], + disc, vport->num_disc_nodes); + /* Check to see if link went down during discovery */ if (lpfc_els_chk_latt(vport)) { spin_lock_irq(shost->host_lock); @@ -2000,12 +2003,21 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) sp->cmn.fcphHigh = FC_PH3; sp->cmn.valid_vendor_ver_level = 0; - memset(sp->vendorVersion, 0, sizeof(sp->vendorVersion)); + memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "Issue PLOGI: did:x%x", did, 0, 0); + /* If our firmware supports this feature, convey that + * information to the target using the vendor specific field. + */ + if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { + sp->cmn.valid_vendor_ver_level = 1; + sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); + sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP); + } + phba->fc_stat.elsXmitPLOGI++; elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi; ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); @@ -2052,14 +2064,17 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, "PRLI cmpl: status:x%x/x%x did:x%x", irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID); + + /* Ddriver supports multiple FC4 types. Counters matter. */ + vport->fc_prli_sent--; + /* PRLI completes to NPort */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "0103 PRLI completes to NPort x%x " + "0103 PRLI completes to NPort x%06x " "Data: x%x x%x x%x x%x\n", ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], - irsp->ulpTimeout, vport->num_disc_nodes); + vport->num_disc_nodes, ndlp->fc4_prli_sent); - vport->fc_prli_sent--; /* Check to see if link went down during discovery */ if (lpfc_els_chk_latt(vport)) goto out; @@ -2068,6 +2083,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Check for retry */ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { /* ELS command is being retried */ + ndlp->fc4_prli_sent--; goto out; } /* PRLI failed */ @@ -2082,9 +2098,14 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_PRLI); } else - /* Good status, call state machine */ + /* Good status, call state machine. 
However, if another + * PRLI is outstanding, don't call the state machine + * because final disposition to Mapped or Unmapped is + * completed there. + */ lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_PRLI); + out: lpfc_els_free_iocb(phba, cmdiocb); return; @@ -2118,42 +2139,100 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_hba *phba = vport->phba; PRLI *npr; + struct lpfc_nvme_prli *npr_nvme; struct lpfc_iocbq *elsiocb; uint8_t *pcmd; uint16_t cmdsize; - - cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); + u32 local_nlp_type, elscmd; + + local_nlp_type = ndlp->nlp_fc4_type; + + send_next_prli: + if (local_nlp_type & NLP_FC4_FCP) { + /* Payload is 4 + 16 = 20 x14 bytes. */ + cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); + elscmd = ELS_CMD_PRLI; + } else if (local_nlp_type & NLP_FC4_NVME) { + /* Payload is 4 + 20 = 24 x18 bytes. */ + cmdsize = (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli)); + elscmd = ELS_CMD_NVMEPRLI; + } else { + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "3083 Unknown FC_TYPE x%x ndlp x%06x\n", + ndlp->nlp_fc4_type, ndlp->nlp_DID); + return 1; + } elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, - ndlp->nlp_DID, ELS_CMD_PRLI); + ndlp->nlp_DID, elscmd); if (!elsiocb) return 1; pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); /* For PRLI request, remainder of payload is service parameters */ - memset(pcmd, 0, (sizeof(PRLI) + sizeof(uint32_t))); - *((uint32_t *) (pcmd)) = ELS_CMD_PRLI; - pcmd += sizeof(uint32_t); + memset(pcmd, 0, cmdsize); - /* For PRLI, remainder of payload is PRLI parameter page */ - npr = (PRLI *) pcmd; - /* - * If our firmware version is 3.20 or later, - * set the following bits for FC-TAPE support. - */ - if (phba->vpd.rev.feaLevelHigh >= 0x02) { - npr->ConfmComplAllowed = 1; - npr->Retry = 1; - npr->TaskRetryIdReq = 1; - } - npr->estabImagePair = 1; - npr->readXferRdyDis = 1; - if (vport->cfg_first_burst_size) - npr->writeXferRdyDis = 1; + if (local_nlp_type & NLP_FC4_FCP) { + /* Remainder of payload is FCP PRLI parameter page. + * Note: this data structure is defined as + * BE/LE in the structure definition so no + * byte swap call is made. + */ + *((uint32_t *)(pcmd)) = ELS_CMD_PRLI; + pcmd += sizeof(uint32_t); + npr = (PRLI *)pcmd; + + /* + * If our firmware version is 3.20 or later, + * set the following bits for FC-TAPE support. + */ + if (phba->vpd.rev.feaLevelHigh >= 0x02) { + npr->ConfmComplAllowed = 1; + npr->Retry = 1; + npr->TaskRetryIdReq = 1; + } + npr->estabImagePair = 1; + npr->readXferRdyDis = 1; + if (vport->cfg_first_burst_size) + npr->writeXferRdyDis = 1; + + /* For FCP support */ + npr->prliType = PRLI_FCP_TYPE; + npr->initiatorFunc = 1; + elsiocb->iocb_flag |= LPFC_PRLI_FCP_REQ; + + /* Remove FCP type - processed. */ + local_nlp_type &= ~NLP_FC4_FCP; + } else if (local_nlp_type & NLP_FC4_NVME) { + /* Remainder of payload is NVME PRLI parameter page. + * This data structure is the newer definition that + * uses bf macros so a byte swap is required. + */ + *((uint32_t *)(pcmd)) = ELS_CMD_NVMEPRLI; + pcmd += sizeof(uint32_t); + npr_nvme = (struct lpfc_nvme_prli *)pcmd; + bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); + bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ - /* For FCP support */ - npr->prliType = PRLI_FCP_TYPE; - npr->initiatorFunc = 1; + /* Only initiators request first burst. 
*/ + if ((phba->cfg_nvme_enable_fb) && + !phba->nvmet_support) + bf_set(prli_fba, npr_nvme, 1); + + if (phba->nvmet_support) { + bf_set(prli_tgt, npr_nvme, 1); + bf_set(prli_disc, npr_nvme, 1); + + } else { + bf_set(prli_init, npr_nvme, 1); + } + npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); + npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); + elsiocb->iocb_flag |= LPFC_PRLI_NVME_REQ; + + /* Remove NVME type - processed. */ + local_nlp_type &= ~NLP_FC4_NVME; + } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "Issue PRLI: did:x%x", @@ -2172,7 +2251,20 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_els_free_iocb(phba, elsiocb); return 1; } + + /* The vport counters are used for lpfc_scan_finished, but + * the ndlp is used to track outstanding PRLIs for different + * FC4 types. + */ vport->fc_prli_sent++; + ndlp->fc4_prli_sent++; + + /* The driver supports 2 FC4 types. Make sure + * a PRLI is issued for all types before exiting. + */ + if (local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME)) + goto send_next_prli; + return 0; } @@ -2543,6 +2635,15 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if ((vport->fc_flag & FC_PT2PT) && !(vport->fc_flag & FC_PT2PT_PLOGI)) { phba->pport->fc_myDID = 0; + + if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { + if (phba->nvmet_support) + lpfc_nvmet_update_targetport(phba); + else + lpfc_nvme_update_localport(phba->pport); + } + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (mbox) { lpfc_config_link(phba, mbox); @@ -3055,6 +3156,7 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) } break; case ELS_CMD_PRLI: + case ELS_CMD_NVMEPRLI: if (!lpfc_issue_els_prli(vport, ndlp, retry)) { ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); @@ -3245,7 +3347,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, break; } if ((cmd == ELS_CMD_PLOGI) || - (cmd == ELS_CMD_PRLI)) { + (cmd == ELS_CMD_PRLI) || + (cmd == ELS_CMD_NVMEPRLI)) { delay = 1000; maxretry = lpfc_max_els_tries + 1; retry = 1; @@ -3265,7 +3368,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, case LSRJT_LOGICAL_BSY: if ((cmd == ELS_CMD_PLOGI) || - (cmd == ELS_CMD_PRLI)) { + (cmd == ELS_CMD_PRLI) || + (cmd == ELS_CMD_NVMEPRLI)) { delay = 1000; maxretry = 48; } else if (cmd == ELS_CMD_FDISC) { @@ -3399,7 +3503,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, spin_unlock_irq(shost->host_lock); ndlp->nlp_prev_state = ndlp->nlp_state; - if (cmd == ELS_CMD_PRLI) + if ((cmd == ELS_CMD_PRLI) || + (cmd == ELS_CMD_NVMEPRLI)) lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); else @@ -3430,6 +3535,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry); return 1; case ELS_CMD_PRLI: + case ELS_CMD_NVMEPRLI: ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry); @@ -3995,7 +4101,18 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, sizeof(struct serv_parm)); sp->cmn.valid_vendor_ver_level = 0; - memset(sp->vendorVersion, 0, sizeof(sp->vendorVersion)); + memset(sp->un.vendorVersion, 0, + sizeof(sp->un.vendorVersion)); + + /* If our firmware supports this feature, convey that + * info to the target using the vendor specific field. 
+ */ + if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { + sp->cmn.valid_vendor_ver_level = 1; + sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); + sp->un.vv.flags = + cpu_to_be32(LPFC_VV_SUPPRESS_RSP); + } } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, @@ -4231,17 +4348,43 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, { struct lpfc_hba *phba = vport->phba; PRLI *npr; + struct lpfc_nvme_prli *npr_nvme; lpfc_vpd_t *vpd; IOCB_t *icmd; IOCB_t *oldcmd; struct lpfc_iocbq *elsiocb; uint8_t *pcmd; uint16_t cmdsize; + uint32_t prli_fc4_req, *req_payload; + struct lpfc_dmabuf *req_buf; int rc; + u32 elsrspcmd; + + /* Need the incoming PRLI payload to determine if the ACC is for an + * FC4 or NVME PRLI type. The PRLI type is at word 1. + */ + req_buf = (struct lpfc_dmabuf *)oldiocb->context2; + req_payload = (((uint32_t *)req_buf->virt) + 1); + + /* PRLI type payload is at byte 3 for FCP or NVME. */ + prli_fc4_req = be32_to_cpu(*req_payload); + prli_fc4_req = (prli_fc4_req >> 24) & 0xff; + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "6127 PRLI_ACC: Req Type x%x, Word1 x%08x\n", + prli_fc4_req, *((uint32_t *)req_payload)); + + if (prli_fc4_req == PRLI_FCP_TYPE) { + cmdsize = sizeof(uint32_t) + sizeof(PRLI); + elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)); + } else if (prli_fc4_req & PRLI_NVME_TYPE) { + cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli); + elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK)); + } else { + return 1; + } - cmdsize = sizeof(uint32_t) + sizeof(PRLI); elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, - ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK))); + ndlp->nlp_DID, elsrspcmd); if (!elsiocb) return 1; @@ -4258,33 +4401,71 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); + memset(pcmd, 0, cmdsize); *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)); pcmd += sizeof(uint32_t); /* For PRLI, remainder of payload is PRLI parameter page */ - memset(pcmd, 0, sizeof(PRLI)); - - npr = (PRLI *) pcmd; vpd = &phba->vpd; - /* - * If the remote port is a target and our firmware version is 3.20 or - * later, set the following bits for FC-TAPE support. - */ - if ((ndlp->nlp_type & NLP_FCP_TARGET) && - (vpd->rev.feaLevelHigh >= 0x02)) { - npr->ConfmComplAllowed = 1; - npr->Retry = 1; - npr->TaskRetryIdReq = 1; - } - npr->acceptRspCode = PRLI_REQ_EXECUTED; - npr->estabImagePair = 1; - npr->readXferRdyDis = 1; - npr->ConfmComplAllowed = 1; + if (prli_fc4_req == PRLI_FCP_TYPE) { + /* + * If the remote port is a target and our firmware version + * is 3.20 or later, set the following bits for FC-TAPE + * support. 
+ */ + npr = (PRLI *) pcmd; + if ((ndlp->nlp_type & NLP_FCP_TARGET) && + (vpd->rev.feaLevelHigh >= 0x02)) { + npr->ConfmComplAllowed = 1; + npr->Retry = 1; + npr->TaskRetryIdReq = 1; + } + npr->acceptRspCode = PRLI_REQ_EXECUTED; + npr->estabImagePair = 1; + npr->readXferRdyDis = 1; + npr->ConfmComplAllowed = 1; + npr->prliType = PRLI_FCP_TYPE; + npr->initiatorFunc = 1; + } else if (prli_fc4_req & PRLI_NVME_TYPE) { + /* Respond with an NVME PRLI Type */ + npr_nvme = (struct lpfc_nvme_prli *) pcmd; + bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); + bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ + bf_set(prli_acc_rsp_code, npr_nvme, PRLI_REQ_EXECUTED); + if (phba->nvmet_support) { + bf_set(prli_tgt, npr_nvme, 1); + bf_set(prli_disc, npr_nvme, 1); + if (phba->cfg_nvme_enable_fb) { + bf_set(prli_fba, npr_nvme, 1); + + /* TBD. Target mode needs to post buffers + * that support the configured first burst + * byte size. + */ + bf_set(prli_fb_sz, npr_nvme, + phba->cfg_nvmet_fb_size); + } + } else { + bf_set(prli_init, npr_nvme, 1); + } - npr->prliType = PRLI_FCP_TYPE; - npr->initiatorFunc = 1; + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, + "6015 NVME issue PRLI ACC word1 x%08x " + "word4 x%08x word5 x%08x flag x%x, " + "fcp_info x%x nlp_type x%x\n", + npr_nvme->word1, npr_nvme->word4, + npr_nvme->word5, ndlp->nlp_flag, + ndlp->nlp_fcp_info, ndlp->nlp_type); + npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); + npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); + npr_nvme->word5 = cpu_to_be32(npr_nvme->word5); + } else + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "6128 Unknown FC_TYPE x%x x%x ndlp x%06x\n", + prli_fc4_req, ndlp->nlp_fc4_type, + ndlp->nlp_DID); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, "Issue ACC PRLI: did:x%x flg:x%x", @@ -4411,7 +4592,7 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, **/ static void lpfc_els_clear_rrq(struct lpfc_vport *vport, - struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp) + struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp) { struct lpfc_hba *phba = vport->phba; uint8_t *pcmd; @@ -4909,7 +5090,7 @@ lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc, memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16); memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16); memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16); - memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 2); + memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 4); memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8); desc->length = cpu_to_be32(sizeof(desc->opd_info)); return sizeof(struct fc_rdp_opd_sfp_desc); @@ -4996,15 +5177,15 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) static uint32_t lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc, - struct lpfc_hba *phba) + struct lpfc_vport *vport) { desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); - memcpy(desc->port_names.wwnn, phba->wwnn, + memcpy(desc->port_names.wwnn, &vport->fc_nodename, sizeof(desc->port_names.wwnn)); - memcpy(desc->port_names.wwpn, &phba->wwpn, + memcpy(desc->port_names.wwpn, &vport->fc_portname, sizeof(desc->port_names.wwpn)); desc->length = cpu_to_be32(sizeof(desc->port_names)); @@ -5098,7 +5279,7 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *) (len + pcmd), &rdp_context->link_stat); len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *) - (len + 
pcmd), phba); + (len + pcmd), vport); len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *) (len + pcmd), vport, ndlp); len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd), @@ -5233,9 +5414,8 @@ lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, struct ls_rjt stat; if (phba->sli_rev < LPFC_SLI_REV4 || - (bf_get(lpfc_sli_intf_if_type, - &phba->sli4_hba.sli_intf) != - LPFC_SLI_INTF_IF_TYPE_2)) { + bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != + LPFC_SLI_INTF_IF_TYPE_2) { rjt_err = LSRJT_UNABLE_TPC; rjt_expl = LSEXP_REQ_UNSUPPORTED; goto error; @@ -5687,6 +5867,8 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport) (ndlp->nlp_state == NLP_STE_UNUSED_NODE) || !lpfc_rscn_payload_check(vport, ndlp->nlp_DID)) continue; + if (vport->phba->nvmet_support) + continue; lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RECOVERY); lpfc_cancel_retry_delay_tmo(vport, ndlp); @@ -5976,9 +6158,11 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport) if (ndlp && NLP_CHK_NODE_ACT(ndlp) && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { /* Good ndlp, issue CT Request to NameServer */ - if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0) + vport->gidft_inp = 0; + if (lpfc_issue_gidft(vport) == 0) /* Wait for NameServer query cmpl before we can - continue */ + * continue + */ return 1; } else { /* If login to NameServer does not exist, issue one */ @@ -6082,7 +6266,6 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, (void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1); - /* * If our portname is greater than the remote portname, * then we initiate Nport login. @@ -7155,7 +7338,8 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport) timeout = (uint32_t)(phba->fc_ratov << 1); - pring = &phba->sli.ring[LPFC_ELS_RING]; + pring = lpfc_phba_elsring(phba); + if ((phba->pport->load_flag & FC_UNLOADING)) return; spin_lock_irq(&phba->hbalock); @@ -7224,7 +7408,7 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport) spin_unlock_irq(&phba->hbalock); } - if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq)) + if (!list_empty(&pring->txcmplq)) if (!(phba->pport->load_flag & FC_UNLOADING)) mod_timer(&vport->els_tmofunc, jiffies + msecs_to_jiffies(1000 * timeout)); @@ -7255,7 +7439,7 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport) { LIST_HEAD(abort_list); struct lpfc_hba *phba = vport->phba; - struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; + struct lpfc_sli_ring *pring; struct lpfc_iocbq *tmp_iocb, *piocb; IOCB_t *cmd = NULL; @@ -7267,6 +7451,7 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport) * a working list and release the locks before calling the abort. 
*/ spin_lock_irq(&phba->hbalock); + pring = lpfc_phba_elsring(phba); if (phba->sli_rev == LPFC_SLI_REV4) spin_lock(&pring->ring_lock); @@ -7777,12 +7962,14 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, lpfc_els_rcv_fan(vport, elsiocb, ndlp); break; case ELS_CMD_PRLI: + case ELS_CMD_NVMEPRLI: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, "RCV PRLI: did:x%x/ste:x%x flg:x%x", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvPRLI++; - if (vport->port_state < LPFC_DISC_AUTH) { + if ((vport->port_state < LPFC_DISC_AUTH) && + (vport->fc_flag & FC_FABRIC)) { rjt_err = LSRJT_UNABLE_TPC; rjt_exp = LSEXP_NOTHING_MORE; break; @@ -8185,11 +8372,17 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; spin_unlock_irq(shost->host_lock); - if (vport->port_type == LPFC_PHYSICAL_PORT - && !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) - lpfc_issue_init_vfi(vport); - else + if (mb->mbxStatus == MBX_NOT_FINISHED) + break; + if ((vport->port_type == LPFC_PHYSICAL_PORT) && + !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) { + if (phba->sli_rev == LPFC_SLI_REV4) + lpfc_issue_init_vfi(vport); + else + lpfc_initial_flogi(vport); + } else { lpfc_initial_fdisc(vport); + } break; } } else { @@ -8881,8 +9074,7 @@ lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, break; } - if (atomic_read(&phba->fabric_iocb_count) == 0) - BUG(); + BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0); cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl; cmdiocb->fabric_iocb_cmpl = NULL; @@ -8927,8 +9119,7 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) int ready; int ret; - if (atomic_read(&phba->fabric_iocb_count) > 1) - BUG(); + BUG_ON(atomic_read(&phba->fabric_iocb_count) > 1); spin_lock_irqsave(&phba->hbalock, iflags); ready = atomic_read(&phba->fabric_iocb_count) == 0 && @@ -9013,7 +9204,9 @@ void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp) LIST_HEAD(completions); struct lpfc_hba *phba = ndlp->phba; struct lpfc_iocbq *tmp_iocb, *piocb; - struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; + struct lpfc_sli_ring *pring; + + pring = lpfc_phba_elsring(phba); spin_lock_irq(&phba->hbalock); list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, @@ -9069,13 +9262,13 @@ lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport) unsigned long iflag = 0; spin_lock_irqsave(&phba->hbalock, iflag); - spin_lock(&phba->sli4_hba.abts_sgl_list_lock); + spin_lock(&phba->sli4_hba.sgl_list_lock); list_for_each_entry_safe(sglq_entry, sglq_next, &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) sglq_entry->ndlp = NULL; } - spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); + spin_unlock(&phba->sli4_hba.sgl_list_lock); spin_unlock_irqrestore(&phba->hbalock, iflag); return; } @@ -9099,22 +9292,22 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; unsigned long iflag = 0; struct lpfc_nodelist *ndlp; - struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; + struct lpfc_sli_ring *pring; + + pring = lpfc_phba_elsring(phba); spin_lock_irqsave(&phba->hbalock, iflag); - spin_lock(&phba->sli4_hba.abts_sgl_list_lock); + spin_lock(&phba->sli4_hba.sgl_list_lock); list_for_each_entry_safe(sglq_entry, sglq_next, &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { if (sglq_entry->sli4_xritag == xri) { list_del(&sglq_entry->list); ndlp = 
sglq_entry->ndlp; sglq_entry->ndlp = NULL; - spin_lock(&pring->ring_lock); list_add_tail(&sglq_entry->list, - &phba->sli4_hba.lpfc_sgl_list); + &phba->sli4_hba.lpfc_els_sgl_list); sglq_entry->state = SGL_FREED; - spin_unlock(&pring->ring_lock); - spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); + spin_unlock(&phba->sli4_hba.sgl_list_lock); spin_unlock_irqrestore(&phba->hbalock, iflag); lpfc_set_rrq_active(phba, ndlp, sglq_entry->sli4_lxritag, @@ -9126,21 +9319,21 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, return; } } - spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); + spin_unlock(&phba->sli4_hba.sgl_list_lock); lxri = lpfc_sli4_xri_inrange(phba, xri); if (lxri == NO_XRI) { spin_unlock_irqrestore(&phba->hbalock, iflag); return; } - spin_lock(&pring->ring_lock); + spin_lock(&phba->sli4_hba.sgl_list_lock); sglq_entry = __lpfc_get_active_sglq(phba, lxri); if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) { - spin_unlock(&pring->ring_lock); + spin_unlock(&phba->sli4_hba.sgl_list_lock); spin_unlock_irqrestore(&phba->hbalock, iflag); return; } sglq_entry->state = SGL_XRI_ABORTED; - spin_unlock(&pring->ring_lock); + spin_unlock(&phba->sli4_hba.sgl_list_lock); spin_unlock_irqrestore(&phba->hbalock, iflag); return; } diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 82047070cdc973..180b072beef6b0 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* - * www.emulex.com * + * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * @@ -31,6 +33,9 @@ #include #include #include +#include + +#include #include "lpfc_hw4.h" #include "lpfc_hw.h" @@ -38,8 +43,9 @@ #include "lpfc_disc.h" #include "lpfc_sli.h" #include "lpfc_sli4.h" -#include "lpfc_scsi.h" #include "lpfc.h" +#include "lpfc_scsi.h" +#include "lpfc_nvme.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" #include "lpfc_vport.h" @@ -93,7 +99,7 @@ lpfc_terminate_rport_io(struct fc_rport *rport) if (ndlp->nlp_sid != NLP_NO_SID) { lpfc_sli_abort_iocb(ndlp->vport, - &phba->sli.ring[phba->sli.fcp_ring], + &phba->sli.sli3_ring[LPFC_FCP_RING], ndlp->nlp_sid, 0, LPFC_CTX_TGT); } } @@ -247,8 +253,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) if (ndlp->nlp_sid != NLP_NO_SID) { /* flush the target */ lpfc_sli_abort_iocb(vport, - &phba->sli.ring[phba->sli.fcp_ring], - ndlp->nlp_sid, 0, LPFC_CTX_TGT); + &phba->sli.sli3_ring[LPFC_FCP_RING], + ndlp->nlp_sid, 0, LPFC_CTX_TGT); } put_node = rdata->pnode != NULL; rdata->pnode = NULL; @@ -283,7 +289,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) if (ndlp->nlp_sid != NLP_NO_SID) { warn_on = 1; - lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], + lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING], ndlp->nlp_sid, 0, LPFC_CTX_TGT); } @@ -307,8 +313,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) ndlp->nlp_state, ndlp->nlp_rpi); } - if (!(vport->load_flag & FC_UNLOADING) && - !(ndlp->nlp_flag & NLP_DELAY_TMO) && + if (!(ndlp->nlp_flag & NLP_DELAY_TMO) && !(ndlp->nlp_flag & NLP_NPR_2B_DISC) && (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) && @@ -495,11 +500,12 @@ lpfc_send_fastpath_evt(struct lpfc_hba *phba, return; } - fc_host_post_vendor_event(shost, - fc_get_event_number(), - evt_data_size, - evt_data, - LPFC_NL_VENDOR_ID); + if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME) + fc_host_post_vendor_event(shost, + fc_get_event_number(), + evt_data_size, + evt_data, + LPFC_NL_VENDOR_ID); lpfc_free_fast_evt(phba, fast_evt_data); return; @@ -634,6 +640,8 @@ lpfc_work_done(struct lpfc_hba *phba) lpfc_handle_rrq_active(phba); if (phba->hba_flag & FCP_XRI_ABORT_EVENT) lpfc_sli4_fcp_xri_abort_event_proc(phba); + if (phba->hba_flag & NVME_XRI_ABORT_EVENT) + lpfc_sli4_nvme_xri_abort_event_proc(phba); if (phba->hba_flag & ELS_XRI_ABORT_EVENT) lpfc_sli4_els_xri_abort_event_proc(phba); if (phba->hba_flag & ASYNC_EVENT) @@ -682,7 +690,7 @@ lpfc_work_done(struct lpfc_hba *phba) } lpfc_destroy_vport_work_array(phba, vports); - pring = &phba->sli.ring[LPFC_ELS_RING]; + pring = lpfc_phba_elsring(phba); status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); status >>= (4*LPFC_ELS_RING); if ((status & HA_RXMASK) || @@ -852,9 +860,12 @@ lpfc_port_link_failure(struct lpfc_vport *vport) void lpfc_linkdown_port(struct lpfc_vport *vport) { + struct lpfc_hba *phba = vport->phba; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); - fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0); + if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME) + fc_host_post_event(shost, fc_get_event_number(), + FCH_EVT_LINKDOWN, 0); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "Link Down: state:x%x rtry:x%x flg:x%x", @@ -894,11 +905,22 @@ lpfc_linkdown(struct lpfc_hba *phba) spin_unlock_irq(shost->host_lock); } vports = lpfc_create_vport_work_array(phba); - if (vports != NULL) + if 
(vports != NULL) { for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { /* Issue a LINK DOWN event to all nodes */ lpfc_linkdown_port(vports[i]); + + vports[i]->fc_myDID = 0; + + if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { + if (phba->nvmet_support) + lpfc_nvmet_update_targetport(phba); + else + lpfc_nvme_update_localport(vports[i]); + } } + } lpfc_destroy_vport_work_array(phba, vports); /* Clean up any firmware default rpi's */ mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); @@ -914,7 +936,6 @@ lpfc_linkdown(struct lpfc_hba *phba) /* Setup myDID for link up if we are in pt2pt mode */ if (phba->pport->fc_flag & FC_PT2PT) { - phba->pport->fc_myDID = 0; mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (mb) { lpfc_config_link(phba, mb); @@ -929,7 +950,6 @@ lpfc_linkdown(struct lpfc_hba *phba) phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI); spin_unlock_irq(shost->host_lock); } - return 0; } @@ -977,7 +997,9 @@ lpfc_linkup_port(struct lpfc_vport *vport) (vport != phba->pport)) return; - fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0); + if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME) + fc_host_post_event(shost, fc_get_event_number(), + FCH_EVT_LINKUP, 0); spin_lock_irq(shost->host_lock); vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY | @@ -1016,7 +1038,7 @@ lpfc_linkup(struct lpfc_hba *phba) * This routine handles processing a CLEAR_LA mailbox * command upon completion. It is setup in the LPFC_MBOXQ * as the completion routine when the command is - * handed off to the SLI layer. + * handed off to the SLI layer. SLI3 only. */ static void lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) @@ -1028,9 +1050,8 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) uint32_t control; /* Since we don't do discovery right now, turn these off here */ - psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT; - psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT; - psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT; + psli->sli3_ring[LPFC_EXTRA_RING].flag &= ~LPFC_STOP_IOCB_EVENT; + psli->sli3_ring[LPFC_FCP_RING].flag &= ~LPFC_STOP_IOCB_EVENT; /* Check for error */ if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) { @@ -2153,7 +2174,7 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) uint32_t boot_flag, addr_mode; uint16_t fcf_index, next_fcf_index; struct lpfc_fcf_rec *fcf_rec = NULL; - uint16_t vlan_id; + uint16_t vlan_id = LPFC_FCOE_NULL_VID; bool select_new_fcf; int rc; @@ -3277,7 +3298,7 @@ lpfc_mbx_issue_link_down(struct lpfc_hba *phba) * This routine handles processing a READ_TOPOLOGY mailbox * command upon completion. It is setup in the LPFC_MBOXQ * as the completion routine when the command is - * handed off to the SLI layer. + * handed off to the SLI layer. SLI4 only. 
*/ void lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) @@ -3285,11 +3306,14 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) struct lpfc_vport *vport = pmb->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_mbx_read_top *la; + struct lpfc_sli_ring *pring; MAILBOX_t *mb = &pmb->u.mb; struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); /* Unblock ELS traffic */ - phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; + pring = lpfc_phba_elsring(phba); + pring->flag &= ~LPFC_STOP_IOCB_EVENT; + /* Check for error */ if (mb->mbxStatus) { lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, @@ -3458,6 +3482,14 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; spin_unlock_irq(shost->host_lock); + + /* + * We cannot leave the RPI registered because + * if we go thru discovery again for this ndlp + * a subsequent REG_RPI will fail. + */ + ndlp->nlp_flag |= NLP_RPI_REGISTERED; + lpfc_unreg_rpi(vport, ndlp); } /* Call state machine */ @@ -3556,6 +3588,14 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); spin_unlock_irq(shost->host_lock); vport->fc_myDID = 0; + + if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { + if (phba->nvmet_support) + lpfc_nvmet_update_targetport(phba); + else + lpfc_nvme_update_localport(vport); + } goto out; } @@ -3805,6 +3845,52 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) return; } + /* + * This routine will issue a GID_FT for each FC4 Type supported + * by the driver. ALL GID_FTs must complete before discovery is started. + */ +int +lpfc_issue_gidft(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + + /* Good status, issue CT Request to NameServer */ + if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) { + if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_FCP)) { + /* Cannot issue NameServer FCP Query, so finish up + * discovery + */ + lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, + "0604 %s FC TYPE %x %s\n", + "Failed to issue GID_FT to ", + FC_TYPE_FCP, + "Finishing discovery."); + return 0; + } + vport->gidft_inp++; + } + + if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { + if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_NVME)) { + /* Cannot issue NameServer NVME Query, so finish up + * discovery + */ + lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, + "0605 %s FC_TYPE %x %s %d\n", + "Failed to issue GID_FT to ", + FC_TYPE_NVME, + "Finishing discovery: gidftinp ", + vport->gidft_inp); + if (vport->gidft_inp == 0) + return 0; + } else + vport->gidft_inp++; + } + return vport->gidft_inp; +} + /* * This routine handles processing a NameServer REG_LOGIN mailbox * command upon completion. It is setup in the LPFC_MBOXQ @@ -3821,12 +3907,14 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) pmb->context1 = NULL; pmb->context2 = NULL; + vport->gidft_inp = 0; if (mb->mbxStatus) { -out: lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, "0260 Register NameServer error: 0x%x\n", mb->mbxStatus); + +out: /* decrement the node reference count held for this * callback function. 
*/ @@ -3870,20 +3958,29 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0); lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0); lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0); - lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); + + if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) + lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, FC_TYPE_FCP); + + if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) + lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, + FC_TYPE_NVME); /* Issue SCR just before NameServer GID_FT Query */ lpfc_issue_els_scr(vport, SCR_DID, 0); } vport->fc_ns_retry = 0; - /* Good status, issue CT Request to NameServer */ - if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) { - /* Cannot issue NameServer Query, so finish up discovery */ + if (lpfc_issue_gidft(vport) == 0) goto out; - } - /* decrement the node reference count held for this + /* + * At this point in time we may need to wait for multiple + * SLI_CTNS_GID_FT CT commands to complete before we start discovery. + * + * decrement the node reference count held for this * callback function. */ lpfc_nlp_put(ndlp); @@ -3903,6 +4000,9 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) struct fc_rport_identifiers rport_ids; struct lpfc_hba *phba = vport->phba; + if (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME) + return; + /* Remote port has reappeared. Re-register w/ FC transport */ rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn); @@ -3921,9 +4021,11 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) rdata = rport->dd_data; /* break the link before dropping the ref */ ndlp->rport = NULL; - if (rdata && rdata->pnode == ndlp) - lpfc_nlp_put(ndlp); - rdata->pnode = NULL; + if (rdata) { + if (rdata->pnode == ndlp) + lpfc_nlp_put(ndlp); + rdata->pnode = NULL; + } /* drop reference for earlier registeration */ put_device(&rport->dev); } @@ -3972,12 +4074,17 @@ static void lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp) { struct fc_rport *rport = ndlp->rport; + struct lpfc_vport *vport = ndlp->vport; + struct lpfc_hba *phba = vport->phba; - lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT, + if (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME) + return; + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, "rport delete: did:x%x flg:x%x type x%x", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); - lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, "3184 rport unregister x%06x, rport %p\n", ndlp->nlp_DID, rport); @@ -4029,6 +4136,7 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int old_state, int new_state) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; if (new_state == NLP_STE_UNMAPPED_NODE) { ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; @@ -4039,23 +4147,56 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (new_state == NLP_STE_NPR_NODE) ndlp->nlp_flag &= ~NLP_RCV_PLOGI; - /* Transport interface */ - if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE || - old_state == NLP_STE_UNMAPPED_NODE)) { - vport->phba->nport_event_cnt++; - lpfc_unregister_remote_port(ndlp); + /* FCP and NVME Transport interface */ + if ((old_state == NLP_STE_MAPPED_NODE || + old_state == NLP_STE_UNMAPPED_NODE)) { + if (ndlp->rport) { 
+ vport->phba->nport_event_cnt++; + lpfc_unregister_remote_port(ndlp); + } + + /* Notify the NVME transport of this rport's loss */ + if (((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) && + (vport->phba->nvmet_support == 0) && + ((ndlp->nlp_fc4_type & NLP_FC4_NVME) || + (ndlp->nlp_DID == Fabric_DID))) { + vport->phba->nport_event_cnt++; + lpfc_nvme_unregister_port(vport, ndlp); + } } + /* FCP and NVME Transport interfaces */ + if (new_state == NLP_STE_MAPPED_NODE || new_state == NLP_STE_UNMAPPED_NODE) { - vport->phba->nport_event_cnt++; - /* - * Tell the fc transport about the port, if we haven't - * already. If we have, and it's a scsi entity, be - * sure to unblock any attached scsi devices - */ - lpfc_register_remote_port(vport, ndlp); + if ((ndlp->nlp_fc4_type & NLP_FC4_FCP) || + (ndlp->nlp_DID == Fabric_DID)) { + vport->phba->nport_event_cnt++; + /* + * Tell the fc transport about the port, if we haven't + * already. If we have, and it's a scsi entity, be + */ + lpfc_register_remote_port(vport, ndlp); + } + /* Notify the NVME transport of this new rport. */ + if (ndlp->nlp_fc4_type & NLP_FC4_NVME) { + if (vport->phba->nvmet_support == 0) { + /* Register this rport with the transport. + * Initiators take the NDLP ref count in + * the register. + */ + vport->phba->nport_event_cnt++; + lpfc_nvme_register_port(vport, ndlp); + } else { + /* Just take an NDLP ref count since the + * target does not register rports. + */ + lpfc_nlp_get(ndlp); + } + } } + if ((new_state == NLP_STE_MAPPED_NODE) && (vport->stat_data_enabled)) { /* @@ -4073,12 +4214,13 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, "0x%x\n", ndlp->nlp_DID); } /* - * if we added to Mapped list, but the remote port - * registration failed or assigned a target id outside - * our presentable range - move the node to the - * Unmapped List + * If the node just added to Mapped list was an FCP target, + * but the remote port registration failed or assigned a target + * id outside the presentable range - move the node to the + * Unmapped List. 
*/ - if (new_state == NLP_STE_MAPPED_NODE && + if ((new_state == NLP_STE_MAPPED_NODE) && + (ndlp->nlp_type & NLP_FCP_TARGET) && (!ndlp->rport || ndlp->rport->scsi_target_id == -1 || ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) { @@ -4205,13 +4347,13 @@ lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, { INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp); - init_timer(&ndlp->nlp_delayfunc); - ndlp->nlp_delayfunc.function = lpfc_els_retry_delay; - ndlp->nlp_delayfunc.data = (unsigned long)ndlp; + setup_timer(&ndlp->nlp_delayfunc, lpfc_els_retry_delay, + (unsigned long)ndlp); ndlp->nlp_DID = did; ndlp->vport = vport; ndlp->phba = vport->phba; ndlp->nlp_sid = NLP_NO_SID; + ndlp->nlp_fc4_type = NLP_FC4_NONE; kref_init(&ndlp->kref); NLP_INT_NODE_ACT(ndlp); atomic_set(&ndlp->cmd_pending, 0); @@ -4394,7 +4536,6 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba, struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp) { - struct lpfc_sli *psli = &phba->sli; IOCB_t *icmd = &iocb->iocb; struct lpfc_vport *vport = ndlp->vport; @@ -4413,9 +4554,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba, if (iocb->context1 == (uint8_t *) ndlp) return 1; } - } else if (pring->ringno == psli->extra_ring) { - - } else if (pring->ringno == psli->fcp_ring) { + } else if (pring->ringno == LPFC_FCP_RING) { /* Skip match check if waiting to relogin to FCP target */ if ((ndlp->nlp_type & NLP_FCP_TARGET) && (ndlp->nlp_flag & NLP_DELAY_TMO)) { @@ -4424,12 +4563,58 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba, if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) { return 1; } - } else if (pring->ringno == psli->next_ring) { - } return 0; } +static void +__lpfc_dequeue_nport_iocbs(struct lpfc_hba *phba, + struct lpfc_nodelist *ndlp, struct lpfc_sli_ring *pring, + struct list_head *dequeue_list) +{ + struct lpfc_iocbq *iocb, *next_iocb; + + list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { + /* Check to see if iocb matches the nport */ + if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) + /* match, dequeue */ + list_move_tail(&iocb->list, dequeue_list); + } +} + +static void +lpfc_sli3_dequeue_nport_iocbs(struct lpfc_hba *phba, + struct lpfc_nodelist *ndlp, struct list_head *dequeue_list) +{ + struct lpfc_sli *psli = &phba->sli; + uint32_t i; + + spin_lock_irq(&phba->hbalock); + for (i = 0; i < psli->num_rings; i++) + __lpfc_dequeue_nport_iocbs(phba, ndlp, &psli->sli3_ring[i], + dequeue_list); + spin_unlock_irq(&phba->hbalock); +} + +static void +lpfc_sli4_dequeue_nport_iocbs(struct lpfc_hba *phba, + struct lpfc_nodelist *ndlp, struct list_head *dequeue_list) +{ + struct lpfc_sli_ring *pring; + struct lpfc_queue *qp = NULL; + + spin_lock_irq(&phba->hbalock); + list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { + pring = qp->pring; + if (!pring) + continue; + spin_lock(&pring->ring_lock); + __lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list); + spin_unlock(&pring->ring_lock); + } + spin_unlock_irq(&phba->hbalock); +} + /* * Free resources / clean up outstanding I/Os * associated with nlp_rpi in the LPFC_NODELIST entry. 
@@ -4438,10 +4623,6 @@ static int lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) { LIST_HEAD(completions); - struct lpfc_sli *psli; - struct lpfc_sli_ring *pring; - struct lpfc_iocbq *iocb, *next_iocb; - uint32_t i; lpfc_fabric_abort_nport(ndlp); @@ -4449,29 +4630,11 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) * Everything that matches on txcmplq will be returned * by firmware with a no rpi error. */ - psli = &phba->sli; if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { - /* Now process each ring */ - for (i = 0; i < psli->num_rings; i++) { - pring = &psli->ring[i]; - - spin_lock_irq(&phba->hbalock); - list_for_each_entry_safe(iocb, next_iocb, &pring->txq, - list) { - /* - * Check to see if iocb matches the nport we are - * looking for - */ - if ((lpfc_check_sli_ndlp(phba, pring, iocb, - ndlp))) { - /* It matches, so deque and call compl - with an error */ - list_move_tail(&iocb->list, - &completions); - } - } - spin_unlock_irq(&phba->hbalock); - } + if (phba->sli_rev != LPFC_SLI_REV4) + lpfc_sli3_dequeue_nport_iocbs(phba, ndlp, &completions); + else + lpfc_sli4_dequeue_nport_iocbs(phba, ndlp, &completions); } /* Cancel all the IOCBs from the completions list */ @@ -4950,6 +5113,8 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) return NULL; lpfc_nlp_init(vport, ndlp, did); lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + if (vport->phba->nvmet_support) + return ndlp; spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NPR_2B_DISC; spin_unlock_irq(shost->host_lock); @@ -4958,6 +5123,8 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE); if (!ndlp) return NULL; + if (vport->phba->nvmet_support) + return ndlp; spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NPR_2B_DISC; spin_unlock_irq(shost->host_lock); @@ -4977,6 +5144,8 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) * delay timeout is not needed. 
*/ lpfc_cancel_retry_delay_tmo(vport, ndlp); + if (vport->phba->nvmet_support) + return ndlp; spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NPR_2B_DISC; spin_unlock_irq(shost->host_lock); @@ -4992,6 +5161,8 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) ndlp->nlp_flag & NLP_RCV_PLOGI) return NULL; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + if (vport->phba->nvmet_support) + return ndlp; spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NPR_2B_DISC; spin_unlock_irq(shost->host_lock); @@ -5040,14 +5211,14 @@ lpfc_disc_list_loopmap(struct lpfc_vport *vport) return; } +/* SLI3 only */ void lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport) { LPFC_MBOXQ_t *mbox; struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring]; - struct lpfc_sli_ring *fcp_ring = &psli->ring[psli->fcp_ring]; - struct lpfc_sli_ring *next_ring = &psli->ring[psli->next_ring]; + struct lpfc_sli_ring *extra_ring = &psli->sli3_ring[LPFC_EXTRA_RING]; + struct lpfc_sli_ring *fcp_ring = &psli->sli3_ring[LPFC_FCP_RING]; int rc; /* @@ -5071,7 +5242,6 @@ lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport) lpfc_disc_flush_list(vport); extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT; fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT; - next_ring->flag &= ~LPFC_STOP_IOCB_EVENT; phba->link_state = LPFC_HBA_ERROR; } } @@ -5207,7 +5377,7 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) struct lpfc_sli_ring *pring; psli = &phba->sli; - pring = &psli->ring[LPFC_ELS_RING]; + pring = lpfc_phba_elsring(phba); /* Error matching iocb on txq or txcmplq * First check the txq. @@ -5331,12 +5501,13 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport) switch (vport->port_state) { case LPFC_LOCAL_CFG_LINK: - /* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for - * FAN - */ - /* FAN timeout */ + /* + * port_state is identically LPFC_LOCAL_CFG_LINK while + * waiting for FAN timeout + */ lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY, "0221 FAN timeout\n"); + /* Start discovery by sending FLOGI, clean up old rpis */ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { @@ -5407,8 +5578,8 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport) if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) { /* Try it one more time */ vport->fc_ns_retry++; - rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, - vport->fc_ns_retry, 0); + vport->gidft_inp = 0; + rc = lpfc_issue_gidft(vport); if (rc == 0) break; } @@ -5523,12 +5694,14 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport) if (clrlaerr) { lpfc_disc_flush_list(vport); - psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; - psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; - psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; + if (phba->sli_rev != LPFC_SLI_REV4) { + psli->sli3_ring[(LPFC_EXTRA_RING)].flag &= + ~LPFC_STOP_IOCB_EVENT; + psli->sli3_ring[LPFC_FCP_RING].flag &= + ~LPFC_STOP_IOCB_EVENT; + } vport->port_state = LPFC_VPORT_READY; } - return; } diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 3b970d3706008c..15ca2148415055 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. 
The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * @@ -44,8 +46,6 @@ #define LPFC_FCP_RING 0 /* ring 0 for FCP initiator commands */ #define LPFC_EXTRA_RING 1 /* ring 1 for other protocols */ #define LPFC_ELS_RING 2 /* ring 2 for ELS commands */ -#define LPFC_FCP_NEXT_RING 3 -#define LPFC_FCP_OAS_RING 3 #define SLI2_IOCB_CMD_R0_ENTRIES 172 /* SLI-2 FCP command ring entries */ #define SLI2_IOCB_RSP_R0_ENTRIES 134 /* SLI-2 FCP response ring entries */ @@ -92,8 +92,10 @@ union CtCommandResponse { uint32_t word; }; -#define FC4_FEATURE_INIT 0x2 -#define FC4_FEATURE_TARGET 0x1 +/* FC4 Feature bits for RFF_ID */ +#define FC4_FEATURE_TARGET 0x1 +#define FC4_FEATURE_INIT 0x2 +#define FC4_FEATURE_NVME_DISC 0x4 struct lpfc_sli_ct_request { /* Structure is in Big Endian format */ @@ -117,6 +119,16 @@ struct lpfc_sli_ct_request { uint8_t AreaScope; uint8_t Fc4Type; /* for GID_FT requests */ } gid; + struct gid_ff { + uint8_t Flags; + uint8_t DomainScope; + uint8_t AreaScope; + uint8_t rsvd1; + uint8_t rsvd2; + uint8_t rsvd3; + uint8_t Fc4FBits; + uint8_t Fc4Type; + } gid_ff; struct rft { uint32_t PortId; /* For RFT_ID requests */ @@ -161,6 +173,12 @@ struct lpfc_sli_ct_request { struct gff_acc { uint8_t fbits[128]; } gff_acc; + struct gft { + uint32_t PortId; + } gft; + struct gft_acc { + uint32_t fc4_types[8]; + } gft_acc; #define FCP_TYPE_FEATURE_OFFSET 7 struct rff { uint32_t PortId; @@ -176,8 +194,12 @@ struct lpfc_sli_ct_request { #define SLI_CT_REVISION 1 #define GID_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ sizeof(struct gid)) +#define GIDFF_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ + sizeof(struct gid_ff)) #define GFF_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ sizeof(struct gff)) +#define GFT_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ + sizeof(struct gft)) #define RFT_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ sizeof(struct rft)) #define RFF_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ @@ -273,6 +295,7 @@ struct lpfc_sli_ct_request { #define SLI_CTNS_GNN_IP 0x0153 #define SLI_CTNS_GIPA_IP 0x0156 #define SLI_CTNS_GID_FT 0x0171 +#define SLI_CTNS_GID_FF 0x01F1 #define SLI_CTNS_GID_PT 0x01A1 #define SLI_CTNS_RPN_ID 0x0212 #define SLI_CTNS_RNN_ID 0x0213 @@ -290,15 +313,16 @@ struct lpfc_sli_ct_request { * Port Types */ -#define SLI_CTPT_N_PORT 0x01 -#define SLI_CTPT_NL_PORT 0x02 -#define SLI_CTPT_FNL_PORT 0x03 -#define SLI_CTPT_IP 0x04 -#define SLI_CTPT_FCP 0x08 -#define SLI_CTPT_NX_PORT 0x7F -#define SLI_CTPT_F_PORT 0x81 -#define SLI_CTPT_FL_PORT 0x82 -#define SLI_CTPT_E_PORT 0x84 +#define SLI_CTPT_N_PORT 0x01 +#define SLI_CTPT_NL_PORT 0x02 +#define SLI_CTPT_FNL_PORT 0x03 +#define SLI_CTPT_IP 0x04 +#define SLI_CTPT_FCP 0x08 +#define SLI_CTPT_NVME 0x28 +#define SLI_CTPT_NX_PORT 0x7F +#define SLI_CTPT_F_PORT 0x81 +#define SLI_CTPT_FL_PORT 0x82 +#define SLI_CTPT_E_PORT 0x84 #define SLI_CT_LAST_ENTRY 0x80000000 @@ -339,6 +363,7 @@ struct lpfc_name { uint8_t IEEE[6]; /* FC IEEE address */ } s; uint8_t wwn[8]; + uint64_t name; } u; }; @@ -492,7 +517,15 @@ struct serv_parm { /* Structure is in Big Endian format */ struct class_parms cls2; struct class_parms cls3; struct class_parms cls4; - uint8_t vendorVersion[16]; + 
union { + uint8_t vendorVersion[16]; + struct { + uint32_t vid; +#define LPFC_VV_EMLX_ID 0x454d4c58 /* EMLX */ + uint32_t flags; +#define LPFC_VV_SUPPRESS_RSP 1 + } vv; + } un; }; /* @@ -551,6 +584,7 @@ struct fc_vft_header { #define ELS_CMD_REC 0x13000000 #define ELS_CMD_RDP 0x18000000 #define ELS_CMD_PRLI 0x20100014 +#define ELS_CMD_NVMEPRLI 0x20140018 #define ELS_CMD_PRLO 0x21100014 #define ELS_CMD_PRLO_ACC 0x02100014 #define ELS_CMD_PDISC 0x50000000 @@ -590,6 +624,7 @@ struct fc_vft_header { #define ELS_CMD_REC 0x13 #define ELS_CMD_RDP 0x18 #define ELS_CMD_PRLI 0x14001020 +#define ELS_CMD_NVMEPRLI 0x18001420 #define ELS_CMD_PRLO 0x14001021 #define ELS_CMD_PRLO_ACC 0x14001002 #define ELS_CMD_PDISC 0x50 @@ -686,6 +721,7 @@ typedef struct _PRLI { /* Structure is in Big Endian format */ uint8_t prliType; /* FC Parm Word 0, bit 24:31 */ #define PRLI_FCP_TYPE 0x08 +#define PRLI_NVME_TYPE 0x28 uint8_t word0Reserved1; /* FC Parm Word 0, bit 16:23 */ #ifdef __BIG_ENDIAN_BITFIELD @@ -1245,8 +1281,7 @@ struct fc_rdp_opd_sfp_info { uint8_t vendor_name[16]; uint8_t model_number[16]; uint8_t serial_number[16]; - uint8_t revision[2]; - uint8_t reserved[2]; + uint8_t revision[4]; uint8_t date[8]; }; @@ -1265,14 +1300,14 @@ struct fc_rdp_req_frame { struct fc_rdp_res_frame { - uint32_t reply_sequence; /* FC word0 LS_ACC or LS_RJT */ - uint32_t length; /* FC Word 1 */ - struct fc_rdp_link_service_desc link_service_desc; /* Word 2 -4 */ - struct fc_rdp_sfp_desc sfp_desc; /* Word 5 -9 */ - struct fc_rdp_port_speed_desc portspeed_desc; /* Word 10-12 */ - struct fc_rdp_link_error_status_desc link_error_desc; /* Word 13-21 */ - struct fc_rdp_port_name_desc diag_port_names_desc; /* Word 22-27 */ - struct fc_rdp_port_name_desc attached_port_names_desc;/* Word 28-33 */ + uint32_t reply_sequence; /* FC word0 LS_ACC or LS_RJT */ + uint32_t length; /* FC Word 1 */ + struct fc_rdp_link_service_desc link_service_desc; /* Word 2 -4 */ + struct fc_rdp_sfp_desc sfp_desc; /* Word 5 -9 */ + struct fc_rdp_port_speed_desc portspeed_desc; /* Word 10 -12 */ + struct fc_rdp_link_error_status_desc link_error_desc; /* Word 13 -21 */ + struct fc_rdp_port_name_desc diag_port_names_desc; /* Word 22 -27 */ + struct fc_rdp_port_name_desc attached_port_names_desc;/* Word 28 -33 */ struct fc_fec_rdp_desc fec_desc; /* FC word 34-37*/ struct fc_rdp_bbc_desc bbc_desc; /* FC Word 38-42*/ struct fc_rdp_oed_sfp_desc oed_temp_desc; /* FC Word 43-47*/ @@ -1791,6 +1826,7 @@ typedef struct { /* FireFly BIU registers */ #define MBX_INIT_VFI 0xA3 #define MBX_INIT_VPI 0xA4 #define MBX_ACCESS_VDATA 0xA5 +#define MBX_REG_FCFI_MRQ 0xAF #define MBX_AUTH_PORT 0xF8 #define MBX_SECURITY_MGMT 0xF9 diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 5646699b0516b2..15277705cb6b8c 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2009-2016 Emulex. All rights reserved. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * + * Copyright (C) 2009-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* - * www.emulex.com * + * www.broadcom.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * @@ -108,6 +110,7 @@ struct lpfc_sli_intf { #define LPFC_MAX_MQ_PAGE 8 #define LPFC_MAX_WQ_PAGE_V0 4 #define LPFC_MAX_WQ_PAGE 8 +#define LPFC_MAX_RQ_PAGE 8 #define LPFC_MAX_CQ_PAGE 4 #define LPFC_MAX_EQ_PAGE 8 @@ -198,7 +201,7 @@ struct lpfc_sli_intf { /* Configuration of Interrupts / sec for entire HBA port */ #define LPFC_MIN_IMAX 5000 #define LPFC_MAX_IMAX 5000000 -#define LPFC_DEF_IMAX 50000 +#define LPFC_DEF_IMAX 150000 #define LPFC_MIN_CPU_MAP 0 #define LPFC_MAX_CPU_MAP 2 @@ -348,6 +351,7 @@ struct lpfc_cqe { #define CQE_CODE_RECEIVE 0x4 #define CQE_CODE_XRI_ABORTED 0x5 #define CQE_CODE_RECEIVE_V1 0x9 +#define CQE_CODE_NVME_ERSP 0xd /* * Define mask value for xri_aborted and wcqe completed CQE extended status. @@ -367,6 +371,9 @@ struct lpfc_wcqe_complete { #define lpfc_wcqe_c_hw_status_SHIFT 0 #define lpfc_wcqe_c_hw_status_MASK 0x000000FF #define lpfc_wcqe_c_hw_status_WORD word0 +#define lpfc_wcqe_c_ersp0_SHIFT 0 +#define lpfc_wcqe_c_ersp0_MASK 0x0000FFFF +#define lpfc_wcqe_c_ersp0_WORD word0 uint32_t total_data_placed; uint32_t parameter; #define lpfc_wcqe_c_bg_edir_SHIFT 5 @@ -400,6 +407,9 @@ struct lpfc_wcqe_complete { #define lpfc_wcqe_c_code_SHIFT lpfc_cqe_code_SHIFT #define lpfc_wcqe_c_code_MASK lpfc_cqe_code_MASK #define lpfc_wcqe_c_code_WORD lpfc_cqe_code_WORD +#define lpfc_wcqe_c_sqhead_SHIFT 0 +#define lpfc_wcqe_c_sqhead_MASK 0x0000FFFF +#define lpfc_wcqe_c_sqhead_WORD word3 }; /* completion queue entry for wqe release */ @@ -954,6 +964,7 @@ struct mbox_header { #define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A #define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B #define LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF 0x10 +#define LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET 0x1D #define LPFC_MBOX_OPCODE_FCOE_SET_FCLINK_SETTINGS 0x21 #define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE 0x22 #define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK 0x23 @@ -990,7 +1001,7 @@ struct eq_delay_info { uint32_t phase; uint32_t delay_multi; }; -#define LPFC_MAX_EQ_DELAY 8 +#define LPFC_MAX_EQ_DELAY_EQID_CNT 8 struct sgl_page_pairs { uint32_t sgl_pg0_addr_lo; @@ -1059,7 +1070,7 @@ struct lpfc_mbx_modify_eq_delay { union { struct { uint32_t num_eq; - struct eq_delay_info eq[LPFC_MAX_EQ_DELAY]; + struct eq_delay_info eq[LPFC_MAX_EQ_DELAY_EQID_CNT]; } request; struct { uint32_t word0; @@ -1135,6 +1146,116 @@ struct lpfc_mbx_cq_create { } u; }; +struct lpfc_mbx_cq_create_set { + union lpfc_sli4_cfg_shdr cfg_shdr; + union { + struct { + uint32_t word0; +#define lpfc_mbx_cq_create_set_page_size_SHIFT 16 /* Version 2 Only */ +#define lpfc_mbx_cq_create_set_page_size_MASK 0x000000FF +#define lpfc_mbx_cq_create_set_page_size_WORD word0 +#define lpfc_mbx_cq_create_set_num_pages_SHIFT 0 +#define lpfc_mbx_cq_create_set_num_pages_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_num_pages_WORD word0 + uint32_t word1; +#define lpfc_mbx_cq_create_set_evt_SHIFT 31 +#define lpfc_mbx_cq_create_set_evt_MASK 0x00000001 +#define lpfc_mbx_cq_create_set_evt_WORD word1 +#define lpfc_mbx_cq_create_set_valid_SHIFT 29 +#define lpfc_mbx_cq_create_set_valid_MASK 0x00000001 +#define lpfc_mbx_cq_create_set_valid_WORD word1 +#define lpfc_mbx_cq_create_set_cqe_cnt_SHIFT 27 +#define lpfc_mbx_cq_create_set_cqe_cnt_MASK 0x00000003 +#define lpfc_mbx_cq_create_set_cqe_cnt_WORD word1 +#define lpfc_mbx_cq_create_set_cqe_size_SHIFT 25 +#define lpfc_mbx_cq_create_set_cqe_size_MASK 
0x00000003 +#define lpfc_mbx_cq_create_set_cqe_size_WORD word1 +#define lpfc_mbx_cq_create_set_auto_SHIFT 15 +#define lpfc_mbx_cq_create_set_auto_MASK 0x0000001 +#define lpfc_mbx_cq_create_set_auto_WORD word1 +#define lpfc_mbx_cq_create_set_nodelay_SHIFT 14 +#define lpfc_mbx_cq_create_set_nodelay_MASK 0x00000001 +#define lpfc_mbx_cq_create_set_nodelay_WORD word1 +#define lpfc_mbx_cq_create_set_clswm_SHIFT 12 +#define lpfc_mbx_cq_create_set_clswm_MASK 0x00000003 +#define lpfc_mbx_cq_create_set_clswm_WORD word1 + uint32_t word2; +#define lpfc_mbx_cq_create_set_arm_SHIFT 31 +#define lpfc_mbx_cq_create_set_arm_MASK 0x00000001 +#define lpfc_mbx_cq_create_set_arm_WORD word2 +#define lpfc_mbx_cq_create_set_num_cq_SHIFT 0 +#define lpfc_mbx_cq_create_set_num_cq_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_num_cq_WORD word2 + uint32_t word3; +#define lpfc_mbx_cq_create_set_eq_id1_SHIFT 16 +#define lpfc_mbx_cq_create_set_eq_id1_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id1_WORD word3 +#define lpfc_mbx_cq_create_set_eq_id0_SHIFT 0 +#define lpfc_mbx_cq_create_set_eq_id0_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id0_WORD word3 + uint32_t word4; +#define lpfc_mbx_cq_create_set_eq_id3_SHIFT 16 +#define lpfc_mbx_cq_create_set_eq_id3_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id3_WORD word4 +#define lpfc_mbx_cq_create_set_eq_id2_SHIFT 0 +#define lpfc_mbx_cq_create_set_eq_id2_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id2_WORD word4 + uint32_t word5; +#define lpfc_mbx_cq_create_set_eq_id5_SHIFT 16 +#define lpfc_mbx_cq_create_set_eq_id5_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id5_WORD word5 +#define lpfc_mbx_cq_create_set_eq_id4_SHIFT 0 +#define lpfc_mbx_cq_create_set_eq_id4_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id4_WORD word5 + uint32_t word6; +#define lpfc_mbx_cq_create_set_eq_id7_SHIFT 16 +#define lpfc_mbx_cq_create_set_eq_id7_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id7_WORD word6 +#define lpfc_mbx_cq_create_set_eq_id6_SHIFT 0 +#define lpfc_mbx_cq_create_set_eq_id6_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id6_WORD word6 + uint32_t word7; +#define lpfc_mbx_cq_create_set_eq_id9_SHIFT 16 +#define lpfc_mbx_cq_create_set_eq_id9_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id9_WORD word7 +#define lpfc_mbx_cq_create_set_eq_id8_SHIFT 0 +#define lpfc_mbx_cq_create_set_eq_id8_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id8_WORD word7 + uint32_t word8; +#define lpfc_mbx_cq_create_set_eq_id11_SHIFT 16 +#define lpfc_mbx_cq_create_set_eq_id11_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id11_WORD word8 +#define lpfc_mbx_cq_create_set_eq_id10_SHIFT 0 +#define lpfc_mbx_cq_create_set_eq_id10_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id10_WORD word8 + uint32_t word9; +#define lpfc_mbx_cq_create_set_eq_id13_SHIFT 16 +#define lpfc_mbx_cq_create_set_eq_id13_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id13_WORD word9 +#define lpfc_mbx_cq_create_set_eq_id12_SHIFT 0 +#define lpfc_mbx_cq_create_set_eq_id12_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id12_WORD word9 + uint32_t word10; +#define lpfc_mbx_cq_create_set_eq_id15_SHIFT 16 +#define lpfc_mbx_cq_create_set_eq_id15_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id15_WORD word10 +#define lpfc_mbx_cq_create_set_eq_id14_SHIFT 0 +#define lpfc_mbx_cq_create_set_eq_id14_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_eq_id14_WORD word10 + struct dma_address page[1]; + } request; + struct { + uint32_t word0; +#define lpfc_mbx_cq_create_set_num_alloc_SHIFT 
16 +#define lpfc_mbx_cq_create_set_num_alloc_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_num_alloc_WORD word0 +#define lpfc_mbx_cq_create_set_base_id_SHIFT 0 +#define lpfc_mbx_cq_create_set_base_id_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_set_base_id_WORD word0 + } response; + } u; +}; + struct lpfc_mbx_cq_destroy { struct mbox_header header; union { @@ -1186,6 +1307,7 @@ struct lpfc_mbx_wq_create { #define lpfc_mbx_wq_create_page_size_SHIFT 0 #define lpfc_mbx_wq_create_page_size_MASK 0x000000FF #define lpfc_mbx_wq_create_page_size_WORD word1 +#define LPFC_WQ_PAGE_SIZE_4096 0x1 #define lpfc_mbx_wq_create_wqe_size_SHIFT 8 #define lpfc_mbx_wq_create_wqe_size_MASK 0x0000000F #define lpfc_mbx_wq_create_wqe_size_WORD word1 @@ -1243,10 +1365,10 @@ struct rq_context { #define LPFC_RQ_RING_SIZE_1024 10 /* 1024 entries */ #define LPFC_RQ_RING_SIZE_2048 11 /* 2048 entries */ #define LPFC_RQ_RING_SIZE_4096 12 /* 4096 entries */ -#define lpfc_rq_context_rqe_count_1_SHIFT 16 /* Version 1 Only */ +#define lpfc_rq_context_rqe_count_1_SHIFT 16 /* Version 1-2 Only */ #define lpfc_rq_context_rqe_count_1_MASK 0x0000FFFF #define lpfc_rq_context_rqe_count_1_WORD word0 -#define lpfc_rq_context_rqe_size_SHIFT 8 /* Version 1 Only */ +#define lpfc_rq_context_rqe_size_SHIFT 8 /* Version 1-2 Only */ #define lpfc_rq_context_rqe_size_MASK 0x0000000F #define lpfc_rq_context_rqe_size_WORD word0 #define LPFC_RQE_SIZE_8 2 @@ -1257,7 +1379,14 @@ struct rq_context { #define lpfc_rq_context_page_size_SHIFT 0 /* Version 1 Only */ #define lpfc_rq_context_page_size_MASK 0x000000FF #define lpfc_rq_context_page_size_WORD word0 - uint32_t reserved1; +#define LPFC_RQ_PAGE_SIZE_4096 0x1 + uint32_t word1; +#define lpfc_rq_context_data_size_SHIFT 16 /* Version 2 Only */ +#define lpfc_rq_context_data_size_MASK 0x0000FFFF +#define lpfc_rq_context_data_size_WORD word1 +#define lpfc_rq_context_hdr_size_SHIFT 0 /* Version 2 Only */ +#define lpfc_rq_context_hdr_size_MASK 0x0000FFFF +#define lpfc_rq_context_hdr_size_WORD word1 uint32_t word2; #define lpfc_rq_context_cq_id_SHIFT 16 #define lpfc_rq_context_cq_id_MASK 0x000003FF @@ -1265,6 +1394,9 @@ struct rq_context { #define lpfc_rq_context_buf_size_SHIFT 0 #define lpfc_rq_context_buf_size_MASK 0x0000FFFF #define lpfc_rq_context_buf_size_WORD word2 +#define lpfc_rq_context_base_cq_SHIFT 0 /* Version 2 Only */ +#define lpfc_rq_context_base_cq_MASK 0x0000FFFF +#define lpfc_rq_context_base_cq_WORD word2 uint32_t buffer_size; /* Version 1 Only */ }; @@ -1286,10 +1418,65 @@ struct lpfc_mbx_rq_create { #define lpfc_mbx_rq_create_ulp_num_MASK 0x000000FF #define lpfc_mbx_rq_create_ulp_num_WORD word0 struct rq_context context; - struct dma_address page[LPFC_MAX_WQ_PAGE]; + struct dma_address page[LPFC_MAX_RQ_PAGE]; } request; struct { uint32_t word0; +#define lpfc_mbx_rq_create_q_cnt_v2_SHIFT 16 +#define lpfc_mbx_rq_create_q_cnt_v2_MASK 0x0000FFFF +#define lpfc_mbx_rq_create_q_cnt_v2_WORD word0 +#define lpfc_mbx_rq_create_q_id_SHIFT 0 +#define lpfc_mbx_rq_create_q_id_MASK 0x0000FFFF +#define lpfc_mbx_rq_create_q_id_WORD word0 + uint32_t doorbell_offset; + uint32_t word2; +#define lpfc_mbx_rq_create_bar_set_SHIFT 0 +#define lpfc_mbx_rq_create_bar_set_MASK 0x0000FFFF +#define lpfc_mbx_rq_create_bar_set_WORD word2 +#define lpfc_mbx_rq_create_db_format_SHIFT 16 +#define lpfc_mbx_rq_create_db_format_MASK 0x0000FFFF +#define lpfc_mbx_rq_create_db_format_WORD word2 + } response; + } u; +}; + +struct lpfc_mbx_rq_create_v2 { + union lpfc_sli4_cfg_shdr cfg_shdr; + union { + struct { + uint32_t word0; 
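The _SHIFT/_MASK/_WORD triplets above are not used directly; lpfc_hw4.h's bf_set()/bf_get() token-pasting helpers pack and extract each field from the named word. A minimal stand-alone sketch of that pattern, using a hypothetical field name and values rather than any of the mailbox fields above:

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical field occupying bits 31:16 of word0 */
    #define demo_field_SHIFT 16
    #define demo_field_MASK  0x0000FFFFu
    #define demo_field_WORD  word0

    /* same shape as the driver's bf_set()/bf_get() helpers */
    #define sketch_bf_set(name, ptr, value) \
        ((ptr)->name##_WORD = (((value) & name##_MASK) << name##_SHIFT) | \
         ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT)))
    #define sketch_bf_get(name, ptr) \
        (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

    struct demo_words { uint32_t word0; };

    int main(void)
    {
        struct demo_words w = { 0 };

        sketch_bf_set(demo_field, &w, 0x1234);  /* word0 becomes 0x12340000 */
        printf("word0=0x%08x field=0x%x\n",
               (unsigned int)w.word0,
               (unsigned int)sketch_bf_get(demo_field, &w));
        return 0;
    }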
+#define lpfc_mbx_rq_create_num_pages_SHIFT 0 +#define lpfc_mbx_rq_create_num_pages_MASK 0x0000FFFF +#define lpfc_mbx_rq_create_num_pages_WORD word0 +#define lpfc_mbx_rq_create_rq_cnt_SHIFT 16 +#define lpfc_mbx_rq_create_rq_cnt_MASK 0x000000FF +#define lpfc_mbx_rq_create_rq_cnt_WORD word0 +#define lpfc_mbx_rq_create_dua_SHIFT 16 +#define lpfc_mbx_rq_create_dua_MASK 0x00000001 +#define lpfc_mbx_rq_create_dua_WORD word0 +#define lpfc_mbx_rq_create_bqu_SHIFT 17 +#define lpfc_mbx_rq_create_bqu_MASK 0x00000001 +#define lpfc_mbx_rq_create_bqu_WORD word0 +#define lpfc_mbx_rq_create_ulp_num_SHIFT 24 +#define lpfc_mbx_rq_create_ulp_num_MASK 0x000000FF +#define lpfc_mbx_rq_create_ulp_num_WORD word0 +#define lpfc_mbx_rq_create_dim_SHIFT 29 +#define lpfc_mbx_rq_create_dim_MASK 0x00000001 +#define lpfc_mbx_rq_create_dim_WORD word0 +#define lpfc_mbx_rq_create_dfd_SHIFT 30 +#define lpfc_mbx_rq_create_dfd_MASK 0x00000001 +#define lpfc_mbx_rq_create_dfd_WORD word0 +#define lpfc_mbx_rq_create_dnb_SHIFT 31 +#define lpfc_mbx_rq_create_dnb_MASK 0x00000001 +#define lpfc_mbx_rq_create_dnb_WORD word0 + struct rq_context context; + struct dma_address page[1]; + } request; + struct { + uint32_t word0; +#define lpfc_mbx_rq_create_q_cnt_v2_SHIFT 16 +#define lpfc_mbx_rq_create_q_cnt_v2_MASK 0x0000FFFF +#define lpfc_mbx_rq_create_q_cnt_v2_WORD word0 #define lpfc_mbx_rq_create_q_id_SHIFT 0 #define lpfc_mbx_rq_create_q_id_MASK 0x0000FFFF #define lpfc_mbx_rq_create_q_id_WORD word0 @@ -2203,6 +2390,160 @@ struct lpfc_mbx_reg_fcfi { #define lpfc_reg_fcfi_vlan_tag_WORD word8 }; +struct lpfc_mbx_reg_fcfi_mrq { + uint32_t word1; +#define lpfc_reg_fcfi_mrq_info_index_SHIFT 0 +#define lpfc_reg_fcfi_mrq_info_index_MASK 0x0000FFFF +#define lpfc_reg_fcfi_mrq_info_index_WORD word1 +#define lpfc_reg_fcfi_mrq_fcfi_SHIFT 16 +#define lpfc_reg_fcfi_mrq_fcfi_MASK 0x0000FFFF +#define lpfc_reg_fcfi_mrq_fcfi_WORD word1 + uint32_t word2; +#define lpfc_reg_fcfi_mrq_rq_id1_SHIFT 0 +#define lpfc_reg_fcfi_mrq_rq_id1_MASK 0x0000FFFF +#define lpfc_reg_fcfi_mrq_rq_id1_WORD word2 +#define lpfc_reg_fcfi_mrq_rq_id0_SHIFT 16 +#define lpfc_reg_fcfi_mrq_rq_id0_MASK 0x0000FFFF +#define lpfc_reg_fcfi_mrq_rq_id0_WORD word2 + uint32_t word3; +#define lpfc_reg_fcfi_mrq_rq_id3_SHIFT 0 +#define lpfc_reg_fcfi_mrq_rq_id3_MASK 0x0000FFFF +#define lpfc_reg_fcfi_mrq_rq_id3_WORD word3 +#define lpfc_reg_fcfi_mrq_rq_id2_SHIFT 16 +#define lpfc_reg_fcfi_mrq_rq_id2_MASK 0x0000FFFF +#define lpfc_reg_fcfi_mrq_rq_id2_WORD word3 + uint32_t word4; +#define lpfc_reg_fcfi_mrq_type_match0_SHIFT 24 +#define lpfc_reg_fcfi_mrq_type_match0_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_type_match0_WORD word4 +#define lpfc_reg_fcfi_mrq_type_mask0_SHIFT 16 +#define lpfc_reg_fcfi_mrq_type_mask0_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_type_mask0_WORD word4 +#define lpfc_reg_fcfi_mrq_rctl_match0_SHIFT 8 +#define lpfc_reg_fcfi_mrq_rctl_match0_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_rctl_match0_WORD word4 +#define lpfc_reg_fcfi_mrq_rctl_mask0_SHIFT 0 +#define lpfc_reg_fcfi_mrq_rctl_mask0_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_rctl_mask0_WORD word4 + uint32_t word5; +#define lpfc_reg_fcfi_mrq_type_match1_SHIFT 24 +#define lpfc_reg_fcfi_mrq_type_match1_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_type_match1_WORD word5 +#define lpfc_reg_fcfi_mrq_type_mask1_SHIFT 16 +#define lpfc_reg_fcfi_mrq_type_mask1_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_type_mask1_WORD word5 +#define lpfc_reg_fcfi_mrq_rctl_match1_SHIFT 8 +#define lpfc_reg_fcfi_mrq_rctl_match1_MASK 0x000000FF +#define 
lpfc_reg_fcfi_mrq_rctl_match1_WORD word5 +#define lpfc_reg_fcfi_mrq_rctl_mask1_SHIFT 0 +#define lpfc_reg_fcfi_mrq_rctl_mask1_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_rctl_mask1_WORD word5 + uint32_t word6; +#define lpfc_reg_fcfi_mrq_type_match2_SHIFT 24 +#define lpfc_reg_fcfi_mrq_type_match2_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_type_match2_WORD word6 +#define lpfc_reg_fcfi_mrq_type_mask2_SHIFT 16 +#define lpfc_reg_fcfi_mrq_type_mask2_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_type_mask2_WORD word6 +#define lpfc_reg_fcfi_mrq_rctl_match2_SHIFT 8 +#define lpfc_reg_fcfi_mrq_rctl_match2_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_rctl_match2_WORD word6 +#define lpfc_reg_fcfi_mrq_rctl_mask2_SHIFT 0 +#define lpfc_reg_fcfi_mrq_rctl_mask2_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_rctl_mask2_WORD word6 + uint32_t word7; +#define lpfc_reg_fcfi_mrq_type_match3_SHIFT 24 +#define lpfc_reg_fcfi_mrq_type_match3_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_type_match3_WORD word7 +#define lpfc_reg_fcfi_mrq_type_mask3_SHIFT 16 +#define lpfc_reg_fcfi_mrq_type_mask3_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_type_mask3_WORD word7 +#define lpfc_reg_fcfi_mrq_rctl_match3_SHIFT 8 +#define lpfc_reg_fcfi_mrq_rctl_match3_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_rctl_match3_WORD word7 +#define lpfc_reg_fcfi_mrq_rctl_mask3_SHIFT 0 +#define lpfc_reg_fcfi_mrq_rctl_mask3_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_rctl_mask3_WORD word7 + uint32_t word8; +#define lpfc_reg_fcfi_mrq_ptc7_SHIFT 31 +#define lpfc_reg_fcfi_mrq_ptc7_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_ptc7_WORD word8 +#define lpfc_reg_fcfi_mrq_ptc6_SHIFT 30 +#define lpfc_reg_fcfi_mrq_ptc6_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_ptc6_WORD word8 +#define lpfc_reg_fcfi_mrq_ptc5_SHIFT 29 +#define lpfc_reg_fcfi_mrq_ptc5_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_ptc5_WORD word8 +#define lpfc_reg_fcfi_mrq_ptc4_SHIFT 28 +#define lpfc_reg_fcfi_mrq_ptc4_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_ptc4_WORD word8 +#define lpfc_reg_fcfi_mrq_ptc3_SHIFT 27 +#define lpfc_reg_fcfi_mrq_ptc3_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_ptc3_WORD word8 +#define lpfc_reg_fcfi_mrq_ptc2_SHIFT 26 +#define lpfc_reg_fcfi_mrq_ptc2_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_ptc2_WORD word8 +#define lpfc_reg_fcfi_mrq_ptc1_SHIFT 25 +#define lpfc_reg_fcfi_mrq_ptc1_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_ptc1_WORD word8 +#define lpfc_reg_fcfi_mrq_ptc0_SHIFT 24 +#define lpfc_reg_fcfi_mrq_ptc0_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_ptc0_WORD word8 +#define lpfc_reg_fcfi_mrq_pt7_SHIFT 23 +#define lpfc_reg_fcfi_mrq_pt7_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_pt7_WORD word8 +#define lpfc_reg_fcfi_mrq_pt6_SHIFT 22 +#define lpfc_reg_fcfi_mrq_pt6_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_pt6_WORD word8 +#define lpfc_reg_fcfi_mrq_pt5_SHIFT 21 +#define lpfc_reg_fcfi_mrq_pt5_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_pt5_WORD word8 +#define lpfc_reg_fcfi_mrq_pt4_SHIFT 20 +#define lpfc_reg_fcfi_mrq_pt4_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_pt4_WORD word8 +#define lpfc_reg_fcfi_mrq_pt3_SHIFT 19 +#define lpfc_reg_fcfi_mrq_pt3_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_pt3_WORD word8 +#define lpfc_reg_fcfi_mrq_pt2_SHIFT 18 +#define lpfc_reg_fcfi_mrq_pt2_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_pt2_WORD word8 +#define lpfc_reg_fcfi_mrq_pt1_SHIFT 17 +#define lpfc_reg_fcfi_mrq_pt1_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_pt1_WORD word8 +#define lpfc_reg_fcfi_mrq_pt0_SHIFT 16 +#define lpfc_reg_fcfi_mrq_pt0_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_pt0_WORD word8 +#define 
lpfc_reg_fcfi_mrq_xmv_SHIFT 15 +#define lpfc_reg_fcfi_mrq_xmv_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_xmv_WORD word8 +#define lpfc_reg_fcfi_mrq_mode_SHIFT 13 +#define lpfc_reg_fcfi_mrq_mode_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_mode_WORD word8 +#define lpfc_reg_fcfi_mrq_vv_SHIFT 12 +#define lpfc_reg_fcfi_mrq_vv_MASK 0x00000001 +#define lpfc_reg_fcfi_mrq_vv_WORD word8 +#define lpfc_reg_fcfi_mrq_vlan_tag_SHIFT 0 +#define lpfc_reg_fcfi_mrq_vlan_tag_MASK 0x00000FFF +#define lpfc_reg_fcfi_mrq_vlan_tag_WORD word8 + uint32_t word9; +#define lpfc_reg_fcfi_mrq_policy_SHIFT 12 +#define lpfc_reg_fcfi_mrq_policy_MASK 0x0000000F +#define lpfc_reg_fcfi_mrq_policy_WORD word9 +#define lpfc_reg_fcfi_mrq_filter_SHIFT 8 +#define lpfc_reg_fcfi_mrq_filter_MASK 0x0000000F +#define lpfc_reg_fcfi_mrq_filter_WORD word9 +#define lpfc_reg_fcfi_mrq_npairs_SHIFT 0 +#define lpfc_reg_fcfi_mrq_npairs_MASK 0x000000FF +#define lpfc_reg_fcfi_mrq_npairs_WORD word9 + uint32_t word10; + uint32_t word11; + uint32_t word12; + uint32_t word13; + uint32_t word14; + uint32_t word15; + uint32_t word16; +}; + struct lpfc_mbx_unreg_fcfi { uint32_t word1_rsv; uint32_t word2; @@ -2382,6 +2723,9 @@ struct lpfc_mbx_request_features { #define lpfc_mbx_rq_ftr_rq_perfh_SHIFT 11 #define lpfc_mbx_rq_ftr_rq_perfh_MASK 0x00000001 #define lpfc_mbx_rq_ftr_rq_perfh_WORD word2 +#define lpfc_mbx_rq_ftr_rq_mrqp_SHIFT 16 +#define lpfc_mbx_rq_ftr_rq_mrqp_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_mrqp_WORD word2 uint32_t word3; #define lpfc_mbx_rq_ftr_rsp_iaab_SHIFT 0 #define lpfc_mbx_rq_ftr_rsp_iaab_MASK 0x00000001 @@ -2410,6 +2754,9 @@ struct lpfc_mbx_request_features { #define lpfc_mbx_rq_ftr_rsp_perfh_SHIFT 11 #define lpfc_mbx_rq_ftr_rsp_perfh_MASK 0x00000001 #define lpfc_mbx_rq_ftr_rsp_perfh_WORD word3 +#define lpfc_mbx_rq_ftr_rsp_mrqp_SHIFT 16 +#define lpfc_mbx_rq_ftr_rsp_mrqp_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_mrqp_WORD word3 }; struct lpfc_mbx_supp_pages { @@ -2839,12 +3186,18 @@ struct lpfc_sli4_parameters { #define cfg_mqv_WORD word6 uint32_t word7; uint32_t word8; +#define cfg_wqpcnt_SHIFT 0 +#define cfg_wqpcnt_MASK 0x0000000f +#define cfg_wqpcnt_WORD word8 #define cfg_wqsize_SHIFT 8 #define cfg_wqsize_MASK 0x0000000f #define cfg_wqsize_WORD word8 #define cfg_wqv_SHIFT 14 #define cfg_wqv_MASK 0x00000003 #define cfg_wqv_WORD word8 +#define cfg_wqpsize_SHIFT 16 +#define cfg_wqpsize_MASK 0x000000ff +#define cfg_wqpsize_WORD word8 uint32_t word9; uint32_t word10; #define cfg_rqv_SHIFT 14 @@ -2895,6 +3248,12 @@ struct lpfc_sli4_parameters { #define cfg_mds_diags_SHIFT 1 #define cfg_mds_diags_MASK 0x00000001 #define cfg_mds_diags_WORD word19 +#define cfg_nvme_SHIFT 3 +#define cfg_nvme_MASK 0x00000001 +#define cfg_nvme_WORD word19 +#define cfg_xib_SHIFT 4 +#define cfg_xib_MASK 0x00000001 +#define cfg_xib_WORD word19 }; #define LPFC_SET_UE_RECOVERY 0x10 @@ -3290,14 +3649,17 @@ struct lpfc_mqe { struct lpfc_mbx_del_fcf_tbl_entry del_fcf_entry; struct lpfc_mbx_redisc_fcf_tbl redisc_fcf_tbl; struct lpfc_mbx_reg_fcfi reg_fcfi; + struct lpfc_mbx_reg_fcfi_mrq reg_fcfi_mrq; struct lpfc_mbx_unreg_fcfi unreg_fcfi; struct lpfc_mbx_mq_create mq_create; struct lpfc_mbx_mq_create_ext mq_create_ext; struct lpfc_mbx_eq_create eq_create; struct lpfc_mbx_modify_eq_delay eq_delay; struct lpfc_mbx_cq_create cq_create; + struct lpfc_mbx_cq_create_set cq_create_set; struct lpfc_mbx_wq_create wq_create; struct lpfc_mbx_rq_create rq_create; + struct lpfc_mbx_rq_create_v2 rq_create_v2; struct lpfc_mbx_mq_destroy mq_destroy; struct lpfc_mbx_eq_destroy 
eq_destroy; struct lpfc_mbx_cq_destroy cq_destroy; @@ -3657,6 +4019,9 @@ struct wqe_common { #define wqe_ebde_cnt_SHIFT 0 #define wqe_ebde_cnt_MASK 0x0000000f #define wqe_ebde_cnt_WORD word10 +#define wqe_nvme_SHIFT 4 +#define wqe_nvme_MASK 0x00000001 +#define wqe_nvme_WORD word10 #define wqe_oas_SHIFT 6 #define wqe_oas_MASK 0x00000001 #define wqe_oas_WORD word10 @@ -3717,9 +4082,18 @@ struct wqe_common { #define LPFC_ELS_ID_FDISC 2 #define LPFC_ELS_ID_LOGO 1 #define LPFC_ELS_ID_DEFAULT 0 +#define wqe_irsp_SHIFT 4 +#define wqe_irsp_MASK 0x00000001 +#define wqe_irsp_WORD word11 +#define wqe_sup_SHIFT 6 +#define wqe_sup_MASK 0x00000001 +#define wqe_sup_WORD word11 #define wqe_wqec_SHIFT 7 #define wqe_wqec_MASK 0x00000001 #define wqe_wqec_WORD word11 +#define wqe_irsplen_SHIFT 8 +#define wqe_irsplen_MASK 0x0000000f +#define wqe_irsplen_WORD word11 #define wqe_cqid_SHIFT 16 #define wqe_cqid_MASK 0x0000ffff #define wqe_cqid_WORD word11 @@ -3897,6 +4271,50 @@ struct gen_req64_wqe { uint32_t max_response_payload_len; }; +/* Define NVME PRLI request to fabric. NVME is a + * fabric-only protocol. + * Updated to red-lined v1.08 on Sept 16, 2016 + */ +struct lpfc_nvme_prli { + uint32_t word1; + /* The Response Code is defined in the FCP PRLI lpfc_hw.h */ +#define prli_acc_rsp_code_SHIFT 8 +#define prli_acc_rsp_code_MASK 0x0000000f +#define prli_acc_rsp_code_WORD word1 +#define prli_estabImagePair_SHIFT 13 +#define prli_estabImagePair_MASK 0x00000001 +#define prli_estabImagePair_WORD word1 +#define prli_type_code_ext_SHIFT 16 +#define prli_type_code_ext_MASK 0x000000ff +#define prli_type_code_ext_WORD word1 +#define prli_type_code_SHIFT 24 +#define prli_type_code_MASK 0x000000ff +#define prli_type_code_WORD word1 + uint32_t word_rsvd2; + uint32_t word_rsvd3; + uint32_t word4; +#define prli_fba_SHIFT 0 +#define prli_fba_MASK 0x00000001 +#define prli_fba_WORD word4 +#define prli_disc_SHIFT 3 +#define prli_disc_MASK 0x00000001 +#define prli_disc_WORD word4 +#define prli_tgt_SHIFT 4 +#define prli_tgt_MASK 0x00000001 +#define prli_tgt_WORD word4 +#define prli_init_SHIFT 5 +#define prli_init_MASK 0x00000001 +#define prli_init_WORD word4 +#define prli_recov_SHIFT 8 +#define prli_recov_MASK 0x00000001 +#define prli_recov_WORD word4 + uint32_t word5; +#define prli_fb_sz_SHIFT 0 +#define prli_fb_sz_MASK 0x0000ffff +#define prli_fb_sz_WORD word5 +#define LPFC_NVMET_FB_SZ_MAX 65536 /* Driver target mode only. 
*/ +}; + struct create_xri_wqe { uint32_t rsrvd[5]; /* words 0-4 */ struct wqe_did wqe_dest; /* word 5 */ @@ -3969,6 +4387,35 @@ struct fcp_icmnd64_wqe { uint32_t rsvd_12_15[4]; /* word 12-15 */ }; +struct fcp_trsp64_wqe { + struct ulp_bde64 bde; + uint32_t response_len; + uint32_t rsvd_4_5[2]; + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t rsvd_12_15[4]; /* word 12-15 */ +}; + +struct fcp_tsend64_wqe { + struct ulp_bde64 bde; + uint32_t payload_offset_len; + uint32_t relative_offset; + uint32_t reserved; + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t fcp_data_len; /* word 12 */ + uint32_t rsvd_13_15[3]; /* word 13-15 */ +}; + +struct fcp_treceive64_wqe { + struct ulp_bde64 bde; + uint32_t payload_offset_len; + uint32_t relative_offset; + uint32_t reserved; + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t fcp_data_len; /* word 12 */ + uint32_t rsvd_13_15[3]; /* word 13-15 */ +}; +#define TXRDY_PAYLOAD_LEN 12 + union lpfc_wqe { uint32_t words[16]; @@ -3984,6 +4431,10 @@ union lpfc_wqe { struct xmit_els_rsp64_wqe xmit_els_rsp; struct els_request64_wqe els_req; struct gen_req64_wqe gen_req; + struct fcp_trsp64_wqe fcp_trsp; + struct fcp_tsend64_wqe fcp_tsend; + struct fcp_treceive64_wqe fcp_treceive; + }; union lpfc_wqe128 { @@ -3992,6 +4443,9 @@ union lpfc_wqe128 { struct fcp_icmnd64_wqe fcp_icmd; struct fcp_iread64_wqe fcp_iread; struct fcp_iwrite64_wqe fcp_iwrite; + struct fcp_trsp64_wqe fcp_trsp; + struct fcp_tsend64_wqe fcp_tsend; + struct fcp_treceive64_wqe fcp_treceive; struct xmit_seq64_wqe xmit_sequence; struct gen_req64_wqe gen_req; }; @@ -4015,11 +4469,39 @@ struct lpfc_grp_hdr { uint8_t revision[32]; }; -#define FCP_COMMAND 0x0 -#define FCP_COMMAND_DATA_OUT 0x1 -#define ELS_COMMAND_NON_FIP 0xC -#define ELS_COMMAND_FIP 0xD -#define OTHER_COMMAND 0x8 +/* Defines for WQE command type */ +#define FCP_COMMAND 0x0 +#define NVME_READ_CMD 0x0 +#define FCP_COMMAND_DATA_OUT 0x1 +#define NVME_WRITE_CMD 0x1 +#define FCP_COMMAND_TRECEIVE 0x2 +#define FCP_COMMAND_TRSP 0x3 +#define FCP_COMMAND_TSEND 0x7 +#define OTHER_COMMAND 0x8 +#define ELS_COMMAND_NON_FIP 0xC +#define ELS_COMMAND_FIP 0xD + +#define LPFC_NVME_EMBED_CMD 0x0 +#define LPFC_NVME_EMBED_WRITE 0x1 +#define LPFC_NVME_EMBED_READ 0x2 + +/* WQE Commands */ +#define CMD_ABORT_XRI_WQE 0x0F +#define CMD_XMIT_SEQUENCE64_WQE 0x82 +#define CMD_XMIT_BCAST64_WQE 0x84 +#define CMD_ELS_REQUEST64_WQE 0x8A +#define CMD_XMIT_ELS_RSP64_WQE 0x95 +#define CMD_XMIT_BLS_RSP64_WQE 0x97 +#define CMD_FCP_IWRITE64_WQE 0x98 +#define CMD_FCP_IREAD64_WQE 0x9A +#define CMD_FCP_ICMND64_WQE 0x9C +#define CMD_FCP_TSEND64_WQE 0x9F +#define CMD_FCP_TRECEIVE64_WQE 0xA1 +#define CMD_FCP_TRSP64_WQE 0xA3 +#define CMD_GEN_REQUEST64_WQE 0xC2 + +#define CMD_WQE_MASK 0xff + #define LPFC_FW_DUMP 1 #define LPFC_FW_RESET 2 diff --git a/drivers/scsi/lpfc/lpfc_ids.h b/drivers/scsi/lpfc/lpfc_ids.h index 5733feafe25ffb..0ba3733eb36d07 100644 --- a/drivers/scsi/lpfc/lpfc_ids.h +++ b/drivers/scsi/lpfc/lpfc_ids.h @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* - * www.emulex.com * + * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 64717c171b1557..6cc561b042118e 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * @@ -34,6 +36,7 @@ #include #include #include +#include #include #include @@ -46,8 +49,9 @@ #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" -#include "lpfc_scsi.h" #include "lpfc.h" +#include "lpfc_scsi.h" +#include "lpfc_nvme.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" #include "lpfc_vport.h" @@ -71,6 +75,7 @@ static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); static int lpfc_setup_endian_order(struct lpfc_hba *); static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); static void lpfc_free_els_sgl_list(struct lpfc_hba *); +static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *); static void lpfc_init_sgl_list(struct lpfc_hba *); static int lpfc_init_active_sgl_array(struct lpfc_hba *); static void lpfc_free_active_sgl(struct lpfc_hba *); @@ -86,6 +91,7 @@ static void lpfc_sli4_oas_verify(struct lpfc_hba *phba); static struct scsi_transport_template *lpfc_transport_template = NULL; static struct scsi_transport_template *lpfc_vport_transport_template = NULL; static DEFINE_IDR(lpfc_hba_index); +#define LPFC_NVMET_BUF_POST 254 /** * lpfc_config_port_prep - Perform lpfc initialization prior to config port @@ -499,12 +505,10 @@ lpfc_config_port_post(struct lpfc_hba *phba) phba->link_state = LPFC_LINK_DOWN; /* Only process IOCBs on ELS ring till hba_state is READY */ - if (psli->ring[psli->extra_ring].sli.sli3.cmdringaddr) - psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT; - if (psli->ring[psli->fcp_ring].sli.sli3.cmdringaddr) - psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT; - if (psli->ring[psli->next_ring].sli.sli3.cmdringaddr) - psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT; + if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr) + psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT; + if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr) + psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT; /* Post receive buffers for desired rings */ if (phba->sli_rev != 3) @@ -892,7 +896,7 @@ lpfc_hba_free_post_buf(struct lpfc_hba *phba) lpfc_sli_hbqbuf_free_all(phba); else { /* Cleanup preposted buffers on the ELS ring */ - pring = &psli->ring[LPFC_ELS_RING]; + pring = &psli->sli3_ring[LPFC_ELS_RING]; spin_lock_irq(&phba->hbalock); list_splice_init(&pring->postbufq, &buflist); spin_unlock_irq(&phba->hbalock); @@ -925,32 +929,43 @@ static void lpfc_hba_clean_txcmplq(struct lpfc_hba *phba) { struct lpfc_sli *psli = &phba->sli; + struct lpfc_queue *qp = NULL; struct lpfc_sli_ring *pring; LIST_HEAD(completions); int i; - for (i = 0; i < psli->num_rings; i++) { - pring = 
&psli->ring[i]; - if (phba->sli_rev >= LPFC_SLI_REV4) - spin_lock_irq(&pring->ring_lock); - else + if (phba->sli_rev != LPFC_SLI_REV4) { + for (i = 0; i < psli->num_rings; i++) { + pring = &psli->sli3_ring[i]; spin_lock_irq(&phba->hbalock); - /* At this point in time the HBA is either reset or DOA. Either - * way, nothing should be on txcmplq as it will NEVER complete. - */ - list_splice_init(&pring->txcmplq, &completions); - pring->txcmplq_cnt = 0; - - if (phba->sli_rev >= LPFC_SLI_REV4) - spin_unlock_irq(&pring->ring_lock); - else + /* At this point in time the HBA is either reset or DOA + * Nothing should be on txcmplq as it will + * NEVER complete. + */ + list_splice_init(&pring->txcmplq, &completions); + pring->txcmplq_cnt = 0; spin_unlock_irq(&phba->hbalock); + lpfc_sli_abort_iocb_ring(phba, pring); + } /* Cancel all the IOCBs from the completions list */ - lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, - IOERR_SLI_ABORTED); + lpfc_sli_cancel_iocbs(phba, &completions, + IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); + return; + } + list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { + pring = qp->pring; + if (!pring) + continue; + spin_lock_irq(&pring->ring_lock); + list_splice_init(&pring->txcmplq, &completions); + pring->txcmplq_cnt = 0; + spin_unlock_irq(&pring->ring_lock); lpfc_sli_abort_iocb_ring(phba, pring); } + /* Cancel all the IOCBs from the completions list */ + lpfc_sli_cancel_iocbs(phba, &completions, + IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); } /** @@ -989,43 +1004,58 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba) { struct lpfc_scsi_buf *psb, *psb_next; LIST_HEAD(aborts); + LIST_HEAD(nvme_aborts); unsigned long iflag = 0; struct lpfc_sglq *sglq_entry = NULL; - struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *pring; - lpfc_hba_free_post_buf(phba); + + lpfc_sli_hbqbuf_free_all(phba); lpfc_hba_clean_txcmplq(phba); - pring = &psli->ring[LPFC_ELS_RING]; /* At this point in time the HBA is either reset or DOA. Either * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be - * on the lpfc_sgl_list so that it can either be freed if the + * on the lpfc_els_sgl_list so that it can either be freed if the * driver is unloading or reposted if the driver is restarting * the port. */ - spin_lock_irq(&phba->hbalock); /* required for lpfc_sgl_list and */ + spin_lock_irq(&phba->hbalock); /* required for lpfc_els_sgl_list and */ /* scsl_buf_list */ - /* abts_sgl_list_lock required because worker thread uses this + /* sgl_list_lock required because worker thread uses this * list. */ - spin_lock(&phba->sli4_hba.abts_sgl_list_lock); + spin_lock(&phba->sli4_hba.sgl_list_lock); list_for_each_entry(sglq_entry, &phba->sli4_hba.lpfc_abts_els_sgl_list, list) sglq_entry->state = SGL_FREED; + list_for_each_entry(sglq_entry, + &phba->sli4_hba.lpfc_abts_nvmet_sgl_list, list) + sglq_entry->state = SGL_FREED; - spin_lock(&pring->ring_lock); list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, - &phba->sli4_hba.lpfc_sgl_list); - spin_unlock(&pring->ring_lock); - spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); + &phba->sli4_hba.lpfc_els_sgl_list); + + if (phba->sli4_hba.nvme_wq) + list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_sgl_list, + &phba->sli4_hba.lpfc_nvmet_sgl_list); + + spin_unlock(&phba->sli4_hba.sgl_list_lock); /* abts_scsi_buf_list_lock required because worker thread uses this * list. 
*/ - spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); - list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list, - &aborts); - spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock); + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { + spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); + list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list, + &aborts); + spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock); + } + + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); + list_splice_init(&phba->sli4_hba.lpfc_abts_nvme_buf_list, + &nvme_aborts); + spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); + } + spin_unlock_irq(&phba->hbalock); list_for_each_entry_safe(psb, psb_next, &aborts, list) { @@ -1036,6 +1066,14 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba) list_splice(&aborts, &phba->lpfc_scsi_buf_list_put); spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); + list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) { + psb->pCmd = NULL; + psb->status = IOSTAT_SUCCESS; + } + spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag); + list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put); + spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag); + lpfc_sli4_free_sp_events(phba); return 0; } @@ -1829,7 +1867,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba) * @phba: pointer to lpfc hba data structure. * * This routine is invoked from the worker thread to handle a HBA host - * attention link event. + * attention link event. SLI3 only. **/ void lpfc_handle_latt(struct lpfc_hba *phba) @@ -1867,7 +1905,7 @@ lpfc_handle_latt(struct lpfc_hba *phba) pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; pmb->vport = vport; /* Block ELS IOCBs until we have processed this mbox command */ - phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; + phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { rc = 4; @@ -1883,7 +1921,7 @@ lpfc_handle_latt(struct lpfc_hba *phba) return; lpfc_handle_latt_free_mbuf: - phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; + phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; lpfc_mbuf_free(phba, mp->virt, mp->phys); lpfc_handle_latt_free_mp: kfree(mp); @@ -2441,7 +2479,7 @@ lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) * * This routine posts initial receive IOCB buffers to the ELS ring. The * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is - * set to 64 IOCBs. + * set to 64 IOCBs. SLI3 only. * * Return codes * 0 - success (currently always success) @@ -2452,7 +2490,7 @@ lpfc_post_rcv_buf(struct lpfc_hba *phba) struct lpfc_sli *psli = &phba->sli; /* Ring 0, ELS / CT buffers */ - lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0); + lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0); /* Ring 2 - FCP no buffers needed */ return 0; @@ -2640,6 +2678,13 @@ lpfc_cleanup(struct lpfc_vport *vport) lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RECOVERY); + if (ndlp->nlp_fc4_type & NLP_FC4_NVME) { + /* Remove the NVME transport reference now and + * continue to remove the node. 
+ */ + lpfc_nlp_put(ndlp); + } + lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); } @@ -2894,11 +2939,6 @@ lpfc_online(struct lpfc_hba *phba) lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); - if (!lpfc_sli_queue_setup(phba)) { - lpfc_unblock_mgmt_io(phba); - return 1; - } - if (phba->sli_rev == LPFC_SLI_REV4) { if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ lpfc_unblock_mgmt_io(phba); @@ -2909,6 +2949,7 @@ lpfc_online(struct lpfc_hba *phba) vpis_cleared = true; spin_unlock_irq(&phba->hbalock); } else { + lpfc_sli_queue_init(phba); if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ lpfc_unblock_mgmt_io(phba); return 1; @@ -3098,7 +3139,9 @@ static void lpfc_scsi_free(struct lpfc_hba *phba) { struct lpfc_scsi_buf *sb, *sb_next; - struct lpfc_iocbq *io, *io_next; + + if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) + return; spin_lock_irq(&phba->hbalock); @@ -3108,7 +3151,7 @@ lpfc_scsi_free(struct lpfc_hba *phba) list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put, list) { list_del(&sb->list); - pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, + pci_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, sb->dma_handle); kfree(sb); phba->total_scsi_bufs--; @@ -3119,25 +3162,58 @@ lpfc_scsi_free(struct lpfc_hba *phba) list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get, list) { list_del(&sb->list); - pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, + pci_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, sb->dma_handle); kfree(sb); phba->total_scsi_bufs--; } spin_unlock(&phba->scsi_buf_list_get_lock); + spin_unlock_irq(&phba->hbalock); +} +/** + * lpfc_nvme_free - Free all the NVME buffers and IOCBs from driver lists + * @phba: pointer to lpfc hba data structure. + * + * This routine is to free all the NVME buffers and IOCBs from the driver + * list back to kernel. It is called from lpfc_pci_remove_one to free + * the internal resources before the device is removed from the system. + **/ +static void +lpfc_nvme_free(struct lpfc_hba *phba) +{ + struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next; - /* Release all the lpfc_iocbq entries maintained by this host. */ - list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { - list_del(&io->list); - kfree(io); - phba->total_iocbq_bufs--; - } + if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) + return; + + spin_lock_irq(&phba->hbalock); + /* Release all the lpfc_nvme_bufs maintained by this host. */ + spin_lock(&phba->nvme_buf_list_put_lock); + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, + &phba->lpfc_nvme_buf_list_put, list) { + list_del(&lpfc_ncmd->list); + pci_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data, + lpfc_ncmd->dma_handle); + kfree(lpfc_ncmd); + phba->total_nvme_bufs--; + } + spin_unlock(&phba->nvme_buf_list_put_lock); + + spin_lock(&phba->nvme_buf_list_get_lock); + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, + &phba->lpfc_nvme_buf_list_get, list) { + list_del(&lpfc_ncmd->list); + pci_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data, + lpfc_ncmd->dma_handle); + kfree(lpfc_ncmd); + phba->total_nvme_bufs--; + } + spin_unlock(&phba->nvme_buf_list_get_lock); spin_unlock_irq(&phba->hbalock); } - /** - * lpfc_sli4_xri_sgl_update - update xri-sgl sizing and mapping + * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping * @phba: pointer to lpfc hba data structure. 
* * This routine first calculates the sizes of the current els and allocated @@ -3149,20 +3225,18 @@ lpfc_scsi_free(struct lpfc_hba *phba) * 0 - successful (for now, it always returns 0) **/ int -lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) +lpfc_sli4_els_sgl_update(struct lpfc_hba *phba) { struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; - struct lpfc_scsi_buf *psb = NULL, *psb_next = NULL; - uint16_t i, lxri, xri_cnt, els_xri_cnt, scsi_xri_cnt; + uint16_t i, lxri, xri_cnt, els_xri_cnt; LIST_HEAD(els_sgl_list); - LIST_HEAD(scsi_sgl_list); int rc; - struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; /* * update on pci function's els xri-sgl list */ els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); + if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) { /* els xri-sgl expanded */ xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt; @@ -3198,9 +3272,10 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) list_add_tail(&sglq_entry->list, &els_sgl_list); } spin_lock_irq(&phba->hbalock); - spin_lock(&pring->ring_lock); - list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list); - spin_unlock(&pring->ring_lock); + spin_lock(&phba->sli4_hba.sgl_list_lock); + list_splice_init(&els_sgl_list, + &phba->sli4_hba.lpfc_els_sgl_list); + spin_unlock(&phba->sli4_hba.sgl_list_lock); spin_unlock_irq(&phba->hbalock); } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) { /* els xri-sgl shrinked */ @@ -3210,24 +3285,22 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) "%d to %d\n", phba->sli4_hba.els_xri_cnt, els_xri_cnt); spin_lock_irq(&phba->hbalock); - spin_lock(&pring->ring_lock); - list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &els_sgl_list); - spin_unlock(&pring->ring_lock); - spin_unlock_irq(&phba->hbalock); + spin_lock(&phba->sli4_hba.sgl_list_lock); + list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, + &els_sgl_list); /* release extra els sgls from list */ for (i = 0; i < xri_cnt; i++) { list_remove_head(&els_sgl_list, sglq_entry, struct lpfc_sglq, list); if (sglq_entry) { - lpfc_mbuf_free(phba, sglq_entry->virt, - sglq_entry->phys); + __lpfc_mbuf_free(phba, sglq_entry->virt, + sglq_entry->phys); kfree(sglq_entry); } } - spin_lock_irq(&phba->hbalock); - spin_lock(&pring->ring_lock); - list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list); - spin_unlock(&pring->ring_lock); + list_splice_init(&els_sgl_list, + &phba->sli4_hba.lpfc_els_sgl_list); + spin_unlock(&phba->sli4_hba.sgl_list_lock); spin_unlock_irq(&phba->hbalock); } else lpfc_printf_log(phba, KERN_INFO, LOG_SLI, @@ -3239,7 +3312,7 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) sglq_entry = NULL; sglq_entry_next = NULL; list_for_each_entry_safe(sglq_entry, sglq_entry_next, - &phba->sli4_hba.lpfc_sgl_list, list) { + &phba->sli4_hba.lpfc_els_sgl_list, list) { lxri = lpfc_sli4_next_xritag(phba); if (lxri == NO_XRI) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, @@ -3251,21 +3324,182 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) sglq_entry->sli4_lxritag = lxri; sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; } + return 0; + +out_free_mem: + lpfc_free_els_sgl_list(phba); + return rc; +} + +/** + * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping + * @phba: pointer to lpfc hba data structure. + * + * This routine first calculates the sizes of the current els and allocated + * scsi sgl lists, and then goes through all sgls to updates the physical + * XRIs assigned due to port function reset. During port initialization, the + * current els and allocated scsi sgl lists are 0s. 
+ * + * Return codes + * 0 - successful (for now, it always returns 0) + **/ +int +lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba) +{ + struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; + uint16_t i, lxri, xri_cnt, els_xri_cnt; + uint16_t nvmet_xri_cnt, tot_cnt; + LIST_HEAD(nvmet_sgl_list); + int rc; /* - * update on pci function's allocated scsi xri-sgl list + * update on pci function's nvmet xri-sgl list + */ + els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); + nvmet_xri_cnt = phba->cfg_nvmet_mrq * phba->cfg_nvmet_mrq_post; + tot_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; + if (nvmet_xri_cnt > tot_cnt) { + phba->cfg_nvmet_mrq_post = tot_cnt / phba->cfg_nvmet_mrq; + nvmet_xri_cnt = phba->cfg_nvmet_mrq * phba->cfg_nvmet_mrq_post; + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "6301 NVMET post-sgl count changed to %d\n", + phba->cfg_nvmet_mrq_post); + } + + if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) { + /* els xri-sgl expanded */ + xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt; + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "6302 NVMET xri-sgl cnt grew from %d to %d\n", + phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt); + /* allocate the additional nvmet sgls */ + for (i = 0; i < xri_cnt; i++) { + sglq_entry = kzalloc(sizeof(struct lpfc_sglq), + GFP_KERNEL); + if (sglq_entry == NULL) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "6303 Failure to allocate an " + "NVMET sgl entry:%d\n", i); + rc = -ENOMEM; + goto out_free_mem; + } + sglq_entry->buff_type = NVMET_BUFF_TYPE; + sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0, + &sglq_entry->phys); + if (sglq_entry->virt == NULL) { + kfree(sglq_entry); + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "6304 Failure to allocate an " + "NVMET buf:%d\n", i); + rc = -ENOMEM; + goto out_free_mem; + } + sglq_entry->sgl = sglq_entry->virt; + memset(sglq_entry->sgl, 0, + phba->cfg_sg_dma_buf_size); + sglq_entry->state = SGL_FREED; + list_add_tail(&sglq_entry->list, &nvmet_sgl_list); + } + spin_lock_irq(&phba->hbalock); + spin_lock(&phba->sli4_hba.sgl_list_lock); + list_splice_init(&nvmet_sgl_list, + &phba->sli4_hba.lpfc_nvmet_sgl_list); + spin_unlock(&phba->sli4_hba.sgl_list_lock); + spin_unlock_irq(&phba->hbalock); + } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) { + /* nvmet xri-sgl shrunk */ + xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt; + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "6305 NVMET xri-sgl count decreased from " + "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt, + nvmet_xri_cnt); + spin_lock_irq(&phba->hbalock); + spin_lock(&phba->sli4_hba.sgl_list_lock); + list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, + &nvmet_sgl_list); + /* release extra nvmet sgls from list */ + for (i = 0; i < xri_cnt; i++) { + list_remove_head(&nvmet_sgl_list, + sglq_entry, struct lpfc_sglq, list); + if (sglq_entry) { + lpfc_nvmet_buf_free(phba, sglq_entry->virt, + sglq_entry->phys); + kfree(sglq_entry); + } + } + list_splice_init(&nvmet_sgl_list, + &phba->sli4_hba.lpfc_nvmet_sgl_list); + spin_unlock(&phba->sli4_hba.sgl_list_lock); + spin_unlock_irq(&phba->hbalock); + } else + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "6306 NVMET xri-sgl count unchanged: %d\n", + nvmet_xri_cnt); + phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt; + + /* update xris to nvmet sgls on the list */ + sglq_entry = NULL; + sglq_entry_next = NULL; + list_for_each_entry_safe(sglq_entry, sglq_entry_next, + &phba->sli4_hba.lpfc_nvmet_sgl_list, list) { + lxri = lpfc_sli4_next_xritag(phba); + if (lxri == NO_XRI) { + lpfc_printf_log(phba, 
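The sizing logic at the top of lpfc_sli4_nvmet_sgl_update() above budgets cfg_nvmet_mrq * cfg_nvmet_mrq_post XRIs for the NVMET receive queues and, when that exceeds what remains after the ELS allocation, scales cfg_nvmet_mrq_post down. A worked sketch of that arithmetic with made-up numbers (not driver defaults):

    #include <stdio.h>

    int main(void)
    {
        unsigned int max_xri = 1024;        /* hypothetical max_cfg_param.max_xri */
        unsigned int els_xri_cnt = 256;     /* hypothetical ELS IOCB reservation */
        unsigned int nvmet_mrq = 4;         /* stands in for cfg_nvmet_mrq */
        unsigned int nvmet_mrq_post = 512;  /* stands in for cfg_nvmet_mrq_post */

        unsigned int tot_cnt = max_xri - els_xri_cnt;            /* 768 XRIs left */
        unsigned int nvmet_xri_cnt = nvmet_mrq * nvmet_mrq_post; /* 2048 requested */

        if (nvmet_xri_cnt > tot_cnt) {
            /* same clamp as the driver: share the remainder across the MRQs */
            nvmet_mrq_post = tot_cnt / nvmet_mrq;                 /* 192 */
            nvmet_xri_cnt = nvmet_mrq * nvmet_mrq_post;           /* 768 */
        }
        printf("post %u sgls per mrq, %u total\n", nvmet_mrq_post, nvmet_xri_cnt);
        return 0;
    }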
KERN_ERR, LOG_SLI, + "6307 Failed to allocate xri for " + "NVMET sgl\n"); + rc = -ENOMEM; + goto out_free_mem; + } + sglq_entry->sli4_lxritag = lxri; + sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; + } + return 0; + +out_free_mem: + lpfc_free_nvmet_sgl_list(phba); + return rc; +} + +/** + * lpfc_sli4_scsi_sgl_update - update xri-sgl sizing and mapping + * @phba: pointer to lpfc hba data structure. + * + * This routine first calculates the sizes of the current els and allocated + * scsi sgl lists, and then goes through all sgls to updates the physical + * XRIs assigned due to port function reset. During port initialization, the + * current els and allocated scsi sgl lists are 0s. + * + * Return codes + * 0 - successful (for now, it always returns 0) + **/ +int +lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba) +{ + struct lpfc_scsi_buf *psb, *psb_next; + uint16_t i, lxri, els_xri_cnt, scsi_xri_cnt; + LIST_HEAD(scsi_sgl_list); + int rc; + + /* + * update on pci function's els xri-sgl list */ + els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); phba->total_scsi_bufs = 0; + /* + * update on pci function's allocated scsi xri-sgl list + */ /* maximum number of xris available for scsi buffers */ phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; - lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "2401 Current allocated SCSI xri-sgl count:%d, " - "maximum SCSI xri count:%d\n", - phba->sli4_hba.scsi_xri_cnt, - phba->sli4_hba.scsi_xri_max); + if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) + return 0; + + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) + phba->sli4_hba.scsi_xri_max = /* Split them up */ + (phba->sli4_hba.scsi_xri_max * + phba->cfg_xri_split) / 100; spin_lock_irq(&phba->scsi_buf_list_get_lock); spin_lock(&phba->scsi_buf_list_put_lock); @@ -3283,7 +3517,7 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) list_remove_head(&scsi_sgl_list, psb, struct lpfc_scsi_buf, list); if (psb) { - pci_pool_free(phba->lpfc_scsi_dma_buf_pool, + pci_pool_free(phba->lpfc_sg_dma_buf_pool, psb->data, psb->dma_handle); kfree(psb); } @@ -3314,15 +3548,150 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); spin_unlock(&phba->scsi_buf_list_put_lock); spin_unlock_irq(&phba->scsi_buf_list_get_lock); - return 0; out_free_mem: - lpfc_free_els_sgl_list(phba); lpfc_scsi_free(phba); return rc; } +static uint64_t +lpfc_get_wwpn(struct lpfc_hba *phba) +{ + uint64_t wwn; + int rc; + LPFC_MBOXQ_t *mboxq; + MAILBOX_t *mb; + + + mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, + GFP_KERNEL); + if (!mboxq) + return (uint64_t)-1; + + /* First get WWN of HBA instance */ + lpfc_read_nv(phba, mboxq); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "6019 Mailbox failed , mbxCmd x%x " + "READ_NV, mbxStatus x%x\n", + bf_get(lpfc_mqe_command, &mboxq->u.mqe), + bf_get(lpfc_mqe_status, &mboxq->u.mqe)); + mempool_free(mboxq, phba->mbox_mem_pool); + return (uint64_t) -1; + } + mb = &mboxq->u.mb; + memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t)); + /* wwn is WWPN of HBA instance */ + mempool_free(mboxq, phba->mbox_mem_pool); + if (phba->sli_rev == LPFC_SLI_REV4) + return be64_to_cpu(wwn); + else + return (((wwn & 0xffffffff00000000) >> 32) | + ((wwn & 0x00000000ffffffff) << 32)); + +} + +/** + * lpfc_sli4_nvme_sgl_update - update xri-sgl sizing and mapping + * @phba: pointer to lpfc hba data structure. 
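lpfc_get_wwpn() above returns the READ_NV port name via be64_to_cpu() on SLI-4 ports, and with the two 32-bit halves swapped on older SLI revisions. A stand-alone illustration of that swap; the sample value is arbitrary:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* same expression as the non-SLI4 branch of lpfc_get_wwpn() */
    static uint64_t swap_words(uint64_t wwn)
    {
        return ((wwn & 0xffffffff00000000ULL) >> 32) |
               ((wwn & 0x00000000ffffffffULL) << 32);
    }

    int main(void)
    {
        uint64_t wwn = 0x1000000000abcdefULL;  /* arbitrary example value */

        printf("raw     %016" PRIx64 "\n", wwn);
        printf("swapped %016" PRIx64 "\n", swap_words(wwn));
        return 0;
    }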
+ * + * This routine first calculates the sizes of the current els and allocated + * scsi sgl lists, and then goes through all sgls to updates the physical + * XRIs assigned due to port function reset. During port initialization, the + * current els and allocated scsi sgl lists are 0s. + * + * Return codes + * 0 - successful (for now, it always returns 0) + **/ +int +lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba) +{ + struct lpfc_nvme_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL; + uint16_t i, lxri, els_xri_cnt; + uint16_t nvme_xri_cnt, nvme_xri_max; + LIST_HEAD(nvme_sgl_list); + int rc; + + phba->total_nvme_bufs = 0; + + if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) + return 0; + /* + * update on pci function's allocated nvme xri-sgl list + */ + + /* maximum number of xris available for nvme buffers */ + els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); + nvme_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; + phba->sli4_hba.nvme_xri_max = nvme_xri_max; + phba->sli4_hba.nvme_xri_max -= phba->sli4_hba.scsi_xri_max; + + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "6074 Current allocated NVME xri-sgl count:%d, " + "maximum NVME xri count:%d\n", + phba->sli4_hba.nvme_xri_cnt, + phba->sli4_hba.nvme_xri_max); + + spin_lock_irq(&phba->nvme_buf_list_get_lock); + spin_lock(&phba->nvme_buf_list_put_lock); + list_splice_init(&phba->lpfc_nvme_buf_list_get, &nvme_sgl_list); + list_splice(&phba->lpfc_nvme_buf_list_put, &nvme_sgl_list); + spin_unlock(&phba->nvme_buf_list_put_lock); + spin_unlock_irq(&phba->nvme_buf_list_get_lock); + + if (phba->sli4_hba.nvme_xri_cnt > phba->sli4_hba.nvme_xri_max) { + /* max nvme xri shrunk below the allocated nvme buffers */ + spin_lock_irq(&phba->nvme_buf_list_get_lock); + nvme_xri_cnt = phba->sli4_hba.nvme_xri_cnt - + phba->sli4_hba.nvme_xri_max; + spin_unlock_irq(&phba->nvme_buf_list_get_lock); + /* release the extra allocated nvme buffers */ + for (i = 0; i < nvme_xri_cnt; i++) { + list_remove_head(&nvme_sgl_list, lpfc_ncmd, + struct lpfc_nvme_buf, list); + if (lpfc_ncmd) { + pci_pool_free(phba->lpfc_sg_dma_buf_pool, + lpfc_ncmd->data, + lpfc_ncmd->dma_handle); + kfree(lpfc_ncmd); + } + } + spin_lock_irq(&phba->nvme_buf_list_get_lock); + phba->sli4_hba.nvme_xri_cnt -= nvme_xri_cnt; + spin_unlock_irq(&phba->nvme_buf_list_get_lock); + } + + /* update xris associated to remaining allocated nvme buffers */ + lpfc_ncmd = NULL; + lpfc_ncmd_next = NULL; + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, + &nvme_sgl_list, list) { + lxri = lpfc_sli4_next_xritag(phba); + if (lxri == NO_XRI) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "6075 Failed to allocate xri for " + "nvme buffer\n"); + rc = -ENOMEM; + goto out_free_mem; + } + lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri; + lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; + } + spin_lock_irq(&phba->nvme_buf_list_get_lock); + spin_lock(&phba->nvme_buf_list_put_lock); + list_splice_init(&nvme_sgl_list, &phba->lpfc_nvme_buf_list_get); + INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put); + spin_unlock(&phba->nvme_buf_list_put_lock); + spin_unlock_irq(&phba->nvme_buf_list_get_lock); + return 0; + +out_free_mem: + lpfc_nvme_free(phba); + return rc; +} + /** * lpfc_create_port - Create an FC port * @phba: pointer to lpfc hba data structure. 
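Taken together, lpfc_sli4_scsi_sgl_update() and lpfc_sli4_nvme_sgl_update() carve up the port's XRI pool: ELS gets its count first, SCSI gets cfg_xri_split percent of the remainder, and NVMe gets what is left. A small worked sketch, all numbers hypothetical:

    #include <stdio.h>

    int main(void)
    {
        unsigned int max_xri = 2048;     /* hypothetical max_cfg_param.max_xri */
        unsigned int els_xri_cnt = 256;  /* hypothetical ELS reservation */
        unsigned int xri_split = 50;     /* stands in for cfg_xri_split (percent for SCSI) */

        unsigned int avail = max_xri - els_xri_cnt;              /* 1792 */
        unsigned int scsi_xri_max = (avail * xri_split) / 100;   /* 896 */
        unsigned int nvme_xri_max = avail - scsi_xri_max;        /* 896 */

        printf("els=%u scsi=%u nvme=%u\n", els_xri_cnt, scsi_xri_max, nvme_xri_max);
        return 0;
    }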
@@ -3343,18 +3712,38 @@ struct lpfc_vport * lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) { struct lpfc_vport *vport; - struct Scsi_Host *shost; + struct Scsi_Host *shost = NULL; int error = 0; + int i; + uint64_t wwn; + bool use_no_reset_hba = false; - if (dev != &phba->pcidev->dev) { - shost = scsi_host_alloc(&lpfc_vport_template, - sizeof(struct lpfc_vport)); - } else { - if (phba->sli_rev == LPFC_SLI_REV4) - shost = scsi_host_alloc(&lpfc_template, - sizeof(struct lpfc_vport)); - else - shost = scsi_host_alloc(&lpfc_template_s3, + wwn = lpfc_get_wwpn(phba); + + for (i = 0; i < lpfc_no_hba_reset_cnt; i++) { + if (wwn == lpfc_no_hba_reset[i]) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "6020 Setting use_no_reset port=%llx\n", + wwn); + use_no_reset_hba = true; + break; + } + } + + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { + if (dev != &phba->pcidev->dev) { + shost = scsi_host_alloc(&lpfc_vport_template, + sizeof(struct lpfc_vport)); + } else { + if (!use_no_reset_hba) + shost = scsi_host_alloc(&lpfc_template, + sizeof(struct lpfc_vport)); + else + shost = scsi_host_alloc(&lpfc_template_no_hr, + sizeof(struct lpfc_vport)); + } + } else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + shost = scsi_host_alloc(&lpfc_template_nvme, sizeof(struct lpfc_vport)); } if (!shost) @@ -3365,8 +3754,8 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) vport->load_flag |= FC_LOADING; vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; vport->fc_rscn_flush = 0; - lpfc_get_vport_cfgparam(vport); + shost->unique_id = instance; shost->max_id = LPFC_MAX_TARGET; shost->max_lun = vport->cfg_max_luns; @@ -3398,17 +3787,14 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) INIT_LIST_HEAD(&vport->rcv_buffer_list); spin_lock_init(&vport->work_port_lock); - init_timer(&vport->fc_disctmo); - vport->fc_disctmo.function = lpfc_disc_timeout; - vport->fc_disctmo.data = (unsigned long)vport; + setup_timer(&vport->fc_disctmo, lpfc_disc_timeout, + (unsigned long)vport); - init_timer(&vport->els_tmofunc); - vport->els_tmofunc.function = lpfc_els_timeout; - vport->els_tmofunc.data = (unsigned long)vport; + setup_timer(&vport->els_tmofunc, lpfc_els_timeout, + (unsigned long)vport); - init_timer(&vport->delayed_disc_tmo); - vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo; - vport->delayed_disc_tmo.data = (unsigned long)vport; + setup_timer(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, + (unsigned long)vport); error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); if (error) @@ -3944,7 +4330,7 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba, lpfc_els_flush_all_cmd(phba); /* Block ELS IOCBs until we have done process link event */ - phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; + phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; /* Update link event statistics */ phba->sli.slistat.link_event++; @@ -4103,7 +4489,7 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) lpfc_els_flush_all_cmd(phba); /* Block ELS IOCBs until we have done process link event */ - phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; + phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; /* Update link event statistics */ phba->sli.slistat.link_event++; @@ -4272,13 +4658,13 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) sprintf(message, "Unqualified optics - Replace with " "Avago optics for Warranty and Technical " "Support - Link is%s 
operational", - (operational) ? "" : " not"); + (operational) ? " not" : ""); break; case LPFC_SLI_EVENT_STATUS_UNCERTIFIED: sprintf(message, "Uncertified optics - Replace with " "Avago-certified optics to enable link " "operation - Link is%s operational", - (operational) ? "" : " not"); + (operational) ? " not" : ""); break; default: /* firmware is reporting a status we don't know about */ @@ -5000,48 +5386,112 @@ lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn) } /** - * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev. + * lpfc_setup_driver_resource_phase1 - Phase1 etup driver internal resources. * @phba: pointer to lpfc hba data structure. * - * This routine is invoked to set up the driver internal resources specific to - * support the SLI-3 HBA device it attached to. + * This routine is invoked to set up the driver internal resources before the + * device specific resource setup to support the HBA device it attached to. * * Return codes - * 0 - successful - * other values - error + * 0 - successful + * other values - error **/ static int -lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) +lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) { - struct lpfc_sli *psli; - int rc; + struct lpfc_sli *psli = &phba->sli; /* - * Initialize timers used by driver + * Driver resources common to all SLI revisions */ + atomic_set(&phba->fast_event_count, 0); + spin_lock_init(&phba->hbalock); - /* Heartbeat timer */ - init_timer(&phba->hb_tmofunc); - phba->hb_tmofunc.function = lpfc_hb_timeout; - phba->hb_tmofunc.data = (unsigned long)phba; + /* Initialize ndlp management spinlock */ + spin_lock_init(&phba->ndlp_lock); + + INIT_LIST_HEAD(&phba->port_list); + INIT_LIST_HEAD(&phba->work_list); + init_waitqueue_head(&phba->wait_4_mlo_m_q); + + /* Initialize the wait queue head for the kernel thread */ + init_waitqueue_head(&phba->work_waitq); + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "1403 Protocols supported %s %s %s\n", + ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ? + "SCSI" : " "), + ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ? + "NVME" : " "), + (phba->nvmet_support ? 
"NVMET" : " ")); + + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { + /* Initialize the scsi buffer list used by driver for scsi IO */ + spin_lock_init(&phba->scsi_buf_list_get_lock); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); + spin_lock_init(&phba->scsi_buf_list_put_lock); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); + } + + if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && + (phba->nvmet_support == 0)) { + /* Initialize the NVME buffer list used by driver for NVME IO */ + spin_lock_init(&phba->nvme_buf_list_get_lock); + INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_get); + spin_lock_init(&phba->nvme_buf_list_put_lock); + INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put); + } + + /* Initialize the fabric iocb list */ + INIT_LIST_HEAD(&phba->fabric_iocb_list); + + /* Initialize list to save ELS buffers */ + INIT_LIST_HEAD(&phba->elsbuf); + + /* Initialize FCF connection rec list */ + INIT_LIST_HEAD(&phba->fcf_conn_rec_list); + + /* Initialize OAS configuration list */ + spin_lock_init(&phba->devicelock); + INIT_LIST_HEAD(&phba->luns); - psli = &phba->sli; /* MBOX heartbeat timer */ - init_timer(&psli->mbox_tmo); - psli->mbox_tmo.function = lpfc_mbox_timeout; - psli->mbox_tmo.data = (unsigned long) phba; - /* FCP polling mode timer */ - init_timer(&phba->fcp_poll_timer); - phba->fcp_poll_timer.function = lpfc_poll_timeout; - phba->fcp_poll_timer.data = (unsigned long) phba; + setup_timer(&psli->mbox_tmo, lpfc_mbox_timeout, (unsigned long)phba); /* Fabric block timer */ - init_timer(&phba->fabric_block_timer); - phba->fabric_block_timer.function = lpfc_fabric_block_timeout; - phba->fabric_block_timer.data = (unsigned long) phba; + setup_timer(&phba->fabric_block_timer, lpfc_fabric_block_timeout, + (unsigned long)phba); /* EA polling mode timer */ - init_timer(&phba->eratt_poll); - phba->eratt_poll.function = lpfc_poll_eratt; - phba->eratt_poll.data = (unsigned long) phba; + setup_timer(&phba->eratt_poll, lpfc_poll_eratt, + (unsigned long)phba); + /* Heartbeat timer */ + setup_timer(&phba->hb_tmofunc, lpfc_hb_timeout, (unsigned long)phba); + + return 0; +} + +/** + * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to set up the driver internal resources specific to + * support the SLI-3 HBA device it attached to. 
+ * + * Return codes + * 0 - successful + * other values - error + **/ +static int +lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) +{ + int rc; + + /* + * Initialize timers used by driver + */ + + /* FCP polling mode timer */ + setup_timer(&phba->fcp_poll_timer, lpfc_poll_timeout, + (unsigned long)phba); /* Host attention work mask setup */ phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); @@ -5049,6 +5499,12 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) /* Get all the module params for configuring this host */ lpfc_get_cfgparam(phba); + /* Set up phase-1 common device driver resources */ + + rc = lpfc_setup_driver_resource_phase1(phba); + if (rc) + return -ENODEV; + if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) { phba->menlo_flag |= HBA_MENLO_SUPPORT; /* check for menlo minimum sg count */ @@ -5056,10 +5512,10 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT; } - if (!phba->sli.ring) - phba->sli.ring = kzalloc(LPFC_SLI3_MAX_RING * + if (!phba->sli.sli3_ring) + phba->sli.sli3_ring = kzalloc(LPFC_SLI3_MAX_RING * sizeof(struct lpfc_sli_ring), GFP_KERNEL); - if (!phba->sli.ring) + if (!phba->sli.sli3_ring) return -ENOMEM; /* @@ -5069,7 +5525,8 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) /* Initialize the host templates the configured values. */ lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; - lpfc_template_s3.sg_tablesize = phba->cfg_sg_seg_cnt; + lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt; + lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */ if (phba->cfg_enable_bg) { @@ -5118,7 +5575,7 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) * Initialize the SLI Layer to run with lpfc HBAs. 
*/ lpfc_sli_setup(phba); - lpfc_sli_queue_setup(phba); + lpfc_sli_queue_init(phba); /* Allocate device driver memory */ if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) @@ -5174,18 +5631,27 @@ lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) static int lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) { - struct lpfc_vector_map_info *cpup; - struct lpfc_sli *psli; LPFC_MBOXQ_t *mboxq; - int rc, i, hbq_count, max_buf_size; + MAILBOX_t *mb; + int rc, i, max_buf_size; uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; struct lpfc_mqe *mqe; int longs; int fof_vectors = 0; + uint64_t wwn; + + phba->sli4_hba.num_online_cpu = num_online_cpus(); + phba->sli4_hba.num_present_cpu = lpfc_present_cpu; + phba->sli4_hba.curr_disp_cpu = 0; /* Get all the module params for configuring this host */ lpfc_get_cfgparam(phba); + /* Set up phase-1 common device driver resources */ + rc = lpfc_setup_driver_resource_phase1(phba); + if (rc) + return -ENODEV; + /* Before proceed, wait for POST done and device ready */ rc = lpfc_sli4_post_status_check(phba); if (rc) @@ -5195,31 +5661,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) * Initialize timers used by driver */ - /* Heartbeat timer */ - init_timer(&phba->hb_tmofunc); - phba->hb_tmofunc.function = lpfc_hb_timeout; - phba->hb_tmofunc.data = (unsigned long)phba; - init_timer(&phba->rrq_tmr); - phba->rrq_tmr.function = lpfc_rrq_timeout; - phba->rrq_tmr.data = (unsigned long)phba; - - psli = &phba->sli; - /* MBOX heartbeat timer */ - init_timer(&psli->mbox_tmo); - psli->mbox_tmo.function = lpfc_mbox_timeout; - psli->mbox_tmo.data = (unsigned long) phba; - /* Fabric block timer */ - init_timer(&phba->fabric_block_timer); - phba->fabric_block_timer.function = lpfc_fabric_block_timeout; - phba->fabric_block_timer.data = (unsigned long) phba; - /* EA polling mode timer */ - init_timer(&phba->eratt_poll); - phba->eratt_poll.function = lpfc_poll_eratt; - phba->eratt_poll.data = (unsigned long) phba; + setup_timer(&phba->rrq_tmr, lpfc_rrq_timeout, (unsigned long)phba); + /* FCF rediscover timer */ - init_timer(&phba->fcf.redisc_wait); - phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo; - phba->fcf.redisc_wait.data = (unsigned long)phba; + setup_timer(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, + (unsigned long)phba); /* * Control structure for handling external multi-buffer mailbox @@ -5242,14 +5688,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) /* * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands - * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple. + * we will associate a new ring, for each EQ/CQ/WQ tuple. + * The WQ create will allocate the ring. */ - if (!phba->sli.ring) - phba->sli.ring = kzalloc( - (LPFC_SLI3_MAX_RING + phba->cfg_fcp_io_channel) * - sizeof(struct lpfc_sli_ring), GFP_KERNEL); - if (!phba->sli.ring) - return -ENOMEM; /* * It doesn't matter what family our adapter is in, we are @@ -5261,49 +5702,52 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2; /* - * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size - * used to create the sg_dma_buf_pool must be dynamically calculated. + * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size + * used to create the sg_dma_buf_pool must be calculated. */ - if (phba->cfg_enable_bg) { /* - * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, - * the FCP rsp, and a SGE for each. 
Sice we have no control - * over how many protection data segments the SCSI Layer + * The scsi_buf for a T10-DIF I/O holds the FCP cmnd, + * the FCP rsp, and a SGE. Sice we have no control + * over how many protection segments the SCSI Layer * will hand us (ie: there could be one for every block - * in the IO), we just allocate enough SGEs to accomidate - * our max amount and we need to limit lpfc_sg_seg_cnt to - * minimize the risk of running out. + * in the IO), just allocate enough SGEs to accomidate + * our max amount and we need to limit lpfc_sg_seg_cnt + * to minimize the risk of running out. */ phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + - sizeof(struct fcp_rsp) + max_buf_size; + sizeof(struct fcp_rsp) + max_buf_size; /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */ phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT; if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF) - phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SLI4_SEG_CNT_DIF; + phba->cfg_sg_seg_cnt = + LPFC_MAX_SG_SLI4_SEG_CNT_DIF; } else { /* - * The scsi_buf for a regular I/O will hold the FCP cmnd, + * The scsi_buf for a regular I/O holds the FCP cmnd, * the FCP rsp, a SGE for each, and a SGE for up to * cfg_sg_seg_cnt data segments. */ phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + - sizeof(struct fcp_rsp) + - ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)); + sizeof(struct fcp_rsp) + + ((phba->cfg_sg_seg_cnt + 2) * + sizeof(struct sli4_sge)); /* Total SGEs for scsi_sg_list */ phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; + /* - * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only need - * to post 1 page for the SGL. + * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only + * need to post 1 page for the SGL. */ } /* Initialize the host templates with the updated values. */ lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; + lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt; if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; @@ -5317,21 +5761,30 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) phba->cfg_total_seg_cnt); /* Initialize buffer queue management fields */ - hbq_count = lpfc_sli_hbq_count(); - for (i = 0; i < hbq_count; ++i) - INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); - INIT_LIST_HEAD(&phba->rb_pend_list); + INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list); phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; /* * Initialize the SLI Layer to run with lpfc SLI4 HBAs. 
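
The non-DIF branch above sizes each DMA buffer as the FCP command, the FCP response, and one SGE per data segment plus two reserved SGEs. A worked example of that formula with assumed structure sizes (the real values come from sizeof() on the lpfc headers):

/* Illustrative sizes only; not the actual lpfc structure sizes. */
enum {
        DEMO_FCP_CMND_SZ = 32,   /* assumed sizeof(struct fcp_cmnd) */
        DEMO_FCP_RSP_SZ  = 32,   /* assumed sizeof(struct fcp_rsp)  */
        DEMO_SGE_SZ      = 16,   /* assumed sizeof(struct sli4_sge) */
        DEMO_SG_SEG_CNT  = 64,   /* example lpfc_sg_seg_cnt value   */
};

/* cmnd + rsp + (seg_cnt + 2 reserved SGEs) * SGE size */
static const unsigned int demo_sg_dma_buf_size =
        DEMO_FCP_CMND_SZ + DEMO_FCP_RSP_SZ +
        (DEMO_SG_SEG_CNT + 2) * DEMO_SGE_SZ;    /* 32 + 32 + 66 * 16 = 1120 */
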
*/ - /* Initialize the Abort scsi buffer list used by driver */ - spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock); - INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list); + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { + /* Initialize the Abort scsi buffer list used by driver */ + spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock); + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list); + } + + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + /* Initialize the Abort nvme buffer list used by driver */ + spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock); + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list); + /* Fast-path XRI aborted CQ Event work queue list */ + INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue); + } + /* This abort list used by worker thread */ - spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock); + spin_lock_init(&phba->sli4_hba.sgl_list_lock); + spin_lock_init(&phba->sli4_hba.nvmet_io_lock); /* * Initialize driver internal slow-path work queues @@ -5359,10 +5812,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) /* initialize optic_state to 0xFF */ phba->sli4_hba.lnk_info.optic_state = 0xff; - /* Initialize the driver internal SLI layer lists. */ - lpfc_sli_setup(phba); - lpfc_sli_queue_setup(phba); - /* Allocate device driver memory */ rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); if (rc) @@ -5372,8 +5821,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_IF_TYPE_2) { rc = lpfc_pci_function_reset(phba); - if (unlikely(rc)) - return -ENODEV; + if (unlikely(rc)) { + rc = -ENODEV; + goto out_free_mem; + } phba->temp_sensor_support = 1; } @@ -5410,6 +5861,53 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) goto out_free_bsmbx; } + /* Check for NVMET being configured */ + phba->nvmet_support = 0; + if (lpfc_enable_nvmet_cnt) { + + /* First get WWN of HBA instance */ + lpfc_read_nv(phba, mboxq); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "6016 Mailbox failed , mbxCmd x%x " + "READ_NV, mbxStatus x%x\n", + bf_get(lpfc_mqe_command, &mboxq->u.mqe), + bf_get(lpfc_mqe_status, &mboxq->u.mqe)); + rc = -EIO; + goto out_free_bsmbx; + } + mb = &mboxq->u.mb; + memcpy(&wwn, (char *)mb->un.varRDnvp.nodename, + sizeof(uint64_t)); + wwn = cpu_to_be64(wwn); + phba->sli4_hba.wwnn.u.name = wwn; + memcpy(&wwn, (char *)mb->un.varRDnvp.portname, + sizeof(uint64_t)); + /* wwn is WWPN of HBA instance */ + wwn = cpu_to_be64(wwn); + phba->sli4_hba.wwpn.u.name = wwn; + + /* Check to see if it matches any module parameter */ + for (i = 0; i < lpfc_enable_nvmet_cnt; i++) { + if (wwn == lpfc_enable_nvmet[i]) { +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6017 NVME Target %016llx\n", + wwn); + phba->nvmet_support = 1; /* a match */ +#else + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6021 Can't enable NVME Target." + " NVME_TARGET_FC infrastructure" + " is not in kernel\n"); +#endif + } + } + } + + lpfc_nvme_mod_param_dep(phba); + /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. 
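
The NVMET detection above reads the adapter WWPN from NVRAM, copies the raw 8 bytes into a 64-bit value, normalizes the byte order with cpu_to_be64(), and compares it against each lpfc_enable_nvmet module-parameter entry, enabling target mode only when CONFIG_NVME_TARGET_FC is built. A minimal sketch of the match step, with demo_* stand-ins for the module-parameter storage:

#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>

#define DEMO_NVMET_MAX          8

/* hypothetical stand-ins for lpfc_enable_nvmet[] and its count */
static unsigned long long demo_enable_nvmet[DEMO_NVMET_MAX];
static int demo_enable_nvmet_cnt;

/* Return true if the 8 raw port-name bytes match a configured WWPN. */
static bool demo_wwpn_matches(const u8 *raw_portname)
{
        u64 wwn;
        int i;

        memcpy(&wwn, raw_portname, sizeof(wwn));
        wwn = cpu_to_be64(wwn);         /* same normalization the driver uses */

        for (i = 0; i < demo_enable_nvmet_cnt; i++)
                if (wwn == demo_enable_nvmet[i])
                        return true;
        return false;
}
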
*/ lpfc_supported_pages(mboxq); rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); @@ -5448,9 +5946,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2999 Unsupported SLI4 Parameters " "Extents and RPI headers enabled.\n"); - goto out_free_bsmbx; } + mempool_free(mboxq, phba->mbox_mem_pool); + goto out_free_bsmbx; } + mempool_free(mboxq, phba->mbox_mem_pool); /* Verify OAS is supported */ @@ -5497,11 +5997,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) goto out_remove_rpi_hdrs; } - phba->sli4_hba.fcp_eq_hdl = - kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * - (fof_vectors + phba->cfg_fcp_io_channel)), - GFP_KERNEL); - if (!phba->sli4_hba.fcp_eq_hdl) { + phba->sli4_hba.hba_eq_hdl = kcalloc(fof_vectors + phba->io_channel_irqs, + sizeof(struct lpfc_hba_eq_hdl), + GFP_KERNEL); + if (!phba->sli4_hba.hba_eq_hdl) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2572 Failed allocate memory for " "fast-path per-EQ handle array\n"); @@ -5509,52 +6008,31 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) goto out_free_fcf_rr_bmask; } - phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * - (fof_vectors + - phba->cfg_fcp_io_channel)), GFP_KERNEL); - if (!phba->sli4_hba.msix_entries) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2573 Failed allocate memory for msi-x " - "interrupt vector entries\n"); - rc = -ENOMEM; - goto out_free_fcp_eq_hdl; - } - - phba->sli4_hba.cpu_map = kzalloc((sizeof(struct lpfc_vector_map_info) * - phba->sli4_hba.num_present_cpu), - GFP_KERNEL); + phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_present_cpu, + sizeof(struct lpfc_vector_map_info), + GFP_KERNEL); if (!phba->sli4_hba.cpu_map) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3327 Failed allocate memory for msi-x " "interrupt vector mapping\n"); rc = -ENOMEM; - goto out_free_msix; + goto out_free_hba_eq_hdl; } if (lpfc_used_cpu == NULL) { - lpfc_used_cpu = kzalloc((sizeof(uint16_t) * lpfc_present_cpu), - GFP_KERNEL); + lpfc_used_cpu = kcalloc(lpfc_present_cpu, sizeof(uint16_t), + GFP_KERNEL); if (!lpfc_used_cpu) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3335 Failed allocate memory for msi-x " "interrupt vector mapping\n"); kfree(phba->sli4_hba.cpu_map); rc = -ENOMEM; - goto out_free_msix; + goto out_free_hba_eq_hdl; } for (i = 0; i < lpfc_present_cpu; i++) lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY; } - /* Initialize io channels for round robin */ - cpup = phba->sli4_hba.cpu_map; - rc = 0; - for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { - cpup->channel_id = rc; - rc++; - if (rc >= phba->cfg_fcp_io_channel) - rc = 0; - } - /* * Enable sr-iov virtual functions if supported and configured * through the module parameter. @@ -5574,10 +6052,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) return 0; -out_free_msix: - kfree(phba->sli4_hba.msix_entries); -out_free_fcp_eq_hdl: - kfree(phba->sli4_hba.fcp_eq_hdl); +out_free_hba_eq_hdl: + kfree(phba->sli4_hba.hba_eq_hdl); out_free_fcf_rr_bmask: kfree(phba->fcf.fcf_rr_bmask); out_remove_rpi_hdrs: @@ -5611,11 +6087,8 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) phba->sli4_hba.num_online_cpu = 0; phba->sli4_hba.curr_disp_cpu = 0; - /* Free memory allocated for msi-x interrupt vector entries */ - kfree(phba->sli4_hba.msix_entries); - /* Free memory allocated for fast-path work queue handles */ - kfree(phba->sli4_hba.fcp_eq_hdl); + kfree(phba->sli4_hba.hba_eq_hdl); /* Free the allocated rpi headers. 
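
Several allocations above also switch from kzalloc(count * size, ...) to kcalloc(count, size, ...) for the per-vector handle and CPU map arrays; kcalloc() zeroes the memory and additionally fails cleanly if count * size would overflow. A minimal sketch of the pattern with a hypothetical handle type:

#include <linux/slab.h>

struct demo_eq_hdl {
        int idx;
        void *parent;
};

static struct demo_eq_hdl *demo_alloc_eq_handles(int nvec)
{
        /*
         * Preferred over kzalloc(nvec * sizeof(struct demo_eq_hdl), GFP_KERNEL):
         * kcalloc() checks the multiplication for overflow and returns NULL
         * rather than allocating a short buffer.
         */
        return kcalloc(nvec, sizeof(struct demo_eq_hdl), GFP_KERNEL);
}
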
*/ lpfc_sli4_remove_rpi_hdrs(phba); @@ -5627,6 +6100,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) /* Free the ELS sgl list */ lpfc_free_active_sgl(phba); lpfc_free_els_sgl_list(phba); + lpfc_free_nvmet_sgl_list(phba); /* Free the completion queue EQ event pool */ lpfc_sli4_cq_event_release_all(phba); @@ -5688,58 +6162,6 @@ lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) return 0; } -/** - * lpfc_setup_driver_resource_phase1 - Phase1 etup driver internal resources. - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to set up the driver internal resources before the - * device specific resource setup to support the HBA device it attached to. - * - * Return codes - * 0 - successful - * other values - error - **/ -static int -lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) -{ - /* - * Driver resources common to all SLI revisions - */ - atomic_set(&phba->fast_event_count, 0); - spin_lock_init(&phba->hbalock); - - /* Initialize ndlp management spinlock */ - spin_lock_init(&phba->ndlp_lock); - - INIT_LIST_HEAD(&phba->port_list); - INIT_LIST_HEAD(&phba->work_list); - init_waitqueue_head(&phba->wait_4_mlo_m_q); - - /* Initialize the wait queue head for the kernel thread */ - init_waitqueue_head(&phba->work_waitq); - - /* Initialize the scsi buffer list used by driver for scsi IO */ - spin_lock_init(&phba->scsi_buf_list_get_lock); - INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); - spin_lock_init(&phba->scsi_buf_list_put_lock); - INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); - - /* Initialize the fabric iocb list */ - INIT_LIST_HEAD(&phba->fabric_iocb_list); - - /* Initialize list to save ELS buffers */ - INIT_LIST_HEAD(&phba->elsbuf); - - /* Initialize FCF connection rec list */ - INIT_LIST_HEAD(&phba->fcf_conn_rec_list); - - /* Initialize OAS configuration list */ - spin_lock_init(&phba->devicelock); - INIT_LIST_HEAD(&phba->luns); - - return 0; -} - /** * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. * @phba: pointer to lpfc hba data structure. @@ -5887,19 +6309,45 @@ static void lpfc_free_els_sgl_list(struct lpfc_hba *phba) { LIST_HEAD(sglq_list); - struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; /* Retrieve all els sgls from driver list */ spin_lock_irq(&phba->hbalock); - spin_lock(&pring->ring_lock); - list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list); - spin_unlock(&pring->ring_lock); + spin_lock(&phba->sli4_hba.sgl_list_lock); + list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list); + spin_unlock(&phba->sli4_hba.sgl_list_lock); spin_unlock_irq(&phba->hbalock); /* Now free the sgl list */ lpfc_free_sgl_list(phba, &sglq_list); } +/** + * lpfc_free_nvmet_sgl_list - Free nvmet sgl list. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to free the driver's nvmet sgl list and memory. 
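
lpfc_free_els_sgl_list() above, and the NVMET variant that follows it, both detach the whole list with list_splice_init() while the locks are held and only then walk and free the detached entries with no locks taken. A minimal sketch of that idiom using hypothetical demo_* types:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_entry {
        struct list_head list;
};

struct demo_pool {
        spinlock_t lock;
        struct list_head entries;
};

static void demo_free_all(struct demo_pool *pool)
{
        struct demo_entry *cur, *next;
        LIST_HEAD(tmp);

        /* Detach every entry while holding the lock... */
        spin_lock_irq(&pool->lock);
        list_splice_init(&pool->entries, &tmp);
        spin_unlock_irq(&pool->lock);

        /* ...then free the entries without the lock held. */
        list_for_each_entry_safe(cur, next, &tmp, list) {
                list_del(&cur->list);
                kfree(cur);
        }
}
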
+ **/ +static void +lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba) +{ + struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; + LIST_HEAD(sglq_list); + + /* Retrieve all nvmet sgls from driver list */ + spin_lock_irq(&phba->hbalock); + spin_lock(&phba->sli4_hba.sgl_list_lock); + list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list); + spin_unlock(&phba->sli4_hba.sgl_list_lock); + spin_unlock_irq(&phba->hbalock); + + /* Now free the sgl list */ + list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) { + list_del(&sglq_entry->list); + lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys); + kfree(sglq_entry); + } +} + /** * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. * @phba: pointer to lpfc hba data structure. @@ -5947,14 +6395,19 @@ static void lpfc_init_sgl_list(struct lpfc_hba *phba) { /* Initialize and populate the sglq list per host/VF. */ - INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list); + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list); INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list); + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_sgl_list); /* els xri-sgl book keeping */ phba->sli4_hba.els_xri_cnt = 0; /* scsi xri-buffer book keeping */ phba->sli4_hba.scsi_xri_cnt = 0; + + /* nvme xri-buffer book keeping */ + phba->sli4_hba.nvme_xri_cnt = 0; } /** @@ -6185,9 +6638,9 @@ lpfc_hba_free(struct lpfc_hba *phba) /* Release the driver assigned board number */ idr_remove(&lpfc_hba_index, phba->brd_no); - /* Free memory allocated with sli rings */ - kfree(phba->sli.ring); - phba->sli.ring = NULL; + /* Free memory allocated with sli3 rings */ + kfree(phba->sli.sli3_ring); + phba->sli.sli3_ring = NULL; kfree(phba); return; @@ -6223,6 +6676,23 @@ lpfc_create_shost(struct lpfc_hba *phba) shost = lpfc_shost_from_vport(vport); phba->pport = vport; + + if (phba->nvmet_support) { + /* Only 1 vport (pport) will support NVME target */ + if (phba->txrdy_payload_pool == NULL) { + phba->txrdy_payload_pool = pci_pool_create( + "txrdy_pool", phba->pcidev, + TXRDY_PAYLOAD_LEN, 16, 0); + if (phba->txrdy_payload_pool) { + phba->targetport = NULL; + phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME; + lpfc_printf_log(phba, KERN_INFO, + LOG_INIT | LOG_NVME_DISC, + "6076 NVME Target Found\n"); + } + } + } + lpfc_debugfs_initialize(vport); /* Put reference to SCSI host to driver's device private data */ pci_set_drvdata(phba->pcidev, shost); @@ -6504,8 +6974,6 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); - INIT_LIST_HEAD(&phba->rb_pend_list); - phba->MBslimaddr = phba->slim_memmap_p; phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; @@ -7009,7 +7477,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) "VPI(B:%d M:%d) " "VFI(B:%d M:%d) " "RPI(B:%d M:%d) " - "FCFI(Count:%d)\n", + "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n", phba->sli4_hba.extents_in_use, phba->sli4_hba.max_cfg_param.xri_base, phba->sli4_hba.max_cfg_param.max_xri, @@ -7019,7 +7487,12 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) phba->sli4_hba.max_cfg_param.max_vfi, phba->sli4_hba.max_cfg_param.rpi_base, phba->sli4_hba.max_cfg_param.max_rpi, - phba->sli4_hba.max_cfg_param.max_fcfi); + phba->sli4_hba.max_cfg_param.max_fcfi, + phba->sli4_hba.max_cfg_param.max_eq, + phba->sli4_hba.max_cfg_param.max_cq, + phba->sli4_hba.max_cfg_param.max_wq, + phba->sli4_hba.max_cfg_param.max_rq); + } if (rc) @@ -7210,11 +7683,11 @@ 
lpfc_setup_endian_order(struct lpfc_hba *phba) } /** - * lpfc_sli4_queue_verify - Verify and update EQ and CQ counts + * lpfc_sli4_queue_verify - Verify and update EQ counts * @phba: pointer to lpfc hba data structure. * - * This routine is invoked to check the user settable queue counts for EQs and - * CQs. after this routine is called the counts will be set to valid values that + * This routine is invoked to check the user settable queue counts for EQs. + * After this routine is called the counts will be set to valid values that * adhere to the constraints of the system's interrupt vectors and the port's * queue resources. * @@ -7225,9 +7698,7 @@ lpfc_setup_endian_order(struct lpfc_hba *phba) static int lpfc_sli4_queue_verify(struct lpfc_hba *phba) { - int cfg_fcp_io_channel; - uint32_t cpu; - uint32_t i = 0; + int io_channel; int fof_vectors = phba->cfg_fof ? 1 : 0; /* @@ -7236,49 +7707,40 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba) */ /* Sanity check on HBA EQ parameters */ - cfg_fcp_io_channel = phba->cfg_fcp_io_channel; + io_channel = phba->io_channel_irqs; - /* It doesn't make sense to have more io channels then online CPUs */ - for_each_present_cpu(cpu) { - if (cpu_online(cpu)) - i++; - } - phba->sli4_hba.num_online_cpu = i; - phba->sli4_hba.num_present_cpu = lpfc_present_cpu; - phba->sli4_hba.curr_disp_cpu = 0; - - if (i < cfg_fcp_io_channel) { + if (phba->sli4_hba.num_online_cpu < io_channel) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3188 Reducing IO channels to match number of " "online CPUs: from %d to %d\n", - cfg_fcp_io_channel, i); - cfg_fcp_io_channel = i; + io_channel, phba->sli4_hba.num_online_cpu); + io_channel = phba->sli4_hba.num_online_cpu; } - if (cfg_fcp_io_channel + fof_vectors > - phba->sli4_hba.max_cfg_param.max_eq) { - if (phba->sli4_hba.max_cfg_param.max_eq < - LPFC_FCP_IO_CHAN_MIN) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2574 Not enough EQs (%d) from the " - "pci function for supporting FCP " - "EQs (%d)\n", - phba->sli4_hba.max_cfg_param.max_eq, - phba->cfg_fcp_io_channel); - goto out_error; - } + if (io_channel + fof_vectors > phba->sli4_hba.max_cfg_param.max_eq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2575 Reducing IO channels to match number of " "available EQs: from %d to %d\n", - cfg_fcp_io_channel, + io_channel, phba->sli4_hba.max_cfg_param.max_eq); - cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq - - fof_vectors; + io_channel = phba->sli4_hba.max_cfg_param.max_eq - fof_vectors; } - /* The actual number of FCP event queues adopted */ - phba->cfg_fcp_io_channel = cfg_fcp_io_channel; + /* The actual number of FCP / NVME event queues adopted */ + if (io_channel != phba->io_channel_irqs) + phba->io_channel_irqs = io_channel; + if (phba->cfg_fcp_io_channel > io_channel) + phba->cfg_fcp_io_channel = io_channel; + if (phba->cfg_nvme_io_channel > io_channel) + phba->cfg_nvme_io_channel = io_channel; + if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq) + phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel; + + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n", + phba->io_channel_irqs, phba->cfg_fcp_io_channel, + phba->cfg_nvme_io_channel, phba->cfg_nvmet_mrq); /* Get EQ depth from module parameter, fake the default for now */ phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; @@ -7287,10 +7749,67 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba) /* Get CQ depth from module parameter, fake the default for now */ phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; phba->sli4_hba.cq_ecount = 
LPFC_CQE_DEF_COUNT; + return 0; +} + +static int +lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx) +{ + struct lpfc_queue *qdesc; + int cnt; + + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, + phba->sli4_hba.cq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0508 Failed allocate fast-path NVME CQ (%d)\n", + wqidx); + return 1; + } + phba->sli4_hba.nvme_cq[wqidx] = qdesc; + cnt = LPFC_NVME_WQSIZE; + qdesc = lpfc_sli4_queue_alloc(phba, LPFC_WQE128_SIZE, cnt); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0509 Failed allocate fast-path NVME WQ (%d)\n", + wqidx); + return 1; + } + phba->sli4_hba.nvme_wq[wqidx] = qdesc; + list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); + return 0; +} + +static int +lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx) +{ + struct lpfc_queue *qdesc; + uint32_t wqesize; + + /* Create Fast Path FCP CQs */ + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, + phba->sli4_hba.cq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx); + return 1; + } + phba->sli4_hba.fcp_cq[wqidx] = qdesc; + + /* Create Fast Path FCP WQs */ + wqesize = (phba->fcp_embed_io) ? + LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; + qdesc = lpfc_sli4_queue_alloc(phba, wqesize, phba->sli4_hba.wq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0503 Failed allocate fast-path FCP WQ (%d)\n", + wqidx); + return 1; + } + phba->sli4_hba.fcp_wq[wqidx] = qdesc; + list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); return 0; -out_error: - return -ENOMEM; } /** @@ -7311,13 +7830,14 @@ int lpfc_sli4_queue_create(struct lpfc_hba *phba) { struct lpfc_queue *qdesc; - uint32_t wqesize; - int idx; + int idx, io_channel, max; /* * Create HBA Record arrays. 
+ * Both NVME and FCP will share that same vectors / EQs */ - if (!phba->cfg_fcp_io_channel) + io_channel = phba->io_channel_irqs; + if (!io_channel) return -ERANGE; phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; @@ -7326,9 +7846,14 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; + phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; + phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; + phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; + phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; - phba->sli4_hba.hba_eq = kzalloc((sizeof(struct lpfc_queue *) * - phba->cfg_fcp_io_channel), GFP_KERNEL); + phba->sli4_hba.hba_eq = kcalloc(io_channel, + sizeof(struct lpfc_queue *), + GFP_KERNEL); if (!phba->sli4_hba.hba_eq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2576 Failed allocate memory for " @@ -7336,44 +7861,115 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) goto out_error; } - phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * - phba->cfg_fcp_io_channel), GFP_KERNEL); - if (!phba->sli4_hba.fcp_cq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2577 Failed allocate memory for fast-path " - "CQ record array\n"); - goto out_error; + if (phba->cfg_fcp_io_channel) { + phba->sli4_hba.fcp_cq = kcalloc(phba->cfg_fcp_io_channel, + sizeof(struct lpfc_queue *), + GFP_KERNEL); + if (!phba->sli4_hba.fcp_cq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2577 Failed allocate memory for " + "fast-path CQ record array\n"); + goto out_error; + } + phba->sli4_hba.fcp_wq = kcalloc(phba->cfg_fcp_io_channel, + sizeof(struct lpfc_queue *), + GFP_KERNEL); + if (!phba->sli4_hba.fcp_wq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2578 Failed allocate memory for " + "fast-path FCP WQ record array\n"); + goto out_error; + } + /* + * Since the first EQ can have multiple CQs associated with it, + * this array is used to quickly see if we have a FCP fast-path + * CQ match. + */ + phba->sli4_hba.fcp_cq_map = kcalloc(phba->cfg_fcp_io_channel, + sizeof(uint16_t), + GFP_KERNEL); + if (!phba->sli4_hba.fcp_cq_map) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2545 Failed allocate memory for " + "fast-path CQ map\n"); + goto out_error; + } } - phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) * - phba->cfg_fcp_io_channel), GFP_KERNEL); - if (!phba->sli4_hba.fcp_wq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2578 Failed allocate memory for fast-path " - "WQ record array\n"); - goto out_error; - } + if (phba->cfg_nvme_io_channel) { + phba->sli4_hba.nvme_cq = kcalloc(phba->cfg_nvme_io_channel, + sizeof(struct lpfc_queue *), + GFP_KERNEL); + if (!phba->sli4_hba.nvme_cq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6077 Failed allocate memory for " + "fast-path CQ record array\n"); + goto out_error; + } - /* - * Since the first EQ can have multiple CQs associated with it, - * this array is used to quickly see if we have a FCP fast-path - * CQ match. 
- */ - phba->sli4_hba.fcp_cq_map = kzalloc((sizeof(uint16_t) * - phba->cfg_fcp_io_channel), GFP_KERNEL); - if (!phba->sli4_hba.fcp_cq_map) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2545 Failed allocate memory for fast-path " - "CQ map\n"); - goto out_error; + phba->sli4_hba.nvme_wq = kcalloc(phba->cfg_nvme_io_channel, + sizeof(struct lpfc_queue *), + GFP_KERNEL); + if (!phba->sli4_hba.nvme_wq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2581 Failed allocate memory for " + "fast-path NVME WQ record array\n"); + goto out_error; + } + + /* + * Since the first EQ can have multiple CQs associated with it, + * this array is used to quickly see if we have a NVME fast-path + * CQ match. + */ + phba->sli4_hba.nvme_cq_map = kcalloc(phba->cfg_nvme_io_channel, + sizeof(uint16_t), + GFP_KERNEL); + if (!phba->sli4_hba.nvme_cq_map) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6078 Failed allocate memory for " + "fast-path CQ map\n"); + goto out_error; + } + + if (phba->nvmet_support) { + phba->sli4_hba.nvmet_cqset = kcalloc( + phba->cfg_nvmet_mrq, + sizeof(struct lpfc_queue *), + GFP_KERNEL); + if (!phba->sli4_hba.nvmet_cqset) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3121 Fail allocate memory for " + "fast-path CQ set array\n"); + goto out_error; + } + phba->sli4_hba.nvmet_mrq_hdr = kcalloc( + phba->cfg_nvmet_mrq, + sizeof(struct lpfc_queue *), + GFP_KERNEL); + if (!phba->sli4_hba.nvmet_mrq_hdr) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3122 Fail allocate memory for " + "fast-path RQ set hdr array\n"); + goto out_error; + } + phba->sli4_hba.nvmet_mrq_data = kcalloc( + phba->cfg_nvmet_mrq, + sizeof(struct lpfc_queue *), + GFP_KERNEL); + if (!phba->sli4_hba.nvmet_mrq_data) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3124 Fail allocate memory for " + "fast-path RQ set data array\n"); + goto out_error; + } + } } - /* - * Create HBA Event Queues (EQs). The cfg_fcp_io_channel specifies - * how many EQs to create. - */ - for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); + /* Create HBA Event Queues (EQs) */ + for (idx = 0; idx < io_channel; idx++) { /* Create EQs */ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, phba->sli4_hba.eq_ecount); @@ -7383,33 +7979,42 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) goto out_error; } phba->sli4_hba.hba_eq[idx] = qdesc; + } - /* Create Fast Path FCP CQs */ - qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, - phba->sli4_hba.cq_ecount); - if (!qdesc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0499 Failed allocate fast-path FCP " - "CQ (%d)\n", idx); + /* FCP and NVME io channels are not required to be balanced */ + + for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) + if (lpfc_alloc_fcp_wq_cq(phba, idx)) goto out_error; - } - phba->sli4_hba.fcp_cq[idx] = qdesc; - /* Create Fast Path FCP WQs */ - wqesize = (phba->fcp_embed_io) ? 
- LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; - qdesc = lpfc_sli4_queue_alloc(phba, wqesize, - phba->sli4_hba.wq_ecount); - if (!qdesc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0503 Failed allocate fast-path FCP " - "WQ (%d)\n", idx); + for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++) + if (lpfc_alloc_nvme_wq_cq(phba, idx)) goto out_error; + + /* allocate MRQ CQs */ + max = phba->cfg_nvme_io_channel; + if (max < phba->cfg_nvmet_mrq) + max = phba->cfg_nvmet_mrq; + + for (idx = 0; idx < max; idx++) + if (lpfc_alloc_nvme_wq_cq(phba, idx)) + goto out_error; + + if (phba->nvmet_support) { + for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { + qdesc = lpfc_sli4_queue_alloc(phba, + phba->sli4_hba.cq_esize, + phba->sli4_hba.cq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3142 Failed allocate NVME " + "CQ Set (%d)\n", idx); + goto out_error; + } + phba->sli4_hba.nvmet_cqset[idx] = qdesc; } - phba->sli4_hba.fcp_wq[idx] = qdesc; } - /* * Create Slow Path Completion Queues (CQs) */ @@ -7463,6 +8068,30 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) goto out_error; } phba->sli4_hba.els_wq = qdesc; + list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); + + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + /* Create NVME LS Complete Queue */ + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, + phba->sli4_hba.cq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6079 Failed allocate NVME LS CQ\n"); + goto out_error; + } + phba->sli4_hba.nvmels_cq = qdesc; + + /* Create NVME LS Work Queue */ + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, + phba->sli4_hba.wq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6080 Failed allocate NVME LS WQ\n"); + goto out_error; + } + phba->sli4_hba.nvmels_wq = qdesc; + list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); + } /* * Create Receive Queue (RQ) @@ -7488,6 +8117,44 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) } phba->sli4_hba.dat_rq = qdesc; + if (phba->nvmet_support) { + for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { + /* Create NVMET Receive Queue for header */ + qdesc = lpfc_sli4_queue_alloc(phba, + phba->sli4_hba.rq_esize, + phba->sli4_hba.rq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3146 Failed allocate " + "receive HRQ\n"); + goto out_error; + } + phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc; + + /* Only needed for header of RQ pair */ + qdesc->rqbp = kzalloc(sizeof(struct lpfc_rqb), + GFP_KERNEL); + if (qdesc->rqbp == NULL) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6131 Failed allocate " + "Header RQBP\n"); + goto out_error; + } + + /* Create NVMET Receive Queue for data */ + qdesc = lpfc_sli4_queue_alloc(phba, + phba->sli4_hba.rq_esize, + phba->sli4_hba.rq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3156 Failed allocate " + "receive DRQ\n"); + goto out_error; + } + phba->sli4_hba.nvmet_mrq_data[idx] = qdesc; + } + } + /* Create the Queues needed for Flash Optimized Fabric operations */ if (phba->cfg_fof) lpfc_fof_queue_create(phba); @@ -7498,6 +8165,39 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) return -ENOMEM; } +static inline void +__lpfc_sli4_release_queue(struct lpfc_queue **qp) +{ + if (*qp != NULL) { + lpfc_sli4_queue_free(*qp); + *qp = NULL; + } +} + +static inline void +lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max) +{ + int idx; + + if (*qs == NULL) + return; + + for (idx = 0; idx < max; idx++) + 
__lpfc_sli4_release_queue(&(*qs)[idx]); + + kfree(*qs); + *qs = NULL; +} + +static inline void +lpfc_sli4_release_queue_map(uint16_t **qmap) +{ + if (*qmap != NULL) { + kfree(*qmap); + *qmap = NULL; + } +} + /** * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues * @phba: pointer to lpfc hba data structure. @@ -7513,91 +8213,196 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) void lpfc_sli4_queue_destroy(struct lpfc_hba *phba) { - int idx; - if (phba->cfg_fof) lpfc_fof_queue_destroy(phba); - if (phba->sli4_hba.hba_eq != NULL) { - /* Release HBA event queue */ - for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { - if (phba->sli4_hba.hba_eq[idx] != NULL) { - lpfc_sli4_queue_free( - phba->sli4_hba.hba_eq[idx]); - phba->sli4_hba.hba_eq[idx] = NULL; - } - } - kfree(phba->sli4_hba.hba_eq); - phba->sli4_hba.hba_eq = NULL; - } + /* Release HBA eqs */ + lpfc_sli4_release_queues(&phba->sli4_hba.hba_eq, phba->io_channel_irqs); - if (phba->sli4_hba.fcp_cq != NULL) { - /* Release FCP completion queue */ - for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { - if (phba->sli4_hba.fcp_cq[idx] != NULL) { - lpfc_sli4_queue_free( - phba->sli4_hba.fcp_cq[idx]); - phba->sli4_hba.fcp_cq[idx] = NULL; - } - } - kfree(phba->sli4_hba.fcp_cq); - phba->sli4_hba.fcp_cq = NULL; - } + /* Release FCP cqs */ + lpfc_sli4_release_queues(&phba->sli4_hba.fcp_cq, + phba->cfg_fcp_io_channel); - if (phba->sli4_hba.fcp_wq != NULL) { - /* Release FCP work queue */ - for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { - if (phba->sli4_hba.fcp_wq[idx] != NULL) { - lpfc_sli4_queue_free( - phba->sli4_hba.fcp_wq[idx]); - phba->sli4_hba.fcp_wq[idx] = NULL; - } - } - kfree(phba->sli4_hba.fcp_wq); - phba->sli4_hba.fcp_wq = NULL; - } + /* Release FCP wqs */ + lpfc_sli4_release_queues(&phba->sli4_hba.fcp_wq, + phba->cfg_fcp_io_channel); /* Release FCP CQ mapping array */ - if (phba->sli4_hba.fcp_cq_map != NULL) { - kfree(phba->sli4_hba.fcp_cq_map); - phba->sli4_hba.fcp_cq_map = NULL; - } + lpfc_sli4_release_queue_map(&phba->sli4_hba.fcp_cq_map); + + /* Release NVME cqs */ + lpfc_sli4_release_queues(&phba->sli4_hba.nvme_cq, + phba->cfg_nvme_io_channel); + + /* Release NVME wqs */ + lpfc_sli4_release_queues(&phba->sli4_hba.nvme_wq, + phba->cfg_nvme_io_channel); + + /* Release NVME CQ mapping array */ + lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map); + + lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset, + phba->cfg_nvmet_mrq); + + lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr, + phba->cfg_nvmet_mrq); + lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data, + phba->cfg_nvmet_mrq); /* Release mailbox command work queue */ - if (phba->sli4_hba.mbx_wq != NULL) { - lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); - phba->sli4_hba.mbx_wq = NULL; - } + __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq); /* Release ELS work queue */ - if (phba->sli4_hba.els_wq != NULL) { - lpfc_sli4_queue_free(phba->sli4_hba.els_wq); - phba->sli4_hba.els_wq = NULL; - } + __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq); + + /* Release ELS work queue */ + __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq); /* Release unsolicited receive queue */ - if (phba->sli4_hba.hdr_rq != NULL) { - lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); - phba->sli4_hba.hdr_rq = NULL; + __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq); + __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq); + + /* Release ELS complete queue */ + __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq); + + /* Release NVME LS complete queue */ + 
__lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq); + + /* Release mailbox command complete queue */ + __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq); + + /* Everything on this list has been freed */ + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); +} + +int +lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq, + struct lpfc_queue *drq, int count) +{ + int rc, i; + struct lpfc_rqe hrqe; + struct lpfc_rqe drqe; + struct lpfc_rqb *rqbp; + struct rqb_dmabuf *rqb_buffer; + LIST_HEAD(rqb_buf_list); + + rqbp = hrq->rqbp; + for (i = 0; i < count; i++) { + rqb_buffer = (rqbp->rqb_alloc_buffer)(phba); + if (!rqb_buffer) + break; + rqb_buffer->hrq = hrq; + rqb_buffer->drq = drq; + list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list); + } + while (!list_empty(&rqb_buf_list)) { + list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf, + hbuf.list); + + hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys); + hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys); + drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys); + drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys); + rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe); + if (rc < 0) { + (rqbp->rqb_free_buffer)(phba, rqb_buffer); + } else { + list_add_tail(&rqb_buffer->hbuf.list, + &rqbp->rqb_buffer_list); + rqbp->buffer_count++; + } } - if (phba->sli4_hba.dat_rq != NULL) { - lpfc_sli4_queue_free(phba->sli4_hba.dat_rq); - phba->sli4_hba.dat_rq = NULL; + return 1; +} + +int +lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq) +{ + struct lpfc_rqb *rqbp; + struct lpfc_dmabuf *h_buf; + struct rqb_dmabuf *rqb_buffer; + + rqbp = rq->rqbp; + while (!list_empty(&rqbp->rqb_buffer_list)) { + list_remove_head(&rqbp->rqb_buffer_list, h_buf, + struct lpfc_dmabuf, list); + + rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf); + (rqbp->rqb_free_buffer)(phba, rqb_buffer); + rqbp->buffer_count--; } + return 1; +} - /* Release ELS complete queue */ - if (phba->sli4_hba.els_cq != NULL) { - lpfc_sli4_queue_free(phba->sli4_hba.els_cq); - phba->sli4_hba.els_cq = NULL; +static int +lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq, + struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map, + int qidx, uint32_t qtype) +{ + struct lpfc_sli_ring *pring; + int rc; + + if (!eq || !cq || !wq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6085 Fast-path %s (%d) not allocated\n", + ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx); + return -ENOMEM; } - /* Release mailbox command complete queue */ - if (phba->sli4_hba.mbx_cq != NULL) { - lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); - phba->sli4_hba.mbx_cq = NULL; + /* create the Cq first */ + rc = lpfc_cq_create(phba, cq, eq, + (qtype == LPFC_MBOX) ? 
LPFC_MCQ : LPFC_WCQ, qtype); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6086 Failed setup of CQ (%d), rc = 0x%x\n", + qidx, (uint32_t)rc); + return rc; } - return; + if (qtype != LPFC_MBOX) { + /* Setup nvme_cq_map for fast lookup */ + if (cq_map) + *cq_map = cq->queue_id; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n", + qidx, cq->queue_id, qidx, eq->queue_id); + + /* create the wq */ + rc = lpfc_wq_create(phba, wq, cq, qtype); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6123 Fail setup fastpath WQ (%d), rc = 0x%x\n", + qidx, (uint32_t)rc); + /* no need to tear down cq - caller will do so */ + return rc; + } + + /* Bind this CQ/WQ to the NVME ring */ + pring = wq->pring; + pring->sli.sli4.wqp = (void *)wq; + cq->pring = pring; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n", + qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id); + } else { + rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0539 Failed setup of slow-path MQ: " + "rc = 0x%x\n", rc); + /* no need to tear down cq - caller will do so */ + return rc; + } + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", + phba->sli4_hba.mbx_wq->queue_id, + phba->sli4_hba.mbx_cq->queue_id); + } + + return 0; } /** @@ -7615,15 +8420,12 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba) int lpfc_sli4_queue_setup(struct lpfc_hba *phba) { - struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *pring; - int rc = -ENOMEM; - int fcp_eqidx, fcp_cqidx, fcp_wqidx; - int fcp_cq_index = 0; uint32_t shdr_status, shdr_add_status; union lpfc_sli4_cfg_shdr *shdr; LPFC_MBOXQ_t *mboxq; - uint32_t length; + int qidx; + uint32_t length, io_channel; + int rc = -ENOMEM; /* Check for dual-ULP support */ mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); @@ -7673,220 +8475,263 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) /* * Set up HBA Event Queues (EQs) */ + io_channel = phba->io_channel_irqs; /* Set up HBA event queue */ - if (phba->cfg_fcp_io_channel && !phba->sli4_hba.hba_eq) { + if (io_channel && !phba->sli4_hba.hba_eq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3147 Fast-path EQs not allocated\n"); rc = -ENOMEM; goto out_error; } - for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) { - if (!phba->sli4_hba.hba_eq[fcp_eqidx]) { + for (qidx = 0; qidx < io_channel; qidx++) { + if (!phba->sli4_hba.hba_eq[qidx]) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0522 Fast-path EQ (%d) not " - "allocated\n", fcp_eqidx); + "allocated\n", qidx); rc = -ENOMEM; - goto out_destroy_hba_eq; + goto out_destroy; } - rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[fcp_eqidx], - (phba->cfg_fcp_imax / phba->cfg_fcp_io_channel)); + rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[qidx], + phba->cfg_fcp_imax); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0523 Failed setup of fast-path EQ " - "(%d), rc = 0x%x\n", fcp_eqidx, + "(%d), rc = 0x%x\n", qidx, (uint32_t)rc); - goto out_destroy_hba_eq; + goto out_destroy; } lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "2584 HBA EQ setup: " - "queue[%d]-id=%d\n", fcp_eqidx, - phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id); - } - - /* Set up fast-path FCP Response Complete Queue */ - if (!phba->sli4_hba.fcp_cq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3148 Fast-path FCP CQ array not " - "allocated\n"); - rc = -ENOMEM; - goto 
out_destroy_hba_eq; + "2584 HBA EQ setup: queue[%d]-id=%d\n", + qidx, phba->sli4_hba.hba_eq[qidx]->queue_id); } - for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) { - if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { + if (phba->cfg_nvme_io_channel) { + if (!phba->sli4_hba.nvme_cq || !phba->sli4_hba.nvme_wq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0526 Fast-path FCP CQ (%d) not " - "allocated\n", fcp_cqidx); + "6084 Fast-path NVME %s array not allocated\n", + (phba->sli4_hba.nvme_cq) ? "CQ" : "WQ"); rc = -ENOMEM; - goto out_destroy_fcp_cq; + goto out_destroy; } - rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx], - phba->sli4_hba.hba_eq[fcp_cqidx], LPFC_WCQ, LPFC_FCP); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0527 Failed setup of fast-path FCP " - "CQ (%d), rc = 0x%x\n", fcp_cqidx, - (uint32_t)rc); - goto out_destroy_fcp_cq; - } - - /* Setup fcp_cq_map for fast lookup */ - phba->sli4_hba.fcp_cq_map[fcp_cqidx] = - phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id; - - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "2588 FCP CQ setup: cq[%d]-id=%d, " - "parent seq[%d]-id=%d\n", - fcp_cqidx, - phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, - fcp_cqidx, - phba->sli4_hba.hba_eq[fcp_cqidx]->queue_id); - } - /* Set up fast-path FCP Work Queue */ - if (!phba->sli4_hba.fcp_wq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3149 Fast-path FCP WQ array not " - "allocated\n"); - rc = -ENOMEM; - goto out_destroy_fcp_cq; + for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) { + rc = lpfc_create_wq_cq(phba, + phba->sli4_hba.hba_eq[ + qidx % io_channel], + phba->sli4_hba.nvme_cq[qidx], + phba->sli4_hba.nvme_wq[qidx], + &phba->sli4_hba.nvme_cq_map[qidx], + qidx, LPFC_NVME); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6123 Failed to setup fastpath " + "NVME WQ/CQ (%d), rc = 0x%x\n", + qidx, (uint32_t)rc); + goto out_destroy; + } + } } - for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) { - if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { + if (phba->cfg_fcp_io_channel) { + /* Set up fast-path FCP Response Complete Queue */ + if (!phba->sli4_hba.fcp_cq || !phba->sli4_hba.fcp_wq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0534 Fast-path FCP WQ (%d) not " - "allocated\n", fcp_wqidx); + "3148 Fast-path FCP %s array not allocated\n", + phba->sli4_hba.fcp_cq ? 
"WQ" : "CQ"); rc = -ENOMEM; - goto out_destroy_fcp_wq; - } - rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx], - phba->sli4_hba.fcp_cq[fcp_wqidx], - LPFC_FCP); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0535 Failed setup of fast-path FCP " - "WQ (%d), rc = 0x%x\n", fcp_wqidx, - (uint32_t)rc); - goto out_destroy_fcp_wq; + goto out_destroy; } - /* Bind this WQ to the next FCP ring */ - pring = &psli->ring[MAX_SLI3_CONFIGURED_RINGS + fcp_wqidx]; - pring->sli.sli4.wqp = (void *)phba->sli4_hba.fcp_wq[fcp_wqidx]; - phba->sli4_hba.fcp_cq[fcp_wqidx]->pring = pring; - - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "2591 FCP WQ setup: wq[%d]-id=%d, " - "parent cq[%d]-id=%d\n", - fcp_wqidx, - phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, - fcp_cq_index, - phba->sli4_hba.fcp_cq[fcp_wqidx]->queue_id); + for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) { + rc = lpfc_create_wq_cq(phba, + phba->sli4_hba.hba_eq[ + qidx % io_channel], + phba->sli4_hba.fcp_cq[qidx], + phba->sli4_hba.fcp_wq[qidx], + &phba->sli4_hba.fcp_cq_map[qidx], + qidx, LPFC_FCP); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0535 Failed to setup fastpath " + "FCP WQ/CQ (%d), rc = 0x%x\n", + qidx, (uint32_t)rc); + goto out_destroy; + } + } } + /* - * Set up Complete Queues (CQs) + * Set up Slow Path Complete Queues (CQs) */ - /* Set up slow-path MBOX Complete Queue as the first CQ */ - if (!phba->sli4_hba.mbx_cq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0528 Mailbox CQ not allocated\n"); - rc = -ENOMEM; - goto out_destroy_fcp_wq; - } - rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, - phba->sli4_hba.hba_eq[0], LPFC_MCQ, LPFC_MBOX); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0529 Failed setup of slow-path mailbox CQ: " - "rc = 0x%x\n", (uint32_t)rc); - goto out_destroy_fcp_wq; - } - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n", - phba->sli4_hba.mbx_cq->queue_id, - phba->sli4_hba.hba_eq[0]->queue_id); + /* Set up slow-path MBOX CQ/MQ */ - /* Set up slow-path ELS Complete Queue */ - if (!phba->sli4_hba.els_cq) { + if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0530 ELS CQ not allocated\n"); + "0528 %s not allocated\n", + phba->sli4_hba.mbx_cq ? 
+ "Mailbox WQ" : "Mailbox CQ"); rc = -ENOMEM; - goto out_destroy_mbx_cq; + goto out_destroy; } - rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, - phba->sli4_hba.hba_eq[0], LPFC_WCQ, LPFC_ELS); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0531 Failed setup of slow-path ELS CQ: " - "rc = 0x%x\n", (uint32_t)rc); - goto out_destroy_mbx_cq; - } - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n", - phba->sli4_hba.els_cq->queue_id, - phba->sli4_hba.hba_eq[0]->queue_id); - - /* - * Set up all the Work Queues (WQs) - */ - /* Set up Mailbox Command Queue */ - if (!phba->sli4_hba.mbx_wq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0538 Slow-path MQ not allocated\n"); - rc = -ENOMEM; - goto out_destroy_els_cq; - } - rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq, - phba->sli4_hba.mbx_cq, LPFC_MBOX); + rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0], + phba->sli4_hba.mbx_cq, + phba->sli4_hba.mbx_wq, + NULL, 0, LPFC_MBOX); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0539 Failed setup of slow-path MQ: " - "rc = 0x%x\n", rc); - goto out_destroy_els_cq; + "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n", + (uint32_t)rc); + goto out_destroy; + } + if (phba->nvmet_support) { + if (!phba->sli4_hba.nvmet_cqset) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3165 Fast-path NVME CQ Set " + "array not allocated\n"); + rc = -ENOMEM; + goto out_destroy; + } + if (phba->cfg_nvmet_mrq > 1) { + rc = lpfc_cq_create_set(phba, + phba->sli4_hba.nvmet_cqset, + phba->sli4_hba.hba_eq, + LPFC_WCQ, LPFC_NVMET); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3164 Failed setup of NVME CQ " + "Set, rc = 0x%x\n", + (uint32_t)rc); + goto out_destroy; + } + } else { + /* Set up NVMET Receive Complete Queue */ + rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0], + phba->sli4_hba.hba_eq[0], + LPFC_WCQ, LPFC_NVMET); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6089 Failed setup NVMET CQ: " + "rc = 0x%x\n", (uint32_t)rc); + goto out_destroy; + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "6090 NVMET CQ setup: cq-id=%d, " + "parent eq-id=%d\n", + phba->sli4_hba.nvmet_cqset[0]->queue_id, + phba->sli4_hba.hba_eq[0]->queue_id); + } } - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", - phba->sli4_hba.mbx_wq->queue_id, - phba->sli4_hba.mbx_cq->queue_id); - /* Set up slow-path ELS Work Queue */ - if (!phba->sli4_hba.els_wq) { + /* Set up slow-path ELS WQ/CQ */ + if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0536 Slow-path ELS WQ not allocated\n"); + "0530 ELS %s not allocated\n", + phba->sli4_hba.els_cq ? 
"WQ" : "CQ"); rc = -ENOMEM; - goto out_destroy_mbx_wq; + goto out_destroy; } - rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq, - phba->sli4_hba.els_cq, LPFC_ELS); + rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0], + phba->sli4_hba.els_cq, + phba->sli4_hba.els_wq, + NULL, 0, LPFC_ELS); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0537 Failed setup of slow-path ELS WQ: " - "rc = 0x%x\n", (uint32_t)rc); - goto out_destroy_mbx_wq; + "0529 Failed setup of ELS WQ/CQ: rc = 0x%x\n", + (uint32_t)rc); + goto out_destroy; } - - /* Bind this WQ to the ELS ring */ - pring = &psli->ring[LPFC_ELS_RING]; - pring->sli.sli4.wqp = (void *)phba->sli4_hba.els_wq; - phba->sli4_hba.els_cq->pring = pring; - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", phba->sli4_hba.els_wq->queue_id, phba->sli4_hba.els_cq->queue_id); + if (phba->cfg_nvme_io_channel) { + /* Set up NVME LS Complete Queue */ + if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6091 LS %s not allocated\n", + phba->sli4_hba.nvmels_cq ? "WQ" : "CQ"); + rc = -ENOMEM; + goto out_destroy; + } + rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0], + phba->sli4_hba.nvmels_cq, + phba->sli4_hba.nvmels_wq, + NULL, 0, LPFC_NVME_LS); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0529 Failed setup of NVVME LS WQ/CQ: " + "rc = 0x%x\n", (uint32_t)rc); + goto out_destroy; + } + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "6096 ELS WQ setup: wq-id=%d, " + "parent cq-id=%d\n", + phba->sli4_hba.nvmels_wq->queue_id, + phba->sli4_hba.nvmels_cq->queue_id); + } + /* - * Create Receive Queue (RQ) + * Create NVMET Receive Queue (RQ) */ + if (phba->nvmet_support) { + if ((!phba->sli4_hba.nvmet_cqset) || + (!phba->sli4_hba.nvmet_mrq_hdr) || + (!phba->sli4_hba.nvmet_mrq_data)) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6130 MRQ CQ Queues not " + "allocated\n"); + rc = -ENOMEM; + goto out_destroy; + } + if (phba->cfg_nvmet_mrq > 1) { + rc = lpfc_mrq_create(phba, + phba->sli4_hba.nvmet_mrq_hdr, + phba->sli4_hba.nvmet_mrq_data, + phba->sli4_hba.nvmet_cqset, + LPFC_NVMET); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6098 Failed setup of NVMET " + "MRQ: rc = 0x%x\n", + (uint32_t)rc); + goto out_destroy; + } + + } else { + rc = lpfc_rq_create(phba, + phba->sli4_hba.nvmet_mrq_hdr[0], + phba->sli4_hba.nvmet_mrq_data[0], + phba->sli4_hba.nvmet_cqset[0], + LPFC_NVMET); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6057 Failed setup of NVMET " + "Receive Queue: rc = 0x%x\n", + (uint32_t)rc); + goto out_destroy; + } + + lpfc_printf_log( + phba, KERN_INFO, LOG_INIT, + "6099 NVMET RQ setup: hdr-rq-id=%d, " + "dat-rq-id=%d parent cq-id=%d\n", + phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id, + phba->sli4_hba.nvmet_mrq_data[0]->queue_id, + phba->sli4_hba.nvmet_cqset[0]->queue_id); + + } + } + if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0540 Receive Queue not allocated\n"); rc = -ENOMEM; - goto out_destroy_els_wq; + goto out_destroy; } lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ); @@ -7898,7 +8743,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0541 Failed setup of Receive Queue: " "rc = 0x%x\n", (uint32_t)rc); - goto out_destroy_fcp_wq; + goto out_destroy; } lpfc_printf_log(phba, KERN_INFO, LOG_INIT, @@ -7914,38 +8759,17 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 
"0549 Failed setup of FOF Queues: " "rc = 0x%x\n", rc); - goto out_destroy_els_rq; + goto out_destroy; } } - /* - * Configure EQ delay multipier for interrupt coalescing using - * MODIFY_EQ_DELAY for all EQs created, LPFC_MAX_EQ_DELAY at a time. - */ - for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; - fcp_eqidx += LPFC_MAX_EQ_DELAY) - lpfc_modify_fcp_eq_delay(phba, fcp_eqidx); + for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY_EQID_CNT) + lpfc_modify_hba_eq_delay(phba, qidx); + return 0; -out_destroy_els_rq: - lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); -out_destroy_els_wq: - lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); -out_destroy_mbx_wq: - lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); -out_destroy_els_cq: - lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); -out_destroy_mbx_cq: - lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); -out_destroy_fcp_wq: - for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) - lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]); -out_destroy_fcp_cq: - for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) - lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); -out_destroy_hba_eq: - for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) - lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_eqidx]); +out_destroy: + lpfc_sli4_queue_unset(phba); out_error: return rc; } @@ -7965,39 +8789,81 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) void lpfc_sli4_queue_unset(struct lpfc_hba *phba) { - int fcp_qidx; + int qidx; /* Unset the queues created for Flash Optimized Fabric operations */ if (phba->cfg_fof) lpfc_fof_queue_destroy(phba); + /* Unset mailbox command work queue */ - lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); + if (phba->sli4_hba.mbx_wq) + lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); + + /* Unset NVME LS work queue */ + if (phba->sli4_hba.nvmels_wq) + lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq); + /* Unset ELS work queue */ - lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); + if (phba->sli4_hba.els_cq) + lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); + /* Unset unsolicited receive queue */ - lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); + if (phba->sli4_hba.hdr_rq) + lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, + phba->sli4_hba.dat_rq); + /* Unset FCP work queue */ - if (phba->sli4_hba.fcp_wq) { - for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel; - fcp_qidx++) - lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]); + if (phba->sli4_hba.fcp_wq) + for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) + lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[qidx]); + + /* Unset NVME work queue */ + if (phba->sli4_hba.nvme_wq) { + for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) + lpfc_wq_destroy(phba, phba->sli4_hba.nvme_wq[qidx]); } + /* Unset mailbox command complete queue */ - lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); + if (phba->sli4_hba.mbx_cq) + lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); + /* Unset ELS complete queue */ - lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); - /* Unset FCP response complete queue */ - if (phba->sli4_hba.fcp_cq) { - for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel; - fcp_qidx++) - lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); + if (phba->sli4_hba.els_cq) + lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); + + /* Unset NVME LS complete queue */ + if (phba->sli4_hba.nvmels_cq) + lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq); + + /* Unset NVME response complete queue */ + if (phba->sli4_hba.nvme_cq) + for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) 
+ lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]); + + /* Unset NVMET MRQ queue */ + if (phba->sli4_hba.nvmet_mrq_hdr) { + for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) + lpfc_rq_destroy(phba, + phba->sli4_hba.nvmet_mrq_hdr[qidx], + phba->sli4_hba.nvmet_mrq_data[qidx]); } - /* Unset fast-path event queue */ - if (phba->sli4_hba.hba_eq) { - for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel; - fcp_qidx++) - lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_qidx]); + + /* Unset NVMET CQ Set complete queue */ + if (phba->sli4_hba.nvmet_cqset) { + for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) + lpfc_cq_destroy(phba, + phba->sli4_hba.nvmet_cqset[qidx]); } + + /* Unset FCP response complete queue */ + if (phba->sli4_hba.fcp_cq) + for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) + lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[qidx]); + + /* Unset fast-path event queue */ + if (phba->sli4_hba.hba_eq) + for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) + lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[qidx]); } /** @@ -8155,6 +9021,11 @@ lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba) /* Pending ELS XRI abort events */ list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue, &cqelist); + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + /* Pending NVME XRI abort events */ + list_splice_init(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue, + &cqelist); + } /* Pending asynnc events */ list_splice_init(&phba->sli4_hba.sp_asynce_work_queue, &cqelist); @@ -8484,16 +9355,7 @@ lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) * @phba: pointer to lpfc hba data structure. * * This routine is invoked to enable the MSI-X interrupt vectors to device - * with SLI-3 interface specs. The kernel function pci_enable_msix_exact() - * is called to enable the MSI-X vectors. Note that pci_enable_msix_exact(), - * once invoked, enables either all or nothing, depending on the current - * availability of PCI vector resources. The device driver is responsible - * for calling the individual request_irq() to register each MSI-X vector - * with a interrupt handler, which is done in this function. Note that - * later when device is unloading, the driver should always call free_irq() - * on all MSI-X vectors it has done request_irq() on before calling - * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device - * will be left with MSI-X enabled and leaks its vectors. + * with SLI-3 interface specs. 
* * Return codes * 0 - successful @@ -8502,33 +9364,24 @@ lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) static int lpfc_sli_enable_msix(struct lpfc_hba *phba) { - int rc, i; + int rc; LPFC_MBOXQ_t *pmb; /* Set up MSI-X multi-message vectors */ - for (i = 0; i < LPFC_MSIX_VECTORS; i++) - phba->msix_entries[i].entry = i; - - /* Configure MSI-X capability structure */ - rc = pci_enable_msix_exact(phba->pcidev, phba->msix_entries, - LPFC_MSIX_VECTORS); - if (rc) { + rc = pci_alloc_irq_vectors(phba->pcidev, + LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX); + if (rc < 0) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0420 PCI enable MSI-X failed (%d)\n", rc); goto vec_fail_out; } - for (i = 0; i < LPFC_MSIX_VECTORS; i++) - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "0477 MSI-X entry[%d]: vector=x%x " - "message=%d\n", i, - phba->msix_entries[i].vector, - phba->msix_entries[i].entry); + /* * Assign MSI-X vectors to interrupt handlers */ /* vector-0 is associated to slow-path handler */ - rc = request_irq(phba->msix_entries[0].vector, + rc = request_irq(pci_irq_vector(phba->pcidev, 0), &lpfc_sli_sp_intr_handler, 0, LPFC_SP_DRIVER_HANDLER_NAME, phba); if (rc) { @@ -8539,7 +9392,7 @@ lpfc_sli_enable_msix(struct lpfc_hba *phba) } /* vector-1 is associated to fast-path handler */ - rc = request_irq(phba->msix_entries[1].vector, + rc = request_irq(pci_irq_vector(phba->pcidev, 1), &lpfc_sli_fp_intr_handler, 0, LPFC_FP_DRIVER_HANDLER_NAME, phba); @@ -8584,41 +9437,20 @@ lpfc_sli_enable_msix(struct lpfc_hba *phba) mem_fail_out: /* free the irq already requested */ - free_irq(phba->msix_entries[1].vector, phba); + free_irq(pci_irq_vector(phba->pcidev, 1), phba); irq_fail_out: /* free the irq already requested */ - free_irq(phba->msix_entries[0].vector, phba); + free_irq(pci_irq_vector(phba->pcidev, 0), phba); msi_fail_out: /* Unconfigure MSI-X capability structure */ - pci_disable_msix(phba->pcidev); + pci_free_irq_vectors(phba->pcidev); vec_fail_out: return rc; } -/** - * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device. - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to release the MSI-X vectors and then disable the - * MSI-X interrupt mode to device with SLI-3 interface spec. - **/ -static void -lpfc_sli_disable_msix(struct lpfc_hba *phba) -{ - int i; - - /* Free up MSI-X multi-message vectors */ - for (i = 0; i < LPFC_MSIX_VECTORS; i++) - free_irq(phba->msix_entries[i].vector, phba); - /* Disable MSI-X */ - pci_disable_msix(phba->pcidev); - - return; -} - /** * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device. * @phba: pointer to lpfc hba data structure. @@ -8658,24 +9490,6 @@ lpfc_sli_enable_msi(struct lpfc_hba *phba) return rc; } -/** - * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device. - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to disable the MSI interrupt mode to device with - * SLI-3 interface spec. The driver calls free_irq() on MSI vector it has - * done request_irq() on before calling pci_disable_msi(). Failure to do so - * results in a BUG_ON() and a device will be left with MSI enabled and leaks - * its vector. - */ -static void -lpfc_sli_disable_msi(struct lpfc_hba *phba) -{ - free_irq(phba->pcidev->irq, phba); - pci_disable_msi(phba->pcidev); - return; -} - /** * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device. * @phba: pointer to lpfc hba data structure. 
@@ -8747,107 +9561,50 @@ lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) static void lpfc_sli_disable_intr(struct lpfc_hba *phba) { - /* Disable the currently initialized interrupt mode */ + int nr_irqs, i; + if (phba->intr_type == MSIX) - lpfc_sli_disable_msix(phba); - else if (phba->intr_type == MSI) - lpfc_sli_disable_msi(phba); - else if (phba->intr_type == INTx) - free_irq(phba->pcidev->irq, phba); + nr_irqs = LPFC_MSIX_VECTORS; + else + nr_irqs = 1; + + for (i = 0; i < nr_irqs; i++) + free_irq(pci_irq_vector(phba->pcidev, i), phba); + pci_free_irq_vectors(phba->pcidev); /* Reset interrupt management states */ phba->intr_type = NONE; phba->sli.slistat.sli_intr = 0; - - return; } /** - * lpfc_find_next_cpu - Find next available CPU that matches the phys_id + * lpfc_cpu_affinity_check - Check vector CPU affinity mappings * @phba: pointer to lpfc hba data structure. + * @vectors: number of msix vectors allocated. * - * Find next available CPU to use for IRQ to CPU affinity. + * The routine will figure out the CPU affinity assignment for every + * MSI-X vector allocated for the HBA. The hba_eq_hdl will be updated + * with a pointer to the CPU mask that defines ALL the CPUs this vector + * can be associated with. If the vector can be unquely associated with + * a single CPU, that CPU will be recorded in hba_eq_hdl[index].cpu. + * In addition, the CPU to IO channel mapping will be calculated + * and the phba->sli4_hba.cpu_map array will reflect this. */ -static int -lpfc_find_next_cpu(struct lpfc_hba *phba, uint32_t phys_id) +static void +lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) { struct lpfc_vector_map_info *cpup; + int index = 0; + int vec = 0; int cpu; - - cpup = phba->sli4_hba.cpu_map; - for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { - /* CPU must be online */ - if (cpu_online(cpu)) { - if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) && - (lpfc_used_cpu[cpu] == LPFC_VECTOR_MAP_EMPTY) && - (cpup->phys_id == phys_id)) { - return cpu; - } - } - cpup++; - } - - /* - * If we get here, we have used ALL CPUs for the specific - * phys_id. Now we need to clear out lpfc_used_cpu and start - * reusing CPUs. - */ - - for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { - if (lpfc_used_cpu[cpu] == phys_id) - lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY; - } - - cpup = phba->sli4_hba.cpu_map; - for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { - /* CPU must be online */ - if (cpu_online(cpu)) { - if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) && - (cpup->phys_id == phys_id)) { - return cpu; - } - } - cpup++; - } - return LPFC_VECTOR_MAP_EMPTY; -} - -/** - * lpfc_sli4_set_affinity - Set affinity for HBA IRQ vectors - * @phba: pointer to lpfc hba data structure. - * @vectors: number of HBA vectors - * - * Affinitize MSIX IRQ vectors to CPUs. Try to equally spread vector - * affinization across multple physical CPUs (numa nodes). - * In addition, this routine will assign an IO channel for each CPU - * to use when issuing I/Os. 
- */ -static int -lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors) -{ - int i, idx, saved_chann, used_chann, cpu, phys_id; - int max_phys_id, min_phys_id; - int num_io_channel, first_cpu, chan; - struct lpfc_vector_map_info *cpup; #ifdef CONFIG_X86 struct cpuinfo_x86 *cpuinfo; #endif - uint8_t chann[LPFC_FCP_IO_CHAN_MAX+1]; - - /* If there is no mapping, just return */ - if (!phba->cfg_fcp_cpu_map) - return 1; /* Init cpu_map array */ memset(phba->sli4_hba.cpu_map, 0xff, (sizeof(struct lpfc_vector_map_info) * - phba->sli4_hba.num_present_cpu)); - - max_phys_id = 0; - min_phys_id = 0xff; - phys_id = 0; - num_io_channel = 0; - first_cpu = LPFC_VECTOR_MAP_EMPTY; + phba->sli4_hba.num_present_cpu)); /* Update CPU map with physical id and core id of each CPU */ cpup = phba->sli4_hba.cpu_map; @@ -8861,184 +9618,16 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors) cpup->phys_id = 0; cpup->core_id = 0; #endif - - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3328 CPU physid %d coreid %d\n", - cpup->phys_id, cpup->core_id); - - if (cpup->phys_id > max_phys_id) - max_phys_id = cpup->phys_id; - if (cpup->phys_id < min_phys_id) - min_phys_id = cpup->phys_id; + cpup->channel_id = index; /* For now round robin */ + cpup->irq = pci_irq_vector(phba->pcidev, vec); + vec++; + if (vec >= vectors) + vec = 0; + index++; + if (index >= phba->cfg_fcp_io_channel) + index = 0; cpup++; } - - phys_id = min_phys_id; - /* Now associate the HBA vectors with specific CPUs */ - for (idx = 0; idx < vectors; idx++) { - cpup = phba->sli4_hba.cpu_map; - cpu = lpfc_find_next_cpu(phba, phys_id); - if (cpu == LPFC_VECTOR_MAP_EMPTY) { - - /* Try for all phys_id's */ - for (i = 1; i < max_phys_id; i++) { - phys_id++; - if (phys_id > max_phys_id) - phys_id = min_phys_id; - cpu = lpfc_find_next_cpu(phba, phys_id); - if (cpu == LPFC_VECTOR_MAP_EMPTY) - continue; - goto found; - } - - /* Use round robin for scheduling */ - phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_ROUND_ROBIN; - chan = 0; - cpup = phba->sli4_hba.cpu_map; - for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { - cpup->channel_id = chan; - cpup++; - chan++; - if (chan >= phba->cfg_fcp_io_channel) - chan = 0; - } - - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3329 Cannot set affinity:" - "Error mapping vector %d (%d)\n", - idx, vectors); - return 0; - } -found: - cpup += cpu; - if (phba->cfg_fcp_cpu_map == LPFC_DRIVER_CPU_MAP) - lpfc_used_cpu[cpu] = phys_id; - - /* Associate vector with selected CPU */ - cpup->irq = phba->sli4_hba.msix_entries[idx].vector; - - /* Associate IO channel with selected CPU */ - cpup->channel_id = idx; - num_io_channel++; - - if (first_cpu == LPFC_VECTOR_MAP_EMPTY) - first_cpu = cpu; - - /* Now affinitize to the selected CPU */ - i = irq_set_affinity_hint(phba->sli4_hba.msix_entries[idx]. - vector, get_cpu_mask(cpu)); - - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3330 Set Affinity: CPU %d channel %d " - "irq %d (%x)\n", - cpu, cpup->channel_id, - phba->sli4_hba.msix_entries[idx].vector, i); - - /* Spread vector mapping across multple physical CPU nodes */ - phys_id++; - if (phys_id > max_phys_id) - phys_id = min_phys_id; - } - - /* - * Finally fill in the IO channel for any remaining CPUs. - * At this point, all IO channels have been assigned to a specific - * MSIx vector, mapped to a specific CPU. - * Base the remaining IO channel assigned, to IO channels already - * assigned to other CPUs on the same phys_id. 
- */ - for (i = min_phys_id; i <= max_phys_id; i++) { - /* - * If there are no io channels already mapped to - * this phys_id, just round robin thru the io_channels. - * Setup chann[] for round robin. - */ - for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) - chann[idx] = idx; - - saved_chann = 0; - used_chann = 0; - - /* - * First build a list of IO channels already assigned - * to this phys_id before reassigning the same IO - * channels to the remaining CPUs. - */ - cpup = phba->sli4_hba.cpu_map; - cpu = first_cpu; - cpup += cpu; - for (idx = 0; idx < phba->sli4_hba.num_present_cpu; - idx++) { - if (cpup->phys_id == i) { - /* - * Save any IO channels that are - * already mapped to this phys_id. - */ - if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) { - if (saved_chann <= - LPFC_FCP_IO_CHAN_MAX) { - chann[saved_chann] = - cpup->channel_id; - saved_chann++; - } - goto out; - } - - /* See if we are using round-robin */ - if (saved_chann == 0) - saved_chann = - phba->cfg_fcp_io_channel; - - /* Associate next IO channel with CPU */ - cpup->channel_id = chann[used_chann]; - num_io_channel++; - used_chann++; - if (used_chann == saved_chann) - used_chann = 0; - - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3331 Set IO_CHANN " - "CPU %d channel %d\n", - idx, cpup->channel_id); - } -out: - cpu++; - if (cpu >= phba->sli4_hba.num_present_cpu) { - cpup = phba->sli4_hba.cpu_map; - cpu = 0; - } else { - cpup++; - } - } - } - - if (phba->sli4_hba.num_online_cpu != phba->sli4_hba.num_present_cpu) { - cpup = phba->sli4_hba.cpu_map; - for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) { - if (cpup->channel_id == LPFC_VECTOR_MAP_EMPTY) { - cpup->channel_id = 0; - num_io_channel++; - - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3332 Assign IO_CHANN " - "CPU %d channel %d\n", - idx, cpup->channel_id); - } - cpup++; - } - } - - /* Sanity check */ - if (num_io_channel != phba->sli4_hba.num_present_cpu) - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3333 Set affinity mismatch:" - "%d chann != %d cpus: %d vectors\n", - num_io_channel, phba->sli4_hba.num_present_cpu, - vectors); - - /* Enable using cpu affinity for scheduling */ - phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU; - return 1; } @@ -9047,14 +9636,7 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors) * @phba: pointer to lpfc hba data structure. * * This routine is invoked to enable the MSI-X interrupt vectors to device - * with SLI-4 interface spec. The kernel function pci_enable_msix_range() - * is called to enable the MSI-X vectors. The device driver is responsible - * for calling the individual request_irq() to register each MSI-X vector - * with a interrupt handler, which is done in this function. Note that - * later when device is unloading, the driver should always call free_irq() - * on all MSI-X vectors it has done request_irq() on before calling - * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device - * will be left with MSI-X enabled and leaks its vectors. + * with SLI-4 interface spec. 
* * Return codes * 0 - successful @@ -9066,17 +9648,13 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba) int vectors, rc, index; /* Set up MSI-X multi-message vectors */ - for (index = 0; index < phba->cfg_fcp_io_channel; index++) - phba->sli4_hba.msix_entries[index].entry = index; - - /* Configure MSI-X capability structure */ - vectors = phba->cfg_fcp_io_channel; - if (phba->cfg_fof) { - phba->sli4_hba.msix_entries[index].entry = index; + vectors = phba->io_channel_irqs; + if (phba->cfg_fof) vectors++; - } - rc = pci_enable_msix_range(phba->pcidev, phba->sli4_hba.msix_entries, - 2, vectors); + + rc = pci_alloc_irq_vectors(phba->pcidev, + (phba->nvmet_support) ? 1 : 2, + vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); if (rc < 0) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0484 PCI enable MSI-X failed (%d)\n", rc); @@ -9084,14 +9662,6 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba) } vectors = rc; - /* Log MSI-X vector assignment */ - for (index = 0; index < vectors; index++) - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "0489 MSI-X entry[%d]: vector=x%x " - "message=%d\n", index, - phba->sli4_hba.msix_entries[index].vector, - phba->sli4_hba.msix_entries[index].entry); - /* Assign MSI-X vectors to interrupt handlers */ for (index = 0; index < vectors; index++) { memset(&phba->sli4_hba.handler_name[index], 0, 16); @@ -9099,21 +9669,19 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba) LPFC_SLI4_HANDLER_NAME_SZ, LPFC_DRIVER_HANDLER_NAME"%d", index); - phba->sli4_hba.fcp_eq_hdl[index].idx = index; - phba->sli4_hba.fcp_eq_hdl[index].phba = phba; - atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1); + phba->sli4_hba.hba_eq_hdl[index].idx = index; + phba->sli4_hba.hba_eq_hdl[index].phba = phba; + atomic_set(&phba->sli4_hba.hba_eq_hdl[index].hba_eq_in_use, 1); if (phba->cfg_fof && (index == (vectors - 1))) - rc = request_irq( - phba->sli4_hba.msix_entries[index].vector, + rc = request_irq(pci_irq_vector(phba->pcidev, index), &lpfc_sli4_fof_intr_handler, 0, (char *)&phba->sli4_hba.handler_name[index], - &phba->sli4_hba.fcp_eq_hdl[index]); + &phba->sli4_hba.hba_eq_hdl[index]); else - rc = request_irq( - phba->sli4_hba.msix_entries[index].vector, + rc = request_irq(pci_irq_vector(phba->pcidev, index), &lpfc_sli4_hba_intr_handler, 0, (char *)&phba->sli4_hba.handler_name[index], - &phba->sli4_hba.fcp_eq_hdl[index]); + &phba->sli4_hba.hba_eq_hdl[index]); if (rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0486 MSI-X fast-path (%d) " @@ -9125,63 +9693,37 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba) if (phba->cfg_fof) vectors--; - if (vectors != phba->cfg_fcp_io_channel) { + if (vectors != phba->io_channel_irqs) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3238 Reducing IO channels to match number of " "MSI-X vectors, requested %d got %d\n", - phba->cfg_fcp_io_channel, vectors); - phba->cfg_fcp_io_channel = vectors; + phba->io_channel_irqs, vectors); + if (phba->cfg_fcp_io_channel > vectors) + phba->cfg_fcp_io_channel = vectors; + if (phba->cfg_nvme_io_channel > vectors) + phba->cfg_nvme_io_channel = vectors; + if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel) + phba->io_channel_irqs = phba->cfg_fcp_io_channel; + else + phba->io_channel_irqs = phba->cfg_nvme_io_channel; } + lpfc_cpu_affinity_check(phba, vectors); - if (!shost_use_blk_mq(lpfc_shost_from_vport(phba->pport))) - lpfc_sli4_set_affinity(phba, vectors); return rc; cfg_fail_out: /* free the irq already requested */ - for (--index; index >= 0; index--) { - irq_set_affinity_hint(phba->sli4_hba.msix_entries[index]. 
- vector, NULL); - free_irq(phba->sli4_hba.msix_entries[index].vector, - &phba->sli4_hba.fcp_eq_hdl[index]); - } + for (--index; index >= 0; index--) + free_irq(pci_irq_vector(phba->pcidev, index), + &phba->sli4_hba.hba_eq_hdl[index]); /* Unconfigure MSI-X capability structure */ - pci_disable_msix(phba->pcidev); + pci_free_irq_vectors(phba->pcidev); vec_fail_out: return rc; } -/** - * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to release the MSI-X vectors and then disable the - * MSI-X interrupt mode to device with SLI-4 interface spec. - **/ -static void -lpfc_sli4_disable_msix(struct lpfc_hba *phba) -{ - int index; - - /* Free up MSI-X multi-message vectors */ - for (index = 0; index < phba->cfg_fcp_io_channel; index++) { - irq_set_affinity_hint(phba->sli4_hba.msix_entries[index]. - vector, NULL); - free_irq(phba->sli4_hba.msix_entries[index].vector, - &phba->sli4_hba.fcp_eq_hdl[index]); - } - if (phba->cfg_fof) { - free_irq(phba->sli4_hba.msix_entries[index].vector, - &phba->sli4_hba.fcp_eq_hdl[index]); - } - /* Disable MSI-X */ - pci_disable_msix(phba->pcidev); - - return; -} - /** * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device * @phba: pointer to lpfc hba data structure. @@ -9220,36 +9762,18 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba) return rc; } - for (index = 0; index < phba->cfg_fcp_io_channel; index++) { - phba->sli4_hba.fcp_eq_hdl[index].idx = index; - phba->sli4_hba.fcp_eq_hdl[index].phba = phba; + for (index = 0; index < phba->io_channel_irqs; index++) { + phba->sli4_hba.hba_eq_hdl[index].idx = index; + phba->sli4_hba.hba_eq_hdl[index].phba = phba; } if (phba->cfg_fof) { - phba->sli4_hba.fcp_eq_hdl[index].idx = index; - phba->sli4_hba.fcp_eq_hdl[index].phba = phba; + phba->sli4_hba.hba_eq_hdl[index].idx = index; + phba->sli4_hba.hba_eq_hdl[index].phba = phba; } return 0; } -/** - * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to disable the MSI interrupt mode to device with - * SLI-4 interface spec. The driver calls free_irq() on MSI vector it has - * done request_irq() on before calling pci_disable_msi(). Failure to do so - * results in a BUG_ON() and a device will be left with MSI enabled and leaks - * its vector. - **/ -static void -lpfc_sli4_disable_msi(struct lpfc_hba *phba) -{ - free_irq(phba->pcidev->irq, phba); - pci_disable_msi(phba->pcidev); - return; -} - /** * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device * @phba: pointer to lpfc hba data structure. @@ -9270,7 +9794,7 @@ static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) { uint32_t intr_mode = LPFC_INTR_ERROR; - int retval, index; + int retval, idx; if (cfg_mode == 2) { /* Preparation before conf_msi mbox cmd */ @@ -9301,21 +9825,23 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, IRQF_SHARED, LPFC_DRIVER_NAME, phba); if (!retval) { + struct lpfc_hba_eq_hdl *eqhdl; + /* Indicate initialization to INTx mode */ phba->intr_type = INTx; intr_mode = 0; - for (index = 0; index < phba->cfg_fcp_io_channel; - index++) { - phba->sli4_hba.fcp_eq_hdl[index].idx = index; - phba->sli4_hba.fcp_eq_hdl[index].phba = phba; - atomic_set(&phba->sli4_hba.fcp_eq_hdl[index]. 
- fcp_eq_in_use, 1); + + for (idx = 0; idx < phba->io_channel_irqs; idx++) { + eqhdl = &phba->sli4_hba.hba_eq_hdl[idx]; + eqhdl->idx = idx; + eqhdl->phba = phba; + atomic_set(&eqhdl->hba_eq_in_use, 1); } if (phba->cfg_fof) { - phba->sli4_hba.fcp_eq_hdl[index].idx = index; - phba->sli4_hba.fcp_eq_hdl[index].phba = phba; - atomic_set(&phba->sli4_hba.fcp_eq_hdl[index]. - fcp_eq_in_use, 1); + eqhdl = &phba->sli4_hba.hba_eq_hdl[idx]; + eqhdl->idx = idx; + eqhdl->phba = phba; + atomic_set(&eqhdl->hba_eq_in_use, 1); } } } @@ -9335,18 +9861,26 @@ static void lpfc_sli4_disable_intr(struct lpfc_hba *phba) { /* Disable the currently initialized interrupt mode */ - if (phba->intr_type == MSIX) - lpfc_sli4_disable_msix(phba); - else if (phba->intr_type == MSI) - lpfc_sli4_disable_msi(phba); - else if (phba->intr_type == INTx) + if (phba->intr_type == MSIX) { + int index; + + /* Free up MSI-X multi-message vectors */ + for (index = 0; index < phba->io_channel_irqs; index++) + free_irq(pci_irq_vector(phba->pcidev, index), + &phba->sli4_hba.hba_eq_hdl[index]); + + if (phba->cfg_fof) + free_irq(pci_irq_vector(phba->pcidev, index), + &phba->sli4_hba.hba_eq_hdl[index]); + } else { free_irq(phba->pcidev->irq, phba); + } + + pci_free_irq_vectors(phba->pcidev); /* Reset interrupt management states */ phba->intr_type = NONE; phba->sli.slistat.sli_intr = 0; - - return; } /** @@ -9399,11 +9933,27 @@ static void lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) { int wait_time = 0; - int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list); + int nvme_xri_cmpl = 1; + int fcp_xri_cmpl = 1; int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); + int nvmet_xri_cmpl = + list_empty(&phba->sli4_hba.lpfc_abts_nvmet_sgl_list); + + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) + fcp_xri_cmpl = + list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list); + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) + nvme_xri_cmpl = + list_empty(&phba->sli4_hba.lpfc_abts_nvme_buf_list); - while (!fcp_xri_cmpl || !els_xri_cmpl) { + while (!fcp_xri_cmpl || !els_xri_cmpl || !nvme_xri_cmpl || + !nvmet_xri_cmpl) { if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { + if (!nvme_xri_cmpl) + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6100 NVME XRI exchange busy " + "wait time: %d seconds.\n", + wait_time/1000); if (!fcp_xri_cmpl) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2877 FCP XRI exchange busy " @@ -9420,10 +9970,19 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; } - fcp_xri_cmpl = - list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list); + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) + nvme_xri_cmpl = list_empty( + &phba->sli4_hba.lpfc_abts_nvme_buf_list); + + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) + fcp_xri_cmpl = list_empty( + &phba->sli4_hba.lpfc_abts_scsi_buf_list); + els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); + + nvmet_xri_cmpl = + list_empty(&phba->sli4_hba.lpfc_abts_nvmet_sgl_list); } } @@ -9635,10 +10194,35 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, mbx_sli4_parameters); + sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters); sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, mbx_sli4_parameters); phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters); phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, 
mbx_sli4_parameters); + phba->nvme_support = (bf_get(cfg_nvme, mbx_sli4_parameters) && + bf_get(cfg_xib, mbx_sli4_parameters)); + + if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) || + !phba->nvme_support) { + phba->nvme_support = 0; + phba->nvmet_support = 0; + phba->cfg_nvmet_mrq = 0; + phba->cfg_nvme_io_channel = 0; + phba->io_channel_irqs = phba->cfg_fcp_io_channel; + lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME, + "6101 Disabling NVME support: " + "Not supported by firmware: %d %d\n", + bf_get(cfg_nvme, mbx_sli4_parameters), + bf_get(cfg_xib, mbx_sli4_parameters)); + + /* If firmware doesn't support NVME, just use SCSI support */ + if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) + return -ENODEV; + phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP; + } + + if (bf_get(cfg_xib, mbx_sli4_parameters) && phba->cfg_suppress_rsp) + phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP; /* Make sure that sge_supp_len can be handled by the driver */ if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) @@ -9713,14 +10297,6 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) goto out_disable_pci_dev; } - /* Set up phase-1 common device driver resources */ - error = lpfc_setup_driver_resource_phase1(phba); - if (error) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1403 Failed to set up driver resource.\n"); - goto out_unset_pci_mem_s3; - } - /* Set up SLI-3 specific device driver resources */ error = lpfc_sli_driver_resource_setup(phba); if (error) { @@ -9876,6 +10452,7 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev) /* Remove FC host and then SCSI host with the physical port */ fc_remove_host(shost); scsi_remove_host(shost); + lpfc_cleanup(vport); /* @@ -10295,6 +10872,23 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) return 0; } +/** + * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve + * @phba: pointer to lpfc hba data structure. + * + * returns the number of ELS/CT + NVMET IOCBs to reserve + **/ +int +lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba) +{ + int max_xri = lpfc_sli4_get_els_iocb_cnt(phba); + + if (phba->nvmet_support) + max_xri += LPFC_NVMET_BUF_POST; + return max_xri; +} + + /** * lpfc_write_firmware - attempt to write a firmware image to the port * @fw: pointer to firmware image returned from request_firmware. 
@@ -10459,7 +11053,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) struct Scsi_Host *shost = NULL; int error; uint32_t cfg_mode, intr_mode; - int adjusted_fcp_io_channel; /* Allocate memory for HBA structure */ phba = lpfc_hba_alloc(pdev); @@ -10484,14 +11077,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) goto out_disable_pci_dev; } - /* Set up phase-1 common device driver resources */ - error = lpfc_setup_driver_resource_phase1(phba); - if (error) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1411 Failed to set up driver resource.\n"); - goto out_unset_pci_mem_s4; - } - /* Set up SLI-4 Specific device driver resources */ error = lpfc_sli4_driver_resource_setup(phba); if (error) { @@ -10550,6 +11135,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) /* Put device to a known state before enabling interrupt */ lpfc_stop_port(phba); + /* Configure and enable interrupt */ intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); if (intr_mode == LPFC_INTR_ERROR) { @@ -10559,11 +11145,17 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) goto out_free_sysfs_attr; } /* Default to single EQ for non-MSI-X */ - if (phba->intr_type != MSIX) - adjusted_fcp_io_channel = 1; - else - adjusted_fcp_io_channel = phba->cfg_fcp_io_channel; - phba->cfg_fcp_io_channel = adjusted_fcp_io_channel; + if (phba->intr_type != MSIX) { + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) + phba->cfg_fcp_io_channel = 1; + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + phba->cfg_nvme_io_channel = 1; + if (phba->nvmet_support) + phba->cfg_nvmet_mrq = 1; + } + phba->io_channel_irqs = 1; + } + /* Set up SLI-4 HBA */ if (lpfc_sli4_hba_setup(phba)) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, @@ -10579,6 +11171,24 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) /* Perform post initialization setup */ lpfc_post_init_setup(phba); + /* NVME support in FW earlier in the driver load corrects the + * FC4 type making a check for nvme_support unnecessary. + */ + if ((phba->nvmet_support == 0) && + (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) { + /* Create NVME binding with nvme_fc_transport. This + * ensures the vport is initialized. + */ + error = lpfc_nvme_create_localport(vport); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6004 NVME registration failed, " + "error x%x\n", + error); + goto out_disable_intr; + } + } + /* check for firmware upgrade or downgrade */ if (phba->cfg_request_firmware_upgrade) lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE); @@ -10650,8 +11260,12 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev) fc_remove_host(shost); scsi_remove_host(shost); - /* Perform cleanup on the physical port */ + /* Perform ndlp cleanup on the physical port. The nvme and nvmet + * localports are destroyed after to cleanup all transport memory. + */ lpfc_cleanup(vport); + lpfc_nvmet_destroy_targetport(phba); + lpfc_nvme_destroy_localport(vport); /* * Bring down the SLI Layer. This step disables all interrupts, @@ -10669,6 +11283,8 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev) * buffers are released to their corresponding pools here. 
*/ lpfc_scsi_free(phba); + lpfc_nvme_free(phba); + lpfc_free_iocb_list(phba); lpfc_sli4_driver_resource_unset(phba); @@ -11314,7 +11930,7 @@ lpfc_sli4_oas_verify(struct lpfc_hba *phba) int lpfc_fof_queue_setup(struct lpfc_hba *phba) { - struct lpfc_sli *psli = &phba->sli; + struct lpfc_sli_ring *pring; int rc; rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX); @@ -11333,8 +11949,11 @@ lpfc_fof_queue_setup(struct lpfc_hba *phba) if (rc) goto out_oas_wq; - phba->sli4_hba.oas_cq->pring = &psli->ring[LPFC_FCP_OAS_RING]; - phba->sli4_hba.oas_ring = &psli->ring[LPFC_FCP_OAS_RING]; + /* Bind this CQ/WQ to the NVME ring */ + pring = phba->sli4_hba.oas_wq->pring; + pring->sli.sli4.wqp = + (void *)phba->sli4_hba.oas_wq; + phba->sli4_hba.oas_cq->pring = pring; } return 0; @@ -11391,6 +12010,7 @@ lpfc_fof_queue_create(struct lpfc_hba *phba) goto out_error; phba->sli4_hba.oas_wq = qdesc; + list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); } return 0; @@ -11446,6 +12066,7 @@ static struct pci_driver lpfc_driver = { .id_table = lpfc_id_table, .probe = lpfc_pci_probe_one, .remove = lpfc_pci_remove_one, + .shutdown = lpfc_pci_remove_one, .suspend = lpfc_pci_suspend_one, .resume = lpfc_pci_resume_one, .err_handler = &lpfc_err_handler, @@ -11476,7 +12097,6 @@ static struct miscdevice lpfc_mgmt_dev = { static int __init lpfc_init(void) { - int cpu; int error = 0; printk(LPFC_MODULE_DESC "\n"); @@ -11502,9 +12122,7 @@ lpfc_init(void) /* Initialize in case vector mapping is needed */ lpfc_used_cpu = NULL; - lpfc_present_cpu = 0; - for_each_present_cpu(cpu) - lpfc_present_cpu++; + lpfc_present_cpu = num_present_cpus(); error = pci_register_driver(&lpfc_driver); if (error) { @@ -11550,5 +12168,5 @@ module_init(lpfc_init); module_exit(lpfc_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION(LPFC_MODULE_DESC); -MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com"); +MODULE_AUTHOR("Broadcom"); MODULE_VERSION("0:" LPFC_DRIVER_VERSION); diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h index 2a4e5d21eab2ad..3b654ad08d1f99 100644 --- a/drivers/scsi/lpfc/lpfc_logmsg.h +++ b/drivers/scsi/lpfc/lpfc_logmsg.h @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * @@ -38,6 +40,10 @@ #define LOG_FIP 0x00020000 /* FIP events */ #define LOG_FCP_UNDER 0x00040000 /* FCP underruns errors */ #define LOG_SCSI_CMD 0x00080000 /* ALL SCSI commands */ +#define LOG_NVME 0x00100000 /* NVME general events. */ +#define LOG_NVME_DISC 0x00200000 /* NVME Discovery/Connect events. */ +#define LOG_NVME_ABTS 0x00400000 /* NVME ABTS events. */ +#define LOG_NVME_IOERR 0x00800000 /* NVME IO Error events. */ #define LOG_ALL_MSG 0xffffffff /* LOG all messages */ #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) 
\ diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index b234c50c255feb..a928f5187fa46b 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * @@ -954,7 +956,7 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba) pcbp->maxRing = (psli->num_rings - 1); for (i = 0; i < psli->num_rings; i++) { - pring = &psli->ring[i]; + pring = &psli->sli3_ring[i]; pring->sli.sli3.sizeCiocb = phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE : @@ -1217,7 +1219,7 @@ lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb) mb->un.varCfgRing.recvNotify = 1; psli = &phba->sli; - pring = &psli->ring[ring]; + pring = &psli->sli3_ring[ring]; mb->un.varCfgRing.numMask = pring->num_mask; mb->mbxCommand = MBX_CONFIG_RING; mb->mbxOwner = OWN_HOST; @@ -2081,6 +2083,9 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq) if (phba->max_vpi && phba->cfg_enable_npiv) bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1); + if (phba->nvmet_support) + bf_set(lpfc_mbx_rq_ftr_rq_mrqp, &mboxq->u.mqe.un.req_ftrs, 1); + return; } @@ -2434,14 +2439,45 @@ lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox) memset(mbox, 0, sizeof(*mbox)); reg_fcfi = &mbox->u.mqe.un.reg_fcfi; bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI); - bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, phba->sli4_hba.hdr_rq->queue_id); - bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID); + if (phba->nvmet_support == 0) { + bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, + phba->sli4_hba.hdr_rq->queue_id); + /* Match everything - rq_id0 */ + bf_set(lpfc_reg_fcfi_type_match0, reg_fcfi, 0); + bf_set(lpfc_reg_fcfi_type_mask0, reg_fcfi, 0); + bf_set(lpfc_reg_fcfi_rctl_match0, reg_fcfi, 0); + bf_set(lpfc_reg_fcfi_rctl_mask0, reg_fcfi, 0); + + bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID); + + /* addr mode is bit wise inverted value of fcf addr_mode */ + bf_set(lpfc_reg_fcfi_mam, reg_fcfi, + (~phba->fcf.addr_mode) & 0x3); + } else { + /* This is ONLY for NVMET MRQ == 1 */ + if (phba->cfg_nvmet_mrq != 1) + return; + + bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, + phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id); + /* Match type FCP - rq_id0 */ + bf_set(lpfc_reg_fcfi_type_match0, reg_fcfi, FC_TYPE_FCP); + bf_set(lpfc_reg_fcfi_type_mask0, reg_fcfi, 0xff); + bf_set(lpfc_reg_fcfi_rctl_match0, reg_fcfi, + FC_RCTL_DD_UNSOL_CMD); + + bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, + phba->sli4_hba.hdr_rq->queue_id); + /* Match everything else - rq_id1 */ + bf_set(lpfc_reg_fcfi_type_match1, reg_fcfi, 0); + bf_set(lpfc_reg_fcfi_type_mask1, reg_fcfi, 0); + bf_set(lpfc_reg_fcfi_rctl_match1, reg_fcfi, 0); + bf_set(lpfc_reg_fcfi_rctl_mask1, reg_fcfi, 0); + } bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID); bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID); bf_set(lpfc_reg_fcfi_info_index, reg_fcfi, phba->fcf.current_rec.fcf_indx); - /* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */ - 
bf_set(lpfc_reg_fcfi_mam, reg_fcfi, (~phba->fcf.addr_mode) & 0x3); if (phba->fcf.current_rec.vlan_id != LPFC_FCOE_NULL_VID) { bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1); bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi, @@ -2449,6 +2485,70 @@ lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox) } } +/** + * lpfc_reg_fcfi_mrq - Initialize the REG_FCFI_MRQ mailbox command + * @phba: pointer to the hba structure containing the FCF index and RQ ID. + * @mbox: pointer to lpfc mbox command to initialize. + * @mode: 0 to register FCFI, 1 to register MRQs + * + * The REG_FCFI_MRQ mailbox command supports Fibre Channel Forwarders (FCFs). + * The SLI Host uses the command to activate an FCF after it has acquired FCF + * information via a READ_FCF mailbox command. This mailbox command also is used + * to indicate where received unsolicited frames from this FCF will be sent. By + * default this routine will set up the FCF to forward all unsolicited frames + * the the RQ ID passed in the @phba. This can be overridden by the caller for + * more complicated setups. + **/ +void +lpfc_reg_fcfi_mrq(struct lpfc_hba *phba, struct lpfcMboxq *mbox, int mode) +{ + struct lpfc_mbx_reg_fcfi_mrq *reg_fcfi; + + /* This is ONLY for MRQ */ + if (phba->cfg_nvmet_mrq <= 1) + return; + + memset(mbox, 0, sizeof(*mbox)); + reg_fcfi = &mbox->u.mqe.un.reg_fcfi_mrq; + bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI_MRQ); + if (mode == 0) { + bf_set(lpfc_reg_fcfi_mrq_info_index, reg_fcfi, + phba->fcf.current_rec.fcf_indx); + if (phba->fcf.current_rec.vlan_id != LPFC_FCOE_NULL_VID) { + bf_set(lpfc_reg_fcfi_mrq_vv, reg_fcfi, 1); + bf_set(lpfc_reg_fcfi_mrq_vlan_tag, reg_fcfi, + phba->fcf.current_rec.vlan_id); + } + return; + } + + bf_set(lpfc_reg_fcfi_mrq_rq_id0, reg_fcfi, + phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id); + /* Match NVME frames of type FCP (protocol NVME) - rq_id0 */ + bf_set(lpfc_reg_fcfi_mrq_type_match0, reg_fcfi, FC_TYPE_FCP); + bf_set(lpfc_reg_fcfi_mrq_type_mask0, reg_fcfi, 0xff); + bf_set(lpfc_reg_fcfi_mrq_rctl_match0, reg_fcfi, FC_RCTL_DD_UNSOL_CMD); + bf_set(lpfc_reg_fcfi_mrq_rctl_mask0, reg_fcfi, 0xff); + bf_set(lpfc_reg_fcfi_mrq_ptc0, reg_fcfi, 1); + bf_set(lpfc_reg_fcfi_mrq_pt0, reg_fcfi, 1); + + bf_set(lpfc_reg_fcfi_mrq_policy, reg_fcfi, 3); /* NVME connection id */ + bf_set(lpfc_reg_fcfi_mrq_mode, reg_fcfi, 1); + bf_set(lpfc_reg_fcfi_mrq_filter, reg_fcfi, 1); /* rq_id0 */ + bf_set(lpfc_reg_fcfi_mrq_npairs, reg_fcfi, phba->cfg_nvmet_mrq); + + bf_set(lpfc_reg_fcfi_mrq_rq_id1, reg_fcfi, + phba->sli4_hba.hdr_rq->queue_id); + /* Match everything - rq_id1 */ + bf_set(lpfc_reg_fcfi_mrq_type_match1, reg_fcfi, 0); + bf_set(lpfc_reg_fcfi_mrq_type_mask1, reg_fcfi, 0); + bf_set(lpfc_reg_fcfi_mrq_rctl_match1, reg_fcfi, 0); + bf_set(lpfc_reg_fcfi_mrq_rctl_mask1, reg_fcfi, 0); + + bf_set(lpfc_reg_fcfi_mrq_rq_id2, reg_fcfi, REG_FCF_INVALID_QID); + bf_set(lpfc_reg_fcfi_mrq_rq_id3, reg_fcfi, REG_FCF_INVALID_QID); +} + /** * lpfc_unreg_fcfi - Initialize the UNREG_FCFI mailbox command * @mbox: pointer to lpfc mbox command to initialize. diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index 3fa65338d3f556..5986c7957199df 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. 
The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * @@ -24,10 +26,12 @@ #include #include +#include #include #include +#include -#include +#include #include "lpfc_hw4.h" #include "lpfc_hw.h" @@ -35,8 +39,10 @@ #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" -#include "lpfc_scsi.h" #include "lpfc.h" +#include "lpfc_scsi.h" +#include "lpfc_nvme.h" +#include "lpfc_nvmet.h" #include "lpfc_crtn.h" #include "lpfc_logmsg.h" @@ -66,7 +72,7 @@ lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba) { * lpfc_mem_alloc - create and allocate all PCI and memory pools * @phba: HBA to allocate pools for * - * Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool, + * Description: Creates and allocates PCI pools lpfc_sg_dma_buf_pool, * lpfc_mbuf_pool, lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools * for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask. * @@ -90,21 +96,23 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align) else i = SLI4_PAGE_SIZE; - phba->lpfc_scsi_dma_buf_pool = - pci_pool_create("lpfc_scsi_dma_buf_pool", - phba->pcidev, - phba->cfg_sg_dma_buf_size, - i, - 0); + phba->lpfc_sg_dma_buf_pool = + pci_pool_create("lpfc_sg_dma_buf_pool", + phba->pcidev, + phba->cfg_sg_dma_buf_size, + i, 0); + if (!phba->lpfc_sg_dma_buf_pool) + goto fail; + } else { - phba->lpfc_scsi_dma_buf_pool = - pci_pool_create("lpfc_scsi_dma_buf_pool", - phba->pcidev, phba->cfg_sg_dma_buf_size, - align, 0); - } + phba->lpfc_sg_dma_buf_pool = + pci_pool_create("lpfc_sg_dma_buf_pool", + phba->pcidev, phba->cfg_sg_dma_buf_size, + align, 0); - if (!phba->lpfc_scsi_dma_buf_pool) - goto fail; + if (!phba->lpfc_sg_dma_buf_pool) + goto fail; + } phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev, LPFC_BPL_SIZE, @@ -170,12 +178,15 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align) LPFC_DEVICE_DATA_POOL_SIZE, sizeof(struct lpfc_device_data)); if (!phba->device_data_mem_pool) - goto fail_free_hrb_pool; + goto fail_free_drb_pool; } else { phba->device_data_mem_pool = NULL; } return 0; +fail_free_drb_pool: + pci_pool_destroy(phba->lpfc_drb_pool); + phba->lpfc_drb_pool = NULL; fail_free_hrb_pool: pci_pool_destroy(phba->lpfc_hrb_pool); phba->lpfc_hrb_pool = NULL; @@ -197,8 +208,8 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align) pci_pool_destroy(phba->lpfc_mbuf_pool); phba->lpfc_mbuf_pool = NULL; fail_free_dma_buf_pool: - pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool); - phba->lpfc_scsi_dma_buf_pool = NULL; + pci_pool_destroy(phba->lpfc_sg_dma_buf_pool); + phba->lpfc_sg_dma_buf_pool = NULL; fail: return -ENOMEM; } @@ -227,6 +238,9 @@ lpfc_mem_free(struct lpfc_hba *phba) if (phba->lpfc_hrb_pool) pci_pool_destroy(phba->lpfc_hrb_pool); phba->lpfc_hrb_pool = NULL; + if (phba->txrdy_payload_pool) + pci_pool_destroy(phba->txrdy_payload_pool); + phba->txrdy_payload_pool = NULL; if (phba->lpfc_hbq_pool) pci_pool_destroy(phba->lpfc_hbq_pool); @@ -258,8 +272,8 @@ lpfc_mem_free(struct lpfc_hba *phba) phba->lpfc_mbuf_pool = NULL; /* Free DMA buffer memory pool */ - pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool); - phba->lpfc_scsi_dma_buf_pool = NULL; + pci_pool_destroy(phba->lpfc_sg_dma_buf_pool); + phba->lpfc_sg_dma_buf_pool = NULL; /* Free Device Data memory pool */ if 
(phba->device_data_mem_pool) { @@ -282,7 +296,7 @@ lpfc_mem_free(struct lpfc_hba *phba) * @phba: HBA to free memory for * * Description: Free memory from PCI and driver memory pools and also those - * used : lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool, lpfc_hrb_pool. Frees + * used : lpfc_sg_dma_buf_pool, lpfc_mbuf_pool, lpfc_hrb_pool. Frees * kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist. Also frees * the VPI bitmask. * @@ -430,6 +444,44 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma) return; } +/** + * lpfc_nvmet_buf_alloc - Allocate an nvmet_buf from the + * lpfc_sg_dma_buf_pool PCI pool + * @phba: HBA which owns the pool to allocate from + * @mem_flags: indicates if this is a priority (MEM_PRI) allocation + * @handle: used to return the DMA-mapped address of the nvmet_buf + * + * Description: Allocates a DMA-mapped buffer from the lpfc_sg_dma_buf_pool + * PCI pool. Allocates from generic pci_pool_alloc function. + * + * Returns: + * pointer to the allocated nvmet_buf on success + * NULL on failure + **/ +void * +lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle) +{ + void *ret; + + ret = pci_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, handle); + return ret; +} + +/** + * lpfc_nvmet_buf_free - Free an nvmet_buf from the lpfc_sg_dma_buf_pool + * PCI pool + * @phba: HBA which owns the pool to return to + * @virt: nvmet_buf to free + * @dma: the DMA-mapped address of the lpfc_sg_dma_buf_pool to be freed + * + * Returns: None + **/ +void +lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma) +{ + pci_pool_free(phba->lpfc_sg_dma_buf_pool, virt, dma); +} + /** * lpfc_els_hbq_alloc - Allocate an HBQ buffer * @phba: HBA to allocate HBQ buffer for @@ -458,7 +510,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba) kfree(hbqbp); return NULL; } - hbqbp->size = LPFC_BPL_SIZE; + hbqbp->total_size = LPFC_BPL_SIZE; return hbqbp; } @@ -518,7 +570,7 @@ lpfc_sli4_rb_alloc(struct lpfc_hba *phba) kfree(dma_buf); return NULL; } - dma_buf->size = LPFC_BPL_SIZE; + dma_buf->total_size = LPFC_DATA_BUF_SIZE; return dma_buf; } @@ -540,7 +592,134 @@ lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab) pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys); pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys); kfree(dmab); - return; +} + +/** + * lpfc_sli4_nvmet_alloc - Allocate an SLI4 Receive buffer + * @phba: HBA to allocate a receive buffer for + * + * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI + * pool along a non-DMA-mapped container for it. + * + * Notes: Not interrupt-safe. Must be called with no locks held. 
+ * + * Returns: + * pointer to HBQ on success + * NULL on failure + **/ +struct rqb_dmabuf * +lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba) +{ + struct rqb_dmabuf *dma_buf; + struct lpfc_iocbq *nvmewqe; + union lpfc_wqe128 *wqe; + + dma_buf = kzalloc(sizeof(struct rqb_dmabuf), GFP_KERNEL); + if (!dma_buf) + return NULL; + + dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL, + &dma_buf->hbuf.phys); + if (!dma_buf->hbuf.virt) { + kfree(dma_buf); + return NULL; + } + dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL, + &dma_buf->dbuf.phys); + if (!dma_buf->dbuf.virt) { + pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, + dma_buf->hbuf.phys); + kfree(dma_buf); + return NULL; + } + dma_buf->total_size = LPFC_DATA_BUF_SIZE; + + dma_buf->context = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), + GFP_KERNEL); + if (!dma_buf->context) { + pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt, + dma_buf->dbuf.phys); + pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, + dma_buf->hbuf.phys); + kfree(dma_buf); + return NULL; + } + + dma_buf->iocbq = lpfc_sli_get_iocbq(phba); + if (!dma_buf->iocbq) { + kfree(dma_buf->context); + pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt, + dma_buf->dbuf.phys); + pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, + dma_buf->hbuf.phys); + kfree(dma_buf); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + "2621 Ran out of nvmet iocb/WQEs\n"); + return NULL; + } + dma_buf->iocbq->iocb_flag = LPFC_IO_NVMET; + nvmewqe = dma_buf->iocbq; + wqe = (union lpfc_wqe128 *)&nvmewqe->wqe; + /* Initialize WQE */ + memset(wqe, 0, sizeof(union lpfc_wqe)); + /* Word 7 */ + bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI); + bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3); + bf_set(wqe_pu, &wqe->generic.wqe_com, 1); + /* Word 10 */ + bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1); + bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0); + bf_set(wqe_qosd, &wqe->generic.wqe_com, 0); + + dma_buf->iocbq->context1 = NULL; + spin_lock(&phba->sli4_hba.sgl_list_lock); + dma_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, dma_buf->iocbq); + spin_unlock(&phba->sli4_hba.sgl_list_lock); + if (!dma_buf->sglq) { + lpfc_sli_release_iocbq(phba, dma_buf->iocbq); + kfree(dma_buf->context); + pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt, + dma_buf->dbuf.phys); + pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, + dma_buf->hbuf.phys); + kfree(dma_buf); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + "6132 Ran out of nvmet XRIs\n"); + return NULL; + } + return dma_buf; +} + +/** + * lpfc_sli4_nvmet_free - Frees a receive buffer + * @phba: HBA buffer was allocated for + * @dmab: DMA Buffer container returned by lpfc_sli4_rbq_alloc + * + * Description: Frees both the container and the DMA-mapped buffers returned by + * lpfc_sli4_nvmet_alloc. + * + * Notes: Can be called with or without locks held. 
+ * + * Returns: None + **/ +void +lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab) +{ + unsigned long flags; + + __lpfc_clear_active_sglq(phba, dmab->sglq->sli4_lxritag); + dmab->sglq->state = SGL_FREED; + dmab->sglq->ndlp = NULL; + + spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, flags); + list_add_tail(&dmab->sglq->list, &phba->sli4_hba.lpfc_nvmet_sgl_list); + spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, flags); + + lpfc_sli_release_iocbq(phba, dmab->iocbq); + kfree(dmab->context); + pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys); + pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys); + kfree(dmab); } /** @@ -565,13 +744,13 @@ lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp) return; if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { + hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf); /* Check whether HBQ is still in use */ spin_lock_irqsave(&phba->hbalock, flags); if (!phba->hbq_in_use) { spin_unlock_irqrestore(&phba->hbalock, flags); return; } - hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf); list_del(&hbq_entry->dbuf.list); if (hbq_entry->tag == -1) { (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) @@ -586,3 +765,48 @@ lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp) } return; } + +/** + * lpfc_rq_buf_free - Free a RQ DMA buffer + * @phba: HBA buffer is associated with + * @mp: Buffer to free + * + * Description: Frees the given DMA buffer in the appropriate way given by + * reposting it to its associated RQ so it can be reused. + * + * Notes: Takes phba->hbalock. Can be called with or without other locks held. + * + * Returns: None + **/ +void +lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp) +{ + struct lpfc_rqb *rqbp; + struct lpfc_rqe hrqe; + struct lpfc_rqe drqe; + struct rqb_dmabuf *rqb_entry; + unsigned long flags; + int rc; + + if (!mp) + return; + + rqb_entry = container_of(mp, struct rqb_dmabuf, hbuf); + rqbp = rqb_entry->hrq->rqbp; + + spin_lock_irqsave(&phba->hbalock, flags); + list_del(&rqb_entry->hbuf.list); + hrqe.address_lo = putPaddrLow(rqb_entry->hbuf.phys); + hrqe.address_hi = putPaddrHigh(rqb_entry->hbuf.phys); + drqe.address_lo = putPaddrLow(rqb_entry->dbuf.phys); + drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys); + rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe); + if (rc < 0) { + (rqbp->rqb_free_buffer)(phba, rqb_entry); + } else { + list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list); + rqbp->buffer_count++; + } + + spin_unlock_irqrestore(&phba->hbalock, flags); +} diff --git a/drivers/scsi/lpfc/lpfc_nl.h b/drivers/scsi/lpfc/lpfc_nl.h index f2b1bbcb196ff4..b93e78f671fbc2 100644 --- a/drivers/scsi/lpfc/lpfc_nl.h +++ b/drivers/scsi/lpfc/lpfc_nl.h @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2010 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* - * www.emulex.com * + * www.broadcom.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 56a3df4fddb05e..061626bdf70106 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * @@ -28,6 +30,9 @@ #include #include #include +#include + +#include #include "lpfc_hw4.h" #include "lpfc_hw.h" @@ -35,8 +40,9 @@ #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" -#include "lpfc_scsi.h" #include "lpfc.h" +#include "lpfc_scsi.h" +#include "lpfc_nvme.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" #include "lpfc_vport.h" @@ -204,10 +210,11 @@ int lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) { LIST_HEAD(abort_list); - struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; + struct lpfc_sli_ring *pring; struct lpfc_iocbq *iocb, *next_iocb; + pring = lpfc_phba_elsring(phba); + /* Abort outstanding I/O on NPort */ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY, "2819 Abort outstanding I/O on NPort x%x " @@ -283,6 +290,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, uint32_t ed_tov; LPFC_MBOXQ_t *mbox; struct ls_rjt stat; + uint32_t vid, flag; int rc; memset(&stat, 0, sizeof (struct ls_rjt)); @@ -418,6 +426,15 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_can_disctmo(vport); } + ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP; + if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) && + sp->cmn.valid_vendor_ver_level) { + vid = be32_to_cpu(sp->un.vv.vid); + flag = be32_to_cpu(sp->un.vv.flags); + if ((vid == LPFC_VV_EMLX_ID) && (flag & LPFC_VV_SUPPRESS_RSP)) + ndlp->nlp_flag |= NLP_SUPPRESS_RSP; + } + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) goto out; @@ -707,6 +724,7 @@ static void lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_iocbq *cmdiocb) { + struct lpfc_hba *phba = vport->phba; struct lpfc_dmabuf *pcmd; uint32_t *lp; PRLI *npr; @@ -720,16 +738,32 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; ndlp->nlp_flag &= ~NLP_FIRSTBURST; - if (npr->prliType == PRLI_FCP_TYPE) { - if (npr->initiatorFunc) - ndlp->nlp_type |= NLP_FCP_INITIATOR; + if ((npr->prliType == PRLI_FCP_TYPE) || + (npr->prliType == PRLI_NVME_TYPE)) { + if (npr->initiatorFunc) { + if (npr->prliType == PRLI_FCP_TYPE) + ndlp->nlp_type |= NLP_FCP_INITIATOR; + if (npr->prliType == PRLI_NVME_TYPE) + ndlp->nlp_type |= NLP_NVME_INITIATOR; + } if (npr->targetFunc) { - ndlp->nlp_type |= NLP_FCP_TARGET; + if (npr->prliType == PRLI_FCP_TYPE) + ndlp->nlp_type |= NLP_FCP_TARGET; + if (npr->prliType == PRLI_NVME_TYPE) + ndlp->nlp_type |= NLP_NVME_TARGET; if 
(npr->writeXferRdyDis) ndlp->nlp_flag |= NLP_FIRSTBURST; } if (npr->Retry) ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE; + + /* If this driver is in nvme target mode, set the ndlp's fc4 + * type to NVME provided the PRLI response claims NVME FC4 + * type. Target mode does not issue gft_id so doesn't get + * the fc4 type set until now. + */ + if ((phba->nvmet_support) && (npr->prliType == PRLI_NVME_TYPE)) + ndlp->nlp_fc4_type |= NLP_FC4_NVME; } if (rport) { /* We need to update the rport role values */ @@ -743,7 +777,8 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, "rport rolechg: role:x%x did:x%x flg:x%x", roles, ndlp->nlp_DID, ndlp->nlp_flag); - fc_remote_port_rolechg(rport, roles); + if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME) + fc_remote_port_rolechg(rport, roles); } } @@ -1026,6 +1061,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, *rspiocb; struct lpfc_dmabuf *pcmd, *prsp, *mp; uint32_t *lp; + uint32_t vid, flag; IOCB_t *irsp; struct serv_parm *sp; uint32_t ed_tov; @@ -1094,6 +1130,16 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, ed_tov = (phba->fc_edtov + 999999) / 1000000; } + ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP; + if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) && + sp->cmn.valid_vendor_ver_level) { + vid = be32_to_cpu(sp->un.vv.vid); + flag = be32_to_cpu(sp->un.vv.flags); + if ((vid == LPFC_VV_EMLX_ID) && + (flag & LPFC_VV_SUPPRESS_RSP)) + ndlp->nlp_flag |= NLP_SUPPRESS_RSP; + } + /* * Use the larger EDTOV * RATOV = 2 * EDTOV for pt-to-pt @@ -1489,8 +1535,38 @@ lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; + struct ls_rjt stat; + + if (vport->phba->nvmet_support) { + /* NVME Target mode. Handle and respond to the PRLI and + * transition to UNMAPPED provided the RPI has completed + * registration. + */ + if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { + lpfc_rcv_prli(vport, ndlp, cmdiocb); + lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); + } else { + /* RPI registration has not completed. Reject the PRLI + * to prevent an illegal state transition when the + * rpi registration does complete. + */ + lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME_DISC, + "6115 NVMET ndlp rpi %d state " + "unknown, state x%x flags x%08x\n", + ndlp->nlp_rpi, ndlp->nlp_state, + ndlp->nlp_flag); + memset(&stat, 0, sizeof(struct ls_rjt)); + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS; + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, + ndlp, NULL); + } + } else { + /* Initiator mode. */ + lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); + } - lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); return ndlp->nlp_state; } @@ -1573,9 +1649,11 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, uint32_t evt) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; MAILBOX_t *mb = &pmb->u.mb; uint32_t did = mb->un.varWords[1]; + int rc = 0; if (mb->mbxStatus) { /* RegLogin failed */ @@ -1610,19 +1688,55 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, } /* SLI4 ports have preallocated logical rpis. 
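The vendor-version test used above (in both the PLOGI receive and PLOGI completion paths) reduces to a single predicate. Purely as a reading aid, a minimal sketch follows; the field and flag names are taken from the hunks above, the helper itself is illustrative, and note the driver additionally requires LPFC_SLI_SUPPRESS_RSP to be set locally before acting on the result.

/* Illustrative sketch, not part of the patch. */
static bool example_peer_wants_suppress_rsp(struct serv_parm *sp)
{
	if (!sp->cmn.valid_vendor_ver_level)
		return false;
	return be32_to_cpu(sp->un.vv.vid) == LPFC_VV_EMLX_ID &&
	       (be32_to_cpu(sp->un.vv.flags) & LPFC_VV_SUPPRESS_RSP);
}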
*/ - if (vport->phba->sli_rev < LPFC_SLI_REV4) + if (phba->sli_rev < LPFC_SLI_REV4) ndlp->nlp_rpi = mb->un.varWords[0]; ndlp->nlp_flag |= NLP_RPI_REGISTERED; /* Only if we are not a fabric nport do we issue PRLI */ - if (!(ndlp->nlp_type & NLP_FABRIC)) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "3066 RegLogin Complete on x%x x%x x%x\n", + did, ndlp->nlp_type, ndlp->nlp_fc4_type); + if (!(ndlp->nlp_type & NLP_FABRIC) && + (phba->nvmet_support == 0)) { + /* The driver supports FCP and NVME concurrently. If the + * ndlp's nlp_fc4_type is still zero, the driver doesn't + * know what PRLI to send yet. Figure that out now and + * call PRLI depending on the outcome. + */ + if (vport->fc_flag & FC_PT2PT) { + /* If we are pt2pt, there is no Fabric to determine + * the FC4 type of the remote nport. So if NVME + * is configured try it. + */ + ndlp->nlp_fc4_type |= NLP_FC4_FCP; + if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { + ndlp->nlp_fc4_type |= NLP_FC4_NVME; + /* We need to update the localport also */ + lpfc_nvme_update_localport(vport); + } + + } else if (ndlp->nlp_fc4_type == 0) { + rc = lpfc_ns_cmd(vport, SLI_CTNS_GFT_ID, + 0, ndlp->nlp_DID); + return ndlp->nlp_state; + } + ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); lpfc_issue_els_prli(vport, ndlp, 0); } else { - ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; - lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); + if ((vport->fc_flag & FC_PT2PT) && phba->nvmet_support) + phba->targetport->port_id = vport->fc_myDID; + + /* Only Fabric ports should transition. NVME target + * must complete PRLI. + */ + if (ndlp->nlp_type & NLP_FABRIC) { + ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); + } } return ndlp->nlp_state; } @@ -1663,7 +1777,14 @@ lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport, ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); spin_lock_irq(shost->host_lock); - ndlp->nlp_flag |= NLP_IGNR_REG_CMPL; + + /* If we are a target we won't immediately transition into PRLI, + * so if REG_LOGIN already completed we don't need to ignore it. + */ + if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED) || + !vport->phba->nvmet_support) + ndlp->nlp_flag |= NLP_IGNR_REG_CMPL; + ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); spin_unlock_irq(shost->host_lock); lpfc_disc_set_adisc(vport, ndlp); @@ -1739,10 +1860,23 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_hba *phba = vport->phba; IOCB_t *irsp; PRLI *npr; + struct lpfc_nvme_prli *nvpr; + void *temp_ptr; cmdiocb = (struct lpfc_iocbq *) arg; rspiocb = cmdiocb->context_un.rsp_iocb; - npr = (PRLI *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb); + + /* A solicited PRLI is either FCP or NVME. The PRLI cmd/rsp + * format is different so NULL the two PRLI types so that the + * driver correctly gets the correct context. + */ + npr = NULL; + nvpr = NULL; + temp_ptr = lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb); + if (cmdiocb->iocb_flag & LPFC_PRLI_FCP_REQ) + npr = (PRLI *) temp_ptr; + else if (cmdiocb->iocb_flag & LPFC_PRLI_NVME_REQ) + nvpr = (struct lpfc_nvme_prli *) temp_ptr; irsp = &rspiocb->iocb; if (irsp->ulpStatus) { @@ -1750,7 +1884,21 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, vport->cfg_restrict_login) { goto out; } + + /* The LS Req had some error. 
Don't let this be a + * target. + */ + if ((ndlp->fc4_prli_sent == 1) && + (ndlp->nlp_state == NLP_STE_PRLI_ISSUE) && + (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_FCP_INITIATOR))) + /* The FCP PRLI completed successfully but + * the NVME PRLI failed. Since they are sent in + * succession, allow the FCP to complete. + */ + goto out_err; + ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; + ndlp->nlp_type |= NLP_FCP_INITIATOR; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); return ndlp->nlp_state; } @@ -1758,9 +1906,16 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* Check out PRLI rsp */ ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; + + /* NVME or FCP first burst must be negotiated for each PRLI. */ ndlp->nlp_flag &= ~NLP_FIRSTBURST; - if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) && + ndlp->nvme_fb_size = 0; + if (npr && (npr->acceptRspCode == PRLI_REQ_EXECUTED) && (npr->prliType == PRLI_FCP_TYPE)) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, + "6028 FCP NPR PRLI Cmpl Init %d Target %d\n", + npr->initiatorFunc, + npr->targetFunc); if (npr->initiatorFunc) ndlp->nlp_type |= NLP_FCP_INITIATOR; if (npr->targetFunc) { @@ -1770,6 +1925,49 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } if (npr->Retry) ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE; + + /* PRLI completed. Decrement count. */ + ndlp->fc4_prli_sent--; + } else if (nvpr && + (bf_get_be32(prli_acc_rsp_code, nvpr) == + PRLI_REQ_EXECUTED) && + (bf_get_be32(prli_type_code, nvpr) == + PRLI_NVME_TYPE)) { + + /* Complete setting up the remote ndlp personality. */ + if (bf_get_be32(prli_init, nvpr)) + ndlp->nlp_type |= NLP_NVME_INITIATOR; + + /* Target driver cannot solicit NVME FB. */ + if (bf_get_be32(prli_tgt, nvpr)) { + ndlp->nlp_type |= NLP_NVME_TARGET; + if ((bf_get_be32(prli_fba, nvpr) == 1) && + (bf_get_be32(prli_fb_sz, nvpr) > 0) && + (phba->cfg_nvme_enable_fb) && + (!phba->nvmet_support)) { + /* Both sides support FB. The target's first + * burst size is a 512 byte encoded value. + */ + ndlp->nlp_flag |= NLP_FIRSTBURST; + ndlp->nvme_fb_size = bf_get_be32(prli_fb_sz, + nvpr); + } + } + + if (bf_get_be32(prli_recov, nvpr)) + ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, + "6029 NVME PRLI Cmpl w1 x%08x " + "w4 x%08x w5 x%08x flag x%x, " + "fcp_info x%x nlp_type x%x\n", + be32_to_cpu(nvpr->word1), + be32_to_cpu(nvpr->word4), + be32_to_cpu(nvpr->word5), + ndlp->nlp_flag, ndlp->nlp_fcp_info, + ndlp->nlp_type); + /* PRLI completed. Decrement count. */ + ndlp->fc4_prli_sent--; } if (!(ndlp->nlp_type & NLP_FCP_TARGET) && (vport->port_type == LPFC_NPIV_PORT) && @@ -1785,11 +1983,24 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, return ndlp->nlp_state; } - ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; - if (ndlp->nlp_type & NLP_FCP_TARGET) - lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); - else - lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); +out_err: + /* The ndlp state cannot move to MAPPED or UNMAPPED before all PRLIs + * are complete. 
+ */ + if (ndlp->fc4_prli_sent == 0) { + ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; + if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) + lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); + else + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); + } else + lpfc_printf_vlog(vport, + KERN_INFO, LOG_ELS, + "3067 PRLI's still outstanding " + "on x%06x - count %d, Pend Node Mode " + "transition...\n", + ndlp->nlp_DID, ndlp->fc4_prli_sent); + return ndlp->nlp_state; } @@ -2104,7 +2315,7 @@ lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; /* flush the target */ - lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], + lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING], ndlp->nlp_sid, 0, LPFC_CTX_TGT); /* Treat like rcv logo */ diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c new file mode 100644 index 00000000000000..0024de1c6c1fea --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -0,0 +1,2521 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * Portions Copyright (C) 2004-2005 Christoph Hellwig * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + ********************************************************************/ +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include "lpfc_version.h" +#include "lpfc_hw4.h" +#include "lpfc_hw.h" +#include "lpfc_sli.h" +#include "lpfc_sli4.h" +#include "lpfc_nl.h" +#include "lpfc_disc.h" +#include "lpfc.h" +#include "lpfc_nvme.h" +#include "lpfc_scsi.h" +#include "lpfc_logmsg.h" +#include "lpfc_crtn.h" +#include "lpfc_vport.h" +#include "lpfc_debugfs.h" + +/* NVME initiator-based functions */ + +static struct lpfc_nvme_buf * +lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp); + +static void +lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_nvme_buf *); + + +/** + * lpfc_nvme_create_queue - + * @lpfc_pnvme: Pointer to the driver's nvme instance data + * @qidx: An cpu index used to affinitize IO queues and MSIX vectors. + * @handle: An opaque driver handle used in follow-up calls. + * + * Driver registers this routine to preallocate and initialize any + * internal data structures to bind the @qidx to its internal IO queues. + * A hardware queue maps (qidx) to a specific driver MSI-X vector/EQ/CQ/WQ. 
+ * + * Return value : + * 0 - Success + * -EINVAL - Unsupported input value. + * -ENOMEM - Could not alloc necessary memory + **/ +static int +lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport, + unsigned int qidx, u16 qsize, + void **handle) +{ + struct lpfc_nvme_lport *lport; + struct lpfc_vport *vport; + struct lpfc_nvme_qhandle *qhandle; + char *str; + + lport = (struct lpfc_nvme_lport *)pnvme_lport->private; + vport = lport->vport; + qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL); + if (qhandle == NULL) + return -ENOMEM; + + qhandle->cpu_id = smp_processor_id(); + qhandle->qidx = qidx; + /* + * NVME qidx == 0 is the admin queue, so both admin queue + * and first IO queue will use MSI-X vector and associated + * EQ/CQ/WQ at index 0. After that they are sequentially assigned. + */ + if (qidx) { + str = "IO "; /* IO queue */ + qhandle->index = ((qidx - 1) % + vport->phba->cfg_nvme_io_channel); + } else { + str = "ADM"; /* Admin queue */ + qhandle->index = qidx; + } + + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, + "6073 Binding %s HdwQueue %d (cpu %d) to " + "io_channel %d qhandle %p\n", str, + qidx, qhandle->cpu_id, qhandle->index, qhandle); + *handle = (void *)qhandle; + return 0; +} + +/** + * lpfc_nvme_delete_queue - + * @lpfc_pnvme: Pointer to the driver's nvme instance data + * @qidx: An cpu index used to affinitize IO queues and MSIX vectors. + * @handle: An opaque driver handle from lpfc_nvme_create_queue + * + * Driver registers this routine to free + * any internal data structures to bind the @qidx to its internal + * IO queues. + * + * Return value : + * 0 - Success + * TODO: What are the failure codes. + **/ +static void +lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport, + unsigned int qidx, + void *handle) +{ + struct lpfc_nvme_lport *lport; + struct lpfc_vport *vport; + + lport = (struct lpfc_nvme_lport *)pnvme_lport->private; + vport = lport->vport; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, + "6001 ENTER. lpfc_pnvme %p, qidx x%xi qhandle %p\n", + lport, qidx, handle); + kfree(handle); +} + +static void +lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport) +{ + struct lpfc_nvme_lport *lport = localport->private; + + /* release any threads waiting for the unreg to complete */ + complete(&lport->lport_unreg_done); +} + +/* lpfc_nvme_remoteport_delete + * + * @remoteport: Pointer to an nvme transport remoteport instance. + * + * This is a template downcall. NVME transport calls this function + * when it has completed the unregistration of a previously + * registered remoteport. + * + * Return value : + * None + */ +void +lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport) +{ + struct lpfc_nvme_rport *rport = remoteport->private; + struct lpfc_vport *vport; + struct lpfc_nodelist *ndlp; + + ndlp = rport->ndlp; + if (!ndlp) + goto rport_err; + + vport = ndlp->vport; + if (!vport) + goto rport_err; + + /* Remove this rport from the lport's list - memory is owned by the + * transport. Remove the ndlp reference for the NVME transport before + * calling state machine to remove the node, this is devloss = 0 + * semantics. + */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, + "6146 remoteport delete complete %p\n", + remoteport); + list_del(&rport->list); + lpfc_nlp_put(ndlp); + + rport_err: + /* This call has to execute as long as the rport is valid. + * Release any threads waiting for the unreg to complete. 
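A worked example of the queue-handle mapping established in lpfc_nvme_create_queue() above: cfg_nvme_io_channel is the value referenced in the patch, while the helper name and sample numbers below are illustrative only.

/* Sketch: qidx 0 is the admin queue and shares MSI-X vector/EQ/CQ/WQ 0;
 * IO queues wrap around the configured channel count. With four
 * io_channels, qidx 1..8 map to index 0,1,2,3,0,1,2,3.
 */
static u32 example_qidx_to_io_channel(u32 qidx, u32 nr_io_channels)
{
	if (!qidx)
		return 0;
	return (qidx - 1) % nr_io_channels;
}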
+ */ + complete(&rport->rport_unreg_done); +} + +static void +lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, + struct lpfc_wcqe_complete *wcqe) +{ + struct lpfc_vport *vport = cmdwqe->vport; + uint32_t status; + struct nvmefc_ls_req *pnvme_lsreq; + struct lpfc_dmabuf *buf_ptr; + struct lpfc_nodelist *ndlp; + + vport->phba->fc4NvmeLsCmpls++; + + pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2; + status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK; + ndlp = (struct lpfc_nodelist *)cmdwqe->context1; + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, + "6047 nvme cmpl Enter " + "Data %p DID %x Xri: %x status %x cmd:%p lsreg:%p " + "bmp:%p ndlp:%p\n", + pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0, + cmdwqe->sli4_xritag, status, + cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp); + + lpfc_nvmeio_data(phba, "NVME LS CMPL: xri x%x stat x%x parm x%x\n", + cmdwqe->sli4_xritag, status, wcqe->parameter); + + if (cmdwqe->context3) { + buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3; + lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); + kfree(buf_ptr); + cmdwqe->context3 = NULL; + } + if (pnvme_lsreq->done) + pnvme_lsreq->done(pnvme_lsreq, status); + else + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, + "6046 nvme cmpl without done call back? " + "Data %p DID %x Xri: %x status %x\n", + pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0, + cmdwqe->sli4_xritag, status); + if (ndlp) { + lpfc_nlp_put(ndlp); + cmdwqe->context1 = NULL; + } + lpfc_sli_release_iocbq(phba, cmdwqe); +} + +static int +lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, + struct lpfc_dmabuf *inp, + struct nvmefc_ls_req *pnvme_lsreq, + void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *, + struct lpfc_wcqe_complete *), + struct lpfc_nodelist *ndlp, uint32_t num_entry, + uint32_t tmo, uint8_t retry) +{ + struct lpfc_hba *phba = vport->phba; + union lpfc_wqe *wqe; + struct lpfc_iocbq *genwqe; + struct ulp_bde64 *bpl; + struct ulp_bde64 bde; + int i, rc, xmit_len, first_len; + + /* Allocate buffer for command WQE */ + genwqe = lpfc_sli_get_iocbq(phba); + if (genwqe == NULL) + return 1; + + wqe = &genwqe->wqe; + memset(wqe, 0, sizeof(union lpfc_wqe)); + + genwqe->context3 = (uint8_t *)bmp; + genwqe->iocb_flag |= LPFC_IO_NVME_LS; + + /* Save for completion so we can release these resources */ + genwqe->context1 = lpfc_nlp_get(ndlp); + genwqe->context2 = (uint8_t *)pnvme_lsreq; + /* Fill in payload, bp points to frame payload */ + + if (!tmo) + /* FC spec states we need 3 * ratov for CT requests */ + tmo = (3 * phba->fc_ratov); + + /* For this command calculate the xmit length of the request bde. 
*/ + xmit_len = 0; + first_len = 0; + bpl = (struct ulp_bde64 *)bmp->virt; + for (i = 0; i < num_entry; i++) { + bde.tus.w = bpl[i].tus.w; + if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) + break; + xmit_len += bde.tus.f.bdeSize; + if (i == 0) + first_len = xmit_len; + } + + genwqe->rsvd2 = num_entry; + genwqe->hba_wqidx = 0; + + /* Words 0 - 2 */ + wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; + wqe->generic.bde.tus.f.bdeSize = first_len; + wqe->generic.bde.addrLow = bpl[0].addrLow; + wqe->generic.bde.addrHigh = bpl[0].addrHigh; + + /* Word 3 */ + wqe->gen_req.request_payload_len = first_len; + + /* Word 4 */ + + /* Word 5 */ + bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0); + bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1); + bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1); + bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ); + bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME); + + /* Word 6 */ + bf_set(wqe_ctxt_tag, &wqe->gen_req.wqe_com, + phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); + bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag); + + /* Word 7 */ + bf_set(wqe_tmo, &wqe->gen_req.wqe_com, (vport->phba->fc_ratov-1)); + bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3); + bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE); + bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI); + + /* Word 8 */ + wqe->gen_req.wqe_com.abort_tag = genwqe->iotag; + + /* Word 9 */ + bf_set(wqe_reqtag, &wqe->gen_req.wqe_com, genwqe->iotag); + + /* Word 10 */ + bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1); + bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ); + bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); + bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); + bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); + + /* Word 11 */ + bf_set(wqe_cqid, &wqe->gen_req.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); + bf_set(wqe_cmd_type, &wqe->gen_req.wqe_com, OTHER_COMMAND); + + + /* Issue GEN REQ WQE for NPORT */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "6050 Issue GEN REQ WQE to NPORT x%x " + "Data: x%x x%x wq:%p lsreq:%p bmp:%p xmit:%d 1st:%d\n", + ndlp->nlp_DID, genwqe->iotag, + vport->port_state, + genwqe, pnvme_lsreq, bmp, xmit_len, first_len); + genwqe->wqe_cmpl = cmpl; + genwqe->iocb_cmpl = NULL; + genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT; + genwqe->vport = vport; + genwqe->retry = retry; + + lpfc_nvmeio_data(phba, "NVME LS XMIT: xri x%x iotag x%x to x%06x\n", + genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID); + + rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, genwqe); + if (rc == WQE_ERROR) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "6045 Issue GEN REQ WQE to NPORT x%x " + "Data: x%x x%x\n", + ndlp->nlp_DID, genwqe->iotag, + vport->port_state); + lpfc_sli_release_iocbq(phba, genwqe); + return 1; + } + return 0; +} + +/** + * lpfc_nvme_ls_req - Issue an Link Service request + * @lpfc_pnvme: Pointer to the driver's nvme instance data + * @lpfc_nvme_lport: Pointer to the driver's local port data + * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq + * + * Driver registers this routine to handle any link service request + * from the nvme_fc transport to a remote nvme-aware port. + * + * Return value : + * 0 - Success + * TODO: What are the failure codes. 
+ **/ +static int +lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport, + struct nvme_fc_remote_port *pnvme_rport, + struct nvmefc_ls_req *pnvme_lsreq) +{ + int ret = 0; + struct lpfc_nvme_lport *lport; + struct lpfc_vport *vport; + struct lpfc_nodelist *ndlp; + struct ulp_bde64 *bpl; + struct lpfc_dmabuf *bmp; + + /* there are two dma buf in the request, actually there is one and + * the second one is just the start address + cmd size. + * Before calling lpfc_nvme_gen_req these buffers need to be wrapped + * in a lpfc_dmabuf struct. When freeing we just free the wrapper + * because the nvem layer owns the data bufs. + * We do not have to break these packets open, we don't care what is in + * them. And we do not have to look at the resonse data, we only care + * that we got a response. All of the caring is going to happen in the + * nvme-fc layer. + */ + + lport = (struct lpfc_nvme_lport *)pnvme_lport->private; + vport = lport->vport; + + ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id); + if (!ndlp) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, + "6043 Could not find node for DID %x\n", + pnvme_rport->port_id); + return 1; + } + bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!bmp) { + + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, + "6044 Could not find node for DID %x\n", + pnvme_rport->port_id); + return 2; + } + INIT_LIST_HEAD(&bmp->list); + bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys)); + if (!bmp->virt) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, + "6042 Could not find node for DID %x\n", + pnvme_rport->port_id); + kfree(bmp); + return 3; + } + bpl = (struct ulp_bde64 *)bmp->virt; + bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma)); + bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma)); + bpl->tus.f.bdeFlags = 0; + bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen; + bpl->tus.w = le32_to_cpu(bpl->tus.w); + bpl++; + + bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma)); + bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma)); + bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; + bpl->tus.f.bdeSize = pnvme_lsreq->rsplen; + bpl->tus.w = le32_to_cpu(bpl->tus.w); + + /* Expand print to include key fields. */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, + "6051 ENTER. lport %p, rport %p lsreq%p rqstlen:%d " + "rsplen:%d %pad %pad\n", + pnvme_lport, pnvme_rport, + pnvme_lsreq, pnvme_lsreq->rqstlen, + pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma, + &pnvme_lsreq->rspdma); + + vport->phba->fc4NvmeLsRequests++; + + /* Hardcode the wait to 30 seconds. Connections are failing otherwise. + * This code allows it all to work. + */ + ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr, + pnvme_lsreq, lpfc_nvme_cmpl_gen_req, + ndlp, 2, 30, 0); + if (ret != WQE_SUCCESS) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, + "6052 EXIT. issue ls wqe failed lport %p, " + "rport %p lsreq%p Status %x DID %x\n", + pnvme_lport, pnvme_rport, pnvme_lsreq, + ret, ndlp->nlp_DID); + lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys); + kfree(bmp); + return ret; + } + + /* Stub in routine and return 0 for now. */ + return ret; +} + +/** + * lpfc_nvme_ls_abort - Issue an Link Service request + * @lpfc_pnvme: Pointer to the driver's nvme instance data + * @lpfc_nvme_lport: Pointer to the driver's local port data + * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq + * + * Driver registers this routine to handle any link service request + * from the nvme_fc transport to a remote nvme-aware port. 
+ * + * Return value : + * 0 - Success + * TODO: What are the failure codes. + **/ +static void +lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport, + struct nvme_fc_remote_port *pnvme_rport, + struct nvmefc_ls_req *pnvme_lsreq) +{ + struct lpfc_nvme_lport *lport; + struct lpfc_vport *vport; + struct lpfc_hba *phba; + struct lpfc_nodelist *ndlp; + LIST_HEAD(abort_list); + struct lpfc_sli_ring *pring; + struct lpfc_iocbq *wqe, *next_wqe; + + lport = (struct lpfc_nvme_lport *)pnvme_lport->private; + vport = lport->vport; + phba = vport->phba; + + ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id); + if (!ndlp) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, + "6049 Could not find node for DID %x\n", + pnvme_rport->port_id); + return; + } + + /* Expand print to include key fields. */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS, + "6040 ENTER. lport %p, rport %p lsreq %p rqstlen:%d " + "rsplen:%d %pad %pad\n", + pnvme_lport, pnvme_rport, + pnvme_lsreq, pnvme_lsreq->rqstlen, + pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma, + &pnvme_lsreq->rspdma); + + /* + * Lock the ELS ring txcmplq and build a local list of all ELS IOs + * that need an ABTS. The IOs need to stay on the txcmplq so that + * the abort operation completes them successfully. + */ + pring = phba->sli4_hba.nvmels_wq->pring; + spin_lock_irq(&phba->hbalock); + spin_lock(&pring->ring_lock); + list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) { + /* Add to abort_list on on NDLP match. */ + if (lpfc_check_sli_ndlp(phba, pring, wqe, ndlp)) { + wqe->iocb_flag |= LPFC_DRIVER_ABORTED; + list_add_tail(&wqe->dlist, &abort_list); + } + } + spin_unlock(&pring->ring_lock); + spin_unlock_irq(&phba->hbalock); + + /* Abort the targeted IOs and remove them from the abort list. */ + list_for_each_entry_safe(wqe, next_wqe, &abort_list, dlist) { + spin_lock_irq(&phba->hbalock); + list_del_init(&wqe->dlist); + lpfc_sli_issue_abort_iotag(phba, pring, wqe); + spin_unlock_irq(&phba->hbalock); + } +} + +/* Fix up the existing sgls for NVME IO. */ +static void +lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport, + struct lpfc_nvme_buf *lpfc_ncmd, + struct nvmefc_fcp_req *nCmd) +{ + struct sli4_sge *sgl; + union lpfc_wqe128 *wqe; + uint32_t *wptr, *dptr; + + /* + * Adjust the FCP_CMD and FCP_RSP DMA data and sge_len to + * match NVME. NVME sends 96 bytes. Also, use the + * nvme commands command and response dma addresses + * rather than the virtual memory to ease the restore + * operation. + */ + sgl = lpfc_ncmd->nvme_sgl; + sgl->sge_len = cpu_to_le32(nCmd->cmdlen); + + sgl++; + + /* Setup the physical region for the FCP RSP */ + sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->rspdma)); + sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->rspdma)); + sgl->word2 = le32_to_cpu(sgl->word2); + if (nCmd->sg_cnt) + bf_set(lpfc_sli4_sge_last, sgl, 0); + else + bf_set(lpfc_sli4_sge_last, sgl, 1); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = cpu_to_le32(nCmd->rsplen); + + /* + * Get a local pointer to the built-in wqe and correct + * the cmd size to match NVME's 96 bytes and fix + * the dma address. 
+ */ + + /* 128 byte wqe support here */ + wqe = (union lpfc_wqe128 *)&lpfc_ncmd->cur_iocbq.wqe; + + /* Word 0-2 - NVME CMND IU (embedded payload) */ + wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED; + wqe->generic.bde.tus.f.bdeSize = 60; + wqe->generic.bde.addrHigh = 0; + wqe->generic.bde.addrLow = 64; /* Word 16 */ + + /* Word 3 */ + bf_set(payload_offset_len, &wqe->fcp_icmd, + (nCmd->rsplen + nCmd->cmdlen)); + + /* Word 10 */ + bf_set(wqe_nvme, &wqe->fcp_icmd.wqe_com, 1); + bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1); + + /* + * Embed the payload in the last half of the WQE + * WQE words 16-30 get the NVME CMD IU payload + * + * WQE words 16-19 get payload Words 1-4 + * WQE words 20-21 get payload Words 6-7 + * WQE words 22-29 get payload Words 16-23 + */ + wptr = &wqe->words[16]; /* WQE ptr */ + dptr = (uint32_t *)nCmd->cmdaddr; /* payload ptr */ + dptr++; /* Skip Word 0 in payload */ + + *wptr++ = *dptr++; /* Word 1 */ + *wptr++ = *dptr++; /* Word 2 */ + *wptr++ = *dptr++; /* Word 3 */ + *wptr++ = *dptr++; /* Word 4 */ + dptr++; /* Skip Word 5 in payload */ + *wptr++ = *dptr++; /* Word 6 */ + *wptr++ = *dptr++; /* Word 7 */ + dptr += 8; /* Skip Words 8-15 in payload */ + *wptr++ = *dptr++; /* Word 16 */ + *wptr++ = *dptr++; /* Word 17 */ + *wptr++ = *dptr++; /* Word 18 */ + *wptr++ = *dptr++; /* Word 19 */ + *wptr++ = *dptr++; /* Word 20 */ + *wptr++ = *dptr++; /* Word 21 */ + *wptr++ = *dptr++; /* Word 22 */ + *wptr = *dptr; /* Word 23 */ +} + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS +static void +lpfc_nvme_ktime(struct lpfc_hba *phba, + struct lpfc_nvme_buf *lpfc_ncmd) +{ + uint64_t seg1, seg2, seg3, seg4; + + if (!phba->ktime_on) + return; + if (!lpfc_ncmd->ts_last_cmd || + !lpfc_ncmd->ts_cmd_start || + !lpfc_ncmd->ts_cmd_wqput || + !lpfc_ncmd->ts_isr_cmpl || + !lpfc_ncmd->ts_data_nvme) + return; + if (lpfc_ncmd->ts_cmd_start < lpfc_ncmd->ts_last_cmd) + return; + if (lpfc_ncmd->ts_cmd_wqput < lpfc_ncmd->ts_cmd_start) + return; + if (lpfc_ncmd->ts_isr_cmpl < lpfc_ncmd->ts_cmd_wqput) + return; + if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_isr_cmpl) + return; + /* + * Segment 1 - Time from Last FCP command cmpl is handed + * off to NVME Layer to start of next command. + * Segment 2 - Time from Driver receives a IO cmd start + * from NVME Layer to WQ put is done on IO cmd. + * Segment 3 - Time from Driver WQ put is done on IO cmd + * to MSI-X ISR for IO cmpl. + * Segment 4 - Time from MSI-X ISR for IO cmpl to when + * cmpl is handled off to the NVME Layer. 
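The four segments enumerated above are consecutive intervals on a single timeline; the subtraction through ts_cmd_start in the code below reduces to plain deltas between neighbouring timestamps. A sketch only (the helper name is illustrative, the arithmetic matches the code that follows):

static void example_ktime_segments(u64 last_cmd, u64 cmd_start, u64 wqput,
				   u64 isr_cmpl, u64 data_nvme, u64 seg[4])
{
	seg[0] = cmd_start - last_cmd;	/* prior NVME cmpl -> next cmd start */
	seg[1] = wqput - cmd_start;	/* cmd start -> WQ doorbell */
	seg[2] = isr_cmpl - wqput;	/* WQ doorbell -> MSI-X completion */
	seg[3] = data_nvme - isr_cmpl;	/* MSI-X completion -> NVME layer */
}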
+ */ + seg1 = lpfc_ncmd->ts_cmd_start - lpfc_ncmd->ts_last_cmd; + if (seg1 > 5000000) /* 5 ms - for sequential IOs */ + return; + + /* Calculate times relative to start of IO */ + seg2 = (lpfc_ncmd->ts_cmd_wqput - lpfc_ncmd->ts_cmd_start); + seg3 = (lpfc_ncmd->ts_isr_cmpl - + lpfc_ncmd->ts_cmd_start) - seg2; + seg4 = (lpfc_ncmd->ts_data_nvme - + lpfc_ncmd->ts_cmd_start) - seg2 - seg3; + phba->ktime_data_samples++; + phba->ktime_seg1_total += seg1; + if (seg1 < phba->ktime_seg1_min) + phba->ktime_seg1_min = seg1; + else if (seg1 > phba->ktime_seg1_max) + phba->ktime_seg1_max = seg1; + phba->ktime_seg2_total += seg2; + if (seg2 < phba->ktime_seg2_min) + phba->ktime_seg2_min = seg2; + else if (seg2 > phba->ktime_seg2_max) + phba->ktime_seg2_max = seg2; + phba->ktime_seg3_total += seg3; + if (seg3 < phba->ktime_seg3_min) + phba->ktime_seg3_min = seg3; + else if (seg3 > phba->ktime_seg3_max) + phba->ktime_seg3_max = seg3; + phba->ktime_seg4_total += seg4; + if (seg4 < phba->ktime_seg4_min) + phba->ktime_seg4_min = seg4; + else if (seg4 > phba->ktime_seg4_max) + phba->ktime_seg4_max = seg4; + + lpfc_ncmd->ts_last_cmd = 0; + lpfc_ncmd->ts_cmd_start = 0; + lpfc_ncmd->ts_cmd_wqput = 0; + lpfc_ncmd->ts_isr_cmpl = 0; + lpfc_ncmd->ts_data_nvme = 0; +} +#endif + +/** + * lpfc_nvme_io_cmd_wqe_cmpl - Complete an NVME-over-FCP IO + * @lpfc_pnvme: Pointer to the driver's nvme instance data + * @lpfc_nvme_lport: Pointer to the driver's local port data + * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq + * + * Driver registers this routine as it io request handler. This + * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq + * data structure to the rport indicated in @lpfc_nvme_rport. + * + * Return value : + * 0 - Success + * TODO: What are the failure codes. + **/ +static void +lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, + struct lpfc_wcqe_complete *wcqe) +{ + struct lpfc_nvme_buf *lpfc_ncmd = + (struct lpfc_nvme_buf *)pwqeIn->context1; + struct lpfc_vport *vport = pwqeIn->vport; + struct nvmefc_fcp_req *nCmd; + struct nvme_fc_ersp_iu *ep; + struct nvme_fc_cmd_iu *cp; + struct lpfc_nvme_rport *rport; + struct lpfc_nodelist *ndlp; + unsigned long flags; + uint32_t code; + uint16_t cid, sqhd, data; + uint32_t *ptr; + + /* Sanity check on return of outstanding command */ + if (!lpfc_ncmd || !lpfc_ncmd->nvmeCmd || !lpfc_ncmd->nrport) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR, + "6071 Completion pointers bad on wqe %p.\n", + wcqe); + return; + } + phba->fc4NvmeIoCmpls++; + + nCmd = lpfc_ncmd->nvmeCmd; + rport = lpfc_ncmd->nrport; + + lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n", + lpfc_ncmd->cur_iocbq.sli4_xritag, + bf_get(lpfc_wcqe_c_status, wcqe), wcqe->parameter); + /* + * Catch race where our node has transitioned, but the + * transport is still transitioning. + */ + ndlp = rport->ndlp; + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR, + "6061 rport %p, ndlp %p, DID x%06x ndlp " + "not ready.\n", + rport, ndlp, rport->remoteport->port_id); + + ndlp = lpfc_findnode_did(vport, rport->remoteport->port_id); + if (!ndlp) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, + "6062 Ignoring NVME cmpl. 
No ndlp\n"); + goto out_err; + } + } + + code = bf_get(lpfc_wcqe_c_code, wcqe); + if (code == CQE_CODE_NVME_ERSP) { + /* For this type of CQE, we need to rebuild the rsp */ + ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr; + + /* + * Get Command Id from cmd to plug into response. This + * code is not needed in the next NVME Transport drop. + */ + cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr; + cid = cp->sqe.common.command_id; + + /* + * RSN is in CQE word 2 + * SQHD is in CQE Word 3 bits 15:0 + * Cmd Specific info is in CQE Word 1 + * and in CQE Word 0 bits 15:0 + */ + sqhd = bf_get(lpfc_wcqe_c_sqhead, wcqe); + + /* Now lets build the NVME ERSP IU */ + ep->iu_len = cpu_to_be16(8); + ep->rsn = wcqe->parameter; + ep->xfrd_len = cpu_to_be32(nCmd->payload_length); + ep->rsvd12 = 0; + ptr = (uint32_t *)&ep->cqe.result.u64; + *ptr++ = wcqe->total_data_placed; + data = bf_get(lpfc_wcqe_c_ersp0, wcqe); + *ptr = (uint32_t)data; + ep->cqe.sq_head = sqhd; + ep->cqe.sq_id = nCmd->sqid; + ep->cqe.command_id = cid; + ep->cqe.status = 0; + + lpfc_ncmd->status = IOSTAT_SUCCESS; + lpfc_ncmd->result = 0; + nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN; + nCmd->transferred_length = nCmd->payload_length; + } else { + lpfc_ncmd->status = (bf_get(lpfc_wcqe_c_status, wcqe) & + LPFC_IOCB_STATUS_MASK); + lpfc_ncmd->result = wcqe->parameter; + + /* For NVME, the only failure path that results in an + * IO error is when the adapter rejects it. All other + * conditions are a success case and resolved by the + * transport. + * IOSTAT_FCP_RSP_ERROR means: + * 1. Length of data received doesn't match total + * transfer length in WQE + * 2. If the RSP payload does NOT match these cases: + * a. RSP length 12/24 bytes and all zeros + * b. NVME ERSP + */ + switch (lpfc_ncmd->status) { + case IOSTAT_SUCCESS: + nCmd->transferred_length = wcqe->total_data_placed; + nCmd->rcv_rsplen = 0; + nCmd->status = 0; + break; + case IOSTAT_FCP_RSP_ERROR: + nCmd->transferred_length = wcqe->total_data_placed; + nCmd->rcv_rsplen = wcqe->parameter; + nCmd->status = 0; + /* Sanity check */ + if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN) + break; + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, + "6081 NVME Completion Protocol Error: " + "status x%x result x%x placed x%x\n", + lpfc_ncmd->status, lpfc_ncmd->result, + wcqe->total_data_placed); + break; + default: +out_err: + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, + "6072 NVME Completion Error: " + "status x%x result x%x placed x%x\n", + lpfc_ncmd->status, lpfc_ncmd->result, + wcqe->total_data_placed); + nCmd->transferred_length = 0; + nCmd->rcv_rsplen = 0; + nCmd->status = NVME_SC_FC_TRANSPORT_ERROR; + } + } + + /* pick up SLI4 exhange busy condition */ + if (bf_get(lpfc_wcqe_c_xb, wcqe)) + lpfc_ncmd->flags |= LPFC_SBUF_XBUSY; + else + lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY; + + if (ndlp && NLP_CHK_NODE_ACT(ndlp)) + atomic_dec(&ndlp->cmd_pending); + + /* Update stats and complete the IO. There is + * no need for dma unprep because the nvme_transport + * owns the dma address. 
+ */ +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (phba->ktime_on) { + lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp; + lpfc_ncmd->ts_data_nvme = ktime_get_ns(); + phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme; + lpfc_nvme_ktime(phba, lpfc_ncmd); + } + if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) { + if (lpfc_ncmd->cpu != smp_processor_id()) + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, + "6701 CPU Check cmpl: " + "cpu %d expect %d\n", + smp_processor_id(), lpfc_ncmd->cpu); + if (lpfc_ncmd->cpu < LPFC_CHECK_CPU_CNT) + phba->cpucheck_cmpl_io[lpfc_ncmd->cpu]++; + } +#endif + nCmd->done(nCmd); + + spin_lock_irqsave(&phba->hbalock, flags); + lpfc_ncmd->nrport = NULL; + spin_unlock_irqrestore(&phba->hbalock, flags); + + lpfc_release_nvme_buf(phba, lpfc_ncmd); +} + + +/** + * lpfc_nvme_prep_io_cmd - Issue an NVME-over-FCP IO + * @lpfc_pnvme: Pointer to the driver's nvme instance data + * @lpfc_nvme_lport: Pointer to the driver's local port data + * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq + * @lpfc_nvme_fcreq: IO request from nvme fc to driver. + * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue + * + * Driver registers this routine as it io request handler. This + * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq + * data structure to the rport indicated in @lpfc_nvme_rport. + * + * Return value : + * 0 - Success + * TODO: What are the failure codes. + **/ +static int +lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, + struct lpfc_nvme_buf *lpfc_ncmd, + struct lpfc_nodelist *pnode) +{ + struct lpfc_hba *phba = vport->phba; + struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd; + struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq); + union lpfc_wqe128 *wqe = (union lpfc_wqe128 *)&pwqeq->wqe; + uint32_t req_len; + + if (!pnode || !NLP_CHK_NODE_ACT(pnode)) + return -EINVAL; + + /* + * There are three possibilities here - use scatter-gather segment, use + * the single mapping, or neither. 
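The three possibilities mentioned above come down to which command opcode lands in WQE word 7 of lpfc_nvme_prep_io_cmd() below. A compressed sketch (helper name illustrative, opcodes taken from the function body):

static u8 example_nvme_wqe_opcode(u32 sg_cnt, bool is_write)
{
	if (!sg_cnt)
		return CMD_FCP_ICMND64_WQE;	/* no data transfer */
	return is_write ? CMD_FCP_IWRITE64_WQE : CMD_FCP_IREAD64_WQE;
}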
+ */ + wqe->fcp_iwrite.initial_xfer_len = 0; + if (nCmd->sg_cnt) { + if (nCmd->io_dir == NVMEFC_FCP_WRITE) { + /* Word 5 */ + if ((phba->cfg_nvme_enable_fb) && + (pnode->nlp_flag & NLP_FIRSTBURST)) { + req_len = lpfc_ncmd->nvmeCmd->payload_length; + if (req_len < pnode->nvme_fb_size) + wqe->fcp_iwrite.initial_xfer_len = + req_len; + else + wqe->fcp_iwrite.initial_xfer_len = + pnode->nvme_fb_size; + } + + /* Word 7 */ + bf_set(wqe_cmnd, &wqe->generic.wqe_com, + CMD_FCP_IWRITE64_WQE); + bf_set(wqe_pu, &wqe->generic.wqe_com, + PARM_READ_CHECK); + + /* Word 10 */ + bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0); + bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, + LPFC_WQE_IOD_WRITE); + bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, + LPFC_WQE_LENLOC_WORD4); + if (phba->cfg_nvme_oas) + bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1); + + /* Word 11 */ + bf_set(wqe_cmd_type, &wqe->generic.wqe_com, + NVME_WRITE_CMD); + + phba->fc4NvmeOutputRequests++; + } else { + /* Word 7 */ + bf_set(wqe_cmnd, &wqe->generic.wqe_com, + CMD_FCP_IREAD64_WQE); + bf_set(wqe_pu, &wqe->generic.wqe_com, + PARM_READ_CHECK); + + /* Word 10 */ + bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0); + bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, + LPFC_WQE_IOD_READ); + bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, + LPFC_WQE_LENLOC_WORD4); + if (phba->cfg_nvme_oas) + bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1); + + /* Word 11 */ + bf_set(wqe_cmd_type, &wqe->generic.wqe_com, + NVME_READ_CMD); + + phba->fc4NvmeInputRequests++; + } + } else { + /* Word 4 */ + wqe->fcp_icmd.rsrvd4 = 0; + + /* Word 7 */ + bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_FCP_ICMND64_WQE); + bf_set(wqe_pu, &wqe->generic.wqe_com, 0); + + /* Word 10 */ + bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1); + bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE); + bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, + LPFC_WQE_LENLOC_NONE); + if (phba->cfg_nvme_oas) + bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1); + + /* Word 11 */ + bf_set(wqe_cmd_type, &wqe->generic.wqe_com, NVME_READ_CMD); + + phba->fc4NvmeControlRequests++; + } + /* + * Finish initializing those WQE fields that are independent + * of the nvme_cmnd request_buffer + */ + + /* Word 6 */ + bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, + phba->sli4_hba.rpi_ids[pnode->nlp_rpi]); + bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag); + + /* Word 7 */ + /* Preserve Class data in the ndlp. */ + bf_set(wqe_class, &wqe->generic.wqe_com, + (pnode->nlp_fcp_info & 0x0f)); + + /* Word 8 */ + wqe->generic.wqe_com.abort_tag = pwqeq->iotag; + + /* Word 9 */ + bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag); + + /* Word 11 */ + bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); + + pwqeq->vport = vport; + return 0; +} + + +/** + * lpfc_nvme_prep_io_dma - Issue an NVME-over-FCP IO + * @lpfc_pnvme: Pointer to the driver's nvme instance data + * @lpfc_nvme_lport: Pointer to the driver's local port data + * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq + * @lpfc_nvme_fcreq: IO request from nvme fc to driver. + * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue + * + * Driver registers this routine as it io request handler. This + * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq + * data structure to the rport indicated in @lpfc_nvme_rport. + * + * Return value : + * 0 - Success + * TODO: What are the failure codes. 
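For a write with first burst negotiated, the initial transfer length chosen above is simply the payload length capped at the PRLI-negotiated nvme_fb_size. A one-line sketch (name illustrative, not a driver interface):

static u32 example_first_burst_len(u32 payload_len, u32 nvme_fb_size)
{
	return min(payload_len, nvme_fb_size);
}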
+ **/ +static int +lpfc_nvme_prep_io_dma(struct lpfc_vport *vport, + struct lpfc_nvme_buf *lpfc_ncmd) +{ + struct lpfc_hba *phba = vport->phba; + struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd; + union lpfc_wqe128 *wqe = (union lpfc_wqe128 *)&lpfc_ncmd->cur_iocbq.wqe; + struct sli4_sge *sgl = lpfc_ncmd->nvme_sgl; + struct scatterlist *data_sg; + struct sli4_sge *first_data_sgl; + dma_addr_t physaddr; + uint32_t num_bde = 0; + uint32_t dma_len; + uint32_t dma_offset = 0; + int nseg, i; + + /* Fix up the command and response DMA stuff. */ + lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd); + + /* + * There are three possibilities here - use scatter-gather segment, use + * the single mapping, or neither. + */ + if (nCmd->sg_cnt) { + /* + * Jump over the cmd and rsp SGEs. The fix routine + * has already adjusted for this. + */ + sgl += 2; + + first_data_sgl = sgl; + lpfc_ncmd->seg_cnt = nCmd->sg_cnt; + if (lpfc_ncmd->seg_cnt > phba->cfg_sg_seg_cnt) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6058 Too many sg segments from " + "NVME Transport. Max %d, " + "nvmeIO sg_cnt %d\n", + phba->cfg_sg_seg_cnt, + lpfc_ncmd->seg_cnt); + lpfc_ncmd->seg_cnt = 0; + return 1; + } + + /* + * The driver established a maximum scatter-gather segment count + * during probe that limits the number of sg elements in any + * single nvme command. Just run through the seg_cnt and format + * the sge's. + */ + nseg = nCmd->sg_cnt; + data_sg = nCmd->first_sgl; + for (i = 0; i < nseg; i++) { + if (data_sg == NULL) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6059 dptr err %d, nseg %d\n", + i, nseg); + lpfc_ncmd->seg_cnt = 0; + return 1; + } + physaddr = data_sg->dma_address; + dma_len = data_sg->length; + sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr)); + sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); + sgl->word2 = le32_to_cpu(sgl->word2); + if ((num_bde + 1) == nseg) + bf_set(lpfc_sli4_sge_last, sgl, 1); + else + bf_set(lpfc_sli4_sge_last, sgl, 0); + bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = cpu_to_le32(dma_len); + + dma_offset += dma_len; + data_sg = sg_next(data_sg); + sgl++; + } + } else { + /* For this clause to be valid, the payload_length + * and sg_cnt must zero. + */ + if (nCmd->payload_length != 0) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6063 NVME DMA Prep Err: sg_cnt %d " + "payload_length x%x\n", + nCmd->sg_cnt, nCmd->payload_length); + return 1; + } + } + + /* + * Due to difference in data length between DIF/non-DIF paths, + * we need to set word 4 of WQE here + */ + wqe->fcp_iread.total_xfer_len = nCmd->payload_length; + return 0; +} + +/** + * lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO + * @lpfc_pnvme: Pointer to the driver's nvme instance data + * @lpfc_nvme_lport: Pointer to the driver's local port data + * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq + * @lpfc_nvme_fcreq: IO request from nvme fc to driver. + * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue + * + * Driver registers this routine as it io request handler. This + * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq + * data structure to the rport + indicated in @lpfc_nvme_rport. + * + * Return value : + * 0 - Success + * TODO: What are the failure codes. 
+ **/ +static int +lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, + struct nvme_fc_remote_port *pnvme_rport, + void *hw_queue_handle, + struct nvmefc_fcp_req *pnvme_fcreq) +{ + int ret = 0; + struct lpfc_nvme_lport *lport; + struct lpfc_vport *vport; + struct lpfc_hba *phba; + struct lpfc_nodelist *ndlp; + struct lpfc_nvme_buf *lpfc_ncmd; + struct lpfc_nvme_rport *rport; + struct lpfc_nvme_qhandle *lpfc_queue_info; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + uint64_t start = 0; +#endif + + lport = (struct lpfc_nvme_lport *)pnvme_lport->private; + vport = lport->vport; + phba = vport->phba; + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (phba->ktime_on) + start = ktime_get_ns(); +#endif + rport = (struct lpfc_nvme_rport *)pnvme_rport->private; + lpfc_queue_info = (struct lpfc_nvme_qhandle *)hw_queue_handle; + + /* + * Catch race where our node has transitioned, but the + * transport is still transitioning. + */ + ndlp = rport->ndlp; + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR, + "6053 rport %p, ndlp %p, DID x%06x " + "ndlp not ready.\n", + rport, ndlp, pnvme_rport->port_id); + + ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id); + if (!ndlp) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, + "6066 Missing node for DID %x\n", + pnvme_rport->port_id); + ret = -ENODEV; + goto out_fail; + } + } + + /* The remote node has to be a mapped target or it's an error. */ + if ((ndlp->nlp_type & NLP_NVME_TARGET) && + (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR, + "6036 rport %p, DID x%06x not ready for " + "IO. State x%x, Type x%x\n", + rport, pnvme_rport->port_id, + ndlp->nlp_state, ndlp->nlp_type); + ret = -ENODEV; + goto out_fail; + + } + + /* The node is shared with FCP IO, make sure the IO pending count does + * not exceed the programmed depth. + */ + if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) { + ret = -EAGAIN; + goto out_fail; + } + + lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp); + if (lpfc_ncmd == NULL) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, + "6065 driver's buffer pool is empty, " + "IO failed\n"); + ret = -ENOMEM; + goto out_fail; + } +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (phba->ktime_on) { + lpfc_ncmd->ts_cmd_start = start; + lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd; + } +#endif + + /* + * Store the data needed by the driver to issue, abort, and complete + * an IO. + * Do not let the IO hang out forever. There is no midlayer issuing + * an abort so inform the FW of the maximum IO pending time. + */ + pnvme_fcreq->private = (void *)lpfc_ncmd; + lpfc_ncmd->nvmeCmd = pnvme_fcreq; + lpfc_ncmd->nrport = rport; + lpfc_ncmd->ndlp = ndlp; + lpfc_ncmd->start_time = jiffies; + + lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp); + ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd); + if (ret) { + ret = -ENOMEM; + goto out_free_nvme_buf; + } + + atomic_inc(&ndlp->cmd_pending); + + /* + * Issue the IO on the WQ indicated by index in the hw_queue_handle. + * This identfier was create in our hardware queue create callback + * routine. The driver now is dependent on the IO queue steering from + * the transport. We are trusting the upper NVME layers know which + * index to use and that they have affinitized a CPU to this hardware + * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ. 
+ */ + lpfc_ncmd->cur_iocbq.hba_wqidx = lpfc_queue_info->index; + + lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n", + lpfc_ncmd->cur_iocbq.sli4_xritag, + lpfc_queue_info->index, ndlp->nlp_DID); + + ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq); + if (ret) { + atomic_dec(&ndlp->cmd_pending); + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, + "6113 FCP could not issue WQE err %x " + "sid: x%x did: x%x oxid: x%x\n", + ret, vport->fc_myDID, ndlp->nlp_DID, + lpfc_ncmd->cur_iocbq.sli4_xritag); + ret = -EBUSY; + goto out_free_nvme_buf; + } + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (phba->ktime_on) + lpfc_ncmd->ts_cmd_wqput = ktime_get_ns(); + + if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) { + lpfc_ncmd->cpu = smp_processor_id(); + if (lpfc_ncmd->cpu != lpfc_queue_info->index) { + /* Check for admin queue */ + if (lpfc_queue_info->qidx) { + lpfc_printf_vlog(vport, + KERN_ERR, LOG_NVME_IOERR, + "6702 CPU Check cmd: " + "cpu %d wq %d\n", + lpfc_ncmd->cpu, + lpfc_queue_info->index); + } + lpfc_ncmd->cpu = lpfc_queue_info->index; + } + if (lpfc_ncmd->cpu < LPFC_CHECK_CPU_CNT) + phba->cpucheck_xmt_io[lpfc_ncmd->cpu]++; + } +#endif + return 0; + + out_free_nvme_buf: + lpfc_release_nvme_buf(phba, lpfc_ncmd); + out_fail: + return ret; +} + +/** + * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request. + * @phba: Pointer to HBA context object + * @cmdiocb: Pointer to command iocb object. + * @rspiocb: Pointer to response iocb object. + * + * This is the callback function for any NVME FCP IO that was aborted. + * + * Return value: + * None + **/ +void +lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_wcqe_complete *abts_cmpl) +{ + lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + "6145 ABORT_XRI_CN completing on rpi x%x " + "original iotag x%x, abort cmd iotag x%x " + "req_tag x%x, status x%x, hwstatus x%x\n", + cmdiocb->iocb.un.acxri.abortContextTag, + cmdiocb->iocb.un.acxri.abortIoTag, + cmdiocb->iotag, + bf_get(lpfc_wcqe_c_request_tag, abts_cmpl), + bf_get(lpfc_wcqe_c_status, abts_cmpl), + bf_get(lpfc_wcqe_c_hw_status, abts_cmpl)); + lpfc_sli_release_iocbq(phba, cmdiocb); +} + +/** + * lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS + * @lpfc_pnvme: Pointer to the driver's nvme instance data + * @lpfc_nvme_lport: Pointer to the driver's local port data + * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq + * @lpfc_nvme_fcreq: IO request from nvme fc to driver. + * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue + * + * Driver registers this routine as its nvme request io abort handler. This + * routine issues an fcp Abort WQE with data from the @lpfc_nvme_fcpreq + * data structure to the rport indicated in @lpfc_nvme_rport. This routine + * is executed asynchronously - one the target is validated as "MAPPED" and + * ready for IO, the driver issues the abort request and returns. 
+ * + * Return value: + * None + **/ +static void +lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, + struct nvme_fc_remote_port *pnvme_rport, + void *hw_queue_handle, + struct nvmefc_fcp_req *pnvme_fcreq) +{ + struct lpfc_nvme_lport *lport; + struct lpfc_vport *vport; + struct lpfc_hba *phba; + struct lpfc_nodelist *ndlp; + struct lpfc_nvme_rport *rport; + struct lpfc_nvme_buf *lpfc_nbuf; + struct lpfc_iocbq *abts_buf; + struct lpfc_iocbq *nvmereq_wqe; + union lpfc_wqe *abts_wqe; + unsigned long flags; + int ret_val; + + lport = (struct lpfc_nvme_lport *)pnvme_lport->private; + rport = (struct lpfc_nvme_rport *)pnvme_rport->private; + vport = lport->vport; + phba = vport->phba; + + /* Announce entry to new IO submit field. */ + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, + "6002 Abort Request to rport DID x%06x " + "for nvme_fc_req %p\n", + pnvme_rport->port_id, + pnvme_fcreq); + + /* + * Catch race where our node has transitioned, but the + * transport is still transitioning. + */ + ndlp = rport->ndlp; + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_ABTS, + "6054 rport %p, ndlp %p, DID x%06x ndlp " + " not ready.\n", + rport, ndlp, pnvme_rport->port_id); + + ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id); + if (!ndlp) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, + "6055 Could not find node for " + "DID %x\n", + pnvme_rport->port_id); + return; + } + } + + /* The remote node has to be ready to send an abort. */ + if ((ndlp->nlp_state != NLP_STE_MAPPED_NODE) && + !(ndlp->nlp_type & NLP_NVME_TARGET)) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_ABTS, + "6048 rport %p, DID x%06x not ready for " + "IO. State x%x, Type x%x\n", + rport, pnvme_rport->port_id, + ndlp->nlp_state, ndlp->nlp_type); + return; + } + + /* If the hba is getting reset, this flag is set. It is + * cleared when the reset is complete and rings reestablished. + */ + spin_lock_irqsave(&phba->hbalock, flags); + /* driver queued commands are in process of being flushed */ + if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) { + spin_unlock_irqrestore(&phba->hbalock, flags); + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, + "6139 Driver in reset cleanup - flushing " + "NVME Req now. hba_flag x%x\n", + phba->hba_flag); + return; + } + + lpfc_nbuf = (struct lpfc_nvme_buf *)pnvme_fcreq->private; + if (!lpfc_nbuf) { + spin_unlock_irqrestore(&phba->hbalock, flags); + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, + "6140 NVME IO req has no matching lpfc nvme " + "io buffer. Skipping abort req.\n"); + return; + } else if (!lpfc_nbuf->nvmeCmd) { + spin_unlock_irqrestore(&phba->hbalock, flags); + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, + "6141 lpfc NVME IO req has no nvme_fcreq " + "io buffer. Skipping abort req.\n"); + return; + } + + /* + * The lpfc_nbuf and the mapped nvme_fcreq in the driver's + * state must match the nvme_fcreq passed by the nvme + * transport. If they don't match, it is likely the driver + * has already completed the NVME IO and the nvme transport + * has not seen it yet. + */ + if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) { + spin_unlock_irqrestore(&phba->hbalock, flags); + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, + "6143 NVME req mismatch: " + "lpfc_nbuf %p nvmeCmd %p, " + "pnvme_fcreq %p. Skipping Abort\n", + lpfc_nbuf, lpfc_nbuf->nvmeCmd, + pnvme_fcreq); + return; + } + + /* Don't abort IOs no longer on the pending queue. 
*/ + nvmereq_wqe = &lpfc_nbuf->cur_iocbq; + if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) { + spin_unlock_irqrestore(&phba->hbalock, flags); + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, + "6142 NVME IO req %p not queued - skipping " + "abort req\n", + pnvme_fcreq); + return; + } + + lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n", + nvmereq_wqe->sli4_xritag, + nvmereq_wqe->hba_wqidx, ndlp->nlp_DID); + + /* Outstanding abort is in progress */ + if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) { + spin_unlock_irqrestore(&phba->hbalock, flags); + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, + "6144 Outstanding NVME I/O Abort Request " + "still pending on nvme_fcreq %p, " + "lpfc_ncmd %p\n", + pnvme_fcreq, lpfc_nbuf); + return; + } + + abts_buf = __lpfc_sli_get_iocbq(phba); + if (!abts_buf) { + spin_unlock_irqrestore(&phba->hbalock, flags); + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, + "6136 No available abort wqes. Skipping " + "Abts req for nvme_fcreq %p.\n", + pnvme_fcreq); + return; + } + + /* Ready - mark outstanding as aborted by driver. */ + nvmereq_wqe->iocb_flag |= LPFC_DRIVER_ABORTED; + + /* Complete prepping the abort wqe and issue to the FW. */ + abts_wqe = &abts_buf->wqe; + + /* WQEs are reused. Clear stale data and set key fields to + * zero like ia, iaab, iaar, xri_tag, and ctxt_tag. + */ + memset(abts_wqe, 0, sizeof(union lpfc_wqe)); + bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG); + + /* word 7 */ + bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0); + bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); + bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com, + nvmereq_wqe->iocb.ulpClass); + + /* word 8 - tell the FW to abort the IO associated with this + * outstanding exchange ID. + */ + abts_wqe->abort_cmd.wqe_com.abort_tag = nvmereq_wqe->sli4_xritag; + + /* word 9 - this is the iotag for the abts_wqe completion. */ + bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com, + abts_buf->iotag); + + /* word 10 */ + bf_set(wqe_wqid, &abts_wqe->abort_cmd.wqe_com, nvmereq_wqe->hba_wqidx); + bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1); + bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE); + + /* word 11 */ + bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND); + bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1); + bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); + + /* ABTS WQE must go to the same WQ as the WQE to be aborted */ + abts_buf->iocb_flag |= LPFC_IO_NVME; + abts_buf->hba_wqidx = nvmereq_wqe->hba_wqidx; + abts_buf->vport = vport; + abts_buf->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl; + ret_val = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_buf); + spin_unlock_irqrestore(&phba->hbalock, flags); + if (ret_val == IOCB_ERROR) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, + "6137 Failed abts issue_wqe with status x%x " + "for nvme_fcreq %p.\n", + ret_val, pnvme_fcreq); + lpfc_sli_release_iocbq(phba, abts_buf); + return; + } + + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, + "6138 Transport Abort NVME Request Issued for\n" + "ox_id x%x on reqtag x%x\n", + nvmereq_wqe->sli4_xritag, + abts_buf->iotag); +} + +/* Declare and initialization an instance of the FC NVME template. 
*/ +static struct nvme_fc_port_template lpfc_nvme_template = { + /* initiator-based functions */ + .localport_delete = lpfc_nvme_localport_delete, + .remoteport_delete = lpfc_nvme_remoteport_delete, + .create_queue = lpfc_nvme_create_queue, + .delete_queue = lpfc_nvme_delete_queue, + .ls_req = lpfc_nvme_ls_req, + .fcp_io = lpfc_nvme_fcp_io_submit, + .ls_abort = lpfc_nvme_ls_abort, + .fcp_abort = lpfc_nvme_fcp_abort, + + .max_hw_queues = 1, + .max_sgl_segments = LPFC_NVME_DEFAULT_SEGS, + .max_dif_sgl_segments = LPFC_NVME_DEFAULT_SEGS, + .dma_boundary = 0xFFFFFFFF, + + /* Sizes of additional private data for data structures. + * No use for the last two sizes at this time. + */ + .local_priv_sz = sizeof(struct lpfc_nvme_lport), + .remote_priv_sz = sizeof(struct lpfc_nvme_rport), + .lsrqst_priv_sz = 0, + .fcprqst_priv_sz = 0, +}; + +/** + * lpfc_sli4_post_nvme_sgl_block - post a block of nvme sgl list to firmware + * @phba: pointer to lpfc hba data structure. + * @nblist: pointer to nvme buffer list. + * @count: number of scsi buffers on the list. + * + * This routine is invoked to post a block of @count scsi sgl pages from a + * SCSI buffer list @nblist to the HBA using non-embedded mailbox command. + * No Lock is held. + * + **/ +static int +lpfc_sli4_post_nvme_sgl_block(struct lpfc_hba *phba, + struct list_head *nblist, + int count) +{ + struct lpfc_nvme_buf *lpfc_ncmd; + struct lpfc_mbx_post_uembed_sgl_page1 *sgl; + struct sgl_page_pairs *sgl_pg_pairs; + void *viraddr; + LPFC_MBOXQ_t *mbox; + uint32_t reqlen, alloclen, pg_pairs; + uint32_t mbox_tmo; + uint16_t xritag_start = 0; + int rc = 0; + uint32_t shdr_status, shdr_add_status; + dma_addr_t pdma_phys_bpl1; + union lpfc_sli4_cfg_shdr *shdr; + + /* Calculate the requested length of the dma memory */ + reqlen = count * sizeof(struct sgl_page_pairs) + + sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); + if (reqlen > SLI4_PAGE_SIZE) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "6118 Block sgl registration required DMA " + "size (%d) great than a page\n", reqlen); + return -ENOMEM; + } + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6119 Failed to allocate mbox cmd memory\n"); + return -ENOMEM; + } + + /* Allocate DMA memory and set up the non-embedded mailbox command */ + alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, + LPFC_SLI4_MBX_NEMBED); + + if (alloclen < reqlen) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6120 Allocated DMA memory size (%d) is " + "less than the requested DMA memory " + "size (%d)\n", alloclen, reqlen); + lpfc_sli4_mbox_cmd_free(phba, mbox); + return -ENOMEM; + } + + /* Get the first SGE entry from the non-embedded DMA memory */ + viraddr = mbox->sge_array->addr[0]; + + /* Set up the SGL pages in the non-embedded DMA pages */ + sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; + sgl_pg_pairs = &sgl->sgl_pg_pairs; + + pg_pairs = 0; + list_for_each_entry(lpfc_ncmd, nblist, list) { + /* Set up the sge entry */ + sgl_pg_pairs->sgl_pg0_addr_lo = + cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl)); + sgl_pg_pairs->sgl_pg0_addr_hi = + cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl)); + if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) + pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl + + SGL_PAGE_SIZE; + else + pdma_phys_bpl1 = 0; + sgl_pg_pairs->sgl_pg1_addr_lo = + cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); + sgl_pg_pairs->sgl_pg1_addr_hi = + 
cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
+		/* Keep the first xritag on the list */
+		if (pg_pairs == 0)
+			xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
+		sgl_pg_pairs++;
+		pg_pairs++;
+	}
+	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
+	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
+	/* Perform endian conversion if necessary */
+	sgl->word0 = cpu_to_le32(sgl->word0);
+
+	if (!phba->sli4_hba.intr_enable)
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	else {
+		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
+		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+	}
+	shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (rc != MBX_TIMEOUT)
+		lpfc_sli4_mbox_cmd_free(phba, mbox);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"6125 POST_SGL_BLOCK mailbox command failed "
+				"status x%x add_status x%x mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		rc = -ENXIO;
+	}
+	return rc;
+}
+
+/**
+ * lpfc_post_nvme_sgl_list - Post blocks of nvme buffer sgls from a list
+ * @phba: pointer to lpfc hba data structure.
+ * @post_nblist: pointer to the nvme buffer list.
+ *
+ * This routine walks a list of nvme buffers that was passed in. It attempts
+ * to construct blocks of nvme buffer sgls which contain contiguous xris and
+ * uses the non-embedded SGL block post mailbox commands to post to the port.
+ * For a single NVME buffer sgl with a non-contiguous xri, if any, it uses the
+ * embedded SGL post mailbox command for posting. The @post_nblist passed in
+ * must be a local list, so no lock is needed when manipulating the list.
+ *
+ * Returns: 0 = failure, non-zero number of successfully posted buffers.
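As an illustration separate from the patch itself, the grouping policy described above can be sketched in standalone C: walk the tags in order, cut a block whenever the sequence has a hole or the block reaches the non-embedded limit, and post a leftover run of one on its own. Every name below (MAX_BLOCK, post_block, post_single, the sample tag values) is hypothetical and only mirrors the shape of lpfc_post_nvme_sgl_list().

        /* Hypothetical sketch; not driver code. */
        #include <stdio.h>

        #define MAX_BLOCK 8        /* stand-in for the non-embedded SGL limit */

        static void post_block(const int *tags, int cnt)
        {
                printf("block post: %d sgls starting at xri %d\n", cnt, tags[0]);
        }

        static void post_single(int tag)
        {
                printf("single post: xri %d\n", tag);
        }

        int main(void)
        {
                int tags[] = { 10, 11, 12, 20, 21, 30 };   /* holes after 12 and 21 */
                int n = sizeof(tags) / sizeof(tags[0]);
                int start = 0;

                for (int i = 1; i <= n; i++) {
                        /* cut a block at a hole, at the end, or when the block is full */
                        if (i == n || tags[i] != tags[i - 1] + 1 ||
                            i - start == MAX_BLOCK) {
                                if (i - start > 1)
                                        post_block(&tags[start], i - start);
                                else
                                        post_single(tags[start]);
                                start = i;
                        }
                }
                return 0;
        }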
+ **/ +static int +lpfc_post_nvme_sgl_list(struct lpfc_hba *phba, + struct list_head *post_nblist, int sb_count) +{ + struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next; + int status, sgl_size; + int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0; + dma_addr_t pdma_phys_sgl1; + int last_xritag = NO_XRI; + int cur_xritag; + LIST_HEAD(prep_nblist); + LIST_HEAD(blck_nblist); + LIST_HEAD(nvme_nblist); + + /* sanity check */ + if (sb_count <= 0) + return -EINVAL; + + sgl_size = phba->cfg_sg_dma_buf_size; + + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) { + list_del_init(&lpfc_ncmd->list); + block_cnt++; + if ((last_xritag != NO_XRI) && + (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) { + /* a hole in xri block, form a sgl posting block */ + list_splice_init(&prep_nblist, &blck_nblist); + post_cnt = block_cnt - 1; + /* prepare list for next posting block */ + list_add_tail(&lpfc_ncmd->list, &prep_nblist); + block_cnt = 1; + } else { + /* prepare list for next posting block */ + list_add_tail(&lpfc_ncmd->list, &prep_nblist); + /* enough sgls for non-embed sgl mbox command */ + if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) { + list_splice_init(&prep_nblist, &blck_nblist); + post_cnt = block_cnt; + block_cnt = 0; + } + } + num_posting++; + last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag; + + /* end of repost sgl list condition for NVME buffers */ + if (num_posting == sb_count) { + if (post_cnt == 0) { + /* last sgl posting block */ + list_splice_init(&prep_nblist, &blck_nblist); + post_cnt = block_cnt; + } else if (block_cnt == 1) { + /* last single sgl with non-contiguous xri */ + if (sgl_size > SGL_PAGE_SIZE) + pdma_phys_sgl1 = + lpfc_ncmd->dma_phys_sgl + + SGL_PAGE_SIZE; + else + pdma_phys_sgl1 = 0; + cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag; + status = lpfc_sli4_post_sgl(phba, + lpfc_ncmd->dma_phys_sgl, + pdma_phys_sgl1, cur_xritag); + if (status) { + /* failure, put on abort nvme list */ + lpfc_ncmd->flags |= LPFC_SBUF_XBUSY; + } else { + /* success, put on NVME buffer list */ + lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY; + lpfc_ncmd->status = IOSTAT_SUCCESS; + num_posted++; + } + /* success, put on NVME buffer sgl list */ + list_add_tail(&lpfc_ncmd->list, &nvme_nblist); + } + } + + /* continue until a nembed page worth of sgls */ + if (post_cnt == 0) + continue; + + /* post block of NVME buffer list sgls */ + status = lpfc_sli4_post_nvme_sgl_block(phba, &blck_nblist, + post_cnt); + + /* don't reset xirtag due to hole in xri block */ + if (block_cnt == 0) + last_xritag = NO_XRI; + + /* reset NVME buffer post count for next round of posting */ + post_cnt = 0; + + /* put posted NVME buffer-sgl posted on NVME buffer sgl list */ + while (!list_empty(&blck_nblist)) { + list_remove_head(&blck_nblist, lpfc_ncmd, + struct lpfc_nvme_buf, list); + if (status) { + /* failure, put on abort nvme list */ + lpfc_ncmd->flags |= LPFC_SBUF_XBUSY; + } else { + /* success, put on NVME buffer list */ + lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY; + lpfc_ncmd->status = IOSTAT_SUCCESS; + num_posted++; + } + list_add_tail(&lpfc_ncmd->list, &nvme_nblist); + } + } + /* Push NVME buffers with sgl posted to the available list */ + while (!list_empty(&nvme_nblist)) { + list_remove_head(&nvme_nblist, lpfc_ncmd, + struct lpfc_nvme_buf, list); + lpfc_release_nvme_buf(phba, lpfc_ncmd); + } + return num_posted; +} + +/** + * lpfc_repost_nvme_sgl_list - Repost all the allocated nvme buffer sgls + * @phba: pointer to lpfc hba data structure. 
+ * + * This routine walks the list of nvme buffers that have been allocated and + * repost them to the port by using SGL block post. This is needed after a + * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine + * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list + * to the lpfc_nvme_buf_list. If the repost fails, reject all nvme buffers. + * + * Returns: 0 = success, non-zero failure. + **/ +int +lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba) +{ + LIST_HEAD(post_nblist); + int num_posted, rc = 0; + + /* get all NVME buffers need to repost to a local list */ + spin_lock_irq(&phba->nvme_buf_list_get_lock); + spin_lock(&phba->nvme_buf_list_put_lock); + list_splice_init(&phba->lpfc_nvme_buf_list_get, &post_nblist); + list_splice(&phba->lpfc_nvme_buf_list_put, &post_nblist); + spin_unlock(&phba->nvme_buf_list_put_lock); + spin_unlock_irq(&phba->nvme_buf_list_get_lock); + + /* post the list of nvme buffer sgls to port if available */ + if (!list_empty(&post_nblist)) { + num_posted = lpfc_post_nvme_sgl_list(phba, &post_nblist, + phba->sli4_hba.nvme_xri_cnt); + /* failed to post any nvme buffer, return error */ + if (num_posted == 0) + rc = -EIO; + } + return rc; +} + +/** + * lpfc_new_nvme_buf - Scsi buffer allocator for HBA with SLI4 IF spec + * @vport: The virtual port for which this call being executed. + * @num_to_allocate: The requested number of buffers to allocate. + * + * This routine allocates nvme buffers for device with SLI-4 interface spec, + * the nvme buffer contains all the necessary information needed to initiate + * a NVME I/O. After allocating up to @num_to_allocate NVME buffers and put + * them on a list, it post them to the port by using SGL block post. + * + * Return codes: + * int - number of nvme buffers that were allocated and posted. + * 0 = failure, less than num_to_alloc is a partial failure. + **/ +static int +lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_nvme_buf *lpfc_ncmd; + struct lpfc_iocbq *pwqeq; + union lpfc_wqe128 *wqe; + struct sli4_sge *sgl; + dma_addr_t pdma_phys_sgl; + uint16_t iotag, lxri = 0; + int bcnt, num_posted, sgl_size; + LIST_HEAD(prep_nblist); + LIST_HEAD(post_nblist); + LIST_HEAD(nvme_nblist); + + sgl_size = phba->cfg_sg_dma_buf_size; + + for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { + lpfc_ncmd = kzalloc(sizeof(struct lpfc_nvme_buf), GFP_KERNEL); + if (!lpfc_ncmd) + break; + /* + * Get memory from the pci pool to map the virt space to + * pci bus space for an I/O. The DMA buffer includes the + * number of SGE's necessary to support the sg_tablesize. + */ + lpfc_ncmd->data = pci_pool_alloc(phba->lpfc_sg_dma_buf_pool, + GFP_KERNEL, + &lpfc_ncmd->dma_handle); + if (!lpfc_ncmd->data) { + kfree(lpfc_ncmd); + break; + } + memset(lpfc_ncmd->data, 0, phba->cfg_sg_dma_buf_size); + + lxri = lpfc_sli4_next_xritag(phba); + if (lxri == NO_XRI) { + pci_pool_free(phba->lpfc_sg_dma_buf_pool, + lpfc_ncmd->data, lpfc_ncmd->dma_handle); + kfree(lpfc_ncmd); + break; + } + pwqeq = &(lpfc_ncmd->cur_iocbq); + wqe = (union lpfc_wqe128 *)&pwqeq->wqe; + + /* Allocate iotag for lpfc_ncmd->cur_iocbq. 
*/ + iotag = lpfc_sli_next_iotag(phba, pwqeq); + if (iotag == 0) { + pci_pool_free(phba->lpfc_sg_dma_buf_pool, + lpfc_ncmd->data, lpfc_ncmd->dma_handle); + kfree(lpfc_ncmd); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6121 Failed to allocated IOTAG for" + " XRI:0x%x\n", lxri); + lpfc_sli4_free_xri(phba, lxri); + break; + } + pwqeq->sli4_lxritag = lxri; + pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; + pwqeq->iocb_flag |= LPFC_IO_NVME; + pwqeq->context1 = lpfc_ncmd; + pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl; + + /* Initialize local short-hand pointers. */ + lpfc_ncmd->nvme_sgl = lpfc_ncmd->data; + sgl = lpfc_ncmd->nvme_sgl; + pdma_phys_sgl = lpfc_ncmd->dma_handle; + lpfc_ncmd->dma_phys_sgl = pdma_phys_sgl; + + /* Rsp SGE will be filled in when we rcv an IO + * from the NVME Layer to be sent. + * The cmd is going to be embedded so we need a SKIP SGE. + */ + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); + bf_set(lpfc_sli4_sge_last, sgl, 0); + sgl->word2 = cpu_to_le32(sgl->word2); + /* Fill in word 3 / sgl_len during cmd submission */ + + lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd; + + /* Word 7 */ + bf_set(wqe_erp, &wqe->generic.wqe_com, 0); + /* NVME upper layers will time things out, if needed */ + bf_set(wqe_tmo, &wqe->generic.wqe_com, 0); + + /* Word 10 */ + bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0); + bf_set(wqe_dbde, &wqe->generic.wqe_com, 1); + + /* add the nvme buffer to a post list */ + list_add_tail(&lpfc_ncmd->list, &post_nblist); + spin_lock_irq(&phba->nvme_buf_list_get_lock); + phba->sli4_hba.nvme_xri_cnt++; + spin_unlock_irq(&phba->nvme_buf_list_get_lock); + } + lpfc_printf_log(phba, KERN_INFO, LOG_NVME, + "6114 Allocate %d out of %d requested new NVME " + "buffers\n", bcnt, num_to_alloc); + + /* post the list of nvme buffer sgls to port if available */ + if (!list_empty(&post_nblist)) + num_posted = lpfc_post_nvme_sgl_list(phba, + &post_nblist, bcnt); + else + num_posted = 0; + + return num_posted; +} + +/** + * lpfc_get_nvme_buf - Get a nvme buffer from lpfc_nvme_buf_list of the HBA + * @phba: The HBA for which this call is being executed. + * + * This routine removes a nvme buffer from head of @phba lpfc_nvme_buf_list list + * and returns to caller. + * + * Return codes: + * NULL - Error + * Pointer to lpfc_nvme_buf - Success + **/ +static struct lpfc_nvme_buf * +lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) +{ + struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next; + unsigned long iflag = 0; + int found = 0; + + spin_lock_irqsave(&phba->nvme_buf_list_get_lock, iflag); + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, + &phba->lpfc_nvme_buf_list_get, list) { + if (lpfc_test_rrq_active(phba, ndlp, + lpfc_ncmd->cur_iocbq.sli4_lxritag)) + continue; + list_del(&lpfc_ncmd->list); + found = 1; + break; + } + if (!found) { + spin_lock(&phba->nvme_buf_list_put_lock); + list_splice(&phba->lpfc_nvme_buf_list_put, + &phba->lpfc_nvme_buf_list_get); + INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put); + spin_unlock(&phba->nvme_buf_list_put_lock); + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, + &phba->lpfc_nvme_buf_list_get, list) { + if (lpfc_test_rrq_active( + phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag)) + continue; + list_del(&lpfc_ncmd->list); + found = 1; + break; + } + } + spin_unlock_irqrestore(&phba->nvme_buf_list_get_lock, iflag); + if (!found) + return NULL; + return lpfc_ncmd; +} + +/** + * lpfc_release_nvme_buf: Return a nvme buffer back to hba nvme buf list. + * @phba: The Hba for which this call is being executed. 
+ * @lpfc_ncmd: The nvme buffer which is being released. + * + * This routine releases @lpfc_ncmd nvme buffer by adding it to tail of @phba + * lpfc_nvme_buf_list list. For SLI4 XRI's are tied to the nvme buffer + * and cannot be reused for at least RA_TOV amount of time if it was + * aborted. + **/ +static void +lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd) +{ + unsigned long iflag = 0; + + lpfc_ncmd->nonsg_phys = 0; + if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) { + spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, + iflag); + lpfc_ncmd->nvmeCmd = NULL; + list_add_tail(&lpfc_ncmd->list, + &phba->sli4_hba.lpfc_abts_nvme_buf_list); + spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, + iflag); + } else { + lpfc_ncmd->nvmeCmd = NULL; + lpfc_ncmd->cur_iocbq.iocb_flag = LPFC_IO_NVME; + spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag); + list_add_tail(&lpfc_ncmd->list, &phba->lpfc_nvme_buf_list_put); + spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag); + } +} + +/** + * lpfc_nvme_create_localport - Create/Bind an nvme localport instance. + * @pvport - the lpfc_vport instance requesting a localport. + * + * This routine is invoked to create an nvme localport instance to bind + * to the nvme_fc_transport. It is called once during driver load + * like lpfc_create_shost after all other services are initialized. + * It requires a vport, vpi, and wwns at call time. Other localport + * parameters are modified as the driver's FCID and the Fabric WWN + * are established. + * + * Return codes + * 0 - successful + * -ENOMEM - no heap memory available + * other values - from nvme registration upcall + **/ +int +lpfc_nvme_create_localport(struct lpfc_vport *vport) +{ + int ret = 0; + struct lpfc_hba *phba = vport->phba; + struct nvme_fc_port_info nfcp_info; + struct nvme_fc_local_port *localport; + struct lpfc_nvme_lport *lport; + int len; + + /* Initialize this localport instance. The vport wwn usage ensures + * that NPIV is accounted for. + */ + memset(&nfcp_info, 0, sizeof(struct nvme_fc_port_info)); + nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR; + nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn); + nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn); + + /* For now need + 1 to get around NVME transport logic */ + lpfc_nvme_template.max_sgl_segments = phba->cfg_sg_seg_cnt + 1; + lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel; + + /* localport is allocated from the stack, but the registration + * call allocates heap memory as well as the private area. + */ +#if (IS_ENABLED(CONFIG_NVME_FC)) + ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template, + &vport->phba->pcidev->dev, &localport); +#else + ret = -ENOMEM; +#endif + if (!ret) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC, + "6005 Successfully registered local " + "NVME port num %d, localP %p, private %p, " + "sg_seg %d\n", + localport->port_num, localport, + localport->private, + lpfc_nvme_template.max_sgl_segments); + + /* Private is our lport size declared in the template. */ + lport = (struct lpfc_nvme_lport *)localport->private; + vport->localport = localport; + lport->vport = vport; + INIT_LIST_HEAD(&lport->rport_list); + vport->nvmei_support = 1; + len = lpfc_new_nvme_buf(vport, phba->sli4_hba.nvme_xri_max); + vport->phba->total_nvme_bufs += len; + } + + return ret; +} + +/** + * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport. + * @pnvme: pointer to lpfc nvme data structure. 
+ * + * This routine is invoked to destroy all lports bound to the phba. + * The lport memory was allocated by the nvme fc transport and is + * released there. This routine ensures all rports bound to the + * lport have been disconnected. + * + **/ +void +lpfc_nvme_destroy_localport(struct lpfc_vport *vport) +{ +#if (IS_ENABLED(CONFIG_NVME_FC)) + struct nvme_fc_local_port *localport; + struct lpfc_nvme_lport *lport; + struct lpfc_nvme_rport *rport = NULL, *rport_next = NULL; + int ret; + + if (vport->nvmei_support == 0) + return; + + localport = vport->localport; + vport->localport = NULL; + lport = (struct lpfc_nvme_lport *)localport->private; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, + "6011 Destroying NVME localport %p\n", + localport); + list_for_each_entry_safe(rport, rport_next, &lport->rport_list, list) { + /* The last node ref has to get released now before the rport + * private memory area is released by the transport. + */ + list_del(&rport->list); + + init_completion(&rport->rport_unreg_done); + ret = nvme_fc_unregister_remoteport(rport->remoteport); + if (ret) + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, + "6008 rport fail destroy %x\n", ret); + wait_for_completion_timeout(&rport->rport_unreg_done, 5); + } + + /* lport's rport list is clear. Unregister + * lport and release resources. + */ + init_completion(&lport->lport_unreg_done); + ret = nvme_fc_unregister_localport(localport); + wait_for_completion_timeout(&lport->lport_unreg_done, 5); + + /* Regardless of the unregister upcall response, clear + * nvmei_support. All rports are unregistered and the + * driver will clean up. + */ + vport->nvmei_support = 0; + if (ret == 0) { + lpfc_printf_vlog(vport, + KERN_INFO, LOG_NVME_DISC, + "6009 Unregistered lport Success\n"); + } else { + lpfc_printf_vlog(vport, + KERN_INFO, LOG_NVME_DISC, + "6010 Unregistered lport " + "Failed, status x%x\n", + ret); + } +#endif +} + +void +lpfc_nvme_update_localport(struct lpfc_vport *vport) +{ + struct nvme_fc_local_port *localport; + struct lpfc_nvme_lport *lport; + + localport = vport->localport; + lport = (struct lpfc_nvme_lport *)localport->private; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, + "6012 Update NVME lport %p did x%x\n", + localport, vport->fc_myDID); + + localport->port_id = vport->fc_myDID; + if (localport->port_id == 0) + localport->port_role = FC_PORT_ROLE_NVME_DISCOVERY; + else + localport->port_role = FC_PORT_ROLE_NVME_INITIATOR; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, + "6030 bound lport %p to DID x%06x\n", + lport, localport->port_id); + +} + +int +lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) +{ +#if (IS_ENABLED(CONFIG_NVME_FC)) + int ret = 0; + struct nvme_fc_local_port *localport; + struct lpfc_nvme_lport *lport; + struct lpfc_nvme_rport *rport; + struct nvme_fc_remote_port *remote_port; + struct nvme_fc_port_info rpinfo; + + lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC, + "6006 Register NVME PORT. DID x%06x nlptype x%x\n", + ndlp->nlp_DID, ndlp->nlp_type); + + localport = vport->localport; + lport = (struct lpfc_nvme_lport *)localport->private; + + if (ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_INITIATOR)) { + + /* The driver isn't expecting the rport wwn to change + * but it might get a different DID on a different + * fabric. 
+ */ + list_for_each_entry(rport, &lport->rport_list, list) { + if (rport->remoteport->port_name != + wwn_to_u64(ndlp->nlp_portname.u.wwn)) + continue; + lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC, + "6035 lport %p, found matching rport " + "at wwpn 0x%llx, Data: x%x x%x x%x " + "x%06x\n", + lport, + rport->remoteport->port_name, + rport->remoteport->port_id, + rport->remoteport->port_role, + ndlp->nlp_type, + ndlp->nlp_DID); + remote_port = rport->remoteport; + if ((remote_port->port_id == 0) && + (remote_port->port_role == + FC_PORT_ROLE_NVME_DISCOVERY)) { + remote_port->port_id = ndlp->nlp_DID; + remote_port->port_role &= + ~FC_PORT_ROLE_NVME_DISCOVERY; + if (ndlp->nlp_type & NLP_NVME_TARGET) + remote_port->port_role |= + FC_PORT_ROLE_NVME_TARGET; + if (ndlp->nlp_type & NLP_NVME_INITIATOR) + remote_port->port_role |= + FC_PORT_ROLE_NVME_INITIATOR; + + lpfc_printf_vlog(ndlp->vport, KERN_INFO, + LOG_NVME_DISC, + "6014 Rebinding lport to " + "rport wwpn 0x%llx, " + "Data: x%x x%x x%x x%06x\n", + remote_port->port_name, + remote_port->port_id, + remote_port->port_role, + ndlp->nlp_type, + ndlp->nlp_DID); + } + return 0; + } + + /* NVME rports are not preserved across devloss. + * Just register this instance. + */ + rpinfo.port_id = ndlp->nlp_DID; + rpinfo.port_role = 0; + if (ndlp->nlp_type & NLP_NVME_TARGET) + rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET; + if (ndlp->nlp_type & NLP_NVME_INITIATOR) + rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR; + rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn); + rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); + ret = nvme_fc_register_remoteport(localport, &rpinfo, + &remote_port); + if (!ret) { + rport = remote_port->private; + rport->remoteport = remote_port; + rport->lport = lport; + rport->ndlp = lpfc_nlp_get(ndlp); + if (!rport->ndlp) + return -1; + ndlp->nrport = rport; + INIT_LIST_HEAD(&rport->list); + list_add_tail(&rport->list, &lport->rport_list); + lpfc_printf_vlog(vport, KERN_INFO, + LOG_NVME_DISC | LOG_NODE, + "6022 Binding new rport to lport %p " + "Rport WWNN 0x%llx, Rport WWPN 0x%llx " + "DID x%06x Role x%x\n", + lport, + rpinfo.node_name, rpinfo.port_name, + rpinfo.port_id, rpinfo.port_role); + } else { + lpfc_printf_vlog(vport, KERN_ERR, + LOG_NVME_DISC | LOG_NODE, + "6031 RemotePort Registration failed " + "err: %d, DID x%06x\n", + ret, ndlp->nlp_DID); + } + } else { + ret = -EINVAL; + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, + "6027 Unknown nlp_type x%x on DID x%06x " + "ndlp %p. Not Registering nvme rport\n", + ndlp->nlp_type, ndlp->nlp_DID, ndlp); + } + return ret; +#else + return 0; +#endif +} + +/* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport. + * + * There is no notion of Devloss or rport recovery from the current + * nvme_transport perspective. Loss of an rport just means IO cannot + * be sent and recovery is completely up to the initator. + * For now, the driver just unbinds the DID and port_role so that + * no further IO can be issued. Changes are planned for later. + * + * Notes - the ndlp reference count is not decremented here since + * since there is no nvme_transport api for devloss. Node ref count + * is only adjusted in driver unload. 
+ */ +void +lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) +{ +#if (IS_ENABLED(CONFIG_NVME_FC)) + int ret; + struct nvme_fc_local_port *localport; + struct lpfc_nvme_lport *lport; + struct lpfc_nvme_rport *rport; + struct nvme_fc_remote_port *remoteport; + + localport = vport->localport; + + /* This is fundamental error. The localport is always + * available until driver unload. Just exit. + */ + if (!localport) + return; + + lport = (struct lpfc_nvme_lport *)localport->private; + if (!lport) + goto input_err; + + rport = ndlp->nrport; + if (!rport) + goto input_err; + + remoteport = rport->remoteport; + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, + "6033 Unreg nvme remoteport %p, portname x%llx, " + "port_id x%06x, portstate x%x port type x%x\n", + remoteport, remoteport->port_name, + remoteport->port_id, remoteport->port_state, + ndlp->nlp_type); + + /* Sanity check ndlp type. Only call for NVME ports. Don't + * clear any rport state until the transport calls back. + */ + if (ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_INITIATOR)) { + init_completion(&rport->rport_unreg_done); + ret = nvme_fc_unregister_remoteport(remoteport); + if (ret != 0) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, + "6167 NVME unregister failed %d " + "port_state x%x\n", + ret, remoteport->port_state); + } + + /* Wait for the driver's delete completion routine to finish + * before proceeding. This guarantees the transport and driver + * have completed the unreg process. + */ + ret = wait_for_completion_timeout(&rport->rport_unreg_done, 5); + if (ret == 0) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, + "6169 Unreg nvme wait failed %d\n", + ret); + } + } + return; + + input_err: +#endif + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, + "6168: State error: lport %p, rport%p FCID x%06x\n", + vport->localport, ndlp->rport, ndlp->nlp_DID); +} + +/** + * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort + * @phba: pointer to lpfc hba data structure. + * @axri: pointer to the fcp xri abort wcqe structure. + * + * This routine is invoked by the worker thread to process a SLI4 fast-path + * FCP aborted xri. 
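As a rough, standalone sketch of the find-and-release pattern this handler uses (not driver code: a plain pthread mutex stands in for the hba and abts-list locks, and the buffer entries are made up), the idea is to scan the parked aborted buffers under the lock, clear the exchange-busy marker on the matching XRI, and let that buffer be reused:

        /* Hypothetical sketch; not driver code. */
        #include <pthread.h>
        #include <stdio.h>

        struct buf {
                int xri;
                int busy;       /* mirrors the LPFC_SBUF_XBUSY idea */
        };

        static struct buf aborted[] = { { 100, 1 }, { 102, 1 }, { 105, 1 } };
        static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

        /* called when the port reports that exchange 'xri' is finally done */
        static void xri_aborted(int xri)
        {
                pthread_mutex_lock(&list_lock);
                for (unsigned i = 0; i < sizeof(aborted) / sizeof(aborted[0]); i++) {
                        if (aborted[i].busy && aborted[i].xri == xri) {
                                aborted[i].busy = 0;    /* exchange no longer busy */
                                pthread_mutex_unlock(&list_lock);
                                printf("xri %d: cleared busy, buffer can be reused\n", xri);
                                return;
                        }
                }
                pthread_mutex_unlock(&list_lock);
                printf("xri %d: no matching buffer on the aborted list\n", xri);
        }

        int main(void)
        {
                xri_aborted(102);
                xri_aborted(999);
                return 0;
        }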
+ **/ +void +lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba, + struct sli4_wcqe_xri_aborted *axri) +{ + uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); + uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); + struct lpfc_nvme_buf *lpfc_ncmd, *next_lpfc_ncmd; + struct lpfc_nodelist *ndlp; + unsigned long iflag = 0; + int rrq_empty = 0; + + if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) + return; + spin_lock_irqsave(&phba->hbalock, iflag); + spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); + list_for_each_entry_safe(lpfc_ncmd, next_lpfc_ncmd, + &phba->sli4_hba.lpfc_abts_nvme_buf_list, + list) { + if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) { + list_del(&lpfc_ncmd->list); + lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY; + lpfc_ncmd->status = IOSTAT_SUCCESS; + spin_unlock( + &phba->sli4_hba.abts_nvme_buf_list_lock); + + rrq_empty = list_empty(&phba->active_rrq_list); + spin_unlock_irqrestore(&phba->hbalock, iflag); + ndlp = lpfc_ncmd->ndlp; + if (ndlp) { + lpfc_set_rrq_active( + phba, ndlp, + lpfc_ncmd->cur_iocbq.sli4_lxritag, + rxid, 1); + lpfc_sli4_abts_err_handler(phba, ndlp, axri); + } + lpfc_release_nvme_buf(phba, lpfc_ncmd); + if (rrq_empty) + lpfc_worker_wake_up(phba); + return; + } + } + spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); + spin_unlock_irqrestore(&phba->hbalock, iflag); +} diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h new file mode 100644 index 00000000000000..1347deb8dd6cbd --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_nvme.h @@ -0,0 +1,104 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * Portions Copyright (C) 2004-2005 Christoph Hellwig * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + ********************************************************************/ + +#define LPFC_NVME_MIN_SEGS 16 +#define LPFC_NVME_DEFAULT_SEGS 66 /* 256K IOs - 64 + 2 */ +#define LPFC_NVME_MAX_SEGS 510 +#define LPFC_NVMET_MIN_POSTBUF 16 +#define LPFC_NVMET_DEFAULT_POSTBUF 1024 +#define LPFC_NVMET_MAX_POSTBUF 4096 +#define LPFC_NVME_WQSIZE 256 + +#define LPFC_NVME_ERSP_LEN 0x20 + +struct lpfc_nvme_qhandle { + uint32_t index; /* WQ index to use */ + uint32_t qidx; /* queue index passed to create */ + uint32_t cpu_id; /* current cpu id at time of create */ +}; + +/* Declare nvme-based local and remote port definitions. 
*/
+struct lpfc_nvme_lport {
+	struct lpfc_vport *vport;
+	struct list_head rport_list;
+	struct completion lport_unreg_done;
+	/* Add stats counters here */
+};
+
+struct lpfc_nvme_rport {
+	struct list_head list;
+	struct lpfc_nvme_lport *lport;
+	struct nvme_fc_remote_port *remoteport;
+	struct lpfc_nodelist *ndlp;
+	struct completion rport_unreg_done;
+};
+
+struct lpfc_nvme_buf {
+	struct list_head list;
+	struct nvmefc_fcp_req *nvmeCmd;
+	struct lpfc_nvme_rport *nrport;
+	struct lpfc_nodelist *ndlp;
+
+	uint32_t timeout;
+
+	uint16_t flags;	/* TBD convert exch_busy to flags */
+#define LPFC_SBUF_XBUSY 0x1	/* SLI4 hba reported XB on WCQE cmpl */
+	uint16_t exch_busy;	/* SLI4 hba reported XB on complete WCQE */
+	uint16_t status;	/* From IOCB Word 7- ulpStatus */
+	uint16_t cpu;
+	uint16_t qidx;
+	uint16_t sqid;
+	uint32_t result;	/* From IOCB Word 4. */
+
+	uint32_t seg_cnt;	/* Number of scatter-gather segments returned by
+				 * dma_map_sg. The driver needs this for calls
+				 * to dma_unmap_sg.
+				 */
+	dma_addr_t nonsg_phys;	/* Non scatter-gather physical address. */
+
+	/*
+	 * data and dma_handle are the kernel virtual and bus address of the
+	 * dma-able buffer containing the fcp_cmd, fcp_rsp and a scatter
+	 * gather bde list that supports the sg_tablesize value.
+	 */
+	void *data;
+	dma_addr_t dma_handle;
+
+	struct sli4_sge *nvme_sgl;
+	dma_addr_t dma_phys_sgl;
+
+	/* cur_iocbq has phys of the dma-able buffer.
+	 * Iotag is in here
+	 */
+	struct lpfc_iocbq cur_iocbq;
+
+	wait_queue_head_t *waitq;
+	unsigned long start_time;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	uint64_t ts_cmd_start;
+	uint64_t ts_last_cmd;
+	uint64_t ts_cmd_wqput;
+	uint64_t ts_isr_cmpl;
+	uint64_t ts_data_nvme;
+#endif
+};
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
new file mode 100644
index 00000000000000..acba1b67e505e9
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -0,0 +1,2013 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
+ * Copyright (C) 2004-2016 Emulex. All rights reserved.            *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.broadcom.com                                                *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for   *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.
* + ********************************************************************/ +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include <../drivers/nvme/host/nvme.h> +#include + +#include "lpfc_version.h" +#include "lpfc_hw4.h" +#include "lpfc_hw.h" +#include "lpfc_sli.h" +#include "lpfc_sli4.h" +#include "lpfc_nl.h" +#include "lpfc_disc.h" +#include "lpfc.h" +#include "lpfc_scsi.h" +#include "lpfc_nvme.h" +#include "lpfc_nvmet.h" +#include "lpfc_logmsg.h" +#include "lpfc_crtn.h" +#include "lpfc_vport.h" +#include "lpfc_debugfs.h" + +static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *, + struct lpfc_nvmet_rcv_ctx *, + dma_addr_t rspbuf, + uint16_t rspsize); +static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *, + struct lpfc_nvmet_rcv_ctx *); +static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *, + struct lpfc_nvmet_rcv_ctx *, + uint32_t, uint16_t); +static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *, + struct lpfc_nvmet_rcv_ctx *, + uint32_t, uint16_t); +static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *, + struct lpfc_nvmet_rcv_ctx *, + uint32_t, uint16_t); + +/** + * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response + * @phba: Pointer to HBA context object. + * @cmdwqe: Pointer to driver command WQE object. + * @wcqe: Pointer to driver response CQE object. + * + * The function is called from SLI ring event handler with no + * lock held. This function is the completion handler for NVME LS commands + * The function frees memory resources used for the NVME commands. + **/ +static void +lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, + struct lpfc_wcqe_complete *wcqe) +{ + struct lpfc_nvmet_tgtport *tgtp; + struct nvmefc_tgt_ls_req *rsp; + struct lpfc_nvmet_rcv_ctx *ctxp; + uint32_t status, result; + + status = bf_get(lpfc_wcqe_c_status, wcqe); + result = wcqe->parameter; + if (!phba->targetport) + goto out; + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + + if (status) + atomic_inc(&tgtp->xmt_ls_rsp_error); + else + atomic_inc(&tgtp->xmt_ls_rsp_cmpl); + +out: + ctxp = cmdwqe->context2; + rsp = &ctxp->ctx.ls_req; + + lpfc_nvmeio_data(phba, "NVMET LS CMPL: xri x%x stat x%x result x%x\n", + ctxp->oxid, status, result); + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, + "6038 %s: Entrypoint: ctx %p status %x/%x\n", __func__, + ctxp, status, result); + + lpfc_nlp_put(cmdwqe->context1); + cmdwqe->context2 = NULL; + cmdwqe->context3 = NULL; + lpfc_sli_release_iocbq(phba, cmdwqe); + rsp->done(rsp); + kfree(ctxp); +} + +/** + * lpfc_nvmet_rq_post - Repost a NVMET RQ DMA buffer and clean up context + * @phba: HBA buffer is associated with + * @ctxp: context to clean up + * @mp: Buffer to free + * + * Description: Frees the given DMA buffer in the appropriate way given by + * reposting it to its associated RQ so it can be reused. + * + * Notes: Takes phba->hbalock. Can be called with or without other locks held. 
+ * + * Returns: None + **/ +void +lpfc_nvmet_rq_post(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp, + struct lpfc_dmabuf *mp) +{ + if (ctxp) { + if (ctxp->txrdy) { + pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy, + ctxp->txrdy_phys); + ctxp->txrdy = NULL; + ctxp->txrdy_phys = 0; + } + ctxp->state = LPFC_NVMET_STE_FREE; + } + lpfc_rq_buf_free(phba, mp); +} + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS +static void +lpfc_nvmet_ktime(struct lpfc_hba *phba, + struct lpfc_nvmet_rcv_ctx *ctxp) +{ + uint64_t seg1, seg2, seg3, seg4, seg5; + uint64_t seg6, seg7, seg8, seg9, seg10; + + if (!phba->ktime_on) + return; + + if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme || + !ctxp->ts_nvme_data || !ctxp->ts_data_wqput || + !ctxp->ts_isr_data || !ctxp->ts_data_nvme || + !ctxp->ts_nvme_status || !ctxp->ts_status_wqput || + !ctxp->ts_isr_status || !ctxp->ts_status_nvme) + return; + + if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme) + return; + if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data) + return; + if (ctxp->ts_nvme_data > ctxp->ts_data_wqput) + return; + if (ctxp->ts_data_wqput > ctxp->ts_isr_data) + return; + if (ctxp->ts_isr_data > ctxp->ts_data_nvme) + return; + if (ctxp->ts_data_nvme > ctxp->ts_nvme_status) + return; + if (ctxp->ts_nvme_status > ctxp->ts_status_wqput) + return; + if (ctxp->ts_status_wqput > ctxp->ts_isr_status) + return; + if (ctxp->ts_isr_status > ctxp->ts_status_nvme) + return; + /* + * Segment 1 - Time from FCP command received by MSI-X ISR + * to FCP command is passed to NVME Layer. + * Segment 2 - Time from FCP command payload handed + * off to NVME Layer to Driver receives a Command op + * from NVME Layer. + * Segment 3 - Time from Driver receives a Command op + * from NVME Layer to Command is put on WQ. + * Segment 4 - Time from Driver WQ put is done + * to MSI-X ISR for Command cmpl. + * Segment 5 - Time from MSI-X ISR for Command cmpl to + * Command cmpl is passed to NVME Layer. + * Segment 6 - Time from Command cmpl is passed to NVME + * Layer to Driver receives a RSP op from NVME Layer. + * Segment 7 - Time from Driver receives a RSP op from + * NVME Layer to WQ put is done on TRSP FCP Status. + * Segment 8 - Time from Driver WQ put is done on TRSP + * FCP Status to MSI-X ISR for TRSP cmpl. + * Segment 9 - Time from MSI-X ISR for TRSP cmpl to + * TRSP cmpl is passed to NVME Layer. + * Segment 10 - Time from FCP command received by + * MSI-X ISR to command is completed on wire. 
+ * (Segments 1 thru 8) for READDATA / WRITEDATA + * (Segments 1 thru 4) for READDATA_RSP + */ + seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd; + seg2 = (ctxp->ts_nvme_data - ctxp->ts_isr_cmd) - seg1; + seg3 = (ctxp->ts_data_wqput - ctxp->ts_isr_cmd) - + seg1 - seg2; + seg4 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd) - + seg1 - seg2 - seg3; + seg5 = (ctxp->ts_data_nvme - ctxp->ts_isr_cmd) - + seg1 - seg2 - seg3 - seg4; + + /* For auto rsp commands seg6 thru seg10 will be 0 */ + if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) { + seg6 = (ctxp->ts_nvme_status - + ctxp->ts_isr_cmd) - + seg1 - seg2 - seg3 - seg4 - seg5; + seg7 = (ctxp->ts_status_wqput - + ctxp->ts_isr_cmd) - + seg1 - seg2 - seg3 - + seg4 - seg5 - seg6; + seg8 = (ctxp->ts_isr_status - + ctxp->ts_isr_cmd) - + seg1 - seg2 - seg3 - seg4 - + seg5 - seg6 - seg7; + seg9 = (ctxp->ts_status_nvme - + ctxp->ts_isr_cmd) - + seg1 - seg2 - seg3 - seg4 - + seg5 - seg6 - seg7 - seg8; + seg10 = (ctxp->ts_isr_status - + ctxp->ts_isr_cmd); + } else { + seg6 = 0; + seg7 = 0; + seg8 = 0; + seg9 = 0; + seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd); + } + + phba->ktime_seg1_total += seg1; + if (seg1 < phba->ktime_seg1_min) + phba->ktime_seg1_min = seg1; + else if (seg1 > phba->ktime_seg1_max) + phba->ktime_seg1_max = seg1; + + phba->ktime_seg2_total += seg2; + if (seg2 < phba->ktime_seg2_min) + phba->ktime_seg2_min = seg2; + else if (seg2 > phba->ktime_seg2_max) + phba->ktime_seg2_max = seg2; + + phba->ktime_seg3_total += seg3; + if (seg3 < phba->ktime_seg3_min) + phba->ktime_seg3_min = seg3; + else if (seg3 > phba->ktime_seg3_max) + phba->ktime_seg3_max = seg3; + + phba->ktime_seg4_total += seg4; + if (seg4 < phba->ktime_seg4_min) + phba->ktime_seg4_min = seg4; + else if (seg4 > phba->ktime_seg4_max) + phba->ktime_seg4_max = seg4; + + phba->ktime_seg5_total += seg5; + if (seg5 < phba->ktime_seg5_min) + phba->ktime_seg5_min = seg5; + else if (seg5 > phba->ktime_seg5_max) + phba->ktime_seg5_max = seg5; + + phba->ktime_data_samples++; + if (!seg6) + goto out; + + phba->ktime_seg6_total += seg6; + if (seg6 < phba->ktime_seg6_min) + phba->ktime_seg6_min = seg6; + else if (seg6 > phba->ktime_seg6_max) + phba->ktime_seg6_max = seg6; + + phba->ktime_seg7_total += seg7; + if (seg7 < phba->ktime_seg7_min) + phba->ktime_seg7_min = seg7; + else if (seg7 > phba->ktime_seg7_max) + phba->ktime_seg7_max = seg7; + + phba->ktime_seg8_total += seg8; + if (seg8 < phba->ktime_seg8_min) + phba->ktime_seg8_min = seg8; + else if (seg8 > phba->ktime_seg8_max) + phba->ktime_seg8_max = seg8; + + phba->ktime_seg9_total += seg9; + if (seg9 < phba->ktime_seg9_min) + phba->ktime_seg9_min = seg9; + else if (seg9 > phba->ktime_seg9_max) + phba->ktime_seg9_max = seg9; +out: + phba->ktime_seg10_total += seg10; + if (seg10 < phba->ktime_seg10_min) + phba->ktime_seg10_min = seg10; + else if (seg10 > phba->ktime_seg10_max) + phba->ktime_seg10_max = seg10; + phba->ktime_status_samples++; +} +#endif + +/** + * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response + * @phba: Pointer to HBA context object. + * @cmdwqe: Pointer to driver command WQE object. + * @wcqe: Pointer to driver response CQE object. + * + * The function is called from SLI ring event handler with no + * lock held. This function is the completion handler for NVME FCP commands + * The function frees memory resources used for the NVME commands. 
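A small aside on the arithmetic in lpfc_nvmet_ktime() above: each seg_k is computed as (T[k] - T[0]) minus the segments already accounted for, which is algebraically the same as the gap T[k] - T[k-1] between consecutive timestamps. A tiny standalone sketch with made-up nanosecond values shows the two forms agree:

        /* Hypothetical sketch; not driver code. */
        #include <stdio.h>
        #include <stdint.h>
        #include <inttypes.h>

        int main(void)
        {
                /* made-up cumulative ns timestamps along one command's life */
                uint64_t t[] = { 1000, 1400, 2100, 2150, 9000, 9300 };
                uint64_t accounted = 0;

                for (int k = 1; k < 6; k++) {
                        /* driver form: (T[k] - T[0]) minus segments already accounted */
                        uint64_t seg = (t[k] - t[0]) - accounted;

                        accounted += seg;
                        printf("seg%d = %" PRIu64 " ns (same as t[%d]-t[%d] = %" PRIu64 ")\n",
                               k, seg, k, k - 1, t[k] - t[k - 1]);
                }
                return 0;
        }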
+ **/ +static void +lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, + struct lpfc_wcqe_complete *wcqe) +{ + struct lpfc_nvmet_tgtport *tgtp; + struct nvmefc_tgt_fcp_req *rsp; + struct lpfc_nvmet_rcv_ctx *ctxp; + uint32_t status, result, op, start_clean; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + uint32_t id; +#endif + + ctxp = cmdwqe->context2; + rsp = &ctxp->ctx.fcp_req; + op = rsp->op; + ctxp->flag &= ~LPFC_NVMET_IO_INP; + + status = bf_get(lpfc_wcqe_c_status, wcqe); + result = wcqe->parameter; + + if (!phba->targetport) + goto out; + + lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n", + ctxp->oxid, op, status); + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + if (status) { + rsp->fcp_error = NVME_SC_DATA_XFER_ERROR; + rsp->transferred_length = 0; + atomic_inc(&tgtp->xmt_fcp_rsp_error); + } else { + rsp->fcp_error = NVME_SC_SUCCESS; + if (op == NVMET_FCOP_RSP) + rsp->transferred_length = rsp->rsplen; + else + rsp->transferred_length = rsp->transfer_length; + atomic_inc(&tgtp->xmt_fcp_rsp_cmpl); + } + +out: + if ((op == NVMET_FCOP_READDATA_RSP) || + (op == NVMET_FCOP_RSP)) { + /* Sanity check */ + ctxp->state = LPFC_NVMET_STE_DONE; + ctxp->entry_cnt++; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (phba->ktime_on) { + if (rsp->op == NVMET_FCOP_READDATA_RSP) { + ctxp->ts_isr_data = + cmdwqe->isr_timestamp; + ctxp->ts_data_nvme = + ktime_get_ns(); + ctxp->ts_nvme_status = + ctxp->ts_data_nvme; + ctxp->ts_status_wqput = + ctxp->ts_data_nvme; + ctxp->ts_isr_status = + ctxp->ts_data_nvme; + ctxp->ts_status_nvme = + ctxp->ts_data_nvme; + } else { + ctxp->ts_isr_status = + cmdwqe->isr_timestamp; + ctxp->ts_status_nvme = + ktime_get_ns(); + } + } + if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) { + id = smp_processor_id(); + if (ctxp->cpu != id) + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6703 CPU Check cmpl: " + "cpu %d expect %d\n", + id, ctxp->cpu); + if (ctxp->cpu < LPFC_CHECK_CPU_CNT) + phba->cpucheck_cmpl_io[id]++; + } +#endif + rsp->done(rsp); +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (phba->ktime_on) + lpfc_nvmet_ktime(phba, ctxp); +#endif + /* Let Abort cmpl repost the context */ + if (!(ctxp->flag & LPFC_NVMET_ABORT_OP)) + lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf); + } else { + ctxp->entry_cnt++; + start_clean = offsetof(struct lpfc_iocbq, wqe); + memset(((char *)cmdwqe) + start_clean, 0, + (sizeof(struct lpfc_iocbq) - start_clean)); +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (phba->ktime_on) { + ctxp->ts_isr_data = cmdwqe->isr_timestamp; + ctxp->ts_data_nvme = ktime_get_ns(); + } + if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) { + id = smp_processor_id(); + if (ctxp->cpu != id) + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6704 CPU Check cmdcmpl: " + "cpu %d expect %d\n", + id, ctxp->cpu); + if (ctxp->cpu < LPFC_CHECK_CPU_CNT) + phba->cpucheck_ccmpl_io[id]++; + } +#endif + rsp->done(rsp); + } +} + +static int +lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport, + struct nvmefc_tgt_ls_req *rsp) +{ + struct lpfc_nvmet_rcv_ctx *ctxp = + container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req); + struct lpfc_hba *phba = ctxp->phba; + struct hbq_dmabuf *nvmebuf = + (struct hbq_dmabuf *)ctxp->rqb_buffer; + struct lpfc_iocbq *nvmewqeq; + struct lpfc_nvmet_tgtport *nvmep = tgtport->private; + struct lpfc_dmabuf dmabuf; + struct ulp_bde64 bpl; + int rc; + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, + "6023 %s: Entrypoint ctx %p %p\n", __func__, + ctxp, tgtport); + + nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, 
ctxp, rsp->rspdma, + rsp->rsplen); + if (nvmewqeq == NULL) { + atomic_inc(&nvmep->xmt_ls_drop); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6150 LS Drop IO x%x: Prep\n", + ctxp->oxid); + lpfc_in_buf_free(phba, &nvmebuf->dbuf); + lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, + ctxp->sid, ctxp->oxid); + return -ENOMEM; + } + + /* Save numBdes for bpl2sgl */ + nvmewqeq->rsvd2 = 1; + nvmewqeq->hba_wqidx = 0; + nvmewqeq->context3 = &dmabuf; + dmabuf.virt = &bpl; + bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow; + bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh; + bpl.tus.f.bdeSize = rsp->rsplen; + bpl.tus.f.bdeFlags = 0; + bpl.tus.w = le32_to_cpu(bpl.tus.w); + + nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp; + nvmewqeq->iocb_cmpl = NULL; + nvmewqeq->context2 = ctxp; + + lpfc_nvmeio_data(phba, "NVMET LS RESP: xri x%x wqidx x%x len x%x\n", + ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen); + + rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq); + if (rc == WQE_SUCCESS) { + /* + * Okay to repost buffer here, but wait till cmpl + * before freeing ctxp and iocbq. + */ + lpfc_in_buf_free(phba, &nvmebuf->dbuf); + ctxp->rqb_buffer = 0; + atomic_inc(&nvmep->xmt_ls_rsp); + return 0; + } + /* Give back resources */ + atomic_inc(&nvmep->xmt_ls_drop); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6151 LS Drop IO x%x: Issue %d\n", + ctxp->oxid, rc); + + lpfc_nlp_put(nvmewqeq->context1); + + lpfc_in_buf_free(phba, &nvmebuf->dbuf); + lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid); + return -ENXIO; +} + +static int +lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport, + struct nvmefc_tgt_fcp_req *rsp) +{ + struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private; + struct lpfc_nvmet_rcv_ctx *ctxp = + container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req); + struct lpfc_hba *phba = ctxp->phba; + struct lpfc_iocbq *nvmewqeq; + unsigned long iflags; + int rc; + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (phba->ktime_on) { + if (rsp->op == NVMET_FCOP_RSP) + ctxp->ts_nvme_status = ktime_get_ns(); + else + ctxp->ts_nvme_data = ktime_get_ns(); + } + if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) { + int id = smp_processor_id(); + ctxp->cpu = id; + if (id < LPFC_CHECK_CPU_CNT) + phba->cpucheck_xmt_io[id]++; + if (rsp->hwqid != id) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6705 CPU Check OP: " + "cpu %d expect %d\n", + id, rsp->hwqid); + ctxp->cpu = rsp->hwqid; + } + } +#endif + + if (rsp->op == NVMET_FCOP_ABORT) { + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, + "6103 Abort op: oxri x%x %d cnt %d\n", + ctxp->oxid, ctxp->state, ctxp->entry_cnt); + + lpfc_nvmeio_data(phba, "NVMET FCP ABRT: " + "xri x%x state x%x cnt x%x\n", + ctxp->oxid, ctxp->state, ctxp->entry_cnt); + + atomic_inc(&lpfc_nvmep->xmt_fcp_abort); + ctxp->entry_cnt++; + ctxp->flag |= LPFC_NVMET_ABORT_OP; + if (ctxp->flag & LPFC_NVMET_IO_INP) + lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid, + ctxp->oxid); + else + lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, + ctxp->oxid); + return 0; + } + + /* Sanity check */ + if (ctxp->state == LPFC_NVMET_STE_ABORT) { + atomic_inc(&lpfc_nvmep->xmt_fcp_drop); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6102 Bad state IO x%x aborted\n", + ctxp->oxid); + rc = -ENXIO; + goto aerr; + } + + nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp); + if (nvmewqeq == NULL) { + atomic_inc(&lpfc_nvmep->xmt_fcp_drop); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6152 FCP Drop IO x%x: Prep\n", + ctxp->oxid); + rc = -ENXIO; + goto aerr; 
+ } + + nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp; + nvmewqeq->iocb_cmpl = NULL; + nvmewqeq->context2 = ctxp; + nvmewqeq->iocb_flag |= LPFC_IO_NVMET; + ctxp->wqeq->hba_wqidx = rsp->hwqid; + + lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n", + ctxp->oxid, rsp->op, rsp->rsplen); + + /* For now we take hbalock */ + spin_lock_irqsave(&phba->hbalock, iflags); + rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq); + spin_unlock_irqrestore(&phba->hbalock, iflags); + if (rc == WQE_SUCCESS) { + ctxp->flag |= LPFC_NVMET_IO_INP; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (!phba->ktime_on) + return 0; + if (rsp->op == NVMET_FCOP_RSP) + ctxp->ts_status_wqput = ktime_get_ns(); + else + ctxp->ts_data_wqput = ktime_get_ns(); +#endif + return 0; + } + + /* Give back resources */ + atomic_inc(&lpfc_nvmep->xmt_fcp_drop); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6153 FCP Drop IO x%x: Issue: %d\n", + ctxp->oxid, rc); + + ctxp->wqeq->hba_wqidx = 0; + nvmewqeq->context2 = NULL; + nvmewqeq->context3 = NULL; + rc = -EBUSY; +aerr: + return rc; +} + +static void +lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport) +{ + struct lpfc_nvmet_tgtport *tport = targetport->private; + + /* release any threads waiting for the unreg to complete */ + complete(&tport->tport_unreg_done); +} + +static struct nvmet_fc_target_template lpfc_tgttemplate = { + .targetport_delete = lpfc_nvmet_targetport_delete, + .xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp, + .fcp_op = lpfc_nvmet_xmt_fcp_op, + + .max_hw_queues = 1, + .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS, + .max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS, + .dma_boundary = 0xFFFFFFFF, + + /* optional features */ + .target_features = 0, + /* sizes of additional private data for data structures */ + .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport), +}; + +int +lpfc_nvmet_create_targetport(struct lpfc_hba *phba) +{ + struct lpfc_vport *vport = phba->pport; + struct lpfc_nvmet_tgtport *tgtp; + struct nvmet_fc_port_info pinfo; + int error = 0; + + if (phba->targetport) + return 0; + + memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info)); + pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn); + pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn); + pinfo.port_id = vport->fc_myDID; + + lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel; + lpfc_tgttemplate.max_sgl_segments = phba->cfg_sg_seg_cnt; + lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP | + NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED; + +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) + error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate, + &phba->pcidev->dev, + &phba->targetport); +#else + error = -ENOMEM; +#endif + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, + "6025 Cannot register NVME targetport " + "x%x\n", error); + phba->targetport = NULL; + } else { + tgtp = (struct lpfc_nvmet_tgtport *) + phba->targetport->private; + tgtp->phba = phba; + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, + "6026 Registered NVME " + "targetport: %p, private %p " + "portnm %llx nodenm %llx\n", + phba->targetport, tgtp, + pinfo.port_name, pinfo.node_name); + + atomic_set(&tgtp->rcv_ls_req_in, 0); + atomic_set(&tgtp->rcv_ls_req_out, 0); + atomic_set(&tgtp->rcv_ls_req_drop, 0); + atomic_set(&tgtp->xmt_ls_abort, 0); + atomic_set(&tgtp->xmt_ls_rsp, 0); + atomic_set(&tgtp->xmt_ls_drop, 0); + atomic_set(&tgtp->xmt_ls_rsp_error, 0); + atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0); + atomic_set(&tgtp->rcv_fcp_cmd_in, 0); + atomic_set(&tgtp->rcv_fcp_cmd_out, 0); + 
atomic_set(&tgtp->rcv_fcp_cmd_drop, 0); + atomic_set(&tgtp->xmt_fcp_abort, 0); + atomic_set(&tgtp->xmt_fcp_drop, 0); + atomic_set(&tgtp->xmt_fcp_read_rsp, 0); + atomic_set(&tgtp->xmt_fcp_read, 0); + atomic_set(&tgtp->xmt_fcp_write, 0); + atomic_set(&tgtp->xmt_fcp_rsp, 0); + atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0); + atomic_set(&tgtp->xmt_fcp_rsp_error, 0); + atomic_set(&tgtp->xmt_fcp_rsp_drop, 0); + atomic_set(&tgtp->xmt_abort_rsp, 0); + atomic_set(&tgtp->xmt_abort_rsp_error, 0); + atomic_set(&tgtp->xmt_abort_cmpl, 0); + } + return error; +} + +int +lpfc_nvmet_update_targetport(struct lpfc_hba *phba) +{ + struct lpfc_vport *vport = phba->pport; + + if (!phba->targetport) + return 0; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, + "6007 Update NVMET port %p did x%x\n", + phba->targetport, vport->fc_myDID); + + phba->targetport->port_id = vport->fc_myDID; + return 0; +} + +/** + * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort + * @phba: pointer to lpfc hba data structure. + * @axri: pointer to the nvmet xri abort wcqe structure. + * + * This routine is invoked by the worker thread to process a SLI4 fast-path + * NVMET aborted xri. + **/ +void +lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, + struct sli4_wcqe_xri_aborted *axri) +{ + /* TODO: work in progress */ +} + +void +lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) +{ +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) + struct lpfc_nvmet_tgtport *tgtp; + + if (phba->nvmet_support == 0) + return; + if (phba->targetport) { + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + init_completion(&tgtp->tport_unreg_done); + nvmet_fc_unregister_targetport(phba->targetport); + wait_for_completion_timeout(&tgtp->tport_unreg_done, 5); + } + phba->targetport = NULL; +#endif +} + +/** + * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer + * @phba: pointer to lpfc hba data structure. + * @pring: pointer to a SLI ring. + * @nvmebuf: pointer to lpfc nvme command HBQ data structure. + * + * This routine is used for processing the WQE associated with a unsolicited + * event. It first determines whether there is an existing ndlp that matches + * the DID from the unsolicited WQE. If not, it will create a new one with + * the DID from the unsolicited WQE. The ELS command from the unsolicited + * WQE is then used to invoke the proper routine and to set up proper state + * of the discovery state machine. 
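+ * For an NVME LS request the routine allocates a lpfc_nvmet_rcv_ctx,
+ * saves the OX_ID, S_ID and frame length from the received header, and
+ * hands the payload to the nvmet transport via nvmet_fc_rcv_ls_req().
+ * If the transport rejects the request the receive buffer is freed and
+ * an ABTS is issued for the exchange.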
+ **/ +static void +lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + struct hbq_dmabuf *nvmebuf) +{ +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) + struct lpfc_nvmet_tgtport *tgtp; + struct fc_frame_header *fc_hdr; + struct lpfc_nvmet_rcv_ctx *ctxp; + uint32_t *payload; + uint32_t size, oxid, sid, rc; + + if (!nvmebuf || !phba->targetport) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6154 LS Drop IO\n"); + oxid = 0; + size = 0; + sid = 0; + goto dropit; + } + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + payload = (uint32_t *)(nvmebuf->dbuf.virt); + fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt); + size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl); + oxid = be16_to_cpu(fc_hdr->fh_ox_id); + sid = sli4_sid_from_fc_hdr(fc_hdr); + + ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC); + if (ctxp == NULL) { + atomic_inc(&tgtp->rcv_ls_req_drop); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6155 LS Drop IO x%x: Alloc\n", + oxid); +dropit: + lpfc_nvmeio_data(phba, "NVMET LS DROP: " + "xri x%x sz %d from %06x\n", + oxid, size, sid); + if (nvmebuf) + lpfc_in_buf_free(phba, &nvmebuf->dbuf); + return; + } + ctxp->phba = phba; + ctxp->size = size; + ctxp->oxid = oxid; + ctxp->sid = sid; + ctxp->wqeq = NULL; + ctxp->state = LPFC_NVMET_STE_RCV; + ctxp->rqb_buffer = (void *)nvmebuf; + + lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n", + oxid, size, sid); + /* + * The calling sequence should be: + * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp ->_req->done + * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp. + */ + atomic_inc(&tgtp->rcv_ls_req_in); + rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req, + payload, size); + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, + "6037 %s: ctx %p sz %d rc %d: %08x %08x %08x " + "%08x %08x %08x\n", __func__, ctxp, size, rc, + *payload, *(payload+1), *(payload+2), + *(payload+3), *(payload+4), *(payload+5)); + + if (rc == 0) { + atomic_inc(&tgtp->rcv_ls_req_out); + return; + } + + lpfc_nvmeio_data(phba, "NVMET LS DROP: xri x%x sz %d from %06x\n", + oxid, size, sid); + + atomic_inc(&tgtp->rcv_ls_req_drop); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n", + ctxp->oxid, rc); + + /* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */ + if (nvmebuf) + lpfc_in_buf_free(phba, &nvmebuf->dbuf); + + atomic_inc(&tgtp->xmt_ls_abort); + lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid); +#endif +} + +/** + * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer + * @phba: pointer to lpfc hba data structure. + * @pring: pointer to a SLI ring. + * @nvmebuf: pointer to lpfc nvme command HBQ data structure. + * + * This routine is used for processing the WQE associated with a unsolicited + * event. It first determines whether there is an existing ndlp that matches + * the DID from the unsolicited WQE. If not, it will create a new one with + * the DID from the unsolicited WQE. The ELS command from the unsolicited + * WQE is then used to invoke the proper routine and to set up proper state + * of the discovery state machine. 
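+ * For an NVME FCP command the receive context is taken from the RQ
+ * buffer itself (nvmebuf->context); the OX_ID and S_ID are taken from
+ * the frame header and the command is passed to the nvmet transport via
+ * nvmet_fc_rcv_fcp_req(). On failure the exchange is aborted with an
+ * unsolicited ABTS, or the buffer is simply reposted to the RQ when no
+ * OX_ID is available.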
+ **/ +static void +lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, + struct lpfc_sli_ring *pring, + struct rqb_dmabuf *nvmebuf, + uint64_t isr_timestamp) +{ +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) + struct lpfc_nvmet_rcv_ctx *ctxp; + struct lpfc_nvmet_tgtport *tgtp; + struct fc_frame_header *fc_hdr; + uint32_t *payload; + uint32_t size, oxid, sid, rc; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + uint32_t id; +#endif + + if (!nvmebuf || !phba->targetport) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6157 FCP Drop IO\n"); + oxid = 0; + size = 0; + sid = 0; + goto dropit; + } + + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + payload = (uint32_t *)(nvmebuf->dbuf.virt); + fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt); + size = nvmebuf->bytes_recv; + oxid = be16_to_cpu(fc_hdr->fh_ox_id); + sid = sli4_sid_from_fc_hdr(fc_hdr); + + ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmebuf->context; + if (ctxp == NULL) { + atomic_inc(&tgtp->rcv_fcp_cmd_drop); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6158 FCP Drop IO x%x: Alloc\n", + oxid); + lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf); + /* Cannot send ABTS without context */ + return; + } + memset(ctxp, 0, sizeof(ctxp->ctx)); + ctxp->wqeq = NULL; + ctxp->txrdy = NULL; + ctxp->offset = 0; + ctxp->phba = phba; + ctxp->size = size; + ctxp->oxid = oxid; + ctxp->sid = sid; + ctxp->state = LPFC_NVMET_STE_RCV; + ctxp->rqb_buffer = nvmebuf; + ctxp->entry_cnt = 1; + ctxp->flag = 0; + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (phba->ktime_on) { + ctxp->ts_isr_cmd = isr_timestamp; + ctxp->ts_cmd_nvme = ktime_get_ns(); + ctxp->ts_nvme_data = 0; + ctxp->ts_data_wqput = 0; + ctxp->ts_isr_data = 0; + ctxp->ts_data_nvme = 0; + ctxp->ts_nvme_status = 0; + ctxp->ts_status_wqput = 0; + ctxp->ts_isr_status = 0; + ctxp->ts_status_nvme = 0; + } + + if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) { + id = smp_processor_id(); + if (id < LPFC_CHECK_CPU_CNT) + phba->cpucheck_rcv_io[id]++; + } +#endif + + lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d from %06x\n", + oxid, size, sid); + + atomic_inc(&tgtp->rcv_fcp_cmd_in); + /* + * The calling sequence should be: + * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done + * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp. + */ + rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req, + payload, size); + + /* Process FCP command */ + if (rc == 0) { + atomic_inc(&tgtp->rcv_fcp_cmd_out); + return; + } + + atomic_inc(&tgtp->rcv_fcp_cmd_drop); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6159 FCP Drop IO x%x: err x%x\n", + ctxp->oxid, rc); +dropit: + lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n", + oxid, size, sid); + if (oxid) { + lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid); + return; + } + + if (nvmebuf) { + nvmebuf->iocbq->hba_wqidx = 0; + /* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */ + lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf); + } +#endif +} + +/** + * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport + * @phba: pointer to lpfc hba data structure. + * @pring: pointer to a SLI ring. + * @nvmebuf: pointer to received nvme data structure. + * + * This routine is used to process an unsolicited event received from a SLI + * (Service Level Interface) ring. 
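+ * If NVME target support is not enabled the received buffer is simply
+ * freed and the event is dropped.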
The actual processing of the data buffer + * associated with the unsolicited event is done by invoking the routine + * lpfc_nvmet_unsol_ls_buffer() after properly set up the buffer from the + * SLI RQ on which the unsolicited event was received. + **/ +void +lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + struct lpfc_iocbq *piocb) +{ + struct lpfc_dmabuf *d_buf; + struct hbq_dmabuf *nvmebuf; + + d_buf = piocb->context2; + nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf); + + if (phba->nvmet_support == 0) { + lpfc_in_buf_free(phba, &nvmebuf->dbuf); + return; + } + lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf); +} + +/** + * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport + * @phba: pointer to lpfc hba data structure. + * @pring: pointer to a SLI ring. + * @nvmebuf: pointer to received nvme data structure. + * + * This routine is used to process an unsolicited event received from a SLI + * (Service Level Interface) ring. The actual processing of the data buffer + * associated with the unsolicited event is done by invoking the routine + * lpfc_nvmet_unsol_fcp_buffer() after properly set up the buffer from the + * SLI RQ on which the unsolicited event was received. + **/ +void +lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba, + struct lpfc_sli_ring *pring, + struct rqb_dmabuf *nvmebuf, + uint64_t isr_timestamp) +{ + if (phba->nvmet_support == 0) { + lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf); + return; + } + lpfc_nvmet_unsol_fcp_buffer(phba, pring, nvmebuf, + isr_timestamp); +} + +/** + * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure + * @phba: pointer to a host N_Port data structure. + * @ctxp: Context info for NVME LS Request + * @rspbuf: DMA buffer of NVME command. + * @rspsize: size of the NVME command. + * + * This routine is used for allocating a lpfc-WQE data structure from + * the driver lpfc-WQE free-list and prepare the WQE with the parameters + * passed into the routine for discovery state machine to issue an Extended + * Link Service (NVME) commands. It is a generic lpfc-WQE allocation + * and preparation routine that is used by all the discovery state machine + * routines and the NVME command-specific fields will be later set up by + * the individual discovery machine routines after calling this routine + * allocating and preparing a generic WQE data structure. It fills in the + * Buffer Descriptor Entries (BDEs), allocates buffers for both command + * payload and response payload (if expected). The reference count on the + * ndlp is incremented by 1 and the reference to the ndlp is put into + * context1 of the WQE data structure for this WQE to hold the ndlp + * reference for the command's callback function to access later. 
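+ * The WQE itself is built as a CMD_XMIT_SEQUENCE64_WQE: the LS response
+ * payload is described by a single 64-bit BDE and the sequence is sent
+ * with R_CTL ELS4_REP and TYPE NVME on the exchange identified by the
+ * received OX_ID.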
+ * + * Return code + * Pointer to the newly allocated/prepared nvme wqe data structure + * NULL - when nvme wqe data structure allocation/preparation failed + **/ +static struct lpfc_iocbq * +lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba, + struct lpfc_nvmet_rcv_ctx *ctxp, + dma_addr_t rspbuf, uint16_t rspsize) +{ + struct lpfc_nodelist *ndlp; + struct lpfc_iocbq *nvmewqe; + union lpfc_wqe *wqe; + + if (!lpfc_is_link_up(phba)) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, + "6104 lpfc_nvmet_prep_ls_wqe: link err: " + "NPORT x%x oxid:x%x\n", + ctxp->sid, ctxp->oxid); + return NULL; + } + + /* Allocate buffer for command wqe */ + nvmewqe = lpfc_sli_get_iocbq(phba); + if (nvmewqe == NULL) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, + "6105 lpfc_nvmet_prep_ls_wqe: No WQE: " + "NPORT x%x oxid:x%x\n", + ctxp->sid, ctxp->oxid); + return NULL; + } + + ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || + ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && + (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, + "6106 lpfc_nvmet_prep_ls_wqe: No ndlp: " + "NPORT x%x oxid:x%x\n", + ctxp->sid, ctxp->oxid); + goto nvme_wqe_free_wqeq_exit; + } + ctxp->wqeq = nvmewqe; + + /* prevent preparing wqe with NULL ndlp reference */ + nvmewqe->context1 = lpfc_nlp_get(ndlp); + if (nvmewqe->context1 == NULL) + goto nvme_wqe_free_wqeq_exit; + nvmewqe->context2 = ctxp; + + wqe = &nvmewqe->wqe; + memset(wqe, 0, sizeof(union lpfc_wqe)); + + /* Words 0 - 2 */ + wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; + wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize; + wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf)); + wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf)); + + /* Word 3 */ + + /* Word 4 */ + + /* Word 5 */ + bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0); + bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1); + bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0); + bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP); + bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME); + + /* Word 6 */ + bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, + phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); + bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag); + + /* Word 7 */ + bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com, + CMD_XMIT_SEQUENCE64_WQE); + bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI); + bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3); + bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0); + + /* Word 8 */ + wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag; + + /* Word 9 */ + bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag); + /* Needs to be set by caller */ + bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid); + + /* Word 10 */ + bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1); + bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE); + bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com, + LPFC_WQE_LENLOC_WORD12); + bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); + + /* Word 11 */ + bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com, + LPFC_WQE_CQ_ID_DEFAULT); + bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com, + OTHER_COMMAND); + + /* Word 12 */ + wqe->xmit_sequence.xmit_len = rspsize; + + nvmewqe->retry = 1; + nvmewqe->vport = phba->pport; + nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT; + nvmewqe->iocb_flag |= LPFC_IO_NVME_LS; + + /* Xmit NVME response to remote NPORT */ + lpfc_printf_log(phba, 
KERN_INFO, LOG_NVME_DISC, + "6039 Xmit NVME LS response to remote " + "NPORT x%x iotag:x%x oxid:x%x size:x%x\n", + ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid, + rspsize); + return nvmewqe; + +nvme_wqe_free_wqeq_exit: + nvmewqe->context2 = NULL; + nvmewqe->context3 = NULL; + lpfc_sli_release_iocbq(phba, nvmewqe); + return NULL; +} + + +static struct lpfc_iocbq * +lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, + struct lpfc_nvmet_rcv_ctx *ctxp) +{ + struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req; + struct lpfc_nvmet_tgtport *tgtp; + struct sli4_sge *sgl; + struct lpfc_nodelist *ndlp; + struct lpfc_iocbq *nvmewqe; + struct scatterlist *sgel; + union lpfc_wqe128 *wqe; + uint32_t *txrdy; + dma_addr_t physaddr; + int i, cnt; + int xc = 1; + + if (!lpfc_is_link_up(phba)) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6107 lpfc_nvmet_prep_fcp_wqe: link err:" + "NPORT x%x oxid:x%x\n", ctxp->sid, + ctxp->oxid); + return NULL; + } + + ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || + ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && + (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6108 lpfc_nvmet_prep_fcp_wqe: no ndlp: " + "NPORT x%x oxid:x%x\n", + ctxp->sid, ctxp->oxid); + return NULL; + } + + if (rsp->sg_cnt > phba->cfg_sg_seg_cnt) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6109 lpfc_nvmet_prep_fcp_wqe: seg cnt err: " + "NPORT x%x oxid:x%x\n", + ctxp->sid, ctxp->oxid); + return NULL; + } + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + nvmewqe = ctxp->wqeq; + if (nvmewqe == NULL) { + /* Allocate buffer for command wqe */ + nvmewqe = ctxp->rqb_buffer->iocbq; + if (nvmewqe == NULL) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6110 lpfc_nvmet_prep_fcp_wqe: No " + "WQE: NPORT x%x oxid:x%x\n", + ctxp->sid, ctxp->oxid); + return NULL; + } + ctxp->wqeq = nvmewqe; + xc = 0; /* create new XRI */ + nvmewqe->sli4_lxritag = NO_XRI; + nvmewqe->sli4_xritag = NO_XRI; + } + + /* Sanity check */ + if (((ctxp->state == LPFC_NVMET_STE_RCV) && + (ctxp->entry_cnt == 1)) || + ((ctxp->state == LPFC_NVMET_STE_DATA) && + (ctxp->entry_cnt > 1))) { + wqe = (union lpfc_wqe128 *)&nvmewqe->wqe; + } else { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6111 Wrong state %s: %d cnt %d\n", + __func__, ctxp->state, ctxp->entry_cnt); + return NULL; + } + + sgl = (struct sli4_sge *)ctxp->rqb_buffer->sglq->sgl; + switch (rsp->op) { + case NVMET_FCOP_READDATA: + case NVMET_FCOP_READDATA_RSP: + /* Words 0 - 2 : The first sg segment */ + sgel = &rsp->sg[0]; + physaddr = sg_dma_address(sgel); + wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; + wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel); + wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr)); + wqe->fcp_tsend.bde.addrHigh = + cpu_to_le32(putPaddrHigh(physaddr)); + + /* Word 3 */ + wqe->fcp_tsend.payload_offset_len = 0; + + /* Word 4 */ + wqe->fcp_tsend.relative_offset = ctxp->offset; + + /* Word 5 */ + + /* Word 6 */ + bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com, + phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); + bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com, + nvmewqe->sli4_xritag); + + /* Word 7 */ + bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE); + + /* Word 8 */ + wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag; + + /* Word 9 */ + bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag); + bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid); + + /* Word 10 */ + bf_set(wqe_nvme, 
&wqe->fcp_tsend.wqe_com, 1); + bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1); + bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE); + bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, + LPFC_WQE_LENLOC_WORD12); + bf_set(wqe_ebde_cnt, &wqe->fcp_tsend.wqe_com, 0); + bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, xc); + bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1); + if (phba->cfg_nvme_oas) + bf_set(wqe_oas, &wqe->fcp_tsend.wqe_com, 1); + + /* Word 11 */ + bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, + LPFC_WQE_CQ_ID_DEFAULT); + bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, + FCP_COMMAND_TSEND); + + /* Word 12 */ + wqe->fcp_tsend.fcp_data_len = rsp->transfer_length; + + /* Setup 2 SKIP SGEs */ + sgl->addr_hi = 0; + sgl->addr_lo = 0; + sgl->word2 = 0; + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = 0; + sgl++; + sgl->addr_hi = 0; + sgl->addr_lo = 0; + sgl->word2 = 0; + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = 0; + sgl++; + if (rsp->op == NVMET_FCOP_READDATA_RSP) { + atomic_inc(&tgtp->xmt_fcp_read_rsp); + bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1); + if ((ndlp->nlp_flag & NLP_SUPPRESS_RSP) && + (rsp->rsplen == 12)) { + bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 1); + bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0); + bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0); + bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0); + } else { + bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0); + bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1); + bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1); + bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, + ((rsp->rsplen >> 2) - 1)); + memcpy(&wqe->words[16], rsp->rspaddr, + rsp->rsplen); + } + } else { + atomic_inc(&tgtp->xmt_fcp_read); + + bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0); + bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0); + bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0); + bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0); + bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0); + } + ctxp->state = LPFC_NVMET_STE_DATA; + break; + + case NVMET_FCOP_WRITEDATA: + /* Words 0 - 2 : The first sg segment */ + txrdy = pci_pool_alloc(phba->txrdy_payload_pool, + GFP_KERNEL, &physaddr); + if (!txrdy) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6041 Bad txrdy buffer: oxid x%x\n", + ctxp->oxid); + return NULL; + } + ctxp->txrdy = txrdy; + ctxp->txrdy_phys = physaddr; + wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; + wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN; + wqe->fcp_treceive.bde.addrLow = + cpu_to_le32(putPaddrLow(physaddr)); + wqe->fcp_treceive.bde.addrHigh = + cpu_to_le32(putPaddrHigh(physaddr)); + + /* Word 3 */ + wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN; + + /* Word 4 */ + wqe->fcp_treceive.relative_offset = ctxp->offset; + + /* Word 5 */ + + /* Word 6 */ + bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com, + phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); + bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com, + nvmewqe->sli4_xritag); + + /* Word 7 */ + bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0); + bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, + CMD_FCP_TRECEIVE64_WQE); + + /* Word 8 */ + wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag; + + /* Word 9 */ + bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag); + bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid); + + /* Word 10 */ + bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1); + bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1); + bf_set(wqe_iod, 
&wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ); + bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, + LPFC_WQE_LENLOC_WORD12); + bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, xc); + bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0); + bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0); + bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0); + bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1); + if (phba->cfg_nvme_oas) + bf_set(wqe_oas, &wqe->fcp_treceive.wqe_com, 1); + + /* Word 11 */ + bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, + LPFC_WQE_CQ_ID_DEFAULT); + bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, + FCP_COMMAND_TRECEIVE); + bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0); + + /* Word 12 */ + wqe->fcp_tsend.fcp_data_len = rsp->transfer_length; + + /* Setup 1 TXRDY and 1 SKIP SGE */ + txrdy[0] = 0; + txrdy[1] = cpu_to_be32(rsp->transfer_length); + txrdy[2] = 0; + + sgl->addr_hi = putPaddrHigh(physaddr); + sgl->addr_lo = putPaddrLow(physaddr); + sgl->word2 = 0; + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN); + sgl++; + sgl->addr_hi = 0; + sgl->addr_lo = 0; + sgl->word2 = 0; + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = 0; + sgl++; + ctxp->state = LPFC_NVMET_STE_DATA; + atomic_inc(&tgtp->xmt_fcp_write); + break; + + case NVMET_FCOP_RSP: + /* Words 0 - 2 */ + physaddr = rsp->rspdma; + wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; + wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen; + wqe->fcp_trsp.bde.addrLow = + cpu_to_le32(putPaddrLow(physaddr)); + wqe->fcp_trsp.bde.addrHigh = + cpu_to_le32(putPaddrHigh(physaddr)); + + /* Word 3 */ + wqe->fcp_trsp.response_len = rsp->rsplen; + + /* Word 4 */ + wqe->fcp_trsp.rsvd_4_5[0] = 0; + + + /* Word 5 */ + + /* Word 6 */ + bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com, + phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); + bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com, + nvmewqe->sli4_xritag); + + /* Word 7 */ + bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); + bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE); + + /* Word 8 */ + wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag; + + /* Word 9 */ + bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag); + bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid); + + /* Word 10 */ + bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1); + bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 0); + bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_WRITE); + bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, + LPFC_WQE_LENLOC_WORD3); + bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, xc); + bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1); + if (phba->cfg_nvme_oas) + bf_set(wqe_oas, &wqe->fcp_trsp.wqe_com, 1); + + /* Word 11 */ + bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, + LPFC_WQE_CQ_ID_DEFAULT); + bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, + FCP_COMMAND_TRSP); + bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0); + ctxp->state = LPFC_NVMET_STE_RSP; + + if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) { + /* Good response - all zero's on wire */ + bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0); + bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0); + bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0); + } else { + bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1); + bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1); + bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, + ((rsp->rsplen >> 2) - 1)); + memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen); + } + + /* Use rspbuf, NOT sg list */ + rsp->sg_cnt = 0; + sgl->word2 = 0; + 
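+ /* rsp->sg_cnt was cleared above, so the SGE setup loop that follows
+ * the switch adds no data entries for the response.
+ */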
atomic_inc(&tgtp->xmt_fcp_rsp); + break; + + default: + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, + "6064 Unknown Rsp Op %d\n", + rsp->op); + return NULL; + } + + nvmewqe->retry = 1; + nvmewqe->vport = phba->pport; + nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT; + nvmewqe->context1 = ndlp; + + for (i = 0; i < rsp->sg_cnt; i++) { + sgel = &rsp->sg[i]; + physaddr = sg_dma_address(sgel); + cnt = sg_dma_len(sgel); + sgl->addr_hi = putPaddrHigh(physaddr); + sgl->addr_lo = putPaddrLow(physaddr); + sgl->word2 = 0; + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); + bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset); + if ((i+1) == rsp->sg_cnt) + bf_set(lpfc_sli4_sge_last, sgl, 1); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = cpu_to_le32(cnt); + sgl++; + ctxp->offset += cnt; + } + return nvmewqe; +} + +/** + * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS + * @phba: Pointer to HBA context object. + * @cmdwqe: Pointer to driver command WQE object. + * @wcqe: Pointer to driver response CQE object. + * + * The function is called from SLI ring event handler with no + * lock held. This function is the completion handler for NVME ABTS for FCP cmds + * The function frees memory resources used for the NVME commands. + **/ +static void +lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, + struct lpfc_wcqe_complete *wcqe) +{ + struct lpfc_nvmet_rcv_ctx *ctxp; + struct lpfc_nvmet_tgtport *tgtp; + uint32_t status, result; + + ctxp = cmdwqe->context2; + status = bf_get(lpfc_wcqe_c_status, wcqe); + result = wcqe->parameter; + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + atomic_inc(&tgtp->xmt_abort_cmpl); + + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, + "6165 Abort cmpl: xri x%x WCQE: %08x %08x %08x %08x\n", + ctxp->oxid, wcqe->word0, wcqe->total_data_placed, + result, wcqe->word3); + + ctxp->state = LPFC_NVMET_STE_DONE; + lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf); + + cmdwqe->context2 = NULL; + cmdwqe->context3 = NULL; + lpfc_sli_release_iocbq(phba, cmdwqe); +} + +/** + * lpfc_nvmet_xmt_fcp_abort_cmp - Completion handler for ABTS + * @phba: Pointer to HBA context object. + * @cmdwqe: Pointer to driver command WQE object. + * @wcqe: Pointer to driver response CQE object. + * + * The function is called from SLI ring event handler with no + * lock held. This function is the completion handler for NVME ABTS for FCP cmds + * The function frees memory resources used for the NVME commands. 
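+ * It marks the receive context LPFC_NVMET_STE_DONE and reposts the
+ * associated RQ buffer so it can receive new commands.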
+ **/ +static void +lpfc_nvmet_xmt_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, + struct lpfc_wcqe_complete *wcqe) +{ + struct lpfc_nvmet_rcv_ctx *ctxp; + struct lpfc_nvmet_tgtport *tgtp; + uint32_t status, result; + + ctxp = cmdwqe->context2; + status = bf_get(lpfc_wcqe_c_status, wcqe); + result = wcqe->parameter; + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + atomic_inc(&tgtp->xmt_abort_cmpl); + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, + "6070 Abort cmpl: ctx %p WCQE: %08x %08x %08x %08x\n", + ctxp, wcqe->word0, wcqe->total_data_placed, + result, wcqe->word3); + + if (ctxp) { + /* Sanity check */ + if (ctxp->state != LPFC_NVMET_STE_ABORT) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, + "6112 ABORT Wrong state:%d oxid x%x\n", + ctxp->state, ctxp->oxid); + } + ctxp->state = LPFC_NVMET_STE_DONE; + lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf); + cmdwqe->context2 = NULL; + cmdwqe->context3 = NULL; + } +} + +/** + * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS + * @phba: Pointer to HBA context object. + * @cmdwqe: Pointer to driver command WQE object. + * @wcqe: Pointer to driver response CQE object. + * + * The function is called from SLI ring event handler with no + * lock held. This function is the completion handler for NVME ABTS for LS cmds + * The function frees memory resources used for the NVME commands. + **/ +static void +lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, + struct lpfc_wcqe_complete *wcqe) +{ + struct lpfc_nvmet_rcv_ctx *ctxp; + struct lpfc_nvmet_tgtport *tgtp; + uint32_t status, result; + + ctxp = cmdwqe->context2; + status = bf_get(lpfc_wcqe_c_status, wcqe); + result = wcqe->parameter; + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + atomic_inc(&tgtp->xmt_abort_cmpl); + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, + "6083 Abort cmpl: ctx %p WCQE: %08x %08x %08x %08x\n", + ctxp, wcqe->word0, wcqe->total_data_placed, + result, wcqe->word3); + + if (ctxp) { + cmdwqe->context2 = NULL; + cmdwqe->context3 = NULL; + lpfc_sli_release_iocbq(phba, cmdwqe); + kfree(ctxp); + } else + lpfc_sli_release_iocbq(phba, cmdwqe); +} + +static int +lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba, + struct lpfc_nvmet_rcv_ctx *ctxp, + uint32_t sid, uint16_t xri) +{ + struct lpfc_nvmet_tgtport *tgtp; + struct lpfc_iocbq *abts_wqeq; + union lpfc_wqe *wqe_abts; + struct lpfc_nodelist *ndlp; + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, + "6067 Abort: sid %x xri x%x/x%x\n", + sid, xri, ctxp->wqeq->sli4_xritag); + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + + ndlp = lpfc_findnode_did(phba->pport, sid); + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || + ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && + (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { + atomic_inc(&tgtp->xmt_abort_rsp_error); + lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, + "6134 Drop ABTS - wrong NDLP state x%x.\n", + (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE); + + /* No failure to an ABTS request. */ + return 0; + } + + abts_wqeq = ctxp->wqeq; + wqe_abts = &abts_wqeq->wqe; + ctxp->state = LPFC_NVMET_STE_ABORT; + + /* + * Since we zero the whole WQE, we need to ensure we set the WQE fields + * that were initialized in lpfc_sli4_nvmet_alloc. 
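+ * The abort is sent as an XMIT_SEQUENCE64 WQE carrying R_CTL BA_ABTS
+ * and TYPE BLS against the OX_ID supplied by the caller.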
+ */ + memset(wqe_abts, 0, sizeof(union lpfc_wqe)); + + /* Word 5 */ + bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0); + bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1); + bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0); + bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS); + bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS); + + /* Word 6 */ + bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com, + phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); + bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com, + abts_wqeq->sli4_xritag); + + /* Word 7 */ + bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com, + CMD_XMIT_SEQUENCE64_WQE); + bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI); + bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3); + bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0); + + /* Word 8 */ + wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag; + + /* Word 9 */ + bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag); + /* Needs to be set by caller */ + bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri); + + /* Word 10 */ + bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1); + bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE); + bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com, + LPFC_WQE_LENLOC_WORD12); + bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0); + bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0); + + /* Word 11 */ + bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com, + LPFC_WQE_CQ_ID_DEFAULT); + bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com, + OTHER_COMMAND); + + abts_wqeq->vport = phba->pport; + abts_wqeq->context1 = ndlp; + abts_wqeq->context2 = ctxp; + abts_wqeq->context3 = NULL; + abts_wqeq->rsvd2 = 0; + /* hba_wqidx should already be setup from command we are aborting */ + abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR; + abts_wqeq->iocb.ulpLe = 1; + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, + "6069 Issue ABTS to xri x%x reqtag x%x\n", + xri, abts_wqeq->iotag); + return 1; +} + +static int +lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, + struct lpfc_nvmet_rcv_ctx *ctxp, + uint32_t sid, uint16_t xri) +{ + struct lpfc_nvmet_tgtport *tgtp; + struct lpfc_iocbq *abts_wqeq; + union lpfc_wqe *abts_wqe; + struct lpfc_nodelist *ndlp; + unsigned long flags; + int rc; + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + if (!ctxp->wqeq) { + ctxp->wqeq = ctxp->rqb_buffer->iocbq; + ctxp->wqeq->hba_wqidx = 0; + } + + ndlp = lpfc_findnode_did(phba->pport, sid); + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || + ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && + (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { + atomic_inc(&tgtp->xmt_abort_rsp_error); + lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, + "6160 Drop ABTS - wrong NDLP state x%x.\n", + (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE); + + /* No failure to an ABTS request. */ + return 0; + } + + /* Issue ABTS for this WQE based on iotag */ + ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba); + if (!ctxp->abort_wqeq) { + lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, + "6161 Abort failed: No wqeqs: " + "xri: x%x\n", ctxp->oxid); + /* No failure to an ABTS request. */ + return 0; + } + abts_wqeq = ctxp->abort_wqeq; + abts_wqe = &abts_wqeq->wqe; + ctxp->state = LPFC_NVMET_STE_ABORT; + + /* Announce entry to new IO submit field. 
*/ + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, + "6162 Abort Request to rport DID x%06x " + "for xri x%x x%x\n", + ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag); + + /* If the hba is getting reset, this flag is set. It is + * cleared when the reset is complete and rings reestablished. + */ + spin_lock_irqsave(&phba->hbalock, flags); + /* driver queued commands are in process of being flushed */ + if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) { + spin_unlock_irqrestore(&phba->hbalock, flags); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + "6163 Driver in reset cleanup - flushing " + "NVME Req now. hba_flag x%x oxid x%x\n", + phba->hba_flag, ctxp->oxid); + lpfc_sli_release_iocbq(phba, abts_wqeq); + return 0; + } + + /* Outstanding abort is in progress */ + if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) { + spin_unlock_irqrestore(&phba->hbalock, flags); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + "6164 Outstanding NVME I/O Abort Request " + "still pending on oxid x%x\n", + ctxp->oxid); + lpfc_sli_release_iocbq(phba, abts_wqeq); + return 0; + } + + /* Ready - mark outstanding as aborted by driver. */ + abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED; + + /* WQEs are reused. Clear stale data and set key fields to + * zero like ia, iaab, iaar, xri_tag, and ctxt_tag. + */ + memset(abts_wqe, 0, sizeof(union lpfc_wqe)); + + /* word 3 */ + bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG); + + /* word 7 */ + bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0); + bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); + + /* word 8 - tell the FW to abort the IO associated with this + * outstanding exchange ID. + */ + abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag; + + /* word 9 - this is the iotag for the abts_wqe completion. */ + bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com, + abts_wqeq->iotag); + + /* word 10 */ + bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1); + bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE); + + /* word 11 */ + bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND); + bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1); + bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); + + /* ABTS WQE must go to the same WQ as the WQE to be aborted */ + abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx; + abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp; + abts_wqeq->iocb_cmpl = 0; + abts_wqeq->iocb_flag |= LPFC_IO_NVME; + abts_wqeq->context2 = ctxp; + rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq); + spin_unlock_irqrestore(&phba->hbalock, flags); + if (rc == WQE_SUCCESS) + return 0; + + lpfc_sli_release_iocbq(phba, abts_wqeq); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + "6166 Failed abts issue_wqe with status x%x " + "for oxid x%x.\n", + rc, ctxp->oxid); + return 1; +} + + +static int +lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba, + struct lpfc_nvmet_rcv_ctx *ctxp, + uint32_t sid, uint16_t xri) +{ + struct lpfc_nvmet_tgtport *tgtp; + struct lpfc_iocbq *abts_wqeq; + unsigned long flags; + int rc; + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + if (!ctxp->wqeq) { + ctxp->wqeq = ctxp->rqb_buffer->iocbq; + ctxp->wqeq->hba_wqidx = 0; + } + + rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri); + if (rc == 0) + goto aerr; + + spin_lock_irqsave(&phba->hbalock, flags); + abts_wqeq = ctxp->wqeq; + abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_abort_cmp; + abts_wqeq->iocb_cmpl = 0; + abts_wqeq->iocb_flag |= LPFC_IO_NVMET; + rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, 
abts_wqeq); + spin_unlock_irqrestore(&phba->hbalock, flags); + if (rc == WQE_SUCCESS) { + atomic_inc(&tgtp->xmt_abort_rsp); + return 0; + } + +aerr: + lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf); + atomic_inc(&tgtp->xmt_abort_rsp_error); + lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, + "6135 Failed to Issue ABTS for oxid x%x. Status x%x\n", + ctxp->oxid, rc); + return 1; +} + +static int +lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba, + struct lpfc_nvmet_rcv_ctx *ctxp, + uint32_t sid, uint16_t xri) +{ + struct lpfc_nvmet_tgtport *tgtp; + struct lpfc_iocbq *abts_wqeq; + union lpfc_wqe *wqe_abts; + unsigned long flags; + int rc; + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + if (!ctxp->wqeq) { + /* Issue ABTS for this WQE based on iotag */ + ctxp->wqeq = lpfc_sli_get_iocbq(phba); + if (!ctxp->wqeq) { + lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, + "6068 Abort failed: No wqeqs: " + "xri: x%x\n", xri); + /* No failure to an ABTS request. */ + kfree(ctxp); + return 0; + } + } + abts_wqeq = ctxp->wqeq; + wqe_abts = &abts_wqeq->wqe; + lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri); + + spin_lock_irqsave(&phba->hbalock, flags); + abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp; + abts_wqeq->iocb_cmpl = 0; + abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS; + rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq); + spin_unlock_irqrestore(&phba->hbalock, flags); + if (rc == WQE_SUCCESS) { + atomic_inc(&tgtp->xmt_abort_rsp); + return 0; + } + + atomic_inc(&tgtp->xmt_abort_rsp_error); + abts_wqeq->context2 = NULL; + abts_wqeq->context3 = NULL; + lpfc_sli_release_iocbq(phba, abts_wqeq); + kfree(ctxp); + lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, + "6056 Failed to Issue ABTS. Status x%x\n", rc); + return 0; +} diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h new file mode 100644 index 00000000000000..ca96f05c1604f5 --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_nvmet.h @@ -0,0 +1,116 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * Portions Copyright (C) 2004-2005 Christoph Hellwig * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. 
* + ********************************************************************/ + +#define LPFC_NVMET_MIN_SEGS 16 +#define LPFC_NVMET_DEFAULT_SEGS 64 /* 256K IOs */ +#define LPFC_NVMET_MAX_SEGS 510 +#define LPFC_NVMET_SUCCESS_LEN 12 + +/* Used for NVME Target */ +struct lpfc_nvmet_tgtport { + struct lpfc_hba *phba; + struct completion tport_unreg_done; + + /* Stats counters - lpfc_nvmet_unsol_ls_buffer */ + atomic_t rcv_ls_req_in; + atomic_t rcv_ls_req_out; + atomic_t rcv_ls_req_drop; + atomic_t xmt_ls_abort; + + /* Stats counters - lpfc_nvmet_xmt_ls_rsp */ + atomic_t xmt_ls_rsp; + atomic_t xmt_ls_drop; + + /* Stats counters - lpfc_nvmet_xmt_ls_rsp_cmp */ + atomic_t xmt_ls_rsp_error; + atomic_t xmt_ls_rsp_cmpl; + + /* Stats counters - lpfc_nvmet_unsol_fcp_buffer */ + atomic_t rcv_fcp_cmd_in; + atomic_t rcv_fcp_cmd_out; + atomic_t rcv_fcp_cmd_drop; + + /* Stats counters - lpfc_nvmet_xmt_fcp_op */ + atomic_t xmt_fcp_abort; + atomic_t xmt_fcp_drop; + atomic_t xmt_fcp_read_rsp; + atomic_t xmt_fcp_read; + atomic_t xmt_fcp_write; + atomic_t xmt_fcp_rsp; + + /* Stats counters - lpfc_nvmet_xmt_fcp_op_cmp */ + atomic_t xmt_fcp_rsp_cmpl; + atomic_t xmt_fcp_rsp_error; + atomic_t xmt_fcp_rsp_drop; + + + /* Stats counters - lpfc_nvmet_unsol_issue_abort */ + atomic_t xmt_abort_rsp; + atomic_t xmt_abort_rsp_error; + + /* Stats counters - lpfc_nvmet_xmt_abort_cmp */ + atomic_t xmt_abort_cmpl; +}; + +struct lpfc_nvmet_rcv_ctx { + union { + struct nvmefc_tgt_ls_req ls_req; + struct nvmefc_tgt_fcp_req fcp_req; + } ctx; + struct lpfc_hba *phba; + struct lpfc_iocbq *wqeq; + struct lpfc_iocbq *abort_wqeq; + dma_addr_t txrdy_phys; + uint32_t *txrdy; + uint32_t sid; + uint32_t offset; + uint16_t oxid; + uint16_t size; + uint16_t entry_cnt; + uint16_t cpu; + uint16_t state; + /* States */ +#define LPFC_NVMET_STE_FREE 0 +#define LPFC_NVMET_STE_RCV 1 +#define LPFC_NVMET_STE_DATA 2 +#define LPFC_NVMET_STE_ABORT 3 +#define LPFC_NVMET_STE_RSP 4 +#define LPFC_NVMET_STE_DONE 5 + uint16_t flag; +#define LPFC_NVMET_IO_INP 1 +#define LPFC_NVMET_ABORT_OP 2 + struct rqb_dmabuf *rqb_buffer; + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + uint64_t ts_isr_cmd; + uint64_t ts_cmd_nvme; + uint64_t ts_nvme_data; + uint64_t ts_data_wqput; + uint64_t ts_isr_data; + uint64_t ts_data_nvme; + uint64_t ts_nvme_status; + uint64_t ts_status_wqput; + uint64_t ts_isr_status; + uint64_t ts_status_nvme; +#endif +}; diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 1180a22beb435c..54fd0c81ceaf69 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * @@ -413,7 +415,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) * struct fcp_cmnd, struct fcp_rsp and the number of bde's * necessary to support the sg_tablesize. 
*/ - psb->data = pci_pool_zalloc(phba->lpfc_scsi_dma_buf_pool, + psb->data = pci_pool_zalloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, &psb->dma_handle); if (!psb->data) { kfree(psb); @@ -424,8 +426,8 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) /* Allocate iotag for psb->cur_iocbq. */ iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); if (iotag == 0) { - pci_pool_free(phba->lpfc_scsi_dma_buf_pool, - psb->data, psb->dma_handle); + pci_pool_free(phba->lpfc_sg_dma_buf_pool, + psb->data, psb->dma_handle); kfree(psb); break; } @@ -522,6 +524,8 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport) struct lpfc_scsi_buf *psb, *next_psb; unsigned long iflag = 0; + if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) + return; spin_lock_irqsave(&phba->hbalock, iflag); spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); list_for_each_entry_safe(psb, next_psb, @@ -554,8 +558,10 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, int i; struct lpfc_nodelist *ndlp; int rrq_empty = 0; - struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; + struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring; + if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) + return; spin_lock_irqsave(&phba->hbalock, iflag); spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); list_for_each_entry_safe(psb, next_psb, @@ -819,7 +825,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) * for the struct fcp_cmnd, struct fcp_rsp and the number * of bde's necessary to support the sg_tablesize. */ - psb->data = pci_pool_zalloc(phba->lpfc_scsi_dma_buf_pool, + psb->data = pci_pool_zalloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, &psb->dma_handle); if (!psb->data) { kfree(psb); @@ -832,7 +838,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) */ if (phba->cfg_enable_bg && (((unsigned long)(psb->data) & (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) { - pci_pool_free(phba->lpfc_scsi_dma_buf_pool, + pci_pool_free(phba->lpfc_sg_dma_buf_pool, psb->data, psb->dma_handle); kfree(psb); break; @@ -841,8 +847,8 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) lxri = lpfc_sli4_next_xritag(phba); if (lxri == NO_XRI) { - pci_pool_free(phba->lpfc_scsi_dma_buf_pool, - psb->data, psb->dma_handle); + pci_pool_free(phba->lpfc_sg_dma_buf_pool, + psb->data, psb->dma_handle); kfree(psb); break; } @@ -850,8 +856,8 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) /* Allocate iotag for psb->cur_iocbq. 
*/ iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); if (iotag == 0) { - pci_pool_free(phba->lpfc_scsi_dma_buf_pool, - psb->data, psb->dma_handle); + pci_pool_free(phba->lpfc_sg_dma_buf_pool, + psb->data, psb->dma_handle); kfree(psb); lpfc_printf_log(phba, KERN_ERR, LOG_FCP, "3368 Failed to allocate IOTAG for" @@ -920,7 +926,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) phba->sli4_hba.scsi_xri_cnt++; spin_unlock_irq(&phba->scsi_buf_list_get_lock); } - lpfc_printf_log(phba, KERN_INFO, LOG_BG, + lpfc_printf_log(phba, KERN_INFO, LOG_BG | LOG_FCP, "3021 Allocate %d out of %d requested new SCSI " "buffers\n", bcnt, num_to_alloc); @@ -3894,7 +3900,7 @@ int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba, } } chann = atomic_add_return(1, &phba->fcp_qidx); - chann = (chann % phba->cfg_fcp_io_channel); + chann = chann % phba->cfg_fcp_io_channel; return chann; } @@ -3925,6 +3931,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, struct Scsi_Host *shost; uint32_t logit = LOG_FCP; + phba->fc4ScsiIoCmpls++; + /* Sanity check on return of outstanding command */ cmd = lpfc_cmd->pCmd; if (!cmd) @@ -3967,6 +3975,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, lpfc_cmd->prot_data_segment = NULL; } #endif + if (pnode && NLP_CHK_NODE_ACT(pnode)) atomic_dec(&pnode->cmd_pending); @@ -4241,19 +4250,19 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, vport->cfg_first_burst_size; } fcp_cmnd->fcpCntl3 = WRITE_DATA; - phba->fc4OutputRequests++; + phba->fc4ScsiOutputRequests++; } else { iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR; iocb_cmd->ulpPU = PARM_READ_CHECK; fcp_cmnd->fcpCntl3 = READ_DATA; - phba->fc4InputRequests++; + phba->fc4ScsiInputRequests++; } } else { iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR; iocb_cmd->un.fcpi.fcpi_parm = 0; iocb_cmd->ulpPU = 0; fcp_cmnd->fcpCntl3 = 0; - phba->fc4ControlRequests++; + phba->fc4ScsiControlRequests++; } if (phba->sli_rev == 3 && !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) @@ -4467,7 +4476,7 @@ static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba) unsigned long poll_tmo_expires = (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo)); - if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq)) + if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq)) mod_timer(&phba->fcp_poll_timer, poll_tmo_expires); } @@ -4497,7 +4506,7 @@ void lpfc_poll_timeout(unsigned long ptr) if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { lpfc_sli_handle_fast_ring_event(phba, - &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ); + &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); if (phba->cfg_poll & DISABLE_FCP_RING_INT) lpfc_poll_rearm_timer(phba); @@ -4561,7 +4570,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) if (lpfc_cmd == NULL) { lpfc_rampdown_queue_depth(phba); - lpfc_printf_vlog(vport, KERN_INFO, LOG_MISC, + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR, "0707 driver's buffer pool is empty, " "IO busied\n"); goto out_host_busy; @@ -4636,7 +4645,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) } if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { lpfc_sli_handle_fast_ring_event(phba, - &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ); + &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); if (phba->cfg_poll & DISABLE_FCP_RING_INT) lpfc_poll_rearm_timer(phba); @@ -4681,7 +4690,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) IOCB_t *cmd, *icmd; int ret = SUCCESS, status = 0; struct lpfc_sli_ring *pring_s4; - int 
ring_number, ret_val; + int ret_val; unsigned long flags, iflags; DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); @@ -4769,7 +4778,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) icmd->ulpClass = cmd->ulpClass; /* ABTS WQE must go to the same WQ as the WQE to be aborted */ - abtsiocb->fcp_wqidx = iocb->fcp_wqidx; + abtsiocb->hba_wqidx = iocb->hba_wqidx; abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX; if (iocb->iocb_flag & LPFC_IO_FOF) abtsiocb->iocb_flag |= LPFC_IO_FOF; @@ -4782,8 +4791,11 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; abtsiocb->vport = vport; if (phba->sli_rev == LPFC_SLI_REV4) { - ring_number = MAX_SLI3_CONFIGURED_RINGS + iocb->fcp_wqidx; - pring_s4 = &phba->sli.ring[ring_number]; + pring_s4 = lpfc_sli4_calc_ring(phba, iocb); + if (pring_s4 == NULL) { + ret = FAILED; + goto out_unlock; + } /* Note: both hbalock and ring_lock must be set here */ spin_lock_irqsave(&pring_s4->ring_lock, iflags); ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, @@ -4805,7 +4817,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) if (phba->cfg_poll & DISABLE_FCP_RING_INT) lpfc_sli_handle_fast_ring_event(phba, - &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ); + &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); wait_for_cmpl: lpfc_cmd->waitq = &waitq; @@ -5105,7 +5117,7 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id, cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context); if (cnt) lpfc_sli_abort_taskmgmt(vport, - &phba->sli.ring[phba->sli.fcp_ring], + &phba->sli.sli3_ring[LPFC_FCP_RING], tgt_id, lun_id, context); later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; while (time_after(later, jiffies) && cnt) { @@ -5323,7 +5335,8 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) continue; if (ndlp->nlp_state == NLP_STE_MAPPED_NODE && ndlp->nlp_sid == i && - ndlp->rport) { + ndlp->rport && + ndlp->nlp_type & NLP_FCP_TARGET) { match = 1; break; } @@ -5534,7 +5547,7 @@ lpfc_slave_configure(struct scsi_device *sdev) if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { lpfc_sli_handle_fast_ring_event(phba, - &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ); + &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); if (phba->cfg_poll & DISABLE_FCP_RING_INT) lpfc_poll_rearm_timer(phba); } @@ -5898,12 +5911,55 @@ lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, return false; } -struct scsi_host_template lpfc_template_s3 = { +static int +lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) +{ + return SCSI_MLQUEUE_HOST_BUSY; +} + +static int +lpfc_no_handler(struct scsi_cmnd *cmnd) +{ + return FAILED; +} + +static int +lpfc_no_slave(struct scsi_device *sdev) +{ + return -ENODEV; +} + +struct scsi_host_template lpfc_template_nvme = { + .module = THIS_MODULE, + .name = LPFC_DRIVER_NAME, + .proc_name = LPFC_DRIVER_NAME, + .info = lpfc_info, + .queuecommand = lpfc_no_command, + .eh_abort_handler = lpfc_no_handler, + .eh_device_reset_handler = lpfc_no_handler, + .eh_target_reset_handler = lpfc_no_handler, + .eh_bus_reset_handler = lpfc_no_handler, + .eh_host_reset_handler = lpfc_no_handler, + .slave_alloc = lpfc_no_slave, + .slave_configure = lpfc_no_slave, + .scan_finished = lpfc_scan_finished, + .this_id = -1, + .sg_tablesize = 1, + .cmd_per_lun = 1, + .use_clustering = ENABLE_CLUSTERING, + .shost_attrs = lpfc_hba_attrs, + .max_sectors = 0xFFFF, + .vendor_id = LPFC_NL_VENDOR_ID, + .track_queue_depth = 0, +}; + +struct scsi_host_template lpfc_template_no_hr = { .module = THIS_MODULE, .name = LPFC_DRIVER_NAME, 
.proc_name = LPFC_DRIVER_NAME, .info = lpfc_info, .queuecommand = lpfc_queuecommand, + .eh_timed_out = fc_eh_timed_out, .eh_abort_handler = lpfc_abort_handler, .eh_device_reset_handler = lpfc_device_reset_handler, .eh_target_reset_handler = lpfc_target_reset_handler, @@ -5960,7 +6016,6 @@ struct scsi_host_template lpfc_vport_template = { .eh_abort_handler = lpfc_abort_handler, .eh_device_reset_handler = lpfc_device_reset_handler, .eh_target_reset_handler = lpfc_target_reset_handler, - .eh_bus_reset_handler = lpfc_bus_reset_handler, .slave_alloc = lpfc_slave_alloc, .slave_configure = lpfc_slave_configure, .slave_destroy = lpfc_slave_destroy, diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h index 8cb80dabada849..5da7e15400cbc1 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.h +++ b/drivers/scsi/lpfc/lpfc_scsi.h @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * @@ -135,6 +137,8 @@ struct lpfc_scsi_buf { uint32_t timeout; + uint16_t flags; /* TBD convert exch_busy to flags */ +#define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */ uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */ uint16_t status; /* From IOCB Word 7- ulpStatus */ uint32_t result; /* From IOCB Word 4. */ @@ -164,6 +168,8 @@ struct lpfc_scsi_buf { * Iotag is in here */ struct lpfc_iocbq cur_iocbq; + uint16_t cpu; + wait_queue_head_t *waitq; unsigned long start_time; @@ -178,13 +184,15 @@ struct lpfc_scsi_buf { #endif }; -#define LPFC_SCSI_DMA_EXT_SIZE 264 -#define LPFC_BPL_SIZE 1024 -#define MDAC_DIRECT_CMD 0x22 +#define LPFC_SCSI_DMA_EXT_SIZE 264 +#define LPFC_BPL_SIZE 1024 +#define MDAC_DIRECT_CMD 0x22 + +#define FIND_FIRST_OAS_LUN 0 +#define NO_MORE_OAS_LUN -1 +#define NOT_OAS_ENABLED_LUN NO_MORE_OAS_LUN -#define FIND_FIRST_OAS_LUN 0 -#define NO_MORE_OAS_LUN -1 -#define NOT_OAS_ENABLED_LUN NO_MORE_OAS_LUN +#define TXRDY_PAYLOAD_LEN 12 int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd); diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 8e886caf245430..1c9fa45df7eb5d 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1,9 +1,12 @@ + /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* - * www.emulex.com * + * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * @@ -34,14 +37,18 @@ #include #include +#include + #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" -#include "lpfc_scsi.h" #include "lpfc.h" +#include "lpfc_scsi.h" +#include "lpfc_nvme.h" +#include "lpfc_nvmet.h" #include "lpfc_crtn.h" #include "lpfc_logmsg.h" #include "lpfc_compat.h" @@ -67,14 +74,17 @@ static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *, struct lpfc_iocbq *); static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *, struct hbq_dmabuf *); -static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *, +static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *, struct lpfc_cqe *); -static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *, +static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *, int); static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *, uint32_t); static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba); static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba); +static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, + struct lpfc_sli_ring *pring, + struct lpfc_iocbq *cmdiocb); static IOCB_t * lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq) @@ -271,10 +281,11 @@ lpfc_sli4_eq_get(struct lpfc_queue *q) /* * insert barrier for instruction interlock : data from the hardware * must have the valid bit checked before it can be copied and acted - * upon. Given what was seen in lpfc_sli4_cq_get() of speculative - * instructions allowing action on content before valid bit checked, - * add barrier here as well. May not be needed as "content" is a - * single 32-bit entity here (vs multi word structure for cq's). + * upon. Speculative instructions were allowing a bcopy at the start + * of lpfc_sli4_fp_handle_wcqe(), which is called immediately + * after our return, to copy data before the valid bit check above + * was done. As such, some of the copied data was stale. The barrier + * ensures the check is before any data is copied. */ mb(); return eqe; @@ -386,11 +397,10 @@ lpfc_sli4_cq_get(struct lpfc_queue *q) /* * insert barrier for instruction interlock : data from the hardware * must have the valid bit checked before it can be copied and acted - * upon. Speculative instructions were allowing a bcopy at the start - * of lpfc_sli4_fp_handle_wcqe(), which is called immediately - * after our return, to copy data before the valid bit check above - * was done. As such, some of the copied data was stale. The barrier - * ensures the check is before any data is copied. + * upon. Given what was seen in lpfc_sli4_cq_get() of speculative + * instructions allowing action on content before valid bit checked, + * add barrier here as well. May not be needed as "content" is a + * single 32-bit entity here (vs multi word structure for cq's). */ mb(); return cqe; @@ -456,7 +466,7 @@ lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm) * on @q then this function will return -ENOMEM. * The caller is expected to hold the hbalock when calling this routine. 
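The comment blocks swapped between lpfc_sli4_eq_get() and lpfc_sli4_cq_get() above both describe the same ordering requirement: the valid bit of a hardware-written queue entry must be observed before any of the entry's payload is copied, otherwise a speculative load can hand back stale data. A minimal stand-alone sketch of that check-then-barrier-then-copy rule, using invented demo_* types and a compiler builtin standing in for the kernel's mb():

/* Simplified model only; the entry layout and consumer loop are stand-ins,
 * not the lpfc EQ/CQ definitions. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#if defined(__GNUC__)
#define read_barrier()	__sync_synchronize()	/* stand-in for mb()/rmb() */
#else
#define read_barrier()	do { } while (0)
#endif

struct demo_cqe {
	uint32_t valid;      /* written last by the device */
	uint32_t payload[3]; /* written before the valid bit */
};

/* Returns 1 and fills *out when a valid entry was consumed, 0 otherwise. */
static int demo_cq_get(volatile struct demo_cqe *slot, struct demo_cqe *out)
{
	if (!slot->valid)
		return 0;
	/*
	 * Without this barrier the CPU may speculatively load payload[]
	 * before the valid check above and hand back stale data.
	 */
	read_barrier();
	memcpy(out, (const void *)slot, sizeof(*out));
	return 1;
}

int main(void)
{
	struct demo_cqe hw = { .valid = 1, .payload = { 1, 2, 3 } };
	struct demo_cqe sw;

	if (demo_cq_get(&hw, &sw))
		printf("consumed entry, payload[0]=%u\n", sw.payload[0]);
	return 0;
}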
**/ -static int +int lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe) { @@ -602,7 +612,7 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba) * * Returns sglq ponter = success, NULL = Failure. **/ -static struct lpfc_sglq * +struct lpfc_sglq * __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag) { struct lpfc_sglq *sglq; @@ -902,7 +912,7 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, } /** - * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool + * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool * @phba: Pointer to HBA context object. * @piocb: Pointer to the iocbq. * @@ -912,9 +922,9 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, * allocated sglq object else it returns NULL. **/ static struct lpfc_sglq * -__lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) +__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) { - struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list; + struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list; struct lpfc_sglq *sglq = NULL; struct lpfc_sglq *start_sglq = NULL; struct lpfc_scsi_buf *lpfc_cmd; @@ -938,18 +948,21 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) ndlp = piocbq->context1; } - list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list); + spin_lock(&phba->sli4_hba.sgl_list_lock); + list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list); start_sglq = sglq; while (!found) { if (!sglq) - return NULL; - if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_lxritag)) { + break; + if (ndlp && ndlp->active_rrqs_xri_bitmap && + test_bit(sglq->sli4_lxritag, + ndlp->active_rrqs_xri_bitmap)) { /* This xri has an rrq outstanding for this DID. * put it back in the list and get another xri. */ - list_add_tail(&sglq->list, lpfc_sgl_list); + list_add_tail(&sglq->list, lpfc_els_sgl_list); sglq = NULL; - list_remove_head(lpfc_sgl_list, sglq, + list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list); if (sglq == start_sglq) { sglq = NULL; @@ -962,6 +975,35 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq; sglq->state = SGL_ALLOCATED; } + spin_unlock(&phba->sli4_hba.sgl_list_lock); + return sglq; +} + +/** + * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool + * @phba: Pointer to HBA context object. + * @piocb: Pointer to the iocbq. + * + * This function is called with the sgl_list lock held. This function + * gets a new driver sglq object from the sglq list. If the + * list is not empty then it is successful, it returns pointer to the newly + * allocated sglq object else it returns NULL. + **/ +struct lpfc_sglq * +__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) +{ + struct list_head *lpfc_nvmet_sgl_list; + struct lpfc_sglq *sglq = NULL; + + lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list; + + lockdep_assert_held(&phba->sli4_hba.sgl_list_lock); + + list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list); + if (!sglq) + return NULL; + phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq; + sglq->state = SGL_ALLOCATED; return sglq; } @@ -1002,7 +1044,7 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba) * this IO was aborted then the sglq entry it put on the * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. 
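__lpfc_sli_get_els_sglq() above walks the ELS sgl free list under the new sgl_list_lock, skips any XRI whose bit is still set in the node's active_rrqs_xri_bitmap by rotating that entry to the tail, and gives up once it wraps back to the first candidate. The loop below is a stand-alone sketch of that rotate-and-retry allocation, with a flat array and a plain bitmap standing in for the driver's list and per-node RRQ bitmap:

#include <stdio.h>
#include <string.h>

#define NFREE 4

struct demo_pool {
	int free_xri[NFREE];      /* acts as the free list, index 0 is the head */
	int count;
	unsigned long rrq_bitmap; /* bit n set => XRI n has an RRQ outstanding */
};

static int pop_head(struct demo_pool *p)
{
	int xri = p->free_xri[0];

	memmove(&p->free_xri[0], &p->free_xri[1], (--p->count) * sizeof(int));
	return xri;
}

static void push_tail(struct demo_pool *p, int xri)
{
	p->free_xri[p->count++] = xri;
}

/* Returns an XRI with no RRQ outstanding, or -1 if none is usable. */
static int demo_get_sglq(struct demo_pool *p)
{
	int first, xri;

	if (!p->count)
		return -1;
	first = xri = pop_head(p);
	for (;;) {
		if (!(p->rrq_bitmap & (1UL << xri)))
			return xri;           /* usable entry found */
		push_tail(p, xri);            /* still busy: rotate to the back */
		xri = pop_head(p);
		if (xri == first) {
			push_tail(p, xri);    /* full pass, nothing usable */
			return -1;
		}
	}
}

int main(void)
{
	struct demo_pool p = { .free_xri = { 5, 6, 7, 8 }, .count = NFREE,
			       .rrq_bitmap = (1UL << 5) | (1UL << 6) };

	printf("allocated XRI %d\n", demo_get_sglq(&p)); /* prints 7 */
	return 0;
}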
If the * IO has good status or fails for any other reason then the sglq - * entry is added to the free list (lpfc_sgl_list). + * entry is added to the free list (lpfc_els_sgl_list). **/ static void __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) @@ -1010,7 +1052,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) struct lpfc_sglq *sglq; size_t start_clean = offsetof(struct lpfc_iocbq, iocb); unsigned long iflag = 0; - struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; + struct lpfc_sli_ring *pring; lockdep_assert_held(&phba->hbalock); @@ -1021,21 +1063,36 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) if (sglq) { + if (iocbq->iocb_flag & LPFC_IO_NVMET) { + spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, + iflag); + sglq->state = SGL_FREED; + sglq->ndlp = NULL; + list_add_tail(&sglq->list, + &phba->sli4_hba.lpfc_nvmet_sgl_list); + spin_unlock_irqrestore( + &phba->sli4_hba.sgl_list_lock, iflag); + goto out; + } + + pring = phba->sli4_hba.els_wq->pring; if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) && (sglq->state != SGL_XRI_ABORTED)) { - spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, - iflag); + spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, + iflag); list_add(&sglq->list, - &phba->sli4_hba.lpfc_abts_els_sgl_list); + &phba->sli4_hba.lpfc_abts_els_sgl_list); spin_unlock_irqrestore( - &phba->sli4_hba.abts_sgl_list_lock, iflag); + &phba->sli4_hba.sgl_list_lock, iflag); } else { - spin_lock_irqsave(&pring->ring_lock, iflag); + spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, + iflag); sglq->state = SGL_FREED; sglq->ndlp = NULL; list_add_tail(&sglq->list, - &phba->sli4_hba.lpfc_sgl_list); - spin_unlock_irqrestore(&pring->ring_lock, iflag); + &phba->sli4_hba.lpfc_els_sgl_list); + spin_unlock_irqrestore( + &phba->sli4_hba.sgl_list_lock, iflag); /* Check if TXQ queue needs to be serviced */ if (!list_empty(&pring->txq)) @@ -1043,13 +1100,15 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) } } - +out: /* * Clean all volatile data fields, preserve iotag and node struct. 
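The release path that follows recycles the iocbq by zeroing only its volatile tail: everything from the iocb member onward is cleared in one memset() starting at offsetof(), so the list linkage and iotag at the front survive reuse. A small self-contained illustration of that idiom, with a simplified demo_iocb layout rather than the real lpfc_iocbq:

#include <stddef.h>
#include <string.h>
#include <stdio.h>

struct demo_iocb {
	struct demo_iocb *next;  /* preserved: free-list linkage */
	unsigned int iotag;      /* preserved: identity survives reuse */
	unsigned int cmd;        /* everything from here on is volatile ... */
	unsigned int flags;
	char payload[32];        /* ... and must be zeroed before reuse */
};

static void demo_iocb_recycle(struct demo_iocb *iocb)
{
	size_t start_clean = offsetof(struct demo_iocb, cmd);

	memset((char *)iocb + start_clean, 0, sizeof(*iocb) - start_clean);
}

int main(void)
{
	struct demo_iocb iocb = { .iotag = 42, .cmd = 7, .flags = 3 };

	demo_iocb_recycle(&iocb);
	printf("iotag=%u cmd=%u flags=%u\n", iocb.iotag, iocb.cmd, iocb.flags);
	return 0;
}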
*/ memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); iocbq->sli4_lxritag = NO_XRI; iocbq->sli4_xritag = NO_XRI; + iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | + LPFC_IO_NVME_LS); list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); } @@ -1639,7 +1698,7 @@ lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) if (lpfc_is_link_up(phba) && (!list_empty(&pring->txq)) && - (pring->ringno != phba->sli.fcp_ring || + (pring->ringno != LPFC_FCP_RING || phba->sli.sli_flag & LPFC_PROCESS_LA)) { while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && @@ -1718,7 +1777,6 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) struct hbq_dmabuf *hbq_buf; unsigned long flags; int i, hbq_count; - uint32_t hbqno; hbq_count = lpfc_sli_hbq_count(); /* Return all memory used by all HBQs */ @@ -1732,24 +1790,6 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) } phba->hbqs[i].buffer_count = 0; } - /* Return all HBQ buffer that are in-fly */ - list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list, - list) { - hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); - list_del(&hbq_buf->dbuf.list); - if (hbq_buf->tag == -1) { - (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) - (phba, hbq_buf); - } else { - hbqno = hbq_buf->tag >> 16; - if (hbqno >= LPFC_MAX_HBQS) - (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) - (phba, hbq_buf); - else - (phba->hbqs[hbqno].hbq_free_buffer)(phba, - hbq_buf); - } - } /* Mark the HBQs not in use */ phba->hbq_in_use = 0; @@ -1802,7 +1842,7 @@ lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno, hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr)); - hbqe->bde.tus.f.bdeSize = hbq_buf->size; + hbqe->bde.tus.f.bdeSize = hbq_buf->total_size; hbqe->bde.tus.f.bdeFlags = 0; hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w); hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag); @@ -1834,17 +1874,23 @@ lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno, int rc; struct lpfc_rqe hrqe; struct lpfc_rqe drqe; + struct lpfc_queue *hrq; + struct lpfc_queue *drq; + + if (hbqno != LPFC_ELS_HBQ) + return 1; + hrq = phba->sli4_hba.hdr_rq; + drq = phba->sli4_hba.dat_rq; lockdep_assert_held(&phba->hbalock); hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys); hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys); drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys); drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys); - rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, - &hrqe, &drqe); + rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe); if (rc < 0) return rc; - hbq_buf->tag = rc; + hbq_buf->tag = (rc | (hbqno << 16)); list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list); return 0; } @@ -1861,22 +1907,9 @@ static struct lpfc_hbq_init lpfc_els_hbq = { .add_count = 40, }; -/* HBQ for the extra ring if needed */ -static struct lpfc_hbq_init lpfc_extra_hbq = { - .rn = 1, - .entry_count = 200, - .mask_count = 0, - .profile = 0, - .ring_mask = (1 << LPFC_EXTRA_RING), - .buffer_count = 0, - .init_count = 0, - .add_count = 5, -}; - /* Array of HBQs */ struct lpfc_hbq_init *lpfc_hbq_defs[] = { &lpfc_els_hbq, - &lpfc_extra_hbq, }; /** @@ -1997,6 +2030,29 @@ lpfc_sli_hbqbuf_get(struct list_head *rb_list) return container_of(d_buf, struct hbq_dmabuf, dbuf); } +/** + * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list + * @phba: Pointer to HBA context object. + * @hbqno: HBQ number. 
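The buffer tag assembled a few lines up, hbq_buf->tag = (rc | (hbqno << 16)), packs two values into one word: the index returned by the RQ put in the low half and the HBQ number in the high half, which the free path can recover later with tag >> 16. A tiny stand-alone sketch of that encoding (function and field names here are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

static uint32_t pack_tag(uint32_t rq_index, uint32_t hbqno)
{
	return rq_index | (hbqno << 16);
}

static uint32_t tag_to_hbqno(uint32_t tag)
{
	return tag >> 16;
}

static uint32_t tag_to_index(uint32_t tag)
{
	return tag & 0xffff;
}

int main(void)
{
	uint32_t tag = pack_tag(37, 1);

	printf("tag=0x%08x hbq=%u index=%u\n",
	       tag, tag_to_hbqno(tag), tag_to_index(tag));
	return 0;
}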
+ * + * This function removes the first RQ buffer on an RQ buffer list and returns a + * pointer to that buffer. If it finds no buffers on the list it returns NULL. + **/ +static struct rqb_dmabuf * +lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq) +{ + struct lpfc_dmabuf *h_buf; + struct lpfc_rqb *rqbp; + + rqbp = hrq->rqbp; + list_remove_head(&rqbp->rqb_buffer_list, h_buf, + struct lpfc_dmabuf, list); + if (!h_buf) + return NULL; + rqbp->buffer_count--; + return container_of(h_buf, struct rqb_dmabuf, hbuf); +} + /** * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag * @phba: Pointer to HBA context object. @@ -2463,6 +2519,14 @@ lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, { int i; + switch (fch_type) { + case FC_TYPE_NVME: + lpfc_nvmet_unsol_ls_event(phba, pring, saveq); + return 1; + default: + break; + } + /* unSolicited Responses */ if (pring->prt[0].profile) { if (pring->prt[0].lpfc_sli_rcv_unsol_event) @@ -2713,7 +2777,7 @@ static struct lpfc_iocbq * lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, uint16_t iotag) { - struct lpfc_iocbq *cmd_iocb; + struct lpfc_iocbq *cmd_iocb = NULL; lockdep_assert_held(&phba->hbalock); if (iotag != 0 && iotag <= phba->sli.last_iotag) { @@ -2727,8 +2791,10 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, } lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0372 iotag x%x is out of range: max iotag (x%x)\n", - iotag, phba->sli.last_iotag); + "0372 iotag x%x lookup error: max iotag (x%x) " + "iocb_flag x%x\n", + iotag, phba->sli.last_iotag, + cmd_iocb ? cmd_iocb->iocb_flag : 0xffff); return NULL; } @@ -3597,6 +3663,33 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) IOERR_SLI_ABORTED); } +/** + * lpfc_sli_abort_wqe_ring - Abort all iocbs in the ring + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * + * This function aborts all iocbs in the given ring and frees all the iocb + * objects in txq. This function issues an abort iocb for all the iocb commands + * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before + * the return of this function. The caller is not required to hold any locks. + **/ +void +lpfc_sli_abort_wqe_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) +{ + LIST_HEAD(completions); + struct lpfc_iocbq *iocb, *next_iocb; + + if (pring->ringno == LPFC_ELS_RING) + lpfc_fabric_abort_hba(phba); + + spin_lock_irq(&phba->hbalock); + /* Next issue ABTS for everything on the txcmplq */ + list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) + lpfc_sli4_abort_nvme_io(phba, pring, iocb); + spin_unlock_irq(&phba->hbalock); +} + + /** * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings * @phba: Pointer to HBA context object. @@ -3617,15 +3710,40 @@ lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba) /* Look on all the FCP Rings for the iotag */ if (phba->sli_rev >= LPFC_SLI_REV4) { for (i = 0; i < phba->cfg_fcp_io_channel; i++) { - pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS]; + pring = phba->sli4_hba.fcp_wq[i]->pring; lpfc_sli_abort_iocb_ring(phba, pring); } } else { - pring = &psli->ring[psli->fcp_ring]; + pring = &psli->sli3_ring[LPFC_FCP_RING]; lpfc_sli_abort_iocb_ring(phba, pring); } } +/** + * lpfc_sli_abort_nvme_rings - Abort all wqes in all NVME rings + * @phba: Pointer to HBA context object. + * + * This function aborts all wqes in NVME rings. 
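lpfc_sli_iocbq_lookup_by_tag() above now also reports the command's iocb_flag when a tag cannot be resolved; the visible range check means a tag is only looked up when it lies in 1..last_iotag. The snippet below is a simplified, assumed model of such a bounds-checked tag-to-command table, not the driver's actual lookup structure or locking:

#include <stdio.h>

struct demo_cmd {
	unsigned int iotag;
	const char *name;
};

struct demo_table {
	struct demo_cmd **slots;   /* index == iotag */
	unsigned int last_iotag;
};

static struct demo_cmd *demo_lookup_by_tag(struct demo_table *t,
					    unsigned int iotag)
{
	if (iotag != 0 && iotag <= t->last_iotag)
		return t->slots[iotag];        /* may still be NULL if freed */
	fprintf(stderr, "iotag %u out of range (max %u)\n",
		iotag, t->last_iotag);
	return NULL;
}

int main(void)
{
	struct demo_cmd cmd = { .iotag = 2, .name = "read" };
	struct demo_cmd *slots[4] = { NULL, NULL, &cmd, NULL };
	struct demo_table t = { .slots = slots, .last_iotag = 3 };
	struct demo_cmd *found = demo_lookup_by_tag(&t, 2);

	printf("found: %s\n", found ? found->name : "(none)");
	return 0;
}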
This function issues an + * abort wqe for all the outstanding IO commands in txcmplq. The iocbs in + * the txcmplq is not guaranteed to complete before the return of this + * function. The caller is not required to hold any locks. + **/ +void +lpfc_sli_abort_nvme_rings(struct lpfc_hba *phba) +{ + struct lpfc_sli_ring *pring; + uint32_t i; + + if (phba->sli_rev < LPFC_SLI_REV4) + return; + + /* Abort all IO on each NVME ring. */ + for (i = 0; i < phba->cfg_nvme_io_channel; i++) { + pring = phba->sli4_hba.nvme_wq[i]->pring; + lpfc_sli_abort_wqe_ring(phba, pring); + } +} + /** * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring @@ -3654,7 +3772,7 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) /* Look on all the FCP Rings for the iotag */ if (phba->sli_rev >= LPFC_SLI_REV4) { for (i = 0; i < phba->cfg_fcp_io_channel; i++) { - pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS]; + pring = phba->sli4_hba.fcp_wq[i]->pring; spin_lock_irq(&pring->ring_lock); /* Retrieve everything on txq */ @@ -3675,7 +3793,7 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) IOERR_SLI_DOWN); } } else { - pring = &psli->ring[psli->fcp_ring]; + pring = &psli->sli3_ring[LPFC_FCP_RING]; spin_lock_irq(&phba->hbalock); /* Retrieve everything on txq */ @@ -3695,6 +3813,51 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) } } +/** + * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings + * @phba: Pointer to HBA context object. + * + * This function flushes all wqes in the nvme rings and frees all resources + * in the txcmplq. This function does not issue abort wqes for the IO + * commands in txcmplq, they will just be returned with + * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI + * slot has been permanently disabled. + **/ +void +lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba) +{ + LIST_HEAD(txcmplq); + struct lpfc_sli_ring *pring; + uint32_t i; + + if (phba->sli_rev < LPFC_SLI_REV4) + return; + + /* Hint to other driver operations that a flush is in progress. */ + spin_lock_irq(&phba->hbalock); + phba->hba_flag |= HBA_NVME_IOQ_FLUSH; + spin_unlock_irq(&phba->hbalock); + + /* Cycle through all NVME rings and complete each IO with + * a local driver reason code. This is a flush so no + * abort exchange to FW. + */ + for (i = 0; i < phba->cfg_nvme_io_channel; i++) { + pring = phba->sli4_hba.nvme_wq[i]->pring; + + /* Retrieve everything on the txcmplq */ + spin_lock_irq(&pring->ring_lock); + list_splice_init(&pring->txcmplq, &txcmplq); + pring->txcmplq_cnt = 0; + spin_unlock_irq(&pring->ring_lock); + + /* Flush the txcmpq &&&PAE */ + lpfc_sli_cancel_iocbs(phba, &txcmplq, + IOSTAT_LOCAL_REJECT, + IOERR_SLI_DOWN); + } +} + /** * lpfc_sli_brdready_s3 - Check for sli3 host ready status * @phba: Pointer to HBA context object. 
@@ -4069,7 +4232,7 @@ lpfc_sli_brdreset(struct lpfc_hba *phba) /* Initialize relevant SLI info */ for (i = 0; i < psli->num_rings; i++) { - pring = &psli->ring[i]; + pring = &psli->sli3_ring[i]; pring->flag = 0; pring->sli.sli3.rspidx = 0; pring->sli.sli3.next_cmdidx = 0; @@ -4498,10 +4661,11 @@ static int lpfc_sli4_rb_setup(struct lpfc_hba *phba) { phba->hbq_in_use = 1; - phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count; + phba->hbqs[LPFC_ELS_HBQ].entry_count = + lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count; phba->hbq_count = 1; + lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ); /* Initially populate or replenish the HBQs */ - lpfc_sli_hbqbuf_init_hbqs(phba, 0); return 0; } @@ -5107,26 +5271,38 @@ lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba) static void lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) { - int fcp_eqidx; + int qidx; lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM); lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM); - fcp_eqidx = 0; - if (phba->sli4_hba.fcp_cq) { - do { - lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx], - LPFC_QUEUE_REARM); - } while (++fcp_eqidx < phba->cfg_fcp_io_channel); - } + if (phba->sli4_hba.nvmels_cq) + lpfc_sli4_cq_release(phba->sli4_hba.nvmels_cq, + LPFC_QUEUE_REARM); + + if (phba->sli4_hba.fcp_cq) + for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) + lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[qidx], + LPFC_QUEUE_REARM); + + if (phba->sli4_hba.nvme_cq) + for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) + lpfc_sli4_cq_release(phba->sli4_hba.nvme_cq[qidx], + LPFC_QUEUE_REARM); if (phba->cfg_fof) lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM); - if (phba->sli4_hba.hba_eq) { - for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; - fcp_eqidx++) - lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx], - LPFC_QUEUE_REARM); + if (phba->sli4_hba.hba_eq) + for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) + lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[qidx], + LPFC_QUEUE_REARM); + + if (phba->nvmet_support) { + for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) { + lpfc_sli4_cq_release( + phba->sli4_hba.nvmet_cqset[qidx], + LPFC_QUEUE_REARM); + } } if (phba->cfg_fof) @@ -5560,9 +5736,13 @@ lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type) rsrc_blks->rsrc_size = rsrc_size; list_add_tail(&rsrc_blks->list, ext_blk_list); rsrc_start = rsrc_id; - if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) + if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) { phba->sli4_hba.scsi_xri_start = rsrc_start + - lpfc_sli4_get_els_iocb_cnt(phba); + lpfc_sli4_get_iocb_cnt(phba); + phba->sli4_hba.nvme_xri_start = + phba->sli4_hba.scsi_xri_start + + phba->sli4_hba.scsi_xri_max; + } while (rsrc_id < (rsrc_start + rsrc_size)) { ids[j] = rsrc_id; @@ -5578,6 +5758,8 @@ lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type) return rc; } + + /** * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent. * @phba: Pointer to HBA context object. @@ -6156,42 +6338,45 @@ lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type, } /** - * lpfc_sli4_repost_els_sgl_list - Repsot the els buffers sgl pages as block + * lpfc_sli4_repost_sgl_list - Repsot the buffers sgl pages as block * @phba: pointer to lpfc hba data structure. + * @pring: Pointer to driver SLI ring object. 
+ * @sgl_list: linked link of sgl buffers to post + * @cnt: number of linked list buffers * - * This routine walks the list of els buffers that have been allocated and + * This routine walks the list of buffers that have been allocated and * repost them to the port by using SGL block post. This is needed after a * pci_function_reset/warm_start or start. It attempts to construct blocks - * of els buffer sgls which contains contiguous xris and uses the non-embedded - * SGL block post mailbox commands to post them to the port. For single els + * of buffer sgls which contains contiguous xris and uses the non-embedded + * SGL block post mailbox commands to post them to the port. For single * buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post * mailbox command for posting. * * Returns: 0 = success, non-zero failure. **/ static int -lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) +lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba, + struct list_head *sgl_list, int cnt) { struct lpfc_sglq *sglq_entry = NULL; struct lpfc_sglq *sglq_entry_next = NULL; struct lpfc_sglq *sglq_entry_first = NULL; - int status, total_cnt, post_cnt = 0, num_posted = 0, block_cnt = 0; + int status, total_cnt; + int post_cnt = 0, num_posted = 0, block_cnt = 0; int last_xritag = NO_XRI; - struct lpfc_sli_ring *pring; LIST_HEAD(prep_sgl_list); LIST_HEAD(blck_sgl_list); LIST_HEAD(allc_sgl_list); LIST_HEAD(post_sgl_list); LIST_HEAD(free_sgl_list); - pring = &phba->sli.ring[LPFC_ELS_RING]; spin_lock_irq(&phba->hbalock); - spin_lock(&pring->ring_lock); - list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list); - spin_unlock(&pring->ring_lock); + spin_lock(&phba->sli4_hba.sgl_list_lock); + list_splice_init(sgl_list, &allc_sgl_list); + spin_unlock(&phba->sli4_hba.sgl_list_lock); spin_unlock_irq(&phba->hbalock); - total_cnt = phba->sli4_hba.els_xri_cnt; + total_cnt = cnt; list_for_each_entry_safe(sglq_entry, sglq_entry_next, &allc_sgl_list, list) { list_del_init(&sglq_entry->list); @@ -6220,8 +6405,8 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) /* keep track of last sgl's xritag */ last_xritag = sglq_entry->sli4_xritag; - /* end of repost sgl list condition for els buffers */ - if (num_posted == phba->sli4_hba.els_xri_cnt) { + /* end of repost sgl list condition for buffers */ + if (num_posted == total_cnt) { if (post_cnt == 0) { list_splice_init(&prep_sgl_list, &blck_sgl_list); @@ -6238,7 +6423,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) /* Failure, put sgl to free list */ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, - "3159 Failed to post els " + "3159 Failed to post " "sgl, xritag:x%x\n", sglq_entry->sli4_xritag); list_add_tail(&sglq_entry->list, @@ -6252,9 +6437,9 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) if (post_cnt == 0) continue; - /* post the els buffer list sgls as a block */ - status = lpfc_sli4_post_els_sgl_list(phba, &blck_sgl_list, - post_cnt); + /* post the buffer list sgls as a block */ + status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list, + post_cnt); if (!status) { /* success, put sgl list to posted sgl list */ @@ -6265,7 +6450,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) struct lpfc_sglq, list); lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, - "3160 Failed to post els sgl-list, " + "3160 Failed to post sgl-list, " "xritag:x%x-x%x\n", sglq_entry_first->sli4_xritag, (sglq_entry_first->sli4_xritag + @@ -6278,29 +6463,28 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) if (block_cnt == 0) last_xritag = NO_XRI; - /* reset 
els sgl post count for next round of posting */ + /* reset sgl post count for next round of posting */ post_cnt = 0; } - /* update the number of XRIs posted for ELS */ - phba->sli4_hba.els_xri_cnt = total_cnt; - /* free the els sgls failed to post */ + /* free the sgls failed to post */ lpfc_free_sgl_list(phba, &free_sgl_list); - /* push els sgls posted to the availble list */ + /* push sgls posted to the available list */ if (!list_empty(&post_sgl_list)) { spin_lock_irq(&phba->hbalock); - spin_lock(&pring->ring_lock); - list_splice_init(&post_sgl_list, - &phba->sli4_hba.lpfc_sgl_list); - spin_unlock(&pring->ring_lock); + spin_lock(&phba->sli4_hba.sgl_list_lock); + list_splice_init(&post_sgl_list, sgl_list); + spin_unlock(&phba->sli4_hba.sgl_list_lock); spin_unlock_irq(&phba->hbalock); } else { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "3161 Failure to post els sgl to port.\n"); + "3161 Failure to post sgl to port.\n"); return -EIO; } - return 0; + + /* return the number of XRIs actually posted */ + return total_cnt; } void @@ -6335,7 +6519,7 @@ lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) int lpfc_sli4_hba_setup(struct lpfc_hba *phba) { - int rc; + int rc, i; LPFC_MBOXQ_t *mboxq; struct lpfc_mqe *mqe; uint8_t *vpd; @@ -6344,6 +6528,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); struct lpfc_vport *vport = phba->pport; struct lpfc_dmabuf *mp; + struct lpfc_rqb *rqbp; /* Perform a PCI function reset to start from clean */ rc = lpfc_pci_function_reset(phba); @@ -6622,35 +6807,141 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); - /* update host els and scsi xri-sgl sizes and mappings */ - rc = lpfc_sli4_xri_sgl_update(phba); + /* Create all the SLI4 queues */ + rc = lpfc_sli4_queue_create(phba); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3089 Failed to allocate queues\n"); + rc = -ENODEV; + goto out_free_mbox; + } + /* Set up all the queues to the device */ + rc = lpfc_sli4_queue_setup(phba); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "0381 Error %d during queue setup.\n ", rc); + goto out_stop_timers; + } + /* Initialize the driver internal SLI layer lists. 
*/ + lpfc_sli4_setup(phba); + lpfc_sli4_queue_init(phba); + + /* update host els xri-sgl sizes and mappings */ + rc = lpfc_sli4_els_sgl_update(phba); if (unlikely(rc)) { lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, "1400 Failed to update xri-sgl size and " "mapping: %d\n", rc); - goto out_free_mbox; + goto out_destroy_queue; } /* register the els sgl pool to the port */ - rc = lpfc_sli4_repost_els_sgl_list(phba); - if (unlikely(rc)) { + rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list, + phba->sli4_hba.els_xri_cnt); + if (unlikely(rc < 0)) { lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, "0582 Error %d during els sgl post " "operation\n", rc); rc = -ENODEV; - goto out_free_mbox; + goto out_destroy_queue; } + phba->sli4_hba.els_xri_cnt = rc; - /* register the allocated scsi sgl pool to the port */ - rc = lpfc_sli4_repost_scsi_sgl_list(phba); - if (unlikely(rc)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "0383 Error %d during scsi sgl post " - "operation\n", rc); - /* Some Scsi buffers were moved to the abort scsi list */ - /* A pci function reset will repost them */ - rc = -ENODEV; - goto out_free_mbox; + if (phba->nvmet_support) { + /* update host nvmet xri-sgl sizes and mappings */ + rc = lpfc_sli4_nvmet_sgl_update(phba); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "6308 Failed to update nvmet-sgl size " + "and mapping: %d\n", rc); + goto out_destroy_queue; + } + + /* register the nvmet sgl pool to the port */ + rc = lpfc_sli4_repost_sgl_list( + phba, + &phba->sli4_hba.lpfc_nvmet_sgl_list, + phba->sli4_hba.nvmet_xri_cnt); + if (unlikely(rc < 0)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "3117 Error %d during nvmet " + "sgl post\n", rc); + rc = -ENODEV; + goto out_destroy_queue; + } + phba->sli4_hba.nvmet_xri_cnt = rc; + lpfc_nvmet_create_targetport(phba); + } else { + /* update host scsi xri-sgl sizes and mappings */ + rc = lpfc_sli4_scsi_sgl_update(phba); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "6309 Failed to update scsi-sgl size " + "and mapping: %d\n", rc); + goto out_destroy_queue; + } + + /* update host nvme xri-sgl sizes and mappings */ + rc = lpfc_sli4_nvme_sgl_update(phba); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "6082 Failed to update nvme-sgl size " + "and mapping: %d\n", rc); + goto out_destroy_queue; + } + } + + if (phba->nvmet_support && phba->cfg_nvmet_mrq) { + + /* Post initial buffers to all RQs created */ + for (i = 0; i < phba->cfg_nvmet_mrq; i++) { + rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp; + INIT_LIST_HEAD(&rqbp->rqb_buffer_list); + rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc; + rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free; + rqbp->entry_count = 256; + rqbp->buffer_count = 0; + + /* Divide by 4 and round down to multiple of 16 */ + rc = (phba->cfg_nvmet_mrq_post >> 2) & 0xfff8; + phba->sli4_hba.nvmet_mrq_hdr[i]->entry_repost = rc; + phba->sli4_hba.nvmet_mrq_data[i]->entry_repost = rc; + + lpfc_post_rq_buffer( + phba, phba->sli4_hba.nvmet_mrq_hdr[i], + phba->sli4_hba.nvmet_mrq_data[i], + phba->cfg_nvmet_mrq_post); + } + } + + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { + /* register the allocated scsi sgl pool to the port */ + rc = lpfc_sli4_repost_scsi_sgl_list(phba); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "0383 Error %d during scsi sgl post " + "operation\n", rc); + /* Some Scsi buffers were moved to abort scsi list */ + /* A pci function 
reset will repost them */ + rc = -ENODEV; + goto out_destroy_queue; + } + } + + if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && + (phba->nvmet_support == 0)) { + + /* register the allocated nvme sgl pool to the port */ + rc = lpfc_repost_nvme_sgl_list(phba); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "6116 Error %d during nvme sgl post " + "operation\n", rc); + /* Some NVME buffers were moved to abort nvme list */ + /* A pci function reset will repost them */ + rc = -ENODEV; + goto out_destroy_queue; + } } /* Post the rpi header region to the device. */ @@ -6660,33 +6951,55 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) "0393 Error %d during rpi post operation\n", rc); rc = -ENODEV; - goto out_free_mbox; - } - lpfc_sli4_node_prep(phba); - - /* Create all the SLI4 queues */ - rc = lpfc_sli4_queue_create(phba); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3089 Failed to allocate queues\n"); - rc = -ENODEV; - goto out_stop_timers; - } - /* Set up all the queues to the device */ - rc = lpfc_sli4_queue_setup(phba); - if (unlikely(rc)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "0381 Error %d during queue setup.\n ", rc); goto out_destroy_queue; } + lpfc_sli4_node_prep(phba); - /* Arm the CQs and then EQs on device */ - lpfc_sli4_arm_cqeq_intr(phba); + if (!(phba->hba_flag & HBA_FCOE_MODE)) { + if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) { + /* + * The FC Port needs to register FCFI (index 0) + */ + lpfc_reg_fcfi(phba, mboxq); + mboxq->vport = phba->pport; + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (rc != MBX_SUCCESS) + goto out_unset_queue; + rc = 0; + phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, + &mboxq->u.mqe.un.reg_fcfi); + } else { + /* We are a NVME Target mode with MRQ > 1 */ - /* Indicate device interrupt mode */ - phba->sli4_hba.intr_enable = 1; + /* First register the FCFI */ + lpfc_reg_fcfi_mrq(phba, mboxq, 0); + mboxq->vport = phba->pport; + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (rc != MBX_SUCCESS) + goto out_unset_queue; + rc = 0; + phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi, + &mboxq->u.mqe.un.reg_fcfi_mrq); - /* Allow asynchronous mailbox command to go through */ + /* Next register the MRQs */ + lpfc_reg_fcfi_mrq(phba, mboxq, 1); + mboxq->vport = phba->pport; + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (rc != MBX_SUCCESS) + goto out_unset_queue; + rc = 0; + } + /* Check if the port is configured to be disabled */ + lpfc_sli_read_link_ste(phba); + } + + /* Arm the CQs and then EQs on device */ + lpfc_sli4_arm_cqeq_intr(phba); + + /* Indicate device interrupt mode */ + phba->sli4_hba.intr_enable = 1; + + /* Allow asynchronous mailbox command to go through */ spin_lock_irq(&phba->hbalock); phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; spin_unlock_irq(&phba->hbalock); @@ -6731,23 +7044,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) rc = 0; } - if (!(phba->hba_flag & HBA_FCOE_MODE)) { - /* - * The FC Port needs to register FCFI (index 0) - */ - lpfc_reg_fcfi(phba, mboxq); - mboxq->vport = phba->pport; - rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); - if (rc != MBX_SUCCESS) - goto out_unset_queue; - rc = 0; - phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, - &mboxq->u.mqe.un.reg_fcfi); - - /* Check if the port is configured to be disabled */ - lpfc_sli_read_link_ste(phba); - } - /* * The port is ready, set the host's link state to LINK_DOWN * in preparation for link interrupts. 
@@ -6884,7 +7180,7 @@ lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba) /* Find the eq associated with the mcq */ if (phba->sli4_hba.hba_eq) - for (eqidx = 0; eqidx < phba->cfg_fcp_io_channel; eqidx++) + for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++) if (phba->sli4_hba.hba_eq[eqidx]->queue_id == phba->sli4_hba.mbx_cq->assoc_qid) { fpeq = phba->sli4_hba.hba_eq[eqidx]; @@ -7243,16 +7539,15 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, = MAILBOX_HBA_EXT_OFFSET; /* Copy the mailbox extension data */ - if (pmbox->in_ext_byte_len && pmbox->context2) { + if (pmbox->in_ext_byte_len && pmbox->context2) lpfc_memcpy_to_slim(phba->MBslimaddr + MAILBOX_HBA_EXT_OFFSET, pmbox->context2, pmbox->in_ext_byte_len); - } - if (mbx->mbxCommand == MBX_CONFIG_PORT) { + if (mbx->mbxCommand == MBX_CONFIG_PORT) /* copy command data into host mbox for cmpl */ - lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE); - } + lpfc_sli_pcimem_bcopy(mbx, phba->mbox, + MAILBOX_CMD_SIZE); /* First copy mbox command data to HBA SLIM, skip past first word */ @@ -7266,10 +7561,9 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, writel(ldata, to_slim); readl(to_slim); /* flush */ - if (mbx->mbxCommand == MBX_CONFIG_PORT) { + if (mbx->mbxCommand == MBX_CONFIG_PORT) /* switch over to host mailbox */ psli->sli_flag |= LPFC_SLI_ACTIVE; - } } wmb(); @@ -7368,7 +7662,8 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, if (psli->sli_flag & LPFC_SLI_ACTIVE) { /* copy results back to user */ - lpfc_sli_pcimem_bcopy(phba->mbox, mbx, MAILBOX_CMD_SIZE); + lpfc_sli_pcimem_bcopy(phba->mbox, mbx, + MAILBOX_CMD_SIZE); /* Copy the mailbox extension data */ if (pmbox->out_ext_byte_len && pmbox->context2) { lpfc_sli_pcimem_bcopy(phba->mbox_ext, @@ -7378,7 +7673,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, } else { /* First copy command data */ lpfc_memcpy_from_slim(mbx, phba->MBslimaddr, - MAILBOX_CMD_SIZE); + MAILBOX_CMD_SIZE); /* Copy the mailbox extension data */ if (pmbox->out_ext_byte_len && pmbox->context2) { lpfc_memcpy_from_slim(pmbox->context2, @@ -8059,7 +8354,7 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, { struct lpfc_iocbq *nextiocb; IOCB_t *iocb; - struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; + struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number]; lockdep_assert_held(&phba->hbalock); @@ -8133,7 +8428,7 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, * For FCP commands, we must be in a state where we can process link * attention events. 
*/ - } else if (unlikely(pring->ringno == phba->sli.fcp_ring && + } else if (unlikely(pring->ringno == LPFC_FCP_RING && !(phba->sli.sli_flag & LPFC_PROCESS_LA))) { goto iocb_busy; } @@ -8870,9 +9165,21 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, union lpfc_wqe *wqe; union lpfc_wqe128 wqe128; struct lpfc_queue *wq; - struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; + struct lpfc_sli_ring *pring; - lockdep_assert_held(&phba->hbalock); + /* Get the WQ */ + if ((piocb->iocb_flag & LPFC_IO_FCP) || + (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { + if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS))) + wq = phba->sli4_hba.fcp_wq[piocb->hba_wqidx]; + else + wq = phba->sli4_hba.oas_wq; + } else { + wq = phba->sli4_hba.els_wq; + } + + /* Get corresponding ring */ + pring = wq->pring; /* * The WQE can be either 64 or 128 bytes, @@ -8880,6 +9187,8 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, */ wqe = (union lpfc_wqe *)&wqe128; + lockdep_assert_held(&phba->hbalock); + if (piocb->sli4_xritag == NO_XRI) { if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) @@ -8894,7 +9203,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, return IOCB_BUSY; } } else { - sglq = __lpfc_sli_get_sglq(phba, piocb); + sglq = __lpfc_sli_get_els_sglq(phba, piocb); if (!sglq) { if (!(flag & SLI_IOCB_RET_IOCB)) { __lpfc_sli_ringtx_put(phba, @@ -8906,10 +9215,10 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, } } } - } else if (piocb->iocb_flag & LPFC_IO_FCP) { + } else if (piocb->iocb_flag & LPFC_IO_FCP) /* These IO's already have an XRI and a mapped sgl. */ sglq = NULL; - } else { + else { /* * This is a continuation of a commandi,(CX) so this * sglq is on the active list @@ -8929,21 +9238,8 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, if (lpfc_sli4_iocb2wqe(phba, piocb, wqe)) return IOCB_ERROR; - if ((piocb->iocb_flag & LPFC_IO_FCP) || - (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { - if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS))) { - wq = phba->sli4_hba.fcp_wq[piocb->fcp_wqidx]; - } else { - wq = phba->sli4_hba.oas_wq; - } - if (lpfc_sli4_wq_put(wq, wqe)) - return IOCB_ERROR; - } else { - if (unlikely(!phba->sli4_hba.els_wq)) - return IOCB_ERROR; - if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, wqe)) - return IOCB_ERROR; - } + if (lpfc_sli4_wq_put(wq, wqe)) + return IOCB_ERROR; lpfc_sli_ringtxcmpl_put(phba, pring, piocb); return 0; @@ -9001,46 +9297,44 @@ lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) } /** - * lpfc_sli_calc_ring - Calculates which ring to use + * lpfc_sli4_calc_ring - Calculates which ring to use * @phba: Pointer to HBA context object. - * @ring_number: Initial ring * @piocb: Pointer to command iocb. * - * For SLI4, FCP IO can deferred to one fo many WQs, based on - * fcp_wqidx, thus we need to calculate the corresponding ring. + * For SLI4 only, FCP IO can deferred to one fo many WQs, based on + * hba_wqidx, thus we need to calculate the corresponding ring. * Since ABORTS must go on the same WQ of the command they are - * aborting, we use command's fcp_wqidx. + * aborting, we use command's hba_wqidx. 
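lpfc_sli4_calc_ring(), whose body follows, returns the SLI ring hanging off the work queue the command belongs to: FCP and abort traffic is steered by hba_wqidx into one of the per-channel FCP WQs, optimized-access (OAS/FOF) traffic uses the dedicated OAS WQ, and everything else falls back to the ELS WQ. A condensed stand-alone model of that selection rule, with simplified flags and structures rather than the lpfc ones:

#include <stdio.h>

#define DEMO_IO_FCP  0x1
#define DEMO_IO_OAS  0x2

struct demo_ring { const char *name; };

struct demo_hba {
	struct demo_ring fcp_ring[4];  /* one per io channel */
	struct demo_ring oas_ring;
	struct demo_ring els_ring;
};

struct demo_io {
	unsigned int flags;
	unsigned int wqidx;            /* which FCP queue this command used */
};

static struct demo_ring *demo_calc_ring(struct demo_hba *hba, struct demo_io *io)
{
	if (io->flags & DEMO_IO_FCP) {
		if (io->flags & DEMO_IO_OAS)
			return &hba->oas_ring;
		return &hba->fcp_ring[io->wqidx];
	}
	return &hba->els_ring;         /* slow path: ELS and friends */
}

int main(void)
{
	struct demo_hba hba = {
		.fcp_ring = { { "fcp0" }, { "fcp1" }, { "fcp2" }, { "fcp3" } },
		.oas_ring = { "oas" },
		.els_ring = { "els" },
	};
	struct demo_io io = { .flags = DEMO_IO_FCP, .wqidx = 2 };

	printf("ring: %s\n", demo_calc_ring(&hba, &io)->name);
	return 0;
}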
*/ -static int -lpfc_sli_calc_ring(struct lpfc_hba *phba, uint32_t ring_number, - struct lpfc_iocbq *piocb) +struct lpfc_sli_ring * +lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb) { - if (phba->sli_rev < LPFC_SLI_REV4) - return ring_number; - - if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) { + if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) { if (!(phba->cfg_fof) || - (!(piocb->iocb_flag & LPFC_IO_FOF))) { + (!(piocb->iocb_flag & LPFC_IO_FOF))) { if (unlikely(!phba->sli4_hba.fcp_wq)) - return LPFC_HBA_ERROR; + return NULL; /* - * for abort iocb fcp_wqidx should already + * for abort iocb hba_wqidx should already * be setup based on what work queue we used. */ if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) - piocb->fcp_wqidx = + piocb->hba_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba, piocb->context1); - ring_number = MAX_SLI3_CONFIGURED_RINGS + - piocb->fcp_wqidx; + return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring; } else { if (unlikely(!phba->sli4_hba.oas_wq)) - return LPFC_HBA_ERROR; - piocb->fcp_wqidx = 0; - ring_number = LPFC_FCP_OAS_RING; + return NULL; + piocb->hba_wqidx = 0; + return phba->sli4_hba.oas_wq->pring; } + } else { + if (unlikely(!phba->sli4_hba.els_wq)) + return NULL; + piocb->hba_wqidx = 0; + return phba->sli4_hba.els_wq->pring; } - return ring_number; } /** @@ -9060,7 +9354,7 @@ int lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, struct lpfc_iocbq *piocb, uint32_t flag) { - struct lpfc_fcp_eq_hdl *fcp_eq_hdl; + struct lpfc_hba_eq_hdl *hba_eq_hdl; struct lpfc_sli_ring *pring; struct lpfc_queue *fpeq; struct lpfc_eqe *eqe; @@ -9068,21 +9362,19 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, int rc, idx; if (phba->sli_rev == LPFC_SLI_REV4) { - ring_number = lpfc_sli_calc_ring(phba, ring_number, piocb); - if (unlikely(ring_number == LPFC_HBA_ERROR)) + pring = lpfc_sli4_calc_ring(phba, piocb); + if (unlikely(pring == NULL)) return IOCB_ERROR; - idx = piocb->fcp_wqidx; - pring = &phba->sli.ring[ring_number]; spin_lock_irqsave(&pring->ring_lock, iflags); rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); spin_unlock_irqrestore(&pring->ring_lock, iflags); if (lpfc_fcp_look_ahead && (piocb->iocb_flag & LPFC_IO_FCP)) { - fcp_eq_hdl = &phba->sli4_hba.fcp_eq_hdl[idx]; + idx = piocb->hba_wqidx; + hba_eq_hdl = &phba->sli4_hba.hba_eq_hdl[idx]; - if (atomic_dec_and_test(&fcp_eq_hdl-> - fcp_eq_in_use)) { + if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) { /* Get associated EQ with this index */ fpeq = phba->sli4_hba.hba_eq[idx]; @@ -9103,7 +9395,7 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM); } - atomic_inc(&fcp_eq_hdl->fcp_eq_in_use); + atomic_inc(&hba_eq_hdl->hba_eq_in_use); } } else { /* For now, SLI2/3 will still use hbalock */ @@ -9123,7 +9415,7 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, * only when driver needs to support target mode functionality * or IP over FC functionalities. * - * This function is called with no lock held. + * This function is called with no lock held. SLI3 only. 
**/ static int lpfc_extra_ring_setup( struct lpfc_hba *phba) @@ -9136,14 +9428,14 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba) /* Adjust cmd/rsp ring iocb entries more evenly */ /* Take some away from the FCP ring */ - pring = &psli->ring[psli->fcp_ring]; + pring = &psli->sli3_ring[LPFC_FCP_RING]; pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; /* and give them to the extra ring */ - pring = &psli->ring[psli->extra_ring]; + pring = &psli->sli3_ring[LPFC_EXTRA_RING]; pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; @@ -9328,7 +9620,7 @@ lpfc_sli_async_event_handler(struct lpfc_hba * phba, /** - * lpfc_sli_setup - SLI ring setup function + * lpfc_sli4_setup - SLI ring setup function * @phba: Pointer to HBA context object. * * lpfc_sli_setup sets up rings of the SLI interface with @@ -9339,6 +9631,51 @@ lpfc_sli_async_event_handler(struct lpfc_hba * phba, * This function always returns 0. **/ int +lpfc_sli4_setup(struct lpfc_hba *phba) +{ + struct lpfc_sli_ring *pring; + + pring = phba->sli4_hba.els_wq->pring; + pring->num_mask = LPFC_MAX_RING_MASK; + pring->prt[0].profile = 0; /* Mask 0 */ + pring->prt[0].rctl = FC_RCTL_ELS_REQ; + pring->prt[0].type = FC_TYPE_ELS; + pring->prt[0].lpfc_sli_rcv_unsol_event = + lpfc_els_unsol_event; + pring->prt[1].profile = 0; /* Mask 1 */ + pring->prt[1].rctl = FC_RCTL_ELS_REP; + pring->prt[1].type = FC_TYPE_ELS; + pring->prt[1].lpfc_sli_rcv_unsol_event = + lpfc_els_unsol_event; + pring->prt[2].profile = 0; /* Mask 2 */ + /* NameServer Inquiry */ + pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL; + /* NameServer */ + pring->prt[2].type = FC_TYPE_CT; + pring->prt[2].lpfc_sli_rcv_unsol_event = + lpfc_ct_unsol_event; + pring->prt[3].profile = 0; /* Mask 3 */ + /* NameServer response */ + pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL; + /* NameServer */ + pring->prt[3].type = FC_TYPE_CT; + pring->prt[3].lpfc_sli_rcv_unsol_event = + lpfc_ct_unsol_event; + return 0; +} + +/** + * lpfc_sli_setup - SLI ring setup function + * @phba: Pointer to HBA context object. + * + * lpfc_sli_setup sets up rings of the SLI interface with + * number of iocbs per ring and iotags. This function is + * called while driver attach to the HBA and before the + * interrupts are enabled. So there is no need for locking. + * + * This function always returns 0. SLI3 only. + **/ +int lpfc_sli_setup(struct lpfc_hba *phba) { int i, totiocbsize = 0; @@ -9346,19 +9683,14 @@ lpfc_sli_setup(struct lpfc_hba *phba) struct lpfc_sli_ring *pring; psli->num_rings = MAX_SLI3_CONFIGURED_RINGS; - if (phba->sli_rev == LPFC_SLI_REV4) - psli->num_rings += phba->cfg_fcp_io_channel; psli->sli_flag = 0; - psli->fcp_ring = LPFC_FCP_RING; - psli->next_ring = LPFC_FCP_NEXT_RING; - psli->extra_ring = LPFC_EXTRA_RING; psli->iocbq_lookup = NULL; psli->iocbq_lookup_len = 0; psli->last_iotag = 0; for (i = 0; i < psli->num_rings; i++) { - pring = &psli->ring[i]; + pring = &psli->sli3_ring[i]; switch (i) { case LPFC_FCP_RING: /* ring 0 - FCP */ /* numCiocb and numRiocb are used in config_port */ @@ -9457,18 +9789,90 @@ lpfc_sli_setup(struct lpfc_hba *phba) } /** - * lpfc_sli_queue_setup - Queue initialization function + * lpfc_sli4_queue_init - Queue initialization function * @phba: Pointer to HBA context object. 
* - * lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each + * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each * ring. This function also initializes ring indices of each ring. * This function is called during the initialization of the SLI * interface of an HBA. * This function is called with no lock held and always returns * 1. **/ -int -lpfc_sli_queue_setup(struct lpfc_hba *phba) +void +lpfc_sli4_queue_init(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli; + struct lpfc_sli_ring *pring; + int i; + + psli = &phba->sli; + spin_lock_irq(&phba->hbalock); + INIT_LIST_HEAD(&psli->mboxq); + INIT_LIST_HEAD(&psli->mboxq_cmpl); + /* Initialize list headers for txq and txcmplq as double linked lists */ + for (i = 0; i < phba->cfg_fcp_io_channel; i++) { + pring = phba->sli4_hba.fcp_wq[i]->pring; + pring->flag = 0; + pring->ringno = LPFC_FCP_RING; + INIT_LIST_HEAD(&pring->txq); + INIT_LIST_HEAD(&pring->txcmplq); + INIT_LIST_HEAD(&pring->iocb_continueq); + spin_lock_init(&pring->ring_lock); + } + for (i = 0; i < phba->cfg_nvme_io_channel; i++) { + pring = phba->sli4_hba.nvme_wq[i]->pring; + pring->flag = 0; + pring->ringno = LPFC_FCP_RING; + INIT_LIST_HEAD(&pring->txq); + INIT_LIST_HEAD(&pring->txcmplq); + INIT_LIST_HEAD(&pring->iocb_continueq); + spin_lock_init(&pring->ring_lock); + } + pring = phba->sli4_hba.els_wq->pring; + pring->flag = 0; + pring->ringno = LPFC_ELS_RING; + INIT_LIST_HEAD(&pring->txq); + INIT_LIST_HEAD(&pring->txcmplq); + INIT_LIST_HEAD(&pring->iocb_continueq); + spin_lock_init(&pring->ring_lock); + + if (phba->cfg_nvme_io_channel) { + pring = phba->sli4_hba.nvmels_wq->pring; + pring->flag = 0; + pring->ringno = LPFC_ELS_RING; + INIT_LIST_HEAD(&pring->txq); + INIT_LIST_HEAD(&pring->txcmplq); + INIT_LIST_HEAD(&pring->iocb_continueq); + spin_lock_init(&pring->ring_lock); + } + + if (phba->cfg_fof) { + pring = phba->sli4_hba.oas_wq->pring; + pring->flag = 0; + pring->ringno = LPFC_FCP_RING; + INIT_LIST_HEAD(&pring->txq); + INIT_LIST_HEAD(&pring->txcmplq); + INIT_LIST_HEAD(&pring->iocb_continueq); + spin_lock_init(&pring->ring_lock); + } + + spin_unlock_irq(&phba->hbalock); +} + +/** + * lpfc_sli_queue_init - Queue initialization function + * @phba: Pointer to HBA context object. + * + * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each + * ring. This function also initializes ring indices of each ring. + * This function is called during the initialization of the SLI + * interface of an HBA. + * This function is called with no lock held and always returns + * 1. 
+ **/ +void +lpfc_sli_queue_init(struct lpfc_hba *phba) { struct lpfc_sli *psli; struct lpfc_sli_ring *pring; @@ -9480,21 +9884,20 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba) INIT_LIST_HEAD(&psli->mboxq_cmpl); /* Initialize list headers for txq and txcmplq as double linked lists */ for (i = 0; i < psli->num_rings; i++) { - pring = &psli->ring[i]; + pring = &psli->sli3_ring[i]; pring->ringno = i; pring->sli.sli3.next_cmdidx = 0; pring->sli.sli3.local_getidx = 0; pring->sli.sli3.cmdidx = 0; - pring->flag = 0; - INIT_LIST_HEAD(&pring->txq); - INIT_LIST_HEAD(&pring->txcmplq); INIT_LIST_HEAD(&pring->iocb_continueq); INIT_LIST_HEAD(&pring->iocb_continue_saveq); INIT_LIST_HEAD(&pring->postbufq); + pring->flag = 0; + INIT_LIST_HEAD(&pring->txq); + INIT_LIST_HEAD(&pring->txcmplq); spin_lock_init(&pring->ring_lock); } spin_unlock_irq(&phba->hbalock); - return 1; } /** @@ -9566,6 +9969,7 @@ lpfc_sli_host_down(struct lpfc_vport *vport) LIST_HEAD(completions); struct lpfc_hba *phba = vport->phba; struct lpfc_sli *psli = &phba->sli; + struct lpfc_queue *qp = NULL; struct lpfc_sli_ring *pring; struct lpfc_iocbq *iocb, *next_iocb; int i; @@ -9575,36 +9979,64 @@ lpfc_sli_host_down(struct lpfc_vport *vport) lpfc_cleanup_discovery_resources(vport); spin_lock_irqsave(&phba->hbalock, flags); - for (i = 0; i < psli->num_rings; i++) { - pring = &psli->ring[i]; - prev_pring_flag = pring->flag; - /* Only slow rings */ - if (pring->ringno == LPFC_ELS_RING) { - pring->flag |= LPFC_DEFERRED_RING_EVENT; - /* Set the lpfc data pending flag */ - set_bit(LPFC_DATA_READY, &phba->data_flags); - } - /* - * Error everything on the txq since these iocbs have not been - * given to the FW yet. - */ - list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { - if (iocb->vport != vport) - continue; - list_move_tail(&iocb->list, &completions); - } - /* Next issue ABTS for everything on the txcmplq */ - list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, - list) { - if (iocb->vport != vport) + /* + * Error everything on the txq since these iocbs + * have not been given to the FW yet. 
+ * Also issue ABTS for everything on the txcmplq + */ + if (phba->sli_rev != LPFC_SLI_REV4) { + for (i = 0; i < psli->num_rings; i++) { + pring = &psli->sli3_ring[i]; + prev_pring_flag = pring->flag; + /* Only slow rings */ + if (pring->ringno == LPFC_ELS_RING) { + pring->flag |= LPFC_DEFERRED_RING_EVENT; + /* Set the lpfc data pending flag */ + set_bit(LPFC_DATA_READY, &phba->data_flags); + } + list_for_each_entry_safe(iocb, next_iocb, + &pring->txq, list) { + if (iocb->vport != vport) + continue; + list_move_tail(&iocb->list, &completions); + } + list_for_each_entry_safe(iocb, next_iocb, + &pring->txcmplq, list) { + if (iocb->vport != vport) + continue; + lpfc_sli_issue_abort_iotag(phba, pring, iocb); + } + pring->flag = prev_pring_flag; + } + } else { + list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { + pring = qp->pring; + if (!pring) continue; - lpfc_sli_issue_abort_iotag(phba, pring, iocb); + if (pring == phba->sli4_hba.els_wq->pring) { + pring->flag |= LPFC_DEFERRED_RING_EVENT; + /* Set the lpfc data pending flag */ + set_bit(LPFC_DATA_READY, &phba->data_flags); + } + prev_pring_flag = pring->flag; + spin_lock_irq(&pring->ring_lock); + list_for_each_entry_safe(iocb, next_iocb, + &pring->txq, list) { + if (iocb->vport != vport) + continue; + list_move_tail(&iocb->list, &completions); + } + spin_unlock_irq(&pring->ring_lock); + list_for_each_entry_safe(iocb, next_iocb, + &pring->txcmplq, list) { + if (iocb->vport != vport) + continue; + lpfc_sli_issue_abort_iotag(phba, pring, iocb); + } + pring->flag = prev_pring_flag; } - - pring->flag = prev_pring_flag; } - spin_unlock_irqrestore(&phba->hbalock, flags); /* Cancel all the IOCBs from the completions list */ @@ -9633,6 +10065,7 @@ lpfc_sli_hba_down(struct lpfc_hba *phba) { LIST_HEAD(completions); struct lpfc_sli *psli = &phba->sli; + struct lpfc_queue *qp = NULL; struct lpfc_sli_ring *pring; struct lpfc_dmabuf *buf_ptr; unsigned long flags = 0; @@ -9646,20 +10079,36 @@ lpfc_sli_hba_down(struct lpfc_hba *phba) lpfc_fabric_abort_hba(phba); spin_lock_irqsave(&phba->hbalock, flags); - for (i = 0; i < psli->num_rings; i++) { - pring = &psli->ring[i]; - /* Only slow rings */ - if (pring->ringno == LPFC_ELS_RING) { - pring->flag |= LPFC_DEFERRED_RING_EVENT; - /* Set the lpfc data pending flag */ - set_bit(LPFC_DATA_READY, &phba->data_flags); - } - /* - * Error everything on the txq since these iocbs have not been - * given to the FW yet. - */ - list_splice_init(&pring->txq, &completions); + /* + * Error everything on the txq since these iocbs + * have not been given to the FW yet. 
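lpfc_sli_host_down() above and lpfc_sli_hba_down(), whose per-ring loop follows, gather the not-yet-issued txq entries onto a local completions list while holding the lock (moved individually per vport in the first, spliced wholesale in the second) and only cancel them after the lock is dropped. A stand-alone sketch of the splice-then-process variant, with a pthread mutex standing in for the ring spinlock and an invented demo_io list:

#include <pthread.h>
#include <stdio.h>

struct demo_io {
	struct demo_io *next;
	int id;
};

static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;
static struct demo_io *txq;            /* pending, not yet given to hardware */

static void demo_flush_txq(void)
{
	struct demo_io *completions, *io;

	pthread_mutex_lock(&ring_lock);
	completions = txq;              /* splice the whole queue ... */
	txq = NULL;                     /* ... leaving the ring queue empty */
	pthread_mutex_unlock(&ring_lock);

	for (io = completions; io; io = io->next)   /* complete outside the lock */
		printf("cancelling io %d (local reject)\n", io->id);
}

int main(void)
{
	struct demo_io a = { .id = 1 }, b = { .id = 2, .next = &a };

	txq = &b;
	demo_flush_txq();
	return 0;
}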
+ */ + if (phba->sli_rev != LPFC_SLI_REV4) { + for (i = 0; i < psli->num_rings; i++) { + pring = &psli->sli3_ring[i]; + /* Only slow rings */ + if (pring->ringno == LPFC_ELS_RING) { + pring->flag |= LPFC_DEFERRED_RING_EVENT; + /* Set the lpfc data pending flag */ + set_bit(LPFC_DATA_READY, &phba->data_flags); + } + list_splice_init(&pring->txq, &completions); + } + } else { + list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { + pring = qp->pring; + if (!pring) + continue; + spin_lock_irq(&pring->ring_lock); + list_splice_init(&pring->txq, &completions); + spin_unlock_irq(&pring->ring_lock); + if (pring == phba->sli4_hba.els_wq->pring) { + pring->flag |= LPFC_DEFERRED_RING_EVENT; + /* Set the lpfc data pending flag */ + set_bit(LPFC_DATA_READY, &phba->data_flags); + } + } } spin_unlock_irqrestore(&phba->hbalock, flags); @@ -9986,7 +10435,6 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *abtsiocbp; IOCB_t *icmd = NULL; IOCB_t *iabt = NULL; - int ring_number; int retval; unsigned long iflags; @@ -10026,7 +10474,7 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, iabt->ulpClass = icmd->ulpClass; /* ABTS WQE must go to the same WQ as the WQE to be aborted */ - abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx; + abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx; if (cmdiocb->iocb_flag & LPFC_IO_FCP) abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX; if (cmdiocb->iocb_flag & LPFC_IO_FOF) @@ -10048,11 +10496,9 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, abtsiocbp->iotag); if (phba->sli_rev == LPFC_SLI_REV4) { - ring_number = - lpfc_sli_calc_ring(phba, pring->ringno, abtsiocbp); - if (unlikely(ring_number == LPFC_HBA_ERROR)) + pring = lpfc_sli4_calc_ring(phba, abtsiocbp); + if (unlikely(pring == NULL)) return 0; - pring = &phba->sli.ring[ring_number]; /* Note: both hbalock and ring_lock need to be set here */ spin_lock_irqsave(&pring->ring_lock, iflags); retval = __lpfc_sli_issue_iocb(phba, pring->ringno, @@ -10133,6 +10579,108 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, return retval; } +/** + * lpfc_sli4_abort_nvme_io - Issue abort for a command iocb + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * @cmdiocb: Pointer to driver command iocb object. + * + * This function issues an abort iocb for the provided command iocb down to + * the port. Other than the case the outstanding command iocb is an abort + * request, this function issues abort out unconditionally. This function is + * called with hbalock held. The function returns 0 when it fails due to + * memory allocation failure or when the command iocb is an abort request. + **/ +static int +lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + struct lpfc_iocbq *cmdiocb) +{ + struct lpfc_vport *vport = cmdiocb->vport; + struct lpfc_iocbq *abtsiocbp; + union lpfc_wqe *abts_wqe; + int retval; + + /* + * There are certain command types we don't want to abort. And we + * don't want to abort commands that are already in the process of + * being aborted. 
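The checks that open lpfc_sli4_abort_nvme_io() below encode a simple gate: never abort a command that is itself an abort or close request, and never issue a second abort for a command already flagged LPFC_DRIVER_ABORTED; the victim is flagged before the abort is built so its completion handler can tell. A compact stand-alone model of that gate, with invented command types and flag values:

#include <stdio.h>
#include <stdbool.h>

enum demo_cmd_type { DEMO_CMD_READ, DEMO_CMD_WRITE, DEMO_CMD_ABORT };

struct demo_cmd {
	enum demo_cmd_type type;
	unsigned int flags;
#define DEMO_DRIVER_ABORTED 0x1
};

/* Returns true if an abort was (notionally) issued for @cmd. */
static bool demo_try_abort(struct demo_cmd *cmd)
{
	if (cmd->type == DEMO_CMD_ABORT)        /* never abort an abort */
		return false;
	if (cmd->flags & DEMO_DRIVER_ABORTED)   /* abort already in flight */
		return false;

	cmd->flags |= DEMO_DRIVER_ABORTED;      /* completion will see this */
	printf("issuing abort for command type %d\n", cmd->type);
	return true;
}

int main(void)
{
	struct demo_cmd cmd = { .type = DEMO_CMD_WRITE };

	demo_try_abort(&cmd);    /* issues the abort */
	demo_try_abort(&cmd);    /* second attempt is rejected */
	return 0;
}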
+ */ + if (cmdiocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || + cmdiocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN || + (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) + return 0; + + /* issue ABTS for this io based on iotag */ + abtsiocbp = __lpfc_sli_get_iocbq(phba); + if (abtsiocbp == NULL) + return 0; + + /* This signals the response to set the correct status + * before calling the completion handler + */ + cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED; + + /* Complete prepping the abort wqe and issue to the FW. */ + abts_wqe = &abtsiocbp->wqe; + bf_set(abort_cmd_ia, &abts_wqe->abort_cmd, 0); + bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG); + + /* Explicitly set reserved fields to zero.*/ + abts_wqe->abort_cmd.rsrvd4 = 0; + abts_wqe->abort_cmd.rsrvd5 = 0; + + /* WQE Common - word 6. Context is XRI tag. Set 0. */ + bf_set(wqe_xri_tag, &abts_wqe->abort_cmd.wqe_com, 0); + bf_set(wqe_ctxt_tag, &abts_wqe->abort_cmd.wqe_com, 0); + + /* word 7 */ + bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0); + bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); + bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com, + cmdiocb->iocb.ulpClass); + + /* word 8 - tell the FW to abort the IO associated with this + * outstanding exchange ID. + */ + abts_wqe->abort_cmd.wqe_com.abort_tag = cmdiocb->sli4_xritag; + + /* word 9 - this is the iotag for the abts_wqe completion. */ + bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com, + abtsiocbp->iotag); + + /* word 10 */ + bf_set(wqe_wqid, &abts_wqe->abort_cmd.wqe_com, cmdiocb->hba_wqidx); + bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1); + bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE); + + /* word 11 */ + bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND); + bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1); + bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); + + /* ABTS WQE must go to the same WQ as the WQE to be aborted */ + abtsiocbp->iocb_flag |= LPFC_IO_NVME; + abtsiocbp->vport = vport; + abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl; + retval = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abtsiocbp); + if (retval == IOCB_ERROR) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, + "6147 Failed abts issue_wqe with status x%x " + "for oxid x%x\n", + retval, cmdiocb->sli4_xritag); + lpfc_sli_release_iocbq(phba, abtsiocbp); + return retval; + } + + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, + "6148 Drv Abort NVME Request Issued for " + "ox_id x%x on reqtag x%x\n", + cmdiocb->sli4_xritag, + abtsiocbp->iotag); + + return retval; +} + /** * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba. * @phba: pointer to lpfc HBA data structure. 
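/*
 * Editor's note: the hunks above apply one pattern to every ring or work
 * queue: take the ring lock, splice the pending txq onto a local
 * "completions" list, drop the lock, then fail each entry outside the
 * lock.  The stand-alone user-space sketch below illustrates that drain
 * pattern only; it is not driver code, and every name in it is invented
 * for the illustration.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct req {
	int tag;
	struct req *next;
};

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static struct req *txq;		/* pending requests not yet given to "hardware" */

static void drain_txq(void)
{
	struct req *completions, *r;

	/* Splice the whole pending list onto a local head under the lock. */
	pthread_mutex_lock(&q_lock);
	completions = txq;
	txq = NULL;
	pthread_mutex_unlock(&q_lock);

	/* Cancel every entry outside the lock. */
	while ((r = completions) != NULL) {
		completions = r->next;
		printf("cancelling tag %d\n", r->tag);
		free(r);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct req *r = malloc(sizeof(*r));

		if (!r)
			return 1;
		r->tag = i;
		pthread_mutex_lock(&q_lock);
		r->next = txq;
		txq = r;
		pthread_mutex_unlock(&q_lock);
	}
	drain_txq();
	return 0;
}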
@@ -10144,10 +10692,20 @@ lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba) { struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring; + struct lpfc_queue *qp = NULL; int i; - for (i = 0; i < psli->num_rings; i++) { - pring = &psli->ring[i]; + if (phba->sli_rev != LPFC_SLI_REV4) { + for (i = 0; i < psli->num_rings; i++) { + pring = &psli->sli3_ring[i]; + lpfc_sli_abort_iocb_ring(phba, pring); + } + return; + } + list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { + pring = qp->pring; + if (!pring) + continue; lpfc_sli_abort_iocb_ring(phba, pring); } } @@ -10351,7 +10909,7 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, abtsiocb->vport = vport; /* ABTS WQE must go to the same WQ as the WQE to be aborted */ - abtsiocb->fcp_wqidx = iocbq->fcp_wqidx; + abtsiocb->hba_wqidx = iocbq->hba_wqidx; if (iocbq->iocb_flag & LPFC_IO_FCP) abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX; if (iocbq->iocb_flag & LPFC_IO_FOF) @@ -10411,7 +10969,6 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, int sum, i, ret_val; unsigned long iflags; struct lpfc_sli_ring *pring_s4; - uint32_t ring_number; spin_lock_irq(&phba->hbalock); @@ -10454,7 +11011,7 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, abtsiocbq->vport = vport; /* ABTS WQE must go to the same WQ as the WQE to be aborted */ - abtsiocbq->fcp_wqidx = iocbq->fcp_wqidx; + abtsiocbq->hba_wqidx = iocbq->hba_wqidx; if (iocbq->iocb_flag & LPFC_IO_FCP) abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX; if (iocbq->iocb_flag & LPFC_IO_FOF) @@ -10479,9 +11036,9 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; if (phba->sli_rev == LPFC_SLI_REV4) { - ring_number = MAX_SLI3_CONFIGURED_RINGS + - iocbq->fcp_wqidx; - pring_s4 = &phba->sli.ring[ring_number]; + pring_s4 = lpfc_sli4_calc_ring(phba, iocbq); + if (pring_s4 == NULL) + continue; /* Note: both hbalock and ring_lock must be set here */ spin_lock_irqsave(&pring_s4->ring_lock, iflags); ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, @@ -10643,10 +11200,14 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, struct lpfc_iocbq *iocb; int txq_cnt = 0; int txcmplq_cnt = 0; - struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; + struct lpfc_sli_ring *pring; unsigned long iflags; bool iocb_completed = true; + if (phba->sli_rev >= LPFC_SLI_REV4) + pring = lpfc_sli4_calc_ring(phba, piocb); + else + pring = &phba->sli.sli3_ring[ring_number]; /* * If the caller has provided a response iocbq buffer, then context2 * is NULL or its an error. @@ -11441,6 +12002,7 @@ lpfc_sli_fp_intr_handler(int irq, void *dev_id) uint32_t ha_copy; unsigned long status; unsigned long iflag; + struct lpfc_sli_ring *pring; /* Get the driver's phba structure from the dev_id and * assume the HBA is not interrupting. 
@@ -11485,10 +12047,9 @@ lpfc_sli_fp_intr_handler(int irq, void *dev_id) status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); status >>= (4*LPFC_FCP_RING); + pring = &phba->sli.sli3_ring[LPFC_FCP_RING]; if (status & HA_RXMASK) - lpfc_sli_handle_fast_ring_event(phba, - &phba->sli.ring[LPFC_FCP_RING], - status); + lpfc_sli_handle_fast_ring_event(phba, pring, status); if (phba->cfg_multi_ring_support == 2) { /* @@ -11499,7 +12060,7 @@ lpfc_sli_fp_intr_handler(int irq, void *dev_id) status >>= (4*LPFC_EXTRA_RING); if (status & HA_RXMASK) { lpfc_sli_handle_fast_ring_event(phba, - &phba->sli.ring[LPFC_EXTRA_RING], + &phba->sli.sli3_ring[LPFC_EXTRA_RING], status); } } @@ -11653,7 +12214,42 @@ void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba) } /** - * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event + * lpfc_sli4_nvme_xri_abort_event_proc - Process nvme xri abort event + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked by the worker thread to process all the pending + * SLI4 NVME abort XRI events. + **/ +void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba) +{ + struct lpfc_cq_event *cq_event; + + /* First, declare the fcp xri abort event has been handled */ + spin_lock_irq(&phba->hbalock); + phba->hba_flag &= ~NVME_XRI_ABORT_EVENT; + spin_unlock_irq(&phba->hbalock); + /* Now, handle all the fcp xri abort events */ + while (!list_empty(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue)) { + /* Get the first event from the head of the event queue */ + spin_lock_irq(&phba->hbalock); + list_remove_head(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue, + cq_event, struct lpfc_cq_event, list); + spin_unlock_irq(&phba->hbalock); + /* Notify aborted XRI for NVME work queue */ + if (phba->nvmet_support) { + lpfc_sli4_nvmet_xri_aborted(phba, + &cq_event->cqe.wcqe_axri); + } else { + lpfc_sli4_nvme_xri_aborted(phba, + &cq_event->cqe.wcqe_axri); + } + /* Free the event processed back to the free pool */ + lpfc_sli4_cq_event_release(phba, cq_event); + } +} + +/** + * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event * @phba: pointer to lpfc hba data structure. 
* * This routine is invoked by the worker thread to process all the pending @@ -11812,11 +12408,13 @@ static struct lpfc_iocbq * lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba, struct lpfc_iocbq *irspiocbq) { - struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; + struct lpfc_sli_ring *pring; struct lpfc_iocbq *cmdiocbq; struct lpfc_wcqe_complete *wcqe; unsigned long iflags; + pring = lpfc_phba_elsring(phba); + wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl; spin_lock_irqsave(&pring->ring_lock, iflags); pring->stats.iocb_event++; @@ -12052,8 +12650,6 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, txq_cnt++; if (!list_empty(&pring->txcmplq)) txcmplq_cnt++; - if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq)) - fcp_txcmplq_cnt++; lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d " "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n", @@ -12149,10 +12745,22 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, spin_unlock_irqrestore(&phba->hbalock, iflags); workposted = true; break; + case LPFC_NVME: + spin_lock_irqsave(&phba->hbalock, iflags); + list_add_tail(&cq_event->list, + &phba->sli4_hba.sp_nvme_xri_aborted_work_queue); + /* Set the nvme xri abort event flag */ + phba->hba_flag |= NVME_XRI_ABORT_EVENT; + spin_unlock_irqrestore(&phba->hbalock, iflags); + workposted = true; + break; default: lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0603 Invalid work queue CQE subtype (x%x)\n", - cq->subtype); + "0603 Invalid CQ subtype %d: " + "%08x %08x %08x %08x\n", + cq->subtype, wcqe->word0, wcqe->parameter, + wcqe->word2, wcqe->word3); + lpfc_sli4_cq_event_release(phba, cq_event); workposted = false; break; } @@ -12172,6 +12780,7 @@ static bool lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) { bool workposted = false; + struct fc_frame_header *fc_hdr; struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; struct lpfc_queue *drq = phba->sli4_hba.dat_rq; struct hbq_dmabuf *dma_buf; @@ -12206,6 +12815,10 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) } hrq->RQ_rcv_buf++; memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); + + /* If a NVME LS event (type 0x28), treat it as Fast path */ + fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt; + /* save off the frame for the word thread to process */ list_add_tail(&dma_buf->cq_event.list, &phba->sli4_hba.sp_queue_event); @@ -12324,6 +12937,9 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, return; } + /* Save EQ associated with this CQ */ + cq->assoc_qp = speq; + /* Process all the entries to the CQ */ switch (cq->type) { case LPFC_MCQ: @@ -12336,8 +12952,9 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, break; case LPFC_WCQ: while ((cqe = lpfc_sli4_cq_get(cq))) { - if (cq->subtype == LPFC_FCP) - workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, + if ((cq->subtype == LPFC_FCP) || + (cq->subtype == LPFC_NVME)) + workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe); else workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, @@ -12424,7 +13041,23 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, bf_get(lpfc_wcqe_c_request_tag, wcqe)); return; } - if (unlikely(!cmdiocbq->iocb_cmpl)) { + + if (cq->assoc_qp) + cmdiocbq->isr_timestamp = + cq->assoc_qp->isr_timestamp; + + if (cmdiocbq->iocb_cmpl == NULL) { + if (cmdiocbq->wqe_cmpl) { + if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) { + spin_lock_irqsave(&phba->hbalock, iflags); + cmdiocbq->iocb_flag &= 
~LPFC_DRIVER_ABORTED; + spin_unlock_irqrestore(&phba->hbalock, iflags); + } + + /* Pass the cmd_iocb and the wcqe to the upper layer */ + (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe); + return; + } lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "0375 FCP cmdiocb not callback function " "iotag: (%d)\n", @@ -12460,12 +13093,12 @@ lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, { struct lpfc_queue *childwq; bool wqid_matched = false; - uint16_t fcp_wqid; + uint16_t hba_wqid; /* Check for fast-path FCP work queue release */ - fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe); + hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe); list_for_each_entry(childwq, &cq->child_list, list) { - if (childwq->queue_id == fcp_wqid) { + if (childwq->queue_id == hba_wqid) { lpfc_sli4_wq_release(childwq, bf_get(lpfc_wcqe_r_wqe_index, wcqe)); wqid_matched = true; @@ -12476,11 +13109,108 @@ lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, if (wqid_matched != true) lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "2580 Fast-path wqe consume event carries " - "miss-matched qid: wcqe-qid=x%x\n", fcp_wqid); + "miss-matched qid: wcqe-qid=x%x\n", hba_wqid); +} + +/** + * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry + * @phba: Pointer to HBA context object. + * @rcqe: Pointer to receive-queue completion queue entry. + * + * This routine process a receive-queue completion queue entry. + * + * Return: true if work posted to worker thread, otherwise false. + **/ +static bool +lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, + struct lpfc_rcqe *rcqe) +{ + bool workposted = false; + struct lpfc_queue *hrq; + struct lpfc_queue *drq; + struct rqb_dmabuf *dma_buf; + struct fc_frame_header *fc_hdr; + uint32_t status, rq_id; + unsigned long iflags; + uint32_t fctl, idx; + + if ((phba->nvmet_support == 0) || + (phba->sli4_hba.nvmet_cqset == NULL)) + return workposted; + + idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id; + hrq = phba->sli4_hba.nvmet_mrq_hdr[idx]; + drq = phba->sli4_hba.nvmet_mrq_data[idx]; + + /* sanity check on queue memory */ + if (unlikely(!hrq) || unlikely(!drq)) + return workposted; + + if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1) + rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe); + else + rq_id = bf_get(lpfc_rcqe_rq_id, rcqe); + + if ((phba->nvmet_support == 0) || + (rq_id != hrq->queue_id)) + return workposted; + + status = bf_get(lpfc_rcqe_status, rcqe); + switch (status) { + case FC_STATUS_RQ_BUF_LEN_EXCEEDED: + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "6126 Receive Frame Truncated!!\n"); + hrq->RQ_buf_trunc++; + break; + case FC_STATUS_RQ_SUCCESS: + lpfc_sli4_rq_release(hrq, drq); + spin_lock_irqsave(&phba->hbalock, iflags); + dma_buf = lpfc_sli_rqbuf_get(phba, hrq); + if (!dma_buf) { + hrq->RQ_no_buf_found++; + spin_unlock_irqrestore(&phba->hbalock, iflags); + goto out; + } + spin_unlock_irqrestore(&phba->hbalock, iflags); + hrq->RQ_rcv_buf++; + fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt; + + /* Just some basic sanity checks on FCP Command frame */ + fctl = (fc_hdr->fh_f_ctl[0] << 16 | + fc_hdr->fh_f_ctl[1] << 8 | + fc_hdr->fh_f_ctl[2]); + if (((fctl & + (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) != + (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) || + (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */ + goto drop; + + if (fc_hdr->fh_type == FC_TYPE_FCP) { + dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe); + lpfc_nvmet_unsol_fcp_event( + phba, 
phba->sli4_hba.els_wq->pring, dma_buf, + cq->assoc_qp->isr_timestamp); + return false; + } +drop: + lpfc_in_buf_free(phba, &dma_buf->dbuf); + break; + case FC_STATUS_INSUFF_BUF_NEED_BUF: + case FC_STATUS_INSUFF_BUF_FRM_DISC: + hrq->RQ_no_posted_buf++; + /* Post more buffers if possible */ + spin_lock_irqsave(&phba->hbalock, iflags); + phba->hba_flag |= HBA_POST_RECEIVE_BUFFER; + spin_unlock_irqrestore(&phba->hbalock, iflags); + workposted = true; + break; + } +out: + return workposted; } /** - * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry + * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry * @cq: Pointer to the completion queue. * @eqe: Pointer to fast-path completion queue entry. * @@ -12488,7 +13218,7 @@ lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, * event queue for FCP command response completion. **/ static int -lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, +lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, struct lpfc_cqe *cqe) { struct lpfc_wcqe_release wcqe; @@ -12500,10 +13230,15 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, /* Check and process for different type of WCQE and dispatch */ switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { case CQE_CODE_COMPL_WQE: + case CQE_CODE_NVME_ERSP: cq->CQ_wq++; /* Process the WQ complete event */ phba->last_completion_time = jiffies; - lpfc_sli4_fp_handle_fcp_wcqe(phba, cq, + if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME)) + lpfc_sli4_fp_handle_fcp_wcqe(phba, cq, + (struct lpfc_wcqe_complete *)&wcqe); + if (cq->subtype == LPFC_NVME_LS) + lpfc_sli4_fp_handle_fcp_wcqe(phba, cq, (struct lpfc_wcqe_complete *)&wcqe); break; case CQE_CODE_RELEASE_WQE: @@ -12519,9 +13254,17 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, (struct sli4_wcqe_xri_aborted *)&wcqe); break; + case CQE_CODE_RECEIVE_V1: + case CQE_CODE_RECEIVE: + phba->last_completion_time = jiffies; + if (cq->subtype == LPFC_NVMET) { + workposted = lpfc_sli4_nvmet_handle_rcqe( + phba, cq, (struct lpfc_rcqe *)&wcqe); + } + break; default: lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0144 Not a valid WCQE code: x%x\n", + "0144 Not a valid CQE code: x%x\n", bf_get(lpfc_wcqe_c_code, &wcqe)); break; } @@ -12544,10 +13287,10 @@ static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, uint32_t qidx) { - struct lpfc_queue *cq; + struct lpfc_queue *cq = NULL; struct lpfc_cqe *cqe; bool workposted = false; - uint16_t cqid; + uint16_t cqid, id; int ecount = 0; if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { @@ -12562,28 +13305,42 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, /* Get the reference to the corresponding CQ */ cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); - /* Check if this is a Slow path event */ - if (unlikely(cqid != phba->sli4_hba.fcp_cq_map[qidx])) { - lpfc_sli4_sp_handle_eqe(phba, eqe, - phba->sli4_hba.hba_eq[qidx]); - return; + if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) { + id = phba->sli4_hba.nvmet_cqset[0]->queue_id; + if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) { + /* Process NVMET unsol rcv */ + cq = phba->sli4_hba.nvmet_cqset[cqid - id]; + goto process_cq; + } } - if (unlikely(!phba->sli4_hba.fcp_cq)) { - lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, - "3146 Fast-path completion queues " - "does not exist\n"); - return; + if 
(phba->sli4_hba.nvme_cq_map && + (cqid == phba->sli4_hba.nvme_cq_map[qidx])) { + /* Process NVME / NVMET command completion */ + cq = phba->sli4_hba.nvme_cq[qidx]; + goto process_cq; } - cq = phba->sli4_hba.fcp_cq[qidx]; - if (unlikely(!cq)) { - if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0367 Fast-path completion queue " - "(%d) does not exist\n", qidx); + + if (phba->sli4_hba.fcp_cq_map && + (cqid == phba->sli4_hba.fcp_cq_map[qidx])) { + /* Process FCP command completion */ + cq = phba->sli4_hba.fcp_cq[qidx]; + goto process_cq; + } + + if (phba->sli4_hba.nvmels_cq && + (cqid == phba->sli4_hba.nvmels_cq->queue_id)) { + /* Process NVME unsol rcv */ + cq = phba->sli4_hba.nvmels_cq; + } + + /* Otherwise this is a Slow path event */ + if (cq == NULL) { + lpfc_sli4_sp_handle_eqe(phba, eqe, phba->sli4_hba.hba_eq[qidx]); return; } +process_cq: if (unlikely(cqid != cq->queue_id)) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0368 Miss-matched fast-path completion " @@ -12592,9 +13349,12 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, return; } + /* Save EQ associated with this CQ */ + cq->assoc_qp = phba->sli4_hba.hba_eq[qidx]; + /* Process all the entries to the CQ */ while ((cqe = lpfc_sli4_cq_get(cq))) { - workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe); + workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe); if (!(++ecount % cq->entry_repost)) lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); } @@ -12685,7 +13445,7 @@ lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) /* Process all the entries to the OAS CQ */ while ((cqe = lpfc_sli4_cq_get(cq))) { - workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe); + workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe); if (!(++ecount % cq->entry_repost)) lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); } @@ -12733,15 +13493,15 @@ irqreturn_t lpfc_sli4_fof_intr_handler(int irq, void *dev_id) { struct lpfc_hba *phba; - struct lpfc_fcp_eq_hdl *fcp_eq_hdl; + struct lpfc_hba_eq_hdl *hba_eq_hdl; struct lpfc_queue *eq; struct lpfc_eqe *eqe; unsigned long iflag; int ecount = 0; /* Get the driver's phba structure from the dev_id */ - fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id; - phba = fcp_eq_hdl->phba; + hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id; + phba = hba_eq_hdl->phba; if (unlikely(!phba)) return IRQ_NONE; @@ -12827,17 +13587,17 @@ irqreturn_t lpfc_sli4_hba_intr_handler(int irq, void *dev_id) { struct lpfc_hba *phba; - struct lpfc_fcp_eq_hdl *fcp_eq_hdl; + struct lpfc_hba_eq_hdl *hba_eq_hdl; struct lpfc_queue *fpeq; struct lpfc_eqe *eqe; unsigned long iflag; int ecount = 0; - int fcp_eqidx; + int hba_eqidx; /* Get the driver's phba structure from the dev_id */ - fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id; - phba = fcp_eq_hdl->phba; - fcp_eqidx = fcp_eq_hdl->idx; + hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id; + phba = hba_eq_hdl->phba; + hba_eqidx = hba_eq_hdl->idx; if (unlikely(!phba)) return IRQ_NONE; @@ -12845,15 +13605,20 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id) return IRQ_NONE; /* Get to the EQ struct associated with this vector */ - fpeq = phba->sli4_hba.hba_eq[fcp_eqidx]; + fpeq = phba->sli4_hba.hba_eq[hba_eqidx]; if (unlikely(!fpeq)) return IRQ_NONE; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (phba->ktime_on) + fpeq->isr_timestamp = ktime_get_ns(); +#endif + if (lpfc_fcp_look_ahead) { - if (atomic_dec_and_test(&fcp_eq_hdl->fcp_eq_in_use)) + if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) lpfc_sli4_eq_clr_intr(fpeq); else { - 
atomic_inc(&fcp_eq_hdl->fcp_eq_in_use); + atomic_inc(&hba_eq_hdl->hba_eq_in_use); return IRQ_NONE; } } @@ -12868,7 +13633,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id) lpfc_sli4_eq_flush(phba, fpeq); spin_unlock_irqrestore(&phba->hbalock, iflag); if (lpfc_fcp_look_ahead) - atomic_inc(&fcp_eq_hdl->fcp_eq_in_use); + atomic_inc(&hba_eq_hdl->hba_eq_in_use); return IRQ_NONE; } @@ -12879,7 +13644,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id) if (eqe == NULL) break; - lpfc_sli4_hba_handle_eqe(phba, eqe, fcp_eqidx); + lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx); if (!(++ecount % fpeq->entry_repost)) lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM); fpeq->EQ_processed++; @@ -12896,7 +13661,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id) fpeq->EQ_no_entry++; if (lpfc_fcp_look_ahead) { - atomic_inc(&fcp_eq_hdl->fcp_eq_in_use); + atomic_inc(&hba_eq_hdl->hba_eq_in_use); return IRQ_NONE; } @@ -12910,7 +13675,8 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id) } if (lpfc_fcp_look_ahead) - atomic_inc(&fcp_eq_hdl->fcp_eq_in_use); + atomic_inc(&hba_eq_hdl->hba_eq_in_use); + return IRQ_HANDLED; } /* lpfc_sli4_fp_intr_handler */ @@ -12937,7 +13703,7 @@ lpfc_sli4_intr_handler(int irq, void *dev_id) struct lpfc_hba *phba; irqreturn_t hba_irq_rc; bool hba_handled = false; - int fcp_eqidx; + int qidx; /* Get the driver's phba structure from the dev_id */ phba = (struct lpfc_hba *)dev_id; @@ -12948,16 +13714,16 @@ lpfc_sli4_intr_handler(int irq, void *dev_id) /* * Invoke fast-path host attention interrupt handling as appropriate. */ - for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) { + for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) { hba_irq_rc = lpfc_sli4_hba_intr_handler(irq, - &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]); + &phba->sli4_hba.hba_eq_hdl[qidx]); if (hba_irq_rc == IRQ_HANDLED) hba_handled |= true; } if (phba->cfg_fof) { hba_irq_rc = lpfc_sli4_fof_intr_handler(irq, - &phba->sli4_hba.fcp_eq_hdl[0]); + &phba->sli4_hba.hba_eq_hdl[qidx]); if (hba_irq_rc == IRQ_HANDLED) hba_handled |= true; } @@ -12988,6 +13754,11 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue) dmabuf->virt, dmabuf->phys); kfree(dmabuf); } + if (queue->rqbp) { + lpfc_free_rq_buffer(queue->phba, queue); + kfree(queue->rqbp); + } + kfree(queue->pring); kfree(queue); return; } @@ -13021,7 +13792,13 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size, return NULL; queue->page_count = (ALIGN(entry_size * entry_count, hw_page_size))/hw_page_size; + + /* If needed, Adjust page count to match the max the adapter supports */ + if (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt) + queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt; + INIT_LIST_HEAD(&queue->list); + INIT_LIST_HEAD(&queue->wq_list); INIT_LIST_HEAD(&queue->page_list); INIT_LIST_HEAD(&queue->child_list); for (x = 0, total_qe_count = 0; x < queue->page_count; x++) { @@ -13093,11 +13870,13 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset) } /** - * lpfc_modify_fcp_eq_delay - Modify Delay Multiplier on FCP EQs + * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on FCP EQs * @phba: HBA structure that indicates port to create a queue on. * @startq: The starting FCP EQ to modify * * This function sends an MODIFY_EQ_DELAY mailbox command to the HBA. + * The command allows up to LPFC_MAX_EQ_DELAY_EQID_CNT EQ ID's to be + * updated in one mailbox command. * * The @phba struct is used to send mailbox command to HBA. The @startq * is used to get the starting FCP EQ to change. 
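/*
 * Editor's note: the next hunk reworks the delay-multiplier math in
 * lpfc_modify_hba_eq_delay() and caps the number of EQ IDs placed in one
 * MODIFY_EQ_DELAY command at LPFC_MAX_EQ_DELAY_EQID_CNT.  The sketch
 * below mirrors only the multiplier arithmetic in stand-alone form;
 * DMULT_CONST here is a placeholder (the real LPFC_DMULT_CONST value is
 * not part of this diff) and the function name is invented.
 */
#include <stdint.h>
#include <stdio.h>

#define DMULT_CONST 250			/* placeholder, not the driver's value */

static uint16_t calc_eq_delay_multiplier(uint32_t imax, uint32_t nirqs)
{
	uint32_t per_eq;

	if (nirqs == 0)
		return 0;
	per_eq = imax / nirqs;		/* target interrupt rate per EQ */
	/* Out-of-range targets fall back to no coalescing (dmult = 0). */
	if (per_eq > DMULT_CONST || per_eq == 0)
		return 0;
	return DMULT_CONST / per_eq - 1;
}

int main(void)
{
	/* e.g. 800 interrupts/sec spread over 8 EQs gives multiplier 1 here */
	printf("dmult = %u\n", calc_eq_delay_multiplier(800, 8));
	return 0;
}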
@@ -13109,7 +13888,7 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset) * fails this function will return -ENXIO. **/ int -lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint32_t startq) +lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq) { struct lpfc_mbx_modify_eq_delay *eq_delay; LPFC_MBOXQ_t *mbox; @@ -13117,11 +13896,11 @@ lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint32_t startq) int cnt, rc, length, status = 0; uint32_t shdr_status, shdr_add_status; uint32_t result; - int fcp_eqidx; + int qidx; union lpfc_sli4_cfg_shdr *shdr; uint16_t dmult; - if (startq >= phba->cfg_fcp_io_channel) + if (startq >= phba->io_channel_irqs) return 0; mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); @@ -13135,23 +13914,22 @@ lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint32_t startq) eq_delay = &mbox->u.mqe.un.eq_delay; /* Calculate delay multiper from maximum interrupt per second */ - result = phba->cfg_fcp_imax / phba->cfg_fcp_io_channel; - if (result > LPFC_DMULT_CONST) + result = phba->cfg_fcp_imax / phba->io_channel_irqs; + if (result > LPFC_DMULT_CONST || result == 0) dmult = 0; else dmult = LPFC_DMULT_CONST/result - 1; cnt = 0; - for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_io_channel; - fcp_eqidx++) { - eq = phba->sli4_hba.hba_eq[fcp_eqidx]; + for (qidx = startq; qidx < phba->io_channel_irqs; qidx++) { + eq = phba->sli4_hba.hba_eq[qidx]; if (!eq) continue; eq_delay->u.request.eq[cnt].eq_id = eq->queue_id; eq_delay->u.request.eq[cnt].phase = 0; eq_delay->u.request.eq[cnt].delay_multi = dmult; cnt++; - if (cnt >= LPFC_MAX_EQ_DELAY) + if (cnt >= LPFC_MAX_EQ_DELAY_EQID_CNT) break; } eq_delay->u.request.num_eq = cnt; @@ -13359,8 +14137,10 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, switch (cq->entry_count) { default: lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0361 Unsupported CQ count. (%d)\n", - cq->entry_count); + "0361 Unsupported CQ count: " + "entry cnt %d sz %d pg cnt %d repost %d\n", + cq->entry_count, cq->entry_size, + cq->page_count, cq->entry_repost); if (cq->entry_count < 256) { status = -EINVAL; goto out; @@ -13419,6 +14199,234 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, return status; } +/** + * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ + * @phba: HBA structure that indicates port to create a queue on. + * @cqp: The queue structure array to use to create the completion queues. + * @eqp: The event queue array to bind these completion queues to. + * + * This function creates a set of completion queue, s to support MRQ + * as detailed in @cqp, on a port, + * described by @phba by sending a CREATE_CQ_SET mailbox command to the HBA. + * + * The @phba struct is used to send mailbox command to HBA. The @cq struct + * is used to get the entry count and entry size that are necessary to + * determine the number of pages to allocate and use for this queue. The @eq + * is used to indicate which event queue to bind this completion queue to. This + * function will send the CREATE_CQ_SET mailbox command to the HBA to setup the + * completion queue. This function is asynchronous and will wait for the mailbox + * command to finish before continuing. + * + * On success this function will return a zero. If unable to allocate enough + * memory this function will return -ENOMEM. If the queue create mailbox command + * fails this function will return -ENXIO. 
+ **/ +int +lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, + struct lpfc_queue **eqp, uint32_t type, uint32_t subtype) +{ + struct lpfc_queue *cq; + struct lpfc_queue *eq; + struct lpfc_mbx_cq_create_set *cq_set; + struct lpfc_dmabuf *dmabuf; + LPFC_MBOXQ_t *mbox; + int rc, length, alloclen, status = 0; + int cnt, idx, numcq, page_idx = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; + + /* sanity check on queue memory */ + numcq = phba->cfg_nvmet_mrq; + if (!cqp || !eqp || !numcq) + return -ENODEV; + if (!phba->sli4_hba.pc_sli4_params.supported) + hw_page_size = SLI4_PAGE_SIZE; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + + length = sizeof(struct lpfc_mbx_cq_create_set); + length += ((numcq * cqp[0]->page_count) * + sizeof(struct dma_address)); + alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length, + LPFC_SLI4_MBX_NEMBED); + if (alloclen < length) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "3098 Allocated DMA memory size (%d) is " + "less than the requested DMA memory size " + "(%d)\n", alloclen, length); + status = -ENOMEM; + goto out; + } + cq_set = mbox->sge_array->addr[0]; + shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr; + bf_set(lpfc_mbox_hdr_version, &shdr->request, 0); + + for (idx = 0; idx < numcq; idx++) { + cq = cqp[idx]; + eq = eqp[idx]; + if (!cq || !eq) { + status = -ENOMEM; + goto out; + } + + switch (idx) { + case 0: + bf_set(lpfc_mbx_cq_create_set_page_size, + &cq_set->u.request, + (hw_page_size / SLI4_PAGE_SIZE)); + bf_set(lpfc_mbx_cq_create_set_num_pages, + &cq_set->u.request, cq->page_count); + bf_set(lpfc_mbx_cq_create_set_evt, + &cq_set->u.request, 1); + bf_set(lpfc_mbx_cq_create_set_valid, + &cq_set->u.request, 1); + bf_set(lpfc_mbx_cq_create_set_cqe_size, + &cq_set->u.request, 0); + bf_set(lpfc_mbx_cq_create_set_num_cq, + &cq_set->u.request, numcq); + switch (cq->entry_count) { + default: + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "3118 Bad CQ count. 
(%d)\n", + cq->entry_count); + if (cq->entry_count < 256) { + status = -EINVAL; + goto out; + } + /* otherwise default to smallest (drop thru) */ + case 256: + bf_set(lpfc_mbx_cq_create_set_cqe_cnt, + &cq_set->u.request, LPFC_CQ_CNT_256); + break; + case 512: + bf_set(lpfc_mbx_cq_create_set_cqe_cnt, + &cq_set->u.request, LPFC_CQ_CNT_512); + break; + case 1024: + bf_set(lpfc_mbx_cq_create_set_cqe_cnt, + &cq_set->u.request, LPFC_CQ_CNT_1024); + break; + } + bf_set(lpfc_mbx_cq_create_set_eq_id0, + &cq_set->u.request, eq->queue_id); + break; + case 1: + bf_set(lpfc_mbx_cq_create_set_eq_id1, + &cq_set->u.request, eq->queue_id); + break; + case 2: + bf_set(lpfc_mbx_cq_create_set_eq_id2, + &cq_set->u.request, eq->queue_id); + break; + case 3: + bf_set(lpfc_mbx_cq_create_set_eq_id3, + &cq_set->u.request, eq->queue_id); + break; + case 4: + bf_set(lpfc_mbx_cq_create_set_eq_id4, + &cq_set->u.request, eq->queue_id); + break; + case 5: + bf_set(lpfc_mbx_cq_create_set_eq_id5, + &cq_set->u.request, eq->queue_id); + break; + case 6: + bf_set(lpfc_mbx_cq_create_set_eq_id6, + &cq_set->u.request, eq->queue_id); + break; + case 7: + bf_set(lpfc_mbx_cq_create_set_eq_id7, + &cq_set->u.request, eq->queue_id); + break; + case 8: + bf_set(lpfc_mbx_cq_create_set_eq_id8, + &cq_set->u.request, eq->queue_id); + break; + case 9: + bf_set(lpfc_mbx_cq_create_set_eq_id9, + &cq_set->u.request, eq->queue_id); + break; + case 10: + bf_set(lpfc_mbx_cq_create_set_eq_id10, + &cq_set->u.request, eq->queue_id); + break; + case 11: + bf_set(lpfc_mbx_cq_create_set_eq_id11, + &cq_set->u.request, eq->queue_id); + break; + case 12: + bf_set(lpfc_mbx_cq_create_set_eq_id12, + &cq_set->u.request, eq->queue_id); + break; + case 13: + bf_set(lpfc_mbx_cq_create_set_eq_id13, + &cq_set->u.request, eq->queue_id); + break; + case 14: + bf_set(lpfc_mbx_cq_create_set_eq_id14, + &cq_set->u.request, eq->queue_id); + break; + case 15: + bf_set(lpfc_mbx_cq_create_set_eq_id15, + &cq_set->u.request, eq->queue_id); + break; + } + + /* link the cq onto the parent eq child list */ + list_add_tail(&cq->list, &eq->child_list); + /* Set up completion queue's type and subtype */ + cq->type = type; + cq->subtype = subtype; + cq->assoc_qid = eq->queue_id; + cq->host_index = 0; + cq->hba_index = 0; + + rc = 0; + list_for_each_entry(dmabuf, &cq->page_list, list) { + memset(dmabuf->virt, 0, hw_page_size); + cnt = page_idx + dmabuf->buffer_tag; + cq_set->u.request.page[cnt].addr_lo = + putPaddrLow(dmabuf->phys); + cq_set->u.request.page[cnt].addr_hi = + putPaddrHigh(dmabuf->phys); + rc++; + } + page_idx += rc; + } + + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + + /* The IOCTL status is embedded in the mailbox subheader. */ + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3119 CQ_CREATE_SET mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + status = -ENXIO; + goto out; + } + rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response); + if (rc == 0xFFFF) { + status = -ENXIO; + goto out; + } + + for (idx = 0; idx < numcq; idx++) { + cq = cqp[idx]; + cq->queue_id = rc + idx; + } + +out: + lpfc_sli4_mbox_cmd_free(phba, mbox); + return status; +} + /** * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration * @phba: HBA structure that indicates port to create a queue on. 
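/*
 * Editor's note: lpfc_cq_create_set() above (and lpfc_mrq_create() later
 * in this patch) fills the mailbox page array by splitting each page's
 * 64-bit DMA address into 32-bit low/high words via putPaddrLow() and
 * putPaddrHigh().  The tiny stand-alone sketch below shows that split;
 * the struct and function names are invented for the example and are not
 * lpfc symbols.
 */
#include <stdint.h>
#include <stdio.h>

struct dma_address_words {
	uint32_t addr_lo;
	uint32_t addr_hi;
};

static struct dma_address_words split_dma_addr(uint64_t phys)
{
	struct dma_address_words w = {
		.addr_lo = (uint32_t)(phys & 0xffffffffu),
		.addr_hi = (uint32_t)(phys >> 32),
	};

	return w;
}

int main(void)
{
	struct dma_address_words w = split_dma_addr(0x123456789abcdef0ULL);

	/* prints lo=0x9abcdef0 hi=0x12345678 */
	printf("lo=0x%08x hi=0x%08x\n", w.addr_lo, w.addr_hi);
	return 0;
}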
@@ -13722,7 +14730,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, LPFC_WQ_WQE_SIZE_128); bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1, - (PAGE_SIZE/SLI4_PAGE_SIZE)); + LPFC_WQ_PAGE_SIZE_4096); page = wq_create->u.request_1.page; break; } @@ -13748,8 +14756,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, LPFC_WQ_WQE_SIZE_128); break; } - bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1, - (PAGE_SIZE/SLI4_PAGE_SIZE)); + bf_set(lpfc_mbx_wq_create_page_size, + &wq_create->u.request_1, + LPFC_WQ_PAGE_SIZE_4096); page = wq_create->u.request_1.page; break; default: @@ -13825,6 +14834,11 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, wq->db_format = LPFC_DB_LIST_FORMAT; wq->db_regaddr = phba->sli4_hba.WQDBregaddr; } + wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL); + if (wq->pring == NULL) { + status = -ENOMEM; + goto out; + } wq->type = LPFC_WQ; wq->assoc_qid = cq->queue_id; wq->subtype = subtype; @@ -13935,7 +14949,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, LPFC_RQE_SIZE_8); bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context, - (PAGE_SIZE/SLI4_PAGE_SIZE)); + LPFC_RQ_PAGE_SIZE_4096); } else { switch (hrq->entry_count) { default: @@ -14143,6 +15157,197 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, return status; } +/** + * lpfc_mrq_create - Create MRQ Receive Queues on the HBA + * @phba: HBA structure that indicates port to create a queue on. + * @hrqp: The queue structure array to use to create the header receive queues. + * @drqp: The queue structure array to use to create the data receive queues. + * @cqp: The completion queue array to bind these receive queues to. + * + * This function creates a receive buffer queue pair , as detailed in @hrq and + * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command + * to the HBA. + * + * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq + * struct is used to get the entry count that is necessary to determine the + * number of pages to use for this queue. The @cq is used to indicate which + * completion queue to bind received buffers that are posted to these queues to. + * This function will send the RQ_CREATE mailbox command to the HBA to setup the + * receive queue pair. This function is asynchronous and will wait for the + * mailbox command to finish before continuing. + * + * On success this function will return a zero. If unable to allocate enough + * memory this function will return -ENOMEM. If the queue create mailbox command + * fails this function will return -ENXIO. 
+ **/ +int +lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp, + struct lpfc_queue **drqp, struct lpfc_queue **cqp, + uint32_t subtype) +{ + struct lpfc_queue *hrq, *drq, *cq; + struct lpfc_mbx_rq_create_v2 *rq_create; + struct lpfc_dmabuf *dmabuf; + LPFC_MBOXQ_t *mbox; + int rc, length, alloclen, status = 0; + int cnt, idx, numrq, page_idx = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; + + numrq = phba->cfg_nvmet_mrq; + /* sanity check on array memory */ + if (!hrqp || !drqp || !cqp || !numrq) + return -ENODEV; + if (!phba->sli4_hba.pc_sli4_params.supported) + hw_page_size = SLI4_PAGE_SIZE; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + + length = sizeof(struct lpfc_mbx_rq_create_v2); + length += ((2 * numrq * hrqp[0]->page_count) * + sizeof(struct dma_address)); + + alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length, + LPFC_SLI4_MBX_NEMBED); + if (alloclen < length) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "3099 Allocated DMA memory size (%d) is " + "less than the requested DMA memory size " + "(%d)\n", alloclen, length); + status = -ENOMEM; + goto out; + } + + + + rq_create = mbox->sge_array->addr[0]; + shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr; + + bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2); + cnt = 0; + + for (idx = 0; idx < numrq; idx++) { + hrq = hrqp[idx]; + drq = drqp[idx]; + cq = cqp[idx]; + + /* sanity check on queue memory */ + if (!hrq || !drq || !cq) { + status = -ENODEV; + goto out; + } + + if (hrq->entry_count != drq->entry_count) { + status = -EINVAL; + goto out; + } + + if (idx == 0) { + bf_set(lpfc_mbx_rq_create_num_pages, + &rq_create->u.request, + hrq->page_count); + bf_set(lpfc_mbx_rq_create_rq_cnt, + &rq_create->u.request, (numrq * 2)); + bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request, + 1); + bf_set(lpfc_rq_context_base_cq, + &rq_create->u.request.context, + cq->queue_id); + bf_set(lpfc_rq_context_data_size, + &rq_create->u.request.context, + LPFC_DATA_BUF_SIZE); + bf_set(lpfc_rq_context_hdr_size, + &rq_create->u.request.context, + LPFC_HDR_BUF_SIZE); + bf_set(lpfc_rq_context_rqe_count_1, + &rq_create->u.request.context, + hrq->entry_count); + bf_set(lpfc_rq_context_rqe_size, + &rq_create->u.request.context, + LPFC_RQE_SIZE_8); + bf_set(lpfc_rq_context_page_size, + &rq_create->u.request.context, + (PAGE_SIZE/SLI4_PAGE_SIZE)); + } + rc = 0; + list_for_each_entry(dmabuf, &hrq->page_list, list) { + memset(dmabuf->virt, 0, hw_page_size); + cnt = page_idx + dmabuf->buffer_tag; + rq_create->u.request.page[cnt].addr_lo = + putPaddrLow(dmabuf->phys); + rq_create->u.request.page[cnt].addr_hi = + putPaddrHigh(dmabuf->phys); + rc++; + } + page_idx += rc; + + rc = 0; + list_for_each_entry(dmabuf, &drq->page_list, list) { + memset(dmabuf->virt, 0, hw_page_size); + cnt = page_idx + dmabuf->buffer_tag; + rq_create->u.request.page[cnt].addr_lo = + putPaddrLow(dmabuf->phys); + rq_create->u.request.page[cnt].addr_hi = + putPaddrHigh(dmabuf->phys); + rc++; + } + page_idx += rc; + + hrq->db_format = LPFC_DB_RING_FORMAT; + hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; + hrq->type = LPFC_HRQ; + hrq->assoc_qid = cq->queue_id; + hrq->subtype = subtype; + hrq->host_index = 0; + hrq->hba_index = 0; + + drq->db_format = LPFC_DB_RING_FORMAT; + drq->db_regaddr = phba->sli4_hba.RQDBregaddr; + drq->type = LPFC_DRQ; + 
drq->assoc_qid = cq->queue_id; + drq->subtype = subtype; + drq->host_index = 0; + drq->hba_index = 0; + + list_add_tail(&hrq->list, &cq->child_list); + list_add_tail(&drq->list, &cq->child_list); + } + + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + /* The IOCTL status is embedded in the mailbox subheader. */ + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3120 RQ_CREATE mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + status = -ENXIO; + goto out; + } + rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); + if (rc == 0xFFFF) { + status = -ENXIO; + goto out; + } + + /* Initialize all RQs with associated queue id */ + for (idx = 0; idx < numrq; idx++) { + hrq = hrqp[idx]; + hrq->queue_id = rc + (2 * idx); + drq = drqp[idx]; + drq->queue_id = rc + (2 * idx) + 1; + } + +out: + lpfc_sli4_mbox_cmd_free(phba, mbox); + return status; +} + /** * lpfc_eq_destroy - Destroy an event Queue on the HBA * @eq: The queue structure associated with the queue to destroy. @@ -14609,7 +15814,7 @@ lpfc_sli4_next_xritag(struct lpfc_hba *phba) } /** - * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port. + * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port. * @phba: pointer to lpfc hba data structure. * @post_sgl_list: pointer to els sgl entry list. * @count: number of els sgl entries on the list. @@ -14620,7 +15825,7 @@ lpfc_sli4_next_xritag(struct lpfc_hba *phba) * stopped. **/ static int -lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba, +lpfc_sli4_post_sgl_list(struct lpfc_hba *phba, struct list_head *post_sgl_list, int post_cnt) { @@ -14636,14 +15841,15 @@ lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba, uint32_t shdr_status, shdr_add_status; union lpfc_sli4_cfg_shdr *shdr; - reqlen = phba->sli4_hba.els_xri_cnt * sizeof(struct sgl_page_pairs) + + reqlen = post_cnt * sizeof(struct sgl_page_pairs) + sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); if (reqlen > SLI4_PAGE_SIZE) { - lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2559 Block sgl registration required DMA " "size (%d) great than a page\n", reqlen); return -ENOMEM; } + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; @@ -14687,8 +15893,9 @@ lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba, /* Complete initialization and perform endian conversion. 
*/ bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); - bf_set(lpfc_post_sgl_pages_xricnt, sgl, phba->sli4_hba.els_xri_cnt); + bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt); sgl->word0 = cpu_to_le32(sgl->word0); + if (!phba->sli4_hba.intr_enable) rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); else { @@ -14823,6 +16030,9 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, return rc; } +static char *lpfc_rctl_names[] = FC_RCTL_NAMES_INIT; +static char *lpfc_type_names[] = FC_TYPE_NAMES_INIT; + /** * lpfc_fc_frame_check - Check that this frame is a valid frame to handle * @phba: pointer to lpfc_hba struct that the frame was received on @@ -14837,8 +16047,6 @@ static int lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) { /* make rctl_names static to save stack space */ - static char *rctl_names[] = FC_RCTL_NAMES_INIT; - char *type_names[] = FC_TYPE_NAMES_INIT; struct fc_vft_header *fc_vft_hdr; uint32_t *header = (uint32_t *) fc_hdr; @@ -14883,6 +16091,7 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) case FC_TYPE_ELS: case FC_TYPE_FCP: case FC_TYPE_CT: + case FC_TYPE_NVME: break; case FC_TYPE_IP: case FC_TYPE_ILS: @@ -14893,8 +16102,8 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "2538 Received frame rctl:%s (x%x), type:%s (x%x), " "frame Data:%08x %08x %08x %08x %08x %08x %08x\n", - rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl, - type_names[fc_hdr->fh_type], fc_hdr->fh_type, + lpfc_rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl, + lpfc_type_names[fc_hdr->fh_type], fc_hdr->fh_type, be32_to_cpu(header[0]), be32_to_cpu(header[1]), be32_to_cpu(header[2]), be32_to_cpu(header[3]), be32_to_cpu(header[4]), be32_to_cpu(header[5]), @@ -14903,8 +16112,8 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) drop: lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, "2539 Dropped frame rctl:%s type:%s\n", - rctl_names[fc_hdr->fh_r_ctl], - type_names[fc_hdr->fh_type]); + lpfc_rctl_names[fc_hdr->fh_r_ctl], + lpfc_type_names[fc_hdr->fh_type]); return 1; } @@ -14940,14 +16149,11 @@ lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr) **/ static struct lpfc_vport * lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, - uint16_t fcfi) + uint16_t fcfi, uint32_t did) { struct lpfc_vport **vports; struct lpfc_vport *vport = NULL; int i; - uint32_t did = (fc_hdr->fh_d_id[0] << 16 | - fc_hdr->fh_d_id[1] << 8 | - fc_hdr->fh_d_id[2]); if (did == Fabric_DID) return phba->pport; @@ -14956,7 +16162,7 @@ lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, return phba->pport; vports = lpfc_create_vport_work_array(phba); - if (vports != NULL) + if (vports != NULL) { for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { if (phba->fcf.fcfi == fcfi && vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) && @@ -14965,6 +16171,7 @@ lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, break; } } + } lpfc_destroy_vport_work_array(phba, vports); return vport; } @@ -15394,7 +16601,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, * a BA_RJT. 
*/ if ((fctl & FC_FC_EX_CTX) && - (lxri > lpfc_sli4_get_els_iocb_cnt(phba))) { + (lxri > lpfc_sli4_get_iocb_cnt(phba))) { icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); @@ -15571,6 +16778,7 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) /* Initialize the first IOCB. */ first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0; first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS; + first_iocbq->vport = vport; /* Check FC Header to see what TYPE of frame we are rcv'ing */ if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) { @@ -15683,7 +16891,7 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, return; } if (!lpfc_complete_unsol_iocb(phba, - &phba->sli.ring[LPFC_ELS_RING], + phba->sli4_hba.els_wq->pring, iocbq, fc_hdr->fh_r_ctl, fc_hdr->fh_type)) lpfc_printf_log(phba, KERN_ERR, LOG_SLI, @@ -15708,8 +16916,7 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, * This function is called with no lock held. This function processes all * the received buffers and gives it to upper layers when a received buffer * indicates that it is the final frame in the sequence. The interrupt - * service routine processes received buffers at interrupt contexts and adds - * received dma buffers to the rb_pend_list queue and signals the worker thread. + * service routine processes received buffers at interrupt contexts. * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the * appropriate receive function when the final frame in a sequence is received. **/ @@ -15725,11 +16932,13 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, /* Process each received buffer */ fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; + /* check to see if this a valid type of frame */ if (lpfc_fc_frame_check(phba, fc_hdr)) { lpfc_in_buf_free(phba, &dmabuf->dbuf); return; } + if ((bf_get(lpfc_cqe_code, &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1)) fcfi = bf_get(lpfc_rcqe_fcf_id_v1, @@ -15738,16 +16947,16 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->cq_event.cqe.rcqe_cmpl); - vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi); + /* d_id this frame is directed to */ + did = sli4_did_from_fc_hdr(fc_hdr); + + vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did); if (!vport) { /* throw out the frame */ lpfc_in_buf_free(phba, &dmabuf->dbuf); return; } - /* d_id this frame is directed to */ - did = sli4_did_from_fc_hdr(fc_hdr); - /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */ if (!(vport->vpi_state & LPFC_VPI_REGISTERED) && (did != Fabric_DID)) { @@ -17225,7 +18434,7 @@ uint32_t lpfc_drain_txq(struct lpfc_hba *phba) { LIST_HEAD(completions); - struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; + struct lpfc_sli_ring *pring; struct lpfc_iocbq *piocbq = NULL; unsigned long iflags = 0; char *fail_msg = NULL; @@ -17234,6 +18443,8 @@ lpfc_drain_txq(struct lpfc_hba *phba) union lpfc_wqe *wqe = (union lpfc_wqe *) &wqe128; uint32_t txq_cnt = 0; + pring = lpfc_phba_elsring(phba); + spin_lock_irqsave(&pring->ring_lock, iflags); list_for_each_entry(piocbq, &pring->txq, list) { txq_cnt++; @@ -17255,7 +18466,7 @@ lpfc_drain_txq(struct lpfc_hba *phba) txq_cnt); break; } - sglq = __lpfc_sli_get_sglq(phba, piocbq); + sglq = __lpfc_sli_get_els_sglq(phba, piocbq); if (!sglq) { __lpfc_sli_ringtx_put(phba, pring, piocbq); spin_unlock_irqrestore(&pring->ring_lock, iflags); @@ -17295,3 +18506,217 @@ 
lpfc_drain_txq(struct lpfc_hba *phba) return txq_cnt; } + +/** + * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl. + * @phba: Pointer to HBA context object. + * @pwqe: Pointer to command WQE. + * @sglq: Pointer to the scatter gather queue object. + * + * This routine converts the bpl or bde that is in the WQE + * to a sgl list for the sli4 hardware. The physical address + * of the bpl/bde is converted back to a virtual address. + * If the WQE contains a BPL then the list of BDE's is + * converted to sli4_sge's. If the WQE contains a single + * BDE then it is converted to a single sli_sge. + * The WQE is still in cpu endianness so the contents of + * the bpl can be used without byte swapping. + * + * Returns valid XRI = Success, NO_XRI = Failure. + */ +static uint16_t +lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq, + struct lpfc_sglq *sglq) +{ + uint16_t xritag = NO_XRI; + struct ulp_bde64 *bpl = NULL; + struct ulp_bde64 bde; + struct sli4_sge *sgl = NULL; + struct lpfc_dmabuf *dmabuf; + union lpfc_wqe *wqe; + int numBdes = 0; + int i = 0; + uint32_t offset = 0; /* accumulated offset in the sg request list */ + int inbound = 0; /* number of sg reply entries inbound from firmware */ + uint32_t cmd; + + if (!pwqeq || !sglq) + return xritag; + + sgl = (struct sli4_sge *)sglq->sgl; + wqe = &pwqeq->wqe; + pwqeq->iocb.ulpIoTag = pwqeq->iotag; + + cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com); + if (cmd == CMD_XMIT_BLS_RSP64_WQE) + return sglq->sli4_xritag; + numBdes = pwqeq->rsvd2; + if (numBdes) { + /* The addrHigh and addrLow fields within the WQE + * have not been byteswapped yet so there is no + * need to swap them back. + */ + if (pwqeq->context3) + dmabuf = (struct lpfc_dmabuf *)pwqeq->context3; + else + return xritag; + + bpl = (struct ulp_bde64 *)dmabuf->virt; + if (!bpl) + return xritag; + + for (i = 0; i < numBdes; i++) { + /* Should already be byte swapped. */ + sgl->addr_hi = bpl->addrHigh; + sgl->addr_lo = bpl->addrLow; + + sgl->word2 = le32_to_cpu(sgl->word2); + if ((i+1) == numBdes) + bf_set(lpfc_sli4_sge_last, sgl, 1); + else + bf_set(lpfc_sli4_sge_last, sgl, 0); + /* swap the size field back to the cpu so we + * can assign it to the sgl. + */ + bde.tus.w = le32_to_cpu(bpl->tus.w); + sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); + /* The offsets in the sgl need to be accumulated + * separately for the request and reply lists. + * The request is always first, the reply follows. + */ + switch (cmd) { + case CMD_GEN_REQUEST64_WQE: + /* add up the reply sg entries */ + if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I) + inbound++; + /* first inbound? reset the offset */ + if (inbound == 1) + offset = 0; + bf_set(lpfc_sli4_sge_offset, sgl, offset); + bf_set(lpfc_sli4_sge_type, sgl, + LPFC_SGE_TYPE_DATA); + offset += bde.tus.f.bdeSize; + break; + case CMD_FCP_TRSP64_WQE: + bf_set(lpfc_sli4_sge_offset, sgl, 0); + bf_set(lpfc_sli4_sge_type, sgl, + LPFC_SGE_TYPE_DATA); + break; + case CMD_FCP_TSEND64_WQE: + case CMD_FCP_TRECEIVE64_WQE: + bf_set(lpfc_sli4_sge_type, sgl, + bpl->tus.f.bdeFlags); + if (i < 3) + offset = 0; + else + offset += bde.tus.f.bdeSize; + bf_set(lpfc_sli4_sge_offset, sgl, offset); + break; + } + sgl->word2 = cpu_to_le32(sgl->word2); + bpl++; + sgl++; + } + } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) { + /* The addrHigh and addrLow fields of the BDE have not + * been byteswapped yet so they need to be swapped + * before putting them in the sgl. 
+ */ + sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh); + sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow); + sgl->word2 = le32_to_cpu(sgl->word2); + bf_set(lpfc_sli4_sge_last, sgl, 1); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize); + } + return sglq->sli4_xritag; +} + +/** + * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE) + * @phba: Pointer to HBA context object. + * @ring_number: Base sli ring number + * @pwqe: Pointer to command WQE. + **/ +int +lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number, + struct lpfc_iocbq *pwqe) +{ + union lpfc_wqe *wqe = &pwqe->wqe; + struct lpfc_nvmet_rcv_ctx *ctxp; + struct lpfc_queue *wq; + struct lpfc_sglq *sglq; + struct lpfc_sli_ring *pring; + unsigned long iflags; + + /* NVME_LS and NVME_LS ABTS requests. */ + if (pwqe->iocb_flag & LPFC_IO_NVME_LS) { + pring = phba->sli4_hba.nvmels_wq->pring; + spin_lock_irqsave(&pring->ring_lock, iflags); + sglq = __lpfc_sli_get_els_sglq(phba, pwqe); + if (!sglq) { + spin_unlock_irqrestore(&pring->ring_lock, iflags); + return WQE_BUSY; + } + pwqe->sli4_lxritag = sglq->sli4_lxritag; + pwqe->sli4_xritag = sglq->sli4_xritag; + if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) { + spin_unlock_irqrestore(&pring->ring_lock, iflags); + return WQE_ERROR; + } + bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com, + pwqe->sli4_xritag); + if (lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe)) { + spin_unlock_irqrestore(&pring->ring_lock, iflags); + return WQE_ERROR; + } + lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); + spin_unlock_irqrestore(&pring->ring_lock, iflags); + return 0; + } + + /* NVME_FCREQ and NVME_ABTS requests */ + if (pwqe->iocb_flag & LPFC_IO_NVME) { + /* Get the IO distribution (hba_wqidx) for WQ assignment. */ + pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring; + + spin_lock_irqsave(&pring->ring_lock, iflags); + wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]; + bf_set(wqe_cqid, &wqe->generic.wqe_com, + phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id); + if (lpfc_sli4_wq_put(wq, wqe)) { + spin_unlock_irqrestore(&pring->ring_lock, iflags); + return WQE_ERROR; + } + lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); + spin_unlock_irqrestore(&pring->ring_lock, iflags); + return 0; + } + + /* NVMET requests */ + if (pwqe->iocb_flag & LPFC_IO_NVMET) { + /* Get the IO distribution (hba_wqidx) for WQ assignment. */ + pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring; + + spin_lock_irqsave(&pring->ring_lock, iflags); + ctxp = pwqe->context2; + sglq = ctxp->rqb_buffer->sglq; + if (pwqe->sli4_xritag == NO_XRI) { + pwqe->sli4_lxritag = sglq->sli4_lxritag; + pwqe->sli4_xritag = sglq->sli4_xritag; + } + bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com, + pwqe->sli4_xritag); + wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]; + bf_set(wqe_cqid, &wqe->generic.wqe_com, + phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id); + if (lpfc_sli4_wq_put(wq, wqe)) { + spin_unlock_irqrestore(&pring->ring_lock, iflags); + return WQE_ERROR; + } + lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); + spin_unlock_irqrestore(&pring->ring_lock, iflags); + return 0; + } + return WQE_ERROR; +} diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index 74227a28bd569e..9085306ddd785d 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. 
* + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * @@ -54,9 +56,16 @@ struct lpfc_iocbq { uint16_t iotag; /* pre-assigned IO tag */ uint16_t sli4_lxritag; /* logical pre-assigned XRI. */ uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */ + uint16_t hba_wqidx; /* index to HBA work queue */ struct lpfc_cq_event cq_event; + struct lpfc_wcqe_complete wcqe_cmpl; /* WQE cmpl */ + uint64_t isr_timestamp; - IOCB_t iocb; /* IOCB cmd */ + /* Be careful here */ + union lpfc_wqe wqe; /* WQE cmd */ + IOCB_t iocb; /* For IOCB cmd or if we want 128 byte WQE */ + + uint8_t rsvd2; uint8_t priority; /* OAS priority */ uint8_t retry; /* retry counter for IOCB cmd - if needed */ uint32_t iocb_flag; @@ -82,9 +91,13 @@ struct lpfc_iocbq { #define LPFC_IO_OAS 0x10000 /* OAS FCP IO */ #define LPFC_IO_FOF 0x20000 /* FOF FCP IO */ #define LPFC_IO_LOOPBACK 0x40000 /* Loopback IO */ +#define LPFC_PRLI_NVME_REQ 0x80000 /* This is an NVME PRLI. */ +#define LPFC_PRLI_FCP_REQ 0x100000 /* This is an FCP PRLI. */ +#define LPFC_IO_NVME 0x200000 /* NVME FCP command */ +#define LPFC_IO_NVME_LS 0x400000 /* NVME LS command */ +#define LPFC_IO_NVMET 0x800000 /* NVMET command */ uint32_t drvrTimeout; /* driver timeout in seconds */ - uint32_t fcp_wqidx; /* index to FCP work queue */ struct lpfc_vport *vport;/* virtual port pointer */ void *context1; /* caller context information */ void *context2; /* caller context information */ @@ -97,12 +110,14 @@ struct lpfc_iocbq { struct lpfc_node_rrq *rrq; } context_un; - void (*fabric_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, + void (*fabric_iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); - void (*wait_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, + void (*wait_iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); - void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, + void (*iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); + void (*wqe_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *, + struct lpfc_wcqe_complete *); }; #define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */ @@ -112,6 +127,14 @@ struct lpfc_iocbq { #define IOCB_ERROR 2 #define IOCB_TIMEDOUT 3 +#define SLI_WQE_RET_WQE 1 /* Return WQE if cmd ring full */ + +#define WQE_SUCCESS 0 +#define WQE_BUSY 1 +#define WQE_ERROR 2 +#define WQE_TIMEDOUT 3 +#define WQE_ABORTED 4 + #define LPFC_MBX_WAKE 1 #define LPFC_MBX_IMED_UNREG 2 @@ -297,12 +320,9 @@ struct lpfc_sli { #define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */ #define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */ #define LPFC_SLI_ASYNC_MBX_BLK 0x2000 /* Async mailbox is blocked */ +#define LPFC_SLI_SUPPRESS_RSP 0x4000 /* Suppress RSP feature is supported */ - struct lpfc_sli_ring *ring; - int fcp_ring; /* ring used for FCP initiator commands */ - int next_ring; - - int extra_ring; /* extra ring used for other protocols */ + struct lpfc_sli_ring *sli3_ring; struct lpfc_sli_stat slistat; /* SLI statistical info */ struct list_head mboxq; diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 0b88b5703e0f10..710458cf11d62f 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2009-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * @@ -35,9 +37,10 @@ #define LPFC_NEMBED_MBOX_SGL_CNT 254 /* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */ -#define LPFC_FCP_IO_CHAN_DEF 4 -#define LPFC_FCP_IO_CHAN_MIN 1 -#define LPFC_FCP_IO_CHAN_MAX 16 +#define LPFC_HBA_IO_CHAN_MIN 0 +#define LPFC_HBA_IO_CHAN_MAX 32 +#define LPFC_FCP_IO_CHAN_DEF 4 +#define LPFC_NVME_IO_CHAN_DEF 0 /* Number of channels used for Flash Optimized Fabric (FOF) operations */ @@ -107,6 +110,9 @@ enum lpfc_sli4_queue_subtype { LPFC_MBOX, LPFC_FCP, LPFC_ELS, + LPFC_NVME, + LPFC_NVMET, + LPFC_NVME_LS, LPFC_USOL }; @@ -125,25 +131,41 @@ union sli4_qe { struct lpfc_rqe *rqe; }; +/* RQ buffer list */ +struct lpfc_rqb { + uint16_t entry_count; /* Current number of RQ slots */ + uint16_t buffer_count; /* Current number of buffers posted */ + struct list_head rqb_buffer_list; /* buffers assigned to this HBQ */ + /* Callback for HBQ buffer allocation */ + struct rqb_dmabuf *(*rqb_alloc_buffer)(struct lpfc_hba *); + /* Callback for HBQ buffer free */ + void (*rqb_free_buffer)(struct lpfc_hba *, + struct rqb_dmabuf *); +}; + struct lpfc_queue { struct list_head list; + struct list_head wq_list; enum lpfc_sli4_queue_type type; enum lpfc_sli4_queue_subtype subtype; struct lpfc_hba *phba; struct list_head child_list; + struct list_head page_list; + struct list_head sgl_list; uint32_t entry_count; /* Number of entries to support on the queue */ uint32_t entry_size; /* Size of each queue entry. 
*/ uint32_t entry_repost; /* Count of entries before doorbell is rung */ #define LPFC_QUEUE_MIN_REPOST 8 uint32_t queue_id; /* Queue ID assigned by the hardware */ uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */ - struct list_head page_list; uint32_t page_count; /* Number of pages allocated for this queue */ uint32_t host_index; /* The host's index for putting or getting */ uint32_t hba_index; /* The last known hba index for get or put */ struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */ + struct lpfc_rqb *rqbp; /* ptr to RQ buffers */ + uint16_t sgl_list_cnt; uint16_t db_format; #define LPFC_DB_RING_FORMAT 0x01 #define LPFC_DB_LIST_FORMAT 0x02 @@ -176,6 +198,8 @@ struct lpfc_queue { #define RQ_buf_trunc q_cnt_3 #define RQ_rcv_buf q_cnt_4 + uint64_t isr_timestamp; + struct lpfc_queue *assoc_qp; union sli4_qe qe[1]; /* array to index entries (must be last) */ }; @@ -338,6 +362,7 @@ struct lpfc_bmbx { #define LPFC_CQE_DEF_COUNT 1024 #define LPFC_WQE_DEF_COUNT 256 #define LPFC_WQE128_DEF_COUNT 128 +#define LPFC_WQE128_MAX_COUNT 256 #define LPFC_MQE_DEF_COUNT 16 #define LPFC_RQE_DEF_COUNT 512 @@ -379,10 +404,14 @@ struct lpfc_max_cfg_param { struct lpfc_hba; /* SLI4 HBA multi-fcp queue handler struct */ -struct lpfc_fcp_eq_hdl { +struct lpfc_hba_eq_hdl { uint32_t idx; struct lpfc_hba *phba; - atomic_t fcp_eq_in_use; + atomic_t hba_eq_in_use; + struct cpumask *cpumask; + /* CPU affinitsed to or 0xffffffff if multiple */ + uint32_t cpu; +#define LPFC_MULTI_CPU_AFFINITY 0xffffffff }; /* Port Capabilities for SLI4 Parameters */ @@ -427,6 +456,7 @@ struct lpfc_pc_sli4_params { uint8_t wqsize; #define LPFC_WQ_SZ64_SUPPORT 1 #define LPFC_WQ_SZ128_SUPPORT 2 + uint8_t wqpcnt; }; struct lpfc_iov { @@ -445,7 +475,7 @@ struct lpfc_sli4_lnk_info { uint8_t optic_state; }; -#define LPFC_SLI4_HANDLER_CNT (LPFC_FCP_IO_CHAN_MAX+ \ +#define LPFC_SLI4_HANDLER_CNT (LPFC_HBA_IO_CHAN_MAX+ \ LPFC_FOF_IO_CHAN_NUM) #define LPFC_SLI4_HANDLER_NAME_SZ 16 @@ -515,23 +545,34 @@ struct lpfc_sli4_hba { uint32_t ue_to_rp; struct lpfc_register sli_intf; struct lpfc_pc_sli4_params pc_sli4_params; - struct msix_entry *msix_entries; uint8_t handler_name[LPFC_SLI4_HANDLER_CNT][LPFC_SLI4_HANDLER_NAME_SZ]; - struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */ + struct lpfc_hba_eq_hdl *hba_eq_hdl; /* HBA per-WQ handle */ /* Pointers to the constructed SLI4 queues */ - struct lpfc_queue **hba_eq;/* Event queues for HBA */ - struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */ - struct lpfc_queue **fcp_wq;/* Fast-path FCP work queue */ + struct lpfc_queue **hba_eq; /* Event queues for HBA */ + struct lpfc_queue **fcp_cq; /* Fast-path FCP compl queue */ + struct lpfc_queue **nvme_cq; /* Fast-path NVME compl queue */ + struct lpfc_queue **nvmet_cqset; /* Fast-path NVMET CQ Set queues */ + struct lpfc_queue **nvmet_mrq_hdr; /* Fast-path NVMET hdr MRQs */ + struct lpfc_queue **nvmet_mrq_data; /* Fast-path NVMET data MRQs */ + struct lpfc_queue **fcp_wq; /* Fast-path FCP work queue */ + struct lpfc_queue **nvme_wq; /* Fast-path NVME work queue */ uint16_t *fcp_cq_map; + uint16_t *nvme_cq_map; + struct list_head lpfc_wq_list; struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */ struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */ + struct lpfc_queue *nvmels_cq; /* NVME LS complete queue */ struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */ struct lpfc_queue *els_wq; /* Slow-path ELS work queue */ + struct lpfc_queue *nvmels_wq; /* NVME LS work queue */ 
struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */ struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */ + struct lpfc_name wwnn; + struct lpfc_name wwpn; + uint32_t fw_func_mode; /* FW function protocol mode */ uint32_t ulp0_mode; /* ULP0 protocol mode */ uint32_t ulp1_mode; /* ULP1 protocol mode */ @@ -568,14 +609,20 @@ struct lpfc_sli4_hba { uint16_t rpi_hdrs_in_use; /* must post rpi hdrs if set. */ uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */ uint16_t next_rpi; + uint16_t nvme_xri_max; + uint16_t nvme_xri_cnt; + uint16_t nvme_xri_start; uint16_t scsi_xri_max; uint16_t scsi_xri_cnt; - uint16_t els_xri_cnt; uint16_t scsi_xri_start; - struct list_head lpfc_free_sgl_list; - struct list_head lpfc_sgl_list; + uint16_t els_xri_cnt; + uint16_t nvmet_xri_cnt; + struct list_head lpfc_els_sgl_list; struct list_head lpfc_abts_els_sgl_list; + struct list_head lpfc_nvmet_sgl_list; + struct list_head lpfc_abts_nvmet_sgl_list; struct list_head lpfc_abts_scsi_buf_list; + struct list_head lpfc_abts_nvme_buf_list; struct lpfc_sglq **lpfc_sglq_active_list; struct list_head lpfc_rpi_hdr_list; unsigned long *rpi_bmask; @@ -595,6 +642,7 @@ struct lpfc_sli4_hba { struct list_head sp_asynce_work_queue; struct list_head sp_fcp_xri_aborted_work_queue; struct list_head sp_els_xri_aborted_work_queue; + struct list_head sp_nvme_xri_aborted_work_queue; struct list_head sp_unsol_work_queue; struct lpfc_sli4_link link_state; struct lpfc_sli4_lnk_info lnk_info; @@ -602,8 +650,10 @@ struct lpfc_sli4_hba { #define LPFC_SLI4_PPNAME_NON 0 #define LPFC_SLI4_PPNAME_GET 1 struct lpfc_iov iov; + spinlock_t abts_nvme_buf_list_lock; /* list of aborted SCSI IOs */ spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ - spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */ + spinlock_t sgl_list_lock; /* list of aborted els IOs */ + spinlock_t nvmet_io_lock; uint32_t physical_port; /* CPU to vector mapping information */ @@ -611,11 +661,14 @@ struct lpfc_sli4_hba { uint16_t num_online_cpu; uint16_t num_present_cpu; uint16_t curr_disp_cpu; + + uint16_t nvmet_mrq_post_idx; }; enum lpfc_sge_type { GEN_BUFF_TYPE, - SCSI_BUFF_TYPE + SCSI_BUFF_TYPE, + NVMET_BUFF_TYPE }; enum lpfc_sgl_state { @@ -694,15 +747,21 @@ struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t, uint32_t); void lpfc_sli4_queue_free(struct lpfc_queue *); int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t); -int lpfc_modify_fcp_eq_delay(struct lpfc_hba *, uint32_t); +int lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq); int lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *, struct lpfc_queue *, uint32_t, uint32_t); +int lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, + struct lpfc_queue **eqp, uint32_t type, + uint32_t subtype); int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *, struct lpfc_queue *, uint32_t); int lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *, struct lpfc_queue *, uint32_t); int lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *, struct lpfc_queue *, struct lpfc_queue *, uint32_t); +int lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp, + struct lpfc_queue **drqp, struct lpfc_queue **cqp, + uint32_t subtype); void lpfc_rq_adjust_repost(struct lpfc_hba *, struct lpfc_queue *, int); int lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *); int lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *); @@ -714,6 +773,7 @@ int lpfc_sli4_queue_setup(struct lpfc_hba *); void 
lpfc_sli4_queue_unset(struct lpfc_hba *); int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t); int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *); +int lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba); uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *); void lpfc_sli4_free_xri(struct lpfc_hba *, int); int lpfc_sli4_post_async_mbox(struct lpfc_hba *); @@ -735,9 +795,14 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *); int lpfc_sli4_resume_rpi(struct lpfc_nodelist *, void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *); void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *); +void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba); void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *); void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *, struct sli4_wcqe_xri_aborted *); +void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba, + struct sli4_wcqe_xri_aborted *axri); +void lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, + struct sli4_wcqe_xri_aborted *axri); void lpfc_sli4_els_xri_aborted(struct lpfc_hba *, struct sli4_wcqe_xri_aborted *); void lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *); @@ -746,6 +811,7 @@ int lpfc_sli4_brdreset(struct lpfc_hba *); int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *); void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *); int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *); +int lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba); int lpfc_sli4_init_vpi(struct lpfc_vport *); uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool); uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool); diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 0ee0623a354c03..d4e95e28f4e3d5 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * @@ -18,7 +20,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "11.2.0.4" +#define LPFC_DRIVER_VERSION "11.2.0.10" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ @@ -30,4 +32,6 @@ #define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \ LPFC_DRIVER_VERSION -#define LPFC_COPYRIGHT "Copyright(c) 2004-2016 Emulex. All rights reserved." +#define LPFC_COPYRIGHT "Copyright (C) 2017 Broadcom. All Rights Reserved. " \ + "The term \"Broadcom\" refers to Broadcom Limited " \ + "and/or its subsidiaries." diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index e18bbc66e83b1f..9a0339dbc024bb 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. 
* * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * @@ -28,11 +30,13 @@ #include #include #include +#include #include #include #include #include + #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" @@ -402,6 +406,22 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) vport->fdmi_port_mask = phba->pport->fdmi_port_mask; } + if ((phba->nvmet_support == 0) && + ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME))) { + /* Create NVME binding with nvme_fc_transport. This + * ensures the vport is initialized. + */ + rc = lpfc_nvme_create_localport(vport); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6003 %s status x%x\n", + "NVME registration failed, ", + rc); + goto error_out; + } + } + /* * In SLI4, the vpi must be activated before it can be used * by the port. diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h index 6b2c94eb813430..62295971f66cf7 100644 --- a/drivers/scsi/lpfc/lpfc_vport.h +++ b/drivers/scsi/lpfc/lpfc_vport.h @@ -1,9 +1,11 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * * Copyright (C) 2004-2006 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * + * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index e7e5974e1a2c43..2b209bbb4c9165 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -35,8 +35,8 @@ /* * MegaRAID SAS Driver meta data */ -#define MEGASAS_VERSION "07.701.16.00-rc1" -#define MEGASAS_RELDATE "February 2, 2017" +#define MEGASAS_VERSION "07.701.17.00-rc1" +#define MEGASAS_RELDATE "March 2, 2017" /* * Device IDs diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 7ac9a9ee9bd473..0016f12cc563e7 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -1963,6 +1963,9 @@ static int megasas_slave_alloc(struct scsi_device *sdev) if (!mr_device_priv_data) return -ENOMEM; sdev->hostdata = mr_device_priv_data; + + atomic_set(&mr_device_priv_data->r1_ldio_hint, + instance->r1_ldio_hint_default); return 0; } @@ -5034,10 +5037,12 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe) &instance->irq_context[j]); /* Retry irq register for IO_APIC*/ instance->msix_vectors = 0; - if (is_probe) + if (is_probe) { + pci_free_irq_vectors(instance->pdev); return megasas_setup_irqs_ioapic(instance); - else + } else { return -1; + } } } return 0; @@ -5277,9 +5282,11 @@ static int megasas_init_fw(struct megasas_instance *instance) MPI2_REPLY_POST_HOST_INDEX_OFFSET); } - i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY); - if (i < 0) - goto fail_setup_irqs; + if (!instance->msix_vectors) { + i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY); + if (i < 0) + goto fail_setup_irqs; + } 
dev_info(&instance->pdev->dev, "firmware supports msix\t: (%d)", fw_msix_count); diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 29650ba669da58..f990ab4d45e1bf 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -2159,7 +2159,7 @@ megasas_set_raidflag_cpu_affinity(union RAID_CONTEXT_UNION *praid_context, cpu_sel = MR_RAID_CTX_CPUSEL_1; if (is_stream_detected(rctx_g35) && - (raid->level == 5) && + ((raid->level == 5) || (raid->level == 6)) && (raid->writeMode == MR_RL_WRITE_THROUGH_MODE) && (cpu_sel == MR_RAID_CTX_CPUSEL_FCFS)) cpu_sel = MR_RAID_CTX_CPUSEL_0; @@ -2338,7 +2338,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, fp_possible = false; atomic_dec(&instance->fw_outstanding); } else if ((scsi_buff_len > MR_LARGE_IO_MIN_SIZE) || - atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint)) { + (atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint) > 0)) { fp_possible = false; atomic_dec(&instance->fw_outstanding); if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE) diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index a3fe1fb55c17c3..5b7aec5d575a39 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -1148,7 +1148,7 @@ mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc) /* TMs are on msix_index == 0 */ if (reply_q->msix_index == 0) continue; - synchronize_irq(reply_q->vector); + synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index)); } } @@ -1837,11 +1837,8 @@ _base_free_irq(struct MPT3SAS_ADAPTER *ioc) list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) { list_del(&reply_q->list); - if (smp_affinity_enable) { - irq_set_affinity_hint(reply_q->vector, NULL); - free_cpumask_var(reply_q->affinity_hint); - } - free_irq(reply_q->vector, reply_q); + free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index), + reply_q); kfree(reply_q); } } @@ -1850,13 +1847,13 @@ _base_free_irq(struct MPT3SAS_ADAPTER *ioc) * _base_request_irq - request irq * @ioc: per adapter object * @index: msix index into vector table - * @vector: irq vector * * Inserting respective reply_queue into the list. 
*/ static int -_base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector) +_base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index) { + struct pci_dev *pdev = ioc->pdev; struct adapter_reply_queue *reply_q; int r; @@ -1868,14 +1865,6 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector) } reply_q->ioc = ioc; reply_q->msix_index = index; - reply_q->vector = vector; - - if (smp_affinity_enable) { - if (!zalloc_cpumask_var(&reply_q->affinity_hint, GFP_KERNEL)) { - kfree(reply_q); - return -ENOMEM; - } - } atomic_set(&reply_q->busy, 0); if (ioc->msix_enable) @@ -1884,12 +1873,11 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector) else snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d", ioc->driver_name, ioc->id); - r = request_irq(vector, _base_interrupt, IRQF_SHARED, reply_q->name, - reply_q); + r = request_irq(pci_irq_vector(pdev, index), _base_interrupt, + IRQF_SHARED, reply_q->name, reply_q); if (r) { pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n", - reply_q->name, vector); - free_cpumask_var(reply_q->affinity_hint); + reply_q->name, pci_irq_vector(pdev, index)); kfree(reply_q); return -EBUSY; } @@ -1925,6 +1913,21 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc) if (!nr_msix) return; + if (smp_affinity_enable) { + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + const cpumask_t *mask = pci_irq_get_affinity(ioc->pdev, + reply_q->msix_index); + if (!mask) { + pr_warn(MPT3SAS_FMT "no affinity for msi %x\n", + ioc->name, reply_q->msix_index); + continue; + } + + for_each_cpu(cpu, mask) + ioc->cpu_msix_table[cpu] = reply_q->msix_index; + } + return; + } cpu = cpumask_first(cpu_online_mask); list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { @@ -1938,18 +1941,9 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc) group++; for (i = 0 ; i < group ; i++) { - ioc->cpu_msix_table[cpu] = index; - if (smp_affinity_enable) - cpumask_or(reply_q->affinity_hint, - reply_q->affinity_hint, get_cpu_mask(cpu)); + ioc->cpu_msix_table[cpu] = reply_q->msix_index; cpu = cpumask_next(cpu, cpu_online_mask); } - if (smp_affinity_enable) - if (irq_set_affinity_hint(reply_q->vector, - reply_q->affinity_hint)) - dinitprintk(ioc, pr_info(MPT3SAS_FMT - "Err setting affinity hint to irq vector %d\n", - ioc->name, reply_q->vector)); index++; } } @@ -1976,10 +1970,10 @@ _base_disable_msix(struct MPT3SAS_ADAPTER *ioc) static int _base_enable_msix(struct MPT3SAS_ADAPTER *ioc) { - struct msix_entry *entries, *a; int r; int i, local_max_msix_vectors; u8 try_msix = 0; + unsigned int irq_flags = PCI_IRQ_MSIX; if (msix_disable == -1 || msix_disable == 0) try_msix = 1; @@ -1991,7 +1985,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc) goto try_ioapic; ioc->reply_queue_count = min_t(int, ioc->cpu_count, - ioc->msix_vector_count); + ioc->msix_vector_count); printk(MPT3SAS_FMT "MSI-X vectors supported: %d, no of cores" ": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count, @@ -2002,56 +1996,51 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc) else local_max_msix_vectors = max_msix_vectors; - if (local_max_msix_vectors > 0) { + if (local_max_msix_vectors > 0) ioc->reply_queue_count = min_t(int, local_max_msix_vectors, ioc->reply_queue_count); - ioc->msix_vector_count = ioc->reply_queue_count; - } else if (local_max_msix_vectors == 0) + else if (local_max_msix_vectors == 0) goto try_ioapic; if (ioc->msix_vector_count < ioc->cpu_count) smp_affinity_enable = 0; - entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry), - 
GFP_KERNEL); - if (!entries) { - dfailprintk(ioc, pr_info(MPT3SAS_FMT - "kcalloc failed @ at %s:%d/%s() !!!\n", - ioc->name, __FILE__, __LINE__, __func__)); - goto try_ioapic; - } + if (smp_affinity_enable) + irq_flags |= PCI_IRQ_AFFINITY; - for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) - a->entry = i; - - r = pci_enable_msix_exact(ioc->pdev, entries, ioc->reply_queue_count); - if (r) { + r = pci_alloc_irq_vectors(ioc->pdev, 1, ioc->reply_queue_count, + irq_flags); + if (r < 0) { dfailprintk(ioc, pr_info(MPT3SAS_FMT - "pci_enable_msix_exact failed (r=%d) !!!\n", + "pci_alloc_irq_vectors failed (r=%d) !!!\n", ioc->name, r)); - kfree(entries); goto try_ioapic; } ioc->msix_enable = 1; - for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) { - r = _base_request_irq(ioc, i, a->vector); + ioc->reply_queue_count = r; + for (i = 0; i < ioc->reply_queue_count; i++) { + r = _base_request_irq(ioc, i); if (r) { _base_free_irq(ioc); _base_disable_msix(ioc); - kfree(entries); goto try_ioapic; } } - kfree(entries); return 0; /* failback to io_apic interrupt routing */ try_ioapic: ioc->reply_queue_count = 1; - r = _base_request_irq(ioc, 0, ioc->pdev->irq); + r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY); + if (r < 0) { + dfailprintk(ioc, pr_info(MPT3SAS_FMT + "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n", + ioc->name, r)); + } else + r = _base_request_irq(ioc, 0); return r; } @@ -2222,7 +2211,8 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) list_for_each_entry(reply_q, &ioc->reply_queue_list, list) pr_info(MPT3SAS_FMT "%s: IRQ %d\n", reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" : - "IO-APIC enabled"), reply_q->vector); + "IO-APIC enabled"), + pci_irq_vector(ioc->pdev, reply_q->msix_index)); pr_info(MPT3SAS_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n", ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz); @@ -5357,7 +5347,8 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) sizeof(resource_size_t *), GFP_KERNEL); if (!ioc->reply_post_host_index) { dfailprintk(ioc, pr_info(MPT3SAS_FMT "allocation " - "for cpu_msix_table failed!!!\n", ioc->name)); + "for reply_post_host_index failed!!!\n", + ioc->name)); r = -ENOMEM; goto out_free_resources; } diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index 4ab634fc27df92..8981806fb13fa7 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -731,12 +731,10 @@ struct _event_ack_list { struct adapter_reply_queue { struct MPT3SAS_ADAPTER *ioc; u8 msix_index; - unsigned int vector; u32 reply_post_host_index; Mpi2ReplyDescriptorsUnion_t *reply_post_free; char name[MPT_NAME_LENGTH]; atomic_t busy; - cpumask_var_t affinity_hint; struct list_head list; }; @@ -1444,9 +1442,6 @@ void mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, u16 handle, u8 phy_number, u8 link_rate); extern struct sas_function_template mpt3sas_transport_functions; extern struct scsi_transport_template *mpt3sas_transport_template; -extern int scsi_internal_device_block(struct scsi_device *sdev); -extern int scsi_internal_device_unblock(struct scsi_device *sdev, - enum scsi_device_state new_state); /* trigger data externs */ void mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc, struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data); diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 46e866c36c8a88..919ba2bb15f110 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ 
b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -2859,7 +2859,7 @@ _scsih_internal_device_block(struct scsi_device *sdev, sas_device_priv_data->sas_target->handle); sas_device_priv_data->block = 1; - r = scsi_internal_device_block(sdev); + r = scsi_internal_device_block(sdev, false); if (r == -EINVAL) sdev_printk(KERN_WARNING, sdev, "device_block failed with return(%d) for handle(0x%04x)\n", @@ -2895,7 +2895,7 @@ _scsih_internal_device_unblock(struct scsi_device *sdev, "performing a block followed by an unblock\n", r, sas_device_priv_data->sas_target->handle); sas_device_priv_data->block = 1; - r = scsi_internal_device_block(sdev); + r = scsi_internal_device_block(sdev, false); if (r) sdev_printk(KERN_WARNING, sdev, "retried device_block " "failed with return(%d) for handle(0x%04x)\n", @@ -4677,7 +4677,6 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) struct MPT3SAS_DEVICE *sas_device_priv_data; u32 response_code = 0; unsigned long flags; - unsigned int sector_sz; mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); @@ -4742,20 +4741,6 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) } xfer_cnt = le32_to_cpu(mpi_reply->TransferCount); - - /* In case of bogus fw or device, we could end up having - * unaligned partial completion. We can force alignment here, - * then scsi-ml does not need to handle this misbehavior. - */ - sector_sz = scmd->device->sector_size; - if (unlikely(!blk_rq_is_passthrough(scmd->request) && sector_sz && - xfer_cnt % sector_sz)) { - sdev_printk(KERN_INFO, scmd->device, - "unaligned partial completion avoided (xfer_cnt=%u, sector_sz=%u)\n", - xfer_cnt, sector_sz); - xfer_cnt = round_down(xfer_cnt, sector_sz); - } - scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt); if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) log_info = le32_to_cpu(mpi_reply->IOCLogInfo); diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c index 243eab3d10d094..e0ce5d2fd14d29 100644 --- a/drivers/scsi/osd/osd_uld.c +++ b/drivers/scsi/osd/osd_uld.c @@ -372,6 +372,7 @@ EXPORT_SYMBOL(osduld_device_same); static int __detect_osd(struct osd_uld_device *oud) { struct scsi_device *scsi_device = oud->od.scsi_device; + struct scsi_sense_hdr sense_hdr; char caps[OSD_CAP_LEN]; int error; @@ -380,7 +381,7 @@ static int __detect_osd(struct osd_uld_device *oud) */ OSD_DEBUG("start scsi_test_unit_ready %p %p %p\n", oud, scsi_device, scsi_device->request_queue); - error = scsi_test_unit_ready(scsi_device, 10*HZ, 5, NULL); + error = scsi_test_unit_ready(scsi_device, 10*HZ, 5, &sense_hdr); if (error) OSD_ERR("warning: scsi_test_unit_ready failed\n"); diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c index 75ac662793a3cd..c47f4b349bac44 100644 --- a/drivers/scsi/osst.c +++ b/drivers/scsi/osst.c @@ -35,7 +35,7 @@ static const char * osst_version = "0.99.4"; #include #include -#include +#include #include #include #include diff --git a/drivers/scsi/qedf/Kconfig b/drivers/scsi/qedf/Kconfig new file mode 100644 index 00000000000000..943f5ee45807b1 --- /dev/null +++ b/drivers/scsi/qedf/Kconfig @@ -0,0 +1,11 @@ +config QEDF + tristate "QLogic QEDF 25/40/100Gb FCoE Initiator Driver Support" + depends on PCI && SCSI + depends on QED + depends on LIBFC + depends on LIBFCOE + select QED_LL2 + select QED_FCOE + ---help--- + This driver supports FCoE offload for the QLogic FastLinQ + 41000 Series Converged Network Adapters. 
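(Illustrative sketch, not part of the patch: the Kconfig entry above implies the following minimal .config fragment for building the new initiator as a module; the QED, LIBFC and LIBFCOE symbols are assumed to already exist in the tree, and QED_LL2/QED_FCOE are pulled in automatically by the select statements.)

    CONFIG_PCI=y
    CONFIG_SCSI=y
    CONFIG_QED=m
    CONFIG_LIBFC=m
    CONFIG_LIBFCOE=m
    # QED_LL2 and QED_FCOE are selected by CONFIG_QEDF itself
    CONFIG_QEDF=m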
diff --git a/drivers/scsi/qedf/Makefile b/drivers/scsi/qedf/Makefile new file mode 100644 index 00000000000000..64e9f507ce3286 --- /dev/null +++ b/drivers/scsi/qedf/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_QEDF) := qedf.o +qedf-y = qedf_dbg.o qedf_main.o qedf_io.o qedf_fip.o \ + qedf_attr.o qedf_els.o + +qedf-$(CONFIG_DEBUG_FS) += qedf_debugfs.o diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h new file mode 100644 index 00000000000000..96346a1b1515e8 --- /dev/null +++ b/drivers/scsi/qedf/qedf.h @@ -0,0 +1,545 @@ +/* + * QLogic FCoE Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ +#ifndef _QEDFC_H_ +#define _QEDFC_H_ + +#include +#include +#include +#include +#include +#include +#include + + +/* qedf_hsi.h needs to be included before any qed includes */ +#include "qedf_hsi.h" + +#include +#include +#include +#include "qedf_version.h" +#include "qedf_dbg.h" + +/* Helpers to extract upper and lower 32-bits of pointer */ +#define U64_HI(val) ((u32)(((u64)(val)) >> 32)) +#define U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff)) + +#define QEDF_DESCR "QLogic FCoE Offload Driver" +#define QEDF_MODULE_NAME "qedf" + +#define QEDF_MIN_XID 0 +#define QEDF_MAX_SCSI_XID (NUM_TASKS_PER_CONNECTION - 1) +#define QEDF_MAX_ELS_XID 4095 +#define QEDF_FLOGI_RETRY_CNT 3 +#define QEDF_RPORT_RETRY_CNT 255 +#define QEDF_MAX_SESSIONS 1024 +#define QEDF_MAX_PAYLOAD 2048 +#define QEDF_MAX_BDS_PER_CMD 256 +#define QEDF_MAX_BD_LEN 0xffff +#define QEDF_BD_SPLIT_SZ 0x1000 +#define QEDF_PAGE_SIZE 4096 +#define QED_HW_DMA_BOUNDARY 0xfff +#define QEDF_MAX_SGLEN_FOR_CACHESGL ((1U << 16) - 1) +#define QEDF_MFS (QEDF_MAX_PAYLOAD + \ + sizeof(struct fc_frame_header)) +#define QEDF_MAX_NPIV 64 +#define QEDF_TM_TIMEOUT 10 +#define QEDF_ABORT_TIMEOUT 10 +#define QEDF_CLEANUP_TIMEOUT 10 +#define QEDF_MAX_CDB_LEN 16 + +#define UPSTREAM_REMOVE 1 +#define UPSTREAM_KEEP 1 + +struct qedf_mp_req { + uint8_t tm_flags; + + uint32_t req_len; + void *req_buf; + dma_addr_t req_buf_dma; + struct fcoe_sge *mp_req_bd; + dma_addr_t mp_req_bd_dma; + struct fc_frame_header req_fc_hdr; + + uint32_t resp_len; + void *resp_buf; + dma_addr_t resp_buf_dma; + struct fcoe_sge *mp_resp_bd; + dma_addr_t mp_resp_bd_dma; + struct fc_frame_header resp_fc_hdr; +}; + +struct qedf_els_cb_arg { + struct qedf_ioreq *aborted_io_req; + struct qedf_ioreq *io_req; + u8 op; /* Used to keep track of ELS op */ + uint16_t l2_oxid; + u32 offset; /* Used for sequence cleanup */ + u8 r_ctl; /* Used for sequence cleanup */ +}; + +enum qedf_ioreq_event { + QEDF_IOREQ_EV_ABORT_SUCCESS, + QEDF_IOREQ_EV_ABORT_FAILED, + QEDF_IOREQ_EV_SEND_RRQ, + QEDF_IOREQ_EV_ELS_TMO, + QEDF_IOREQ_EV_ELS_ERR_DETECT, + QEDF_IOREQ_EV_ELS_FLUSH, + QEDF_IOREQ_EV_CLEANUP_SUCCESS, + QEDF_IOREQ_EV_CLEANUP_FAILED, +}; + +#define FC_GOOD 0 +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER (0x1<<2) +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER (0x1<<3) +#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status) +#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0) +#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID (0x1<<1) +struct qedf_ioreq { + struct list_head link; + uint16_t xid; + struct scsi_cmnd *sc_cmd; + bool use_slowpath; /* Use slow SGL for this I/O */ +#define QEDF_SCSI_CMD 1 +#define QEDF_TASK_MGMT_CMD 2 +#define QEDF_ABTS 3 +#define QEDF_ELS 4 +#define QEDF_CLEANUP 5 +#define QEDF_SEQ_CLEANUP 6 + u8 cmd_type; +#define
QEDF_CMD_OUTSTANDING 0x0 +#define QEDF_CMD_IN_ABORT 0x1 +#define QEDF_CMD_IN_CLEANUP 0x2 +#define QEDF_CMD_SRR_SENT 0x3 + u8 io_req_flags; + struct qedf_rport *fcport; + unsigned long flags; + enum qedf_ioreq_event event; + size_t data_xfer_len; + struct kref refcount; + struct qedf_cmd_mgr *cmd_mgr; + struct io_bdt *bd_tbl; + struct delayed_work timeout_work; + struct completion tm_done; + struct completion abts_done; + struct fcoe_task_context *task; + int idx; +/* + * Need to allocate enough room for both sense data and FCP response data + * which has a max length of 8 bytes according to spec. + */ +#define QEDF_SCSI_SENSE_BUFFERSIZE (SCSI_SENSE_BUFFERSIZE + 8) + uint8_t *sense_buffer; + dma_addr_t sense_buffer_dma; + u32 fcp_resid; + u32 fcp_rsp_len; + u32 fcp_sns_len; + u8 cdb_status; + u8 fcp_status; + u8 fcp_rsp_code; + u8 scsi_comp_flags; +#define QEDF_MAX_REUSE 0xfff + u16 reuse_count; + struct qedf_mp_req mp_req; + void (*cb_func)(struct qedf_els_cb_arg *cb_arg); + struct qedf_els_cb_arg *cb_arg; + int fp_idx; + unsigned int cpu; + unsigned int int_cpu; +#define QEDF_IOREQ_SLOW_SGE 0 +#define QEDF_IOREQ_SINGLE_SGE 1 +#define QEDF_IOREQ_FAST_SGE 2 + u8 sge_type; + struct delayed_work rrq_work; + + /* Used for sequence level recovery; i.e. REC/SRR */ + uint32_t rx_buf_off; + uint32_t tx_buf_off; + uint32_t rx_id; + uint32_t task_retry_identifier; + + /* + * Used to tell if we need to return a SCSI command + * during some form of error processing. + */ + bool return_scsi_cmd_on_abts; +}; + +extern struct workqueue_struct *qedf_io_wq; + +struct qedf_rport { + spinlock_t rport_lock; +#define QEDF_RPORT_SESSION_READY 1 +#define QEDF_RPORT_UPLOADING_CONNECTION 2 + unsigned long flags; + unsigned long retry_delay_timestamp; + struct fc_rport *rport; + struct fc_rport_priv *rdata; + struct qedf_ctx *qedf; + u32 handle; /* Handle from qed */ + u32 fw_cid; /* fw_cid from qed */ + void __iomem *p_doorbell; + /* Send queue management */ + atomic_t free_sqes; + atomic_t num_active_ios; + struct fcoe_wqe *sq; + dma_addr_t sq_dma; + u16 sq_prod_idx; + u16 fw_sq_prod_idx; + u16 sq_con_idx; + u32 sq_mem_size; + void *sq_pbl; + dma_addr_t sq_pbl_dma; + u32 sq_pbl_size; + u32 sid; +#define QEDF_RPORT_TYPE_DISK 1 +#define QEDF_RPORT_TYPE_TAPE 2 + uint dev_type; /* Disk or tape */ + struct list_head peers; +}; + +/* Used to contain LL2 skb's in ll2_skb_list */ +struct qedf_skb_work { + struct work_struct work; + struct sk_buff *skb; + struct qedf_ctx *qedf; +}; + +struct qedf_fastpath { +#define QEDF_SB_ID_NULL 0xffff + u16 sb_id; + struct qed_sb_info *sb_info; + struct qedf_ctx *qedf; + /* Keep track of number of completions on this fastpath */ + unsigned long completions; + uint32_t cq_num_entries; +}; + +/* Used to pass fastpath information needed to process CQEs */ +struct qedf_io_work { + struct work_struct work; + struct fcoe_cqe cqe; + struct qedf_ctx *qedf; + struct fc_frame *fp; +}; + +struct qedf_glbl_q_params { + u64 hw_p_cq; /* Completion queue PBL */ + u64 hw_p_rq; /* Request queue PBL */ + u64 hw_p_cmdq; /* Command queue PBL */ +}; + +struct global_queue { + struct fcoe_cqe *cq; + dma_addr_t cq_dma; + u32 cq_mem_size; + u32 cq_cons_idx; /* Completion queue consumer index */ + u32 cq_prod_idx; + + void *cq_pbl; + dma_addr_t cq_pbl_dma; + u32 cq_pbl_size; +}; + +/* I/O tracing entry */ +#define QEDF_IO_TRACE_SIZE 2048 +struct qedf_io_log { +#define QEDF_IO_TRACE_REQ 0 +#define QEDF_IO_TRACE_RSP 1 + uint8_t direction; + uint16_t task_id; + uint32_t port_id; /* Remote port fabric ID */ + int 
lun; + char op; /* SCSI CDB */ + uint8_t lba[4]; + unsigned int bufflen; /* SCSI buffer length */ + unsigned int sg_count; /* Number of SG elements */ + int result; /* Result passed back to mid-layer */ + unsigned long jiffies; /* Time stamp when I/O logged */ + int refcount; /* Reference count for task id */ + unsigned int req_cpu; /* CPU that the task is queued on */ + unsigned int int_cpu; /* Interrupt CPU that the task is received on */ + unsigned int rsp_cpu; /* CPU that task is returned on */ + u8 sge_type; /* Did we take the slow, single or fast SGE path */ +}; + +/* Number of entries in BDQ */ +#define QEDF_BDQ_SIZE 256 +#define QEDF_BDQ_BUF_SIZE 2072 + +/* DMA coherent buffers for BDQ */ +struct qedf_bdq_buf { + void *buf_addr; + dma_addr_t buf_dma; +}; + +/* Main adapter struct */ +struct qedf_ctx { + struct qedf_dbg_ctx dbg_ctx; + struct fcoe_ctlr ctlr; + struct fc_lport *lport; + u8 data_src_addr[ETH_ALEN]; +#define QEDF_LINK_DOWN 0 +#define QEDF_LINK_UP 1 + atomic_t link_state; +#define QEDF_DCBX_PENDING 0 +#define QEDF_DCBX_DONE 1 + atomic_t dcbx; + uint16_t max_scsi_xid; + uint16_t max_els_xid; +#define QEDF_NULL_VLAN_ID -1 +#define QEDF_FALLBACK_VLAN 1002 +#define QEDF_DEFAULT_PRIO 3 + int vlan_id; + uint vlan_hw_insert:1; + struct qed_dev *cdev; + struct qed_dev_fcoe_info dev_info; + struct qed_int_info int_info; + uint16_t last_command; + spinlock_t hba_lock; + struct pci_dev *pdev; + u64 wwnn; + u64 wwpn; + u8 __aligned(16) mac[ETH_ALEN]; + struct list_head fcports; + atomic_t num_offloads; + unsigned int curr_conn_id; + struct workqueue_struct *ll2_recv_wq; + struct workqueue_struct *link_update_wq; + struct delayed_work link_update; + struct delayed_work link_recovery; + struct completion flogi_compl; + struct completion fipvlan_compl; + + /* + * Used to tell if we're in the window where we are waiting for + * the link to come back up before informing fcoe that the link is + * done.
+ */ + atomic_t link_down_tmo_valid; +#define QEDF_TIMER_INTERVAL (1 * HZ) + struct timer_list timer; /* One second bookkeeping timer */ +#define QEDF_DRAIN_ACTIVE 1 +#define QEDF_LL2_STARTED 2 +#define QEDF_UNLOADING 3 +#define QEDF_GRCDUMP_CAPTURE 4 +#define QEDF_IN_RECOVERY 5 +#define QEDF_DBG_STOP_IO 6 + unsigned long flags; /* Miscellaneous state flags */ + int fipvlan_retries; + u8 num_queues; + struct global_queue **global_queues; + /* Pointer to array of queue structures */ + struct qedf_glbl_q_params *p_cpuq; + /* Physical address of array of queue structures */ + dma_addr_t hw_p_cpuq; + + struct qedf_bdq_buf bdq[QEDF_BDQ_SIZE]; + void *bdq_pbl; + dma_addr_t bdq_pbl_dma; + size_t bdq_pbl_mem_size; + void *bdq_pbl_list; + dma_addr_t bdq_pbl_list_dma; + u8 bdq_pbl_list_num_entries; + void __iomem *bdq_primary_prod; + void __iomem *bdq_secondary_prod; + uint16_t bdq_prod_idx; + + /* Structure for holding all the fastpath for this qedf_ctx */ + struct qedf_fastpath *fp_array; + struct qed_fcoe_tid tasks; + struct qedf_cmd_mgr *cmd_mgr; + /* Holds the PF parameters we pass to qed to start the FCoE function */ + struct qed_pf_params pf_params; + /* Used to time middle path ELS and TM commands */ + struct workqueue_struct *timer_work_queue; + +#define QEDF_IO_WORK_MIN 64 + mempool_t *io_mempool; + struct workqueue_struct *dpc_wq; + + u32 slow_sge_ios; + u32 fast_sge_ios; + u32 single_sge_ios; + + uint8_t *grcdump; + uint32_t grcdump_size; + + struct qedf_io_log io_trace_buf[QEDF_IO_TRACE_SIZE]; + spinlock_t io_trace_lock; + uint16_t io_trace_idx; + + bool stop_io_on_error; + + u32 flogi_cnt; + u32 flogi_failed; + + /* Used for fc statistics */ + u64 input_requests; + u64 output_requests; + u64 control_requests; + u64 packet_aborts; + u64 alloc_failures; +}; + +struct io_bdt { + struct qedf_ioreq *io_req; + struct fcoe_sge *bd_tbl; + dma_addr_t bd_tbl_dma; + u16 bd_valid; +}; + +struct qedf_cmd_mgr { + struct qedf_ctx *qedf; + u16 idx; + struct io_bdt **io_bdt_pool; +#define FCOE_PARAMS_NUM_TASKS 4096 + struct qedf_ioreq cmds[FCOE_PARAMS_NUM_TASKS]; + spinlock_t lock; + atomic_t free_list_cnt; +}; + +/* Stolen from qed_cxt_api.h and adapted for qed_fcoe_info + * Usage: + * + * void *ptr; + * ptr = qedf_get_task_mem(&qedf->tasks, 128); + */ +static inline void *qedf_get_task_mem(struct qed_fcoe_tid *info, u32 tid) +{ + return (void *)(info->blocks[tid / info->num_tids_per_block] + + (tid % info->num_tids_per_block) * info->size); +} + +static inline void qedf_stop_all_io(struct qedf_ctx *qedf) +{ + set_bit(QEDF_DBG_STOP_IO, &qedf->flags); +} + +/* + * Externs + */ +#define QEDF_DEFAULT_LOG_MASK 0x3CFB6 +extern const struct qed_fcoe_ops *qed_ops; +extern uint qedf_dump_frames; +extern uint qedf_io_tracing; +extern uint qedf_stop_io_on_error; +extern uint qedf_link_down_tmo; +#define QEDF_RETRY_DELAY_MAX 20 /* 2 seconds */ +extern bool qedf_retry_delay; +extern uint qedf_debug; + +extern struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf); +extern void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr); +extern int qedf_queuecommand(struct Scsi_Host *host, + struct scsi_cmnd *sc_cmd); +extern void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb); +extern void qedf_update_src_mac(struct fc_lport *lport, u8 *addr); +extern u8 *qedf_get_src_mac(struct fc_lport *lport); +extern void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb); +extern void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf); +extern void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, +
struct qedf_ioreq *io_req); +extern void qedf_process_warning_compl(struct qedf_ctx *qedf, + struct fcoe_cqe *cqe, struct qedf_ioreq *io_req); +extern void qedf_process_error_detect(struct qedf_ctx *qedf, + struct fcoe_cqe *cqe, struct qedf_ioreq *io_req); +extern void qedf_flush_active_ios(struct qedf_rport *fcport, int lun); +extern void qedf_release_cmd(struct kref *ref); +extern int qedf_initiate_abts(struct qedf_ioreq *io_req, + bool return_scsi_cmd_on_abts); +extern void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, + struct qedf_ioreq *io_req); +extern struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, + u8 cmd_type); + +extern struct device_attribute *qedf_host_attrs[]; +extern void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req, + unsigned int timer_msec); +extern int qedf_init_mp_req(struct qedf_ioreq *io_req); +extern void qedf_init_mp_task(struct qedf_ioreq *io_req, + struct fcoe_task_context *task_ctx); +extern void qedf_add_to_sq(struct qedf_rport *fcport, u16 xid, + u32 ptu_invalidate, enum fcoe_task_type req_type, u32 offset); +extern void qedf_ring_doorbell(struct qedf_rport *fcport); +extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, + struct qedf_ioreq *els_req); +extern int qedf_send_rrq(struct qedf_ioreq *aborted_io_req); +extern int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp); +extern int qedf_initiate_cleanup(struct qedf_ioreq *io_req, + bool return_scsi_cmd_on_abts); +extern void qedf_process_cleanup_compl(struct qedf_ctx *qedf, + struct fcoe_cqe *cqe, struct qedf_ioreq *io_req); +extern int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags); +extern void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, + struct qedf_ioreq *io_req); +extern void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe); +extern void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req, + int result); +extern void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id); +extern void qedf_create_sysfs_ctx_attr(struct qedf_ctx *qedf); +extern void qedf_remove_sysfs_ctx_attr(struct qedf_ctx *qedf); +extern void qedf_capture_grc_dump(struct qedf_ctx *qedf); +extern void qedf_wait_for_upload(struct qedf_ctx *qedf); +extern void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx, + struct fcoe_cqe *cqe); +extern void qedf_restart_rport(struct qedf_rport *fcport); +extern int qedf_send_rec(struct qedf_ioreq *orig_io_req); +extern int qedf_post_io_req(struct qedf_rport *fcport, + struct qedf_ioreq *io_req); +extern void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf, + struct fcoe_cqe *cqe, struct qedf_ioreq *io_req); +extern int qedf_send_flogi(struct qedf_ctx *qedf); +extern void qedf_fp_io_handler(struct work_struct *work); + +#define FCOE_WORD_TO_BYTE 4 +#define QEDF_MAX_TASK_NUM 0xFFFF + +struct fip_vlan { + struct ethhdr eth; + struct fip_header fip; + struct { + struct fip_mac_desc mac; + struct fip_wwn_desc wwnn; + } desc; +}; + +/* SQ/CQ Sizes */ +#define GBL_RSVD_TASKS 16 +#define NUM_TASKS_PER_CONNECTION 1024 +#define NUM_RW_TASKS_PER_CONNECTION 512 +#define FCOE_PARAMS_CQ_NUM_ENTRIES FCOE_PARAMS_NUM_TASKS + +#define FCOE_PARAMS_CMDQ_NUM_ENTRIES FCOE_PARAMS_NUM_TASKS +#define SQ_NUM_ENTRIES NUM_TASKS_PER_CONNECTION + +#define QEDF_FCOE_PARAMS_GL_RQ_PI 0 +#define QEDF_FCOE_PARAMS_GL_CMD_PI 1 + +#define QEDF_READ (1 << 1) +#define QEDF_WRITE (1 << 0) +#define MAX_FIBRE_LUNS 0xffffffff + +#define QEDF_MAX_NUM_CQS 8 + +/* + * 
PCI function probe defines + */ +/* Probe/remove called during normal PCI probe */ +#define QEDF_MODE_NORMAL 0 +/* Probe/remove called from qed error recovery */ +#define QEDF_MODE_RECOVERY 1 + +#define SUPPORTED_25000baseKR_Full (1<<27) +#define SUPPORTED_50000baseKR2_Full (1<<28) +#define SUPPORTED_100000baseKR4_Full (1<<29) +#define SUPPORTED_100000baseCR4_Full (1<<30) + +#endif diff --git a/drivers/scsi/qedf/qedf_attr.c b/drivers/scsi/qedf/qedf_attr.c new file mode 100644 index 00000000000000..47720611ad2c5e --- /dev/null +++ b/drivers/scsi/qedf/qedf_attr.c @@ -0,0 +1,165 @@ +/* + * QLogic FCoE Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ +#include "qedf.h" + +static ssize_t +qedf_fcoe_mac_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fc_lport *lport = shost_priv(class_to_shost(dev)); + u32 port_id; + u8 lport_src_id[3]; + u8 fcoe_mac[6]; + + port_id = fc_host_port_id(lport->host); + lport_src_id[2] = (port_id & 0x000000FF); + lport_src_id[1] = (port_id & 0x0000FF00) >> 8; + lport_src_id[0] = (port_id & 0x00FF0000) >> 16; + fc_fcoe_set_mac(fcoe_mac, lport_src_id); + + return scnprintf(buf, PAGE_SIZE, "%pM\n", fcoe_mac); +} + +static DEVICE_ATTR(fcoe_mac, S_IRUGO, qedf_fcoe_mac_show, NULL); + +struct device_attribute *qedf_host_attrs[] = { + &dev_attr_fcoe_mac, + NULL, +}; + +extern const struct qed_fcoe_ops *qed_ops; + +inline bool qedf_is_vport(struct qedf_ctx *qedf) +{ + return (!(qedf->lport->vport == NULL)); +} + +/* Get base qedf for physical port from vport */ +static struct qedf_ctx *qedf_get_base_qedf(struct qedf_ctx *qedf) +{ + struct fc_lport *lport; + struct fc_lport *base_lport; + + if (!(qedf_is_vport(qedf))) + return NULL; + + lport = qedf->lport; + base_lport = shost_priv(vport_to_shost(lport->vport)); + return (struct qedf_ctx *)(lport_priv(base_lport)); +} + +void qedf_capture_grc_dump(struct qedf_ctx *qedf) +{ + struct qedf_ctx *base_qedf; + + /* Make sure we use the base qedf to take the GRC dump */ + if (qedf_is_vport(qedf)) + base_qedf = qedf_get_base_qedf(qedf); + else + base_qedf = qedf; + + if (test_bit(QEDF_GRCDUMP_CAPTURE, &base_qedf->flags)) { + QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_INFO, + "GRC Dump already captured.\n"); + return; + } + + + qedf_get_grc_dump(base_qedf->cdev, qed_ops->common, + &base_qedf->grcdump, &base_qedf->grcdump_size); + QEDF_ERR(&(base_qedf->dbg_ctx), "GRC Dump captured.\n"); + set_bit(QEDF_GRCDUMP_CAPTURE, &base_qedf->flags); + qedf_uevent_emit(base_qedf->lport->host, QEDF_UEVENT_CODE_GRCDUMP, + NULL); +} + +static ssize_t +qedf_sysfs_read_grcdump(struct file *filep, struct kobject *kobj, + struct bin_attribute *ba, char *buf, loff_t off, + size_t count) +{ + ssize_t ret = 0; + struct fc_lport *lport = shost_priv(dev_to_shost(container_of(kobj, + struct device, kobj))); + struct qedf_ctx *qedf = lport_priv(lport); + + if (test_bit(QEDF_GRCDUMP_CAPTURE, &qedf->flags)) { + ret = memory_read_from_buffer(buf, count, &off, + qedf->grcdump, qedf->grcdump_size); + } else { + QEDF_ERR(&(qedf->dbg_ctx), "GRC Dump not captured!\n"); + } + + return ret; +} + +static ssize_t +qedf_sysfs_write_grcdump(struct file *filep, struct kobject *kobj, + struct bin_attribute *ba, char *buf, loff_t off, + size_t count) +{ + struct fc_lport *lport = NULL; + struct qedf_ctx *qedf = NULL; + long reading; + int ret = 0; + char 
msg[40]; + + if (off != 0) + return ret; + + + lport = shost_priv(dev_to_shost(container_of(kobj, + struct device, kobj))); + qedf = lport_priv(lport); + + buf[1] = 0; + ret = kstrtol(buf, 10, &reading); + if (ret) { + QEDF_ERR(&(qedf->dbg_ctx), "Invalid input, err(%d)\n", ret); + return ret; + } + + memset(msg, 0, sizeof(msg)); + switch (reading) { + case 0: + memset(qedf->grcdump, 0, qedf->grcdump_size); + clear_bit(QEDF_GRCDUMP_CAPTURE, &qedf->flags); + break; + case 1: + qedf_capture_grc_dump(qedf); + break; + } + + return count; +} + +static struct bin_attribute sysfs_grcdump_attr = { + .attr = { + .name = "grcdump", + .mode = S_IRUSR | S_IWUSR, + }, + .size = 0, + .read = qedf_sysfs_read_grcdump, + .write = qedf_sysfs_write_grcdump, +}; + +static struct sysfs_bin_attrs bin_file_entries[] = { + {"grcdump", &sysfs_grcdump_attr}, + {NULL}, +}; + +void qedf_create_sysfs_ctx_attr(struct qedf_ctx *qedf) +{ + qedf_create_sysfs_attr(qedf->lport->host, bin_file_entries); +} + +void qedf_remove_sysfs_ctx_attr(struct qedf_ctx *qedf) +{ + qedf_remove_sysfs_attr(qedf->lport->host, bin_file_entries); +} diff --git a/drivers/scsi/qedf/qedf_dbg.c b/drivers/scsi/qedf/qedf_dbg.c new file mode 100644 index 00000000000000..e023f5d0dc12cf --- /dev/null +++ b/drivers/scsi/qedf/qedf_dbg.c @@ -0,0 +1,195 @@ +/* + * QLogic FCoE Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ +#include "qedf_dbg.h" +#include + +void +qedf_dbg_err(struct qedf_dbg_ctx *qedf, const char *func, u32 line, + const char *fmt, ...) +{ + va_list va; + struct va_format vaf; + char nfunc[32]; + + memset(nfunc, 0, sizeof(nfunc)); + memcpy(nfunc, func, sizeof(nfunc) - 1); + + va_start(va, fmt); + + vaf.fmt = fmt; + vaf.va = &va; + + if (likely(qedf) && likely(qedf->pdev)) + pr_err("[%s]:[%s:%d]:%d: %pV", dev_name(&(qedf->pdev->dev)), + nfunc, line, qedf->host_no, &vaf); + else + pr_err("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf); + + va_end(va); +} + +void +qedf_dbg_warn(struct qedf_dbg_ctx *qedf, const char *func, u32 line, + const char *fmt, ...) +{ + va_list va; + struct va_format vaf; + char nfunc[32]; + + memset(nfunc, 0, sizeof(nfunc)); + memcpy(nfunc, func, sizeof(nfunc) - 1); + + va_start(va, fmt); + + vaf.fmt = fmt; + vaf.va = &va; + + if (!(qedf_debug & QEDF_LOG_WARN)) + goto ret; + + if (likely(qedf) && likely(qedf->pdev)) + pr_warn("[%s]:[%s:%d]:%d: %pV", dev_name(&(qedf->pdev->dev)), + nfunc, line, qedf->host_no, &vaf); + else + pr_warn("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf); + +ret: + va_end(va); +} + +void +qedf_dbg_notice(struct qedf_dbg_ctx *qedf, const char *func, u32 line, + const char *fmt, ...) +{ + va_list va; + struct va_format vaf; + char nfunc[32]; + + memset(nfunc, 0, sizeof(nfunc)); + memcpy(nfunc, func, sizeof(nfunc) - 1); + + va_start(va, fmt); + + vaf.fmt = fmt; + vaf.va = &va; + + if (!(qedf_debug & QEDF_LOG_NOTICE)) + goto ret; + + if (likely(qedf) && likely(qedf->pdev)) + pr_notice("[%s]:[%s:%d]:%d: %pV", + dev_name(&(qedf->pdev->dev)), nfunc, line, + qedf->host_no, &vaf); + else + pr_notice("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf); + +ret: + va_end(va); +} + +void +qedf_dbg_info(struct qedf_dbg_ctx *qedf, const char *func, u32 line, + u32 level, const char *fmt, ...) 
+{ + va_list va; + struct va_format vaf; + char nfunc[32]; + + memset(nfunc, 0, sizeof(nfunc)); + memcpy(nfunc, func, sizeof(nfunc) - 1); + + va_start(va, fmt); + + vaf.fmt = fmt; + vaf.va = &va; + + if (!(qedf_debug & level)) + goto ret; + + if (likely(qedf) && likely(qedf->pdev)) + pr_info("[%s]:[%s:%d]:%d: %pV", dev_name(&(qedf->pdev->dev)), + nfunc, line, qedf->host_no, &vaf); + else + pr_info("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf); + +ret: + va_end(va); +} + +int +qedf_alloc_grc_dump_buf(u8 **buf, uint32_t len) +{ + *buf = vmalloc(len); + if (!(*buf)) + return -ENOMEM; + + memset(*buf, 0, len); + return 0; +} + +void +qedf_free_grc_dump_buf(uint8_t **buf) +{ + vfree(*buf); + *buf = NULL; +} + +int +qedf_get_grc_dump(struct qed_dev *cdev, const struct qed_common_ops *common, + u8 **buf, uint32_t *grcsize) +{ + if (!*buf) + return -EINVAL; + + return common->dbg_grc(cdev, *buf, grcsize); +} + +void +qedf_uevent_emit(struct Scsi_Host *shost, u32 code, char *msg) +{ + char event_string[40]; + char *envp[] = {event_string, NULL}; + + memset(event_string, 0, sizeof(event_string)); + switch (code) { + case QEDF_UEVENT_CODE_GRCDUMP: + if (msg) + strncpy(event_string, msg, strlen(msg)); + else + sprintf(event_string, "GRCDUMP=%u", shost->host_no); + break; + default: + /* do nothing */ + break; + } + + kobject_uevent_env(&shost->shost_gendev.kobj, KOBJ_CHANGE, envp); +} + +int +qedf_create_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter) +{ + int ret = 0; + + for (; iter->name; iter++) { + ret = sysfs_create_bin_file(&shost->shost_gendev.kobj, + iter->attr); + if (ret) + pr_err("Unable to create sysfs %s attr, err(%d).\n", + iter->name, ret); + } + return ret; +} + +void +qedf_remove_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter) +{ + for (; iter->name; iter++) + sysfs_remove_bin_file(&shost->shost_gendev.kobj, iter->attr); +} diff --git a/drivers/scsi/qedf/qedf_dbg.h b/drivers/scsi/qedf/qedf_dbg.h new file mode 100644 index 00000000000000..7d173f48a81e8d --- /dev/null +++ b/drivers/scsi/qedf/qedf_dbg.h @@ -0,0 +1,157 @@ +/* + * QLogic FCoE Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
+ */ +#ifndef _QEDF_DBG_H_ +#define _QEDF_DBG_H_ + +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/compiler.h> +#include <linux/string.h> +#include <linux/version.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <scsi/scsi_transport.h> +#include <linux/fs.h> + +#include <linux/qed/common_hsi.h> +#include <linux/qed/qed_if.h> + +extern uint qedf_debug; + +/* Debug print level definitions */ +#define QEDF_LOG_DEFAULT 0x1 /* Set default logging mask */ +#define QEDF_LOG_INFO 0x2 /* + * Informational logs, + * MAC address, WWPN, WWNN + */ +#define QEDF_LOG_DISC 0x4 /* Init, discovery, rport */ +#define QEDF_LOG_LL2 0x8 /* LL2, VLAN logs */ +#define QEDF_LOG_CONN 0x10 /* Connection setup, cleanup */ +#define QEDF_LOG_EVT 0x20 /* Events, link, mtu */ +#define QEDF_LOG_TIMER 0x40 /* Timer events */ +#define QEDF_LOG_MP_REQ 0x80 /* Middle Path (MP) logs */ +#define QEDF_LOG_SCSI_TM 0x100 /* SCSI Aborts, Task Mgmt */ +#define QEDF_LOG_UNSOL 0x200 /* unsolicited event logs */ +#define QEDF_LOG_IO 0x400 /* scsi cmd, completion */ +#define QEDF_LOG_MQ 0x800 /* Multi Queue logs */ +#define QEDF_LOG_BSG 0x1000 /* BSG logs */ +#define QEDF_LOG_DEBUGFS 0x2000 /* debugFS logs */ +#define QEDF_LOG_LPORT 0x4000 /* lport logs */ +#define QEDF_LOG_ELS 0x8000 /* ELS logs */ +#define QEDF_LOG_NPIV 0x10000 /* NPIV logs */ +#define QEDF_LOG_SESS 0x20000 /* Connection setup, cleanup */ +#define QEDF_LOG_TID 0x80000 /* + * FW TID context acquire + * free + */ +#define QEDF_TRACK_TID 0x100000 /* + * Track TID state. To be + * enabled only at module load + * and not run-time. + */ +#define QEDF_TRACK_CMD_LIST 0x300000 /* + * Track active cmd list nodes, + * done with reference to TID, + * hence TRACK_TID also enabled. + */ +#define QEDF_LOG_NOTICE 0x40000000 /* Notice logs */ +#define QEDF_LOG_WARN 0x80000000 /* Warning logs */ + +/* Debug context structure */ +struct qedf_dbg_ctx { + unsigned int host_no; + struct pci_dev *pdev; +#ifdef CONFIG_DEBUG_FS + struct dentry *bdf_dentry; +#endif +}; + +#define QEDF_ERR(pdev, fmt, ...) \ + qedf_dbg_err(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__) +#define QEDF_WARN(pdev, fmt, ...) \ + qedf_dbg_warn(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__) +#define QEDF_NOTICE(pdev, fmt, ...) \ + qedf_dbg_notice(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__) +#define QEDF_INFO(pdev, level, fmt, ...)
\ + qedf_dbg_info(pdev, __func__, __LINE__, level, fmt, \ + ## __VA_ARGS__) +__printf(4, 5) +void qedf_dbg_err(struct qedf_dbg_ctx *qedf, const char *func, u32 line, + const char *fmt, ...); +__printf(4, 5) +void qedf_dbg_warn(struct qedf_dbg_ctx *qedf, const char *func, u32 line, + const char *, ...); +__printf(4, 5) +void qedf_dbg_notice(struct qedf_dbg_ctx *qedf, const char *func, + u32 line, const char *, ...); +__printf(5, 6) +void qedf_dbg_info(struct qedf_dbg_ctx *qedf, const char *func, u32 line, + u32 info, const char *fmt, ...); + +/* GRC Dump related defines */ + +struct Scsi_Host; + +#define QEDF_UEVENT_CODE_GRCDUMP 0 + +struct sysfs_bin_attrs { + char *name; + struct bin_attribute *attr; +}; + +extern int qedf_alloc_grc_dump_buf(uint8_t **buf, uint32_t len); +extern void qedf_free_grc_dump_buf(uint8_t **buf); +extern int qedf_get_grc_dump(struct qed_dev *cdev, + const struct qed_common_ops *common, uint8_t **buf, + uint32_t *grcsize); +extern void qedf_uevent_emit(struct Scsi_Host *shost, u32 code, char *msg); +extern int qedf_create_sysfs_attr(struct Scsi_Host *shost, + struct sysfs_bin_attrs *iter); +extern void qedf_remove_sysfs_attr(struct Scsi_Host *shost, + struct sysfs_bin_attrs *iter); + +#ifdef CONFIG_DEBUG_FS +/* DebugFS related code */ +struct qedf_list_of_funcs { + char *oper_str; + ssize_t (*oper_func)(struct qedf_dbg_ctx *qedf); +}; + +struct qedf_debugfs_ops { + char *name; + struct qedf_list_of_funcs *qedf_funcs; +}; + +#define qedf_dbg_fileops(drv, ops) \ +{ \ + .owner = THIS_MODULE, \ + .open = simple_open, \ + .read = drv##_dbg_##ops##_cmd_read, \ + .write = drv##_dbg_##ops##_cmd_write \ +} + +/* Used for debugfs sequential files */ +#define qedf_dbg_fileops_seq(drv, ops) \ +{ \ + .owner = THIS_MODULE, \ + .open = drv##_dbg_##ops##_open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ +} + +extern void qedf_dbg_host_init(struct qedf_dbg_ctx *qedf, + struct qedf_debugfs_ops *dops, + struct file_operations *fops); +extern void qedf_dbg_host_exit(struct qedf_dbg_ctx *qedf); +extern void qedf_dbg_init(char *drv_name); +extern void qedf_dbg_exit(void); +#endif /* CONFIG_DEBUG_FS */ + +#endif /* _QEDF_DBG_H_ */ diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c new file mode 100644 index 00000000000000..cb08b625c59479 --- /dev/null +++ b/drivers/scsi/qedf/qedf_debugfs.c @@ -0,0 +1,460 @@ +/* + * QLogic FCoE Offload Driver + * Copyright (c) 2016 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
+ */ +#ifdef CONFIG_DEBUG_FS + +#include +#include +#include + +#include "qedf.h" +#include "qedf_dbg.h" + +static struct dentry *qedf_dbg_root; + +/** + * qedf_dbg_host_init - setup the debugfs file for the pf + * @pf: the pf that is starting up + **/ +void +qedf_dbg_host_init(struct qedf_dbg_ctx *qedf, + struct qedf_debugfs_ops *dops, + struct file_operations *fops) +{ + char host_dirname[32]; + struct dentry *file_dentry = NULL; + + QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "Creating debugfs host node\n"); + /* create pf dir */ + sprintf(host_dirname, "host%u", qedf->host_no); + qedf->bdf_dentry = debugfs_create_dir(host_dirname, qedf_dbg_root); + if (!qedf->bdf_dentry) + return; + + /* create debugfs files */ + while (dops) { + if (!(dops->name)) + break; + + file_dentry = debugfs_create_file(dops->name, 0600, + qedf->bdf_dentry, qedf, + fops); + if (!file_dentry) { + QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, + "Debugfs entry %s creation failed\n", + dops->name); + debugfs_remove_recursive(qedf->bdf_dentry); + return; + } + dops++; + fops++; + } +} + +/** + * qedf_dbg_host_exit - clear out the pf's debugfs entries + * @pf: the pf that is stopping + **/ +void +qedf_dbg_host_exit(struct qedf_dbg_ctx *qedf) +{ + QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "Destroying debugfs host " + "entry\n"); + /* remove debugfs entries of this PF */ + debugfs_remove_recursive(qedf->bdf_dentry); + qedf->bdf_dentry = NULL; +} + +/** + * qedf_dbg_init - start up debugfs for the driver + **/ +void +qedf_dbg_init(char *drv_name) +{ + QEDF_INFO(NULL, QEDF_LOG_DEBUGFS, "Creating debugfs root node\n"); + + /* create qed dir in root of debugfs. NULL means debugfs root */ + qedf_dbg_root = debugfs_create_dir(drv_name, NULL); + if (!qedf_dbg_root) + QEDF_INFO(NULL, QEDF_LOG_DEBUGFS, "Init of debugfs " + "failed\n"); +} + +/** + * qedf_dbg_exit - clean out the driver's debugfs entries + **/ +void +qedf_dbg_exit(void) +{ + QEDF_INFO(NULL, QEDF_LOG_DEBUGFS, "Destroying debugfs root " + "entry\n"); + + /* remove qed dir in root of debugfs */ + debugfs_remove_recursive(qedf_dbg_root); + qedf_dbg_root = NULL; +} + +struct qedf_debugfs_ops qedf_debugfs_ops[] = { + { "fp_int", NULL }, + { "io_trace", NULL }, + { "debug", NULL }, + { "stop_io_on_error", NULL}, + { "driver_stats", NULL}, + { "clear_stats", NULL}, + { "offload_stats", NULL}, + /* This must be last */ + { NULL, NULL } +}; + +DECLARE_PER_CPU(struct qedf_percpu_iothread_s, qedf_percpu_iothreads); + +static ssize_t +qedf_dbg_fp_int_cmd_read(struct file *filp, char __user *buffer, size_t count, + loff_t *ppos) +{ + size_t cnt = 0; + int id; + struct qedf_fastpath *fp = NULL; + struct qedf_dbg_ctx *qedf_dbg = + (struct qedf_dbg_ctx *)filp->private_data; + struct qedf_ctx *qedf = container_of(qedf_dbg, + struct qedf_ctx, dbg_ctx); + + QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n"); + + cnt = sprintf(buffer, "\nFastpath I/O completions\n\n"); + + for (id = 0; id < qedf->num_queues; id++) { + fp = &(qedf->fp_array[id]); + if (fp->sb_id == QEDF_SB_ID_NULL) + continue; + cnt += sprintf((buffer + cnt), "#%d: %lu\n", id, + fp->completions); + } + + cnt = min_t(int, count, cnt - *ppos); + *ppos += cnt; + return cnt; +} + +static ssize_t +qedf_dbg_fp_int_cmd_write(struct file *filp, const char __user *buffer, + size_t count, loff_t *ppos) +{ + if (!count || *ppos) + return 0; + + return count; +} + +static ssize_t +qedf_dbg_debug_cmd_read(struct file *filp, char __user *buffer, size_t count, + loff_t *ppos) +{ + int cnt; + struct qedf_dbg_ctx *qedf = + (struct qedf_dbg_ctx 
*)filp->private_data; + + QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "entered\n"); + cnt = sprintf(buffer, "debug mask = 0x%x\n", qedf_debug); + + cnt = min_t(int, count, cnt - *ppos); + *ppos += cnt; + return cnt; +} + +static ssize_t +qedf_dbg_debug_cmd_write(struct file *filp, const char __user *buffer, + size_t count, loff_t *ppos) +{ + uint32_t val; + void *kern_buf; + int rval; + struct qedf_dbg_ctx *qedf = + (struct qedf_dbg_ctx *)filp->private_data; + + if (!count || *ppos) + return 0; + + kern_buf = memdup_user(buffer, count); + if (IS_ERR(kern_buf)) + return PTR_ERR(kern_buf); + + rval = kstrtouint(kern_buf, 10, &val); + kfree(kern_buf); + if (rval) + return rval; + + if (val == 1) + qedf_debug = QEDF_DEFAULT_LOG_MASK; + else + qedf_debug = val; + + QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "Setting debug=0x%x.\n", val); + return count; +} + +static ssize_t +qedf_dbg_stop_io_on_error_cmd_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + int cnt; + struct qedf_dbg_ctx *qedf_dbg = + (struct qedf_dbg_ctx *)filp->private_data; + struct qedf_ctx *qedf = container_of(qedf_dbg, + struct qedf_ctx, dbg_ctx); + + QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n"); + cnt = sprintf(buffer, "%s\n", + qedf->stop_io_on_error ? "true" : "false"); + + cnt = min_t(int, count, cnt - *ppos); + *ppos += cnt; + return cnt; +} + +static ssize_t +qedf_dbg_stop_io_on_error_cmd_write(struct file *filp, + const char __user *buffer, size_t count, + loff_t *ppos) +{ + void *kern_buf; + struct qedf_dbg_ctx *qedf_dbg = + (struct qedf_dbg_ctx *)filp->private_data; + struct qedf_ctx *qedf = container_of(qedf_dbg, struct qedf_ctx, + dbg_ctx); + + QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n"); + + if (!count || *ppos) + return 0; + + kern_buf = memdup_user(buffer, 6); + if (IS_ERR(kern_buf)) + return PTR_ERR(kern_buf); + + if (strncmp(kern_buf, "false", 5) == 0) + qedf->stop_io_on_error = false; + else if (strncmp(kern_buf, "true", 4) == 0) + qedf->stop_io_on_error = true; + else if (strncmp(kern_buf, "now", 3) == 0) + /* Trigger from user to stop all I/O on this host */ + set_bit(QEDF_DBG_STOP_IO, &qedf->flags); + + kfree(kern_buf); + return count; +} + +static int +qedf_io_trace_show(struct seq_file *s, void *unused) +{ + int i, idx = 0; + struct qedf_ctx *qedf = s->private; + struct qedf_dbg_ctx *qedf_dbg = &qedf->dbg_ctx; + struct qedf_io_log *io_log; + unsigned long flags; + + if (!qedf_io_tracing) { + seq_puts(s, "I/O tracing not enabled.\n"); + goto out; + } + + QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n"); + + spin_lock_irqsave(&qedf->io_trace_lock, flags); + idx = qedf->io_trace_idx; + for (i = 0; i < QEDF_IO_TRACE_SIZE; i++) { + io_log = &qedf->io_trace_buf[idx]; + seq_printf(s, "%d:", io_log->direction); + seq_printf(s, "0x%x:", io_log->task_id); + seq_printf(s, "0x%06x:", io_log->port_id); + seq_printf(s, "%d:", io_log->lun); + seq_printf(s, "0x%02x:", io_log->op); + seq_printf(s, "0x%02x%02x%02x%02x:", io_log->lba[0], + io_log->lba[1], io_log->lba[2], io_log->lba[3]); + seq_printf(s, "%d:", io_log->bufflen); + seq_printf(s, "%d:", io_log->sg_count); + seq_printf(s, "0x%08x:", io_log->result); + seq_printf(s, "%lu:", io_log->jiffies); + seq_printf(s, "%d:", io_log->refcount); + seq_printf(s, "%d:", io_log->req_cpu); + seq_printf(s, "%d:", io_log->int_cpu); + seq_printf(s, "%d:", io_log->rsp_cpu); + seq_printf(s, "%d\n", io_log->sge_type); + + idx++; + if (idx == QEDF_IO_TRACE_SIZE) + idx = 0; + } + spin_unlock_irqrestore(&qedf->io_trace_lock, flags); + +out: + return 0; +} + 
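For readers tracing the debugfs plumbing: the qedf_dbg_fileops_seq() helper defined in qedf_dbg.h pastes the driver prefix and node name together, so the io_trace entry in the qedf_dbg_fops[] table further below expands to roughly the initializer sketched here (an illustrative expansion with a made-up variable name, not additional patch content):

	static const struct file_operations qedf_io_trace_fops_sketch = {
		.owner   = THIS_MODULE,
		.open    = qedf_dbg_io_trace_open,  /* hands qedf_io_trace_show() to single_open() */
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = single_release,
	};

A read of the io_trace node under the per-host debugfs directory (created as "host%u" by qedf_dbg_host_init()) therefore goes through seq_read(), which calls qedf_io_trace_show() above to emit the trace ring one colon-separated record per line. Likewise, writing a decimal value to the debug node reaches qedf_dbg_debug_cmd_write(), which parses it with kstrtouint() and either restores QEDF_DEFAULT_LOG_MASK (for the value 1) or installs the written value as the new qedf_debug mask.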
+static int +qedf_dbg_io_trace_open(struct inode *inode, struct file *file) +{ + struct qedf_dbg_ctx *qedf_dbg = inode->i_private; + struct qedf_ctx *qedf = container_of(qedf_dbg, + struct qedf_ctx, dbg_ctx); + + return single_open(file, qedf_io_trace_show, qedf); +} + +static int +qedf_driver_stats_show(struct seq_file *s, void *unused) +{ + struct qedf_ctx *qedf = s->private; + struct qedf_rport *fcport; + struct fc_rport_priv *rdata; + + seq_printf(s, "cmg_mgr free io_reqs: %d\n", + atomic_read(&qedf->cmd_mgr->free_list_cnt)); + seq_printf(s, "slow SGEs: %d\n", qedf->slow_sge_ios); + seq_printf(s, "single SGEs: %d\n", qedf->single_sge_ios); + seq_printf(s, "fast SGEs: %d\n\n", qedf->fast_sge_ios); + + seq_puts(s, "Offloaded ports:\n\n"); + + rcu_read_lock(); + list_for_each_entry_rcu(fcport, &qedf->fcports, peers) { + rdata = fcport->rdata; + if (rdata == NULL) + continue; + seq_printf(s, "%06x: free_sqes: %d, num_active_ios: %d\n", + rdata->ids.port_id, atomic_read(&fcport->free_sqes), + atomic_read(&fcport->num_active_ios)); + } + rcu_read_unlock(); + + return 0; +} + +static int +qedf_dbg_driver_stats_open(struct inode *inode, struct file *file) +{ + struct qedf_dbg_ctx *qedf_dbg = inode->i_private; + struct qedf_ctx *qedf = container_of(qedf_dbg, + struct qedf_ctx, dbg_ctx); + + return single_open(file, qedf_driver_stats_show, qedf); +} + +static ssize_t +qedf_dbg_clear_stats_cmd_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + int cnt = 0; + + /* Essentially a read stub */ + cnt = min_t(int, count, cnt - *ppos); + *ppos += cnt; + return cnt; +} + +static ssize_t +qedf_dbg_clear_stats_cmd_write(struct file *filp, + const char __user *buffer, size_t count, + loff_t *ppos) +{ + struct qedf_dbg_ctx *qedf_dbg = + (struct qedf_dbg_ctx *)filp->private_data; + struct qedf_ctx *qedf = container_of(qedf_dbg, struct qedf_ctx, + dbg_ctx); + + QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "Clearing stat counters.\n"); + + if (!count || *ppos) + return 0; + + /* Clear stat counters exposed by 'stats' node */ + qedf->slow_sge_ios = 0; + qedf->single_sge_ios = 0; + qedf->fast_sge_ios = 0; + + return count; +} + +static int +qedf_offload_stats_show(struct seq_file *s, void *unused) +{ + struct qedf_ctx *qedf = s->private; + struct qed_fcoe_stats *fw_fcoe_stats; + + fw_fcoe_stats = kmalloc(sizeof(struct qed_fcoe_stats), GFP_KERNEL); + if (!fw_fcoe_stats) { + QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate memory for " + "fw_fcoe_stats.\n"); + goto out; + } + + /* Query firmware for offload stats */ + qed_ops->get_stats(qedf->cdev, fw_fcoe_stats); + + seq_printf(s, "fcoe_rx_byte_cnt=%llu\n" + "fcoe_rx_data_pkt_cnt=%llu\n" + "fcoe_rx_xfer_pkt_cnt=%llu\n" + "fcoe_rx_other_pkt_cnt=%llu\n" + "fcoe_silent_drop_pkt_cmdq_full_cnt=%u\n" + "fcoe_silent_drop_pkt_crc_error_cnt=%u\n" + "fcoe_silent_drop_pkt_task_invalid_cnt=%u\n" + "fcoe_silent_drop_total_pkt_cnt=%u\n" + "fcoe_silent_drop_pkt_rq_full_cnt=%u\n" + "fcoe_tx_byte_cnt=%llu\n" + "fcoe_tx_data_pkt_cnt=%llu\n" + "fcoe_tx_xfer_pkt_cnt=%llu\n" + "fcoe_tx_other_pkt_cnt=%llu\n", + fw_fcoe_stats->fcoe_rx_byte_cnt, + fw_fcoe_stats->fcoe_rx_data_pkt_cnt, + fw_fcoe_stats->fcoe_rx_xfer_pkt_cnt, + fw_fcoe_stats->fcoe_rx_other_pkt_cnt, + fw_fcoe_stats->fcoe_silent_drop_pkt_cmdq_full_cnt, + fw_fcoe_stats->fcoe_silent_drop_pkt_crc_error_cnt, + fw_fcoe_stats->fcoe_silent_drop_pkt_task_invalid_cnt, + fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt, + fw_fcoe_stats->fcoe_silent_drop_pkt_rq_full_cnt, + fw_fcoe_stats->fcoe_tx_byte_cnt, + 
fw_fcoe_stats->fcoe_tx_data_pkt_cnt, + fw_fcoe_stats->fcoe_tx_xfer_pkt_cnt, + fw_fcoe_stats->fcoe_tx_other_pkt_cnt); + + kfree(fw_fcoe_stats); +out: + return 0; +} + +static int +qedf_dbg_offload_stats_open(struct inode *inode, struct file *file) +{ + struct qedf_dbg_ctx *qedf_dbg = inode->i_private; + struct qedf_ctx *qedf = container_of(qedf_dbg, + struct qedf_ctx, dbg_ctx); + + return single_open(file, qedf_offload_stats_show, qedf); +} + + +const struct file_operations qedf_dbg_fops[] = { + qedf_dbg_fileops(qedf, fp_int), + qedf_dbg_fileops_seq(qedf, io_trace), + qedf_dbg_fileops(qedf, debug), + qedf_dbg_fileops(qedf, stop_io_on_error), + qedf_dbg_fileops_seq(qedf, driver_stats), + qedf_dbg_fileops(qedf, clear_stats), + qedf_dbg_fileops_seq(qedf, offload_stats), + /* This must be last */ + { NULL, NULL }, +}; + +#else /* CONFIG_DEBUG_FS */ +void qedf_dbg_host_init(struct qedf_dbg_ctx *); +void qedf_dbg_host_exit(struct qedf_dbg_ctx *); +void qedf_dbg_init(char *); +void qedf_dbg_exit(void); +#endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c new file mode 100644 index 00000000000000..59f3e5c73a139b --- /dev/null +++ b/drivers/scsi/qedf/qedf_els.c @@ -0,0 +1,949 @@ +/* + * QLogic FCoE Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ +#include "qedf.h" + +/* It's assumed that the lock is held when calling this function. */ +static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op, + void *data, uint32_t data_len, + void (*cb_func)(struct qedf_els_cb_arg *cb_arg), + struct qedf_els_cb_arg *cb_arg, uint32_t timer_msec) +{ + struct qedf_ctx *qedf = fcport->qedf; + struct fc_lport *lport = qedf->lport; + struct qedf_ioreq *els_req; + struct qedf_mp_req *mp_req; + struct fc_frame_header *fc_hdr; + struct fcoe_task_context *task; + int rc = 0; + uint32_t did, sid; + uint16_t xid; + uint32_t start_time = jiffies / HZ; + uint32_t current_time; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n"); + + rc = fc_remote_port_chkready(fcport->rport); + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: rport not ready\n", op); + rc = -EAGAIN; + goto els_err; + } + if (lport->state != LPORT_ST_READY || !(lport->link_up)) { + QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: link is not ready\n", + op); + rc = -EAGAIN; + goto els_err; + } + + if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) { + QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: fcport not ready\n", op); + rc = -EINVAL; + goto els_err; + } + +retry_els: + els_req = qedf_alloc_cmd(fcport, QEDF_ELS); + if (!els_req) { + current_time = jiffies / HZ; + if ((current_time - start_time) > 10) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "els: Failed els 0x%x\n", op); + rc = -ENOMEM; + goto els_err; + } + mdelay(20 * USEC_PER_MSEC); + goto retry_els; + } + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "initiate_els els_req = " + "0x%p cb_arg = %p xid = %x\n", els_req, cb_arg, + els_req->xid); + els_req->sc_cmd = NULL; + els_req->cmd_type = QEDF_ELS; + els_req->fcport = fcport; + els_req->cb_func = cb_func; + cb_arg->io_req = els_req; + cb_arg->op = op; + els_req->cb_arg = cb_arg; + els_req->data_xfer_len = data_len; + + /* Record which cpu this request is associated with */ + els_req->cpu = smp_processor_id(); + + mp_req = (struct qedf_mp_req *)&(els_req->mp_req); + rc = qedf_init_mp_req(els_req); 
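+ /* qedf_init_mp_req() above allocates the middle-path request and response buffers (and their BDs) that will carry the ELS payload and its reply. */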
+ if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), "ELS MP request init failed\n"); + kref_put(&els_req->refcount, qedf_release_cmd); + goto els_err; + } else { + rc = 0; + } + + /* Fill ELS Payload */ + if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) { + memcpy(mp_req->req_buf, data, data_len); + } else { + QEDF_ERR(&(qedf->dbg_ctx), "Invalid ELS op 0x%x\n", op); + els_req->cb_func = NULL; + els_req->cb_arg = NULL; + kref_put(&els_req->refcount, qedf_release_cmd); + rc = -EINVAL; + } + + if (rc) + goto els_err; + + /* Fill FC header */ + fc_hdr = &(mp_req->req_fc_hdr); + + did = fcport->rdata->ids.port_id; + sid = fcport->sid; + + __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, sid, did, + FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | + FC_FC_SEQ_INIT, 0); + + /* Obtain exchange id */ + xid = els_req->xid; + + /* Initialize task context for this IO request */ + task = qedf_get_task_mem(&qedf->tasks, xid); + qedf_init_mp_task(els_req, task); + + /* Put timer on original I/O request */ + if (timer_msec) + qedf_cmd_timer_set(qedf, els_req, timer_msec); + + qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_MIDPATH, 0); + + /* Ring doorbell */ + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS " + "req\n"); + qedf_ring_doorbell(fcport); +els_err: + return rc; +} + +void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, + struct qedf_ioreq *els_req) +{ + struct fcoe_task_context *task_ctx; + struct scsi_cmnd *sc_cmd; + uint16_t xid; + struct fcoe_cqe_midpath_info *mp_info; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered with xid = 0x%x" + " cmd_type = %d.\n", els_req->xid, els_req->cmd_type); + + /* Kill the ELS timer */ + cancel_delayed_work(&els_req->timeout_work); + + xid = els_req->xid; + task_ctx = qedf_get_task_mem(&qedf->tasks, xid); + sc_cmd = els_req->sc_cmd; + + /* Get ELS response length from CQE */ + mp_info = &cqe->cqe_info.midpath_info; + els_req->mp_req.resp_len = mp_info->data_placement_size; + + /* Parse ELS response */ + if ((els_req->cb_func) && (els_req->cb_arg)) { + els_req->cb_func(els_req->cb_arg); + els_req->cb_arg = NULL; + } + + kref_put(&els_req->refcount, qedf_release_cmd); +} + +static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg) +{ + struct qedf_ioreq *orig_io_req; + struct qedf_ioreq *rrq_req; + struct qedf_ctx *qedf; + int refcount; + + rrq_req = cb_arg->io_req; + qedf = rrq_req->fcport->qedf; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered.\n"); + + orig_io_req = cb_arg->aborted_io_req; + + if (!orig_io_req) + goto out_free; + + if (rrq_req->event != QEDF_IOREQ_EV_ELS_TMO && + rrq_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT) + cancel_delayed_work_sync(&orig_io_req->timeout_work); + + refcount = kref_read(&orig_io_req->refcount); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "rrq_compl: orig io = %p," + " orig xid = 0x%x, rrq_xid = 0x%x, refcount=%d\n", + orig_io_req, orig_io_req->xid, rrq_req->xid, refcount); + + /* This should return the aborted io_req to the command pool */ + if (orig_io_req) + kref_put(&orig_io_req->refcount, qedf_release_cmd); + +out_free: + kfree(cb_arg); +} + +/* Assumes kref is already held by caller */ +int qedf_send_rrq(struct qedf_ioreq *aborted_io_req) +{ + + struct fc_els_rrq rrq; + struct qedf_rport *fcport; + struct fc_lport *lport; + struct qedf_els_cb_arg *cb_arg = NULL; + struct qedf_ctx *qedf; + uint32_t sid; + uint32_t r_a_tov; + int rc; + + if (!aborted_io_req) { + QEDF_ERR(NULL, "abort_io_req is NULL.\n"); + return -EINVAL; + } + + fcport = aborted_io_req->fcport; + + /* Check that fcport 
is still offloaded */ + if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) { + QEDF_ERR(NULL, "fcport is no longer offloaded.\n"); + return -EINVAL; + } + + if (!fcport->qedf) { + QEDF_ERR(NULL, "fcport->qedf is NULL.\n"); + return -EINVAL; + } + + qedf = fcport->qedf; + lport = qedf->lport; + sid = fcport->sid; + r_a_tov = lport->r_a_tov; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending RRQ orig " + "io = %p, orig_xid = 0x%x\n", aborted_io_req, + aborted_io_req->xid); + memset(&rrq, 0, sizeof(rrq)); + + cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO); + if (!cb_arg) { + QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for " + "RRQ\n"); + rc = -ENOMEM; + goto rrq_err; + } + + cb_arg->aborted_io_req = aborted_io_req; + + rrq.rrq_cmd = ELS_RRQ; + hton24(rrq.rrq_s_id, sid); + rrq.rrq_ox_id = htons(aborted_io_req->xid); + rrq.rrq_rx_id = + htons(aborted_io_req->task->tstorm_st_context.read_write.rx_id); + + rc = qedf_initiate_els(fcport, ELS_RRQ, &rrq, sizeof(rrq), + qedf_rrq_compl, cb_arg, r_a_tov); + +rrq_err: + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), "RRQ failed - release orig io " + "req 0x%x\n", aborted_io_req->xid); + kfree(cb_arg); + kref_put(&aborted_io_req->refcount, qedf_release_cmd); + } + return rc; +} + +static void qedf_process_l2_frame_compl(struct qedf_rport *fcport, + struct fc_frame *fp, + u16 l2_oxid) +{ + struct fc_lport *lport = fcport->qedf->lport; + struct fc_frame_header *fh; + u32 crc; + + fh = (struct fc_frame_header *)fc_frame_header_get(fp); + + /* Set the OXID we return to what libfc used */ + if (l2_oxid != FC_XID_UNKNOWN) + fh->fh_ox_id = htons(l2_oxid); + + /* Setup header fields */ + fh->fh_r_ctl = FC_RCTL_ELS_REP; + fh->fh_type = FC_TYPE_ELS; + /* Last sequence, end sequence */ + fh->fh_f_ctl[0] = 0x98; + hton24(fh->fh_d_id, lport->port_id); + hton24(fh->fh_s_id, fcport->rdata->ids.port_id); + fh->fh_rx_id = 0xffff; + + /* Set frame attributes */ + crc = fcoe_fc_crc(fp); + fc_frame_init(fp); + fr_dev(fp) = lport; + fr_sof(fp) = FC_SOF_I3; + fr_eof(fp) = FC_EOF_T; + fr_crc(fp) = cpu_to_le32(~crc); + + /* Send completed request to libfc */ + fc_exch_recv(lport, fp); +} + +/* + * In instances where an ELS command times out we may need to restart the + * rport by logging out and then logging back in. + */ +void qedf_restart_rport(struct qedf_rport *fcport) +{ + struct fc_lport *lport; + struct fc_rport_priv *rdata; + u32 port_id; + + if (!fcport) + return; + + rdata = fcport->rdata; + if (rdata) { + lport = fcport->qedf->lport; + port_id = rdata->ids.port_id; + QEDF_ERR(&(fcport->qedf->dbg_ctx), + "LOGO port_id=%x.\n", port_id); + fc_rport_logoff(rdata); + /* Recreate the rport and log back in */ + rdata = fc_rport_create(lport, port_id); + if (rdata) + fc_rport_login(rdata); + } +} + +static void qedf_l2_els_compl(struct qedf_els_cb_arg *cb_arg) +{ + struct qedf_ioreq *els_req; + struct qedf_rport *fcport; + struct qedf_mp_req *mp_req; + struct fc_frame *fp; + struct fc_frame_header *fh, *mp_fc_hdr; + void *resp_buf, *fc_payload; + u32 resp_len; + u16 l2_oxid; + + l2_oxid = cb_arg->l2_oxid; + els_req = cb_arg->io_req; + + if (!els_req) { + QEDF_ERR(NULL, "els_req is NULL.\n"); + goto free_arg; + } + + /* + * If we are flushing the command just free the cb_arg as none of the + * response data will be valid. 
+ */ + if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH) + goto free_arg; + + fcport = els_req->fcport; + mp_req = &(els_req->mp_req); + mp_fc_hdr = &(mp_req->resp_fc_hdr); + resp_len = mp_req->resp_len; + resp_buf = mp_req->resp_buf; + + /* + * If a middle path ELS command times out, don't try to return + * the command but rather do any internal cleanup and then libfc + * timeout the command and clean up its internal resources. + */ + if (els_req->event == QEDF_IOREQ_EV_ELS_TMO) { + /* + * If ADISC times out, libfc will timeout the exchange and then + * try to send a PLOGI which will timeout since the session is + * still offloaded. Force libfc to logout the session which + * will offload the connection and allow the PLOGI response to + * flow over the LL2 path. + */ + if (cb_arg->op == ELS_ADISC) + qedf_restart_rport(fcport); + return; + } + + if (sizeof(struct fc_frame_header) + resp_len > QEDF_PAGE_SIZE) { + QEDF_ERR(&(fcport->qedf->dbg_ctx), "resp_len is " + "beyond page size.\n"); + goto free_arg; + } + + fp = fc_frame_alloc(fcport->qedf->lport, resp_len); + if (!fp) { + QEDF_ERR(&(fcport->qedf->dbg_ctx), + "fc_frame_alloc failure.\n"); + return; + } + + /* Copy frame header from firmware into fp */ + fh = (struct fc_frame_header *)fc_frame_header_get(fp); + memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header)); + + /* Copy payload from firmware into fp */ + fc_payload = fc_frame_payload_get(fp, resp_len); + memcpy(fc_payload, resp_buf, resp_len); + + QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS, + "Completing OX_ID 0x%x back to libfc.\n", l2_oxid); + qedf_process_l2_frame_compl(fcport, fp, l2_oxid); + +free_arg: + kfree(cb_arg); +} + +int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp) +{ + struct fc_els_adisc *adisc; + struct fc_frame_header *fh; + struct fc_lport *lport = fcport->qedf->lport; + struct qedf_els_cb_arg *cb_arg = NULL; + struct qedf_ctx *qedf; + uint32_t r_a_tov = lport->r_a_tov; + int rc; + + qedf = fcport->qedf; + fh = fc_frame_header_get(fp); + + cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO); + if (!cb_arg) { + QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for " + "ADISC\n"); + rc = -ENOMEM; + goto adisc_err; + } + cb_arg->l2_oxid = ntohs(fh->fh_ox_id); + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "Sending ADISC ox_id=0x%x.\n", cb_arg->l2_oxid); + + adisc = fc_frame_payload_get(fp, sizeof(*adisc)); + + rc = qedf_initiate_els(fcport, ELS_ADISC, adisc, sizeof(*adisc), + qedf_l2_els_compl, cb_arg, r_a_tov); + +adisc_err: + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), "ADISC failed.\n"); + kfree(cb_arg); + } + return rc; +} + +static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg) +{ + struct qedf_ioreq *orig_io_req; + struct qedf_ioreq *srr_req; + struct qedf_mp_req *mp_req; + struct fc_frame_header *mp_fc_hdr, *fh; + struct fc_frame *fp; + void *resp_buf, *fc_payload; + u32 resp_len; + struct fc_lport *lport; + struct qedf_ctx *qedf; + int refcount; + u8 opcode; + + srr_req = cb_arg->io_req; + qedf = srr_req->fcport->qedf; + lport = qedf->lport; + + orig_io_req = cb_arg->aborted_io_req; + + if (!orig_io_req) + goto out_free; + + clear_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags); + + if (srr_req->event != QEDF_IOREQ_EV_ELS_TMO && + srr_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT) + cancel_delayed_work_sync(&orig_io_req->timeout_work); + + refcount = kref_read(&orig_io_req->refcount); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p," + " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n", + orig_io_req, 
orig_io_req->xid, srr_req->xid, refcount); + + /* If a SRR times out, simply free resources */ + if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO) + goto out_free; + + /* Normalize response data into struct fc_frame */ + mp_req = &(srr_req->mp_req); + mp_fc_hdr = &(mp_req->resp_fc_hdr); + resp_len = mp_req->resp_len; + resp_buf = mp_req->resp_buf; + + fp = fc_frame_alloc(lport, resp_len); + if (!fp) { + QEDF_ERR(&(qedf->dbg_ctx), + "fc_frame_alloc failure.\n"); + goto out_free; + } + + /* Copy frame header from firmware into fp */ + fh = (struct fc_frame_header *)fc_frame_header_get(fp); + memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header)); + + /* Copy payload from firmware into fp */ + fc_payload = fc_frame_payload_get(fp, resp_len); + memcpy(fc_payload, resp_buf, resp_len); + + opcode = fc_frame_payload_op(fp); + switch (opcode) { + case ELS_LS_ACC: + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "SRR success.\n"); + break; + case ELS_LS_RJT: + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS, + "SRR rejected.\n"); + qedf_initiate_abts(orig_io_req, true); + break; + } + + fc_frame_free(fp); +out_free: + /* Put reference for original command since SRR completed */ + kref_put(&orig_io_req->refcount, qedf_release_cmd); + kfree(cb_arg); +} + +static int qedf_send_srr(struct qedf_ioreq *orig_io_req, u32 offset, u8 r_ctl) +{ + struct fcp_srr srr; + struct qedf_ctx *qedf; + struct qedf_rport *fcport; + struct fc_lport *lport; + struct qedf_els_cb_arg *cb_arg = NULL; + u32 sid, r_a_tov; + int rc; + + if (!orig_io_req) { + QEDF_ERR(NULL, "orig_io_req is NULL.\n"); + return -EINVAL; + } + + fcport = orig_io_req->fcport; + + /* Check that fcport is still offloaded */ + if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) { + QEDF_ERR(NULL, "fcport is no longer offloaded.\n"); + return -EINVAL; + } + + if (!fcport->qedf) { + QEDF_ERR(NULL, "fcport->qedf is NULL.\n"); + return -EINVAL; + } + + /* Take reference until SRR command completion */ + kref_get(&orig_io_req->refcount); + + qedf = fcport->qedf; + lport = qedf->lport; + sid = fcport->sid; + r_a_tov = lport->r_a_tov; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending SRR orig_io=%p, " + "orig_xid=0x%x\n", orig_io_req, orig_io_req->xid); + memset(&srr, 0, sizeof(srr)); + + cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO); + if (!cb_arg) { + QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for " + "SRR\n"); + rc = -ENOMEM; + goto srr_err; + } + + cb_arg->aborted_io_req = orig_io_req; + + srr.srr_op = ELS_SRR; + srr.srr_ox_id = htons(orig_io_req->xid); + srr.srr_rx_id = htons(orig_io_req->rx_id); + srr.srr_rel_off = htonl(offset); + srr.srr_r_ctl = r_ctl; + + rc = qedf_initiate_els(fcport, ELS_SRR, &srr, sizeof(srr), + qedf_srr_compl, cb_arg, r_a_tov); + +srr_err: + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), "SRR failed - release orig_io_req" + "=0x%x\n", orig_io_req->xid); + kfree(cb_arg); + /* If we fail to queue SRR, send ABTS to orig_io */ + qedf_initiate_abts(orig_io_req, true); + kref_put(&orig_io_req->refcount, qedf_release_cmd); + } else + /* Tell other threads that SRR is in progress */ + set_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags); + + return rc; +} + +static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req, + u32 offset, u8 r_ctl) +{ + struct qedf_rport *fcport; + unsigned long flags; + struct qedf_els_cb_arg *cb_arg; + + fcport = orig_io_req->fcport; + + QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS, + "Doing sequence cleanup for xid=0x%x offset=%u.\n", + orig_io_req->xid, offset); + + cb_arg = 
kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO); + if (!cb_arg) { + QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to allocate cb_arg " + "for sequence cleanup\n"); + return; + } + + /* Get reference for cleanup request */ + kref_get(&orig_io_req->refcount); + + orig_io_req->cmd_type = QEDF_SEQ_CLEANUP; + cb_arg->offset = offset; + cb_arg->r_ctl = r_ctl; + orig_io_req->cb_arg = cb_arg; + + qedf_cmd_timer_set(fcport->qedf, orig_io_req, + QEDF_CLEANUP_TIMEOUT * HZ); + + spin_lock_irqsave(&fcport->rport_lock, flags); + + qedf_add_to_sq(fcport, orig_io_req->xid, 0, + FCOE_TASK_TYPE_SEQUENCE_CLEANUP, offset); + qedf_ring_doorbell(fcport); + + spin_unlock_irqrestore(&fcport->rport_lock, flags); +} + +void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf, + struct fcoe_cqe *cqe, struct qedf_ioreq *io_req) +{ + int rc; + struct qedf_els_cb_arg *cb_arg; + + cb_arg = io_req->cb_arg; + + /* If we timed out just free resources */ + if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe) + goto free; + + /* Kill the timer we put on the request */ + cancel_delayed_work_sync(&io_req->timeout_work); + + rc = qedf_send_srr(io_req, cb_arg->offset, cb_arg->r_ctl); + if (rc) + QEDF_ERR(&(qedf->dbg_ctx), "Unable to send SRR, I/O will " + "abort, xid=0x%x.\n", io_req->xid); +free: + kfree(cb_arg); + kref_put(&io_req->refcount, qedf_release_cmd); +} + +static bool qedf_requeue_io_req(struct qedf_ioreq *orig_io_req) +{ + struct qedf_rport *fcport; + struct qedf_ioreq *new_io_req; + unsigned long flags; + bool rc = false; + + fcport = orig_io_req->fcport; + if (!fcport) { + QEDF_ERR(NULL, "fcport is NULL.\n"); + goto out; + } + + if (!orig_io_req->sc_cmd) { + QEDF_ERR(&(fcport->qedf->dbg_ctx), "sc_cmd is NULL for " + "xid=0x%x.\n", orig_io_req->xid); + goto out; + } + + new_io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD); + if (!new_io_req) { + QEDF_ERR(&(fcport->qedf->dbg_ctx), "Could not allocate new " + "io_req.\n"); + goto out; + } + + new_io_req->sc_cmd = orig_io_req->sc_cmd; + + /* + * This keeps the sc_cmd struct from being returned to the tape + * driver and being requeued twice. We do need to put a reference + * for the original I/O request since we will not do a SCSI completion + * for it. + */ + orig_io_req->sc_cmd = NULL; + kref_put(&orig_io_req->refcount, qedf_release_cmd); + + spin_lock_irqsave(&fcport->rport_lock, flags); + + /* kref for new command released in qedf_post_io_req on error */ + if (qedf_post_io_req(fcport, new_io_req)) { + QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to post io_req\n"); + /* Return SQE to pool */ + atomic_inc(&fcport->free_sqes); + } else { + QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS, + "Reissued SCSI command from orig_xid=0x%x on " + "new_xid=0x%x.\n", orig_io_req->xid, new_io_req->xid); + /* + * Abort the original I/O but do not return SCSI command as + * it has been reissued on another OX_ID. 
+ */ + spin_unlock_irqrestore(&fcport->rport_lock, flags); + qedf_initiate_abts(orig_io_req, false); + goto out; + } + + spin_unlock_irqrestore(&fcport->rport_lock, flags); +out: + return rc; +} + + +static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg) +{ + struct qedf_ioreq *orig_io_req; + struct qedf_ioreq *rec_req; + struct qedf_mp_req *mp_req; + struct fc_frame_header *mp_fc_hdr, *fh; + struct fc_frame *fp; + void *resp_buf, *fc_payload; + u32 resp_len; + struct fc_lport *lport; + struct qedf_ctx *qedf; + int refcount; + enum fc_rctl r_ctl; + struct fc_els_ls_rjt *rjt; + struct fc_els_rec_acc *acc; + u8 opcode; + u32 offset, e_stat; + struct scsi_cmnd *sc_cmd; + bool srr_needed = false; + + rec_req = cb_arg->io_req; + qedf = rec_req->fcport->qedf; + lport = qedf->lport; + + orig_io_req = cb_arg->aborted_io_req; + + if (!orig_io_req) + goto out_free; + + if (rec_req->event != QEDF_IOREQ_EV_ELS_TMO && + rec_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT) + cancel_delayed_work_sync(&orig_io_req->timeout_work); + + refcount = kref_read(&orig_io_req->refcount); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p," + " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n", + orig_io_req, orig_io_req->xid, rec_req->xid, refcount); + + /* If a REC times out, free resources */ + if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO) + goto out_free; + + /* Normalize response data into struct fc_frame */ + mp_req = &(rec_req->mp_req); + mp_fc_hdr = &(mp_req->resp_fc_hdr); + resp_len = mp_req->resp_len; + acc = resp_buf = mp_req->resp_buf; + + fp = fc_frame_alloc(lport, resp_len); + if (!fp) { + QEDF_ERR(&(qedf->dbg_ctx), + "fc_frame_alloc failure.\n"); + goto out_free; + } + + /* Copy frame header from firmware into fp */ + fh = (struct fc_frame_header *)fc_frame_header_get(fp); + memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header)); + + /* Copy payload from firmware into fp */ + fc_payload = fc_frame_payload_get(fp, resp_len); + memcpy(fc_payload, resp_buf, resp_len); + + opcode = fc_frame_payload_op(fp); + if (opcode == ELS_LS_RJT) { + rjt = fc_frame_payload_get(fp, sizeof(*rjt)); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "Received LS_RJT for REC: er_reason=0x%x, " + "er_explan=0x%x.\n", rjt->er_reason, rjt->er_explan); + /* + * The following response(s) mean that we need to reissue the + * request on another exchange. We need to do this without + * informing the upper layers lest it cause an application + * error. 
+ */ + if ((rjt->er_reason == ELS_RJT_LOGIC || + rjt->er_reason == ELS_RJT_UNAB) && + rjt->er_explan == ELS_EXPL_OXID_RXID) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "Handle CMD LOST case.\n"); + qedf_requeue_io_req(orig_io_req); + } + } else if (opcode == ELS_LS_ACC) { + offset = ntohl(acc->reca_fc4value); + e_stat = ntohl(acc->reca_e_stat); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "Received LS_ACC for REC: offset=0x%x, e_stat=0x%x.\n", + offset, e_stat); + if (e_stat & ESB_ST_SEQ_INIT) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "Target has the seq init\n"); + goto out_free_frame; + } + sc_cmd = orig_io_req->sc_cmd; + if (!sc_cmd) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "sc_cmd is NULL for xid=0x%x.\n", + orig_io_req->xid); + goto out_free_frame; + } + /* SCSI write case */ + if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) { + if (offset == orig_io_req->data_xfer_len) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "WRITE - response lost.\n"); + r_ctl = FC_RCTL_DD_CMD_STATUS; + srr_needed = true; + offset = 0; + } else { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "WRITE - XFER_RDY/DATA lost.\n"); + r_ctl = FC_RCTL_DD_DATA_DESC; + /* Use data from warning CQE instead of REC */ + offset = orig_io_req->tx_buf_off; + } + /* SCSI read case */ + } else { + if (orig_io_req->rx_buf_off == + orig_io_req->data_xfer_len) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "READ - response lost.\n"); + srr_needed = true; + r_ctl = FC_RCTL_DD_CMD_STATUS; + offset = 0; + } else { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "READ - DATA lost.\n"); + /* + * For read case we always set the offset to 0 + * for sequence recovery task. + */ + offset = 0; + r_ctl = FC_RCTL_DD_SOL_DATA; + } + } + + if (srr_needed) + qedf_send_srr(orig_io_req, offset, r_ctl); + else + qedf_initiate_seq_cleanup(orig_io_req, offset, r_ctl); + } + +out_free_frame: + fc_frame_free(fp); +out_free: + /* Put reference for original command since REC completed */ + kref_put(&orig_io_req->refcount, qedf_release_cmd); + kfree(cb_arg); +} + +/* Assumes kref is already held by caller */ +int qedf_send_rec(struct qedf_ioreq *orig_io_req) +{ + + struct fc_els_rec rec; + struct qedf_rport *fcport; + struct fc_lport *lport; + struct qedf_els_cb_arg *cb_arg = NULL; + struct qedf_ctx *qedf; + uint32_t sid; + uint32_t r_a_tov; + int rc; + + if (!orig_io_req) { + QEDF_ERR(NULL, "orig_io_req is NULL.\n"); + return -EINVAL; + } + + fcport = orig_io_req->fcport; + + /* Check that fcport is still offloaded */ + if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { + QEDF_ERR(NULL, "fcport is no longer offloaded.\n"); + return -EINVAL; + } + + if (!fcport->qedf) { + QEDF_ERR(NULL, "fcport->qedf is NULL.\n"); + return -EINVAL; + } + + /* Take reference until REC command completion */ + kref_get(&orig_io_req->refcount); + + qedf = fcport->qedf; + lport = qedf->lport; + sid = fcport->sid; + r_a_tov = lport->r_a_tov; + + memset(&rec, 0, sizeof(rec)); + + cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO); + if (!cb_arg) { + QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for " + "REC\n"); + rc = -ENOMEM; + goto rec_err; + } + + cb_arg->aborted_io_req = orig_io_req; + + rec.rec_cmd = ELS_REC; + hton24(rec.rec_s_id, sid); + rec.rec_ox_id = htons(orig_io_req->xid); + rec.rec_rx_id = + htons(orig_io_req->task->tstorm_st_context.read_write.rx_id); + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending REC orig_io=%p, " + "orig_xid=0x%x rx_id=0x%x\n", orig_io_req, + orig_io_req->xid, rec.rec_rx_id); + rc = 
qedf_initiate_els(fcport, ELS_REC, &rec, sizeof(rec), + qedf_rec_compl, cb_arg, r_a_tov); + +rec_err: + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), "REC failed - release orig_io_req" + "=0x%x\n", orig_io_req->xid); + kfree(cb_arg); + kref_put(&orig_io_req->refcount, qedf_release_cmd); + } + return rc; +} diff --git a/drivers/scsi/qedf/qedf_fip.c b/drivers/scsi/qedf/qedf_fip.c new file mode 100644 index 00000000000000..ed58b9104f58b8 --- /dev/null +++ b/drivers/scsi/qedf/qedf_fip.c @@ -0,0 +1,269 @@ +/* + * QLogic FCoE Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ +#include +#include +#include "qedf.h" + +extern const struct qed_fcoe_ops *qed_ops; +/* + * FIP VLAN functions that will eventually move to libfcoe. + */ + +void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf) +{ + struct sk_buff *skb; + char *eth_fr; + int fr_len; + struct fip_vlan *vlan; +#define MY_FIP_ALL_FCF_MACS ((__u8[6]) { 1, 0x10, 0x18, 1, 0, 2 }) + static u8 my_fcoe_all_fcfs[ETH_ALEN] = MY_FIP_ALL_FCF_MACS; + + skb = dev_alloc_skb(sizeof(struct fip_vlan)); + if (!skb) + return; + + fr_len = sizeof(*vlan); + eth_fr = (char *)skb->data; + vlan = (struct fip_vlan *)eth_fr; + + memset(vlan, 0, sizeof(*vlan)); + ether_addr_copy(vlan->eth.h_source, qedf->mac); + ether_addr_copy(vlan->eth.h_dest, my_fcoe_all_fcfs); + vlan->eth.h_proto = htons(ETH_P_FIP); + + vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); + vlan->fip.fip_op = htons(FIP_OP_VLAN); + vlan->fip.fip_subcode = FIP_SC_VL_REQ; + vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW); + + vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC; + vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW; + ether_addr_copy(vlan->desc.mac.fd_mac, qedf->mac); + + vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME; + vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW; + put_unaligned_be64(qedf->lport->wwnn, &vlan->desc.wwnn.fd_wwn); + + skb_put(skb, sizeof(*vlan)); + skb->protocol = htons(ETH_P_FIP); + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Sending FIP VLAN " + "request."); + + if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) { + QEDF_WARN(&(qedf->dbg_ctx), "Cannot send vlan request " + "because link is not up.\n"); + + kfree_skb(skb); + return; + } + qed_ops->ll2->start_xmit(qedf->cdev, skb); +} + +static void qedf_fcoe_process_vlan_resp(struct qedf_ctx *qedf, + struct sk_buff *skb) +{ + struct fip_header *fiph; + struct fip_desc *desc; + u16 vid = 0; + ssize_t rlen; + size_t dlen; + + fiph = (struct fip_header *)(((void *)skb->data) + 2 * ETH_ALEN + 2); + + rlen = ntohs(fiph->fip_dl_len) * 4; + desc = (struct fip_desc *)(fiph + 1); + while (rlen > 0) { + dlen = desc->fip_dlen * FIP_BPW; + switch (desc->fip_dtype) { + case FIP_DT_VLAN: + vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan); + break; + } + desc = (struct fip_desc *)((char *)desc + dlen); + rlen -= dlen; + } + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "VLAN response, " + "vid=0x%x.\n", vid); + + if (vid > 0 && qedf->vlan_id != vid) { + qedf_set_vlan_id(qedf, vid); + + /* Inform waiter that it's ok to call fcoe_ctlr_link up() */ + complete(&qedf->fipvlan_compl); + } +} + +void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) +{ + struct qedf_ctx *qedf = container_of(fip, struct qedf_ctx, ctlr); + struct ethhdr *eth_hdr; + 
struct vlan_ethhdr *vlan_hdr; + struct fip_header *fiph; + u16 op, vlan_tci = 0; + u8 sub; + + if (!test_bit(QEDF_LL2_STARTED, &qedf->flags)) { + QEDF_WARN(&(qedf->dbg_ctx), "LL2 not started\n"); + kfree_skb(skb); + return; + } + + fiph = (struct fip_header *) ((void *)skb->data + 2 * ETH_ALEN + 2); + eth_hdr = (struct ethhdr *)skb_mac_header(skb); + op = ntohs(fiph->fip_op); + sub = fiph->fip_subcode; + + if (!qedf->vlan_hw_insert) { + vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, sizeof(*vlan_hdr) + - sizeof(*eth_hdr)); + memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN); + vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q); + vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto; + vlan_hdr->h_vlan_TCI = vlan_tci = htons(qedf->vlan_id); + } + + /* Update eth_hdr since we added a VLAN tag */ + eth_hdr = (struct ethhdr *)skb_mac_header(skb); + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FIP frame send: " + "dest=%pM op=%x sub=%x vlan=%04x.", eth_hdr->h_dest, op, sub, + ntohs(vlan_tci)); + if (qedf_dump_frames) + print_hex_dump(KERN_WARNING, "fip ", DUMP_PREFIX_OFFSET, 16, 1, + skb->data, skb->len, false); + + qed_ops->ll2->start_xmit(qedf->cdev, skb); +} + +/* Process incoming FIP frames. */ +void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb) +{ + struct ethhdr *eth_hdr; + struct fip_header *fiph; + struct fip_desc *desc; + struct fip_mac_desc *mp; + struct fip_wwn_desc *wp; + struct fip_vn_desc *vp; + size_t rlen, dlen; + uint32_t cvl_port_id; + __u8 cvl_mac[ETH_ALEN]; + u16 op; + u8 sub; + + eth_hdr = (struct ethhdr *)skb_mac_header(skb); + fiph = (struct fip_header *) ((void *)skb->data + 2 * ETH_ALEN + 2); + op = ntohs(fiph->fip_op); + sub = fiph->fip_subcode; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FIP frame received: " + "skb=%p fiph=%p source=%pM op=%x sub=%x", skb, fiph, + eth_hdr->h_source, op, sub); + if (qedf_dump_frames) + print_hex_dump(KERN_WARNING, "fip ", DUMP_PREFIX_OFFSET, 16, 1, + skb->data, skb->len, false); + + /* Handle FIP VLAN resp in the driver */ + if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) { + qedf_fcoe_process_vlan_resp(qedf, skb); + qedf->vlan_hw_insert = 0; + kfree_skb(skb); + } else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Clear virtual " + "link received.\n"); + + /* Check that an FCF has been selected by fcoe */ + if (qedf->ctlr.sel_fcf == NULL) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "Dropping CVL since FCF has not been selected " + "yet."); + return; + } + + cvl_port_id = 0; + memset(cvl_mac, 0, ETH_ALEN); + /* + * We need to loop through the CVL descriptors to determine + * if we want to reset the fcoe link + */ + rlen = ntohs(fiph->fip_dl_len) * FIP_BPW; + desc = (struct fip_desc *)(fiph + 1); + while (rlen >= sizeof(*desc)) { + dlen = desc->fip_dlen * FIP_BPW; + switch (desc->fip_dtype) { + case FIP_DT_MAC: + mp = (struct fip_mac_desc *)desc; + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, + "fd_mac=%pM\n", mp->fd_mac); + ether_addr_copy(cvl_mac, mp->fd_mac); + break; + case FIP_DT_NAME: + wp = (struct fip_wwn_desc *)desc; + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, + "fc_wwpn=%016llx.\n", + get_unaligned_be64(&wp->fd_wwn)); + break; + case FIP_DT_VN_ID: + vp = (struct fip_vn_desc *)desc; + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, + "fd_fc_id=%x.\n", ntoh24(vp->fd_fc_id)); + cvl_port_id = ntoh24(vp->fd_fc_id); + break; + default: + /* Ignore anything else */ + break; + } + desc = (struct fip_desc *)((char *)desc + dlen); + rlen -= dlen; + } + + QEDF_INFO(&(qedf->dbg_ctx), 
QEDF_LOG_LL2, + "cvl_port_id=%06x cvl_mac=%pM.\n", cvl_port_id, + cvl_mac); + if (cvl_port_id == qedf->lport->port_id && + ether_addr_equal(cvl_mac, + qedf->ctlr.sel_fcf->fcf_mac)) { + fcoe_ctlr_link_down(&qedf->ctlr); + qedf_wait_for_upload(qedf); + fcoe_ctlr_link_up(&qedf->ctlr); + } + kfree_skb(skb); + } else { + /* Everything else is handled by libfcoe */ + __skb_pull(skb, ETH_HLEN); + fcoe_ctlr_recv(&qedf->ctlr, skb); + } +} + +void qedf_update_src_mac(struct fc_lport *lport, u8 *addr) +{ + struct qedf_ctx *qedf = lport_priv(lport); + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "Setting data_src_addr=%pM.\n", addr); + ether_addr_copy(qedf->data_src_addr, addr); +} + +u8 *qedf_get_src_mac(struct fc_lport *lport) +{ + u8 mac[ETH_ALEN]; + u8 port_id[3]; + struct qedf_ctx *qedf = lport_priv(lport); + + /* We need to use the lport port_id to create the data_src_addr */ + if (is_zero_ether_addr(qedf->data_src_addr)) { + hton24(port_id, lport->port_id); + fc_fcoe_set_mac(mac, port_id); + qedf->ctlr.update_mac(lport, mac); + } + return qedf->data_src_addr; +} diff --git a/drivers/scsi/qedf/qedf_hsi.h b/drivers/scsi/qedf/qedf_hsi.h new file mode 100644 index 00000000000000..dfd65dec287498 --- /dev/null +++ b/drivers/scsi/qedf/qedf_hsi.h @@ -0,0 +1,422 @@ +/* + * QLogic FCoE Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ +#ifndef __QEDF_HSI__ +#define __QEDF_HSI__ +/* + * Add include to common target + */ +#include <linux/qed/common_hsi.h> + +/* + * Add include to common storage target + */ +#include <linux/qed/storage_common.h> + +/* + * Add include to common fcoe target for both eCore and protocol driver + */ +#include <linux/qed/fcoe_common.h> + + +/* + * FCoE CQ element ABTS information + */ +struct fcoe_abts_info { + u8 r_ctl /* R_CTL in the ABTS response frame */; + u8 reserved0; + __le16 rx_id; + __le32 reserved2[2]; + __le32 fc_payload[3] /* ABTS FC payload response frame */; +}; + + +/* + * FCoE class type + */ +enum fcoe_class_type { + FCOE_TASK_CLASS_TYPE_3, + FCOE_TASK_CLASS_TYPE_2, + MAX_FCOE_CLASS_TYPE +}; + + +/* + * FCoE CMDQ element control information + */ +struct fcoe_cmdqe_control { + __le16 conn_id; + u8 num_additional_cmdqes; + u8 cmdType; + /* true for ABTS request cmdqe.
used in Target mode */ +#define FCOE_CMDQE_CONTROL_ABTSREQCMD_MASK 0x1 +#define FCOE_CMDQE_CONTROL_ABTSREQCMD_SHIFT 0 +#define FCOE_CMDQE_CONTROL_RESERVED1_MASK 0x7F +#define FCOE_CMDQE_CONTROL_RESERVED1_SHIFT 1 + u8 reserved2[4]; +}; + +/* + * FCoE control + payload CMDQ element + */ +struct fcoe_cmdqe { + struct fcoe_cmdqe_control hdr; + u8 fc_header[24]; + __le32 fcp_cmd_payload[8]; +}; + + + +/* + * FCP RSP flags + */ +struct fcoe_fcp_rsp_flags { + u8 flags; +#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_MASK 0x1 +#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_SHIFT 0 +#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_MASK 0x1 +#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_SHIFT 1 +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_MASK 0x1 +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_SHIFT 2 +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_MASK 0x1 +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_SHIFT 3 +#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_MASK 0x1 +#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_SHIFT 4 +#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_MASK 0x7 +#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_SHIFT 5 +}; + +/* + * FCoE CQ element response information + */ +struct fcoe_cqe_rsp_info { + struct fcoe_fcp_rsp_flags rsp_flags; + u8 scsi_status_code; + __le16 retry_delay_timer; + __le32 fcp_resid; + __le32 fcp_sns_len; + __le32 fcp_rsp_len; + __le16 rx_id; + u8 fw_error_flags; +#define FCOE_CQE_RSP_INFO_FW_UNDERRUN_MASK 0x1 /* FW detected underrun */ +#define FCOE_CQE_RSP_INFO_FW_UNDERRUN_SHIFT 0 +#define FCOE_CQE_RSP_INFO_RESREVED_MASK 0x7F +#define FCOE_CQE_RSP_INFO_RESREVED_SHIFT 1 + u8 reserved; + __le32 fw_residual /* Residual bytes calculated by FW */; +}; + +/* + * FCoE CQ element Target completion information + */ +struct fcoe_cqe_target_info { + __le16 rx_id; + __le16 reserved0; + __le32 reserved1[5]; +}; + +/* + * FCoE error/warning reporting entry + */ +struct fcoe_err_report_entry { + __le32 err_warn_bitmap_lo /* Error bitmap lower 32 bits */; + __le32 err_warn_bitmap_hi /* Error bitmap higher 32 bits */; + /* Buffer offset the beginning of the Sequence last transmitted */ + __le32 tx_buf_off; + /* Buffer offset from the beginning of the Sequence last received */ + __le32 rx_buf_off; + __le16 rx_id /* RX_ID of the associated task */; + __le16 reserved1; + __le32 reserved2; +}; + +/* + * FCoE CQ element middle path information + */ +struct fcoe_cqe_midpath_info { + __le32 data_placement_size; + __le16 rx_id; + __le16 reserved0; + __le32 reserved1[4]; +}; + +/* + * FCoE CQ element unsolicited information + */ +struct fcoe_unsolic_info { + /* BD information: Physical address and opaque data */ + struct scsi_bd bd_info; + __le16 conn_id /* Connection ID the frame is associated to */; + __le16 pkt_len /* Packet length */; + u8 reserved1[4]; +}; + +/* + * FCoE warning reporting entry + */ +struct fcoe_warning_report_entry { + /* BD information: Physical address and opaque data */ + struct scsi_bd bd_info; + /* Buffer offset the beginning of the Sequence last transmitted */ + __le32 buf_off; + __le16 rx_id /* RX_ID of the associated task */; + __le16 reserved1; +}; + +/* + * FCoE CQ element information + */ +union fcoe_cqe_info { + struct fcoe_cqe_rsp_info rsp_info /* Response completion information */; + /* Target completion information */ + struct fcoe_cqe_target_info target_info; + /* Error completion information */ + struct fcoe_err_report_entry err_info; + struct fcoe_abts_info abts_info /* ABTS completion information */; + /* Middle path completion information */ + struct fcoe_cqe_midpath_info midpath_info; 
+ /* Unsolicited packet completion information */ + struct fcoe_unsolic_info unsolic_info; + /* Warning completion information (Rec Tov expiration) */ + struct fcoe_warning_report_entry warn_info; +}; + +/* + * FCoE CQ element + */ +struct fcoe_cqe { + __le32 cqe_data; + /* The task identifier (OX_ID) to be completed */ +#define FCOE_CQE_TASK_ID_MASK 0xFFFF +#define FCOE_CQE_TASK_ID_SHIFT 0 + /* + * The CQE type: 0x0 Indicating on a pending work request completion. + * 0x1 - Indicating on an unsolicited event notification. use enum + * fcoe_cqe_type (use enum fcoe_cqe_type) + */ +#define FCOE_CQE_CQE_TYPE_MASK 0xF +#define FCOE_CQE_CQE_TYPE_SHIFT 16 +#define FCOE_CQE_RESERVED0_MASK 0xFFF +#define FCOE_CQE_RESERVED0_SHIFT 20 + __le16 reserved1; + __le16 fw_cq_prod; + union fcoe_cqe_info cqe_info; +}; + +/* + * FCoE CQE type + */ +enum fcoe_cqe_type { + /* solicited response on a R/W or middle-path SQE */ + FCOE_GOOD_COMPLETION_CQE_TYPE, + FCOE_UNSOLIC_CQE_TYPE /* unsolicited packet, RQ consumed */, + FCOE_ERROR_DETECTION_CQE_TYPE /* timer expiration, validation error */, + FCOE_WARNING_CQE_TYPE /* rec_tov or rr_tov timer expiration */, + FCOE_EXCH_CLEANUP_CQE_TYPE /* task cleanup completed */, + FCOE_ABTS_CQE_TYPE /* ABTS received and task cleaned */, + FCOE_DUMMY_CQE_TYPE /* just increment SQ CONS */, + /* Task was completed wight after sending a pkt to the target */ + FCOE_LOCAL_COMP_CQE_TYPE, + MAX_FCOE_CQE_TYPE +}; + + +/* + * FCoE device type + */ +enum fcoe_device_type { + FCOE_TASK_DEV_TYPE_DISK, + FCOE_TASK_DEV_TYPE_TAPE, + MAX_FCOE_DEVICE_TYPE +}; + + + + +/* + * FCoE fast path error codes + */ +enum fcoe_fp_error_warning_code { + FCOE_ERROR_CODE_XFER_OOO_RO /* XFER error codes */, + FCOE_ERROR_CODE_XFER_RO_NOT_ALIGNED, + FCOE_ERROR_CODE_XFER_NULL_BURST_LEN, + FCOE_ERROR_CODE_XFER_RO_GREATER_THAN_DATA2TRNS, + FCOE_ERROR_CODE_XFER_INVALID_PAYLOAD_SIZE, + FCOE_ERROR_CODE_XFER_TASK_TYPE_NOT_WRITE, + FCOE_ERROR_CODE_XFER_PEND_XFER_SET, + FCOE_ERROR_CODE_XFER_OPENED_SEQ, + FCOE_ERROR_CODE_XFER_FCTL, + FCOE_ERROR_CODE_FCP_RSP_BIDI_FLAGS_SET /* FCP RSP error codes */, + FCOE_ERROR_CODE_FCP_RSP_INVALID_LENGTH_FIELD, + FCOE_ERROR_CODE_FCP_RSP_INVALID_SNS_FIELD, + FCOE_ERROR_CODE_FCP_RSP_INVALID_PAYLOAD_SIZE, + FCOE_ERROR_CODE_FCP_RSP_PEND_XFER_SET, + FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ, + FCOE_ERROR_CODE_FCP_RSP_FCTL, + FCOE_ERROR_CODE_FCP_RSP_LAST_SEQ_RESET, + FCOE_ERROR_CODE_FCP_RSP_CONF_REQ_NOT_SUPPORTED_YET, + FCOE_ERROR_CODE_DATA_OOO_RO /* FCP DATA error codes */, + FCOE_ERROR_CODE_DATA_EXCEEDS_DEFINED_MAX_FRAME_SIZE, + FCOE_ERROR_CODE_DATA_EXCEEDS_DATA2TRNS, + FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET, + FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET, + FCOE_ERROR_CODE_DATA_EOFN_END_SEQ_SET, + FCOE_ERROR_CODE_DATA_EOFT_END_SEQ_RESET, + FCOE_ERROR_CODE_DATA_TASK_TYPE_NOT_READ, + FCOE_ERROR_CODE_DATA_FCTL_INITIATIR, + FCOE_ERROR_CODE_MIDPATH_INVALID_TYPE /* Middle path error codes */, + FCOE_ERROR_CODE_MIDPATH_SOFI3_SEQ_ACTIVE_SET, + FCOE_ERROR_CODE_MIDPATH_SOFN_SEQ_ACTIVE_RESET, + FCOE_ERROR_CODE_MIDPATH_EOFN_END_SEQ_SET, + FCOE_ERROR_CODE_MIDPATH_EOFT_END_SEQ_RESET, + FCOE_ERROR_CODE_MIDPATH_REPLY_FCTL, + FCOE_ERROR_CODE_MIDPATH_INVALID_REPLY, + FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_RCTL, + FCOE_ERROR_CODE_COMMON_MIDDLE_FRAME_WITH_PAD /* Common error codes */, + FCOE_ERROR_CODE_COMMON_SEQ_INIT_IN_TCE, + FCOE_ERROR_CODE_COMMON_FC_HDR_RX_ID_MISMATCH, + FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT, + FCOE_ERROR_CODE_COMMON_DATA_FC_HDR_FCP_TYPE_MISMATCH, + FCOE_ERROR_CODE_COMMON_DATA_NO_MORE_SGES, + 
FCOE_ERROR_CODE_COMMON_OPTIONAL_FC_HDR, + FCOE_ERROR_CODE_COMMON_READ_TCE_OX_ID_TOO_BIG, + FCOE_ERROR_CODE_COMMON_DATA_WAS_NOT_TRANSMITTED, + FCOE_ERROR_CODE_COMMON_TASK_DDF_RCTL_INFO_FIELD, + FCOE_ERROR_CODE_COMMON_TASK_INVALID_RCTL, + FCOE_ERROR_CODE_COMMON_TASK_RCTL_GENERAL_MISMATCH, + FCOE_ERROR_CODE_E_D_TOV_TIMER_EXPIRATION /* Timer error codes */, + FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION /* Timer error codes */, + FCOE_ERROR_CODE_RR_TOV_TIMER_EXPIRATION /* Timer error codes */, + /* ABTSrsp pckt arrived unexpected */ + FCOE_ERROR_CODE_ABTS_REPLY_UNEXPECTED, + FCOE_ERROR_CODE_TARGET_MODE_FCP_RSP, + FCOE_ERROR_CODE_TARGET_MODE_FCP_XFER, + FCOE_ERROR_CODE_TARGET_MODE_DATA_TASK_TYPE_NOT_WRITE, + FCOE_ERROR_CODE_DATA_FCTL_TARGET, + FCOE_ERROR_CODE_TARGET_DATA_SIZE_NO_MATCH_XFER, + FCOE_ERROR_CODE_TARGET_DIF_CRC_CHECKSUM_ERROR, + FCOE_ERROR_CODE_TARGET_DIF_REF_TAG_ERROR, + FCOE_ERROR_CODE_TARGET_DIF_APP_TAG_ERROR, + MAX_FCOE_FP_ERROR_WARNING_CODE +}; + + +/* + * FCoE RESPQ element + */ +struct fcoe_respqe { + __le16 ox_id /* OX_ID that is located in the FCP_RSP FC header */; + __le16 rx_id /* RX_ID that is located in the FCP_RSP FC header */; + __le32 additional_info; +/* PARAM that is located in the FCP_RSP FC header */ +#define FCOE_RESPQE_PARAM_MASK 0xFFFFFF +#define FCOE_RESPQE_PARAM_SHIFT 0 +/* Indication whther its Target-auto-rsp mode or not */ +#define FCOE_RESPQE_TARGET_AUTO_RSP_MASK 0xFF +#define FCOE_RESPQE_TARGET_AUTO_RSP_SHIFT 24 +}; + + +/* + * FCoE slow path error codes + */ +enum fcoe_sp_error_code { + /* Error codes for Error Reporting in slow path flows */ + FCOE_ERROR_CODE_SLOW_PATH_TOO_MANY_FUNCS, + FCOE_ERROR_SLOW_PATH_CODE_NO_LICENSE, + MAX_FCOE_SP_ERROR_CODE +}; + + +/* + * FCoE SQE request type + */ +enum fcoe_sqe_request_type { + SEND_FCOE_CMD, + SEND_FCOE_MIDPATH, + SEND_FCOE_ABTS_REQUEST, + FCOE_EXCHANGE_CLEANUP, + FCOE_SEQUENCE_RECOVERY, + SEND_FCOE_XFER_RDY, + SEND_FCOE_RSP, + SEND_FCOE_RSP_WITH_SENSE_DATA, + SEND_FCOE_TARGET_DATA, + SEND_FCOE_INITIATOR_DATA, + /* + * Xfer Continuation (==1) ready to be sent. Previous XFERs data + * received successfully. 
+ */ + SEND_FCOE_XFER_CONTINUATION_RDY, + SEND_FCOE_TARGET_ABTS_RSP, + MAX_FCOE_SQE_REQUEST_TYPE +}; + + +/* + * FCoE task TX state + */ +enum fcoe_task_tx_state { + /* Initiate state after driver has initialized the task */ + FCOE_TASK_TX_STATE_NORMAL, + /* Updated by TX path after complete transmitting unsolicited packet */ + FCOE_TASK_TX_STATE_UNSOLICITED_COMPLETED, + /* + * Updated by TX path after start processing the task requesting the + * cleanup/abort operation + */ + FCOE_TASK_TX_STATE_CLEAN_REQ, + FCOE_TASK_TX_STATE_ABTS /* Updated by TX path during abort procedure */, + /* Updated by TX path during exchange cleanup procedure */ + FCOE_TASK_TX_STATE_EXCLEANUP, + /* + * Updated by TX path during exchange cleanup continuation task + * procedure + */ + FCOE_TASK_TX_STATE_EXCLEANUP_TARGET_WRITE_CONT, + /* Updated by TX path during exchange cleanup first xfer procedure */ + FCOE_TASK_TX_STATE_EXCLEANUP_TARGET_WRITE, + /* Updated by TX path during exchange cleanup read task in Target */ + FCOE_TASK_TX_STATE_EXCLEANUP_TARGET_READ_OR_RSP, + /* Updated by TX path during target exchange cleanup procedure */ + FCOE_TASK_TX_STATE_EXCLEANUP_TARGET_WRITE_LAST_CYCLE, + /* Updated by TX path during sequence recovery procedure */ + FCOE_TASK_TX_STATE_SEQRECOVERY, + MAX_FCOE_TASK_TX_STATE +}; + + +/* + * FCoE task type + */ +enum fcoe_task_type { + FCOE_TASK_TYPE_WRITE_INITIATOR, + FCOE_TASK_TYPE_READ_INITIATOR, + FCOE_TASK_TYPE_MIDPATH, + FCOE_TASK_TYPE_UNSOLICITED, + FCOE_TASK_TYPE_ABTS, + FCOE_TASK_TYPE_EXCHANGE_CLEANUP, + FCOE_TASK_TYPE_SEQUENCE_CLEANUP, + FCOE_TASK_TYPE_WRITE_TARGET, + FCOE_TASK_TYPE_READ_TARGET, + FCOE_TASK_TYPE_RSP, + FCOE_TASK_TYPE_RSP_SENSE_DATA, + FCOE_TASK_TYPE_ABTS_TARGET, + FCOE_TASK_TYPE_ENUM_SIZE, + MAX_FCOE_TASK_TYPE +}; + +struct scsi_glbl_queue_entry { + /* Start physical address for the RQ (receive queue) PBL. */ + struct regpair rq_pbl_addr; + /* Start physical address for the CQ (completion queue) PBL. */ + struct regpair cq_pbl_addr; + /* Start physical address for the CMDQ (command queue) PBL. */ + struct regpair cmdq_pbl_addr; +}; + +#endif /* __QEDF_HSI__ */ diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c new file mode 100644 index 00000000000000..46debe5034af10 --- /dev/null +++ b/drivers/scsi/qedf/qedf_io.c @@ -0,0 +1,2282 @@ +/* + * QLogic FCoE Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ +#include +#include +#include "qedf.h" +#include + +void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req, + unsigned int timer_msec) +{ + queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work, + msecs_to_jiffies(timer_msec)); +} + +static void qedf_cmd_timeout(struct work_struct *work) +{ + + struct qedf_ioreq *io_req = + container_of(work, struct qedf_ioreq, timeout_work.work); + struct qedf_ctx *qedf = io_req->fcport->qedf; + struct qedf_rport *fcport = io_req->fcport; + u8 op = 0; + + switch (io_req->cmd_type) { + case QEDF_ABTS: + QEDF_ERR((&qedf->dbg_ctx), "ABTS timeout, xid=0x%x.\n", + io_req->xid); + /* Cleanup timed out ABTS */ + qedf_initiate_cleanup(io_req, true); + complete(&io_req->abts_done); + + /* + * Need to call kref_put for reference taken when initiate_abts + * was called since abts_compl won't be called now that we've + * cleaned up the task. 
+ */ + kref_put(&io_req->refcount, qedf_release_cmd); + + /* + * Now that the original I/O and the ABTS are complete see + * if we need to reconnect to the target. + */ + qedf_restart_rport(fcport); + break; + case QEDF_ELS: + kref_get(&io_req->refcount); + /* + * Don't attempt to clean an ELS timeout as any subseqeunt + * ABTS or cleanup requests just hang. For now just free + * the resources of the original I/O and the RRQ + */ + QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n", + io_req->xid); + io_req->event = QEDF_IOREQ_EV_ELS_TMO; + /* Call callback function to complete command */ + if (io_req->cb_func && io_req->cb_arg) { + op = io_req->cb_arg->op; + io_req->cb_func(io_req->cb_arg); + io_req->cb_arg = NULL; + } + qedf_initiate_cleanup(io_req, true); + kref_put(&io_req->refcount, qedf_release_cmd); + break; + case QEDF_SEQ_CLEANUP: + QEDF_ERR(&(qedf->dbg_ctx), "Sequence cleanup timeout, " + "xid=0x%x.\n", io_req->xid); + qedf_initiate_cleanup(io_req, true); + io_req->event = QEDF_IOREQ_EV_ELS_TMO; + qedf_process_seq_cleanup_compl(qedf, NULL, io_req); + break; + default: + break; + } +} + +void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr) +{ + struct io_bdt *bdt_info; + struct qedf_ctx *qedf = cmgr->qedf; + size_t bd_tbl_sz; + u16 min_xid = QEDF_MIN_XID; + u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1); + int num_ios; + int i; + struct qedf_ioreq *io_req; + + num_ios = max_xid - min_xid + 1; + + /* Free fcoe_bdt_ctx structures */ + if (!cmgr->io_bdt_pool) + goto free_cmd_pool; + + bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct fcoe_sge); + for (i = 0; i < num_ios; i++) { + bdt_info = cmgr->io_bdt_pool[i]; + if (bdt_info->bd_tbl) { + dma_free_coherent(&qedf->pdev->dev, bd_tbl_sz, + bdt_info->bd_tbl, bdt_info->bd_tbl_dma); + bdt_info->bd_tbl = NULL; + } + } + + /* Destroy io_bdt pool */ + for (i = 0; i < num_ios; i++) { + kfree(cmgr->io_bdt_pool[i]); + cmgr->io_bdt_pool[i] = NULL; + } + + kfree(cmgr->io_bdt_pool); + cmgr->io_bdt_pool = NULL; + +free_cmd_pool: + + for (i = 0; i < num_ios; i++) { + io_req = &cmgr->cmds[i]; + /* Make sure we free per command sense buffer */ + if (io_req->sense_buffer) + dma_free_coherent(&qedf->pdev->dev, + QEDF_SCSI_SENSE_BUFFERSIZE, io_req->sense_buffer, + io_req->sense_buffer_dma); + cancel_delayed_work_sync(&io_req->rrq_work); + } + + /* Free command manager itself */ + vfree(cmgr); +} + +static void qedf_handle_rrq(struct work_struct *work) +{ + struct qedf_ioreq *io_req = + container_of(work, struct qedf_ioreq, rrq_work.work); + + qedf_send_rrq(io_req); + +} + +struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf) +{ + struct qedf_cmd_mgr *cmgr; + struct io_bdt *bdt_info; + struct qedf_ioreq *io_req; + u16 xid; + int i; + int num_ios; + u16 min_xid = QEDF_MIN_XID; + u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1); + + /* Make sure num_queues is already set before calling this function */ + if (!qedf->num_queues) { + QEDF_ERR(&(qedf->dbg_ctx), "num_queues is not set.\n"); + return NULL; + } + + if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) { + QEDF_WARN(&(qedf->dbg_ctx), "Invalid min_xid 0x%x and " + "max_xid 0x%x.\n", min_xid, max_xid); + return NULL; + } + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "min xid 0x%x, max xid " + "0x%x.\n", min_xid, max_xid); + + num_ios = max_xid - min_xid + 1; + + cmgr = vzalloc(sizeof(struct qedf_cmd_mgr)); + if (!cmgr) { + QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc cmd mgr.\n"); + return NULL; + } + + cmgr->qedf = qedf; + spin_lock_init(&cmgr->lock); + + /* + * Initialize list of qedf_ioreq. 
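+ * Each entry is given an XID starting at QEDF_MIN_XID, its timeout and RRQ delayed-work handlers, and a DMA-coherent sense buffer.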
+ */ + xid = QEDF_MIN_XID; + + for (i = 0; i < num_ios; i++) { + io_req = &cmgr->cmds[i]; + INIT_DELAYED_WORK(&io_req->timeout_work, qedf_cmd_timeout); + + io_req->xid = xid++; + + INIT_DELAYED_WORK(&io_req->rrq_work, qedf_handle_rrq); + + /* Allocate DMA memory to hold sense buffer */ + io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev, + QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma, + GFP_KERNEL); + if (!io_req->sense_buffer) + goto mem_err; + } + + /* Allocate pool of io_bdts - one for each qedf_ioreq */ + cmgr->io_bdt_pool = kmalloc_array(num_ios, sizeof(struct io_bdt *), + GFP_KERNEL); + + if (!cmgr->io_bdt_pool) { + QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc io_bdt_pool.\n"); + goto mem_err; + } + + for (i = 0; i < num_ios; i++) { + cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt), + GFP_KERNEL); + if (!cmgr->io_bdt_pool[i]) { + QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc " + "io_bdt_pool[%d].\n", i); + goto mem_err; + } + } + + for (i = 0; i < num_ios; i++) { + bdt_info = cmgr->io_bdt_pool[i]; + bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev, + QEDF_MAX_BDS_PER_CMD * sizeof(struct fcoe_sge), + &bdt_info->bd_tbl_dma, GFP_KERNEL); + if (!bdt_info->bd_tbl) { + QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc " + "bdt_tbl[%d].\n", i); + goto mem_err; + } + } + atomic_set(&cmgr->free_list_cnt, num_ios); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "cmgr->free_list_cnt=%d.\n", + atomic_read(&cmgr->free_list_cnt)); + + return cmgr; + +mem_err: + qedf_cmd_mgr_free(cmgr); + return NULL; +} + +struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type) +{ + struct qedf_ctx *qedf = fcport->qedf; + struct qedf_cmd_mgr *cmd_mgr = qedf->cmd_mgr; + struct qedf_ioreq *io_req = NULL; + struct io_bdt *bd_tbl; + u16 xid; + uint32_t free_sqes; + int i; + unsigned long flags; + + free_sqes = atomic_read(&fcport->free_sqes); + + if (!free_sqes) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Returning NULL, free_sqes=%d.\n ", + free_sqes); + goto out_failed; + } + + /* Limit the number of outstanding R/W tasks */ + if ((atomic_read(&fcport->num_active_ios) >= + NUM_RW_TASKS_PER_CONNECTION)) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Returning NULL, num_active_ios=%d.\n", + atomic_read(&fcport->num_active_ios)); + goto out_failed; + } + + /* Limit global TIDs certain tasks */ + if (atomic_read(&cmd_mgr->free_list_cnt) <= GBL_RSVD_TASKS) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Returning NULL, free_list_cnt=%d.\n", + atomic_read(&cmd_mgr->free_list_cnt)); + goto out_failed; + } + + spin_lock_irqsave(&cmd_mgr->lock, flags); + for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) { + io_req = &cmd_mgr->cmds[cmd_mgr->idx]; + cmd_mgr->idx++; + if (cmd_mgr->idx == FCOE_PARAMS_NUM_TASKS) + cmd_mgr->idx = 0; + + /* Check to make sure command was previously freed */ + if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) + break; + } + + if (i == FCOE_PARAMS_NUM_TASKS) { + spin_unlock_irqrestore(&cmd_mgr->lock, flags); + goto out_failed; + } + + set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags); + spin_unlock_irqrestore(&cmd_mgr->lock, flags); + + atomic_inc(&fcport->num_active_ios); + atomic_dec(&fcport->free_sqes); + xid = io_req->xid; + atomic_dec(&cmd_mgr->free_list_cnt); + + io_req->cmd_mgr = cmd_mgr; + io_req->fcport = fcport; + + /* Hold the io_req against deletion */ + kref_init(&io_req->refcount); + + /* Bind io_bdt for this io_req */ + /* Have a static link between io_req and io_bdt_pool */ + bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid]; + if (bd_tbl == 
NULL) { + QEDF_ERR(&(qedf->dbg_ctx), "bd_tbl is NULL, xid=%x.\n", xid); + kref_put(&io_req->refcount, qedf_release_cmd); + goto out_failed; + } + bd_tbl->io_req = io_req; + io_req->cmd_type = cmd_type; + + /* Reset sequence offset data */ + io_req->rx_buf_off = 0; + io_req->tx_buf_off = 0; + io_req->rx_id = 0xffff; /* No OX_ID */ + + return io_req; + +out_failed: + /* Record failure for stats and return NULL to caller */ + qedf->alloc_failures++; + return NULL; +} + +static void qedf_free_mp_resc(struct qedf_ioreq *io_req) +{ + struct qedf_mp_req *mp_req = &(io_req->mp_req); + struct qedf_ctx *qedf = io_req->fcport->qedf; + uint64_t sz = sizeof(struct fcoe_sge); + + /* clear tm flags */ + mp_req->tm_flags = 0; + if (mp_req->mp_req_bd) { + dma_free_coherent(&qedf->pdev->dev, sz, + mp_req->mp_req_bd, mp_req->mp_req_bd_dma); + mp_req->mp_req_bd = NULL; + } + if (mp_req->mp_resp_bd) { + dma_free_coherent(&qedf->pdev->dev, sz, + mp_req->mp_resp_bd, mp_req->mp_resp_bd_dma); + mp_req->mp_resp_bd = NULL; + } + if (mp_req->req_buf) { + dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE, + mp_req->req_buf, mp_req->req_buf_dma); + mp_req->req_buf = NULL; + } + if (mp_req->resp_buf) { + dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE, + mp_req->resp_buf, mp_req->resp_buf_dma); + mp_req->resp_buf = NULL; + } +} + +void qedf_release_cmd(struct kref *ref) +{ + struct qedf_ioreq *io_req = + container_of(ref, struct qedf_ioreq, refcount); + struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr; + struct qedf_rport *fcport = io_req->fcport; + + if (io_req->cmd_type == QEDF_ELS || + io_req->cmd_type == QEDF_TASK_MGMT_CMD) + qedf_free_mp_resc(io_req); + + atomic_inc(&cmd_mgr->free_list_cnt); + atomic_dec(&fcport->num_active_ios); + if (atomic_read(&fcport->num_active_ios) < 0) + QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n"); + + /* Increment task retry identifier now that the request is released */ + io_req->task_retry_identifier++; + + clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags); +} + +static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len, + int bd_index) +{ + struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl; + int frag_size, sg_frags; + + sg_frags = 0; + while (sg_len) { + if (sg_len > QEDF_BD_SPLIT_SZ) + frag_size = QEDF_BD_SPLIT_SZ; + else + frag_size = sg_len; + bd[bd_index + sg_frags].sge_addr.lo = U64_LO(addr); + bd[bd_index + sg_frags].sge_addr.hi = U64_HI(addr); + bd[bd_index + sg_frags].size = (uint16_t)frag_size; + + addr += (u64)frag_size; + sg_frags++; + sg_len -= frag_size; + } + return sg_frags; +} + +static int qedf_map_sg(struct qedf_ioreq *io_req) +{ + struct scsi_cmnd *sc = io_req->sc_cmd; + struct Scsi_Host *host = sc->device->host; + struct fc_lport *lport = shost_priv(host); + struct qedf_ctx *qedf = lport_priv(lport); + struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl; + struct scatterlist *sg; + int byte_count = 0; + int sg_count = 0; + int bd_count = 0; + int sg_frags; + unsigned int sg_len; + u64 addr, end_addr; + int i; + + sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc), + scsi_sg_count(sc), sc->sc_data_direction); + + sg = scsi_sglist(sc); + + /* + * New condition to send single SGE as cached-SGL with length less + * than 64k. 
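+ * In that case the single descriptor is programmed directly and the slow/fast SGL classification below is skipped.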
+ */ + if ((sg_count == 1) && (sg_dma_len(sg) <= + QEDF_MAX_SGLEN_FOR_CACHESGL)) { + sg_len = sg_dma_len(sg); + addr = (u64)sg_dma_address(sg); + + bd[bd_count].sge_addr.lo = (addr & 0xffffffff); + bd[bd_count].sge_addr.hi = (addr >> 32); + bd[bd_count].size = (u16)sg_len; + + return ++bd_count; + } + + scsi_for_each_sg(sc, sg, sg_count, i) { + sg_len = sg_dma_len(sg); + addr = (u64)sg_dma_address(sg); + end_addr = (u64)(addr + sg_len); + + /* + * First s/g element in the list so check if the end_addr + * is paged aligned. Also check to make sure the length is + * at least page size. + */ + if ((i == 0) && (sg_count > 1) && + ((end_addr % QEDF_PAGE_SIZE) || + sg_len < QEDF_PAGE_SIZE)) + io_req->use_slowpath = true; + /* + * Last s/g element so check if the start address is paged + * aligned. + */ + else if ((i == (sg_count - 1)) && (sg_count > 1) && + (addr % QEDF_PAGE_SIZE)) + io_req->use_slowpath = true; + /* + * Intermediate s/g element so check if start and end address + * is page aligned. + */ + else if ((i != 0) && (i != (sg_count - 1)) && + ((addr % QEDF_PAGE_SIZE) || (end_addr % QEDF_PAGE_SIZE))) + io_req->use_slowpath = true; + + if (sg_len > QEDF_MAX_BD_LEN) { + sg_frags = qedf_split_bd(io_req, addr, sg_len, + bd_count); + } else { + sg_frags = 1; + bd[bd_count].sge_addr.lo = U64_LO(addr); + bd[bd_count].sge_addr.hi = U64_HI(addr); + bd[bd_count].size = (uint16_t)sg_len; + } + + bd_count += sg_frags; + byte_count += sg_len; + } + + if (byte_count != scsi_bufflen(sc)) + QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != " + "scsi_bufflen = %d, task_id = 0x%x.\n", byte_count, + scsi_bufflen(sc), io_req->xid); + + return bd_count; +} + +static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req) +{ + struct scsi_cmnd *sc = io_req->sc_cmd; + struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl; + int bd_count; + + if (scsi_sg_count(sc)) { + bd_count = qedf_map_sg(io_req); + if (bd_count == 0) + return -ENOMEM; + } else { + bd_count = 0; + bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0; + bd[0].size = 0; + } + io_req->bd_tbl->bd_valid = bd_count; + + return 0; +} + +static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req, + struct fcp_cmnd *fcp_cmnd) +{ + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + + /* fcp_cmnd is 32 bytes */ + memset(fcp_cmnd, 0, FCP_CMND_LEN); + + /* 8 bytes: SCSI LUN info */ + int_to_scsilun(sc_cmd->device->lun, + (struct scsi_lun *)&fcp_cmnd->fc_lun); + + /* 4 bytes: flag info */ + fcp_cmnd->fc_pri_ta = 0; + fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags; + fcp_cmnd->fc_flags = io_req->io_req_flags; + fcp_cmnd->fc_cmdref = 0; + + /* Populate data direction */ + if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) + fcp_cmnd->fc_flags |= FCP_CFL_WRDATA; + else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) + fcp_cmnd->fc_flags |= FCP_CFL_RDDATA; + + fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE; + + /* 16 bytes: CDB information */ + memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len); + + /* 4 bytes: FCP data length */ + fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len); + +} + +static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport, + struct qedf_ioreq *io_req, u32 *ptu_invalidate, + struct fcoe_task_context *task_ctx) +{ + enum fcoe_task_type task_type; + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + struct io_bdt *bd_tbl = io_req->bd_tbl; + union fcoe_data_desc_ctx *data_desc; + u32 *fcp_cmnd; + u32 tmp_fcp_cmnd[8]; + int cnt, i; + int bd_count; + struct qedf_ctx *qedf = fcport->qedf; + uint16_t cq_idx = smp_processor_id() % qedf->num_queues; + u8 
tmp_sgl_mode = 0; + u8 mst_sgl_mode = 0; + + memset(task_ctx, 0, sizeof(struct fcoe_task_context)); + io_req->task = task_ctx; + + if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) + task_type = FCOE_TASK_TYPE_WRITE_INITIATOR; + else + task_type = FCOE_TASK_TYPE_READ_INITIATOR; + + /* Y Storm context */ + task_ctx->ystorm_st_context.expect_first_xfer = 1; + task_ctx->ystorm_st_context.data_2_trns_rem = io_req->data_xfer_len; + /* Check if this is required */ + task_ctx->ystorm_st_context.ox_id = io_req->xid; + task_ctx->ystorm_st_context.task_rety_identifier = + io_req->task_retry_identifier; + + /* T Storm ag context */ + SET_FIELD(task_ctx->tstorm_ag_context.flags0, + TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, PROTOCOLID_FCOE); + task_ctx->tstorm_ag_context.icid = (u16)fcport->fw_cid; + + /* T Storm st context */ + SET_FIELD(task_ctx->tstorm_st_context.read_write.flags, + FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, + 1); + task_ctx->tstorm_st_context.read_write.rx_id = 0xffff; + + task_ctx->tstorm_st_context.read_only.dev_type = + FCOE_TASK_DEV_TYPE_DISK; + task_ctx->tstorm_st_context.read_only.conf_supported = 0; + task_ctx->tstorm_st_context.read_only.cid = fcport->fw_cid; + + /* Completion queue for response. */ + task_ctx->tstorm_st_context.read_only.glbl_q_num = cq_idx; + task_ctx->tstorm_st_context.read_only.fcp_cmd_trns_size = + io_req->data_xfer_len; + task_ctx->tstorm_st_context.read_write.e_d_tov_exp_timeout_val = + lport->e_d_tov; + + task_ctx->ustorm_ag_context.global_cq_num = cq_idx; + io_req->fp_idx = cq_idx; + + bd_count = bd_tbl->bd_valid; + if (task_type == FCOE_TASK_TYPE_WRITE_INITIATOR) { + /* Setup WRITE task */ + struct fcoe_sge *fcoe_bd_tbl = bd_tbl->bd_tbl; + + task_ctx->ystorm_st_context.task_type = + FCOE_TASK_TYPE_WRITE_INITIATOR; + data_desc = &task_ctx->ystorm_st_context.data_desc; + + if (io_req->use_slowpath) { + SET_FIELD(task_ctx->ystorm_st_context.sgl_mode, + YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE, + FCOE_SLOW_SGL); + data_desc->slow.base_sgl_addr.lo = + U64_LO(bd_tbl->bd_tbl_dma); + data_desc->slow.base_sgl_addr.hi = + U64_HI(bd_tbl->bd_tbl_dma); + data_desc->slow.remainder_num_sges = bd_count; + data_desc->slow.curr_sge_off = 0; + data_desc->slow.curr_sgl_index = 0; + qedf->slow_sge_ios++; + io_req->sge_type = QEDF_IOREQ_SLOW_SGE; + } else { + SET_FIELD(task_ctx->ystorm_st_context.sgl_mode, + YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE, + (bd_count <= 4) ? 
(enum fcoe_sgl_mode)bd_count : + FCOE_MUL_FAST_SGES); + + if (bd_count == 1) { + data_desc->single_sge.sge_addr.lo = + fcoe_bd_tbl->sge_addr.lo; + data_desc->single_sge.sge_addr.hi = + fcoe_bd_tbl->sge_addr.hi; + data_desc->single_sge.size = + fcoe_bd_tbl->size; + data_desc->single_sge.is_valid_sge = 0; + qedf->single_sge_ios++; + io_req->sge_type = QEDF_IOREQ_SINGLE_SGE; + } else { + data_desc->fast.sgl_start_addr.lo = + U64_LO(bd_tbl->bd_tbl_dma); + data_desc->fast.sgl_start_addr.hi = + U64_HI(bd_tbl->bd_tbl_dma); + data_desc->fast.sgl_byte_offset = + data_desc->fast.sgl_start_addr.lo & + (QEDF_PAGE_SIZE - 1); + if (data_desc->fast.sgl_byte_offset > 0) + QEDF_ERR(&(qedf->dbg_ctx), + "byte_offset=%u for xid=0x%x.\n", + io_req->xid, + data_desc->fast.sgl_byte_offset); + data_desc->fast.task_reuse_cnt = + io_req->reuse_count; + io_req->reuse_count++; + if (io_req->reuse_count == QEDF_MAX_REUSE) { + *ptu_invalidate = 1; + io_req->reuse_count = 0; + } + qedf->fast_sge_ios++; + io_req->sge_type = QEDF_IOREQ_FAST_SGE; + } + } + + /* T Storm context */ + task_ctx->tstorm_st_context.read_only.task_type = + FCOE_TASK_TYPE_WRITE_INITIATOR; + + /* M Storm context */ + tmp_sgl_mode = GET_FIELD(task_ctx->ystorm_st_context.sgl_mode, + YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE); + SET_FIELD(task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode, + FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE, + tmp_sgl_mode); + + } else { + /* Setup READ task */ + + /* M Storm context */ + struct fcoe_sge *fcoe_bd_tbl = bd_tbl->bd_tbl; + + data_desc = &task_ctx->mstorm_st_context.fp.data_desc; + task_ctx->mstorm_st_context.fp.data_2_trns_rem = + io_req->data_xfer_len; + + if (io_req->use_slowpath) { + SET_FIELD( + task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode, + FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE, + FCOE_SLOW_SGL); + data_desc->slow.base_sgl_addr.lo = + U64_LO(bd_tbl->bd_tbl_dma); + data_desc->slow.base_sgl_addr.hi = + U64_HI(bd_tbl->bd_tbl_dma); + data_desc->slow.remainder_num_sges = + bd_count; + data_desc->slow.curr_sge_off = 0; + data_desc->slow.curr_sgl_index = 0; + qedf->slow_sge_ios++; + io_req->sge_type = QEDF_IOREQ_SLOW_SGE; + } else { + SET_FIELD( + task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode, + FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE, + (bd_count <= 4) ? 
(enum fcoe_sgl_mode)bd_count : + FCOE_MUL_FAST_SGES); + + if (bd_count == 1) { + data_desc->single_sge.sge_addr.lo = + fcoe_bd_tbl->sge_addr.lo; + data_desc->single_sge.sge_addr.hi = + fcoe_bd_tbl->sge_addr.hi; + data_desc->single_sge.size = + fcoe_bd_tbl->size; + data_desc->single_sge.is_valid_sge = 0; + qedf->single_sge_ios++; + io_req->sge_type = QEDF_IOREQ_SINGLE_SGE; + } else { + data_desc->fast.sgl_start_addr.lo = + U64_LO(bd_tbl->bd_tbl_dma); + data_desc->fast.sgl_start_addr.hi = + U64_HI(bd_tbl->bd_tbl_dma); + data_desc->fast.sgl_byte_offset = 0; + data_desc->fast.task_reuse_cnt = + io_req->reuse_count; + io_req->reuse_count++; + if (io_req->reuse_count == QEDF_MAX_REUSE) { + *ptu_invalidate = 1; + io_req->reuse_count = 0; + } + qedf->fast_sge_ios++; + io_req->sge_type = QEDF_IOREQ_FAST_SGE; + } + } + + /* Y Storm context */ + task_ctx->ystorm_st_context.expect_first_xfer = 0; + task_ctx->ystorm_st_context.task_type = + FCOE_TASK_TYPE_READ_INITIATOR; + + /* T Storm context */ + task_ctx->tstorm_st_context.read_only.task_type = + FCOE_TASK_TYPE_READ_INITIATOR; + mst_sgl_mode = GET_FIELD( + task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode, + FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE); + SET_FIELD(task_ctx->tstorm_st_context.read_write.flags, + FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE, + mst_sgl_mode); + } + + /* fill FCP_CMND IU */ + fcp_cmnd = (u32 *)task_ctx->ystorm_st_context.tx_info_union.fcp_cmd_payload.opaque; + qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd); + + /* Swap fcp_cmnd since FC is big endian */ + cnt = sizeof(struct fcp_cmnd) / sizeof(u32); + + for (i = 0; i < cnt; i++) { + *fcp_cmnd = cpu_to_be32(tmp_fcp_cmnd[i]); + fcp_cmnd++; + } + + /* M Storm context - Sense buffer */ + task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.lo = + U64_LO(io_req->sense_buffer_dma); + task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.hi = + U64_HI(io_req->sense_buffer_dma); +} + +void qedf_init_mp_task(struct qedf_ioreq *io_req, + struct fcoe_task_context *task_ctx) +{ + struct qedf_mp_req *mp_req = &(io_req->mp_req); + struct qedf_rport *fcport = io_req->fcport; + struct qedf_ctx *qedf = io_req->fcport->qedf; + struct fc_frame_header *fc_hdr; + enum fcoe_task_type task_type = 0; + union fcoe_data_desc_ctx *data_desc; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Initializing MP task " + "for cmd_type = %d\n", io_req->cmd_type); + + qedf->control_requests++; + + /* Obtain task_type */ + if ((io_req->cmd_type == QEDF_TASK_MGMT_CMD) || + (io_req->cmd_type == QEDF_ELS)) { + task_type = FCOE_TASK_TYPE_MIDPATH; + } else if (io_req->cmd_type == QEDF_ABTS) { + task_type = FCOE_TASK_TYPE_ABTS; + } + + memset(task_ctx, 0, sizeof(struct fcoe_task_context)); + + /* Setup the task from io_req for easy reference */ + io_req->task = task_ctx; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "task type = %d\n", + task_type); + + /* YSTORM only */ + { + /* Initialize YSTORM task context */ + struct fcoe_tx_mid_path_params *task_fc_hdr = + &task_ctx->ystorm_st_context.tx_info_union.tx_params.mid_path; + memset(task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params)); + task_ctx->ystorm_st_context.task_rety_identifier = + io_req->task_retry_identifier; + + /* Init SGL parameters */ + if ((task_type == FCOE_TASK_TYPE_MIDPATH) || + (task_type == FCOE_TASK_TYPE_UNSOLICITED)) { + data_desc = &task_ctx->ystorm_st_context.data_desc; + data_desc->slow.base_sgl_addr.lo = + U64_LO(mp_req->mp_req_bd_dma); + data_desc->slow.base_sgl_addr.hi = + U64_HI(mp_req->mp_req_bd_dma); + 
data_desc->slow.remainder_num_sges = 1; + data_desc->slow.curr_sge_off = 0; + data_desc->slow.curr_sgl_index = 0; + } + + fc_hdr = &(mp_req->req_fc_hdr); + if (task_type == FCOE_TASK_TYPE_MIDPATH) { + fc_hdr->fh_ox_id = io_req->xid; + fc_hdr->fh_rx_id = htons(0xffff); + } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) { + fc_hdr->fh_rx_id = io_req->xid; + } + + /* Fill FC Header into middle path buffer */ + task_fc_hdr->parameter = fc_hdr->fh_parm_offset; + task_fc_hdr->r_ctl = fc_hdr->fh_r_ctl; + task_fc_hdr->type = fc_hdr->fh_type; + task_fc_hdr->cs_ctl = fc_hdr->fh_cs_ctl; + task_fc_hdr->df_ctl = fc_hdr->fh_df_ctl; + task_fc_hdr->rx_id = fc_hdr->fh_rx_id; + task_fc_hdr->ox_id = fc_hdr->fh_ox_id; + + task_ctx->ystorm_st_context.data_2_trns_rem = + io_req->data_xfer_len; + task_ctx->ystorm_st_context.task_type = task_type; + } + + /* TSTORM ONLY */ + { + task_ctx->tstorm_ag_context.icid = (u16)fcport->fw_cid; + task_ctx->tstorm_st_context.read_only.cid = fcport->fw_cid; + /* Always send middle-path repsonses on CQ #0 */ + task_ctx->tstorm_st_context.read_only.glbl_q_num = 0; + io_req->fp_idx = 0; + SET_FIELD(task_ctx->tstorm_ag_context.flags0, + TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, + PROTOCOLID_FCOE); + task_ctx->tstorm_st_context.read_only.task_type = task_type; + SET_FIELD(task_ctx->tstorm_st_context.read_write.flags, + FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, + 1); + task_ctx->tstorm_st_context.read_write.rx_id = 0xffff; + } + + /* MSTORM only */ + { + if (task_type == FCOE_TASK_TYPE_MIDPATH) { + /* Initialize task context */ + data_desc = &task_ctx->mstorm_st_context.fp.data_desc; + + /* Set cache sges address and length */ + data_desc->slow.base_sgl_addr.lo = + U64_LO(mp_req->mp_resp_bd_dma); + data_desc->slow.base_sgl_addr.hi = + U64_HI(mp_req->mp_resp_bd_dma); + data_desc->slow.remainder_num_sges = 1; + data_desc->slow.curr_sge_off = 0; + data_desc->slow.curr_sgl_index = 0; + + /* + * Also need to fil in non-fastpath response address + * for middle path commands. + */ + task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.lo = + U64_LO(mp_req->mp_resp_bd_dma); + task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.hi = + U64_HI(mp_req->mp_resp_bd_dma); + } + } + + /* USTORM ONLY */ + { + task_ctx->ustorm_ag_context.global_cq_num = 0; + } + + /* I/O stats. 
Middle path commands always use slow SGEs */ + qedf->slow_sge_ios++; + io_req->sge_type = QEDF_IOREQ_SLOW_SGE; +} + +void qedf_add_to_sq(struct qedf_rport *fcport, u16 xid, u32 ptu_invalidate, + enum fcoe_task_type req_type, u32 offset) +{ + struct fcoe_wqe *sqe; + uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe)); + + sqe = &fcport->sq[fcport->sq_prod_idx]; + + fcport->sq_prod_idx++; + fcport->fw_sq_prod_idx++; + if (fcport->sq_prod_idx == total_sqe) + fcport->sq_prod_idx = 0; + + switch (req_type) { + case FCOE_TASK_TYPE_WRITE_INITIATOR: + case FCOE_TASK_TYPE_READ_INITIATOR: + SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, SEND_FCOE_CMD); + if (ptu_invalidate) + SET_FIELD(sqe->flags, FCOE_WQE_INVALIDATE_PTU, 1); + break; + case FCOE_TASK_TYPE_MIDPATH: + SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, SEND_FCOE_MIDPATH); + break; + case FCOE_TASK_TYPE_ABTS: + SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, + SEND_FCOE_ABTS_REQUEST); + break; + case FCOE_TASK_TYPE_EXCHANGE_CLEANUP: + SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, + FCOE_EXCHANGE_CLEANUP); + break; + case FCOE_TASK_TYPE_SEQUENCE_CLEANUP: + SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, + FCOE_SEQUENCE_RECOVERY); + /* NOTE: offset param only used for sequence recovery */ + sqe->additional_info_union.seq_rec_updated_offset = offset; + break; + case FCOE_TASK_TYPE_UNSOLICITED: + break; + default: + break; + } + + sqe->task_id = xid; + + /* Make sure SQ data is coherent */ + wmb(); + +} + +void qedf_ring_doorbell(struct qedf_rport *fcport) +{ + struct fcoe_db_data dbell = { 0 }; + + dbell.agg_flags = 0; + + dbell.params |= DB_DEST_XCM << FCOE_DB_DATA_DEST_SHIFT; + dbell.params |= DB_AGG_CMD_SET << FCOE_DB_DATA_AGG_CMD_SHIFT; + dbell.params |= DQ_XCM_FCOE_SQ_PROD_CMD << + FCOE_DB_DATA_AGG_VAL_SEL_SHIFT; + + dbell.sq_prod = fcport->fw_sq_prod_idx; + writel(*(u32 *)&dbell, fcport->p_doorbell); + /* Make sure SQ index is updated so f/w prcesses requests in order */ + wmb(); + mmiowb(); +} + +static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req, + int8_t direction) +{ + struct qedf_ctx *qedf = fcport->qedf; + struct qedf_io_log *io_log; + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + unsigned long flags; + uint8_t op; + + spin_lock_irqsave(&qedf->io_trace_lock, flags); + + io_log = &qedf->io_trace_buf[qedf->io_trace_idx]; + io_log->direction = direction; + io_log->task_id = io_req->xid; + io_log->port_id = fcport->rdata->ids.port_id; + io_log->lun = sc_cmd->device->lun; + io_log->op = op = sc_cmd->cmnd[0]; + io_log->lba[0] = sc_cmd->cmnd[2]; + io_log->lba[1] = sc_cmd->cmnd[3]; + io_log->lba[2] = sc_cmd->cmnd[4]; + io_log->lba[3] = sc_cmd->cmnd[5]; + io_log->bufflen = scsi_bufflen(sc_cmd); + io_log->sg_count = scsi_sg_count(sc_cmd); + io_log->result = sc_cmd->result; + io_log->jiffies = jiffies; + io_log->refcount = kref_read(&io_req->refcount); + + if (direction == QEDF_IO_TRACE_REQ) { + /* For requests we only care abot the submission CPU */ + io_log->req_cpu = io_req->cpu; + io_log->int_cpu = 0; + io_log->rsp_cpu = 0; + } else if (direction == QEDF_IO_TRACE_RSP) { + io_log->req_cpu = io_req->cpu; + io_log->int_cpu = io_req->int_cpu; + io_log->rsp_cpu = smp_processor_id(); + } + + io_log->sge_type = io_req->sge_type; + + qedf->io_trace_idx++; + if (qedf->io_trace_idx == QEDF_IO_TRACE_SIZE) + qedf->io_trace_idx = 0; + + spin_unlock_irqrestore(&qedf->io_trace_lock, flags); +} + +int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req) +{ + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + struct Scsi_Host 
*host = sc_cmd->device->host; + struct fc_lport *lport = shost_priv(host); + struct qedf_ctx *qedf = lport_priv(lport); + struct fcoe_task_context *task_ctx; + u16 xid; + enum fcoe_task_type req_type = 0; + u32 ptu_invalidate = 0; + + /* Initialize rest of io_req fileds */ + io_req->data_xfer_len = scsi_bufflen(sc_cmd); + sc_cmd->SCp.ptr = (char *)io_req; + io_req->use_slowpath = false; /* Assume fast SGL by default */ + + /* Record which cpu this request is associated with */ + io_req->cpu = smp_processor_id(); + + if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) { + req_type = FCOE_TASK_TYPE_READ_INITIATOR; + io_req->io_req_flags = QEDF_READ; + qedf->input_requests++; + } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) { + req_type = FCOE_TASK_TYPE_WRITE_INITIATOR; + io_req->io_req_flags = QEDF_WRITE; + qedf->output_requests++; + } else { + io_req->io_req_flags = 0; + qedf->control_requests++; + } + + xid = io_req->xid; + + /* Build buffer descriptor list for firmware from sg list */ + if (qedf_build_bd_list_from_sg(io_req)) { + QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n"); + kref_put(&io_req->refcount, qedf_release_cmd); + return -EAGAIN; + } + + /* Get the task context */ + task_ctx = qedf_get_task_mem(&qedf->tasks, xid); + if (!task_ctx) { + QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n", + xid); + kref_put(&io_req->refcount, qedf_release_cmd); + return -EINVAL; + } + + qedf_init_task(fcport, lport, io_req, &ptu_invalidate, task_ctx); + + if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { + QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n"); + kref_put(&io_req->refcount, qedf_release_cmd); + } + + /* Obtain free SQ entry */ + qedf_add_to_sq(fcport, xid, ptu_invalidate, req_type, 0); + + /* Ring doorbell */ + qedf_ring_doorbell(fcport); + + if (qedf_io_tracing && io_req->sc_cmd) + qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ); + + return false; +} + +int +qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd) +{ + struct fc_lport *lport = shost_priv(host); + struct qedf_ctx *qedf = lport_priv(lport); + struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); + struct fc_rport_libfc_priv *rp = rport->dd_data; + struct qedf_rport *fcport = rport->dd_data; + struct qedf_ioreq *io_req; + int rc = 0; + int rval; + unsigned long flags = 0; + + + if (test_bit(QEDF_UNLOADING, &qedf->flags) || + test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) { + sc_cmd->result = DID_NO_CONNECT << 16; + sc_cmd->scsi_done(sc_cmd); + return 0; + } + + rval = fc_remote_port_chkready(rport); + if (rval) { + sc_cmd->result = rval; + sc_cmd->scsi_done(sc_cmd); + return 0; + } + + /* Retry command if we are doing a qed drain operation */ + if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) { + rc = SCSI_MLQUEUE_HOST_BUSY; + goto exit_qcmd; + } + + if (lport->state != LPORT_ST_READY || + atomic_read(&qedf->link_state) != QEDF_LINK_UP) { + rc = SCSI_MLQUEUE_HOST_BUSY; + goto exit_qcmd; + } + + /* rport and tgt are allocated together, so tgt should be non-NULL */ + fcport = (struct qedf_rport *)&rp[1]; + + if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { + /* + * Session is not offloaded yet. Let SCSI-ml retry + * the command. 
+ */ + rc = SCSI_MLQUEUE_TARGET_BUSY; + goto exit_qcmd; + } + if (fcport->retry_delay_timestamp) { + if (time_after(jiffies, fcport->retry_delay_timestamp)) { + fcport->retry_delay_timestamp = 0; + } else { + /* If retry_delay timer is active, flow off the ML */ + rc = SCSI_MLQUEUE_TARGET_BUSY; + goto exit_qcmd; + } + } + + io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD); + if (!io_req) { + rc = SCSI_MLQUEUE_HOST_BUSY; + goto exit_qcmd; + } + + io_req->sc_cmd = sc_cmd; + + /* Take fcport->rport_lock for posting to fcport send queue */ + spin_lock_irqsave(&fcport->rport_lock, flags); + if (qedf_post_io_req(fcport, io_req)) { + QEDF_WARN(&(qedf->dbg_ctx), "Unable to post io_req\n"); + /* Return SQE to pool */ + atomic_inc(&fcport->free_sqes); + rc = SCSI_MLQUEUE_HOST_BUSY; + } + spin_unlock_irqrestore(&fcport->rport_lock, flags); + +exit_qcmd: + return rc; +} + +static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req, + struct fcoe_cqe_rsp_info *fcp_rsp) +{ + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + struct qedf_ctx *qedf = io_req->fcport->qedf; + u8 rsp_flags = fcp_rsp->rsp_flags.flags; + int fcp_sns_len = 0; + int fcp_rsp_len = 0; + uint8_t *rsp_info, *sense_data; + + io_req->fcp_status = FC_GOOD; + io_req->fcp_resid = 0; + if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER | + FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER)) + io_req->fcp_resid = fcp_rsp->fcp_resid; + + io_req->scsi_comp_flags = rsp_flags; + CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status = + fcp_rsp->scsi_status_code; + + if (rsp_flags & + FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID) + fcp_rsp_len = fcp_rsp->fcp_rsp_len; + + if (rsp_flags & + FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID) + fcp_sns_len = fcp_rsp->fcp_sns_len; + + io_req->fcp_rsp_len = fcp_rsp_len; + io_req->fcp_sns_len = fcp_sns_len; + rsp_info = sense_data = io_req->sense_buffer; + + /* fetch fcp_rsp_code */ + if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) { + /* Only for task management function */ + io_req->fcp_rsp_code = rsp_info[3]; + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "fcp_rsp_code = %d\n", io_req->fcp_rsp_code); + /* Adjust sense-data location. 
*/ + sense_data += fcp_rsp_len; + } + + if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Truncating sense buffer\n"); + fcp_sns_len = SCSI_SENSE_BUFFERSIZE; + } + + memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); + if (fcp_sns_len) + memcpy(sc_cmd->sense_buffer, sense_data, + fcp_sns_len); +} + +static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req) +{ + struct scsi_cmnd *sc = io_req->sc_cmd; + + if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) { + dma_unmap_sg(&qedf->pdev->dev, scsi_sglist(sc), + scsi_sg_count(sc), sc->sc_data_direction); + io_req->bd_tbl->bd_valid = 0; + } +} + +void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, + struct qedf_ioreq *io_req) +{ + u16 xid, rval; + struct fcoe_task_context *task_ctx; + struct scsi_cmnd *sc_cmd; + struct fcoe_cqe_rsp_info *fcp_rsp; + struct qedf_rport *fcport; + int refcount; + u16 scope, qualifier = 0; + u8 fw_residual_flag = 0; + + if (!io_req) + return; + if (!cqe) + return; + + xid = io_req->xid; + task_ctx = qedf_get_task_mem(&qedf->tasks, xid); + sc_cmd = io_req->sc_cmd; + fcp_rsp = &cqe->cqe_info.rsp_info; + + if (!sc_cmd) { + QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n"); + return; + } + + if (!sc_cmd->SCp.ptr) { + QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in " + "another context.\n"); + return; + } + + if (!sc_cmd->request) { + QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, " + "sc_cmd=%p.\n", sc_cmd); + return; + } + + if (!sc_cmd->request->special) { + QEDF_WARN(&(qedf->dbg_ctx), "request->special is NULL so " + "request not valid, sc_cmd=%p.\n", sc_cmd); + return; + } + + if (!sc_cmd->request->q) { + QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request " + "is not valid, sc_cmd=%p.\n", sc_cmd); + return; + } + + fcport = io_req->fcport; + + qedf_parse_fcp_rsp(io_req, fcp_rsp); + + qedf_unmap_sg_list(qedf, io_req); + + /* Check for FCP transport error */ + if (io_req->fcp_rsp_len > 3 && io_req->fcp_rsp_code) { + QEDF_ERR(&(qedf->dbg_ctx), + "FCP I/O protocol failure xid=0x%x fcp_rsp_len=%d " + "fcp_rsp_code=%d.\n", io_req->xid, io_req->fcp_rsp_len, + io_req->fcp_rsp_code); + sc_cmd->result = DID_BUS_BUSY << 16; + goto out; + } + + fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags, + FCOE_CQE_RSP_INFO_FW_UNDERRUN); + if (fw_residual_flag) { + QEDF_ERR(&(qedf->dbg_ctx), + "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x " + "fcp_resid=%d fw_residual=0x%x.\n", io_req->xid, + fcp_rsp->rsp_flags.flags, io_req->fcp_resid, + cqe->cqe_info.rsp_info.fw_residual); + + if (io_req->cdb_status == 0) + sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status; + else + sc_cmd->result = (DID_OK << 16) | io_req->cdb_status; + + /* Abort the command since we did not get all the data */ + init_completion(&io_req->abts_done); + rval = qedf_initiate_abts(io_req, true); + if (rval) { + QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n"); + sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status; + } + + /* + * Set resid to the whole buffer length so we won't try to resue + * any previously data. 
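+ * (reporting the entire buffer as residual keeps the midlayer from consuming any partially received data).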
+ */ + scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd)); + goto out; + } + + switch (io_req->fcp_status) { + case FC_GOOD: + if (io_req->cdb_status == 0) { + /* Good I/O completion */ + sc_cmd->result = DID_OK << 16; + } else { + refcount = kref_read(&io_req->refcount); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "%d:0:%d:%lld xid=0x%0x op=0x%02x " + "lba=%02x%02x%02x%02x cdb_status=%d " + "fcp_resid=0x%x refcount=%d.\n", + qedf->lport->host->host_no, sc_cmd->device->id, + sc_cmd->device->lun, io_req->xid, + sc_cmd->cmnd[0], sc_cmd->cmnd[2], sc_cmd->cmnd[3], + sc_cmd->cmnd[4], sc_cmd->cmnd[5], + io_req->cdb_status, io_req->fcp_resid, + refcount); + sc_cmd->result = (DID_OK << 16) | io_req->cdb_status; + + if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL || + io_req->cdb_status == SAM_STAT_BUSY) { + /* + * Check whether we need to set retry_delay at + * all based on retry_delay module parameter + * and the status qualifier. + */ + + /* Upper 2 bits */ + scope = fcp_rsp->retry_delay_timer & 0xC000; + /* Lower 14 bits */ + qualifier = fcp_rsp->retry_delay_timer & 0x3FFF; + + if (qedf_retry_delay && + scope > 0 && qualifier > 0 && + qualifier <= 0x3FEF) { + /* Check we don't go over the max */ + if (qualifier > QEDF_RETRY_DELAY_MAX) + qualifier = + QEDF_RETRY_DELAY_MAX; + fcport->retry_delay_timestamp = + jiffies + (qualifier * HZ / 10); + } + } + } + if (io_req->fcp_resid) + scsi_set_resid(sc_cmd, io_req->fcp_resid); + break; + default: + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n", + io_req->fcp_status); + break; + } + +out: + if (qedf_io_tracing) + qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP); + + io_req->sc_cmd = NULL; + sc_cmd->SCp.ptr = NULL; + sc_cmd->scsi_done(sc_cmd); + kref_put(&io_req->refcount, qedf_release_cmd); +} + +/* Return a SCSI command in some other context besides a normal completion */ +void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req, + int result) +{ + u16 xid; + struct scsi_cmnd *sc_cmd; + int refcount; + + if (!io_req) + return; + + xid = io_req->xid; + sc_cmd = io_req->sc_cmd; + + if (!sc_cmd) { + QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n"); + return; + } + + if (!sc_cmd->SCp.ptr) { + QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in " + "another context.\n"); + return; + } + + qedf_unmap_sg_list(qedf, io_req); + + sc_cmd->result = result << 16; + refcount = kref_read(&io_req->refcount); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld: Completing " + "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, " + "allowed=%d retries=%d refcount=%d.\n", + qedf->lport->host->host_no, sc_cmd->device->id, + sc_cmd->device->lun, sc_cmd, sc_cmd->result, sc_cmd->cmnd[0], + sc_cmd->cmnd[2], sc_cmd->cmnd[3], sc_cmd->cmnd[4], + sc_cmd->cmnd[5], sc_cmd->allowed, sc_cmd->retries, + refcount); + + /* + * Set resid to the whole buffer length so we won't try to resue any + * previously read data + */ + scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd)); + + if (qedf_io_tracing) + qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP); + + io_req->sc_cmd = NULL; + sc_cmd->SCp.ptr = NULL; + sc_cmd->scsi_done(sc_cmd); + kref_put(&io_req->refcount, qedf_release_cmd); +} + +/* + * Handle warning type CQE completions. This is mainly used for REC timer + * popping. 
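+ * The first bit set in the CQE error/warning bitmap selects the warning code; a REC TOV expiration on a tape device is recovered with a REC ELS, while other warnings abort the I/O with an ABTS.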
+ */ +void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, + struct qedf_ioreq *io_req) +{ + int rval, i; + struct qedf_rport *fcport = io_req->fcport; + u64 err_warn_bit_map; + u8 err_warn = 0xff; + + if (!cqe) + return; + + QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, " + "xid=0x%x\n", io_req->xid); + QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), + "err_warn_bitmap=%08x:%08x\n", + le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi), + le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo)); + QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, " + "rx_buff_off=%08x, rx_id=%04x\n", + le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off), + le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off), + le32_to_cpu(cqe->cqe_info.err_info.rx_id)); + + /* Normalize the error bitmap value to an just an unsigned int */ + err_warn_bit_map = (u64) + ((u64)cqe->cqe_info.err_info.err_warn_bitmap_hi << 32) | + (u64)cqe->cqe_info.err_info.err_warn_bitmap_lo; + for (i = 0; i < 64; i++) { + if (err_warn_bit_map & (u64)((u64)1 << i)) { + err_warn = i; + break; + } + } + + /* Check if REC TOV expired if this is a tape device */ + if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) { + if (err_warn == + FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION) { + QEDF_ERR(&(qedf->dbg_ctx), "REC timer expired.\n"); + if (!test_bit(QEDF_CMD_SRR_SENT, &io_req->flags)) { + io_req->rx_buf_off = + cqe->cqe_info.err_info.rx_buf_off; + io_req->tx_buf_off = + cqe->cqe_info.err_info.tx_buf_off; + io_req->rx_id = cqe->cqe_info.err_info.rx_id; + rval = qedf_send_rec(io_req); + /* + * We only want to abort the io_req if we + * can't queue the REC command as we want to + * keep the exchange open for recovery. + */ + if (rval) + goto send_abort; + } + return; + } + } + +send_abort: + init_completion(&io_req->abts_done); + rval = qedf_initiate_abts(io_req, true); + if (rval) + QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n"); +} + +/* Cleanup a command when we receive an error detection completion */ +void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, + struct qedf_ioreq *io_req) +{ + int rval; + + if (!cqe) + return; + + QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, " + "xid=0x%x\n", io_req->xid); + QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), + "err_warn_bitmap=%08x:%08x\n", + le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi), + le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo)); + QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, " + "rx_buff_off=%08x, rx_id=%04x\n", + le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off), + le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off), + le32_to_cpu(cqe->cqe_info.err_info.rx_id)); + + if (qedf->stop_io_on_error) { + qedf_stop_all_io(qedf); + return; + } + + init_completion(&io_req->abts_done); + rval = qedf_initiate_abts(io_req, true); + if (rval) + QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n"); +} + +static void qedf_flush_els_req(struct qedf_ctx *qedf, + struct qedf_ioreq *els_req) +{ + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Flushing ELS request xid=0x%x refcount=%d.\n", els_req->xid, + kref_read(&els_req->refcount)); + + /* + * Need to distinguish this from a timeout when calling the + * els_req->cb_func. 
+ */ + els_req->event = QEDF_IOREQ_EV_ELS_FLUSH; + + /* Cancel the timer */ + cancel_delayed_work_sync(&els_req->timeout_work); + + /* Call callback function to complete command */ + if (els_req->cb_func && els_req->cb_arg) { + els_req->cb_func(els_req->cb_arg); + els_req->cb_arg = NULL; + } + + /* Release kref for original initiate_els */ + kref_put(&els_req->refcount, qedf_release_cmd); +} + +/* A value of -1 for lun is a wild card that means flush all + * active SCSI I/Os for the target. + */ +void qedf_flush_active_ios(struct qedf_rport *fcport, int lun) +{ + struct qedf_ioreq *io_req; + struct qedf_ctx *qedf; + struct qedf_cmd_mgr *cmd_mgr; + int i, rc; + + if (!fcport) + return; + + qedf = fcport->qedf; + cmd_mgr = qedf->cmd_mgr; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Flush active i/o's.\n"); + + for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) { + io_req = &cmd_mgr->cmds[i]; + + if (!io_req) + continue; + if (io_req->fcport != fcport) + continue; + if (io_req->cmd_type == QEDF_ELS) { + rc = kref_get_unless_zero(&io_req->refcount); + if (!rc) { + QEDF_ERR(&(qedf->dbg_ctx), + "Could not get kref for io_req=0x%p.\n", + io_req); + continue; + } + qedf_flush_els_req(qedf, io_req); + /* + * Release the kref and go back to the top of the + * loop. + */ + goto free_cmd; + } + + if (!io_req->sc_cmd) + continue; + if (lun > 0) { + if (io_req->sc_cmd->device->lun != + (u64)lun) + continue; + } + + /* + * Use kref_get_unless_zero in the unlikely case the command + * we're about to flush was completed in the normal SCSI path + */ + rc = kref_get_unless_zero(&io_req->refcount); + if (!rc) { + QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for " + "io_req=0x%p\n", io_req); + continue; + } + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Cleanup xid=0x%x.\n", io_req->xid); + + /* Cleanup task and return I/O mid-layer */ + qedf_initiate_cleanup(io_req, true); + +free_cmd: + kref_put(&io_req->refcount, qedf_release_cmd); + } +} + +/* + * Initiate a ABTS middle path command. Note that we don't have to initialize + * the task context for an ABTS task. 
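+ * The ABTS reuses the XID of the I/O being aborted; an extra reference is taken on the io_req and a QEDF_ABORT_TIMEOUT timer is armed before the request is posted to the SQ.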
+ */ +int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts) +{ + struct fc_lport *lport; + struct qedf_rport *fcport = io_req->fcport; + struct fc_rport_priv *rdata = fcport->rdata; + struct qedf_ctx *qedf = fcport->qedf; + u16 xid; + u32 r_a_tov = 0; + int rc = 0; + unsigned long flags; + + r_a_tov = rdata->r_a_tov; + lport = qedf->lport; + + if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { + QEDF_ERR(&(qedf->dbg_ctx), "tgt not offloaded\n"); + rc = 1; + goto abts_err; + } + + if (lport->state != LPORT_ST_READY || !(lport->link_up)) { + QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n"); + rc = 1; + goto abts_err; + } + + if (atomic_read(&qedf->link_down_tmo_valid) > 0) { + QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n"); + rc = 1; + goto abts_err; + } + + /* Ensure room on SQ */ + if (!atomic_read(&fcport->free_sqes)) { + QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n"); + rc = 1; + goto abts_err; + } + + + kref_get(&io_req->refcount); + + xid = io_req->xid; + qedf->control_requests++; + qedf->packet_aborts++; + + /* Set the return CPU to be the same as the request one */ + io_req->cpu = smp_processor_id(); + + /* Set the command type to abort */ + io_req->cmd_type = QEDF_ABTS; + io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts; + + set_bit(QEDF_CMD_IN_ABORT, &io_req->flags); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "ABTS io_req xid = " + "0x%x\n", xid); + + qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT * HZ); + + spin_lock_irqsave(&fcport->rport_lock, flags); + + /* Add ABTS to send queue */ + qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_ABTS, 0); + + /* Ring doorbell */ + qedf_ring_doorbell(fcport); + + spin_unlock_irqrestore(&fcport->rport_lock, flags); + + return rc; +abts_err: + /* + * If the ABTS task fails to queue then we need to cleanup the + * task at the firmware. + */ + qedf_initiate_cleanup(io_req, return_scsi_cmd_on_abts); + return rc; +} + +void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, + struct qedf_ioreq *io_req) +{ + uint32_t r_ctl; + uint16_t xid; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = " + "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type); + + cancel_delayed_work(&io_req->timeout_work); + + xid = io_req->xid; + r_ctl = cqe->cqe_info.abts_info.r_ctl; + + switch (r_ctl) { + case FC_RCTL_BA_ACC: + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, + "ABTS response - ACC Send RRQ after R_A_TOV\n"); + io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS; + /* + * Dont release this cmd yet. 
It will be relesed + * after we get RRQ response + */ + kref_get(&io_req->refcount); + queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work, + msecs_to_jiffies(qedf->lport->r_a_tov)); + break; + /* For error cases let the cleanup return the command */ + case FC_RCTL_BA_RJT: + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, + "ABTS response - RJT\n"); + io_req->event = QEDF_IOREQ_EV_ABORT_FAILED; + break; + default: + QEDF_ERR(&(qedf->dbg_ctx), "Unknown ABTS response\n"); + break; + } + + clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags); + + if (io_req->sc_cmd) { + if (io_req->return_scsi_cmd_on_abts) + qedf_scsi_done(qedf, io_req, DID_ERROR); + } + + /* Notify eh_abort handler that ABTS is complete */ + complete(&io_req->abts_done); + + kref_put(&io_req->refcount, qedf_release_cmd); +} + +int qedf_init_mp_req(struct qedf_ioreq *io_req) +{ + struct qedf_mp_req *mp_req; + struct fcoe_sge *mp_req_bd; + struct fcoe_sge *mp_resp_bd; + struct qedf_ctx *qedf = io_req->fcport->qedf; + dma_addr_t addr; + uint64_t sz; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_MP_REQ, "Entered.\n"); + + mp_req = (struct qedf_mp_req *)&(io_req->mp_req); + memset(mp_req, 0, sizeof(struct qedf_mp_req)); + + if (io_req->cmd_type != QEDF_ELS) { + mp_req->req_len = sizeof(struct fcp_cmnd); + io_req->data_xfer_len = mp_req->req_len; + } else + mp_req->req_len = io_req->data_xfer_len; + + mp_req->req_buf = dma_alloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE, + &mp_req->req_buf_dma, GFP_KERNEL); + if (!mp_req->req_buf) { + QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req buffer\n"); + qedf_free_mp_resc(io_req); + return -ENOMEM; + } + + mp_req->resp_buf = dma_alloc_coherent(&qedf->pdev->dev, + QEDF_PAGE_SIZE, &mp_req->resp_buf_dma, GFP_KERNEL); + if (!mp_req->resp_buf) { + QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc TM resp " + "buffer\n"); + qedf_free_mp_resc(io_req); + return -ENOMEM; + } + + /* Allocate and map mp_req_bd and mp_resp_bd */ + sz = sizeof(struct fcoe_sge); + mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz, + &mp_req->mp_req_bd_dma, GFP_KERNEL); + if (!mp_req->mp_req_bd) { + QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req bd\n"); + qedf_free_mp_resc(io_req); + return -ENOMEM; + } + + mp_req->mp_resp_bd = dma_alloc_coherent(&qedf->pdev->dev, sz, + &mp_req->mp_resp_bd_dma, GFP_KERNEL); + if (!mp_req->mp_resp_bd) { + QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP resp bd\n"); + qedf_free_mp_resc(io_req); + return -ENOMEM; + } + + /* Fill bd table */ + addr = mp_req->req_buf_dma; + mp_req_bd = mp_req->mp_req_bd; + mp_req_bd->sge_addr.lo = U64_LO(addr); + mp_req_bd->sge_addr.hi = U64_HI(addr); + mp_req_bd->size = QEDF_PAGE_SIZE; + + /* + * MP buffer is either a task mgmt command or an ELS. + * So the assumption is that it consumes a single bd + * entry in the bd table + */ + mp_resp_bd = mp_req->mp_resp_bd; + addr = mp_req->resp_buf_dma; + mp_resp_bd->sge_addr.lo = U64_LO(addr); + mp_resp_bd->sge_addr.hi = U64_HI(addr); + mp_resp_bd->size = QEDF_PAGE_SIZE; + + return 0; +} + +/* + * Last ditch effort to clear the port if it's stuck. Used only after a + * cleanup task times out. + */ +static void qedf_drain_request(struct qedf_ctx *qedf) +{ + if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) { + QEDF_ERR(&(qedf->dbg_ctx), "MCP drain already active.\n"); + return; + } + + /* Set bit to return all queuecommand requests as busy */ + set_bit(QEDF_DRAIN_ACTIVE, &qedf->flags); + + /* Call qed drain request for function. 
Should be synchronous */ + qed_ops->common->drain(qedf->cdev); + + /* Settle time for CQEs to be returned */ + msleep(100); + + /* Unplug and continue */ + clear_bit(QEDF_DRAIN_ACTIVE, &qedf->flags); +} + +/* + * Returns SUCCESS if the cleanup task does not timeout, otherwise return + * FAILURE. + */ +int qedf_initiate_cleanup(struct qedf_ioreq *io_req, + bool return_scsi_cmd_on_abts) +{ + struct qedf_rport *fcport; + struct qedf_ctx *qedf; + uint16_t xid; + struct fcoe_task_context *task; + int tmo = 0; + int rc = SUCCESS; + unsigned long flags; + + fcport = io_req->fcport; + if (!fcport) { + QEDF_ERR(NULL, "fcport is NULL.\n"); + return SUCCESS; + } + + qedf = fcport->qedf; + if (!qedf) { + QEDF_ERR(NULL, "qedf is NULL.\n"); + return SUCCESS; + } + + if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) || + test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) { + QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in " + "cleanup processing or already completed.\n", + io_req->xid); + return SUCCESS; + } + + /* Ensure room on SQ */ + if (!atomic_read(&fcport->free_sqes)) { + QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n"); + return FAILED; + } + + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid=0x%x\n", + io_req->xid); + + /* Cleanup cmds re-use the same TID as the original I/O */ + xid = io_req->xid; + io_req->cmd_type = QEDF_CLEANUP; + io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts; + + /* Set the return CPU to be the same as the request one */ + io_req->cpu = smp_processor_id(); + + set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags); + + task = qedf_get_task_mem(&qedf->tasks, xid); + + init_completion(&io_req->tm_done); + + /* Obtain free SQ entry */ + spin_lock_irqsave(&fcport->rport_lock, flags); + qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_EXCHANGE_CLEANUP, 0); + + /* Ring doorbell */ + qedf_ring_doorbell(fcport); + spin_unlock_irqrestore(&fcport->rport_lock, flags); + + tmo = wait_for_completion_timeout(&io_req->tm_done, + QEDF_CLEANUP_TIMEOUT * HZ); + + if (!tmo) { + rc = FAILED; + /* Timeout case */ + QEDF_ERR(&(qedf->dbg_ctx), "Cleanup command timeout, " + "xid=%x.\n", io_req->xid); + clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags); + /* Issue a drain request if cleanup task times out */ + QEDF_ERR(&(qedf->dbg_ctx), "Issuing MCP drain request.\n"); + qedf_drain_request(qedf); + } + + if (io_req->sc_cmd) { + if (io_req->return_scsi_cmd_on_abts) + qedf_scsi_done(qedf, io_req, DID_ERROR); + } + + if (rc == SUCCESS) + io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS; + else + io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED; + + return rc; +} + +void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, + struct qedf_ioreq *io_req) +{ + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid = 0x%x\n", + io_req->xid); + + clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags); + + /* Complete so we can finish cleaning up the I/O */ + complete(&io_req->tm_done); +} + +static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd, + uint8_t tm_flags) +{ + struct qedf_ioreq *io_req; + struct qedf_mp_req *tm_req; + struct fcoe_task_context *task; + struct fc_frame_header *fc_hdr; + struct fcp_cmnd *fcp_cmnd; + struct qedf_ctx *qedf = fcport->qedf; + int rc = 0; + uint16_t xid; + uint32_t sid, did; + int tmo = 0; + unsigned long flags; + + if (!sc_cmd) { + QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n"); + return FAILED; + } + + if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) { + QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n"); + rc = 
FAILED; + return FAILED; + } + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "portid = 0x%x " + "tm_flags = %d\n", fcport->rdata->ids.port_id, tm_flags); + + io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD); + if (!io_req) { + QEDF_ERR(&(qedf->dbg_ctx), "Failed TMF"); + rc = -EAGAIN; + goto reset_tmf_err; + } + + /* Initialize rest of io_req fields */ + io_req->sc_cmd = sc_cmd; + io_req->fcport = fcport; + io_req->cmd_type = QEDF_TASK_MGMT_CMD; + + /* Set the return CPU to be the same as the request one */ + io_req->cpu = smp_processor_id(); + + tm_req = (struct qedf_mp_req *)&(io_req->mp_req); + + rc = qedf_init_mp_req(io_req); + if (rc == FAILED) { + QEDF_ERR(&(qedf->dbg_ctx), "Task mgmt MP request init " + "failed\n"); + kref_put(&io_req->refcount, qedf_release_cmd); + goto reset_tmf_err; + } + + /* Set TM flags */ + io_req->io_req_flags = 0; + tm_req->tm_flags = tm_flags; + + /* Default is to return a SCSI command when an error occurs */ + io_req->return_scsi_cmd_on_abts = true; + + /* Fill FCP_CMND */ + qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf); + fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf; + memset(fcp_cmnd->fc_cdb, 0, FCP_CMND_LEN); + fcp_cmnd->fc_dl = 0; + + /* Fill FC header */ + fc_hdr = &(tm_req->req_fc_hdr); + sid = fcport->sid; + did = fcport->rdata->ids.port_id; + __fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, sid, did, + FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | + FC_FC_SEQ_INIT, 0); + /* Obtain exchange id */ + xid = io_req->xid; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "TMF io_req xid = " + "0x%x\n", xid); + + /* Initialize task context for this IO request */ + task = qedf_get_task_mem(&qedf->tasks, xid); + qedf_init_mp_task(io_req, task); + + init_completion(&io_req->tm_done); + + /* Obtain free SQ entry */ + spin_lock_irqsave(&fcport->rport_lock, flags); + qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_MIDPATH, 0); + + /* Ring doorbell */ + qedf_ring_doorbell(fcport); + spin_unlock_irqrestore(&fcport->rport_lock, flags); + + tmo = wait_for_completion_timeout(&io_req->tm_done, + QEDF_TM_TIMEOUT * HZ); + + if (!tmo) { + rc = FAILED; + QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n"); + } else { + /* Check TMF response code */ + if (io_req->fcp_rsp_code == 0) + rc = SUCCESS; + else + rc = FAILED; + } + + if (tm_flags == FCP_TMF_LUN_RESET) + qedf_flush_active_ios(fcport, (int)sc_cmd->device->lun); + else + qedf_flush_active_ios(fcport, -1); + + kref_put(&io_req->refcount, qedf_release_cmd); + + if (rc != SUCCESS) { + QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n"); + rc = FAILED; + } else { + QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n"); + rc = SUCCESS; + } +reset_tmf_err: + return rc; +} + +int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags) +{ + struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); + struct fc_rport_libfc_priv *rp = rport->dd_data; + struct qedf_rport *fcport = (struct qedf_rport *)&rp[1]; + struct qedf_ctx *qedf; + struct fc_lport *lport; + int rc = SUCCESS; + int rval; + + rval = fc_remote_port_chkready(rport); + + if (rval) { + QEDF_ERR(NULL, "device_reset rport not ready\n"); + rc = FAILED; + goto tmf_err; + } + + if (fcport == NULL) { + QEDF_ERR(NULL, "device_reset: rport is NULL\n"); + rc = FAILED; + goto tmf_err; + } + + qedf = fcport->qedf; + lport = qedf->lport; + + if (test_bit(QEDF_UNLOADING, &qedf->flags) || + test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) { + rc = SUCCESS; + goto tmf_err; + } + + if (lport->state != LPORT_ST_READY || 
!(lport->link_up)) { + QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n"); + rc = FAILED; + goto tmf_err; + } + + rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags); + +tmf_err: + return rc; +} + +void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, + struct qedf_ioreq *io_req) +{ + struct fcoe_cqe_rsp_info *fcp_rsp; + struct fcoe_cqe_midpath_info *mp_info; + + + /* Get TMF response length from CQE */ + mp_info = &cqe->cqe_info.midpath_info; + io_req->mp_req.resp_len = mp_info->data_placement_size; + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, + "Response len is %d.\n", io_req->mp_req.resp_len); + + fcp_rsp = &cqe->cqe_info.rsp_info; + qedf_parse_fcp_rsp(io_req, fcp_rsp); + + io_req->sc_cmd = NULL; + complete(&io_req->tm_done); +} + +void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx, + struct fcoe_cqe *cqe) +{ + unsigned long flags; + uint16_t tmp; + uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len; + u32 payload_len, crc; + struct fc_frame_header *fh; + struct fc_frame *fp; + struct qedf_io_work *io_work; + u32 bdq_idx; + void *bdq_addr; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL, + "address.hi=%x address.lo=%x opaque_data.hi=%x " + "opaque_data.lo=%x bdq_prod_idx=%u len=%u.\n", + le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.hi), + le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.lo), + le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.hi), + le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo), + qedf->bdq_prod_idx, pktlen); + + bdq_idx = le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo); + if (bdq_idx >= QEDF_BDQ_SIZE) { + QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n", + bdq_idx); + goto increment_prod; + } + + bdq_addr = qedf->bdq[bdq_idx].buf_addr; + if (!bdq_addr) { + QEDF_ERR(&(qedf->dbg_ctx), "bdq_addr is NULL, dropping " + "unsolicited packet.\n"); + goto increment_prod; + } + + if (qedf_dump_frames) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL, + "BDQ frame is at addr=%p.\n", bdq_addr); + print_hex_dump(KERN_WARNING, "bdq ", DUMP_PREFIX_OFFSET, 16, 1, + (void *)bdq_addr, pktlen, false); + } + + /* Allocate frame */ + payload_len = pktlen - sizeof(struct fc_frame_header); + fp = fc_frame_alloc(qedf->lport, payload_len); + if (!fp) { + QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate fp.\n"); + goto increment_prod; + } + + /* Copy data from BDQ buffer into fc_frame struct */ + fh = (struct fc_frame_header *)fc_frame_header_get(fp); + memcpy(fh, (void *)bdq_addr, pktlen); + + /* Initialize the frame so libfc sees it as a valid frame */ + crc = fcoe_fc_crc(fp); + fc_frame_init(fp); + fr_dev(fp) = qedf->lport; + fr_sof(fp) = FC_SOF_I3; + fr_eof(fp) = FC_EOF_T; + fr_crc(fp) = cpu_to_le32(~crc); + + /* + * We need to return the frame back up to libfc in a non-atomic + * context + */ + io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC); + if (!io_work) { + QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate " + "work for I/O completion.\n"); + fc_frame_free(fp); + goto increment_prod; + } + memset(io_work, 0, sizeof(struct qedf_io_work)); + + INIT_WORK(&io_work->work, qedf_fp_io_handler); + + /* Copy contents of CQE for deferred processing */ + memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe)); + + io_work->qedf = qedf; + io_work->fp = fp; + + queue_work_on(smp_processor_id(), qedf_io_wq, &io_work->work); +increment_prod: + spin_lock_irqsave(&qedf->hba_lock, flags); + + /* Increment producer to let f/w know we've handled the frame */ + qedf->bdq_prod_idx++; + + /* Producer index wraps at uint16_t 
boundary */ + if (qedf->bdq_prod_idx == 0xffff) + qedf->bdq_prod_idx = 0; + + writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod); + tmp = readw(qedf->bdq_primary_prod); + writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod); + tmp = readw(qedf->bdq_secondary_prod); + + spin_unlock_irqrestore(&qedf->hba_lock, flags); +} diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c new file mode 100644 index 00000000000000..8e2a160490e66a --- /dev/null +++ b/drivers/scsi/qedf/qedf_main.c @@ -0,0 +1,3336 @@ +/* + * QLogic FCoE Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "qedf.h" + +const struct qed_fcoe_ops *qed_ops; + +static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id); +static void qedf_remove(struct pci_dev *pdev); + +extern struct qedf_debugfs_ops qedf_debugfs_ops; +extern struct file_operations qedf_dbg_fops; + +/* + * Driver module parameters. + */ +static unsigned int qedf_dev_loss_tmo = 60; +module_param_named(dev_loss_tmo, qedf_dev_loss_tmo, int, S_IRUGO); +MODULE_PARM_DESC(dev_loss_tmo, " dev_loss_tmo setting for attached " + "remote ports (default 60)"); + +uint qedf_debug = QEDF_LOG_INFO; +module_param_named(debug, qedf_debug, uint, S_IRUGO); +MODULE_PARM_DESC(qedf_debug, " Debug mask. Pass '1' to enable default debugging" + " mask"); + +static uint qedf_fipvlan_retries = 30; +module_param_named(fipvlan_retries, qedf_fipvlan_retries, int, S_IRUGO); +MODULE_PARM_DESC(fipvlan_retries, " Number of FIP VLAN requests to attempt " + "before giving up (default 30)"); + +static uint qedf_fallback_vlan = QEDF_FALLBACK_VLAN; +module_param_named(fallback_vlan, qedf_fallback_vlan, int, S_IRUGO); +MODULE_PARM_DESC(fallback_vlan, " VLAN ID to try if fip vlan request fails " + "(default 1002)."); + +static uint qedf_default_prio = QEDF_DEFAULT_PRIO; +module_param_named(default_prio, qedf_default_prio, int, S_IRUGO); +MODULE_PARM_DESC(default_prio, " Default 802.1q priority for FIP and FCoE" + " traffic (default 3)."); + +uint qedf_dump_frames; +module_param_named(dump_frames, qedf_dump_frames, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(dump_frames, " Print the skb data of FIP and FCoE frames " + "(default off)"); + +static uint qedf_queue_depth; +module_param_named(queue_depth, qedf_queue_depth, int, S_IRUGO); +MODULE_PARM_DESC(queue_depth, " Sets the queue depth for all LUNs discovered " + "by the qedf driver. Default is 0 (use OS default)."); + +uint qedf_io_tracing; +module_param_named(io_tracing, qedf_io_tracing, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(io_tracing, " Enable logging of SCSI requests/completions " + "into trace buffer. (default off)."); + +static uint qedf_max_lun = MAX_FIBRE_LUNS; +module_param_named(max_lun, qedf_max_lun, int, S_IRUGO); +MODULE_PARM_DESC(max_lun, " Sets the maximum luns per target that the driver " + "supports. 
(default 0xffffffff)"); + +uint qedf_link_down_tmo; +module_param_named(link_down_tmo, qedf_link_down_tmo, int, S_IRUGO); +MODULE_PARM_DESC(link_down_tmo, " Delays informing the fcoe transport that the " + "link is down by N seconds."); + +bool qedf_retry_delay; +module_param_named(retry_delay, qedf_retry_delay, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(retry_delay, " Enable/disable handling of FCP_RSP IU retry " + "delay handling (default off)."); + +static uint qedf_dp_module; +module_param_named(dp_module, qedf_dp_module, uint, S_IRUGO); +MODULE_PARM_DESC(dp_module, " bit flags control for verbose printk passed " + "qed module during probe."); + +static uint qedf_dp_level; +module_param_named(dp_level, qedf_dp_level, uint, S_IRUGO); +MODULE_PARM_DESC(dp_level, " printk verbosity control passed to qed module " + "during probe (0-3: 0 more verbose)."); + +struct workqueue_struct *qedf_io_wq; + +static struct fcoe_percpu_s qedf_global; +static DEFINE_SPINLOCK(qedf_global_lock); + +static struct kmem_cache *qedf_io_work_cache; + +void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id) +{ + qedf->vlan_id = vlan_id; + qedf->vlan_id |= qedf_default_prio << VLAN_PRIO_SHIFT; + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Setting vlan_id=%04x " + "prio=%d.\n", vlan_id, qedf_default_prio); +} + +/* Returns true if we have a valid vlan, false otherwise */ +static bool qedf_initiate_fipvlan_req(struct qedf_ctx *qedf) +{ + int rc; + + if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) { + QEDF_ERR(&(qedf->dbg_ctx), "Link not up.\n"); + return false; + } + + while (qedf->fipvlan_retries--) { + if (qedf->vlan_id > 0) + return true; + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "Retry %d.\n", qedf->fipvlan_retries); + init_completion(&qedf->fipvlan_compl); + qedf_fcoe_send_vlan_req(qedf); + rc = wait_for_completion_timeout(&qedf->fipvlan_compl, + 1 * HZ); + if (rc > 0) { + fcoe_ctlr_link_up(&qedf->ctlr); + return true; + } + } + + return false; +} + +static void qedf_handle_link_update(struct work_struct *work) +{ + struct qedf_ctx *qedf = + container_of(work, struct qedf_ctx, link_update.work); + int rc; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Entered.\n"); + + if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) { + rc = qedf_initiate_fipvlan_req(qedf); + if (rc) + return; + /* + * If we get here then we never received a repsonse to our + * fip vlan request so set the vlan_id to the default and + * tell FCoE that the link is up + */ + QEDF_WARN(&(qedf->dbg_ctx), "Did not receive FIP VLAN " + "response, falling back to default VLAN %d.\n", + qedf_fallback_vlan); + qedf_set_vlan_id(qedf, QEDF_FALLBACK_VLAN); + + /* + * Zero out data_src_addr so we'll update it with the new + * lport port_id + */ + eth_zero_addr(qedf->data_src_addr); + fcoe_ctlr_link_up(&qedf->ctlr); + } else if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) { + /* + * If we hit here and link_down_tmo_valid is still 1 it means + * that link_down_tmo timed out so set it to 0 to make sure any + * other readers have accurate state. 
+ */ + atomic_set(&qedf->link_down_tmo_valid, 0); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "Calling fcoe_ctlr_link_down().\n"); + fcoe_ctlr_link_down(&qedf->ctlr); + qedf_wait_for_upload(qedf); + /* Reset the number of FIP VLAN retries */ + qedf->fipvlan_retries = qedf_fipvlan_retries; + } +} + +static void qedf_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, + void *arg) +{ + struct fc_exch *exch = fc_seq_exch(seq); + struct fc_lport *lport = exch->lp; + struct qedf_ctx *qedf = lport_priv(lport); + + if (!qedf) { + QEDF_ERR(NULL, "qedf is NULL.\n"); + return; + } + + /* + * If ERR_PTR is set then don't try to stat anything as it will cause + * a crash when we access fp. + */ + if (IS_ERR(fp)) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "fp has IS_ERR() set.\n"); + goto skip_stat; + } + + /* Log stats for FLOGI reject */ + if (fc_frame_payload_op(fp) == ELS_LS_RJT) + qedf->flogi_failed++; + + /* Complete flogi_compl so we can proceed to sending ADISCs */ + complete(&qedf->flogi_compl); + +skip_stat: + /* Report response to libfc */ + fc_lport_flogi_resp(seq, fp, lport); +} + +static struct fc_seq *qedf_elsct_send(struct fc_lport *lport, u32 did, + struct fc_frame *fp, unsigned int op, + void (*resp)(struct fc_seq *, + struct fc_frame *, + void *), + void *arg, u32 timeout) +{ + struct qedf_ctx *qedf = lport_priv(lport); + + /* + * Intercept FLOGI for statistic purposes. Note we use the resp + * callback to tell if this is really a flogi. + */ + if (resp == fc_lport_flogi_resp) { + qedf->flogi_cnt++; + return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp, + arg, timeout); + } + + return fc_elsct_send(lport, did, fp, op, resp, arg, timeout); +} + +int qedf_send_flogi(struct qedf_ctx *qedf) +{ + struct fc_lport *lport; + struct fc_frame *fp; + + lport = qedf->lport; + + if (!lport->tt.elsct_send) + return -EINVAL; + + fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi)); + if (!fp) { + QEDF_ERR(&(qedf->dbg_ctx), "fc_frame_alloc failed.\n"); + return -ENOMEM; + } + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, + "Sending FLOGI to reestablish session with switch.\n"); + lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, + ELS_FLOGI, qedf_flogi_resp, lport, lport->r_a_tov); + + init_completion(&qedf->flogi_compl); + + return 0; +} + +struct qedf_tmp_rdata_item { + struct fc_rport_priv *rdata; + struct list_head list; +}; + +/* + * This function is called if link_down_tmo is in use. If we get a link up and + * link_down_tmo has not expired then use just FLOGI/ADISC to recover our + * sessions with targets. Otherwise, just call fcoe_ctlr_link_up(). + */ +static void qedf_link_recovery(struct work_struct *work) +{ + struct qedf_ctx *qedf = + container_of(work, struct qedf_ctx, link_recovery.work); + struct qedf_rport *fcport; + struct fc_rport_priv *rdata; + struct qedf_tmp_rdata_item *rdata_item, *tmp_rdata_item; + bool rc; + int retries = 30; + int rval, i; + struct list_head rdata_login_list; + + INIT_LIST_HEAD(&rdata_login_list); + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "Link down tmo did not expire.\n"); + + /* + * Essentially reset the fcoe_ctlr here without affecting the state + * of the libfc structs. 
+	 */
+	qedf->ctlr.state = FIP_ST_LINK_WAIT;
+	fcoe_ctlr_link_down(&qedf->ctlr);
+
+	/*
+	 * Bring the link up before we send the fipvlan request so libfcoe
+	 * can select a new fcf in parallel
+	 */
+	fcoe_ctlr_link_up(&qedf->ctlr);
+
+	/* Since the link went down and up, verify which VLAN we're on */
+	qedf->fipvlan_retries = qedf_fipvlan_retries;
+	rc = qedf_initiate_fipvlan_req(qedf);
+	if (!rc)
+		return;
+
+	/*
+	 * We need to wait for an FCF to be selected due to the
+	 * fcoe_ctlr_link_up; otherwise the FLOGI will be rejected.
+	 */
+	while (retries > 0) {
+		if (qedf->ctlr.sel_fcf) {
+			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+			    "FCF reselected, proceeding with FLOGI.\n");
+			break;
+		}
+		msleep(500);
+		retries--;
+	}
+
+	if (retries < 1) {
+		QEDF_ERR(&(qedf->dbg_ctx), "Exhausted retries waiting for "
+		    "FCF selection.\n");
+		return;
+	}
+
+	rval = qedf_send_flogi(qedf);
+	if (rval)
+		return;
+
+	/* Wait for FLOGI completion before proceeding with sending ADISCs */
+	i = wait_for_completion_timeout(&qedf->flogi_compl,
+	    qedf->lport->r_a_tov);
+	if (i == 0) {
+		QEDF_ERR(&(qedf->dbg_ctx), "FLOGI timed out.\n");
+		return;
+	}
+
+	/*
+	 * Call lport->tt.rport_login which will cause libfc to send an
+	 * ADISC since the rport is in state ready.
+	 */
+	rcu_read_lock();
+	list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
+		rdata = fcport->rdata;
+		if (rdata == NULL)
+			continue;
+		rdata_item = kzalloc(sizeof(struct qedf_tmp_rdata_item),
+		    GFP_ATOMIC);
+		if (!rdata_item)
+			continue;
+		if (kref_get_unless_zero(&rdata->kref)) {
+			rdata_item->rdata = rdata;
+			list_add(&rdata_item->list, &rdata_login_list);
+		} else
+			kfree(rdata_item);
+	}
+	rcu_read_unlock();
+	/*
+	 * Do the fc_rport_login outside of the rcu lock so we don't take a
+	 * mutex in an atomic context.
+	 */
+	list_for_each_entry_safe(rdata_item, tmp_rdata_item, &rdata_login_list,
+	    list) {
+		list_del(&rdata_item->list);
+		fc_rport_login(rdata_item->rdata);
+		kref_put(&rdata_item->rdata->kref, fc_rport_destroy);
+		kfree(rdata_item);
+	}
+}
+
+static void qedf_update_link_speed(struct qedf_ctx *qedf,
+	struct qed_link_output *link)
+{
+	struct fc_lport *lport = qedf->lport;
+
+	lport->link_speed = FC_PORTSPEED_UNKNOWN;
+	lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN;
+
+	/* Set fc_host link speed */
+	switch (link->speed) {
+	case 10000:
+		lport->link_speed = FC_PORTSPEED_10GBIT;
+		break;
+	case 25000:
+		lport->link_speed = FC_PORTSPEED_25GBIT;
+		break;
+	case 40000:
+		lport->link_speed = FC_PORTSPEED_40GBIT;
+		break;
+	case 50000:
+		lport->link_speed = FC_PORTSPEED_50GBIT;
+		break;
+	case 100000:
+		lport->link_speed = FC_PORTSPEED_100GBIT;
+		break;
+	default:
+		lport->link_speed = FC_PORTSPEED_UNKNOWN;
+		break;
+	}
+
+	/*
+	 * Set supported link speed by querying the supported
+	 * capabilities of the link.
+ */ + if (link->supported_caps & SUPPORTED_10000baseKR_Full) + lport->link_supported_speeds |= FC_PORTSPEED_10GBIT; + if (link->supported_caps & SUPPORTED_25000baseKR_Full) + lport->link_supported_speeds |= FC_PORTSPEED_25GBIT; + if (link->supported_caps & SUPPORTED_40000baseLR4_Full) + lport->link_supported_speeds |= FC_PORTSPEED_40GBIT; + if (link->supported_caps & SUPPORTED_50000baseKR2_Full) + lport->link_supported_speeds |= FC_PORTSPEED_50GBIT; + if (link->supported_caps & SUPPORTED_100000baseKR4_Full) + lport->link_supported_speeds |= FC_PORTSPEED_100GBIT; + fc_host_supported_speeds(lport->host) = lport->link_supported_speeds; +} + +static void qedf_link_update(void *dev, struct qed_link_output *link) +{ + struct qedf_ctx *qedf = (struct qedf_ctx *)dev; + + if (link->link_up) { + QEDF_ERR(&(qedf->dbg_ctx), "LINK UP (%d GB/s).\n", + link->speed / 1000); + + /* Cancel any pending link down work */ + cancel_delayed_work(&qedf->link_update); + + atomic_set(&qedf->link_state, QEDF_LINK_UP); + qedf_update_link_speed(qedf, link); + + if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE) { + QEDF_ERR(&(qedf->dbg_ctx), "DCBx done.\n"); + if (atomic_read(&qedf->link_down_tmo_valid) > 0) + queue_delayed_work(qedf->link_update_wq, + &qedf->link_recovery, 0); + else + queue_delayed_work(qedf->link_update_wq, + &qedf->link_update, 0); + atomic_set(&qedf->link_down_tmo_valid, 0); + } + + } else { + QEDF_ERR(&(qedf->dbg_ctx), "LINK DOWN.\n"); + + atomic_set(&qedf->link_state, QEDF_LINK_DOWN); + atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING); + /* + * Flag that we're waiting for the link to come back up before + * informing the fcoe layer of the event. + */ + if (qedf_link_down_tmo > 0) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "Starting link down tmo.\n"); + atomic_set(&qedf->link_down_tmo_valid, 1); + } + qedf->vlan_id = 0; + qedf_update_link_speed(qedf, link); + queue_delayed_work(qedf->link_update_wq, &qedf->link_update, + qedf_link_down_tmo * HZ); + } +} + + +static void qedf_dcbx_handler(void *dev, struct qed_dcbx_get *get, u32 mib_type) +{ + struct qedf_ctx *qedf = (struct qedf_ctx *)dev; + + QEDF_ERR(&(qedf->dbg_ctx), "DCBx event valid=%d enabled=%d fcoe " + "prio=%d.\n", get->operational.valid, get->operational.enabled, + get->operational.app_prio.fcoe); + + if (get->operational.enabled && get->operational.valid) { + /* If DCBX was already negotiated on link up then just exit */ + if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "DCBX already set on link up.\n"); + return; + } + + atomic_set(&qedf->dcbx, QEDF_DCBX_DONE); + + if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) { + if (atomic_read(&qedf->link_down_tmo_valid) > 0) + queue_delayed_work(qedf->link_update_wq, + &qedf->link_recovery, 0); + else + queue_delayed_work(qedf->link_update_wq, + &qedf->link_update, 0); + atomic_set(&qedf->link_down_tmo_valid, 0); + } + } + +} + +static u32 qedf_get_login_failures(void *cookie) +{ + struct qedf_ctx *qedf; + + qedf = (struct qedf_ctx *)cookie; + return qedf->flogi_failed; +} + +static struct qed_fcoe_cb_ops qedf_cb_ops = { + { + .link_update = qedf_link_update, + .dcbx_aen = qedf_dcbx_handler, + } +}; + +/* + * Various transport templates. 
+ */ + +static struct scsi_transport_template *qedf_fc_transport_template; +static struct scsi_transport_template *qedf_fc_vport_transport_template; + +/* + * SCSI EH handlers + */ +static int qedf_eh_abort(struct scsi_cmnd *sc_cmd) +{ + struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); + struct fc_rport_libfc_priv *rp = rport->dd_data; + struct qedf_rport *fcport; + struct fc_lport *lport; + struct qedf_ctx *qedf; + struct qedf_ioreq *io_req; + int rc = FAILED; + int rval; + + if (fc_remote_port_chkready(rport)) { + QEDF_ERR(NULL, "rport not ready\n"); + goto out; + } + + lport = shost_priv(sc_cmd->device->host); + qedf = (struct qedf_ctx *)lport_priv(lport); + + if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) { + QEDF_ERR(&(qedf->dbg_ctx), "link not ready.\n"); + goto out; + } + + fcport = (struct qedf_rport *)&rp[1]; + + io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr; + if (!io_req) { + QEDF_ERR(&(qedf->dbg_ctx), "io_req is NULL.\n"); + rc = SUCCESS; + goto out; + } + + if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) || + test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) || + test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) { + QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in " + "cleanup or abort processing or already " + "completed.\n", io_req->xid); + rc = SUCCESS; + goto out; + } + + QEDF_ERR(&(qedf->dbg_ctx), "Aborting io_req sc_cmd=%p xid=0x%x " + "fp_idx=%d.\n", sc_cmd, io_req->xid, io_req->fp_idx); + + if (qedf->stop_io_on_error) { + qedf_stop_all_io(qedf); + rc = SUCCESS; + goto out; + } + + init_completion(&io_req->abts_done); + rval = qedf_initiate_abts(io_req, true); + if (rval) { + QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n"); + goto out; + } + + wait_for_completion(&io_req->abts_done); + + if (io_req->event == QEDF_IOREQ_EV_ABORT_SUCCESS || + io_req->event == QEDF_IOREQ_EV_ABORT_FAILED || + io_req->event == QEDF_IOREQ_EV_CLEANUP_SUCCESS) { + /* + * If we get a reponse to the abort this is success from + * the perspective that all references to the command have + * been removed from the driver and firmware + */ + rc = SUCCESS; + } else { + /* If the abort and cleanup failed then return a failure */ + rc = FAILED; + } + + if (rc == SUCCESS) + QEDF_ERR(&(qedf->dbg_ctx), "ABTS succeeded, xid=0x%x.\n", + io_req->xid); + else + QEDF_ERR(&(qedf->dbg_ctx), "ABTS failed, xid=0x%x.\n", + io_req->xid); + +out: + return rc; +} + +static int qedf_eh_target_reset(struct scsi_cmnd *sc_cmd) +{ + QEDF_ERR(NULL, "TARGET RESET Issued..."); + return qedf_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET); +} + +static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd) +{ + QEDF_ERR(NULL, "LUN RESET Issued...\n"); + return qedf_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET); +} + +void qedf_wait_for_upload(struct qedf_ctx *qedf) +{ + while (1) { + if (atomic_read(&qedf->num_offloads)) + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "Waiting for all uploads to complete.\n"); + else + break; + msleep(500); + } +} + +/* Reset the host by gracefully logging out and then logging back in */ +static int qedf_eh_host_reset(struct scsi_cmnd *sc_cmd) +{ + struct fc_lport *lport; + struct qedf_ctx *qedf; + + lport = shost_priv(sc_cmd->device->host); + + if (lport->vport) { + QEDF_ERR(NULL, "Cannot issue host reset on NPIV port.\n"); + return SUCCESS; + } + + qedf = (struct qedf_ctx *)lport_priv(lport); + + if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN || + test_bit(QEDF_UNLOADING, &qedf->flags) || + test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) + return FAILED; + + 
QEDF_ERR(&(qedf->dbg_ctx), "HOST RESET Issued..."); + + /* For host reset, essentially do a soft link up/down */ + atomic_set(&qedf->link_state, QEDF_LINK_DOWN); + atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING); + queue_delayed_work(qedf->link_update_wq, &qedf->link_update, + 0); + qedf_wait_for_upload(qedf); + atomic_set(&qedf->link_state, QEDF_LINK_UP); + qedf->vlan_id = 0; + queue_delayed_work(qedf->link_update_wq, &qedf->link_update, + 0); + + return SUCCESS; +} + +static int qedf_slave_configure(struct scsi_device *sdev) +{ + if (qedf_queue_depth) { + scsi_change_queue_depth(sdev, qedf_queue_depth); + } + + return 0; +} + +static struct scsi_host_template qedf_host_template = { + .module = THIS_MODULE, + .name = QEDF_MODULE_NAME, + .this_id = -1, + .cmd_per_lun = 3, + .use_clustering = ENABLE_CLUSTERING, + .max_sectors = 0xffff, + .queuecommand = qedf_queuecommand, + .shost_attrs = qedf_host_attrs, + .eh_abort_handler = qedf_eh_abort, + .eh_device_reset_handler = qedf_eh_device_reset, /* lun reset */ + .eh_target_reset_handler = qedf_eh_target_reset, /* target reset */ + .eh_host_reset_handler = qedf_eh_host_reset, + .slave_configure = qedf_slave_configure, + .dma_boundary = QED_HW_DMA_BOUNDARY, + .sg_tablesize = QEDF_MAX_BDS_PER_CMD, + .can_queue = FCOE_PARAMS_NUM_TASKS, +}; + +static int qedf_get_paged_crc_eof(struct sk_buff *skb, int tlen) +{ + int rc; + + spin_lock(&qedf_global_lock); + rc = fcoe_get_paged_crc_eof(skb, tlen, &qedf_global); + spin_unlock(&qedf_global_lock); + + return rc; +} + +static struct qedf_rport *qedf_fcport_lookup(struct qedf_ctx *qedf, u32 port_id) +{ + struct qedf_rport *fcport; + struct fc_rport_priv *rdata; + + rcu_read_lock(); + list_for_each_entry_rcu(fcport, &qedf->fcports, peers) { + rdata = fcport->rdata; + if (rdata == NULL) + continue; + if (rdata->ids.port_id == port_id) { + rcu_read_unlock(); + return fcport; + } + } + rcu_read_unlock(); + + /* Return NULL to caller to let them know fcport was not found */ + return NULL; +} + +/* Transmits an ELS frame over an offloaded session */ +static int qedf_xmit_l2_frame(struct qedf_rport *fcport, struct fc_frame *fp) +{ + struct fc_frame_header *fh; + int rc = 0; + + fh = fc_frame_header_get(fp); + if ((fh->fh_type == FC_TYPE_ELS) && + (fh->fh_r_ctl == FC_RCTL_ELS_REQ)) { + switch (fc_frame_payload_op(fp)) { + case ELS_ADISC: + qedf_send_adisc(fcport, fp); + rc = 1; + break; + } + } + + return rc; +} + +/** + * qedf_xmit - qedf FCoE frame transmit function + * + */ +static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp) +{ + struct fc_lport *base_lport; + struct qedf_ctx *qedf; + struct ethhdr *eh; + struct fcoe_crc_eof *cp; + struct sk_buff *skb; + struct fc_frame_header *fh; + struct fcoe_hdr *hp; + u8 sof, eof; + u32 crc; + unsigned int hlen, tlen, elen; + int wlen; + struct fc_stats *stats; + struct fc_lport *tmp_lport; + struct fc_lport *vn_port = NULL; + struct qedf_rport *fcport; + int rc; + u16 vlan_tci = 0; + + qedf = (struct qedf_ctx *)lport_priv(lport); + + fh = fc_frame_header_get(fp); + skb = fp_skb(fp); + + /* Filter out traffic to other NPIV ports on the same host */ + if (lport->vport) + base_lport = shost_priv(vport_to_shost(lport->vport)); + else + base_lport = lport; + + /* Flag if the destination is the base port */ + if (base_lport->port_id == ntoh24(fh->fh_d_id)) { + vn_port = base_lport; + } else { + /* Got through the list of vports attached to the base_lport + * and see if we have a match with the destination address. 
+ */ + list_for_each_entry(tmp_lport, &base_lport->vports, list) { + if (tmp_lport->port_id == ntoh24(fh->fh_d_id)) { + vn_port = tmp_lport; + break; + } + } + } + if (vn_port && ntoh24(fh->fh_d_id) != FC_FID_FLOGI) { + struct fc_rport_priv *rdata = NULL; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, + "Dropping FCoE frame to %06x.\n", ntoh24(fh->fh_d_id)); + kfree_skb(skb); + rdata = fc_rport_lookup(lport, ntoh24(fh->fh_d_id)); + if (rdata) + rdata->retries = lport->max_rport_retry_count; + return -EINVAL; + } + /* End NPIV filtering */ + + if (!qedf->ctlr.sel_fcf) { + kfree_skb(skb); + return 0; + } + + if (!test_bit(QEDF_LL2_STARTED, &qedf->flags)) { + QEDF_WARN(&(qedf->dbg_ctx), "LL2 not started\n"); + kfree_skb(skb); + return 0; + } + + if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) { + QEDF_WARN(&(qedf->dbg_ctx), "qedf link down\n"); + kfree_skb(skb); + return 0; + } + + if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) { + if (fcoe_ctlr_els_send(&qedf->ctlr, lport, skb)) + return 0; + } + + /* Check to see if this needs to be sent on an offloaded session */ + fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id)); + + if (fcport && test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { + rc = qedf_xmit_l2_frame(fcport, fp); + /* + * If the frame was successfully sent over the middle path + * then do not try to also send it over the LL2 path + */ + if (rc) + return 0; + } + + sof = fr_sof(fp); + eof = fr_eof(fp); + + elen = sizeof(struct ethhdr); + hlen = sizeof(struct fcoe_hdr); + tlen = sizeof(struct fcoe_crc_eof); + wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE; + + skb->ip_summed = CHECKSUM_NONE; + crc = fcoe_fc_crc(fp); + + /* copy port crc and eof to the skb buff */ + if (skb_is_nonlinear(skb)) { + skb_frag_t *frag; + + if (qedf_get_paged_crc_eof(skb, tlen)) { + kfree_skb(skb); + return -ENOMEM; + } + frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; + cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset; + } else { + cp = (struct fcoe_crc_eof *)skb_put(skb, tlen); + } + + memset(cp, 0, sizeof(*cp)); + cp->fcoe_eof = eof; + cp->fcoe_crc32 = cpu_to_le32(~crc); + if (skb_is_nonlinear(skb)) { + kunmap_atomic(cp); + cp = NULL; + } + + + /* adjust skb network/transport offsets to match mac/fcoe/port */ + skb_push(skb, elen + hlen); + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + skb->mac_len = elen; + skb->protocol = htons(ETH_P_FCOE); + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), qedf->vlan_id); + + /* fill up mac and fcoe headers */ + eh = eth_hdr(skb); + eh->h_proto = htons(ETH_P_FCOE); + if (qedf->ctlr.map_dest) + fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id); + else + /* insert GW address */ + ether_addr_copy(eh->h_dest, qedf->ctlr.dest_addr); + + /* Set the source MAC address */ + fc_fcoe_set_mac(eh->h_source, fh->fh_s_id); + + hp = (struct fcoe_hdr *)(eh + 1); + memset(hp, 0, sizeof(*hp)); + if (FC_FCOE_VER) + FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER); + hp->fcoe_sof = sof; + + /*update tx stats */ + stats = per_cpu_ptr(lport->stats, get_cpu()); + stats->TxFrames++; + stats->TxWords += wlen; + put_cpu(); + + /* Get VLAN ID from skb for printing purposes */ + __vlan_hwaccel_get_tag(skb, &vlan_tci); + + /* send down to lld */ + fr_dev(fp) = lport; + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame send: " + "src=%06x dest=%06x r_ctl=%x type=%x vlan=%04x.\n", + ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl, fh->fh_type, + vlan_tci); + if (qedf_dump_frames) + print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16, + 
1, skb->data, skb->len, false); + qed_ops->ll2->start_xmit(qedf->cdev, skb); + + return 0; +} + +static int qedf_alloc_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport) +{ + int rval = 0; + u32 *pbl; + dma_addr_t page; + int num_pages; + + /* Calculate appropriate queue and PBL sizes */ + fcport->sq_mem_size = SQ_NUM_ENTRIES * sizeof(struct fcoe_wqe); + fcport->sq_mem_size = ALIGN(fcport->sq_mem_size, QEDF_PAGE_SIZE); + fcport->sq_pbl_size = (fcport->sq_mem_size / QEDF_PAGE_SIZE) * + sizeof(void *); + fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE; + + fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size, + &fcport->sq_dma, GFP_KERNEL); + if (!fcport->sq) { + QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send " + "queue.\n"); + rval = 1; + goto out; + } + memset(fcport->sq, 0, fcport->sq_mem_size); + + fcport->sq_pbl = dma_alloc_coherent(&qedf->pdev->dev, + fcport->sq_pbl_size, &fcport->sq_pbl_dma, GFP_KERNEL); + if (!fcport->sq_pbl) { + QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send " + "queue PBL.\n"); + rval = 1; + goto out_free_sq; + } + memset(fcport->sq_pbl, 0, fcport->sq_pbl_size); + + /* Create PBL */ + num_pages = fcport->sq_mem_size / QEDF_PAGE_SIZE; + page = fcport->sq_dma; + pbl = (u32 *)fcport->sq_pbl; + + while (num_pages--) { + *pbl = U64_LO(page); + pbl++; + *pbl = U64_HI(page); + pbl++; + page += QEDF_PAGE_SIZE; + } + + return rval; + +out_free_sq: + dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size, fcport->sq, + fcport->sq_dma); +out: + return rval; +} + +static void qedf_free_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport) +{ + if (fcport->sq_pbl) + dma_free_coherent(&qedf->pdev->dev, fcport->sq_pbl_size, + fcport->sq_pbl, fcport->sq_pbl_dma); + if (fcport->sq) + dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size, + fcport->sq, fcport->sq_dma); +} + +static int qedf_offload_connection(struct qedf_ctx *qedf, + struct qedf_rport *fcport) +{ + struct qed_fcoe_params_offload conn_info; + u32 port_id; + u8 lport_src_id[3]; + int rval; + uint16_t total_sqe = (fcport->sq_mem_size / sizeof(struct fcoe_wqe)); + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offloading connection " + "portid=%06x.\n", fcport->rdata->ids.port_id); + rval = qed_ops->acquire_conn(qedf->cdev, &fcport->handle, + &fcport->fw_cid, &fcport->p_doorbell); + if (rval) { + QEDF_WARN(&(qedf->dbg_ctx), "Could not acquire connection " + "for portid=%06x.\n", fcport->rdata->ids.port_id); + rval = 1; /* For some reason qed returns 0 on failure here */ + goto out; + } + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "portid=%06x " + "fw_cid=%08x handle=%d.\n", fcport->rdata->ids.port_id, + fcport->fw_cid, fcport->handle); + + memset(&conn_info, 0, sizeof(struct qed_fcoe_params_offload)); + + /* Fill in the offload connection info */ + conn_info.sq_pbl_addr = fcport->sq_pbl_dma; + + conn_info.sq_curr_page_addr = (dma_addr_t)(*(u64 *)fcport->sq_pbl); + conn_info.sq_next_page_addr = + (dma_addr_t)(*(u64 *)(fcport->sq_pbl + 8)); + + /* Need to use our FCoE MAC for the offload session */ + port_id = fc_host_port_id(qedf->lport->host); + lport_src_id[2] = (port_id & 0x000000FF); + lport_src_id[1] = (port_id & 0x0000FF00) >> 8; + lport_src_id[0] = (port_id & 0x00FF0000) >> 16; + fc_fcoe_set_mac(conn_info.src_mac, lport_src_id); + + ether_addr_copy(conn_info.dst_mac, qedf->ctlr.dest_addr); + + conn_info.tx_max_fc_pay_len = fcport->rdata->maxframe_size; + conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov / 20; + conn_info.rec_tov_timer_val = 3; /* I think this is what 
E3 was */ + conn_info.rx_max_fc_pay_len = fcport->rdata->maxframe_size; + + /* Set VLAN data */ + conn_info.vlan_tag = qedf->vlan_id << + FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT; + conn_info.vlan_tag |= + qedf_default_prio << FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT; + conn_info.flags |= (FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK << + FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT); + + /* Set host port source id */ + port_id = fc_host_port_id(qedf->lport->host); + fcport->sid = port_id; + conn_info.s_id.addr_hi = (port_id & 0x000000FF); + conn_info.s_id.addr_mid = (port_id & 0x0000FF00) >> 8; + conn_info.s_id.addr_lo = (port_id & 0x00FF0000) >> 16; + + conn_info.max_conc_seqs_c3 = fcport->rdata->max_seq; + + /* Set remote port destination id */ + port_id = fcport->rdata->rport->port_id; + conn_info.d_id.addr_hi = (port_id & 0x000000FF); + conn_info.d_id.addr_mid = (port_id & 0x0000FF00) >> 8; + conn_info.d_id.addr_lo = (port_id & 0x00FF0000) >> 16; + + conn_info.def_q_idx = 0; /* Default index for send queue? */ + + /* Set FC-TAPE specific flags if needed */ + if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, + "Enable CONF, REC for portid=%06x.\n", + fcport->rdata->ids.port_id); + conn_info.flags |= 1 << + FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT; + conn_info.flags |= + ((fcport->rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) << + FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT; + } + + rval = qed_ops->offload_conn(qedf->cdev, fcport->handle, &conn_info); + if (rval) { + QEDF_WARN(&(qedf->dbg_ctx), "Could not offload connection " + "for portid=%06x.\n", fcport->rdata->ids.port_id); + goto out_free_conn; + } else + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offload " + "succeeded portid=%06x total_sqe=%d.\n", + fcport->rdata->ids.port_id, total_sqe); + + spin_lock_init(&fcport->rport_lock); + atomic_set(&fcport->free_sqes, total_sqe); + return 0; +out_free_conn: + qed_ops->release_conn(qedf->cdev, fcport->handle); +out: + return rval; +} + +#define QEDF_TERM_BUFF_SIZE 10 +static void qedf_upload_connection(struct qedf_ctx *qedf, + struct qedf_rport *fcport) +{ + void *term_params; + dma_addr_t term_params_dma; + + /* Term params needs to be a DMA coherent buffer as qed shared the + * physical DMA address with the firmware. The buffer may be used in + * the receive path so we may eventually have to move this. + */ + term_params = dma_alloc_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE, + &term_params_dma, GFP_KERNEL); + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Uploading connection " + "port_id=%06x.\n", fcport->rdata->ids.port_id); + + qed_ops->destroy_conn(qedf->cdev, fcport->handle, term_params_dma); + qed_ops->release_conn(qedf->cdev, fcport->handle); + + dma_free_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE, term_params, + term_params_dma); +} + +static void qedf_cleanup_fcport(struct qedf_ctx *qedf, + struct qedf_rport *fcport) +{ + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Cleaning up portid=%06x.\n", + fcport->rdata->ids.port_id); + + /* Flush any remaining i/o's before we upload the connection */ + qedf_flush_active_ios(fcport, -1); + + if (test_and_clear_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) + qedf_upload_connection(qedf, fcport); + qedf_free_sq(qedf, fcport); + fcport->rdata = NULL; + fcport->qedf = NULL; +} + +/** + * This event_callback is called after successful completion of libfc + * initiated target login. qedf can proceed with initiating the session + * establishment. 
+ */
+static void qedf_rport_event_handler(struct fc_lport *lport,
+				struct fc_rport_priv *rdata,
+				enum fc_rport_event event)
+{
+	struct qedf_ctx *qedf = lport_priv(lport);
+	struct fc_rport *rport = rdata->rport;
+	struct fc_rport_libfc_priv *rp;
+	struct qedf_rport *fcport;
+	u32 port_id;
+	int rval;
+	unsigned long flags;
+
+	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "event = %d, "
+	    "port_id = 0x%x\n", event, rdata->ids.port_id);
+
+	switch (event) {
+	case RPORT_EV_READY:
+		if (!rport) {
+			QEDF_WARN(&(qedf->dbg_ctx), "rport is NULL.\n");
+			break;
+		}
+
+		rp = rport->dd_data;
+		fcport = (struct qedf_rport *)&rp[1];
+		fcport->qedf = qedf;
+
+		if (atomic_read(&qedf->num_offloads) >= QEDF_MAX_SESSIONS) {
+			QEDF_ERR(&(qedf->dbg_ctx), "Not offloading "
+			    "portid=0x%x as max number of offloaded sessions "
+			    "reached.\n", rdata->ids.port_id);
+			return;
+		}
+
+		/*
+		 * Don't try to offload the session again. Can happen when we
+		 * get an ADISC
+		 */
+		if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
+			QEDF_WARN(&(qedf->dbg_ctx), "Session already "
+			    "offloaded, portid=0x%x.\n",
+			    rdata->ids.port_id);
+			return;
+		}
+
+		if (rport->port_id == FC_FID_DIR_SERV) {
+			/*
+			 * qedf_rport structure doesn't exist for
+			 * directory server.
+			 * We should not come here, as lport will
+			 * take care of fabric login
+			 */
+			QEDF_WARN(&(qedf->dbg_ctx), "rport struct does not "
+			    "exist for dir server port_id=%x\n",
+			    rdata->ids.port_id);
+			break;
+		}
+
+		if (rdata->spp_type != FC_TYPE_FCP) {
+			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+			    "Not offloading since spp type isn't FCP\n");
+			break;
+		}
+		if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
+			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+			    "Not FCP target so not offloading\n");
+			break;
+		}
+
+		fcport->rdata = rdata;
+		fcport->rport = rport;
+
+		rval = qedf_alloc_sq(qedf, fcport);
+		if (rval) {
+			qedf_cleanup_fcport(qedf, fcport);
+			break;
+		}
+
+		/* Set device type */
+		if (rdata->flags & FC_RP_FLAGS_RETRY &&
+		    rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET &&
+		    !(rdata->ids.roles & FC_RPORT_ROLE_FCP_INITIATOR)) {
+			fcport->dev_type = QEDF_RPORT_TYPE_TAPE;
+			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+			    "portid=%06x is a TAPE device.\n",
+			    rdata->ids.port_id);
+		} else {
+			fcport->dev_type = QEDF_RPORT_TYPE_DISK;
+		}
+
+		rval = qedf_offload_connection(qedf, fcport);
+		if (rval) {
+			qedf_cleanup_fcport(qedf, fcport);
+			break;
+		}
+
+		/* Add fcport to the qedf_ctx list of offloaded ports */
+		spin_lock_irqsave(&qedf->hba_lock, flags);
+		list_add_rcu(&fcport->peers, &qedf->fcports);
+		spin_unlock_irqrestore(&qedf->hba_lock, flags);
+
+		/*
+		 * Set the session ready bit to let everyone know that this
+		 * connection is ready for I/O
+		 */
+		set_bit(QEDF_RPORT_SESSION_READY, &fcport->flags);
+		atomic_inc(&qedf->num_offloads);
+
+		break;
+	case RPORT_EV_LOGO:
+	case RPORT_EV_FAILED:
+	case RPORT_EV_STOP:
+		port_id = rdata->ids.port_id;
+		if (port_id == FC_FID_DIR_SERV)
+			break;
+
+		if (!rport) {
+			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+			    "port_id=%x - rport not created yet\n", port_id);
+			break;
+		}
+		rp = rport->dd_data;
+		/*
+		 * Perform session upload. Note that rdata->peers is already
+		 * removed from disc->rports list before we get this event.
+ */ + fcport = (struct qedf_rport *)&rp[1]; + + /* Only free this fcport if it is offloaded already */ + if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { + set_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags); + qedf_cleanup_fcport(qedf, fcport); + + /* + * Remove fcport to list of qedf_ctx list of offloaded + * ports + */ + spin_lock_irqsave(&qedf->hba_lock, flags); + list_del_rcu(&fcport->peers); + spin_unlock_irqrestore(&qedf->hba_lock, flags); + + clear_bit(QEDF_RPORT_UPLOADING_CONNECTION, + &fcport->flags); + atomic_dec(&qedf->num_offloads); + } + + break; + + case RPORT_EV_NONE: + break; + } +} + +static void qedf_abort_io(struct fc_lport *lport) +{ + /* NO-OP but need to fill in the template */ +} + +static void qedf_fcp_cleanup(struct fc_lport *lport) +{ + /* + * NO-OP but need to fill in template to prevent a NULL + * function pointer dereference during link down. I/Os + * will be flushed when port is uploaded. + */ +} + +static struct libfc_function_template qedf_lport_template = { + .frame_send = qedf_xmit, + .fcp_abort_io = qedf_abort_io, + .fcp_cleanup = qedf_fcp_cleanup, + .rport_event_callback = qedf_rport_event_handler, + .elsct_send = qedf_elsct_send, +}; + +static void qedf_fcoe_ctlr_setup(struct qedf_ctx *qedf) +{ + fcoe_ctlr_init(&qedf->ctlr, FIP_ST_AUTO); + + qedf->ctlr.send = qedf_fip_send; + qedf->ctlr.update_mac = qedf_update_src_mac; + qedf->ctlr.get_src_addr = qedf_get_src_mac; + ether_addr_copy(qedf->ctlr.ctl_src_addr, qedf->mac); +} + +static int qedf_lport_setup(struct qedf_ctx *qedf) +{ + struct fc_lport *lport = qedf->lport; + + lport->link_up = 0; + lport->max_retry_count = QEDF_FLOGI_RETRY_CNT; + lport->max_rport_retry_count = QEDF_RPORT_RETRY_CNT; + lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | + FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL); + lport->boot_time = jiffies; + lport->e_d_tov = 2 * 1000; + lport->r_a_tov = 10 * 1000; + + /* Set NPIV support */ + lport->does_npiv = 1; + fc_host_max_npiv_vports(lport->host) = QEDF_MAX_NPIV; + + fc_set_wwnn(lport, qedf->wwnn); + fc_set_wwpn(lport, qedf->wwpn); + + fcoe_libfc_config(lport, &qedf->ctlr, &qedf_lport_template, 0); + + /* Allocate the exchange manager */ + fc_exch_mgr_alloc(lport, FC_CLASS_3, qedf->max_scsi_xid + 1, + qedf->max_els_xid, NULL); + + if (fc_lport_init_stats(lport)) + return -ENOMEM; + + /* Finish lport config */ + fc_lport_config(lport); + + /* Set max frame size */ + fc_set_mfs(lport, QEDF_MFS); + fc_host_maxframe_size(lport->host) = lport->mfs; + + /* Set default dev_loss_tmo based on module parameter */ + fc_host_dev_loss_tmo(lport->host) = qedf_dev_loss_tmo; + + /* Set symbolic node name */ + snprintf(fc_host_symbolic_name(lport->host), 256, + "QLogic %s v%s", QEDF_MODULE_NAME, QEDF_VERSION); + + return 0; +} + +/* + * NPIV functions + */ + +static int qedf_vport_libfc_config(struct fc_vport *vport, + struct fc_lport *lport) +{ + lport->link_up = 0; + lport->qfull = 0; + lport->max_retry_count = QEDF_FLOGI_RETRY_CNT; + lport->max_rport_retry_count = QEDF_RPORT_RETRY_CNT; + lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | + FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL); + lport->boot_time = jiffies; + lport->e_d_tov = 2 * 1000; + lport->r_a_tov = 10 * 1000; + lport->does_npiv = 1; /* Temporary until we add NPIV support */ + + /* Allocate stats for vport */ + if (fc_lport_init_stats(lport)) + return -ENOMEM; + + /* Finish lport config */ + fc_lport_config(lport); + + /* offload related configuration */ + lport->crc_offload = 0; + 
lport->seq_offload = 0; + lport->lro_enabled = 0; + lport->lro_xid = 0; + lport->lso_max = 0; + + return 0; +} + +static int qedf_vport_create(struct fc_vport *vport, bool disabled) +{ + struct Scsi_Host *shost = vport_to_shost(vport); + struct fc_lport *n_port = shost_priv(shost); + struct fc_lport *vn_port; + struct qedf_ctx *base_qedf = lport_priv(n_port); + struct qedf_ctx *vport_qedf; + + char buf[32]; + int rc = 0; + + rc = fcoe_validate_vport_create(vport); + if (rc) { + fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf)); + QEDF_WARN(&(base_qedf->dbg_ctx), "Failed to create vport, " + "WWPN (0x%s) already exists.\n", buf); + goto err1; + } + + if (atomic_read(&base_qedf->link_state) != QEDF_LINK_UP) { + QEDF_WARN(&(base_qedf->dbg_ctx), "Cannot create vport " + "because link is not up.\n"); + rc = -EIO; + goto err1; + } + + vn_port = libfc_vport_create(vport, sizeof(struct qedf_ctx)); + if (!vn_port) { + QEDF_WARN(&(base_qedf->dbg_ctx), "Could not create lport " + "for vport.\n"); + rc = -ENOMEM; + goto err1; + } + + fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf)); + QEDF_ERR(&(base_qedf->dbg_ctx), "Creating NPIV port, WWPN=%s.\n", + buf); + + /* Copy some fields from base_qedf */ + vport_qedf = lport_priv(vn_port); + memcpy(vport_qedf, base_qedf, sizeof(struct qedf_ctx)); + + /* Set qedf data specific to this vport */ + vport_qedf->lport = vn_port; + /* Use same hba_lock as base_qedf */ + vport_qedf->hba_lock = base_qedf->hba_lock; + vport_qedf->pdev = base_qedf->pdev; + vport_qedf->cmd_mgr = base_qedf->cmd_mgr; + init_completion(&vport_qedf->flogi_compl); + INIT_LIST_HEAD(&vport_qedf->fcports); + + rc = qedf_vport_libfc_config(vport, vn_port); + if (rc) { + QEDF_ERR(&(base_qedf->dbg_ctx), "Could not allocate memory " + "for lport stats.\n"); + goto err2; + } + + fc_set_wwnn(vn_port, vport->node_name); + fc_set_wwpn(vn_port, vport->port_name); + vport_qedf->wwnn = vn_port->wwnn; + vport_qedf->wwpn = vn_port->wwpn; + + vn_port->host->transportt = qedf_fc_vport_transport_template; + vn_port->host->can_queue = QEDF_MAX_ELS_XID; + vn_port->host->max_lun = qedf_max_lun; + vn_port->host->sg_tablesize = QEDF_MAX_BDS_PER_CMD; + vn_port->host->max_cmd_len = QEDF_MAX_CDB_LEN; + + rc = scsi_add_host(vn_port->host, &vport->dev); + if (rc) { + QEDF_WARN(&(base_qedf->dbg_ctx), "Error adding Scsi_Host.\n"); + goto err2; + } + + /* Set default dev_loss_tmo based on module parameter */ + fc_host_dev_loss_tmo(vn_port->host) = qedf_dev_loss_tmo; + + /* Init libfc stuffs */ + memcpy(&vn_port->tt, &qedf_lport_template, + sizeof(qedf_lport_template)); + fc_exch_init(vn_port); + fc_elsct_init(vn_port); + fc_lport_init(vn_port); + fc_disc_init(vn_port); + fc_disc_config(vn_port, vn_port); + + + /* Allocate the exchange manager */ + shost = vport_to_shost(vport); + n_port = shost_priv(shost); + fc_exch_mgr_list_clone(n_port, vn_port); + + /* Set max frame size */ + fc_set_mfs(vn_port, QEDF_MFS); + + fc_host_port_type(vn_port->host) = FC_PORTTYPE_UNKNOWN; + + if (disabled) { + fc_vport_set_state(vport, FC_VPORT_DISABLED); + } else { + vn_port->boot_time = jiffies; + fc_fabric_login(vn_port); + fc_vport_setlink(vn_port); + } + + QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_NPIV, "vn_port=%p.\n", + vn_port); + + /* Set up debug context for vport */ + vport_qedf->dbg_ctx.host_no = vn_port->host->host_no; + vport_qedf->dbg_ctx.pdev = base_qedf->pdev; + +err2: + scsi_host_put(vn_port->host); +err1: + return rc; +} + +static int qedf_vport_destroy(struct fc_vport *vport) +{ + struct Scsi_Host *shost = 
vport_to_shost(vport); + struct fc_lport *n_port = shost_priv(shost); + struct fc_lport *vn_port = vport->dd_data; + + mutex_lock(&n_port->lp_mutex); + list_del(&vn_port->list); + mutex_unlock(&n_port->lp_mutex); + + fc_fabric_logoff(vn_port); + fc_lport_destroy(vn_port); + + /* Detach from scsi-ml */ + fc_remove_host(vn_port->host); + scsi_remove_host(vn_port->host); + + /* + * Only try to release the exchange manager if the vn_port + * configuration is complete. + */ + if (vn_port->state == LPORT_ST_READY) + fc_exch_mgr_free(vn_port); + + /* Free memory used by statistical counters */ + fc_lport_free_stats(vn_port); + + /* Release Scsi_Host */ + if (vn_port->host) + scsi_host_put(vn_port->host); + + return 0; +} + +static int qedf_vport_disable(struct fc_vport *vport, bool disable) +{ + struct fc_lport *lport = vport->dd_data; + + if (disable) { + fc_vport_set_state(vport, FC_VPORT_DISABLED); + fc_fabric_logoff(lport); + } else { + lport->boot_time = jiffies; + fc_fabric_login(lport); + fc_vport_setlink(lport); + } + return 0; +} + +/* + * During removal we need to wait for all the vports associated with a port + * to be destroyed so we avoid a race condition where libfc is still trying + * to reap vports while the driver remove function has already reaped the + * driver contexts associated with the physical port. + */ +static void qedf_wait_for_vport_destroy(struct qedf_ctx *qedf) +{ + struct fc_host_attrs *fc_host = shost_to_fc_host(qedf->lport->host); + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV, + "Entered.\n"); + while (fc_host->npiv_vports_inuse > 0) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV, + "Waiting for all vports to be reaped.\n"); + msleep(1000); + } +} + +/** + * qedf_fcoe_reset - Resets the fcoe + * + * @shost: shost the reset is from + * + * Returns: always 0 + */ +static int qedf_fcoe_reset(struct Scsi_Host *shost) +{ + struct fc_lport *lport = shost_priv(shost); + + fc_fabric_logoff(lport); + fc_fabric_login(lport); + return 0; +} + +static struct fc_host_statistics *qedf_fc_get_host_stats(struct Scsi_Host + *shost) +{ + struct fc_host_statistics *qedf_stats; + struct fc_lport *lport = shost_priv(shost); + struct qedf_ctx *qedf = lport_priv(lport); + struct qed_fcoe_stats *fw_fcoe_stats; + + qedf_stats = fc_get_host_stats(shost); + + /* We don't collect offload stats for specific NPIV ports */ + if (lport->vport) + goto out; + + fw_fcoe_stats = kmalloc(sizeof(struct qed_fcoe_stats), GFP_KERNEL); + if (!fw_fcoe_stats) { + QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate memory for " + "fw_fcoe_stats.\n"); + goto out; + } + + /* Query firmware for offload stats */ + qed_ops->get_stats(qedf->cdev, fw_fcoe_stats); + + /* + * The expectation is that we add our offload stats to the stats + * being maintained by libfc each time the fc_get_host_status callback + * is invoked. The additions are not carried over for each call to + * the fc_get_host_stats callback. 
+ */ + qedf_stats->tx_frames += fw_fcoe_stats->fcoe_tx_data_pkt_cnt + + fw_fcoe_stats->fcoe_tx_xfer_pkt_cnt + + fw_fcoe_stats->fcoe_tx_other_pkt_cnt; + qedf_stats->rx_frames += fw_fcoe_stats->fcoe_rx_data_pkt_cnt + + fw_fcoe_stats->fcoe_rx_xfer_pkt_cnt + + fw_fcoe_stats->fcoe_rx_other_pkt_cnt; + qedf_stats->fcp_input_megabytes += + do_div(fw_fcoe_stats->fcoe_rx_byte_cnt, 1000000); + qedf_stats->fcp_output_megabytes += + do_div(fw_fcoe_stats->fcoe_tx_byte_cnt, 1000000); + qedf_stats->rx_words += fw_fcoe_stats->fcoe_rx_byte_cnt / 4; + qedf_stats->tx_words += fw_fcoe_stats->fcoe_tx_byte_cnt / 4; + qedf_stats->invalid_crc_count += + fw_fcoe_stats->fcoe_silent_drop_pkt_crc_error_cnt; + qedf_stats->dumped_frames = + fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt; + qedf_stats->error_frames += + fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt; + qedf_stats->fcp_input_requests += qedf->input_requests; + qedf_stats->fcp_output_requests += qedf->output_requests; + qedf_stats->fcp_control_requests += qedf->control_requests; + qedf_stats->fcp_packet_aborts += qedf->packet_aborts; + qedf_stats->fcp_frame_alloc_failures += qedf->alloc_failures; + + kfree(fw_fcoe_stats); +out: + return qedf_stats; +} + +static struct fc_function_template qedf_fc_transport_fn = { + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + .show_host_supported_fc4s = 1, + .show_host_active_fc4s = 1, + .show_host_maxframe_size = 1, + + .show_host_port_id = 1, + .show_host_supported_speeds = 1, + .get_host_speed = fc_get_host_speed, + .show_host_speed = 1, + .show_host_port_type = 1, + .get_host_port_state = fc_get_host_port_state, + .show_host_port_state = 1, + .show_host_symbolic_name = 1, + + /* + * Tell FC transport to allocate enough space to store the backpointer + * for the associate qedf_rport struct. 
+	 */
+	.dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
+				sizeof(struct qedf_rport)),
+	.show_rport_maxframe_size = 1,
+	.show_rport_supported_classes = 1,
+	.show_host_fabric_name = 1,
+	.show_starget_node_name = 1,
+	.show_starget_port_name = 1,
+	.show_starget_port_id = 1,
+	.set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
+	.show_rport_dev_loss_tmo = 1,
+	.get_fc_host_stats = qedf_fc_get_host_stats,
+	.issue_fc_host_lip = qedf_fcoe_reset,
+	.vport_create = qedf_vport_create,
+	.vport_delete = qedf_vport_destroy,
+	.vport_disable = qedf_vport_disable,
+	.bsg_request = fc_lport_bsg_request,
+};
+
+static struct fc_function_template qedf_fc_vport_transport_fn = {
+	.show_host_node_name = 1,
+	.show_host_port_name = 1,
+	.show_host_supported_classes = 1,
+	.show_host_supported_fc4s = 1,
+	.show_host_active_fc4s = 1,
+	.show_host_maxframe_size = 1,
+	.show_host_port_id = 1,
+	.show_host_supported_speeds = 1,
+	.get_host_speed = fc_get_host_speed,
+	.show_host_speed = 1,
+	.show_host_port_type = 1,
+	.get_host_port_state = fc_get_host_port_state,
+	.show_host_port_state = 1,
+	.show_host_symbolic_name = 1,
+	.dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
+				sizeof(struct qedf_rport)),
+	.show_rport_maxframe_size = 1,
+	.show_rport_supported_classes = 1,
+	.show_host_fabric_name = 1,
+	.show_starget_node_name = 1,
+	.show_starget_port_name = 1,
+	.show_starget_port_id = 1,
+	.set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
+	.show_rport_dev_loss_tmo = 1,
+	.get_fc_host_stats = fc_get_host_stats,
+	.issue_fc_host_lip = qedf_fcoe_reset,
+	.bsg_request = fc_lport_bsg_request,
+};
+
+static bool qedf_fp_has_work(struct qedf_fastpath *fp)
+{
+	struct qedf_ctx *qedf = fp->qedf;
+	struct global_queue *que;
+	struct qed_sb_info *sb_info = fp->sb_info;
+	struct status_block *sb = sb_info->sb_virt;
+	u16 prod_idx;
+
+	/* Get the pointer to the global CQ this completion is on */
+	que = qedf->global_queues[fp->sb_id];
+
+	/* Be sure all responses have been written to PI */
+	rmb();
+
+	/* Get the current firmware producer index */
+	prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI];
+
+	return (que->cq_prod_idx != prod_idx);
+}
+
+/*
+ * Interrupt handler code.
+ */
+
+/* Process completion queue and copy CQE contents for deferred processing
+ *
+ * Return true if we should wake the I/O thread, false if not.
+ */
+static bool qedf_process_completions(struct qedf_fastpath *fp)
+{
+	struct qedf_ctx *qedf = fp->qedf;
+	struct qed_sb_info *sb_info = fp->sb_info;
+	struct status_block *sb = sb_info->sb_virt;
+	struct global_queue *que;
+	u16 prod_idx;
+	struct fcoe_cqe *cqe;
+	struct qedf_io_work *io_work;
+	int num_handled = 0;
+	unsigned int cpu;
+	struct qedf_ioreq *io_req = NULL;
+	u16 xid;
+	u16 new_cqes;
+	u32 comp_type;
+
+	/* Get the current firmware producer index */
+	prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI];
+
+	/* Get the pointer to the global CQ this completion is on */
+	que = qedf->global_queues[fp->sb_id];
+
+	/* Calculate the amount of new elements since last processing */
+	new_cqes = (prod_idx >= que->cq_prod_idx) ?
+ (prod_idx - que->cq_prod_idx) : + 0x10000 - que->cq_prod_idx + prod_idx; + + /* Save producer index */ + que->cq_prod_idx = prod_idx; + + while (new_cqes) { + fp->completions++; + num_handled++; + cqe = &que->cq[que->cq_cons_idx]; + + comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) & + FCOE_CQE_CQE_TYPE_MASK; + + /* + * Process unsolicited CQEs directly in the interrupt handler + * sine we need the fastpath ID + */ + if (comp_type == FCOE_UNSOLIC_CQE_TYPE) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL, + "Unsolicated CQE.\n"); + qedf_process_unsol_compl(qedf, fp->sb_id, cqe); + /* + * Don't add a work list item. Increment consumer + * consumer index and move on. + */ + goto inc_idx; + } + + xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK; + io_req = &qedf->cmd_mgr->cmds[xid]; + + /* + * Figure out which percpu thread we should queue this I/O + * on. + */ + if (!io_req) + /* If there is not io_req assocated with this CQE + * just queue it on CPU 0 + */ + cpu = 0; + else { + cpu = io_req->cpu; + io_req->int_cpu = smp_processor_id(); + } + + io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC); + if (!io_work) { + QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate " + "work for I/O completion.\n"); + continue; + } + memset(io_work, 0, sizeof(struct qedf_io_work)); + + INIT_WORK(&io_work->work, qedf_fp_io_handler); + + /* Copy contents of CQE for deferred processing */ + memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe)); + + io_work->qedf = fp->qedf; + io_work->fp = NULL; /* Only used for unsolicited frames */ + + queue_work_on(cpu, qedf_io_wq, &io_work->work); + +inc_idx: + que->cq_cons_idx++; + if (que->cq_cons_idx == fp->cq_num_entries) + que->cq_cons_idx = 0; + new_cqes--; + } + + return true; +} + + +/* MSI-X fastpath handler code */ +static irqreturn_t qedf_msix_handler(int irq, void *dev_id) +{ + struct qedf_fastpath *fp = dev_id; + + if (!fp) { + QEDF_ERR(NULL, "fp is null.\n"); + return IRQ_HANDLED; + } + if (!fp->sb_info) { + QEDF_ERR(NULL, "fp->sb_info in null."); + return IRQ_HANDLED; + } + + /* + * Disable interrupts for this status block while we process new + * completions + */ + qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/); + + while (1) { + qedf_process_completions(fp); + + if (qedf_fp_has_work(fp) == 0) { + /* Update the sb information */ + qed_sb_update_sb_idx(fp->sb_info); + + /* Check for more work */ + rmb(); + + if (qedf_fp_has_work(fp) == 0) { + /* Re-enable interrupts */ + qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1); + return IRQ_HANDLED; + } + } + } + + /* Do we ever want to break out of above loop? 
*/ + return IRQ_HANDLED; +} + +/* simd handler for MSI/INTa */ +static void qedf_simd_int_handler(void *cookie) +{ + /* Cookie is qedf_ctx struct */ + struct qedf_ctx *qedf = (struct qedf_ctx *)cookie; + + QEDF_WARN(&(qedf->dbg_ctx), "qedf=%p.\n", qedf); +} + +#define QEDF_SIMD_HANDLER_NUM 0 +static void qedf_sync_free_irqs(struct qedf_ctx *qedf) +{ + int i; + + if (qedf->int_info.msix_cnt) { + for (i = 0; i < qedf->int_info.used_cnt; i++) { + synchronize_irq(qedf->int_info.msix[i].vector); + irq_set_affinity_hint(qedf->int_info.msix[i].vector, + NULL); + irq_set_affinity_notifier(qedf->int_info.msix[i].vector, + NULL); + free_irq(qedf->int_info.msix[i].vector, + &qedf->fp_array[i]); + } + } else + qed_ops->common->simd_handler_clean(qedf->cdev, + QEDF_SIMD_HANDLER_NUM); + + qedf->int_info.used_cnt = 0; + qed_ops->common->set_fp_int(qedf->cdev, 0); +} + +static int qedf_request_msix_irq(struct qedf_ctx *qedf) +{ + int i, rc, cpu; + + cpu = cpumask_first(cpu_online_mask); + for (i = 0; i < qedf->num_queues; i++) { + rc = request_irq(qedf->int_info.msix[i].vector, + qedf_msix_handler, 0, "qedf", &qedf->fp_array[i]); + + if (rc) { + QEDF_WARN(&(qedf->dbg_ctx), "request_irq failed.\n"); + qedf_sync_free_irqs(qedf); + return rc; + } + + qedf->int_info.used_cnt++; + rc = irq_set_affinity_hint(qedf->int_info.msix[i].vector, + get_cpu_mask(cpu)); + cpu = cpumask_next(cpu, cpu_online_mask); + } + + return 0; +} + +static int qedf_setup_int(struct qedf_ctx *qedf) +{ + int rc = 0; + + /* + * Learn interrupt configuration + */ + rc = qed_ops->common->set_fp_int(qedf->cdev, num_online_cpus()); + + rc = qed_ops->common->get_fp_int(qedf->cdev, &qedf->int_info); + if (rc) + return 0; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of msix_cnt = " + "0x%x num of cpus = 0x%x\n", qedf->int_info.msix_cnt, + num_online_cpus()); + + if (qedf->int_info.msix_cnt) + return qedf_request_msix_irq(qedf); + + qed_ops->common->simd_handler_config(qedf->cdev, &qedf, + QEDF_SIMD_HANDLER_NUM, qedf_simd_int_handler); + qedf->int_info.used_cnt = 1; + + return 0; +} + +/* Main function for libfc frame reception */ +static void qedf_recv_frame(struct qedf_ctx *qedf, + struct sk_buff *skb) +{ + u32 fr_len; + struct fc_lport *lport; + struct fc_frame_header *fh; + struct fcoe_crc_eof crc_eof; + struct fc_frame *fp; + u8 *mac = NULL; + u8 *dest_mac = NULL; + struct fcoe_hdr *hp; + struct qedf_rport *fcport; + + lport = qedf->lport; + if (lport == NULL || lport->state == LPORT_ST_DISABLED) { + QEDF_WARN(NULL, "Invalid lport struct or lport disabled.\n"); + kfree_skb(skb); + return; + } + + if (skb_is_nonlinear(skb)) + skb_linearize(skb); + mac = eth_hdr(skb)->h_source; + dest_mac = eth_hdr(skb)->h_dest; + + /* Pull the header */ + hp = (struct fcoe_hdr *)skb->data; + fh = (struct fc_frame_header *) skb_transport_header(skb); + skb_pull(skb, sizeof(struct fcoe_hdr)); + fr_len = skb->len - sizeof(struct fcoe_crc_eof); + + fp = (struct fc_frame *)skb; + fc_frame_init(fp); + fr_dev(fp) = lport; + fr_sof(fp) = hp->fcoe_sof; + if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) { + kfree_skb(skb); + return; + } + fr_eof(fp) = crc_eof.fcoe_eof; + fr_crc(fp) = crc_eof.fcoe_crc32; + if (pskb_trim(skb, fr_len)) { + kfree_skb(skb); + return; + } + + fh = fc_frame_header_get(fp); + + if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && + fh->fh_type == FC_TYPE_FCP) { + /* Drop FCP data. 
We dont this in L2 path */ + kfree_skb(skb); + return; + } + if (fh->fh_r_ctl == FC_RCTL_ELS_REQ && + fh->fh_type == FC_TYPE_ELS) { + switch (fc_frame_payload_op(fp)) { + case ELS_LOGO: + if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) { + /* drop non-FIP LOGO */ + kfree_skb(skb); + return; + } + break; + } + } + + if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) { + /* Drop incoming ABTS */ + kfree_skb(skb); + return; + } + + /* + * If a connection is uploading, drop incoming FCoE frames as there + * is a small window where we could try to return a frame while libfc + * is trying to clean things up. + */ + + /* Get fcport associated with d_id if it exists */ + fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id)); + + if (fcport && test_bit(QEDF_RPORT_UPLOADING_CONNECTION, + &fcport->flags)) { + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, + "Connection uploading, dropping fp=%p.\n", fp); + kfree_skb(skb); + return; + } + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame receive: " + "skb=%p fp=%p src=%06x dest=%06x r_ctl=%x fh_type=%x.\n", skb, fp, + ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl, + fh->fh_type); + if (qedf_dump_frames) + print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16, + 1, skb->data, skb->len, false); + fc_exch_recv(lport, fp); +} + +static void qedf_ll2_process_skb(struct work_struct *work) +{ + struct qedf_skb_work *skb_work = + container_of(work, struct qedf_skb_work, work); + struct qedf_ctx *qedf = skb_work->qedf; + struct sk_buff *skb = skb_work->skb; + struct ethhdr *eh; + + if (!qedf) { + QEDF_ERR(NULL, "qedf is NULL\n"); + goto err_out; + } + + eh = (struct ethhdr *)skb->data; + + /* Undo VLAN encapsulation */ + if (eh->h_proto == htons(ETH_P_8021Q)) { + memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2); + eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN); + skb_reset_mac_header(skb); + } + + /* + * Process either a FIP frame or FCoE frame based on the + * protocol value. If it's not either just drop the + * frame. + */ + if (eh->h_proto == htons(ETH_P_FIP)) { + qedf_fip_recv(qedf, skb); + goto out; + } else if (eh->h_proto == htons(ETH_P_FCOE)) { + __skb_pull(skb, ETH_HLEN); + qedf_recv_frame(qedf, skb); + goto out; + } else + goto err_out; + +err_out: + kfree_skb(skb); +out: + kfree(skb_work); + return; +} + +static int qedf_ll2_rx(void *cookie, struct sk_buff *skb, + u32 arg1, u32 arg2) +{ + struct qedf_ctx *qedf = (struct qedf_ctx *)cookie; + struct qedf_skb_work *skb_work; + + skb_work = kzalloc(sizeof(struct qedf_skb_work), GFP_ATOMIC); + if (!skb_work) { + QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate skb_work so " + "dropping frame.\n"); + kfree_skb(skb); + return 0; + } + + INIT_WORK(&skb_work->work, qedf_ll2_process_skb); + skb_work->skb = skb; + skb_work->qedf = qedf; + queue_work(qedf->ll2_recv_wq, &skb_work->work); + + return 0; +} + +static struct qed_ll2_cb_ops qedf_ll2_cb_ops = { + .rx_cb = qedf_ll2_rx, + .tx_cb = NULL, +}; + +/* Main thread to process I/O completions */ +void qedf_fp_io_handler(struct work_struct *work) +{ + struct qedf_io_work *io_work = + container_of(work, struct qedf_io_work, work); + u32 comp_type; + + /* + * Deferred part of unsolicited CQE sends + * frame to libfc. 
+ */ + comp_type = (io_work->cqe.cqe_data >> + FCOE_CQE_CQE_TYPE_SHIFT) & + FCOE_CQE_CQE_TYPE_MASK; + if (comp_type == FCOE_UNSOLIC_CQE_TYPE && + io_work->fp) + fc_exch_recv(io_work->qedf->lport, io_work->fp); + else + qedf_process_cqe(io_work->qedf, &io_work->cqe); + + kfree(io_work); +} + +static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf, + struct qed_sb_info *sb_info, u16 sb_id) +{ + struct status_block *sb_virt; + dma_addr_t sb_phys; + int ret; + + sb_virt = dma_alloc_coherent(&qedf->pdev->dev, + sizeof(struct status_block), &sb_phys, GFP_KERNEL); + + if (!sb_virt) { + QEDF_ERR(&(qedf->dbg_ctx), "Status block allocation failed " + "for id = %d.\n", sb_id); + return -ENOMEM; + } + + ret = qed_ops->common->sb_init(qedf->cdev, sb_info, sb_virt, sb_phys, + sb_id, QED_SB_TYPE_STORAGE); + + if (ret) { + QEDF_ERR(&(qedf->dbg_ctx), "Status block initialization " + "failed for id = %d.\n", sb_id); + return ret; + } + + return 0; +} + +static void qedf_free_sb(struct qedf_ctx *qedf, struct qed_sb_info *sb_info) +{ + if (sb_info->sb_virt) + dma_free_coherent(&qedf->pdev->dev, sizeof(*sb_info->sb_virt), + (void *)sb_info->sb_virt, sb_info->sb_phys); +} + +static void qedf_destroy_sb(struct qedf_ctx *qedf) +{ + int id; + struct qedf_fastpath *fp = NULL; + + for (id = 0; id < qedf->num_queues; id++) { + fp = &(qedf->fp_array[id]); + if (fp->sb_id == QEDF_SB_ID_NULL) + break; + qedf_free_sb(qedf, fp->sb_info); + kfree(fp->sb_info); + } + kfree(qedf->fp_array); +} + +static int qedf_prepare_sb(struct qedf_ctx *qedf) +{ + int id; + struct qedf_fastpath *fp; + int ret; + + qedf->fp_array = + kcalloc(qedf->num_queues, sizeof(struct qedf_fastpath), + GFP_KERNEL); + + if (!qedf->fp_array) { + QEDF_ERR(&(qedf->dbg_ctx), "fastpath array allocation " + "failed.\n"); + return -ENOMEM; + } + + for (id = 0; id < qedf->num_queues; id++) { + fp = &(qedf->fp_array[id]); + fp->sb_id = QEDF_SB_ID_NULL; + fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL); + if (!fp->sb_info) { + QEDF_ERR(&(qedf->dbg_ctx), "SB info struct " + "allocation failed.\n"); + goto err; + } + ret = qedf_alloc_and_init_sb(qedf, fp->sb_info, id); + if (ret) { + QEDF_ERR(&(qedf->dbg_ctx), "SB allocation and " + "initialization failed.\n"); + goto err; + } + fp->sb_id = id; + fp->qedf = qedf; + fp->cq_num_entries = + qedf->global_queues[id]->cq_mem_size / + sizeof(struct fcoe_cqe); + } +err: + return 0; +} + +void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe) +{ + u16 xid; + struct qedf_ioreq *io_req; + struct qedf_rport *fcport; + u32 comp_type; + + comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) & + FCOE_CQE_CQE_TYPE_MASK; + + xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK; + io_req = &qedf->cmd_mgr->cmds[xid]; + + /* Completion not for a valid I/O anymore so just return */ + if (!io_req) + return; + + fcport = io_req->fcport; + + if (fcport == NULL) { + QEDF_ERR(&(qedf->dbg_ctx), "fcport is NULL.\n"); + return; + } + + /* + * Check that fcport is offloaded. If it isn't then the spinlock + * isn't valid and shouldn't be taken. We should just return. 
+ */ + if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { + QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n"); + return; + } + + + switch (comp_type) { + case FCOE_GOOD_COMPLETION_CQE_TYPE: + atomic_inc(&fcport->free_sqes); + switch (io_req->cmd_type) { + case QEDF_SCSI_CMD: + qedf_scsi_completion(qedf, cqe, io_req); + break; + case QEDF_ELS: + qedf_process_els_compl(qedf, cqe, io_req); + break; + case QEDF_TASK_MGMT_CMD: + qedf_process_tmf_compl(qedf, cqe, io_req); + break; + case QEDF_SEQ_CLEANUP: + qedf_process_seq_cleanup_compl(qedf, cqe, io_req); + break; + } + break; + case FCOE_ERROR_DETECTION_CQE_TYPE: + atomic_inc(&fcport->free_sqes); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Error detect CQE.\n"); + qedf_process_error_detect(qedf, cqe, io_req); + break; + case FCOE_EXCH_CLEANUP_CQE_TYPE: + atomic_inc(&fcport->free_sqes); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Cleanup CQE.\n"); + qedf_process_cleanup_compl(qedf, cqe, io_req); + break; + case FCOE_ABTS_CQE_TYPE: + atomic_inc(&fcport->free_sqes); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Abort CQE.\n"); + qedf_process_abts_compl(qedf, cqe, io_req); + break; + case FCOE_DUMMY_CQE_TYPE: + atomic_inc(&fcport->free_sqes); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Dummy CQE.\n"); + break; + case FCOE_LOCAL_COMP_CQE_TYPE: + atomic_inc(&fcport->free_sqes); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Local completion CQE.\n"); + break; + case FCOE_WARNING_CQE_TYPE: + atomic_inc(&fcport->free_sqes); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Warning CQE.\n"); + qedf_process_warning_compl(qedf, cqe, io_req); + break; + case MAX_FCOE_CQE_TYPE: + atomic_inc(&fcport->free_sqes); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Max FCoE CQE.\n"); + break; + default: + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, + "Default CQE.\n"); + break; + } +} + +static void qedf_free_bdq(struct qedf_ctx *qedf) +{ + int i; + + if (qedf->bdq_pbl_list) + dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE, + qedf->bdq_pbl_list, qedf->bdq_pbl_list_dma); + + if (qedf->bdq_pbl) + dma_free_coherent(&qedf->pdev->dev, qedf->bdq_pbl_mem_size, + qedf->bdq_pbl, qedf->bdq_pbl_dma); + + for (i = 0; i < QEDF_BDQ_SIZE; i++) { + if (qedf->bdq[i].buf_addr) { + dma_free_coherent(&qedf->pdev->dev, QEDF_BDQ_BUF_SIZE, + qedf->bdq[i].buf_addr, qedf->bdq[i].buf_dma); + } + } +} + +static void qedf_free_global_queues(struct qedf_ctx *qedf) +{ + int i; + struct global_queue **gl = qedf->global_queues; + + for (i = 0; i < qedf->num_queues; i++) { + if (!gl[i]) + continue; + + if (gl[i]->cq) + dma_free_coherent(&qedf->pdev->dev, + gl[i]->cq_mem_size, gl[i]->cq, gl[i]->cq_dma); + if (gl[i]->cq_pbl) + dma_free_coherent(&qedf->pdev->dev, gl[i]->cq_pbl_size, + gl[i]->cq_pbl, gl[i]->cq_pbl_dma); + + kfree(gl[i]); + } + + qedf_free_bdq(qedf); +} + +static int qedf_alloc_bdq(struct qedf_ctx *qedf) +{ + int i; + struct scsi_bd *pbl; + u64 *list; + dma_addr_t page; + + /* Alloc dma memory for BDQ buffers */ + for (i = 0; i < QEDF_BDQ_SIZE; i++) { + qedf->bdq[i].buf_addr = dma_alloc_coherent(&qedf->pdev->dev, + QEDF_BDQ_BUF_SIZE, &qedf->bdq[i].buf_dma, GFP_KERNEL); + if (!qedf->bdq[i].buf_addr) { + QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ " + "buffer %d.\n", i); + return -ENOMEM; + } + } + + /* Alloc dma memory for BDQ page buffer list */ + qedf->bdq_pbl_mem_size = + QEDF_BDQ_SIZE * sizeof(struct scsi_bd); + qedf->bdq_pbl_mem_size = + ALIGN(qedf->bdq_pbl_mem_size, QEDF_PAGE_SIZE); + + qedf->bdq_pbl = dma_alloc_coherent(&qedf->pdev->dev, + 
qedf->bdq_pbl_mem_size, &qedf->bdq_pbl_dma, GFP_KERNEL); + if (!qedf->bdq_pbl) { + QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ PBL.\n"); + return -ENOMEM; + } + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "BDQ PBL addr=0x%p dma=%pad\n", + qedf->bdq_pbl, &qedf->bdq_pbl_dma); + + /* + * Populate BDQ PBL with physical and virtual address of individual + * BDQ buffers + */ + pbl = (struct scsi_bd *)qedf->bdq_pbl; + for (i = 0; i < QEDF_BDQ_SIZE; i++) { + pbl->address.hi = cpu_to_le32(U64_HI(qedf->bdq[i].buf_dma)); + pbl->address.lo = cpu_to_le32(U64_LO(qedf->bdq[i].buf_dma)); + pbl->opaque.hi = 0; + /* Opaque lo data is an index into the BDQ array */ + pbl->opaque.lo = cpu_to_le32(i); + pbl++; + } + + /* Allocate list of PBL pages */ + qedf->bdq_pbl_list = dma_alloc_coherent(&qedf->pdev->dev, + QEDF_PAGE_SIZE, &qedf->bdq_pbl_list_dma, GFP_KERNEL); + if (!qedf->bdq_pbl_list) { + QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL " + "pages.\n"); + return -ENOMEM; + } + memset(qedf->bdq_pbl_list, 0, QEDF_PAGE_SIZE); + + /* + * Now populate PBL list with pages that contain pointers to the + * individual buffers. + */ + qedf->bdq_pbl_list_num_entries = qedf->bdq_pbl_mem_size / + QEDF_PAGE_SIZE; + list = (u64 *)qedf->bdq_pbl_list; + page = qedf->bdq_pbl_list_dma; + for (i = 0; i < qedf->bdq_pbl_list_num_entries; i++) { + *list = qedf->bdq_pbl_dma; + list++; + page += QEDF_PAGE_SIZE; + } + + return 0; +} + +static int qedf_alloc_global_queues(struct qedf_ctx *qedf) +{ + u32 *list; + int i; + int status = 0, rc; + u32 *pbl; + dma_addr_t page; + int num_pages; + + /* Allocate and map CQs, RQs */ + /* + * Number of global queues (CQ / RQ). This should + * be <= number of available MSIX vectors for the PF + */ + if (!qedf->num_queues) { + QEDF_ERR(&(qedf->dbg_ctx), "No MSI-X vectors available!\n"); + return 1; + } + + /* + * Make sure we allocated the PBL that will contain the physical + * addresses of our queues + */ + if (!qedf->p_cpuq) { + status = 1; + goto mem_alloc_failure; + } + + qedf->global_queues = kzalloc((sizeof(struct global_queue *) + * qedf->num_queues), GFP_KERNEL); + if (!qedf->global_queues) { + QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate global " + "queues array ptr memory\n"); + return -ENOMEM; + } + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "qedf->global_queues=%p.\n", qedf->global_queues); + + /* Allocate DMA coherent buffers for BDQ */ + rc = qedf_alloc_bdq(qedf); + if (rc) + goto mem_alloc_failure; + + /* Allocate a CQ and an associated PBL for each MSI-X vector */ + for (i = 0; i < qedf->num_queues; i++) { + qedf->global_queues[i] = kzalloc(sizeof(struct global_queue), + GFP_KERNEL); + if (!qedf->global_queues[i]) { + QEDF_WARN(&(qedf->dbg_ctx), "Unable to allocation " + "global queue %d.\n", i); + goto mem_alloc_failure; + } + + qedf->global_queues[i]->cq_mem_size = + FCOE_PARAMS_CQ_NUM_ENTRIES * sizeof(struct fcoe_cqe); + qedf->global_queues[i]->cq_mem_size = + ALIGN(qedf->global_queues[i]->cq_mem_size, QEDF_PAGE_SIZE); + + qedf->global_queues[i]->cq_pbl_size = + (qedf->global_queues[i]->cq_mem_size / + PAGE_SIZE) * sizeof(void *); + qedf->global_queues[i]->cq_pbl_size = + ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE); + + qedf->global_queues[i]->cq = + dma_alloc_coherent(&qedf->pdev->dev, + qedf->global_queues[i]->cq_mem_size, + &qedf->global_queues[i]->cq_dma, GFP_KERNEL); + + if (!qedf->global_queues[i]->cq) { + QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate " + "cq.\n"); + status = -ENOMEM; + goto mem_alloc_failure; + } + 
memset(qedf->global_queues[i]->cq, 0, + qedf->global_queues[i]->cq_mem_size); + + qedf->global_queues[i]->cq_pbl = + dma_alloc_coherent(&qedf->pdev->dev, + qedf->global_queues[i]->cq_pbl_size, + &qedf->global_queues[i]->cq_pbl_dma, GFP_KERNEL); + + if (!qedf->global_queues[i]->cq_pbl) { + QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate " + "cq PBL.\n"); + status = -ENOMEM; + goto mem_alloc_failure; + } + memset(qedf->global_queues[i]->cq_pbl, 0, + qedf->global_queues[i]->cq_pbl_size); + + /* Create PBL */ + num_pages = qedf->global_queues[i]->cq_mem_size / + QEDF_PAGE_SIZE; + page = qedf->global_queues[i]->cq_dma; + pbl = (u32 *)qedf->global_queues[i]->cq_pbl; + + while (num_pages--) { + *pbl = U64_LO(page); + pbl++; + *pbl = U64_HI(page); + pbl++; + page += QEDF_PAGE_SIZE; + } + /* Set the initial consumer index for cq */ + qedf->global_queues[i]->cq_cons_idx = 0; + } + + list = (u32 *)qedf->p_cpuq; + + /* + * The list is built as follows: CQ#0 PBL pointer, RQ#0 PBL pointer, + * CQ#1 PBL pointer, RQ#1 PBL pointer, etc. Each PBL pointer points + * to the physical address which contains an array of pointers to + * the physical addresses of the specific queue pages. + */ + for (i = 0; i < qedf->num_queues; i++) { + *list = U64_LO(qedf->global_queues[i]->cq_pbl_dma); + list++; + *list = U64_HI(qedf->global_queues[i]->cq_pbl_dma); + list++; + *list = U64_LO(0); + list++; + *list = U64_HI(0); + list++; + } + + return 0; + +mem_alloc_failure: + qedf_free_global_queues(qedf); + return status; +} + +static int qedf_set_fcoe_pf_param(struct qedf_ctx *qedf) +{ + u8 sq_num_pbl_pages; + u32 sq_mem_size; + u32 cq_mem_size; + u32 cq_num_entries; + int rval; + + /* + * The number of completion queues/fastpath interrupts/status blocks + * we allocation is the minimum off: + * + * Number of CPUs + * Number of MSI-X vectors + * Max number allocated in hardware (QEDF_MAX_NUM_CQS) + */ + qedf->num_queues = min((unsigned int)QEDF_MAX_NUM_CQS, + num_online_cpus()); + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n", + qedf->num_queues); + + qedf->p_cpuq = pci_alloc_consistent(qedf->pdev, + qedf->num_queues * sizeof(struct qedf_glbl_q_params), + &qedf->hw_p_cpuq); + + if (!qedf->p_cpuq) { + QEDF_ERR(&(qedf->dbg_ctx), "pci_alloc_consistent failed.\n"); + return 1; + } + + rval = qedf_alloc_global_queues(qedf); + if (rval) { + QEDF_ERR(&(qedf->dbg_ctx), "Global queue allocation " + "failed.\n"); + return 1; + } + + /* Calculate SQ PBL size in the same manner as in qedf_sq_alloc() */ + sq_mem_size = SQ_NUM_ENTRIES * sizeof(struct fcoe_wqe); + sq_mem_size = ALIGN(sq_mem_size, QEDF_PAGE_SIZE); + sq_num_pbl_pages = (sq_mem_size / QEDF_PAGE_SIZE); + + /* Calculate CQ num entries */ + cq_mem_size = FCOE_PARAMS_CQ_NUM_ENTRIES * sizeof(struct fcoe_cqe); + cq_mem_size = ALIGN(cq_mem_size, QEDF_PAGE_SIZE); + cq_num_entries = cq_mem_size / sizeof(struct fcoe_cqe); + + memset(&(qedf->pf_params), 0, + sizeof(qedf->pf_params)); + + /* Setup the value for fcoe PF */ + qedf->pf_params.fcoe_pf_params.num_cons = QEDF_MAX_SESSIONS; + qedf->pf_params.fcoe_pf_params.num_tasks = FCOE_PARAMS_NUM_TASKS; + qedf->pf_params.fcoe_pf_params.glbl_q_params_addr = + (u64)qedf->hw_p_cpuq; + qedf->pf_params.fcoe_pf_params.sq_num_pbl_pages = sq_num_pbl_pages; + + qedf->pf_params.fcoe_pf_params.rq_buffer_log_size = 0; + + qedf->pf_params.fcoe_pf_params.cq_num_entries = cq_num_entries; + qedf->pf_params.fcoe_pf_params.num_cqs = qedf->num_queues; + + /* log_page_size: 12 for 4KB pages */ + 
qedf->pf_params.fcoe_pf_params.log_page_size = ilog2(QEDF_PAGE_SIZE); + + qedf->pf_params.fcoe_pf_params.mtu = 9000; + qedf->pf_params.fcoe_pf_params.gl_rq_pi = QEDF_FCOE_PARAMS_GL_RQ_PI; + qedf->pf_params.fcoe_pf_params.gl_cmd_pi = QEDF_FCOE_PARAMS_GL_CMD_PI; + + /* BDQ address and size */ + qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0] = + qedf->bdq_pbl_list_dma; + qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0] = + qedf->bdq_pbl_list_num_entries; + qedf->pf_params.fcoe_pf_params.rq_buffer_size = QEDF_BDQ_BUF_SIZE; + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "bdq_list=%p bdq_pbl_list_dma=%llx bdq_pbl_list_entries=%d.\n", + qedf->bdq_pbl_list, + qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0], + qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0]); + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "cq_num_entries=%d.\n", + qedf->pf_params.fcoe_pf_params.cq_num_entries); + + return 0; +} + +/* Free DMA coherent memory for array of queue pointers we pass to qed */ +static void qedf_free_fcoe_pf_param(struct qedf_ctx *qedf) +{ + size_t size = 0; + + if (qedf->p_cpuq) { + size = qedf->num_queues * sizeof(struct qedf_glbl_q_params); + pci_free_consistent(qedf->pdev, size, qedf->p_cpuq, + qedf->hw_p_cpuq); + } + + qedf_free_global_queues(qedf); + + if (qedf->global_queues) + kfree(qedf->global_queues); +} + +/* + * PCI driver functions + */ + +static const struct pci_device_id qedf_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165c) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x8080) }, + {0} +}; +MODULE_DEVICE_TABLE(pci, qedf_pci_tbl); + +static struct pci_driver qedf_pci_driver = { + .name = QEDF_MODULE_NAME, + .id_table = qedf_pci_tbl, + .probe = qedf_probe, + .remove = qedf_remove, +}; + +static int __qedf_probe(struct pci_dev *pdev, int mode) +{ + int rc = -EINVAL; + struct fc_lport *lport; + struct qedf_ctx *qedf; + struct Scsi_Host *host; + bool is_vf = false; + struct qed_ll2_params params; + char host_buf[20]; + struct qed_link_params link_params; + int status; + void *task_start, *task_end; + struct qed_slowpath_params slowpath_params; + struct qed_probe_params qed_params; + u16 tmp; + + /* + * When doing error recovery we didn't reap the lport so don't try + * to reallocate it. 
+ */ + if (mode != QEDF_MODE_RECOVERY) { + lport = libfc_host_alloc(&qedf_host_template, + sizeof(struct qedf_ctx)); + + if (!lport) { + QEDF_ERR(NULL, "Could not allocate lport.\n"); + rc = -ENOMEM; + goto err0; + } + + /* Initialize qedf_ctx */ + qedf = lport_priv(lport); + qedf->lport = lport; + qedf->ctlr.lp = lport; + qedf->pdev = pdev; + qedf->dbg_ctx.pdev = pdev; + qedf->dbg_ctx.host_no = lport->host->host_no; + spin_lock_init(&qedf->hba_lock); + INIT_LIST_HEAD(&qedf->fcports); + qedf->curr_conn_id = QEDF_MAX_SESSIONS - 1; + atomic_set(&qedf->num_offloads, 0); + qedf->stop_io_on_error = false; + pci_set_drvdata(pdev, qedf); + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, + "QLogic FastLinQ FCoE Module qedf %s, " + "FW %d.%d.%d.%d\n", QEDF_VERSION, + FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION, + FW_ENGINEERING_VERSION); + } else { + /* Init pointers during recovery */ + qedf = pci_get_drvdata(pdev); + lport = qedf->lport; + } + + host = lport->host; + + /* Allocate mempool for qedf_io_work structs */ + qedf->io_mempool = mempool_create_slab_pool(QEDF_IO_WORK_MIN, + qedf_io_work_cache); + if (qedf->io_mempool == NULL) { + QEDF_ERR(&(qedf->dbg_ctx), "qedf->io_mempool is NULL.\n"); + goto err1; + } + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, "qedf->io_mempool=%p.\n", + qedf->io_mempool); + + sprintf(host_buf, "qedf_%u_link", + qedf->lport->host->host_no); + qedf->link_update_wq = create_singlethread_workqueue(host_buf); + INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update); + INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery); + + qedf->fipvlan_retries = qedf_fipvlan_retries; + + /* + * Common probe. Takes care of basic hardware init and pci_* + * functions. + */ + memset(&qed_params, 0, sizeof(qed_params)); + qed_params.protocol = QED_PROTOCOL_FCOE; + qed_params.dp_module = qedf_dp_module; + qed_params.dp_level = qedf_dp_level; + qed_params.is_vf = is_vf; + qedf->cdev = qed_ops->common->probe(pdev, &qed_params); + if (!qedf->cdev) { + rc = -ENODEV; + goto err1; + } + + /* queue allocation code should come here + * order should be + * slowpath_start + * status block allocation + * interrupt registration (to get min number of queues) + * set_fcoe_pf_param + * qed_sp_fcoe_func_start + */ + rc = qedf_set_fcoe_pf_param(qedf); + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), "Cannot set fcoe pf param.\n"); + goto err2; + } + qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params); + + /* Learn information crucial for qedf to progress */ + rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info); + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), "Failed to dev info.\n"); + goto err1; + } + + /* Record BDQ producer doorbell addresses */ + qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr; + qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr; + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "BDQ primary_prod=%p secondary_prod=%p.\n", qedf->bdq_primary_prod, + qedf->bdq_secondary_prod); + + qed_ops->register_ops(qedf->cdev, &qedf_cb_ops, qedf); + + rc = qedf_prepare_sb(qedf); + if (rc) { + + QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n"); + goto err2; + } + + /* Start the Slowpath-process */ + slowpath_params.int_mode = QED_INT_MODE_MSIX; + slowpath_params.drv_major = QEDF_DRIVER_MAJOR_VER; + slowpath_params.drv_minor = QEDF_DRIVER_MINOR_VER; + slowpath_params.drv_rev = QEDF_DRIVER_REV_VER; + slowpath_params.drv_eng = QEDF_DRIVER_ENG_VER; + memcpy(slowpath_params.name, "qedf", QED_DRV_VER_STR_SIZE); + rc = 
qed_ops->common->slowpath_start(qedf->cdev, &slowpath_params); + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n"); + goto err2; + } + + /* + * update_pf_params needs to be called before and after slowpath + * start + */ + qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params); + + /* Setup interrupts */ + rc = qedf_setup_int(qedf); + if (rc) + goto err3; + + rc = qed_ops->start(qedf->cdev, &qedf->tasks); + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), "Cannot start FCoE function.\n"); + goto err4; + } + task_start = qedf_get_task_mem(&qedf->tasks, 0); + task_end = qedf_get_task_mem(&qedf->tasks, MAX_TID_BLOCKS_FCOE - 1); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Task context start=%p, " + "end=%p block_size=%u.\n", task_start, task_end, + qedf->tasks.size); + + /* + * We need to write the number of BDs in the BDQ we've preallocated so + * the f/w will do a prefetch and we'll get an unsolicited CQE when a + * packet arrives. + */ + qedf->bdq_prod_idx = QEDF_BDQ_SIZE; + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "Writing %d to primary and secondary BDQ doorbell registers.\n", + qedf->bdq_prod_idx); + writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod); + tmp = readw(qedf->bdq_primary_prod); + writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod); + tmp = readw(qedf->bdq_secondary_prod); + + qed_ops->common->set_power_state(qedf->cdev, PCI_D0); + + /* Now that the dev_info struct has been filled in set the MAC + * address + */ + ether_addr_copy(qedf->mac, qedf->dev_info.common.hw_mac); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "MAC address is %pM.\n", + qedf->mac); + + /* Set the WWNN and WWPN based on the MAC address */ + qedf->wwnn = fcoe_wwn_from_mac(qedf->mac, 1, 0); + qedf->wwpn = fcoe_wwn_from_mac(qedf->mac, 2, 0); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "WWNN=%016llx " + "WWPN=%016llx.\n", qedf->wwnn, qedf->wwpn); + + sprintf(host_buf, "host_%d", host->host_no); + qed_ops->common->set_id(qedf->cdev, host_buf, QEDF_VERSION); + + + /* Set xid max values */ + qedf->max_scsi_xid = QEDF_MAX_SCSI_XID; + qedf->max_els_xid = QEDF_MAX_ELS_XID; + + /* Allocate cmd mgr */ + qedf->cmd_mgr = qedf_cmd_mgr_alloc(qedf); + if (!qedf->cmd_mgr) { + QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate cmd mgr.\n"); + goto err5; + } + + if (mode != QEDF_MODE_RECOVERY) { + host->transportt = qedf_fc_transport_template; + host->can_queue = QEDF_MAX_ELS_XID; + host->max_lun = qedf_max_lun; + host->max_cmd_len = QEDF_MAX_CDB_LEN; + rc = scsi_add_host(host, &pdev->dev); + if (rc) + goto err6; + } + + memset(¶ms, 0, sizeof(params)); + params.mtu = 9000; + ether_addr_copy(params.ll2_mac_address, qedf->mac); + + /* Start LL2 processing thread */ + snprintf(host_buf, 20, "qedf_%d_ll2", host->host_no); + qedf->ll2_recv_wq = + create_singlethread_workqueue(host_buf); + if (!qedf->ll2_recv_wq) { + QEDF_ERR(&(qedf->dbg_ctx), "Failed to LL2 workqueue.\n"); + goto err7; + } + +#ifdef CONFIG_DEBUG_FS + qedf_dbg_host_init(&(qedf->dbg_ctx), &qedf_debugfs_ops, + &qedf_dbg_fops); +#endif + + /* Start LL2 */ + qed_ops->ll2->register_cb_ops(qedf->cdev, &qedf_ll2_cb_ops, qedf); + rc = qed_ops->ll2->start(qedf->cdev, ¶ms); + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), "Could not start Light L2.\n"); + goto err7; + } + set_bit(QEDF_LL2_STARTED, &qedf->flags); + + /* hw will be insterting vlan tag*/ + qedf->vlan_hw_insert = 1; + qedf->vlan_id = 0; + + /* + * No need to setup fcoe_ctlr or fc_lport objects during recovery since + * they were not reaped during the unload process. 
+ */ + if (mode != QEDF_MODE_RECOVERY) { + /* Setup imbedded fcoe controller */ + qedf_fcoe_ctlr_setup(qedf); + + /* Setup lport */ + rc = qedf_lport_setup(qedf); + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), + "qedf_lport_setup failed.\n"); + goto err7; + } + } + + sprintf(host_buf, "qedf_%u_timer", qedf->lport->host->host_no); + qedf->timer_work_queue = + create_singlethread_workqueue(host_buf); + if (!qedf->timer_work_queue) { + QEDF_ERR(&(qedf->dbg_ctx), "Failed to start timer " + "workqueue.\n"); + goto err7; + } + + /* DPC workqueue is not reaped during recovery unload */ + if (mode != QEDF_MODE_RECOVERY) { + sprintf(host_buf, "qedf_%u_dpc", + qedf->lport->host->host_no); + qedf->dpc_wq = create_singlethread_workqueue(host_buf); + } + + /* + * GRC dump and sysfs parameters are not reaped during the recovery + * unload process. + */ + if (mode != QEDF_MODE_RECOVERY) { + qedf->grcdump_size = qed_ops->common->dbg_grc_size(qedf->cdev); + if (qedf->grcdump_size) { + rc = qedf_alloc_grc_dump_buf(&qedf->grcdump, + qedf->grcdump_size); + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), + "GRC Dump buffer alloc failed.\n"); + qedf->grcdump = NULL; + } + + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "grcdump: addr=%p, size=%u.\n", + qedf->grcdump, qedf->grcdump_size); + } + qedf_create_sysfs_ctx_attr(qedf); + + /* Initialize I/O tracing for this adapter */ + spin_lock_init(&qedf->io_trace_lock); + qedf->io_trace_idx = 0; + } + + init_completion(&qedf->flogi_compl); + + memset(&link_params, 0, sizeof(struct qed_link_params)); + link_params.link_up = true; + status = qed_ops->common->set_link(qedf->cdev, &link_params); + if (status) + QEDF_WARN(&(qedf->dbg_ctx), "set_link failed.\n"); + + /* Start/restart discovery */ + if (mode == QEDF_MODE_RECOVERY) + fcoe_ctlr_link_up(&qedf->ctlr); + else + fc_fabric_login(lport); + + /* All good */ + return 0; + +err7: + if (qedf->ll2_recv_wq) + destroy_workqueue(qedf->ll2_recv_wq); + fc_remove_host(qedf->lport->host); + scsi_remove_host(qedf->lport->host); +#ifdef CONFIG_DEBUG_FS + qedf_dbg_host_exit(&(qedf->dbg_ctx)); +#endif +err6: + qedf_cmd_mgr_free(qedf->cmd_mgr); +err5: + qed_ops->stop(qedf->cdev); +err4: + qedf_free_fcoe_pf_param(qedf); + qedf_sync_free_irqs(qedf); +err3: + qed_ops->common->slowpath_stop(qedf->cdev); +err2: + qed_ops->common->remove(qedf->cdev); +err1: + scsi_host_put(lport->host); +err0: + return rc; +} + +static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + return __qedf_probe(pdev, QEDF_MODE_NORMAL); +} + +static void __qedf_remove(struct pci_dev *pdev, int mode) +{ + struct qedf_ctx *qedf; + + if (!pdev) { + QEDF_ERR(NULL, "pdev is NULL.\n"); + return; + } + + qedf = pci_get_drvdata(pdev); + + /* + * Prevent race where we're in board disable work and then try to + * rmmod the module. 
+ */ + if (test_bit(QEDF_UNLOADING, &qedf->flags)) { + QEDF_ERR(&qedf->dbg_ctx, "Already removing PCI function.\n"); + return; + } + + if (mode != QEDF_MODE_RECOVERY) + set_bit(QEDF_UNLOADING, &qedf->flags); + + /* Logoff the fabric to upload all connections */ + if (mode == QEDF_MODE_RECOVERY) + fcoe_ctlr_link_down(&qedf->ctlr); + else + fc_fabric_logoff(qedf->lport); + qedf_wait_for_upload(qedf); + +#ifdef CONFIG_DEBUG_FS + qedf_dbg_host_exit(&(qedf->dbg_ctx)); +#endif + + /* Stop any link update handling */ + cancel_delayed_work_sync(&qedf->link_update); + destroy_workqueue(qedf->link_update_wq); + qedf->link_update_wq = NULL; + + if (qedf->timer_work_queue) + destroy_workqueue(qedf->timer_work_queue); + + /* Stop Light L2 */ + clear_bit(QEDF_LL2_STARTED, &qedf->flags); + qed_ops->ll2->stop(qedf->cdev); + if (qedf->ll2_recv_wq) + destroy_workqueue(qedf->ll2_recv_wq); + + /* Stop fastpath */ + qedf_sync_free_irqs(qedf); + qedf_destroy_sb(qedf); + + /* + * During recovery don't destroy OS constructs that represent the + * physical port. + */ + if (mode != QEDF_MODE_RECOVERY) { + qedf_free_grc_dump_buf(&qedf->grcdump); + qedf_remove_sysfs_ctx_attr(qedf); + + /* Remove all SCSI/libfc/libfcoe structures */ + fcoe_ctlr_destroy(&qedf->ctlr); + fc_lport_destroy(qedf->lport); + fc_remove_host(qedf->lport->host); + scsi_remove_host(qedf->lport->host); + } + + qedf_cmd_mgr_free(qedf->cmd_mgr); + + if (mode != QEDF_MODE_RECOVERY) { + fc_exch_mgr_free(qedf->lport); + fc_lport_free_stats(qedf->lport); + + /* Wait for all vports to be reaped */ + qedf_wait_for_vport_destroy(qedf); + } + + /* + * Now that all connections have been uploaded we can stop the + * rest of the qed operations + */ + qed_ops->stop(qedf->cdev); + + if (mode != QEDF_MODE_RECOVERY) { + if (qedf->dpc_wq) { + /* Stop general DPC handling */ + destroy_workqueue(qedf->dpc_wq); + qedf->dpc_wq = NULL; + } + } + + /* Final shutdown for the board */ + qedf_free_fcoe_pf_param(qedf); + if (mode != QEDF_MODE_RECOVERY) { + qed_ops->common->set_power_state(qedf->cdev, PCI_D0); + pci_set_drvdata(pdev, NULL); + } + qed_ops->common->slowpath_stop(qedf->cdev); + qed_ops->common->remove(qedf->cdev); + + mempool_destroy(qedf->io_mempool); + + /* Only reap the Scsi_host on a real removal */ + if (mode != QEDF_MODE_RECOVERY) + scsi_host_put(qedf->lport->host); +} + +static void qedf_remove(struct pci_dev *pdev) +{ + /* Check to make sure this function wasn't already disabled */ + if (!atomic_read(&pdev->enable_cnt)) + return; + + __qedf_remove(pdev, QEDF_MODE_NORMAL); +} + +/* + * Module Init/Remove + */ + +static int __init qedf_init(void) +{ + int ret; + + /* If debug=1 passed, set the default log mask */ + if (qedf_debug == QEDF_LOG_DEFAULT) + qedf_debug = QEDF_DEFAULT_LOG_MASK; + + /* Print driver banner */ + QEDF_INFO(NULL, QEDF_LOG_INFO, "%s v%s.\n", QEDF_DESCR, + QEDF_VERSION); + + /* Create kmem_cache for qedf_io_work structs */ + qedf_io_work_cache = kmem_cache_create("qedf_io_work_cache", + sizeof(struct qedf_io_work), 0, SLAB_HWCACHE_ALIGN, NULL); + if (qedf_io_work_cache == NULL) { + QEDF_ERR(NULL, "qedf_io_work_cache is NULL.\n"); + goto err1; + } + QEDF_INFO(NULL, QEDF_LOG_DISC, "qedf_io_work_cache=%p.\n", + qedf_io_work_cache); + + qed_ops = qed_get_fcoe_ops(); + if (!qed_ops) { + QEDF_ERR(NULL, "Failed to get qed fcoe operations\n"); + goto err1; + } + +#ifdef CONFIG_DEBUG_FS + qedf_dbg_init("qedf"); +#endif + + qedf_fc_transport_template = + fc_attach_transport(&qedf_fc_transport_fn); + if (!qedf_fc_transport_template) { + 
QEDF_ERR(NULL, "Could not register with FC transport\n"); + goto err2; + } + + qedf_fc_vport_transport_template = + fc_attach_transport(&qedf_fc_vport_transport_fn); + if (!qedf_fc_vport_transport_template) { + QEDF_ERR(NULL, "Could not register vport template with FC " + "transport\n"); + goto err3; + } + + qedf_io_wq = create_workqueue("qedf_io_wq"); + if (!qedf_io_wq) { + QEDF_ERR(NULL, "Could not create qedf_io_wq.\n"); + goto err4; + } + + qedf_cb_ops.get_login_failures = qedf_get_login_failures; + + ret = pci_register_driver(&qedf_pci_driver); + if (ret) { + QEDF_ERR(NULL, "Failed to register driver\n"); + goto err5; + } + + return 0; + +err5: + destroy_workqueue(qedf_io_wq); +err4: + fc_release_transport(qedf_fc_vport_transport_template); +err3: + fc_release_transport(qedf_fc_transport_template); +err2: +#ifdef CONFIG_DEBUG_FS + qedf_dbg_exit(); +#endif + qed_put_fcoe_ops(); +err1: + return -EINVAL; +} + +static void __exit qedf_cleanup(void) +{ + pci_unregister_driver(&qedf_pci_driver); + + destroy_workqueue(qedf_io_wq); + + fc_release_transport(qedf_fc_vport_transport_template); + fc_release_transport(qedf_fc_transport_template); +#ifdef CONFIG_DEBUG_FS + qedf_dbg_exit(); +#endif + qed_put_fcoe_ops(); + + kmem_cache_destroy(qedf_io_work_cache); +} + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("QLogic QEDF 25/40/50/100Gb FCoE Driver"); +MODULE_AUTHOR("QLogic Corporation"); +MODULE_VERSION(QEDF_VERSION); +module_init(qedf_init); +module_exit(qedf_cleanup); diff --git a/drivers/scsi/qedf/qedf_version.h b/drivers/scsi/qedf/qedf_version.h new file mode 100644 index 00000000000000..4ae5f537a4403a --- /dev/null +++ b/drivers/scsi/qedf/qedf_version.h @@ -0,0 +1,15 @@ +/* + * QLogic FCoE Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
+ */ + +#define QEDF_VERSION "8.10.7.0" +#define QEDF_DRIVER_MAJOR_VER 8 +#define QEDF_DRIVER_MINOR_VER 10 +#define QEDF_DRIVER_REV_VER 7 +#define QEDF_DRIVER_ENG_VER 0 + diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c index 95593627424140..59417199bf363a 100644 --- a/drivers/scsi/qedi/qedi_debugfs.c +++ b/drivers/scsi/qedi/qedi_debugfs.c @@ -14,7 +14,7 @@ #include #include -int do_not_recover; +int qedi_do_not_recover; static struct dentry *qedi_dbg_root; void @@ -74,22 +74,22 @@ qedi_dbg_exit(void) static ssize_t qedi_dbg_do_not_recover_enable(struct qedi_dbg_ctx *qedi_dbg) { - if (!do_not_recover) - do_not_recover = 1; + if (!qedi_do_not_recover) + qedi_do_not_recover = 1; QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n", - do_not_recover); + qedi_do_not_recover); return 0; } static ssize_t qedi_dbg_do_not_recover_disable(struct qedi_dbg_ctx *qedi_dbg) { - if (do_not_recover) - do_not_recover = 0; + if (qedi_do_not_recover) + qedi_do_not_recover = 0; QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n", - do_not_recover); + qedi_do_not_recover); return 0; } @@ -141,7 +141,7 @@ qedi_dbg_do_not_recover_cmd_read(struct file *filp, char __user *buffer, if (*ppos) return 0; - cnt = sprintf(buffer, "do_not_recover=%d\n", do_not_recover); + cnt = sprintf(buffer, "do_not_recover=%d\n", qedi_do_not_recover); cnt = min_t(int, count, cnt - *ppos); *ppos += cnt; return cnt; diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c index b1d3904ae8fd84..2bce3efc66a4b4 100644 --- a/drivers/scsi/qedi/qedi_fw.c +++ b/drivers/scsi/qedi/qedi_fw.c @@ -165,10 +165,9 @@ static void qedi_tmf_resp_work(struct work_struct *work) iscsi_block_session(session->cls_session); rval = qedi_cleanup_all_io(qedi, qedi_conn, qedi_cmd->task, true); if (rval) { - clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags); qedi_clear_task_idx(qedi, qedi_cmd->task_id); iscsi_unblock_session(session->cls_session); - return; + goto exit_tmf_resp; } iscsi_unblock_session(session->cls_session); @@ -177,6 +176,8 @@ static void qedi_tmf_resp_work(struct work_struct *work) spin_lock(&session->back_lock); __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0); spin_unlock(&session->back_lock); + +exit_tmf_resp: kfree(resp_hdr_ptr); clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags); } @@ -1460,9 +1461,9 @@ static void qedi_tmf_work(struct work_struct *work) get_itt(tmf_hdr->rtt), get_itt(ctask->itt), cmd->task_id, qedi_conn->iscsi_conn_id); - if (do_not_recover) { + if (qedi_do_not_recover) { QEDI_ERR(&qedi->dbg_ctx, "DONT SEND CLEANUP/ABORT %d\n", - do_not_recover); + qedi_do_not_recover); goto abort_ret; } diff --git a/drivers/scsi/qedi/qedi_gbl.h b/drivers/scsi/qedi/qedi_gbl.h index 8e488de88ece9f..63d793f460645d 100644 --- a/drivers/scsi/qedi/qedi_gbl.h +++ b/drivers/scsi/qedi/qedi_gbl.h @@ -12,8 +12,14 @@ #include "qedi_iscsi.h" +#ifdef CONFIG_DEBUG_FS +extern int qedi_do_not_recover; +#else +#define qedi_do_not_recover (0) +#endif + extern uint qedi_io_tracing; -extern int do_not_recover; + extern struct scsi_host_template qedi_host_template; extern struct iscsi_transport qedi_iscsi_transport; extern const struct qed_iscsi_ops *qedi_ops; diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c index b9f79d36142d5e..4cc474364c5056 100644 --- a/drivers/scsi/qedi/qedi_iscsi.c +++ b/drivers/scsi/qedi/qedi_iscsi.c @@ -833,7 +833,7 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, return ERR_PTR(ret); } - if 
(do_not_recover) { + if (qedi_do_not_recover) { ret = -ENOMEM; return ERR_PTR(ret); } @@ -957,7 +957,7 @@ static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) struct qedi_endpoint *qedi_ep; int ret = 0; - if (do_not_recover) + if (qedi_do_not_recover) return 1; qedi_ep = ep->dd_data; @@ -1025,7 +1025,7 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep) } if (test_bit(QEDI_IN_RECOVERY, &qedi->flags)) { - if (do_not_recover) { + if (qedi_do_not_recover) { QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Do not recover cid=0x%x\n", qedi_ep->iscsi_cid); @@ -1039,7 +1039,7 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep) } } - if (do_not_recover) + if (qedi_do_not_recover) goto ep_exit_recover; switch (qedi_ep->state) { diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index 5eda21d903e93d..92775a8b74b1cd 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -1805,7 +1805,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode) */ qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params); - qedi_setup_int(qedi); + rc = qedi_setup_int(qedi); if (rc) goto stop_iscsi_func; @@ -2007,6 +2007,7 @@ static void qedi_remove(struct pci_dev *pdev) static struct pci_device_id qedi_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165E) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x8084) }, { 0 }, }; MODULE_DEVICE_TABLE(pci, qedi_pci_tbl); diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig index 67c0d5aa32125c..de952935b5d2ca 100644 --- a/drivers/scsi/qla2xxx/Kconfig +++ b/drivers/scsi/qla2xxx/Kconfig @@ -3,6 +3,7 @@ config SCSI_QLA_FC depends on PCI && SCSI depends on SCSI_FC_ATTRS select FW_LOADER + select BTREE ---help--- This qla2xxx driver supports all QLogic Fibre Channel PCI and PCIe host adapters. 
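[Editorial illustration, not part of the patch.] The qedi hunks above rename the debugfs-only do_not_recover flag to qedi_do_not_recover and, in qedi_gbl.h, compile it down to a constant 0 whenever CONFIG_DEBUG_FS is disabled, so the callers in qedi_fw.c, qedi_iscsi.c and qedi_main.c can test it unconditionally. A minimal stand-alone sketch of that pattern follows; the example_* names are hypothetical stand-ins, only the #ifdef structure mirrors the patch:

/* Sketch of the CONFIG_DEBUG_FS-gated flag pattern used by qedi above. */

/* example_gbl.h */
#ifdef CONFIG_DEBUG_FS
extern int example_do_not_recover;	/* writable through a debugfs file */
#else
#define example_do_not_recover (0)	/* folds to a constant when debugfs is off */
#endif

/* example_debugfs.c -- only compiled when CONFIG_DEBUG_FS is set */
#ifdef CONFIG_DEBUG_FS
int example_do_not_recover;
#endif

/* Caller: compiles either way; with debugfs disabled the branch below is
 * dead code and the compiler removes it entirely. */
static int example_ep_poll(void)
{
	if (example_do_not_recover)
		return 1;	/* pretend the endpoint is usable; skip recovery */
	return 0;
}

The benefit of the pattern is that the recovery paths need no #ifdef CONFIG_DEBUG_FS blocks of their own, and a production build pays no cost for the debug knob.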
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index f201f40996205c..435ff7fd6384a0 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -2154,8 +2154,6 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) "Timer for the VP[%d] has stopped\n", vha->vp_idx); } - BUG_ON(atomic_read(&vha->vref_count)); - qla2x00_free_fcports(vha); mutex_lock(&ha->vport_lock); @@ -2163,7 +2161,10 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) clear_bit(vha->vp_idx, ha->vp_idx_map); mutex_unlock(&ha->vport_lock); - if (vha->qpair->vp_idx == vha->vp_idx) { + dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l, + vha->gnl.ldma); + + if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) { if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS) ql_log(ql_log_warn, vha, 0x7087, "Queue Pair delete failed.\n"); diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index 40ca75bbcb9d3e..84c9098cc089fc 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -13,28 +13,25 @@ /* BSG support for ELS/CT pass through */ void -qla2x00_bsg_job_done(void *data, void *ptr, int res) +qla2x00_bsg_job_done(void *ptr, int res) { - srb_t *sp = (srb_t *)ptr; - struct scsi_qla_host *vha = (scsi_qla_host_t *)data; + srb_t *sp = ptr; struct bsg_job *bsg_job = sp->u.bsg_job; struct fc_bsg_reply *bsg_reply = bsg_job->reply; bsg_reply->result = res; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); - sp->free(vha, sp); + sp->free(sp); } void -qla2x00_bsg_sp_free(void *data, void *ptr) +qla2x00_bsg_sp_free(void *ptr) { - srb_t *sp = (srb_t *)ptr; - struct scsi_qla_host *vha = sp->fcport->vha; + srb_t *sp = ptr; + struct qla_hw_data *ha = sp->vha->hw; struct bsg_job *bsg_job = sp->u.bsg_job; struct fc_bsg_request *bsg_request = bsg_job->request; - - struct qla_hw_data *ha = vha->hw; struct qla_mt_iocb_rqst_fx00 *piocb_rqst; if (sp->type == SRB_FXIOCB_BCMD) { @@ -62,7 +59,7 @@ qla2x00_bsg_sp_free(void *data, void *ptr) sp->type == SRB_FXIOCB_BCMD || sp->type == SRB_ELS_CMD_HST) kfree(sp->fcport); - qla2x00_rel_sp(vha, sp); + qla2x00_rel_sp(sp); } int @@ -394,7 +391,7 @@ qla2x00_process_els(struct bsg_job *bsg_job) if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x700e, "qla2x00_start_sp failed = %d\n", rval); - qla2x00_rel_sp(vha, sp); + qla2x00_rel_sp(sp); rval = -EIO; goto done_unmap_sg; } @@ -542,7 +539,7 @@ qla2x00_process_ct(struct bsg_job *bsg_job) if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x7017, "qla2x00_start_sp failed=%d.\n", rval); - qla2x00_rel_sp(vha, sp); + qla2x00_rel_sp(sp); rval = -EIO; goto done_free_fcport; } @@ -2578,6 +2575,6 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job) done: spin_unlock_irqrestore(&ha->hardware_lock, flags); - sp->free(vha, sp); + sp->free(sp); return 0; } diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 21d9fb7fc88796..51b4179469d185 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -2707,13 +2707,9 @@ ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id, "%-+5d 0 1 2 3 4 5 6 7 8 9 A B C D E F\n", size); ql_dbg(level, vha, id, "----- -----------------------------------------------\n"); - for (cnt = 0; cnt < size; cnt++, buf++) { - if (cnt % 16 == 0) - ql_dbg(level, vha, id, "%04x:", cnt & ~0xFU); - printk(" %02x", *buf); - if (cnt % 16 == 15) - printk("\n"); + for (cnt = 0; cnt < size; cnt += 16) { + ql_dbg(level, vha, id, "%04x: ", cnt); + 
print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1, + buf + cnt, min(16U, size - cnt), false); } - if (cnt % 16 != 0) - printk("\n"); } diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h index e1fc4e66966aea..c6bffe929fe7dc 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.h +++ b/drivers/scsi/qla2xxx/qla_dbg.h @@ -348,6 +348,7 @@ ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...); #define ql_dbg_tgt 0x00004000 /* Target mode */ #define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */ #define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */ +#define ql_dbg_tgt_dif 0x00000800 /* Target mode dif */ extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *, uint32_t, void **); diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 2f14adfab018d8..ae119018dfaae9 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -55,6 +56,8 @@ #include "qla_settings.h" +#define MODE_DUAL (MODE_TARGET | MODE_INITIATOR) + /* * Data bit definitions */ @@ -251,6 +254,14 @@ #define MAX_CMDSZ 16 /* SCSI maximum CDB size. */ #include "qla_fw.h" + +struct name_list_extended { + struct get_name_list_extended *l; + dma_addr_t ldma; + struct list_head fcports; /* protect by sess_list */ + u32 size; + u8 sent; +}; /* * Timeout timer counts in seconds */ @@ -309,6 +320,17 @@ struct els_logo_payload { uint8_t wwpn[WWN_SIZE]; }; +struct ct_arg { + void *iocb; + u16 nport_handle; + dma_addr_t req_dma; + dma_addr_t rsp_dma; + u32 req_size; + u32 rsp_size; + void *req; + void *rsp; +}; + /* * SRB extensions. */ @@ -320,6 +342,7 @@ struct srb_iocb { #define SRB_LOGIN_COND_PLOGI BIT_1 #define SRB_LOGIN_SKIP_PRLI BIT_2 uint16_t data[2]; + u32 iop[2]; } logio; struct { #define ELS_DCMD_TIMEOUT 20 @@ -372,6 +395,20 @@ struct srb_iocb { __le16 comp_status; struct completion comp; } abt; + struct ct_arg ctarg; +#define MAX_IOCB_MB_REG 28 +#define SIZEOF_IOCB_MB_REG (MAX_IOCB_MB_REG * sizeof(uint16_t)) + struct { + __le16 in_mb[MAX_IOCB_MB_REG]; /* from FW */ + __le16 out_mb[MAX_IOCB_MB_REG]; /* to FW */ + void *out, *in; + dma_addr_t out_dma, in_dma; + struct completion comp; + int rc; + } mbx; + struct { + struct imm_ntfy_from_isp *ntfy; + } nack; } u; struct timer_list timer; @@ -392,23 +429,31 @@ struct srb_iocb { #define SRB_FXIOCB_BCMD 11 #define SRB_ABT_CMD 12 #define SRB_ELS_DCMD 13 +#define SRB_MB_IOCB 14 +#define SRB_CT_PTHRU_CMD 15 +#define SRB_NACK_PLOGI 16 +#define SRB_NACK_PRLI 17 +#define SRB_NACK_LOGO 18 typedef struct srb { atomic_t ref_count; struct fc_port *fcport; + struct scsi_qla_host *vha; uint32_t handle; uint16_t flags; uint16_t type; - char *name; + const char *name; int iocbs; struct qla_qpair *qpair; + u32 gen1; /* scratch */ + u32 gen2; /* scratch */ union { struct srb_iocb iocb_cmd; struct bsg_job *bsg_job; struct srb_cmd scmd; } u; - void (*done)(void *, void *, int); - void (*free)(void *, void *); + void (*done)(void *, int); + void (*free)(void *); } srb_t; #define GET_CMD_SP(sp) (sp->u.scmd.cmd) @@ -1794,6 +1839,7 @@ typedef struct { #define SS_RESIDUAL_OVER BIT_10 #define SS_SENSE_LEN_VALID BIT_9 #define SS_RESPONSE_INFO_LEN_VALID BIT_8 +#define SS_SCSI_STATUS_BYTE 0xff #define SS_RESERVE_CONFLICT (BIT_4 | BIT_3) #define SS_BUSY_CONDITION BIT_3 @@ -1975,6 +2021,84 @@ struct mbx_entry { uint8_t port_name[WWN_SIZE]; }; +#ifndef IMMED_NOTIFY_TYPE +#define IMMED_NOTIFY_TYPE 0x0D /* Immediate 
notify entry. */ +/* + * ISP queue - immediate notify entry structure definition. + * This is sent by the ISP to the Target driver. + * This IOCB would have report of events sent by the + * initiator, that needs to be handled by the target + * driver immediately. + */ +struct imm_ntfy_from_isp { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + union { + struct { + uint32_t sys_define_2; /* System defined. */ + target_id_t target; + uint16_t lun; + uint8_t target_id; + uint8_t reserved_1; + uint16_t status_modifier; + uint16_t status; + uint16_t task_flags; + uint16_t seq_id; + uint16_t srr_rx_id; + uint32_t srr_rel_offs; + uint16_t srr_ui; +#define SRR_IU_DATA_IN 0x1 +#define SRR_IU_DATA_OUT 0x5 +#define SRR_IU_STATUS 0x7 + uint16_t srr_ox_id; + uint8_t reserved_2[28]; + } isp2x; + struct { + uint32_t reserved; + uint16_t nport_handle; + uint16_t reserved_2; + uint16_t flags; +#define NOTIFY24XX_FLAGS_GLOBAL_TPRLO BIT_1 +#define NOTIFY24XX_FLAGS_PUREX_IOCB BIT_0 + uint16_t srr_rx_id; + uint16_t status; + uint8_t status_subcode; + uint8_t fw_handle; + uint32_t exchange_address; + uint32_t srr_rel_offs; + uint16_t srr_ui; + uint16_t srr_ox_id; + union { + struct { + uint8_t node_name[8]; + } plogi; /* PLOGI/ADISC/PDISC */ + struct { + /* PRLI word 3 bit 0-15 */ + uint16_t wd3_lo; + uint8_t resv0[6]; + } prli; + struct { + uint8_t port_id[3]; + uint8_t resv1; + uint16_t nport_handle; + uint16_t resv2; + } req_els; + } u; + uint8_t port_name[8]; + uint8_t resv3[3]; + uint8_t vp_index; + uint32_t reserved_5; + uint8_t port_id[3]; + uint8_t reserved_6; + } isp24; + } u; + uint16_t reserved_7; + uint16_t ox_id; +} __packed; +#endif + /* * ISP request and response queue entry sizes */ @@ -2022,10 +2146,22 @@ typedef struct { #define FC4_TYPE_OTHER 0x0 #define FC4_TYPE_UNKNOWN 0xff +/* mailbox command 4G & above */ +struct mbx_24xx_entry { + uint8_t entry_type; + uint8_t entry_count; + uint8_t sys_define1; + uint8_t entry_status; + uint32_t handle; + uint16_t mb[28]; +}; + +#define IOCB_SIZE 64 + /* * Fibre channel port type. */ - typedef enum { +typedef enum { FCT_UNKNOWN, FCT_RSCN, FCT_SWITCH, @@ -2034,6 +2170,74 @@ typedef struct { FCT_TARGET } fc_port_type_t; +enum qla_sess_deletion { + QLA_SESS_DELETION_NONE = 0, + QLA_SESS_DELETION_IN_PROGRESS, + QLA_SESS_DELETED, +}; + +enum qlt_plogi_link_t { + QLT_PLOGI_LINK_SAME_WWN, + QLT_PLOGI_LINK_CONFLICT, + QLT_PLOGI_LINK_MAX +}; + +struct qlt_plogi_ack_t { + struct list_head list; + struct imm_ntfy_from_isp iocb; + port_id_t id; + int ref_count; + void *fcport; +}; + +struct ct_sns_desc { + struct ct_sns_pkt *ct_sns; + dma_addr_t ct_sns_dma; +}; + +enum discovery_state { + DSC_DELETED, + DSC_GID_PN, + DSC_GNL, + DSC_LOGIN_PEND, + DSC_LOGIN_FAILED, + DSC_GPDB, + DSC_GPSC, + DSC_UPD_FCPORT, + DSC_LOGIN_COMPLETE, + DSC_DELETE_PEND, +}; + +enum login_state { /* FW control Target side */ + DSC_LS_LLIOCB_SENT = 2, + DSC_LS_PLOGI_PEND, + DSC_LS_PLOGI_COMP, + DSC_LS_PRLI_PEND, + DSC_LS_PRLI_COMP, + DSC_LS_PORT_UNAVAIL, + DSC_LS_PRLO_PEND = 9, + DSC_LS_LOGO_PEND, +}; + +enum fcport_mgt_event { + FCME_RELOGIN = 1, + FCME_RSCN, + FCME_GIDPN_DONE, + FCME_PLOGI_DONE, /* Initiator side sent LLIOCB */ + FCME_GNL_DONE, + FCME_GPSC_DONE, + FCME_GPDB_DONE, + FCME_GPNID_DONE, + FCME_DELETE_DONE, +}; + +enum rscn_addr_format { + RSCN_PORT_ADDR, + RSCN_AREA_ADDR, + RSCN_DOM_ADDR, + RSCN_FAB_ADDR, +}; + /* * Fibre channel port structure. 
*/ @@ -2047,6 +2251,29 @@ typedef struct fc_port { uint16_t loop_id; uint16_t old_loop_id; + unsigned int conf_compl_supported:1; + unsigned int deleted:2; + unsigned int local:1; + unsigned int logout_on_delete:1; + unsigned int logo_ack_needed:1; + unsigned int keep_nport_handle:1; + unsigned int send_els_logo:1; + unsigned int login_pause:1; + unsigned int login_succ:1; + + struct fc_port *conflict; + unsigned char logout_completed; + int generation; + + struct se_session *se_sess; + struct kref sess_kref; + struct qla_tgt *tgt; + unsigned long expires; + struct list_head del_list_entry; + struct work_struct free_work; + + struct qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX]; + uint16_t tgt_id; uint16_t old_tgt_id; @@ -2075,8 +2302,32 @@ typedef struct fc_port { unsigned long retry_delay_timestamp; struct qla_tgt_sess *tgt_session; + struct ct_sns_desc ct_desc; + enum discovery_state disc_state; + enum login_state fw_login_state; + unsigned long plogi_nack_done_deadline; + + u32 login_gen, last_login_gen; + u32 rscn_gen, last_rscn_gen; + u32 chip_reset; + struct list_head gnl_entry; + struct work_struct del_work; + u8 iocb[IOCB_SIZE]; } fc_port_t; +#define QLA_FCPORT_SCAN 1 +#define QLA_FCPORT_FOUND 2 + +struct event_arg { + enum fcport_mgt_event event; + fc_port_t *fcport; + srb_t *sp; + port_id_t id; + u16 data[2], rc; + u8 port_name[WWN_SIZE]; + u32 iop[2]; +}; + #include "qla_mr.h" /* @@ -2154,6 +2405,10 @@ static const char * const port_state_str[] = { #define GFT_ID_REQ_SIZE (16 + 4) #define GFT_ID_RSP_SIZE (16 + 32) +#define GID_PN_CMD 0x121 +#define GID_PN_REQ_SIZE (16 + 8) +#define GID_PN_RSP_SIZE (16 + 4) + #define RFT_ID_CMD 0x217 #define RFT_ID_REQ_SIZE (16 + 4 + 32) #define RFT_ID_RSP_SIZE 16 @@ -2479,6 +2734,10 @@ struct ct_sns_req { uint8_t reserved; uint8_t port_name[3]; } gff_id; + + struct { + uint8_t port_name[8]; + } gid_pn; } req; }; @@ -2558,6 +2817,10 @@ struct ct_sns_rsp { struct { uint8_t fc4_features[128]; } gff_id; + struct { + uint8_t reserved; + uint8_t port_id[3]; + } gid_pn; } rsp; }; @@ -2699,11 +2962,11 @@ struct isp_operations { uint16_t (*calc_req_entries) (uint16_t); void (*build_iocbs) (srb_t *, cmd_entry_t *, uint16_t); - void * (*prep_ms_iocb) (struct scsi_qla_host *, uint32_t, uint32_t); - void * (*prep_ms_fdmi_iocb) (struct scsi_qla_host *, uint32_t, + void *(*prep_ms_iocb) (struct scsi_qla_host *, struct ct_arg *); + void *(*prep_ms_fdmi_iocb) (struct scsi_qla_host *, uint32_t, uint32_t); - uint8_t * (*read_nvram) (struct scsi_qla_host *, uint8_t *, + uint8_t *(*read_nvram) (struct scsi_qla_host *, uint8_t *, uint32_t, uint32_t); int (*write_nvram) (struct scsi_qla_host *, uint8_t *, uint32_t, uint32_t); @@ -2765,13 +3028,21 @@ enum qla_work_type { QLA_EVT_AEN, QLA_EVT_IDC_ACK, QLA_EVT_ASYNC_LOGIN, - QLA_EVT_ASYNC_LOGIN_DONE, QLA_EVT_ASYNC_LOGOUT, QLA_EVT_ASYNC_LOGOUT_DONE, QLA_EVT_ASYNC_ADISC, QLA_EVT_ASYNC_ADISC_DONE, QLA_EVT_UEVENT, QLA_EVT_AENFX, + QLA_EVT_GIDPN, + QLA_EVT_GPNID, + QLA_EVT_GPNID_DONE, + QLA_EVT_NEW_SESS, + QLA_EVT_GPDB, + QLA_EVT_GPSC, + QLA_EVT_UPD_FCPORT, + QLA_EVT_GNL, + QLA_EVT_NACK, }; @@ -2807,6 +3078,23 @@ struct qla_work_evt { struct { srb_t *sp; } iosb; + struct { + port_id_t id; + } gpnid; + struct { + port_id_t id; + u8 port_name[8]; + void *pla; + } new_sess; + struct { /*Get PDB, Get Speed, update fcport, gnl, gidpn */ + fc_port_t *fcport; + u8 opt; + } fcport; + struct { + fc_port_t *fcport; + u8 iocb[IOCB_SIZE]; + int type; + } nack; } u; }; @@ -2825,6 +3113,16 @@ struct qla_chip_state_84xx { uint32_t 
gold_fw_version; }; +struct qla_dif_statistics { + uint64_t dif_input_bytes; + uint64_t dif_output_bytes; + uint64_t dif_input_requests; + uint64_t dif_output_requests; + uint32_t dif_guard_err; + uint32_t dif_ref_tag_err; + uint32_t dif_app_tag_err; +}; + struct qla_statistics { uint32_t total_isp_aborts; uint64_t input_bytes; @@ -2837,6 +3135,8 @@ struct qla_statistics { uint32_t stat_max_pend_cmds; uint32_t stat_max_qfull_cmds_alloc; uint32_t stat_max_qfull_cmds_dropped; + + struct qla_dif_statistics qla_dif_stats; }; struct bidi_statistics { @@ -2844,6 +3144,16 @@ struct bidi_statistics { unsigned long long transfer_bytes; }; +struct qla_tc_param { + struct scsi_qla_host *vha; + uint32_t blk_sz; + uint32_t bufflen; + struct scatterlist *sg; + struct scatterlist *prot_sg; + struct crc_context *ctx; + uint8_t *ctx_dsd_alloced; +}; + /* Multi queue support */ #define MBC_INITIALIZE_MULTIQ 0x1f #define QLA_QUE_PAGE 0X1000 @@ -2943,6 +3253,7 @@ struct qla_qpair { struct qla_hw_data *hw; struct work_struct q_work; struct list_head qp_list_elem; /* vha->qp_list */ + struct scsi_qla_host *vha; }; /* Place holder for FW buffer parameters */ @@ -2963,7 +3274,6 @@ struct qlt_hw_data { /* Protected by hw lock */ uint32_t enable_class_2:1; uint32_t enable_explicit_conf:1; - uint32_t ini_mode_force_reverse:1; uint32_t node_name_set:1; dma_addr_t atio_dma; /* Physical address. */ @@ -2991,6 +3301,8 @@ struct qlt_hw_data { uint8_t tgt_node_name[WWN_SIZE]; struct dentry *dfs_tgt_sess; + struct dentry *dfs_tgt_port_database; + struct list_head q_full_list; uint32_t num_pend_cmds; uint32_t num_qfull_cmds_alloc; @@ -3000,6 +3312,7 @@ struct qlt_hw_data { spinlock_t sess_lock; int rspq_vector_cpuid; spinlock_t atio_lock ____cacheline_aligned; + struct btree_head32 host_map; }; #define MAX_QFULL_CMDS_ALLOC 8192 @@ -3009,6 +3322,10 @@ struct qlt_hw_data { #define LEAK_EXCHG_THRESH_HOLD_PERCENT 75 /* 75 percent */ +#define QLA_EARLY_LINKUP(_ha) \ + ((_ha->flags.n2n_ae || _ha->flags.lip_ae) && \ + _ha->flags.fw_started && !_ha->flags.fw_init_done) + /* * Qlogic host adapter specific data structure. */ @@ -3058,7 +3375,11 @@ struct qla_hw_data { uint32_t fawwpn_enabled:1; uint32_t exlogins_enabled:1; uint32_t exchoffld_enabled:1; - /* 35 bits */ + + uint32_t lip_ae:1; + uint32_t n2n_ae:1; + uint32_t fw_started:1; + uint32_t fw_init_done:1; } flags; /* This spinlock is used to protect "io transactions", you must @@ -3115,6 +3436,7 @@ struct qla_hw_data { #define FLOGI_SP_SUPPORT BIT_13 uint8_t port_no; /* Physical port of adapter */ + uint8_t exch_starvation; /* Timeout timers. */ uint8_t loop_down_abort_time; /* port down timer */ @@ -3150,7 +3472,6 @@ struct qla_hw_data { #define P2P_LOOP 3 uint8_t interrupts_on; uint32_t isp_abort_cnt; - #define PCI_DEVICE_ID_QLOGIC_ISP2532 0x2532 #define PCI_DEVICE_ID_QLOGIC_ISP8432 0x8432 #define PCI_DEVICE_ID_QLOGIC_ISP8001 0x8001 @@ -3631,6 +3952,7 @@ typedef struct scsi_qla_host { struct list_head vp_fcports; /* list of fcports */ struct list_head work_list; spinlock_t work_lock; + struct work_struct iocb_work; /* Commonly used flags and state information. 
*/ struct Scsi_Host *host; @@ -3682,7 +4004,7 @@ typedef struct scsi_qla_host { #define FCOE_CTX_RESET_NEEDED 18 /* Initiate FCoE context reset */ #define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */ #define ISP_QUIESCE_NEEDED 20 /* Driver need some quiescence */ -#define SCR_PENDING 21 /* SCR in target mode */ +#define FREE_BIT 21 #define PORT_UPDATE_NEEDED 22 #define FX00_RESET_RECOVERY 23 #define FX00_TARGET_SCAN 24 @@ -3736,7 +4058,9 @@ typedef struct scsi_qla_host { /* list of commands waiting on workqueue */ struct list_head qla_cmd_list; struct list_head qla_sess_op_cmd_list; + struct list_head unknown_atio_list; spinlock_t cmd_list_lock; + struct delayed_work unknown_atio_work; /* Counter to detect races between ELS and RSCN events */ atomic_t generation_tick; @@ -3788,6 +4112,11 @@ typedef struct scsi_qla_host { struct qla8044_reset_template reset_tmplt; struct qla_tgt_counters tgt_counters; uint16_t bbcr; + struct name_list_extended gnl; + /* Count of active session/fcport */ + int fcport_count; + wait_queue_head_t fcport_waitQ; + wait_queue_head_t vref_waitq; } scsi_qla_host_t; struct qla27xx_image_status { @@ -3843,14 +4172,17 @@ struct qla2_sgx { mb(); \ if (__vha->flags.delete_progress) { \ atomic_dec(&__vha->vref_count); \ + wake_up(&__vha->vref_waitq); \ __bail = 1; \ } else { \ __bail = 0; \ } \ } while (0) -#define QLA_VHA_MARK_NOT_BUSY(__vha) \ +#define QLA_VHA_MARK_NOT_BUSY(__vha) do { \ atomic_dec(&__vha->vref_count); \ + wake_up(&__vha->vref_waitq); \ +} while (0) \ #define QLA_QPAIR_MARK_BUSY(__qpair, __bail) do { \ atomic_inc(&__qpair->ref_count); \ diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c index 34272fde8a5b0d..989e17b0758cd5 100644 --- a/drivers/scsi/qla2xxx/qla_dfs.c +++ b/drivers/scsi/qla2xxx/qla_dfs.c @@ -18,20 +18,19 @@ qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused) scsi_qla_host_t *vha = s->private; struct qla_hw_data *ha = vha->hw; unsigned long flags; - struct qla_tgt_sess *sess = NULL; - struct qla_tgt *tgt= vha->vha_tgt.qla_tgt; + struct fc_port *sess = NULL; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; - seq_printf(s, "%s\n",vha->host_str); + seq_printf(s, "%s\n", vha->host_str); if (tgt) { - seq_printf(s, "Port ID Port Name Handle\n"); + seq_puts(s, "Port ID Port Name Handle\n"); spin_lock_irqsave(&ha->tgt.sess_lock, flags); - list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) { + list_for_each_entry(sess, &vha->vp_fcports, list) seq_printf(s, "%02x:%02x:%02x %8phC %d\n", - sess->s_id.b.domain,sess->s_id.b.area, - sess->s_id.b.al_pa, sess->port_name, - sess->loop_id); - } + sess->d_id.b.domain, sess->d_id.b.area, + sess->d_id.b.al_pa, sess->port_name, + sess->loop_id); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); } @@ -45,7 +44,6 @@ qla2x00_dfs_tgt_sess_open(struct inode *inode, struct file *file) return single_open(file, qla2x00_dfs_tgt_sess_show, vha); } - static const struct file_operations dfs_tgt_sess_ops = { .open = qla2x00_dfs_tgt_sess_open, .read = seq_read, @@ -53,6 +51,78 @@ static const struct file_operations dfs_tgt_sess_ops = { .release = single_release, }; +static int +qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused) +{ + scsi_qla_host_t *vha = s->private; + struct qla_hw_data *ha = vha->hw; + struct gid_list_info *gid_list; + dma_addr_t gid_list_dma; + fc_port_t fc_port; + char *id_iter; + int rc, i; + uint16_t entries, loop_id; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + + seq_printf(s, "%s\n", vha->host_str); + if (tgt) { + gid_list = 
dma_alloc_coherent(&ha->pdev->dev, + qla2x00_gid_list_size(ha), + &gid_list_dma, GFP_KERNEL); + if (!gid_list) { + ql_dbg(ql_dbg_user, vha, 0x705c, + "DMA allocation failed for %u\n", + qla2x00_gid_list_size(ha)); + return 0; + } + + rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, + &entries); + if (rc != QLA_SUCCESS) + goto out_free_id_list; + + id_iter = (char *)gid_list; + + seq_puts(s, "Port Name Port ID Loop ID\n"); + + for (i = 0; i < entries; i++) { + struct gid_list_info *gid = + (struct gid_list_info *)id_iter; + loop_id = le16_to_cpu(gid->loop_id); + memset(&fc_port, 0, sizeof(fc_port_t)); + + fc_port.loop_id = loop_id; + + rc = qla24xx_gpdb_wait(vha, &fc_port, 0); + seq_printf(s, "%8phC %02x%02x%02x %d\n", + fc_port.port_name, fc_port.d_id.b.domain, + fc_port.d_id.b.area, fc_port.d_id.b.al_pa, + fc_port.loop_id); + id_iter += ha->gid_list_info_size; + } +out_free_id_list: + dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), + gid_list, gid_list_dma); + } + + return 0; +} + +static int +qla2x00_dfs_tgt_port_database_open(struct inode *inode, struct file *file) +{ + scsi_qla_host_t *vha = inode->i_private; + + return single_open(file, qla2x00_dfs_tgt_port_database_show, vha); +} + +static const struct file_operations dfs_tgt_port_database_ops = { + .open = qla2x00_dfs_tgt_port_database_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + static int qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused) { @@ -115,6 +185,21 @@ qla_dfs_tgt_counters_show(struct seq_file *s, void *unused) seq_printf(s, "num Q full sent = %lld\n", vha->tgt_counters.num_q_full_sent); + /* DIF stats */ + seq_printf(s, "DIF Inp Bytes = %lld\n", + vha->qla_stats.qla_dif_stats.dif_input_bytes); + seq_printf(s, "DIF Outp Bytes = %lld\n", + vha->qla_stats.qla_dif_stats.dif_output_bytes); + seq_printf(s, "DIF Inp Req = %lld\n", + vha->qla_stats.qla_dif_stats.dif_input_requests); + seq_printf(s, "DIF Outp Req = %lld\n", + vha->qla_stats.qla_dif_stats.dif_output_requests); + seq_printf(s, "DIF Guard err = %d\n", + vha->qla_stats.qla_dif_stats.dif_guard_err); + seq_printf(s, "DIF Ref tag err = %d\n", + vha->qla_stats.qla_dif_stats.dif_ref_tag_err); + seq_printf(s, "DIF App tag err = %d\n", + vha->qla_stats.qla_dif_stats.dif_app_tag_err); return 0; } @@ -282,6 +367,14 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha) goto out; } + ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database", + S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_port_database_ops); + if (!ha->tgt.dfs_tgt_port_database) { + ql_log(ql_log_warn, vha, 0xffff, + "Unable to create debugFS tgt_port_database node.\n"); + goto out; + } + ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha, &dfs_fce_ops); if (!ha->dfs_fce) { @@ -312,6 +405,11 @@ qla2x00_dfs_remove(scsi_qla_host_t *vha) ha->tgt.dfs_tgt_sess = NULL; } + if (ha->tgt.dfs_tgt_port_database) { + debugfs_remove(ha->tgt.dfs_tgt_port_database); + ha->tgt.dfs_tgt_port_database = NULL; + } + if (ha->dfs_fw_resource_cnt) { debugfs_remove(ha->dfs_fw_resource_cnt); ha->dfs_fw_resource_cnt = NULL; diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index 8a2368b32dece2..1f808928763b4e 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h @@ -72,6 +72,37 @@ struct port_database_24xx { uint8_t reserved_3[24]; }; +/* + * MB 75h returns a list of DB entries similar to port_database_24xx(64B). + * However, in this case it returns 1st 40 bytes. 
+ */ +struct get_name_list_extended { + __le16 flags; + u8 current_login_state; + u8 last_login_state; + u8 hard_address[3]; + u8 reserved_1; + u8 port_id[3]; + u8 sequence_id; + __le16 port_timer; + __le16 nport_handle; /* N_PORT handle. */ + __le16 receive_data_size; + __le16 reserved_2; + + /* PRLI SVC Param are Big endian */ + u8 prli_svc_param_word_0[2]; /* Bits 15-0 of word 0 */ + u8 prli_svc_param_word_3[2]; /* Bits 15-0 of word 3 */ + u8 port_name[WWN_SIZE]; + u8 node_name[WWN_SIZE]; +}; + +/* MB 75h: This is the short version of the database */ +struct get_name_list { + u8 port_node_name[WWN_SIZE]; /* B7 most sig, B0 least sig */ + __le16 nport_handle; + u8 reserved; +}; + struct vp_database_24xx { uint16_t vp_status; uint8_t options; @@ -1270,27 +1301,76 @@ struct vp_config_entry_24xx { }; #define VP_RPT_ID_IOCB_TYPE 0x32 /* Report ID Acquisition entry. */ +enum VP_STATUS { + VP_STAT_COMPL, + VP_STAT_FAIL, + VP_STAT_ID_CHG, + VP_STAT_SNS_TO, /* timeout */ + VP_STAT_SNS_RJT, + VP_STAT_SCR_TO, /* timeout */ + VP_STAT_SCR_RJT, +}; + +enum VP_FLAGS { + VP_FLAGS_CON_FLOOP = 1, + VP_FLAGS_CON_P2P = 2, + VP_FLAGS_CON_FABRIC = 3, + VP_FLAGS_NAME_VALID = BIT_5, +}; + struct vp_rpt_id_entry_24xx { uint8_t entry_type; /* Entry type. */ uint8_t entry_count; /* Entry count. */ uint8_t sys_define; /* System defined. */ uint8_t entry_status; /* Entry Status. */ - - uint32_t handle; /* System handle. */ - - uint16_t vp_count; /* Format 0 -- | VP setup | VP acq |. */ - /* Format 1 -- | VP count |. */ - uint16_t vp_idx; /* Format 0 -- Reserved. */ - /* Format 1 -- VP status and index. */ + uint32_t resv1; + uint8_t vp_acquired; + uint8_t vp_setup; + uint8_t vp_idx; /* Format 0=reserved */ + uint8_t vp_status; /* Format 0=reserved */ uint8_t port_id[3]; uint8_t format; - - uint8_t vp_idx_map[16]; - - uint8_t reserved_4[24]; - uint16_t bbcr; - uint8_t reserved_5[6]; + union { + struct { + /* format 0 loop */ + uint8_t vp_idx_map[16]; + uint8_t reserved_4[32]; + } f0; + struct { + /* format 1 fabric */ + uint8_t vpstat1_subcode; /* vp_status=1 subcode */ + uint8_t flags; + uint16_t fip_flags; + uint8_t rsv2[12]; + + uint8_t ls_rjt_vendor; + uint8_t ls_rjt_explanation; + uint8_t ls_rjt_reason; + uint8_t rsv3[5]; + + uint8_t port_name[8]; + uint8_t node_name[8]; + uint16_t bbcr; + uint8_t reserved_5[6]; + } f1; + struct { /* format 2: N2N direct connect */ + uint8_t vpstat1_subcode; + uint8_t flags; + uint16_t rsv6; + uint8_t rsv2[12]; + + uint8_t ls_rjt_vendor; + uint8_t ls_rjt_explanation; + uint8_t ls_rjt_reason; + uint8_t rsv3[5]; + + uint8_t port_name[8]; + uint8_t node_name[8]; + uint32_t remote_nport_id; + uint32_t reserved_5; + } f2; + } u; }; #define VF_EVFP_IOCB_TYPE 0x26 /* Exchange Virtual Fabric Parameters entry. 
*/ diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index afa0116a163b12..5b2451745e9f47 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -73,6 +73,10 @@ extern void qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *, uint16_t *); extern void qla2x00_async_adisc_done(struct scsi_qla_host *, fc_port_t *, uint16_t *); +struct qla_work_evt *qla2x00_alloc_work(struct scsi_qla_host *, + enum qla_work_type); +extern int qla24xx_async_gnl(struct scsi_qla_host *, fc_port_t *); +int qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e); extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *); extern void *qla2x00_alloc_iocbs_ready(struct scsi_qla_host *, srb_t *); extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *); @@ -94,6 +98,13 @@ extern uint8_t qla27xx_find_valid_image(struct scsi_qla_host *); extern struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *, int, int); extern int qla2xxx_delete_qpair(struct scsi_qla_host *, struct qla_qpair *); +void qla2x00_fcport_event_handler(scsi_qla_host_t *, struct event_arg *); +int qla24xx_async_gpdb(struct scsi_qla_host *, fc_port_t *, u8); +int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *, + struct imm_ntfy_from_isp *, int); +int qla24xx_post_newsess_work(struct scsi_qla_host *, port_id_t *, u8 *, + void *); +int qla24xx_fcport_handle_login(struct scsi_qla_host *, fc_port_t *); /* * Global Data in qla_os.c source file. @@ -127,6 +138,7 @@ extern int ql2xmdenable; extern int ql2xexlogins; extern int ql2xexchoffld; extern int ql2xfwholdabts; +extern int ql2xmvasynctoatio; extern int qla2x00_loop_reset(scsi_qla_host_t *); extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); @@ -135,8 +147,6 @@ extern int qla2x00_post_aen_work(struct scsi_qla_host *, enum extern int qla2x00_post_idc_ack_work(struct scsi_qla_host *, uint16_t *); extern int qla2x00_post_async_login_work(struct scsi_qla_host *, fc_port_t *, uint16_t *); -extern int qla2x00_post_async_login_done_work(struct scsi_qla_host *, - fc_port_t *, uint16_t *); extern int qla2x00_post_async_logout_work(struct scsi_qla_host *, fc_port_t *, uint16_t *); extern int qla2x00_post_async_logout_done_work(struct scsi_qla_host *, @@ -176,9 +186,14 @@ extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32); extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32); extern void qla2x00_disable_board_on_pci_error(struct work_struct *); -extern void qla2x00_sp_compl(void *, void *, int); -extern void qla2xxx_qpair_sp_free_dma(void *, void *); -extern void qla2xxx_qpair_sp_compl(void *, void *, int); +extern void qla2x00_sp_compl(void *, int); +extern void qla2xxx_qpair_sp_free_dma(void *); +extern void qla2xxx_qpair_sp_compl(void *, int); +extern int qla24xx_post_upd_fcport_work(struct scsi_qla_host *, fc_port_t *); +void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *, + uint16_t *); +int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *); +int qla24xx_async_abort_cmd(srb_t *); /* * Global Functions in qla_mid.c source file. 
@@ -201,7 +216,7 @@ extern void qla2x00_do_dpc_all_vps(scsi_qla_host_t *); extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *); extern scsi_qla_host_t * qla24xx_create_vhost(struct fc_vport *); -extern void qla2x00_sp_free_dma(void *, void *); +extern void qla2x00_sp_free_dma(void *); extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *); extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int); @@ -242,11 +257,11 @@ extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *); extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *); extern int qla2x00_issue_marker(scsi_qla_host_t *, int); extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *, - uint32_t *, uint16_t, struct qla_tgt_cmd *); + uint32_t *, uint16_t, struct qla_tc_param *); extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *, - uint32_t *, uint16_t, struct qla_tgt_cmd *); + uint32_t *, uint16_t, struct qla_tc_param *); extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *, - uint32_t *, uint16_t, struct qla_tgt_cmd *); + uint32_t *, uint16_t, struct qla_tc_param *); extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *); extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *); extern int qla24xx_build_scsi_crc_2_iocbs(srb_t *, @@ -301,9 +316,6 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *, uint8_t *, uint8_t *, uint16_t *); extern int qla2x00_init_firmware(scsi_qla_host_t *, uint16_t); -extern int -qla2x00_get_node_name_list(scsi_qla_host_t *, void **, int *); - extern int qla2x00_get_port_database(scsi_qla_host_t *, fc_port_t *, uint8_t); @@ -357,7 +369,7 @@ qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, struct link_statistics *, extern int qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *, - dma_addr_t, uint); + dma_addr_t, uint16_t); extern int qla24xx_abort_command(srb_t *); extern int qla24xx_async_abort_command(srb_t *); @@ -461,6 +473,13 @@ qla2x00_dump_mctp_data(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t); extern int qla26xx_dport_diagnostics(scsi_qla_host_t *, void *, uint, uint); +int qla24xx_send_mb_cmd(struct scsi_qla_host *, mbx_cmd_t *); +int qla24xx_gpdb_wait(struct scsi_qla_host *, fc_port_t *, u8); +int qla24xx_gidlist_wait(struct scsi_qla_host *, void *, dma_addr_t, + uint16_t *); +int __qla24xx_parse_gpdb(struct scsi_qla_host *, fc_port_t *, + struct port_database_24xx *); + /* * Global Function Prototypes in qla_isr.c source file. */ @@ -483,6 +502,9 @@ qla2x00_process_completed_request(struct scsi_qla_host *, struct req_que *, uint32_t); extern irqreturn_t qla2xxx_msix_rsp_q(int irq, void *dev_id); +fc_port_t *qla2x00_find_fcport_by_loopid(scsi_qla_host_t *, uint16_t); +fc_port_t *qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *, u8 *, u8); +fc_port_t *qla2x00_find_fcport_by_nportid(scsi_qla_host_t *, port_id_t *, u8); /* * Global Function Prototypes in qla_sup.c source file. @@ -574,8 +596,8 @@ extern void qla2xxx_dump_post_process(scsi_qla_host_t *, int); /* * Global Function Prototypes in qla_gs.c source file. 
*/ -extern void *qla2x00_prep_ms_iocb(scsi_qla_host_t *, uint32_t, uint32_t); -extern void *qla24xx_prep_ms_iocb(scsi_qla_host_t *, uint32_t, uint32_t); +extern void *qla2x00_prep_ms_iocb(scsi_qla_host_t *, struct ct_arg *); +extern void *qla24xx_prep_ms_iocb(scsi_qla_host_t *, struct ct_arg *); extern int qla2x00_ga_nxt(scsi_qla_host_t *, fc_port_t *); extern int qla2x00_gid_pt(scsi_qla_host_t *, sw_info_t *); extern int qla2x00_gpn_id(scsi_qla_host_t *, sw_info_t *); @@ -591,6 +613,23 @@ extern int qla2x00_fdmi_register(scsi_qla_host_t *); extern int qla2x00_gfpn_id(scsi_qla_host_t *, sw_info_t *); extern int qla2x00_gpsc(scsi_qla_host_t *, sw_info_t *); extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t); +extern int qla2x00_chk_ms_status(scsi_qla_host_t *, ms_iocb_entry_t *, + struct ct_sns_rsp *, const char *); +extern void qla2x00_async_iocb_timeout(void *data); +extern int qla24xx_async_gidpn(scsi_qla_host_t *, fc_port_t *); +int qla24xx_post_gidpn_work(struct scsi_qla_host *, fc_port_t *); +void qla24xx_handle_gidpn_event(scsi_qla_host_t *, struct event_arg *); + +extern void qla2x00_free_fcport(fc_port_t *); + +extern int qla24xx_post_gpnid_work(struct scsi_qla_host *, port_id_t *); +extern int qla24xx_async_gpnid(scsi_qla_host_t *, port_id_t *); +void qla24xx_async_gpnid_done(scsi_qla_host_t *, srb_t*); +void qla24xx_handle_gpnid_event(scsi_qla_host_t *, struct event_arg *); + +int qla24xx_post_gpsc_work(struct scsi_qla_host *, fc_port_t *); +int qla24xx_async_gpsc(scsi_qla_host_t *, fc_port_t *); +int qla2x00_mgmt_svr_login(scsi_qla_host_t *); /* * Global Function Prototypes in qla_attr.c source file. @@ -702,10 +741,10 @@ extern int qla82xx_restart_isp(scsi_qla_host_t *); /* IOCB related functions */ extern int qla82xx_start_scsi(srb_t *); -extern void qla2x00_sp_free(void *, void *); +extern void qla2x00_sp_free(void *); extern void qla2x00_sp_timeout(unsigned long); -extern void qla2x00_bsg_job_done(void *, void *, int); -extern void qla2x00_bsg_sp_free(void *, void *); +extern void qla2x00_bsg_job_done(void *, int); +extern void qla2x00_bsg_sp_free(void *); extern void qla2x00_start_iocbs(struct scsi_qla_host *, struct req_que *); /* Interrupt related */ @@ -803,4 +842,19 @@ extern int qla_get_exchoffld_status(scsi_qla_host_t *, uint16_t *, uint16_t *); extern int qla_set_exchoffld_mem_cfg(scsi_qla_host_t *, dma_addr_t); extern void qlt_handle_abts_recv(struct scsi_qla_host *, response_t *); +int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *, + struct imm_ntfy_from_isp *, int); +void qla24xx_do_nack_work(struct scsi_qla_host *, struct qla_work_evt *); +void qlt_plogi_ack_link(struct scsi_qla_host *, struct qlt_plogi_ack_t *, + struct fc_port *, enum qlt_plogi_link_t); +void qlt_plogi_ack_unref(struct scsi_qla_host *, struct qlt_plogi_ack_t *); +extern void qlt_schedule_sess_for_deletion(struct fc_port *, bool); +extern void qlt_schedule_sess_for_deletion_lock(struct fc_port *); +extern struct fc_port *qlt_find_sess_invalidate_other(scsi_qla_host_t *, + uint64_t wwn, port_id_t port_id, uint16_t loop_id, struct fc_port **); +void qla24xx_delete_sess_fn(struct work_struct *); +void qlt_unknown_atio_work_fn(struct work_struct *); +void qlt_update_host_map(struct scsi_qla_host *, port_id_t); +void qlt_remove_target_resources(struct qla_hw_data *); + #endif /* _QLA_GBL_H */ diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index ee3df87948067c..ab0f873fd6a1d9 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ 
b/drivers/scsi/qla2xxx/qla_gs.c @@ -24,12 +24,12 @@ static int qla2x00_sns_rnn_id(scsi_qla_host_t *); * Returns a pointer to the @ha's ms_iocb. */ void * -qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size) +qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg) { struct qla_hw_data *ha = vha->hw; ms_iocb_entry_t *ms_pkt; - ms_pkt = ha->ms_iocb; + ms_pkt = (ms_iocb_entry_t *)arg->iocb; memset(ms_pkt, 0, sizeof(ms_iocb_entry_t)); ms_pkt->entry_type = MS_IOCB_TYPE; @@ -39,15 +39,15 @@ qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size) ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); ms_pkt->cmd_dsd_count = cpu_to_le16(1); ms_pkt->total_dsd_count = cpu_to_le16(2); - ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size); - ms_pkt->req_bytecount = cpu_to_le32(req_size); + ms_pkt->rsp_bytecount = cpu_to_le32(arg->rsp_size); + ms_pkt->req_bytecount = cpu_to_le32(arg->req_size); - ms_pkt->dseg_req_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma)); - ms_pkt->dseg_req_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); + ms_pkt->dseg_req_address[0] = cpu_to_le32(LSD(arg->req_dma)); + ms_pkt->dseg_req_address[1] = cpu_to_le32(MSD(arg->req_dma)); ms_pkt->dseg_req_length = ms_pkt->req_bytecount; - ms_pkt->dseg_rsp_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma)); - ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); + ms_pkt->dseg_rsp_address[0] = cpu_to_le32(LSD(arg->rsp_dma)); + ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(arg->rsp_dma)); ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount; vha->qla_stats.control_requests++; @@ -64,29 +64,29 @@ qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size) * Returns a pointer to the @ha's ms_iocb. */ void * -qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size) +qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg) { struct qla_hw_data *ha = vha->hw; struct ct_entry_24xx *ct_pkt; - ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb; + ct_pkt = (struct ct_entry_24xx *)arg->iocb; memset(ct_pkt, 0, sizeof(struct ct_entry_24xx)); ct_pkt->entry_type = CT_IOCB_TYPE; ct_pkt->entry_count = 1; - ct_pkt->nport_handle = cpu_to_le16(NPH_SNS); + ct_pkt->nport_handle = cpu_to_le16(arg->nport_handle); ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); ct_pkt->cmd_dsd_count = cpu_to_le16(1); ct_pkt->rsp_dsd_count = cpu_to_le16(1); - ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size); - ct_pkt->cmd_byte_count = cpu_to_le32(req_size); + ct_pkt->rsp_byte_count = cpu_to_le32(arg->rsp_size); + ct_pkt->cmd_byte_count = cpu_to_le32(arg->req_size); - ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma)); - ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); + ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(arg->req_dma)); + ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(arg->req_dma)); ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count; - ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma)); - ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); + ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(arg->rsp_dma)); + ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(arg->rsp_dma)); ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count; ct_pkt->vp_index = vha->vp_idx; @@ -117,7 +117,7 @@ qla2x00_prep_ct_req(struct ct_sns_pkt *p, uint16_t cmd, uint16_t rsp_size) return &p->p.req; } -static int +int qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt, struct ct_sns_rsp *ct_rsp, const char *routine) { @@ -183,14 +183,21 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, 
fc_port_t *fcport) struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; struct qla_hw_data *ha = vha->hw; + struct ct_arg arg; if (IS_QLA2100(ha) || IS_QLA2200(ha)) return qla2x00_sns_ga_nxt(vha, fcport); + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = GA_NXT_REQ_SIZE; + arg.rsp_size = GA_NXT_RSP_SIZE; + arg.nport_handle = NPH_SNS; + /* Issue GA_NXT */ /* Prepare common MS IOCB */ - ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GA_NXT_REQ_SIZE, - GA_NXT_RSP_SIZE); + ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ha->ct_sns, GA_NXT_CMD, @@ -269,16 +276,24 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list) struct ct_sns_gid_pt_data *gid_data; struct qla_hw_data *ha = vha->hw; uint16_t gid_pt_rsp_size; + struct ct_arg arg; if (IS_QLA2100(ha) || IS_QLA2200(ha)) return qla2x00_sns_gid_pt(vha, list); gid_data = NULL; gid_pt_rsp_size = qla2x00_gid_pt_rsp_size(vha); + + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = GID_PT_REQ_SIZE; + arg.rsp_size = gid_pt_rsp_size; + arg.nport_handle = NPH_SNS; + /* Issue GID_PT */ /* Prepare common MS IOCB */ - ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GID_PT_REQ_SIZE, - gid_pt_rsp_size); + ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ha->ct_sns, GID_PT_CMD, gid_pt_rsp_size); @@ -344,15 +359,22 @@ qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list) struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; struct qla_hw_data *ha = vha->hw; + struct ct_arg arg; if (IS_QLA2100(ha) || IS_QLA2200(ha)) return qla2x00_sns_gpn_id(vha, list); + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = GPN_ID_REQ_SIZE; + arg.rsp_size = GPN_ID_RSP_SIZE; + arg.nport_handle = NPH_SNS; + for (i = 0; i < ha->max_fibre_devices; i++) { /* Issue GPN_ID */ /* Prepare common MS IOCB */ - ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GPN_ID_REQ_SIZE, - GPN_ID_RSP_SIZE); + ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ha->ct_sns, GPN_ID_CMD, @@ -406,15 +428,22 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list) ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; + struct ct_arg arg; if (IS_QLA2100(ha) || IS_QLA2200(ha)) return qla2x00_sns_gnn_id(vha, list); + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = GNN_ID_REQ_SIZE; + arg.rsp_size = GNN_ID_RSP_SIZE; + arg.nport_handle = NPH_SNS; + for (i = 0; i < ha->max_fibre_devices; i++) { /* Issue GNN_ID */ /* Prepare common MS IOCB */ - ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GNN_ID_REQ_SIZE, - GNN_ID_RSP_SIZE); + ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ha->ct_sns, GNN_ID_CMD, @@ -473,14 +502,21 @@ qla2x00_rft_id(scsi_qla_host_t *vha) ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; + struct ct_arg arg; if (IS_QLA2100(ha) || IS_QLA2200(ha)) return qla2x00_sns_rft_id(vha); + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = RFT_ID_REQ_SIZE; + arg.rsp_size = RFT_ID_RSP_SIZE; + arg.nport_handle = NPH_SNS; + /* Issue RFT_ID */ /* Prepare common MS IOCB */ - ms_pkt = ha->isp_ops->prep_ms_iocb(vha, RFT_ID_REQ_SIZE, - RFT_ID_RSP_SIZE); + ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); /* Prepare CT 
request */ ct_req = qla2x00_prep_ct_req(ha->ct_sns, RFT_ID_CMD, @@ -526,6 +562,7 @@ qla2x00_rff_id(scsi_qla_host_t *vha) ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; + struct ct_arg arg; if (IS_QLA2100(ha) || IS_QLA2200(ha)) { ql_dbg(ql_dbg_disc, vha, 0x2046, @@ -533,10 +570,16 @@ qla2x00_rff_id(scsi_qla_host_t *vha) return (QLA_SUCCESS); } + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = RFF_ID_REQ_SIZE; + arg.rsp_size = RFF_ID_RSP_SIZE; + arg.nport_handle = NPH_SNS; + /* Issue RFF_ID */ /* Prepare common MS IOCB */ - ms_pkt = ha->isp_ops->prep_ms_iocb(vha, RFF_ID_REQ_SIZE, - RFF_ID_RSP_SIZE); + ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ha->ct_sns, RFF_ID_CMD, @@ -584,14 +627,21 @@ qla2x00_rnn_id(scsi_qla_host_t *vha) ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; + struct ct_arg arg; if (IS_QLA2100(ha) || IS_QLA2200(ha)) return qla2x00_sns_rnn_id(vha); + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = RNN_ID_REQ_SIZE; + arg.rsp_size = RNN_ID_RSP_SIZE; + arg.nport_handle = NPH_SNS; + /* Issue RNN_ID */ /* Prepare common MS IOCB */ - ms_pkt = ha->isp_ops->prep_ms_iocb(vha, RNN_ID_REQ_SIZE, - RNN_ID_RSP_SIZE); + ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ha->ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE); @@ -651,6 +701,7 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha) ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; + struct ct_arg arg; if (IS_QLA2100(ha) || IS_QLA2200(ha)) { ql_dbg(ql_dbg_disc, vha, 0x2050, @@ -658,10 +709,17 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha) return (QLA_SUCCESS); } + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = 0; + arg.rsp_size = RSNN_NN_RSP_SIZE; + arg.nport_handle = NPH_SNS; + /* Issue RSNN_NN */ /* Prepare common MS IOCB */ /* Request size adjusted after CT preparation */ - ms_pkt = ha->isp_ops->prep_ms_iocb(vha, 0, RSNN_NN_RSP_SIZE); + ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ha->ct_sns, RSNN_NN_CMD, @@ -1103,7 +1161,7 @@ qla2x00_sns_rnn_id(scsi_qla_host_t *vha) * * Returns 0 on success. 
*/ -static int +int qla2x00_mgmt_svr_login(scsi_qla_host_t *vha) { int ret, rval; @@ -2425,15 +2483,22 @@ qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list) ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; + struct ct_arg arg; if (!IS_IIDMA_CAPABLE(ha)) return QLA_FUNCTION_FAILED; + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = GFPN_ID_REQ_SIZE; + arg.rsp_size = GFPN_ID_RSP_SIZE; + arg.nport_handle = NPH_SNS; + for (i = 0; i < ha->max_fibre_devices; i++) { /* Issue GFPN_ID */ /* Prepare common MS IOCB */ - ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GFPN_ID_REQ_SIZE, - GFPN_ID_RSP_SIZE); + ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFPN_ID_CMD, @@ -2471,36 +2536,6 @@ qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list) return (rval); } -static inline void * -qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *vha, uint32_t req_size, - uint32_t rsp_size) -{ - struct ct_entry_24xx *ct_pkt; - struct qla_hw_data *ha = vha->hw; - ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb; - memset(ct_pkt, 0, sizeof(struct ct_entry_24xx)); - - ct_pkt->entry_type = CT_IOCB_TYPE; - ct_pkt->entry_count = 1; - ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id); - ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); - ct_pkt->cmd_dsd_count = cpu_to_le16(1); - ct_pkt->rsp_dsd_count = cpu_to_le16(1); - ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size); - ct_pkt->cmd_byte_count = cpu_to_le32(req_size); - - ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma)); - ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); - ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count; - - ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma)); - ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); - ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count; - ct_pkt->vp_index = vha->vp_idx; - - return ct_pkt; -} - static inline struct ct_sns_req * qla24xx_prep_ct_fm_req(struct ct_sns_pkt *p, uint16_t cmd, @@ -2530,9 +2565,10 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list) int rval; uint16_t i; struct qla_hw_data *ha = vha->hw; - ms_iocb_entry_t *ms_pkt; + ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; + struct ct_arg arg; if (!IS_IIDMA_CAPABLE(ha)) return QLA_FUNCTION_FAILED; @@ -2543,11 +2579,17 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list) if (rval) return rval; + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = GPSC_REQ_SIZE; + arg.rsp_size = GPSC_RSP_SIZE; + arg.nport_handle = vha->mgmt_svr_loop_id; + for (i = 0; i < ha->max_fibre_devices; i++) { /* Issue GFPN_ID */ /* Prepare common MS IOCB */ - ms_pkt = qla24xx_prep_ms_fm_iocb(vha, GPSC_REQ_SIZE, - GPSC_RSP_SIZE); + ms_pkt = qla24xx_prep_ms_iocb(vha, &arg); /* Prepare CT request */ ct_req = qla24xx_prep_ct_fm_req(ha->ct_sns, GPSC_CMD, @@ -2641,6 +2683,7 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list) struct ct_sns_rsp *ct_rsp; struct qla_hw_data *ha = vha->hw; uint8_t fcp_scsi_features = 0; + struct ct_arg arg; for (i = 0; i < ha->max_fibre_devices; i++) { /* Set default FC4 Type as UNKNOWN so the default is to @@ -2651,9 +2694,15 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list) if (!IS_FWI2_CAPABLE(ha)) continue; + arg.iocb = ha->ms_iocb; + arg.req_dma = ha->ct_sns_dma; + arg.rsp_dma = ha->ct_sns_dma; + arg.req_size = GFF_ID_REQ_SIZE; + arg.rsp_size = GFF_ID_RSP_SIZE; + arg.nport_handle = NPH_SNS; 
+ /* Prepare common MS IOCB */ - ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GFF_ID_REQ_SIZE, - GFF_ID_RSP_SIZE); + ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFF_ID_CMD, @@ -2692,3 +2741,538 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list) break; } } + +/* GID_PN completion processing. */ +void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea) +{ + fc_port_t *fcport = ea->fcport; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC login state %d \n", + __func__, fcport->port_name, fcport->fw_login_state); + + if (ea->sp->gen2 != fcport->login_gen) { + /* PLOGI/PRLI/LOGO came in while cmd was out.*/ + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC generation changed rscn %d|%d login %d|%d \n", + __func__, fcport->port_name, fcport->last_rscn_gen, + fcport->rscn_gen, fcport->last_login_gen, fcport->login_gen); + return; + } + + if (!ea->rc) { + if (ea->sp->gen1 == fcport->rscn_gen) { + fcport->scan_state = QLA_FCPORT_FOUND; + fcport->flags |= FCF_FABRIC_DEVICE; + + if (fcport->d_id.b24 == ea->id.b24) { + /* cable plugged into the same place */ + switch (vha->host->active_mode) { + case MODE_TARGET: + /* NOOP. let the other guy login to us.*/ + break; + case MODE_INITIATOR: + case MODE_DUAL: + default: + if (atomic_read(&fcport->state) == + FCS_ONLINE) + break; + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post gnl\n", + __func__, __LINE__, fcport->port_name); + qla24xx_post_gnl_work(vha, fcport); + break; + } + } else { /* fcport->d_id.b24 != ea->id.b24 */ + fcport->d_id.b24 = ea->id.b24; + if (fcport->deleted == QLA_SESS_DELETED) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post del sess\n", + __func__, __LINE__, fcport->port_name); + qlt_schedule_sess_for_deletion_lock(fcport); + } + } + } else { /* ea->sp->gen1 != fcport->rscn_gen */ + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post gidpn\n", + __func__, __LINE__, fcport->port_name); + /* rscn came in while cmd was out */ + qla24xx_post_gidpn_work(vha, fcport); + } + } else { /* ea->rc */ + /* cable pulled */ + if (ea->sp->gen1 == fcport->rscn_gen) { + if (ea->sp->gen2 == fcport->login_gen) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post del sess\n", __func__, + __LINE__, fcport->port_name); + qlt_schedule_sess_for_deletion_lock(fcport); + } else { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC login\n", __func__, __LINE__, + fcport->port_name); + qla24xx_fcport_handle_login(vha, fcport); + } + } else { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post gidpn\n", __func__, __LINE__, + fcport->port_name); + qla24xx_post_gidpn_work(vha, fcport); + } + } +} /* gidpn_event */ + +static void qla2x00_async_gidpn_sp_done(void *s, int res) +{ + struct srb *sp = s; + struct scsi_qla_host *vha = sp->vha; + fc_port_t *fcport = sp->fcport; + u8 *id = fcport->ct_desc.ct_sns->p.rsp.rsp.gid_pn.port_id; + struct event_arg ea; + + fcport->flags &= ~FCF_ASYNC_SENT; + + memset(&ea, 0, sizeof(ea)); + ea.fcport = fcport; + ea.id.b.domain = id[0]; + ea.id.b.area = id[1]; + ea.id.b.al_pa = id[2]; + ea.sp = sp; + ea.rc = res; + ea.event = FCME_GIDPN_DONE; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async done-%s res %x, WWPN %8phC ID %3phC \n", + sp->name, res, fcport->port_name, id); + + qla2x00_fcport_event_handler(vha, &ea); + + sp->free(sp); +} + +int qla24xx_async_gidpn(scsi_qla_host_t *vha, fc_port_t *fcport) +{ + int rval = QLA_FUNCTION_FAILED; + struct ct_sns_req *ct_req; + srb_t *sp; + + if (!vha->flags.online) + goto done; + + 
fcport->flags |= FCF_ASYNC_SENT; + fcport->disc_state = DSC_GID_PN; + fcport->scan_state = QLA_FCPORT_SCAN; + sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); + if (!sp) + goto done; + + sp->type = SRB_CT_PTHRU_CMD; + sp->name = "gidpn"; + sp->gen1 = fcport->rscn_gen; + sp->gen2 = fcport->login_gen; + + qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + + /* CT_IU preamble */ + ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GID_PN_CMD, + GID_PN_RSP_SIZE); + + /* GIDPN req */ + memcpy(ct_req->req.gid_pn.port_name, fcport->port_name, + WWN_SIZE); + + /* req & rsp use the same buffer */ + sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns; + sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma; + sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns; + sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma; + sp->u.iocb_cmd.u.ctarg.req_size = GID_PN_REQ_SIZE; + sp->u.iocb_cmd.u.ctarg.rsp_size = GID_PN_RSP_SIZE; + sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; + + sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; + sp->done = qla2x00_async_gidpn_sp_done; + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; + + ql_dbg(ql_dbg_disc, vha, 0x206f, + "Async-%s - %8phC hdl=%x loopid=%x portid %02x%02x%02x.\n", + sp->name, fcport->port_name, + sp->handle, fcport->loop_id, fcport->d_id.b.domain, + fcport->d_id.b.area, fcport->d_id.b.al_pa); + return rval; + +done_free_sp: + sp->free(sp); +done: + fcport->flags &= ~FCF_ASYNC_SENT; + return rval; +} + +int qla24xx_post_gidpn_work(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + struct qla_work_evt *e; + int ls; + + ls = atomic_read(&vha->loop_state); + if (((ls != LOOP_READY) && (ls != LOOP_UP)) || + test_bit(UNLOADING, &vha->dpc_flags)) + return 0; + + e = qla2x00_alloc_work(vha, QLA_EVT_GIDPN); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.fcport.fcport = fcport; + return qla2x00_post_work(vha, e); +} + +int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + struct qla_work_evt *e; + + e = qla2x00_alloc_work(vha, QLA_EVT_GPSC); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.fcport.fcport = fcport; + return qla2x00_post_work(vha, e); +} + +static void qla24xx_async_gpsc_sp_done(void *s, int res) +{ + struct srb *sp = s; + struct scsi_qla_host *vha = sp->vha; + struct qla_hw_data *ha = vha->hw; + fc_port_t *fcport = sp->fcport; + struct ct_sns_rsp *ct_rsp; + struct event_arg ea; + + ct_rsp = &fcport->ct_desc.ct_sns->p.rsp; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async done-%s res %x, WWPN %8phC \n", + sp->name, res, fcport->port_name); + + fcport->flags &= ~FCF_ASYNC_SENT; + + if (res == (DID_ERROR << 16)) { + /* entry status error */ + goto done; + } else if (res) { + if ((ct_rsp->header.reason_code == + CT_REASON_INVALID_COMMAND_CODE) || + (ct_rsp->header.reason_code == + CT_REASON_COMMAND_UNSUPPORTED)) { + ql_dbg(ql_dbg_disc, vha, 0x205a, + "GPSC command unsupported, disabling " + "query.\n"); + ha->flags.gpsc_supported = 0; + res = QLA_SUCCESS; + } + } else { + switch (be16_to_cpu(ct_rsp->rsp.gpsc.speed)) { + case BIT_15: + fcport->fp_speed = PORT_SPEED_1GB; + break; + case BIT_14: + fcport->fp_speed = PORT_SPEED_2GB; + break; + case BIT_13: + fcport->fp_speed = PORT_SPEED_4GB; + break; + case BIT_12: + fcport->fp_speed = PORT_SPEED_10GB; + break; + case BIT_11: + fcport->fp_speed = PORT_SPEED_8GB; + break; + case BIT_10: + fcport->fp_speed = PORT_SPEED_16GB; + break; + case BIT_8: + fcport->fp_speed = PORT_SPEED_32GB; + break; + } + + ql_dbg(ql_dbg_disc, vha, 
0xffff, + "Async-%s OUT WWPN %8phC speeds=%04x speed=%04x.\n", + sp->name, + fcport->fabric_port_name, + be16_to_cpu(ct_rsp->rsp.gpsc.speeds), + be16_to_cpu(ct_rsp->rsp.gpsc.speed)); + } +done: + memset(&ea, 0, sizeof(ea)); + ea.event = FCME_GPSC_DONE; + ea.rc = res; + ea.fcport = fcport; + qla2x00_fcport_event_handler(vha, &ea); + + sp->free(sp); +} + +int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport) +{ + int rval = QLA_FUNCTION_FAILED; + struct ct_sns_req *ct_req; + srb_t *sp; + + if (!vha->flags.online) + goto done; + + fcport->flags |= FCF_ASYNC_SENT; + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) + goto done; + + sp->type = SRB_CT_PTHRU_CMD; + sp->name = "gpsc"; + sp->gen1 = fcport->rscn_gen; + sp->gen2 = fcport->login_gen; + + qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + + /* CT_IU preamble */ + ct_req = qla24xx_prep_ct_fm_req(fcport->ct_desc.ct_sns, GPSC_CMD, + GPSC_RSP_SIZE); + + /* GPSC req */ + memcpy(ct_req->req.gpsc.port_name, fcport->port_name, + WWN_SIZE); + + sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns; + sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma; + sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns; + sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma; + sp->u.iocb_cmd.u.ctarg.req_size = GPSC_REQ_SIZE; + sp->u.iocb_cmd.u.ctarg.rsp_size = GPSC_RSP_SIZE; + sp->u.iocb_cmd.u.ctarg.nport_handle = vha->mgmt_svr_loop_id; + + sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; + sp->done = qla24xx_async_gpsc_sp_done; + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n", + sp->name, fcport->port_name, sp->handle, + fcport->loop_id, fcport->d_id.b.domain, + fcport->d_id.b.area, fcport->d_id.b.al_pa); + return rval; + +done_free_sp: + sp->free(sp); +done: + fcport->flags &= ~FCF_ASYNC_SENT; + return rval; +} + +int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id) +{ + struct qla_work_evt *e; + + if (test_bit(UNLOADING, &vha->dpc_flags)) + return 0; + + e = qla2x00_alloc_work(vha, QLA_EVT_GPNID); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.gpnid.id = *id; + return qla2x00_post_work(vha, e); +} + +void qla24xx_async_gpnid_done(scsi_qla_host_t *vha, srb_t *sp) +{ + if (sp->u.iocb_cmd.u.ctarg.req) { + dma_free_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), + sp->u.iocb_cmd.u.ctarg.req, + sp->u.iocb_cmd.u.ctarg.req_dma); + sp->u.iocb_cmd.u.ctarg.req = NULL; + } + if (sp->u.iocb_cmd.u.ctarg.rsp) { + dma_free_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), + sp->u.iocb_cmd.u.ctarg.rsp, + sp->u.iocb_cmd.u.ctarg.rsp_dma); + sp->u.iocb_cmd.u.ctarg.rsp = NULL; + } + + sp->free(sp); +} + +void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea) +{ + fc_port_t *fcport; + unsigned long flags; + + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1); + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + + if (fcport) { + /* cable moved. 
just plugged in */ + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post del sess\n", + __func__, __LINE__, fcport->port_name); + + fcport->rscn_gen++; + fcport->d_id = ea->id; + fcport->scan_state = QLA_FCPORT_FOUND; + fcport->flags |= FCF_FABRIC_DEVICE; + + qlt_schedule_sess_for_deletion_lock(fcport); + } else { + /* create new fcport */ + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post new sess\n", + __func__, __LINE__, ea->port_name); + + qla24xx_post_newsess_work(vha, &ea->id, ea->port_name, NULL); + } +} + +static void qla2x00_async_gpnid_sp_done(void *s, int res) +{ + struct srb *sp = s; + struct scsi_qla_host *vha = sp->vha; + struct ct_sns_req *ct_req = + (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req; + struct ct_sns_rsp *ct_rsp = + (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp; + struct event_arg ea; + struct qla_work_evt *e; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async done-%s res %x ID %3phC. %8phC\n", + sp->name, res, ct_req->req.port_id.port_id, + ct_rsp->rsp.gpn_id.port_name); + + memset(&ea, 0, sizeof(ea)); + memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE); + ea.sp = sp; + ea.id.b.domain = ct_req->req.port_id.port_id[0]; + ea.id.b.area = ct_req->req.port_id.port_id[1]; + ea.id.b.al_pa = ct_req->req.port_id.port_id[2]; + ea.rc = res; + ea.event = FCME_GPNID_DONE; + + qla2x00_fcport_event_handler(vha, &ea); + + e = qla2x00_alloc_work(vha, QLA_EVT_GPNID_DONE); + if (!e) { + /* please ignore kernel warning. otherwise, we have mem leak. */ + if (sp->u.iocb_cmd.u.ctarg.req) { + dma_free_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), + sp->u.iocb_cmd.u.ctarg.req, + sp->u.iocb_cmd.u.ctarg.req_dma); + sp->u.iocb_cmd.u.ctarg.req = NULL; + } + if (sp->u.iocb_cmd.u.ctarg.rsp) { + dma_free_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), + sp->u.iocb_cmd.u.ctarg.rsp, + sp->u.iocb_cmd.u.ctarg.rsp_dma); + sp->u.iocb_cmd.u.ctarg.rsp = NULL; + } + + sp->free(sp); + return; + } + + e->u.iosb.sp = sp; + qla2x00_post_work(vha, e); +} + +/* Get WWPN with Nport ID. 
*/ +int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id) +{ + int rval = QLA_FUNCTION_FAILED; + struct ct_sns_req *ct_req; + srb_t *sp; + struct ct_sns_pkt *ct_sns; + + if (!vha->flags.online) + goto done; + + sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); + if (!sp) + goto done; + + sp->type = SRB_CT_PTHRU_CMD; + sp->name = "gpnid"; + qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + + sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, + GFP_KERNEL); + if (!sp->u.iocb_cmd.u.ctarg.req) { + ql_log(ql_log_warn, vha, 0xffff, + "Failed to allocate ct_sns request.\n"); + goto done_free_sp; + } + + sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, + GFP_KERNEL); + if (!sp->u.iocb_cmd.u.ctarg.rsp) { + ql_log(ql_log_warn, vha, 0xffff, + "Failed to allocate ct_sns request.\n"); + goto done_free_sp; + } + + ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp; + memset(ct_sns, 0, sizeof(*ct_sns)); + + ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; + /* CT_IU preamble */ + ct_req = qla2x00_prep_ct_req(ct_sns, GPN_ID_CMD, GPN_ID_RSP_SIZE); + + /* GPN_ID req */ + ct_req->req.port_id.port_id[0] = id->b.domain; + ct_req->req.port_id.port_id[1] = id->b.area; + ct_req->req.port_id.port_id[2] = id->b.al_pa; + + sp->u.iocb_cmd.u.ctarg.req_size = GPN_ID_REQ_SIZE; + sp->u.iocb_cmd.u.ctarg.rsp_size = GPN_ID_RSP_SIZE; + sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; + + sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; + sp->done = qla2x00_async_gpnid_sp_done; + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async-%s hdl=%x ID %3phC.\n", sp->name, + sp->handle, ct_req->req.port_id.port_id); + return rval; + +done_free_sp: + if (sp->u.iocb_cmd.u.ctarg.req) { + dma_free_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), + sp->u.iocb_cmd.u.ctarg.req, + sp->u.iocb_cmd.u.ctarg.req_dma); + sp->u.iocb_cmd.u.ctarg.req = NULL; + } + if (sp->u.iocb_cmd.u.ctarg.rsp) { + dma_free_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), + sp->u.iocb_cmd.u.ctarg.rsp, + sp->u.iocb_cmd.u.ctarg.rsp_dma); + sp->u.iocb_cmd.u.ctarg.rsp = NULL; + } + + sp->free(sp); +done: + return rval; +} diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 265e1395bdb837..f9d2fe7b1adedf 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -30,15 +30,15 @@ static int qla2x00_configure_hba(scsi_qla_host_t *); static int qla2x00_configure_loop(scsi_qla_host_t *); static int qla2x00_configure_local_loop(scsi_qla_host_t *); static int qla2x00_configure_fabric(scsi_qla_host_t *); -static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *, struct list_head *); -static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *, - uint16_t *); - +static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *); static int qla2x00_restart_isp(scsi_qla_host_t *); static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *); static int qla84xx_init_chip(scsi_qla_host_t *); static int qla25xx_init_queues(struct qla_hw_data *); +static int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8); +static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *, + struct event_arg *); /* SRB Extensions ---------------------------------------------------------- */ @@ -47,29 +47,27 @@ 
qla2x00_sp_timeout(unsigned long __data) { srb_t *sp = (srb_t *)__data; struct srb_iocb *iocb; - fc_port_t *fcport = sp->fcport; - struct qla_hw_data *ha = fcport->vha->hw; + scsi_qla_host_t *vha = sp->vha; struct req_que *req; unsigned long flags; - spin_lock_irqsave(&ha->hardware_lock, flags); - req = ha->req_q_map[0]; + spin_lock_irqsave(&vha->hw->hardware_lock, flags); + req = vha->hw->req_q_map[0]; req->outstanding_cmds[sp->handle] = NULL; iocb = &sp->u.iocb_cmd; iocb->timeout(sp); - sp->free(fcport->vha, sp); - spin_unlock_irqrestore(&ha->hardware_lock, flags); + sp->free(sp); + spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); } void -qla2x00_sp_free(void *data, void *ptr) +qla2x00_sp_free(void *ptr) { - srb_t *sp = (srb_t *)ptr; + srb_t *sp = ptr; struct srb_iocb *iocb = &sp->u.iocb_cmd; - struct scsi_qla_host *vha = (scsi_qla_host_t *)data; del_timer(&iocb->timer); - qla2x00_rel_sp(vha, sp); + qla2x00_rel_sp(sp); } /* Asynchronous Login/Logout Routines -------------------------------------- */ @@ -94,43 +92,72 @@ qla2x00_get_async_timeout(struct scsi_qla_host *vha) return tmo; } -static void +void qla2x00_async_iocb_timeout(void *data) { - srb_t *sp = (srb_t *)data; + srb_t *sp = data; fc_port_t *fcport = sp->fcport; + struct srb_iocb *lio = &sp->u.iocb_cmd; + struct event_arg ea; ql_dbg(ql_dbg_disc, fcport->vha, 0x2071, - "Async-%s timeout - hdl=%x portid=%02x%02x%02x.\n", - sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, - fcport->d_id.b.al_pa); + "Async-%s timeout - hdl=%x portid=%06x %8phC.\n", + sp->name, sp->handle, fcport->d_id.b24, fcport->port_name); fcport->flags &= ~FCF_ASYNC_SENT; - if (sp->type == SRB_LOGIN_CMD) { - struct srb_iocb *lio = &sp->u.iocb_cmd; - qla2x00_post_async_logout_work(fcport->vha, fcport, NULL); + + switch (sp->type) { + case SRB_LOGIN_CMD: /* Retry as needed. */ lio->u.logio.data[0] = MBS_COMMAND_ERROR; lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 
QLA_LOGIO_LOGIN_RETRIED : 0; - qla2x00_post_async_login_done_work(fcport->vha, fcport, - lio->u.logio.data); - } else if (sp->type == SRB_LOGOUT_CMD) { + memset(&ea, 0, sizeof(ea)); + ea.event = FCME_PLOGI_DONE; + ea.fcport = sp->fcport; + ea.data[0] = lio->u.logio.data[0]; + ea.data[1] = lio->u.logio.data[1]; + ea.sp = sp; + qla24xx_handle_plogi_done_event(fcport->vha, &ea); + break; + case SRB_LOGOUT_CMD: qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT); + break; + case SRB_CT_PTHRU_CMD: + case SRB_MB_IOCB: + case SRB_NACK_PLOGI: + case SRB_NACK_PRLI: + case SRB_NACK_LOGO: + sp->done(sp, QLA_FUNCTION_TIMEOUT); + break; } } static void -qla2x00_async_login_sp_done(void *data, void *ptr, int res) +qla2x00_async_login_sp_done(void *ptr, int res) { - srb_t *sp = (srb_t *)ptr; + srb_t *sp = ptr; + struct scsi_qla_host *vha = sp->vha; struct srb_iocb *lio = &sp->u.iocb_cmd; - struct scsi_qla_host *vha = (scsi_qla_host_t *)data; + struct event_arg ea; - if (!test_bit(UNLOADING, &vha->dpc_flags)) - qla2x00_post_async_login_done_work(sp->fcport->vha, sp->fcport, - lio->u.logio.data); - sp->free(sp->fcport->vha, sp); + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC res %d \n", __func__, sp->fcport->port_name, res); + + sp->fcport->flags &= ~FCF_ASYNC_SENT; + if (!test_bit(UNLOADING, &vha->dpc_flags)) { + memset(&ea, 0, sizeof(ea)); + ea.event = FCME_PLOGI_DONE; + ea.fcport = sp->fcport; + ea.data[0] = lio->u.logio.data[0]; + ea.data[1] = lio->u.logio.data[1]; + ea.iop[0] = lio->u.logio.iop[0]; + ea.iop[1] = lio->u.logio.iop[1]; + ea.sp = sp; + qla2x00_fcport_event_handler(vha, &ea); + } + + sp->free(sp); } int @@ -139,13 +166,23 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, { srb_t *sp; struct srb_iocb *lio; - int rval; + int rval = QLA_FUNCTION_FAILED; + + if (!vha->flags.online) + goto done; + + if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || + (fcport->fw_login_state == DSC_LS_PLOGI_COMP) || + (fcport->fw_login_state == DSC_LS_PRLI_PEND)) + goto done; - rval = QLA_FUNCTION_FAILED; sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; + fcport->flags |= FCF_ASYNC_SENT; + fcport->logout_completed = 0; + sp->type = SRB_LOGIN_CMD; sp->name = "login"; qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); @@ -165,29 +202,30 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, } ql_dbg(ql_dbg_disc, vha, 0x2072, - "Async-login - hdl=%x, loopid=%x portid=%02x%02x%02x " - "retries=%d.\n", sp->handle, fcport->loop_id, + "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x " + "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, fcport->login_retry); return rval; done_free_sp: - sp->free(fcport->vha, sp); + sp->free(sp); done: + fcport->flags &= ~FCF_ASYNC_SENT; return rval; } static void -qla2x00_async_logout_sp_done(void *data, void *ptr, int res) +qla2x00_async_logout_sp_done(void *ptr, int res) { - srb_t *sp = (srb_t *)ptr; + srb_t *sp = ptr; struct srb_iocb *lio = &sp->u.iocb_cmd; - struct scsi_qla_host *vha = (scsi_qla_host_t *)data; - if (!test_bit(UNLOADING, &vha->dpc_flags)) - qla2x00_post_async_logout_done_work(sp->fcport->vha, sp->fcport, + sp->fcport->flags &= ~FCF_ASYNC_SENT; + if (!test_bit(UNLOADING, &sp->vha->dpc_flags)) + qla2x00_post_async_logout_done_work(sp->vha, sp->fcport, lio->u.logio.data); - sp->free(sp->fcport->vha, sp); + sp->free(sp); } int @@ -198,6 +236,7 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport) 
int rval; rval = QLA_FUNCTION_FAILED; + fcport->flags |= FCF_ASYNC_SENT; sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; @@ -214,28 +253,30 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport) goto done_free_sp; ql_dbg(ql_dbg_disc, vha, 0x2070, - "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x.\n", + "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n", sp->handle, fcport->loop_id, fcport->d_id.b.domain, - fcport->d_id.b.area, fcport->d_id.b.al_pa); + fcport->d_id.b.area, fcport->d_id.b.al_pa, + fcport->port_name); return rval; done_free_sp: - sp->free(fcport->vha, sp); + sp->free(sp); done: + fcport->flags &= ~FCF_ASYNC_SENT; return rval; } static void -qla2x00_async_adisc_sp_done(void *data, void *ptr, int res) +qla2x00_async_adisc_sp_done(void *ptr, int res) { - srb_t *sp = (srb_t *)ptr; + srb_t *sp = ptr; + struct scsi_qla_host *vha = sp->vha; struct srb_iocb *lio = &sp->u.iocb_cmd; - struct scsi_qla_host *vha = (scsi_qla_host_t *)data; if (!test_bit(UNLOADING, &vha->dpc_flags)) - qla2x00_post_async_adisc_done_work(sp->fcport->vha, sp->fcport, + qla2x00_post_async_adisc_done_work(sp->vha, sp->fcport, lio->u.logio.data); - sp->free(sp->fcport->vha, sp); + sp->free(sp); } int @@ -247,6 +288,7 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport, int rval; rval = QLA_FUNCTION_FAILED; + fcport->flags |= FCF_ASYNC_SENT; sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; @@ -270,16 +312,825 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport, fcport->d_id.b.area, fcport->d_id.b.al_pa); return rval; -done_free_sp: - sp->free(fcport->vha, sp); -done: - return rval; +done_free_sp: + sp->free(sp); +done: + fcport->flags &= ~FCF_ASYNC_SENT; + return rval; +} + +static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, + struct event_arg *ea) +{ + fc_port_t *fcport, *conflict_fcport; + struct get_name_list_extended *e; + u16 i, n, found = 0, loop_id; + port_id_t id; + u64 wwn; + u8 opt = 0; + + fcport = ea->fcport; + + if (ea->rc) { /* rval */ + if (fcport->login_retry == 0) { + fcport->login_retry = vha->hw->login_retry_count; + ql_dbg(ql_dbg_disc, vha, 0xffff, + "GNL failed Port login retry %8phN, retry cnt=%d.\n", + fcport->port_name, fcport->login_retry); + } + return; + } + + if (fcport->last_rscn_gen != fcport->rscn_gen) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC rscn gen changed rscn %d|%d \n", + __func__, fcport->port_name, + fcport->last_rscn_gen, fcport->rscn_gen); + qla24xx_post_gidpn_work(vha, fcport); + return; + } else if (fcport->last_login_gen != fcport->login_gen) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC login gen changed login %d|%d \n", + __func__, fcport->port_name, + fcport->last_login_gen, fcport->login_gen); + return; + } + + n = ea->data[0] / sizeof(struct get_name_list_extended); + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC n %d %02x%02x%02x lid %d \n", + __func__, __LINE__, fcport->port_name, n, + fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa, fcport->loop_id); + + for (i = 0; i < n; i++) { + e = &vha->gnl.l[i]; + wwn = wwn_to_u64(e->port_name); + + if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE)) + continue; + + found = 1; + id.b.domain = e->port_id[2]; + id.b.area = e->port_id[1]; + id.b.al_pa = e->port_id[0]; + id.b.rsvd_1 = 0; + + loop_id = le16_to_cpu(e->nport_handle); + loop_id = (loop_id & 0x7fff); + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s found %8phC CLS [%d|%d] ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n", + 
__func__, fcport->port_name, + e->current_login_state, fcport->fw_login_state, + id.b.domain, id.b.area, id.b.al_pa, + fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa, loop_id, fcport->loop_id); + + if ((id.b24 != fcport->d_id.b24) || + ((fcport->loop_id != FC_NO_LOOP_ID) && + (fcport->loop_id != loop_id))) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post del sess\n", + __func__, __LINE__, fcport->port_name); + qlt_schedule_sess_for_deletion(fcport, 1); + return; + } + + fcport->loop_id = loop_id; + + wwn = wwn_to_u64(fcport->port_name); + qlt_find_sess_invalidate_other(vha, wwn, + id, loop_id, &conflict_fcport); + + if (conflict_fcport) { + /* + * Another share fcport share the same loop_id & + * nport id. Conflict fcport needs to finish + * cleanup before this fcport can proceed to login. + */ + conflict_fcport->conflict = fcport; + fcport->login_pause = 1; + } + + switch (e->current_login_state) { + case DSC_LS_PRLI_COMP: + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post gpdb\n", + __func__, __LINE__, fcport->port_name); + opt = PDO_FORCE_ADISC; + qla24xx_post_gpdb_work(vha, fcport, opt); + break; + + case DSC_LS_PORT_UNAVAIL: + default: + if (fcport->loop_id == FC_NO_LOOP_ID) { + qla2x00_find_new_loop_id(vha, fcport); + fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; + } + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC \n", + __func__, __LINE__, fcport->port_name); + qla24xx_fcport_handle_login(vha, fcport); + break; + } + } + + if (!found) { + /* fw has no record of this port */ + if (fcport->loop_id == FC_NO_LOOP_ID) { + qla2x00_find_new_loop_id(vha, fcport); + fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; + } else { + for (i = 0; i < n; i++) { + e = &vha->gnl.l[i]; + id.b.domain = e->port_id[0]; + id.b.area = e->port_id[1]; + id.b.al_pa = e->port_id[2]; + id.b.rsvd_1 = 0; + loop_id = le16_to_cpu(e->nport_handle); + + if (fcport->d_id.b24 == id.b24) { + conflict_fcport = + qla2x00_find_fcport_by_wwpn(vha, + e->port_name, 0); + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post del sess\n", + __func__, __LINE__, + conflict_fcport->port_name); + qlt_schedule_sess_for_deletion + (conflict_fcport, 1); + } + + if (fcport->loop_id == loop_id) { + /* FW already picked this loop id for another fcport */ + qla2x00_find_new_loop_id(vha, fcport); + } + } + } + qla24xx_fcport_handle_login(vha, fcport); + } +} /* gnl_event */ + +static void +qla24xx_async_gnl_sp_done(void *s, int res) +{ + struct srb *sp = s; + struct scsi_qla_host *vha = sp->vha; + unsigned long flags; + struct fc_port *fcport = NULL, *tf; + u16 i, n = 0, loop_id; + struct event_arg ea; + struct get_name_list_extended *e; + u64 wwn; + struct list_head h; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async done-%s res %x mb[1]=%x mb[2]=%x \n", + sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1], + sp->u.iocb_cmd.u.mbx.in_mb[2]); + + memset(&ea, 0, sizeof(ea)); + ea.sp = sp; + ea.rc = res; + ea.event = FCME_GNL_DONE; + + if (sp->u.iocb_cmd.u.mbx.in_mb[1] >= + sizeof(struct get_name_list_extended)) { + n = sp->u.iocb_cmd.u.mbx.in_mb[1] / + sizeof(struct get_name_list_extended); + ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1]; /* amnt xfered */ + } + + for (i = 0; i < n; i++) { + e = &vha->gnl.l[i]; + loop_id = le16_to_cpu(e->nport_handle); + /* mask out reserve bit */ + loop_id = (loop_id & 0x7fff); + set_bit(loop_id, vha->hw->loop_id_map); + wwn = wwn_to_u64(e->port_name); + + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, + "%s %8phC %02x:%02x:%02x state %d/%d lid %x \n", + __func__, (void *)&wwn, 
e->port_id[2], e->port_id[1], + e->port_id[0], e->current_login_state, e->last_login_state, + (loop_id & 0x7fff)); + } + + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + vha->gnl.sent = 0; + + INIT_LIST_HEAD(&h); + fcport = tf = NULL; + if (!list_empty(&vha->gnl.fcports)) + list_splice_init(&vha->gnl.fcports, &h); + + list_for_each_entry_safe(fcport, tf, &h, gnl_entry) { + list_del_init(&fcport->gnl_entry); + fcport->flags &= ~FCF_ASYNC_SENT; + ea.fcport = fcport; + + qla2x00_fcport_event_handler(vha, &ea); + } + + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + + sp->free(sp); +} + +int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + srb_t *sp; + struct srb_iocb *mbx; + int rval = QLA_FUNCTION_FAILED; + unsigned long flags; + u16 *mb; + + if (!vha->flags.online) + goto done; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async-gnlist WWPN %8phC \n", fcport->port_name); + + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + fcport->flags |= FCF_ASYNC_SENT; + fcport->disc_state = DSC_GNL; + fcport->last_rscn_gen = fcport->rscn_gen; + fcport->last_login_gen = fcport->login_gen; + + list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports); + if (vha->gnl.sent) { + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + rval = QLA_SUCCESS; + goto done; + } + vha->gnl.sent = 1; + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) + goto done; + sp->type = SRB_MB_IOCB; + sp->name = "gnlist"; + sp->gen1 = fcport->rscn_gen; + sp->gen2 = fcport->login_gen; + + qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2); + + mb = sp->u.iocb_cmd.u.mbx.out_mb; + mb[0] = MBC_PORT_NODE_NAME_LIST; + mb[1] = BIT_2 | BIT_3; + mb[2] = MSW(vha->gnl.ldma); + mb[3] = LSW(vha->gnl.ldma); + mb[6] = MSW(MSD(vha->gnl.ldma)); + mb[7] = LSW(MSD(vha->gnl.ldma)); + mb[8] = vha->gnl.size; + mb[9] = vha->vp_idx; + + mbx = &sp->u.iocb_cmd; + mbx->timeout = qla2x00_async_iocb_timeout; + + sp->done = qla24xx_async_gnl_sp_done; + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async-%s - OUT WWPN %8phC hndl %x\n", + sp->name, fcport->port_name, sp->handle); + + return rval; + +done_free_sp: + sp->free(sp); +done: + fcport->flags &= ~FCF_ASYNC_SENT; + return rval; +} + +int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + struct qla_work_evt *e; + + e = qla2x00_alloc_work(vha, QLA_EVT_GNL); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.fcport.fcport = fcport; + return qla2x00_post_work(vha, e); +} + +static +void qla24xx_async_gpdb_sp_done(void *s, int res) +{ + struct srb *sp = s; + struct scsi_qla_host *vha = sp->vha; + struct qla_hw_data *ha = vha->hw; + struct port_database_24xx *pd; + fc_port_t *fcport = sp->fcport; + u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb; + int rval = QLA_SUCCESS; + struct event_arg ea; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n", + sp->name, res, fcport->port_name, mb[1], mb[2]); + + fcport->flags &= ~FCF_ASYNC_SENT; + + if (res) { + rval = res; + goto gpd_error_out; + } + + pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in; + + rval = __qla24xx_parse_gpdb(vha, fcport, pd); + +gpd_error_out: + memset(&ea, 0, sizeof(ea)); + ea.event = FCME_GPDB_DONE; + ea.rc = rval; + ea.fcport = fcport; + ea.sp = sp; + + qla2x00_fcport_event_handler(vha, &ea); + + dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in, + sp->u.iocb_cmd.u.mbx.in_dma); + + sp->free(sp); +} + 
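The two completion routines above (qla24xx_async_gnl_sp_done() and qla24xx_async_gpdb_sp_done()) show the callback shape used throughout this series: because the owning host is now cached in sp->vha when the SRB is allocated, ->done() and ->free() take only the SRB pointer instead of a separate vha argument. A minimal sketch of that shape, with a placeholder name and debug code (kernel driver context assumed, not standalone-compilable):

static void example_async_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct scsi_qla_host *vha = sp->vha;	/* cached at SRB allocation */

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "Async done-%s res %x\n", sp->name, res);

	/* hand the result to the fcport state machine here, then release */
	sp->free(sp);
}

The same cached pointer is what lets qla2x00_sp_timeout() and qla24xx_async_abort_cmd(), changed elsewhere in this patch, reach the adapter without going through sp->fcport.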
+static int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, + u8 opt) +{ + struct qla_work_evt *e; + + e = qla2x00_alloc_work(vha, QLA_EVT_GPDB); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.fcport.fcport = fcport; + e->u.fcport.opt = opt; + return qla2x00_post_work(vha, e); +} + +int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) +{ + srb_t *sp; + struct srb_iocb *mbx; + int rval = QLA_FUNCTION_FAILED; + u16 *mb; + dma_addr_t pd_dma; + struct port_database_24xx *pd; + struct qla_hw_data *ha = vha->hw; + + if (!vha->flags.online) + goto done; + + fcport->flags |= FCF_ASYNC_SENT; + fcport->disc_state = DSC_GPDB; + + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) + goto done; + + pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); + if (pd == NULL) { + ql_log(ql_log_warn, vha, 0xffff, + "Failed to allocate port database structure.\n"); + goto done_free_sp; + } + memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE)); + + sp->type = SRB_MB_IOCB; + sp->name = "gpdb"; + sp->gen1 = fcport->rscn_gen; + sp->gen2 = fcport->login_gen; + qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + + mb = sp->u.iocb_cmd.u.mbx.out_mb; + mb[0] = MBC_GET_PORT_DATABASE; + mb[1] = fcport->loop_id; + mb[2] = MSW(pd_dma); + mb[3] = LSW(pd_dma); + mb[6] = MSW(MSD(pd_dma)); + mb[7] = LSW(MSD(pd_dma)); + mb[9] = vha->vp_idx; + mb[10] = opt; + + mbx = &sp->u.iocb_cmd; + mbx->timeout = qla2x00_async_iocb_timeout; + mbx->u.mbx.in = (void *)pd; + mbx->u.mbx.in_dma = pd_dma; + + sp->done = qla24xx_async_gpdb_sp_done; + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async-%s %8phC hndl %x opt %x\n", + sp->name, fcport->port_name, sp->handle, opt); + + return rval; + +done_free_sp: + if (pd) + dma_pool_free(ha->s_dma_pool, pd, pd_dma); + + sp->free(sp); +done: + fcport->flags &= ~FCF_ASYNC_SENT; + qla24xx_post_gpdb_work(vha, fcport, opt); + return rval; +} + +static +void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) +{ + int rval = ea->rc; + fc_port_t *fcport = ea->fcport; + unsigned long flags; + + fcport->flags &= ~FCF_ASYNC_SENT; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC DS %d LS %d rval %d\n", __func__, fcport->port_name, + fcport->disc_state, fcport->fw_login_state, rval); + + if (ea->sp->gen2 != fcport->login_gen) { + /* target side must have changed it. 
*/ + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC generation changed rscn %d|%d login %d|%d \n", + __func__, fcport->port_name, fcport->last_rscn_gen, + fcport->rscn_gen, fcport->last_login_gen, + fcport->login_gen); + return; + } else if (ea->sp->gen1 != fcport->rscn_gen) { + ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC post gidpn\n", + __func__, __LINE__, fcport->port_name); + qla24xx_post_gidpn_work(vha, fcport); + return; + } + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC post del sess\n", + __func__, __LINE__, fcport->port_name); + qlt_schedule_sess_for_deletion_lock(fcport); + return; + } + + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + ea->fcport->login_gen++; + ea->fcport->deleted = 0; + ea->fcport->logout_on_delete = 1; + + if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) { + vha->fcport_count++; + ea->fcport->login_succ = 1; + + if (!IS_IIDMA_CAPABLE(vha->hw) || + !vha->hw->flags.gpsc_supported) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post upd_fcport fcp_cnt %d\n", + __func__, __LINE__, fcport->port_name, + vha->fcport_count); + + qla24xx_post_upd_fcport_work(vha, fcport); + } else { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post gpsc fcp_cnt %d\n", + __func__, __LINE__, fcport->port_name, + vha->fcport_count); + + qla24xx_post_gpsc_work(vha, fcport); + } + } + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); +} /* gpdb event */ + +int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + if (fcport->login_retry == 0) + return 0; + + if (fcport->scan_state != QLA_FCPORT_FOUND) + return 0; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d|%d retry %d lid %d\n", + __func__, fcport->port_name, fcport->disc_state, + fcport->fw_login_state, fcport->login_pause, fcport->flags, + fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen, + fcport->last_login_gen, fcport->login_gen, fcport->login_retry, + fcport->loop_id); + + fcport->login_retry--; + + if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || + (fcport->fw_login_state == DSC_LS_PRLI_PEND)) + return 0; + + if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) { + if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) + return 0; + } + + /* for pure Target Mode. 
Login will not be initiated */ + if (vha->host->active_mode == MODE_TARGET) + return 0; + + if (fcport->flags & FCF_ASYNC_SENT) { + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + return 0; + } + + switch (fcport->disc_state) { + case DSC_DELETED: + if (fcport->loop_id == FC_NO_LOOP_ID) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post gnl\n", + __func__, __LINE__, fcport->port_name); + qla24xx_async_gnl(vha, fcport); + } else { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post login\n", + __func__, __LINE__, fcport->port_name); + fcport->disc_state = DSC_LOGIN_PEND; + qla2x00_post_async_login_work(vha, fcport, NULL); + } + break; + + case DSC_GNL: + if (fcport->login_pause) { + fcport->last_rscn_gen = fcport->rscn_gen; + fcport->last_login_gen = fcport->login_gen; + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + break; + } + + if (fcport->flags & FCF_FCP2_DEVICE) { + u8 opt = PDO_FORCE_ADISC; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post gpdb\n", + __func__, __LINE__, fcport->port_name); + + fcport->disc_state = DSC_GPDB; + qla24xx_post_gpdb_work(vha, fcport, opt); + } else { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post login \n", + __func__, __LINE__, fcport->port_name); + fcport->disc_state = DSC_LOGIN_PEND; + qla2x00_post_async_login_work(vha, fcport, NULL); + } + + break; + + case DSC_LOGIN_FAILED: + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post gidpn \n", + __func__, __LINE__, fcport->port_name); + + qla24xx_post_gidpn_work(vha, fcport); + break; + + case DSC_LOGIN_COMPLETE: + /* recheck login state */ + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post gpdb \n", + __func__, __LINE__, fcport->port_name); + + qla24xx_post_gpdb_work(vha, fcport, PDO_FORCE_ADISC); + break; + + default: + break; + } + + return 0; +} + +static +void qla24xx_handle_rscn_event(fc_port_t *fcport, struct event_arg *ea) +{ + fcport->rscn_gen++; + + ql_dbg(ql_dbg_disc, fcport->vha, 0xffff, + "%s %8phC DS %d LS %d\n", + __func__, fcport->port_name, fcport->disc_state, + fcport->fw_login_state); + + if (fcport->flags & FCF_ASYNC_SENT) + return; + + switch (fcport->disc_state) { + case DSC_DELETED: + case DSC_LOGIN_COMPLETE: + qla24xx_post_gidpn_work(fcport->vha, fcport); + break; + + default: + break; + } +} + +int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id, + u8 *port_name, void *pla) +{ + struct qla_work_evt *e; + e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.new_sess.id = *id; + e->u.new_sess.pla = pla; + memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE); + + return qla2x00_post_work(vha, e); +} + +static +int qla24xx_handle_delete_done_event(scsi_qla_host_t *vha, + struct event_arg *ea) +{ + fc_port_t *fcport = ea->fcport; + + if (test_bit(UNLOADING, &vha->dpc_flags)) + return 0; + + switch (vha->host->active_mode) { + case MODE_INITIATOR: + case MODE_DUAL: + if (fcport->scan_state == QLA_FCPORT_FOUND) + qla24xx_fcport_handle_login(vha, fcport); + break; + + case MODE_TARGET: + default: + /* no-op */ + break; + } + + return 0; +} + +static +void qla24xx_handle_relogin_event(scsi_qla_host_t *vha, + struct event_arg *ea) +{ + fc_port_t *fcport = ea->fcport; + + if (fcport->scan_state != QLA_FCPORT_FOUND) { + fcport->login_retry++; + return; + } + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n", + __func__, fcport->port_name, fcport->disc_state, + fcport->fw_login_state, fcport->login_pause, + fcport->deleted, fcport->conflict, + 
fcport->last_rscn_gen, fcport->rscn_gen, + fcport->last_login_gen, fcport->login_gen, + fcport->flags); + + if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || + (fcport->fw_login_state == DSC_LS_PRLI_PEND)) + return; + + if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) { + if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) + return; + } + + if (fcport->flags & FCF_ASYNC_SENT) { + fcport->login_retry++; + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + return; + } + + if (fcport->disc_state == DSC_DELETE_PEND) { + fcport->login_retry++; + return; + } + + if (fcport->last_rscn_gen != fcport->rscn_gen) { + ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC post gidpn\n", + __func__, __LINE__, fcport->port_name); + + qla24xx_async_gidpn(vha, fcport); + return; + } + + qla24xx_fcport_handle_login(vha, fcport); +} + +void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea) +{ + fc_port_t *fcport, *f, *tf; + uint32_t id = 0, mask, rid; + int rc; + + switch (ea->event) { + case FCME_RELOGIN: + if (test_bit(UNLOADING, &vha->dpc_flags)) + return; + + qla24xx_handle_relogin_event(vha, ea); + break; + case FCME_RSCN: + if (test_bit(UNLOADING, &vha->dpc_flags)) + return; + switch (ea->id.b.rsvd_1) { + case RSCN_PORT_ADDR: + fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1); + if (!fcport) { + /* cable moved */ + rc = qla24xx_post_gpnid_work(vha, &ea->id); + if (rc) { + ql_log(ql_log_warn, vha, 0xffff, + "RSCN GPNID work failed %02x%02x%02x\n", + ea->id.b.domain, ea->id.b.area, + ea->id.b.al_pa); + } + } else { + ea->fcport = fcport; + qla24xx_handle_rscn_event(fcport, ea); + } + break; + case RSCN_AREA_ADDR: + case RSCN_DOM_ADDR: + if (ea->id.b.rsvd_1 == RSCN_AREA_ADDR) { + mask = 0xffff00; + ql_log(ql_dbg_async, vha, 0xffff, + "RSCN: Area 0x%06x was affected\n", + ea->id.b24); + } else { + mask = 0xff0000; + ql_log(ql_dbg_async, vha, 0xffff, + "RSCN: Domain 0x%06x was affected\n", + ea->id.b24); + } + + rid = ea->id.b24 & mask; + list_for_each_entry_safe(f, tf, &vha->vp_fcports, + list) { + id = f->d_id.b24 & mask; + if (rid == id) { + ea->fcport = f; + qla24xx_handle_rscn_event(f, ea); + } + } + break; + case RSCN_FAB_ADDR: + default: + ql_log(ql_log_warn, vha, 0xffff, + "RSCN: Fabric was affected. 
Addr format %d\n", + ea->id.b.rsvd_1); + qla2x00_mark_all_devices_lost(vha, 1); + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + } + break; + case FCME_GIDPN_DONE: + qla24xx_handle_gidpn_event(vha, ea); + break; + case FCME_GNL_DONE: + qla24xx_handle_gnl_done_event(vha, ea); + break; + case FCME_GPSC_DONE: + qla24xx_post_upd_fcport_work(vha, ea->fcport); + break; + case FCME_PLOGI_DONE: /* Initiator side sent LLIOCB */ + qla24xx_handle_plogi_done_event(vha, ea); + break; + case FCME_GPDB_DONE: + qla24xx_handle_gpdb_event(vha, ea); + break; + case FCME_GPNID_DONE: + qla24xx_handle_gpnid_event(vha, ea); + break; + case FCME_DELETE_DONE: + qla24xx_handle_delete_done_event(vha, ea); + break; + default: + BUG_ON(1); + break; + } } static void qla2x00_tmf_iocb_timeout(void *data) { - srb_t *sp = (srb_t *)data; + srb_t *sp = data; struct srb_iocb *tmf = &sp->u.iocb_cmd; tmf->u.tmf.comp_status = CS_TIMEOUT; @@ -287,10 +1138,11 @@ qla2x00_tmf_iocb_timeout(void *data) } static void -qla2x00_tmf_sp_done(void *data, void *ptr, int res) +qla2x00_tmf_sp_done(void *ptr, int res) { - srb_t *sp = (srb_t *)ptr; + srb_t *sp = ptr; struct srb_iocb *tmf = &sp->u.iocb_cmd; + complete(&tmf->u.tmf.comp); } @@ -348,7 +1200,7 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, } done_free_sp: - sp->free(vha, sp); + sp->free(sp); done: return rval; } @@ -356,7 +1208,7 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, static void qla24xx_abort_iocb_timeout(void *data) { - srb_t *sp = (srb_t *)data; + srb_t *sp = data; struct srb_iocb *abt = &sp->u.iocb_cmd; abt->u.abt.comp_status = CS_TIMEOUT; @@ -364,18 +1216,18 @@ qla24xx_abort_iocb_timeout(void *data) } static void -qla24xx_abort_sp_done(void *data, void *ptr, int res) +qla24xx_abort_sp_done(void *ptr, int res) { - srb_t *sp = (srb_t *)ptr; + srb_t *sp = ptr; struct srb_iocb *abt = &sp->u.iocb_cmd; complete(&abt->u.abt.comp); } -static int +int qla24xx_async_abort_cmd(srb_t *cmd_sp) { - scsi_qla_host_t *vha = cmd_sp->fcport->vha; + scsi_qla_host_t *vha = cmd_sp->vha; fc_port_t *fcport = cmd_sp->fcport; struct srb_iocb *abt_iocb; srb_t *sp; @@ -408,7 +1260,7 @@ qla24xx_async_abort_cmd(srb_t *cmd_sp) QLA_SUCCESS : QLA_FUNCTION_FAILED; done_free_sp: - sp->free(vha, sp); + sp->free(sp); done: return rval; } @@ -441,59 +1293,65 @@ qla24xx_async_abort_command(srb_t *sp) return qla24xx_async_abort_cmd(sp); } -void -qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport, - uint16_t *data) +static void +qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) { - int rval; + port_id_t cid; /* conflict Nport id */ - switch (data[0]) { + switch (ea->data[0]) { case MBS_COMMAND_COMPLETE: /* * Driver must validate login state - If PRLI not complete, * force a relogin attempt via implicit LOGO, PLOGI, and PRLI * requests. 
*/ - rval = qla2x00_get_port_database(vha, fcport, 0); - if (rval == QLA_NOT_LOGGED_IN) { - fcport->flags &= ~FCF_ASYNC_SENT; - fcport->flags |= FCF_LOGIN_NEEDED; - set_bit(RELOGIN_NEEDED, &vha->dpc_flags); - break; - } - - if (rval != QLA_SUCCESS) { - qla2x00_post_async_logout_work(vha, fcport, NULL); - qla2x00_post_async_login_work(vha, fcport, NULL); - break; - } - if (fcport->flags & FCF_FCP2_DEVICE) { - qla2x00_post_async_adisc_work(vha, fcport, data); - break; - } - qla2x00_update_fcport(vha, fcport); + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post gpdb\n", + __func__, __LINE__, ea->fcport->port_name); + ea->fcport->chip_reset = vha->hw->chip_reset; + ea->fcport->logout_on_delete = 1; + qla24xx_post_gpdb_work(vha, ea->fcport, 0); break; case MBS_COMMAND_ERROR: - fcport->flags &= ~FCF_ASYNC_SENT; - if (data[1] & QLA_LOGIO_LOGIN_RETRIED) + ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC cmd error %x\n", + __func__, __LINE__, ea->fcport->port_name, ea->data[1]); + + ea->fcport->flags &= ~FCF_ASYNC_SENT; + ea->fcport->disc_state = DSC_LOGIN_FAILED; + if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED) set_bit(RELOGIN_NEEDED, &vha->dpc_flags); else - qla2x00_mark_device_lost(vha, fcport, 1, 0); - break; - case MBS_PORT_ID_USED: - fcport->loop_id = data[1]; - qla2x00_post_async_logout_work(vha, fcport, NULL); - qla2x00_post_async_login_work(vha, fcport, NULL); + qla2x00_mark_device_lost(vha, ea->fcport, 1, 0); break; case MBS_LOOP_ID_USED: - fcport->loop_id++; - rval = qla2x00_find_new_loop_id(vha, fcport); - if (rval != QLA_SUCCESS) { - fcport->flags &= ~FCF_ASYNC_SENT; - qla2x00_mark_device_lost(vha, fcport, 1, 0); - break; + /* data[1] = IO PARAM 1 = nport ID */ + cid.b.domain = (ea->iop[1] >> 16) & 0xff; + cid.b.area = (ea->iop[1] >> 8) & 0xff; + cid.b.al_pa = ea->iop[1] & 0xff; + cid.b.rsvd_1 = 0; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC LoopID 0x%x in use post gnl\n", + __func__, __LINE__, ea->fcport->port_name, + ea->fcport->loop_id); + + if (IS_SW_RESV_ADDR(cid)) { + set_bit(ea->fcport->loop_id, vha->hw->loop_id_map); + ea->fcport->loop_id = FC_NO_LOOP_ID; + } else { + qla2x00_clear_loop_id(ea->fcport); } - qla2x00_post_async_login_work(vha, fcport, NULL); + qla24xx_post_gnl_work(vha, ea->fcport); + break; + case MBS_PORT_ID_USED: + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC NPortId %02x%02x%02x inuse post gidpn\n", + __func__, __LINE__, ea->fcport->port_name, + ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area, + ea->fcport->d_id.b.al_pa); + + qla2x00_clear_loop_id(ea->fcport); + qla24xx_post_gidpn_work(vha, ea->fcport); break; } return; @@ -503,10 +1361,9 @@ void qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport, uint16_t *data) { - /* Don't re-login in target mode */ - if (!fcport->tgt_session) - qla2x00_mark_device_lost(vha, fcport, 1, 0); + qla2x00_mark_device_lost(vha, fcport, 1, 0); qlt_logo_completion_handler(fcport, data[0]); + fcport->login_gen++; return; } @@ -709,7 +1566,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) } } - if (qla_ini_mode_enabled(vha)) + if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha)) rval = qla2x00_init_rings(vha); ha->flags.chip_reset_done = 1; @@ -2088,6 +2945,21 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha) __func__, ha->fw_options[2]); } + /* Move PUREX, ABTS RX & RIDA to ATIOQ */ + if (ql2xmvasynctoatio) { + if (qla_tgt_mode_enabled(vha) || + qla_dual_mode_enabled(vha)) + ha->fw_options[2] |= BIT_11; + else + ha->fw_options[2] &= ~BIT_11; + } + + ql_dbg(ql_dbg_init, vha, 0xffff, + "%s, 
add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n", + __func__, ha->fw_options[1], ha->fw_options[2], + ha->fw_options[3], vha->host->active_mode); + qla2x00_set_fw_options(vha, ha->fw_options); + /* Update Serial Link options. */ if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0) return; @@ -2306,6 +3178,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha) } else { ql_dbg(ql_dbg_init, vha, 0x00d3, "Init Firmware -- success.\n"); + ha->flags.fw_started = 1; } return (rval); @@ -2468,8 +3341,8 @@ qla2x00_configure_hba(scsi_qla_host_t *vha) uint8_t domain; char connect_type[22]; struct qla_hw_data *ha = vha->hw; - unsigned long flags; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + port_id_t id; /* Get host addresses. */ rval = qla2x00_get_adapter_id(vha, @@ -2547,13 +3420,11 @@ qla2x00_configure_hba(scsi_qla_host_t *vha) /* Save Host port and loop ID. */ /* byte order - Big Endian */ - vha->d_id.b.domain = domain; - vha->d_id.b.area = area; - vha->d_id.b.al_pa = al_pa; - - spin_lock_irqsave(&ha->vport_slock, flags); - qlt_update_vp_map(vha, SET_AL_PA); - spin_unlock_irqrestore(&ha->vport_slock, flags); + id.b.domain = domain; + id.b.area = area; + id.b.al_pa = al_pa; + id.b.rsvd_1 = 0; + qlt_update_host_map(vha, id); if (!vha->flags.init_done) ql_log(ql_log_info, vha, 0x2010, @@ -2968,8 +3839,14 @@ qla2x00_rport_del(void *data) rport = fcport->drport ? fcport->drport: fcport->rport; fcport->drport = NULL; spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); - if (rport) + if (rport) { + ql_dbg(ql_dbg_disc, fcport->vha, 0xffff, + "%s %8phN. rport %p roles %x \n", + __func__, fcport->port_name, rport, + rport->roles); + fc_remote_port_delete(rport); + } } /** @@ -2995,9 +3872,42 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); fcport->supported_classes = FC_COS_UNSPECIFIED; + fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma, + flags); + fcport->disc_state = DSC_DELETED; + fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; + fcport->deleted = QLA_SESS_DELETED; + fcport->login_retry = vha->hw->login_retry_count; + fcport->login_retry = 5; + fcport->logout_on_delete = 1; + + if (!fcport->ct_desc.ct_sns) { + ql_log(ql_log_warn, vha, 0xffff, + "Failed to allocate ct_sns request.\n"); + kfree(fcport); + fcport = NULL; + } + INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn); + INIT_LIST_HEAD(&fcport->gnl_entry); + INIT_LIST_HEAD(&fcport->list); + return fcport; } +void +qla2x00_free_fcport(fc_port_t *fcport) +{ + if (fcport->ct_desc.ct_sns) { + dma_free_coherent(&fcport->vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), fcport->ct_desc.ct_sns, + fcport->ct_desc.ct_sns_dma); + + fcport->ct_desc.ct_sns = NULL; + } + kfree(fcport); +} + /* * qla2x00_configure_loop * Updates Fibre Channel Device Database with what is actually on loop. 
@@ -3055,10 +3965,11 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) } else if (ha->current_topology == ISP_CFG_N) { clear_bit(RSCN_UPDATE, &flags); - + } else if (ha->current_topology == ISP_CFG_NL) { + clear_bit(RSCN_UPDATE, &flags); + set_bit(LOCAL_LOOP_UPDATE, &flags); } else if (!vha->flags.online || (test_bit(ABORT_ISP_ACTIVE, &flags))) { - set_bit(RSCN_UPDATE, &flags); set_bit(LOCAL_LOOP_UPDATE, &flags); } @@ -3090,12 +4001,14 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) atomic_set(&vha->loop_state, LOOP_READY); ql_dbg(ql_dbg_disc, vha, 0x2069, "LOOP READY.\n"); + ha->flags.fw_init_done = 1; /* * Process any ATIO queue entries that came in * while we weren't online. */ - if (qla_tgt_mode_enabled(vha)) { + if (qla_tgt_mode_enabled(vha) || + qla_dual_mode_enabled(vha)) { if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) { spin_lock_irqsave(&ha->tgt.atio_lock, flags); @@ -3159,6 +4072,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) uint16_t loop_id; uint8_t domain, area, al_pa; struct qla_hw_data *ha = vha->hw; + unsigned long flags; found_devs = 0; new_fcport = NULL; @@ -3199,7 +4113,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) "Marking port lost loop_id=0x%04x.\n", fcport->loop_id); - qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); + qla2x00_mark_device_lost(vha, fcport, 0, 0); } } @@ -3230,13 +4144,14 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) if (loop_id > LAST_LOCAL_LOOP_ID) continue; - memset(new_fcport, 0, sizeof(fc_port_t)); + memset(new_fcport->port_name, 0, WWN_SIZE); /* Fill in member data. */ new_fcport->d_id.b.domain = domain; new_fcport->d_id.b.area = area; new_fcport->d_id.b.al_pa = al_pa; new_fcport->loop_id = loop_id; + rval2 = qla2x00_get_port_database(vha, new_fcport, 0); if (rval2 != QLA_SUCCESS) { ql_dbg(ql_dbg_disc, vha, 0x201a, @@ -3249,6 +4164,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) continue; } + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); /* Check for matching device in port list. */ found = 0; fcport = NULL; @@ -3264,6 +4180,12 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) memcpy(fcport->node_name, new_fcport->node_name, WWN_SIZE); + if (!fcport->login_succ) { + vha->fcport_count++; + fcport->login_succ = 1; + fcport->disc_state = DSC_LOGIN_COMPLETE; + } + found++; break; } @@ -3274,16 +4196,28 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) /* Allocate a new replacement fcport. */ fcport = new_fcport; + if (!fcport->login_succ) { + vha->fcport_count++; + fcport->login_succ = 1; + fcport->disc_state = DSC_LOGIN_COMPLETE; + } + + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); + if (new_fcport == NULL) { ql_log(ql_log_warn, vha, 0x201c, "Failed to allocate memory for fcport.\n"); rval = QLA_MEMORY_ALLOC_FAILED; goto cleanup_allocation; } + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); new_fcport->flags &= ~FCF_FABRIC_DEVICE; } + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + /* Base iIDMA settings on HBA port speed. */ fcport->fp_speed = ha->link_data_rate; @@ -3334,6 +4268,7 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) } } +/* qla2x00_reg_remote_port is reserved for Initiator Mode only.*/ static void qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) { @@ -3352,12 +4287,6 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) "Unable to allocate fc remote port.\n"); return; } - /* - * Create target mode FC NEXUS in qla_target.c if target mode is - * enabled.. 
- */ - - qlt_fc_port_added(vha, fcport); spin_lock_irqsave(fcport->vha->host->host_lock, flags); *((fc_port_t **)rport->dd_data) = fcport; @@ -3370,6 +4299,12 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR; if (fcport->port_type == FCT_TARGET) rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phN. rport %p is %s mode \n", + __func__, fcport->port_name, rport, + (fcport->port_type == FCT_TARGET) ? "tgt" : "ini"); + fc_remote_port_rolechg(rport, rport_ids.roles); } @@ -3393,25 +4328,44 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) { fcport->vha = vha; + if (IS_SW_RESV_ADDR(fcport->d_id)) + return; + + ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %8phC \n", + __func__, fcport->port_name); + if (IS_QLAFX00(vha->hw)) { qla2x00_set_fcport_state(fcport, FCS_ONLINE); goto reg_port; } fcport->login_retry = 0; fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); + fcport->disc_state = DSC_LOGIN_COMPLETE; + fcport->deleted = 0; + fcport->logout_on_delete = 1; qla2x00_set_fcport_state(fcport, FCS_ONLINE); qla2x00_iidma_fcport(vha, fcport); qla24xx_update_fcport_fcp_prio(vha, fcport); reg_port: - if (qla_ini_mode_enabled(vha)) + switch (vha->host->active_mode) { + case MODE_INITIATOR: qla2x00_reg_remote_port(vha, fcport); - else { - /* - * Create target mode FC NEXUS in qla_target.c - */ - qlt_fc_port_added(vha, fcport); + break; + case MODE_TARGET: + if (!vha->vha_tgt.qla_tgt->tgt_stop && + !vha->vha_tgt.qla_tgt->tgt_stopped) + qlt_fc_port_added(vha, fcport); + break; + case MODE_DUAL: + qla2x00_reg_remote_port(vha, fcport); + if (!vha->vha_tgt.qla_tgt->tgt_stop && + !vha->vha_tgt.qla_tgt->tgt_stopped) + qlt_fc_port_added(vha, fcport); + break; + default: + break; } } @@ -3430,13 +4384,11 @@ static int qla2x00_configure_fabric(scsi_qla_host_t *vha) { int rval; - fc_port_t *fcport, *fcptemp; - uint16_t next_loopid; + fc_port_t *fcport; uint16_t mb[MAILBOX_REGISTER_COUNT]; uint16_t loop_id; LIST_HEAD(new_fcports); struct qla_hw_data *ha = vha->hw; - struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); int discovery_gen; /* If FL port exists, then SNS is present */ @@ -3454,7 +4406,19 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) } vha->device_flags |= SWITCH_FOUND; + + if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) { + rval = qla2x00_send_change_request(vha, 0x3, 0); + if (rval != QLA_SUCCESS) + ql_log(ql_log_warn, vha, 0x121, + "Failed to enable receiving of RSCN requests: 0x%x.\n", + rval); + } + + do { + qla2x00_mgmt_svr_login(vha); + /* FDMI support. */ if (ql2xfdmienable && test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags)) @@ -3501,9 +4465,6 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) } } -#define QLA_FCPORT_SCAN 1 -#define QLA_FCPORT_FOUND 2 - list_for_each_entry(fcport, &vha->vp_fcports, list) { fcport->scan_state = QLA_FCPORT_SCAN; } @@ -3516,174 +4477,14 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) * will be newer than discovery_gen. */ qlt_do_generation_tick(vha, &discovery_gen); - rval = qla2x00_find_all_fabric_devs(vha, &new_fcports); + rval = qla2x00_find_all_fabric_devs(vha); if (rval != QLA_SUCCESS) break; - - /* - * Logout all previous fabric devices marked lost, except - * FCP2 devices. 
- */ - list_for_each_entry(fcport, &vha->vp_fcports, list) { - if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) - break; - - if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) - continue; - - if (fcport->scan_state == QLA_FCPORT_SCAN) { - if (qla_ini_mode_enabled(base_vha) && - atomic_read(&fcport->state) == FCS_ONLINE) { - qla2x00_mark_device_lost(vha, fcport, - ql2xplogiabsentdevice, 0); - if (fcport->loop_id != FC_NO_LOOP_ID && - (fcport->flags & FCF_FCP2_DEVICE) == 0 && - fcport->port_type != FCT_INITIATOR && - fcport->port_type != FCT_BROADCAST) { - ha->isp_ops->fabric_logout(vha, - fcport->loop_id, - fcport->d_id.b.domain, - fcport->d_id.b.area, - fcport->d_id.b.al_pa); - qla2x00_clear_loop_id(fcport); - } - } else if (!qla_ini_mode_enabled(base_vha)) { - /* - * In target mode, explicitly kill - * sessions and log out of devices - * that are gone, so that we don't - * end up with an initiator using the - * wrong ACL (if the fabric recycles - * an FC address and we have a stale - * session around) and so that we don't - * report initiators that are no longer - * on the fabric. - */ - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf077, - "port gone, logging out/killing session: " - "%8phC state 0x%x flags 0x%x fc4_type 0x%x " - "scan_state %d\n", - fcport->port_name, - atomic_read(&fcport->state), - fcport->flags, fcport->fc4_type, - fcport->scan_state); - qlt_fc_port_deleted(vha, fcport, - discovery_gen); - } - } - } - - /* Starting free loop ID. */ - next_loopid = ha->min_external_loopid; - - /* - * Scan through our port list and login entries that need to be - * logged in. - */ - list_for_each_entry(fcport, &vha->vp_fcports, list) { - if (atomic_read(&vha->loop_down_timer) || - test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) - break; - - if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 || - (fcport->flags & FCF_LOGIN_NEEDED) == 0) - continue; - - /* - * If we're not an initiator, skip looking for devices - * and logging in. There's no reason for us to do it, - * and it seems to actively cause problems in target - * mode if we race with the initiator logging into us - * (we might get the "port ID used" status back from - * our login command and log out the initiator, which - * seems to cause havoc). - */ - if (!qla_ini_mode_enabled(base_vha)) { - if (fcport->scan_state == QLA_FCPORT_FOUND) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf078, - "port %8phC state 0x%x flags 0x%x fc4_type 0x%x " - "scan_state %d (initiator mode disabled; skipping " - "login)\n", fcport->port_name, - atomic_read(&fcport->state), - fcport->flags, fcport->fc4_type, - fcport->scan_state); - } - continue; - } - - if (fcport->loop_id == FC_NO_LOOP_ID) { - fcport->loop_id = next_loopid; - rval = qla2x00_find_new_loop_id( - base_vha, fcport); - if (rval != QLA_SUCCESS) { - /* Ran out of IDs to use */ - break; - } - } - /* Login and update database */ - qla2x00_fabric_dev_login(vha, fcport, &next_loopid); - } - - /* Exit if out of loop IDs. */ - if (rval != QLA_SUCCESS) { - break; - } - - /* - * Login and add the new devices to our port list. - */ - list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) { - if (atomic_read(&vha->loop_down_timer) || - test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) - break; - - /* - * If we're not an initiator, skip looking for devices - * and logging in. 
There's no reason for us to do it, - * and it seems to actively cause problems in target - * mode if we race with the initiator logging into us - * (we might get the "port ID used" status back from - * our login command and log out the initiator, which - * seems to cause havoc). - */ - if (qla_ini_mode_enabled(base_vha)) { - /* Find a new loop ID to use. */ - fcport->loop_id = next_loopid; - rval = qla2x00_find_new_loop_id(base_vha, - fcport); - if (rval != QLA_SUCCESS) { - /* Ran out of IDs to use */ - break; - } - - /* Login and update database */ - qla2x00_fabric_dev_login(vha, fcport, - &next_loopid); - } else { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf079, - "new port %8phC state 0x%x flags 0x%x fc4_type " - "0x%x scan_state %d (initiator mode disabled; " - "skipping login)\n", - fcport->port_name, - atomic_read(&fcport->state), - fcport->flags, fcport->fc4_type, - fcport->scan_state); - } - - list_move_tail(&fcport->list, &vha->vp_fcports); - } } while (0); - /* Free all new device structures not processed. */ - list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) { - list_del(&fcport->list); - kfree(fcport); - } - - if (rval) { + if (rval) ql_dbg(ql_dbg_disc, vha, 0x2068, "Configure fabric error exit rval=%d.\n", rval); - } return (rval); } @@ -3702,12 +4503,11 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) * Kernel context. */ static int -qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, - struct list_head *new_fcports) +qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha) { int rval; uint16_t loop_id; - fc_port_t *fcport, *new_fcport, *fcptemp; + fc_port_t *fcport, *new_fcport; int found; sw_info_t *swl; @@ -3716,6 +4516,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, port_id_t wrap = {}, nxt_d_id; struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + unsigned long flags; rval = QLA_SUCCESS; @@ -3736,9 +4537,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, swl = NULL; } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) { swl = NULL; - } else if (ql2xiidmaenable && - qla2x00_gfpn_id(vha, swl) == QLA_SUCCESS) { - qla2x00_gpsc(vha, swl); + } else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) { + swl = NULL; } /* If other queries succeeded probe for FC-4 type */ @@ -3800,11 +4600,6 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, ql_log(ql_log_warn, vha, 0x2064, "SNS scan failed -- assuming " "zero-entry result.\n"); - list_for_each_entry_safe(fcport, fcptemp, - new_fcports, list) { - list_del(&fcport->list); - kfree(fcport); - } rval = QLA_SUCCESS; break; } @@ -3847,6 +4642,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, new_fcport->fc4_type != FC4_TYPE_UNKNOWN)) continue; + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + /* Locate matching device in database. */ found = 0; list_for_each_entry(fcport, &vha->vp_fcports, list) { @@ -3869,7 +4666,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, */ if (fcport->d_id.b24 == new_fcport->d_id.b24 && (atomic_read(&fcport->state) == FCS_ONLINE || - !qla_ini_mode_enabled(base_vha))) { + (vha->host->active_mode == MODE_TARGET))) { break; } @@ -3889,7 +4686,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, * Log it out if still logged in and mark it for * relogin later. 
*/ - if (!qla_ini_mode_enabled(base_vha)) { + if (qla_tgt_mode_enabled(base_vha)) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080, "port changed FC ID, %8phC" " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n", @@ -3907,25 +4704,19 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, fcport->d_id.b24 = new_fcport->d_id.b24; fcport->flags |= FCF_LOGIN_NEEDED; - if (fcport->loop_id != FC_NO_LOOP_ID && - (fcport->flags & FCF_FCP2_DEVICE) == 0 && - (fcport->flags & FCF_ASYNC_SENT) == 0 && - fcport->port_type != FCT_INITIATOR && - fcport->port_type != FCT_BROADCAST) { - ha->isp_ops->fabric_logout(vha, fcport->loop_id, - fcport->d_id.b.domain, fcport->d_id.b.area, - fcport->d_id.b.al_pa); - qla2x00_clear_loop_id(fcport); - } - break; } - if (found) + if (found) { + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); continue; + } /* If device was not in our fcports list, then add it. */ new_fcport->scan_state = QLA_FCPORT_FOUND; - list_add_tail(&new_fcport->list, new_fcports); + list_add_tail(&new_fcport->list, &vha->vp_fcports); + + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + /* Allocate a new replacement fcport. */ nxt_d_id.b24 = new_fcport->d_id.b24; @@ -3939,8 +4730,44 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, new_fcport->d_id.b24 = nxt_d_id.b24; } - kfree(new_fcport); + qla2x00_free_fcport(new_fcport); + + /* + * Logout all previous fabric dev marked lost, except FCP2 devices. + */ + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) + break; + + if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 || + (fcport->flags & FCF_LOGIN_NEEDED) == 0) + continue; + + if (fcport->scan_state == QLA_FCPORT_SCAN) { + if ((qla_dual_mode_enabled(vha) || + qla_ini_mode_enabled(vha)) && + atomic_read(&fcport->state) == FCS_ONLINE) { + qla2x00_mark_device_lost(vha, fcport, + ql2xplogiabsentdevice, 0); + if (fcport->loop_id != FC_NO_LOOP_ID && + (fcport->flags & FCF_FCP2_DEVICE) == 0 && + fcport->port_type != FCT_INITIATOR && + fcport->port_type != FCT_BROADCAST) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post del sess\n", + __func__, __LINE__, + fcport->port_name); + + qlt_schedule_sess_for_deletion_lock + (fcport); + continue; + } + } + } + if (fcport->scan_state == QLA_FCPORT_FOUND) + qla24xx_fcport_handle_login(vha, fcport); + } return (rval); } @@ -3992,64 +4819,6 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev) return (rval); } -/* - * qla2x00_fabric_dev_login - * Login fabric target device and update FC port database. - * - * Input: - * ha: adapter state pointer. - * fcport: port structure list pointer. - * next_loopid: contains value of a new loop ID that can be used - * by the next login attempt. - * - * Returns: - * qla2x00 local function return status code. - * - * Context: - * Kernel context. 
- */ -static int -qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport, - uint16_t *next_loopid) -{ - int rval; - uint8_t opts; - struct qla_hw_data *ha = vha->hw; - - rval = QLA_SUCCESS; - - if (IS_ALOGIO_CAPABLE(ha)) { - if (fcport->flags & FCF_ASYNC_SENT) - return rval; - fcport->flags |= FCF_ASYNC_SENT; - rval = qla2x00_post_async_login_work(vha, fcport, NULL); - if (!rval) - return rval; - } - - fcport->flags &= ~FCF_ASYNC_SENT; - rval = qla2x00_fabric_login(vha, fcport, next_loopid); - if (rval == QLA_SUCCESS) { - /* Send an ADISC to FCP2 devices.*/ - opts = 0; - if (fcport->flags & FCF_FCP2_DEVICE) - opts |= BIT_1; - rval = qla2x00_get_port_database(vha, fcport, opts); - if (rval != QLA_SUCCESS) { - ha->isp_ops->fabric_logout(vha, fcport->loop_id, - fcport->d_id.b.domain, fcport->d_id.b.area, - fcport->d_id.b.al_pa); - qla2x00_mark_device_lost(vha, fcport, 1, 0); - } else { - qla2x00_update_fcport(vha, fcport); - } - } else { - /* Retry Login. */ - qla2x00_mark_device_lost(vha, fcport, 1, 0); - } - - return (rval); -} /* * qla2x00_fabric_login @@ -4341,17 +5110,11 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha) spin_unlock_irqrestore(&ha->vport_slock, flags); qla2x00_rport_del(fcport); - /* - * Release the target mode FC NEXUS in - * qla_target.c, if target mod is enabled. - */ - qlt_fc_port_deleted(vha, fcport, - base_vha->total_fcport_update_gen); - spin_lock_irqsave(&ha->vport_slock, flags); } } atomic_dec(&vha->vref_count); + wake_up(&vha->vref_waitq); } spin_unlock_irqrestore(&ha->vport_slock, flags); } @@ -4730,6 +5493,13 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) if (!(IS_P3P_TYPE(ha))) ha->isp_ops->reset_chip(vha); + ha->flags.n2n_ae = 0; + ha->flags.lip_ae = 0; + ha->current_topology = 0; + ha->flags.fw_started = 0; + ha->flags.fw_init_done = 0; + ha->chip_reset++; + atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); @@ -4784,8 +5554,6 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) /* Requeue all commands in outstanding command list. */ qla2x00_abort_all_cmds(vha, DID_RESET << 16); } - - ha->chip_reset++; /* memory barrier */ wmb(); } @@ -4981,7 +5749,6 @@ qla2x00_restart_isp(scsi_qla_host_t *vha) if (!status) { /* Issue a marker after FW becomes ready. 
*/ qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); - set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); } @@ -5209,7 +5976,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) rval = 1; } - if (!qla_ini_mode_enabled(vha)) { + if (qla_tgt_mode_enabled(vha)) { /* Don't enable full login after initial LIP */ nv->firmware_options_1 &= cpu_to_le32(~BIT_13); /* Don't enable LIP full login for initiator */ @@ -5400,6 +6167,7 @@ uint8_t qla27xx_find_valid_image(struct scsi_qla_host *vha) for (chksum = 0; cnt--; wptr++) chksum += le32_to_cpu(*wptr); + if (chksum) { ql_dbg(ql_dbg_init, vha, 0x018c, "Checksum validation failed for primary image (0x%x)\n", @@ -6006,6 +6774,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha) return; if (!ha->fw_major_version) return; + if (!ha->flags.fw_started) + return; ret = qla2x00_stop_firmware(vha); for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT && @@ -6019,6 +6789,9 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha) "Attempting retry of stop-firmware command.\n"); ret = qla2x00_stop_firmware(vha); } + + ha->flags.fw_started = 0; + ha->flags.fw_init_done = 0; } int @@ -6412,6 +7185,10 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) vha->flags.process_response_queue = 1; } + /* enable RIDA Format2 */ + if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) + icb->firmware_options_3 |= BIT_0; + if (rval) { ql_log(ql_log_warn, vha, 0x0076, "NVRAM configuration failed.\n"); @@ -6536,13 +7313,26 @@ qla81xx_update_fw_options(scsi_qla_host_t *vha) __func__, ha->fw_options[2]); } - if (!ql2xetsenable) - goto out; + /* Move PUREX, ABTS RX & RIDA to ATIOQ */ + if (ql2xmvasynctoatio) { + if (qla_tgt_mode_enabled(vha) || + qla_dual_mode_enabled(vha)) + ha->fw_options[2] |= BIT_11; + else + ha->fw_options[2] &= ~BIT_11; + } + + if (ql2xetsenable) { + /* Enable ETS Burst. */ + memset(ha->fw_options, 0, sizeof(ha->fw_options)); + ha->fw_options[2] |= BIT_9; + } + + ql_dbg(ql_dbg_init, vha, 0xffff, + "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n", + __func__, ha->fw_options[1], ha->fw_options[2], + ha->fw_options[3], vha->host->active_mode); - /* Enable ETS Burst. 
*/ - memset(ha->fw_options, 0, sizeof(ha->fw_options)); - ha->fw_options[2] |= BIT_9; -out: qla2x00_set_fw_options(vha, ha->fw_options); } @@ -6748,6 +7538,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int v memset(qpair, 0, sizeof(struct qla_qpair)); qpair->hw = vha->hw; + qpair->vha = vha; /* Assign available que pair id */ mutex_lock(&ha->mq_lock); diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index 44e404583c86fc..66df6cec59da40 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -166,8 +166,8 @@ qla2x00_set_fcport_state(fc_port_t *fcport, int state) /* Don't print state transitions during initial allocation of fcport */ if (old_state && old_state != state) { ql_dbg(ql_dbg_disc, fcport->vha, 0x207d, - "FCPort state transitioned from %s to %s - " - "portid=%02x%02x%02x.\n", + "FCPort %8phC state transitioned from %s to %s - " + "portid=%02x%02x%02x.\n", fcport->port_name, port_state_str[old_state], port_state_str[state], fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); @@ -232,6 +232,7 @@ qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag) memset(sp, 0, sizeof(*sp)); sp->fcport = fcport; sp->iocbs = 1; + sp->vha = qpair->vha; done: if (!sp) QLA_QPAIR_MARK_NOT_BUSY(qpair); @@ -249,20 +250,20 @@ static inline srb_t * qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag) { srb_t *sp = NULL; - struct qla_hw_data *ha = vha->hw; uint8_t bail; QLA_VHA_MARK_BUSY(vha, bail); if (unlikely(bail)) return NULL; - sp = mempool_alloc(ha->srb_mempool, flag); + sp = mempool_alloc(vha->hw->srb_mempool, flag); if (!sp) goto done; memset(sp, 0, sizeof(*sp)); sp->fcport = fcport; sp->iocbs = 1; + sp->vha = vha; done: if (!sp) QLA_VHA_MARK_NOT_BUSY(vha); @@ -270,10 +271,10 @@ qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag) } static inline void -qla2x00_rel_sp(scsi_qla_host_t *vha, srb_t *sp) +qla2x00_rel_sp(srb_t *sp) { - mempool_free(sp, vha->hw->srb_mempool); - QLA_VHA_MARK_NOT_BUSY(vha); + QLA_VHA_MARK_NOT_BUSY(sp->vha); + mempool_free(sp, sp->vha->hw->srb_mempool); } static inline void @@ -285,8 +286,7 @@ qla2x00_init_timer(srb_t *sp, unsigned long tmo) sp->u.iocb_cmd.timer.function = qla2x00_sp_timeout; add_timer(&sp->u.iocb_cmd.timer); sp->free = qla2x00_sp_free; - if ((IS_QLAFX00(sp->fcport->vha->hw)) && - (sp->type == SRB_FXIOCB_DCMD)) + if (IS_QLAFX00(sp->vha->hw) && (sp->type == SRB_FXIOCB_DCMD)) init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp); if (sp->type == SRB_ELS_DCMD) init_completion(&sp->u.iocb_cmd.u.els_logo.comp); diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 58e49a3e1de8bc..ea027f6a7fd4e9 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -23,7 +23,7 @@ qla2x00_get_cmd_direction(srb_t *sp) { uint16_t cflags; struct scsi_cmnd *cmd = GET_CMD_SP(sp); - struct scsi_qla_host *vha = sp->fcport->vha; + struct scsi_qla_host *vha = sp->vha; cflags = 0; @@ -210,7 +210,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt, return; } - vha = sp->fcport->vha; + vha = sp->vha; cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); /* Three DSDs are available in the Command Type 2 IOCB */ @@ -267,7 +267,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt, return; } - vha = sp->fcport->vha; + vha = sp->vha; cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); /* Two DSDs are 
available in the Command Type 3 IOCB */ @@ -324,7 +324,7 @@ qla2x00_start_scsi(srb_t *sp) struct rsp_que *rsp; /* Setup device pointers. */ - vha = sp->fcport->vha; + vha = sp->vha; ha = vha->hw; reg = &ha->iobase->isp; cmd = GET_CMD_SP(sp); @@ -601,7 +601,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt, return 0; } - vha = sp->fcport->vha; + vha = sp->vha; ha = vha->hw; /* Set transfer direction */ @@ -716,7 +716,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt, return; } - vha = sp->fcport->vha; + vha = sp->vha; /* Set transfer direction */ if (cmd->sc_data_direction == DMA_TO_DEVICE) { @@ -889,7 +889,7 @@ qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx, int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, - uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc) + uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc) { void *next_dsd; uint8_t avail_dsds = 0; @@ -898,7 +898,6 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, struct scatterlist *sg_prot; uint32_t *cur_dsd = dsd; uint16_t used_dsds = tot_dsds; - uint32_t prot_int; /* protection interval */ uint32_t partial; struct qla2_sgx sgx; @@ -966,7 +965,7 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, } else { list_add_tail(&dsd_ptr->list, &(tc->ctx->dsd_list)); - tc->ctx_dsd_alloced = 1; + *tc->ctx_dsd_alloced = 1; } @@ -1005,7 +1004,7 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, int qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, - uint16_t tot_dsds, struct qla_tgt_cmd *tc) + uint16_t tot_dsds, struct qla_tc_param *tc) { void *next_dsd; uint8_t avail_dsds = 0; @@ -1066,7 +1065,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, } else { list_add_tail(&dsd_ptr->list, &(tc->ctx->dsd_list)); - tc->ctx_dsd_alloced = 1; + *tc->ctx_dsd_alloced = 1; } /* add new list to cmd iocb or last list */ @@ -1092,7 +1091,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, - uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc) + uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc) { void *next_dsd; uint8_t avail_dsds = 0; @@ -1108,7 +1107,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, if (sp) { cmd = GET_CMD_SP(sp); sgl = scsi_prot_sglist(cmd); - vha = sp->fcport->vha; + vha = sp->vha; } else if (tc) { vha = tc->vha; sgl = tc->prot_sg; @@ -1158,7 +1157,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, } else { list_add_tail(&dsd_ptr->list, &(tc->ctx->dsd_list)); - tc->ctx_dsd_alloced = 1; + *tc->ctx_dsd_alloced = 1; } /* add new list to cmd iocb or last list */ @@ -1215,7 +1214,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, /* Update entry type to indicate Command Type CRC_2 IOCB */ *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2); - vha = sp->fcport->vha; + vha = sp->vha; ha = vha->hw; /* No data transfer */ @@ -1225,7 +1224,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, return QLA_SUCCESS; } - cmd_pkt->vp_index = sp->fcport->vha->vp_idx; + cmd_pkt->vp_index = sp->vha->vp_idx; /* Set transfer direction */ if (cmd->sc_data_direction == DMA_TO_DEVICE) { @@ -1415,7 +1414,7 @@ qla24xx_start_scsi(srb_t *sp) struct req_que *req = NULL; struct rsp_que *rsp = NULL; 
struct scsi_cmnd *cmd = GET_CMD_SP(sp); - struct scsi_qla_host *vha = sp->fcport->vha; + struct scsi_qla_host *vha = sp->vha; struct qla_hw_data *ha = vha->hw; /* Setup device pointers. */ @@ -1492,7 +1491,7 @@ qla24xx_start_scsi(srb_t *sp) cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; - cmd_pkt->vp_index = sp->fcport->vha->vp_idx; + cmd_pkt->vp_index = sp->vha->vp_idx; int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); @@ -1564,7 +1563,7 @@ qla24xx_dif_start_scsi(srb_t *sp) struct req_que *req = NULL; struct rsp_que *rsp = NULL; struct scsi_cmnd *cmd = GET_CMD_SP(sp); - struct scsi_qla_host *vha = sp->fcport->vha; + struct scsi_qla_host *vha = sp->vha; struct qla_hw_data *ha = vha->hw; struct cmd_type_crc_2 *cmd_pkt; uint32_t status = 0; @@ -2214,13 +2213,13 @@ qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio) logio->port_id[0] = sp->fcport->d_id.b.al_pa; logio->port_id[1] = sp->fcport->d_id.b.area; logio->port_id[2] = sp->fcport->d_id.b.domain; - logio->vp_index = sp->fcport->vha->vp_idx; + logio->vp_index = sp->vha->vp_idx; } static void qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx) { - struct qla_hw_data *ha = sp->fcport->vha->hw; + struct qla_hw_data *ha = sp->vha->hw; struct srb_iocb *lio = &sp->u.iocb_cmd; uint16_t opts; @@ -2238,7 +2237,7 @@ qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx) mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | sp->fcport->d_id.b.al_pa); - mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx); + mbx->mb9 = cpu_to_le16(sp->vha->vp_idx); } static void @@ -2247,20 +2246,20 @@ qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio) logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; logio->control_flags = cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); - if (!sp->fcport->tgt_session || - !sp->fcport->tgt_session->keep_nport_handle) + if (!sp->fcport->se_sess || + !sp->fcport->keep_nport_handle) logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT); logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); logio->port_id[0] = sp->fcport->d_id.b.al_pa; logio->port_id[1] = sp->fcport->d_id.b.area; logio->port_id[2] = sp->fcport->d_id.b.domain; - logio->vp_index = sp->fcport->vha->vp_idx; + logio->vp_index = sp->vha->vp_idx; } static void qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx) { - struct qla_hw_data *ha = sp->fcport->vha->hw; + struct qla_hw_data *ha = sp->vha->hw; mbx->entry_type = MBX_IOCB_TYPE; SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); @@ -2271,7 +2270,7 @@ qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx) mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | sp->fcport->d_id.b.al_pa); - mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx); + mbx->mb9 = cpu_to_le16(sp->vha->vp_idx); /* Implicit: mbx->mbx10 = 0. 
*/ } @@ -2281,13 +2280,13 @@ qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio) logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC); logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); - logio->vp_index = sp->fcport->vha->vp_idx; + logio->vp_index = sp->vha->vp_idx; } static void qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx) { - struct qla_hw_data *ha = sp->fcport->vha->hw; + struct qla_hw_data *ha = sp->vha->hw; mbx->entry_type = MBX_IOCB_TYPE; SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); @@ -2302,7 +2301,7 @@ qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx) mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma)); mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma))); mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma))); - mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx); + mbx->mb9 = cpu_to_le16(sp->vha->vp_idx); } static void @@ -2338,32 +2337,30 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk) } static void -qla2x00_els_dcmd_sp_free(void *ptr, void *data) +qla2x00_els_dcmd_sp_free(void *data) { - struct scsi_qla_host *vha = (scsi_qla_host_t *)ptr; - struct qla_hw_data *ha = vha->hw; - srb_t *sp = (srb_t *)data; + srb_t *sp = data; struct srb_iocb *elsio = &sp->u.iocb_cmd; kfree(sp->fcport); if (elsio->u.els_logo.els_logo_pyld) - dma_free_coherent(&ha->pdev->dev, DMA_POOL_SIZE, + dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE, elsio->u.els_logo.els_logo_pyld, elsio->u.els_logo.els_logo_pyld_dma); del_timer(&elsio->timer); - qla2x00_rel_sp(vha, sp); + qla2x00_rel_sp(sp); } static void qla2x00_els_dcmd_iocb_timeout(void *data) { - srb_t *sp = (srb_t *)data; - struct srb_iocb *lio = &sp->u.iocb_cmd; + srb_t *sp = data; fc_port_t *fcport = sp->fcport; - struct scsi_qla_host *vha = fcport->vha; + struct scsi_qla_host *vha = sp->vha; struct qla_hw_data *ha = vha->hw; + struct srb_iocb *lio = &sp->u.iocb_cmd; unsigned long flags = 0; ql_dbg(ql_dbg_io, vha, 0x3069, @@ -2386,12 +2383,12 @@ qla2x00_els_dcmd_iocb_timeout(void *data) } static void -qla2x00_els_dcmd_sp_done(void *data, void *ptr, int res) +qla2x00_els_dcmd_sp_done(void *ptr, int res) { - srb_t *sp = (srb_t *)ptr; + srb_t *sp = ptr; fc_port_t *fcport = sp->fcport; struct srb_iocb *lio = &sp->u.iocb_cmd; - struct scsi_qla_host *vha = fcport->vha; + struct scsi_qla_host *vha = sp->vha; ql_dbg(ql_dbg_io, vha, 0x3072, "%s hdl=%x, portid=%02x%02x%02x done\n", @@ -2449,7 +2446,7 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode, GFP_KERNEL); if (!elsio->u.els_logo.els_logo_pyld) { - sp->free(vha, sp); + sp->free(sp); return QLA_FUNCTION_FAILED; } @@ -2468,7 +2465,7 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode, rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { - sp->free(vha, sp); + sp->free(sp); return QLA_FUNCTION_FAILED; } @@ -2479,14 +2476,14 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode, wait_for_completion(&elsio->u.els_logo.comp); - sp->free(vha, sp); + sp->free(sp); return rval; } static void qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) { - scsi_qla_host_t *vha = sp->fcport->vha; + scsi_qla_host_t *vha = sp->vha; struct srb_iocb *elsio = &sp->u.iocb_cmd; els_iocb->entry_type = ELS_IOCB_TYPE; @@ -2518,7 +2515,7 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) els_iocb->rx_address[1] = 0; els_iocb->rx_len = 0; - sp->fcport->vha->qla_stats.control_requests++; + sp->vha->qla_stats.control_requests++; } static void @@ -2534,7 +2531,7 @@ qla24xx_els_iocb(srb_t *sp, 
struct els_entry_24xx *els_iocb) els_iocb->handle = sp->handle; els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt); - els_iocb->vp_index = sp->fcport->vha->vp_idx; + els_iocb->vp_index = sp->vha->vp_idx; els_iocb->sof_type = EST_SOFI3; els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt); @@ -2565,7 +2562,7 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) els_iocb->rx_len = cpu_to_le32(sg_dma_len (bsg_job->reply_payload.sg_list)); - sp->fcport->vha->qla_stats.control_requests++; + sp->vha->qla_stats.control_requests++; } static void @@ -2576,7 +2573,7 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb) struct scatterlist *sg; int index; uint16_t tot_dsds; - scsi_qla_host_t *vha = sp->fcport->vha; + scsi_qla_host_t *vha = sp->vha; struct qla_hw_data *ha = vha->hw; struct bsg_job *bsg_job = sp->u.bsg_job; int loop_iterartion = 0; @@ -2642,7 +2639,7 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb) } ct_iocb->entry_count = entry_count; - sp->fcport->vha->qla_stats.control_requests++; + sp->vha->qla_stats.control_requests++; } static void @@ -2653,7 +2650,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb) struct scatterlist *sg; int index; uint16_t tot_dsds; - scsi_qla_host_t *vha = sp->fcport->vha; + scsi_qla_host_t *vha = sp->vha; struct qla_hw_data *ha = vha->hw; struct bsg_job *bsg_job = sp->u.bsg_job; int loop_iterartion = 0; @@ -2665,7 +2662,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb) ct_iocb->handle = sp->handle; ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); - ct_iocb->vp_index = sp->fcport->vha->vp_idx; + ct_iocb->vp_index = sp->vha->vp_idx; ct_iocb->comp_status = cpu_to_le16(0); ct_iocb->cmd_dsd_count = @@ -2739,7 +2736,7 @@ qla82xx_start_scsi(srb_t *sp) uint32_t *fcp_dl; uint8_t additional_cdb_len; struct ct6_dsd *ctx; - struct scsi_qla_host *vha = sp->fcport->vha; + struct scsi_qla_host *vha = sp->vha; struct qla_hw_data *ha = vha->hw; struct req_que *req = NULL; struct rsp_que *rsp = NULL; @@ -2901,7 +2898,7 @@ qla82xx_start_scsi(srb_t *sp) cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; - cmd_pkt->vp_index = sp->fcport->vha->vp_idx; + cmd_pkt->vp_index = sp->vha->vp_idx; /* Build IOCB segments */ if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds)) @@ -2974,7 +2971,7 @@ qla82xx_start_scsi(srb_t *sp) cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; - cmd_pkt->vp_index = sp->fcport->vha->vp_idx; + cmd_pkt->vp_index = sp->vha->vp_idx; int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, @@ -3060,7 +3057,7 @@ static void qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb) { struct srb_iocb *aio = &sp->u.iocb_cmd; - scsi_qla_host_t *vha = sp->fcport->vha; + scsi_qla_host_t *vha = sp->vha; struct req_que *req = vha->req; memset(abt_iocb, 0, sizeof(struct abort_entry_24xx)); @@ -3079,19 +3076,69 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb) wmb(); } +static void +qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx) +{ + int i, sz; + + mbx->entry_type = MBX_IOCB_TYPE; + mbx->handle = sp->handle; + sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb)); + + for (i = 0; i < sz; i++) + mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]); +} + 
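+/*
+ * CT pass-through setup: hand the sp->u.iocb_cmd.u.ctarg descriptor to
+ * qla24xx_prep_ms_iocb(), which formats the MS IOCB, and record the sp
+ * handle so the completion is routed back through the SRB_CT_PTHRU_CMD
+ * cases added to qla2x00_ct_entry()/qla24xx_els_ct_entry() in qla_isr.c.
+ */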
+static void +qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt) +{ + sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt; + qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg); + ct_pkt->handle = sp->handle; +} + +static void qla2x00_send_notify_ack_iocb(srb_t *sp, + struct nack_to_isp *nack) +{ + struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy; + + nack->entry_type = NOTIFY_ACK_TYPE; + nack->entry_count = 1; + nack->ox_id = ntfy->ox_id; + + nack->u.isp24.handle = sp->handle; + nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; + if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { + nack->u.isp24.flags = ntfy->u.isp24.flags & + cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB); + } + nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; + nack->u.isp24.status = ntfy->u.isp24.status; + nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode; + nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle; + nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address; + nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs; + nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui; + nack->u.isp24.srr_flags = 0; + nack->u.isp24.srr_reject_code = 0; + nack->u.isp24.srr_reject_code_expl = 0; + nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; +} + int qla2x00_start_sp(srb_t *sp) { int rval; - struct qla_hw_data *ha = sp->fcport->vha->hw; + scsi_qla_host_t *vha = sp->vha; + struct qla_hw_data *ha = vha->hw; void *pkt; unsigned long flags; rval = QLA_FUNCTION_FAILED; spin_lock_irqsave(&ha->hardware_lock, flags); - pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp); + pkt = qla2x00_alloc_iocbs(vha, sp); if (!pkt) { - ql_log(ql_log_warn, sp->fcport->vha, 0x700c, + ql_log(ql_log_warn, vha, 0x700c, "qla2x00_alloc_iocbs failed.\n"); goto done; } @@ -3139,12 +3186,23 @@ qla2x00_start_sp(srb_t *sp) case SRB_ELS_DCMD: qla24xx_els_logo_iocb(sp, pkt); break; + case SRB_CT_PTHRU_CMD: + qla2x00_ctpthru_cmd_iocb(sp, pkt); + break; + case SRB_MB_IOCB: + qla2x00_mb_iocb(sp, pkt); + break; + case SRB_NACK_PLOGI: + case SRB_NACK_PRLI: + case SRB_NACK_LOGO: + qla2x00_send_notify_ack_iocb(sp, pkt); + break; default: break; } wmb(); - qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]); + qla2x00_start_iocbs(vha, ha->req_q_map[0]); done: spin_unlock_irqrestore(&ha->hardware_lock, flags); return rval; diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index edc2264db45bec..3203367a4f4236 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -561,14 +561,50 @@ qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry) return ret; } -static inline fc_port_t * +fc_port_t * qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id) { - fc_port_t *fcport; + fc_port_t *f, *tf; + + f = tf = NULL; + list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) + if (f->loop_id == loop_id) + return f; + return NULL; +} - list_for_each_entry(fcport, &vha->vp_fcports, list) - if (fcport->loop_id == loop_id) - return fcport; +fc_port_t * +qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted) +{ + fc_port_t *f, *tf; + + f = tf = NULL; + list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { + if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) { + if (incl_deleted) + return f; + else if (f->deleted == 0) + return f; + } + } + return NULL; +} + +fc_port_t * +qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id, + u8 incl_deleted) +{ + fc_port_t *f, *tf; + + f = tf = NULL; + list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { + 
if (f->d_id.b24 == id->b24) { + if (incl_deleted) + return f; + else if (f->deleted == 0) + return f; + } + } return NULL; } @@ -672,6 +708,8 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx); ha->isp_ops->fw_dump(vha, 1); + ha->flags.fw_init_done = 0; + ha->flags.fw_started = 0; if (IS_FWI2_CAPABLE(ha)) { if (mb[1] == 0 && mb[2] == 0) { @@ -725,6 +763,9 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) break; case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ + ha->flags.lip_ae = 1; + ha->flags.n2n_ae = 0; + ql_dbg(ql_dbg_async, vha, 0x5009, "LIP occurred (%x).\n", mb[1]); @@ -761,6 +802,10 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) break; case MBA_LOOP_DOWN: /* Loop Down Event */ + ha->flags.n2n_ae = 0; + ha->flags.lip_ae = 0; + ha->current_topology = 0; + mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha)) ? RD_REG_WORD(®24->mailbox4) : 0; mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(®82->mailbox_out[4]) @@ -830,6 +875,9 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) /* case MBA_DCBX_COMPLETE: */ case MBA_POINT_TO_POINT: /* Point-to-Point */ + ha->flags.lip_ae = 0; + ha->flags.n2n_ae = 1; + if (IS_QLA2100(ha)) break; @@ -934,7 +982,11 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) ql_dbg(ql_dbg_async, vha, 0x508a, "Marking port lost loopid=%04x portid=%06x.\n", fcport->loop_id, fcport->d_id.b24); - qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); + if (qla_ini_mode_enabled(vha)) { + qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); + fcport->logout_on_delete = 0; + qlt_schedule_sess_for_deletion_lock(fcport); + } break; global_port_update: @@ -985,9 +1037,6 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) qla2x00_mark_all_devices_lost(vha, 1); - if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha)) - set_bit(SCR_PENDING, &vha->dpc_flags); - set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); set_bit(VP_CONFIG_OK, &vha->vp_flags); @@ -1024,27 +1073,19 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) if (qla2x00_is_a_vp_did(vha, rscn_entry)) break; - /* - * Search for the rport related to this RSCN entry and mark it - * as lost. 
- */ - list_for_each_entry(fcport, &vha->vp_fcports, list) { - if (atomic_read(&fcport->state) != FCS_ONLINE) - continue; - if (fcport->d_id.b24 == rscn_entry) { - qla2x00_mark_device_lost(vha, fcport, 0, 0); - break; - } - } - atomic_set(&vha->loop_down_timer, 0); vha->flags.management_server_logged_in = 0; - - set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); - set_bit(RSCN_UPDATE, &vha->dpc_flags); - qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry); + { + struct event_arg ea; + + memset(&ea, 0, sizeof(ea)); + ea.event = FCME_RSCN; + ea.id.b24 = rscn_entry; + ea.id.b.rsvd_1 = rscn_entry >> 24; + qla2x00_fcport_event_handler(vha, &ea); + qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry); + } break; - /* case MBA_RIO_RESPONSE: */ case MBA_ZIO_RESPONSE: ql_dbg(ql_dbg_async, vha, 0x5015, @@ -1212,7 +1253,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha, req->outstanding_cmds[index] = NULL; /* Save ISP completion status */ - sp->done(ha, sp, DID_OK << 16); + sp->done(sp, DID_OK << 16); } else { ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n"); @@ -1235,7 +1276,8 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, index = LSW(pkt->handle); if (index >= req->num_outstanding_cmds) { ql_log(ql_log_warn, vha, 0x5031, - "Invalid command index (%x).\n", index); + "Invalid command index (%x) type %8ph.\n", + index, iocb); if (IS_P3P_TYPE(ha)) set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); else @@ -1343,66 +1385,122 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, le16_to_cpu(mbx->mb7)); logio_done: - sp->done(vha, sp, 0); + sp->done(sp, 0); } static void -qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req, - sts_entry_t *pkt, int iocb_type) +qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, + struct mbx_24xx_entry *pkt) { - const char func[] = "CT_IOCB"; - const char *type; + const char func[] = "MBX-IOCB2"; srb_t *sp; - struct bsg_job *bsg_job; - struct fc_bsg_reply *bsg_reply; - uint16_t comp_status; + struct srb_iocb *si; + u16 sz, i; int res; sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); if (!sp) return; - bsg_job = sp->u.bsg_job; - bsg_reply = bsg_job->reply; + si = &sp->u.iocb_cmd; + sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb)); - type = "ct pass-through"; + for (i = 0; i < sz; i++) + si->u.mbx.in_mb[i] = le16_to_cpu(pkt->mb[i]); - comp_status = le16_to_cpu(pkt->comp_status); + res = (si->u.mbx.in_mb[0] & MBS_MASK); - /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT - * fc payload to the caller - */ - bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; - bsg_job->reply_len = sizeof(struct fc_bsg_reply); + sp->done(sp, res); +} - if (comp_status != CS_COMPLETE) { - if (comp_status == CS_DATA_UNDERRUN) { - res = DID_OK << 16; - bsg_reply->reply_payload_rcv_len = - le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len); +static void +qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, + struct nack_to_isp *pkt) +{ + const char func[] = "nack"; + srb_t *sp; + int res = 0; - ql_log(ql_log_warn, vha, 0x5048, - "CT pass-through-%s error " - "comp_status-status=0x%x total_byte = 0x%x.\n", - type, comp_status, - bsg_reply->reply_payload_rcv_len); - } else { - ql_log(ql_log_warn, vha, 0x5049, - "CT pass-through-%s error " - "comp_status-status=0x%x.\n", type, comp_status); - res = DID_ERROR << 16; - bsg_reply->reply_payload_rcv_len = 0; - } - ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035, - (uint8_t *)pkt, sizeof(*pkt)); - } else { - 
res = DID_OK << 16; - bsg_reply->reply_payload_rcv_len = - bsg_job->reply_payload.payload_len; - bsg_job->reply_len = 0; - } + sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); + if (!sp) + return; + + if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS)) + res = QLA_FUNCTION_FAILED; - sp->done(vha, sp, res); + sp->done(sp, res); +} + +static void +qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req, + sts_entry_t *pkt, int iocb_type) +{ + const char func[] = "CT_IOCB"; + const char *type; + srb_t *sp; + struct bsg_job *bsg_job; + struct fc_bsg_reply *bsg_reply; + uint16_t comp_status; + int res = 0; + + sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); + if (!sp) + return; + + switch (sp->type) { + case SRB_CT_CMD: + bsg_job = sp->u.bsg_job; + bsg_reply = bsg_job->reply; + + type = "ct pass-through"; + + comp_status = le16_to_cpu(pkt->comp_status); + + /* + * return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT + * fc payload to the caller + */ + bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + + if (comp_status != CS_COMPLETE) { + if (comp_status == CS_DATA_UNDERRUN) { + res = DID_OK << 16; + bsg_reply->reply_payload_rcv_len = + le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len); + + ql_log(ql_log_warn, vha, 0x5048, + "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n", + type, comp_status, + bsg_reply->reply_payload_rcv_len); + } else { + ql_log(ql_log_warn, vha, 0x5049, + "CT pass-through-%s error comp_status=0x%x.\n", + type, comp_status); + res = DID_ERROR << 16; + bsg_reply->reply_payload_rcv_len = 0; + } + ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035, + (uint8_t *)pkt, sizeof(*pkt)); + } else { + res = DID_OK << 16; + bsg_reply->reply_payload_rcv_len = + bsg_job->reply_payload.payload_len; + bsg_job->reply_len = 0; + } + break; + case SRB_CT_PTHRU_CMD: + /* + * borrowing sts_entry_24xx.comp_status. + * same location as ct_entry_24xx.comp_status + */ + res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt, + (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp, + sp->name); + break; + } + + sp->done(sp, res); } static void @@ -1438,7 +1536,16 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, type = "Driver ELS logo"; ql_dbg(ql_dbg_user, vha, 0x5047, "Completing %s: (%p) type=%d.\n", type, sp, sp->type); - sp->done(vha, sp, 0); + sp->done(sp, 0); + return; + case SRB_CT_PTHRU_CMD: + /* borrowing sts_entry_24xx.comp_status. 
+ same location as ct_entry_24xx.comp_status + */ + res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt, + (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp, + sp->name); + sp->done(sp, res); return; default: ql_dbg(ql_dbg_user, vha, 0x503e, @@ -1496,7 +1603,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, bsg_job->reply_len = 0; } - sp->done(vha, sp, res); + sp->done(sp, res); } static void @@ -1525,9 +1632,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, QLA_LOGIO_LOGIN_RETRIED : 0; if (logio->entry_status) { ql_log(ql_log_warn, fcport->vha, 0x5034, - "Async-%s error entry - hdl=%x" + "Async-%s error entry - %8phC hdl=%x" "portid=%02x%02x%02x entry-status=%x.\n", - type, sp->handle, fcport->d_id.b.domain, + type, fcport->port_name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, logio->entry_status); ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d, @@ -1538,11 +1645,13 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { ql_dbg(ql_dbg_async, fcport->vha, 0x5036, - "Async-%s complete - hdl=%x portid=%02x%02x%02x " - "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain, + "Async-%s complete - %8phC hdl=%x portid=%02x%02x%02x " + "iop0=%x.\n", type, fcport->port_name, sp->handle, + fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, le32_to_cpu(logio->io_parameter[0])); + vha->hw->exch_starvation = 0; data[0] = MBS_COMMAND_COMPLETE; if (sp->type != SRB_LOGIN_CMD) goto logio_done; @@ -1568,6 +1677,8 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, iop[0] = le32_to_cpu(logio->io_parameter[0]); iop[1] = le32_to_cpu(logio->io_parameter[1]); + lio->u.logio.iop[0] = iop[0]; + lio->u.logio.iop[1] = iop[1]; switch (iop[0]) { case LSC_SCODE_PORTID_USED: data[0] = MBS_PORT_ID_USED; @@ -1576,21 +1687,48 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, case LSC_SCODE_NPORT_USED: data[0] = MBS_LOOP_ID_USED; break; + case LSC_SCODE_CMD_FAILED: + if (iop[1] == 0x0606) { + /* + * PLOGI/PRLI Completed. We must have Recv PLOGI/PRLI, + * Target side acked. + */ + data[0] = MBS_COMMAND_COMPLETE; + goto logio_done; + } + data[0] = MBS_COMMAND_ERROR; + break; + case LSC_SCODE_NOXCB: + vha->hw->exch_starvation++; + if (vha->hw->exch_starvation > 5) { + ql_log(ql_log_warn, vha, 0xffff, + "Exchange starvation. 
Resetting RISC\n"); + + vha->hw->exch_starvation = 0; + + if (IS_P3P_TYPE(vha->hw)) + set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); + else + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } + /* drop through */ default: data[0] = MBS_COMMAND_ERROR; break; } ql_dbg(ql_dbg_async, fcport->vha, 0x5037, - "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x " - "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain, + "Async-%s failed - %8phC hdl=%x portid=%02x%02x%02x comp=%x " + "iop0=%x iop1=%x.\n", type, fcport->port_name, + sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, le16_to_cpu(logio->comp_status), le32_to_cpu(logio->io_parameter[0]), le32_to_cpu(logio->io_parameter[1])); logio_done: - sp->done(vha, sp, 0); + sp->done(sp, 0); } static void @@ -1640,7 +1778,7 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk) ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055, (uint8_t *)sts, sizeof(*sts)); - sp->done(vha, sp, 0); + sp->done(sp, 0); } /** @@ -1728,7 +1866,7 @@ static inline void qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, uint32_t sense_len, struct rsp_que *rsp, int res) { - struct scsi_qla_host *vha = sp->fcport->vha; + struct scsi_qla_host *vha = sp->vha; struct scsi_cmnd *cp = GET_CMD_SP(sp); uint32_t track_sense_len; @@ -1756,7 +1894,7 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, if (sense_len) { ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c, "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n", - sp->fcport->vha->host_no, cp->device->id, cp->device->lun, + sp->vha->host_no, cp->device->id, cp->device->lun, cp); ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b, cp->sense_buffer, sense_len); @@ -1778,7 +1916,7 @@ struct scsi_dif_tuple { static inline int qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) { - struct scsi_qla_host *vha = sp->fcport->vha; + struct scsi_qla_host *vha = sp->vha; struct scsi_cmnd *cmd = GET_CMD_SP(sp); uint8_t *ap = &sts24->data[12]; uint8_t *ep = &sts24->data[20]; @@ -2043,7 +2181,7 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt, bsg_job->reply_len = sizeof(struct fc_bsg_reply); /* Always return DID_OK, bsg will send the vendor specific response * in this case only */ - sp->done(vha, sp, (DID_OK << 6)); + sp->done(sp, DID_OK << 6); } @@ -2076,6 +2214,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) int res = 0; uint16_t state_flags = 0; uint16_t retry_delay = 0; + uint8_t no_logout = 0; sts = (sts_entry_t *) pkt; sts24 = (struct sts_entry_24xx *) pkt; @@ -2336,6 +2475,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) break; case CS_PORT_LOGGED_OUT: + no_logout = 1; case CS_PORT_CONFIG_CHG: case CS_PORT_BUSY: case CS_INCOMPLETE: @@ -2358,14 +2498,21 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) break; } - ql_dbg(ql_dbg_io, fcport->vha, 0x3021, - "Port to be marked lost on fcport=%02x%02x%02x, current " - "port state= %s.\n", fcport->d_id.b.domain, - fcport->d_id.b.area, fcport->d_id.b.al_pa, - port_state_str[atomic_read(&fcport->state)]); + if (atomic_read(&fcport->state) == FCS_ONLINE) { + ql_dbg(ql_dbg_disc, fcport->vha, 0x3021, + "Port to be marked lost on fcport=%02x%02x%02x, current " + "port state= %s comp_status %x.\n", fcport->d_id.b.domain, + fcport->d_id.b.area, fcport->d_id.b.al_pa, + port_state_str[atomic_read(&fcport->state)], + comp_status); 
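+			/* no_logout was set for CS_PORT_LOGGED_OUT above: clear
+			 * logout_on_delete so teardown skips the explicit LOGO,
+			 * then mark the device lost and queue the session for
+			 * deletion.
+			 */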
+ + if (no_logout) + fcport->logout_on_delete = 0; - if (atomic_read(&fcport->state) == FCS_ONLINE) qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); + qlt_schedule_sess_for_deletion_lock(fcport); + } + break; case CS_ABORTED: @@ -2407,7 +2554,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) resid_len, fw_resid_len, sp, cp); if (rsp->status_srb == NULL) - sp->done(ha, sp, res); + sp->done(sp, res); } /** @@ -2464,7 +2611,7 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) /* Place command on done queue. */ if (sense_len == 0) { rsp->status_srb = NULL; - sp->done(ha, sp, cp->result); + sp->done(sp, cp->result); } } @@ -2500,7 +2647,7 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); if (sp) { - sp->done(ha, sp, res); + sp->done(sp, res); return; } fatal: @@ -2557,8 +2704,8 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, return; abt = &sp->u.iocb_cmd; - abt->u.abt.comp_status = le32_to_cpu(pkt->nport_handle); - sp->done(vha, sp, 0); + abt->u.abt.comp_status = le16_to_cpu(pkt->nport_handle); + sp->done(sp, 0); } /** @@ -2571,7 +2718,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, struct sts_entry_24xx *pkt; struct qla_hw_data *ha = vha->hw; - if (!vha->flags.online) + if (!ha->flags.fw_started) return; while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { @@ -2629,10 +2776,16 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, } case ABTS_RESP_24XX: case CTIO_TYPE7: - case NOTIFY_ACK_TYPE: case CTIO_CRC2: qlt_response_pkt_all_vps(vha, (response_t *)pkt); break; + case NOTIFY_ACK_TYPE: + if (pkt->handle == QLA_TGT_SKIP_HANDLE) + qlt_response_pkt_all_vps(vha, (response_t *)pkt); + else + qla24xxx_nack_iocb_entry(vha, rsp->req, + (struct nack_to_isp *)pkt); + break; case MARKER_TYPE: /* Do nothing in this case, this check is to prevent it * from falling into default case @@ -2642,6 +2795,10 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, qla24xx_abort_iocb_entry(vha, rsp->req, (struct abort_entry_24xx *)pkt); break; + case MBX_IOCB_TYPE: + qla24xx_mbx_iocb_entry(vha, rsp->req, + (struct mbx_24xx_entry *)pkt); + break; default: /* Type Not Supported. 
*/ ql_dbg(ql_dbg_async, vha, 0x5042, @@ -2658,8 +2815,9 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, if (IS_P3P_TYPE(ha)) { struct device_reg_82xx __iomem *reg = &ha->iobase->isp82; WRT_REG_DWORD(®->rsp_q_out[0], rsp->ring_index); - } else + } else { WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index); + } } static void @@ -3015,14 +3173,17 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) int i, ret; struct qla_msix_entry *qentry; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + int min_vecs = QLA_BASE_VECTORS; struct irq_affinity desc = { .pre_vectors = QLA_BASE_VECTORS, }; - if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) + if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) { desc.pre_vectors++; + min_vecs++; + } - ret = pci_alloc_irq_vectors_affinity(ha->pdev, QLA_BASE_VECTORS, + ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs, ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc); diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 67f64db390b0cd..a113ab3592a7f8 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -10,6 +10,28 @@ #include #include +static struct mb_cmd_name { + uint16_t cmd; + const char *str; +} mb_str[] = { + {MBC_GET_PORT_DATABASE, "GPDB"}, + {MBC_GET_ID_LIST, "GIDList"}, + {MBC_GET_LINK_PRIV_STATS, "Stats"}, +}; + +static const char *mb_to_str(uint16_t cmd) +{ + int i; + struct mb_cmd_name *e; + + for (i = 0; i < ARRAY_SIZE(mb_str); i++) { + e = mb_str + i; + if (cmd == e->cmd) + return e->str; + } + return "unknown"; +} + static struct rom_cmd { uint16_t cmd; } rom_cmds[] = { @@ -1637,94 +1659,6 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size) return rval; } -/* - * qla2x00_get_node_name_list - * Issue get node name list mailbox command, kmalloc() - * and return the resulting list. Caller must kfree() it! - * - * Input: - * ha = adapter state pointer. - * out_data = resulting list - * out_len = length of the resulting list - * - * Returns: - * qla2x00 local function return status code. - * - * Context: - * Kernel context. 
- */ -int -qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len) -{ - struct qla_hw_data *ha = vha->hw; - struct qla_port_24xx_data *list = NULL; - void *pmap; - mbx_cmd_t mc; - dma_addr_t pmap_dma; - ulong dma_size; - int rval, left; - - left = 1; - while (left > 0) { - dma_size = left * sizeof(*list); - pmap = dma_alloc_coherent(&ha->pdev->dev, dma_size, - &pmap_dma, GFP_KERNEL); - if (!pmap) { - ql_log(ql_log_warn, vha, 0x113f, - "%s(%ld): DMA Alloc failed of %ld\n", - __func__, vha->host_no, dma_size); - rval = QLA_MEMORY_ALLOC_FAILED; - goto out; - } - - mc.mb[0] = MBC_PORT_NODE_NAME_LIST; - mc.mb[1] = BIT_1 | BIT_3; - mc.mb[2] = MSW(pmap_dma); - mc.mb[3] = LSW(pmap_dma); - mc.mb[6] = MSW(MSD(pmap_dma)); - mc.mb[7] = LSW(MSD(pmap_dma)); - mc.mb[8] = dma_size; - mc.out_mb = MBX_0|MBX_1|MBX_2|MBX_3|MBX_6|MBX_7|MBX_8; - mc.in_mb = MBX_0|MBX_1; - mc.tov = 30; - mc.flags = MBX_DMA_IN; - - rval = qla2x00_mailbox_command(vha, &mc); - if (rval != QLA_SUCCESS) { - if ((mc.mb[0] == MBS_COMMAND_ERROR) && - (mc.mb[1] == 0xA)) { - left += le16_to_cpu(mc.mb[2]) / - sizeof(struct qla_port_24xx_data); - goto restart; - } - goto out_free; - } - - left = 0; - - list = kmemdup(pmap, dma_size, GFP_KERNEL); - if (!list) { - ql_log(ql_log_warn, vha, 0x1140, - "%s(%ld): failed to allocate node names list " - "structure.\n", __func__, vha->host_no); - rval = QLA_MEMORY_ALLOC_FAILED; - goto out_free; - } - -restart: - dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma); - } - - *out_data = list; - *out_len = dma_size; - -out: - return rval; - -out_free: - dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma); - return rval; -} /* * qla2x00_get_port_database @@ -2906,7 +2840,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id, int qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, - dma_addr_t stats_dma, uint options) + dma_addr_t stats_dma, uint16_t options) { int rval; mbx_cmd_t mc; @@ -2916,19 +2850,17 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088, "Entered %s.\n", __func__); - mcp->mb[0] = MBC_GET_LINK_PRIV_STATS; - mcp->mb[2] = MSW(stats_dma); - mcp->mb[3] = LSW(stats_dma); - mcp->mb[6] = MSW(MSD(stats_dma)); - mcp->mb[7] = LSW(MSD(stats_dma)); - mcp->mb[8] = sizeof(struct link_statistics) / 4; - mcp->mb[9] = vha->vp_idx; - mcp->mb[10] = options; - mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; - mcp->in_mb = MBX_2|MBX_1|MBX_0; - mcp->tov = MBX_TOV_SECONDS; - mcp->flags = IOCTL_CMD; - rval = qla2x00_mailbox_command(vha, mcp); + memset(&mc, 0, sizeof(mc)); + mc.mb[0] = MBC_GET_LINK_PRIV_STATS; + mc.mb[2] = MSW(stats_dma); + mc.mb[3] = LSW(stats_dma); + mc.mb[6] = MSW(MSD(stats_dma)); + mc.mb[7] = LSW(MSD(stats_dma)); + mc.mb[8] = sizeof(struct link_statistics) / 4; + mc.mb[9] = cpu_to_le16(vha->vp_idx); + mc.mb[10] = cpu_to_le16(options); + + rval = qla24xx_send_mb_cmd(vha, &mc); if (rval == QLA_SUCCESS) { if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { @@ -3687,12 +3619,11 @@ void qla24xx_report_id_acquisition(scsi_qla_host_t *vha, struct vp_rpt_id_entry_24xx *rptid_entry) { - uint8_t vp_idx; - uint16_t stat = le16_to_cpu(rptid_entry->vp_idx); struct qla_hw_data *ha = vha->hw; - scsi_qla_host_t *vp; + scsi_qla_host_t *vp = NULL; unsigned long flags; int found; + port_id_t id; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6, "Entered %s.\n", __func__); @@ -3700,81 +3631,114 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, if 
(rptid_entry->entry_status != 0) return; + id.b.domain = rptid_entry->port_id[2]; + id.b.area = rptid_entry->port_id[1]; + id.b.al_pa = rptid_entry->port_id[0]; + id.b.rsvd_1 = 0; + if (rptid_entry->format == 0) { - ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b7, + /* loop */ + ql_dbg(ql_dbg_async, vha, 0x10b7, "Format 0 : Number of VPs setup %d, number of " - "VPs acquired %d.\n", - MSB(le16_to_cpu(rptid_entry->vp_count)), - LSB(le16_to_cpu(rptid_entry->vp_count))); - ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b8, + "VPs acquired %d.\n", rptid_entry->vp_setup, + rptid_entry->vp_acquired); + ql_dbg(ql_dbg_async, vha, 0x10b8, "Primary port id %02x%02x%02x.\n", rptid_entry->port_id[2], rptid_entry->port_id[1], rptid_entry->port_id[0]); + + qlt_update_host_map(vha, id); + } else if (rptid_entry->format == 1) { - vp_idx = LSB(stat); - ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b9, + /* fabric */ + ql_dbg(ql_dbg_async, vha, 0x10b9, "Format 1: VP[%d] enabled - status %d - with " - "port id %02x%02x%02x.\n", vp_idx, MSB(stat), + "port id %02x%02x%02x.\n", rptid_entry->vp_idx, + rptid_entry->vp_status, rptid_entry->port_id[2], rptid_entry->port_id[1], rptid_entry->port_id[0]); /* buffer to buffer credit flag */ - vha->flags.bbcr_enable = (rptid_entry->bbcr & 0xf) != 0; - - /* FA-WWN is only for physical port */ - if (!vp_idx) { - void *wwpn = ha->init_cb->port_name; + vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0; + + if (rptid_entry->vp_idx == 0) { + if (rptid_entry->vp_status == VP_STAT_COMPL) { + /* FA-WWN is only for physical port */ + if (qla_ini_mode_enabled(vha) && + ha->flags.fawwpn_enabled && + (rptid_entry->u.f1.flags & + VP_FLAGS_NAME_VALID)) { + memcpy(vha->port_name, + rptid_entry->u.f1.port_name, + WWN_SIZE); + } - if (!MSB(stat)) { - if (rptid_entry->vp_idx_map[1] & BIT_6) - wwpn = rptid_entry->reserved_4 + 8; + qlt_update_host_map(vha, id); } - memcpy(vha->port_name, wwpn, WWN_SIZE); + fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name); - ql_dbg(ql_dbg_mbx, vha, 0x1018, - "FA-WWN portname %016llx (%x)\n", - fc_host_port_name(vha->host), MSB(stat)); - } - - vp = vha; - if (vp_idx == 0) - goto reg_needed; - if (MSB(stat) != 0 && MSB(stat) != 2) { - ql_dbg(ql_dbg_mbx, vha, 0x10ba, - "Could not acquire ID for VP[%d].\n", vp_idx); - return; - } + if (qla_ini_mode_enabled(vha)) + ql_dbg(ql_dbg_mbx, vha, 0x1018, + "FA-WWN portname %016llx (%x)\n", + fc_host_port_name(vha->host), + rptid_entry->vp_status); - found = 0; - spin_lock_irqsave(&ha->vport_slock, flags); - list_for_each_entry(vp, &ha->vp_list, list) { - if (vp_idx == vp->vp_idx) { - found = 1; - break; + set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); + set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); + } else { + if (rptid_entry->vp_status != VP_STAT_COMPL && + rptid_entry->vp_status != VP_STAT_ID_CHG) { + ql_dbg(ql_dbg_mbx, vha, 0x10ba, + "Could not acquire ID for VP[%d].\n", + rptid_entry->vp_idx); + return; } - } - spin_unlock_irqrestore(&ha->vport_slock, flags); - if (!found) - return; + found = 0; + spin_lock_irqsave(&ha->vport_slock, flags); + list_for_each_entry(vp, &ha->vp_list, list) { + if (rptid_entry->vp_idx == vp->vp_idx) { + found = 1; + break; + } + } + spin_unlock_irqrestore(&ha->vport_slock, flags); - vp->d_id.b.domain = rptid_entry->port_id[2]; - vp->d_id.b.area = rptid_entry->port_id[1]; - vp->d_id.b.al_pa = rptid_entry->port_id[0]; + if (!found) + return; - /* - * Cannot configure here as we are still sitting on the - * response queue. Handle it in dpc context. 
- */ - set_bit(VP_IDX_ACQUIRED, &vp->vp_flags); + qlt_update_host_map(vp, id); -reg_needed: - set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags); - set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags); + /* + * Cannot configure here as we are still sitting on the + * response queue. Handle it in dpc context. + */ + set_bit(VP_IDX_ACQUIRED, &vp->vp_flags); + set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags); + set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags); + } set_bit(VP_DPC_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); + } else if (rptid_entry->format == 2) { + ql_dbg(ql_dbg_async, vha, 0xffff, + "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n", + rptid_entry->port_id[2], rptid_entry->port_id[1], + rptid_entry->port_id[0]); + + ql_dbg(ql_dbg_async, vha, 0xffff, + "N2N: Remote WWPN %8phC.\n", + rptid_entry->u.f2.port_name); + + /* N2N. direct connect */ + vha->d_id.b.domain = rptid_entry->port_id[2]; + vha->d_id.b.area = rptid_entry->port_id[1]; + vha->d_id.b.al_pa = rptid_entry->port_id[0]; + + spin_lock_irqsave(&ha->vport_slock, flags); + qlt_update_vp_map(vha, SET_AL_PA); + spin_unlock_irqrestore(&ha->vport_slock, flags); } } @@ -5873,3 +5837,225 @@ qla26xx_dport_diagnostics(scsi_qla_host_t *vha, return rval; } + +static void qla2x00_async_mb_sp_done(void *s, int res) +{ + struct srb *sp = s; + + sp->u.iocb_cmd.u.mbx.rc = res; + + complete(&sp->u.iocb_cmd.u.mbx.comp); + /* don't free sp here. Let the caller do the free */ +} + +/* + * This mailbox uses the iocb interface to send MB command. + * This allows non-critial (non chip setup) command to go + * out in parrallel. + */ +int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp) +{ + int rval = QLA_FUNCTION_FAILED; + srb_t *sp; + struct srb_iocb *c; + + if (!vha->hw->flags.fw_started) + goto done; + + sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); + if (!sp) + goto done; + + sp->type = SRB_MB_IOCB; + sp->name = mb_to_str(mcp->mb[0]); + + qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + + memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG); + + c = &sp->u.iocb_cmd; + c->timeout = qla2x00_async_iocb_timeout; + init_completion(&c->u.mbx.comp); + + sp->done = qla2x00_async_mb_sp_done; + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0xffff, + "%s: %s Failed submission. %x.\n", + __func__, sp->name, rval); + goto done_free_sp; + } + + ql_dbg(ql_dbg_mbx, vha, 0xffff, "MB:%s hndl %x submitted\n", + sp->name, sp->handle); + + wait_for_completion(&c->u.mbx.comp); + memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG); + + rval = c->u.mbx.rc; + switch (rval) { + case QLA_FUNCTION_TIMEOUT: + ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s Timeout. %x.\n", + __func__, sp->name, rval); + break; + case QLA_SUCCESS: + ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s done.\n", + __func__, sp->name); + sp->free(sp); + break; + default: + ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s Failed. 
%x.\n", + __func__, sp->name, rval); + sp->free(sp); + break; + } + + return rval; + +done_free_sp: + sp->free(sp); +done: + return rval; +} + +/* + * qla24xx_gpdb_wait + * NOTE: Do not call this routine from DPC thread + */ +int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) +{ + int rval = QLA_FUNCTION_FAILED; + dma_addr_t pd_dma; + struct port_database_24xx *pd; + struct qla_hw_data *ha = vha->hw; + mbx_cmd_t mc; + + if (!vha->hw->flags.fw_started) + goto done; + + pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); + if (pd == NULL) { + ql_log(ql_log_warn, vha, 0xffff, + "Failed to allocate port database structure.\n"); + goto done_free_sp; + } + memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE)); + + memset(&mc, 0, sizeof(mc)); + mc.mb[0] = MBC_GET_PORT_DATABASE; + mc.mb[1] = cpu_to_le16(fcport->loop_id); + mc.mb[2] = MSW(pd_dma); + mc.mb[3] = LSW(pd_dma); + mc.mb[6] = MSW(MSD(pd_dma)); + mc.mb[7] = LSW(MSD(pd_dma)); + mc.mb[9] = cpu_to_le16(vha->vp_idx); + mc.mb[10] = cpu_to_le16((uint16_t)opt); + + rval = qla24xx_send_mb_cmd(vha, &mc); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0xffff, + "%s: %8phC fail\n", __func__, fcport->port_name); + goto done_free_sp; + } + + rval = __qla24xx_parse_gpdb(vha, fcport, pd); + + ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %8phC done\n", + __func__, fcport->port_name); + +done_free_sp: + if (pd) + dma_pool_free(ha->s_dma_pool, pd, pd_dma); +done: + return rval; +} + +int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, + struct port_database_24xx *pd) +{ + int rval = QLA_SUCCESS; + uint64_t zero = 0; + + /* Check for logged in state. */ + if (pd->current_login_state != PDS_PRLI_COMPLETE && + pd->last_login_state != PDS_PRLI_COMPLETE) { + ql_dbg(ql_dbg_mbx, vha, 0xffff, + "Unable to verify login-state (%x/%x) for " + "loop_id %x.\n", pd->current_login_state, + pd->last_login_state, fcport->loop_id); + rval = QLA_FUNCTION_FAILED; + goto gpd_error_out; + } + + if (fcport->loop_id == FC_NO_LOOP_ID || + (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && + memcmp(fcport->port_name, pd->port_name, 8))) { + /* We lost the device mid way. */ + rval = QLA_NOT_LOGGED_IN; + goto gpd_error_out; + } + + /* Names are little-endian. */ + memcpy(fcport->node_name, pd->node_name, WWN_SIZE); + memcpy(fcport->port_name, pd->port_name, WWN_SIZE); + + /* Get port_id of device. */ + fcport->d_id.b.domain = pd->port_id[0]; + fcport->d_id.b.area = pd->port_id[1]; + fcport->d_id.b.al_pa = pd->port_id[2]; + fcport->d_id.b.rsvd_1 = 0; + + /* If not target must be initiator or unknown type. */ + if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0) + fcport->port_type = FCT_INITIATOR; + else + fcport->port_type = FCT_TARGET; + + /* Passback COS information. */ + fcport->supported_classes = (pd->flags & PDF_CLASS_2) ? + FC_COS_CLASS2 : FC_COS_CLASS3; + + if (pd->prli_svc_param_word_3[0] & BIT_7) { + fcport->flags |= FCF_CONF_COMP_SUPPORTED; + fcport->conf_compl_supported = 1; + } + +gpd_error_out: + return rval; +} + +/* + * qla24xx_gidlist__wait + * NOTE: don't call this routine from DPC thread. 
+ */ +int qla24xx_gidlist_wait(struct scsi_qla_host *vha, + void *id_list, dma_addr_t id_list_dma, uint16_t *entries) +{ + int rval = QLA_FUNCTION_FAILED; + mbx_cmd_t mc; + + if (!vha->hw->flags.fw_started) + goto done; + + memset(&mc, 0, sizeof(mc)); + mc.mb[0] = MBC_GET_ID_LIST; + mc.mb[2] = MSW(id_list_dma); + mc.mb[3] = LSW(id_list_dma); + mc.mb[6] = MSW(MSD(id_list_dma)); + mc.mb[7] = LSW(MSD(id_list_dma)); + mc.mb[8] = 0; + mc.mb[9] = cpu_to_le16(vha->vp_idx); + + rval = qla24xx_send_mb_cmd(vha, &mc); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0xffff, + "%s: fail\n", __func__); + } else { + *entries = mc.mb[1]; + ql_dbg(ql_dbg_mbx, vha, 0xffff, + "%s: done\n", __func__); + } +done: + return rval; +} diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index c6d6f0d912ff75..09a490c98763a9 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -74,13 +74,14 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha) * ensures no active vp_list traversal while the vport is removed * from the queue) */ - spin_lock_irqsave(&ha->vport_slock, flags); - while (atomic_read(&vha->vref_count)) { - spin_unlock_irqrestore(&ha->vport_slock, flags); - - msleep(500); + wait_event_timeout(vha->vref_waitq, atomic_read(&vha->vref_count), + 10*HZ); - spin_lock_irqsave(&ha->vport_slock, flags); + spin_lock_irqsave(&ha->vport_slock, flags); + if (atomic_read(&vha->vref_count)) { + ql_dbg(ql_dbg_vport, vha, 0xfffa, + "vha->vref_count=%u timeout\n", vha->vref_count.counter); + vha->vref_count = (atomic_t)ATOMIC_INIT(0); } list_del(&vha->list); qlt_update_vp_map(vha, RESET_VP_IDX); @@ -269,6 +270,7 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb) spin_lock_irqsave(&ha->vport_slock, flags); atomic_dec(&vha->vref_count); + wake_up(&vha->vref_waitq); } i++; } diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c index 96c33e292ebacc..10b742d27e1646 100644 --- a/drivers/scsi/qla2xxx/qla_mr.c +++ b/drivers/scsi/qla2xxx/qla_mr.c @@ -1789,16 +1789,16 @@ qlafx00_update_host_attr(scsi_qla_host_t *vha, struct port_info_data *pinfo) static void qla2x00_fxdisc_iocb_timeout(void *data) { - srb_t *sp = (srb_t *)data; + srb_t *sp = data; struct srb_iocb *lio = &sp->u.iocb_cmd; complete(&lio->u.fxiocb.fxiocb_comp); } static void -qla2x00_fxdisc_sp_done(void *data, void *ptr, int res) +qla2x00_fxdisc_sp_done(void *ptr, int res) { - srb_t *sp = (srb_t *)ptr; + srb_t *sp = ptr; struct srb_iocb *lio = &sp->u.iocb_cmd; complete(&lio->u.fxiocb.fxiocb_comp); @@ -1999,7 +1999,7 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type) dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.req_len, fdisc->u.fxiocb.req_addr, fdisc->u.fxiocb.req_dma_handle); done_free_sp: - sp->free(vha, sp); + sp->free(sp); done: return rval; } @@ -2127,7 +2127,7 @@ static inline void qlafx00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, uint32_t sense_len, struct rsp_que *rsp, int res) { - struct scsi_qla_host *vha = sp->fcport->vha; + struct scsi_qla_host *vha = sp->vha; struct scsi_cmnd *cp = GET_CMD_SP(sp); uint32_t track_sense_len; @@ -2162,7 +2162,7 @@ qlafx00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, if (sense_len) { ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3039, "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n", - sp->fcport->vha->host_no, cp->device->id, cp->device->lun, + sp->vha->host_no, cp->device->id, cp->device->lun, cp); ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3049, 
cp->sense_buffer, sense_len); @@ -2181,7 +2181,7 @@ qlafx00_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, (sstatus & cpu_to_le16((uint16_t)SS_RESPONSE_INFO_LEN_VALID))) cpstatus = cpu_to_le16((uint16_t)CS_INCOMPLETE); tmf->u.tmf.comp_status = cpstatus; - sp->done(vha, sp, 0); + sp->done(sp, 0); } static void @@ -2198,7 +2198,7 @@ qlafx00_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, abt = &sp->u.iocb_cmd; abt->u.abt.comp_status = pkt->tgt_id_sts; - sp->done(vha, sp, 0); + sp->done(sp, 0); } static void @@ -2264,7 +2264,7 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req, bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; } - sp->done(vha, sp, res); + sp->done(sp, res); } /** @@ -2537,7 +2537,7 @@ qlafx00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) par_sense_len, rsp_info_len); if (rsp->status_srb == NULL) - sp->done(ha, sp, res); + sp->done(sp, res); } /** @@ -2614,7 +2614,7 @@ qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) /* Place command on done queue. */ if (sense_len == 0) { rsp->status_srb = NULL; - sp->done(ha, sp, cp->result); + sp->done(sp, cp->result); } } @@ -2695,7 +2695,7 @@ qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); if (sp) { - sp->done(ha, sp, res); + sp->done(sp, res); return; } @@ -2997,7 +2997,7 @@ qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt, cont_a64_entry_t lcont_pkt; cont_a64_entry_t *cont_pkt; - vha = sp->fcport->vha; + vha = sp->vha; req = vha->req; cmd = GET_CMD_SP(sp); @@ -3081,7 +3081,7 @@ qlafx00_start_scsi(srb_t *sp) struct req_que *req = NULL; struct rsp_que *rsp = NULL; struct scsi_cmnd *cmd = GET_CMD_SP(sp); - struct scsi_qla_host *vha = sp->fcport->vha; + struct scsi_qla_host *vha = sp->vha; struct qla_hw_data *ha = vha->hw; struct cmd_type_7_fx00 *cmd_pkt; struct cmd_type_7_fx00 lcmd_pkt; @@ -3205,7 +3205,7 @@ void qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb) { struct srb_iocb *fxio = &sp->u.iocb_cmd; - scsi_qla_host_t *vha = sp->fcport->vha; + scsi_qla_host_t *vha = sp->vha; struct req_que *req = vha->req; struct tsk_mgmt_entry_fx00 tm_iocb; struct scsi_lun llun; @@ -3232,7 +3232,7 @@ void qlafx00_abort_iocb(srb_t *sp, struct abort_iocb_entry_fx00 *pabt_iocb) { struct srb_iocb *fxio = &sp->u.iocb_cmd; - scsi_qla_host_t *vha = sp->fcport->vha; + scsi_qla_host_t *vha = sp->vha; struct req_que *req = vha->req; struct abort_iocb_entry_fx00 abt_iocb; @@ -3346,8 +3346,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb) REQUEST_ENTRY_SIZE); cont_pkt = qlafx00_prep_cont_type1_iocb( - sp->fcport->vha->req, - &lcont_pkt); + sp->vha->req, &lcont_pkt); cur_dsd = (__le32 *) lcont_pkt.dseg_0_address; avail_dsds = 5; @@ -3368,7 +3367,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb) &lcont_pkt, REQUEST_ENTRY_SIZE); ql_dump_buffer( ql_dbg_user + ql_dbg_verbose, - sp->fcport->vha, 0x3042, + sp->vha, 0x3042, (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE); } @@ -3377,7 +3376,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb) memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt, REQUEST_ENTRY_SIZE); ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, - sp->fcport->vha, 0x3043, + sp->vha, 0x3043, (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE); } } @@ -3409,8 +3408,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb) REQUEST_ENTRY_SIZE); cont_pkt = qlafx00_prep_cont_type1_iocb( - 
sp->fcport->vha->req, - &lcont_pkt); + sp->vha->req, &lcont_pkt); cur_dsd = (__le32 *) lcont_pkt.dseg_0_address; avail_dsds = 5; @@ -3431,7 +3429,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb) REQUEST_ENTRY_SIZE); ql_dump_buffer( ql_dbg_user + ql_dbg_verbose, - sp->fcport->vha, 0x3045, + sp->vha, 0x3045, (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE); } @@ -3440,7 +3438,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb) memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt, REQUEST_ENTRY_SIZE); ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, - sp->fcport->vha, 0x3046, + sp->vha, 0x3046, (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE); } } @@ -3452,7 +3450,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb) } ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, - sp->fcport->vha, 0x3047, + sp->vha, 0x3047, (uint8_t *)&fx_iocb, sizeof(struct fxdisc_entry_fx00)); memcpy_toio((void __iomem *)pfxiocb, &fx_iocb, diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index d01c90c7dd04f0..3e7011757c8267 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -237,6 +237,13 @@ MODULE_PARM_DESC(ql2xfwholdabts, "0 (Default) Do not set fw option. " "1 - Set fw option to hold ABTS."); +int ql2xmvasynctoatio = 1; +module_param(ql2xmvasynctoatio, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(ql2xmvasynctoatio, + "Move PUREX, ABTS RX and RIDA IOCBs to ATIOQ" + "0 (Default). Do not move IOCBs" + "1 - Move IOCBs."); + /* * SCSI host template entry points */ @@ -607,11 +614,11 @@ qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size) } void -qla2x00_sp_free_dma(void *vha, void *ptr) +qla2x00_sp_free_dma(void *ptr) { - srb_t *sp = (srb_t *)ptr; + srb_t *sp = ptr; + struct qla_hw_data *ha = sp->vha->hw; struct scsi_cmnd *cmd = GET_CMD_SP(sp); - struct qla_hw_data *ha = sp->fcport->vha->hw; void *ctx = GET_CMD_CTX_SP(sp); if (sp->flags & SRB_DMA_VALID) { @@ -650,20 +657,19 @@ qla2x00_sp_free_dma(void *vha, void *ptr) } CMD_SP(cmd) = NULL; - qla2x00_rel_sp(sp->fcport->vha, sp); + qla2x00_rel_sp(sp); } void -qla2x00_sp_compl(void *data, void *ptr, int res) +qla2x00_sp_compl(void *ptr, int res) { - struct qla_hw_data *ha = (struct qla_hw_data *)data; - srb_t *sp = (srb_t *)ptr; + srb_t *sp = ptr; struct scsi_cmnd *cmd = GET_CMD_SP(sp); cmd->result = res; if (atomic_read(&sp->ref_count) == 0) { - ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3015, + ql_dbg(ql_dbg_io, sp->vha, 0x3015, "SP reference-count to ZERO -- sp=%p cmd=%p.\n", sp, GET_CMD_SP(sp)); if (ql2xextended_error_logging & ql_dbg_io) @@ -673,12 +679,12 @@ qla2x00_sp_compl(void *data, void *ptr, int res) if (!atomic_dec_and_test(&sp->ref_count)) return; - qla2x00_sp_free_dma(ha, sp); + qla2x00_sp_free_dma(sp); cmd->scsi_done(cmd); } void -qla2xxx_qpair_sp_free_dma(void *vha, void *ptr) +qla2xxx_qpair_sp_free_dma(void *ptr) { srb_t *sp = (srb_t *)ptr; struct scsi_cmnd *cmd = GET_CMD_SP(sp); @@ -724,9 +730,9 @@ qla2xxx_qpair_sp_free_dma(void *vha, void *ptr) } void -qla2xxx_qpair_sp_compl(void *data, void *ptr, int res) +qla2xxx_qpair_sp_compl(void *ptr, int res) { - srb_t *sp = (srb_t *)ptr; + srb_t *sp = ptr; struct scsi_cmnd *cmd = GET_CMD_SP(sp); cmd->result = res; @@ -742,7 +748,7 @@ qla2xxx_qpair_sp_compl(void *data, void *ptr, int res) if (!atomic_dec_and_test(&sp->ref_count)) return; - qla2xxx_qpair_sp_free_dma(sp->fcport->vha, sp); + qla2xxx_qpair_sp_free_dma(sp); cmd->scsi_done(cmd); } @@ -863,7 +869,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct 
scsi_cmnd *cmd) return 0; qc24_host_busy_free_sp: - qla2x00_sp_free_dma(ha, sp); + qla2x00_sp_free_dma(sp); qc24_host_busy: return SCSI_MLQUEUE_HOST_BUSY; @@ -952,7 +958,7 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd, return 0; qc24_host_busy_free_sp: - qla2xxx_qpair_sp_free_dma(vha, sp); + qla2xxx_qpair_sp_free_dma(sp); qc24_host_busy: return SCSI_MLQUEUE_HOST_BUSY; @@ -1044,6 +1050,34 @@ qla2x00_wait_for_hba_online(scsi_qla_host_t *vha) return (return_status); } +static inline int test_fcport_count(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + unsigned long flags; + int res; + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + ql_dbg(ql_dbg_init, vha, 0xffff, + "tgt %p, fcport_count=%d\n", + vha, vha->fcport_count); + res = (vha->fcport_count == 0); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + + return res; +} + +/* + * qla2x00_wait_for_sess_deletion can only be called from remove_one. + * it has dependency on UNLOADING flag to stop device discovery + */ +static void +qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha) +{ + qla2x00_mark_all_devices_lost(vha, 0); + + wait_event(vha->fcport_waitQ, test_fcport_count(vha)); +} + /* * qla2x00_wait_for_hba_ready * Wait till the HBA is ready before doing driver unload @@ -1204,7 +1238,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) } spin_lock_irqsave(&ha->hardware_lock, flags); - sp->done(ha, sp, 0); + sp->done(sp, 0); spin_unlock_irqrestore(&ha->hardware_lock, flags); /* Did the command return during mailbox execution? */ @@ -1249,7 +1283,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t, continue; if (sp->type != SRB_SCSI_CMD) continue; - if (vha->vp_idx != sp->fcport->vha->vp_idx) + if (vha->vp_idx != sp->vha->vp_idx) continue; match = 0; cmd = GET_CMD_SP(sp); @@ -1617,7 +1651,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) /* Don't abort commands in adapter during EEH * recovery as it's not accessible/responding. */ - if (GET_CMD_SP(sp) && !ha->flags.eeh_busy) { + if (GET_CMD_SP(sp) && !ha->flags.eeh_busy && + (sp->type == SRB_SCSI_CMD)) { /* Get a reference to the sp and drop the lock. 
* The reference ensures this sp->done() call * - and not the call in qla2xxx_eh_abort() - @@ -1629,7 +1664,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) spin_lock_irqsave(&ha->hardware_lock, flags); } req->outstanding_cmds[cnt] = NULL; - sp->done(vha, sp, res); + sp->done(sp, res); } } } @@ -1815,6 +1850,7 @@ qla2x00_iospace_config(struct qla_hw_data *ha) /* Determine queue resources */ ha->max_req_queues = ha->max_rsp_queues = 1; + ha->msix_count = QLA_BASE_VECTORS; if (!ql2xmqsupport || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))) goto mqiobase_exit; @@ -1842,9 +1878,8 @@ qla2x00_iospace_config(struct qla_hw_data *ha) "BAR 3 not enabled.\n"); mqiobase_exit: - ha->msix_count = ha->max_rsp_queues + 1; ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c, - "MSIX Count:%d.\n", ha->msix_count); + "MSIX Count: %d.\n", ha->msix_count); return (0); iospace_error_exit: @@ -1892,6 +1927,7 @@ qla83xx_iospace_config(struct qla_hw_data *ha) /* 83XX 26XX always use MQ type access for queues * - mbar 2, a.k.a region 4 */ ha->max_req_queues = ha->max_rsp_queues = 1; + ha->msix_count = QLA_BASE_VECTORS; ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 4), pci_resource_len(ha->pdev, 4)); @@ -1915,12 +1951,13 @@ qla83xx_iospace_config(struct qla_hw_data *ha) if (ql2xmqsupport) { /* MB interrupt uses 1 vector */ ha->max_req_queues = ha->msix_count - 1; - ha->max_rsp_queues = ha->max_req_queues; /* ATIOQ needs 1 vector. That's 1 less QPair */ if (QLA_TGT_MODE_ENABLED()) ha->max_req_queues--; + ha->max_rsp_queues = ha->max_req_queues; + /* Queue pairs is the max value minus * the base queue pair */ ha->max_qpairs = ha->max_req_queues - 1; @@ -1934,14 +1971,8 @@ qla83xx_iospace_config(struct qla_hw_data *ha) "BAR 1 not enabled.\n"); mqiobase_exit: - ha->msix_count = ha->max_rsp_queues + 1; - if (QLA_TGT_MODE_ENABLED()) - ha->msix_count++; - - qlt_83xx_iospace_config(ha); - ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f, - "MSIX Count:%d.\n", ha->msix_count); + "MSIX Count: %d.\n", ha->msix_count); return 0; iospace_error_exit: @@ -2530,6 +2561,20 @@ qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time) return atomic_read(&vha->loop_state) == LOOP_READY; } +static void qla2x00_iocb_work_fn(struct work_struct *work) +{ + struct scsi_qla_host *vha = container_of(work, + struct scsi_qla_host, iocb_work); + int cnt = 0; + + while (!list_empty(&vha->work_list)) { + qla2x00_do_work(vha); + cnt++; + if (cnt > 10) + break; + } +} + /* * PCI driver interface */ @@ -3048,6 +3093,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) */ qla2xxx_wake_dpc(base_vha); + INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn); INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error); if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) { @@ -3124,7 +3170,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ql_dbg(ql_dbg_init, base_vha, 0x00f2, "Init done and hba is online.\n"); - if (qla_ini_mode_enabled(base_vha)) + if (qla_ini_mode_enabled(base_vha) || + qla_dual_mode_enabled(base_vha)) scsi_scan_host(host); else ql_dbg(ql_dbg_init, base_vha, 0x0122, @@ -3373,21 +3420,26 @@ qla2x00_remove_one(struct pci_dev *pdev) * resources. 
*/ if (!atomic_read(&pdev->enable_cnt)) { + dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, + base_vha->gnl.l, base_vha->gnl.ldma); + scsi_host_put(base_vha->host); kfree(ha); pci_set_drvdata(pdev, NULL); return; } - qla2x00_wait_for_hba_ready(base_vha); - /* if UNLOAD flag is already set, then continue unload, + /* + * if UNLOAD flag is already set, then continue unload, * where it was set first. */ if (test_bit(UNLOADING, &base_vha->dpc_flags)) return; set_bit(UNLOADING, &base_vha->dpc_flags); + dma_free_coherent(&ha->pdev->dev, + base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma); if (IS_QLAFX00(ha)) qlafx00_driver_shutdown(base_vha, 20); @@ -3433,6 +3485,7 @@ qla2x00_remove_one(struct pci_dev *pdev) qla2x00_free_sysfs_attr(base_vha, true); fc_remove_host(base_vha->host); + qlt_remove_target_resources(ha); scsi_remove_host(base_vha->host); @@ -3536,10 +3589,14 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport, qla2xxx_wake_dpc(base_vha); } else { int now; - if (rport) + if (rport) { + ql_dbg(ql_dbg_disc, fcport->vha, 0xffff, + "%s %8phN. rport %p roles %x \n", + __func__, fcport->port_name, rport, + rport->roles); fc_remote_port_delete(rport); + } qlt_do_generation_tick(vha, &now); - qlt_fc_port_deleted(vha, fcport, now); } } @@ -3582,7 +3639,7 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport, fcport->login_retry = vha->hw->login_retry_count; ql_dbg(ql_dbg_disc, vha, 0x2067, - "Port login retry %8phN, id = 0x%04x retry cnt=%d.\n", + "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n", fcport->port_name, fcport->loop_id, fcport->login_retry); } } @@ -3605,7 +3662,13 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer) { fc_port_t *fcport; + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Mark all dev lost\n"); + list_for_each_entry(fcport, &vha->vp_fcports, list) { + fcport->scan_state = 0; + qlt_schedule_sess_for_deletion_lock(fcport); + if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx) continue; @@ -4195,10 +4258,10 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, struct scsi_qla_host *vha = NULL; host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t)); - if (host == NULL) { + if (!host) { ql_log_pci(ql_log_fatal, ha->pdev, 0x0107, "Failed to allocate host from the scsi layer, aborting.\n"); - goto fail; + return NULL; } /* Clear our data area */ @@ -4217,9 +4280,23 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, INIT_LIST_HEAD(&vha->logo_list); INIT_LIST_HEAD(&vha->plogi_ack_list); INIT_LIST_HEAD(&vha->qp_list); + INIT_LIST_HEAD(&vha->gnl.fcports); spin_lock_init(&vha->work_lock); spin_lock_init(&vha->cmd_list_lock); + init_waitqueue_head(&vha->fcport_waitQ); + init_waitqueue_head(&vha->vref_waitq); + + vha->gnl.size = sizeof(struct get_name_list_extended) * + (ha->max_loop_id + 1); + vha->gnl.l = dma_alloc_coherent(&ha->pdev->dev, + vha->gnl.size, &vha->gnl.ldma, GFP_KERNEL); + if (!vha->gnl.l) { + ql_log(ql_log_fatal, vha, 0xffff, + "Alloc failed for name list.\n"); + scsi_remove_host(vha->host); + return NULL; + } sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no); ql_dbg(ql_dbg_init, vha, 0x0041, @@ -4228,12 +4305,9 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, dev_name(&(ha->pdev->dev))); return vha; - -fail: - return vha; } -static struct qla_work_evt * +struct qla_work_evt * qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type) { struct qla_work_evt *e; @@ -4255,7 +4329,7 @@ 
qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type) return e; } -static int +int qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e) { unsigned long flags; @@ -4263,7 +4337,11 @@ qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e) spin_lock_irqsave(&vha->work_lock, flags); list_add_tail(&e->list, &vha->work_list); spin_unlock_irqrestore(&vha->work_lock, flags); - qla2xxx_wake_dpc(vha); + + if (QLA_EARLY_LINKUP(vha->hw)) + schedule_work(&vha->iocb_work); + else + qla2xxx_wake_dpc(vha); return QLA_SUCCESS; } @@ -4316,7 +4394,6 @@ int qla2x00_post_async_##name##_work( \ } qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN); -qla2x00_post_async_work(login_done, QLA_EVT_ASYNC_LOGIN_DONE); qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT); qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE); qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC); @@ -4369,6 +4446,67 @@ qlafx00_post_aenfx_work(struct scsi_qla_host *vha, uint32_t evtcode, return qla2x00_post_work(vha, e); } +int qla24xx_post_upd_fcport_work(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + struct qla_work_evt *e; + + e = qla2x00_alloc_work(vha, QLA_EVT_UPD_FCPORT); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.fcport.fcport = fcport; + return qla2x00_post_work(vha, e); +} + +static +void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) +{ + unsigned long flags; + fc_port_t *fcport = NULL; + struct qlt_plogi_ack_t *pla = + (struct qlt_plogi_ack_t *)e->u.new_sess.pla; + + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + fcport = qla2x00_find_fcport_by_wwpn(vha, e->u.new_sess.port_name, 1); + if (fcport) { + fcport->d_id = e->u.new_sess.id; + if (pla) { + fcport->fw_login_state = DSC_LS_PLOGI_PEND; + qlt_plogi_ack_link(vha, pla, fcport, QLT_PLOGI_LINK_SAME_WWN); + /* we took an extra ref_count to prevent PLOGI ACK when + * fcport/sess has not been created. 
+ */ + pla->ref_count--; + } + } else { + fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); + if (fcport) { + fcport->d_id = e->u.new_sess.id; + fcport->scan_state = QLA_FCPORT_FOUND; + fcport->flags |= FCF_FABRIC_DEVICE; + fcport->fw_login_state = DSC_LS_PLOGI_PEND; + + memcpy(fcport->port_name, e->u.new_sess.port_name, + WWN_SIZE); + list_add_tail(&fcport->list, &vha->vp_fcports); + + if (pla) { + qlt_plogi_ack_link(vha, pla, fcport, + QLT_PLOGI_LINK_SAME_WWN); + pla->ref_count--; + } + } + } + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + + if (fcport) { + if (pla) + qlt_plogi_ack_unref(vha, pla); + else + qla24xx_async_gnl(vha, fcport); + } +} + void qla2x00_do_work(struct scsi_qla_host *vha) { @@ -4395,10 +4533,6 @@ qla2x00_do_work(struct scsi_qla_host *vha) qla2x00_async_login(vha, e->u.logio.fcport, e->u.logio.data); break; - case QLA_EVT_ASYNC_LOGIN_DONE: - qla2x00_async_login_done(vha, e->u.logio.fcport, - e->u.logio.data); - break; case QLA_EVT_ASYNC_LOGOUT: qla2x00_async_logout(vha, e->u.logio.fcport); break; @@ -4420,6 +4554,34 @@ qla2x00_do_work(struct scsi_qla_host *vha) case QLA_EVT_AENFX: qlafx00_process_aen(vha, e); break; + case QLA_EVT_GIDPN: + qla24xx_async_gidpn(vha, e->u.fcport.fcport); + break; + case QLA_EVT_GPNID: + qla24xx_async_gpnid(vha, &e->u.gpnid.id); + break; + case QLA_EVT_GPNID_DONE: + qla24xx_async_gpnid_done(vha, e->u.iosb.sp); + break; + case QLA_EVT_NEW_SESS: + qla24xx_create_new_sess(vha, e); + break; + case QLA_EVT_GPDB: + qla24xx_async_gpdb(vha, e->u.fcport.fcport, + e->u.fcport.opt); + break; + case QLA_EVT_GPSC: + qla24xx_async_gpsc(vha, e->u.fcport.fcport); + break; + case QLA_EVT_UPD_FCPORT: + qla2x00_update_fcport(vha, e->u.fcport.fcport); + break; + case QLA_EVT_GNL: + qla24xx_async_gnl(vha, e->u.fcport.fcport); + break; + case QLA_EVT_NACK: + qla24xx_do_nack_work(vha, e); + break; } if (e->flags & QLA_EVT_FLAG_FREE) kfree(e); @@ -4436,9 +4598,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha) { fc_port_t *fcport; int status; - uint16_t next_loopid = 0; - struct qla_hw_data *ha = vha->hw; - uint16_t data[2]; + struct event_arg ea; list_for_each_entry(fcport, &vha->vp_fcports, list) { /* @@ -4449,77 +4609,38 @@ void qla2x00_relogin(struct scsi_qla_host *vha) fcport->login_retry && !(fcport->flags & FCF_ASYNC_SENT)) { fcport->login_retry--; if (fcport->flags & FCF_FABRIC_DEVICE) { - if (fcport->flags & FCF_FCP2_DEVICE) - ha->isp_ops->fabric_logout(vha, - fcport->loop_id, - fcport->d_id.b.domain, - fcport->d_id.b.area, - fcport->d_id.b.al_pa); - - if (fcport->loop_id == FC_NO_LOOP_ID) { - fcport->loop_id = next_loopid = - ha->min_external_loopid; - status = qla2x00_find_new_loop_id( - vha, fcport); - if (status != QLA_SUCCESS) { - /* Ran out of IDs to use */ - break; - } - } - - if (IS_ALOGIO_CAPABLE(ha)) { - fcport->flags |= FCF_ASYNC_SENT; - data[0] = 0; - data[1] = QLA_LOGIO_LOGIN_RETRIED; - status = qla2x00_post_async_login_work( - vha, fcport, data); - if (status == QLA_SUCCESS) - continue; - /* Attempt a retry. 
*/ - status = 1; - } else { - status = qla2x00_fabric_login(vha, - fcport, &next_loopid); - if (status == QLA_SUCCESS) { - int status2; - uint8_t opts; - - opts = 0; - if (fcport->flags & - FCF_FCP2_DEVICE) - opts |= BIT_1; - status2 = - qla2x00_get_port_database( - vha, fcport, opts); - if (status2 != QLA_SUCCESS) - status = 1; - } - } - } else + ql_dbg(ql_dbg_disc, fcport->vha, 0xffff, + "%s %8phC DS %d LS %d\n", __func__, + fcport->port_name, fcport->disc_state, + fcport->fw_login_state); + memset(&ea, 0, sizeof(ea)); + ea.event = FCME_RELOGIN; + ea.fcport = fcport; + qla2x00_fcport_event_handler(vha, &ea); + } else { status = qla2x00_local_device_login(vha, fcport); + if (status == QLA_SUCCESS) { + fcport->old_loop_id = fcport->loop_id; + ql_dbg(ql_dbg_disc, vha, 0x2003, + "Port login OK: logged in ID 0x%x.\n", + fcport->loop_id); + qla2x00_update_fcport(vha, fcport); + } else if (status == 1) { + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + /* retry the login again */ + ql_dbg(ql_dbg_disc, vha, 0x2007, + "Retrying %d login again loop_id 0x%x.\n", + fcport->login_retry, + fcport->loop_id); + } else { + fcport->login_retry = 0; + } - if (status == QLA_SUCCESS) { - fcport->old_loop_id = fcport->loop_id; - - ql_dbg(ql_dbg_disc, vha, 0x2003, - "Port login OK: logged in ID 0x%x.\n", - fcport->loop_id); - - qla2x00_update_fcport(vha, fcport); - - } else if (status == 1) { - set_bit(RELOGIN_NEEDED, &vha->dpc_flags); - /* retry the login again */ - ql_dbg(ql_dbg_disc, vha, 0x2007, - "Retrying %d login again loop_id 0x%x.\n", - fcport->login_retry, fcport->loop_id); - } else { - fcport->login_retry = 0; + if (fcport->login_retry == 0 && + status != QLA_SUCCESS) + qla2x00_clear_loop_id(fcport); } - - if (fcport->login_retry == 0 && status != QLA_SUCCESS) - qla2x00_clear_loop_id(fcport); } if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) break; @@ -5183,7 +5304,8 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work) struct pci_dev *pdev = ha->pdev; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); - /* if UNLOAD flag is already set, then continue unload, + /* + * if UNLOAD flag is already set, then continue unload, * where it was set first. */ if (test_bit(UNLOADING, &base_vha->dpc_flags)) @@ -5192,6 +5314,8 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work) ql_log(ql_log_warn, base_vha, 0x015b, "Disabling adapter.\n"); + qla2x00_wait_for_sess_deletion(base_vha); + set_bit(UNLOADING, &base_vha->dpc_flags); qla2x00_delete_all_vps(ha, base_vha); @@ -5410,16 +5534,6 @@ qla2x00_do_dpc(void *data) qla2x00_update_fcports(base_vha); } - if (test_bit(SCR_PENDING, &base_vha->dpc_flags)) { - int ret; - ret = qla2x00_send_change_request(base_vha, 0x3, 0); - if (ret != QLA_SUCCESS) - ql_log(ql_log_warn, base_vha, 0x121, - "Failed to enable receiving of RSCN " - "requests: 0x%x.\n", ret); - clear_bit(SCR_PENDING, &base_vha->dpc_flags); - } - if (IS_QLAFX00(ha)) goto loop_resync_check; diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index e4fda84b959eca..0e03ca2ab3e523 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -55,8 +55,17 @@ MODULE_PARM_DESC(qlini_mode, "disabled on enabling target mode and then on disabling target mode " "enabled back; " "\"disabled\" - initiator mode will never be enabled; " + "\"dual\" - Initiator Modes will be enabled. 
Target Mode can be activated " + "when ready " "\"enabled\" (default) - initiator mode will always stay enabled."); +static int ql_dm_tgt_ex_pct = 50; +module_param(ql_dm_tgt_ex_pct, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(ql_dm_tgt_ex_pct, + "For Dual Mode (qlini_mode=dual), this parameter determines " + "the percentage of exchanges/cmds FW will allocate resources " + "for Target mode."); + int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE; static int temp_sam_status = SAM_STAT_BUSY; @@ -102,12 +111,10 @@ enum fcp_resp_rsp_codes { static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha, struct atio_from_isp *pkt, uint8_t); static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt); -static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, +static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun, int fn, void *iocb, int flags); static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort); -static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha, - struct qla_tgt_srr_imm *imm, int ha_lock); static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd); static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, @@ -120,6 +127,12 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha, uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan); static void qlt_send_term_imm_notif(struct scsi_qla_host *vha, struct imm_ntfy_from_isp *imm, int ha_locked); +static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha, + fc_port_t *fcport, bool local); +void qlt_unreg_sess(struct fc_port *sess); +static void qlt_24xx_handle_abts(struct scsi_qla_host *, + struct abts_recv_from_24xx *); + /* * Global Variables */ @@ -130,6 +143,20 @@ static struct workqueue_struct *qla_tgt_wq; static DEFINE_MUTEX(qla_tgt_mutex); static LIST_HEAD(qla_tgt_glist); +static const char *prot_op_str(u32 prot_op) +{ + switch (prot_op) { + case TARGET_PROT_NORMAL: return "NORMAL"; + case TARGET_PROT_DIN_INSERT: return "DIN_INSERT"; + case TARGET_PROT_DOUT_INSERT: return "DOUT_INSERT"; + case TARGET_PROT_DIN_STRIP: return "DIN_STRIP"; + case TARGET_PROT_DOUT_STRIP: return "DOUT_STRIP"; + case TARGET_PROT_DIN_PASS: return "DIN_PASS"; + case TARGET_PROT_DOUT_PASS: return "DOUT_PASS"; + default: return "UNKNOWN"; + } +} + /* This API intentionally takes dest as a parameter, rather than returning * int value to avoid caller forgetting to issue wmb() after the store */ void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest) @@ -140,21 +167,6 @@ void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest) wmb(); } -/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */ -static struct qla_tgt_sess *qlt_find_sess_by_port_name( - struct qla_tgt *tgt, - const uint8_t *port_name) -{ - struct qla_tgt_sess *sess; - - list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) { - if (!memcmp(sess->port_name, port_name, WWN_SIZE)) - return sess; - } - - return NULL; -} - /* Might release hw lock, then reaquire!! 
*/ static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked) { @@ -175,21 +187,23 @@ static inline struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha, uint8_t *d_id) { - struct qla_hw_data *ha = vha->hw; - uint8_t vp_idx; - - if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0])) - return NULL; + struct scsi_qla_host *host; + uint32_t key = 0; - if (vha->d_id.b.al_pa == d_id[2]) + if ((vha->d_id.b.area == d_id[1]) && (vha->d_id.b.domain == d_id[0]) && + (vha->d_id.b.al_pa == d_id[2])) return vha; - BUG_ON(ha->tgt.tgt_vp_map == NULL); - vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx; - if (likely(test_bit(vp_idx, ha->vp_idx_map))) - return ha->tgt.tgt_vp_map[vp_idx].vha; + key = (uint32_t)d_id[0] << 16; + key |= (uint32_t)d_id[1] << 8; + key |= (uint32_t)d_id[2]; - return NULL; + host = btree_lookup32(&vha->hw->tgt.host_map, key); + if (!host) + ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, + "Unable to find host %06x\n", key); + + return host; } static inline @@ -229,6 +243,105 @@ static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha) spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); } + +static void qlt_queue_unknown_atio(scsi_qla_host_t *vha, + struct atio_from_isp *atio, uint8_t ha_locked) +{ + struct qla_tgt_sess_op *u; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + unsigned long flags; + + if (tgt->tgt_stop) { + ql_dbg(ql_dbg_async, vha, 0xffff, + "qla_target(%d): dropping unknown ATIO_TYPE7, " + "because tgt is being stopped", vha->vp_idx); + goto out_term; + } + + u = kzalloc(sizeof(*u), GFP_ATOMIC); + if (u == NULL) { + ql_dbg(ql_dbg_async, vha, 0xffff, + "Alloc of struct unknown_atio (size %zd) failed", sizeof(*u)); + /* It should be harmless and on the next retry should work well */ + goto out_term; + } + + u->vha = vha; + memcpy(&u->atio, atio, sizeof(*atio)); + INIT_LIST_HEAD(&u->cmd_list); + + spin_lock_irqsave(&vha->cmd_list_lock, flags); + list_add_tail(&u->cmd_list, &vha->unknown_atio_list); + spin_unlock_irqrestore(&vha->cmd_list_lock, flags); + + schedule_delayed_work(&vha->unknown_atio_work, 1); + +out: + return; + +out_term: + qlt_send_term_exchange(vha, NULL, atio, ha_locked, 0); + goto out; +} + +static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha, + uint8_t ha_locked) +{ + struct qla_tgt_sess_op *u, *t; + scsi_qla_host_t *host; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + unsigned long flags; + uint8_t queued = 0; + + list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) { + if (u->aborted) { + ql_dbg(ql_dbg_async, vha, 0xffff, + "Freeing unknown %s %p, because of Abort", + "ATIO_TYPE7", u); + qlt_send_term_exchange(vha, NULL, &u->atio, + ha_locked, 0); + goto abort; + } + + host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id); + if (host != NULL) { + ql_dbg(ql_dbg_async, vha, 0xffff, + "Requeuing unknown ATIO_TYPE7 %p", u); + qlt_24xx_atio_pkt(host, &u->atio, ha_locked); + } else if (tgt->tgt_stop) { + ql_dbg(ql_dbg_async, vha, 0xffff, + "Freeing unknown %s %p, because tgt is being stopped", + "ATIO_TYPE7", u); + qlt_send_term_exchange(vha, NULL, &u->atio, + ha_locked, 0); + } else { + ql_dbg(ql_dbg_async, vha, 0xffff, + "u %p, vha %p, host %p, sched again..", u, + vha, host); + if (!queued) { + queued = 1; + schedule_delayed_work(&vha->unknown_atio_work, + 1); + } + continue; + } + +abort: + spin_lock_irqsave(&vha->cmd_list_lock, flags); + list_del(&u->cmd_list); + spin_unlock_irqrestore(&vha->cmd_list_lock, flags); + kfree(u); + } +} + +void 
qlt_unknown_atio_work_fn(struct work_struct *work) +{ + struct scsi_qla_host *vha = container_of(to_delayed_work(work), + struct scsi_qla_host, unknown_atio_work); + + qlt_try_to_dequeue_unknown_atios(vha, 0); +} + static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, struct atio_from_isp *atio, uint8_t ha_locked) { @@ -249,8 +362,14 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, atio->u.isp24.fcp_hdr.d_id[0], atio->u.isp24.fcp_hdr.d_id[1], atio->u.isp24.fcp_hdr.d_id[2]); + + + qlt_queue_unknown_atio(vha, atio, ha_locked); break; } + if (unlikely(!list_empty(&vha->unknown_atio_list))) + qlt_try_to_dequeue_unknown_atios(vha, ha_locked); + qlt_24xx_atio_pkt(host, atio, ha_locked); break; } @@ -278,6 +397,36 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, break; } + case VP_RPT_ID_IOCB_TYPE: + qla24xx_report_id_acquisition(vha, + (struct vp_rpt_id_entry_24xx *)atio); + break; + + case ABTS_RECV_24XX: + { + struct abts_recv_from_24xx *entry = + (struct abts_recv_from_24xx *)atio; + struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha, + entry->vp_index); + unsigned long flags; + + if (unlikely(!host)) { + ql_dbg(ql_dbg_tgt, vha, 0xffff, + "qla_target(%d): Response pkt (ABTS_RECV_24XX) " + "received, with unknown vp_index %d\n", + vha->vp_idx, entry->vp_index); + break; + } + if (!ha_locked) + spin_lock_irqsave(&host->hw->hardware_lock, flags); + qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio); + if (!ha_locked) + spin_unlock_irqrestore(&host->hw->hardware_lock, flags); + break; + } + + /* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */ + default: ql_dbg(ql_dbg_tgt, vha, 0xe040, "qla_target(%d): Received unknown ATIO atio " @@ -395,22 +544,265 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt) /* * All qlt_plogi_ack_t operations are protected by hardware_lock */ +static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport, + struct imm_ntfy_from_isp *ntfy, int type) +{ + struct qla_work_evt *e; + e = qla2x00_alloc_work(vha, QLA_EVT_NACK); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.nack.fcport = fcport; + e->u.nack.type = type; + memcpy(e->u.nack.iocb, ntfy, sizeof(struct imm_ntfy_from_isp)); + return qla2x00_post_work(vha, e); +} + +static +void qla2x00_async_nack_sp_done(void *s, int res) +{ + struct srb *sp = (struct srb *)s; + struct scsi_qla_host *vha = sp->vha; + unsigned long flags; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async done-%s res %x %8phC type %d\n", + sp->name, res, sp->fcport->port_name, sp->type); + + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + sp->fcport->flags &= ~FCF_ASYNC_SENT; + sp->fcport->chip_reset = vha->hw->chip_reset; + + switch (sp->type) { + case SRB_NACK_PLOGI: + sp->fcport->login_gen++; + sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP; + sp->fcport->logout_on_delete = 1; + sp->fcport->plogi_nack_done_deadline = jiffies + HZ; + break; + + case SRB_NACK_PRLI: + sp->fcport->fw_login_state = DSC_LS_PRLI_COMP; + sp->fcport->deleted = 0; + + if (!sp->fcport->login_succ && + !IS_SW_RESV_ADDR(sp->fcport->d_id)) { + sp->fcport->login_succ = 1; + + vha->fcport_count++; + + if (!IS_IIDMA_CAPABLE(vha->hw) || + !vha->hw->flags.gpsc_supported) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post upd_fcport fcp_cnt %d\n", + __func__, __LINE__, + sp->fcport->port_name, + vha->fcport_count); + + qla24xx_post_upd_fcport_work(vha, sp->fcport); + } else { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post gpsc fcp_cnt %d\n", + __func__, __LINE__, + 
sp->fcport->port_name, + vha->fcport_count); + + qla24xx_post_gpsc_work(vha, sp->fcport); + } + } + break; + + case SRB_NACK_LOGO: + sp->fcport->login_gen++; + sp->fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; + qlt_logo_completion_handler(sp->fcport, MBS_COMMAND_COMPLETE); + break; + } + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + + sp->free(sp); +} + +int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport, + struct imm_ntfy_from_isp *ntfy, int type) +{ + int rval = QLA_FUNCTION_FAILED; + srb_t *sp; + char *c = NULL; + + fcport->flags |= FCF_ASYNC_SENT; + switch (type) { + case SRB_NACK_PLOGI: + fcport->fw_login_state = DSC_LS_PLOGI_PEND; + c = "PLOGI"; + break; + case SRB_NACK_PRLI: + fcport->fw_login_state = DSC_LS_PRLI_PEND; + fcport->deleted = 0; + c = "PRLI"; + break; + case SRB_NACK_LOGO: + fcport->fw_login_state = DSC_LS_LOGO_PEND; + c = "LOGO"; + break; + } + + sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); + if (!sp) + goto done; + + sp->type = type; + sp->name = "nack"; + + qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2); + + sp->u.iocb_cmd.u.nack.ntfy = ntfy; + + sp->done = qla2x00_async_nack_sp_done; + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async-%s %8phC hndl %x %s\n", + sp->name, fcport->port_name, sp->handle, c); + + return rval; + +done_free_sp: + sp->free(sp); +done: + fcport->flags &= ~FCF_ASYNC_SENT; + return rval; +} + +void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e) +{ + fc_port_t *t; + unsigned long flags; + + switch (e->u.nack.type) { + case SRB_NACK_PRLI: + mutex_lock(&vha->vha_tgt.tgt_mutex); + t = qlt_create_sess(vha, e->u.nack.fcport, 0); + mutex_unlock(&vha->vha_tgt.tgt_mutex); + if (t) { + ql_log(ql_log_info, vha, 0xffff, + "%s create sess success %p", __func__, t); + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + /* create sess has an extra kref */ + vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport); + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + } + break; + } + qla24xx_async_notify_ack(vha, e->u.nack.fcport, + (struct imm_ntfy_from_isp*)e->u.nack.iocb, e->u.nack.type); +} + +void qla24xx_delete_sess_fn(struct work_struct *work) +{ + fc_port_t *fcport = container_of(work, struct fc_port, del_work); + struct qla_hw_data *ha = fcport->vha->hw; + unsigned long flags; + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + + if (fcport->se_sess) { + ha->tgt.tgt_ops->shutdown_sess(fcport); + ha->tgt.tgt_ops->put_sess(fcport); + } else { + qlt_unreg_sess(fcport); + } + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); +} + +/* + * Called from qla2x00_reg_remote_port() + */ +void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + struct qla_hw_data *ha = vha->hw; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + struct fc_port *sess = fcport; + unsigned long flags; + + if (!vha->hw->tgt.tgt_ops) + return; + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + if (tgt->tgt_stop) { + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + return; + } + + if (fcport->disc_state == DSC_DELETE_PEND) { + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + return; + } + + if (!sess->se_sess) { + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + + mutex_lock(&vha->vha_tgt.tgt_mutex); + sess = qlt_create_sess(vha, fcport, false); + mutex_unlock(&vha->vha_tgt.tgt_mutex); + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + } else { + if (fcport->fw_login_state == DSC_LS_PRLI_COMP) { + 
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + return; + } + + if (!kref_get_unless_zero(&sess->sess_kref)) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s: kref_get fail sess %8phC \n", + __func__, sess->port_name); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + return; + } + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c, + "qla_target(%u): %ssession for port %8phC " + "(loop ID %d) reappeared\n", vha->vp_idx, + sess->local ? "local " : "", sess->port_name, sess->loop_id); + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007, + "Reappeared sess %p\n", sess); + + ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, + fcport->loop_id, + (fcport->flags & FCF_CONF_COMP_SUPPORTED)); + } + + if (sess && sess->local) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d, + "qla_target(%u): local session for " + "port %8phC (loop ID %d) became global\n", vha->vp_idx, + fcport->port_name, sess->loop_id); + sess->local = 0; + } + ha->tgt.tgt_ops->put_sess(sess); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); +} /* * This is a zero-base ref-counting solution, since hardware_lock * guarantees that ref_count is not modified concurrently. * Upon successful return content of iocb is undefined */ -static qlt_plogi_ack_t * +static struct qlt_plogi_ack_t * qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id, struct imm_ntfy_from_isp *iocb) { - qlt_plogi_ack_t *pla; + struct qlt_plogi_ack_t *pla; list_for_each_entry(pla, &vha->plogi_ack_list, list) { if (pla->id.b24 == id->b24) { qlt_send_term_imm_notif(vha, &pla->iocb, 1); - pla->iocb = *iocb; + memcpy(&pla->iocb, iocb, sizeof(pla->iocb)); return pla; } } @@ -423,50 +815,78 @@ qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id, return NULL; } - pla->iocb = *iocb; + memcpy(&pla->iocb, iocb, sizeof(pla->iocb)); pla->id = *id; list_add_tail(&pla->list, &vha->plogi_ack_list); return pla; } -static void qlt_plogi_ack_unref(struct scsi_qla_host *vha, qlt_plogi_ack_t *pla) +void qlt_plogi_ack_unref(struct scsi_qla_host *vha, + struct qlt_plogi_ack_t *pla) { + struct imm_ntfy_from_isp *iocb = &pla->iocb; + port_id_t port_id; + uint16_t loop_id; + fc_port_t *fcport = pla->fcport; + BUG_ON(!pla->ref_count); pla->ref_count--; if (pla->ref_count) return; - ql_dbg(ql_dbg_async, vha, 0x5089, + ql_dbg(ql_dbg_disc, vha, 0x5089, "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x" - " exch %#x ox_id %#x\n", pla->iocb.u.isp24.port_name, - pla->iocb.u.isp24.port_id[2], pla->iocb.u.isp24.port_id[1], - pla->iocb.u.isp24.port_id[0], - le16_to_cpu(pla->iocb.u.isp24.nport_handle), - pla->iocb.u.isp24.exchange_address, pla->iocb.ox_id); - qlt_send_notify_ack(vha, &pla->iocb, 0, 0, 0, 0, 0, 0); + " exch %#x ox_id %#x\n", iocb->u.isp24.port_name, + iocb->u.isp24.port_id[2], iocb->u.isp24.port_id[1], + iocb->u.isp24.port_id[0], + le16_to_cpu(iocb->u.isp24.nport_handle), + iocb->u.isp24.exchange_address, iocb->ox_id); + + port_id.b.domain = iocb->u.isp24.port_id[2]; + port_id.b.area = iocb->u.isp24.port_id[1]; + port_id.b.al_pa = iocb->u.isp24.port_id[0]; + port_id.b.rsvd_1 = 0; + + loop_id = le16_to_cpu(iocb->u.isp24.nport_handle); + + fcport->loop_id = loop_id; + fcport->d_id = port_id; + qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI); + + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla) + fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL; + if (fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] == pla) + fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL; + } list_del(&pla->list); 
kmem_cache_free(qla_tgt_plogi_cachep, pla); } -static void -qlt_plogi_ack_link(struct scsi_qla_host *vha, qlt_plogi_ack_t *pla, - struct qla_tgt_sess *sess, qlt_plogi_link_t link) +void +qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla, + struct fc_port *sess, enum qlt_plogi_link_t link) { + struct imm_ntfy_from_isp *iocb = &pla->iocb; /* Inc ref_count first because link might already be pointing at pla */ pla->ref_count++; + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097, + "Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC" + " s_id %02x:%02x:%02x, ref=%d pla %p link %d\n", + sess, link, sess->port_name, + iocb->u.isp24.port_name, iocb->u.isp24.port_id[2], + iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0], + pla->ref_count, pla, link); + if (sess->plogi_link[link]) qlt_plogi_ack_unref(vha, sess->plogi_link[link]); - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097, - "Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC" - " s_id %02x:%02x:%02x, ref=%d\n", sess, link, sess->port_name, - pla->iocb.u.isp24.port_name, pla->iocb.u.isp24.port_id[2], - pla->iocb.u.isp24.port_id[1], pla->iocb.u.isp24.port_id[0], - pla->ref_count); + if (link == QLT_PLOGI_LINK_SAME_WWN) + pla->fcport = sess; sess->plogi_link[link] = pla; } @@ -519,49 +939,45 @@ qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo) static void qlt_free_session_done(struct work_struct *work) { - struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess, + struct fc_port *sess = container_of(work, struct fc_port, free_work); struct qla_tgt *tgt = sess->tgt; struct scsi_qla_host *vha = sess->vha; struct qla_hw_data *ha = vha->hw; unsigned long flags; bool logout_started = false; - fc_port_t fcport; + struct event_arg ea; + scsi_qla_host_t *base_vha; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084, "%s: se_sess %p / sess %p from port %8phC loop_id %#04x" " s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n", __func__, sess->se_sess, sess, sess->port_name, sess->loop_id, - sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa, + sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa, sess->logout_on_delete, sess->keep_nport_handle, sess->send_els_logo); - BUG_ON(!tgt); - if (sess->send_els_logo) { - qlt_port_logo_t logo; - logo.id = sess->s_id; - logo.cmd_count = 0; - qlt_send_first_logo(vha, &logo); - } + if (!IS_SW_RESV_ADDR(sess->d_id)) { + if (sess->send_els_logo) { + qlt_port_logo_t logo; - if (sess->logout_on_delete) { - int rc; + logo.id = sess->d_id; + logo.cmd_count = 0; + qlt_send_first_logo(vha, &logo); + } - memset(&fcport, 0, sizeof(fcport)); - fcport.loop_id = sess->loop_id; - fcport.d_id = sess->s_id; - memcpy(fcport.port_name, sess->port_name, WWN_SIZE); - fcport.vha = vha; - fcport.tgt_session = sess; - - rc = qla2x00_post_async_logout_work(vha, &fcport, NULL); - if (rc != QLA_SUCCESS) - ql_log(ql_log_warn, vha, 0xf085, - "Schedule logo failed sess %p rc %d\n", - sess, rc); - else - logout_started = true; + if (sess->logout_on_delete) { + int rc; + + rc = qla2x00_post_async_logout_work(vha, sess, NULL); + if (rc != QLA_SUCCESS) + ql_log(ql_log_warn, vha, 0xf085, + "Schedule logo failed sess %p rc %d\n", + sess, rc); + else + logout_started = true; + } } /* @@ -583,29 +999,61 @@ static void qlt_free_session_done(struct work_struct *work) msleep(100); } - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf087, - "%s: sess %p logout completed\n", - __func__, sess); + ql_dbg(ql_dbg_disc, vha, 0xf087, + "%s: sess %p logout completed\n",__func__, sess); } - spin_lock_irqsave(&ha->hardware_lock, flags); + 
if (sess->logo_ack_needed) { + sess->logo_ack_needed = 0; + qla24xx_async_notify_ack(vha, sess, + (struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO); + } + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + if (sess->se_sess) { + sess->se_sess = NULL; + if (tgt && !IS_SW_RESV_ADDR(sess->d_id)) + tgt->sess_count--; + } + + sess->disc_state = DSC_DELETED; + sess->fw_login_state = DSC_LS_PORT_UNAVAIL; + sess->deleted = QLA_SESS_DELETED; + sess->login_retry = vha->hw->login_retry_count; + + if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) { + vha->fcport_count--; + sess->login_succ = 0; + } + + if (sess->chip_reset != sess->vha->hw->chip_reset) + qla2x00_clear_loop_id(sess); + + if (sess->conflict) { + sess->conflict->login_pause = 0; + sess->conflict = NULL; + if (!test_bit(UNLOADING, &vha->dpc_flags)) + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + } { - qlt_plogi_ack_t *own = + struct qlt_plogi_ack_t *own = sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]; - qlt_plogi_ack_t *con = + struct qlt_plogi_ack_t *con = sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]; + struct imm_ntfy_from_isp *iocb; if (con) { + iocb = &con->iocb; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099, - "se_sess %p / sess %p port %8phC is gone," - " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n", - sess->se_sess, sess, sess->port_name, - own ? "releasing own PLOGI" : - "no own PLOGI pending", - own ? own->ref_count : -1, - con->iocb.u.isp24.port_name, con->ref_count); + "se_sess %p / sess %p port %8phC is gone," + " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n", + sess->se_sess, sess, sess->port_name, + own ? "releasing own PLOGI" : "no own PLOGI pending", + own ? own->ref_count : -1, + iocb->u.isp24.port_name, con->ref_count); qlt_plogi_ack_unref(vha, con); + sess->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL; } else { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a, "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n", @@ -615,59 +1063,64 @@ static void qlt_free_session_done(struct work_struct *work) own ? 
own->ref_count : -1); } - if (own) + if (own) { + sess->fw_login_state = DSC_LS_PLOGI_PEND; qlt_plogi_ack_unref(vha, own); + sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL; + } } - - list_del(&sess->sess_list_entry); - - spin_unlock_irqrestore(&ha->hardware_lock, flags); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001, - "Unregistration of sess %p finished\n", sess); + "Unregistration of sess %p %8phC finished fcp_cnt %d\n", + sess, sess->port_name, vha->fcport_count); - kfree(sess); - /* - * We need to protect against race, when tgt is freed before or - * inside wake_up() - */ - tgt->sess_count--; - if (tgt->sess_count == 0) + if (tgt && (tgt->sess_count == 0)) wake_up_all(&tgt->waitQ); + + if (vha->fcport_count == 0) + wake_up_all(&vha->fcport_waitQ); + + base_vha = pci_get_drvdata(ha->pdev); + if (test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags)) + return; + + if (!tgt || !tgt->tgt_stop) { + memset(&ea, 0, sizeof(ea)); + ea.event = FCME_DELETE_DONE; + ea.fcport = sess; + qla2x00_fcport_event_handler(vha, &ea); + } } /* ha->tgt.sess_lock supposed to be held on entry */ -static void qlt_release_session(struct kref *kref) +void qlt_unreg_sess(struct fc_port *sess) { - struct qla_tgt_sess *sess = - container_of(kref, struct qla_tgt_sess, sess_kref); struct scsi_qla_host *vha = sess->vha; + ql_dbg(ql_dbg_disc, sess->vha, 0xffff, + "%s sess %p for deletion %8phC\n", + __func__, sess, sess->port_name); + if (sess->se_sess) vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess); - if (!list_empty(&sess->del_list_entry)) - list_del_init(&sess->del_list_entry); + qla2x00_mark_device_lost(vha, sess, 1, 1); + sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; + sess->disc_state = DSC_DELETE_PEND; + sess->last_rscn_gen = sess->rscn_gen; + sess->last_login_gen = sess->login_gen; INIT_WORK(&sess->free_work, qlt_free_session_done); schedule_work(&sess->free_work); } - -void qlt_put_sess(struct qla_tgt_sess *sess) -{ - if (!sess) - return; - - assert_spin_locked(&sess->vha->hw->tgt.sess_lock); - kref_put(&sess->sess_kref, qlt_release_session); -} -EXPORT_SYMBOL(qlt_put_sess); +EXPORT_SYMBOL(qlt_unreg_sess); static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) { struct qla_hw_data *ha = vha->hw; - struct qla_tgt_sess *sess = NULL; + struct fc_port *sess = NULL; uint16_t loop_id; int res = 0; struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb; @@ -680,31 +1133,6 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) spin_lock_irqsave(&ha->tgt.sess_lock, flags); qlt_clear_tgt_db(vha->vha_tgt.qla_tgt); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); -#if 0 /* FIXME: do we need to choose a session here? 
*/ - if (!list_empty(&ha->tgt.qla_tgt->sess_list)) { - sess = list_entry(ha->tgt.qla_tgt->sess_list.next, - typeof(*sess), sess_list_entry); - switch (mcmd) { - case QLA_TGT_NEXUS_LOSS_SESS: - mcmd = QLA_TGT_NEXUS_LOSS; - break; - case QLA_TGT_ABORT_ALL_SESS: - mcmd = QLA_TGT_ABORT_ALL; - break; - case QLA_TGT_NEXUS_LOSS: - case QLA_TGT_ABORT_ALL: - break; - default: - ql_dbg(ql_dbg_tgt, vha, 0xe046, - "qla_target(%d): Not allowed " - "command %x in %s", vha->vp_idx, - mcmd, __func__); - sess = NULL; - break; - } - } else - sess = NULL; -#endif } else { spin_lock_irqsave(&ha->tgt.sess_lock, flags); sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id); @@ -726,57 +1154,69 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK); } +static void qla24xx_chk_fcp_state(struct fc_port *sess) +{ + if (sess->chip_reset != sess->vha->hw->chip_reset) { + sess->logout_on_delete = 0; + sess->logo_ack_needed = 0; + sess->fw_login_state = DSC_LS_PORT_UNAVAIL; + sess->scan_state = 0; + } +} + /* ha->tgt.sess_lock supposed to be held on entry */ -static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess, +void qlt_schedule_sess_for_deletion(struct fc_port *sess, bool immediate) { struct qla_tgt *tgt = sess->tgt; - uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5; - if (sess->deleted) { - /* Upgrade to unconditional deletion in case it was temporary */ - if (immediate && sess->deleted == QLA_SESS_DELETION_PENDING) - list_del(&sess->del_list_entry); - else + if (sess->disc_state == DSC_DELETE_PEND) + return; + + if (sess->disc_state == DSC_DELETED) { + if (tgt && tgt->tgt_stop && (tgt->sess_count == 0)) + wake_up_all(&tgt->waitQ); + if (sess->vha->fcport_count == 0) + wake_up_all(&sess->vha->fcport_waitQ); + + if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] && + !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]) return; } - ql_dbg(ql_dbg_tgt, sess->vha, 0xe001, - "Scheduling sess %p for deletion\n", sess); + sess->disc_state = DSC_DELETE_PEND; - if (immediate) { - dev_loss_tmo = 0; - sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; - list_add(&sess->del_list_entry, &tgt->del_sess_list); - } else { - sess->deleted = QLA_SESS_DELETION_PENDING; - list_add_tail(&sess->del_list_entry, &tgt->del_sess_list); - } + if (sess->deleted == QLA_SESS_DELETED) + sess->logout_on_delete = 0; - sess->expires = jiffies + dev_loss_tmo * HZ; + sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; + qla24xx_chk_fcp_state(sess); - ql_dbg(ql_dbg_tgt, sess->vha, 0xe048, - "qla_target(%d): session for port %8phC (loop ID %d s_id %02x:%02x:%02x)" - " scheduled for deletion in %u secs (expires: %lu) immed: %d, logout: %d, gen: %#x\n", - sess->vha->vp_idx, sess->port_name, sess->loop_id, - sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa, - dev_loss_tmo, sess->expires, immediate, sess->logout_on_delete, - sess->generation); + ql_dbg(ql_dbg_tgt, sess->vha, 0xe001, + "Scheduling sess %p for deletion\n", sess); - if (immediate) - mod_delayed_work(system_wq, &tgt->sess_del_work, 0); - else - schedule_delayed_work(&tgt->sess_del_work, - sess->expires - jiffies); + schedule_work(&sess->del_work); +} + +void qlt_schedule_sess_for_deletion_lock(struct fc_port *sess) +{ + unsigned long flags; + struct qla_hw_data *ha = sess->vha->hw; + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + qlt_schedule_sess_for_deletion(sess, 1); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); } /* ha->tgt.sess_lock supposed to be held on entry */ static 
void qlt_clear_tgt_db(struct qla_tgt *tgt) { - struct qla_tgt_sess *sess; + struct fc_port *sess; + scsi_qla_host_t *vha = tgt->vha; - list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) - qlt_schedule_sess_for_deletion(sess, true); + list_for_each_entry(sess, &vha->vp_fcports, list) { + if (sess->se_sess) + qlt_schedule_sess_for_deletion(sess, 1); + } /* At this point tgt could be already dead */ } @@ -801,7 +1241,7 @@ static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id, } /* Get list of logged in devices */ - rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries); + rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries); if (rc != QLA_SUCCESS) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045, "qla_target(%d): get_id_list() failed: %x\n", @@ -830,240 +1270,84 @@ static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id, return res; } -/* ha->tgt.sess_lock supposed to be held on entry */ -static void qlt_undelete_sess(struct qla_tgt_sess *sess) -{ - BUG_ON(sess->deleted != QLA_SESS_DELETION_PENDING); - - list_del_init(&sess->del_list_entry); - sess->deleted = 0; -} - -static void qlt_del_sess_work_fn(struct delayed_work *work) -{ - struct qla_tgt *tgt = container_of(work, struct qla_tgt, - sess_del_work); - struct scsi_qla_host *vha = tgt->vha; - struct qla_hw_data *ha = vha->hw; - struct qla_tgt_sess *sess; - unsigned long flags, elapsed; - - spin_lock_irqsave(&ha->tgt.sess_lock, flags); - while (!list_empty(&tgt->del_sess_list)) { - sess = list_entry(tgt->del_sess_list.next, typeof(*sess), - del_list_entry); - elapsed = jiffies; - if (time_after_eq(elapsed, sess->expires)) { - /* No turning back */ - list_del_init(&sess->del_list_entry); - sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; - - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, - "Timeout: sess %p about to be deleted\n", - sess); - if (sess->se_sess) - ha->tgt.tgt_ops->shutdown_sess(sess); - qlt_put_sess(sess); - } else { - schedule_delayed_work(&tgt->sess_del_work, - sess->expires - elapsed); - break; - } - } - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); -} - /* * Adds an extra ref to allow to drop hw lock after adding sess to the list. * Caller must put it. 
*/ -static struct qla_tgt_sess *qlt_create_sess( +static struct fc_port *qlt_create_sess( struct scsi_qla_host *vha, fc_port_t *fcport, bool local) { struct qla_hw_data *ha = vha->hw; - struct qla_tgt_sess *sess; + struct fc_port *sess = fcport; unsigned long flags; - /* Check to avoid double sessions */ - spin_lock_irqsave(&ha->tgt.sess_lock, flags); - list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list, - sess_list_entry) { - if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005, - "Double sess %p found (s_id %x:%x:%x, " - "loop_id %d), updating to d_id %x:%x:%x, " - "loop_id %d", sess, sess->s_id.b.domain, - sess->s_id.b.al_pa, sess->s_id.b.area, - sess->loop_id, fcport->d_id.b.domain, - fcport->d_id.b.al_pa, fcport->d_id.b.area, - fcport->loop_id); - - /* Cannot undelete at this point */ - if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { - spin_unlock_irqrestore(&ha->tgt.sess_lock, - flags); - return NULL; - } - - if (sess->deleted) - qlt_undelete_sess(sess); - - if (!sess->se_sess) { - if (ha->tgt.tgt_ops->check_initiator_node_acl(vha, - &sess->port_name[0], sess) < 0) { - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); - return NULL; - } - } - - kref_get(&sess->sess_kref); - ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id, - (fcport->flags & FCF_CONF_COMP_SUPPORTED)); - - if (sess->local && !local) - sess->local = 0; - - qlt_do_generation_tick(vha, &sess->generation); - - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + if (vha->vha_tgt.qla_tgt->tgt_stop) + return NULL; - return sess; + if (fcport->se_sess) { + if (!kref_get_unless_zero(&sess->sess_kref)) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s: kref_get_unless_zero failed for %8phC\n", + __func__, sess->port_name); + return NULL; } - } - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); - - sess = kzalloc(sizeof(*sess), GFP_KERNEL); - if (!sess) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a, - "qla_target(%u): session allocation failed, all commands " - "from port %8phC will be refused", vha->vp_idx, - fcport->port_name); - - return NULL; + return fcport; } sess->tgt = vha->vha_tgt.qla_tgt; - sess->vha = vha; - sess->s_id = fcport->d_id; - sess->loop_id = fcport->loop_id; sess->local = local; - kref_init(&sess->sess_kref); - INIT_LIST_HEAD(&sess->del_list_entry); - /* Under normal circumstances we want to logout from firmware when + /* + * Under normal circumstances we want to logout from firmware when * session eventually ends and release corresponding nport handle. * In the exception cases (e.g. when new PLOGI is waiting) corresponding - * code will adjust these flags as necessary. */ + * code will adjust these flags as necessary. 
+ */ sess->logout_on_delete = 1; sess->keep_nport_handle = 0; + sess->logout_completed = 0; - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006, - "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n", - sess, vha->vha_tgt.qla_tgt); - - sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED); - BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name)); - memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name)); - - spin_lock_irqsave(&ha->tgt.sess_lock, flags); - list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list); - vha->vha_tgt.qla_tgt->sess_count++; - qlt_do_generation_tick(vha, &sess->generation); - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); - - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b, - "qla_target(%d): %ssession for wwn %8phC (loop_id %d, " - "s_id %x:%x:%x, confirmed completion %ssupported) added\n", - vha->vp_idx, local ? "local " : "", fcport->port_name, - fcport->loop_id, sess->s_id.b.domain, sess->s_id.b.area, - sess->s_id.b.al_pa, sess->conf_compl_supported ? "" : "not "); - - /* - * Determine if this fc_port->port_name is allowed to access - * target mode using explict NodeACLs+MappedLUNs, or using - * TPG demo mode. If this is successful a target mode FC nexus - * is created. - */ if (ha->tgt.tgt_ops->check_initiator_node_acl(vha, &fcport->port_name[0], sess) < 0) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, + "(%d) %8phC check_initiator_node_acl failed\n", + vha->vp_idx, fcport->port_name); return NULL; } else { + kref_init(&fcport->sess_kref); /* - * Take an extra reference to ->sess_kref here to handle qla_tgt_sess - * access across ->tgt.sess_lock reaquire. + * Take an extra reference to ->sess_kref here to handle + * fc_port access across ->tgt.sess_lock reaquire. */ - kref_get(&sess->sess_kref); - } - - return sess; -} - -/* - * Called from qla2x00_reg_remote_port() - */ -void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) -{ - struct qla_hw_data *ha = vha->hw; - struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; - struct qla_tgt_sess *sess; - unsigned long flags; - - if (!vha->hw->tgt.tgt_ops) - return; - - if (!tgt || (fcport->port_type != FCT_INITIATOR)) - return; + if (!kref_get_unless_zero(&sess->sess_kref)) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s: kref_get_unless_zero failed for %8phC\n", + __func__, sess->port_name); + return NULL; + } - if (qla_ini_mode_enabled(vha)) - return; + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + if (!IS_SW_RESV_ADDR(sess->d_id)) + vha->vha_tgt.qla_tgt->sess_count++; - spin_lock_irqsave(&ha->tgt.sess_lock, flags); - if (tgt->tgt_stop) { + qlt_do_generation_tick(vha, &sess->generation); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); - return; } - sess = qlt_find_sess_by_port_name(tgt, fcport->port_name); - if (!sess) { - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); - - mutex_lock(&vha->vha_tgt.tgt_mutex); - sess = qlt_create_sess(vha, fcport, false); - mutex_unlock(&vha->vha_tgt.tgt_mutex); - - spin_lock_irqsave(&ha->tgt.sess_lock, flags); - } else if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { - /* Point of no return */ - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); - return; - } else { - kref_get(&sess->sess_kref); - if (sess->deleted) { - qlt_undelete_sess(sess); - - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c, - "qla_target(%u): %ssession for port %8phC " - "(loop ID %d) reappeared\n", vha->vp_idx, - sess->local ? 
"local " : "", sess->port_name, - sess->loop_id); + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006, + "Adding sess %p se_sess %p to tgt %p sess_count %d\n", + sess, sess->se_sess, vha->vha_tgt.qla_tgt, + vha->vha_tgt.qla_tgt->sess_count); - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007, - "Reappeared sess %p\n", sess); - } - ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id, - (fcport->flags & FCF_CONF_COMP_SUPPORTED)); - } + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b, + "qla_target(%d): %ssession for wwn %8phC (loop_id %d, " + "s_id %x:%x:%x, confirmed completion %ssupported) added\n", + vha->vp_idx, local ? "local " : "", fcport->port_name, + fcport->loop_id, sess->d_id.b.domain, sess->d_id.b.area, + sess->d_id.b.al_pa, sess->conf_compl_supported ? "" : "not "); - if (sess && sess->local) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d, - "qla_target(%u): local session for " - "port %8phC (loop ID %d) became global\n", vha->vp_idx, - fcport->port_name, sess->loop_id); - sess->local = 0; - } - qlt_put_sess(sess); - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + return sess; } /* @@ -1074,7 +1358,7 @@ void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen) { struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; - struct qla_tgt_sess *sess; + struct fc_port *sess = fcport; unsigned long flags; if (!vha->hw->tgt.tgt_ops) @@ -1088,8 +1372,7 @@ qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen) spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); return; } - sess = qlt_find_sess_by_port_name(tgt, fcport->port_name); - if (!sess) { + if (!sess->se_sess) { spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); return; } @@ -1120,12 +1403,12 @@ static inline int test_tgt_sess_count(struct qla_tgt *tgt) * We need to protect against race, when tgt is freed before or * inside wake_up() */ - spin_lock_irqsave(&ha->hardware_lock, flags); + spin_lock_irqsave(&ha->tgt.sess_lock, flags); ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002, - "tgt %p, empty(sess_list)=%d sess_count=%d\n", - tgt, list_empty(&tgt->sess_list), tgt->sess_count); + "tgt %p, sess_count=%d\n", + tgt, tgt->sess_count); res = (tgt->sess_count == 0); - spin_unlock_irqrestore(&ha->hardware_lock, flags); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); return res; } @@ -1173,8 +1456,6 @@ int qlt_stop_phase1(struct qla_tgt *tgt) mutex_unlock(&vha->vha_tgt.tgt_mutex); mutex_unlock(&qla_tgt_mutex); - flush_delayed_work(&tgt->sess_del_work); - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009, "Waiting for sess works (tgt %p)", tgt); spin_lock_irqsave(&tgt->sess_work_lock, flags); @@ -1186,14 +1467,13 @@ int qlt_stop_phase1(struct qla_tgt *tgt) spin_unlock_irqrestore(&tgt->sess_work_lock, flags); ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a, - "Waiting for tgt %p: list_empty(sess_list)=%d " - "sess_count=%d\n", tgt, list_empty(&tgt->sess_list), - tgt->sess_count); + "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count); wait_event(tgt->waitQ, test_tgt_sess_count(tgt)); /* Big hammer */ - if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha)) + if (!ha->flags.host_shutting_down && + (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))) qlt_disable_vha(vha); /* Wait for sessions to clear out (just in case) */ @@ -1297,6 +1577,9 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha, request_t *pkt; struct nack_to_isp *nack; + if (!ha->flags.fw_started) + return; + ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha); /* Send marker if required */ @@ -1320,6 +1603,7 @@ static void 
qlt_send_notify_ack(struct scsi_qla_host *vha, nack = (struct nack_to_isp *)pkt; nack->ox_id = ntfy->ox_id; + nack->u.isp24.handle = QLA_TGT_SKIP_HANDLE; nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { nack->u.isp24.flags = ntfy->u.isp24.flags & @@ -1489,6 +1773,14 @@ static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag) } } + list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) { + if (tag == op->atio.u.isp24.exchange_addr) { + op->aborted = true; + spin_unlock(&vha->cmd_list_lock); + return 1; + } + } + list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { if (tag == cmd->atio.u.isp24.exchange_addr) { cmd->aborted = 1; @@ -1525,6 +1817,18 @@ static void abort_cmds_for_lun(struct scsi_qla_host *vha, if (op_key == key && op_lun == lun) op->aborted = true; } + + list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) { + uint32_t op_key; + u64 op_lun; + + op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); + op_lun = scsilun_to_int( + (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun); + if (op_key == key && op_lun == lun) + op->aborted = true; + } + list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { uint32_t cmd_key; uint32_t cmd_lun; @@ -1540,7 +1844,7 @@ static void abort_cmds_for_lun(struct scsi_qla_host *vha, /* ha->hardware_lock supposed to be held on entry */ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, - struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess) + struct abts_recv_from_24xx *abts, struct fc_port *sess) { struct qla_hw_data *ha = vha->hw; struct se_session *se_sess = sess->se_sess; @@ -1549,8 +1853,9 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, u32 lun = 0; int rc; bool found_lun = false; + unsigned long flags; - spin_lock(&se_sess->sess_cmd_lock); + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) { struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); @@ -1560,7 +1865,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, break; } } - spin_unlock(&se_sess->sess_cmd_lock); + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); /* cmd not in LIO lists, look in qla list */ if (!found_lun) { @@ -1592,8 +1897,9 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, mcmd->sess = sess; memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts)); mcmd->reset_count = vha->hw->chip_reset; + mcmd->tmr_func = QLA_TGT_ABTS; - rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK, + rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, mcmd->tmr_func, abts->exchange_addr_to_abort); if (rc != 0) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052, @@ -1613,7 +1919,7 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha, struct abts_recv_from_24xx *abts) { struct qla_hw_data *ha = vha->hw; - struct qla_tgt_sess *sess; + struct fc_port *sess; uint32_t tag = abts->exchange_addr_to_abort; uint8_t s_id[3]; int rc; @@ -1665,7 +1971,7 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha, spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); - if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { + if (sess->deleted) { qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false); return; } @@ -1736,6 +2042,70 @@ void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd) } EXPORT_SYMBOL(qlt_free_mcmd); +/* + * ha->hardware_lock supposed to be held on entry. 
Might drop it, then + * reacquire + */ +void qlt_send_resp_ctio(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, + uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq) +{ + struct atio_from_isp *atio = &cmd->atio; + struct ctio7_to_24xx *ctio; + uint16_t temp; + + ql_dbg(ql_dbg_tgt_dif, vha, 0x3066, + "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, " + "sense_key=%02x, asc=%02x, ascq=%02x", + vha, atio, scsi_status, sense_key, asc, ascq); + + ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL); + if (!ctio) { + ql_dbg(ql_dbg_async, vha, 0x3067, + "qla2x00t(%ld): %s failed: unable to allocate request packet", + vha->host_no, __func__); + goto out; + } + + ctio->entry_type = CTIO_TYPE7; + ctio->entry_count = 1; + ctio->handle = QLA_TGT_SKIP_HANDLE; + ctio->nport_handle = cmd->sess->loop_id; + ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); + ctio->vp_index = vha->vp_idx; + ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; + ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; + ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; + ctio->exchange_addr = atio->u.isp24.exchange_addr; + ctio->u.status1.flags = (atio->u.isp24.attr << 9) | + cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS); + temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); + ctio->u.status1.ox_id = cpu_to_le16(temp); + ctio->u.status1.scsi_status = + cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status); + ctio->u.status1.response_len = cpu_to_le16(18); + ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio)); + + if (ctio->u.status1.residual != 0) + ctio->u.status1.scsi_status |= + cpu_to_le16(SS_RESIDUAL_UNDER); + + /* Response code and sense key */ + put_unaligned_le32(((0x70 << 24) | (sense_key << 8)), + (&ctio->u.status1.sense_data)[0]); + /* Additional sense length */ + put_unaligned_le32(0x0a, (&ctio->u.status1.sense_data)[1]); + /* ASC and ASCQ */ + put_unaligned_le32(((asc << 24) | (ascq << 16)), + (&ctio->u.status1.sense_data)[3]); + + /* Memory Barrier */ + wmb(); + + qla2x00_start_iocbs(vha, vha->req); +out: + return; +} + /* callback from target fabric module code */ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd) { @@ -1763,10 +2133,23 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd) return; } - if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) - qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy, - 0, 0, 0, 0, 0, 0); - else { + if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) { + if (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode == + ELS_LOGO || + mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode == + ELS_PRLO || + mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode == + ELS_TPRLO) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "TM response logo %phC status %#x state %#x", + mcmd->sess->port_name, mcmd->fc_tm_rsp, + mcmd->flags); + qlt_schedule_sess_for_deletion_lock(mcmd->sess); + } else { + qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy, + 0, 0, 0, 0, 0, 0); + } + } else { if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts, mcmd->fc_tm_rsp, false); @@ -1971,7 +2354,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm, */ return -EAGAIN; } else - ha->tgt.cmds[h-1] = prm->cmd; + ha->tgt.cmds[h - 1] = prm->cmd; pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK; pkt->nport_handle = prm->cmd->loop_id; @@ -2101,6 +2484,50 @@ static inline int qlt_has_data(struct qla_tgt_cmd *cmd) return cmd->bufflen > 0; } +static void qlt_print_dif_err(struct qla_tgt_prm *prm) +{ + struct qla_tgt_cmd *cmd; + 
struct scsi_qla_host *vha; + + /* asc 0x10=dif error */ + if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) { + cmd = prm->cmd; + vha = cmd->vha; + /* ASCQ */ + switch (prm->sense_buffer[13]) { + case 1: + ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, + "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] " + "se_cmd=%p tag[%x]", + cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, + cmd->atio.u.isp24.exchange_addr); + break; + case 2: + ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, + "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] " + "se_cmd=%p tag[%x]", + cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, + cmd->atio.u.isp24.exchange_addr); + break; + case 3: + ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, + "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] " + "se_cmd=%p tag[%x]", + cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, + cmd->atio.u.isp24.exchange_addr); + break; + default: + ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, + "BE detected Dif ERR: lba[%llx|%lld] len[%x] " + "se_cmd=%p tag[%x]", + cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, + cmd->atio.u.isp24.exchange_addr); + break; + } + ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xffff, cmd->cdb, 16); + } +} + /* * Called without ha->hardware_lock held */ @@ -2182,95 +2609,6 @@ static inline int qlt_need_explicit_conf(struct qla_hw_data *ha, cmd->conf_compl_supported; } -#ifdef CONFIG_QLA_TGT_DEBUG_SRR -/* - * Original taken from the XFS code - */ -static unsigned long qlt_srr_random(void) -{ - static int Inited; - static unsigned long RandomValue; - static DEFINE_SPINLOCK(lock); - /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */ - register long rv; - register long lo; - register long hi; - unsigned long flags; - - spin_lock_irqsave(&lock, flags); - if (!Inited) { - RandomValue = jiffies; - Inited = 1; - } - rv = RandomValue; - hi = rv / 127773; - lo = rv % 127773; - rv = 16807 * lo - 2836 * hi; - if (rv <= 0) - rv += 2147483647; - RandomValue = rv; - spin_unlock_irqrestore(&lock, flags); - return rv; -} - -static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type) -{ -#if 0 /* This is not a real status packets lost, so it won't lead to SRR */ - if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200) - == 50) { - *xmit_type &= ~QLA_TGT_XMIT_STATUS; - ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015, - "Dropping cmd %p (tag %d) status", cmd, se_cmd->tag); - } -#endif - /* - * It's currently not possible to simulate SRRs for FCP_WRITE without - * a physical link layer failure, so don't even try here.. 
- */ - if (cmd->dma_data_direction != DMA_FROM_DEVICE) - return; - - if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) && - ((qlt_srr_random() % 100) == 20)) { - int i, leave = 0; - unsigned int tot_len = 0; - - while (leave == 0) - leave = qlt_srr_random() % cmd->sg_cnt; - - for (i = 0; i < leave; i++) - tot_len += cmd->sg[i].length; - - ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016, - "Cutting cmd %p (tag %d) buffer" - " tail to len %d, sg_cnt %d (cmd->bufflen %d," - " cmd->sg_cnt %d)", cmd, se_cmd->tag, tot_len, leave, - cmd->bufflen, cmd->sg_cnt); - - cmd->bufflen = tot_len; - cmd->sg_cnt = leave; - } - - if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) { - unsigned int offset = qlt_srr_random() % cmd->bufflen; - - ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017, - "Cutting cmd %p (tag %d) buffer head " - "to offset %d (cmd->bufflen %d)", cmd, se_cmd->tag, offset, - cmd->bufflen); - if (offset == 0) - *xmit_type &= ~QLA_TGT_XMIT_DATA; - else if (qlt_set_data_offset(cmd, offset)) { - ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018, - "qlt_set_data_offset() failed (tag %d)", se_cmd->tag); - } - } -} -#else -static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type) -{} -#endif - static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio, struct qla_tgt_prm *prm) { @@ -2288,7 +2626,7 @@ static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio, int i; if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) { - if (prm->cmd->se_cmd.scsi_status != 0) { + if ((prm->rq_result & SS_SCSI_STATUS_BYTE) != 0) { ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017, "Skipping EXPLICIT_CONFORM and " "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ " @@ -2311,18 +2649,9 @@ static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio, for (i = 0; i < prm->sense_buffer_len/4; i++) ((uint32_t *)ctio->u.status1.sense_data)[i] = cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]); -#if 0 - if (unlikely((prm->sense_buffer_len % 4) != 0)) { - static int q; - if (q < 10) { - ql_dbg(ql_dbg_tgt, vha, 0xe04f, - "qla_target(%d): %d bytes of sense " - "lost", prm->tgt->ha->vp_idx, - prm->sense_buffer_len % 4); - q++; - } - } -#endif + + qlt_print_dif_err(prm); + } else { ctio->u.status1.flags &= ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0); @@ -2336,19 +2665,9 @@ static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio, /* Sense with len > 24, is it possible ??? */ } - - -/* diff */ static inline int qlt_hba_err_chk_enabled(struct se_cmd *se_cmd) { - /* - * Uncomment when corresponding SCSI changes are done. 
- * - if (!sp->cmd->prot_chk) - return 0; - * - */ switch (se_cmd->prot_op) { case TARGET_PROT_DOUT_INSERT: case TARGET_PROT_DIN_STRIP: @@ -2369,16 +2688,38 @@ qlt_hba_err_chk_enabled(struct se_cmd *se_cmd) return 0; } +static inline int +qla_tgt_ref_mask_check(struct se_cmd *se_cmd) +{ + switch (se_cmd->prot_op) { + case TARGET_PROT_DIN_INSERT: + case TARGET_PROT_DOUT_INSERT: + case TARGET_PROT_DIN_STRIP: + case TARGET_PROT_DOUT_STRIP: + case TARGET_PROT_DIN_PASS: + case TARGET_PROT_DOUT_PASS: + return 1; + default: + return 0; + } + return 0; +} + /* - * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command - * + * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command */ -static inline void -qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx) +static void +qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx, + uint16_t *pfw_prot_opts) { + struct se_cmd *se_cmd = &cmd->se_cmd; uint32_t lba = 0xffffffff & se_cmd->t_task_lba; + scsi_qla_host_t *vha = cmd->tgt->vha; + struct qla_hw_data *ha = vha->hw; + uint32_t t32 = 0; - /* wait til Mode Sense/Select cmd, modepage Ah, subpage 2 + /* + * wait till Mode Sense/Select cmd, modepage Ah, subpage 2 * have been immplemented by TCM, before AppTag is avail. * Look for modesense_handlers[] */ @@ -2386,65 +2727,73 @@ qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx) ctx->app_tag_mask[0] = 0x0; ctx->app_tag_mask[1] = 0x0; + if (IS_PI_UNINIT_CAPABLE(ha)) { + if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) || + (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)) + *pfw_prot_opts |= PO_DIS_VALD_APP_ESC; + else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT) + *pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC; + } + + t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts); + switch (se_cmd->prot_type) { case TARGET_DIF_TYPE0_PROT: /* - * No check for ql2xenablehba_err_chk, as it would be an - * I/O error if hba tag generation is not done. + * No check for ql2xenablehba_err_chk, as it + * would be an I/O error if hba tag generation + * is not done. */ ctx->ref_tag = cpu_to_le32(lba); - - if (!qlt_hba_err_chk_enabled(se_cmd)) - break; - /* enable ALL bytes of the ref tag */ ctx->ref_tag_mask[0] = 0xff; ctx->ref_tag_mask[1] = 0xff; ctx->ref_tag_mask[2] = 0xff; ctx->ref_tag_mask[3] = 0xff; break; - /* - * For TYpe 1 protection: 16 bit GUARD tag, 32 bit REF tag, and - * 16 bit app tag. - */ case TARGET_DIF_TYPE1_PROT: - ctx->ref_tag = cpu_to_le32(lba); - - if (!qlt_hba_err_chk_enabled(se_cmd)) - break; - - /* enable ALL bytes of the ref tag */ - ctx->ref_tag_mask[0] = 0xff; - ctx->ref_tag_mask[1] = 0xff; - ctx->ref_tag_mask[2] = 0xff; - ctx->ref_tag_mask[3] = 0xff; - break; - /* - * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to - * match LBA in CDB + N - */ + /* + * For TYPE 1 protection: 16 bit GUARD tag, 32 bit + * REF tag, and 16 bit app tag. 
+ */ + ctx->ref_tag = cpu_to_le32(lba); + if (!qla_tgt_ref_mask_check(se_cmd) || + !(ha->tgt.tgt_ops->chk_dif_tags(t32))) { + *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; + break; + } + /* enable ALL bytes of the ref tag */ + ctx->ref_tag_mask[0] = 0xff; + ctx->ref_tag_mask[1] = 0xff; + ctx->ref_tag_mask[2] = 0xff; + ctx->ref_tag_mask[3] = 0xff; + break; case TARGET_DIF_TYPE2_PROT: - ctx->ref_tag = cpu_to_le32(lba); - - if (!qlt_hba_err_chk_enabled(se_cmd)) - break; - - /* enable ALL bytes of the ref tag */ - ctx->ref_tag_mask[0] = 0xff; - ctx->ref_tag_mask[1] = 0xff; - ctx->ref_tag_mask[2] = 0xff; - ctx->ref_tag_mask[3] = 0xff; - break; - - /* For Type 3 protection: 16 bit GUARD only */ + /* + * For TYPE 2 protection: 16 bit GUARD + 32 bit REF + * tag has to match LBA in CDB + N + */ + ctx->ref_tag = cpu_to_le32(lba); + if (!qla_tgt_ref_mask_check(se_cmd) || + !(ha->tgt.tgt_ops->chk_dif_tags(t32))) { + *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; + break; + } + /* enable ALL bytes of the ref tag */ + ctx->ref_tag_mask[0] = 0xff; + ctx->ref_tag_mask[1] = 0xff; + ctx->ref_tag_mask[2] = 0xff; + ctx->ref_tag_mask[3] = 0xff; + break; case TARGET_DIF_TYPE3_PROT: - ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] = - ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00; - break; + /* For TYPE 3 protection: 16 bit GUARD only */ + *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; + ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] = + ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00; + break; } } - static inline int qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) { @@ -2463,6 +2812,7 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) struct se_cmd *se_cmd = &cmd->se_cmd; uint32_t h; struct atio_from_isp *atio = &prm->cmd->atio; + struct qla_tc_param tc; uint16_t t16; ha = vha->hw; @@ -2488,16 +2838,15 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) case TARGET_PROT_DIN_INSERT: case TARGET_PROT_DOUT_STRIP: transfer_length = data_bytes; - data_bytes += dif_bytes; + if (cmd->prot_sg_cnt) + data_bytes += dif_bytes; break; - case TARGET_PROT_DIN_STRIP: case TARGET_PROT_DOUT_INSERT: case TARGET_PROT_DIN_PASS: case TARGET_PROT_DOUT_PASS: transfer_length = data_bytes + dif_bytes; break; - default: BUG(); break; @@ -2533,7 +2882,6 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) break; } - /* ---- PKT ---- */ /* Update entry type to indicate Command Type CRC_2 IOCB */ pkt->entry_type = CTIO_CRC2; @@ -2551,9 +2899,8 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) } else ha->tgt.cmds[h-1] = prm->cmd; - pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK; - pkt->nport_handle = prm->cmd->loop_id; + pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id); pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; @@ -2574,12 +2921,10 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) else if (cmd->dma_data_direction == DMA_FROM_DEVICE) pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT); - pkt->dseg_count = prm->tot_dsds; /* Fibre channel byte count */ pkt->transfer_length = cpu_to_le32(transfer_length); - /* ----- CRC context -------- */ /* Allocate CRC context from global pool */ @@ -2599,13 +2944,12 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) /* Set handle */ crc_ctx_pkt->handle = pkt->handle; - qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt); + qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts); 
pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma)); pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma)); pkt->crc_context_len = CRC_CONTEXT_LEN_FW; - if (!bundling) { cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address; } else { @@ -2626,16 +2970,24 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes); crc_ctx_pkt->guard_seed = cpu_to_le16(0); + memset((uint8_t *)&tc, 0 , sizeof(tc)); + tc.vha = vha; + tc.blk_sz = cmd->blk_sz; + tc.bufflen = cmd->bufflen; + tc.sg = cmd->sg; + tc.prot_sg = cmd->prot_sg; + tc.ctx = crc_ctx_pkt; + tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced; /* Walks data segments */ pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR); if (!bundling && prm->prot_seg_cnt) { if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd, - prm->tot_dsds, cmd)) + prm->tot_dsds, &tc)) goto crc_queuing_error; } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd, - (prm->tot_dsds - prm->prot_seg_cnt), cmd)) + (prm->tot_dsds - prm->prot_seg_cnt), &tc)) goto crc_queuing_error; if (bundling && prm->prot_seg_cnt) { @@ -2644,18 +2996,18 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address; if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd, - prm->prot_seg_cnt, cmd)) + prm->prot_seg_cnt, &tc)) goto crc_queuing_error; } return QLA_SUCCESS; crc_queuing_error: /* Cleanup will be performed by the caller */ + vha->hw->tgt.cmds[h - 1] = NULL; return QLA_FUNCTION_FAILED; } - /* * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and * * QLA_TGT_XMIT_STATUS for >= 24xx silicon @@ -2672,7 +3024,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, int res; spin_lock_irqsave(&ha->hardware_lock, flags); - if (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { + if (cmd->sess && cmd->sess->deleted) { cmd->state = QLA_TGT_STATE_PROCESSED; if (cmd->sess->logout_completed) /* no need to terminate. FW already freed exchange. */ @@ -2685,7 +3037,6 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, spin_unlock_irqrestore(&ha->hardware_lock, flags); memset(&prm, 0, sizeof(prm)); - qlt_check_srr_debug(cmd, &xmit_type); ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018, "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p]\n", @@ -2706,7 +3057,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, else vha->tgt_counters.core_qla_que_buf++; - if (!vha->flags.online || cmd->reset_count != ha->chip_reset) { + if (!ha->flags.fw_started || cmd->reset_count != ha->chip_reset) { /* * Either the port is not online or this request was from * previous life, just abort the processing. @@ -2847,8 +3198,8 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) spin_lock_irqsave(&ha->hardware_lock, flags); - if (!vha->flags.online || (cmd->reset_count != ha->chip_reset) || - (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)) { + if (!ha->flags.fw_started || (cmd->reset_count != ha->chip_reset) || + (cmd->sess && cmd->sess->deleted)) { /* * Either the port is not online or this request was from * previous life, just abort the processing. @@ -2904,139 +3255,113 @@ EXPORT_SYMBOL(qlt_rdy_to_xfer); /* - * Checks the guard or meta-data for the type of error - * detected by the HBA. + * it is assumed either hardware_lock or qpair lock is held. 
*/ -static inline int +static void qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd, - struct ctio_crc_from_fw *sts) + struct ctio_crc_from_fw *sts) { uint8_t *ap = &sts->actual_dif[0]; uint8_t *ep = &sts->expected_dif[0]; - uint32_t e_ref_tag, a_ref_tag; - uint16_t e_app_tag, a_app_tag; - uint16_t e_guard, a_guard; uint64_t lba = cmd->se_cmd.t_task_lba; + uint8_t scsi_status, sense_key, asc, ascq; + unsigned long flags; - a_guard = be16_to_cpu(*(uint16_t *)(ap + 0)); - a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2)); - a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4)); - - e_guard = be16_to_cpu(*(uint16_t *)(ep + 0)); - e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2)); - e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4)); - - ql_dbg(ql_dbg_tgt, vha, 0xe075, - "iocb(s) %p Returned STATUS.\n", sts); - - ql_dbg(ql_dbg_tgt, vha, 0xf075, - "dif check TGT cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n", - cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, - a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard); - - /* - * Ignore sector if: - * For type 3: ref & app tag is all 'f's - * For type 0,1,2: app tag is all 'f's - */ - if ((a_app_tag == 0xffff) && - ((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) || - (a_ref_tag == 0xffffffff))) { - uint32_t blocks_done; - - /* 2TB boundary case covered automatically with this */ - blocks_done = e_ref_tag - (uint32_t)lba + 1; - cmd->se_cmd.bad_sector = e_ref_tag; - cmd->se_cmd.pi_err = 0; - ql_dbg(ql_dbg_tgt, vha, 0xf074, - "need to return scsi good\n"); - - /* Update protection tag */ - if (cmd->prot_sg_cnt) { - uint32_t i, k = 0, num_ent; - struct scatterlist *sg, *sgl; - - - sgl = cmd->prot_sg; - - /* Patch the corresponding protection tags */ - for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) { - num_ent = sg_dma_len(sg) / 8; - if (k + num_ent < blocks_done) { - k += num_ent; - continue; - } - k = blocks_done; - break; - } + cmd->trc_flags |= TRC_DIF_ERR; - if (k != blocks_done) { - ql_log(ql_log_warn, vha, 0xf076, - "unexpected tag values tag:lba=%u:%llu)\n", - e_ref_tag, (unsigned long long)lba); - goto out; - } + cmd->a_guard = be16_to_cpu(*(uint16_t *)(ap + 0)); + cmd->a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2)); + cmd->a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4)); -#if 0 - struct sd_dif_tuple *spt; - /* TODO: - * This section came from initiator. Is it valid here? - * should ulp be override with actual val??? 
- */ - spt = page_address(sg_page(sg)) + sg->offset; - spt += j; + cmd->e_guard = be16_to_cpu(*(uint16_t *)(ep + 0)); + cmd->e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2)); + cmd->e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4)); - spt->app_tag = 0xffff; - if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3) - spt->ref_tag = 0xffffffff; -#endif - } + ql_dbg(ql_dbg_tgt_dif, vha, 0xf075, + "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state); - return 0; - } + scsi_status = sense_key = asc = ascq = 0; - /* check guard */ - if (e_guard != a_guard) { - cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; - cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba; - - ql_log(ql_log_warn, vha, 0xe076, - "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", - cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, - a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, - a_guard, e_guard, cmd); - goto out; + /* check appl tag */ + if (cmd->e_app_tag != cmd->a_app_tag) { + ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, + "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] " + "Ref[%x|%x], App[%x|%x], " + "Guard [%x|%x] cmd=%p ox_id[%04x]", + cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, + cmd->a_ref_tag, cmd->e_ref_tag, + cmd->a_app_tag, cmd->e_app_tag, + cmd->a_guard, cmd->e_guard, + cmd, cmd->atio.u.isp24.fcp_hdr.ox_id); + + cmd->dif_err_code = DIF_ERR_APP; + scsi_status = SAM_STAT_CHECK_CONDITION; + sense_key = ABORTED_COMMAND; + asc = 0x10; + ascq = 0x2; } /* check ref tag */ - if (e_ref_tag != a_ref_tag) { - cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; - cmd->se_cmd.bad_sector = e_ref_tag; - - ql_log(ql_log_warn, vha, 0xe077, - "Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", - cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, - a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, - a_guard, e_guard, cmd); + if (cmd->e_ref_tag != cmd->a_ref_tag) { + ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, + "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] " + "Ref[%x|%x], App[%x|%x], " + "Guard[%x|%x] cmd=%p ox_id[%04x] ", + cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, + cmd->a_ref_tag, cmd->e_ref_tag, + cmd->a_app_tag, cmd->e_app_tag, + cmd->a_guard, cmd->e_guard, + cmd, cmd->atio.u.isp24.fcp_hdr.ox_id); + + cmd->dif_err_code = DIF_ERR_REF; + scsi_status = SAM_STAT_CHECK_CONDITION; + sense_key = ABORTED_COMMAND; + asc = 0x10; + ascq = 0x3; goto out; } - /* check appl tag */ - if (e_app_tag != a_app_tag) { - cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED; - cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba; - - ql_log(ql_log_warn, vha, 0xe078, - "App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", - cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, - a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, - a_guard, e_guard, cmd); - goto out; + /* check guard */ + if (cmd->e_guard != cmd->a_guard) { + ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, + "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] " + "Ref[%x|%x], App[%x|%x], " + "Guard [%x|%x] cmd=%p ox_id[%04x]", + cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, + cmd->a_ref_tag, cmd->e_ref_tag, + cmd->a_app_tag, cmd->e_app_tag, + cmd->a_guard, cmd->e_guard, + cmd, cmd->atio.u.isp24.fcp_hdr.ox_id); + cmd->dif_err_code = DIF_ERR_GRD; + scsi_status = SAM_STAT_CHECK_CONDITION; + sense_key = ABORTED_COMMAND; + asc = 0x10; + ascq = 0x1; } out: - return 
1; -} + switch (cmd->state) { + case QLA_TGT_STATE_NEED_DATA: + /* handle_data will load DIF error code */ + cmd->state = QLA_TGT_STATE_DATA_IN; + vha->hw->tgt.tgt_ops->handle_data(cmd); + break; + default: + spin_lock_irqsave(&cmd->cmd_lock, flags); + if (cmd->aborted) { + spin_unlock_irqrestore(&cmd->cmd_lock, flags); + vha->hw->tgt.tgt_ops->free_cmd(cmd); + break; + } + spin_unlock_irqrestore(&cmd->cmd_lock, flags); + qlt_send_resp_ctio(vha, cmd, scsi_status, sense_key, asc, ascq); + /* assume scsi status gets out on the wire. + * Will not wait for completion. + */ + vha->hw->tgt.tgt_ops->free_cmd(cmd); + break; + } +} /* If hardware_lock held on entry, might drop it, then reaquire */ /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ @@ -3051,7 +3376,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha, ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c, "Sending TERM ELS CTIO (ha=%p)\n", ha); - pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL); + pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); if (pkt == NULL) { ql_dbg(ql_dbg_tgt, vha, 0xe080, "qla_target(%d): %s failed: unable to allocate " @@ -3296,7 +3621,7 @@ int qlt_abort_cmd(struct qla_tgt_cmd *cmd) return EIO; } cmd->aborted = 1; - cmd->cmd_flags |= BIT_6; + cmd->trc_flags |= TRC_ABORT; spin_unlock_irqrestore(&cmd->cmd_lock, flags); qlt_send_term_exchange(vha, cmd, &cmd->atio, 0, 1); @@ -3306,7 +3631,7 @@ EXPORT_SYMBOL(qlt_abort_cmd); void qlt_free_cmd(struct qla_tgt_cmd *cmd) { - struct qla_tgt_sess *sess = cmd->sess; + struct fc_port *sess = cmd->sess; ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074, "%s: se_cmd[%p] ox_id %04x\n", @@ -3330,94 +3655,10 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd) WARN_ON(1); return; } - cmd->jiffies_at_free = get_jiffies_64(); - percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); -} -EXPORT_SYMBOL(qlt_free_cmd); - -/* ha->hardware_lock supposed to be held on entry */ -static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha, - struct qla_tgt_cmd *cmd, void *ctio) -{ - struct qla_tgt_srr_ctio *sc; - struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; - struct qla_tgt_srr_imm *imm; - - tgt->ctio_srr_id++; - cmd->cmd_flags |= BIT_15; - - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019, - "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx); - - if (!ctio) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055, - "qla_target(%d): SRR CTIO, but ctio is NULL\n", - vha->vp_idx); - return -EINVAL; - } - - sc = kzalloc(sizeof(*sc), GFP_ATOMIC); - if (sc != NULL) { - sc->cmd = cmd; - /* IRQ is already OFF */ - spin_lock(&tgt->srr_lock); - sc->srr_id = tgt->ctio_srr_id; - list_add_tail(&sc->srr_list_entry, - &tgt->srr_ctio_list); - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a, - "CTIO SRR %p added (id %d)\n", sc, sc->srr_id); - if (tgt->imm_srr_id == tgt->ctio_srr_id) { - int found = 0; - list_for_each_entry(imm, &tgt->srr_imm_list, - srr_list_entry) { - if (imm->srr_id == sc->srr_id) { - found = 1; - break; - } - } - if (found) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b, - "Scheduling srr work\n"); - schedule_work(&tgt->srr_work); - } else { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056, - "qla_target(%d): imm_srr_id " - "== ctio_srr_id (%d), but there is no " - "corresponding SRR IMM, deleting CTIO " - "SRR %p\n", vha->vp_idx, - tgt->ctio_srr_id, sc); - list_del(&sc->srr_list_entry); - spin_unlock(&tgt->srr_lock); - - kfree(sc); - return -EINVAL; - } - } - spin_unlock(&tgt->srr_lock); - } else { - struct qla_tgt_srr_imm *ti; - - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057, - "qla_target(%d): Unable to allocate SRR CTIO entry\n", 
- vha->vp_idx); - spin_lock(&tgt->srr_lock); - list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list, - srr_list_entry) { - if (imm->srr_id == tgt->ctio_srr_id) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c, - "IMM SRR %p deleted (id %d)\n", - imm, imm->srr_id); - list_del(&imm->srr_list_entry); - qlt_reject_free_srr_imm(vha, imm, 1); - } - } - spin_unlock(&tgt->srr_lock); - - return -ENOMEM; - } - - return 0; + cmd->jiffies_at_free = get_jiffies_64(); + percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); } +EXPORT_SYMBOL(qlt_free_cmd); /* * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire @@ -3427,6 +3668,16 @@ static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio, { int term = 0; + if (cmd->se_cmd.prot_op) + ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, + "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] " + "se_cmd=%p tag[%x] op %#x/%s", + cmd->lba, cmd->lba, + cmd->num_blks, &cmd->se_cmd, + cmd->atio.u.isp24.exchange_addr, + cmd->se_cmd.prot_op, + prot_op_str(cmd->se_cmd.prot_op)); + if (ctio != NULL) { struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio; term = !(c->flags & @@ -3527,7 +3778,7 @@ qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd) dump_stack(); } - cmd->cmd_flags |= BIT_17; + cmd->trc_flags |= TRC_FLUSH; ha->tgt.tgt_ops->free_cmd(cmd); } @@ -3632,50 +3883,27 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, */ cmd->sess->logout_on_delete = 0; cmd->sess->send_els_logo = 1; - qlt_schedule_sess_for_deletion(cmd->sess, true); + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post del sess\n", + __func__, __LINE__, cmd->sess->port_name); + + qlt_schedule_sess_for_deletion_lock(cmd->sess); } break; } - case CTIO_SRR_RECEIVED: - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a, - "qla_target(%d): CTIO with SRR_RECEIVED" - " status %x received (state %x, se_cmd %p)\n", - vha->vp_idx, status, cmd->state, se_cmd); - if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0) - break; - else - return; - case CTIO_DIF_ERROR: { struct ctio_crc_from_fw *crc = (struct ctio_crc_from_fw *)ctio; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073, - "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n", + "qla_target(%d): CTIO with DIF_ERROR status %x " + "received (state %x, ulp_cmd %p) actual_dif[0x%llx] " + "expect_dif[0x%llx]\n", vha->vp_idx, status, cmd->state, se_cmd, *((u64 *)&crc->actual_dif[0]), *((u64 *)&crc->expected_dif[0])); - if (qlt_handle_dif_error(vha, cmd, ctio)) { - if (cmd->state == QLA_TGT_STATE_NEED_DATA) { - /* scsi Write/xfer rdy complete */ - goto skip_term; - } else { - /* scsi read/xmit respond complete - * call handle dif to send scsi status - * rather than terminate exchange. - */ - cmd->state = QLA_TGT_STATE_PROCESSED; - ha->tgt.tgt_ops->handle_dif_err(cmd); - return; - } - } else { - /* Need to generate a SCSI good completion. - * because FW did not send scsi status. 
- */ - status = 0; - goto skip_term; - } - break; + qlt_handle_dif_error(vha, cmd, ctio); + return; } default: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b, @@ -3693,15 +3921,14 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, */ if ((cmd->state != QLA_TGT_STATE_NEED_DATA) && (!cmd->aborted)) { - cmd->cmd_flags |= BIT_13; + cmd->trc_flags |= TRC_CTIO_ERR; if (qlt_term_ctio_exchange(vha, ctio, cmd, status)) return; } } -skip_term: if (cmd->state == QLA_TGT_STATE_PROCESSED) { - cmd->cmd_flags |= BIT_12; + cmd->trc_flags |= TRC_CTIO_DONE; } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) { cmd->state = QLA_TGT_STATE_DATA_IN; @@ -3711,11 +3938,11 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, ha->tgt.tgt_ops->handle_data(cmd); return; } else if (cmd->aborted) { - cmd->cmd_flags |= BIT_18; + cmd->trc_flags |= TRC_CTIO_ABORTED; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e, "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag); } else { - cmd->cmd_flags |= BIT_19; + cmd->trc_flags |= TRC_CTIO_STRANGE; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c, "qla_target(%d): A command in state (%d) should " "not return a CTIO complete\n", vha->vp_idx, cmd->state); @@ -3762,7 +3989,7 @@ static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha, return fcp_task_attr; } -static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *, +static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *, uint8_t *); /* * Process context for I/O path into tcm_qla2xxx code @@ -3772,7 +3999,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd) scsi_qla_host_t *vha = cmd->vha; struct qla_hw_data *ha = vha->hw; struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; - struct qla_tgt_sess *sess = cmd->sess; + struct fc_port *sess = cmd->sess; struct atio_from_isp *atio = &cmd->atio; unsigned char *cdb; unsigned long flags; @@ -3780,7 +4007,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd) int ret, fcp_task_attr, data_dir, bidi = 0; cmd->cmd_in_wq = 0; - cmd->cmd_flags |= BIT_1; + cmd->trc_flags |= TRC_DO_WORK; if (tgt->tgt_stop) goto out_term; @@ -3822,7 +4049,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd) * Drop extra session reference from qla_tgt_handle_cmd_for_atio*( */ spin_lock_irqsave(&ha->tgt.sess_lock, flags); - qlt_put_sess(sess); + ha->tgt.tgt_ops->put_sess(sess); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); return; @@ -3832,7 +4059,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd) * cmd has not sent to target yet, so pass NULL as the second * argument to qlt_send_term_exchange() and free the memory here. 
*/ - cmd->cmd_flags |= BIT_2; + cmd->trc_flags |= TRC_DO_WORK_ERR; spin_lock_irqsave(&ha->hardware_lock, flags); qlt_send_term_exchange(vha, NULL, &cmd->atio, 1, 0); @@ -3841,7 +4068,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd) spin_unlock_irqrestore(&ha->hardware_lock, flags); spin_lock_irqsave(&ha->tgt.sess_lock, flags); - qlt_put_sess(sess); + ha->tgt.tgt_ops->put_sess(sess); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); } @@ -3859,7 +4086,7 @@ static void qlt_do_work(struct work_struct *work) } static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha, - struct qla_tgt_sess *sess, + struct fc_port *sess, struct atio_from_isp *atio) { struct se_session *se_sess = sess->se_sess; @@ -3883,7 +4110,7 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha, cmd->loop_id = sess->loop_id; cmd->conf_compl_supported = sess->conf_compl_supported; - cmd->cmd_flags = 0; + cmd->trc_flags = 0; cmd->jiffies_at_alloc = get_jiffies_64(); cmd->reset_count = vha->hw->chip_reset; @@ -3900,7 +4127,7 @@ static void qlt_create_sess_from_atio(struct work_struct *work) struct qla_tgt_sess_op, work); scsi_qla_host_t *vha = op->vha; struct qla_hw_data *ha = vha->hw; - struct qla_tgt_sess *sess; + struct fc_port *sess; struct qla_tgt_cmd *cmd; unsigned long flags; uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id; @@ -3941,11 +4168,12 @@ static void qlt_create_sess_from_atio(struct work_struct *work) if (!cmd) { spin_lock_irqsave(&ha->hardware_lock, flags); qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY); - qlt_put_sess(sess); + ha->tgt.tgt_ops->put_sess(sess); spin_unlock_irqrestore(&ha->hardware_lock, flags); kfree(op); return; } + /* * __qlt_do_work() will call qlt_put_sess() to release * the extra reference taken above by qlt_make_local_sess() @@ -3953,13 +4181,11 @@ static void qlt_create_sess_from_atio(struct work_struct *work) __qlt_do_work(cmd); kfree(op); return; - out_term: spin_lock_irqsave(&ha->hardware_lock, flags); qlt_send_term_exchange(vha, NULL, &op->atio, 1, 0); spin_unlock_irqrestore(&ha->hardware_lock, flags); kfree(op); - } /* ha->hardware_lock supposed to be held on entry */ @@ -3968,8 +4194,9 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, { struct qla_hw_data *ha = vha->hw; struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; - struct qla_tgt_sess *sess; + struct fc_port *sess; struct qla_tgt_cmd *cmd; + unsigned long flags; if (unlikely(tgt->tgt_stop)) { ql_dbg(ql_dbg_io, vha, 0x3061, @@ -3998,7 +4225,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, /* Another WWN used to have our s_id. Our PLOGI scheduled its * session deletion, but it's still in sess_del_work wq */ - if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { + if (sess->deleted) { ql_dbg(ql_dbg_io, vha, 0x3061, "New command while old session %p is being deleted\n", sess); @@ -4008,24 +4235,32 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, /* * Do kref_get() before returning + dropping qla_hw_data->hardware_lock. 
*/ - kref_get(&sess->sess_kref); + if (!kref_get_unless_zero(&sess->sess_kref)) { + ql_dbg(ql_dbg_tgt, vha, 0xffff, + "%s: kref_get fail, %8phC oxid %x \n", + __func__, sess->port_name, + be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id)); + return -EFAULT; + } cmd = qlt_get_tag(vha, sess, atio); if (!cmd) { ql_dbg(ql_dbg_io, vha, 0x3062, "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx); - qlt_put_sess(sess); + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + ha->tgt.tgt_ops->put_sess(sess); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); return -ENOMEM; } cmd->cmd_in_wq = 1; - cmd->cmd_flags |= BIT_0; + cmd->trc_flags |= TRC_NEW_CMD; cmd->se_cmd.cpuid = ha->msix_count ? ha->tgt.rspq_vector_cpuid : WORK_CPU_UNBOUND; - spin_lock(&vha->cmd_list_lock); + spin_lock_irqsave(&vha->cmd_list_lock, flags); list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list); - spin_unlock(&vha->cmd_list_lock); + spin_unlock_irqrestore(&vha->cmd_list_lock, flags); INIT_WORK(&cmd->work, qlt_do_work); if (ha->msix_count) { @@ -4043,7 +4278,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, } /* ha->hardware_lock supposed to be held on entry */ -static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, +static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun, int fn, void *iocb, int flags) { struct scsi_qla_host *vha = sess->vha; @@ -4051,7 +4286,6 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, struct qla_tgt_mgmt_cmd *mcmd; struct atio_from_isp *a = (struct atio_from_isp *)iocb; int res; - uint8_t tmr_func; mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); if (!mcmd) { @@ -4073,74 +4307,12 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, mcmd->reset_count = vha->hw->chip_reset; switch (fn) { - case QLA_TGT_CLEAR_ACA: - ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000, - "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx); - tmr_func = TMR_CLEAR_ACA; - break; - - case QLA_TGT_TARGET_RESET: - ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001, - "qla_target(%d): TARGET_RESET received\n", - sess->vha->vp_idx); - tmr_func = TMR_TARGET_WARM_RESET; - break; - case QLA_TGT_LUN_RESET: - ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002, - "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx); - tmr_func = TMR_LUN_RESET; - abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id); - break; - - case QLA_TGT_CLEAR_TS: - ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003, - "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx); - tmr_func = TMR_CLEAR_TASK_SET; - break; - - case QLA_TGT_ABORT_TS: - ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004, - "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx); - tmr_func = TMR_ABORT_TASK_SET; - break; -#if 0 - case QLA_TGT_ABORT_ALL: - ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005, - "qla_target(%d): Doing ABORT_ALL_TASKS\n", - sess->vha->vp_idx); - tmr_func = 0; - break; - - case QLA_TGT_ABORT_ALL_SESS: - ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006, - "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n", - sess->vha->vp_idx); - tmr_func = 0; - break; - - case QLA_TGT_NEXUS_LOSS_SESS: - ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007, - "qla_target(%d): Doing NEXUS_LOSS_SESS\n", - sess->vha->vp_idx); - tmr_func = 0; - break; - - case QLA_TGT_NEXUS_LOSS: - ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008, - "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx); - tmr_func = 0; - break; -#endif - default: - ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a, - "qla_target(%d): Unknown task mgmt fn 0x%x\n", - sess->vha->vp_idx, fn); - mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); - return -ENOSYS; + 
abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id); + break; } - res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0); + res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, mcmd->tmr_func, 0); if (res != 0) { ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b, "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n", @@ -4158,7 +4330,7 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb) struct atio_from_isp *a = (struct atio_from_isp *)iocb; struct qla_hw_data *ha = vha->hw; struct qla_tgt *tgt; - struct qla_tgt_sess *sess; + struct fc_port *sess; uint32_t lun, unpacked_lun; int fn; unsigned long flags; @@ -4183,7 +4355,7 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb) sizeof(struct atio_from_isp)); } - if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) + if (sess->deleted) return -EFAULT; return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); @@ -4191,7 +4363,7 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb) /* ha->hardware_lock supposed to be held on entry */ static int __qlt_abort_task(struct scsi_qla_host *vha, - struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess) + struct imm_ntfy_from_isp *iocb, struct fc_port *sess) { struct atio_from_isp *a = (struct atio_from_isp *)iocb; struct qla_hw_data *ha = vha->hw; @@ -4215,8 +4387,9 @@ static int __qlt_abort_task(struct scsi_qla_host *vha, lun = a->u.isp24.fcp_cmnd.lun; unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); mcmd->reset_count = vha->hw->chip_reset; + mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK; - rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK, + rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func, le16_to_cpu(iocb->u.isp2x.seq_id)); if (rc != 0) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060, @@ -4234,7 +4407,7 @@ static int qlt_abort_task(struct scsi_qla_host *vha, struct imm_ntfy_from_isp *iocb) { struct qla_hw_data *ha = vha->hw; - struct qla_tgt_sess *sess; + struct fc_port *sess; int loop_id; unsigned long flags; @@ -4257,22 +4430,20 @@ static int qlt_abort_task(struct scsi_qla_host *vha, void qlt_logo_completion_handler(fc_port_t *fcport, int rc) { - if (fcport->tgt_session) { - if (rc != MBS_COMMAND_COMPLETE) { - ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093, - "%s: se_sess %p / sess %p from" - " port %8phC loop_id %#04x s_id %02x:%02x:%02x" - " LOGO failed: %#x\n", - __func__, - fcport->tgt_session->se_sess, - fcport->tgt_session, - fcport->port_name, fcport->loop_id, - fcport->d_id.b.domain, fcport->d_id.b.area, - fcport->d_id.b.al_pa, rc); - } - - fcport->tgt_session->logout_completed = 1; + if (rc != MBS_COMMAND_COMPLETE) { + ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093, + "%s: se_sess %p / sess %p from" + " port %8phC loop_id %#04x s_id %02x:%02x:%02x" + " LOGO failed: %#x\n", + __func__, + fcport->se_sess, + fcport, + fcport->port_name, fcport->loop_id, + fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa, rc); } + + fcport->logout_completed = 1; } /* @@ -4282,16 +4453,16 @@ void qlt_logo_completion_handler(fc_port_t *fcport, int rc) * deletion. Returns existing session with matching wwn if present. * Null otherwise. 
*/ -static struct qla_tgt_sess * -qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn, - port_id_t port_id, uint16_t loop_id, struct qla_tgt_sess **conflict_sess) +struct fc_port * +qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn, + port_id_t port_id, uint16_t loop_id, struct fc_port **conflict_sess) { - struct qla_tgt_sess *sess = NULL, *other_sess; + struct fc_port *sess = NULL, *other_sess; uint64_t other_wwn; *conflict_sess = NULL; - list_for_each_entry(other_sess, &tgt->sess_list, sess_list_entry) { + list_for_each_entry(other_sess, &vha->vp_fcports, list) { other_wwn = wwn_to_u64(other_sess->port_name); @@ -4302,9 +4473,9 @@ qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn, } /* find other sess with nport_id collision */ - if (port_id.b24 == other_sess->s_id.b24) { + if (port_id.b24 == other_sess->d_id.b24) { if (loop_id != other_sess->loop_id) { - ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000c, + ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000c, "Invalidating sess %p loop_id %d wwn %llx.\n", other_sess, other_sess->loop_id, other_wwn); @@ -4320,6 +4491,11 @@ qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn, * Another wwn used to have our s_id/loop_id * kill the session, but don't free the loop_id */ + ql_dbg(ql_dbg_tgt_tmr, vha, 0xffff, + "Invalidating sess %p loop_id %d wwn %llx.\n", + other_sess, other_sess->loop_id, other_wwn); + + other_sess->keep_nport_handle = 1; *conflict_sess = other_sess; qlt_schedule_sess_for_deletion(other_sess, @@ -4329,8 +4505,9 @@ qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn, } /* find other sess with nport handle collision */ - if (loop_id == other_sess->loop_id) { - ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000d, + if ((loop_id == other_sess->loop_id) && + (loop_id != FC_NO_LOOP_ID)) { + ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000d, "Invalidating sess %p loop_id %d wwn %llx.\n", other_sess, other_sess->loop_id, other_wwn); @@ -4358,11 +4535,21 @@ static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id) spin_lock(&vha->cmd_list_lock); list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) { uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); + + if (op_key == key) { + op->aborted = true; + count++; + } + } + + list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) { + uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); if (op_key == key) { op->aborted = true; count++; } } + list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id); if (cmd_key == key) { @@ -4383,13 +4570,13 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, { struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; struct qla_hw_data *ha = vha->hw; - struct qla_tgt_sess *sess = NULL, *conflict_sess = NULL; + struct fc_port *sess = NULL, *conflict_sess = NULL; uint64_t wwn; port_id_t port_id; uint16_t loop_id; uint16_t wd3_lo; int res = 0; - qlt_plogi_ack_t *pla; + struct qlt_plogi_ack_t *pla; unsigned long flags; wwn = wwn_to_u64(iocb->u.isp24.port_name); @@ -4401,9 +4588,12 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, loop_id = le16_to_cpu(iocb->u.isp24.nport_handle); - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026, - "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n", - vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode); + ql_dbg(ql_dbg_disc, vha, 0xf026, + "qla_target(%d): Port ID: %02x:%02x:%02x ELS opcode: 0x%02x lid %d %8phC\n", + vha->vp_idx, iocb->u.isp24.port_id[2], + iocb->u.isp24.port_id[1], 
iocb->u.isp24.port_id[0], + iocb->u.isp24.status_subcode, loop_id, + iocb->u.isp24.port_name); /* res = 1 means ack at the end of thread * res = 0 means ack async/later. @@ -4416,12 +4606,12 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, if (wwn) { spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags); - sess = qlt_find_sess_invalidate_other(tgt, wwn, - port_id, loop_id, &conflict_sess); + sess = qlt_find_sess_invalidate_other(vha, wwn, + port_id, loop_id, &conflict_sess); spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags); } - if (IS_SW_RESV_ADDR(port_id) || (!sess && !conflict_sess)) { + if (IS_SW_RESV_ADDR(port_id)) { res = 1; break; } @@ -4429,580 +4619,228 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, pla = qlt_plogi_ack_find_add(vha, &port_id, iocb); if (!pla) { qlt_send_term_imm_notif(vha, iocb, 1); - - res = 0; break; } res = 0; - if (conflict_sess) - qlt_plogi_ack_link(vha, pla, conflict_sess, - QLT_PLOGI_LINK_CONFLICT); - - if (!sess) - break; - - qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN); - /* - * Under normal circumstances we want to release nport handle - * during LOGO process to avoid nport handle leaks inside FW. - * The exception is when LOGO is done while another PLOGI with - * the same nport handle is waiting as might be the case here. - * Note: there is always a possibily of a race where session - * deletion has already started for other reasons (e.g. ACL - * removal) and now PLOGI arrives: - * 1. if PLOGI arrived in FW after nport handle has been freed, - * FW must have assigned this PLOGI a new/same handle and we - * can proceed ACK'ing it as usual when session deletion - * completes. - * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT - * bit reached it, the handle has now been released. We'll - * get an error when we ACK this PLOGI. Nothing will be sent - * back to initiator. Initiator should eventually retry - * PLOGI and situation will correct itself. - */ - sess->keep_nport_handle = ((sess->loop_id == loop_id) && - (sess->s_id.b24 == port_id.b24)); - qlt_schedule_sess_for_deletion(sess, true); - break; - - case ELS_PRLI: - wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo); - - if (wwn) { - spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags); - sess = qlt_find_sess_invalidate_other(tgt, wwn, port_id, - loop_id, &conflict_sess); - spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags); - } - if (conflict_sess) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b, - "PRLI with conflicting sess %p port %8phC\n", - conflict_sess, conflict_sess->port_name); - qlt_send_term_imm_notif(vha, iocb, 1); - res = 0; - break; - } - - if (sess != NULL) { - if (sess->deleted) { - /* - * Impatient initiator sent PRLI before last - * PLOGI could finish. Will force him to re-try, - * while last one finishes. 
- */ - ql_log(ql_log_warn, sess->vha, 0xf095, - "sess %p PRLI received, before plogi ack.\n", - sess); - qlt_send_term_imm_notif(vha, iocb, 1); - res = 0; - break; - } - - /* - * This shouldn't happen under normal circumstances, - * since we have deleted the old session during PLOGI - */ - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096, - "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n", - sess->loop_id, sess, iocb->u.isp24.nport_handle); - - sess->local = 0; - sess->loop_id = loop_id; - sess->s_id = port_id; - - if (wd3_lo & BIT_7) - sess->conf_compl_supported = 1; - - } - res = 1; /* send notify ack */ - - /* Make session global (not used in fabric mode) */ - if (ha->current_topology != ISP_CFG_F) { - set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); - set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); - qla2xxx_wake_dpc(vha); - } else { - /* todo: else - create sess here. */ - res = 1; /* send notify ack */ - } - - break; - - case ELS_LOGO: - case ELS_PRLO: - res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); - break; - case ELS_PDISC: - case ELS_ADISC: - { - struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; - if (tgt->link_reinit_iocb_pending) { - qlt_send_notify_ack(vha, &tgt->link_reinit_iocb, - 0, 0, 0, 0, 0, 0); - tgt->link_reinit_iocb_pending = 0; - } - res = 1; /* send notify ack */ - break; - } - - case ELS_FLOGI: /* should never happen */ - default: - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061, - "qla_target(%d): Unsupported ELS command %x " - "received\n", vha->vp_idx, iocb->u.isp24.status_subcode); - res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); - break; - } - - return res; -} - -static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset) -{ -#if 1 - /* - * FIXME: Reject non zero SRR relative offset until we can test - * this code properly. - */ - pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset); - return -1; -#else - struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL; - size_t first_offset = 0, rem_offset = offset, tmp = 0; - int i, sg_srr_cnt, bufflen = 0; - - ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023, - "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, " - "cmd->sg_cnt: %u, direction: %d\n", - cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); - - if (!cmd->sg || !cmd->sg_cnt) { - ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055, - "Missing cmd->sg or zero cmd->sg_cnt in" - " qla_tgt_set_data_offset\n"); - return -EINVAL; - } - /* - * Walk the current cmd->sg list until we locate the new sg_srr_start - */ - for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) { - ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024, - "sg[%d]: %p page: %p, length: %d, offset: %d\n", - i, sg, sg_page(sg), sg->length, sg->offset); - - if ((sg->length + tmp) > offset) { - first_offset = rem_offset; - sg_srr_start = sg; - ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025, - "Found matching sg[%d], using %p as sg_srr_start, " - "and using first_offset: %zu\n", i, sg, - first_offset); - break; - } - tmp += sg->length; - rem_offset -= sg->length; - } - - if (!sg_srr_start) { - ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056, - "Unable to locate sg_srr_start for offset: %u\n", offset); - return -EINVAL; - } - sg_srr_cnt = (cmd->sg_cnt - i); - - sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL); - if (!sg_srr) { - ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057, - "Unable to allocate sgp\n"); - return -ENOMEM; - } - sg_init_table(sg_srr, sg_srr_cnt); - sgp = &sg_srr[0]; - /* - * Walk the remaining list for sg_srr_start, mapping to the newly - * allocated sg_srr taking first_offset into account. 
- */ - for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) { - if (first_offset) { - sg_set_page(sgp, sg_page(sg), - (sg->length - first_offset), first_offset); - first_offset = 0; - } else { - sg_set_page(sgp, sg_page(sg), sg->length, 0); - } - bufflen += sgp->length; - - sgp = sg_next(sgp); - if (!sgp) - break; - } - - cmd->sg = sg_srr; - cmd->sg_cnt = sg_srr_cnt; - cmd->bufflen = bufflen; - cmd->offset += offset; - cmd->free_sg = 1; - - ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg); - ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n", - cmd->sg_cnt); - ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n", - cmd->bufflen); - ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n", - cmd->offset); - - if (cmd->sg_cnt < 0) - BUG(); - - if (cmd->bufflen < 0) - BUG(); - - return 0; -#endif -} - -static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd, - uint32_t srr_rel_offs, int *xmit_type) -{ - int res = 0, rel_offs; - - rel_offs = srr_rel_offs - cmd->offset; - ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d", - srr_rel_offs, rel_offs); - - *xmit_type = QLA_TGT_XMIT_ALL; - - if (rel_offs < 0) { - ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062, - "qla_target(%d): SRR rel_offs (%d) < 0", - cmd->vha->vp_idx, rel_offs); - res = -1; - } else if (rel_offs == cmd->bufflen) - *xmit_type = QLA_TGT_XMIT_STATUS; - else if (rel_offs > 0) - res = qlt_set_data_offset(cmd, rel_offs); - - return res; -} - -/* No locks, thread context */ -static void qlt_handle_srr(struct scsi_qla_host *vha, - struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm) -{ - struct imm_ntfy_from_isp *ntfy = - (struct imm_ntfy_from_isp *)&imm->imm_ntfy; - struct qla_hw_data *ha = vha->hw; - struct qla_tgt_cmd *cmd = sctio->cmd; - struct se_cmd *se_cmd = &cmd->se_cmd; - unsigned long flags; - int xmit_type = 0, resp = 0; - uint32_t offset; - uint16_t srr_ui; - - offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs); - srr_ui = ntfy->u.isp24.srr_ui; - - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n", - cmd, srr_ui); - - switch (srr_ui) { - case SRR_IU_STATUS: - spin_lock_irqsave(&ha->hardware_lock, flags); - qlt_send_notify_ack(vha, ntfy, - 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); - spin_unlock_irqrestore(&ha->hardware_lock, flags); - xmit_type = QLA_TGT_XMIT_STATUS; - resp = 1; - break; - case SRR_IU_DATA_IN: - if (!cmd->sg || !cmd->sg_cnt) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063, - "Unable to process SRR_IU_DATA_IN due to" - " missing cmd->sg, state: %d\n", cmd->state); - dump_stack(); - goto out_reject; - } - if (se_cmd->scsi_status != 0) { - ql_dbg(ql_dbg_tgt, vha, 0xe02a, - "Rejecting SRR_IU_DATA_IN with non GOOD " - "scsi_status\n"); - goto out_reject; - } - cmd->bufflen = se_cmd->data_length; - - if (qlt_has_data(cmd)) { - if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0) - goto out_reject; - spin_lock_irqsave(&ha->hardware_lock, flags); - qlt_send_notify_ack(vha, ntfy, - 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); - spin_unlock_irqrestore(&ha->hardware_lock, flags); - resp = 1; - } else { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064, - "qla_target(%d): SRR for in data for cmd without them (tag %lld, SCSI status %d), reject", - vha->vp_idx, se_cmd->tag, - cmd->se_cmd.scsi_status); - goto out_reject; - } - break; - case SRR_IU_DATA_OUT: - if (!cmd->sg || !cmd->sg_cnt) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065, - "Unable to process SRR_IU_DATA_OUT due to" - " missing cmd->sg\n"); - dump_stack(); - goto out_reject; - } - if (se_cmd->scsi_status != 0) { - 
ql_dbg(ql_dbg_tgt, vha, 0xe02b, - "Rejecting SRR_IU_DATA_OUT" - " with non GOOD scsi_status\n"); - goto out_reject; - } - cmd->bufflen = se_cmd->data_length; - - if (qlt_has_data(cmd)) { - if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0) - goto out_reject; - spin_lock_irqsave(&ha->hardware_lock, flags); - qlt_send_notify_ack(vha, ntfy, - 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); - spin_unlock_irqrestore(&ha->hardware_lock, flags); - if (xmit_type & QLA_TGT_XMIT_DATA) { - cmd->cmd_flags |= BIT_8; - qlt_rdy_to_xfer(cmd); - } - } else { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066, - "qla_target(%d): SRR for out data for cmd without them (tag %lld, SCSI status %d), reject", - vha->vp_idx, se_cmd->tag, cmd->se_cmd.scsi_status); - goto out_reject; + conflict_sess->login_gen++; + qlt_plogi_ack_link(vha, pla, conflict_sess, + QLT_PLOGI_LINK_CONFLICT); } - break; - default: - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067, - "qla_target(%d): Unknown srr_ui value %x", - vha->vp_idx, srr_ui); - goto out_reject; - } - - /* Transmit response in case of status and data-in cases */ - if (resp) { - cmd->cmd_flags |= BIT_7; - qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status); - } - - return; - -out_reject: - spin_lock_irqsave(&ha->hardware_lock, flags); - qlt_send_notify_ack(vha, ntfy, 0, 0, 0, - NOTIFY_ACK_SRR_FLAGS_REJECT, - NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, - NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); - if (cmd->state == QLA_TGT_STATE_NEED_DATA) { - cmd->state = QLA_TGT_STATE_DATA_IN; - dump_stack(); - } else { - cmd->cmd_flags |= BIT_9; - qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0); - } - spin_unlock_irqrestore(&ha->hardware_lock, flags); -} - -static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha, - struct qla_tgt_srr_imm *imm, int ha_locked) -{ - struct qla_hw_data *ha = vha->hw; - unsigned long flags = 0; - -#ifndef __CHECKER__ - if (!ha_locked) - spin_lock_irqsave(&ha->hardware_lock, flags); -#endif - qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0, - NOTIFY_ACK_SRR_FLAGS_REJECT, - NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, - NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); + if (!sess) { + pla->ref_count++; + qla24xx_post_newsess_work(vha, &port_id, + iocb->u.isp24.port_name, pla); + res = 0; + break; + } -#ifndef __CHECKER__ - if (!ha_locked) - spin_unlock_irqrestore(&ha->hardware_lock, flags); -#endif + qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN); + sess->fw_login_state = DSC_LS_PLOGI_PEND; + sess->d_id = port_id; + sess->login_gen++; - kfree(imm); -} + switch (sess->disc_state) { + case DSC_DELETED: + qlt_plogi_ack_unref(vha, pla); + break; -static void qlt_handle_srr_work(struct work_struct *work) -{ - struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work); - struct scsi_qla_host *vha = tgt->vha; - struct qla_tgt_srr_ctio *sctio; - unsigned long flags; + default: + /* + * Under normal circumstances we want to release nport handle + * during LOGO process to avoid nport handle leaks inside FW. + * The exception is when LOGO is done while another PLOGI with + * the same nport handle is waiting as might be the case here. + * Note: there is always a possibily of a race where session + * deletion has already started for other reasons (e.g. ACL + * removal) and now PLOGI arrives: + * 1. if PLOGI arrived in FW after nport handle has been freed, + * FW must have assigned this PLOGI a new/same handle and we + * can proceed ACK'ing it as usual when session deletion + * completes. + * 2. 
if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT + * bit reached it, the handle has now been released. We'll + * get an error when we ACK this PLOGI. Nothing will be sent + * back to initiator. Initiator should eventually retry + * PLOGI and situation will correct itself. + */ + sess->keep_nport_handle = ((sess->loop_id == loop_id) && + (sess->d_id.b24 == port_id.b24)); - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n", - tgt); + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post del sess\n", + __func__, __LINE__, sess->port_name); -restart: - spin_lock_irqsave(&tgt->srr_lock, flags); - list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) { - struct qla_tgt_srr_imm *imm, *i, *ti; - struct qla_tgt_cmd *cmd; - struct se_cmd *se_cmd; - - imm = NULL; - list_for_each_entry_safe(i, ti, &tgt->srr_imm_list, - srr_list_entry) { - if (i->srr_id == sctio->srr_id) { - list_del(&i->srr_list_entry); - if (imm) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068, - "qla_target(%d): There must be " - "only one IMM SRR per CTIO SRR " - "(IMM SRR %p, id %d, CTIO %p\n", - vha->vp_idx, i, i->srr_id, sctio); - qlt_reject_free_srr_imm(tgt->vha, i, 0); - } else - imm = i; - } + + qlt_schedule_sess_for_deletion_lock(sess); + break; } - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a, - "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio, - sctio->srr_id); + break; - if (imm == NULL) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b, - "Not found matching IMM for SRR CTIO (id %d)\n", - sctio->srr_id); - continue; - } else - list_del(&sctio->srr_list_entry); + case ELS_PRLI: + wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo); - spin_unlock_irqrestore(&tgt->srr_lock, flags); + if (wwn) { + spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags); + sess = qlt_find_sess_invalidate_other(vha, wwn, port_id, + loop_id, &conflict_sess); + spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags); + } - cmd = sctio->cmd; - /* - * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow - * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in() - * logic.. - */ - cmd->offset = 0; - if (cmd->free_sg) { - kfree(cmd->sg); - cmd->sg = NULL; - cmd->free_sg = 0; + if (conflict_sess) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b, + "PRLI with conflicting sess %p port %8phC\n", + conflict_sess, conflict_sess->port_name); + qlt_send_term_imm_notif(vha, iocb, 1); + res = 0; + break; } - se_cmd = &cmd->se_cmd; - cmd->sg_cnt = se_cmd->t_data_nents; - cmd->sg = se_cmd->t_data_sg; + if (sess != NULL) { + if (sess->fw_login_state != DSC_LS_PLOGI_PEND && + sess->fw_login_state != DSC_LS_PLOGI_COMP) { + /* + * Impatient initiator sent PRLI before last + * PLOGI could finish. Will force him to re-try, + * while last one finishes. + */ + ql_log(ql_log_warn, sess->vha, 0xf095, + "sess %p PRLI received, before plogi ack.\n", + sess); + qlt_send_term_imm_notif(vha, iocb, 1); + res = 0; + break; + } - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c, - "SRR cmd %p (se_cmd %p, tag %lld, op %x), sg_cnt=%d, offset=%d", - cmd, &cmd->se_cmd, se_cmd->tag, se_cmd->t_task_cdb ? 
- se_cmd->t_task_cdb[0] : 0, cmd->sg_cnt, cmd->offset); + /* + * This shouldn't happen under normal circumstances, + * since we have deleted the old session during PLOGI + */ + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096, + "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n", + sess->loop_id, sess, iocb->u.isp24.nport_handle); - qlt_handle_srr(vha, sctio, imm); + sess->local = 0; + sess->loop_id = loop_id; + sess->d_id = port_id; + sess->fw_login_state = DSC_LS_PRLI_PEND; - kfree(imm); - kfree(sctio); - goto restart; - } - spin_unlock_irqrestore(&tgt->srr_lock, flags); -} + if (wd3_lo & BIT_7) + sess->conf_compl_supported = 1; -/* ha->hardware_lock supposed to be held on entry */ -static void qlt_prepare_srr_imm(struct scsi_qla_host *vha, - struct imm_ntfy_from_isp *iocb) -{ - struct qla_tgt_srr_imm *imm; - struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; - struct qla_tgt_srr_ctio *sctio; - - tgt->imm_srr_id++; - - ql_log(ql_log_warn, vha, 0xf02d, "qla_target(%d): SRR received\n", - vha->vp_idx); - - imm = kzalloc(sizeof(*imm), GFP_ATOMIC); - if (imm != NULL) { - memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy)); - - /* IRQ is already OFF */ - spin_lock(&tgt->srr_lock); - imm->srr_id = tgt->imm_srr_id; - list_add_tail(&imm->srr_list_entry, - &tgt->srr_imm_list); - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e, - "IMM NTFY SRR %p added (id %d, ui %x)\n", - imm, imm->srr_id, iocb->u.isp24.srr_ui); - if (tgt->imm_srr_id == tgt->ctio_srr_id) { - int found = 0; - list_for_each_entry(sctio, &tgt->srr_ctio_list, - srr_list_entry) { - if (sctio->srr_id == imm->srr_id) { - found = 1; - break; - } - } - if (found) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s", - "Scheduling srr work\n"); - schedule_work(&tgt->srr_work); + if ((wd3_lo & BIT_4) == 0) + sess->port_type = FCT_INITIATOR; + else + sess->port_type = FCT_TARGET; + } + res = 1; /* send notify ack */ + + /* Make session global (not used in fabric mode) */ + if (ha->current_topology != ISP_CFG_F) { + if (sess) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post nack\n", + __func__, __LINE__, sess->port_name); + qla24xx_post_nack_work(vha, sess, iocb, + SRB_NACK_PRLI); + res = 0; } else { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030, - "qla_target(%d): imm_srr_id " - "== ctio_srr_id (%d), but there is no " - "corresponding SRR CTIO, deleting IMM " - "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id, - imm); - list_del(&imm->srr_list_entry); - - kfree(imm); - - spin_unlock(&tgt->srr_lock); - goto out_reject; + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } + } else { + if (sess) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post nack\n", + __func__, __LINE__, sess->port_name); + qla24xx_post_nack_work(vha, sess, iocb, + SRB_NACK_PRLI); + res = 0; } } - spin_unlock(&tgt->srr_lock); - } else { - struct qla_tgt_srr_ctio *ts; - - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069, - "qla_target(%d): Unable to allocate SRR IMM " - "entry, SRR request will be rejected\n", vha->vp_idx); - - /* IRQ is already OFF */ - spin_lock(&tgt->srr_lock); - list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list, - srr_list_entry) { - if (sctio->srr_id == tgt->imm_srr_id) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031, - "CTIO SRR %p deleted (id %d)\n", - sctio, sctio->srr_id); - list_del(&sctio->srr_list_entry); - qlt_send_term_exchange(vha, sctio->cmd, - &sctio->cmd->atio, 1, 0); - kfree(sctio); + break; + + case ELS_TPRLO: + if (le16_to_cpu(iocb->u.isp24.flags) & + NOTIFY24XX_FLAGS_GLOBAL_TPRLO) { + loop_id = 0xFFFF; + 
qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS); + res = 1; + break; + } + /* drop through */ + case ELS_LOGO: + case ELS_PRLO: + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + sess = qla2x00_find_fcport_by_loopid(vha, loop_id); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + + if (sess) { + sess->login_gen++; + sess->fw_login_state = DSC_LS_LOGO_PEND; + sess->logo_ack_needed = 1; + memcpy(sess->iocb, iocb, IOCB_SIZE); + } + + res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); + + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s: logo %llx res %d sess %p ", + __func__, wwn, res, sess); + if (res == 0) { + /* + * cmd went upper layer, look for qlt_xmit_tm_rsp() + * for LOGO_ACK & sess delete + */ + BUG_ON(!sess); + res = 0; + } else { + /* cmd did not go to upper layer. */ + if (sess) { + qlt_schedule_sess_for_deletion_lock(sess); + res = 0; } + /* else logo will be ack */ + } + break; + case ELS_PDISC: + case ELS_ADISC: + { + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + if (tgt->link_reinit_iocb_pending) { + qlt_send_notify_ack(vha, &tgt->link_reinit_iocb, + 0, 0, 0, 0, 0, 0); + tgt->link_reinit_iocb_pending = 0; + } + + sess = qla2x00_find_fcport_by_wwpn(vha, + iocb->u.isp24.port_name, 1); + if (sess) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "sess %p lid %d|%d DS %d LS %d\n", + sess, sess->loop_id, loop_id, + sess->disc_state, sess->fw_login_state); } - spin_unlock(&tgt->srr_lock); - goto out_reject; + + res = 1; /* send notify ack */ + break; } - return; + case ELS_FLOGI: /* should never happen */ + default: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061, + "qla_target(%d): Unsupported ELS command %x " + "received\n", vha->vp_idx, iocb->u.isp24.status_subcode); + res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); + break; + } -out_reject: - qlt_send_notify_ack(vha, iocb, 0, 0, 0, - NOTIFY_ACK_SRR_FLAGS_REJECT, - NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, - NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); + return res; } /* @@ -5126,12 +4964,6 @@ static void qlt_handle_imm_notify(struct scsi_qla_host *vha, if (qlt_24xx_handle_els(vha, iocb) == 0) send_notify_ack = 0; break; - - case IMM_NTFY_SRR: - qlt_prepare_srr_imm(vha, iocb); - send_notify_ack = 0; - break; - default: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d, "qla_target(%d): Received unknown immediate " @@ -5153,7 +4985,7 @@ static int __qlt_send_busy(struct scsi_qla_host *vha, struct ctio7_to_24xx *ctio24; struct qla_hw_data *ha = vha->hw; request_t *pkt; - struct qla_tgt_sess *sess = NULL; + struct fc_port *sess = NULL; unsigned long flags; spin_lock_irqsave(&ha->tgt.sess_lock, flags); @@ -5214,7 +5046,7 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, { struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; struct qla_hw_data *ha = vha->hw; - struct qla_tgt_sess *sess; + struct fc_port *sess; struct se_session *se_sess; struct qla_tgt_cmd *cmd; int tag; @@ -5372,16 +5204,22 @@ qlt_send_busy(struct scsi_qla_host *vha, static int qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, - struct atio_from_isp *atio) + struct atio_from_isp *atio, bool ha_locked) { struct qla_hw_data *ha = vha->hw; uint16_t status; + unsigned long flags; if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha)) return 0; + if (!ha_locked) + spin_lock_irqsave(&ha->hardware_lock, flags); status = temp_sam_status; qlt_send_busy(vha, atio, status); + if (!ha_locked) + spin_unlock_irqrestore(&ha->hardware_lock, flags); + return 1; } @@ -5396,7 +5234,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, unsigned long flags; if (unlikely(tgt == NULL)) { - ql_dbg(ql_dbg_io, vha, 0x3064, + 
ql_dbg(ql_dbg_tgt, vha, 0x3064, "ATIO pkt, but no tgt (ha %p)", ha); return; } @@ -5426,7 +5264,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) { - rc = qlt_chk_qfull_thresh_hold(vha, atio); + rc = qlt_chk_qfull_thresh_hold(vha, atio, ha_locked); if (rc != 0) { tgt->atio_irq_cmd_count--; return; @@ -5549,7 +5387,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) break; } - rc = qlt_chk_qfull_thresh_hold(vha, atio); + rc = qlt_chk_qfull_thresh_hold(vha, atio, true); if (rc != 0) { tgt->irq_cmd_count--; return; @@ -5756,6 +5594,32 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); break; + case MBA_REJECTED_FCP_CMD: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, + "qla_target(%d): Async event LS_REJECT occurred " + "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, + le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), + le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); + + if (le16_to_cpu(mailbox[3]) == 1) { + /* exchange starvation. */ + vha->hw->exch_starvation++; + if (vha->hw->exch_starvation > 5) { + ql_log(ql_log_warn, vha, 0xffff, + "Exchange starvation-. Resetting RISC\n"); + + vha->hw->exch_starvation = 0; + if (IS_P3P_TYPE(vha->hw)) + set_bit(FCOE_CTX_RESET_NEEDED, + &vha->dpc_flags); + else + set_bit(ISP_ABORT_NEEDED, + &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } + } + break; + case MBA_PORT_UPDATE: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d, "qla_target(%d): Port update async event %#x " @@ -5765,14 +5629,14 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); login_code = le16_to_cpu(mailbox[2]); - if (login_code == 0x4) + if (login_code == 0x4) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e, "Async MB 2: Got PLOGI Complete\n"); - else if (login_code == 0x7) + vha->hw->exch_starvation = 0; + } else if (login_code == 0x7) ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f, "Async MB 2: Port Logged Out\n"); break; - default: break; } @@ -5783,8 +5647,10 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, uint16_t loop_id) { - fc_port_t *fcport; + fc_port_t *fcport, *tfcp, *del; int rc; + unsigned long flags; + u8 newfcport = 0; fcport = kzalloc(sizeof(*fcport), GFP_KERNEL); if (!fcport) { @@ -5796,7 +5662,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, fcport->loop_id = loop_id; - rc = qla2x00_get_port_database(vha, fcport, 0); + rc = qla24xx_gpdb_wait(vha, fcport, 0); if (rc != QLA_SUCCESS) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070, "qla_target(%d): Failed to retrieve fcport " @@ -5806,18 +5672,82 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, return NULL; } + del = NULL; + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + tfcp = qla2x00_find_fcport_by_wwpn(vha, fcport->port_name, 1); + + if (tfcp) { + tfcp->d_id = fcport->d_id; + tfcp->port_type = fcport->port_type; + tfcp->supported_classes = fcport->supported_classes; + tfcp->flags |= fcport->flags; + + del = fcport; + fcport = tfcp; + } else { + if (vha->hw->current_topology == ISP_CFG_F) + fcport->flags |= FCF_FABRIC_DEVICE; + + list_add_tail(&fcport->list, &vha->vp_fcports); + if (!IS_SW_RESV_ADDR(fcport->d_id)) + vha->fcport_count++; + fcport->login_gen++; + fcport->disc_state = DSC_LOGIN_COMPLETE; + fcport->login_succ = 1; + newfcport = 1; + } + + fcport->deleted = 0; + 
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + + switch (vha->host->active_mode) { + case MODE_INITIATOR: + case MODE_DUAL: + if (newfcport) { + if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post upd_fcport fcp_cnt %d\n", + __func__, __LINE__, fcport->port_name, vha->fcport_count); + qla24xx_post_upd_fcport_work(vha, fcport); + } else { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC post gpsc fcp_cnt %d\n", + __func__, __LINE__, fcport->port_name, vha->fcport_count); + qla24xx_post_gpsc_work(vha, fcport); + } + } + break; + + case MODE_TARGET: + default: + break; + } + if (del) + qla2x00_free_fcport(del); + return fcport; } /* Must be called under tgt_mutex */ -static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha, +static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha, uint8_t *s_id) { - struct qla_tgt_sess *sess = NULL; + struct fc_port *sess = NULL; fc_port_t *fcport = NULL; int rc, global_resets; uint16_t loop_id = 0; + if ((s_id[0] == 0xFF) && (s_id[1] == 0xFC)) { + /* + * This is Domain Controller, so it should be + * OK to drop SCSI commands from it. + */ + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042, + "Unable to find initiator with S_ID %x:%x:%x", + s_id[0], s_id[1], s_id[2]); + return NULL; + } + mutex_lock(&vha->vha_tgt.tgt_mutex); retry: @@ -5828,21 +5758,11 @@ static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha, if (rc != 0) { mutex_unlock(&vha->vha_tgt.tgt_mutex); - if ((s_id[0] == 0xFF) && - (s_id[1] == 0xFC)) { - /* - * This is Domain Controller, so it should be - * OK to drop SCSI commands from it. - */ - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042, - "Unable to find initiator with S_ID %x:%x:%x", - s_id[0], s_id[1], s_id[2]); - } else - ql_log(ql_log_info, vha, 0xf071, - "qla_target(%d): Unable to find " - "initiator with S_ID %x:%x:%x", - vha->vp_idx, s_id[0], s_id[1], - s_id[2]); + ql_log(ql_log_info, vha, 0xf071, + "qla_target(%d): Unable to find " + "initiator with S_ID %x:%x:%x", + vha->vp_idx, s_id[0], s_id[1], + s_id[2]); if (rc == -ENOENT) { qlt_port_logo_t logo; @@ -5875,7 +5795,6 @@ static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha, mutex_unlock(&vha->vha_tgt.tgt_mutex); - kfree(fcport); return sess; } @@ -5884,7 +5803,7 @@ static void qlt_abort_work(struct qla_tgt *tgt, { struct scsi_qla_host *vha = tgt->vha; struct qla_hw_data *ha = vha->hw; - struct qla_tgt_sess *sess = NULL; + struct fc_port *sess = NULL; unsigned long flags = 0, flags2 = 0; uint32_t be_s_id; uint8_t s_id[3]; @@ -5911,37 +5830,37 @@ static void qlt_abort_work(struct qla_tgt *tgt, if (!sess) goto out_term2; } else { - if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { + if (sess->deleted) { sess = NULL; goto out_term2; } - kref_get(&sess->sess_kref); + if (!kref_get_unless_zero(&sess->sess_kref)) { + ql_dbg(ql_dbg_tgt_tmr, vha, 0xffff, + "%s: kref_get fail %8phC \n", + __func__, sess->port_name); + sess = NULL; + goto out_term2; + } } - spin_lock_irqsave(&ha->hardware_lock, flags); - - if (tgt->tgt_stop) - goto out_term; - rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess); + ha->tgt.tgt_ops->put_sess(sess); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); + if (rc != 0) goto out_term; - spin_unlock_irqrestore(&ha->hardware_lock, flags); - - qlt_put_sess(sess); - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); return; out_term2: - spin_lock_irqsave(&ha->hardware_lock, flags); + if (sess) + ha->tgt.tgt_ops->put_sess(sess); + 
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); out_term: + spin_lock_irqsave(&ha->hardware_lock, flags); qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false); spin_unlock_irqrestore(&ha->hardware_lock, flags); - - qlt_put_sess(sess); - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); } static void qlt_tmr_work(struct qla_tgt *tgt, @@ -5950,7 +5869,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt, struct atio_from_isp *a = &prm->tm_iocb2; struct scsi_qla_host *vha = tgt->vha; struct qla_hw_data *ha = vha->hw; - struct qla_tgt_sess *sess = NULL; + struct fc_port *sess = NULL; unsigned long flags; uint8_t *s_id = NULL; /* to hide compiler warnings */ int rc; @@ -5961,7 +5880,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt, spin_lock_irqsave(&ha->tgt.sess_lock, flags); if (tgt->tgt_stop) - goto out_term; + goto out_term2; s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id; sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); @@ -5973,14 +5892,20 @@ static void qlt_tmr_work(struct qla_tgt *tgt, spin_lock_irqsave(&ha->tgt.sess_lock, flags); if (!sess) - goto out_term; + goto out_term2; } else { - if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { + if (sess->deleted) { sess = NULL; - goto out_term; + goto out_term2; } - kref_get(&sess->sess_kref); + if (!kref_get_unless_zero(&sess->sess_kref)) { + ql_dbg(ql_dbg_tgt_tmr, vha, 0xffff, + "%s: kref_get fail %8phC\n", + __func__, sess->port_name); + sess = NULL; + goto out_term2; + } } iocb = a; @@ -5989,17 +5914,19 @@ static void qlt_tmr_work(struct qla_tgt *tgt, unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); + ha->tgt.tgt_ops->put_sess(sess); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + if (rc != 0) goto out_term; - - qlt_put_sess(sess); - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); return; +out_term2: + if (sess) + ha->tgt.tgt_ops->put_sess(sess); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); out_term: qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0); - qlt_put_sess(sess); - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); } static void qlt_sess_work_fn(struct work_struct *work) @@ -6075,17 +6002,10 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) tgt->ha = ha; tgt->vha = base_vha; init_waitqueue_head(&tgt->waitQ); - INIT_LIST_HEAD(&tgt->sess_list); INIT_LIST_HEAD(&tgt->del_sess_list); - INIT_DELAYED_WORK(&tgt->sess_del_work, - (void (*)(struct work_struct *))qlt_del_sess_work_fn); spin_lock_init(&tgt->sess_work_lock); INIT_WORK(&tgt->sess_work, qlt_sess_work_fn); INIT_LIST_HEAD(&tgt->sess_works_list); - spin_lock_init(&tgt->srr_lock); - INIT_LIST_HEAD(&tgt->srr_ctio_list); - INIT_LIST_HEAD(&tgt->srr_imm_list); - INIT_WORK(&tgt->srr_work, qlt_handle_srr_work); atomic_set(&tgt->tgt_global_resets_count, 0); base_vha->vha_tgt.qla_tgt = tgt; @@ -6099,13 +6019,13 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX; tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX; - if (base_vha->fc_vport) - return 0; - mutex_lock(&qla_tgt_mutex); list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist); mutex_unlock(&qla_tgt_mutex); + if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target) + ha->tgt.tgt_ops->add_target(base_vha); + return 0; } @@ -6134,6 +6054,17 @@ int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha) return 0; } +void qlt_remove_target_resources(struct qla_hw_data *ha) +{ + struct scsi_qla_host *node; + u32 
key = 0; + + btree_for_each_safe32(&ha->tgt.host_map, key, node) + btree_remove32(&ha->tgt.host_map, key); + + btree_destroy32(&ha->tgt.host_map); +} + static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn, unsigned char *b) { @@ -6251,29 +6182,25 @@ EXPORT_SYMBOL(qlt_lport_deregister); /* Must be called under HW lock */ static void qlt_set_mode(struct scsi_qla_host *vha) { - struct qla_hw_data *ha = vha->hw; - switch (ql2x_ini_mode) { case QLA2XXX_INI_MODE_DISABLED: case QLA2XXX_INI_MODE_EXCLUSIVE: vha->host->active_mode = MODE_TARGET; break; case QLA2XXX_INI_MODE_ENABLED: - vha->host->active_mode |= MODE_TARGET; + vha->host->active_mode = MODE_UNKNOWN; + break; + case QLA2XXX_INI_MODE_DUAL: + vha->host->active_mode = MODE_DUAL; break; default: break; } - - if (ha->tgt.ini_mode_force_reverse) - qla_reverse_ini_mode(vha); } /* Must be called under HW lock */ static void qlt_clear_mode(struct scsi_qla_host *vha) { - struct qla_hw_data *ha = vha->hw; - switch (ql2x_ini_mode) { case QLA2XXX_INI_MODE_DISABLED: vha->host->active_mode = MODE_UNKNOWN; @@ -6282,14 +6209,12 @@ static void qlt_clear_mode(struct scsi_qla_host *vha) vha->host->active_mode = MODE_INITIATOR; break; case QLA2XXX_INI_MODE_ENABLED: - vha->host->active_mode &= ~MODE_TARGET; + case QLA2XXX_INI_MODE_DUAL: + vha->host->active_mode = MODE_INITIATOR; break; default: break; } - - if (ha->tgt.ini_mode_force_reverse) - qla_reverse_ini_mode(vha); } /* @@ -6377,9 +6302,6 @@ static void qlt_disable_vha(struct scsi_qla_host *vha) void qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha) { - if (!qla_tgt_mode_enabled(vha)) - return; - vha->vha_tgt.qla_tgt = NULL; mutex_init(&vha->vha_tgt.tgt_mutex); @@ -6405,13 +6327,11 @@ qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req) * FC-4 Feature bit 0 indicates target functionality to the name server. 
*/ if (qla_tgt_mode_enabled(vha)) { - if (qla_ini_mode_enabled(vha)) - ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1; - else - ct_req->req.rff_id.fc4_feature = BIT_0; + ct_req->req.rff_id.fc4_feature = BIT_0; } else if (qla_ini_mode_enabled(vha)) { ct_req->req.rff_id.fc4_feature = BIT_1; - } + } else if (qla_dual_mode_enabled(vha)) + ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1; } /* @@ -6430,7 +6350,7 @@ qlt_init_atio_q_entries(struct scsi_qla_host *vha) uint16_t cnt; struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring; - if (!qla_tgt_mode_enabled(vha)) + if (qla_ini_mode_enabled(vha)) return; for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) { @@ -6451,7 +6371,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked) struct atio_from_isp *pkt; int cnt, i; - if (!vha->flags.online) + if (!ha->flags.fw_started) return; while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) || @@ -6523,8 +6443,10 @@ void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv) { struct qla_hw_data *ha = vha->hw; + u32 tmp; + u16 t; - if (qla_tgt_mode_enabled(vha)) { + if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) { if (!ha->tgt.saved_set) { /* We save only once */ ha->tgt.saved_exchange_count = nv->exchange_count; @@ -6537,13 +6459,30 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv) ha->tgt.saved_set = 1; } - nv->exchange_count = cpu_to_le16(0xFFFF); + if (qla_tgt_mode_enabled(vha)) { + nv->exchange_count = cpu_to_le16(0xFFFF); + } else { /* dual */ + if (ql_dm_tgt_ex_pct > 100) { + ql_dm_tgt_ex_pct = 50; + } else if (ql_dm_tgt_ex_pct == 100) { + /* leave some for FW */ + ql_dm_tgt_ex_pct = 95; + } + + tmp = ha->orig_fw_xcb_count * ql_dm_tgt_ex_pct; + tmp = tmp/100; + if (tmp > 0xffff) + tmp = 0xffff; + + t = tmp & 0xffff; + nv->exchange_count = cpu_to_le16(t); + } /* Enable target mode */ nv->firmware_options_1 |= cpu_to_le32(BIT_4); /* Disable ini mode, if requested */ - if (!qla_ini_mode_enabled(vha)) + if (qla_tgt_mode_enabled(vha)) nv->firmware_options_1 |= cpu_to_le32(BIT_5); /* Disable Full Login after LIP */ @@ -6622,11 +6561,13 @@ void qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv) { struct qla_hw_data *ha = vha->hw; + u32 tmp; + u16 t; if (!QLA_TGT_MODE_ENABLED()) return; - if (qla_tgt_mode_enabled(vha)) { + if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) { if (!ha->tgt.saved_set) { /* We save only once */ ha->tgt.saved_exchange_count = nv->exchange_count; @@ -6639,13 +6580,29 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv) ha->tgt.saved_set = 1; } - nv->exchange_count = cpu_to_le16(0xFFFF); + if (qla_tgt_mode_enabled(vha)) { + nv->exchange_count = cpu_to_le16(0xFFFF); + } else { /* dual */ + if (ql_dm_tgt_ex_pct > 100) { + ql_dm_tgt_ex_pct = 50; + } else if (ql_dm_tgt_ex_pct == 100) { + /* leave some for FW */ + ql_dm_tgt_ex_pct = 95; + } + + tmp = ha->orig_fw_xcb_count * ql_dm_tgt_ex_pct; + tmp = tmp/100; + if (tmp > 0xffff) + tmp = 0xffff; + t = tmp & 0xffff; + nv->exchange_count = cpu_to_le16(t); + } /* Enable target mode */ nv->firmware_options_1 |= cpu_to_le32(BIT_4); /* Disable ini mode, if requested */ - if (!qla_ini_mode_enabled(vha)) + if (qla_tgt_mode_enabled(vha)) nv->firmware_options_1 |= cpu_to_le32(BIT_5); /* Disable Full Login after LIP */ nv->firmware_options_1 &= cpu_to_le32(~BIT_13); @@ -6749,16 +6706,20 @@ void qlt_modify_vp_config(struct scsi_qla_host *vha, struct vp_config_entry_24xx 
*vpmod) { - if (qla_tgt_mode_enabled(vha)) + /* enable target mode. Bit5 = 1 => disable */ + if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) vpmod->options_idx1 &= ~BIT_5; - /* Disable ini mode, if requested */ - if (!qla_ini_mode_enabled(vha)) + + /* Disable ini mode, if requested. bit4 = 1 => disable */ + if (qla_tgt_mode_enabled(vha)) vpmod->options_idx1 &= ~BIT_4; } void qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) { + int rc; + if (!QLA_TGT_MODE_ENABLED()) return; @@ -6772,7 +6733,19 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) mutex_init(&base_vha->vha_tgt.tgt_mutex); mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex); + + INIT_LIST_HEAD(&base_vha->unknown_atio_list); + INIT_DELAYED_WORK(&base_vha->unknown_atio_work, + qlt_unknown_atio_work_fn); + qlt_clear_mode(base_vha); + + rc = btree_init32(&ha->tgt.host_map); + if (rc) + ql_log(ql_log_info, base_vha, 0xffff, + "Unable to initialize ha->host_map btree\n"); + + qlt_update_vp_map(base_vha, SET_VP_IDX); } irqreturn_t @@ -6815,6 +6788,8 @@ qlt_handle_abts_recv_work(struct work_struct *work) spin_lock_irqsave(&ha->hardware_lock, flags); qlt_response_pkt_all_vps(vha, (response_t *)&op->atio); spin_unlock_irqrestore(&ha->hardware_lock, flags); + + kfree(op); } void @@ -6879,25 +6854,69 @@ qlt_mem_free(struct qla_hw_data *ha) void qlt_update_vp_map(struct scsi_qla_host *vha, int cmd) { + void *slot; + u32 key; + int rc; + if (!QLA_TGT_MODE_ENABLED()) return; + key = vha->d_id.b24; + switch (cmd) { case SET_VP_IDX: vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha; break; case SET_AL_PA: - vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx; + slot = btree_lookup32(&vha->hw->tgt.host_map, key); + if (!slot) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, + "Save vha in host_map %p %06x\n", vha, key); + rc = btree_insert32(&vha->hw->tgt.host_map, + key, vha, GFP_ATOMIC); + if (rc) + ql_log(ql_log_info, vha, 0xffff, + "Unable to insert s_id into host_map: %06x\n", + key); + return; + } + ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, + "replace existing vha in host_map %p %06x\n", vha, key); + btree_update32(&vha->hw->tgt.host_map, key, vha); break; case RESET_VP_IDX: vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL; break; case RESET_AL_PA: - vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0; + ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, + "clear vha in host_map %p %06x\n", vha, key); + slot = btree_lookup32(&vha->hw->tgt.host_map, key); + if (slot) + btree_remove32(&vha->hw->tgt.host_map, key); + vha->d_id.b24 = 0; break; } } +void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id) +{ + unsigned long flags; + struct qla_hw_data *ha = vha->hw; + + if (!vha->d_id.b24) { + spin_lock_irqsave(&ha->vport_slock, flags); + vha->d_id = id; + qlt_update_vp_map(vha, SET_AL_PA); + spin_unlock_irqrestore(&ha->vport_slock, flags); + } else if (vha->d_id.b24 != id.b24) { + spin_lock_irqsave(&ha->vport_slock, flags); + qlt_update_vp_map(vha, RESET_AL_PA); + vha->d_id = id; + qlt_update_vp_map(vha, SET_AL_PA); + spin_unlock_irqrestore(&ha->vport_slock, flags); + } +} + static int __init qlt_parse_ini_mode(void) { if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0) @@ -6906,6 +6925,8 @@ static int __init qlt_parse_ini_mode(void) ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED; else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0) ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED; + else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DUAL) == 0) + ql2x_ini_mode = 
QLA2XXX_INI_MODE_DUAL; else return false; @@ -6935,9 +6956,8 @@ int __init qlt_init(void) } qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep", - sizeof(qlt_plogi_ack_t), - __alignof__(qlt_plogi_ack_t), - 0, NULL); + sizeof(struct qlt_plogi_ack_t), __alignof__(struct qlt_plogi_ack_t), + 0, NULL); if (!qla_tgt_plogi_cachep) { ql_log(ql_log_fatal, NULL, 0xe06d, diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index 0824a8164a2494..d64420251194eb 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -45,10 +45,12 @@ #define QLA2XXX_INI_MODE_STR_EXCLUSIVE "exclusive" #define QLA2XXX_INI_MODE_STR_DISABLED "disabled" #define QLA2XXX_INI_MODE_STR_ENABLED "enabled" +#define QLA2XXX_INI_MODE_STR_DUAL "dual" #define QLA2XXX_INI_MODE_EXCLUSIVE 0 #define QLA2XXX_INI_MODE_DISABLED 1 #define QLA2XXX_INI_MODE_ENABLED 2 +#define QLA2XXX_INI_MODE_DUAL 3 #define QLA2XXX_COMMAND_COUNT_INIT 250 #define QLA2XXX_IMMED_NOTIFY_COUNT_INIT 250 @@ -118,84 +120,6 @@ ? le16_to_cpu((iocb)->u.isp2x.target.extended) \ : (uint16_t)(iocb)->u.isp2x.target.id.standard) -#ifndef IMMED_NOTIFY_TYPE -#define IMMED_NOTIFY_TYPE 0x0D /* Immediate notify entry. */ -/* - * ISP queue - immediate notify entry structure definition. - * This is sent by the ISP to the Target driver. - * This IOCB would have report of events sent by the - * initiator, that needs to be handled by the target - * driver immediately. - */ -struct imm_ntfy_from_isp { - uint8_t entry_type; /* Entry type. */ - uint8_t entry_count; /* Entry count. */ - uint8_t sys_define; /* System defined. */ - uint8_t entry_status; /* Entry Status. */ - union { - struct { - uint32_t sys_define_2; /* System defined. */ - target_id_t target; - uint16_t lun; - uint8_t target_id; - uint8_t reserved_1; - uint16_t status_modifier; - uint16_t status; - uint16_t task_flags; - uint16_t seq_id; - uint16_t srr_rx_id; - uint32_t srr_rel_offs; - uint16_t srr_ui; -#define SRR_IU_DATA_IN 0x1 -#define SRR_IU_DATA_OUT 0x5 -#define SRR_IU_STATUS 0x7 - uint16_t srr_ox_id; - uint8_t reserved_2[28]; - } isp2x; - struct { - uint32_t reserved; - uint16_t nport_handle; - uint16_t reserved_2; - uint16_t flags; -#define NOTIFY24XX_FLAGS_GLOBAL_TPRLO BIT_1 -#define NOTIFY24XX_FLAGS_PUREX_IOCB BIT_0 - uint16_t srr_rx_id; - uint16_t status; - uint8_t status_subcode; - uint8_t fw_handle; - uint32_t exchange_address; - uint32_t srr_rel_offs; - uint16_t srr_ui; - uint16_t srr_ox_id; - union { - struct { - uint8_t node_name[8]; - } plogi; /* PLOGI/ADISC/PDISC */ - struct { - /* PRLI word 3 bit 0-15 */ - uint16_t wd3_lo; - uint8_t resv0[6]; - } prli; - struct { - uint8_t port_id[3]; - uint8_t resv1; - uint16_t nport_handle; - uint16_t resv2; - } req_els; - } u; - uint8_t port_name[8]; - uint8_t resv3[3]; - uint8_t vp_index; - uint32_t reserved_5; - uint8_t port_id[3]; - uint8_t reserved_6; - } isp24; - } u; - uint16_t reserved_7; - uint16_t ox_id; -} __packed; -#endif - #ifndef NOTIFY_ACK_TYPE #define NOTIFY_ACK_TYPE 0x0E /* Notify acknowledge entry. 
*/ /* @@ -454,6 +378,14 @@ static inline void adjust_corrupted_atio(struct atio_from_isp *atio) atio->u.isp24.fcp_cmnd.add_cdb_len = 0; } +static inline int get_datalen_for_atio(struct atio_from_isp *atio) +{ + int len = atio->u.isp24.fcp_cmnd.add_cdb_len; + + return (be32_to_cpu(get_unaligned((uint32_t *) + &atio->u.isp24.fcp_cmnd.add_cdb[len * 4]))); +} + #define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */ /* @@ -731,7 +663,7 @@ struct abts_resp_from_24xx_fw { \********************************************************************/ struct qla_tgt_mgmt_cmd; -struct qla_tgt_sess; +struct fc_port; /* * This structure provides a template of function calls that the @@ -743,22 +675,25 @@ struct qla_tgt_func_tmpl { int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *, unsigned char *, uint32_t, int, int, int); void (*handle_data)(struct qla_tgt_cmd *); - void (*handle_dif_err)(struct qla_tgt_cmd *); - int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint8_t, + int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint16_t, uint32_t); void (*free_cmd)(struct qla_tgt_cmd *); void (*free_mcmd)(struct qla_tgt_mgmt_cmd *); - void (*free_session)(struct qla_tgt_sess *); + void (*free_session)(struct fc_port *); int (*check_initiator_node_acl)(struct scsi_qla_host *, unsigned char *, - struct qla_tgt_sess *); - void (*update_sess)(struct qla_tgt_sess *, port_id_t, uint16_t, bool); - struct qla_tgt_sess *(*find_sess_by_loop_id)(struct scsi_qla_host *, + struct fc_port *); + void (*update_sess)(struct fc_port *, port_id_t, uint16_t, bool); + struct fc_port *(*find_sess_by_loop_id)(struct scsi_qla_host *, const uint16_t); - struct qla_tgt_sess *(*find_sess_by_s_id)(struct scsi_qla_host *, + struct fc_port *(*find_sess_by_s_id)(struct scsi_qla_host *, const uint8_t *); - void (*clear_nacl_from_fcport_map)(struct qla_tgt_sess *); - void (*shutdown_sess)(struct qla_tgt_sess *); + void (*clear_nacl_from_fcport_map)(struct fc_port *); + void (*put_sess)(struct fc_port *); + void (*shutdown_sess)(struct fc_port *); + int (*get_dif_tags)(struct qla_tgt_cmd *cmd, uint16_t *pfw_prot_opts); + int (*chk_dif_tags)(uint32_t tag); + void (*add_target)(struct scsi_qla_host *); }; int qla2x00_wait_for_hba_online(struct scsi_qla_host *); @@ -795,6 +730,8 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *); #define QLA_TGT_ABORT_ALL 0xFFFE #define QLA_TGT_NEXUS_LOSS_SESS 0xFFFD #define QLA_TGT_NEXUS_LOSS 0xFFFC +#define QLA_TGT_ABTS 0xFFFB +#define QLA_TGT_2G_ABORT_TASK 0xFFFA /* Notify Acknowledge flags */ #define NOTIFY_ACK_RES_COUNT BIT_8 @@ -872,12 +809,8 @@ struct qla_tgt { /* Count of sessions refering qla_tgt. Protected by hardware_lock. */ int sess_count; - /* Protected by hardware_lock. Addition also protected by tgt_mutex. 
*/ - struct list_head sess_list; - /* Protected by hardware_lock */ struct list_head del_sess_list; - struct delayed_work sess_del_work; spinlock_t sess_work_lock; struct list_head sess_works_list; @@ -888,16 +821,7 @@ struct qla_tgt { int notify_ack_expected; int abts_resp_expected; int modify_lun_expected; - - int ctio_srr_id; - int imm_srr_id; - spinlock_t srr_lock; - struct list_head srr_ctio_list; - struct list_head srr_imm_list; - struct work_struct srr_work; - atomic_t tgt_global_resets_count; - struct list_head tgt_list_entry; }; @@ -910,92 +834,33 @@ struct qla_tgt_sess_op { bool aborted; }; -enum qla_sess_deletion { - QLA_SESS_DELETION_NONE = 0, - QLA_SESS_DELETION_PENDING = 1, /* hopefully we can get rid of - * this one */ - QLA_SESS_DELETION_IN_PROGRESS = 2, -}; - -typedef enum { - QLT_PLOGI_LINK_SAME_WWN, - QLT_PLOGI_LINK_CONFLICT, - QLT_PLOGI_LINK_MAX -} qlt_plogi_link_t; - -typedef struct { - struct list_head list; - struct imm_ntfy_from_isp iocb; - port_id_t id; - int ref_count; -} qlt_plogi_ack_t; - -/* - * Equivilant to IT Nexus (Initiator-Target) - */ -struct qla_tgt_sess { - uint16_t loop_id; - port_id_t s_id; - - unsigned int conf_compl_supported:1; - unsigned int deleted:2; - unsigned int local:1; - unsigned int logout_on_delete:1; - unsigned int keep_nport_handle:1; - unsigned int send_els_logo:1; - - unsigned char logout_completed; - - int generation; - - struct se_session *se_sess; - struct kref sess_kref; - struct scsi_qla_host *vha; - struct qla_tgt *tgt; - - struct list_head sess_list_entry; - unsigned long expires; - struct list_head del_list_entry; - - uint8_t port_name[WWN_SIZE]; - struct work_struct free_work; - - qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX]; +enum trace_flags { + TRC_NEW_CMD = BIT_0, + TRC_DO_WORK = BIT_1, + TRC_DO_WORK_ERR = BIT_2, + TRC_XFR_RDY = BIT_3, + TRC_XMIT_DATA = BIT_4, + TRC_XMIT_STATUS = BIT_5, + TRC_SRR_RSP = BIT_6, + TRC_SRR_XRDY = BIT_7, + TRC_SRR_TERM = BIT_8, + TRC_SRR_CTIO = BIT_9, + TRC_FLUSH = BIT_10, + TRC_CTIO_ERR = BIT_11, + TRC_CTIO_DONE = BIT_12, + TRC_CTIO_ABORTED = BIT_13, + TRC_CTIO_STRANGE= BIT_14, + TRC_CMD_DONE = BIT_15, + TRC_CMD_CHK_STOP = BIT_16, + TRC_CMD_FREE = BIT_17, + TRC_DATA_IN = BIT_18, + TRC_ABORT = BIT_19, + TRC_DIF_ERR = BIT_20, }; -typedef enum { - /* - * BIT_0 - Atio Arrival / schedule to work - * BIT_1 - qlt_do_work - * BIT_2 - qlt_do work failed - * BIT_3 - xfer rdy/tcm_qla2xxx_write_pending - * BIT_4 - read respond/tcm_qla2xx_queue_data_in - * BIT_5 - status respond / tcm_qla2xx_queue_status - * BIT_6 - tcm request to abort/Term exchange. - * pre_xmit_response->qlt_send_term_exchange - * BIT_7 - SRR received (qlt_handle_srr->qlt_xmit_response) - * BIT_8 - SRR received (qlt_handle_srr->qlt_rdy_to_xfer) - * BIT_9 - SRR received (qla_handle_srr->qlt_send_term_exchange) - * BIT_10 - Data in - hanlde_data->tcm_qla2xxx_handle_data - - * BIT_12 - good completion - qlt_ctio_do_completion -->free_cmd - * BIT_13 - Bad completion - - * qlt_ctio_do_completion --> qlt_term_ctio_exchange - * BIT_14 - Back end data received/sent. 
- * BIT_15 - SRR prepare ctio - * BIT_16 - complete free - * BIT_17 - flush - qlt_abort_cmd_on_host_reset - * BIT_18 - completion w/abort status - * BIT_19 - completion w/unknown status - * BIT_20 - tcm_qla2xxx_free_cmd - */ - CMD_FLAG_DATA_WORK = BIT_11, - CMD_FLAG_DATA_WORK_FREE = BIT_21, -} cmd_flags_t; - struct qla_tgt_cmd { struct se_cmd se_cmd; - struct qla_tgt_sess *sess; + struct fc_port *sess; int state; struct work_struct free_work; struct work_struct work; @@ -1008,12 +873,13 @@ struct qla_tgt_cmd { unsigned int sg_mapped:1; unsigned int free_sg:1; unsigned int write_data_transferred:1; - unsigned int ctx_dsd_alloced:1; unsigned int q_full:1; unsigned int term_exchg:1; unsigned int cmd_sent_to_fw:1; unsigned int cmd_in_wq:1; unsigned int aborted:1; + unsigned int data_work:1; + unsigned int data_work_free:1; struct scatterlist *sg; /* cmd data buffer SG vector */ int sg_cnt; /* SG segments count */ @@ -1029,16 +895,30 @@ struct qla_tgt_cmd { struct list_head cmd_list; struct atio_from_isp atio; - /* t10dif */ + + uint8_t ctx_dsd_alloced; + + /* T10-DIF */ +#define DIF_ERR_NONE 0 +#define DIF_ERR_GRD 1 +#define DIF_ERR_REF 2 +#define DIF_ERR_APP 3 + int8_t dif_err_code; struct scatterlist *prot_sg; uint32_t prot_sg_cnt; - uint32_t blk_sz; + uint32_t blk_sz, num_blks; + uint8_t scsi_status, sense_key, asc, ascq; + struct crc_context *ctx; + uint8_t *cdb; + uint64_t lba; + uint16_t a_guard, e_guard, a_app_tag, e_app_tag; + uint32_t a_ref_tag, e_ref_tag; uint64_t jiffies_at_alloc; uint64_t jiffies_at_free; - cmd_flags_t cmd_flags; + enum trace_flags trc_flags; }; struct qla_tgt_sess_work_param { @@ -1056,9 +936,9 @@ struct qla_tgt_sess_work_param { }; struct qla_tgt_mgmt_cmd { - uint8_t tmr_func; + uint16_t tmr_func; uint8_t fc_tm_rsp; - struct qla_tgt_sess *sess; + struct fc_port *sess; struct se_cmd se_cmd; struct work_struct free_work; unsigned int flags; @@ -1090,18 +970,6 @@ struct qla_tgt_prm { uint16_t tot_dsds; }; -struct qla_tgt_srr_imm { - struct list_head srr_list_entry; - int srr_id; - struct imm_ntfy_from_isp imm_ntfy; -}; - -struct qla_tgt_srr_ctio { - struct list_head srr_list_entry; - int srr_id; - struct qla_tgt_cmd *cmd; -}; - /* Check for Switch reserved address */ #define IS_SW_RESV_ADDR(_s_id) \ ((_s_id.b.domain == 0xff) && (_s_id.b.area == 0xfc)) @@ -1121,7 +989,7 @@ extern int qlt_remove_target(struct qla_hw_data *, struct scsi_qla_host *); extern int qlt_lport_register(void *, u64, u64, u64, int (*callback)(struct scsi_qla_host *, void *, u64, u64)); extern void qlt_lport_deregister(struct scsi_qla_host *); -void qlt_put_sess(struct qla_tgt_sess *sess); +extern void qlt_unreg_sess(struct fc_port *); extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *); extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *, int); extern int __init qlt_init(void); @@ -1133,24 +1001,22 @@ extern void qlt_update_vp_map(struct scsi_qla_host *, int); * is not set. Right now, ha value is ignored. 
*/ #define QLA_TGT_MODE_ENABLED() (ql2x_ini_mode != QLA2XXX_INI_MODE_ENABLED) + extern int ql2x_ini_mode; static inline bool qla_tgt_mode_enabled(struct scsi_qla_host *ha) { - return ha->host->active_mode & MODE_TARGET; + return ha->host->active_mode == MODE_TARGET; } static inline bool qla_ini_mode_enabled(struct scsi_qla_host *ha) { - return ha->host->active_mode & MODE_INITIATOR; + return ha->host->active_mode == MODE_INITIATOR; } -static inline void qla_reverse_ini_mode(struct scsi_qla_host *ha) +static inline bool qla_dual_mode_enabled(struct scsi_qla_host *ha) { - if (ha->host->active_mode & MODE_INITIATOR) - ha->host->active_mode &= ~MODE_INITIATOR; - else - ha->host->active_mode |= MODE_INITIATOR; + return (ha->host->active_mode == MODE_DUAL); } static inline uint32_t sid_to_key(const uint8_t *s_id) @@ -1211,4 +1077,7 @@ extern int qlt_free_qfull_cmds(struct scsi_qla_host *); extern void qlt_logo_completion_handler(fc_port_t *, int); extern void qlt_do_generation_tick(struct scsi_qla_host *, int *); +void qlt_send_resp_ctio(scsi_qla_host_t *, struct qla_tgt_cmd *, uint8_t, + uint8_t, uint8_t, uint8_t); + #endif /* __QLA_TARGET_H */ diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 3cb1964b7786e4..45bc84e8e3bf50 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,9 +7,9 @@ /* * Driver version */ -#define QLA2XXX_VERSION "8.07.00.38-k" +#define QLA2XXX_VERSION "9.00.00.00-k" -#define QLA_DRIVER_MAJOR_VER 8 -#define QLA_DRIVER_MINOR_VER 7 +#define QLA_DRIVER_MAJOR_VER 9 +#define QLA_DRIVER_MINOR_VER 0 #define QLA_DRIVER_PATCH_VER 0 #define QLA_DRIVER_BETA_VER 0 diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 3084983c128720..7443e4efa3aed4 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -282,10 +282,10 @@ static void tcm_qla2xxx_complete_free(struct work_struct *work) cmd->cmd_in_wq = 0; - WARN_ON(cmd->cmd_flags & BIT_16); + WARN_ON(cmd->trc_flags & TRC_CMD_FREE); cmd->vha->tgt_counters.qla_core_ret_sta_ctio++; - cmd->cmd_flags |= BIT_16; + cmd->trc_flags |= TRC_CMD_FREE; transport_generic_free_cmd(&cmd->se_cmd, 0); } @@ -299,8 +299,8 @@ static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd) cmd->vha->tgt_counters.core_qla_free_cmd++; cmd->cmd_in_wq = 1; - BUG_ON(cmd->cmd_flags & BIT_20); - cmd->cmd_flags |= BIT_20; + WARN_ON(cmd->trc_flags & TRC_CMD_DONE); + cmd->trc_flags |= TRC_CMD_DONE; INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free); queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work); @@ -315,7 +315,7 @@ static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd) if ((se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) == 0) { cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); - cmd->cmd_flags |= BIT_14; + cmd->trc_flags |= TRC_CMD_CHK_STOP; } return target_put_sess_cmd(se_cmd); @@ -339,9 +339,26 @@ static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd) qlt_free_cmd(cmd); } +static void tcm_qla2xxx_release_session(struct kref *kref) +{ + struct fc_port *sess = container_of(kref, + struct fc_port, sess_kref); + + qlt_unreg_sess(sess); +} + +static void tcm_qla2xxx_put_sess(struct fc_port *sess) +{ + if (!sess) + return; + + assert_spin_locked(&sess->vha->hw->tgt.sess_lock); + kref_put(&sess->sess_kref, tcm_qla2xxx_release_session); +} + static void tcm_qla2xxx_close_session(struct se_session *se_sess) { - struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr; + struct fc_port *sess = 
se_sess->fabric_sess_ptr; struct scsi_qla_host *vha; unsigned long flags; @@ -350,7 +367,7 @@ static void tcm_qla2xxx_close_session(struct se_session *se_sess) spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); target_sess_cmd_list_set_waiting(se_sess); - qlt_put_sess(sess); + tcm_qla2xxx_put_sess(sess); spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); } @@ -377,7 +394,7 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd) cmd->se_cmd.se_cmd_flags); return 0; } - cmd->cmd_flags |= BIT_3; + cmd->trc_flags |= TRC_XFR_RDY; cmd->bufflen = se_cmd->data_length; cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); @@ -441,7 +458,7 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, { struct se_cmd *se_cmd = &cmd->se_cmd; struct se_session *se_sess; - struct qla_tgt_sess *sess; + struct fc_port *sess; #ifdef CONFIG_TCM_QLA2XXX_DEBUG struct se_portal_group *se_tpg; struct tcm_qla2xxx_tpg *tpg; @@ -456,7 +473,7 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, sess = cmd->sess; if (!sess) { - pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n"); + pr_err("Unable to locate struct fc_port from qla_tgt_cmd\n"); return -EINVAL; } @@ -493,9 +510,9 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work) cmd->cmd_in_wq = 0; spin_lock_irqsave(&cmd->cmd_lock, flags); - cmd->cmd_flags |= CMD_FLAG_DATA_WORK; + cmd->data_work = 1; if (cmd->aborted) { - cmd->cmd_flags |= CMD_FLAG_DATA_WORK_FREE; + cmd->data_work_free = 1; spin_unlock_irqrestore(&cmd->cmd_lock, flags); tcm_qla2xxx_free_cmd(cmd); @@ -514,6 +531,24 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work) return; } + switch (cmd->dif_err_code) { + case DIF_ERR_GRD: + cmd->se_cmd.pi_err = + TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; + break; + case DIF_ERR_REF: + cmd->se_cmd.pi_err = + TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; + break; + case DIF_ERR_APP: + cmd->se_cmd.pi_err = + TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED; + break; + case DIF_ERR_NONE: + default: + break; + } + if (cmd->se_cmd.pi_err) transport_generic_request_failure(&cmd->se_cmd, cmd->se_cmd.pi_err); @@ -532,44 +567,78 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work) */ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd) { - cmd->cmd_flags |= BIT_10; + cmd->trc_flags |= TRC_DATA_IN; cmd->cmd_in_wq = 1; INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work); queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work); } -static void tcm_qla2xxx_handle_dif_work(struct work_struct *work) +static int tcm_qla2xxx_chk_dif_tags(uint32_t tag) { - struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); - - /* take an extra kref to prevent cmd free too early. - * need to wait for SCSI status/check condition to - * finish responding generate by transport_generic_request_failure. 
- */ - kref_get(&cmd->se_cmd.cmd_kref); - transport_generic_request_failure(&cmd->se_cmd, cmd->se_cmd.pi_err); + return 0; } -/* - * Called from qla_target.c:qlt_do_ctio_completion() - */ -static void tcm_qla2xxx_handle_dif_err(struct qla_tgt_cmd *cmd) +static int tcm_qla2xxx_dif_tags(struct qla_tgt_cmd *cmd, + uint16_t *pfw_prot_opts) { - INIT_WORK(&cmd->work, tcm_qla2xxx_handle_dif_work); - queue_work(tcm_qla2xxx_free_wq, &cmd->work); + struct se_cmd *se_cmd = &cmd->se_cmd; + + if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD)) + *pfw_prot_opts |= PO_DISABLE_GUARD_CHECK; + + if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG)) + *pfw_prot_opts |= PO_DIS_APP_TAG_VALD; + + return 0; } /* * Called from qla_target.c:qlt_issue_task_mgmt() */ static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun, - uint8_t tmr_func, uint32_t tag) + uint16_t tmr_func, uint32_t tag) { - struct qla_tgt_sess *sess = mcmd->sess; + struct fc_port *sess = mcmd->sess; struct se_cmd *se_cmd = &mcmd->se_cmd; + int transl_tmr_func = 0; + + switch (tmr_func) { + case QLA_TGT_ABTS: + pr_debug("%ld: ABTS received\n", sess->vha->host_no); + transl_tmr_func = TMR_ABORT_TASK; + break; + case QLA_TGT_2G_ABORT_TASK: + pr_debug("%ld: 2G Abort Task received\n", sess->vha->host_no); + transl_tmr_func = TMR_ABORT_TASK; + break; + case QLA_TGT_CLEAR_ACA: + pr_debug("%ld: CLEAR_ACA received\n", sess->vha->host_no); + transl_tmr_func = TMR_CLEAR_ACA; + break; + case QLA_TGT_TARGET_RESET: + pr_debug("%ld: TARGET_RESET received\n", sess->vha->host_no); + transl_tmr_func = TMR_TARGET_WARM_RESET; + break; + case QLA_TGT_LUN_RESET: + pr_debug("%ld: LUN_RESET received\n", sess->vha->host_no); + transl_tmr_func = TMR_LUN_RESET; + break; + case QLA_TGT_CLEAR_TS: + pr_debug("%ld: CLEAR_TS received\n", sess->vha->host_no); + transl_tmr_func = TMR_CLEAR_TASK_SET; + break; + case QLA_TGT_ABORT_TS: + pr_debug("%ld: ABORT_TS received\n", sess->vha->host_no); + transl_tmr_func = TMR_ABORT_TASK_SET; + break; + default: + pr_debug("%ld: Unknown task mgmt fn 0x%x\n", + sess->vha->host_no, tmr_func); + return -ENOSYS; + } return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd, - tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF); + transl_tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF); } static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd) @@ -591,7 +660,7 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd) return 0; } - cmd->cmd_flags |= BIT_4; + cmd->trc_flags |= TRC_XMIT_DATA; cmd->bufflen = se_cmd->data_length; cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); @@ -622,11 +691,11 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd) cmd->sg_cnt = 0; cmd->offset = 0; cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); - if (cmd->cmd_flags & BIT_5) { - pr_crit("Bit_5 already set for cmd = %p.\n", cmd); + if (cmd->trc_flags & TRC_XMIT_STATUS) { + pr_crit("Multiple calls for status = %p.\n", cmd); dump_stack(); } - cmd->cmd_flags |= BIT_5; + cmd->trc_flags |= TRC_XMIT_STATUS; if (se_cmd->data_direction == DMA_FROM_DEVICE) { /* @@ -682,10 +751,7 @@ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd) qlt_xmit_tm_rsp(mcmd); } - -#define DATA_WORK_NOT_FREE(_flags) \ - (( _flags & (CMD_FLAG_DATA_WORK|CMD_FLAG_DATA_WORK_FREE)) == \ - CMD_FLAG_DATA_WORK) +#define DATA_WORK_NOT_FREE(_cmd) (_cmd->data_work && !_cmd->data_work_free) static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) { struct qla_tgt_cmd *cmd = container_of(se_cmd, @@ -697,13 +763,13 @@ static void 
tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) spin_lock_irqsave(&cmd->cmd_lock, flags); if ((cmd->state == QLA_TGT_STATE_NEW)|| - ((cmd->state == QLA_TGT_STATE_DATA_IN) && - DATA_WORK_NOT_FREE(cmd->cmd_flags)) ) { - - cmd->cmd_flags |= CMD_FLAG_DATA_WORK_FREE; + ((cmd->state == QLA_TGT_STATE_DATA_IN) && + DATA_WORK_NOT_FREE(cmd))) { + cmd->data_work_free = 1; spin_unlock_irqrestore(&cmd->cmd_lock, flags); - /* Cmd have not reached firmware. - * Use this trigger to free it. */ + /* + * cmd has not reached fw, Use this trigger to free it. + */ tcm_qla2xxx_free_cmd(cmd); return; } @@ -713,11 +779,11 @@ static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) } static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *, - struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *); + struct tcm_qla2xxx_nacl *, struct fc_port *); /* * Expected to be called with struct qla_hw_data->tgt.sess_lock held */ -static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess) +static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct fc_port *sess) { struct se_node_acl *se_nacl = sess->se_sess->se_node_acl; struct se_portal_group *se_tpg = se_nacl->se_tpg; @@ -756,7 +822,7 @@ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess) tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess); } -static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess) +static void tcm_qla2xxx_shutdown_sess(struct fc_port *sess) { assert_spin_locked(&sess->vha->hw->tgt.sess_lock); target_sess_cmd_list_set_waiting(sess->se_sess); @@ -1141,7 +1207,7 @@ static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg( /* * Expected to be called with struct qla_hw_data->tgt.sess_lock held */ -static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id( +static struct fc_port *tcm_qla2xxx_find_sess_by_s_id( scsi_qla_host_t *vha, const uint8_t *s_id) { @@ -1169,12 +1235,12 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id( se_nacl, se_nacl->initiatorname); nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); - if (!nacl->qla_tgt_sess) { - pr_err("Unable to locate struct qla_tgt_sess\n"); + if (!nacl->fc_port) { + pr_err("Unable to locate struct fc_port\n"); return NULL; } - return nacl->qla_tgt_sess; + return nacl->fc_port; } /* @@ -1185,7 +1251,7 @@ static void tcm_qla2xxx_set_sess_by_s_id( struct se_node_acl *new_se_nacl, struct tcm_qla2xxx_nacl *nacl, struct se_session *se_sess, - struct qla_tgt_sess *qla_tgt_sess, + struct fc_port *fc_port, uint8_t *s_id) { u32 key; @@ -1209,22 +1275,22 @@ static void tcm_qla2xxx_set_sess_by_s_id( pr_debug("Wiping nonexisting fc_port entry\n"); } - qla_tgt_sess->se_sess = se_sess; - nacl->qla_tgt_sess = qla_tgt_sess; + fc_port->se_sess = se_sess; + nacl->fc_port = fc_port; return; } - if (nacl->qla_tgt_sess) { + if (nacl->fc_port) { if (new_se_nacl == NULL) { - pr_debug("Clearing existing nacl->qla_tgt_sess and fc_port entry\n"); + pr_debug("Clearing existing nacl->fc_port and fc_port entry\n"); btree_remove32(&lport->lport_fcport_map, key); - nacl->qla_tgt_sess = NULL; + nacl->fc_port = NULL; return; } - pr_debug("Replacing existing nacl->qla_tgt_sess and fc_port entry\n"); + pr_debug("Replacing existing nacl->fc_port and fc_port entry\n"); btree_update32(&lport->lport_fcport_map, key, new_se_nacl); - qla_tgt_sess->se_sess = se_sess; - nacl->qla_tgt_sess = qla_tgt_sess; + fc_port->se_sess = se_sess; + nacl->fc_port = fc_port; return; } @@ -1234,19 +1300,19 @@ static void tcm_qla2xxx_set_sess_by_s_id( return; } - pr_debug("Replacing 
existing fc_port entry w/o active nacl->qla_tgt_sess\n"); + pr_debug("Replacing existing fc_port entry w/o active nacl->fc_port\n"); btree_update32(&lport->lport_fcport_map, key, new_se_nacl); - qla_tgt_sess->se_sess = se_sess; - nacl->qla_tgt_sess = qla_tgt_sess; + fc_port->se_sess = se_sess; + nacl->fc_port = fc_port; - pr_debug("Setup nacl->qla_tgt_sess %p by s_id for se_nacl: %p, initiatorname: %s\n", - nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname); + pr_debug("Setup nacl->fc_port %p by s_id for se_nacl: %p, initiatorname: %s\n", + nacl->fc_port, new_se_nacl, new_se_nacl->initiatorname); } /* * Expected to be called with struct qla_hw_data->tgt.sess_lock held */ -static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id( +static struct fc_port *tcm_qla2xxx_find_sess_by_loop_id( scsi_qla_host_t *vha, const uint16_t loop_id) { @@ -1274,12 +1340,12 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id( nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); - if (!nacl->qla_tgt_sess) { - pr_err("Unable to locate struct qla_tgt_sess\n"); + if (!nacl->fc_port) { + pr_err("Unable to locate struct fc_port\n"); return NULL; } - return nacl->qla_tgt_sess; + return nacl->fc_port; } /* @@ -1290,7 +1356,7 @@ static void tcm_qla2xxx_set_sess_by_loop_id( struct se_node_acl *new_se_nacl, struct tcm_qla2xxx_nacl *nacl, struct se_session *se_sess, - struct qla_tgt_sess *qla_tgt_sess, + struct fc_port *fc_port, uint16_t loop_id) { struct se_node_acl *saved_nacl; @@ -1305,27 +1371,27 @@ static void tcm_qla2xxx_set_sess_by_loop_id( if (!saved_nacl) { pr_debug("Setting up new fc_loopid->se_nacl to new_se_nacl\n"); fc_loopid->se_nacl = new_se_nacl; - if (qla_tgt_sess->se_sess != se_sess) - qla_tgt_sess->se_sess = se_sess; - if (nacl->qla_tgt_sess != qla_tgt_sess) - nacl->qla_tgt_sess = qla_tgt_sess; + if (fc_port->se_sess != se_sess) + fc_port->se_sess = se_sess; + if (nacl->fc_port != fc_port) + nacl->fc_port = fc_port; return; } - if (nacl->qla_tgt_sess) { + if (nacl->fc_port) { if (new_se_nacl == NULL) { - pr_debug("Clearing nacl->qla_tgt_sess and fc_loopid->se_nacl\n"); + pr_debug("Clearing nacl->fc_port and fc_loopid->se_nacl\n"); fc_loopid->se_nacl = NULL; - nacl->qla_tgt_sess = NULL; + nacl->fc_port = NULL; return; } - pr_debug("Replacing existing nacl->qla_tgt_sess and fc_loopid->se_nacl\n"); + pr_debug("Replacing existing nacl->fc_port and fc_loopid->se_nacl\n"); fc_loopid->se_nacl = new_se_nacl; - if (qla_tgt_sess->se_sess != se_sess) - qla_tgt_sess->se_sess = se_sess; - if (nacl->qla_tgt_sess != qla_tgt_sess) - nacl->qla_tgt_sess = qla_tgt_sess; + if (fc_port->se_sess != se_sess) + fc_port->se_sess = se_sess; + if (nacl->fc_port != fc_port) + nacl->fc_port = fc_port; return; } @@ -1335,29 +1401,29 @@ static void tcm_qla2xxx_set_sess_by_loop_id( return; } - pr_debug("Replacing existing fc_loopid->se_nacl w/o active nacl->qla_tgt_sess\n"); + pr_debug("Replacing existing fc_loopid->se_nacl w/o active nacl->fc_port\n"); fc_loopid->se_nacl = new_se_nacl; - if (qla_tgt_sess->se_sess != se_sess) - qla_tgt_sess->se_sess = se_sess; - if (nacl->qla_tgt_sess != qla_tgt_sess) - nacl->qla_tgt_sess = qla_tgt_sess; + if (fc_port->se_sess != se_sess) + fc_port->se_sess = se_sess; + if (nacl->fc_port != fc_port) + nacl->fc_port = fc_port; - pr_debug("Setup nacl->qla_tgt_sess %p by loop_id for se_nacl: %p, initiatorname: %s\n", - nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname); + pr_debug("Setup nacl->fc_port %p by loop_id for se_nacl: %p, initiatorname: %s\n", 
+ nacl->fc_port, new_se_nacl, new_se_nacl->initiatorname); } /* * Should always be called with qla_hw_data->tgt.sess_lock held. */ static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport, - struct tcm_qla2xxx_nacl *nacl, struct qla_tgt_sess *sess) + struct tcm_qla2xxx_nacl *nacl, struct fc_port *sess) { struct se_session *se_sess = sess->se_sess; unsigned char be_sid[3]; - be_sid[0] = sess->s_id.b.domain; - be_sid[1] = sess->s_id.b.area; - be_sid[2] = sess->s_id.b.al_pa; + be_sid[0] = sess->d_id.b.domain; + be_sid[1] = sess->d_id.b.area; + be_sid[2] = sess->d_id.b.al_pa; tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess, sess, be_sid); @@ -1365,7 +1431,7 @@ static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport, sess, sess->loop_id); } -static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess) +static void tcm_qla2xxx_free_session(struct fc_port *sess) { struct qla_tgt *tgt = sess->tgt; struct qla_hw_data *ha = tgt->ha; @@ -1377,7 +1443,7 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess) se_sess = sess->se_sess; if (!se_sess) { - pr_err("struct qla_tgt_sess->se_sess is NULL\n"); + pr_err("struct fc_port->se_sess is NULL\n"); dump_stack(); return; } @@ -1404,14 +1470,14 @@ static int tcm_qla2xxx_session_cb(struct se_portal_group *se_tpg, struct se_node_acl *se_nacl = se_sess->se_node_acl; struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); - struct qla_tgt_sess *qlat_sess = p; + struct fc_port *qlat_sess = p; uint16_t loop_id = qlat_sess->loop_id; unsigned long flags; unsigned char be_sid[3]; - be_sid[0] = qlat_sess->s_id.b.domain; - be_sid[1] = qlat_sess->s_id.b.area; - be_sid[2] = qlat_sess->s_id.b.al_pa; + be_sid[0] = qlat_sess->d_id.b.domain; + be_sid[1] = qlat_sess->d_id.b.area; + be_sid[2] = qlat_sess->d_id.b.al_pa; /* * And now setup se_nacl and session pointers into HW lport internal @@ -1434,7 +1500,7 @@ static int tcm_qla2xxx_session_cb(struct se_portal_group *se_tpg, static int tcm_qla2xxx_check_initiator_node_acl( scsi_qla_host_t *vha, unsigned char *fc_wwpn, - struct qla_tgt_sess *qlat_sess) + struct fc_port *qlat_sess) { struct qla_hw_data *ha = vha->hw; struct tcm_qla2xxx_lport *lport; @@ -1478,7 +1544,7 @@ static int tcm_qla2xxx_check_initiator_node_acl( return 0; } -static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id, +static void tcm_qla2xxx_update_sess(struct fc_port *sess, port_id_t s_id, uint16_t loop_id, bool conf_compl_supported) { struct qla_tgt *tgt = sess->tgt; @@ -1491,11 +1557,11 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id, u32 key; - if (sess->loop_id != loop_id || sess->s_id.b24 != s_id.b24) + if (sess->loop_id != loop_id || sess->d_id.b24 != s_id.b24) pr_info("Updating session %p from port %8phC loop_id %d -> %d s_id %x:%x:%x -> %x:%x:%x\n", sess, sess->port_name, - sess->loop_id, loop_id, sess->s_id.b.domain, - sess->s_id.b.area, sess->s_id.b.al_pa, s_id.b.domain, + sess->loop_id, loop_id, sess->d_id.b.domain, + sess->d_id.b.area, sess->d_id.b.al_pa, s_id.b.domain, s_id.b.area, s_id.b.al_pa); if (sess->loop_id != loop_id) { @@ -1515,18 +1581,20 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id, sess->loop_id = loop_id; } - if (sess->s_id.b24 != s_id.b24) { - key = (((u32) sess->s_id.b.domain << 16) | - ((u32) sess->s_id.b.area << 8) | - ((u32) sess->s_id.b.al_pa)); + if (sess->d_id.b24 != s_id.b24) { + key = (((u32) sess->d_id.b.domain << 16) | + ((u32) 
sess->d_id.b.area << 8) | + ((u32) sess->d_id.b.al_pa)); if (btree_lookup32(&lport->lport_fcport_map, key)) - WARN(btree_remove32(&lport->lport_fcport_map, key) != se_nacl, - "Found wrong se_nacl when updating s_id %x:%x:%x\n", - sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa); + WARN(btree_remove32(&lport->lport_fcport_map, key) != + se_nacl, "Found wrong se_nacl when updating s_id %x:%x:%x\n", + sess->d_id.b.domain, sess->d_id.b.area, + sess->d_id.b.al_pa); else WARN(1, "No lport_fcport_map entry for s_id %x:%x:%x\n", - sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa); + sess->d_id.b.domain, sess->d_id.b.area, + sess->d_id.b.al_pa); key = (((u32) s_id.b.domain << 16) | ((u32) s_id.b.area << 8) | @@ -1537,10 +1605,11 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id, s_id.b.domain, s_id.b.area, s_id.b.al_pa); btree_update32(&lport->lport_fcport_map, key, se_nacl); } else { - btree_insert32(&lport->lport_fcport_map, key, se_nacl, GFP_ATOMIC); + btree_insert32(&lport->lport_fcport_map, key, se_nacl, + GFP_ATOMIC); } - sess->s_id = s_id; + sess->d_id = s_id; nacl->nport_id = key; } @@ -1557,7 +1626,6 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id, static struct qla_tgt_func_tmpl tcm_qla2xxx_template = { .handle_cmd = tcm_qla2xxx_handle_cmd, .handle_data = tcm_qla2xxx_handle_data, - .handle_dif_err = tcm_qla2xxx_handle_dif_err, .handle_tmr = tcm_qla2xxx_handle_tmr, .free_cmd = tcm_qla2xxx_free_cmd, .free_mcmd = tcm_qla2xxx_free_mcmd, @@ -1567,7 +1635,10 @@ static struct qla_tgt_func_tmpl tcm_qla2xxx_template = { .find_sess_by_s_id = tcm_qla2xxx_find_sess_by_s_id, .find_sess_by_loop_id = tcm_qla2xxx_find_sess_by_loop_id, .clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map, + .put_sess = tcm_qla2xxx_put_sess, .shutdown_sess = tcm_qla2xxx_shutdown_sess, + .get_dif_tags = tcm_qla2xxx_dif_tags, + .chk_dif_tags = tcm_qla2xxx_chk_dif_tags, }; static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport) @@ -1690,7 +1761,7 @@ static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha, (struct tcm_qla2xxx_lport *)base_vha->vha_tgt.target_lport_ptr; struct fc_vport_identifiers vport_id; - if (!qla_tgt_mode_enabled(base_vha)) { + if (qla_ini_mode_enabled(base_vha)) { pr_err("qla2xxx base_vha not enabled for target mode\n"); return -EPERM; } @@ -1738,7 +1809,7 @@ static struct se_wwn *tcm_qla2xxx_npiv_make_lport( p = strchr(tmp, '@'); if (!p) { - pr_err("Unable to locate NPIV '@' seperator\n"); + pr_err("Unable to locate NPIV '@' separator\n"); return ERR_PTR(-EINVAL); } *p++ = '\0'; diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h index cf8430be183b6b..071035dfa99ae8 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h @@ -20,8 +20,8 @@ struct tcm_qla2xxx_nacl { u64 nport_wwnn; /* ASCII formatted WWPN for FC Initiator Nport */ char nport_name[TCM_QLA2XXX_NAMELEN]; - /* Pointer to qla_tgt_sess */ - struct qla_tgt_sess *qla_tgt_sess; + /* Pointer to fc_port */ + struct fc_port *fc_port; /* Pointer to TCM FC nexus */ struct se_session *nport_nexus; }; diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c index b1383a71400ead..a75673bb82b393 100644 --- a/drivers/scsi/scsi_common.c +++ b/drivers/scsi/scsi_common.c @@ -137,11 +137,11 @@ EXPORT_SYMBOL(int_to_scsilun); bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len, struct scsi_sense_hdr *sshdr) { + memset(sshdr, 0, sizeof(struct 
scsi_sense_hdr)); + if (!sense_buffer || !sb_len) return false; - memset(sshdr, 0, sizeof(struct scsi_sense_hdr)); - sshdr->response_code = (sense_buffer[0] & 0x7f); if (!scsi_sense_valid(sshdr)) diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c index b8d3b97b217ac5..84addee05be67a 100644 --- a/drivers/scsi/scsi_dh.c +++ b/drivers/scsi/scsi_dh.c @@ -219,20 +219,6 @@ int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh) } EXPORT_SYMBOL_GPL(scsi_unregister_device_handler); -static struct scsi_device *get_sdev_from_queue(struct request_queue *q) -{ - struct scsi_device *sdev; - unsigned long flags; - - spin_lock_irqsave(q->queue_lock, flags); - sdev = q->queuedata; - if (!sdev || !get_device(&sdev->sdev_gendev)) - sdev = NULL; - spin_unlock_irqrestore(q->queue_lock, flags); - - return sdev; -} - /* * scsi_dh_activate - activate the path associated with the scsi_device * corresponding to the given request queue. @@ -251,7 +237,7 @@ int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data) struct scsi_device *sdev; int err = SCSI_DH_NOSYS; - sdev = get_sdev_from_queue(q); + sdev = scsi_device_from_queue(q); if (!sdev) { if (fn) fn(data, err); @@ -298,7 +284,7 @@ int scsi_dh_set_params(struct request_queue *q, const char *params) struct scsi_device *sdev; int err = -SCSI_DH_NOSYS; - sdev = get_sdev_from_queue(q); + sdev = scsi_device_from_queue(q); if (!sdev) return err; @@ -321,7 +307,7 @@ int scsi_dh_attach(struct request_queue *q, const char *name) struct scsi_device_handler *scsi_dh; int err = 0; - sdev = get_sdev_from_queue(q); + sdev = scsi_device_from_queue(q); if (!sdev) return -ENODEV; @@ -359,7 +345,7 @@ const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp) struct scsi_device *sdev; const char *handler_name = NULL; - sdev = get_sdev_from_queue(q); + sdev = scsi_device_from_queue(q); if (!sdev) return NULL; diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c index 8b8c814df5c75d..b6bf3f29a12a4e 100644 --- a/drivers/scsi/scsi_ioctl.c +++ b/drivers/scsi/scsi_ioctl.c @@ -199,6 +199,7 @@ static int scsi_ioctl_get_pci(struct scsi_device *sdev, void __user *arg) int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) { char scsi_cmd[MAX_COMMAND_SIZE]; + struct scsi_sense_hdr sense_hdr; /* Check for deprecated ioctls ... 
all the ioctls which don't * follow the new unique numbering scheme are deprecated */ @@ -243,7 +244,7 @@ int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) return scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW); case SCSI_IOCTL_TEST_UNIT_READY: return scsi_test_unit_ready(sdev, IOCTL_NORMAL_TIMEOUT, - NORMAL_RETRIES, NULL); + NORMAL_RETRIES, &sense_hdr); case SCSI_IOCTL_START_UNIT: scsi_cmd[0] = START_STOP; scsi_cmd[1] = 0; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 3e32dc954c3c8c..19125d72f322c9 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -213,10 +213,30 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason) __scsi_queue_insert(cmd, reason, 1); } -static int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, + +/** + * scsi_execute - insert request and wait for the result + * @sdev: scsi device + * @cmd: scsi command + * @data_direction: data direction + * @buffer: data buffer + * @bufflen: len of buffer + * @sense: optional sense buffer + * @sshdr: optional decoded sense header + * @timeout: request timeout in seconds + * @retries: number of times to retry request + * @flags: flags for ->cmd_flags + * @rq_flags: flags for ->rq_flags + * @resid: optional residual length + * + * returns the req->errors value which is the scsi_cmnd result + * field. + */ +int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, int data_direction, void *buffer, unsigned bufflen, - unsigned char *sense, int timeout, int retries, u64 flags, - req_flags_t rq_flags, int *resid) + unsigned char *sense, struct scsi_sense_hdr *sshdr, + int timeout, int retries, u64 flags, req_flags_t rq_flags, + int *resid) { struct request *req; struct scsi_request *rq; @@ -259,62 +279,16 @@ static int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, *resid = rq->resid_len; if (sense && rq->sense_len) memcpy(sense, rq->sense, SCSI_SENSE_BUFFERSIZE); + if (sshdr) + scsi_normalize_sense(rq->sense, rq->sense_len, sshdr); ret = req->errors; out: blk_put_request(req); return ret; } - -/** - * scsi_execute - insert request and wait for the result - * @sdev: scsi device - * @cmd: scsi command - * @data_direction: data direction - * @buffer: data buffer - * @bufflen: len of buffer - * @sense: optional sense buffer - * @timeout: request timeout in seconds - * @retries: number of times to retry request - * @flags: or into request flags; - * @resid: optional residual length - * - * returns the req->errors value which is the scsi_cmnd result - * field. 
- */ -int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, - int data_direction, void *buffer, unsigned bufflen, - unsigned char *sense, int timeout, int retries, u64 flags, - int *resid) -{ - return __scsi_execute(sdev, cmd, data_direction, buffer, bufflen, sense, - timeout, retries, flags, 0, resid); -} EXPORT_SYMBOL(scsi_execute); -int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd, - int data_direction, void *buffer, unsigned bufflen, - struct scsi_sense_hdr *sshdr, int timeout, int retries, - int *resid, u64 flags, req_flags_t rq_flags) -{ - char *sense = NULL; - int result; - - if (sshdr) { - sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO); - if (!sense) - return DRIVER_ERROR << 24; - } - result = __scsi_execute(sdev, cmd, data_direction, buffer, bufflen, - sense, timeout, retries, flags, rq_flags, resid); - if (sshdr) - scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr); - - kfree(sense); - return result; -} -EXPORT_SYMBOL(scsi_execute_req_flags); - /* * Function: scsi_init_cmd_errh() * @@ -2231,6 +2205,29 @@ void scsi_mq_destroy_tags(struct Scsi_Host *shost) blk_mq_free_tag_set(&shost->tag_set); } +/** + * scsi_device_from_queue - return sdev associated with a request_queue + * @q: The request queue to return the sdev from + * + * Return the sdev associated with a request queue or NULL if the + * request_queue does not reference a SCSI device. + */ +struct scsi_device *scsi_device_from_queue(struct request_queue *q) +{ + struct scsi_device *sdev = NULL; + + if (q->mq_ops) { + if (q->mq_ops == &scsi_mq_ops) + sdev = q->queuedata; + } else if (q->request_fn == scsi_request_fn) + sdev = q->queuedata; + if (!sdev || !get_device(&sdev->sdev_gendev)) + sdev = NULL; + + return sdev; +} +EXPORT_SYMBOL_GPL(scsi_device_from_queue); + /* * Function: scsi_block_requests() * @@ -2497,28 +2494,20 @@ EXPORT_SYMBOL(scsi_mode_sense); * @sdev: scsi device to change the state of. * @timeout: command timeout * @retries: number of retries before failing - * @sshdr_external: Optional pointer to struct scsi_sense_hdr for - * returning sense. Make sure that this is cleared before passing - * in. + * @sshdr: output pointer for decoded sense information. * * Returns zero if unsuccessful or an error if TUR failed. For * removable media, UNIT_ATTENTION sets ->changed flag. **/ int scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries, - struct scsi_sense_hdr *sshdr_external) + struct scsi_sense_hdr *sshdr) { char cmd[] = { TEST_UNIT_READY, 0, 0, 0, 0, 0, }; - struct scsi_sense_hdr *sshdr; int result; - if (!sshdr_external) - sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL); - else - sshdr = sshdr_external; - /* try to eat the UNIT_ATTENTION if there are enough retries */ do { result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr, @@ -2529,8 +2518,6 @@ scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries, } while (scsi_sense_valid(sshdr) && sshdr->sense_key == UNIT_ATTENTION && --retries); - if (!sshdr_external) - kfree(sshdr); return result; } EXPORT_SYMBOL(scsi_test_unit_ready); @@ -2945,6 +2932,8 @@ EXPORT_SYMBOL(scsi_target_resume); /** * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state * @sdev: device to block + * @wait: Whether or not to wait until ongoing .queuecommand() / + * .queue_rq() calls have finished. * * Block request made by scsi lld's to temporarily stop all * scsi commands on the specified device. May sleep.
@@ -2962,7 +2951,7 @@ EXPORT_SYMBOL(scsi_target_resume); * remove the rport mutex lock and unlock calls from srp_queuecommand(). */ int -scsi_internal_device_block(struct scsi_device *sdev) +scsi_internal_device_block(struct scsi_device *sdev, bool wait) { struct request_queue *q = sdev->request_queue; unsigned long flags; @@ -2982,12 +2971,16 @@ scsi_internal_device_block(struct scsi_device *sdev) * request queue. */ if (q->mq_ops) { - blk_mq_quiesce_queue(q); + if (wait) + blk_mq_quiesce_queue(q); + else + blk_mq_stop_hw_queues(q); } else { spin_lock_irqsave(q->queue_lock, flags); blk_stop_queue(q); spin_unlock_irqrestore(q->queue_lock, flags); - scsi_wait_for_queuecommand(sdev); + if (wait) + scsi_wait_for_queuecommand(sdev); } return 0; @@ -3049,7 +3042,7 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_unblock); static void device_block(struct scsi_device *sdev, void *data) { - scsi_internal_device_block(sdev); + scsi_internal_device_block(sdev, true); } static int diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index 99bfc985e1903b..f11bd102d6d5d6 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h @@ -188,8 +188,5 @@ static inline void scsi_dh_remove_device(struct scsi_device *sdev) { } */ #define SCSI_DEVICE_BLOCK_MAX_TIMEOUT 600 /* units in seconds */ -extern int scsi_internal_device_block(struct scsi_device *sdev); -extern int scsi_internal_device_unblock(struct scsi_device *sdev, - enum scsi_device_state new_state); #endif /* _SCSI_PRIV_H */ diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c index 319868f3f67430..d0219e36080c3b 100644 --- a/drivers/scsi/scsi_transport_spi.c +++ b/drivers/scsi/scsi_transport_spi.c @@ -123,25 +123,21 @@ static int spi_execute(struct scsi_device *sdev, const void *cmd, { int i, result; unsigned char sense[SCSI_SENSE_BUFFERSIZE]; + struct scsi_sense_hdr sshdr_tmp; + + if (!sshdr) + sshdr = &sshdr_tmp; for(i = 0; i < DV_RETRIES; i++) { - result = scsi_execute(sdev, cmd, dir, buffer, bufflen, - sense, DV_TIMEOUT, /* retries */ 1, + result = scsi_execute(sdev, cmd, dir, buffer, bufflen, sense, + sshdr, DV_TIMEOUT, /* retries */ 1, REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER, - NULL); - if (driver_byte(result) & DRIVER_SENSE) { - struct scsi_sense_hdr sshdr_tmp; - if (!sshdr) - sshdr = &sshdr_tmp; - - if (scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, - sshdr) - && sshdr->sense_key == UNIT_ATTENTION) - continue; - } - break; + 0, NULL); + if (!(driver_byte(result) & DRIVER_SENSE) || + sshdr->sense_key != UNIT_ATTENTION) + break; } return result; } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index cb6e68dd6df09d..fcfeddc79331bb 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1425,7 +1425,6 @@ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing) { struct scsi_disk *sdkp = scsi_disk_get(disk); struct scsi_device *sdp; - struct scsi_sense_hdr *sshdr = NULL; int retval; if (!sdkp) @@ -1454,22 +1453,21 @@ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing) * by sd_spinup_disk() from sd_revalidate_disk(), which happens whenever * sd_revalidate() is called. 
*/ - retval = -ENODEV; - if (scsi_block_when_processing_errors(sdp)) { - sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL); + struct scsi_sense_hdr sshdr = { 0, }; + retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES, - sshdr); - } + &sshdr); - /* failed to execute TUR, assume media not present */ - if (host_byte(retval)) { - set_media_not_present(sdkp); - goto out; - } + /* failed to execute TUR, assume media not present */ + if (host_byte(retval)) { + set_media_not_present(sdkp); + goto out; + } - if (media_not_present(sdkp, sshdr)) - goto out; + if (media_not_present(sdkp, &sshdr)) + goto out; + } /* * For removable scsi disk we have to recognise the presence @@ -1485,7 +1483,6 @@ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing) * Medium present state has changed in either direction. * Device has indicated UNIT_ATTENTION. */ - kfree(sshdr); retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0; sdp->changed = 0; scsi_disk_put(sdkp); @@ -1511,9 +1508,8 @@ static int sd_sync_cache(struct scsi_disk *sdkp) * Leave the rest of the command zero to indicate * flush everything. */ - res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, - &sshdr, timeout, SD_MAX_RETRIES, - NULL, 0, RQF_PM); + res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr, + timeout, SD_MAX_RETRIES, 0, RQF_PM, NULL); if (res == 0) break; } @@ -1787,6 +1783,8 @@ static int sd_done(struct scsi_cmnd *SCpnt) { int result = SCpnt->result; unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt); + unsigned int sector_size = SCpnt->device->sector_size; + unsigned int resid; struct scsi_sense_hdr sshdr; struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk); struct request *req = SCpnt->request; @@ -1817,6 +1815,21 @@ static int sd_done(struct scsi_cmnd *SCpnt) scsi_set_resid(SCpnt, blk_rq_bytes(req)); } break; + default: + /* + * In case of bogus fw or device, we could end up having + * an unaligned partial completion. Check this here and force + * alignment. + */ + resid = scsi_get_resid(SCpnt); + if (resid & (sector_size - 1)) { + sd_printk(KERN_INFO, sdkp, + "Unaligned partial completion (resid=%u, sector_sz=%u)\n", + resid, sector_size); + resid = min(scsi_bufflen(SCpnt), + round_up(resid, sector_size)); + scsi_set_resid(SCpnt, resid); + } } if (result) { @@ -3079,23 +3092,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie) put_device(&sdkp->dev); } -struct sd_devt { - int idx; - struct disk_devt disk_devt; -}; - -void sd_devt_release(struct disk_devt *disk_devt) -{ - struct sd_devt *sd_devt = container_of(disk_devt, struct sd_devt, - disk_devt); - - spin_lock(&sd_index_lock); - ida_remove(&sd_index_ida, sd_devt->idx); - spin_unlock(&sd_index_lock); - - kfree(sd_devt); -} - /** * sd_probe - called during driver initialization and whenever a * new scsi device is attached to the system. 
It is called once @@ -3117,7 +3113,6 @@ void sd_devt_release(struct disk_devt *disk_devt) static int sd_probe(struct device *dev) { struct scsi_device *sdp = to_scsi_device(dev); - struct sd_devt *sd_devt; struct scsi_disk *sdkp; struct gendisk *gd; int index; @@ -3143,13 +3138,9 @@ static int sd_probe(struct device *dev) if (!sdkp) goto out; - sd_devt = kzalloc(sizeof(*sd_devt), GFP_KERNEL); - if (!sd_devt) - goto out_free; - gd = alloc_disk(SD_MINORS); if (!gd) - goto out_free_devt; + goto out_free; do { if (!ida_pre_get(&sd_index_ida, GFP_KERNEL)) @@ -3165,11 +3156,6 @@ static int sd_probe(struct device *dev) goto out_put; } - atomic_set(&sd_devt->disk_devt.count, 1); - sd_devt->disk_devt.release = sd_devt_release; - sd_devt->idx = index; - gd->disk_devt = &sd_devt->disk_devt; - error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN); if (error) { sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n"); @@ -3209,14 +3195,13 @@ static int sd_probe(struct device *dev) return 0; out_free_index: - put_disk_devt(&sd_devt->disk_devt); - sd_devt = NULL; + spin_lock(&sd_index_lock); + ida_remove(&sd_index_ida, index); + spin_unlock(&sd_index_lock); out_put: put_disk(gd); out_free: kfree(sdkp); - out_free_devt: - kfree(sd_devt); out: scsi_autopm_put_device(sdp); return error; @@ -3275,7 +3260,10 @@ static void scsi_disk_release(struct device *dev) struct scsi_disk *sdkp = to_scsi_disk(dev); struct gendisk *disk = sdkp->disk; - put_disk_devt(disk->disk_devt); + spin_lock(&sd_index_lock); + ida_remove(&sd_index_ida, sdkp->index); + spin_unlock(&sd_index_lock); + disk->private_data = NULL; put_disk(disk); put_device(&sdkp->device->sdev_gendev); @@ -3299,8 +3287,8 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start) if (!scsi_device_online(sdp)) return -ENODEV; - res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, - SD_TIMEOUT, SD_MAX_RETRIES, NULL, 0, RQF_PM); + res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr, + SD_TIMEOUT, SD_MAX_RETRIES, 0, RQF_PM, NULL); if (res) { sd_print_result(sdkp, "Start/Stop Unit failed", res); if (driver_byte(res) & DRIVER_SENSE) diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 29b86505f796d9..225abaad4d1cc8 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -996,6 +996,8 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) result = get_user(val, ip); if (result) return result; + if (val > SG_MAX_CDB_SIZE) + return -ENOMEM; sfp->next_cmd_len = (val > 0) ? 
val : 0; return 0; case SG_GET_VERSION_NUM: diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 11c0dfb3dfa392..657ad15682a34c 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -534,8 +534,7 @@ static int pqi_write_current_time_to_host_wellness( size_t buffer_length; time64_t local_time; unsigned int year; - struct timeval time; - struct rtc_time tm; + struct tm tm; buffer_length = sizeof(*buffer); @@ -552,9 +551,8 @@ static int pqi_write_current_time_to_host_wellness( put_unaligned_le16(sizeof(buffer->time), &buffer->time_length); - do_gettimeofday(&time); - local_time = time.tv_sec - (sys_tz.tz_minuteswest * 60); - rtc_time64_to_tm(local_time, &tm); + local_time = ktime_get_real_seconds(); + time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm); year = tm.tm_year + 1900; buffer->time[0] = bin2bcd(tm.tm_hour); diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c index dfffdf63e44c92..4610c8c5693fd4 100644 --- a/drivers/scsi/sr_ioctl.c +++ b/drivers/scsi/sr_ioctl.c @@ -187,30 +187,19 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc) struct scsi_device *SDev; struct scsi_sense_hdr sshdr; int result, err = 0, retries = 0; - struct request_sense *sense = cgc->sense; SDev = cd->device; - if (!sense) { - sense = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL); - if (!sense) { - err = -ENOMEM; - goto out; - } - } - retry: if (!scsi_block_when_processing_errors(SDev)) { err = -ENODEV; goto out; } - memset(sense, 0, sizeof(*sense)); result = scsi_execute(SDev, cgc->cmd, cgc->data_direction, - cgc->buffer, cgc->buflen, (char *)sense, - cgc->timeout, IOCTL_RETRIES, 0, NULL); - - scsi_normalize_sense((char *)sense, sizeof(*sense), &sshdr); + cgc->buffer, cgc->buflen, + (unsigned char *)cgc->sense, &sshdr, + cgc->timeout, IOCTL_RETRIES, 0, 0, NULL); /* Minimal error checking. Ignore cases we know about, and report the rest. */ if (driver_byte(result) != 0) { @@ -261,8 +250,6 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc) /* Wake up a process waiting for device */ out: - if (!cgc->sense) - kfree(sense); cgc->stat = err; return err; } diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 81212d4bd9bf2d..e5ef78a6848ef1 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -23,7 +23,7 @@ static const char *verstr = "20160209"; #include #include -#include +#include #include #include #include diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 638e5f427c901f..016639d7fef176 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -400,8 +400,6 @@ MODULE_PARM_DESC(storvsc_vcpus_per_sub_channel, "Ratio of VCPUs to subchannels") */ static int storvsc_timeout = 180; -static int msft_blist_flags = BLIST_TRY_VPD_PAGES; - #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) static struct scsi_transport_template *fc_transport_template; #endif @@ -1383,6 +1381,22 @@ static int storvsc_do_io(struct hv_device *device, return ret; } +static int storvsc_device_alloc(struct scsi_device *sdevice) +{ + /* + * Set blist flag to permit the reading of the VPD pages even when + * the target may claim SPC-2 compliance. MSFT targets currently + * claim SPC-2 compliance while they implement post SPC-2 features. + * With this flag we can correctly handle WRITE_SAME_16 issues. + * + * Hypervisor reports SCSI_UNKNOWN type for DVD ROM device but + * still supports REPORT LUN. 
+ */ + sdevice->sdev_bflags = BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES; + + return 0; +} + static int storvsc_device_configure(struct scsi_device *sdevice) { @@ -1395,14 +1409,6 @@ static int storvsc_device_configure(struct scsi_device *sdevice) sdevice->no_write_same = 1; - /* - * Add blist flags to permit the reading of the VPD pages even when - * the target may claim SPC-2 compliance. MSFT targets currently - * claim SPC-2 compliance while they implement post SPC-2 features. - * With this patch we can correctly handle WRITE_SAME_16 issues. - */ - sdevice->sdev_bflags |= msft_blist_flags; - /* * If the host is WIN8 or WIN8 R2, claim conformance to SPC-3 * if the device is a MSFT virtual device. If the host is @@ -1661,6 +1667,7 @@ static struct scsi_host_template scsi_driver = { .eh_host_reset_handler = storvsc_host_reset_handler, .proc_name = "storvsc_host", .eh_timed_out = storvsc_eh_timed_out, + .slave_alloc = storvsc_device_alloc, .slave_configure = storvsc_device_configure, .cmd_per_lun = 255, .this_id = -1, diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index ce5d023c1c915c..c87d770b519a7c 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -1523,18 +1523,6 @@ static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host) return false; } - /* - * Not performing check for each individual select_major - * mappings of select_minor, since there is no harm in - * configuring a non-existent select_minor - */ - if (host->testbus.select_minor > 0xFF) { - dev_err(host->hba->dev, - "%s: 0x%05X is not a legal testbus option\n", - __func__, host->testbus.select_minor); - return false; - } - return true; } diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index 318e4a1f76c92b..54deeb754db5fc 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h @@ -146,7 +146,7 @@ enum attr_idn { /* Descriptor idn for Query requests */ enum desc_idn { QUERY_DESC_IDN_DEVICE = 0x0, - QUERY_DESC_IDN_CONFIGURAION = 0x1, + QUERY_DESC_IDN_CONFIGURATION = 0x1, QUERY_DESC_IDN_UNIT = 0x2, QUERY_DESC_IDN_RFU_0 = 0x3, QUERY_DESC_IDN_INTERCONNECT = 0x4, @@ -162,19 +162,13 @@ enum desc_header_offset { QUERY_DESC_DESC_TYPE_OFFSET = 0x01, }; -enum ufs_desc_max_size { - QUERY_DESC_DEVICE_MAX_SIZE = 0x40, - QUERY_DESC_CONFIGURAION_MAX_SIZE = 0x90, - QUERY_DESC_UNIT_MAX_SIZE = 0x23, - QUERY_DESC_INTERCONNECT_MAX_SIZE = 0x06, - /* - * Max. 126 UNICODE characters (2 bytes per character) plus 2 bytes - * of descriptor header. 
- */ - QUERY_DESC_STRING_MAX_SIZE = 0xFE, - QUERY_DESC_GEOMETRY_MAX_SIZE = 0x44, - QUERY_DESC_POWER_MAX_SIZE = 0x62, - QUERY_DESC_RFU_MAX_SIZE = 0x00, +enum ufs_desc_def_size { + QUERY_DESC_DEVICE_DEF_SIZE = 0x40, + QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90, + QUERY_DESC_UNIT_DEF_SIZE = 0x23, + QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06, + QUERY_DESC_GEOMETRY_DEF_SIZE = 0x44, + QUERY_DESC_POWER_DEF_SIZE = 0x62, }; /* Unit descriptor parameters offsets in bytes*/ diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c index a72a4ba78125b0..8e5e6c04c035e1 100644 --- a/drivers/scsi/ufs/ufshcd-pltfrm.c +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c @@ -309,8 +309,8 @@ int ufshcd_pltfrm_init(struct platform_device *pdev, mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); mmio_base = devm_ioremap_resource(dev, mem_res); - if (IS_ERR(*(void **)&mmio_base)) { - err = PTR_ERR(*(void **)&mmio_base); + if (IS_ERR(mmio_base)) { + err = PTR_ERR(mmio_base); goto out; } diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 8b721f431dd0df..096e95b911bd7b 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -100,19 +100,6 @@ #define ufshcd_hex_dump(prefix_str, buf, len) \ print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false) -static u32 ufs_query_desc_max_size[] = { - QUERY_DESC_DEVICE_MAX_SIZE, - QUERY_DESC_CONFIGURAION_MAX_SIZE, - QUERY_DESC_UNIT_MAX_SIZE, - QUERY_DESC_RFU_MAX_SIZE, - QUERY_DESC_INTERCONNECT_MAX_SIZE, - QUERY_DESC_STRING_MAX_SIZE, - QUERY_DESC_RFU_MAX_SIZE, - QUERY_DESC_GEOMETRY_MAX_SIZE, - QUERY_DESC_POWER_MAX_SIZE, - QUERY_DESC_RFU_MAX_SIZE, -}; - enum { UFSHCD_MAX_CHANNEL = 0, UFSHCD_MAX_ID = 1, @@ -2857,7 +2844,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba, goto out; } - if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) { + if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) { dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n", __func__, *buf_len); err = -EINVAL; @@ -2937,6 +2924,92 @@ static int ufshcd_query_descriptor_retry(struct ufs_hba *hba, return err; } +/** + * ufshcd_read_desc_length - read the specified descriptor length from header + * @hba: Pointer to adapter instance + * @desc_id: descriptor idn value + * @desc_index: descriptor index + * @desc_length: pointer to variable to read the length of descriptor + * + * Return 0 in case of success, non-zero otherwise + */ +static int ufshcd_read_desc_length(struct ufs_hba *hba, + enum desc_idn desc_id, + int desc_index, + int *desc_length) +{ + int ret; + u8 header[QUERY_DESC_HDR_SIZE]; + int header_len = QUERY_DESC_HDR_SIZE; + + if (desc_id >= QUERY_DESC_IDN_MAX) + return -EINVAL; + + ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, + desc_id, desc_index, 0, header, + &header_len); + + if (ret) { + dev_err(hba->dev, "%s: Failed to get descriptor header id %d", + __func__, desc_id); + return ret; + } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) { + dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch", + __func__, header[QUERY_DESC_DESC_TYPE_OFFSET], + desc_id); + ret = -EINVAL; + } + + *desc_length = header[QUERY_DESC_LENGTH_OFFSET]; + return ret; + +} + +/** + * ufshcd_map_desc_id_to_length - map descriptor IDN to its length + * @hba: Pointer to adapter instance + * @desc_id: descriptor idn value + * @desc_len: mapped desc length (out) + * + * Return 0 in case of success, non-zero otherwise + */ +int 
ufshcd_map_desc_id_to_length(struct ufs_hba *hba, + enum desc_idn desc_id, int *desc_len) +{ + switch (desc_id) { + case QUERY_DESC_IDN_DEVICE: + *desc_len = hba->desc_size.dev_desc; + break; + case QUERY_DESC_IDN_POWER: + *desc_len = hba->desc_size.pwr_desc; + break; + case QUERY_DESC_IDN_GEOMETRY: + *desc_len = hba->desc_size.geom_desc; + break; + case QUERY_DESC_IDN_CONFIGURATION: + *desc_len = hba->desc_size.conf_desc; + break; + case QUERY_DESC_IDN_UNIT: + *desc_len = hba->desc_size.unit_desc; + break; + case QUERY_DESC_IDN_INTERCONNECT: + *desc_len = hba->desc_size.interc_desc; + break; + case QUERY_DESC_IDN_STRING: + *desc_len = QUERY_DESC_MAX_SIZE; + break; + case QUERY_DESC_IDN_RFU_0: + case QUERY_DESC_IDN_RFU_1: + *desc_len = 0; + break; + default: + *desc_len = 0; + return -EINVAL; + } + return 0; +} +EXPORT_SYMBOL(ufshcd_map_desc_id_to_length); + /** * ufshcd_read_desc_param - read the specified descriptor parameter * @hba: Pointer to adapter instance @@ -2951,42 +3024,49 @@ static int ufshcd_query_descriptor_retry(struct ufs_hba *hba, static int ufshcd_read_desc_param(struct ufs_hba *hba, enum desc_idn desc_id, int desc_index, - u32 param_offset, + u8 param_offset, u8 *param_read_buf, - u32 param_size) + u8 param_size) { int ret; u8 *desc_buf; - u32 buff_len; + int buff_len; bool is_kmalloc = true; - /* safety checks */ - if (desc_id >= QUERY_DESC_IDN_MAX) + /* Safety check */ + if (desc_id >= QUERY_DESC_IDN_MAX || !param_size) return -EINVAL; - buff_len = ufs_query_desc_max_size[desc_id]; - if ((param_offset + param_size) > buff_len) - return -EINVAL; + /* Get the max length of descriptor from structure filled up at probe + * time. + */ + ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len); - if (!param_offset && (param_size == buff_len)) { - /* memory space already available to hold full descriptor */ - desc_buf = param_read_buf; - is_kmalloc = false; - } else { - /* allocate memory to hold full descriptor */ + /* Sanity checks */ + if (ret || !buff_len) { + dev_err(hba->dev, "%s: Failed to get full descriptor length", + __func__); + return ret; + } + + /* Check whether we need temp memory */ + if (param_offset != 0 || param_size < buff_len) { desc_buf = kmalloc(buff_len, GFP_KERNEL); if (!desc_buf) return -ENOMEM; + } else { + desc_buf = param_read_buf; + is_kmalloc = false; } + /* Request for full descriptor */ ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, - desc_id, desc_index, 0, desc_buf, - &buff_len); + desc_id, desc_index, 0, + desc_buf, &buff_len); if (ret) { dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d", __func__, desc_id, desc_index, param_offset, ret); - goto out; } @@ -2998,25 +3078,9 @@ static int ufshcd_read_desc_param(struct ufs_hba *hba, goto out; } - /* - * While reading variable size descriptors (like string descriptor), - * some UFS devices may report the "LENGTH" (field in "Transaction - * Specific fields" of Query Response UPIU) same as what was requested - * in Query Request UPIU instead of reporting the actual size of the - * variable size descriptor. - * Although it's safe to ignore the "LENGTH" field for variable size - * descriptors as we can always derive the length of the descriptor from - * the descriptor header fields. Hence this change impose the length - * match check only for fixed size descriptors (for which we always - * request the correct size as part of Query Request UPIU). 
- */ - if ((desc_id != QUERY_DESC_IDN_STRING) && - (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) { - dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d", - __func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]); - ret = -EINVAL; - goto out; - } + /* Check that we will not copy more data than available */ + if (is_kmalloc && param_size > buff_len) + param_size = buff_len; if (is_kmalloc) memcpy(param_read_buf, &desc_buf[param_offset], param_size); @@ -4598,8 +4662,6 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, } if (ufshcd_is_clkscaling_supported(hba)) hba->clk_scaling.active_reqs--; - if (ufshcd_is_clkscaling_supported(hba)) - hba->clk_scaling.active_reqs--; } /* clear corresponding bits of completed commands */ @@ -5919,8 +5981,8 @@ static int ufshcd_set_icc_levels_attr(struct ufs_hba *hba, u32 icc_level) static void ufshcd_init_icc_levels(struct ufs_hba *hba) { int ret; - int buff_len = QUERY_DESC_POWER_MAX_SIZE; - u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE]; + int buff_len = hba->desc_size.pwr_desc; + u8 desc_buf[hba->desc_size.pwr_desc]; ret = ufshcd_read_power_desc(hba, desc_buf, buff_len); if (ret) { @@ -6017,11 +6079,10 @@ static int ufs_get_device_desc(struct ufs_hba *hba, { int err; u8 model_index; - u8 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE + 1] = {0}; - u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE]; + u8 str_desc_buf[QUERY_DESC_MAX_SIZE + 1] = {0}; + u8 desc_buf[hba->desc_size.dev_desc]; - err = ufshcd_read_device_desc(hba, desc_buf, - QUERY_DESC_DEVICE_MAX_SIZE); + err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc); if (err) { dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n", __func__, err); @@ -6038,14 +6099,14 @@ static int ufs_get_device_desc(struct ufs_hba *hba, model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; err = ufshcd_read_string_desc(hba, model_index, str_desc_buf, - QUERY_DESC_STRING_MAX_SIZE, ASCII_STD); + QUERY_DESC_MAX_SIZE, ASCII_STD); if (err) { dev_err(hba->dev, "%s: Failed reading Product Name.
err = %d\n", __func__, err); goto out; } - str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0'; + str_desc_buf[QUERY_DESC_MAX_SIZE] = '\0'; strlcpy(dev_desc->model, (str_desc_buf + QUERY_DESC_HDR_SIZE), min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET], MAX_MODEL_LEN)); @@ -6251,6 +6312,51 @@ static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba) hba->req_abort_count = 0; } +static void ufshcd_init_desc_sizes(struct ufs_hba *hba) +{ + int err; + + err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0, + &hba->desc_size.dev_desc); + if (err) + hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE; + + err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0, + &hba->desc_size.pwr_desc); + if (err) + hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE; + + err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0, + &hba->desc_size.interc_desc); + if (err) + hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE; + + err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0, + &hba->desc_size.conf_desc); + if (err) + hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE; + + err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0, + &hba->desc_size.unit_desc); + if (err) + hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE; + + err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0, + &hba->desc_size.geom_desc); + if (err) + hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE; +} + +static void ufshcd_def_desc_sizes(struct ufs_hba *hba) +{ + hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE; + hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE; + hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE; + hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE; + hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE; + hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE; +} + /** * ufshcd_probe_hba - probe hba to detect device and initialize * @hba: per-adapter instance @@ -6285,6 +6391,9 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) if (ret) goto out; + /* Init check for device descriptor sizes */ + ufshcd_init_desc_sizes(hba); + ret = ufs_get_device_desc(hba, &card); if (ret) { dev_err(hba->dev, "%s: Failed getting device info. err = %d\n", @@ -6320,6 +6429,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) /* set the state as operational after switching to desired gear */ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; + /* * If we are in error handling context or in power management callbacks * context, no need to scan the host @@ -6915,9 +7025,9 @@ ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp) goto out; } - ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer, - UFSHCD_REQ_SENSE_SIZE, NULL, - msecs_to_jiffies(1000), 3, NULL, 0, RQF_PM); + ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer, + UFSHCD_REQ_SENSE_SIZE, NULL, NULL, + msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL); if (ret) pr_err("%s: failed with err %d\n", __func__, ret); @@ -6982,8 +7092,8 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, * callbacks hence set the RQF_PM flag so that it doesn't resume the * already suspended childs. 
*/ - ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, - START_STOP_TIMEOUT, 0, NULL, 0, RQF_PM); + ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr, + START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL); if (ret) { sdev_printk(KERN_WARNING, sdp, "START_STOP failed for power mode: %d, result %x\n", @@ -7530,7 +7640,7 @@ static inline ssize_t ufshcd_pm_lvl_store(struct device *dev, if (kstrtoul(buf, 0, &value)) return -EINVAL; - if ((value < UFS_PM_LVL_0) || (value >= UFS_PM_LVL_MAX)) + if (value >= UFS_PM_LVL_MAX) return -EINVAL; spin_lock_irqsave(hba->host->host_lock, flags); @@ -7774,6 +7884,9 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) hba->mmio_base = mmio_base; hba->irq = irq; + /* Set descriptor lengths to specification defaults */ + ufshcd_def_desc_sizes(hba); + err = ufshcd_hba_init(hba); if (err) goto out_error; diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 7630600217a2ef..cdc8bd05f7dfcf 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -220,6 +220,15 @@ struct ufs_dev_cmd { struct ufs_query query; }; +struct ufs_desc_size { + int dev_desc; + int pwr_desc; + int geom_desc; + int interc_desc; + int unit_desc; + int conf_desc; +}; + /** * struct ufs_clk_info - UFS clock related info * @list: list headed by hba->clk_list_head @@ -483,6 +492,7 @@ struct ufs_stats { * @clk_list_head: UFS host controller clocks list node head * @pwr_info: holds current power mode * @max_pwr_info: keeps the device max valid pwm + * @desc_size: descriptor sizes reported by device * @urgent_bkops_lvl: keeps track of urgent bkops level for device * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for * device is known or not. @@ -666,6 +676,7 @@ struct ufs_hba { bool is_urgent_bkops_lvl_checked; struct rw_semaphore clk_scaling_lock; + struct ufs_desc_size desc_size; }; /* Returns true if clocks can be gated. 
Otherwise false */ @@ -832,6 +843,10 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, enum flag_idn idn, bool *flag_res); int ufshcd_hold(struct ufs_hba *hba, bool async); void ufshcd_release(struct ufs_hba *hba); + +int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id, + int *desc_length); + u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba); /* Wrapper functions for safely calling variant operations */ diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index c680d76413116c..939c47df73fa97 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -29,6 +30,7 @@ #include #include #include +#include #define VIRTIO_SCSI_MEMPOOL_SZ 64 #define VIRTIO_SCSI_EVENT_LEN 8 @@ -108,7 +110,6 @@ struct virtio_scsi { bool affinity_hint_set; struct hlist_node node; - struct hlist_node node_dead; /* Protected by event_vq lock */ bool stop_events; @@ -118,7 +119,6 @@ struct virtio_scsi { struct virtio_scsi_vq req_vqs[]; }; -static enum cpuhp_state virtioscsi_online; static struct kmem_cache *virtscsi_cmd_cache; static mempool_t *virtscsi_cmd_pool; @@ -766,6 +766,13 @@ static void virtscsi_target_destroy(struct scsi_target *starget) kfree(tgt); } +static int virtscsi_map_queues(struct Scsi_Host *shost) +{ + struct virtio_scsi *vscsi = shost_priv(shost); + + return blk_mq_virtio_map_queues(&shost->tag_set, vscsi->vdev, 2); +} + static struct scsi_host_template virtscsi_host_template_single = { .module = THIS_MODULE, .name = "Virtio SCSI HBA", @@ -801,6 +808,7 @@ static struct scsi_host_template virtscsi_host_template_multi = { .use_clustering = ENABLE_CLUSTERING, .target_alloc = virtscsi_target_alloc, .target_destroy = virtscsi_target_destroy, + .map_queues = virtscsi_map_queues, .track_queue_depth = 1, }; @@ -817,80 +825,6 @@ static struct scsi_host_template virtscsi_host_template_multi = { virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \ } while(0) -static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity) -{ - int i; - int cpu; - - /* In multiqueue mode, when the number of cpu is equal - * to the number of request queues, we let the qeueues - * to be private to one cpu by setting the affinity hint - * to eliminate the contention. 
- */ - if ((vscsi->num_queues == 1 || - vscsi->num_queues != num_online_cpus()) && affinity) { - if (vscsi->affinity_hint_set) - affinity = false; - else - return; - } - - if (affinity) { - i = 0; - for_each_online_cpu(cpu) { - virtqueue_set_affinity(vscsi->req_vqs[i].vq, cpu); - i++; - } - - vscsi->affinity_hint_set = true; - } else { - for (i = 0; i < vscsi->num_queues; i++) { - if (!vscsi->req_vqs[i].vq) - continue; - - virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1); - } - - vscsi->affinity_hint_set = false; - } -} - -static void virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity) -{ - get_online_cpus(); - __virtscsi_set_affinity(vscsi, affinity); - put_online_cpus(); -} - -static int virtscsi_cpu_online(unsigned int cpu, struct hlist_node *node) -{ - struct virtio_scsi *vscsi = hlist_entry_safe(node, struct virtio_scsi, - node); - __virtscsi_set_affinity(vscsi, true); - return 0; -} - -static int virtscsi_cpu_notif_add(struct virtio_scsi *vi) -{ - int ret; - - ret = cpuhp_state_add_instance(virtioscsi_online, &vi->node); - if (ret) - return ret; - - ret = cpuhp_state_add_instance(CPUHP_VIRT_SCSI_DEAD, &vi->node_dead); - if (ret) - cpuhp_state_remove_instance(virtioscsi_online, &vi->node); - return ret; -} - -static void virtscsi_cpu_notif_remove(struct virtio_scsi *vi) -{ - cpuhp_state_remove_instance_nocalls(virtioscsi_online, &vi->node); - cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_SCSI_DEAD, - &vi->node_dead); -} - static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq, struct virtqueue *vq) { @@ -900,14 +834,8 @@ static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq, static void virtscsi_remove_vqs(struct virtio_device *vdev) { - struct Scsi_Host *sh = virtio_scsi_host(vdev); - struct virtio_scsi *vscsi = shost_priv(sh); - - virtscsi_set_affinity(vscsi, false); - /* Stop all the virtqueues. */ vdev->config->reset(vdev); - vdev->config->del_vqs(vdev); } @@ -920,6 +848,7 @@ static int virtscsi_init(struct virtio_device *vdev, vq_callback_t **callbacks; const char **names; struct virtqueue **vqs; + struct irq_affinity desc = { .pre_vectors = 2 }; num_vqs = vscsi->num_queues + VIRTIO_SCSI_VQ_BASE; vqs = kmalloc(num_vqs * sizeof(struct virtqueue *), GFP_KERNEL); @@ -941,7 +870,8 @@ static int virtscsi_init(struct virtio_device *vdev, } /* Discover virtqueues and write information to configuration. 
*/ - err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names); + err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names, + &desc); if (err) goto out; @@ -1007,10 +937,6 @@ static int virtscsi_probe(struct virtio_device *vdev) if (err) goto virtscsi_init_failed; - err = virtscsi_cpu_notif_add(vscsi); - if (err) - goto scsi_add_host_failed; - cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1; shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue); shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF; @@ -1065,9 +991,6 @@ static void virtscsi_remove(struct virtio_device *vdev) virtscsi_cancel_event_work(vscsi); scsi_remove_host(shost); - - virtscsi_cpu_notif_remove(vscsi); - virtscsi_remove_vqs(vdev); scsi_host_put(shost); } @@ -1075,10 +998,6 @@ static void virtscsi_remove(struct virtio_device *vdev) #ifdef CONFIG_PM_SLEEP static int virtscsi_freeze(struct virtio_device *vdev) { - struct Scsi_Host *sh = virtio_scsi_host(vdev); - struct virtio_scsi *vscsi = shost_priv(sh); - - virtscsi_cpu_notif_remove(vscsi); virtscsi_remove_vqs(vdev); return 0; } @@ -1093,11 +1012,6 @@ static int virtscsi_restore(struct virtio_device *vdev) if (err) return err; - err = virtscsi_cpu_notif_add(vscsi); - if (err) { - vdev->config->del_vqs(vdev); - return err; - } virtio_device_ready(vdev); if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) @@ -1152,16 +1066,6 @@ static int __init init(void) pr_err("mempool_create() for virtscsi_cmd_pool failed\n"); goto error; } - ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, - "scsi/virtio:online", - virtscsi_cpu_online, NULL); - if (ret < 0) - goto error; - virtioscsi_online = ret; - ret = cpuhp_setup_state_multi(CPUHP_VIRT_SCSI_DEAD, "scsi/virtio:dead", - NULL, virtscsi_cpu_online); - if (ret) - goto error; ret = register_virtio_driver(&virtio_scsi_driver); if (ret < 0) goto error; @@ -1177,17 +1081,12 @@ static int __init init(void) kmem_cache_destroy(virtscsi_cmd_cache); virtscsi_cmd_cache = NULL; } - if (virtioscsi_online) - cpuhp_remove_multi_state(virtioscsi_online); - cpuhp_remove_multi_state(CPUHP_VIRT_SCSI_DEAD); return ret; } static void __exit fini(void) { unregister_virtio_driver(&virtio_scsi_driver); - cpuhp_remove_multi_state(virtioscsi_online); - cpuhp_remove_multi_state(CPUHP_VIRT_SCSI_DEAD); mempool_destroy(virtscsi_cmd_pool); kmem_cache_destroy(virtscsi_cmd_cache); } diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c index ef474a7487449b..c374e3b5c678d2 100644 --- a/drivers/scsi/vmw_pvscsi.c +++ b/drivers/scsi/vmw_pvscsi.c @@ -1487,7 +1487,7 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id) irq_flag &= ~PCI_IRQ_MSI; error = pci_alloc_irq_vectors(adapter->dev, 1, 1, irq_flag); - if (error) + if (error < 0) goto out_reset_adapter; adapter->use_req_threshold = pvscsi_setup_req_threshold(adapter, true); diff --git a/drivers/soc/fsl/qbman/dpaa_sys.h b/drivers/soc/fsl/qbman/dpaa_sys.h index 2eaf3184f61dae..2ce394aa4c9502 100644 --- a/drivers/soc/fsl/qbman/dpaa_sys.h +++ b/drivers/soc/fsl/qbman/dpaa_sys.h @@ -36,6 +36,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 44222ef9471e58..90b5b2efafbf45 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index 2c3ffbcbd621a5..f45115fce4eb7c 100644 --- 
a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c @@ -36,6 +36,7 @@ #include #include #include +#include #include "ion.h" #include "ion_priv.h" diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c index 4e5c0f17f579ae..c69d0bd536934a 100644 --- a/drivers/staging/android/ion/ion_heap.c +++ b/drivers/staging/android/ion/ion_heap.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include "ion.h" diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c index ec3b6656141242..05466004939597 100644 --- a/drivers/staging/android/lowmemorykiller.c +++ b/drivers/staging/android/lowmemorykiller.c @@ -37,7 +37,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c index 57e8599b54e645..8deac8d9225da2 100644 --- a/drivers/staging/comedi/comedi_fops.c +++ b/drivers/staging/comedi/comedi_fops.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/staging/dgnc/dgnc_tty.c b/drivers/staging/dgnc/dgnc_tty.c index c63e591631f637..c3b8fc54883db8 100644 --- a/drivers/staging/dgnc/dgnc_tty.c +++ b/drivers/staging/dgnc/dgnc_tty.c @@ -19,7 +19,7 @@ */ #include -#include /* For jiffies, task states */ +#include /* For jiffies, task states, etc. */ #include /* For tasklet and interrupt structs/defines */ #include #include diff --git a/drivers/staging/dgnc/dgnc_utils.c b/drivers/staging/dgnc/dgnc_utils.c index 95272f4765fcfa..6f59240024d131 100644 --- a/drivers/staging/dgnc/dgnc_utils.c +++ b/drivers/staging/dgnc/dgnc_utils.c @@ -1,5 +1,5 @@ #include -#include +#include #include "dgnc_utils.h" /* diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-bus.c b/drivers/staging/fsl-mc/bus/fsl-mc-bus.c index 47acb0a298422a..3be5f25ff11306 100644 --- a/drivers/staging/fsl-mc/bus/fsl-mc-bus.c +++ b/drivers/staging/fsl-mc/bus/fsl-mc-bus.c @@ -588,8 +588,7 @@ static int parse_mc_ranges(struct device *dev, int *paddr_cells, int *mc_addr_cells, int *mc_size_cells, - const __be32 **ranges_start, - u8 *num_ranges) + const __be32 **ranges_start) { const __be32 *prop; int range_tuple_cell_count; @@ -602,8 +601,6 @@ static int parse_mc_ranges(struct device *dev, dev_warn(dev, "missing or empty ranges property for device tree node '%s'\n", mc_node->name); - - *num_ranges = 0; return 0; } @@ -630,8 +627,7 @@ static int parse_mc_ranges(struct device *dev, return -EINVAL; } - *num_ranges = ranges_len / tuple_len; - return 0; + return ranges_len / tuple_len; } static int get_mc_addr_translation_ranges(struct device *dev, @@ -639,7 +635,7 @@ static int get_mc_addr_translation_ranges(struct device *dev, **ranges, u8 *num_ranges) { - int error; + int ret; int paddr_cells; int mc_addr_cells; int mc_size_cells; @@ -647,16 +643,16 @@ static int get_mc_addr_translation_ranges(struct device *dev, const __be32 *ranges_start; const __be32 *cell; - error = parse_mc_ranges(dev, + ret = parse_mc_ranges(dev, &paddr_cells, &mc_addr_cells, &mc_size_cells, - &ranges_start, - num_ranges); - if (error < 0) - return error; + &ranges_start); + if (ret < 0) + return ret; - if (!(*num_ranges)) { + *num_ranges = ret; + if (!ret) { /* * Missing or empty ranges property ("ranges;") for the * 'fsl,qoriq-mc' node. 
In this case, identity mapping diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c index ab0dbf5cab5aa7..43255e2e927673 100644 --- a/drivers/staging/greybus/uart.c +++ b/drivers/staging/greybus/uart.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c index cf902154f0aae0..bcf9f3dd0310c7 100644 --- a/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c @@ -34,7 +34,7 @@ #include #include #include -#include +#include #include "../../../include/linux/libcfs/libcfs.h" diff --git a/drivers/staging/lustre/lnet/lnet/lib-socket.c b/drivers/staging/lustre/lnet/lnet/lib-socket.c index b7b87ecefcdfc7..9fca8d225ee092 100644 --- a/drivers/staging/lustre/lnet/lnet/lib-socket.c +++ b/drivers/staging/lustre/lnet/lnet/lib-socket.c @@ -532,7 +532,7 @@ lnet_sock_accept(struct socket **newsockp, struct socket *sock) newsock->ops = sock->ops; - rc = sock->ops->accept(sock, newsock, O_NONBLOCK); + rc = sock->ops->accept(sock, newsock, O_NONBLOCK, false); if (rc == -EAGAIN) { /* Nothing ready, so wait for activity */ init_waitqueue_entry(&wait, current); @@ -540,7 +540,7 @@ lnet_sock_accept(struct socket **newsockp, struct socket *sock) set_current_state(TASK_INTERRUPTIBLE); schedule(); remove_wait_queue(sk_sleep(sock->sk), &wait); - rc = sock->ops->accept(sock, newsock, O_NONBLOCK); + rc = sock->ops->accept(sock, newsock, O_NONBLOCK, false); } if (rc) diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c index 92cd4113cf75e9..87fe366f8f7031 100644 --- a/drivers/staging/lustre/lnet/selftest/rpc.c +++ b/drivers/staging/lustre/lnet/selftest/rpc.c @@ -255,7 +255,7 @@ srpc_service_init(struct srpc_service *svc) svc->sv_shuttingdown = 0; svc->sv_cpt_data = cfs_percpt_alloc(lnet_cpt_table(), - sizeof(*svc->sv_cpt_data)); + sizeof(**svc->sv_cpt_data)); if (!svc->sv_cpt_data) return -ENOMEM; diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h index 21aec0ca9ad36b..7d8628ce0d3b3a 100644 --- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h +++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h @@ -44,6 +44,7 @@ #ifdef __KERNEL__ # include +# include # include /* snprintf() */ # include #else /* !__KERNEL__ */ diff --git a/drivers/staging/lustre/lustre/include/lustre_compat.h b/drivers/staging/lustre/lustre/include/lustre_compat.h index 300e96fb032ae9..da9ce195c52ecf 100644 --- a/drivers/staging/lustre/lustre/include/lustre_compat.h +++ b/drivers/staging/lustre/lustre/include/lustre_compat.h @@ -35,6 +35,7 @@ #include #include +#include #include "lustre_patchless_compat.h" diff --git a/drivers/staging/lustre/lustre/include/lustre_lib.h b/drivers/staging/lustre/lustre/include/lustre_lib.h index 27f3148c43442e..b04d613846ee6f 100644 --- a/drivers/staging/lustre/lustre/include/lustre_lib.h +++ b/drivers/staging/lustre/lustre/include/lustre_lib.h @@ -42,7 +42,7 @@ * @{ */ -#include +#include #include #include #include "../../include/linux/libcfs/libcfs.h" diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h index aaedec7d793c85..dace6591a0a449 100644 --- a/drivers/staging/lustre/lustre/include/obd_support.h +++ b/drivers/staging/lustre/lustre/include/obd_support.h @@ 
-34,6 +34,8 @@ #define _OBD_SUPPORT #include +#include + #include "../../include/linux/libcfs/libcfs.h" #include "lustre_compat.h" #include "lprocfs_status.h" diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c index 10adfcdd70354a..481c0d01d4c626 100644 --- a/drivers/staging/lustre/lustre/llite/file.c +++ b/drivers/staging/lustre/lustre/llite/file.c @@ -2952,15 +2952,16 @@ static int ll_inode_revalidate(struct dentry *dentry, __u64 ibits) return rc; } -int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat) +int ll_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { - struct inode *inode = d_inode(de); + struct inode *inode = d_inode(path->dentry); struct ll_sb_info *sbi = ll_i2sbi(inode); struct ll_inode_info *lli = ll_i2info(inode); int res; - res = ll_inode_revalidate(de, MDS_INODELOCK_UPDATE | - MDS_INODELOCK_LOOKUP); + res = ll_inode_revalidate(path->dentry, + MDS_INODELOCK_UPDATE | MDS_INODELOCK_LOOKUP); ll_stats_ops_tally(sbi, LPROC_LL_GETATTR, 1); if (res) diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h index ecdfd0c29b7ff9..55f68acd85d1fc 100644 --- a/drivers/staging/lustre/lustre/llite/llite_internal.h +++ b/drivers/staging/lustre/lustre/llite/llite_internal.h @@ -750,7 +750,8 @@ int ll_file_open(struct inode *inode, struct file *file); int ll_file_release(struct inode *inode, struct file *file); int ll_release_openhandle(struct inode *, struct lookup_intent *); int ll_md_real_close(struct inode *inode, fmode_t fmode); -int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat); +int ll_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags); struct posix_acl *ll_get_acl(struct inode *inode, int type); int ll_migrate(struct inode *parent, struct file *file, int mdtidx, const char *name, int namelen); diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec.c b/drivers/staging/lustre/lustre/ptlrpc/sec.c index e860df7c45a2bd..366f2ce20f5ebb 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/sec.c +++ b/drivers/staging/lustre/lustre/ptlrpc/sec.c @@ -38,7 +38,9 @@ #include "../../include/linux/libcfs/libcfs.h" #include +#include #include +#include #include "../include/obd.h" #include "../include/obd_class.h" diff --git a/drivers/staging/media/lirc/lirc_sir.c b/drivers/staging/media/lirc/lirc_sir.c index c75ae43095ba55..c6c3de94adaa25 100644 --- a/drivers/staging/media/lirc/lirc_sir.c +++ b/drivers/staging/media/lirc/lirc_sir.c @@ -36,7 +36,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include -#include +#include #include #include #include diff --git a/drivers/staging/media/lirc/lirc_zilog.c b/drivers/staging/media/lirc/lirc_zilog.c index 34aac3e2eb871f..e4a533b6beb376 100644 --- a/drivers/staging/media/lirc/lirc_zilog.c +++ b/drivers/staging/media/lirc/lirc_zilog.c @@ -42,7 +42,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/staging/media/platform/bcm2835/mmal-vchiq.c b/drivers/staging/media/platform/bcm2835/mmal-vchiq.c index f0639ee6c8b998..fdfb6a620a4314 100644 --- a/drivers/staging/media/platform/bcm2835/mmal-vchiq.c +++ b/drivers/staging/media/platform/bcm2835/mmal-vchiq.c @@ -397,8 +397,10 @@ buffer_from_host(struct vchiq_mmal_instance *instance, /* get context */ msg_context = get_msg_context(instance); - if (msg_context == NULL) - return -ENOMEM; + if (!msg_context) { + ret = -ENOMEM; + 
goto unlock; + } /* store bulk message context for when data arrives */ msg_context->u.bulk.instance = instance; @@ -454,6 +456,7 @@ buffer_from_host(struct vchiq_mmal_instance *instance, vchi_service_release(instance->handle); +unlock: mutex_unlock(&instance->bulk_mutex); return ret; diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c index 7f8cf875157c60..65a2856319948e 100644 --- a/drivers/staging/octeon/ethernet-rx.c +++ b/drivers/staging/octeon/ethernet-rx.c @@ -336,7 +336,6 @@ static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget) if (likely((port < TOTAL_NUMBER_OF_PORTS) && cvm_oct_device[port])) { struct net_device *dev = cvm_oct_device[port]; - struct octeon_ethernet *priv = netdev_priv(dev); /* * Only accept packets for devices that are diff --git a/drivers/staging/rtl8188eu/include/osdep_service.h b/drivers/staging/rtl8188eu/include/osdep_service.h index ee3f5ee0652976..9e390648d93e1f 100644 --- a/drivers/staging/rtl8188eu/include/osdep_service.h +++ b/drivers/staging/rtl8188eu/include/osdep_service.h @@ -37,7 +37,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/staging/rtl8712/osdep_service.h b/drivers/staging/rtl8712/osdep_service.h index b8a17097843487..5d33020554cd5d 100644 --- a/drivers/staging/rtl8712/osdep_service.h +++ b/drivers/staging/rtl8712/osdep_service.h @@ -33,7 +33,7 @@ #include #include -#include +#include #include #include #include diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c index f19b6b27aa7142..5346c657485df8 100644 --- a/drivers/staging/rtl8712/rtl8712_cmd.c +++ b/drivers/staging/rtl8712/rtl8712_cmd.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c index ff68a384f9c218..d2ff0afd685aea 100644 --- a/drivers/staging/speakup/speakup_soft.c +++ b/drivers/staging/speakup/speakup_soft.c @@ -22,7 +22,7 @@ #include #include /* for misc_register, and SYNTH_MINOR */ #include /* for poll_wait() */ -#include /* schedule(), signal_pending(), TASK_INTERRUPTIBLE */ +#include /* schedule(), signal_pending(), TASK_INTERRUPTIBLE */ #include "spk_priv.h" #include "speakup.h" diff --git a/drivers/staging/vc04_services/Kconfig b/drivers/staging/vc04_services/Kconfig index e61e4ca064a8ab..74094fff436781 100644 --- a/drivers/staging/vc04_services/Kconfig +++ b/drivers/staging/vc04_services/Kconfig @@ -1,6 +1,7 @@ config BCM2835_VCHIQ tristate "Videocore VCHIQ" depends on HAS_DMA + depends on OF depends on RASPBERRYPI_FIRMWARE || (COMPILE_TEST && !RASPBERRYPI_FIRMWARE) default y help diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c index e6241fb5cfa695..3aeffcb9c87e91 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c @@ -121,8 +121,14 @@ int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state) if (err < 0) return err; - (void)of_property_read_u32(dev->of_node, "cache-line-size", + err = of_property_read_u32(dev->of_node, "cache-line-size", &g_cache_line_size); + + if (err) { + dev_err(dev, "Missing cache-line-size property\n"); + return -ENODEV; + } + g_fragments_size = 2 * g_cache_line_size; /* Allocate space for the channels in coherent memory */ diff --git 
a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c index cb0b7ca36b1ec6..8a0d214f6e9b82 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c @@ -34,6 +34,7 @@ #include #include +#include #include #include #include diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.h index 4055d4bf9f740a..e63964f5a18a87 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.h +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.h @@ -47,7 +47,7 @@ #include #include #include -#include +#include #include #include #include /* for time_t */ diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c index 2fb1bf1a26c5e6..37a05185dcbe0e 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c @@ -872,7 +872,8 @@ cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip, goto out; csk->mtu = ndev->mtu; csk->tx_chan = cxgb4_port_chan(ndev); - csk->smac_idx = (cxgb4_port_viid(ndev) & 0x7F) << 1; + csk->smac_idx = cxgb4_tp_smt_idx(cdev->lldi.adapter_type, + cxgb4_port_viid(ndev)); step = cdev->lldi.ntxq / cdev->lldi.nchan; csk->txq_idx = cxgb4_port_idx(ndev) * step; @@ -907,7 +908,8 @@ cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip, port_id = cxgb4_port_idx(ndev); csk->mtu = dst_mtu(dst); csk->tx_chan = cxgb4_port_chan(ndev); - csk->smac_idx = (cxgb4_port_viid(ndev) & 0x7F) << 1; + csk->smac_idx = cxgb4_tp_smt_idx(cdev->lldi.adapter_type, + cxgb4_port_viid(ndev)); step = cdev->lldi.ntxq / cdev->lldi.nports; csk->txq_idx = (port_id * step) + @@ -1066,6 +1068,7 @@ cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req) struct sk_buff *skb; const struct tcphdr *tcph; struct cpl_t5_pass_accept_rpl *rpl5; + struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi; unsigned int len = roundup(sizeof(*rpl5), 16); unsigned int mtu_idx; u64 opt0; @@ -1111,6 +1114,9 @@ cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req) opt2 = RX_CHANNEL_V(0) | RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid); + if (!is_t5(lldi->adapter_type)) + opt2 |= RX_FC_DISABLE_F; + if (req->tcpopt.tstamp) opt2 |= TSTAMPS_EN_F; if (req->tcpopt.sack) @@ -1119,8 +1125,13 @@ cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req) opt2 |= WND_SCALE_EN_F; hlen = ntohl(req->hdr_len); - tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) + - IP_HDR_LEN_G(hlen); + + if (is_t5(lldi->adapter_type)) + tcph = (struct tcphdr *)((u8 *)(req + 1) + + ETH_HDR_LEN_G(hlen) + IP_HDR_LEN_G(hlen)); + else + tcph = (struct tcphdr *)((u8 *)(req + 1) + + T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen)); if (tcph->ece && tcph->cwr) opt2 |= CCTRL_ECN_V(1); @@ -1726,7 +1737,7 @@ static bool cxgbit_credit_err(const struct cxgbit_sock *csk) } while (skb) { - credit += skb->csum; + credit += (__force u32)skb->csum; skb = cxgbit_skcb_tx_wr_next(skb); } @@ -1753,6 +1764,7 @@ static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb) while (credits) { struct sk_buff *p = cxgbit_sock_peek_wr(csk); + const u32 csum = (__force u32)p->csum; if (unlikely(!p)) { pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n", @@ -1761,17 +1773,17 @@ static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb) break; } - if 
(unlikely(credits < p->csum)) { + if (unlikely(credits < csum)) { pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n", csk, csk->tid, credits, csk->wr_cred, csk->wr_una_cred, - p->csum); - p->csum -= credits; + csum); + p->csum = (__force __wsum)(csum - credits); break; } cxgbit_sock_dequeue_wr(csk); - credits -= p->csum; + credits -= csum; kfree_skb(p); } diff --git a/drivers/target/iscsi/cxgbit/cxgbit_lro.h b/drivers/target/iscsi/cxgbit/cxgbit_lro.h index 28c11bd1b9308c..dcaed3a1d23f87 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_lro.h +++ b/drivers/target/iscsi/cxgbit/cxgbit_lro.h @@ -31,8 +31,9 @@ enum cxgbit_pducb_flags { PDUCBF_RX_DATA = (1 << 1), /* received pdu payload */ PDUCBF_RX_STATUS = (1 << 2), /* received ddp status */ PDUCBF_RX_DATA_DDPD = (1 << 3), /* pdu payload ddp'd */ - PDUCBF_RX_HCRC_ERR = (1 << 4), /* header digest error */ - PDUCBF_RX_DCRC_ERR = (1 << 5), /* data digest error */ + PDUCBF_RX_DDP_CMP = (1 << 4), /* ddp completion */ + PDUCBF_RX_HCRC_ERR = (1 << 5), /* header digest error */ + PDUCBF_RX_DCRC_ERR = (1 << 6), /* data digest error */ }; struct cxgbit_lro_pdu_cb { diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c index 96eedfc49c9428..4fd775ace541a9 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_main.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c @@ -165,29 +165,24 @@ static int cxgbit_uld_state_change(void *handle, enum cxgb4_state state) } static void -cxgbit_proc_ddp_status(unsigned int tid, struct cpl_rx_data_ddp *cpl, - struct cxgbit_lro_pdu_cb *pdu_cb) +cxgbit_process_ddpvld(struct cxgbit_sock *csk, struct cxgbit_lro_pdu_cb *pdu_cb, + u32 ddpvld) { - unsigned int status = ntohl(cpl->ddpvld); - pdu_cb->flags |= PDUCBF_RX_STATUS; - pdu_cb->ddigest = ntohl(cpl->ulp_crc); - pdu_cb->pdulen = ntohs(cpl->len); - - if (status & (1 << CPL_RX_ISCSI_DDP_STATUS_HCRC_SHIFT)) { - pr_info("tid 0x%x, status 0x%x, hcrc bad.\n", tid, status); + if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_HCRC_SHIFT)) { + pr_info("tid 0x%x, status 0x%x, hcrc bad.\n", csk->tid, ddpvld); pdu_cb->flags |= PDUCBF_RX_HCRC_ERR; } - if (status & (1 << CPL_RX_ISCSI_DDP_STATUS_DCRC_SHIFT)) { - pr_info("tid 0x%x, status 0x%x, dcrc bad.\n", tid, status); + if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_DCRC_SHIFT)) { + pr_info("tid 0x%x, status 0x%x, dcrc bad.\n", csk->tid, ddpvld); pdu_cb->flags |= PDUCBF_RX_DCRC_ERR; } - if (status & (1 << CPL_RX_ISCSI_DDP_STATUS_PAD_SHIFT)) - pr_info("tid 0x%x, status 0x%x, pad bad.\n", tid, status); + if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_PAD_SHIFT)) + pr_info("tid 0x%x, status 0x%x, pad bad.\n", csk->tid, ddpvld); - if ((status & (1 << CPL_RX_ISCSI_DDP_STATUS_DDP_SHIFT)) && + if ((ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_DDP_SHIFT)) && (!(pdu_cb->flags & PDUCBF_RX_DATA))) { pdu_cb->flags |= PDUCBF_RX_DATA_DDPD; } @@ -201,13 +196,17 @@ cxgbit_lro_add_packet_rsp(struct sk_buff *skb, u8 op, const __be64 *rsp) lro_cb->pdu_idx); struct cpl_rx_iscsi_ddp *cpl = (struct cpl_rx_iscsi_ddp *)(rsp + 1); - cxgbit_proc_ddp_status(lro_cb->csk->tid, cpl, pdu_cb); + cxgbit_process_ddpvld(lro_cb->csk, pdu_cb, be32_to_cpu(cpl->ddpvld)); + + pdu_cb->flags |= PDUCBF_RX_STATUS; + pdu_cb->ddigest = ntohl(cpl->ulp_crc); + pdu_cb->pdulen = ntohs(cpl->len); if (pdu_cb->flags & PDUCBF_RX_HDR) pdu_cb->complete = true; - lro_cb->complete = true; lro_cb->pdu_totallen += pdu_cb->pdulen; + lro_cb->complete = true; lro_cb->pdu_idx++; } @@ -257,7 +256,7 @@ cxgbit_lro_add_packet_gl(struct sk_buff *skb, u8 op, const struct pkt_gl *gl) 
cxgbit_skcb_flags(skb) = 0; lro_cb->complete = false; - } else { + } else if (op == CPL_ISCSI_DATA) { struct cpl_iscsi_data *cpl = (struct cpl_iscsi_data *)gl->va; offset = sizeof(struct cpl_iscsi_data); @@ -267,6 +266,36 @@ cxgbit_lro_add_packet_gl(struct sk_buff *skb, u8 op, const struct pkt_gl *gl) pdu_cb->doffset = lro_cb->offset; pdu_cb->nr_dfrags = gl->nfrags; pdu_cb->dfrag_idx = skb_shinfo(skb)->nr_frags; + lro_cb->complete = false; + } else { + struct cpl_rx_iscsi_cmp *cpl; + + cpl = (struct cpl_rx_iscsi_cmp *)gl->va; + offset = sizeof(struct cpl_rx_iscsi_cmp); + pdu_cb->flags |= (PDUCBF_RX_HDR | PDUCBF_RX_STATUS); + len = be16_to_cpu(cpl->len); + pdu_cb->hdr = gl->va + offset; + pdu_cb->hlen = len; + pdu_cb->hfrag_idx = skb_shinfo(skb)->nr_frags; + pdu_cb->ddigest = be32_to_cpu(cpl->ulp_crc); + pdu_cb->pdulen = ntohs(cpl->len); + + if (unlikely(gl->nfrags > 1)) + cxgbit_skcb_flags(skb) = 0; + + cxgbit_process_ddpvld(lro_cb->csk, pdu_cb, + be32_to_cpu(cpl->ddpvld)); + + if (pdu_cb->flags & PDUCBF_RX_DATA_DDPD) { + pdu_cb->flags |= PDUCBF_RX_DDP_CMP; + pdu_cb->complete = true; + } else if (pdu_cb->flags & PDUCBF_RX_DATA) { + pdu_cb->complete = true; + } + + lro_cb->pdu_totallen += pdu_cb->hlen + pdu_cb->dlen; + lro_cb->complete = true; + lro_cb->pdu_idx++; } cxgbit_copy_frags(skb, gl, offset); @@ -413,6 +442,7 @@ cxgbit_uld_lro_rx_handler(void *hndl, const __be64 *rsp, switch (op) { case CPL_ISCSI_HDR: case CPL_ISCSI_DATA: + case CPL_RX_ISCSI_CMP: case CPL_RX_ISCSI_DDP: case CPL_FW4_ACK: lro_flush = false; @@ -454,12 +484,13 @@ cxgbit_uld_lro_rx_handler(void *hndl, const __be64 *rsp, if (unlikely(op != *(u8 *)gl->va)) { pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n", gl->va, be64_to_cpu(*rsp), - be64_to_cpu(*(u64 *)gl->va), + get_unaligned_be64(gl->va), gl->tot_len); return 0; } - if (op == CPL_ISCSI_HDR || op == CPL_ISCSI_DATA) { + if ((op == CPL_ISCSI_HDR) || (op == CPL_ISCSI_DATA) || + (op == CPL_RX_ISCSI_CMP)) { if (!cxgbit_lro_receive(csk, op, rsp, gl, lro_mgr, napi)) return 0; diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c index 8bcb9b71f76432..bdcc8b4c522a2f 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_target.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c @@ -8,6 +8,8 @@ #include #include +#include + #include #include #include @@ -162,12 +164,14 @@ cxgbit_tx_data_wr(struct cxgbit_sock *csk, struct sk_buff *skb, u32 dlen, u32 len, u32 credits, u32 compl) { struct fw_ofld_tx_data_wr *req; + const struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi; u32 submode = cxgbit_skcb_submode(skb); u32 wr_ulp_mode = 0; u32 hdr_size = sizeof(*req); u32 opcode = FW_OFLD_TX_DATA_WR; u32 immlen = 0; - u32 force = TX_FORCE_V(!submode); + u32 force = is_t5(lldi->adapter_type) ? 
TX_FORCE_V(!submode) : + T6_TX_FORCE_F; if (cxgbit_skcb_flags(skb) & SKCBF_TX_ISO) { opcode = FW_ISCSI_TX_DATA_WR; @@ -243,7 +247,7 @@ void cxgbit_push_tx_frames(struct cxgbit_sock *csk) } __skb_unlink(skb, &csk->txq); set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx); - skb->csum = credits_needed + flowclen16; + skb->csum = (__force __wsum)(credits_needed + flowclen16); csk->wr_cred -= credits_needed; csk->wr_una_cred += credits_needed; @@ -651,26 +655,6 @@ static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk) u32 max_npdu, max_iso_npdu; if (conn->login->leading_connection) { - param = iscsi_find_param_from_key(DATASEQUENCEINORDER, - conn->param_list); - if (!param) { - pr_err("param not found key %s\n", DATASEQUENCEINORDER); - return -1; - } - - if (strcmp(param->value, YES)) - return 0; - - param = iscsi_find_param_from_key(DATAPDUINORDER, - conn->param_list); - if (!param) { - pr_err("param not found key %s\n", DATAPDUINORDER); - return -1; - } - - if (strcmp(param->value, YES)) - return 0; - param = iscsi_find_param_from_key(MAXBURSTLENGTH, conn->param_list); if (!param) { @@ -681,11 +665,6 @@ static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk) if (kstrtou32(param->value, 0, &mbl) < 0) return -1; } else { - if (!conn->sess->sess_ops->DataSequenceInOrder) - return 0; - if (!conn->sess->sess_ops->DataPDUInOrder) - return 0; - mbl = conn->sess->sess_ops->MaxBurstLength; } @@ -704,6 +683,53 @@ static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk) return 0; } +/* + * cxgbit_seq_pdu_inorder() + * @csk: pointer to cxgbit socket structure + * + * This function checks whether data sequence and data + * pdu are in order. + * + * Return: returns -1 on error, 0 if data sequence and + * data pdu are in order, 1 if data sequence or data pdu + * is not in order. 
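The comment above documents a three-way result: -1 for a lookup failure, 0 when both DataSequenceInOrder and DataPDUInOrder are negotiated to Yes, and 1 when either is No; the implementation and its caller in cxgbit_set_params() follow below. As a plain illustration of branching on such a tri-state return (a sketch only, with the parameter plumbing reduced to two ints):

#include <stdio.h>

/* -1 error, 0 both in order, 1 at least one out of order. */
static int seq_pdu_inorder(int seq_in_order, int pdu_in_order)
{
	if (seq_in_order < 0 || pdu_in_order < 0)
		return -1;
	return (seq_in_order && pdu_in_order) ? 0 : 1;
}

int main(void)
{
	int ret = seq_pdu_inorder(1, 0);

	if (ret < 0)
		return 1;			/* fail the setup */
	else if (ret > 0)
		puts("out of order: skip ISO, maybe skip DDP");
	else
		puts("in order: ISO/DDP may be enabled");
	return 0;
}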
+ */ +static int cxgbit_seq_pdu_inorder(struct cxgbit_sock *csk) +{ + struct iscsi_conn *conn = csk->conn; + struct iscsi_param *param; + + if (conn->login->leading_connection) { + param = iscsi_find_param_from_key(DATASEQUENCEINORDER, + conn->param_list); + if (!param) { + pr_err("param not found key %s\n", DATASEQUENCEINORDER); + return -1; + } + + if (strcmp(param->value, YES)) + return 1; + + param = iscsi_find_param_from_key(DATAPDUINORDER, + conn->param_list); + if (!param) { + pr_err("param not found key %s\n", DATAPDUINORDER); + return -1; + } + + if (strcmp(param->value, YES)) + return 1; + + } else { + if (!conn->sess->sess_ops->DataSequenceInOrder) + return 1; + if (!conn->sess->sess_ops->DataPDUInOrder) + return 1; + } + + return 0; +} + static int cxgbit_set_params(struct iscsi_conn *conn) { struct cxgbit_sock *csk = conn->context; @@ -730,11 +756,24 @@ static int cxgbit_set_params(struct iscsi_conn *conn) } if (!erl) { + int ret; + + ret = cxgbit_seq_pdu_inorder(csk); + if (ret < 0) { + return -1; + } else if (ret > 0) { + if (is_t5(cdev->lldi.adapter_type)) + goto enable_ddp; + else + goto enable_digest; + } + if (test_bit(CDEV_ISO_ENABLE, &cdev->flags)) { if (cxgbit_set_iso_npdu(csk)) return -1; } +enable_ddp: if (test_bit(CDEV_DDP_ENABLE, &cdev->flags)) { if (cxgbit_setup_conn_pgidx(csk, ppm->tformat.pgsz_idx_dflt)) @@ -743,6 +782,7 @@ static int cxgbit_set_params(struct iscsi_conn *conn) } } +enable_digest: if (cxgbit_set_digest(csk)) return -1; @@ -983,11 +1023,36 @@ static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk) int rc, sg_nents, sg_off; bool dcrc_err = false; - rc = iscsit_check_dataout_hdr(conn, (unsigned char *)hdr, &cmd); - if (rc < 0) - return rc; - else if (!cmd) - return 0; + if (pdu_cb->flags & PDUCBF_RX_DDP_CMP) { + u32 offset = be32_to_cpu(hdr->offset); + u32 ddp_data_len; + u32 payload_length = ntoh24(hdr->dlength); + bool success = false; + + cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, 0); + if (!cmd) + return 0; + + ddp_data_len = offset - cmd->write_data_done; + atomic_long_add(ddp_data_len, &conn->sess->rx_data_octets); + + cmd->write_data_done = offset; + cmd->next_burst_len = ddp_data_len; + cmd->data_sn = be32_to_cpu(hdr->datasn); + + rc = __iscsit_check_dataout_hdr(conn, (unsigned char *)hdr, + cmd, payload_length, &success); + if (rc < 0) + return rc; + else if (!success) + return 0; + } else { + rc = iscsit_check_dataout_hdr(conn, (unsigned char *)hdr, &cmd); + if (rc < 0) + return rc; + else if (!cmd) + return 0; + } if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) { pr_err("ITT: 0x%08x, Offset: %u, Length: %u," @@ -1351,6 +1416,9 @@ static void cxgbit_lro_hskb_reset(struct cxgbit_sock *csk) for (i = 0; i < ssi->nr_frags; i++) put_page(skb_frag_page(&ssi->frags[i])); ssi->nr_frags = 0; + skb->data_len = 0; + skb->truesize -= skb->len; + skb->len = 0; } static void @@ -1364,39 +1432,42 @@ cxgbit_lro_skb_merge(struct cxgbit_sock *csk, struct sk_buff *skb, u8 pdu_idx) unsigned int len = 0; if (pdu_cb->flags & PDUCBF_RX_HDR) { - hpdu_cb->flags = pdu_cb->flags; + u8 hfrag_idx = hssi->nr_frags; + + hpdu_cb->flags |= pdu_cb->flags; hpdu_cb->seq = pdu_cb->seq; hpdu_cb->hdr = pdu_cb->hdr; hpdu_cb->hlen = pdu_cb->hlen; - memcpy(&hssi->frags[0], &ssi->frags[pdu_cb->hfrag_idx], + memcpy(&hssi->frags[hfrag_idx], &ssi->frags[pdu_cb->hfrag_idx], sizeof(skb_frag_t)); - get_page(skb_frag_page(&hssi->frags[0])); - hssi->nr_frags = 1; - hpdu_cb->frags = 1; - hpdu_cb->hfrag_idx = 0; + get_page(skb_frag_page(&hssi->frags[hfrag_idx])); + 
hssi->nr_frags++; + hpdu_cb->frags++; + hpdu_cb->hfrag_idx = hfrag_idx; - len = hssi->frags[0].size; - hskb->len = len; - hskb->data_len = len; - hskb->truesize = len; + len = hssi->frags[hfrag_idx].size; + hskb->len += len; + hskb->data_len += len; + hskb->truesize += len; } if (pdu_cb->flags & PDUCBF_RX_DATA) { - u8 hfrag_idx = 1, i; + u8 dfrag_idx = hssi->nr_frags, i; hpdu_cb->flags |= pdu_cb->flags; + hpdu_cb->dfrag_idx = dfrag_idx; len = 0; - for (i = 0; i < pdu_cb->nr_dfrags; hfrag_idx++, i++) { - memcpy(&hssi->frags[hfrag_idx], + for (i = 0; i < pdu_cb->nr_dfrags; dfrag_idx++, i++) { + memcpy(&hssi->frags[dfrag_idx], &ssi->frags[pdu_cb->dfrag_idx + i], sizeof(skb_frag_t)); - get_page(skb_frag_page(&hssi->frags[hfrag_idx])); + get_page(skb_frag_page(&hssi->frags[dfrag_idx])); - len += hssi->frags[hfrag_idx].size; + len += hssi->frags[dfrag_idx].size; hssi->nr_frags++; hpdu_cb->frags++; @@ -1405,7 +1476,6 @@ cxgbit_lro_skb_merge(struct cxgbit_sock *csk, struct sk_buff *skb, u8 pdu_idx) hpdu_cb->dlen = pdu_cb->dlen; hpdu_cb->doffset = hpdu_cb->hlen; hpdu_cb->nr_dfrags = pdu_cb->nr_dfrags; - hpdu_cb->dfrag_idx = 1; hskb->len += len; hskb->data_len += len; hskb->truesize += len; @@ -1490,10 +1560,15 @@ static int cxgbit_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb) static int cxgbit_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb) { + struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi; int ret = -1; - if (likely(cxgbit_skcb_flags(skb) & SKCBF_RX_LRO)) - ret = cxgbit_rx_lro_skb(csk, skb); + if (likely(cxgbit_skcb_flags(skb) & SKCBF_RX_LRO)) { + if (is_t5(lldi->adapter_type)) + ret = cxgbit_rx_lro_skb(csk, skb); + else + ret = cxgbit_process_lro_skb(csk, skb); + } __kfree_skb(skb); return ret; diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index da2c73a255dec1..a91802432f2f47 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -1431,36 +1432,17 @@ static void iscsit_do_crypto_hash_buf( } int -iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf, - struct iscsi_cmd **out_cmd) +__iscsit_check_dataout_hdr(struct iscsi_conn *conn, void *buf, + struct iscsi_cmd *cmd, u32 payload_length, + bool *success) { - struct iscsi_data *hdr = (struct iscsi_data *)buf; - struct iscsi_cmd *cmd = NULL; + struct iscsi_data *hdr = buf; struct se_cmd *se_cmd; - u32 payload_length = ntoh24(hdr->dlength); int rc; - if (!payload_length) { - pr_warn("DataOUT payload is ZERO, ignoring.\n"); - return 0; - } - /* iSCSI write */ atomic_long_add(payload_length, &conn->sess->rx_data_octets); - if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) { - pr_err("DataSegmentLength: %u is greater than" - " MaxXmitDataSegmentLength: %u\n", payload_length, - conn->conn_ops->MaxXmitDataSegmentLength); - return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, - buf); - } - - cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, - payload_length); - if (!cmd) - return 0; - pr_debug("Got DataOut ITT: 0x%08x, TTT: 0x%08x," " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n", hdr->itt, hdr->ttt, hdr->datasn, ntohl(hdr->offset), @@ -1545,7 +1527,7 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf, } } /* - * Preform DataSN, DataSequenceInOrder, DataPDUInOrder, and + * Perform DataSN, DataSequenceInOrder, DataPDUInOrder, and * within-command recovery checks before receiving the payload. 
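The iscsit_check_dataout_hdr() change in this hunk splits the routine in two: the exported wrapper keeps the transport-level validation (zero-length PDU, MaxXmitDataSegmentLength, command lookup) and a new __iscsit_check_dataout_hdr() performs the per-command checks, reporting through a bool success flag so an offload path that already holds the command can call it directly. A compact user-space sketch of that wrapper/helper shape; the names and the struct are invented stand-ins, not the target-core types.

#include <stdbool.h>
#include <stdio.h>

struct cmd { unsigned int expected_len; };

/* Per-command checks; *success tells the caller whether to continue. */
static int __check_hdr(struct cmd *cmd, unsigned int len, bool *success)
{
	if (len > cmd->expected_len)
		return -1;		/* unrecoverable protocol error */
	*success = true;
	return 0;
}

/* Wrapper: transport-level validation plus command lookup. */
static int check_hdr(struct cmd *table, unsigned int itt,
		     unsigned int len, struct cmd **out)
{
	bool success = false;
	int rc;

	if (!len)
		return 0;		/* nothing to receive, silently ignore */

	rc = __check_hdr(&table[itt], len, &success);
	if (success)
		*out = &table[itt];
	return rc;
}

int main(void)
{
	struct cmd table[1] = { { .expected_len = 512 } };
	struct cmd *cmd = NULL;

	if (!check_hdr(table, 0, 512, &cmd) && cmd)
		puts("header accepted, receive payload");
	return 0;
}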
*/ rc = iscsit_check_pre_dataout(cmd, buf); @@ -1553,10 +1535,44 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf, return 0; else if (rc == DATAOUT_CANNOT_RECOVER) return -1; - - *out_cmd = cmd; + *success = true; return 0; } +EXPORT_SYMBOL(__iscsit_check_dataout_hdr); + +int +iscsit_check_dataout_hdr(struct iscsi_conn *conn, void *buf, + struct iscsi_cmd **out_cmd) +{ + struct iscsi_data *hdr = buf; + struct iscsi_cmd *cmd; + u32 payload_length = ntoh24(hdr->dlength); + int rc; + bool success = false; + + if (!payload_length) { + pr_warn_ratelimited("DataOUT payload is ZERO, ignoring.\n"); + return 0; + } + + if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) { + pr_err_ratelimited("DataSegmentLength: %u is greater than" + " MaxXmitDataSegmentLength: %u\n", payload_length, + conn->conn_ops->MaxXmitDataSegmentLength); + return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, buf); + } + + cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, payload_length); + if (!cmd) + return 0; + + rc = __iscsit_check_dataout_hdr(conn, buf, cmd, payload_length, &success); + + if (success) + *out_cmd = cmd; + + return rc; +} EXPORT_SYMBOL(iscsit_check_dataout_hdr); static int @@ -1920,6 +1936,28 @@ static int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd, return ret; } +static enum tcm_tmreq_table iscsit_convert_tmf(u8 iscsi_tmf) +{ + switch (iscsi_tmf) { + case ISCSI_TM_FUNC_ABORT_TASK: + return TMR_ABORT_TASK; + case ISCSI_TM_FUNC_ABORT_TASK_SET: + return TMR_ABORT_TASK_SET; + case ISCSI_TM_FUNC_CLEAR_ACA: + return TMR_CLEAR_ACA; + case ISCSI_TM_FUNC_CLEAR_TASK_SET: + return TMR_CLEAR_TASK_SET; + case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET: + return TMR_LUN_RESET; + case ISCSI_TM_FUNC_TARGET_WARM_RESET: + return TMR_TARGET_WARM_RESET; + case ISCSI_TM_FUNC_TARGET_COLD_RESET: + return TMR_TARGET_COLD_RESET; + default: + return TMR_UNKNOWN; + } +} + int iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, unsigned char *buf) @@ -1929,7 +1967,7 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, struct iscsi_tm *hdr; int out_of_order_cmdsn = 0, ret; bool sess_ref = false; - u8 function; + u8 function, tcm_function = TMR_UNKNOWN; hdr = (struct iscsi_tm *) buf; hdr->flags &= ~ISCSI_FLAG_CMD_FINAL; @@ -1975,54 +2013,27 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, * LIO-Target $FABRIC_MOD */ if (function != ISCSI_TM_FUNC_TASK_REASSIGN) { - - u8 tcm_function; - int ret; - transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops, conn->sess->se_sess, 0, DMA_NONE, TCM_SIMPLE_TAG, cmd->sense_buffer + 2); target_get_sess_cmd(&cmd->se_cmd, true); sess_ref = true; - - switch (function) { - case ISCSI_TM_FUNC_ABORT_TASK: - tcm_function = TMR_ABORT_TASK; - break; - case ISCSI_TM_FUNC_ABORT_TASK_SET: - tcm_function = TMR_ABORT_TASK_SET; - break; - case ISCSI_TM_FUNC_CLEAR_ACA: - tcm_function = TMR_CLEAR_ACA; - break; - case ISCSI_TM_FUNC_CLEAR_TASK_SET: - tcm_function = TMR_CLEAR_TASK_SET; - break; - case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET: - tcm_function = TMR_LUN_RESET; - break; - case ISCSI_TM_FUNC_TARGET_WARM_RESET: - tcm_function = TMR_TARGET_WARM_RESET; - break; - case ISCSI_TM_FUNC_TARGET_COLD_RESET: - tcm_function = TMR_TARGET_COLD_RESET; - break; - default: + tcm_function = iscsit_convert_tmf(function); + if (tcm_function == TMR_UNKNOWN) { pr_err("Unknown iSCSI TMR Function:" " 0x%02x\n", function); return iscsit_add_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); } - - ret = 
core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req, - tcm_function, GFP_KERNEL); - if (ret < 0) - return iscsit_add_reject_cmd(cmd, + } + ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req, tcm_function, + GFP_KERNEL); + if (ret < 0) + return iscsit_add_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); - cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req; - } + cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req; cmd->iscsi_opcode = ISCSI_OP_SCSI_TMFUNC; cmd->i_state = ISTATE_SEND_TASKMGTRSP; @@ -4136,7 +4147,7 @@ int iscsit_close_connection( /* * During Connection recovery drop unacknowledged out of order * commands for this connection, and prepare the other commands - * for realligence. + * for reallegiance. * * During normal operation clear the out of order commands (but * do not free the struct iscsi_ooo_cmdsn's) and release all @@ -4144,7 +4155,7 @@ int iscsit_close_connection( */ if (atomic_read(&conn->connection_recovery)) { iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(conn); - iscsit_prepare_cmds_for_realligance(conn); + iscsit_prepare_cmds_for_reallegiance(conn); } else { iscsit_clear_ooo_cmdsns_for_conn(conn); iscsit_release_commands_from_conn(conn); diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c index b54e72c7ab0fa5..9a96e17bf7cd5f 100644 --- a/drivers/target/iscsi/iscsi_target_erl0.c +++ b/drivers/target/iscsi/iscsi_target_erl0.c @@ -17,6 +17,8 @@ * GNU General Public License for more details. ******************************************************************************/ +#include + #include #include #include @@ -44,10 +46,8 @@ void iscsit_set_dataout_sequence_values( */ if (cmd->unsolicited_data) { cmd->seq_start_offset = cmd->write_data_done; - cmd->seq_end_offset = (cmd->write_data_done + - ((cmd->se_cmd.data_length > - conn->sess->sess_ops->FirstBurstLength) ? 
- conn->sess->sess_ops->FirstBurstLength : cmd->se_cmd.data_length)); + cmd->seq_end_offset = min(cmd->se_cmd.data_length, + conn->sess->sess_ops->FirstBurstLength); return; } diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c index faf9ae014b3044..8df9c90f3db3e9 100644 --- a/drivers/target/iscsi/iscsi_target_erl2.c +++ b/drivers/target/iscsi/iscsi_target_erl2.c @@ -312,7 +312,7 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn) return 0; } -int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) +int iscsit_prepare_cmds_for_reallegiance(struct iscsi_conn *conn) { u32 cmd_count = 0; struct iscsi_cmd *cmd, *cmd_tmp; @@ -347,7 +347,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) if ((cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD) && (cmd->iscsi_opcode != ISCSI_OP_NOOP_OUT)) { - pr_debug("Not performing realligence on" + pr_debug("Not performing reallegiance on" " Opcode: 0x%02x, ITT: 0x%08x, CmdSN: 0x%08x," " CID: %hu\n", cmd->iscsi_opcode, cmd->init_task_tag, cmd->cmd_sn, conn->cid); @@ -382,7 +382,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) cmd_count++; pr_debug("Preparing Opcode: 0x%02x, ITT: 0x%08x," " CmdSN: 0x%08x, StatSN: 0x%08x, CID: %hu for" - " realligence.\n", cmd->iscsi_opcode, + " reallegiance.\n", cmd->iscsi_opcode, cmd->init_task_tag, cmd->cmd_sn, cmd->stat_sn, conn->cid); diff --git a/drivers/target/iscsi/iscsi_target_erl2.h b/drivers/target/iscsi/iscsi_target_erl2.h index 7965f1e865061e..634d01e136521c 100644 --- a/drivers/target/iscsi/iscsi_target_erl2.h +++ b/drivers/target/iscsi/iscsi_target_erl2.h @@ -19,7 +19,7 @@ extern int iscsit_remove_cmd_from_connection_recovery(struct iscsi_cmd *, struct iscsi_session *); extern void iscsit_discard_cr_cmds_by_expstatsn(struct iscsi_conn_recovery *, u32); extern int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *); -extern int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *); +extern int iscsit_prepare_cmds_for_reallegiance(struct iscsi_conn *); extern int iscsit_connection_recovery_transport_reset(struct iscsi_conn *); #endif /*** ISCSI_TARGET_ERL2_H ***/ diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index eab274d17b5cbd..ad8f3011bdc2f1 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include /* TCP_NODELAY */ #include /* ipv6_addr_v4mapped() */ @@ -223,7 +224,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn) return 0; pr_debug("%s iSCSI Session SID %u is still active for %s," - " preforming session reinstatement.\n", (sessiontype) ? + " performing session reinstatement.\n", (sessiontype) ? 
"Discovery" : "Normal", sess->sid, sess->sess_ops->InitiatorName); diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 46388c9e08dad3..7ccc9c1cbfd1a6 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -1249,16 +1250,16 @@ int iscsi_target_start_negotiation( { int ret; - if (conn->sock) { - struct sock *sk = conn->sock->sk; + if (conn->sock) { + struct sock *sk = conn->sock->sk; - write_lock_bh(&sk->sk_callback_lock); - set_bit(LOGIN_FLAGS_READY, &conn->login_flags); - write_unlock_bh(&sk->sk_callback_lock); - } + write_lock_bh(&sk->sk_callback_lock); + set_bit(LOGIN_FLAGS_READY, &conn->login_flags); + write_unlock_bh(&sk->sk_callback_lock); + } - ret = iscsi_target_do_login(conn, login); - if (ret < 0) { + ret = iscsi_target_do_login(conn, login); + if (ret < 0) { cancel_delayed_work_sync(&conn->login_work); cancel_delayed_work_sync(&conn->login_cleanup_work); iscsi_target_restore_sock_callbacks(conn); diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c index 3d637055c36f05..cb231c907d5119 100644 --- a/drivers/target/iscsi/iscsi_target_tmr.c +++ b/drivers/target/iscsi/iscsi_target_tmr.c @@ -440,14 +440,14 @@ static int iscsit_task_reassign_complete( break; default: pr_err("Illegal iSCSI Opcode 0x%02x during" - " command realligence\n", cmd->iscsi_opcode); + " command reallegiance\n", cmd->iscsi_opcode); return -1; } if (ret != 0) return ret; - pr_debug("Completed connection realligence for Opcode: 0x%02x," + pr_debug("Completed connection reallegiance for Opcode: 0x%02x," " ITT: 0x%08x to CID: %hu.\n", cmd->iscsi_opcode, cmd->init_task_tag, conn->cid); diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index b5a1b4ccba124d..5041a9c8bdcbfd 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -417,6 +417,7 @@ struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump( return NULL; } +EXPORT_SYMBOL(iscsit_find_cmd_from_itt_or_dump); struct iscsi_cmd *iscsit_find_cmd_from_ttt( struct iscsi_conn *conn, @@ -1304,39 +1305,6 @@ static int iscsit_do_rx_data( return total_rx; } -static int iscsit_do_tx_data( - struct iscsi_conn *conn, - struct iscsi_data_count *count) -{ - int ret, iov_len; - struct kvec *iov_p; - struct msghdr msg; - - if (!conn || !conn->sock || !conn->conn_ops) - return -1; - - if (count->data_length <= 0) { - pr_err("Data length is: %d\n", count->data_length); - return -1; - } - - memset(&msg, 0, sizeof(struct msghdr)); - - iov_p = count->iov; - iov_len = count->iov_count; - - ret = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len, - count->data_length); - if (ret != count->data_length) { - pr_err("Unexpected ret: %d send data %d\n", - ret, count->data_length); - return -EPIPE; - } - pr_debug("ret: %d, sent data: %d\n", ret, count->data_length); - - return ret; -} - int rx_data( struct iscsi_conn *conn, struct kvec *iov, @@ -1363,45 +1331,35 @@ int tx_data( int iov_count, int data) { - struct iscsi_data_count c; + struct msghdr msg; + int total_tx = 0; if (!conn || !conn->sock || !conn->conn_ops) return -1; - memset(&c, 0, sizeof(struct iscsi_data_count)); - c.iov = iov; - c.iov_count = iov_count; - c.data_length = data; - c.type = ISCSI_TX_DATA; + if (data <= 0) { + pr_err("Data length is: %d\n", data); + return -1; + } - return iscsit_do_tx_data(conn, &c); -} + 
memset(&msg, 0, sizeof(struct msghdr)); -static bool sockaddr_equal(struct sockaddr_storage *x, struct sockaddr_storage *y) -{ - switch (x->ss_family) { - case AF_INET: { - struct sockaddr_in *sinx = (struct sockaddr_in *)x; - struct sockaddr_in *siny = (struct sockaddr_in *)y; - if (sinx->sin_addr.s_addr != siny->sin_addr.s_addr) - return false; - if (sinx->sin_port != siny->sin_port) - return false; - break; - } - case AF_INET6: { - struct sockaddr_in6 *sinx = (struct sockaddr_in6 *)x; - struct sockaddr_in6 *siny = (struct sockaddr_in6 *)y; - if (!ipv6_addr_equal(&sinx->sin6_addr, &siny->sin6_addr)) - return false; - if (sinx->sin6_port != siny->sin6_port) - return false; - break; - } - default: - return false; + iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, + iov, iov_count, data); + + while (msg_data_left(&msg)) { + int tx_loop = sock_sendmsg(conn->sock, &msg); + if (tx_loop <= 0) { + pr_debug("tx_loop: %d total_tx %d\n", + tx_loop, total_tx); + return tx_loop; + } + total_tx += tx_loop; + pr_debug("tx_loop: %d, total_tx: %d, data: %d\n", + tx_loop, total_tx, data); } - return true; + + return total_tx; } void iscsit_collect_login_stats( @@ -1420,13 +1378,6 @@ void iscsit_collect_login_stats( ls = &tiqn->login_stats; spin_lock(&ls->lock); - if (sockaddr_equal(&conn->login_sockaddr, &ls->last_intr_fail_sockaddr) && - ((get_jiffies_64() - ls->last_fail_time) < 10)) { - /* We already have the failure info for this login */ - spin_unlock(&ls->lock); - return; - } - if (status_class == ISCSI_STATUS_CLS_SUCCESS) ls->accepts++; else if (status_class == ISCSI_STATUS_CLS_REDIRECT) { @@ -1471,10 +1422,10 @@ struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *conn) { struct iscsi_portal_group *tpg; - if (!conn || !conn->sess) + if (!conn) return NULL; - tpg = conn->sess->tpg; + tpg = conn->tpg; if (!tpg) return NULL; diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index f5e330099bfca7..fd7c16a7ca6e06 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -43,7 +43,7 @@ #include "target_core_ua.h" static sense_reason_t core_alua_check_transition(int state, int valid, - int *primary); + int *primary, int explicit); static int core_alua_set_tg_pt_secondary_state( struct se_lun *lun, int explicit, int offline); @@ -335,8 +335,8 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) * the state is a primary or secondary target port asymmetric * access state. */ - rc = core_alua_check_transition(alua_access_state, - valid_states, &primary); + rc = core_alua_check_transition(alua_access_state, valid_states, + &primary, 1); if (rc) { /* * If the SET TARGET PORT GROUPS attempts to establish @@ -691,7 +691,7 @@ target_alua_state_check(struct se_cmd *cmd) if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) return 0; - if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA) return 0; /* @@ -762,7 +762,7 @@ target_alua_state_check(struct se_cmd *cmd) * Check implicit and explicit ALUA state change request. 
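The tx_data() rewrite above drops the iscsi_data_count bookkeeping and the one-shot kernel_sendmsg() call: the kvec array is loaded into the msghdr iterator and sock_sendmsg() is called in a loop until msg_data_left() reports nothing outstanding, so a short send is retried instead of being reported as -EPIPE. A user-space analogue of that partial-write loop over an iovec, using plain writev(); the socket setup and error reporting are left out.

#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

/* Keep calling writev() until every byte of the iovec has been written. */
static ssize_t send_all(int fd, struct iovec *iov, int iovcnt, size_t total)
{
	size_t sent = 0;

	while (sent < total) {
		ssize_t n = writev(fd, iov, iovcnt);

		if (n <= 0)
			return n;	/* error or peer gone: give up */
		sent += (size_t)n;

		/* Advance the iovec past the bytes already written. */
		while (n > 0 && (size_t)n >= iov->iov_len) {
			n -= (ssize_t)iov->iov_len;
			iov++;
			iovcnt--;
		}
		if (n > 0) {
			iov->iov_base = (char *)iov->iov_base + n;
			iov->iov_len -= (size_t)n;
		}
	}
	return (ssize_t)sent;
}

int main(void)
{
	char a[] = "hello ", b[] = "world\n";
	struct iovec iov[2] = {
		{ .iov_base = a, .iov_len = strlen(a) },
		{ .iov_base = b, .iov_len = strlen(b) },
	};

	return send_all(STDOUT_FILENO, iov, 2, strlen(a) + strlen(b)) < 0;
}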
*/ static sense_reason_t -core_alua_check_transition(int state, int valid, int *primary) +core_alua_check_transition(int state, int valid, int *primary, int explicit) { /* * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are @@ -804,11 +804,14 @@ core_alua_check_transition(int state, int valid, int *primary) *primary = 0; break; case ALUA_ACCESS_STATE_TRANSITION: - /* - * Transitioning is set internally, and - * cannot be selected manually. - */ - goto not_supported; + if (!(valid & ALUA_T_SUP) || explicit) + /* + * Transitioning is set internally and by tcmu daemon, + * and cannot be selected through a STPG. + */ + goto not_supported; + *primary = 0; + break; default: pr_err("Unknown ALUA access state: 0x%02x\n", state); return TCM_INVALID_PARAMETER_LIST; @@ -1013,7 +1016,7 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp) static void core_alua_do_transition_tg_pt_work(struct work_struct *work) { struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work, - struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work); + struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work); struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status == ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG); @@ -1070,32 +1073,19 @@ static int core_alua_do_transition_tg_pt( if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state) return 0; - if (new_state == ALUA_ACCESS_STATE_TRANSITION) + if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) return -EAGAIN; /* * Flush any pending transitions */ - if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs && - atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == - ALUA_ACCESS_STATE_TRANSITION) { - /* Just in case */ - tg_pt_gp->tg_pt_gp_alua_pending_state = new_state; - tg_pt_gp->tg_pt_gp_transition_complete = &wait; - flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work); - wait_for_completion(&wait); - tg_pt_gp->tg_pt_gp_transition_complete = NULL; - return 0; - } + if (!explicit) + flush_work(&tg_pt_gp->tg_pt_gp_transition_work); /* * Save the old primary ALUA access state, and set the current state * to ALUA_ACCESS_STATE_TRANSITION. */ - tg_pt_gp->tg_pt_gp_alua_previous_state = - atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); - tg_pt_gp->tg_pt_gp_alua_pending_state = new_state; - atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, ALUA_ACCESS_STATE_TRANSITION); tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ? 
@@ -1104,6 +1094,13 @@ static int core_alua_do_transition_tg_pt( core_alua_queue_state_change_ua(tg_pt_gp); + if (new_state == ALUA_ACCESS_STATE_TRANSITION) + return 0; + + tg_pt_gp->tg_pt_gp_alua_previous_state = + atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); + tg_pt_gp->tg_pt_gp_alua_pending_state = new_state; + /* * Check for the optional ALUA primary state transition delay */ @@ -1117,17 +1114,9 @@ static int core_alua_do_transition_tg_pt( atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); spin_unlock(&dev->t10_alua.tg_pt_gps_lock); - if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) { - unsigned long transition_tmo; - - transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ; - queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq, - &tg_pt_gp->tg_pt_gp_transition_work, - transition_tmo); - } else { + schedule_work(&tg_pt_gp->tg_pt_gp_transition_work); + if (explicit) { tg_pt_gp->tg_pt_gp_transition_complete = &wait; - queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq, - &tg_pt_gp->tg_pt_gp_transition_work, 0); wait_for_completion(&wait); tg_pt_gp->tg_pt_gp_transition_complete = NULL; } @@ -1149,8 +1138,12 @@ int core_alua_do_port_transition( struct t10_alua_tg_pt_gp *tg_pt_gp; int primary, valid_states, rc = 0; + if (l_dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA) + return -ENODEV; + valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states; - if (core_alua_check_transition(new_state, valid_states, &primary) != 0) + if (core_alua_check_transition(new_state, valid_states, &primary, + explicit) != 0) return -EINVAL; local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem; @@ -1695,8 +1688,8 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev, mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex); spin_lock_init(&tg_pt_gp->tg_pt_gp_lock); atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0); - INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work, - core_alua_do_transition_tg_pt_work); + INIT_WORK(&tg_pt_gp->tg_pt_gp_transition_work, + core_alua_do_transition_tg_pt_work); tg_pt_gp->tg_pt_gp_dev = dev; atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED); @@ -1804,7 +1797,7 @@ void core_alua_free_tg_pt_gp( dev->t10_alua.alua_tg_pt_gps_counter--; spin_unlock(&dev->t10_alua.tg_pt_gps_lock); - flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work); + flush_work(&tg_pt_gp->tg_pt_gp_transition_work); /* * Allow a struct t10_alua_tg_pt_gp_member * referenced by @@ -1973,7 +1966,7 @@ ssize_t core_alua_store_tg_pt_gp_info( unsigned char buf[TG_PT_GROUP_NAME_BUF]; int move = 0; - if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH || + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA || (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) return -ENODEV; @@ -2230,7 +2223,7 @@ ssize_t core_alua_store_offline_bit( unsigned long tmp; int ret; - if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH || + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA || (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) return -ENODEV; @@ -2316,7 +2309,8 @@ ssize_t core_alua_store_secondary_write_metadata( int core_setup_alua(struct se_device *dev) { - if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) && + if (!(dev->transport->transport_flags & + TRANSPORT_FLAG_PASSTHROUGH_ALUA) && !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) { struct t10_alua_lu_gp_member *lu_gp_mem; diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 
54b36c9835be3a..38b5025e4c7a87 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -421,6 +421,10 @@ static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo) pr_err("Missing tfo->aborted_task()\n"); return -EINVAL; } + if (!tfo->check_stop_free) { + pr_err("Missing tfo->check_stop_free()\n"); + return -EINVAL; + } /* * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn() * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 26929c44d70316..c754ae33bf7b15 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -78,12 +78,16 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun) &deve->read_bytes); se_lun = rcu_dereference(deve->se_lun); + + if (!percpu_ref_tryget_live(&se_lun->lun_ref)) { + se_lun = NULL; + goto out_unlock; + } + se_cmd->se_lun = rcu_dereference(deve->se_lun); se_cmd->pr_res_key = deve->pr_res_key; se_cmd->orig_fe_lun = unpacked_lun; se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; - - percpu_ref_get(&se_lun->lun_ref); se_cmd->lun_ref_active = true; if ((se_cmd->data_direction == DMA_TO_DEVICE) && @@ -97,6 +101,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun) goto ref_dev; } } +out_unlock: rcu_read_unlock(); if (!se_lun) { @@ -163,7 +168,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun) rcu_read_lock(); deve = target_nacl_find_deve(nacl, unpacked_lun); if (deve) { - se_tmr->tmr_lun = rcu_dereference(deve->se_lun); se_cmd->se_lun = rcu_dereference(deve->se_lun); se_lun = rcu_dereference(deve->se_lun); se_cmd->pr_res_key = deve->pr_res_key; @@ -816,6 +820,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) xcopy_lun = &dev->xcopy_lun; rcu_assign_pointer(xcopy_lun->lun_se_dev, dev); init_completion(&xcopy_lun->lun_ref_comp); + init_completion(&xcopy_lun->lun_shutdown_comp); INIT_LIST_HEAD(&xcopy_lun->lun_deve_list); INIT_LIST_HEAD(&xcopy_lun->lun_dev_link); mutex_init(&xcopy_lun->lun_tg_pt_md_mutex); diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index a8f8e53f2f5748..94cda7991e80ab 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -154,7 +154,7 @@ static void pscsi_tape_read_blocksize(struct se_device *dev, buf = kzalloc(12, GFP_KERNEL); if (!buf) - return; + goto out_free; memset(cdb, 0, MAX_COMMAND_SIZE); cdb[0] = MODE_SENSE; @@ -169,9 +169,10 @@ static void pscsi_tape_read_blocksize(struct se_device *dev, * If MODE_SENSE still returns zero, set the default value to 1024. */ sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]); +out_free: if (!sdev->sector_size) sdev->sector_size = 1024; -out_free: + kfree(buf); } @@ -314,9 +315,10 @@ static int pscsi_add_device_to_list(struct se_device *dev, sd->lun, sd->queue_depth); } - dev->dev_attrib.hw_block_size = sd->sector_size; + dev->dev_attrib.hw_block_size = + min_not_zero((int)sd->sector_size, 512); dev->dev_attrib.hw_max_sectors = - min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q)); + min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q)); dev->dev_attrib.hw_queue_depth = sd->queue_depth; /* @@ -339,8 +341,10 @@ static int pscsi_add_device_to_list(struct se_device *dev, /* * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE. 
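The pscsi changes above guard against a zero sector_size coming back from the SCSI layer: hw_block_size and hw_max_sectors are now derived with min_not_zero(), and the tape probe falls back to 1024 when MODE SENSE returns nothing before overwriting hw_block_size (the TYPE_TAPE branch follows below). min_not_zero() is an existing kernel macro; a user-space rendering of its behaviour, just to make the fallback explicit.

#include <stdio.h>

/* Smaller of a and b, ignoring whichever operand is zero. */
static unsigned int min_not_zero(unsigned int a, unsigned int b)
{
	if (!a)
		return b;
	if (!b)
		return a;
	return a < b ? a : b;
}

int main(void)
{
	printf("%u\n", min_not_zero(0, 512));	/* 512: no size reported yet */
	printf("%u\n", min_not_zero(256, 512));	/* 256 */
	printf("%u\n", min_not_zero(4096, 512));	/* 512 */
	return 0;
}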
*/ - if (sd->type == TYPE_TAPE) + if (sd->type == TYPE_TAPE) { pscsi_tape_read_blocksize(dev, sd); + dev->dev_attrib.hw_block_size = sd->sector_size; + } return 0; } @@ -406,7 +410,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd) /* * Called with struct Scsi_Host->host_lock called. */ -static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd) +static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd) __releases(sh->host_lock) { struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; @@ -433,28 +437,6 @@ static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd) return 0; } -/* - * Called with struct Scsi_Host->host_lock called. - */ -static int pscsi_create_type_other(struct se_device *dev, - struct scsi_device *sd) - __releases(sh->host_lock) -{ - struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; - struct Scsi_Host *sh = sd->host; - int ret; - - spin_unlock_irq(sh->host_lock); - ret = pscsi_add_device_to_list(dev, sd); - if (ret) - return ret; - - pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%llu\n", - phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, - sd->channel, sd->id, sd->lun); - return 0; -} - static int pscsi_configure_device(struct se_device *dev) { struct se_hba *hba = dev->se_hba; @@ -542,11 +524,8 @@ static int pscsi_configure_device(struct se_device *dev) case TYPE_DISK: ret = pscsi_create_type_disk(dev, sd); break; - case TYPE_ROM: - ret = pscsi_create_type_rom(dev, sd); - break; default: - ret = pscsi_create_type_other(dev, sd); + ret = pscsi_create_type_nondisk(dev, sd); break; } @@ -611,8 +590,7 @@ static void pscsi_free_device(struct se_device *dev) else if (pdv->pdv_lld_host) scsi_host_put(pdv->pdv_lld_host); - if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM)) - scsi_device_put(sd); + scsi_device_put(sd); pdv->pdv_sd = NULL; } @@ -1064,7 +1042,6 @@ static sector_t pscsi_get_blocks(struct se_device *dev) if (pdv->pdv_bd && pdv->pdv_bd->bd_part) return pdv->pdv_bd->bd_part->nr_sects; - dump_stack(); return 0; } @@ -1103,7 +1080,8 @@ static void pscsi_req_done(struct request *req, int uptodate) static const struct target_backend_ops pscsi_ops = { .name = "pscsi", .owner = THIS_MODULE, - .transport_flags = TRANSPORT_FLAG_PASSTHROUGH, + .transport_flags = TRANSPORT_FLAG_PASSTHROUGH | + TRANSPORT_FLAG_PASSTHROUGH_ALUA, .attach_hba = pscsi_attach_hba, .detach_hba = pscsi_detach_hba, .pmode_enable_hba = pscsi_pmode_enable_hba, diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index df7b6e95c019dd..c194063f169b13 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -604,7 +604,7 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes spin_lock_irq(&cmd->t_state_lock); cmd->t_state = TRANSPORT_PROCESSING; - cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT; + cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT; spin_unlock_irq(&cmd->t_state_lock); __target_execute_cmd(cmd, false); @@ -1105,9 +1105,15 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) return ret; break; case VERIFY: + case VERIFY_16: size = 0; - sectors = transport_get_sectors_10(cdb); - cmd->t_task_lba = transport_lba_32(cdb); + if (cdb[0] == VERIFY) { + sectors = transport_get_sectors_10(cdb); + cmd->t_task_lba = transport_lba_32(cdb); + } else { + sectors = transport_get_sectors_16(cdb); + cmd->t_task_lba = transport_lba_64(cdb); + } cmd->execute_cmd = sbc_emulate_noop; goto 
check_lba; case REZERO_UNIT: diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c index 1a39033d2bffaa..8038255b21e874 100644 --- a/drivers/target/target_core_stat.c +++ b/drivers/target/target_core_stat.c @@ -158,12 +158,28 @@ static ssize_t target_stat_tgt_resets_show(struct config_item *item, atomic_long_read(&to_stat_tgt_dev(item)->num_resets)); } +static ssize_t target_stat_tgt_aborts_complete_show(struct config_item *item, + char *page) +{ + return snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&to_stat_tgt_dev(item)->aborts_complete)); +} + +static ssize_t target_stat_tgt_aborts_no_task_show(struct config_item *item, + char *page) +{ + return snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&to_stat_tgt_dev(item)->aborts_no_task)); +} + CONFIGFS_ATTR_RO(target_stat_tgt_, inst); CONFIGFS_ATTR_RO(target_stat_tgt_, indx); CONFIGFS_ATTR_RO(target_stat_tgt_, num_lus); CONFIGFS_ATTR_RO(target_stat_tgt_, status); CONFIGFS_ATTR_RO(target_stat_tgt_, non_access_lus); CONFIGFS_ATTR_RO(target_stat_tgt_, resets); +CONFIGFS_ATTR_RO(target_stat_tgt_, aborts_complete); +CONFIGFS_ATTR_RO(target_stat_tgt_, aborts_no_task); static struct configfs_attribute *target_stat_scsi_tgt_dev_attrs[] = { &target_stat_tgt_attr_inst, @@ -172,6 +188,8 @@ static struct configfs_attribute *target_stat_scsi_tgt_dev_attrs[] = { &target_stat_tgt_attr_status, &target_stat_tgt_attr_non_access_lus, &target_stat_tgt_attr_resets, + &target_stat_tgt_attr_aborts_complete, + &target_stat_tgt_attr_aborts_no_task, NULL, }; @@ -795,16 +813,34 @@ static ssize_t target_stat_transport_dev_name_show(struct config_item *item, return ret; } +static ssize_t target_stat_transport_proto_id_show(struct config_item *item, + char *page) +{ + struct se_lun *lun = to_transport_stat(item); + struct se_device *dev; + struct se_portal_group *tpg = lun->lun_tpg; + ssize_t ret = -ENODEV; + + rcu_read_lock(); + dev = rcu_dereference(lun->lun_se_dev); + if (dev) + ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->proto_id); + rcu_read_unlock(); + return ret; +} + CONFIGFS_ATTR_RO(target_stat_transport_, inst); CONFIGFS_ATTR_RO(target_stat_transport_, device); CONFIGFS_ATTR_RO(target_stat_transport_, indx); CONFIGFS_ATTR_RO(target_stat_transport_, dev_name); +CONFIGFS_ATTR_RO(target_stat_transport_, proto_id); static struct configfs_attribute *target_stat_scsi_transport_attrs[] = { &target_stat_transport_attr_inst, &target_stat_transport_attr_device, &target_stat_transport_attr_indx, &target_stat_transport_attr_dev_name, + &target_stat_transport_attr_proto_id, NULL, }; diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 4f229e711e1c1c..dce1e1b4731617 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -175,10 +175,9 @@ void core_tmr_abort_task( printk("ABORT_TASK: Found referenced %s task_tag: %llu\n", se_cmd->se_tfo->get_fabric_name(), ref_tag); - if (!__target_check_io_state(se_cmd, se_sess, 0)) { - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); - goto out; - } + if (!__target_check_io_state(se_cmd, se_sess, 0)) + continue; + list_del_init(&se_cmd->se_cmd_list); spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); @@ -191,14 +190,15 @@ void core_tmr_abort_task( printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" " ref_tag: %llu\n", ref_tag); tmr->response = TMR_FUNCTION_COMPLETE; + atomic_long_inc(&dev->aborts_complete); return; } spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); -out: printk("ABORT_TASK: Sending 
TMR_TASK_DOES_NOT_EXIST for ref_tag: %lld\n", tmr->ref_task_tag); tmr->response = TMR_TASK_DOES_NOT_EXIST; + atomic_long_inc(&dev->aborts_no_task); } static void core_tmr_drain_tmr_list( @@ -217,13 +217,8 @@ static void core_tmr_drain_tmr_list( * LUN_RESET tmr.. */ spin_lock_irqsave(&dev->se_tmr_lock, flags); + list_del_init(&tmr->tmr_list); list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) { - /* - * Allow the received TMR to return with FUNCTION_COMPLETE. - */ - if (tmr_p == tmr) - continue; - cmd = tmr_p->task_cmd; if (!cmd) { pr_err("Unable to locate struct se_cmd for TMR\n"); diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 4a8b180c478bce..6fb191914f458f 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -445,7 +445,7 @@ static void core_tpg_lun_ref_release(struct percpu_ref *ref) { struct se_lun *lun = container_of(ref, struct se_lun, lun_ref); - complete(&lun->lun_ref_comp); + complete(&lun->lun_shutdown_comp); } /* Does not change se_wwn->priv. */ @@ -572,6 +572,7 @@ struct se_lun *core_tpg_alloc_lun( lun->lun_link_magic = SE_LUN_LINK_MAGIC; atomic_set(&lun->lun_acl_count, 0); init_completion(&lun->lun_ref_comp); + init_completion(&lun->lun_shutdown_comp); INIT_LIST_HEAD(&lun->lun_deve_list); INIT_LIST_HEAD(&lun->lun_dev_link); atomic_set(&lun->lun_tg_pt_secondary_offline, 0); @@ -601,7 +602,8 @@ int core_tpg_add_lun( if (ret) goto out_kill_ref; - if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) && + if (!(dev->transport->transport_flags & + TRANSPORT_FLAG_PASSTHROUGH_ALUA) && !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 437591bc7c0855..b1a3cdb29468cf 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -593,9 +593,6 @@ static void target_remove_from_state_list(struct se_cmd *cmd) if (!dev) return; - if (cmd->transport_state & CMD_T_BUSY) - return; - spin_lock_irqsave(&dev->execute_task_lock, flags); if (cmd->state_active) { list_del(&cmd->state_list); @@ -604,24 +601,18 @@ static void target_remove_from_state_list(struct se_cmd *cmd) spin_unlock_irqrestore(&dev->execute_task_lock, flags); } -static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists, - bool write_pending) +static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) { unsigned long flags; - if (remove_from_lists) { - target_remove_from_state_list(cmd); + target_remove_from_state_list(cmd); - /* - * Clear struct se_cmd->se_lun before the handoff to FE. - */ - cmd->se_lun = NULL; - } + /* + * Clear struct se_cmd->se_lun before the handoff to FE. + */ + cmd->se_lun = NULL; spin_lock_irqsave(&cmd->t_state_lock, flags); - if (write_pending) - cmd->t_state = TRANSPORT_WRITE_PENDING; - /* * Determine if frontend context caller is requesting the stopping of * this command for frontend exceptions. @@ -635,31 +626,17 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists, complete_all(&cmd->t_transport_stop_comp); return 1; } - cmd->transport_state &= ~CMD_T_ACTIVE; - if (remove_from_lists) { - /* - * Some fabric modules like tcm_loop can release - * their internally allocated I/O reference now and - * struct se_cmd now. 
- * - * Fabric modules are expected to return '1' here if the - * se_cmd being passed is released at this point, - * or zero if not being released. - */ - if (cmd->se_tfo->check_stop_free != NULL) { - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - return cmd->se_tfo->check_stop_free(cmd); - } - } - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - return 0; -} -static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) -{ - return transport_cmd_check_stop(cmd, true, false); + /* + * Some fabric modules like tcm_loop can release their internally + * allocated I/O reference and struct se_cmd now. + * + * Fabric modules are expected to return '1' here if the se_cmd being + * passed is released at this point, or zero if not being released. + */ + return cmd->se_tfo->check_stop_free(cmd); } static void transport_lun_remove_cmd(struct se_cmd *cmd) @@ -733,7 +710,6 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) spin_lock_irqsave(&cmd->t_state_lock, flags); - cmd->transport_state &= ~CMD_T_BUSY; if (dev && dev->transport->transport_complete) { dev->transport->transport_complete(cmd, @@ -1246,7 +1222,6 @@ void transport_init_se_cmd( init_completion(&cmd->cmd_wait_comp); spin_lock_init(&cmd->t_state_lock); kref_init(&cmd->cmd_kref); - cmd->transport_state = CMD_T_DEV_ACTIVE; cmd->se_tfo = tfo; cmd->se_sess = se_sess; @@ -1671,6 +1646,9 @@ void transport_generic_request_failure(struct se_cmd *cmd, { int ret = 0, post_ret = 0; + if (transport_check_aborted_status(cmd, 1)) + return; + pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx" " CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]); pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n", @@ -1801,7 +1779,7 @@ void __target_execute_cmd(struct se_cmd *cmd, bool do_checks) return; err: spin_lock_irq(&cmd->t_state_lock); - cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT); + cmd->transport_state &= ~CMD_T_SENT; spin_unlock_irq(&cmd->t_state_lock); transport_generic_request_failure(cmd, ret); @@ -1829,7 +1807,7 @@ static int target_write_prot_action(struct se_cmd *cmd) sectors, 0, cmd->t_prot_sg, 0); if (unlikely(cmd->pi_err)) { spin_lock_irq(&cmd->t_state_lock); - cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT); + cmd->transport_state &= ~CMD_T_SENT; spin_unlock_irq(&cmd->t_state_lock); transport_generic_request_failure(cmd, cmd->pi_err); return -1; @@ -1918,7 +1896,7 @@ void target_execute_cmd(struct se_cmd *cmd) } cmd->t_state = TRANSPORT_PROCESSING; - cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT; + cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT; spin_unlock_irq(&cmd->t_state_lock); if (target_write_prot_action(cmd)) @@ -1926,7 +1904,7 @@ void target_execute_cmd(struct se_cmd *cmd) if (target_handle_task_attr(cmd)) { spin_lock_irq(&cmd->t_state_lock); - cmd->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT); + cmd->transport_state &= ~CMD_T_SENT; spin_unlock_irq(&cmd->t_state_lock); return; } @@ -1979,8 +1957,6 @@ static void transport_complete_task_attr(struct se_cmd *cmd) if (cmd->sam_task_attr == TCM_SIMPLE_TAG) { atomic_dec_mb(&dev->simple_cmds); dev->dev_cur_ordered_id++; - pr_debug("Incremented dev->dev_cur_ordered_id: %u for SIMPLE\n", - dev->dev_cur_ordered_id); } else if (cmd->sam_task_attr == TCM_HEAD_TAG) { dev->dev_cur_ordered_id++; pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n", @@ -2387,6 +2363,7 @@ EXPORT_SYMBOL(target_alloc_sgl); sense_reason_t transport_generic_new_cmd(struct se_cmd *cmd) { + unsigned long flags; int ret = 0; bool zero_flag = 
!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB); @@ -2452,7 +2429,24 @@ transport_generic_new_cmd(struct se_cmd *cmd) target_execute_cmd(cmd); return 0; } - transport_cmd_check_stop(cmd, false, true); + + spin_lock_irqsave(&cmd->t_state_lock, flags); + cmd->t_state = TRANSPORT_WRITE_PENDING; + /* + * Determine if frontend context caller is requesting the stopping of + * this command for frontend exceptions. + */ + if (cmd->transport_state & CMD_T_STOP) { + pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", + __func__, __LINE__, cmd->tag); + + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + + complete_all(&cmd->t_transport_stop_comp); + return 0; + } + cmd->transport_state &= ~CMD_T_ACTIVE; + spin_unlock_irqrestore(&cmd->t_state_lock, flags); ret = cmd->se_tfo->write_pending(cmd); if (ret == -EAGAIN || ret == -ENOMEM) @@ -2595,39 +2589,38 @@ static void target_release_cmd_kref(struct kref *kref) unsigned long flags; bool fabric_stop; - spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); + if (se_sess) { + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); - spin_lock(&se_cmd->t_state_lock); - fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP) && - (se_cmd->transport_state & CMD_T_ABORTED); - spin_unlock(&se_cmd->t_state_lock); + spin_lock(&se_cmd->t_state_lock); + fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP) && + (se_cmd->transport_state & CMD_T_ABORTED); + spin_unlock(&se_cmd->t_state_lock); - if (se_cmd->cmd_wait_set || fabric_stop) { + if (se_cmd->cmd_wait_set || fabric_stop) { + list_del_init(&se_cmd->se_cmd_list); + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + target_free_cmd_mem(se_cmd); + complete(&se_cmd->cmd_wait_comp); + return; + } list_del_init(&se_cmd->se_cmd_list); spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); - target_free_cmd_mem(se_cmd); - complete(&se_cmd->cmd_wait_comp); - return; } - list_del_init(&se_cmd->se_cmd_list); - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); target_free_cmd_mem(se_cmd); se_cmd->se_tfo->release_cmd(se_cmd); } -/* target_put_sess_cmd - Check for active I/O shutdown via kref_put - * @se_cmd: command descriptor to drop +/** + * target_put_sess_cmd - decrease the command reference count + * @se_cmd: command to drop a reference from + * + * Returns 1 if and only if this target_put_sess_cmd() call caused the + * refcount to drop to zero. Returns zero otherwise. */ int target_put_sess_cmd(struct se_cmd *se_cmd) { - struct se_session *se_sess = se_cmd->se_sess; - - if (!se_sess) { - target_free_cmd_mem(se_cmd); - se_cmd->se_tfo->release_cmd(se_cmd); - return 1; - } return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref); } EXPORT_SYMBOL(target_put_sess_cmd); @@ -2706,10 +2699,39 @@ void target_wait_for_sess_cmds(struct se_session *se_sess) } EXPORT_SYMBOL(target_wait_for_sess_cmds); +static void target_lun_confirm(struct percpu_ref *ref) +{ + struct se_lun *lun = container_of(ref, struct se_lun, lun_ref); + + complete(&lun->lun_ref_comp); +} + void transport_clear_lun_ref(struct se_lun *lun) { - percpu_ref_kill(&lun->lun_ref); + /* + * Mark the percpu-ref as DEAD, switch to atomic_t mode, drop + * the initial reference and schedule confirm kill to be + * executed after one full RCU grace period has completed. 
+ */ + percpu_ref_kill_and_confirm(&lun->lun_ref, target_lun_confirm); + /* + * The first completion waits for percpu_ref_switch_to_atomic_rcu() + * to call target_lun_confirm after lun->lun_ref has been marked + * as __PERCPU_REF_DEAD on all CPUs, and switches to atomic_t + * mode so that percpu_ref_tryget_live() lookup of lun->lun_ref + * fails for all new incoming I/O. + */ wait_for_completion(&lun->lun_ref_comp); + /* + * The second completion waits for percpu_ref_put_many() to + * invoke ->release() after lun->lun_ref has switched to + * atomic_t mode, and lun->lun_ref.count has reached zero. + * + * At this point all target-core lun->lun_ref references have + * been dropped via transport_lun_remove_cmd(), and it's safe + * to proceed with the remaining LUN shutdown. + */ + wait_for_completion(&lun->lun_shutdown_comp); } static bool @@ -2765,11 +2787,8 @@ __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop, } /** - * transport_wait_for_tasks - wait for completion to occur - * @cmd: command to wait - * - * Called from frontend fabric context to wait for storage engine - * to pause and/or release frontend generated struct se_cmd. + * transport_wait_for_tasks - set CMD_T_STOP and wait for t_transport_stop_comp + * @cmd: command to wait on */ bool transport_wait_for_tasks(struct se_cmd *cmd) { diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index c3adefe95e50f7..c6874c38a10bc4 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -112,6 +113,7 @@ struct tcmu_dev { spinlock_t commands_lock; struct timer_list timeout; + unsigned int cmd_time_out; char dev_config[TCMU_CONFIG_LEN]; }; @@ -172,7 +174,9 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) tcmu_cmd->se_cmd = se_cmd; tcmu_cmd->tcmu_dev = udev; - tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT); + if (udev->cmd_time_out) + tcmu_cmd->deadline = jiffies + + msecs_to_jiffies(udev->cmd_time_out); idr_preload(GFP_KERNEL); spin_lock_irq(&udev->commands_lock); @@ -451,7 +455,11 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) pr_debug("sleeping for ring space\n"); spin_unlock_irq(&udev->cmdr_lock); - ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT)); + if (udev->cmd_time_out) + ret = schedule_timeout( + msecs_to_jiffies(udev->cmd_time_out)); + else + ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT)); finish_wait(&udev->wait_cmdr, &__wait); if (!ret) { pr_warn("tcmu: command timed out\n"); @@ -526,8 +534,9 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) /* TODO: only if FLUSH and FUA? 
*/ uio_event_notify(&udev->uio_info); - mod_timer(&udev->timeout, - round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT))); + if (udev->cmd_time_out) + mod_timer(&udev->timeout, round_jiffies_up(jiffies + + msecs_to_jiffies(udev->cmd_time_out))); return TCM_NO_SENSE; } @@ -742,6 +751,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) } udev->hba = hba; + udev->cmd_time_out = TCMU_TIME_OUT; init_waitqueue_head(&udev->wait_cmdr); spin_lock_init(&udev->cmdr_lock); @@ -960,7 +970,8 @@ static int tcmu_configure_device(struct se_device *dev) if (dev->dev_attrib.hw_block_size == 0) dev->dev_attrib.hw_block_size = 512; /* Other attributes can be configured in userspace */ - dev->dev_attrib.hw_max_sectors = 128; + if (!dev->dev_attrib.hw_max_sectors) + dev->dev_attrib.hw_max_sectors = 128; dev->dev_attrib.hw_queue_depth = 128; ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name, @@ -997,6 +1008,11 @@ static void tcmu_dev_call_rcu(struct rcu_head *p) kfree(udev); } +static bool tcmu_dev_configured(struct tcmu_dev *udev) +{ + return udev->uio_info.uio_dev ? true : false; +} + static void tcmu_free_device(struct se_device *dev) { struct tcmu_dev *udev = TCMU_DEV(dev); @@ -1018,8 +1034,7 @@ static void tcmu_free_device(struct se_device *dev) spin_unlock_irq(&udev->commands_lock); WARN_ON(!all_expired); - /* Device was configured */ - if (udev->uio_info.uio_dev) { + if (tcmu_dev_configured(udev)) { tcmu_netlink_event(TCMU_CMD_REMOVED_DEVICE, udev->uio_info.name, udev->uio_info.uio_dev->minor); @@ -1031,16 +1046,42 @@ static void tcmu_free_device(struct se_device *dev) } enum { - Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_err, + Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors, + Opt_err, }; static match_table_t tokens = { {Opt_dev_config, "dev_config=%s"}, {Opt_dev_size, "dev_size=%u"}, {Opt_hw_block_size, "hw_block_size=%u"}, + {Opt_hw_max_sectors, "hw_max_sectors=%u"}, {Opt_err, NULL} }; +static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib) +{ + unsigned long tmp_ul; + char *arg_p; + int ret; + + arg_p = match_strdup(arg); + if (!arg_p) + return -ENOMEM; + + ret = kstrtoul(arg_p, 0, &tmp_ul); + kfree(arg_p); + if (ret < 0) { + pr_err("kstrtoul() failed for dev attrib\n"); + return ret; + } + if (!tmp_ul) { + pr_err("dev attrib must be nonzero\n"); + return -EINVAL; + } + *dev_attrib = tmp_ul; + return 0; +} + static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, const char *page, ssize_t count) { @@ -1048,7 +1089,6 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, char *orig, *ptr, *opts, *arg_p; substring_t args[MAX_OPT_ARGS]; int ret = 0, token; - unsigned long tmp_ul; opts = kstrdup(page, GFP_KERNEL); if (!opts) @@ -1082,26 +1122,19 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, pr_err("kstrtoul() failed for dev_size=\n"); break; case Opt_hw_block_size: - arg_p = match_strdup(&args[0]); - if (!arg_p) { - ret = -ENOMEM; - break; - } - ret = kstrtoul(arg_p, 0, &tmp_ul); - kfree(arg_p); - if (ret < 0) { - pr_err("kstrtoul() failed for hw_block_size=\n"); - break; - } - if (!tmp_ul) { - pr_err("hw_block_size must be nonzero\n"); - break; - } - dev->dev_attrib.hw_block_size = tmp_ul; + ret = tcmu_set_dev_attrib(&args[0], + &(dev->dev_attrib.hw_block_size)); + break; + case Opt_hw_max_sectors: + ret = tcmu_set_dev_attrib(&args[0], + &(dev->dev_attrib.hw_max_sectors)); break; default: break; } + + if (ret) + break; } kfree(orig); @@ -1134,7 +1167,48 @@ 
tcmu_parse_cdb(struct se_cmd *cmd) return passthrough_parse_cdb(cmd, tcmu_queue_cmd); } -static const struct target_backend_ops tcmu_ops = { +static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = container_of(da->da_dev, + struct tcmu_dev, se_dev); + + return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC); +} + +static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page, + size_t count) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = container_of(da->da_dev, + struct tcmu_dev, se_dev); + u32 val; + int ret; + + if (da->da_dev->export_count) { + pr_err("Unable to set tcmu cmd_time_out while exports exist\n"); + return -EINVAL; + } + + ret = kstrtou32(page, 0, &val); + if (ret < 0) + return ret; + + if (!val) { + pr_err("Illegal value for cmd_time_out\n"); + return -EINVAL; + } + + udev->cmd_time_out = val * MSEC_PER_SEC; + return count; +} +CONFIGFS_ATTR(tcmu_, cmd_time_out); + +static struct configfs_attribute **tcmu_attrs; + +static struct target_backend_ops tcmu_ops = { .name = "user", .owner = THIS_MODULE, .transport_flags = TRANSPORT_FLAG_PASSTHROUGH, @@ -1148,12 +1222,12 @@ static const struct target_backend_ops tcmu_ops = { .show_configfs_dev_params = tcmu_show_configfs_dev_params, .get_device_type = sbc_get_device_type, .get_blocks = tcmu_get_blocks, - .tb_dev_attrib_attrs = passthrough_attrib_attrs, + .tb_dev_attrib_attrs = NULL, }; static int __init tcmu_module_init(void) { - int ret; + int ret, i, len = 0; BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0); @@ -1175,12 +1249,31 @@ static int __init tcmu_module_init(void) goto out_unreg_device; } + for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) { + len += sizeof(struct configfs_attribute *); + } + len += sizeof(struct configfs_attribute *) * 2; + + tcmu_attrs = kzalloc(len, GFP_KERNEL); + if (!tcmu_attrs) { + ret = -ENOMEM; + goto out_unreg_genl; + } + + for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) { + tcmu_attrs[i] = passthrough_attrib_attrs[i]; + } + tcmu_attrs[i] = &tcmu_attr_cmd_time_out; + tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs; + ret = transport_backend_register(&tcmu_ops); if (ret) - goto out_unreg_genl; + goto out_attrs; return 0; +out_attrs: + kfree(tcmu_attrs); out_unreg_genl: genl_unregister_family(&tcmu_genl_family); out_unreg_device: @@ -1194,6 +1287,7 @@ static int __init tcmu_module_init(void) static void __exit tcmu_module_exit(void) { target_backend_unregister(&tcmu_ops); + kfree(tcmu_attrs); genl_unregister_family(&tcmu_genl_family); root_device_unregister(tcmu_root_device); kmem_cache_destroy(tcmu_cmd_cache); diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 9af7842b8178e9..ec372860106f12 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -83,14 +83,12 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) static void ft_free_cmd(struct ft_cmd *cmd) { struct fc_frame *fp; - struct fc_lport *lport; struct ft_sess *sess; if (!cmd) return; sess = cmd->sess; fp = cmd->req_frame; - lport = fr_dev(fp); if (fr_seq(fp)) fc_seq_release(fr_seq(fp)); fc_frame_free(fp); diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index 91048eeca28b2d..69d0f430b2d190 100644 --- a/drivers/thermal/cpu_cooling.c +++ 
b/drivers/thermal/cpu_cooling.c @@ -107,8 +107,6 @@ struct cpufreq_cooling_device { }; static DEFINE_IDA(cpufreq_ida); -static unsigned int cpufreq_dev_count; - static DEFINE_MUTEX(cooling_list_lock); static LIST_HEAD(cpufreq_dev_list); @@ -395,13 +393,20 @@ static int get_static_power(struct cpufreq_cooling_device *cpufreq_device, opp = dev_pm_opp_find_freq_exact(cpufreq_device->cpu_dev, freq_hz, true); + if (IS_ERR(opp)) { + dev_warn_ratelimited(cpufreq_device->cpu_dev, + "Failed to find OPP for frequency %lu: %ld\n", + freq_hz, PTR_ERR(opp)); + return -EINVAL; + } + voltage = dev_pm_opp_get_voltage(opp); dev_pm_opp_put(opp); if (voltage == 0) { - dev_warn_ratelimited(cpufreq_device->cpu_dev, - "Failed to get voltage for frequency %lu: %ld\n", - freq_hz, IS_ERR(opp) ? PTR_ERR(opp) : 0); + dev_err_ratelimited(cpufreq_device->cpu_dev, + "Failed to get voltage for frequency %lu\n", + freq_hz); return -EINVAL; } @@ -693,9 +698,9 @@ static int cpufreq_power2state(struct thermal_cooling_device *cdev, *state = cpufreq_cooling_get_level(cpu, target_freq); if (*state == THERMAL_CSTATE_INVALID) { - dev_warn_ratelimited(&cdev->device, - "Failed to convert %dKHz for cpu %d into a cdev state\n", - target_freq, cpu); + dev_err_ratelimited(&cdev->device, + "Failed to convert %dKHz for cpu %d into a cdev state\n", + target_freq, cpu); return -EINVAL; } @@ -771,6 +776,7 @@ __cpufreq_cooling_register(struct device_node *np, unsigned int freq, i, num_cpus; int ret; struct thermal_cooling_device_ops *cooling_ops; + bool first; if (!alloc_cpumask_var(&temp_mask, GFP_KERNEL)) return ERR_PTR(-ENOMEM); @@ -874,13 +880,14 @@ __cpufreq_cooling_register(struct device_node *np, cpufreq_dev->cool_dev = cool_dev; mutex_lock(&cooling_list_lock); + /* Register the notifier for first cpufreq cooling device */ + first = list_empty(&cpufreq_dev_list); list_add(&cpufreq_dev->node, &cpufreq_dev_list); + mutex_unlock(&cooling_list_lock); - /* Register the notifier for first cpufreq cooling device */ - if (!cpufreq_dev_count++) + if (first) cpufreq_register_notifier(&thermal_cpufreq_notifier_block, CPUFREQ_POLICY_NOTIFIER); - mutex_unlock(&cooling_list_lock); goto put_policy; @@ -1021,6 +1028,7 @@ EXPORT_SYMBOL(of_cpufreq_power_cooling_register); void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) { struct cpufreq_cooling_device *cpufreq_dev; + bool last; if (!cdev) return; @@ -1028,14 +1036,15 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) cpufreq_dev = cdev->devdata; mutex_lock(&cooling_list_lock); + list_del(&cpufreq_dev->node); /* Unregister the notifier for the last cpufreq cooling device */ - if (!--cpufreq_dev_count) + last = list_empty(&cpufreq_dev_list); + mutex_unlock(&cooling_list_lock); + + if (last) cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block, CPUFREQ_POLICY_NOTIFIER); - list_del(&cpufreq_dev->node); - mutex_unlock(&cooling_list_lock); - thermal_cooling_device_unregister(cpufreq_dev->cool_dev); ida_simple_remove(&cpufreq_ida, cpufreq_dev->id); kfree(cpufreq_dev->dyn_power_table); diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c index 7743a78d472397..4bf4ad58cffda0 100644 --- a/drivers/thermal/devfreq_cooling.c +++ b/drivers/thermal/devfreq_cooling.c @@ -186,16 +186,22 @@ get_static_power(struct devfreq_cooling_device *dfc, unsigned long freq) return 0; opp = dev_pm_opp_find_freq_exact(dev, freq, true); - if (IS_ERR(opp) && (PTR_ERR(opp) == -ERANGE)) + if (PTR_ERR(opp) == -ERANGE) opp = dev_pm_opp_find_freq_exact(dev, freq, 
false); + if (IS_ERR(opp)) { + dev_err_ratelimited(dev, "Failed to find OPP for frequency %lu: %ld\n", + freq, PTR_ERR(opp)); + return 0; + } + voltage = dev_pm_opp_get_voltage(opp) / 1000; /* mV */ dev_pm_opp_put(opp); if (voltage == 0) { - dev_warn_ratelimited(dev, - "Failed to get voltage for frequency %lu: %ld\n", - freq, IS_ERR(opp) ? PTR_ERR(opp) : 0); + dev_err_ratelimited(dev, + "Failed to get voltage for frequency %lu\n", + freq); return 0; } diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c index a47103a659fa4d..d718cd179ddbb2 100644 --- a/drivers/thermal/intel_powerclamp.c +++ b/drivers/thermal/intel_powerclamp.c @@ -50,6 +50,7 @@ #include #include #include +#include #include #include diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index f3932baed07dc3..55577cf9b6a4e0 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -39,7 +39,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c index 1bacbc3b19a05c..e94aea8c0d0535 100644 --- a/drivers/tty/n_hdlc.c +++ b/drivers/tty/n_hdlc.c @@ -114,7 +114,7 @@ #define DEFAULT_TX_BUF_COUNT 3 struct n_hdlc_buf { - struct n_hdlc_buf *link; + struct list_head list_item; int count; char buf[1]; }; @@ -122,8 +122,7 @@ struct n_hdlc_buf { #define N_HDLC_BUF_SIZE (sizeof(struct n_hdlc_buf) + maxframe) struct n_hdlc_buf_list { - struct n_hdlc_buf *head; - struct n_hdlc_buf *tail; + struct list_head list; int count; spinlock_t spinlock; }; @@ -136,7 +135,6 @@ struct n_hdlc_buf_list { * @backup_tty - TTY to use if tty gets closed * @tbusy - reentrancy flag for tx wakeup code * @woke_up - FIXME: describe this field - * @tbuf - currently transmitting tx buffer * @tx_buf_list - list of pending transmit frame buffers * @rx_buf_list - list of received frame buffers * @tx_free_buf_list - list unused transmit frame buffers @@ -149,7 +147,6 @@ struct n_hdlc { struct tty_struct *backup_tty; int tbusy; int woke_up; - struct n_hdlc_buf *tbuf; struct n_hdlc_buf_list tx_buf_list; struct n_hdlc_buf_list rx_buf_list; struct n_hdlc_buf_list tx_free_buf_list; @@ -159,6 +156,8 @@ struct n_hdlc { /* * HDLC buffer list manipulation functions */ +static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list, + struct n_hdlc_buf *buf); static void n_hdlc_buf_put(struct n_hdlc_buf_list *list, struct n_hdlc_buf *buf); static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *list); @@ -208,16 +207,9 @@ static void flush_tx_queue(struct tty_struct *tty) { struct n_hdlc *n_hdlc = tty2n_hdlc(tty); struct n_hdlc_buf *buf; - unsigned long flags; while ((buf = n_hdlc_buf_get(&n_hdlc->tx_buf_list))) n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, buf); - spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags); - if (n_hdlc->tbuf) { - n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, n_hdlc->tbuf); - n_hdlc->tbuf = NULL; - } - spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags); } static struct tty_ldisc_ops n_hdlc_ldisc = { @@ -283,7 +275,6 @@ static void n_hdlc_release(struct n_hdlc *n_hdlc) } else break; } - kfree(n_hdlc->tbuf); kfree(n_hdlc); } /* end of n_hdlc_release() */ @@ -402,13 +393,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty) n_hdlc->woke_up = 0; spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags); - /* get current transmit buffer or get new transmit */ - /* buffer from list of pending transmit buffers */ - - tbuf = n_hdlc->tbuf; - if (!tbuf) - tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list); - 
+ tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list); while (tbuf) { if (debuglevel >= DEBUG_LEVEL_INFO) printk("%s(%d)sending frame %p, count=%d\n", @@ -420,7 +405,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty) /* rollback was possible and has been done */ if (actual == -ERESTARTSYS) { - n_hdlc->tbuf = tbuf; + n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf); break; } /* if transmit error, throw frame away by */ @@ -435,10 +420,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty) /* free current transmit buffer */ n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, tbuf); - - /* this tx buffer is done */ - n_hdlc->tbuf = NULL; - + /* wait up sleeping writers */ wake_up_interruptible(&tty->write_wait); @@ -448,10 +430,12 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty) if (debuglevel >= DEBUG_LEVEL_INFO) printk("%s(%d)frame %p pending\n", __FILE__,__LINE__,tbuf); - - /* buffer not accepted by driver */ - /* set this buffer as pending buffer */ - n_hdlc->tbuf = tbuf; + + /* + * the buffer was not accepted by driver, + * return it back into tx queue + */ + n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf); break; } } @@ -749,7 +733,8 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file, int error = 0; int count; unsigned long flags; - + struct n_hdlc_buf *buf = NULL; + if (debuglevel >= DEBUG_LEVEL_INFO) printk("%s(%d)n_hdlc_tty_ioctl() called %d\n", __FILE__,__LINE__,cmd); @@ -763,8 +748,10 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file, /* report count of read data available */ /* in next available frame (if any) */ spin_lock_irqsave(&n_hdlc->rx_buf_list.spinlock,flags); - if (n_hdlc->rx_buf_list.head) - count = n_hdlc->rx_buf_list.head->count; + buf = list_first_entry_or_null(&n_hdlc->rx_buf_list.list, + struct n_hdlc_buf, list_item); + if (buf) + count = buf->count; else count = 0; spin_unlock_irqrestore(&n_hdlc->rx_buf_list.spinlock,flags); @@ -776,8 +763,10 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file, count = tty_chars_in_buffer(tty); /* add size of next output frame in queue */ spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock,flags); - if (n_hdlc->tx_buf_list.head) - count += n_hdlc->tx_buf_list.head->count; + buf = list_first_entry_or_null(&n_hdlc->tx_buf_list.list, + struct n_hdlc_buf, list_item); + if (buf) + count += buf->count; spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock,flags); error = put_user(count, (int __user *)arg); break; @@ -825,14 +814,14 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp, poll_wait(filp, &tty->write_wait, wait); /* set bits for operations that won't block */ - if (n_hdlc->rx_buf_list.head) + if (!list_empty(&n_hdlc->rx_buf_list.list)) mask |= POLLIN | POLLRDNORM; /* readable */ if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) mask |= POLLHUP; if (tty_hung_up_p(filp)) mask |= POLLHUP; if (!tty_is_writelocked(tty) && - n_hdlc->tx_free_buf_list.head) + !list_empty(&n_hdlc->tx_free_buf_list.list)) mask |= POLLOUT | POLLWRNORM; /* writable */ } return mask; @@ -856,7 +845,12 @@ static struct n_hdlc *n_hdlc_alloc(void) spin_lock_init(&n_hdlc->tx_free_buf_list.spinlock); spin_lock_init(&n_hdlc->rx_buf_list.spinlock); spin_lock_init(&n_hdlc->tx_buf_list.spinlock); - + + INIT_LIST_HEAD(&n_hdlc->rx_free_buf_list.list); + INIT_LIST_HEAD(&n_hdlc->tx_free_buf_list.list); + INIT_LIST_HEAD(&n_hdlc->rx_buf_list.list); + INIT_LIST_HEAD(&n_hdlc->tx_buf_list.list); + /* allocate free rx 
buffer list */ for(i=0;ispinlock, flags); + + list_add(&buf->list_item, &buf_list->list); + buf_list->count++; + + spin_unlock_irqrestore(&buf_list->spinlock, flags); +} + /** * n_hdlc_buf_put - add specified HDLC buffer to tail of specified list - * @list - pointer to buffer list + * @buf_list - pointer to buffer list * @buf - pointer to buffer */ -static void n_hdlc_buf_put(struct n_hdlc_buf_list *list, +static void n_hdlc_buf_put(struct n_hdlc_buf_list *buf_list, struct n_hdlc_buf *buf) { unsigned long flags; - spin_lock_irqsave(&list->spinlock,flags); - - buf->link=NULL; - if (list->tail) - list->tail->link = buf; - else - list->head = buf; - list->tail = buf; - (list->count)++; - - spin_unlock_irqrestore(&list->spinlock,flags); - + + spin_lock_irqsave(&buf_list->spinlock, flags); + + list_add_tail(&buf->list_item, &buf_list->list); + buf_list->count++; + + spin_unlock_irqrestore(&buf_list->spinlock, flags); } /* end of n_hdlc_buf_put() */ /** * n_hdlc_buf_get - remove and return an HDLC buffer from list - * @list - pointer to HDLC buffer list + * @buf_list - pointer to HDLC buffer list * * Remove and return an HDLC buffer from the head of the specified HDLC buffer * list. * Returns a pointer to HDLC buffer if available, otherwise %NULL. */ -static struct n_hdlc_buf* n_hdlc_buf_get(struct n_hdlc_buf_list *list) +static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *buf_list) { unsigned long flags; struct n_hdlc_buf *buf; - spin_lock_irqsave(&list->spinlock,flags); - - buf = list->head; + + spin_lock_irqsave(&buf_list->spinlock, flags); + + buf = list_first_entry_or_null(&buf_list->list, + struct n_hdlc_buf, list_item); if (buf) { - list->head = buf->link; - (list->count)--; + list_del(&buf->list_item); + buf_list->count--; } - if (!list->head) - list->tail = NULL; - - spin_unlock_irqrestore(&list->spinlock,flags); + + spin_unlock_irqrestore(&buf_list->spinlock, flags); return buf; - } /* end of n_hdlc_buf_get() */ static char hdlc_banner[] __initdata = diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index a23fa5ed1d67f0..66b59a15780db0 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index 6ee55a2d47bb42..e65808c482f184 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c @@ -257,7 +257,7 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios, { unsigned int baud = tty_termios_baud_rate(termios); struct dw8250_data *d = p->private_data; - unsigned int rate; + long rate; int ret; if (IS_ERR(d->clk) || !old) @@ -265,7 +265,12 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios, clk_disable_unprepare(d->clk); rate = clk_round_rate(d->clk, baud * 16); - ret = clk_set_rate(d->clk, rate); + if (rate < 0) + ret = rate; + else if (rate == 0) + ret = -ENOENT; + else + ret = clk_set_rate(d->clk, rate); clk_prepare_enable(d->clk); if (!ret) diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig index a65fb8197aecb6..0e3f529d50e9d0 100644 --- a/drivers/tty/serial/8250/Kconfig +++ b/drivers/tty/serial/8250/Kconfig @@ -128,9 +128,13 @@ config SERIAL_8250_PCI by the parport_serial driver, enabled with CONFIG_PARPORT_SERIAL. 
config SERIAL_8250_EXAR - tristate "8250/16550 PCI device support" - depends on SERIAL_8250_PCI + tristate "8250/16550 Exar/Commtech PCI/PCIe device support" + depends on SERIAL_8250_PCI default SERIAL_8250 + help + This builds support for XR17C1xx, XR17V3xx and some Commtech + 422x PCIe serial cards that are not covered by the more generic + SERIAL_8250_PCI option. config SERIAL_8250_HP300 tristate diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 8789ea423ccfd1..b0a377725d636c 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -2373,7 +2373,7 @@ static int __init pl011_console_match(struct console *co, char *name, int idx, if (strcmp(name, "qdf2400_e44") == 0) { pr_info_once("UART: Working around QDF2400 SoC erratum 44"); qdf2400_e44_present = true; - } else if (strcmp(name, "pl011") != 0 || strcmp(name, "ttyAMA") != 0) { + } else if (strcmp(name, "pl011") != 0) { return -ENODEV; } @@ -2452,18 +2452,37 @@ static void pl011_early_write(struct console *con, const char *s, unsigned n) uart_console_write(&dev->port, s, n, pl011_putc); } +/* + * On non-ACPI systems, earlycon is enabled by specifying + * "earlycon=pl011,
" on the kernel command line. + * + * On ACPI ARM64 systems, an "early" console is enabled via the SPCR table, + * by specifying only "earlycon" on the command line. Because it requires + * SPCR, the console starts after ACPI is parsed, which is later than a + * traditional early console. + * + * To get the traditional early console that starts before ACPI is parsed, + * specify the full "earlycon=pl011,
" option. + */ static int __init pl011_early_console_setup(struct earlycon_device *device, const char *opt) { if (!device->port.membase) return -ENODEV; - device->con->write = qdf2400_e44_present ? - qdf2400_e44_early_write : pl011_early_write; + /* On QDF2400 SOCs affected by Erratum 44, the "qdf2400_e44" must + * also be specified, e.g. "earlycon=pl011,
,qdf2400_e44". + */ + if (!strcmp(device->options, "qdf2400_e44")) + device->con->write = qdf2400_e44_early_write; + else + device->con->write = pl011_early_write; + return 0; } OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup); OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup); +EARLYCON_DECLARE(qdf2400_e44, pl011_early_console_setup); #else #define AMBA_CONSOLE NULL diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index dcebb28ffbc412..1f50a83ef95860 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -1951,6 +1951,11 @@ static void atmel_flush_buffer(struct uart_port *port) atmel_uart_writel(port, ATMEL_PDC_TCR, 0); atmel_port->pdc_tx.ofs = 0; } + /* + * in uart_flush_buffer(), the xmit circular buffer has just + * been cleared, so we have to reset tx_len accordingly. + */ + atmel_port->tx_len = 0; } /* @@ -2483,6 +2488,9 @@ static void atmel_console_write(struct console *co, const char *s, u_int count) pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN; atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS); + /* Make sure that tx path is actually able to send characters */ + atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN); + uart_console_write(port, s, count, atmel_console_putchar); /* diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c index e92c23470e519f..59a2a7e18b5a25 100644 --- a/drivers/tty/serial/crisv10.c +++ b/drivers/tty/serial/crisv10.c @@ -12,7 +12,7 @@ static char *serial_version = "$Revision: 1.25 $"; #include #include #include -#include +#include #include #include #include diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c index 6989b227d1349b..be94246b6fcca1 100644 --- a/drivers/tty/serial/mxs-auart.c +++ b/drivers/tty/serial/mxs-auart.c @@ -1088,7 +1088,7 @@ static void mxs_auart_settermios(struct uart_port *u, AUART_LINECTRL_BAUD_DIV_MAX); baud_max = u->uartclk * 32 / AUART_LINECTRL_BAUD_DIV_MIN; baud = uart_get_baud_rate(u, termios, old, baud_min, baud_max); - div = u->uartclk * 32 / baud; + div = DIV_ROUND_CLOSEST(u->uartclk * 32, baud); } ctrl |= AUART_LINECTRL_BAUD_DIVFRAC(div & 0x3F); diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c index b4f86c219db1e0..7a17aedbf902e0 100644 --- a/drivers/tty/serial/samsung.c +++ b/drivers/tty/serial/samsung.c @@ -1031,8 +1031,10 @@ static int s3c64xx_serial_startup(struct uart_port *port) if (ourport->dma) { ret = s3c24xx_serial_request_dma(ourport); if (ret < 0) { - dev_warn(port->dev, "DMA request failed\n"); - return ret; + dev_warn(port->dev, + "DMA request failed, DMA will not be used\n"); + devm_kfree(port->dev, ourport->dma); + ourport->dma = NULL; } } diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index 793395451982d8..ca54ce074a5f84 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c @@ -29,6 +29,7 @@ #include #include #include +#include #define SC16IS7XX_NAME "sc16is7xx" #define SC16IS7XX_MAX_DEVS 8 diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 9939c3d9912b35..3fe56894974a7c 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c index bcf1d33e6ffe0b..c334bcc59c649e 100644 --- a/drivers/tty/serial/st-asc.c +++ b/drivers/tty/serial/st-asc.c 
@@ -575,12 +575,13 @@ static void asc_set_termios(struct uart_port *port, struct ktermios *termios, pinctrl_select_state(ascport->pinctrl, ascport->states[NO_HW_FLOWCTRL]); - gpiod = devm_get_gpiod_from_child(port->dev, "rts", - &np->fwnode); - if (!IS_ERR(gpiod)) { - gpiod_direction_output(gpiod, 0); + gpiod = devm_fwnode_get_gpiod_from_child(port->dev, + "rts", + &np->fwnode, + GPIOD_OUT_LOW, + np->name); + if (!IS_ERR(gpiod)) ascport->rts = gpiod; - } } } diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index 71136742e606b9..c6fc7141d7b281 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -14,8 +14,10 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include +#include #include +#include +#include #include #include #include diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index a1fd3f7d487a68..e6d1a6510886c5 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -69,7 +69,8 @@ #include #include #include -#include +#include +#include #include #include #include diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c index f27fc0f14c11f2..a9a978731c5b0d 100644 --- a/drivers/tty/tty_ioctl.c +++ b/drivers/tty/tty_ioctl.c @@ -9,7 +9,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c index 68947f6de5ad63..b0500a0a87b861 100644 --- a/drivers/tty/tty_ldisc.c +++ b/drivers/tty/tty_ldisc.c @@ -271,10 +271,13 @@ const struct file_operations tty_ldiscs_proc_fops = { struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty) { + struct tty_ldisc *ld; + ldsem_down_read(&tty->ldisc_sem, MAX_SCHEDULE_TIMEOUT); - if (!tty->ldisc) + ld = tty->ldisc; + if (!ld) ldsem_up_read(&tty->ldisc_sem); - return tty->ldisc; + return ld; } EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait); @@ -488,41 +491,6 @@ static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld) tty_ldisc_debug(tty, "%p: closed\n", ld); } -/** - * tty_ldisc_restore - helper for tty ldisc change - * @tty: tty to recover - * @old: previous ldisc - * - * Restore the previous line discipline or N_TTY when a line discipline - * change fails due to an open error - */ - -static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old) -{ - struct tty_ldisc *new_ldisc; - int r; - - /* There is an outstanding reference here so this is safe */ - old = tty_ldisc_get(tty, old->ops->num); - WARN_ON(IS_ERR(old)); - tty->ldisc = old; - tty_set_termios_ldisc(tty, old->ops->num); - if (tty_ldisc_open(tty, old) < 0) { - tty_ldisc_put(old); - /* This driver is always present */ - new_ldisc = tty_ldisc_get(tty, N_TTY); - if (IS_ERR(new_ldisc)) - panic("n_tty: get"); - tty->ldisc = new_ldisc; - tty_set_termios_ldisc(tty, N_TTY); - r = tty_ldisc_open(tty, new_ldisc); - if (r < 0) - panic("Couldn't open N_TTY ldisc for " - "%s --- error %d.", - tty_name(tty), r); - } -} - /** * tty_set_ldisc - set line discipline * @tty: the terminal to set @@ -536,12 +504,7 @@ static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old) int tty_set_ldisc(struct tty_struct *tty, int disc) { - int retval; - struct tty_ldisc *old_ldisc, *new_ldisc; - - new_ldisc = tty_ldisc_get(tty, disc); - if (IS_ERR(new_ldisc)) - return PTR_ERR(new_ldisc); + int retval, old_disc; tty_lock(tty); retval = tty_ldisc_lock(tty, 5 * HZ); @@ -554,7 +517,8 @@ int tty_set_ldisc(struct tty_struct *tty, int disc) } /* Check the no-op case */ - if (tty->ldisc->ops->num == disc) + old_disc = tty->ldisc->ops->num; + if (old_disc == disc) goto out; if 
(test_bit(TTY_HUPPED, &tty->flags)) { @@ -563,34 +527,25 @@ int tty_set_ldisc(struct tty_struct *tty, int disc) goto out; } - old_ldisc = tty->ldisc; - - /* Shutdown the old discipline. */ - tty_ldisc_close(tty, old_ldisc); - - /* Now set up the new line discipline. */ - tty->ldisc = new_ldisc; - tty_set_termios_ldisc(tty, disc); - - retval = tty_ldisc_open(tty, new_ldisc); + retval = tty_ldisc_reinit(tty, disc); if (retval < 0) { /* Back to the old one or N_TTY if we can't */ - tty_ldisc_put(new_ldisc); - tty_ldisc_restore(tty, old_ldisc); + if (tty_ldisc_reinit(tty, old_disc) < 0) { + pr_err("tty: TIOCSETD failed, reinitializing N_TTY\n"); + if (tty_ldisc_reinit(tty, N_TTY) < 0) { + /* At this point we have tty->ldisc == NULL. */ + pr_err("tty: reinitializing N_TTY failed\n"); + } + } } - if (tty->ldisc->ops->num != old_ldisc->ops->num && tty->ops->set_ldisc) { + if (tty->ldisc && tty->ldisc->ops->num != old_disc && + tty->ops->set_ldisc) { down_read(&tty->termios_rwsem); tty->ops->set_ldisc(tty); up_read(&tty->termios_rwsem); } - /* At this point we hold a reference to the new ldisc and a - reference to the old ldisc, or we hold two references to - the old ldisc (if it was restored as part of error cleanup - above). In either case, releasing a single reference from - the old ldisc is correct. */ - new_ldisc = old_ldisc; out: tty_ldisc_unlock(tty); @@ -598,7 +553,6 @@ int tty_set_ldisc(struct tty_struct *tty, int disc) already running */ tty_buffer_restart_work(tty->port); err: - tty_ldisc_put(new_ldisc); /* drop the extra reference */ tty_unlock(tty); return retval; } @@ -659,10 +613,8 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc) int retval; ld = tty_ldisc_get(tty, disc); - if (IS_ERR(ld)) { - BUG_ON(disc == N_TTY); + if (IS_ERR(ld)) return PTR_ERR(ld); - } if (tty->ldisc) { tty_ldisc_close(tty, tty->ldisc); @@ -674,10 +626,8 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc) tty_set_termios_ldisc(tty, disc); retval = tty_ldisc_open(tty, tty->ldisc); if (retval) { - if (!WARN_ON(disc == N_TTY)) { - tty_ldisc_put(tty->ldisc); - tty->ldisc = NULL; - } + tty_ldisc_put(tty->ldisc); + tty->ldisc = NULL; } return retval; } diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c index 9229de43e19d18..52b7baef4f7a80 100644 --- a/drivers/tty/tty_ldsem.c +++ b/drivers/tty/tty_ldsem.c @@ -32,6 +32,8 @@ #include #include #include +#include +#include #ifdef CONFIG_DEBUG_LOCK_ALLOC diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c index 5cd3cd93229374..1d21a9c1d33e6e 100644 --- a/drivers/tty/tty_port.c +++ b/drivers/tty/tty_port.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c index 397e1509fe51cc..8af8d954266337 100644 --- a/drivers/tty/vt/keyboard.c +++ b/drivers/tty/vt/keyboard.c @@ -26,7 +26,8 @@ #include #include -#include +#include +#include #include #include #include diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 9d3ce505e7aba3..5c4933bb4b5336 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -72,7 +72,7 @@ #include #include -#include +#include #include #include #include diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c index a56edf2d58eb26..0cbfe1ff6f6c75 100644 --- a/drivers/tty/vt/vt_ioctl.c +++ b/drivers/tty/vt/vt_ioctl.c @@ -10,7 +10,7 @@ #include #include -#include +#include #include #include #include diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index 31d95dc9c202da..60ce7fd54e890e 100644 --- 
a/drivers/uio/uio.c +++ b/drivers/uio/uio.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c index 5a59da0dc98a17..3e80aa3b917aa9 100644 --- a/drivers/usb/atm/usbatm.c +++ b/drivers/usb/atm/usbatm.c @@ -74,7 +74,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 235e305f8473a2..d5388938bc7aaf 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -32,6 +32,7 @@ #undef VERBOSE_DEBUG #include +#include #include #include #include diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c index 071964c7847f1f..cc61055fb9befc 100644 --- a/drivers/usb/class/usblp.c +++ b/drivers/usb/class/usblp.c @@ -49,7 +49,7 @@ #include #include -#include +#include #include #include #include diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index f03692ec552056..8fb309a0ff6b5d 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c @@ -1381,7 +1381,7 @@ static int usbtmc_probe(struct usb_interface *intf, dev_dbg(&intf->dev, "%s called\n", __func__); - data = kmalloc(sizeof(*data), GFP_KERNEL); + data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; @@ -1444,6 +1444,13 @@ static int usbtmc_probe(struct usb_interface *intf, break; } } + + if (!data->bulk_out || !data->bulk_in) { + dev_err(&intf->dev, "bulk endpoints not found\n"); + retcode = -ENODEV; + goto err_put; + } + /* Find int endpoint */ for (n = 0; n < iface_desc->desc.bNumEndpoints; n++) { endpoint = &iface_desc->endpoint[n].desc; @@ -1469,8 +1476,10 @@ static int usbtmc_probe(struct usb_interface *intf, if (data->iin_ep_present) { /* allocate int urb */ data->iin_urb = usb_alloc_urb(0, GFP_KERNEL); - if (!data->iin_urb) + if (!data->iin_urb) { + retcode = -ENOMEM; goto error_register; + } /* Protect interrupt in endpoint data until iin_urb is freed */ kref_get(&data->kref); @@ -1478,8 +1487,10 @@ static int usbtmc_probe(struct usb_interface *intf, /* allocate buffer for interrupt in */ data->iin_buffer = kmalloc(data->iin_wMaxPacketSize, GFP_KERNEL); - if (!data->iin_buffer) + if (!data->iin_buffer) { + retcode = -ENOMEM; goto error_register; + } /* fill interrupt urb */ usb_fill_int_urb(data->iin_urb, data->usb_dev, @@ -1512,6 +1523,7 @@ static int usbtmc_probe(struct usb_interface *intf, sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp); sysfs_remove_group(&intf->dev.kobj, &data_attr_grp); usbtmc_free_int(data); +err_put: kref_put(&data->kref, usbtmc_delete); return retcode; } diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 25dbd8c7aec733..4be52c602e9b7a 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -280,6 +280,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, /* * Adjust bInterval for quirked devices. + */ + /* + * This quirk fixes bIntervals reported in ms. + */ + if (to_usb_device(ddev)->quirks & + USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL) { + n = clamp(fls(d->bInterval) + 3, i, j); + i = j = n; + } + /* * This quirk fixes bIntervals reported in * linear microframes. 
*/ diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index ca425e8099ea81..cfc3cff6e8d590 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -36,6 +36,7 @@ #include #include +#include #include #include #include diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 612fab6e54fb84..79bdca5cb9c7ae 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -520,8 +520,10 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb) */ tbuf_size = max_t(u16, sizeof(struct usb_hub_descriptor), wLength); tbuf = kzalloc(tbuf_size, GFP_KERNEL); - if (!tbuf) - return -ENOMEM; + if (!tbuf) { + status = -ENOMEM; + goto err_alloc; + } bufp = tbuf; @@ -734,6 +736,7 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb) } kfree(tbuf); + err_alloc: /* any errors get returned through the urb completion */ spin_lock_irq(&hcd_root_hub_lock); diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index a56c75e09786d5..5286bf67869a83 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include #include @@ -4275,7 +4275,7 @@ static void hub_set_initial_usb2_lpm_policy(struct usb_device *udev) struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent); int connect_type = USB_PORT_CONNECT_TYPE_UNKNOWN; - if (!udev->usb2_hw_lpm_capable) + if (!udev->usb2_hw_lpm_capable || !udev->bos) return; if (hub) diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 24f9f98968a5d8..96b21b0dac1e8c 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -170,6 +170,14 @@ static const struct usb_device_id usb_quirk_list[] = { /* M-Systems Flash Disk Pioneers */ { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Baum Vario Ultra */ + { USB_DEVICE(0x0904, 0x6101), .driver_info = + USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL }, + { USB_DEVICE(0x0904, 0x6102), .driver_info = + USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL }, + { USB_DEVICE(0x0904, 0x6103), .driver_info = + USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL }, + /* Keytouch QWERTY Panel keyboard */ { USB_DEVICE(0x0926, 0x3333), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c index 2092e46b1380e9..f8d0747810e78d 100644 --- a/drivers/usb/dwc3/dwc3-omap.c +++ b/drivers/usb/dwc3/dwc3-omap.c @@ -250,6 +250,7 @@ static void dwc3_omap_set_mailbox(struct dwc3_omap *omap, val = dwc3_omap_read_utmi_ctrl(omap); val |= USBOTGSS_UTMI_OTG_CTRL_IDDIG; dwc3_omap_write_utmi_ctrl(omap, val); + break; case OMAP_DWC3_VBUS_OFF: val = dwc3_omap_read_utmi_ctrl(omap); @@ -392,7 +393,7 @@ static void dwc3_omap_set_utmi_mode(struct dwc3_omap *omap) { u32 reg; struct device_node *node = omap->dev->of_node; - int utmi_mode = 0; + u32 utmi_mode = 0; reg = dwc3_omap_read_utmi_ctrl(omap); diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 4db97ecae8859b..79e7a3480d51b0 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -171,6 +171,7 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, int status) { struct dwc3 *dwc = dep->dwc; + unsigned int unmap_after_complete = false; req->started = false; list_del(&req->list); @@ -180,11 +181,19 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, if (req->request.status == -EINPROGRESS) req->request.status = status; - if (dwc->ep0_bounced && dep->number <= 1) + /* + * NOTICE we don't want to unmap 
before calling ->complete() if we're + * dealing with a bounced ep0 request. If we unmap it here, we would end + * up overwritting the contents of req->buf and this could confuse the + * gadget driver. + */ + if (dwc->ep0_bounced && dep->number <= 1) { dwc->ep0_bounced = false; - - usb_gadget_unmap_request_by_dev(dwc->sysdev, - &req->request, req->direction); + unmap_after_complete = true; + } else { + usb_gadget_unmap_request_by_dev(dwc->sysdev, + &req->request, req->direction); + } trace_dwc3_gadget_giveback(req); @@ -192,6 +201,10 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, usb_gadget_giveback_request(&dep->endpoint, &req->request); spin_lock(&dwc->lock); + if (unmap_after_complete) + usb_gadget_unmap_request_by_dev(dwc->sysdev, + &req->request, req->direction); + if (dep->number > 1) pm_runtime_put(dwc->dev); } @@ -1342,6 +1355,68 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, if (r == req) { /* wait until it is processed */ dwc3_stop_active_transfer(dwc, dep->number, true); + + /* + * If request was already started, this means we had to + * stop the transfer. With that we also need to ignore + * all TRBs used by the request, however TRBs can only + * be modified after completion of END_TRANSFER + * command. So what we do here is that we wait for + * END_TRANSFER completion and only after that, we jump + * over TRBs by clearing HWO and incrementing dequeue + * pointer. + * + * Note that we have 2 possible types of transfers here: + * + * i) Linear buffer request + * ii) SG-list based request + * + * SG-list based requests will have r->num_pending_sgs + * set to a valid number (> 0). Linear requests, + * normally use a single TRB. + * + * For each of these two cases, if r->unaligned flag is + * set, one extra TRB has been used to align transfer + * size to wMaxPacketSize. + * + * All of these cases need to be taken into + * consideration so we don't mess up our TRB ring + * pointers. 
+ */ + wait_event_lock_irq(dep->wait_end_transfer, + !(dep->flags & DWC3_EP_END_TRANSFER_PENDING), + dwc->lock); + + if (!r->trb) + goto out1; + + if (r->num_pending_sgs) { + struct dwc3_trb *trb; + int i = 0; + + for (i = 0; i < r->num_pending_sgs; i++) { + trb = r->trb + i; + trb->ctrl &= ~DWC3_TRB_CTRL_HWO; + dwc3_ep_inc_deq(dep); + } + + if (r->unaligned) { + trb = r->trb + r->num_pending_sgs + 1; + trb->ctrl &= ~DWC3_TRB_CTRL_HWO; + dwc3_ep_inc_deq(dep); + } + } else { + struct dwc3_trb *trb = r->trb; + + trb->ctrl &= ~DWC3_TRB_CTRL_HWO; + dwc3_ep_inc_deq(dep); + + if (r->unaligned) { + trb = r->trb + 1; + trb->ctrl &= ~DWC3_TRB_CTRL_HWO; + dwc3_ep_inc_deq(dep); + } + } goto out1; } dev_err(dwc->dev, "request %p was not queued to %s\n", @@ -1352,6 +1427,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, out1: /* giveback the request */ + dep->queued_requests--; dwc3_gadget_giveback(dep, req, -ECONNRESET); out0: @@ -2126,12 +2202,12 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, return 1; } - if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN) - return 1; - count = trb->size & DWC3_TRB_SIZE_MASK; req->remaining += count; + if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN) + return 1; + if (dep->direction) { if (count) { trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size); @@ -3228,15 +3304,10 @@ void dwc3_gadget_exit(struct dwc3 *dwc) int dwc3_gadget_suspend(struct dwc3 *dwc) { - int ret; - if (!dwc->gadget_driver) return 0; - ret = dwc3_gadget_run_stop(dwc, false, false); - if (ret < 0) - return ret; - + dwc3_gadget_run_stop(dwc, false, false); dwc3_disconnect_gadget(dwc); __dwc3_gadget_stop(dwc); diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h index 3129bcf74d7d8d..265e223ab64554 100644 --- a/drivers/usb/dwc3/gadget.h +++ b/drivers/usb/dwc3/gadget.h @@ -28,23 +28,23 @@ struct dwc3; #define gadget_to_dwc(g) (container_of(g, struct dwc3, gadget)) /* DEPCFG parameter 1 */ -#define DWC3_DEPCFG_INT_NUM(n) ((n) << 0) +#define DWC3_DEPCFG_INT_NUM(n) (((n) & 0x1f) << 0) #define DWC3_DEPCFG_XFER_COMPLETE_EN (1 << 8) #define DWC3_DEPCFG_XFER_IN_PROGRESS_EN (1 << 9) #define DWC3_DEPCFG_XFER_NOT_READY_EN (1 << 10) #define DWC3_DEPCFG_FIFO_ERROR_EN (1 << 11) #define DWC3_DEPCFG_STREAM_EVENT_EN (1 << 13) -#define DWC3_DEPCFG_BINTERVAL_M1(n) ((n) << 16) +#define DWC3_DEPCFG_BINTERVAL_M1(n) (((n) & 0xff) << 16) #define DWC3_DEPCFG_STREAM_CAPABLE (1 << 24) -#define DWC3_DEPCFG_EP_NUMBER(n) ((n) << 25) +#define DWC3_DEPCFG_EP_NUMBER(n) (((n) & 0x1f) << 25) #define DWC3_DEPCFG_BULK_BASED (1 << 30) #define DWC3_DEPCFG_FIFO_BASED (1 << 31) /* DEPCFG parameter 0 */ -#define DWC3_DEPCFG_EP_TYPE(n) ((n) << 1) -#define DWC3_DEPCFG_MAX_PACKET_SIZE(n) ((n) << 3) -#define DWC3_DEPCFG_FIFO_NUMBER(n) ((n) << 17) -#define DWC3_DEPCFG_BURST_SIZE(n) ((n) << 22) +#define DWC3_DEPCFG_EP_TYPE(n) (((n) & 0x3) << 1) +#define DWC3_DEPCFG_MAX_PACKET_SIZE(n) (((n) & 0x7ff) << 3) +#define DWC3_DEPCFG_FIFO_NUMBER(n) (((n) & 0x1f) << 17) +#define DWC3_DEPCFG_BURST_SIZE(n) (((n) & 0xf) << 22) #define DWC3_DEPCFG_DATA_SEQ_NUM(n) ((n) << 26) /* This applies for core versions earlier than 1.94a */ #define DWC3_DEPCFG_IGN_SEQ_NUM (1 << 31) diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 78c44979dde382..cbff3b02840df9 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -269,6 +269,7 @@ static ssize_t gadget_dev_desc_UDC_store(struct config_item *item, ret = unregister_gadget(gi); if (ret) goto err; + 
kfree(name); } else { if (gi->composite.gadget_driver.udc_name) { ret = -EBUSY; diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c index a30766ca422644..5e3828d9dac7f3 100644 --- a/drivers/usb/gadget/function/f_acm.c +++ b/drivers/usb/gadget/function/f_acm.c @@ -535,13 +535,15 @@ static int acm_notify_serial_state(struct f_acm *acm) { struct usb_composite_dev *cdev = acm->port.func.config->cdev; int status; + __le16 serial_state; spin_lock(&acm->lock); if (acm->notify_req) { dev_dbg(&cdev->gadget->dev, "acm ttyGS%d serial state %04x\n", acm->port_num, acm->serial_state); + serial_state = cpu_to_le16(acm->serial_state); status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE, - 0, &acm->serial_state, sizeof(acm->serial_state)); + 0, &serial_state, sizeof(acm->serial_state)); } else { acm->pending = true; status = 0; diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 87fccf611b698c..a0085571824d9b 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -1833,11 +1834,14 @@ static int ffs_func_eps_enable(struct ffs_function *func) spin_lock_irqsave(&func->ffs->eps_lock, flags); while(count--) { struct usb_endpoint_descriptor *ds; + struct usb_ss_ep_comp_descriptor *comp_desc = NULL; + int needs_comp_desc = false; int desc_idx; - if (ffs->gadget->speed == USB_SPEED_SUPER) + if (ffs->gadget->speed == USB_SPEED_SUPER) { desc_idx = 2; - else if (ffs->gadget->speed == USB_SPEED_HIGH) + needs_comp_desc = true; + } else if (ffs->gadget->speed == USB_SPEED_HIGH) desc_idx = 1; else desc_idx = 0; @@ -1854,6 +1858,14 @@ static int ffs_func_eps_enable(struct ffs_function *func) ep->ep->driver_data = ep; ep->ep->desc = ds; + + comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds + + USB_DT_ENDPOINT_SIZE); + ep->ep->maxburst = comp_desc->bMaxBurst + 1; + + if (needs_comp_desc) + ep->ep->comp_desc = comp_desc; + ret = usb_ep_enable(ep->ep); if (likely(!ret)) { epfile->ep = ep; @@ -2252,7 +2264,7 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type, if (len < sizeof(*d) || d->bFirstInterfaceNumber >= ffs->interfaces_count || - d->Reserved1) + !d->Reserved1) return -EINVAL; for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i) if (d->Reserved2[i]) diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c index 89b48bcc377a16..5eea44823ca06d 100644 --- a/drivers/usb/gadget/function/f_hid.c +++ b/drivers/usb/gadget/function/f_hid.c @@ -367,7 +367,7 @@ static ssize_t f_hidg_write(struct file *file, const char __user *buffer, count = min_t(unsigned, count, hidg->report_length); spin_unlock_irqrestore(&hidg->write_spinlock, flags); - status = copy_from_user(hidg->req->buf, buffer, count); + status = copy_from_user(req->buf, buffer, count); if (status != 0) { ERROR(hidg->func.config->cdev, @@ -378,9 +378,9 @@ static ssize_t f_hidg_write(struct file *file, const char __user *buffer, spin_lock_irqsave(&hidg->write_spinlock, flags); - /* we our function has been disabled by host */ + /* when our function has been disabled by host */ if (!hidg->req) { - free_ep_req(hidg->in_ep, hidg->req); + free_ep_req(hidg->in_ep, req); /* * TODO * Should we fail with error here? 
@@ -394,7 +394,7 @@ static ssize_t f_hidg_write(struct file *file, const char __user *buffer, req->complete = f_hidg_req_complete; req->context = hidg; - status = usb_ep_queue(hidg->in_ep, hidg->req, GFP_ATOMIC); + status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC); if (status < 0) { ERROR(hidg->func.config->cdev, "usb_ep_queue error on int endpoint %zd\n", status); diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c index 8f3659b65f5313..4c8aacc232c07b 100644 --- a/drivers/usb/gadget/function/f_mass_storage.c +++ b/drivers/usb/gadget/function/f_mass_storage.c @@ -207,6 +207,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c index 27ed51b5082f66..f8a1881609a2c8 100644 --- a/drivers/usb/gadget/function/f_uvc.c +++ b/drivers/usb/gadget/function/f_uvc.c @@ -258,13 +258,6 @@ uvc_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) memcpy(&uvc_event->req, ctrl, sizeof(uvc_event->req)); v4l2_event_queue(&uvc->vdev, &v4l2_event); - /* Pass additional setup data to userspace */ - if (uvc->event_setup_out && uvc->event_length) { - uvc->control_req->length = uvc->event_length; - return usb_ep_queue(uvc->func.config->cdev->gadget->ep0, - uvc->control_req, GFP_ATOMIC); - } - return 0; } @@ -601,6 +594,14 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f) opts->streaming_maxpacket = clamp(opts->streaming_maxpacket, 1U, 3072U); opts->streaming_maxburst = min(opts->streaming_maxburst, 15U); + /* For SS, wMaxPacketSize has to be 1024 if bMaxBurst is not 0 */ + if (opts->streaming_maxburst && + (opts->streaming_maxpacket % 1024) != 0) { + opts->streaming_maxpacket = roundup(opts->streaming_maxpacket, 1024); + INFO(cdev, "overriding streaming_maxpacket to %d\n", + opts->streaming_maxpacket); + } + /* Fill in the FS/HS/SS Video Streaming specific descriptors from the * module parameters. * @@ -632,7 +633,7 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f) uvc_ss_streaming_comp.bMaxBurst = opts->streaming_maxburst; uvc_ss_streaming_comp.wBytesPerInterval = cpu_to_le16(max_packet_size * max_packet_mult * - opts->streaming_maxburst); + (opts->streaming_maxburst + 1)); /* Allocate endpoints. */ ep = usb_ep_autoconfig(cdev->gadget, &uvc_control_ep); diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index a2615d64d07c19..a2c91686929372 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -84,8 +84,7 @@ static int ep_open(struct inode *, struct file *); /* /dev/gadget/$CHIP represents ep0 and the whole device */ enum ep0_state { - /* DISBLED is the initial state. - */ + /* DISABLED is the initial state. 
*/ STATE_DEV_DISABLED = 0, /* Only one open() of /dev/gadget/$CHIP; only one file tracks @@ -1782,8 +1781,10 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) spin_lock_irq (&dev->lock); value = -EINVAL; - if (dev->buf) + if (dev->buf) { + kfree(kbuf); goto fail; + } dev->buf = kbuf; /* full or low speed config */ diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c index 11bbce28bc231b..2035906b8ced17 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c @@ -610,7 +610,7 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) { struct usba_ep *ep = to_usba_ep(_ep); struct usba_udc *udc = ep->udc; - unsigned long flags, ept_cfg, maxpacket; + unsigned long flags, maxpacket; unsigned int nr_trans; DBG(DBG_GADGET, "%s: ep_enable: desc=%p\n", ep->ep.name, desc); @@ -630,7 +630,7 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) ep->is_in = 0; DBG(DBG_ERR, "%s: EPT_CFG = 0x%lx (maxpacket = %lu)\n", - ep->ep.name, ept_cfg, maxpacket); + ep->ep.name, ep->ept_cfg, maxpacket); if (usb_endpoint_dir_in(desc)) { ep->is_in = 1; diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c index c60abe3a68f9cf..8cabc5944d5f1d 100644 --- a/drivers/usb/gadget/udc/dummy_hcd.c +++ b/drivers/usb/gadget/udc/dummy_hcd.c @@ -1031,6 +1031,8 @@ static int dummy_udc_probe(struct platform_device *pdev) int rc; dum = *((void **)dev_get_platdata(&pdev->dev)); + /* Clear usb_gadget region for new registration to udc-core */ + memzero_explicit(&dum->gadget, sizeof(struct usb_gadget)); dum->gadget.name = gadget_name; dum->gadget.ops = &dummy_ops; dum->gadget.max_speed = USB_SPEED_SUPER; diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c index 85504419ab312e..3828c2ec8623b1 100644 --- a/drivers/usb/gadget/udc/net2280.c +++ b/drivers/usb/gadget/udc/net2280.c @@ -1146,15 +1146,15 @@ static int scan_dma_completions(struct net2280_ep *ep) */ while (!list_empty(&ep->queue)) { struct net2280_request *req; - u32 tmp; + u32 req_dma_count; req = list_entry(ep->queue.next, struct net2280_request, queue); if (!req->valid) break; rmb(); - tmp = le32_to_cpup(&req->td->dmacount); - if ((tmp & BIT(VALID_BIT)) != 0) + req_dma_count = le32_to_cpup(&req->td->dmacount); + if ((req_dma_count & BIT(VALID_BIT)) != 0) break; /* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short" @@ -1163,40 +1163,41 @@ static int scan_dma_completions(struct net2280_ep *ep) */ if (unlikely(req->td->dmadesc == 0)) { /* paranoia */ - tmp = readl(&ep->dma->dmacount); - if (tmp & DMA_BYTE_COUNT_MASK) + u32 const ep_dmacount = readl(&ep->dma->dmacount); + + if (ep_dmacount & DMA_BYTE_COUNT_MASK) break; /* single transfer mode */ - dma_done(ep, req, tmp, 0); + dma_done(ep, req, req_dma_count, 0); num_completed++; break; } else if (!ep->is_in && (req->req.length % ep->ep.maxpacket) && !(ep->dev->quirks & PLX_PCIE)) { - tmp = readl(&ep->regs->ep_stat); + u32 const ep_stat = readl(&ep->regs->ep_stat); /* AVOID TROUBLE HERE by not issuing short reads from * your gadget driver. That helps avoids errata 0121, * 0122, and 0124; not all cases trigger the warning. 
*/ - if ((tmp & BIT(NAK_OUT_PACKETS)) == 0) { + if ((ep_stat & BIT(NAK_OUT_PACKETS)) == 0) { ep_warn(ep->dev, "%s lost packet sync!\n", ep->ep.name); req->req.status = -EOVERFLOW; } else { - tmp = readl(&ep->regs->ep_avail); - if (tmp) { + u32 const ep_avail = readl(&ep->regs->ep_avail); + if (ep_avail) { /* fifo gets flushed later */ ep->out_overflow = 1; ep_dbg(ep->dev, "%s dma, discard %d len %d\n", - ep->ep.name, tmp, + ep->ep.name, ep_avail, req->req.length); req->req.status = -EOVERFLOW; } } } - dma_done(ep, req, tmp, 0); + dma_done(ep, req, req_dma_count, 0); num_completed++; } diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c index a97da645c1b9ea..8a365aad66fe2e 100644 --- a/drivers/usb/gadget/udc/pch_udc.c +++ b/drivers/usb/gadget/udc/pch_udc.c @@ -1523,7 +1523,6 @@ static void pch_udc_free_dma_chain(struct pch_udc_dev *dev, td = phys_to_virt(addr); addr2 = (dma_addr_t)td->next; pci_pool_free(dev->data_requests, td, addr); - td->next = 0x00; addr = addr2; } req->chain_len = 1; diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c index e1335ad5bce9f2..832c4fdbe98512 100644 --- a/drivers/usb/gadget/udc/pxa27x_udc.c +++ b/drivers/usb/gadget/udc/pxa27x_udc.c @@ -2534,9 +2534,10 @@ static int pxa_udc_remove(struct platform_device *_dev) usb_del_gadget_udc(&udc->gadget); pxa_cleanup_debugfs(udc); - if (!IS_ERR_OR_NULL(udc->transceiver)) + if (!IS_ERR_OR_NULL(udc->transceiver)) { usb_unregister_notifier(udc->transceiver, &pxa27x_udc_phy); - usb_put_phy(udc->transceiver); + usb_put_phy(udc->transceiver); + } udc->transceiver = NULL; the_controller = NULL; diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c index 414e3c376dbbd5..5302f988e7e670 100644 --- a/drivers/usb/host/ohci-at91.c +++ b/drivers/usb/host/ohci-at91.c @@ -350,7 +350,7 @@ static int ohci_at91_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, case USB_PORT_FEAT_SUSPEND: dev_dbg(hcd->self.controller, "SetPortFeat: SUSPEND\n"); - if (valid_port(wIndex)) { + if (valid_port(wIndex) && ohci_at91->sfr_regmap) { ohci_at91_port_suspend(ohci_at91->sfr_regmap, 1); return 0; @@ -393,7 +393,7 @@ static int ohci_at91_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, case USB_PORT_FEAT_SUSPEND: dev_dbg(hcd->self.controller, "ClearPortFeature: SUSPEND\n"); - if (valid_port(wIndex)) { + if (valid_port(wIndex) && ohci_at91->sfr_regmap) { ohci_at91_port_suspend(ohci_at91->sfr_regmap, 0); return 0; diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c index 363d125300eacf..2b4a00fa735dfe 100644 --- a/drivers/usb/host/xhci-dbg.c +++ b/drivers/usb/host/xhci-dbg.c @@ -109,7 +109,7 @@ static void xhci_print_cap_regs(struct xhci_hcd *xhci) xhci_dbg(xhci, "RTSOFF 0x%x:\n", temp & RTSOFF_MASK); /* xhci 1.1 controllers have the HCCPARAMS2 register */ - if (hci_version > 100) { + if (hci_version > 0x100) { temp = readl(&xhci->cap_regs->hcc_params2); xhci_dbg(xhci, "HCC PARAMS2 0x%x:\n", (unsigned int) temp); xhci_dbg(xhci, " HC %s Force save context capability", diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c index 9066ec9e0c2e7a..67d5dc79b6b50e 100644 --- a/drivers/usb/host/xhci-mtk.c +++ b/drivers/usb/host/xhci-mtk.c @@ -382,7 +382,6 @@ static int usb_wakeup_of_property_parse(struct xhci_hcd_mtk *mtk, static int xhci_mtk_setup(struct usb_hcd *hcd); static const struct xhci_driver_overrides xhci_mtk_overrides __initconst = { - .extra_priv_size = sizeof(struct xhci_hcd), .reset = xhci_mtk_setup, }; @@ -678,13 
+677,13 @@ static int xhci_mtk_probe(struct platform_device *pdev) goto power_off_phys; } - if (HCC_MAX_PSA(xhci->hcc_params) >= 4) - xhci->shared_hcd->can_do_streams = 1; - ret = usb_add_hcd(hcd, irq, IRQF_SHARED); if (ret) goto put_usb3_hcd; + if (HCC_MAX_PSA(xhci->hcc_params) >= 4) + xhci->shared_hcd->can_do_streams = 1; + ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED); if (ret) goto dealloc_usb2_hcd; diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 6d33b42ffcf522..6ed468fa7d5e59 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -286,6 +286,8 @@ static int xhci_plat_remove(struct platform_device *dev) struct xhci_hcd *xhci = hcd_to_xhci(hcd); struct clk *clk = xhci->clk; + xhci->xhc_state |= XHCI_STATE_REMOVING; + usb_remove_hcd(xhci->shared_hcd); usb_phy_shutdown(hcd->usb_phy); @@ -342,6 +344,7 @@ MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match); static struct platform_driver usb_xhci_driver = { .probe = xhci_plat_probe, .remove = xhci_plat_remove, + .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "xhci-hcd", .pm = DEV_PM_OPS, diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index d9936c771fa074..a3309aa02993df 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1989,6 +1989,9 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, case TRB_NORMAL: td->urb->actual_length = requested - remaining; goto finish_td; + case TRB_STATUS: + td->urb->actual_length = requested; + goto finish_td; default: xhci_warn(xhci, "WARN: unexpected TRB Type %d\n", trb_type); diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c index a59fafb4b329f5..74436f8ca5382f 100644 --- a/drivers/usb/host/xhci-tegra.c +++ b/drivers/usb/host/xhci-tegra.c @@ -1308,7 +1308,6 @@ static int tegra_xhci_setup(struct usb_hcd *hcd) } static const struct xhci_driver_overrides tegra_xhci_overrides __initconst = { - .extra_priv_size = sizeof(struct xhci_hcd), .reset = tegra_xhci_setup, }; diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 6d6c46000e56cc..953fd8f62df078 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -868,7 +868,7 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci) spin_lock_irqsave(&xhci->lock, flags); - /* disble usb3 ports Wake bits*/ + /* disable usb3 ports Wake bits */ port_index = xhci->num_usb3_ports; port_array = xhci->usb3_ports; while (port_index--) { @@ -879,7 +879,7 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci) writel(t2, port_array[port_index]); } - /* disble usb2 ports Wake bits*/ + /* disable usb2 ports Wake bits */ port_index = xhci->num_usb2_ports; port_array = xhci->usb2_ports; while (port_index--) { @@ -1477,6 +1477,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) struct xhci_ring *ep_ring; struct xhci_virt_ep *ep; struct xhci_command *command; + struct xhci_virt_device *vdev; xhci = hcd_to_xhci(hcd); spin_lock_irqsave(&xhci->lock, flags); @@ -1485,15 +1486,27 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) /* Make sure the URB hasn't completed or been unlinked already */ ret = usb_hcd_check_unlink_urb(hcd, urb, status); - if (ret || !urb->hcpriv) + if (ret) goto done; + + /* give back URB now if we can't queue it for cancel */ + vdev = xhci->devs[urb->dev->slot_id]; + urb_priv = urb->hcpriv; + if (!vdev || !urb_priv) + goto err_giveback; + + ep_index = xhci_get_endpoint_index(&urb->ep->desc); + ep = 
&vdev->eps[ep_index]; + ep_ring = xhci_urb_to_transfer_ring(xhci, urb); + if (!ep || !ep_ring) + goto err_giveback; + temp = readl(&xhci->op_regs->status); if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) { xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, "HW died, freeing TD."); - urb_priv = urb->hcpriv; for (i = urb_priv->num_tds_done; - i < urb_priv->num_tds && xhci->devs[urb->dev->slot_id]; + i < urb_priv->num_tds; i++) { td = &urb_priv->td[i]; if (!list_empty(&td->td_list)) @@ -1501,23 +1514,9 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) if (!list_empty(&td->cancelled_td_list)) list_del_init(&td->cancelled_td_list); } - - usb_hcd_unlink_urb_from_ep(hcd, urb); - spin_unlock_irqrestore(&xhci->lock, flags); - usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN); - xhci_urb_free_priv(urb_priv); - return ret; + goto err_giveback; } - ep_index = xhci_get_endpoint_index(&urb->ep->desc); - ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index]; - ep_ring = xhci_urb_to_transfer_ring(xhci, urb); - if (!ep_ring) { - ret = -EINVAL; - goto done; - } - - urb_priv = urb->hcpriv; i = urb_priv->num_tds_done; if (i < urb_priv->num_tds) xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, @@ -1554,6 +1553,14 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) done: spin_unlock_irqrestore(&xhci->lock, flags); return ret; + +err_giveback: + if (urb_priv) + xhci_urb_free_priv(urb_priv); + usb_hcd_unlink_urb_from_ep(hcd, urb); + spin_unlock_irqrestore(&xhci->lock, flags); + usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN); + return ret; } /* Drop an endpoint from a new bandwidth configuration for this device. diff --git a/drivers/usb/image/mdc800.c b/drivers/usb/image/mdc800.c index 5cf2633cdb0471..e92540a21b6b5a 100644 --- a/drivers/usb/image/mdc800.c +++ b/drivers/usb/image/mdc800.c @@ -85,7 +85,7 @@ * (20/10/1999) */ -#include +#include #include #include #include diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c index c5fa584d8f0a17..db9a9e6ff6bee9 100644 --- a/drivers/usb/misc/adutux.c +++ b/drivers/usb/misc/adutux.c @@ -21,6 +21,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include +#include #include #include #include diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c index debc1fd74b0df2..502bfe30a077a2 100644 --- a/drivers/usb/misc/idmouse.c +++ b/drivers/usb/misc/idmouse.c @@ -17,6 +17,7 @@ */ #include +#include #include #include #include @@ -346,6 +347,9 @@ static int idmouse_probe(struct usb_interface *interface, if (iface_desc->desc.bInterfaceClass != 0x0A) return -ENODEV; + if (iface_desc->desc.bNumEndpoints < 1) + return -ENODEV; + /* allocate memory for our device state and initialize it */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (dev == NULL) diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index 095778ff984de2..37c63cb39714b8 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c @@ -781,12 +781,6 @@ static int iowarrior_probe(struct usb_interface *interface, iface_desc = interface->cur_altsetting; dev->product_id = le16_to_cpu(udev->descriptor.idProduct); - if (iface_desc->desc.bNumEndpoints < 1) { - dev_err(&interface->dev, "Invalid number of endpoints\n"); - retval = -EINVAL; - goto error; - } - /* set up the endpoint information */ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; @@ -797,6 +791,21 @@ static int iowarrior_probe(struct usb_interface *interface, /* this one will match for the IOWarrior56 only */ 
dev->int_out_endpoint = endpoint; } + + if (!dev->int_in_endpoint) { + dev_err(&interface->dev, "no interrupt-in endpoint found\n"); + retval = -ENODEV; + goto error; + } + + if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) { + if (!dev->int_out_endpoint) { + dev_err(&interface->dev, "no interrupt-out endpoint found\n"); + retval = -ENODEV; + goto error; + } + } + /* we have to check the report_size often, so remember it in the endianness suitable for our machine */ dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint); if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) && diff --git a/drivers/usb/misc/lvstest.c b/drivers/usb/misc/lvstest.c index 77176511658f33..d3d12475326663 100644 --- a/drivers/usb/misc/lvstest.c +++ b/drivers/usb/misc/lvstest.c @@ -366,6 +366,10 @@ static int lvs_rh_probe(struct usb_interface *intf, hdev = interface_to_usbdev(intf); desc = intf->cur_altsetting; + + if (desc->desc.bNumEndpoints < 1) + return -ENODEV; + endpoint = &desc->endpoint[0].desc; /* valid only for SS root hub */ diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c index fc329c98a6e8ea..b106ce76997bff 100644 --- a/drivers/usb/misc/rio500.c +++ b/drivers/usb/misc/rio500.c @@ -31,7 +31,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c index 4e18600dc9b43e..91f66d68bcb7b5 100644 --- a/drivers/usb/misc/usb251xb.c +++ b/drivers/usb/misc/usb251xb.c @@ -375,18 +375,24 @@ static int usb251xb_get_ofdata(struct usb251xb *hub, if (of_get_property(np, "dynamic-power-switching", NULL)) hub->conf_data2 |= BIT(7); - if (of_get_property(np, "oc-delay-100us", NULL)) { - hub->conf_data2 &= ~BIT(5); - hub->conf_data2 &= ~BIT(4); - } else if (of_get_property(np, "oc-delay-4ms", NULL)) { - hub->conf_data2 &= ~BIT(5); - hub->conf_data2 |= BIT(4); - } else if (of_get_property(np, "oc-delay-8ms", NULL)) { - hub->conf_data2 |= BIT(5); - hub->conf_data2 &= ~BIT(4); - } else if (of_get_property(np, "oc-delay-16ms", NULL)) { - hub->conf_data2 |= BIT(5); - hub->conf_data2 |= BIT(4); + if (!of_property_read_u32(np, "oc-delay-us", property_u32)) { + if (*property_u32 == 100) { + /* 100 us*/ + hub->conf_data2 &= ~BIT(5); + hub->conf_data2 &= ~BIT(4); + } else if (*property_u32 == 4000) { + /* 4 ms */ + hub->conf_data2 &= ~BIT(5); + hub->conf_data2 |= BIT(4); + } else if (*property_u32 == 16000) { + /* 16 ms */ + hub->conf_data2 |= BIT(5); + hub->conf_data2 |= BIT(4); + } else { + /* 8 ms (DEFAULT) */ + hub->conf_data2 |= BIT(5); + hub->conf_data2 &= ~BIT(4); + } } if (of_get_property(np, "compound-device", NULL)) @@ -432,30 +438,9 @@ static int usb251xb_get_ofdata(struct usb251xb *hub, } } - hub->max_power_sp = USB251XB_DEF_MAX_POWER_SELF; - if (!of_property_read_u32(np, "max-sp-power", property_u32)) - hub->max_power_sp = min_t(u8, be32_to_cpu(*property_u32) / 2, - 250); - - hub->max_power_bp = USB251XB_DEF_MAX_POWER_BUS; - if (!of_property_read_u32(np, "max-bp-power", property_u32)) - hub->max_power_bp = min_t(u8, be32_to_cpu(*property_u32) / 2, - 250); - - hub->max_current_sp = USB251XB_DEF_MAX_CURRENT_SELF; - if (!of_property_read_u32(np, "max-sp-current", property_u32)) - hub->max_current_sp = min_t(u8, be32_to_cpu(*property_u32) / 2, - 250); - - hub->max_current_bp = USB251XB_DEF_MAX_CURRENT_BUS; - if (!of_property_read_u32(np, "max-bp-current", property_u32)) - hub->max_current_bp = min_t(u8, be32_to_cpu(*property_u32) / 2, - 250); - hub->power_on_time = USB251XB_DEF_POWER_ON_TIME; - 
if (!of_property_read_u32(np, "power-on-time", property_u32)) - hub->power_on_time = min_t(u8, be32_to_cpu(*property_u32) / 2, - 255); + if (!of_property_read_u32(np, "power-on-time-ms", property_u32)) + hub->power_on_time = min_t(u8, *property_u32 / 2, 255); if (of_property_read_u16_array(np, "language-id", &hub->lang_id, 1)) hub->lang_id = USB251XB_DEF_LANGUAGE_ID; @@ -492,6 +477,10 @@ static int usb251xb_get_ofdata(struct usb251xb *hub, /* The following parameters are currently not exposed to devicetree, but * may be as soon as needed. */ + hub->max_power_sp = USB251XB_DEF_MAX_POWER_SELF; + hub->max_power_bp = USB251XB_DEF_MAX_POWER_BUS; + hub->max_current_sp = USB251XB_DEF_MAX_CURRENT_SELF; + hub->max_current_bp = USB251XB_DEF_MAX_CURRENT_BUS; hub->bat_charge_en = USB251XB_DEF_BATTERY_CHARGING_ENABLE; hub->boost_up = USB251XB_DEF_BOOST_UP; hub->boost_x = USB251XB_DEF_BOOST_X; diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c index 0a643fa74cab74..07014cad6dbe35 100644 --- a/drivers/usb/misc/uss720.c +++ b/drivers/usb/misc/uss720.c @@ -50,6 +50,7 @@ #include #include #include +#include /* * Version Information */ @@ -708,6 +709,11 @@ static int uss720_probe(struct usb_interface *intf, interface = intf->cur_altsetting; + if (interface->desc.bNumEndpoints < 3) { + usb_put_dev(usbdev); + return -ENODEV; + } + /* * Allocate parport interface */ diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c index 9fb8b1e6ecc26d..b6d8bf475c9270 100644 --- a/drivers/usb/mon/mon_bin.c +++ b/drivers/usb/mon/mon_bin.c @@ -8,6 +8,7 @@ */ #include +#include #include #include #include diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c index db1a4abf280613..19c416d69eb933 100644 --- a/drivers/usb/mon/mon_text.c +++ b/drivers/usb/mon/mon_text.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index d8bae6ca890475..0c3664ab705eed 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -2490,8 +2490,8 @@ static int musb_remove(struct platform_device *pdev) musb_host_cleanup(musb); musb_gadget_cleanup(musb); - spin_lock_irqsave(&musb->lock, flags); musb_platform_disable(musb); + spin_lock_irqsave(&musb->lock, flags); musb_disable_interrupts(musb); musb_writeb(musb->mregs, MUSB_DEVCTL, 0); spin_unlock_irqrestore(&musb->lock, flags); diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c index 00e272bfee39a9..355655f8a3fbc9 100644 --- a/drivers/usb/musb/musb_cppi41.c +++ b/drivers/usb/musb/musb_cppi41.c @@ -238,8 +238,27 @@ static void cppi41_dma_callback(void *private_data, transferred < cppi41_channel->packet_sz) cppi41_channel->prog_len = 0; - if (cppi41_channel->is_tx) - empty = musb_is_tx_fifo_empty(hw_ep); + if (cppi41_channel->is_tx) { + u8 type; + + if (is_host_active(musb)) + type = hw_ep->out_qh->type; + else + type = hw_ep->ep_in.type; + + if (type == USB_ENDPOINT_XFER_ISOC) + /* + * Don't use the early-TX-interrupt workaround below + * for Isoch transfers. Since Isoch transfers are + * periodic, by the time the next transfer is + * scheduled, the current one should be done already. + * + * This avoids audio playback underrun issues.
+ */ + empty = true; + else + empty = musb_is_tx_fifo_empty(hw_ep); + } if (!cppi41_channel->is_tx || empty) { cppi41_trans_done(cppi41_channel); diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c index 7c047c4a2565cc..9c7ee26ef38806 100644 --- a/drivers/usb/musb/musb_dsps.c +++ b/drivers/usb/musb/musb_dsps.c @@ -933,7 +933,7 @@ static int dsps_probe(struct platform_device *pdev) if (usb_get_dr_mode(&pdev->dev) == USB_DR_MODE_PERIPHERAL) { ret = dsps_setup_optional_vbus_irq(pdev, glue); if (ret) - return ret; + goto err_iounmap; } platform_set_drvdata(pdev, glue); @@ -946,6 +946,8 @@ static int dsps_probe(struct platform_device *pdev) err: pm_runtime_disable(&pdev->dev); +err_iounmap: + iounmap(glue->usbss_base); return ret; } @@ -956,6 +958,7 @@ static int dsps_remove(struct platform_device *pdev) platform_device_unregister(glue->musb); pm_runtime_disable(&pdev->dev); + iounmap(glue->usbss_base); return 0; } diff --git a/drivers/usb/phy/phy-isp1301.c b/drivers/usb/phy/phy-isp1301.c index db68156568e6e7..f333024660b4d0 100644 --- a/drivers/usb/phy/phy-isp1301.c +++ b/drivers/usb/phy/phy-isp1301.c @@ -33,6 +33,12 @@ static const struct i2c_device_id isp1301_id[] = { }; MODULE_DEVICE_TABLE(i2c, isp1301_id); +static const struct of_device_id isp1301_of_match[] = { + {.compatible = "nxp,isp1301" }, + { }, +}; +MODULE_DEVICE_TABLE(of, isp1301_of_match); + static struct i2c_client *isp1301_i2c_client; static int __isp1301_write(struct isp1301 *isp, u8 reg, u8 value, u8 clear) @@ -130,6 +136,7 @@ static int isp1301_remove(struct i2c_client *client) static struct i2c_driver isp1301_driver = { .driver = { .name = DRV_NAME, + .of_match_table = isp1301_of_match, }, .probe = isp1301_probe, .remove = isp1301_remove, diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c index eb433922598cff..6537d3ca2797d8 100644 --- a/drivers/usb/serial/digi_acceleport.c +++ b/drivers/usb/serial/digi_acceleport.c @@ -27,6 +27,7 @@ #include #include #include +#include #include /* Defines */ @@ -1499,7 +1500,7 @@ static int digi_read_oob_callback(struct urb *urb) return -1; /* handle each oob command */ - for (i = 0; i < urb->actual_length - 4; i += 4) { + for (i = 0; i < urb->actual_length - 3; i += 4) { opcode = buf[i]; line = buf[i + 1]; status = buf[i + 2]; diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c index 944de657a07a8d..49ce2be90fa00e 100644 --- a/drivers/usb/serial/generic.c +++ b/drivers/usb/serial/generic.c @@ -10,6 +10,7 @@ */ #include +#include #include #include #include diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c index ceaeebaa6f9058..a76b95d3215787 100644 --- a/drivers/usb/serial/io_ti.c +++ b/drivers/usb/serial/io_ti.c @@ -1674,6 +1674,12 @@ static void edge_interrupt_callback(struct urb *urb) function = TIUMP_GET_FUNC_FROM_CODE(data[0]); dev_dbg(dev, "%s - port_number %d, function %d, info 0x%x\n", __func__, port_number, function, data[1]); + + if (port_number >= edge_serial->serial->num_ports) { + dev_err(dev, "bad port number %d\n", port_number); + goto exit; + } + port = edge_serial->serial->port[port_number]; edge_port = usb_get_serial_port_data(port); if (!edge_port) { @@ -1755,7 +1761,7 @@ static void edge_bulk_in_callback(struct urb *urb) port_number = edge_port->port->port_number; - if (edge_port->lsr_event) { + if (urb->actual_length > 0 && edge_port->lsr_event) { edge_port->lsr_event = 0; dev_dbg(dev, "%s ===== Port %u LSR Status = %02x, Data = %02x ======\n", __func__, port_number, 
edge_port->lsr_mask, *data); diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c index a180b17d24323b..dd706953b46609 100644 --- a/drivers/usb/serial/omninet.c +++ b/drivers/usb/serial/omninet.c @@ -31,7 +31,6 @@ #define BT_IGNITIONPRO_ID 0x2000 /* function prototypes */ -static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port); static void omninet_process_read_urb(struct urb *urb); static void omninet_write_bulk_callback(struct urb *urb); static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port, @@ -60,7 +59,6 @@ static struct usb_serial_driver zyxel_omninet_device = { .attach = omninet_attach, .port_probe = omninet_port_probe, .port_remove = omninet_port_remove, - .open = omninet_open, .write = omninet_write, .write_room = omninet_write_room, .write_bulk_callback = omninet_write_bulk_callback, @@ -140,17 +138,6 @@ static int omninet_port_remove(struct usb_serial_port *port) return 0; } -static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port) -{ - struct usb_serial *serial = port->serial; - struct usb_serial_port *wport; - - wport = serial->port[1]; - tty_port_tty_set(&wport->port, tty); - - return usb_serial_generic_open(tty, port); -} - #define OMNINET_HEADERLEN 4 #define OMNINET_BULKOUTSIZE 64 #define OMNINET_PAYLOADSIZE (OMNINET_BULKOUTSIZE - OMNINET_HEADERLEN) diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 42cc72e54c051b..af67a0de6b5d47 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -233,6 +233,14 @@ static void option_instat_callback(struct urb *urb); #define BANDRICH_PRODUCT_1012 0x1012 #define QUALCOMM_VENDOR_ID 0x05C6 +/* These Quectel products use Qualcomm's vendor ID */ +#define QUECTEL_PRODUCT_UC20 0x9003 +#define QUECTEL_PRODUCT_UC15 0x9090 + +#define QUECTEL_VENDOR_ID 0x2c7c +/* These Quectel products use Quectel's vendor ID */ +#define QUECTEL_PRODUCT_EC21 0x0121 +#define QUECTEL_PRODUCT_EC25 0x0125 #define CMOTECH_VENDOR_ID 0x16d8 #define CMOTECH_PRODUCT_6001 0x6001 @@ -1161,7 +1169,14 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ - { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9003), /* Quectel UC20 */ + /* Quectel products using Qualcomm vendor ID */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)}, + { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + /* Quectel products using Quectel vendor ID */ + { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 696458db7e3c45..38b3f0d8cd580f 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -169,6 +169,8 @@ static const struct usb_device_id id_table[] = { {DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ {DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */ {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card 
(rev3) */ + {DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */ + {DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */ /* Huawei devices */ {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */ diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c index 93c6c9b08daae5..8a069aa154eda4 100644 --- a/drivers/usb/serial/safe_serial.c +++ b/drivers/usb/serial/safe_serial.c @@ -200,6 +200,11 @@ static void safe_process_read_urb(struct urb *urb) if (!safe) goto out; + if (length < 2) { + dev_err(&port->dev, "malformed packet\n"); + return; + } + fcs = fcs_compute10(data, length, CRC10_INITFCS); if (fcs) { dev_err(&port->dev, "%s - bad CRC %x\n", __func__, fcs); diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 16cc18369111d0..9129f6cb823074 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -2071,6 +2071,20 @@ UNUSUAL_DEV( 0x1370, 0x6828, 0x0110, 0x0110, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_IGNORE_RESIDUE ), +/* + * Reported by Tobias Jakobi + * The INIC-3619 bridge is used in the StarTech SLSODDU33B + * SATA-USB enclosure for slimline optical drives. + * + * The quirk enables MakeMKV to properly exchange keys with + * an installed BD drive. + */ +UNUSUAL_DEV( 0x13fd, 0x3609, 0x0209, 0x0209, + "Initio Corporation", + "INIC-3619", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_IGNORE_RESIDUE ), + /* Reported by Qinglin Ye */ UNUSUAL_DEV( 0x13fe, 0x3600, 0x0100, 0x0100, "Kingston", diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c index 1a6f78d7d0275f..cab2b71a80d027 100644 --- a/drivers/usb/usbip/usbip_common.c +++ b/drivers/usb/usbip/usbip_common.c @@ -327,13 +327,11 @@ EXPORT_SYMBOL_GPL(usbip_dump_header); int usbip_recv(struct socket *sock, void *buf, int size) { int result; - struct msghdr msg; - struct kvec iov; + struct kvec iov = {.iov_base = buf, .iov_len = size}; + struct msghdr msg = {.msg_flags = MSG_NOSIGNAL}; int total = 0; - /* for blocks of if (usbip_dbg_flag_xmit) */ - char *bp = buf; - int osize = size; + iov_iter_kvec(&msg.msg_iter, READ|ITER_KVEC, &iov, 1, size); usbip_dbg_xmit("enter\n"); @@ -344,26 +342,18 @@ int usbip_recv(struct socket *sock, void *buf, int size) } do { + int sz = msg_data_left(&msg); sock->sk->sk_allocation = GFP_NOIO; - iov.iov_base = buf; - iov.iov_len = size; - msg.msg_name = NULL; - msg.msg_namelen = 0; - msg.msg_control = NULL; - msg.msg_controllen = 0; - msg.msg_flags = MSG_NOSIGNAL; - - result = kernel_recvmsg(sock, &msg, &iov, 1, size, MSG_WAITALL); + + result = sock_recvmsg(sock, &msg, MSG_WAITALL); if (result <= 0) { pr_debug("receive sock %p buf %p size %u ret %d total %d\n", - sock, buf, size, result, total); + sock, buf + total, sz, result, total); goto err; } - size -= result; - buf += result; total += result; - } while (size > 0); + } while (msg_data_left(&msg)); if (usbip_dbg_flag_xmit) { if (!in_interrupt()) @@ -372,9 +362,9 @@ int usbip_recv(struct socket *sock, void *buf, int size) pr_debug("interrupt :"); pr_debug("receiving....\n"); - usbip_dump_buffer(bp, osize); - pr_debug("received, osize %d ret %d size %d total %d\n", - osize, result, size, total); + usbip_dump_buffer(buf, size); + pr_debug("received, osize %d ret %d size %zd total %d\n", + size, result, msg_data_left(&msg), total); } return total; diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h index 9f490375ac9236..f8573a52e41a56 100644 --- 
a/drivers/usb/usbip/usbip_common.h +++ b/drivers/usb/usbip/usbip_common.h @@ -31,6 +31,7 @@ #include #include #include +#include #include #define USBIP_VERSION "1.0.0" diff --git a/drivers/usb/wusbcore/wa-hc.c b/drivers/usb/wusbcore/wa-hc.c index 252c7bd9218afd..d01496fd27fe88 100644 --- a/drivers/usb/wusbcore/wa-hc.c +++ b/drivers/usb/wusbcore/wa-hc.c @@ -39,6 +39,9 @@ int wa_create(struct wahc *wa, struct usb_interface *iface, int result; struct device *dev = &iface->dev; + if (iface->cur_altsetting->desc.bNumEndpoints < 3) + return -ENODEV; + result = wa_rpipes_create(wa); if (result < 0) goto error_rpipes_create; diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c index 0aa6c3c29d1726..35a1e777b4497a 100644 --- a/drivers/uwb/hwa-rc.c +++ b/drivers/uwb/hwa-rc.c @@ -823,6 +823,9 @@ static int hwarc_probe(struct usb_interface *iface, struct hwarc *hwarc; struct device *dev = &iface->dev; + if (iface->cur_altsetting->desc.bNumEndpoints < 1) + return -ENODEV; + result = -ENOMEM; uwb_rc = uwb_rc_alloc(); if (uwb_rc == NULL) { diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c index 2bfc846ac07134..6345e85822a424 100644 --- a/drivers/uwb/i1480/dfu/usb.c +++ b/drivers/uwb/i1480/dfu/usb.c @@ -362,6 +362,9 @@ int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id) result); } + if (iface->cur_altsetting->desc.bNumEndpoints < 1) + return -ENODEV; + result = -ENOMEM; i1480_usb = kzalloc(sizeof(*i1480_usb), GFP_KERNEL); if (i1480_usb == NULL) { diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index 609f4f982c74c5..561084ab387f3f 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -403,6 +403,7 @@ static void vfio_group_release(struct kref *kref) struct iommu_group *iommu_group = group->iommu_group; WARN_ON(!list_empty(&group->device_list)); + WARN_ON(group->notifier.head); list_for_each_entry_safe(unbound, tmp, &group->unbound_list, unbound_next) { @@ -1573,6 +1574,10 @@ static int vfio_group_fops_open(struct inode *inode, struct file *filep) return -EBUSY; } + /* Warn if previous user didn't cleanup and re-init to drop them */ + if (WARN_ON(group->notifier.head)) + BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier); + filep->private_data = group; return 0; @@ -1584,9 +1589,6 @@ static int vfio_group_fops_release(struct inode *inode, struct file *filep) filep->private_data = NULL; - /* Any user didn't unregister? 
*/ - WARN_ON(group->notifier.head); - vfio_group_try_dissolve_container(group); atomic_dec(&group->opened); diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index 59b3f62a2d64eb..cf3de91fbfe7a5 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c @@ -20,6 +20,9 @@ #include #include #include +#include +#include + #include #include #include diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index bd6f293c4ebd59..32d2633092a37e 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -31,7 +31,8 @@ #include #include #include -#include +#include +#include #include #include #include @@ -1181,8 +1182,7 @@ static struct vfio_group *find_iommu_group(struct vfio_domain *domain, return NULL; } -static bool vfio_iommu_has_resv_msi(struct iommu_group *group, - phys_addr_t *base) +static bool vfio_iommu_has_sw_msi(struct iommu_group *group, phys_addr_t *base) { struct list_head group_resv_regions; struct iommu_resv_region *region, *next; @@ -1191,7 +1191,7 @@ static bool vfio_iommu_has_resv_msi(struct iommu_group *group, INIT_LIST_HEAD(&group_resv_regions); iommu_get_group_resv_regions(group, &group_resv_regions); list_for_each_entry(region, &group_resv_regions, list) { - if (region->type & IOMMU_RESV_MSI) { + if (region->type == IOMMU_RESV_SW_MSI) { *base = region->start; ret = true; goto out; @@ -1282,7 +1282,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, if (ret) goto out_domain; - resv_msi = vfio_iommu_has_resv_msi(iommu_group, &resv_msi_base); + resv_msi = vfio_iommu_has_sw_msi(iommu_group, &resv_msi_base); INIT_LIST_HEAD(&domain->group_list); list_add(&group->next, &domain->group_list); diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 2fe35354f20e5e..9b519897cc17b8 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -17,6 +17,8 @@ #include #include #include +#include +#include #include #include diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 4269e621e254ab..f0ba362d4c101a 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -27,6 +27,8 @@ #include #include #include +#include +#include #include #include "vhost.h" @@ -282,6 +284,22 @@ void vhost_poll_queue(struct vhost_poll *poll) } EXPORT_SYMBOL_GPL(vhost_poll_queue); +static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq) +{ + int j; + + for (j = 0; j < VHOST_NUM_ADDRS; j++) + vq->meta_iotlb[j] = NULL; +} + +static void vhost_vq_meta_reset(struct vhost_dev *d) +{ + int i; + + for (i = 0; i < d->nvqs; ++i) + __vhost_vq_meta_reset(d->vqs[i]); +} + static void vhost_vq_reset(struct vhost_dev *dev, struct vhost_virtqueue *vq) { @@ -312,6 +330,7 @@ static void vhost_vq_reset(struct vhost_dev *dev, vq->busyloop_timeout = 0; vq->umem = NULL; vq->iotlb = NULL; + __vhost_vq_meta_reset(vq); } static int vhost_worker(void *data) @@ -691,6 +710,18 @@ static int vq_memory_access_ok(void __user *log_base, struct vhost_umem *umem, return 1; } +static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq, + u64 addr, unsigned int size, + int type) +{ + const struct vhost_umem_node *node = vq->meta_iotlb[type]; + + if (!node) + return NULL; + + return (void *)(uintptr_t)(node->userspace_addr + addr - node->start); +} + /* Can we switch to this memory table? 
*/ /* Caller should have device mutex but not vq mutex */ static int memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem, @@ -733,8 +764,14 @@ static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to, * could be access through iotlb. So -EAGAIN should * not happen in this case. */ - /* TODO: more fast path */ struct iov_iter t; + void __user *uaddr = vhost_vq_meta_fetch(vq, + (u64)(uintptr_t)to, size, + VHOST_ADDR_DESC); + + if (uaddr) + return __copy_to_user(uaddr, from, size); + ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov, ARRAY_SIZE(vq->iotlb_iov), VHOST_ACCESS_WO); @@ -762,8 +799,14 @@ static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to, * could be access through iotlb. So -EAGAIN should * not happen in this case. */ - /* TODO: more fast path */ + void __user *uaddr = vhost_vq_meta_fetch(vq, + (u64)(uintptr_t)from, size, + VHOST_ADDR_DESC); struct iov_iter f; + + if (uaddr) + return __copy_from_user(to, uaddr, size); + ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov, ARRAY_SIZE(vq->iotlb_iov), VHOST_ACCESS_RO); @@ -783,17 +826,12 @@ static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to, return ret; } -static void __user *__vhost_get_user(struct vhost_virtqueue *vq, - void __user *addr, unsigned size) +static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq, + void __user *addr, unsigned int size, + int type) { int ret; - /* This function should be called after iotlb - * prefetch, which means we're sure that vq - * could be access through iotlb. So -EAGAIN should - * not happen in this case. - */ - /* TODO: more fast path */ ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov, ARRAY_SIZE(vq->iotlb_iov), VHOST_ACCESS_RO); @@ -814,14 +852,32 @@ static void __user *__vhost_get_user(struct vhost_virtqueue *vq, return vq->iotlb_iov[0].iov_base; } -#define vhost_put_user(vq, x, ptr) \ +/* This function should be called after iotlb + * prefetch, which means we're sure that vq + * could be access through iotlb. So -EAGAIN should + * not happen in this case. 
+ */ +static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq, + void *addr, unsigned int size, + int type) +{ + void __user *uaddr = vhost_vq_meta_fetch(vq, + (u64)(uintptr_t)addr, size, type); + if (uaddr) + return uaddr; + + return __vhost_get_user_slow(vq, addr, size, type); +} + +#define vhost_put_user(vq, x, ptr) \ ({ \ int ret = -EFAULT; \ if (!vq->iotlb) { \ ret = __put_user(x, ptr); \ } else { \ __typeof__(ptr) to = \ - (__typeof__(ptr)) __vhost_get_user(vq, ptr, sizeof(*ptr)); \ + (__typeof__(ptr)) __vhost_get_user(vq, ptr, \ + sizeof(*ptr), VHOST_ADDR_USED); \ if (to != NULL) \ ret = __put_user(x, to); \ else \ @@ -830,14 +886,16 @@ static void __user *__vhost_get_user(struct vhost_virtqueue *vq, ret; \ }) -#define vhost_get_user(vq, x, ptr) \ +#define vhost_get_user(vq, x, ptr, type) \ ({ \ int ret; \ if (!vq->iotlb) { \ ret = __get_user(x, ptr); \ } else { \ __typeof__(ptr) from = \ - (__typeof__(ptr)) __vhost_get_user(vq, ptr, sizeof(*ptr)); \ + (__typeof__(ptr)) __vhost_get_user(vq, ptr, \ + sizeof(*ptr), \ + type); \ if (from != NULL) \ ret = __get_user(x, from); \ else \ @@ -846,6 +904,12 @@ static void __user *__vhost_get_user(struct vhost_virtqueue *vq, ret; \ }) +#define vhost_get_avail(vq, x, ptr) \ + vhost_get_user(vq, x, ptr, VHOST_ADDR_AVAIL) + +#define vhost_get_used(vq, x, ptr) \ + vhost_get_user(vq, x, ptr, VHOST_ADDR_USED) + static void vhost_dev_lock_vqs(struct vhost_dev *d) { int i = 0; @@ -951,6 +1015,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev, ret = -EFAULT; break; } + vhost_vq_meta_reset(dev); if (vhost_new_umem_range(dev->iotlb, msg->iova, msg->size, msg->iova + msg->size - 1, msg->uaddr, msg->perm)) { @@ -960,6 +1025,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev, vhost_iotlb_notify_vq(dev, msg); break; case VHOST_IOTLB_INVALIDATE: + vhost_vq_meta_reset(dev); vhost_del_umem_range(dev->iotlb, msg->iova, msg->iova + msg->size - 1); break; @@ -1103,12 +1169,26 @@ static int vq_access_ok(struct vhost_virtqueue *vq, unsigned int num, sizeof *used + num * sizeof *used->ring + s); } +static void vhost_vq_meta_update(struct vhost_virtqueue *vq, + const struct vhost_umem_node *node, + int type) +{ + int access = (type == VHOST_ADDR_USED) ? 
+ VHOST_ACCESS_WO : VHOST_ACCESS_RO; + + if (likely(node->perm & access)) + vq->meta_iotlb[type] = node; +} + static int iotlb_access_ok(struct vhost_virtqueue *vq, - int access, u64 addr, u64 len) + int access, u64 addr, u64 len, int type) { const struct vhost_umem_node *node; struct vhost_umem *umem = vq->iotlb; - u64 s = 0, size; + u64 s = 0, size, orig_addr = addr; + + if (vhost_vq_meta_fetch(vq, addr, len, type)) + return true; while (len > s) { node = vhost_umem_interval_tree_iter_first(&umem->umem_tree, @@ -1125,6 +1205,10 @@ static int iotlb_access_ok(struct vhost_virtqueue *vq, } size = node->size - addr + node->start; + + if (orig_addr == addr && size >= len) + vhost_vq_meta_update(vq, node, type); + s += size; addr += size; } @@ -1141,13 +1225,15 @@ int vq_iotlb_prefetch(struct vhost_virtqueue *vq) return 1; return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc, - num * sizeof *vq->desc) && + num * sizeof(*vq->desc), VHOST_ADDR_DESC) && iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->avail, sizeof *vq->avail + - num * sizeof *vq->avail->ring + s) && + num * sizeof(*vq->avail->ring) + s, + VHOST_ADDR_AVAIL) && iotlb_access_ok(vq, VHOST_ACCESS_WO, (u64)(uintptr_t)vq->used, sizeof *vq->used + - num * sizeof *vq->used->ring + s); + num * sizeof(*vq->used->ring) + s, + VHOST_ADDR_USED); } EXPORT_SYMBOL_GPL(vq_iotlb_prefetch); @@ -1728,7 +1814,7 @@ int vhost_vq_init_access(struct vhost_virtqueue *vq) r = -EFAULT; goto err; } - r = vhost_get_user(vq, last_used_idx, &vq->used->idx); + r = vhost_get_used(vq, last_used_idx, &vq->used->idx); if (r) { vq_err(vq, "Can't access used idx at %p\n", &vq->used->idx); @@ -1930,29 +2016,36 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, /* Check it isn't doing very strange things with descriptor numbers. */ last_avail_idx = vq->last_avail_idx; - if (unlikely(vhost_get_user(vq, avail_idx, &vq->avail->idx))) { - vq_err(vq, "Failed to access avail idx at %p\n", - &vq->avail->idx); - return -EFAULT; - } - vq->avail_idx = vhost16_to_cpu(vq, avail_idx); - if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) { - vq_err(vq, "Guest moved used index from %u to %u", - last_avail_idx, vq->avail_idx); - return -EFAULT; - } + if (vq->avail_idx == vq->last_avail_idx) { + if (unlikely(vhost_get_avail(vq, avail_idx, &vq->avail->idx))) { + vq_err(vq, "Failed to access avail idx at %p\n", + &vq->avail->idx); + return -EFAULT; + } + vq->avail_idx = vhost16_to_cpu(vq, avail_idx); - /* If there's nothing new since last we looked, return invalid. */ - if (vq->avail_idx == last_avail_idx) - return vq->num; + if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) { + vq_err(vq, "Guest moved used index from %u to %u", + last_avail_idx, vq->avail_idx); + return -EFAULT; + } + + /* If there's nothing new since last we looked, return + * invalid. + */ + if (vq->avail_idx == last_avail_idx) + return vq->num; - /* Only get avail ring entries after they have been exposed by guest. */ - smp_rmb(); + /* Only get avail ring entries after they have been + * exposed by guest. + */ + smp_rmb(); + } /* Grab the next descriptor number they're advertising, and increment * the index we've seen. 
*/ - if (unlikely(vhost_get_user(vq, ring_head, + if (unlikely(vhost_get_avail(vq, ring_head, &vq->avail->ring[last_avail_idx & (vq->num - 1)]))) { vq_err(vq, "Failed to read head: idx %d address %p\n", last_avail_idx, @@ -2168,7 +2261,7 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) * with the barrier that the Guest executes when enabling * interrupts. */ smp_mb(); - if (vhost_get_user(vq, flags, &vq->avail->flags)) { + if (vhost_get_avail(vq, flags, &vq->avail->flags)) { vq_err(vq, "Failed to get flags"); return true; } @@ -2195,7 +2288,7 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) * interrupts. */ smp_mb(); - if (vhost_get_user(vq, event, vhost_used_event(vq))) { + if (vhost_get_avail(vq, event, vhost_used_event(vq))) { vq_err(vq, "Failed to get used event idx"); return true; } @@ -2242,7 +2335,7 @@ bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq) if (vq->avail_idx != vq->last_avail_idx) return false; - r = vhost_get_user(vq, avail_idx, &vq->avail->idx); + r = vhost_get_avail(vq, avail_idx, &vq->avail->idx); if (unlikely(r)) return false; vq->avail_idx = vhost16_to_cpu(vq, avail_idx); @@ -2278,7 +2371,7 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) /* They could have slipped one in as we were doing that: make * sure it's written, then check again. */ smp_mb(); - r = vhost_get_user(vq, avail_idx, &vq->avail->idx); + r = vhost_get_avail(vq, avail_idx, &vq->avail->idx); if (r) { vq_err(vq, "Failed to check avail idx at %p: %d\n", &vq->avail->idx, r); diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index a9cbbb148f460e..f55671d53f28fe 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -76,6 +76,13 @@ struct vhost_umem { int numem; }; +enum vhost_uaddr_type { + VHOST_ADDR_DESC = 0, + VHOST_ADDR_AVAIL = 1, + VHOST_ADDR_USED = 2, + VHOST_NUM_ADDRS = 3, +}; + /* The virtqueue structure describes a queue attached to a device. 
*/ struct vhost_virtqueue { struct vhost_dev *dev; @@ -86,6 +93,7 @@ struct vhost_virtqueue { struct vring_desc __user *desc; struct vring_avail __user *avail; struct vring_used __user *used; + const struct vhost_umem_node *meta_iotlb[VHOST_NUM_ADDRS]; struct file *kick; struct file *call; struct file *error; diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index ce5e63d2c66aac..44eed8eb0725b2 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -223,6 +223,46 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt) return len; } +static int +vhost_transport_cancel_pkt(struct vsock_sock *vsk) +{ + struct vhost_vsock *vsock; + struct virtio_vsock_pkt *pkt, *n; + int cnt = 0; + LIST_HEAD(freeme); + + /* Find the vhost_vsock according to guest context id */ + vsock = vhost_vsock_get(vsk->remote_addr.svm_cid); + if (!vsock) + return -ENODEV; + + spin_lock_bh(&vsock->send_pkt_list_lock); + list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) { + if (pkt->vsk != vsk) + continue; + list_move(&pkt->list, &freeme); + } + spin_unlock_bh(&vsock->send_pkt_list_lock); + + list_for_each_entry_safe(pkt, n, &freeme, list) { + if (pkt->reply) + cnt++; + list_del(&pkt->list); + virtio_transport_free_pkt(pkt); + } + + if (cnt) { + struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX]; + int new_cnt; + + new_cnt = atomic_sub_return(cnt, &vsock->queued_replies); + if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num) + vhost_poll_queue(&tx_vq->poll); + } + + return 0; +} + static struct virtio_vsock_pkt * vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq, unsigned int out, unsigned int in) @@ -675,6 +715,7 @@ static struct virtio_transport vhost_transport = { .release = virtio_transport_release, .connect = virtio_transport_connect, .shutdown = virtio_transport_shutdown, + .cancel_pkt = vhost_transport_cancel_pkt, .dgram_enqueue = virtio_transport_dgram_enqueue, .dgram_dequeue = virtio_transport_dgram_dequeue, diff --git a/drivers/video/fbdev/auo_k190x.c b/drivers/video/fbdev/auo_k190x.c index 9580374667ba7c..0d06038324e003 100644 --- a/drivers/video/fbdev/auo_k190x.c +++ b/drivers/video/fbdev/auo_k190x.c @@ -9,6 +9,7 @@ */ #include +#include #include #include #include diff --git a/drivers/video/fbdev/cobalt_lcdfb.c b/drivers/video/fbdev/cobalt_lcdfb.c index 038ac6934fe9d7..9da90bd242f4e4 100644 --- a/drivers/video/fbdev/cobalt_lcdfb.c +++ b/drivers/video/fbdev/cobalt_lcdfb.c @@ -26,6 +26,7 @@ #include #include #include +#include /* * Cursor position address diff --git a/drivers/video/fbdev/nvidia/nv_accel.c b/drivers/video/fbdev/nvidia/nv_accel.c index ad6472a894ea60..7341fed63e35aa 100644 --- a/drivers/video/fbdev/nvidia/nv_accel.c +++ b/drivers/video/fbdev/nvidia/nv_accel.c @@ -48,6 +48,8 @@ */ #include +#include + #include "nv_type.h" #include "nv_proto.h" #include "nv_dma.h" diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c index 8b810696a42b6c..fd2b372d0264de 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 9d2738e9217f10..34adf9b9c05388 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -31,6 +31,7 @@ #include #include #include +#include /* * Balloon device works in 4K page units. 
So each page is pointed to by @@ -241,11 +242,11 @@ static inline void update_stat(struct virtio_balloon *vb, int idx, #define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT) -static void update_balloon_stats(struct virtio_balloon *vb) +static unsigned int update_balloon_stats(struct virtio_balloon *vb) { unsigned long events[NR_VM_EVENT_ITEMS]; struct sysinfo i; - int idx = 0; + unsigned int idx = 0; long available; all_vm_events(events); @@ -253,18 +254,22 @@ static void update_balloon_stats(struct virtio_balloon *vb) available = si_mem_available(); +#ifdef CONFIG_VM_EVENT_COUNTERS update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN, pages_to_bytes(events[PSWPIN])); update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT, pages_to_bytes(events[PSWPOUT])); update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]); update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]); +#endif update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE, pages_to_bytes(i.freeram)); update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT, pages_to_bytes(i.totalram)); update_stat(vb, idx++, VIRTIO_BALLOON_S_AVAIL, pages_to_bytes(available)); + + return idx; } /* @@ -290,14 +295,14 @@ static void stats_handle_request(struct virtio_balloon *vb) { struct virtqueue *vq; struct scatterlist sg; - unsigned int len; + unsigned int len, num_stats; - update_balloon_stats(vb); + num_stats = update_balloon_stats(vb); vq = vb->stats_vq; if (!virtqueue_get_buf(vq, &len)) return; - sg_init_one(&sg, vb->stats, sizeof(vb->stats)); + sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats); virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL); virtqueue_kick(vq); } @@ -413,7 +418,8 @@ static int init_vqs(struct virtio_balloon *vb) * optionally stat. */ nvqs = virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ) ? 3 : 2; - err = vb->vdev->config->find_vqs(vb->vdev, nvqs, vqs, callbacks, names); + err = vb->vdev->config->find_vqs(vb->vdev, nvqs, vqs, callbacks, names, + NULL); if (err) return err; @@ -421,13 +427,16 @@ static int init_vqs(struct virtio_balloon *vb) vb->deflate_vq = vqs[1]; if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) { struct scatterlist sg; + unsigned int num_stats; vb->stats_vq = vqs[2]; /* * Prime this virtqueue with one buffer so the hypervisor can * use it to signal us later (it can't be broken yet!). 
*/ - sg_init_one(&sg, vb->stats, sizeof vb->stats); + num_stats = update_balloon_stats(vb); + + sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats); if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL) < 0) BUG(); diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c index 350a2a5a49dbed..79f1293cda9327 100644 --- a/drivers/virtio/virtio_input.c +++ b/drivers/virtio/virtio_input.c @@ -173,7 +173,8 @@ static int virtinput_init_vqs(struct virtio_input *vi) static const char * const names[] = { "events", "status" }; int err; - err = vi->vdev->config->find_vqs(vi->vdev, 2, vqs, cbs, names); + err = vi->vdev->config->find_vqs(vi->vdev, 2, vqs, cbs, names, + NULL); if (err) return err; vi->evt = vqs[0]; diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index c71fde5fe835c4..78343b8f9034b3 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -70,7 +70,7 @@ #include #include #include -#include +#include #include @@ -446,7 +446,8 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index, static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char * const names[]) + const char * const names[], + struct irq_affinity *desc) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); unsigned int irq = platform_get_irq(vm_dev->pdev, 0); diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index 186cbab327b8f6..590534910dc617 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -33,10 +33,8 @@ void vp_synchronize_vectors(struct virtio_device *vdev) struct virtio_pci_device *vp_dev = to_vp_device(vdev); int i; - if (vp_dev->intx_enabled) - synchronize_irq(vp_dev->pci_dev->irq); - - for (i = 0; i < vp_dev->msix_vectors; ++i) + synchronize_irq(pci_irq_vector(vp_dev->pci_dev, 0)); + for (i = 1; i < vp_dev->msix_vectors; i++) synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i)); } @@ -62,16 +60,13 @@ static irqreturn_t vp_config_changed(int irq, void *opaque) static irqreturn_t vp_vring_interrupt(int irq, void *opaque) { struct virtio_pci_device *vp_dev = opaque; - struct virtio_pci_vq_info *info; irqreturn_t ret = IRQ_NONE; - unsigned long flags; + struct virtqueue *vq; - spin_lock_irqsave(&vp_dev->lock, flags); - list_for_each_entry(info, &vp_dev->virtqueues, node) { - if (vring_interrupt(irq, info->vq) == IRQ_HANDLED) + list_for_each_entry(vq, &vp_dev->vdev.vqs, list) { + if (vq->callback && vring_interrupt(irq, vq) == IRQ_HANDLED) ret = IRQ_HANDLED; } - spin_unlock_irqrestore(&vp_dev->lock, flags); return ret; } @@ -102,237 +97,186 @@ static irqreturn_t vp_interrupt(int irq, void *opaque) return vp_vring_interrupt(irq, opaque); } -static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors, - bool per_vq_vectors) +static void vp_remove_vqs(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); - const char *name = dev_name(&vp_dev->vdev.dev); - unsigned i, v; - int err = -ENOMEM; - - vp_dev->msix_vectors = nvectors; - - vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names, - GFP_KERNEL); - if (!vp_dev->msix_names) - goto error; - vp_dev->msix_affinity_masks - = kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks, - GFP_KERNEL); - if (!vp_dev->msix_affinity_masks) - goto error; - for (i = 0; i < nvectors; ++i) - if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i], - GFP_KERNEL)) - 
goto error; - - err = pci_alloc_irq_vectors(vp_dev->pci_dev, nvectors, nvectors, - PCI_IRQ_MSIX); - if (err < 0) - goto error; - vp_dev->msix_enabled = 1; - - /* Set the vector used for configuration */ - v = vp_dev->msix_used_vectors; - snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, - "%s-config", name); - err = request_irq(pci_irq_vector(vp_dev->pci_dev, v), - vp_config_changed, 0, vp_dev->msix_names[v], - vp_dev); - if (err) - goto error; - ++vp_dev->msix_used_vectors; - - v = vp_dev->config_vector(vp_dev, v); - /* Verify we had enough resources to assign the vector */ - if (v == VIRTIO_MSI_NO_VECTOR) { - err = -EBUSY; - goto error; - } - - if (!per_vq_vectors) { - /* Shared vector for all VQs */ - v = vp_dev->msix_used_vectors; - snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, - "%s-virtqueues", name); - err = request_irq(pci_irq_vector(vp_dev->pci_dev, v), - vp_vring_interrupt, 0, vp_dev->msix_names[v], - vp_dev); - if (err) - goto error; - ++vp_dev->msix_used_vectors; - } - return 0; -error: - return err; -} - -static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index, - void (*callback)(struct virtqueue *vq), - const char *name, - u16 msix_vec) -{ - struct virtio_pci_device *vp_dev = to_vp_device(vdev); - struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL); - struct virtqueue *vq; - unsigned long flags; - - /* fill out our structure that represents an active queue */ - if (!info) - return ERR_PTR(-ENOMEM); + struct virtqueue *vq, *n; - vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, msix_vec); - if (IS_ERR(vq)) - goto out_info; + list_for_each_entry_safe(vq, n, &vdev->vqs, list) { + if (vp_dev->msix_vector_map) { + int v = vp_dev->msix_vector_map[vq->index]; - info->vq = vq; - if (callback) { - spin_lock_irqsave(&vp_dev->lock, flags); - list_add(&info->node, &vp_dev->virtqueues); - spin_unlock_irqrestore(&vp_dev->lock, flags); - } else { - INIT_LIST_HEAD(&info->node); + if (v != VIRTIO_MSI_NO_VECTOR) + free_irq(pci_irq_vector(vp_dev->pci_dev, v), + vq); + } + vp_dev->del_vq(vq); } - - vp_dev->vqs[index] = info; - return vq; - -out_info: - kfree(info); - return vq; -} - -static void vp_del_vq(struct virtqueue *vq) -{ - struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); - struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index]; - unsigned long flags; - - spin_lock_irqsave(&vp_dev->lock, flags); - list_del(&info->node); - spin_unlock_irqrestore(&vp_dev->lock, flags); - - vp_dev->del_vq(info); - kfree(info); } /* the config->del_vqs() implementation */ void vp_del_vqs(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); - struct virtqueue *vq, *n; int i; - list_for_each_entry_safe(vq, n, &vdev->vqs, list) { - if (vp_dev->per_vq_vectors) { - int v = vp_dev->vqs[vq->index]->msix_vector; - - if (v != VIRTIO_MSI_NO_VECTOR) - free_irq(pci_irq_vector(vp_dev->pci_dev, v), - vq); - } - vp_del_vq(vq); - } - vp_dev->per_vq_vectors = false; - - if (vp_dev->intx_enabled) { - free_irq(vp_dev->pci_dev->irq, vp_dev); - vp_dev->intx_enabled = 0; - } + if (WARN_ON_ONCE(list_empty_careful(&vdev->vqs))) + return; - for (i = 0; i < vp_dev->msix_used_vectors; ++i) - free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev); + vp_remove_vqs(vdev); - for (i = 0; i < vp_dev->msix_vectors; i++) - if (vp_dev->msix_affinity_masks[i]) + if (vp_dev->pci_dev->msix_enabled) { + for (i = 0; i < vp_dev->msix_vectors; i++) free_cpumask_var(vp_dev->msix_affinity_masks[i]); - if (vp_dev->msix_enabled) { /* Disable 
the vector used for configuration */ vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR); - pci_free_irq_vectors(vp_dev->pci_dev); - vp_dev->msix_enabled = 0; + kfree(vp_dev->msix_affinity_masks); + kfree(vp_dev->msix_names); + kfree(vp_dev->msix_vector_map); } - vp_dev->msix_vectors = 0; - vp_dev->msix_used_vectors = 0; - kfree(vp_dev->msix_names); - vp_dev->msix_names = NULL; - kfree(vp_dev->msix_affinity_masks); - vp_dev->msix_affinity_masks = NULL; - kfree(vp_dev->vqs); - vp_dev->vqs = NULL; + free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev); + pci_free_irq_vectors(vp_dev->pci_dev); } static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs, - struct virtqueue *vqs[], - vq_callback_t *callbacks[], - const char * const names[], - bool per_vq_vectors) + struct virtqueue *vqs[], vq_callback_t *callbacks[], + const char * const names[], struct irq_affinity *desc) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); + const char *name = dev_name(&vp_dev->vdev.dev); + int i, j, err = -ENOMEM, allocated_vectors, nvectors; + unsigned flags = PCI_IRQ_MSIX; + bool shared = false; u16 msix_vec; - int i, err, nvectors, allocated_vectors; - vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); - if (!vp_dev->vqs) - return -ENOMEM; + if (desc) { + flags |= PCI_IRQ_AFFINITY; + desc->pre_vectors++; /* virtio config vector */ + } - if (per_vq_vectors) { - /* Best option: one for change interrupt, one per vq. */ - nvectors = 1; - for (i = 0; i < nvqs; ++i) - if (callbacks[i]) - ++nvectors; - } else { - /* Second best: one for change, shared for all vqs. */ - nvectors = 2; + nvectors = 1; + for (i = 0; i < nvqs; i++) + if (callbacks[i]) + nvectors++; + + /* Try one vector per queue first. */ + err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors, + nvectors, flags, desc); + if (err < 0) { + /* Fallback to one vector for config, one shared for queues. 
*/ + shared = true; + err = pci_alloc_irq_vectors(vp_dev->pci_dev, 2, 2, + PCI_IRQ_MSIX); + if (err < 0) + return err; + } + if (err < 0) + return err; + + vp_dev->msix_vectors = nvectors; + vp_dev->msix_names = kmalloc_array(nvectors, + sizeof(*vp_dev->msix_names), GFP_KERNEL); + if (!vp_dev->msix_names) + goto out_free_irq_vectors; + + vp_dev->msix_affinity_masks = kcalloc(nvectors, + sizeof(*vp_dev->msix_affinity_masks), GFP_KERNEL); + if (!vp_dev->msix_affinity_masks) + goto out_free_msix_names; + + for (i = 0; i < nvectors; ++i) { + if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i], + GFP_KERNEL)) + goto out_free_msix_affinity_masks; } - err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors); + /* Set the vector used for configuration */ + snprintf(vp_dev->msix_names[0], sizeof(*vp_dev->msix_names), + "%s-config", name); + err = request_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_config_changed, + 0, vp_dev->msix_names[0], vp_dev); if (err) - goto error_find; + goto out_free_msix_affinity_masks; + + /* Verify we had enough resources to assign the vector */ + if (vp_dev->config_vector(vp_dev, 0) == VIRTIO_MSI_NO_VECTOR) { + err = -EBUSY; + goto out_free_config_irq; + } + + vp_dev->msix_vector_map = kmalloc_array(nvqs, + sizeof(*vp_dev->msix_vector_map), GFP_KERNEL); + if (!vp_dev->msix_vector_map) + goto out_disable_config_irq; - vp_dev->per_vq_vectors = per_vq_vectors; - allocated_vectors = vp_dev->msix_used_vectors; + allocated_vectors = j = 1; /* vector 0 is the config interrupt */ for (i = 0; i < nvqs; ++i) { if (!names[i]) { vqs[i] = NULL; continue; } - if (!callbacks[i]) - msix_vec = VIRTIO_MSI_NO_VECTOR; - else if (vp_dev->per_vq_vectors) - msix_vec = allocated_vectors++; + if (callbacks[i]) + msix_vec = allocated_vectors; else - msix_vec = VP_MSIX_VQ_VECTOR; - vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], msix_vec); + msix_vec = VIRTIO_MSI_NO_VECTOR; + + vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i], + msix_vec); if (IS_ERR(vqs[i])) { err = PTR_ERR(vqs[i]); - goto error_find; + goto out_remove_vqs; } - if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR) + if (msix_vec == VIRTIO_MSI_NO_VECTOR) { + vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR; continue; + } - /* allocate per-vq irq if available and necessary */ - snprintf(vp_dev->msix_names[msix_vec], - sizeof *vp_dev->msix_names, - "%s-%s", + snprintf(vp_dev->msix_names[j], + sizeof(*vp_dev->msix_names), "%s-%s", dev_name(&vp_dev->vdev.dev), names[i]); err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec), - vring_interrupt, 0, - vp_dev->msix_names[msix_vec], - vqs[i]); - if (err) - goto error_find; + vring_interrupt, IRQF_SHARED, + vp_dev->msix_names[j], vqs[i]); + if (err) { + /* don't free this irq on error */ + vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR; + goto out_remove_vqs; + } + vp_dev->msix_vector_map[i] = msix_vec; + j++; + + /* + * Use a different vector for each queue if they are available, + * else share the same vector for all VQs. 
+ */ + if (!shared) + allocated_vectors++; } + return 0; -error_find: - vp_del_vqs(vdev); +out_remove_vqs: + vp_remove_vqs(vdev); + kfree(vp_dev->msix_vector_map); +out_disable_config_irq: + vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR); +out_free_config_irq: + free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev); +out_free_msix_affinity_masks: + for (i = 0; i < nvectors; i++) { + if (vp_dev->msix_affinity_masks[i]) + free_cpumask_var(vp_dev->msix_affinity_masks[i]); + } + kfree(vp_dev->msix_affinity_masks); +out_free_msix_names: + kfree(vp_dev->msix_names); +out_free_irq_vectors: + pci_free_irq_vectors(vp_dev->pci_dev); return err; } @@ -343,53 +287,42 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs, struct virtio_pci_device *vp_dev = to_vp_device(vdev); int i, err; - vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); - if (!vp_dev->vqs) - return -ENOMEM; - err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED, dev_name(&vdev->dev), vp_dev); if (err) - goto out_del_vqs; + return err; - vp_dev->intx_enabled = 1; - vp_dev->per_vq_vectors = false; for (i = 0; i < nvqs; ++i) { if (!names[i]) { vqs[i] = NULL; continue; } - vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], + vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i], VIRTIO_MSI_NO_VECTOR); if (IS_ERR(vqs[i])) { err = PTR_ERR(vqs[i]); - goto out_del_vqs; + goto out_remove_vqs; } } return 0; -out_del_vqs: - vp_del_vqs(vdev); + +out_remove_vqs: + vp_remove_vqs(vdev); + free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev); return err; } /* the config->find_vqs() implementation */ int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, - struct virtqueue *vqs[], - vq_callback_t *callbacks[], - const char * const names[]) + struct virtqueue *vqs[], vq_callback_t *callbacks[], + const char * const names[], struct irq_affinity *desc) { int err; - /* Try MSI-X with one vector per queue. */ - err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true); + err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, desc); if (!err) return 0; - /* Fallback: MSI-X with one vector for config, one shared for queues. */ - err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false); - if (!err) - return 0; - /* Finally fall back to regular interrupts. 
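The rewritten vp_find_vqs_msix() above relies on the classic goto-unwind ladder: each resource that was successfully set up has a matching label, and an error jumps to the label that releases only what already exists, in reverse order. A compilable toy version of that structure (names are illustrative, not kernel APIs):

#include <stdlib.h>

struct ctx {
	char (*names)[16];
	unsigned int *vector_map;
};

static int ctx_setup(struct ctx *c, unsigned int nvqs)
{
	c->names = malloc(nvqs * sizeof(*c->names));
	if (!c->names)
		goto out;

	c->vector_map = malloc(nvqs * sizeof(*c->vector_map));
	if (!c->vector_map)
		goto out_free_names;

	return 0;		/* caller now owns both allocations */

out_free_names:
	free(c->names);
out:
	return -1;
}

Keeping the labels in strict reverse order of the setup steps is what lets the error paths above (out_remove_vqs, out_disable_config_irq, out_free_config_irq, and so on) stay correct as further steps are added.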
*/ return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names); } @@ -409,16 +342,15 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu) { struct virtio_device *vdev = vq->vdev; struct virtio_pci_device *vp_dev = to_vp_device(vdev); - struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index]; - struct cpumask *mask; - unsigned int irq; if (!vq->callback) return -EINVAL; - if (vp_dev->msix_enabled) { - mask = vp_dev->msix_affinity_masks[info->msix_vector]; - irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector); + if (vp_dev->pci_dev->msix_enabled) { + int vec = vp_dev->msix_vector_map[vq->index]; + struct cpumask *mask = vp_dev->msix_affinity_masks[vec]; + unsigned int irq = pci_irq_vector(vp_dev->pci_dev, vec); + if (cpu == -1) irq_set_affinity_hint(irq, NULL); else { @@ -430,6 +362,17 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu) return 0; } +const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + unsigned int *map = vp_dev->msix_vector_map; + + if (!map || map[index] == VIRTIO_MSI_NO_VECTOR) + return NULL; + + return pci_irq_get_affinity(vp_dev->pci_dev, map[index]); +} + #ifdef CONFIG_PM_SLEEP static int virtio_pci_freeze(struct device *dev) { @@ -498,8 +441,6 @@ static int virtio_pci_probe(struct pci_dev *pci_dev, vp_dev->vdev.dev.parent = &pci_dev->dev; vp_dev->vdev.dev.release = virtio_pci_release_dev; vp_dev->pci_dev = pci_dev; - INIT_LIST_HEAD(&vp_dev->virtqueues); - spin_lock_init(&vp_dev->lock); /* enable the device */ rc = pci_enable_device(pci_dev); diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h index b2f666250ae0bf..ac8c9d7889646a 100644 --- a/drivers/virtio/virtio_pci_common.h +++ b/drivers/virtio/virtio_pci_common.h @@ -31,17 +31,6 @@ #include #include -struct virtio_pci_vq_info { - /* the actual virtqueue */ - struct virtqueue *vq; - - /* the list node for the virtqueues list */ - struct list_head node; - - /* MSI-X vector (or none) */ - unsigned msix_vector; -}; - /* Our device structure */ struct virtio_pci_device { struct virtio_device vdev; @@ -75,47 +64,25 @@ struct virtio_pci_device { /* the IO mapping for the PCI config space */ void __iomem *ioaddr; - /* a list of queues so we can dispatch IRQs */ - spinlock_t lock; - struct list_head virtqueues; - - /* array of all queues for house-keeping */ - struct virtio_pci_vq_info **vqs; - - /* MSI-X support */ - int msix_enabled; - int intx_enabled; cpumask_var_t *msix_affinity_masks; /* Name strings for interrupts. This size should be enough, * and I'm too lazy to allocate each name separately. */ char (*msix_names)[256]; - /* Number of available vectors */ - unsigned msix_vectors; - /* Vectors allocated, excluding per-vq vectors if any */ - unsigned msix_used_vectors; - - /* Whether we have vector per vq */ - bool per_vq_vectors; + /* Total Number of MSI-X vectors (including per-VQ ones). */ + int msix_vectors; + /* Map of per-VQ MSI-X vectors, may be NULL */ + unsigned *msix_vector_map; struct virtqueue *(*setup_vq)(struct virtio_pci_device *vp_dev, - struct virtio_pci_vq_info *info, unsigned idx, void (*callback)(struct virtqueue *vq), const char *name, u16 msix_vec); - void (*del_vq)(struct virtio_pci_vq_info *info); + void (*del_vq)(struct virtqueue *vq); u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector); }; -/* Constants for MSI-X */ -/* Use first vector for configuration changes, second and the rest for - * virtqueues Thus, we need at least 2 vectors for MSI. 
*/ -enum { - VP_MSIX_CONFIG_VECTOR = 0, - VP_MSIX_VQ_VECTOR = 1, -}; - /* Convert a generic virtio device to our structure */ static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev) { @@ -130,9 +97,8 @@ bool vp_notify(struct virtqueue *vq); void vp_del_vqs(struct virtio_device *vdev); /* the config->find_vqs() implementation */ int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, - struct virtqueue *vqs[], - vq_callback_t *callbacks[], - const char * const names[]); + struct virtqueue *vqs[], vq_callback_t *callbacks[], + const char * const names[], struct irq_affinity *desc); const char *vp_bus_name(struct virtio_device *vdev); /* Setup the affinity for a virtqueue: @@ -142,6 +108,8 @@ const char *vp_bus_name(struct virtio_device *vdev); */ int vp_set_vq_affinity(struct virtqueue *vq, int cpu); +const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index); + #if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY) int virtio_pci_legacy_probe(struct virtio_pci_device *); void virtio_pci_legacy_remove(struct virtio_pci_device *); diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c index 6d9e5173d5fa6b..f7362c5fe18a96 100644 --- a/drivers/virtio/virtio_pci_legacy.c +++ b/drivers/virtio/virtio_pci_legacy.c @@ -112,7 +112,6 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector) } static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, - struct virtio_pci_vq_info *info, unsigned index, void (*callback)(struct virtqueue *vq), const char *name, @@ -130,8 +129,6 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN)) return ERR_PTR(-ENOENT); - info->msix_vector = msix_vec; - /* create the vring */ vq = vring_create_virtqueue(index, num, VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev, @@ -162,14 +159,13 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, return ERR_PTR(err); } -static void del_vq(struct virtio_pci_vq_info *info) +static void del_vq(struct virtqueue *vq) { - struct virtqueue *vq = info->vq; struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); - if (vp_dev->msix_enabled) { + if (vp_dev->pci_dev->msix_enabled) { iowrite16(VIRTIO_MSI_NO_VECTOR, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); /* Flush the write out to device */ @@ -194,6 +190,7 @@ static const struct virtio_config_ops virtio_pci_config_ops = { .finalize_features = vp_finalize_features, .bus_name = vp_bus_name, .set_vq_affinity = vp_set_vq_affinity, + .get_vq_affinity = vp_get_vq_affinity, }; /* the PCI probing function */ diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c index 4bf7ab37589417..7bc3004b840ef3 100644 --- a/drivers/virtio/virtio_pci_modern.c +++ b/drivers/virtio/virtio_pci_modern.c @@ -293,7 +293,6 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector) } static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, - struct virtio_pci_vq_info *info, unsigned index, void (*callback)(struct virtqueue *vq), const char *name, @@ -323,8 +322,6 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, /* get offset of notification word for this vq */ off = vp_ioread16(&cfg->queue_notify_off); - info->msix_vector = msix_vec; - /* create the vring */ vq = vring_create_virtqueue(index, num, SMP_CACHE_BYTES, &vp_dev->vdev, @@ -387,13 +384,12 @@ static struct virtqueue *setup_vq(struct virtio_pci_device 
*vp_dev, } static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs, - struct virtqueue *vqs[], - vq_callback_t *callbacks[], - const char * const names[]) + struct virtqueue *vqs[], vq_callback_t *callbacks[], + const char * const names[], struct irq_affinity *desc) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtqueue *vq; - int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names); + int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names, desc); if (rc) return rc; @@ -409,14 +405,13 @@ static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs, return 0; } -static void del_vq(struct virtio_pci_vq_info *info) +static void del_vq(struct virtqueue *vq) { - struct virtqueue *vq = info->vq; struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); vp_iowrite16(vq->index, &vp_dev->common->queue_select); - if (vp_dev->msix_enabled) { + if (vp_dev->pci_dev->msix_enabled) { vp_iowrite16(VIRTIO_MSI_NO_VECTOR, &vp_dev->common->queue_msix_vector); /* Flush the write out to device */ @@ -442,6 +437,7 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = { .finalize_features = vp_finalize_features, .bus_name = vp_bus_name, .set_vq_affinity = vp_set_vq_affinity, + .get_vq_affinity = vp_get_vq_affinity, }; static const struct virtio_config_ops virtio_pci_config_ops = { @@ -457,6 +453,7 @@ static const struct virtio_config_ops virtio_pci_config_ops = { .finalize_features = vp_finalize_features, .bus_name = vp_bus_name, .set_vq_affinity = vp_set_vq_affinity, + .get_vq_affinity = vp_get_vq_affinity, }; /** diff --git a/drivers/w1/w1_family.c b/drivers/w1/w1_family.c index df1c9bb90eb50b..2096f460498f9d 100644 --- a/drivers/w1/w1_family.c +++ b/drivers/w1/w1_family.c @@ -14,7 +14,7 @@ #include #include -#include /* schedule_timeout() */ +#include #include #include diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c index 4ce1b66d5092fb..2cae7b29bb5fb5 100644 --- a/drivers/w1/w1_int.c +++ b/drivers/w1/w1_int.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index c831b7967bf95f..52a70ee6014fa8 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -142,6 +142,7 @@ config GPIO_WATCHDOG_ARCH_INITCALL config MENF21BMC_WATCHDOG tristate "MEN 14F021P00 BMC Watchdog" depends on MFD_MENF21BMC || COMPILE_TEST + depends on I2C select WATCHDOG_CORE help Say Y here to include support for the MEN 14F021P00 BMC Watchdog. @@ -176,7 +177,7 @@ config WDAT_WDT config WM831X_WATCHDOG tristate "WM831x watchdog" - depends on MFD_WM831X || COMPILE_TEST + depends on MFD_WM831X select WATCHDOG_CORE help Support for the watchdog in the WM831x AudioPlus PMICs. When @@ -217,7 +218,7 @@ config ZIIRAVE_WATCHDOG config ARM_SP805_WATCHDOG tristate "ARM SP805 Watchdog" - depends on (ARM || ARM64) && (ARM_AMBA || COMPILE_TEST) + depends on (ARM || ARM64 || COMPILE_TEST) && ARM_AMBA select WATCHDOG_CORE help ARM Primecell SP805 Watchdog timer. 
This will reboot your system when @@ -573,7 +574,7 @@ config IMX2_WDT config UX500_WATCHDOG tristate "ST-Ericsson Ux500 watchdog" - depends on MFD_DB8500_PRCMU || (ARM && COMPILE_TEST) + depends on MFD_DB8500_PRCMU select WATCHDOG_CORE default y help @@ -585,7 +586,7 @@ config UX500_WATCHDOG config RETU_WATCHDOG tristate "Retu watchdog" - depends on MFD_RETU || COMPILE_TEST + depends on MFD_RETU select WATCHDOG_CORE help Retu watchdog driver for Nokia Internet Tablets (770, N800, @@ -851,7 +852,7 @@ config SP5100_TCO config GEODE_WDT tristate "AMD Geode CS5535/CS5536 Watchdog" - depends on CS5535_MFGPT || (X86 && COMPILE_TEST) + depends on CS5535_MFGPT help This driver enables a watchdog capability built into the CS5535/CS5536 companion chips for the AMD Geode GX and LX @@ -1063,7 +1064,7 @@ config HP_WATCHDOG config KEMPLD_WDT tristate "Kontron COM Watchdog Timer" - depends on MFD_KEMPLD || COMPILE_TEST + depends on MFD_KEMPLD select WATCHDOG_CORE help Support for the PLD watchdog on some Kontron ETX and COMexpress @@ -1495,7 +1496,7 @@ config BCM63XX_WDT config BCM2835_WDT tristate "Broadcom BCM2835 hardware watchdog" - depends on ARCH_BCM2835 || COMPILE_TEST + depends on ARCH_BCM2835 || (OF && COMPILE_TEST) select WATCHDOG_CORE help Watchdog driver for the built in watchdog hardware in Broadcom diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c index 73c46b3a09ab3e..2f3b049ea3017c 100644 --- a/drivers/watchdog/kempld_wdt.c +++ b/drivers/watchdog/kempld_wdt.c @@ -140,12 +140,19 @@ static int kempld_wdt_set_stage_timeout(struct kempld_wdt_data *wdt_data, unsigned int timeout) { struct kempld_device_data *pld = wdt_data->pld; - u32 prescaler = kempld_prescaler[PRESCALER_21]; + u32 prescaler; u64 stage_timeout64; u32 stage_timeout; u32 remainder; u8 stage_cfg; +#if GCC_VERSION < 40400 + /* work around a bug compiling do_div() */ + prescaler = READ_ONCE(kempld_prescaler[PRESCALER_21]); +#else + prescaler = kempld_prescaler[PRESCALER_21]; +#endif + if (!stage) return -EINVAL; diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c index 7983029852ab0d..0607406254856a 100644 --- a/drivers/watchdog/softdog.c +++ b/drivers/watchdog/softdog.c @@ -21,13 +21,12 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include #include -#include #include #include #include #include -#include #include #include @@ -54,7 +53,10 @@ module_param(soft_panic, int, 0); MODULE_PARM_DESC(soft_panic, "Softdog action, set to 1 to panic, 0 to reboot (default=0)"); -static void softdog_fire(unsigned long data) +static struct hrtimer softdog_ticktock; +static struct hrtimer softdog_preticktock; + +static enum hrtimer_restart softdog_fire(struct hrtimer *timer) { module_put(THIS_MODULE); if (soft_noboot) { @@ -67,32 +69,33 @@ static void softdog_fire(unsigned long data) emergency_restart(); pr_crit("Reboot didn't ?????\n"); } -} -static struct timer_list softdog_ticktock = - TIMER_INITIALIZER(softdog_fire, 0, 0); + return HRTIMER_NORESTART; +} static struct watchdog_device softdog_dev; -static void softdog_pretimeout(unsigned long data) +static enum hrtimer_restart softdog_pretimeout(struct hrtimer *timer) { watchdog_notify_pretimeout(&softdog_dev); -} -static struct timer_list softdog_preticktock = - TIMER_INITIALIZER(softdog_pretimeout, 0, 0); + return HRTIMER_NORESTART; +} static int softdog_ping(struct watchdog_device *w) { - if (!mod_timer(&softdog_ticktock, jiffies + (w->timeout * HZ))) + if (!hrtimer_active(&softdog_ticktock)) __module_get(THIS_MODULE); + 
hrtimer_start(&softdog_ticktock, ktime_set(w->timeout, 0), + HRTIMER_MODE_REL); if (IS_ENABLED(CONFIG_SOFT_WATCHDOG_PRETIMEOUT)) { if (w->pretimeout) - mod_timer(&softdog_preticktock, jiffies + - (w->timeout - w->pretimeout) * HZ); + hrtimer_start(&softdog_preticktock, + ktime_set(w->timeout - w->pretimeout, 0), + HRTIMER_MODE_REL); else - del_timer(&softdog_preticktock); + hrtimer_cancel(&softdog_preticktock); } return 0; @@ -100,11 +103,11 @@ static int softdog_ping(struct watchdog_device *w) static int softdog_stop(struct watchdog_device *w) { - if (del_timer(&softdog_ticktock)) + if (hrtimer_cancel(&softdog_ticktock)) module_put(THIS_MODULE); if (IS_ENABLED(CONFIG_SOFT_WATCHDOG_PRETIMEOUT)) - del_timer(&softdog_preticktock); + hrtimer_cancel(&softdog_preticktock); return 0; } @@ -136,8 +139,15 @@ static int __init softdog_init(void) watchdog_set_nowayout(&softdog_dev, nowayout); watchdog_stop_on_reboot(&softdog_dev); - if (IS_ENABLED(CONFIG_SOFT_WATCHDOG_PRETIMEOUT)) + hrtimer_init(&softdog_ticktock, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + softdog_ticktock.function = softdog_fire; + + if (IS_ENABLED(CONFIG_SOFT_WATCHDOG_PRETIMEOUT)) { softdog_info.options |= WDIOF_PRETIMEOUT; + hrtimer_init(&softdog_preticktock, CLOCK_MONOTONIC, + HRTIMER_MODE_REL); + softdog_preticktock.function = softdog_pretimeout; + } ret = watchdog_register_device(&softdog_dev); if (ret) diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index db107fa50ca14c..a6d4378eb8d9fc 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 2ef2b61b69dfe0..f3bf8f4e2d6cef 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -32,9 +32,11 @@ #include #include #include +#include #include #include #include +#include #include #include @@ -85,7 +87,7 @@ struct grant_map { int index; int count; int flags; - atomic_t users; + refcount_t users; struct unmap_notify notify; struct ioctl_gntdev_grant_ref *grants; struct gnttab_map_grant_ref *map_ops; @@ -165,7 +167,7 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count) add->index = 0; add->count = count; - atomic_set(&add->users, 1); + refcount_set(&add->users, 1); return add; @@ -211,7 +213,7 @@ static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map) if (!map) return; - if (!atomic_dec_and_test(&map->users)) + if (!refcount_dec_and_test(&map->users)) return; atomic_sub(map->count, &pages_mapped); @@ -399,7 +401,7 @@ static void gntdev_vma_open(struct vm_area_struct *vma) struct grant_map *map = vma->vm_private_data; pr_debug("gntdev_vma_open %p\n", vma); - atomic_inc(&map->users); + refcount_inc(&map->users); } static void gntdev_vma_close(struct vm_area_struct *vma) @@ -1003,7 +1005,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) goto unlock_out; } - atomic_inc(&map->users); + refcount_inc(&map->users); vma->vm_ops = &gntdev_vmops; diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index f8afc6dcc29f27..e8cef1ad0fe31e 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -681,3 +681,50 @@ xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask) return 0; } EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask); + +/* + * Create userspace mapping for the DMA-coherent memory. 
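The gntdev change above swaps an open-coded atomic_t reference count for refcount_t, which adds sanity checks against overflow, underflow and increment-from-zero that a bare atomic_t lacks. The get/put discipline it encodes, sketched in standalone C11 (plain atomics stand in for refcount_t, and the struct name is illustrative):

#include <stdatomic.h>
#include <stdlib.h>

struct mapping {
	atomic_int users;
	/* ... grant/mapping state ... */
};

static struct mapping *mapping_alloc(void)
{
	struct mapping *m = calloc(1, sizeof(*m));

	if (m)
		atomic_store(&m->users, 1);	/* creator holds the first reference */
	return m;
}

static void mapping_get(struct mapping *m)
{
	atomic_fetch_add(&m->users, 1);		/* e.g. on mmap() or vma_open */
}

static void mapping_put(struct mapping *m)
{
	if (atomic_fetch_sub(&m->users, 1) == 1)
		free(m);			/* last reference dropped */
}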
+ * This function should be called with the pages from the current domain only, + * passing pages mapped from other domains would lead to memory corruption. + */ +int +xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size, + unsigned long attrs) +{ +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) + if (__generic_dma_ops(dev)->mmap) + return __generic_dma_ops(dev)->mmap(dev, vma, cpu_addr, + dma_addr, size, attrs); +#endif + return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size); +} +EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mmap); + +/* + * This function should be called with the pages from the current domain only, + * passing pages mapped from other domains would lead to memory corruption. + */ +int +xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t handle, size_t size, + unsigned long attrs) +{ +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) + if (__generic_dma_ops(dev)->get_sgtable) { +#if 0 + /* + * This check verifies that the page belongs to the current domain and + * is not one mapped from another domain. + * This check is for debug only, and should not go to production build + */ + unsigned long bfn = PHYS_PFN(dma_to_phys(dev, handle)); + BUG_ON (!page_is_ram(bfn)); +#endif + return __generic_dma_ops(dev)->get_sgtable(dev, sgt, cpu_addr, + handle, size, attrs); + } +#endif + return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size); +} +EXPORT_SYMBOL_GPL(xen_swiotlb_get_sgtable); diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c index 4ce10bcca18b1f..23e391d3ec015d 100644 --- a/drivers/xen/xen-acpi-processor.c +++ b/drivers/xen/xen-acpi-processor.c @@ -27,10 +27,10 @@ #include #include #include +#include #include #include #include -#include #include #include @@ -408,7 +408,7 @@ static int check_acpi_ids(struct acpi_processor *pr_backup) acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, read_acpi_id, NULL, NULL, NULL); - acpi_get_devices("ACPI0007", read_acpi_id, NULL, NULL); + acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, read_acpi_id, NULL, NULL); upload: if (!bitmap_equal(acpi_id_present, acpi_ids_done, nr_acpi_bits)) { @@ -466,15 +466,33 @@ static int xen_upload_processor_pm_data(void) return rc; } -static int xen_acpi_processor_resume(struct notifier_block *nb, - unsigned long action, void *data) +static void xen_acpi_processor_resume_worker(struct work_struct *dummy) { + int rc; + bitmap_zero(acpi_ids_done, nr_acpi_bits); - return xen_upload_processor_pm_data(); + + rc = xen_upload_processor_pm_data(); + if (rc != 0) + pr_info("ACPI data upload failed, error = %d\n", rc); +} + +static void xen_acpi_processor_resume(void) +{ + static DECLARE_WORK(wq, xen_acpi_processor_resume_worker); + + /* + * xen_upload_processor_pm_data() calls non-atomic code. + * However, the context for xen_acpi_processor_resume is syscore + * with only the boot CPU online and in an atomic context. + * + * So defer the upload for some point safer. 
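The comment above explains why the upload cannot run directly from the syscore resume callback: that context is effectively atomic, so the slow, sleeping work is handed off to a workqueue instead. A userspace analogue of the defer-to-a-worker pattern, using a pthread condition variable where the kernel uses DECLARE_WORK()/schedule_work() (purely illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int work_pending;

/* Stand-in for the resume hook: only flags the work and returns quickly. */
static void resume_hook(void)
{
	pthread_mutex_lock(&lock);
	work_pending = 1;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

/* Worker thread: runs later, in a context where sleeping is fine. */
static void *worker(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!work_pending)
		pthread_cond_wait(&cond, &lock);
	work_pending = 0;
	pthread_mutex_unlock(&lock);

	puts("re-uploading processor PM data (slow path)");
	return arg;
}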
+ */ + schedule_work(&wq); } -struct notifier_block xen_acpi_processor_resume_nb = { - .notifier_call = xen_acpi_processor_resume, +static struct syscore_ops xap_syscore_ops = { + .resume = xen_acpi_processor_resume, }; static int __init xen_acpi_processor_init(void) @@ -527,7 +545,7 @@ static int __init xen_acpi_processor_init(void) if (rc) goto err_unregister; - xen_resume_notifier_register(&xen_acpi_processor_resume_nb); + register_syscore_ops(&xap_syscore_ops); return 0; err_unregister: @@ -544,7 +562,7 @@ static void __exit xen_acpi_processor_exit(void) { int i; - xen_resume_notifier_unregister(&xen_acpi_processor_resume_nb); + unregister_syscore_ops(&xap_syscore_ops); kfree(acpi_ids_done); kfree(acpi_id_present); kfree(acpi_id_cst_present); diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c index 4d343eed08f51e..1f4733b80c8774 100644 --- a/drivers/xen/xenbus/xenbus_dev_frontend.c +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c @@ -55,7 +55,6 @@ #include #include #include -#include #include #include diff --git a/fs/9p/fid.c b/fs/9p/fid.c index 60fb47469c86bb..ed4f8519b62706 100644 --- a/fs/9p/fid.c +++ b/fs/9p/fid.c @@ -91,10 +91,10 @@ static struct p9_fid *v9fs_fid_find(struct dentry *dentry, kuid_t uid, int any) * dentry names. */ static int build_path_from_dentry(struct v9fs_session_info *v9ses, - struct dentry *dentry, char ***names) + struct dentry *dentry, const unsigned char ***names) { int n = 0, i; - char **wnames; + const unsigned char **wnames; struct dentry *ds; for (ds = dentry; !IS_ROOT(ds); ds = ds->d_parent) @@ -105,7 +105,7 @@ static int build_path_from_dentry(struct v9fs_session_info *v9ses, goto err_out; for (ds = dentry, i = (n-1); i >= 0; i--, ds = ds->d_parent) - wnames[i] = (char *)ds->d_name.name; + wnames[i] = ds->d_name.name; *names = wnames; return n; @@ -117,7 +117,7 @@ static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry, kuid_t uid, int any) { struct dentry *ds; - char **wnames, *uname; + const unsigned char **wnames, *uname; int i, n, l, clone, access; struct v9fs_session_info *v9ses; struct p9_fid *fid, *old_fid = NULL; @@ -137,7 +137,7 @@ static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry, fid = v9fs_fid_find(ds, uid, any); if (fid) { /* Found the parent fid do a lookup with that */ - fid = p9_client_walk(fid, 1, (char **)&dentry->d_name.name, 1); + fid = p9_client_walk(fid, 1, &dentry->d_name.name, 1); goto fid_out; } up_read(&v9ses->rename_sem); diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c index 072e7599583a81..a89f3cfe3c7d7f 100644 --- a/fs/9p/v9fs.c +++ b/fs/9p/v9fs.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index f4f4450119e42f..2a5de610dd8fd5 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -643,7 +643,7 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir, struct dentry *dentry, char *extension, u32 perm, u8 mode) { int err; - char *name; + const unsigned char *name; struct p9_fid *dfid, *ofid, *fid; struct inode *inode; @@ -652,7 +652,7 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir, err = 0; ofid = NULL; fid = NULL; - name = (char *) dentry->d_name.name; + name = dentry->d_name.name; dfid = v9fs_parent_fid(dentry); if (IS_ERR(dfid)) { err = PTR_ERR(dfid); @@ -788,7 +788,7 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry, struct v9fs_session_info *v9ses; struct p9_fid *dfid, *fid; struct inode *inode; - char *name; 
+ const unsigned char *name; p9_debug(P9_DEBUG_VFS, "dir: %p dentry: (%pd) %p flags: %x\n", dir, dentry, dentry, flags); @@ -802,7 +802,7 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry, if (IS_ERR(dfid)) return ERR_CAST(dfid); - name = (char *) dentry->d_name.name; + name = dentry->d_name.name; fid = p9_client_walk(dfid, 1, &name, 1); if (IS_ERR(fid)) { if (fid == ERR_PTR(-ENOENT)) { @@ -1012,7 +1012,7 @@ v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry, } v9fs_blank_wstat(&wstat); wstat.muid = v9ses->uname; - wstat.name = (char *) new_dentry->d_name.name; + wstat.name = new_dentry->d_name.name; retval = p9_client_wstat(oldfid, &wstat); clunk_newdir: @@ -1047,16 +1047,18 @@ v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry, /** * v9fs_vfs_getattr - retrieve file metadata - * @mnt: mount information - * @dentry: file to get attributes on + * @path: Object to query * @stat: metadata structure to populate + * @request_mask: Mask of STATX_xxx flags indicating the caller's interests + * @flags: AT_STATX_xxx setting * */ static int -v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +v9fs_vfs_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { + struct dentry *dentry = path->dentry; struct v9fs_session_info *v9ses; struct p9_fid *fid; struct p9_wstat *st; diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c index 5999bd050678cd..70f9887c59a90f 100644 --- a/fs/9p/vfs_inode_dotl.c +++ b/fs/9p/vfs_inode_dotl.c @@ -244,7 +244,7 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry, int err = 0; kgid_t gid; umode_t mode; - char *name = NULL; + const unsigned char *name = NULL; struct p9_qid qid; struct inode *inode; struct p9_fid *fid = NULL; @@ -269,7 +269,7 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry, v9ses = v9fs_inode2v9ses(dir); - name = (char *) dentry->d_name.name; + name = dentry->d_name.name; p9_debug(P9_DEBUG_VFS, "name:%s flags:0x%x mode:0x%hx\n", name, flags, omode); @@ -385,7 +385,7 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir, struct v9fs_session_info *v9ses; struct p9_fid *fid = NULL, *dfid = NULL; kgid_t gid; - char *name; + const unsigned char *name; umode_t mode; struct inode *inode; struct p9_qid qid; @@ -416,7 +416,7 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir, err); goto error; } - name = (char *) dentry->d_name.name; + name = dentry->d_name.name; err = p9_client_mkdir_dotl(dfid, name, mode, gid, &qid); if (err < 0) goto error; @@ -468,9 +468,10 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir, } static int -v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +v9fs_vfs_getattr_dotl(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { + struct dentry *dentry = path->dentry; struct v9fs_session_info *v9ses; struct p9_fid *fid; struct p9_stat_dotl *st; @@ -678,14 +679,14 @@ v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry, { int err; kgid_t gid; - char *name; + const unsigned char *name; struct p9_qid qid; struct inode *inode; struct p9_fid *dfid; struct p9_fid *fid = NULL; struct v9fs_session_info *v9ses; - name = (char *) dentry->d_name.name; + name = dentry->d_name.name; p9_debug(P9_DEBUG_VFS, "%lu,%s,%s\n", dir->i_ino, name, symname); v9ses = v9fs_inode2v9ses(dir); @@ -699,7 +700,7 @@ v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry, gid = v9fs_get_fsgid_for_create(dir); /* Server 
doesn't alter fid on TSYMLINK. Hence no need to clone it. */ - err = p9_client_symlink(dfid, name, (char *)symname, gid, &qid); + err = p9_client_symlink(dfid, name, symname, gid, &qid); if (err < 0) { p9_debug(P9_DEBUG_VFS, "p9_client_symlink failed %d\n", err); @@ -775,7 +776,7 @@ v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir, if (IS_ERR(oldfid)) return PTR_ERR(oldfid); - err = p9_client_link(dfid, oldfid, (char *)dentry->d_name.name); + err = p9_client_link(dfid, oldfid, dentry->d_name.name); if (err < 0) { p9_debug(P9_DEBUG_VFS, "p9_client_link failed %d\n", err); @@ -812,7 +813,7 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, umode_t omode, { int err; kgid_t gid; - char *name; + const unsigned char *name; umode_t mode; struct v9fs_session_info *v9ses; struct p9_fid *fid = NULL, *dfid = NULL; @@ -842,7 +843,7 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, umode_t omode, err); goto error; } - name = (char *) dentry->d_name.name; + name = dentry->d_name.name; err = p9_client_mknod_dotl(dfid, name, mode, rdev, gid, &qid); if (err < 0) diff --git a/fs/affs/inode.c b/fs/affs/inode.c index a5e6097eb5a9dd..abcc59899229c6 100644 --- a/fs/affs/inode.c +++ b/fs/affs/inode.c @@ -10,6 +10,7 @@ * (C) 1991 Linus Torvalds - minix filesystem */ #include +#include #include #include "affs.h" diff --git a/fs/affs/super.c b/fs/affs/super.c index 37532538e8ab12..c2c27a8f128ef7 100644 --- a/fs/affs/super.c +++ b/fs/affs/super.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include diff --git a/fs/afs/callback.c b/fs/afs/callback.c index b29447e03ede0d..25d404d22caebc 100644 --- a/fs/afs/callback.c +++ b/fs/afs/callback.c @@ -362,7 +362,7 @@ static void afs_callback_updater(struct work_struct *work) { struct afs_server *server; struct afs_vnode *vnode, *xvnode; - time_t now; + time64_t now; long timeout; int ret; @@ -370,7 +370,7 @@ static void afs_callback_updater(struct work_struct *work) _enter(""); - now = get_seconds(); + now = ktime_get_real_seconds(); /* find the first vnode to update */ spin_lock(&server->cb_lock); @@ -424,7 +424,8 @@ static void afs_callback_updater(struct work_struct *work) /* and then reschedule */ _debug("reschedule"); - vnode->update_at = get_seconds() + afs_vnode_update_timeout; + vnode->update_at = ktime_get_real_seconds() + + afs_vnode_update_timeout; spin_lock(&server->cb_lock); diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c index 2edbdcbf6432ad..3062cceb5c2aeb 100644 --- a/fs/afs/cmservice.c +++ b/fs/afs/cmservice.c @@ -187,7 +187,6 @@ static int afs_deliver_cb_callback(struct afs_call *call) struct afs_callback *cb; struct afs_server *server; __be32 *bp; - u32 tmp; int ret, loop; _enter("{%u}", call->unmarshall); @@ -249,9 +248,9 @@ static int afs_deliver_cb_callback(struct afs_call *call) if (ret < 0) return ret; - tmp = ntohl(call->tmp); - _debug("CB count: %u", tmp); - if (tmp != call->count && tmp != 0) + call->count2 = ntohl(call->tmp); + _debug("CB count: %u", call->count2); + if (call->count2 != call->count && call->count2 != 0) return -EBADMSG; call->offset = 0; call->unmarshall++; @@ -259,14 +258,14 @@ static int afs_deliver_cb_callback(struct afs_call *call) case 4: _debug("extract CB array"); ret = afs_extract_data(call, call->buffer, - call->count * 3 * 4, false); + call->count2 * 3 * 4, false); if (ret < 0) return ret; _debug("unmarshall CB array"); cb = call->request; bp = call->buffer; - for (loop = call->count; loop > 0; loop--, cb++) { + for (loop = call->count2; loop > 
0; loop--, cb++) { cb->version = ntohl(*bp++); cb->expiry = ntohl(*bp++); cb->type = ntohl(*bp++); diff --git a/fs/afs/file.c b/fs/afs/file.c index ba7b71fba34bcc..0d5b8508869bf0 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -30,6 +30,7 @@ static int afs_readpages(struct file *filp, struct address_space *mapping, const struct file_operations afs_file_operations = { .open = afs_open, + .flush = afs_flush, .release = afs_release, .llseek = generic_file_llseek, .read_iter = generic_file_read_iter, @@ -184,10 +185,13 @@ int afs_page_filler(void *data, struct page *page) if (!req) goto enomem; + /* We request a full page. If the page is a partial one at the + * end of the file, the server will return a short read and the + * unmarshalling code will clear the unfilled space. + */ atomic_set(&req->usage, 1); req->pos = (loff_t)page->index << PAGE_SHIFT; - req->len = min_t(size_t, i_size_read(inode) - req->pos, - PAGE_SIZE); + req->len = PAGE_SIZE; req->nr_pages = 1; req->pages[0] = page; get_page(page); @@ -208,7 +212,13 @@ int afs_page_filler(void *data, struct page *page) fscache_uncache_page(vnode->cache, page); #endif BUG_ON(PageFsCache(page)); - goto error; + + if (ret == -EINTR || + ret == -ENOMEM || + ret == -ERESTARTSYS || + ret == -EAGAIN) + goto error; + goto io_error; } SetPageUptodate(page); @@ -227,10 +237,12 @@ int afs_page_filler(void *data, struct page *page) _leave(" = 0"); return 0; +io_error: + SetPageError(page); + goto error; enomem: ret = -ENOMEM; error: - SetPageError(page); unlock_page(page); _leave(" = %d", ret); return ret; diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c index ac8e766978dc44..19f76ae36982df 100644 --- a/fs/afs/fsclient.c +++ b/fs/afs/fsclient.c @@ -16,6 +16,12 @@ #include "internal.h" #include "afs_fs.h" +/* + * We need somewhere to discard into in case the server helpfully returns more + * than we asked for in FS.FetchData{,64}. + */ +static u8 afs_discard_buffer[64]; + /* * decode an AFSFid block */ @@ -105,7 +111,7 @@ static void xdr_decode_AFSFetchStatus(const __be32 **_bp, vnode->vfs_inode.i_mode = mode; } - vnode->vfs_inode.i_ctime.tv_sec = status->mtime_server; + vnode->vfs_inode.i_ctime.tv_sec = status->mtime_client; vnode->vfs_inode.i_mtime = vnode->vfs_inode.i_ctime; vnode->vfs_inode.i_atime = vnode->vfs_inode.i_ctime; vnode->vfs_inode.i_version = data_version; @@ -139,7 +145,7 @@ static void xdr_decode_AFSCallBack(const __be32 **_bp, struct afs_vnode *vnode) vnode->cb_version = ntohl(*bp++); vnode->cb_expiry = ntohl(*bp++); vnode->cb_type = ntohl(*bp++); - vnode->cb_expires = vnode->cb_expiry + get_seconds(); + vnode->cb_expires = vnode->cb_expiry + ktime_get_real_seconds(); *_bp = bp; } @@ -315,7 +321,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) void *buffer; int ret; - _enter("{%u,%zu/%u;%u/%llu}", + _enter("{%u,%zu/%u;%llu/%llu}", call->unmarshall, call->offset, call->count, req->remain, req->actual_len); @@ -353,12 +359,6 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) req->actual_len |= ntohl(call->tmp); _debug("DATA length: %llu", req->actual_len); - /* Check that the server didn't want to send us extra. We - * might want to just discard instead, but that requires - * cooperation from AF_RXRPC. 
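afs_discard_buffer, declared above, exists so the FS.FetchData delivery code can drain any bytes the server returns beyond what was requested instead of failing the call: the excess is read into a small static bounce buffer in a loop until nothing remains. A userspace sketch of that drain loop, with read() standing in for afs_extract_data():

#include <sys/types.h>
#include <unistd.h>
#include <stdint.h>

static uint8_t discard_buffer[64];

static int drain_excess(int fd, size_t remain)
{
	while (remain > 0) {
		size_t chunk = remain < sizeof(discard_buffer)
			     ? remain : sizeof(discard_buffer);
		ssize_t n = read(fd, discard_buffer, chunk);

		if (n <= 0)
			return -1;	/* error or unexpected EOF */
		remain -= (size_t)n;
	}
	return 0;
}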
- */ - if (req->actual_len > req->len) - return -EBADMSG; req->remain = req->actual_len; call->offset = req->pos & (PAGE_SIZE - 1); @@ -368,6 +368,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) call->unmarshall++; begin_page: + ASSERTCMP(req->index, <, req->nr_pages); if (req->remain > PAGE_SIZE - call->offset) size = PAGE_SIZE - call->offset; else @@ -378,7 +379,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) /* extract the returned data */ case 3: - _debug("extract data %u/%llu %zu/%u", + _debug("extract data %llu/%llu %zu/%u", req->remain, req->actual_len, call->offset, call->count); buffer = kmap(req->pages[req->index]); @@ -389,19 +390,40 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) if (call->offset == PAGE_SIZE) { if (req->page_done) req->page_done(call, req); + req->index++; if (req->remain > 0) { - req->index++; call->offset = 0; + if (req->index >= req->nr_pages) { + call->unmarshall = 4; + goto begin_discard; + } goto begin_page; } } + goto no_more_data; + + /* Discard any excess data the server gave us */ + begin_discard: + case 4: + size = min_t(loff_t, sizeof(afs_discard_buffer), req->remain); + call->count = size; + _debug("extract discard %llu/%llu %zu/%u", + req->remain, req->actual_len, call->offset, call->count); + + call->offset = 0; + ret = afs_extract_data(call, afs_discard_buffer, call->count, true); + req->remain -= call->offset; + if (ret < 0) + return ret; + if (req->remain > 0) + goto begin_discard; no_more_data: call->offset = 0; - call->unmarshall++; + call->unmarshall = 5; /* extract the metadata */ - case 4: + case 5: ret = afs_extract_data(call, call->buffer, (21 + 3 + 6) * 4, false); if (ret < 0) @@ -416,16 +438,17 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) call->offset = 0; call->unmarshall++; - case 5: + case 6: break; } - if (call->count < PAGE_SIZE) { - buffer = kmap(req->pages[req->index]); - memset(buffer + call->count, 0, PAGE_SIZE - call->count); - kunmap(req->pages[req->index]); + for (; req->index < req->nr_pages; req->index++) { + if (call->count < PAGE_SIZE) + zero_user_segment(req->pages[req->index], + call->count, PAGE_SIZE); if (req->page_done) req->page_done(call, req); + call->count = 0; } _leave(" = 0 [done]"); @@ -711,8 +734,8 @@ int afs_fs_create(struct afs_server *server, memset(bp, 0, padsz); bp = (void *) bp + padsz; } - *bp++ = htonl(AFS_SET_MODE); - *bp++ = 0; /* mtime */ + *bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME); + *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */ *bp++ = 0; /* owner */ *bp++ = 0; /* group */ *bp++ = htonl(mode & S_IALLUGO); /* unix mode */ @@ -980,8 +1003,8 @@ int afs_fs_symlink(struct afs_server *server, memset(bp, 0, c_padsz); bp = (void *) bp + c_padsz; } - *bp++ = htonl(AFS_SET_MODE); - *bp++ = 0; /* mtime */ + *bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME); + *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */ *bp++ = 0; /* owner */ *bp++ = 0; /* group */ *bp++ = htonl(S_IRWXUGO); /* unix mode */ @@ -1180,8 +1203,8 @@ static int afs_fs_store_data64(struct afs_server *server, *bp++ = htonl(vnode->fid.vnode); *bp++ = htonl(vnode->fid.unique); - *bp++ = 0; /* mask */ - *bp++ = 0; /* mtime */ + *bp++ = htonl(AFS_SET_MTIME); /* mask */ + *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */ *bp++ = 0; /* owner */ *bp++ = 0; /* group */ *bp++ = 0; /* unix mode */ @@ -1213,7 +1236,7 @@ int afs_fs_store_data(struct afs_server *server, struct afs_writeback *wb, _enter(",%x,{%x:%u},,", key_serial(wb->key), vnode->fid.vid, 
vnode->fid.vnode); - size = to - offset; + size = (loff_t)to - (loff_t)offset; if (first != last) size += (loff_t)(last - first) << PAGE_SHIFT; pos = (loff_t)first << PAGE_SHIFT; @@ -1257,8 +1280,8 @@ int afs_fs_store_data(struct afs_server *server, struct afs_writeback *wb, *bp++ = htonl(vnode->fid.vnode); *bp++ = htonl(vnode->fid.unique); - *bp++ = 0; /* mask */ - *bp++ = 0; /* mtime */ + *bp++ = htonl(AFS_SET_MTIME); /* mask */ + *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */ *bp++ = 0; /* owner */ *bp++ = 0; /* group */ *bp++ = 0; /* unix mode */ diff --git a/fs/afs/inode.c b/fs/afs/inode.c index 86cc7264c21cda..aae55dd151087e 100644 --- a/fs/afs/inode.c +++ b/fs/afs/inode.c @@ -54,8 +54,21 @@ static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key) inode->i_fop = &afs_dir_file_operations; break; case AFS_FTYPE_SYMLINK: - inode->i_mode = S_IFLNK | vnode->status.mode; - inode->i_op = &page_symlink_inode_operations; + /* Symlinks with a mode of 0644 are actually mountpoints. */ + if ((vnode->status.mode & 0777) == 0644) { + inode->i_flags |= S_AUTOMOUNT; + + spin_lock(&vnode->lock); + set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags); + spin_unlock(&vnode->lock); + + inode->i_mode = S_IFDIR | 0555; + inode->i_op = &afs_mntpt_inode_operations; + inode->i_fop = &afs_mntpt_file_operations; + } else { + inode->i_mode = S_IFLNK | vnode->status.mode; + inode->i_op = &page_symlink_inode_operations; + } inode_nohighmem(inode); break; default: @@ -70,27 +83,15 @@ static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key) set_nlink(inode, vnode->status.nlink); inode->i_uid = vnode->status.owner; - inode->i_gid = GLOBAL_ROOT_GID; + inode->i_gid = vnode->status.group; inode->i_size = vnode->status.size; - inode->i_ctime.tv_sec = vnode->status.mtime_server; + inode->i_ctime.tv_sec = vnode->status.mtime_client; inode->i_ctime.tv_nsec = 0; inode->i_atime = inode->i_mtime = inode->i_ctime; inode->i_blocks = 0; inode->i_generation = vnode->fid.unique; inode->i_version = vnode->status.data_version; inode->i_mapping->a_ops = &afs_fs_aops; - - /* check to see whether a symbolic link is really a mountpoint */ - if (vnode->status.type == AFS_FTYPE_SYMLINK) { - afs_mntpt_check_symlink(vnode, key); - - if (test_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags)) { - inode->i_mode = S_IFDIR | vnode->status.mode; - inode->i_op = &afs_mntpt_inode_operations; - inode->i_fop = &afs_mntpt_file_operations; - } - } - return 0; } @@ -245,12 +246,13 @@ struct inode *afs_iget(struct super_block *sb, struct key *key, vnode->cb_version = 0; vnode->cb_expiry = 0; vnode->cb_type = 0; - vnode->cb_expires = get_seconds(); + vnode->cb_expires = ktime_get_real_seconds(); } else { vnode->cb_version = cb->version; vnode->cb_expiry = cb->expiry; vnode->cb_type = cb->type; - vnode->cb_expires = vnode->cb_expiry + get_seconds(); + vnode->cb_expires = vnode->cb_expiry + + ktime_get_real_seconds(); } } @@ -323,7 +325,7 @@ int afs_validate(struct afs_vnode *vnode, struct key *key) !test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags) && !test_bit(AFS_VNODE_MODIFIED, &vnode->flags) && !test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) { - if (vnode->cb_expires < get_seconds() + 10) { + if (vnode->cb_expires < ktime_get_real_seconds() + 10) { _debug("callback expired"); set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags); } else { @@ -375,12 +377,10 @@ int afs_validate(struct afs_vnode *vnode, struct key *key) /* * read the attributes of an inode */ -int afs_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat 
*stat) +int afs_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) { - struct inode *inode; - - inode = d_inode(dentry); + struct inode *inode = d_inode(path->dentry); _enter("{ ino=%lu v=%u }", inode->i_ino, inode->i_generation); @@ -446,7 +446,7 @@ void afs_evict_inode(struct inode *inode) mutex_lock(&vnode->permits_lock); permits = vnode->permits; - rcu_assign_pointer(vnode->permits, NULL); + RCU_INIT_POINTER(vnode->permits, NULL); mutex_unlock(&vnode->permits_lock); if (permits) call_rcu(&permits->rcu, afs_zap_permits); diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 8acf3670e75649..a6901360fb81d4 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -11,6 +11,7 @@ #include #include +#include #include #include #include @@ -90,7 +91,10 @@ struct afs_call { unsigned request_size; /* size of request data */ unsigned reply_max; /* maximum size of reply */ unsigned first_offset; /* offset into mapping[first] */ - unsigned last_to; /* amount of mapping[last] */ + union { + unsigned last_to; /* amount of mapping[last] */ + unsigned count2; /* count used in unmarshalling */ + }; unsigned char unmarshall; /* unmarshalling phase */ bool incoming; /* T if incoming call */ bool send_pages; /* T if data from mapping should be sent */ @@ -127,12 +131,11 @@ struct afs_call_type { */ struct afs_read { loff_t pos; /* Where to start reading */ - loff_t len; /* How much to read */ + loff_t len; /* How much we're asking for */ loff_t actual_len; /* How much we're actually getting */ + loff_t remain; /* Amount remaining */ atomic_t usage; - unsigned int remain; /* Amount remaining */ unsigned int index; /* Which page we're reading into */ - unsigned int pg_offset; /* Offset in page we're at */ unsigned int nr_pages; void (*page_done)(struct afs_call *, struct afs_read *); struct page *pages[]; @@ -247,7 +250,7 @@ struct afs_cache_vhash { */ struct afs_vlocation { atomic_t usage; - time_t time_of_death; /* time at which put reduced usage to 0 */ + time64_t time_of_death; /* time at which put reduced usage to 0 */ struct list_head link; /* link in cell volume location list */ struct list_head grave; /* link in master graveyard list */ struct list_head update; /* link in master update list */ @@ -258,7 +261,7 @@ struct afs_vlocation { struct afs_cache_vlocation vldb; /* volume information DB record */ struct afs_volume *vols[3]; /* volume access record pointer (index by type) */ wait_queue_head_t waitq; /* status change waitqueue */ - time_t update_at; /* time at which record should be updated */ + time64_t update_at; /* time at which record should be updated */ spinlock_t lock; /* access lock */ afs_vlocation_state_t state; /* volume location state */ unsigned short upd_rej_cnt; /* ENOMEDIUM count during update */ @@ -271,7 +274,7 @@ struct afs_vlocation { */ struct afs_server { atomic_t usage; - time_t time_of_death; /* time at which put reduced usage to 0 */ + time64_t time_of_death; /* time at which put reduced usage to 0 */ struct in_addr addr; /* server address */ struct afs_cell *cell; /* cell in which server resides */ struct list_head link; /* link in cell's server list */ @@ -374,8 +377,8 @@ struct afs_vnode { struct rb_node server_rb; /* link in server->fs_vnodes */ struct rb_node cb_promise; /* link in server->cb_promises */ struct work_struct cb_broken_work; /* work to be done on callback break */ - time_t cb_expires; /* time at which callback expires */ - time_t cb_expires_at; /* time used to order cb_promise */ + time64_t cb_expires; 
/* time at which callback expires */ + time64_t cb_expires_at; /* time used to order cb_promise */ unsigned cb_version; /* callback version */ unsigned cb_expiry; /* callback expiry time */ afs_callback_type_t cb_type; /* type of callback */ @@ -533,7 +536,7 @@ extern struct inode *afs_iget(struct super_block *, struct key *, struct afs_callback *); extern void afs_zap_data(struct afs_vnode *); extern int afs_validate(struct afs_vnode *, struct key *); -extern int afs_getattr(struct vfsmount *, struct dentry *, struct kstat *); +extern int afs_getattr(const struct path *, struct kstat *, u32, unsigned int); extern int afs_setattr(struct dentry *, struct iattr *); extern void afs_evict_inode(struct inode *); extern int afs_drop_inode(struct inode *); @@ -557,7 +560,6 @@ extern const struct inode_operations afs_autocell_inode_operations; extern const struct file_operations afs_mntpt_file_operations; extern struct vfsmount *afs_d_automount(struct path *); -extern int afs_mntpt_check_symlink(struct afs_vnode *, struct key *); extern void afs_mntpt_kill_timer(void); /* @@ -718,6 +720,7 @@ extern int afs_writepages(struct address_space *, struct writeback_control *); extern void afs_pages_written_back(struct afs_vnode *, struct afs_call *); extern ssize_t afs_file_write(struct kiocb *, struct iov_iter *); extern int afs_writeback_all(struct afs_vnode *); +extern int afs_flush(struct file *, fl_owner_t); extern int afs_fsync(struct file *, loff_t, loff_t, int); diff --git a/fs/afs/misc.c b/fs/afs/misc.c index 91ea1aa0d8b3ab..100b207efc9ead 100644 --- a/fs/afs/misc.c +++ b/fs/afs/misc.c @@ -84,6 +84,8 @@ int afs_abort_to_error(u32 abort_code) case RXKADDATALEN: return -EKEYREJECTED; case RXKADILLEGALLEVEL: return -EKEYREJECTED; + case RXGEN_OPCODE: return -ENOTSUPP; + default: return -EREMOTEIO; } } diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c index d4fb0afc0097d4..bd3b65cde282a2 100644 --- a/fs/afs/mntpt.c +++ b/fs/afs/mntpt.c @@ -46,59 +46,6 @@ static DECLARE_DELAYED_WORK(afs_mntpt_expiry_timer, afs_mntpt_expiry_timed_out); static unsigned long afs_mntpt_expiry_timeout = 10 * 60; -/* - * check a symbolic link to see whether it actually encodes a mountpoint - * - sets the AFS_VNODE_MOUNTPOINT flag on the vnode appropriately - */ -int afs_mntpt_check_symlink(struct afs_vnode *vnode, struct key *key) -{ - struct page *page; - size_t size; - char *buf; - int ret; - - _enter("{%x:%u,%u}", - vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique); - - /* read the contents of the symlink into the pagecache */ - page = read_cache_page(AFS_VNODE_TO_I(vnode)->i_mapping, 0, - afs_page_filler, key); - if (IS_ERR(page)) { - ret = PTR_ERR(page); - goto out; - } - - ret = -EIO; - if (PageError(page)) - goto out_free; - - buf = kmap(page); - - /* examine the symlink's contents */ - size = vnode->status.size; - _debug("symlink to %*.*s", (int) size, (int) size, buf); - - if (size > 2 && - (buf[0] == '%' || buf[0] == '#') && - buf[size - 1] == '.' 
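The afs_getattr() change and prototype update above follow the VFS move to a path-based ->getattr() that also receives a request mask and query flags. A minimal sketch of the new calling convention under 4.11-era helpers; the function name is invented and the extra arguments are deliberately ignored, as simple filesystems may do:

/*
 * Hypothetical ->getattr() under the path-based prototype; relies on
 * linux/fs.h, linux/dcache.h and linux/stat.h.
 */
static int example_getattr(const struct path *path, struct kstat *stat,
			   u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);

	generic_fillattr(inode, stat);	/* fill *stat from the in-core inode */
	return 0;
}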
- ) { - _debug("symlink is a mountpoint"); - spin_lock(&vnode->lock); - set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags); - vnode->vfs_inode.i_flags |= S_AUTOMOUNT; - spin_unlock(&vnode->lock); - } - - ret = 0; - - kunmap(page); -out_free: - put_page(page); -out: - _leave(" = %d", ret); - return ret; -} - /* * no valid lookup procedure on this sort of dir */ diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 95f42872b787ad..8f76b13d55494b 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -10,6 +10,8 @@ */ #include +#include + #include #include #include @@ -257,68 +259,74 @@ void afs_flat_call_destructor(struct afs_call *call) call->buffer = NULL; } +#define AFS_BVEC_MAX 8 + +/* + * Load the given bvec with the next few pages. + */ +static void afs_load_bvec(struct afs_call *call, struct msghdr *msg, + struct bio_vec *bv, pgoff_t first, pgoff_t last, + unsigned offset) +{ + struct page *pages[AFS_BVEC_MAX]; + unsigned int nr, n, i, to, bytes = 0; + + nr = min_t(pgoff_t, last - first + 1, AFS_BVEC_MAX); + n = find_get_pages_contig(call->mapping, first, nr, pages); + ASSERTCMP(n, ==, nr); + + msg->msg_flags |= MSG_MORE; + for (i = 0; i < nr; i++) { + to = PAGE_SIZE; + if (first + i >= last) { + to = call->last_to; + msg->msg_flags &= ~MSG_MORE; + } + bv[i].bv_page = pages[i]; + bv[i].bv_len = to - offset; + bv[i].bv_offset = offset; + bytes += to - offset; + offset = 0; + } + + iov_iter_bvec(&msg->msg_iter, WRITE | ITER_BVEC, bv, nr, bytes); +} + /* * attach the data from a bunch of pages on an inode to a call */ -static int afs_send_pages(struct afs_call *call, struct msghdr *msg, - struct kvec *iov) +static int afs_send_pages(struct afs_call *call, struct msghdr *msg) { - struct page *pages[8]; - unsigned count, n, loop, offset, to; + struct bio_vec bv[AFS_BVEC_MAX]; + unsigned int bytes, nr, loop, offset; pgoff_t first = call->first, last = call->last; int ret; - _enter(""); - offset = call->first_offset; call->first_offset = 0; do { - _debug("attach %lx-%lx", first, last); - - count = last - first + 1; - if (count > ARRAY_SIZE(pages)) - count = ARRAY_SIZE(pages); - n = find_get_pages_contig(call->mapping, first, count, pages); - ASSERTCMP(n, ==, count); - - loop = 0; - do { - msg->msg_flags = 0; - to = PAGE_SIZE; - if (first + loop >= last) - to = call->last_to; - else - msg->msg_flags = MSG_MORE; - iov->iov_base = kmap(pages[loop]) + offset; - iov->iov_len = to - offset; - offset = 0; - - _debug("- range %u-%u%s", - offset, to, msg->msg_flags ? " [more]" : ""); - iov_iter_kvec(&msg->msg_iter, WRITE | ITER_KVEC, - iov, 1, to - offset); - - /* have to change the state *before* sending the last - * packet as RxRPC might give us the reply before it - * returns from sending the request */ - if (first + loop >= last) - call->state = AFS_CALL_AWAIT_REPLY; - ret = rxrpc_kernel_send_data(afs_socket, call->rxcall, - msg, to - offset); - kunmap(pages[loop]); - if (ret < 0) - break; - } while (++loop < count); - first += count; - - for (loop = 0; loop < count; loop++) - put_page(pages[loop]); + afs_load_bvec(call, msg, bv, first, last, offset); + offset = 0; + bytes = msg->msg_iter.count; + nr = msg->msg_iter.nr_segs; + + /* Have to change the state *before* sending the last + * packet as RxRPC might give us the reply before it + * returns from sending the request. 
+ */ + if (first + nr - 1 >= last) + call->state = AFS_CALL_AWAIT_REPLY; + ret = rxrpc_kernel_send_data(afs_socket, call->rxcall, + msg, bytes); + for (loop = 0; loop < nr; loop++) + put_page(bv[loop].bv_page); if (ret < 0) break; + + first += nr; } while (first <= last); - _leave(" = %d", ret); return ret; } @@ -332,6 +340,8 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp, struct rxrpc_call *rxcall; struct msghdr msg; struct kvec iov[1]; + size_t offset; + u32 abort_code; int ret; _enter("%x,{%d},", addr->s_addr, ntohs(call->port)); @@ -380,9 +390,11 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp, msg.msg_controllen = 0; msg.msg_flags = (call->send_pages ? MSG_MORE : 0); - /* have to change the state *before* sending the last packet as RxRPC - * might give us the reply before it returns from sending the - * request */ + /* We have to change the state *before* sending the last packet as + * rxrpc might give us the reply before it returns from sending the + * request. Further, if the send fails, we may already have been given + * a notification and may have collected it. + */ if (!call->send_pages) call->state = AFS_CALL_AWAIT_REPLY; ret = rxrpc_kernel_send_data(afs_socket, rxcall, @@ -391,7 +403,7 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp, goto error_do_abort; if (call->send_pages) { - ret = afs_send_pages(call, &msg, iov); + ret = afs_send_pages(call, &msg); if (ret < 0) goto error_do_abort; } @@ -404,7 +416,17 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp, return afs_wait_for_call_to_complete(call); error_do_abort: - rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT, -ret, "KSD"); + call->state = AFS_CALL_COMPLETE; + if (ret != -ECONNABORTED) { + rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT, + -ret, "KSD"); + } else { + abort_code = 0; + offset = 0; + rxrpc_kernel_recv_data(afs_socket, rxcall, NULL, 0, &offset, + false, &abort_code); + ret = call->type->abort_to_error(abort_code); + } error_kill_call: afs_put_call(call); _leave(" = %d", ret); @@ -451,16 +473,18 @@ static void afs_deliver_to_call(struct afs_call *call) case -EINPROGRESS: case -EAGAIN: goto out; + case -ECONNABORTED: + goto call_complete; case -ENOTCONN: abort_code = RX_CALL_DEAD; rxrpc_kernel_abort_call(afs_socket, call->rxcall, abort_code, -ret, "KNC"); - goto do_abort; + goto save_error; case -ENOTSUPP: - abort_code = RX_INVALID_OPERATION; + abort_code = RXGEN_OPCODE; rxrpc_kernel_abort_call(afs_socket, call->rxcall, abort_code, -ret, "KIV"); - goto do_abort; + goto save_error; case -ENODATA: case -EBADMSG: case -EMSGSIZE: @@ -470,7 +494,7 @@ static void afs_deliver_to_call(struct afs_call *call) abort_code = RXGEN_SS_UNMARSHAL; rxrpc_kernel_abort_call(afs_socket, call->rxcall, abort_code, EBADMSG, "KUM"); - goto do_abort; + goto save_error; } } @@ -481,8 +505,9 @@ static void afs_deliver_to_call(struct afs_call *call) _leave(""); return; -do_abort: +save_error: call->error = ret; +call_complete: call->state = AFS_CALL_COMPLETE; goto done; } @@ -492,7 +517,6 @@ static void afs_deliver_to_call(struct afs_call *call) */ static int afs_wait_for_call_to_complete(struct afs_call *call) { - const char *abort_why; int ret; DECLARE_WAITQUEUE(myself, current); @@ -511,13 +535,8 @@ static int afs_wait_for_call_to_complete(struct afs_call *call) continue; } - abort_why = "KWC"; - ret = call->error; - if (call->state == AFS_CALL_COMPLETE) - break; - abort_why = "KWI"; - ret = -EINTR; - if 
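afs_load_bvec() above batches up to AFS_BVEC_MAX pages into a bio_vec array and points the message iterator at them, keeping MSG_MORE set except for the final chunk. A stripped-down sketch of the same packing pattern; the function name is invented and iov_iter_bvec() is used with the WRITE | ITER_BVEC convention seen in the hunk:

/*
 * Illustrative helper: pack nr pages into bv[] and aim msg->msg_iter at
 * them.  Only the first page may start at a non-zero offset and only the
 * last page may end early (at last_to).
 */
static void example_load_bvec(struct msghdr *msg, struct bio_vec *bv,
			      struct page **pages, unsigned int nr,
			      unsigned int offset, unsigned int last_to)
{
	unsigned int i, bytes = 0;

	for (i = 0; i < nr; i++) {
		unsigned int to = (i == nr - 1) ? last_to : PAGE_SIZE;

		bv[i].bv_page   = pages[i];
		bv[i].bv_offset = offset;
		bv[i].bv_len    = to - offset;
		bytes += to - offset;
		offset = 0;			/* later pages start at 0 */
	}

	iov_iter_bvec(&msg->msg_iter, WRITE | ITER_BVEC, bv, nr, bytes);
}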
(signal_pending(current)) + if (call->state == AFS_CALL_COMPLETE || + signal_pending(current)) break; schedule(); } @@ -525,13 +544,14 @@ static int afs_wait_for_call_to_complete(struct afs_call *call) remove_wait_queue(&call->waitq, &myself); __set_current_state(TASK_RUNNING); - /* kill the call */ + /* Kill off the call if it's still live. */ if (call->state < AFS_CALL_COMPLETE) { - _debug("call incomplete"); + _debug("call interrupted"); rxrpc_kernel_abort_call(afs_socket, call->rxcall, - RX_CALL_DEAD, -ret, abort_why); + RX_USER_ABORT, -EINTR, "KWI"); } + ret = call->error; _debug("call complete"); afs_put_call(call); _leave(" = %d", ret); diff --git a/fs/afs/security.c b/fs/afs/security.c index 8d010422dc8962..ecb86a6701801c 100644 --- a/fs/afs/security.c +++ b/fs/afs/security.c @@ -114,7 +114,7 @@ void afs_clear_permits(struct afs_vnode *vnode) mutex_lock(&vnode->permits_lock); permits = vnode->permits; - rcu_assign_pointer(vnode->permits, NULL); + RCU_INIT_POINTER(vnode->permits, NULL); mutex_unlock(&vnode->permits_lock); if (permits) @@ -340,17 +340,22 @@ int afs_permission(struct inode *inode, int mask) } else { if (!(access & AFS_ACE_LOOKUP)) goto permission_denied; + if ((mask & MAY_EXEC) && !(inode->i_mode & S_IXUSR)) + goto permission_denied; if (mask & (MAY_EXEC | MAY_READ)) { if (!(access & AFS_ACE_READ)) goto permission_denied; + if (!(inode->i_mode & S_IRUSR)) + goto permission_denied; } else if (mask & MAY_WRITE) { if (!(access & AFS_ACE_WRITE)) goto permission_denied; + if (!(inode->i_mode & S_IWUSR)) + goto permission_denied; } } key_put(key); - ret = generic_permission(inode, mask); _leave(" = %d", ret); return ret; diff --git a/fs/afs/server.c b/fs/afs/server.c index d4066ab7dd5505..c001b1f2455fbf 100644 --- a/fs/afs/server.c +++ b/fs/afs/server.c @@ -242,7 +242,7 @@ void afs_put_server(struct afs_server *server) spin_lock(&afs_server_graveyard_lock); if (atomic_read(&server->usage) == 0) { list_move_tail(&server->grave, &afs_server_graveyard); - server->time_of_death = get_seconds(); + server->time_of_death = ktime_get_real_seconds(); queue_delayed_work(afs_wq, &afs_server_reaper, afs_server_timeout * HZ); } @@ -277,9 +277,9 @@ static void afs_reap_server(struct work_struct *work) LIST_HEAD(corpses); struct afs_server *server; unsigned long delay, expiry; - time_t now; + time64_t now; - now = get_seconds(); + now = ktime_get_real_seconds(); spin_lock(&afs_server_graveyard_lock); while (!list_empty(&afs_server_graveyard)) { diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c index d7d8dd8c0b3187..37b7c3b342a6b5 100644 --- a/fs/afs/vlocation.c +++ b/fs/afs/vlocation.c @@ -340,7 +340,8 @@ static void afs_vlocation_queue_for_updates(struct afs_vlocation *vl) struct afs_vlocation *xvl; /* wait at least 10 minutes before updating... 
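The new error_do_abort path in afs_make_call() above distinguishes a local send failure from a remote abort: for -ECONNABORTED the abort code has already arrived, so it is collected with a zero-length receive and translated rather than aborting the call again. A sketch of that logic pulled out into a hypothetical helper, reusing the rxrpc calls and constants exactly as they appear in the hunk:

/*
 * Hypothetical helper mirroring the error path above.  Returns the errno
 * the caller should propagate.
 */
static int example_handle_send_error(struct afs_call *call, int ret)
{
	u32 abort_code = 0;
	size_t offset = 0;

	if (ret != -ECONNABORTED) {
		/* local failure: abort the call ourselves */
		rxrpc_kernel_abort_call(afs_socket, call->rxcall,
					RX_USER_ABORT, -ret, "KSD");
		return ret;
	}

	/* remote abort: collect the abort code and translate it */
	rxrpc_kernel_recv_data(afs_socket, call->rxcall, NULL, 0, &offset,
			       false, &abort_code);
	return call->type->abort_to_error(abort_code);
}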
*/ - vl->update_at = get_seconds() + afs_vlocation_update_timeout; + vl->update_at = ktime_get_real_seconds() + + afs_vlocation_update_timeout; spin_lock(&afs_vlocation_updates_lock); @@ -506,7 +507,7 @@ void afs_put_vlocation(struct afs_vlocation *vl) if (atomic_read(&vl->usage) == 0) { _debug("buried"); list_move_tail(&vl->grave, &afs_vlocation_graveyard); - vl->time_of_death = get_seconds(); + vl->time_of_death = ktime_get_real_seconds(); queue_delayed_work(afs_wq, &afs_vlocation_reap, afs_vlocation_timeout * HZ); @@ -543,11 +544,11 @@ static void afs_vlocation_reaper(struct work_struct *work) LIST_HEAD(corpses); struct afs_vlocation *vl; unsigned long delay, expiry; - time_t now; + time64_t now; _enter(""); - now = get_seconds(); + now = ktime_get_real_seconds(); spin_lock(&afs_vlocation_graveyard_lock); while (!list_empty(&afs_vlocation_graveyard)) { @@ -622,13 +623,13 @@ static void afs_vlocation_updater(struct work_struct *work) { struct afs_cache_vlocation vldb; struct afs_vlocation *vl, *xvl; - time_t now; + time64_t now; long timeout; int ret; _enter(""); - now = get_seconds(); + now = ktime_get_real_seconds(); /* find a record to update */ spin_lock(&afs_vlocation_updates_lock); @@ -684,7 +685,8 @@ static void afs_vlocation_updater(struct work_struct *work) /* and then reschedule */ _debug("reschedule"); - vl->update_at = get_seconds() + afs_vlocation_update_timeout; + vl->update_at = ktime_get_real_seconds() + + afs_vlocation_update_timeout; spin_lock(&afs_vlocation_updates_lock); diff --git a/fs/afs/write.c b/fs/afs/write.c index c83c1a0e851fb3..2d2fccd5044bcd 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -84,10 +84,9 @@ void afs_put_writeback(struct afs_writeback *wb) * partly or wholly fill a page that's under preparation for writing */ static int afs_fill_page(struct afs_vnode *vnode, struct key *key, - loff_t pos, struct page *page) + loff_t pos, unsigned int len, struct page *page) { struct afs_read *req; - loff_t i_size; int ret; _enter(",,%llu", (unsigned long long)pos); @@ -99,14 +98,10 @@ static int afs_fill_page(struct afs_vnode *vnode, struct key *key, atomic_set(&req->usage, 1); req->pos = pos; + req->len = len; req->nr_pages = 1; req->pages[0] = page; - - i_size = i_size_read(&vnode->vfs_inode); - if (pos + PAGE_SIZE > i_size) - req->len = i_size - pos; - else - req->len = PAGE_SIZE; + get_page(page); ret = afs_vnode_fetch_data(vnode, key, req); afs_put_read(req); @@ -159,12 +154,12 @@ int afs_write_begin(struct file *file, struct address_space *mapping, kfree(candidate); return -ENOMEM; } - *pagep = page; - /* page won't leak in error case: it eventually gets cleaned off LRU */ if (!PageUptodate(page) && len != PAGE_SIZE) { - ret = afs_fill_page(vnode, key, index << PAGE_SHIFT, page); + ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page); if (ret < 0) { + unlock_page(page); + put_page(page); kfree(candidate); _leave(" = %d [prep]", ret); return ret; @@ -172,6 +167,9 @@ int afs_write_begin(struct file *file, struct address_space *mapping, SetPageUptodate(page); } + /* page won't leak in error case: it eventually gets cleaned off LRU */ + *pagep = page; + try_again: spin_lock(&vnode->writeback_lock); @@ -233,7 +231,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping, if (wb->state == AFS_WBACK_PENDING) wb->state = AFS_WBACK_CONFLICTING; spin_unlock(&vnode->writeback_lock); - if (PageDirty(page)) { + if (clear_page_dirty_for_io(page)) { ret = afs_write_back_from_locked_page(wb, page); if (ret < 0) { 
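The server and volume-location bookkeeping above moves from time_t/get_seconds() to time64_t/ktime_get_real_seconds(), which does not overflow in 2038 on 32-bit systems. A toy example of the converted pattern; the structure and timeout are placeholders:

/*
 * Illustration only: record a death time and test expiry using 64-bit
 * seconds throughout.
 */
struct example_grave {
	time64_t time_of_death;
};

static void example_bury(struct example_grave *g)
{
	g->time_of_death = ktime_get_real_seconds();
}

static bool example_expired(const struct example_grave *g,
			    unsigned int timeout_secs)
{
	return ktime_get_real_seconds() >= g->time_of_death + timeout_secs;
}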
afs_put_writeback(candidate); @@ -257,7 +255,9 @@ int afs_write_end(struct file *file, struct address_space *mapping, struct page *page, void *fsdata) { struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); + struct key *key = file->private_data; loff_t i_size, maybe_i_size; + int ret; _enter("{%x:%u},{%lx}", vnode->fid.vid, vnode->fid.vnode, page->index); @@ -273,6 +273,20 @@ int afs_write_end(struct file *file, struct address_space *mapping, spin_unlock(&vnode->writeback_lock); } + if (!PageUptodate(page)) { + if (copied < len) { + /* Try and load any missing data from the server. The + * unmarshalling routine will take care of clearing any + * bits that are beyond the EOF. + */ + ret = afs_fill_page(vnode, key, pos + copied, + len - copied, page); + if (ret < 0) + return ret; + } + SetPageUptodate(page); + } + set_page_dirty(page); if (PageDirty(page)) _debug("dirtied"); @@ -307,10 +321,14 @@ static void afs_kill_pages(struct afs_vnode *vnode, bool error, ASSERTCMP(pv.nr, ==, count); for (loop = 0; loop < count; loop++) { - ClearPageUptodate(pv.pages[loop]); + struct page *page = pv.pages[loop]; + ClearPageUptodate(page); if (error) - SetPageError(pv.pages[loop]); - end_page_writeback(pv.pages[loop]); + SetPageError(page); + if (PageWriteback(page)) + end_page_writeback(page); + if (page->index >= first) + first = page->index + 1; } __pagevec_release(&pv); @@ -335,8 +353,6 @@ static int afs_write_back_from_locked_page(struct afs_writeback *wb, _enter(",%lx", primary_page->index); count = 1; - if (!clear_page_dirty_for_io(primary_page)) - BUG(); if (test_set_page_writeback(primary_page)) BUG(); @@ -502,17 +518,17 @@ static int afs_writepages_region(struct address_space *mapping, */ lock_page(page); - if (page->mapping != mapping) { + if (page->mapping != mapping || !PageDirty(page)) { unlock_page(page); put_page(page); continue; } - if (wbc->sync_mode != WB_SYNC_NONE) - wait_on_page_writeback(page); - - if (PageWriteback(page) || !PageDirty(page)) { + if (PageWriteback(page)) { unlock_page(page); + if (wbc->sync_mode != WB_SYNC_NONE) + wait_on_page_writeback(page); + put_page(page); continue; } @@ -523,6 +539,8 @@ static int afs_writepages_region(struct address_space *mapping, wb->state = AFS_WBACK_WRITING; spin_unlock(&wb->vnode->writeback_lock); + if (!clear_page_dirty_for_io(page)) + BUG(); ret = afs_write_back_from_locked_page(wb, page); unlock_page(page); put_page(page); @@ -745,6 +763,20 @@ int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync) return ret; } +/* + * Flush out all outstanding writes on a file opened for writing when it is + * closed. 
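afs_write_end() above now copes with a short copy into a page that was never brought up to date: the missing bytes are read back from the server before the page is marked up to date and dirtied. A condensed sketch of that control flow; example_fill_from_server() stands in for afs_fill_page() and is not a real function:

/*
 * Hypothetical condensed ->write_end() tail; assumes linux/pagemap.h.
 */
static int example_fill_from_server(struct page *page, loff_t pos,
				    unsigned int len);	/* afs_fill_page() stand-in */

static int example_write_end_tail(struct page *page, loff_t pos,
				  unsigned int len, unsigned int copied)
{
	int ret;

	if (!PageUptodate(page)) {
		if (copied < len) {
			/* pull the uncopied tail from the server */
			ret = example_fill_from_server(page, pos + copied,
						       len - copied);
			if (ret < 0)
				return ret;
		}
		SetPageUptodate(page);
	}

	set_page_dirty(page);
	return copied;
}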
+ */ +int afs_flush(struct file *file, fl_owner_t id) +{ + _enter(""); + + if ((file->f_mode & FMODE_WRITE) == 0) + return 0; + + return vfs_fsync(file, 0); +} + /* * notification that a previously read-only page is about to become writable * - if it returns an error, the caller will deliver a bus error signal diff --git a/fs/aio.c b/fs/aio.c index 7e2ab9c8e39c27..f52d925ee2599d 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -20,7 +20,7 @@ #include #include -#include +#include #include #include #include @@ -1495,7 +1495,7 @@ static ssize_t aio_read(struct kiocb *req, struct iocb *iocb, bool vectored, return ret; ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter)); if (!ret) - ret = aio_ret(req, file->f_op->read_iter(req, &iter)); + ret = aio_ret(req, call_read_iter(file, req, &iter)); kfree(iovec); return ret; } @@ -1520,7 +1520,7 @@ static ssize_t aio_write(struct kiocb *req, struct iocb *iocb, bool vectored, if (!ret) { req->ki_flags |= IOCB_WRITE; file_start_write(file); - ret = aio_ret(req, file->f_op->write_iter(req, &iter)); + ret = aio_ret(req, call_write_iter(file, req, &iter)); /* * We release freeze protection in aio_complete(). Fool lockdep * by telling it the lock got released so that it doesn't diff --git a/fs/attr.c b/fs/attr.c index c902b3d5350800..135304146120bc 100644 --- a/fs/attr.c +++ b/fs/attr.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h index c885daae68c8ee..beef981aa54f34 100644 --- a/fs/autofs4/autofs_i.h +++ b/fs/autofs4/autofs_i.h @@ -14,6 +14,7 @@ #include #include #include +#include /* This is the range of ioctl() numbers we claim as ours */ #define AUTOFS_IOC_FIRST AUTOFS_IOC_READY diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c index 806df746f1a93b..734cbf8d9676bd 100644 --- a/fs/autofs4/dev-ioctl.c +++ b/fs/autofs4/dev-ioctl.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c index 79fbd85db4baa3..24a58bf9ca72ce 100644 --- a/fs/autofs4/waitq.c +++ b/fs/autofs4/waitq.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include "autofs_i.h" diff --git a/fs/bad_inode.c b/fs/bad_inode.c index 5f685c81929818..bb53728c7a31b0 100644 --- a/fs/bad_inode.c +++ b/fs/bad_inode.c @@ -89,8 +89,8 @@ static int bad_inode_permission(struct inode *inode, int mask) return -EIO; } -static int bad_inode_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +static int bad_inode_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) { return -EIO; } diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index 19407165f4aad9..c500e954debba1 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include "befs.h" diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c index 2a59139f520b96..9be82c4e14a409 100644 --- a/fs/binfmt_aout.c +++ b/fs/binfmt_aout.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 443a6f537d569f..5075fd5c62c86d 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -35,6 +35,10 @@ #include #include #include +#include +#include +#include +#include #include #include #include diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index ffca4bbc3d63a1..cf93a4fad01218 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -15,6 +15,9 @@ 
#include #include #include +#include +#include +#include #include #include #include diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c index 9b2917a3029406..2edcefc0a2949a 100644 --- a/fs/binfmt_flat.c +++ b/fs/binfmt_flat.c @@ -19,6 +19,7 @@ #include #include +#include #include #include #include diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c index 9b4688ab1d8e0f..bee1a36bc2ec4e 100644 --- a/fs/binfmt_misc.c +++ b/fs/binfmt_misc.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/fs/block_dev.c b/fs/block_dev.c index 77c30f15a02c31..2eca00ec43706b 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -870,6 +870,7 @@ static void init_once(void *foo) #ifdef CONFIG_SYSFS INIT_LIST_HEAD(&bdev->bd_holder_disks); #endif + bdev->bd_bdi = &noop_backing_dev_info; inode_init_once(&ei->vfs_inode); /* Initialize mutex for freeze. */ mutex_init(&bdev->bd_fsfreeze_mutex); @@ -884,8 +885,10 @@ static void bdev_evict_inode(struct inode *inode) spin_lock(&bdev_lock); list_del_init(&bdev->bd_list); spin_unlock(&bdev_lock); - if (bdev->bd_bdi != &noop_backing_dev_info) + if (bdev->bd_bdi != &noop_backing_dev_info) { bdi_put(bdev->bd_bdi); + bdev->bd_bdi = &noop_backing_dev_info; + } } static const struct super_operations bdev_sops = { @@ -988,7 +991,6 @@ struct block_device *bdget(dev_t dev) bdev->bd_contains = NULL; bdev->bd_super = NULL; bdev->bd_inode = inode; - bdev->bd_bdi = &noop_backing_dev_info; bdev->bd_block_size = i_blocksize(inode); bdev->bd_part_count = 0; bdev->bd_invalidated = 0; diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 819a6d27218a90..0c6baaba0651ce 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -237,20 +237,20 @@ static inline u64 btrfs_ino(struct btrfs_inode *inode) return ino; } -static inline void btrfs_i_size_write(struct inode *inode, u64 size) +static inline void btrfs_i_size_write(struct btrfs_inode *inode, u64 size) { - i_size_write(inode, size); - BTRFS_I(inode)->disk_i_size = size; + i_size_write(&inode->vfs_inode, size); + inode->disk_i_size = size; } -static inline bool btrfs_is_free_space_inode(struct inode *inode) +static inline bool btrfs_is_free_space_inode(struct btrfs_inode *inode) { - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_root *root = inode->root; if (root == root->fs_info->tree_root && - btrfs_ino(BTRFS_I(inode)) != BTRFS_BTREE_INODE_OBJECTID) + btrfs_ino(inode) != BTRFS_BTREE_INODE_OBJECTID) return true; - if (BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) + if (inode->location.objectid == BTRFS_FREE_INO_OBJECTID) return true; return false; } @@ -311,34 +311,33 @@ struct btrfs_dio_private { * to grab i_mutex. It is used to avoid the endless truncate due to * nonlocked dio read. 
*/ -static inline void btrfs_inode_block_unlocked_dio(struct inode *inode) +static inline void btrfs_inode_block_unlocked_dio(struct btrfs_inode *inode) { - set_bit(BTRFS_INODE_READDIO_NEED_LOCK, &BTRFS_I(inode)->runtime_flags); + set_bit(BTRFS_INODE_READDIO_NEED_LOCK, &inode->runtime_flags); smp_mb(); } -static inline void btrfs_inode_resume_unlocked_dio(struct inode *inode) +static inline void btrfs_inode_resume_unlocked_dio(struct btrfs_inode *inode) { smp_mb__before_atomic(); - clear_bit(BTRFS_INODE_READDIO_NEED_LOCK, - &BTRFS_I(inode)->runtime_flags); + clear_bit(BTRFS_INODE_READDIO_NEED_LOCK, &inode->runtime_flags); } -static inline void btrfs_print_data_csum_error(struct inode *inode, +static inline void btrfs_print_data_csum_error(struct btrfs_inode *inode, u64 logical_start, u32 csum, u32 csum_expected, int mirror_num) { - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_root *root = inode->root; /* Output minus objectid, which is more meaningful */ if (root->objectid >= BTRFS_LAST_FREE_OBJECTID) btrfs_warn_rl(root->fs_info, "csum failed root %lld ino %lld off %llu csum 0x%08x expected csum 0x%08x mirror %d", - root->objectid, btrfs_ino(BTRFS_I(inode)), + root->objectid, btrfs_ino(inode), logical_start, csum, csum_expected, mirror_num); else btrfs_warn_rl(root->fs_info, "csum failed root %llu ino %llu off %llu csum 0x%08x expected csum 0x%08x mirror %d", - root->objectid, btrfs_ino(BTRFS_I(inode)), + root->objectid, btrfs_ino(inode), logical_start, csum, csum_expected, mirror_num); } diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 903c32c9eb2221..c7721a6aa3bb53 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -100,7 +100,7 @@ static struct bio *compressed_bio_alloc(struct block_device *bdev, return btrfs_bio_alloc(bdev, first_byte >> 9, BIO_MAX_PAGES, gfp_flags); } -static int check_compressed_csum(struct inode *inode, +static int check_compressed_csum(struct btrfs_inode *inode, struct compressed_bio *cb, u64 disk_start) { @@ -111,7 +111,7 @@ static int check_compressed_csum(struct inode *inode, u32 csum; u32 *cb_sum = &cb->sums; - if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) + if (inode->flags & BTRFS_INODE_NODATASUM) return 0; for (i = 0; i < cb->nr_pages; i++) { @@ -125,7 +125,7 @@ static int check_compressed_csum(struct inode *inode, if (csum != *cb_sum) { btrfs_print_data_csum_error(inode, disk_start, csum, - *cb_sum, cb->mirror_num); + *cb_sum, cb->mirror_num); ret = -EIO; goto fail; } @@ -165,7 +165,7 @@ static void end_compressed_bio_read(struct bio *bio) goto out; inode = cb->inode; - ret = check_compressed_csum(inode, cb, + ret = check_compressed_csum(BTRFS_I(inode), cb, (u64)bio->bi_iter.bi_sector << 9); if (ret) goto csum_failed; @@ -911,32 +911,28 @@ static void free_workspaces(void) } /* - * given an address space and start/len, compress the bytes. + * Given an address space and start and length, compress the bytes into @pages + * that are allocated on demand. * - * pages are allocated to hold the compressed result and stored - * in 'pages' + * @out_pages is an in/out parameter, holds maximum number of pages to allocate + * and returns number of actually allocated pages * - * out_pages is used to return the number of pages allocated. There - * may be pages allocated even if we return an error - * - * total_in is used to return the number of bytes actually read. It - * may be smaller then len if we had to exit early because we + * @total_in is used to return the number of bytes actually read. 
It + * may be smaller than the input length if we had to exit early because we * ran out of room in the pages array or because we cross the * max_out threshold. * - * total_out is used to return the total number of compressed bytes + * @total_out is an in/out parameter, must be set to the input length and will + * be also used to return the total number of compressed bytes * - * max_out tells us the max number of bytes that we're allowed to + * @max_out tells us the max number of bytes that we're allowed to * stuff into pages */ int btrfs_compress_pages(int type, struct address_space *mapping, - u64 start, unsigned long len, - struct page **pages, - unsigned long nr_dest_pages, + u64 start, struct page **pages, unsigned long *out_pages, unsigned long *total_in, - unsigned long *total_out, - unsigned long max_out) + unsigned long *total_out) { struct list_head *workspace; int ret; @@ -944,10 +940,9 @@ int btrfs_compress_pages(int type, struct address_space *mapping, workspace = find_workspace(type); ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping, - start, len, pages, - nr_dest_pages, out_pages, - total_in, total_out, - max_out); + start, pages, + out_pages, + total_in, total_out); free_workspace(type, workspace); return ret; } @@ -1015,7 +1010,7 @@ void btrfs_exit_compress(void) * * total_out is the last byte of the buffer */ -int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, +int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start, unsigned long total_out, u64 disk_start, struct bio *bio) { diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index 09879579fbc83d..39ec43ab8df1b7 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -19,20 +19,32 @@ #ifndef __BTRFS_COMPRESSION_ #define __BTRFS_COMPRESSION_ +/* + * We want to make sure that amount of RAM required to uncompress an extent is + * reasonable, so we limit the total size in ram of a compressed extent to + * 128k. This is a crucial number because it also controls how easily we can + * spread reads across cpus for decompression. + * + * We also want to make sure the amount of IO required to do a random read is + * reasonably small, so we limit the size of a compressed extent to 128k. 
+ */ + +/* Maximum length of compressed data stored on disk */ +#define BTRFS_MAX_COMPRESSED (SZ_128K) +/* Maximum size of data before compression */ +#define BTRFS_MAX_UNCOMPRESSED (SZ_128K) + void btrfs_init_compress(void); void btrfs_exit_compress(void); int btrfs_compress_pages(int type, struct address_space *mapping, - u64 start, unsigned long len, - struct page **pages, - unsigned long nr_dest_pages, + u64 start, struct page **pages, unsigned long *out_pages, unsigned long *total_in, - unsigned long *total_out, - unsigned long max_out); + unsigned long *total_out); int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page, unsigned long start_byte, size_t srclen, size_t destlen); -int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, +int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start, unsigned long total_out, u64 disk_start, struct bio *bio); @@ -59,13 +71,11 @@ struct btrfs_compress_op { int (*compress_pages)(struct list_head *workspace, struct address_space *mapping, - u64 start, unsigned long len, + u64 start, struct page **pages, - unsigned long nr_dest_pages, unsigned long *out_pages, unsigned long *total_in, - unsigned long *total_out, - unsigned long max_out); + unsigned long *total_out); int (*decompress_bio)(struct list_head *workspace, struct page **pages_in, diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 1192bc7d2ee782..7dc8844037e03b 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -453,8 +453,6 @@ __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm) struct rb_node *parent = NULL; struct tree_mod_elem *cur; - BUG_ON(!tm); - tm->seq = btrfs_inc_tree_mod_seq(fs_info); tm_root = &fs_info->tree_mod_log; @@ -4159,6 +4157,9 @@ static noinline int push_for_double_split(struct btrfs_trans_handle *trans, /* try to push all the items before our slot into the next leaf */ slot = path->slots[0]; + space_needed = data_size; + if (slot > 0) + space_needed -= btrfs_leaf_free_space(fs_info, path->nodes[0]); ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot); if (ret < 0) return ret; @@ -4214,6 +4215,10 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans, if (wret < 0) return wret; if (wret) { + space_needed = data_size; + if (slot > 0) + space_needed -= btrfs_leaf_free_space(fs_info, + l); wret = push_leaf_left(trans, root, path, space_needed, space_needed, 0, (u32)-1); if (wret < 0) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 105d4d43993e9f..c4115901d9064f 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -20,6 +20,7 @@ #define __BTRFS_CTREE__ #include +#include #include #include #include @@ -1258,7 +1259,7 @@ struct btrfs_root { atomic_t will_be_snapshoted; /* For qgroup metadata space reserve */ - atomic_t qgroup_meta_rsv; + atomic64_t qgroup_meta_rsv; }; static inline u32 btrfs_inode_sectorsize(const struct inode *inode) { @@ -2687,7 +2688,7 @@ enum btrfs_flush_state { }; int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len); -int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes); +int btrfs_alloc_data_chunk_ondemand(struct btrfs_inode *inode, u64 bytes); void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len); void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start, u64 len); @@ -2695,16 +2696,16 @@ void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info); void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans); 
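The reworked btrfs_compress_pages() prototype above folds the old len/nr_dest_pages/max_out arguments into in/out parameters: *out_pages carries the page budget in and the pages actually used out, and *total_out carries the input length in and the compressed size out. A hedged usage sketch; the wrapper function and its values are invented, while BTRFS_COMPRESS_ZLIB and SZ_128K are existing constants:

/*
 * Hypothetical caller showing the in/out convention of the new prototype.
 */
static int example_compress_range(struct address_space *mapping, u64 start,
				  struct page **pages, unsigned long max_pages)
{
	unsigned long nr_pages = max_pages;	/* in: budget, out: pages used */
	unsigned long total_in = 0;		/* out: bytes consumed */
	unsigned long total_out = SZ_128K;	/* in: range length, out: bytes produced */
	int ret;

	ret = btrfs_compress_pages(BTRFS_COMPRESS_ZLIB, mapping, start, pages,
				   &nr_pages, &total_in, &total_out);
	if (ret)
		return ret;

	/* pages[0..nr_pages-1] now hold total_out bytes of compressed data */
	return 0;
}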
int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans, - struct inode *inode); -void btrfs_orphan_release_metadata(struct inode *inode); + struct btrfs_inode *inode); +void btrfs_orphan_release_metadata(struct btrfs_inode *inode); int btrfs_subvolume_reserve_metadata(struct btrfs_root *root, struct btrfs_block_rsv *rsv, int nitems, u64 *qgroup_reserved, bool use_global_rsv); void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info, struct btrfs_block_rsv *rsv); -int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes); -void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes); +int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes); +void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes); int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len); void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len); void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type); @@ -2982,7 +2983,7 @@ int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir, const char *name, int name_len); int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, const char *name, - int name_len, struct inode *dir, + int name_len, struct btrfs_inode *dir, struct btrfs_key *location, u8 type, u64 index); struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, @@ -3081,7 +3082,7 @@ int btrfs_csum_one_bio(struct inode *inode, struct bio *bio, u64 file_start, int contig); int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, struct list_head *list, int search_commit); -void btrfs_extent_item_to_extent_map(struct inode *inode, +void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode, const struct btrfs_path *path, struct btrfs_file_extent_item *fi, const bool new_inline, @@ -3100,9 +3101,9 @@ struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode, int delay_iput); void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work); -struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page, - size_t pg_offset, u64 start, u64 len, - int create); +struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode, + struct page *page, size_t pg_offset, u64 start, + u64 len, int create); noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, u64 *orig_start, u64 *orig_block_len, u64 *ram_bytes); @@ -3123,13 +3124,13 @@ static inline void btrfs_force_ra(struct address_space *mapping, } struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry); -int btrfs_set_inode_index(struct inode *dir, u64 *index); +int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index); int btrfs_unlink_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_inode *dir, struct btrfs_inode *inode, const char *name, int name_len); int btrfs_add_link(struct btrfs_trans_handle *trans, - struct inode *parent_inode, struct inode *inode, + struct btrfs_inode *parent_inode, struct btrfs_inode *inode, const char *name, int name_len, int add_backref, u64 index); int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, struct btrfs_root *root, @@ -3166,15 +3167,16 @@ void btrfs_destroy_cachep(void); long btrfs_ioctl_trans_end(struct file *file); struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, struct btrfs_root *root, int *was_new); -struct extent_map 
*btrfs_get_extent(struct inode *inode, struct page *page, - size_t pg_offset, u64 start, u64 end, - int create); +struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, + struct page *page, size_t pg_offset, + u64 start, u64 end, int create); int btrfs_update_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode); int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode); -int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode); +int btrfs_orphan_add(struct btrfs_trans_handle *trans, + struct btrfs_inode *inode); int btrfs_orphan_cleanup(struct btrfs_root *root); void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans, struct btrfs_root *root); @@ -3215,11 +3217,11 @@ ssize_t btrfs_dedupe_file_range(struct file *src_file, u64 loff, u64 olen, int btrfs_auto_defrag_init(void); void btrfs_auto_defrag_exit(void); int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, - struct inode *inode); + struct btrfs_inode *inode); int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info); void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info); int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync); -void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, +void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end, int skip_pinned); extern const struct file_operations btrfs_file_operations; int __btrfs_drop_extents(struct btrfs_trans_handle *trans, @@ -3233,7 +3235,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode, u64 start, u64 end, int drop_cache); int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, - struct inode *inode, u64 start, u64 end); + struct btrfs_inode *inode, u64 start, u64 end); int btrfs_release_file(struct inode *inode, struct file *file); int btrfs_dirty_pages(struct inode *inode, struct page **pages, size_t num_pages, loff_t pos, size_t write_bytes, diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index f7a6ee5ccc809a..1aff676f0e5b5b 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -1790,7 +1790,7 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev) i_uid_write(inode, btrfs_stack_inode_uid(inode_item)); i_gid_write(inode, btrfs_stack_inode_gid(inode_item)); - btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item)); + btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item)); inode->i_mode = btrfs_stack_inode_mode(inode_item); set_nlink(inode, btrfs_stack_inode_nlink(inode_item)); inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item)); diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 5de280b9ad7303..e653921f05d939 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -304,8 +304,9 @@ void btrfs_after_dev_replace_commit(struct btrfs_fs_info *fs_info) dev_replace->cursor_left_last_write_of_item; } -int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info, char *tgtdev_name, - u64 srcdevid, char *srcdev_name, int read_src) +int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info, + const char *tgtdev_name, u64 srcdevid, const char *srcdev_name, + int read_src) { struct btrfs_root *root = fs_info->dev_root; struct btrfs_trans_handle *trans; diff --git a/fs/btrfs/dev-replace.h b/fs/btrfs/dev-replace.h index 54ea12bda15b30..f94a76844ae742 100644 --- a/fs/btrfs/dev-replace.h +++ b/fs/btrfs/dev-replace.h @@ -27,8 +27,9 @@ int 
btrfs_run_dev_replace(struct btrfs_trans_handle *trans, void btrfs_after_dev_replace_commit(struct btrfs_fs_info *fs_info); int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info, struct btrfs_ioctl_dev_replace_args *args); -int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info, char *tgtdev_name, - u64 srcdevid, char *srcdev_name, int read_src); +int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info, + const char *tgtdev_name, u64 srcdevid, const char *srcdev_name, + int read_src); void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info, struct btrfs_ioctl_dev_replace_args *args); int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info, diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c index 724504a2d7ac56..60a750678a82b3 100644 --- a/fs/btrfs/dir-item.c +++ b/fs/btrfs/dir-item.c @@ -80,7 +80,8 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans, struct extent_buffer *leaf; u32 data_size; - BUG_ON(name_len + data_len > BTRFS_MAX_XATTR_SIZE(root->fs_info)); + if (name_len + data_len > BTRFS_MAX_XATTR_SIZE(root->fs_info)) + return -ENOSPC; key.objectid = objectid; key.type = BTRFS_XATTR_ITEM_KEY; @@ -120,7 +121,7 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans, */ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, const char *name, int name_len, - struct inode *dir, struct btrfs_key *location, + struct btrfs_inode *dir, struct btrfs_key *location, u8 type, u64 index) { int ret = 0; @@ -133,7 +134,7 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root struct btrfs_disk_key disk_key; u32 data_size; - key.objectid = btrfs_ino(BTRFS_I(dir)); + key.objectid = btrfs_ino(dir); key.type = BTRFS_DIR_ITEM_KEY; key.offset = btrfs_name_hash(name, name_len); @@ -174,7 +175,7 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root btrfs_release_path(path); ret2 = btrfs_insert_delayed_dir_index(trans, root->fs_info, name, - name_len, BTRFS_I(dir), &disk_key, type, index); + name_len, dir, &disk_key, type, index); out_free: btrfs_free_path(path); if (ret) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 207db0270b1502..eb1ee7b6f532b7 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -219,12 +219,12 @@ void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, * extents on the btree inode are pretty simple, there's one extent * that covers the entire device */ -static struct extent_map *btree_get_extent(struct inode *inode, +static struct extent_map *btree_get_extent(struct btrfs_inode *inode, struct page *page, size_t pg_offset, u64 start, u64 len, int create) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); + struct extent_map_tree *em_tree = &inode->extent_tree; struct extent_map *em; int ret; @@ -265,7 +265,7 @@ static struct extent_map *btree_get_extent(struct inode *inode, return em; } -u32 btrfs_csum_data(char *data, u32 seed, size_t len) +u32 btrfs_csum_data(const char *data, u32 seed, size_t len) { return btrfs_crc32c(seed, data, len); } @@ -1342,7 +1342,7 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info, atomic_set(&root->orphan_inodes, 0); atomic_set(&root->refs, 1); atomic_set(&root->will_be_snapshoted, 0); - atomic_set(&root->qgroup_meta_rsv, 0); + atomic64_set(&root->qgroup_meta_rsv, 0); root->log_transid = 0; root->log_transid_committed = -1; 
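qgroup_meta_rsv becomes an atomic64_t in this patch (with the matching atomic64_set() in __setup_root() above), presumably so the reserved byte count cannot wrap the 32-bit range of a plain atomic_t. A minimal illustration of the accessor change, using a stand-in structure rather than btrfs_root itself:

/*
 * Toy counter demonstrating the atomic_t -> atomic64_t accessor swap.
 */
struct example_rsv {
	atomic64_t qgroup_meta_rsv;		/* bytes reserved */
};

static void example_rsv_init(struct example_rsv *r)
{
	atomic64_set(&r->qgroup_meta_rsv, 0);
}

static void example_rsv_add(struct example_rsv *r, u64 bytes)
{
	atomic64_add(bytes, &r->qgroup_meta_rsv);
}

static u64 example_rsv_drain(struct example_rsv *r)
{
	return atomic64_xchg(&r->qgroup_meta_rsv, 0);
}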
root->last_log_commit = 0; @@ -2205,11 +2205,9 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info) btrfs_destroy_workqueue(fs_info->delalloc_workers); btrfs_destroy_workqueue(fs_info->workers); btrfs_destroy_workqueue(fs_info->endio_workers); - btrfs_destroy_workqueue(fs_info->endio_meta_workers); btrfs_destroy_workqueue(fs_info->endio_raid56_workers); btrfs_destroy_workqueue(fs_info->endio_repair_workers); btrfs_destroy_workqueue(fs_info->rmw_workers); - btrfs_destroy_workqueue(fs_info->endio_meta_write_workers); btrfs_destroy_workqueue(fs_info->endio_write_workers); btrfs_destroy_workqueue(fs_info->endio_freespace_worker); btrfs_destroy_workqueue(fs_info->submit_workers); @@ -2219,6 +2217,13 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info) btrfs_destroy_workqueue(fs_info->flush_workers); btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers); btrfs_destroy_workqueue(fs_info->extent_workers); + /* + * Now that all other work queues are destroyed, we can safely destroy + * the queues used for metadata I/O, since tasks from those other work + * queues can do metadata I/O operations. + */ + btrfs_destroy_workqueue(fs_info->endio_meta_workers); + btrfs_destroy_workqueue(fs_info->endio_meta_write_workers); } static void free_root_extent_buffers(struct btrfs_root *root) @@ -3261,7 +3266,6 @@ int open_ctree(struct super_block *sb, fail_block_groups: btrfs_put_block_group_cache(fs_info); - btrfs_free_block_groups(fs_info); fail_tree_roots: free_root_pointers(fs_info, 1); @@ -3269,6 +3273,7 @@ int open_ctree(struct super_block *sb, fail_sb_buffer: btrfs_stop_all_workers(fs_info); + btrfs_free_block_groups(fs_info); fail_alloc: fail_iput: btrfs_mapping_tree_free(&fs_info->mapping_tree); @@ -3448,7 +3453,7 @@ static int write_dev_supers(struct btrfs_device *device, btrfs_set_super_bytenr(sb, bytenr); crc = ~(u32)0; - crc = btrfs_csum_data((char *)sb + + crc = btrfs_csum_data((const char *)sb + BTRFS_CSUM_SIZE, crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE); @@ -3977,8 +3982,6 @@ void close_ctree(struct btrfs_fs_info *fs_info) btrfs_put_block_group_cache(fs_info); - btrfs_free_block_groups(fs_info); - /* * we must make sure there is not any read request to * submit after we stopping all workers. 
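The comment added to btrfs_stop_all_workers() above explains why the metadata endio queues are now destroyed last: work still running on the other queues may submit metadata I/O to them. The general rule in a toy form; the queue names are placeholders, not the btrfs ones:

/*
 * Destroy queues in dependency order: a queue whose work items can still
 * queue work onto another queue must go first.
 */
static void example_teardown(struct workqueue_struct *producers,
			     struct workqueue_struct *consumers)
{
	destroy_workqueue(producers);	/* its work may have fed consumers */
	destroy_workqueue(consumers);	/* safe only once producers are gone */
}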
@@ -3986,6 +3989,8 @@ void close_ctree(struct btrfs_fs_info *fs_info) invalidate_inode_pages2(fs_info->btree_inode->i_mapping); btrfs_stop_all_workers(fs_info); + btrfs_free_block_groups(fs_info); + clear_bit(BTRFS_FS_OPEN, &fs_info->flags); free_root_pointers(fs_info, 1); @@ -4653,9 +4658,12 @@ static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info) } static const struct extent_io_ops btree_extent_io_ops = { - .readpage_end_io_hook = btree_readpage_end_io_hook, - .readpage_io_failed_hook = btree_io_failed_hook, + /* mandatory callbacks */ .submit_bio_hook = btree_submit_bio_hook, + .readpage_end_io_hook = btree_readpage_end_io_hook, /* note we're sharing with inode.c for the merge bio hook */ .merge_bio_hook = btrfs_merge_bio_hook, + .readpage_io_failed_hook = btree_io_failed_hook, + + /* optional callbacks */ }; diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index 0be2d4fe705b4d..2e0ec29bfd69f0 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -116,7 +116,7 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf); int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid, int atomic); int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid); -u32 btrfs_csum_data(char *data, u32 seed, size_t len); +u32 btrfs_csum_data(const char *data, u32 seed, size_t len); void btrfs_csum_final(u32 crc, u8 *result); int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio, enum btrfs_wq_endio_type metadata); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index c35b966335543c..be5477676cc829 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -16,6 +16,7 @@ * Boston, MA 021110-1307, USA. */ #include +#include #include #include #include @@ -4135,10 +4136,10 @@ static u64 btrfs_space_info_used(struct btrfs_space_info *s_info, (may_use_included ? 
s_info->bytes_may_use : 0); } -int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes) +int btrfs_alloc_data_chunk_ondemand(struct btrfs_inode *inode, u64 bytes) { struct btrfs_space_info *data_sinfo; - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_root *root = inode->root; struct btrfs_fs_info *fs_info = root->fs_info; u64 used; int ret = 0; @@ -4281,7 +4282,7 @@ int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len) round_down(start, fs_info->sectorsize); start = round_down(start, fs_info->sectorsize); - ret = btrfs_alloc_data_chunk_ondemand(inode, len); + ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode), len); if (ret < 0) return ret; @@ -5742,10 +5743,10 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans) /* Can only return 0 or -ENOSPC */ int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans, - struct inode *inode) + struct btrfs_inode *inode) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); + struct btrfs_root *root = inode->root; /* * We always use trans->block_rsv here as we will have reserved space * for our orphan when starting the transaction, using get_block_rsv() @@ -5762,19 +5763,19 @@ int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans, */ u64 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1); - trace_btrfs_space_reservation(fs_info, "orphan", - btrfs_ino(BTRFS_I(inode)), num_bytes, 1); + trace_btrfs_space_reservation(fs_info, "orphan", btrfs_ino(inode), + num_bytes, 1); return btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1); } -void btrfs_orphan_release_metadata(struct inode *inode) +void btrfs_orphan_release_metadata(struct btrfs_inode *inode) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); + struct btrfs_root *root = inode->root; u64 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1); - trace_btrfs_space_reservation(fs_info, "orphan", - btrfs_ino(BTRFS_I(inode)), num_bytes, 0); + trace_btrfs_space_reservation(fs_info, "orphan", btrfs_ino(inode), + num_bytes, 0); btrfs_block_rsv_release(fs_info, root->orphan_block_rsv, num_bytes); } @@ -5846,7 +5847,8 @@ void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info, * reserved extents that need to be freed. This must be called with * BTRFS_I(inode)->lock held. */ -static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes) +static unsigned drop_outstanding_extent(struct btrfs_inode *inode, + u64 num_bytes) { unsigned drop_inode_space = 0; unsigned dropped_extents = 0; @@ -5854,25 +5856,23 @@ static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes) num_extents = count_max_extents(num_bytes); ASSERT(num_extents); - ASSERT(BTRFS_I(inode)->outstanding_extents >= num_extents); - BTRFS_I(inode)->outstanding_extents -= num_extents; + ASSERT(inode->outstanding_extents >= num_extents); + inode->outstanding_extents -= num_extents; - if (BTRFS_I(inode)->outstanding_extents == 0 && + if (inode->outstanding_extents == 0 && test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED, - &BTRFS_I(inode)->runtime_flags)) + &inode->runtime_flags)) drop_inode_space = 1; /* * If we have more or the same amount of outstanding extents than we have * reserved then we need to leave the reserved extents count alone. 
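Most of the extent-tree churn above and below is the same mechanical conversion: internal helpers now take struct btrfs_inode * and reach VFS state through ->vfs_inode, while callers that only hold a struct inode * convert once at the boundary with BTRFS_I(). The shape of the pattern, with an invented helper:

/*
 * Hypothetical helper/caller pair showing the btrfs_inode conversion.
 */
static u64 example_metadata_size(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);

	return btrfs_calc_trans_metadata_size(fs_info, 1);
}

static u64 example_caller(struct inode *inode)
{
	return example_metadata_size(BTRFS_I(inode));	/* convert at the edge */
}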
*/ - if (BTRFS_I(inode)->outstanding_extents >= - BTRFS_I(inode)->reserved_extents) + if (inode->outstanding_extents >= inode->reserved_extents) return drop_inode_space; - dropped_extents = BTRFS_I(inode)->reserved_extents - - BTRFS_I(inode)->outstanding_extents; - BTRFS_I(inode)->reserved_extents -= dropped_extents; + dropped_extents = inode->reserved_extents - inode->outstanding_extents; + inode->reserved_extents -= dropped_extents; return dropped_extents + drop_inode_space; } @@ -5894,24 +5894,21 @@ static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes) * * This must be called with BTRFS_I(inode)->lock held. */ -static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes, +static u64 calc_csum_metadata_size(struct btrfs_inode *inode, u64 num_bytes, int reserve) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); u64 old_csums, num_csums; - if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM && - BTRFS_I(inode)->csum_bytes == 0) + if (inode->flags & BTRFS_INODE_NODATASUM && inode->csum_bytes == 0) return 0; - old_csums = btrfs_csum_bytes_to_leaves(fs_info, - BTRFS_I(inode)->csum_bytes); + old_csums = btrfs_csum_bytes_to_leaves(fs_info, inode->csum_bytes); if (reserve) - BTRFS_I(inode)->csum_bytes += num_bytes; + inode->csum_bytes += num_bytes; else - BTRFS_I(inode)->csum_bytes -= num_bytes; - num_csums = btrfs_csum_bytes_to_leaves(fs_info, - BTRFS_I(inode)->csum_bytes); + inode->csum_bytes -= num_bytes; + num_csums = btrfs_csum_bytes_to_leaves(fs_info, inode->csum_bytes); /* No change, no need to reserve more */ if (old_csums == num_csums) @@ -5924,10 +5921,10 @@ static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes, return btrfs_calc_trans_metadata_size(fs_info, old_csums - num_csums); } -int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) +int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); + struct btrfs_root *root = inode->root; struct btrfs_block_rsv *block_rsv = &fs_info->delalloc_block_rsv; u64 to_reserve = 0; u64 csum_bytes; @@ -5959,25 +5956,24 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) schedule_timeout(1); if (delalloc_lock) - mutex_lock(&BTRFS_I(inode)->delalloc_mutex); + mutex_lock(&inode->delalloc_mutex); num_bytes = ALIGN(num_bytes, fs_info->sectorsize); - spin_lock(&BTRFS_I(inode)->lock); + spin_lock(&inode->lock); nr_extents = count_max_extents(num_bytes); - BTRFS_I(inode)->outstanding_extents += nr_extents; + inode->outstanding_extents += nr_extents; nr_extents = 0; - if (BTRFS_I(inode)->outstanding_extents > - BTRFS_I(inode)->reserved_extents) - nr_extents += BTRFS_I(inode)->outstanding_extents - - BTRFS_I(inode)->reserved_extents; + if (inode->outstanding_extents > inode->reserved_extents) + nr_extents += inode->outstanding_extents - + inode->reserved_extents; /* We always want to reserve a slot for updating the inode. 
*/ to_reserve = btrfs_calc_trans_metadata_size(fs_info, nr_extents + 1); to_reserve += calc_csum_metadata_size(inode, num_bytes, 1); - csum_bytes = BTRFS_I(inode)->csum_bytes; - spin_unlock(&BTRFS_I(inode)->lock); + csum_bytes = inode->csum_bytes; + spin_unlock(&inode->lock); if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) { ret = btrfs_qgroup_reserve_meta(root, @@ -5993,38 +5989,38 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) goto out_fail; } - spin_lock(&BTRFS_I(inode)->lock); + spin_lock(&inode->lock); if (test_and_set_bit(BTRFS_INODE_DELALLOC_META_RESERVED, - &BTRFS_I(inode)->runtime_flags)) { + &inode->runtime_flags)) { to_reserve -= btrfs_calc_trans_metadata_size(fs_info, 1); release_extra = true; } - BTRFS_I(inode)->reserved_extents += nr_extents; - spin_unlock(&BTRFS_I(inode)->lock); + inode->reserved_extents += nr_extents; + spin_unlock(&inode->lock); if (delalloc_lock) - mutex_unlock(&BTRFS_I(inode)->delalloc_mutex); + mutex_unlock(&inode->delalloc_mutex); if (to_reserve) trace_btrfs_space_reservation(fs_info, "delalloc", - btrfs_ino(BTRFS_I(inode)), to_reserve, 1); + btrfs_ino(inode), to_reserve, 1); if (release_extra) btrfs_block_rsv_release(fs_info, block_rsv, btrfs_calc_trans_metadata_size(fs_info, 1)); return 0; out_fail: - spin_lock(&BTRFS_I(inode)->lock); + spin_lock(&inode->lock); dropped = drop_outstanding_extent(inode, num_bytes); /* * If the inodes csum_bytes is the same as the original * csum_bytes then we know we haven't raced with any free()ers * so we can just reduce our inodes csum bytes and carry on. */ - if (BTRFS_I(inode)->csum_bytes == csum_bytes) { + if (inode->csum_bytes == csum_bytes) { calc_csum_metadata_size(inode, num_bytes, 0); } else { - u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes; + u64 orig_csum_bytes = inode->csum_bytes; u64 bytes; /* @@ -6035,8 +6031,8 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) * number of bytes that were freed while we were trying our * reservation. */ - bytes = csum_bytes - BTRFS_I(inode)->csum_bytes; - BTRFS_I(inode)->csum_bytes = csum_bytes; + bytes = csum_bytes - inode->csum_bytes; + inode->csum_bytes = csum_bytes; to_free = calc_csum_metadata_size(inode, bytes, 0); @@ -6045,7 +6041,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) * been making this reservation and our ->csum_bytes were not * artificially inflated. */ - BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes; + inode->csum_bytes = csum_bytes - num_bytes; bytes = csum_bytes - orig_csum_bytes; bytes = calc_csum_metadata_size(inode, bytes, 0); @@ -6057,23 +6053,23 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) * need to do anything, the other free-ers did the correct * thing. 
*/ - BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes; + inode->csum_bytes = orig_csum_bytes - num_bytes; if (bytes > to_free) to_free = bytes - to_free; else to_free = 0; } - spin_unlock(&BTRFS_I(inode)->lock); + spin_unlock(&inode->lock); if (dropped) to_free += btrfs_calc_trans_metadata_size(fs_info, dropped); if (to_free) { btrfs_block_rsv_release(fs_info, block_rsv, to_free); trace_btrfs_space_reservation(fs_info, "delalloc", - btrfs_ino(BTRFS_I(inode)), to_free, 0); + btrfs_ino(inode), to_free, 0); } if (delalloc_lock) - mutex_unlock(&BTRFS_I(inode)->delalloc_mutex); + mutex_unlock(&inode->delalloc_mutex); return ret; } @@ -6086,27 +6082,27 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) * once we complete IO for a given set of bytes to release their metadata * reservations. */ -void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes) +void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); u64 to_free = 0; unsigned dropped; num_bytes = ALIGN(num_bytes, fs_info->sectorsize); - spin_lock(&BTRFS_I(inode)->lock); + spin_lock(&inode->lock); dropped = drop_outstanding_extent(inode, num_bytes); if (num_bytes) to_free = calc_csum_metadata_size(inode, num_bytes, 0); - spin_unlock(&BTRFS_I(inode)->lock); + spin_unlock(&inode->lock); if (dropped > 0) to_free += btrfs_calc_trans_metadata_size(fs_info, dropped); if (btrfs_is_testing(fs_info)) return; - trace_btrfs_space_reservation(fs_info, "delalloc", - btrfs_ino(BTRFS_I(inode)), to_free, 0); + trace_btrfs_space_reservation(fs_info, "delalloc", btrfs_ino(inode), + to_free, 0); btrfs_block_rsv_release(fs_info, &fs_info->delalloc_block_rsv, to_free); } @@ -6141,7 +6137,7 @@ int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len) ret = btrfs_check_data_free_space(inode, start, len); if (ret < 0) return ret; - ret = btrfs_delalloc_reserve_metadata(inode, len); + ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len); if (ret < 0) btrfs_free_reserved_data_space(inode, start, len); return ret; @@ -6164,7 +6160,7 @@ int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len) */ void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len) { - btrfs_delalloc_release_metadata(inode, len); + btrfs_delalloc_release_metadata(BTRFS_I(inode), len); btrfs_free_reserved_data_space(inode, start, len); } @@ -9740,6 +9736,11 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info) } } +/* + * Must be called only after stopping all workers, since we could have block + * group caching kthreads running, and therefore they could race with us if we + * freed the block groups before stopping them. + */ int btrfs_free_block_groups(struct btrfs_fs_info *info) { struct btrfs_block_group_cache *block_group; @@ -9779,9 +9780,6 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) list_del(&block_group->list); up_write(&block_group->space_info->groups_sem); - if (block_group->cached == BTRFS_CACHE_STARTED) - wait_block_group_cache_done(block_group); - /* * We haven't cached this block group, which means we could * possibly have excluded extents on this block group. 
@@ -9791,6 +9789,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) free_excluded_extents(info, block_group); btrfs_remove_free_space_cache(block_group); + ASSERT(block_group->cached != BTRFS_CACHE_STARTED); ASSERT(list_empty(&block_group->dirty_list)); ASSERT(list_empty(&block_group->io_list)); ASSERT(list_empty(&block_group->bg_list)); @@ -10342,7 +10341,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, mutex_unlock(&trans->transaction->cache_write_mutex); if (!IS_ERR(inode)) { - ret = btrfs_orphan_add(trans, inode); + ret = btrfs_orphan_add(trans, BTRFS_I(inode)); if (ret) { btrfs_add_delayed_iput(inode); goto out; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index d15b5ddb6732c6..27fdb250b4467f 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -428,7 +428,8 @@ static void clear_state_cb(struct extent_io_tree *tree, struct extent_state *state, unsigned *bits) { if (tree->ops && tree->ops->clear_bit_hook) - tree->ops->clear_bit_hook(tree->mapping->host, state, bits); + tree->ops->clear_bit_hook(BTRFS_I(tree->mapping->host), + state, bits); } static void set_state_bits(struct extent_io_tree *tree, @@ -1713,7 +1714,8 @@ static int __process_pages_contig(struct address_space *mapping, * can we find nothing at @index. */ ASSERT(page_ops & PAGE_LOCK); - return ret; + err = -EAGAIN; + goto out; } for (i = 0; i < ret; i++) { @@ -1959,11 +1961,11 @@ static void check_page_uptodate(struct extent_io_tree *tree, struct page *page) SetPageUptodate(page); } -int free_io_failure(struct inode *inode, struct io_failure_record *rec) +int free_io_failure(struct btrfs_inode *inode, struct io_failure_record *rec) { int ret; int err = 0; - struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree; + struct extent_io_tree *failure_tree = &inode->io_failure_tree; set_state_failrec(failure_tree, rec->start, NULL); ret = clear_extent_bits(failure_tree, rec->start, @@ -1972,7 +1974,7 @@ int free_io_failure(struct inode *inode, struct io_failure_record *rec) if (ret) err = ret; - ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start, + ret = clear_extent_bits(&inode->io_tree, rec->start, rec->start + rec->len - 1, EXTENT_DAMAGED); if (ret && !err) @@ -1992,10 +1994,11 @@ int free_io_failure(struct inode *inode, struct io_failure_record *rec) * currently, there can be no more than two copies of every data bit. thus, * exactly one rewrite is required. 
*/ -int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical, - struct page *page, unsigned int pg_offset, int mirror_num) +int repair_io_failure(struct btrfs_inode *inode, u64 start, u64 length, + u64 logical, struct page *page, + unsigned int pg_offset, int mirror_num) { - struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; + struct btrfs_fs_info *fs_info = inode->root->fs_info; struct bio *bio; struct btrfs_device *dev; u64 map_length = 0; @@ -2054,7 +2057,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical, btrfs_info_rl_in_rcu(fs_info, "read error corrected: ino %llu off %llu (dev %s sector %llu)", - btrfs_ino(BTRFS_I(inode)), start, + btrfs_ino(inode), start, rcu_str_deref(dev->name), sector); btrfs_bio_counter_dec(fs_info); bio_put(bio); @@ -2074,7 +2077,7 @@ int repair_eb_io_failure(struct btrfs_fs_info *fs_info, for (i = 0; i < num_pages; i++) { struct page *p = eb->pages[i]; - ret = repair_io_failure(fs_info->btree_inode, start, + ret = repair_io_failure(BTRFS_I(fs_info->btree_inode), start, PAGE_SIZE, start, p, start - page_offset(p), mirror_num); if (ret) @@ -2089,23 +2092,23 @@ int repair_eb_io_failure(struct btrfs_fs_info *fs_info, * each time an IO finishes, we do a fast check in the IO failure tree * to see if we need to process or clean up an io_failure_record */ -int clean_io_failure(struct inode *inode, u64 start, struct page *page, +int clean_io_failure(struct btrfs_inode *inode, u64 start, struct page *page, unsigned int pg_offset) { u64 private; struct io_failure_record *failrec; - struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; + struct btrfs_fs_info *fs_info = inode->root->fs_info; struct extent_state *state; int num_copies; int ret; private = 0; - ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private, + ret = count_range_bits(&inode->io_failure_tree, &private, (u64)-1, 1, EXTENT_DIRTY, 0); if (!ret) return 0; - ret = get_state_failrec(&BTRFS_I(inode)->io_failure_tree, start, + ret = get_state_failrec(&inode->io_failure_tree, start, &failrec); if (ret) return 0; @@ -2122,11 +2125,11 @@ int clean_io_failure(struct inode *inode, u64 start, struct page *page, if (fs_info->sb->s_flags & MS_RDONLY) goto out; - spin_lock(&BTRFS_I(inode)->io_tree.lock); - state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree, + spin_lock(&inode->io_tree.lock); + state = find_first_extent_bit_state(&inode->io_tree, failrec->start, EXTENT_LOCKED); - spin_unlock(&BTRFS_I(inode)->io_tree.lock); + spin_unlock(&inode->io_tree.lock); if (state && state->start <= failrec->start && state->end >= failrec->start + failrec->len - 1) { @@ -2151,9 +2154,9 @@ int clean_io_failure(struct inode *inode, u64 start, struct page *page, * - under ordered extent * - the inode is freeing */ -void btrfs_free_io_failure_record(struct inode *inode, u64 start, u64 end) +void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, u64 end) { - struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree; + struct extent_io_tree *failure_tree = &inode->io_failure_tree; struct io_failure_record *failrec; struct extent_state *state, *next; @@ -2393,7 +2396,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset, ret = btrfs_check_repairable(inode, failed_bio, failrec, failed_mirror); if (!ret) { - free_io_failure(inode, failrec); + free_io_failure(BTRFS_I(inode), failrec); return -EIO; } @@ -2406,7 +2409,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset, 
(int)phy_offset, failed_bio->bi_end_io, NULL); if (!bio) { - free_io_failure(inode, failrec); + free_io_failure(BTRFS_I(inode), failrec); return -EIO; } bio_set_op_attrs(bio, REQ_OP_READ, read_mode); @@ -2418,7 +2421,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset, ret = tree->ops->submit_bio_hook(inode, bio, failrec->this_mirror, failrec->bio_flags, 0); if (ret) { - free_io_failure(inode, failrec); + free_io_failure(BTRFS_I(inode), failrec); bio_put(bio); } @@ -2435,12 +2438,9 @@ void end_extent_writepage(struct page *page, int err, u64 start, u64 end) tree = &BTRFS_I(page->mapping->host)->io_tree; - if (tree->ops && tree->ops->writepage_end_io_hook) { - ret = tree->ops->writepage_end_io_hook(page, start, - end, NULL, uptodate); - if (ret) - uptodate = 0; - } + if (tree->ops && tree->ops->writepage_end_io_hook) + tree->ops->writepage_end_io_hook(page, start, end, NULL, + uptodate); if (!uptodate) { ClearPageUptodate(page); @@ -2568,42 +2568,52 @@ static void end_bio_extent_readpage(struct bio *bio) len = bvec->bv_len; mirror = io_bio->mirror_num; - if (likely(uptodate && tree->ops && - tree->ops->readpage_end_io_hook)) { + if (likely(uptodate && tree->ops)) { ret = tree->ops->readpage_end_io_hook(io_bio, offset, page, start, end, mirror); if (ret) uptodate = 0; else - clean_io_failure(inode, start, page, 0); + clean_io_failure(BTRFS_I(inode), start, + page, 0); } if (likely(uptodate)) goto readpage_ok; - if (tree->ops && tree->ops->readpage_io_failed_hook) { + if (tree->ops) { ret = tree->ops->readpage_io_failed_hook(page, mirror); - if (!ret && !bio->bi_error) - uptodate = 1; - } else { + if (ret == -EAGAIN) { + /* + * Data inode's readpage_io_failed_hook() always + * returns -EAGAIN. + * + * The generic bio_readpage_error handles errors + * the following way: If possible, new read + * requests are created and submitted and will + * end up in end_bio_extent_readpage as well (if + * we're lucky, not in the !uptodate case). In + * that case it returns 0 and we just go on with + * the next page in our bio. If it can't handle + * the error it will return -EIO and we remain + * responsible for that page. + */ + ret = bio_readpage_error(bio, offset, page, + start, end, mirror); + if (ret == 0) { + uptodate = !bio->bi_error; + offset += len; + continue; + } + } + /* - * The generic bio_readpage_error handles errors the - * following way: If possible, new read requests are - * created and submitted and will end up in - * end_bio_extent_readpage as well (if we're lucky, not - * in the !uptodate case). In that case it returns 0 and - * we just go on with the next page in our bio. If it - * can't handle the error it will return -EIO and we - * remain responsible for that page. + * metadata's readpage_io_failed_hook() always returns + * -EIO and fixes nothing. -EIO is also returned if + * data inode error could not be fixed. 
*/ - ret = bio_readpage_error(bio, offset, page, start, end, - mirror); - if (ret == 0) { - uptodate = !bio->bi_error; - offset += len; - continue; - } + ASSERT(ret == -EIO); } readpage_ok: if (likely(uptodate)) { @@ -2731,7 +2741,7 @@ static int __must_check submit_one_bio(struct bio *bio, int mirror_num, bio->bi_private = NULL; bio_get(bio); - if (tree->ops && tree->ops->submit_bio_hook) + if (tree->ops) ret = tree->ops->submit_bio_hook(page->mapping->host, bio, mirror_num, bio_flags, start); else @@ -2746,7 +2756,7 @@ static int merge_bio(struct extent_io_tree *tree, struct page *page, unsigned long bio_flags) { int ret = 0; - if (tree->ops && tree->ops->merge_bio_hook) + if (tree->ops) ret = tree->ops->merge_bio_hook(page, offset, size, bio, bio_flags); return ret; @@ -2857,7 +2867,7 @@ __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset, *em_cached = NULL; } - em = get_extent(inode, page, pg_offset, start, len, 0); + em = get_extent(BTRFS_I(inode), page, pg_offset, start, len, 0); if (em_cached && !IS_ERR_OR_NULL(em)) { BUG_ON(*em_cached); atomic_inc(&em->refs); @@ -3101,7 +3111,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree, inode = pages[0]->mapping->host; while (1) { lock_extent(tree, start, end); - ordered = btrfs_lookup_ordered_range(inode, start, + ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start, end - start + 1); if (!ordered) break; @@ -3173,7 +3183,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, while (1) { lock_extent(tree, start, end); - ordered = btrfs_lookup_ordered_range(inode, start, + ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start, PAGE_SIZE); if (!ordered) break; @@ -3370,7 +3380,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode, page_end, NULL, 1); break; } - em = epd->get_extent(inode, page, pg_offset, cur, + em = epd->get_extent(BTRFS_I(inode), page, pg_offset, cur, end - cur + 1, 1); if (IS_ERR_OR_NULL(em)) { SetPageError(page); @@ -4335,7 +4345,7 @@ static struct extent_map *get_extent_skip_holes(struct inode *inode, if (len == 0) break; len = ALIGN(len, sectorsize); - em = get_extent(inode, NULL, 0, offset, len, 0); + em = get_extent(BTRFS_I(inode), NULL, 0, offset, len, 0); if (IS_ERR_OR_NULL(em)) return em; diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 270d03be290eec..3e4fad4a909d11 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -84,6 +84,7 @@ extern void le_bitmap_clear(u8 *map, unsigned int start, int len); struct extent_state; struct btrfs_root; +struct btrfs_inode; struct btrfs_io_bio; struct io_failure_record; @@ -91,24 +92,34 @@ typedef int (extent_submit_bio_hook_t)(struct inode *inode, struct bio *bio, int mirror_num, unsigned long bio_flags, u64 bio_offset); struct extent_io_ops { - int (*fill_delalloc)(struct inode *inode, struct page *locked_page, - u64 start, u64 end, int *page_started, - unsigned long *nr_written); - int (*writepage_start_hook)(struct page *page, u64 start, u64 end); + /* + * The following callbacks must always be defined, the function + * pointer will be called unconditionally.
+ */ extent_submit_bio_hook_t *submit_bio_hook; + int (*readpage_end_io_hook)(struct btrfs_io_bio *io_bio, u64 phy_offset, + struct page *page, u64 start, u64 end, + int mirror); int (*merge_bio_hook)(struct page *page, unsigned long offset, size_t size, struct bio *bio, unsigned long bio_flags); int (*readpage_io_failed_hook)(struct page *page, int failed_mirror); - int (*readpage_end_io_hook)(struct btrfs_io_bio *io_bio, u64 phy_offset, - struct page *page, u64 start, u64 end, - int mirror); - int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end, + + /* + * Optional hooks, called if the pointer is not NULL + */ + int (*fill_delalloc)(struct inode *inode, struct page *locked_page, + u64 start, u64 end, int *page_started, + unsigned long *nr_written); + + int (*writepage_start_hook)(struct page *page, u64 start, u64 end); + void (*writepage_end_io_hook)(struct page *page, u64 start, u64 end, struct extent_state *state, int uptodate); void (*set_bit_hook)(struct inode *inode, struct extent_state *state, unsigned *bits); - void (*clear_bit_hook)(struct inode *inode, struct extent_state *state, - unsigned *bits); + void (*clear_bit_hook)(struct btrfs_inode *inode, + struct extent_state *state, + unsigned *bits); void (*merge_extent_hook)(struct inode *inode, struct extent_state *new, struct extent_state *other); @@ -209,7 +220,7 @@ static inline int extent_compress_type(unsigned long bio_flags) struct extent_map_tree; -typedef struct extent_map *(get_extent_t)(struct inode *inode, +typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode, struct page *page, size_t pg_offset, u64 start, u64 len, @@ -451,12 +462,13 @@ struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs); struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask); struct btrfs_fs_info; +struct btrfs_inode; -int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical, - struct page *page, unsigned int pg_offset, - int mirror_num); -int clean_io_failure(struct inode *inode, u64 start, struct page *page, - unsigned int pg_offset); +int repair_io_failure(struct btrfs_inode *inode, u64 start, u64 length, + u64 logical, struct page *page, + unsigned int pg_offset, int mirror_num); +int clean_io_failure(struct btrfs_inode *inode, u64 start, + struct page *page, unsigned int pg_offset); void end_extent_writepage(struct page *page, int err, u64 start, u64 end); int repair_eb_io_failure(struct btrfs_fs_info *fs_info, struct extent_buffer *eb, int mirror_num); @@ -480,7 +492,9 @@ struct io_failure_record { int in_validation; }; -void btrfs_free_io_failure_record(struct inode *inode, u64 start, u64 end); + +void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, + u64 end); int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end, struct io_failure_record **failrec_ret); int btrfs_check_repairable(struct inode *inode, struct bio *failed_bio, @@ -489,7 +503,7 @@ struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio, struct io_failure_record *failrec, struct page *page, int pg_offset, int icsum, bio_end_io_t *endio_func, void *data); -int free_io_failure(struct inode *inode, struct io_failure_record *rec); +int free_io_failure(struct btrfs_inode *inode, struct io_failure_record *rec); #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS noinline u64 find_lock_delalloc_range(struct inode *inode, struct extent_io_tree *tree, diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index f7b9a92ad56d17..64fcb31d71633c 100644 --- 
a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -214,7 +214,7 @@ static int __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, * read from the commit root and sidestep a nasty deadlock * between reading the free space cache and updating the csum tree. */ - if (btrfs_is_free_space_inode(inode)) { + if (btrfs_is_free_space_inode(BTRFS_I(inode))) { path->search_commit_root = 1; path->skip_locking = 1; } @@ -643,7 +643,33 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, /* delete the entire item, it is inside our range */ if (key.offset >= bytenr && csum_end <= end_byte) { - ret = btrfs_del_item(trans, root, path); + int del_nr = 1; + + /* + * Check how many csum items preceding this one in this + * leaf correspond to our range and then delete them all + * at once. + */ + if (key.offset > bytenr && path->slots[0] > 0) { + int slot = path->slots[0] - 1; + + while (slot >= 0) { + struct btrfs_key pk; + + btrfs_item_key_to_cpu(leaf, &pk, slot); + if (pk.offset < bytenr || + pk.type != BTRFS_EXTENT_CSUM_KEY || + pk.objectid != + BTRFS_EXTENT_CSUM_OBJECTID) + break; + path->slots[0] = slot; + del_nr++; + key.offset = pk.offset; + slot--; + } + } + ret = btrfs_del_items(trans, root, path, + path->slots[0], del_nr); if (ret) goto out; if (key.offset == bytenr) @@ -904,14 +930,14 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, goto out; } -void btrfs_extent_item_to_extent_map(struct inode *inode, +void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode, const struct btrfs_path *path, struct btrfs_file_extent_item *fi, const bool new_inline, struct extent_map *em) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); + struct btrfs_root *root = inode->root; struct extent_buffer *leaf = path->nodes[0]; const int slot = path->slots[0]; struct btrfs_key key; @@ -976,8 +1002,8 @@ void btrfs_extent_item_to_extent_map(struct inode *inode, } } else { btrfs_err(fs_info, - "unknown file extent item type %d, inode %llu, offset %llu, root %llu", - type, btrfs_ino(BTRFS_I(inode)), extent_start, + "unknown file extent item type %d, inode %llu, offset %llu, " + "root %llu", type, btrfs_ino(inode), extent_start, root->root_key.objectid); } } diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index c1d2a07205daf0..520cb7230b2d2c 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -92,10 +92,10 @@ static int __compare_inode_defrag(struct inode_defrag *defrag1, * If an existing record is found the defrag item you * pass in is freed */ -static int __btrfs_add_inode_defrag(struct inode *inode, +static int __btrfs_add_inode_defrag(struct btrfs_inode *inode, struct inode_defrag *defrag) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); struct inode_defrag *entry; struct rb_node **p; struct rb_node *parent = NULL; @@ -123,7 +123,7 @@ static int __btrfs_add_inode_defrag(struct inode *inode, return -EEXIST; } } - set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags); + set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags); rb_link_node(&defrag->rb_node, parent, p); rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes); return 0; @@ -145,10 +145,10 @@ static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info) * enabled */ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, - struct inode *inode) + struct btrfs_inode *inode) { - struct btrfs_fs_info *fs_info 
= btrfs_sb(inode->i_sb); - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); + struct btrfs_root *root = inode->root; struct inode_defrag *defrag; u64 transid; int ret; @@ -156,24 +156,24 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, if (!__need_auto_defrag(fs_info)) return 0; - if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags)) + if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) return 0; if (trans) transid = trans->transid; else - transid = BTRFS_I(inode)->root->last_trans; + transid = inode->root->last_trans; defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS); if (!defrag) return -ENOMEM; - defrag->ino = btrfs_ino(BTRFS_I(inode)); + defrag->ino = btrfs_ino(inode); defrag->transid = transid; defrag->root = root->root_key.objectid; spin_lock(&fs_info->defrag_inodes_lock); - if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags)) { + if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) { /* * If we set IN_DEFRAG flag and evict the inode from memory, * and then re-read this inode, this new inode doesn't have @@ -194,10 +194,10 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, * the same inode in the tree, we will merge them together (by * __btrfs_add_inode_defrag()) and free the one that we want to requeue. */ -static void btrfs_requeue_inode_defrag(struct inode *inode, +static void btrfs_requeue_inode_defrag(struct btrfs_inode *inode, struct inode_defrag *defrag) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); int ret; if (!__need_auto_defrag(fs_info)) @@ -334,7 +334,7 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info, */ if (num_defrag == BTRFS_DEFRAG_BATCH) { defrag->last_offset = range.start; - btrfs_requeue_inode_defrag(inode, defrag); + btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag); } else if (defrag->last_offset && !defrag->cycled) { /* * we didn't fill our defrag batch, but @@ -343,7 +343,7 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info, */ defrag->last_offset = 0; defrag->cycled = 1; - btrfs_requeue_inode_defrag(inode, defrag); + btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag); } else { kmem_cache_free(btrfs_inode_defrag_cachep, defrag); } @@ -529,13 +529,13 @@ int btrfs_dirty_pages(struct inode *inode, struct page **pages, * this drops all the extents in the cache that intersect the range * [start, end]. Existing extents are split as required. */ -void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, +void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end, int skip_pinned) { struct extent_map *em; struct extent_map *split = NULL; struct extent_map *split2 = NULL; - struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; + struct extent_map_tree *em_tree = &inode->extent_tree; u64 len = end - start + 1; u64 gen; int ret; @@ -720,7 +720,7 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans, int leafs_visited = 0; if (drop_cache) - btrfs_drop_extent_cache(inode, start, end - 1, 0); + btrfs_drop_extent_cache(BTRFS_I(inode), start, end - 1, 0); if (start >= BTRFS_I(inode)->disk_i_size && !replace_extent) modify_tree = 0; @@ -1082,10 +1082,10 @@ static int extent_mergeable(struct extent_buffer *leaf, int slot, * two or three. 
*/ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, - struct inode *inode, u64 start, u64 end) + struct btrfs_inode *inode, u64 start, u64 end) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); + struct btrfs_root *root = inode->root; struct extent_buffer *leaf; struct btrfs_path *path; struct btrfs_file_extent_item *fi; @@ -1102,7 +1102,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, int del_slot = 0; int recow; int ret; - u64 ino = btrfs_ino(BTRFS_I(inode)); + u64 ino = btrfs_ino(inode); path = btrfs_alloc_path(); if (!path) @@ -1415,13 +1415,13 @@ static noinline int prepare_pages(struct inode *inode, struct page **pages, * the other < 0 number - Something wrong happens */ static noinline int -lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages, +lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages, size_t num_pages, loff_t pos, size_t write_bytes, u64 *lockstart, u64 *lockend, struct extent_state **cached_state) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); u64 start_pos; u64 last_pos; int i; @@ -1432,30 +1432,30 @@ lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages, + round_up(pos + write_bytes - start_pos, fs_info->sectorsize) - 1; - if (start_pos < inode->i_size) { + if (start_pos < inode->vfs_inode.i_size) { struct btrfs_ordered_extent *ordered; - lock_extent_bits(&BTRFS_I(inode)->io_tree, - start_pos, last_pos, cached_state); + lock_extent_bits(&inode->io_tree, start_pos, last_pos, + cached_state); ordered = btrfs_lookup_ordered_range(inode, start_pos, last_pos - start_pos + 1); if (ordered && ordered->file_offset + ordered->len > start_pos && ordered->file_offset <= last_pos) { - unlock_extent_cached(&BTRFS_I(inode)->io_tree, - start_pos, last_pos, - cached_state, GFP_NOFS); + unlock_extent_cached(&inode->io_tree, start_pos, + last_pos, cached_state, GFP_NOFS); for (i = 0; i < num_pages; i++) { unlock_page(pages[i]); put_page(pages[i]); } - btrfs_start_ordered_extent(inode, ordered, 1); + btrfs_start_ordered_extent(&inode->vfs_inode, + ordered, 1); btrfs_put_ordered_extent(ordered); return -EAGAIN; } if (ordered) btrfs_put_ordered_extent(ordered); - clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos, + clear_extent_bit(&inode->io_tree, start_pos, last_pos, EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0, cached_state, GFP_NOFS); @@ -1474,11 +1474,11 @@ lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages, return ret; } -static noinline int check_can_nocow(struct inode *inode, loff_t pos, +static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos, size_t *write_bytes) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); + struct btrfs_root *root = inode->root; struct btrfs_ordered_extent *ordered; u64 lockstart, lockend; u64 num_bytes; @@ -1493,19 +1493,20 @@ static noinline int check_can_nocow(struct inode *inode, loff_t pos, fs_info->sectorsize) - 1; while (1) { - lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend); + lock_extent(&inode->io_tree, lockstart, lockend); ordered = btrfs_lookup_ordered_range(inode, lockstart, lockend - lockstart + 1); if (!ordered) { break; } - 
unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend); - btrfs_start_ordered_extent(inode, ordered, 1); + unlock_extent(&inode->io_tree, lockstart, lockend); + btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1); btrfs_put_ordered_extent(ordered); } num_bytes = lockend - lockstart + 1; - ret = can_nocow_extent(inode, lockstart, &num_bytes, NULL, NULL, NULL); + ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes, + NULL, NULL, NULL); if (ret <= 0) { ret = 0; btrfs_end_write_no_snapshoting(root); @@ -1514,7 +1515,7 @@ static noinline int check_can_nocow(struct inode *inode, loff_t pos, num_bytes - pos + lockstart); } - unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend); + unlock_extent(&inode->io_tree, lockstart, lockend); return ret; } @@ -1579,7 +1580,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file, if (ret < 0) { if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) && - check_can_nocow(inode, pos, &write_bytes) > 0) { + check_can_nocow(BTRFS_I(inode), pos, + &write_bytes) > 0) { /* * For nodata cow case, no need to reserve * data space. @@ -1599,7 +1601,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file, } } - ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes); + ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), + reserve_bytes); if (ret) { if (!only_release_metadata) btrfs_free_reserved_data_space(inode, pos, @@ -1623,9 +1626,9 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file, if (ret) break; - ret = lock_and_cleanup_extent_if_need(inode, pages, num_pages, - pos, write_bytes, &lockstart, - &lockend, &cached_state); + ret = lock_and_cleanup_extent_if_need(BTRFS_I(inode), pages, + num_pages, pos, write_bytes, &lockstart, + &lockend, &cached_state); if (ret < 0) { if (ret == -EAGAIN) goto again; @@ -1677,7 +1680,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file, spin_unlock(&BTRFS_I(inode)->lock); } if (only_release_metadata) { - btrfs_delalloc_release_metadata(inode, + btrfs_delalloc_release_metadata(BTRFS_I(inode), release_bytes); } else { u64 __pos; @@ -1738,7 +1741,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file, if (release_bytes) { if (only_release_metadata) { btrfs_end_write_no_snapshoting(root); - btrfs_delalloc_release_metadata(inode, release_bytes); + btrfs_delalloc_release_metadata(BTRFS_I(inode), + release_bytes); } else { btrfs_delalloc_release_space(inode, round_down(pos, fs_info->sectorsize), @@ -2193,7 +2197,7 @@ static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma) return 0; } -static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf, +static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf, int slot, u64 start, u64 end) { struct btrfs_file_extent_item *fi; @@ -2203,7 +2207,7 @@ static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf, return 0; btrfs_item_key_to_cpu(leaf, &key, slot); - if (key.objectid != btrfs_ino(BTRFS_I(inode)) || + if (key.objectid != btrfs_ino(inode) || key.type != BTRFS_EXTENT_DATA_KEY) return 0; @@ -2222,22 +2226,23 @@ static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf, return 0; } -static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode, - struct btrfs_path *path, u64 offset, u64 end) +static int fill_holes(struct btrfs_trans_handle *trans, + struct btrfs_inode *inode, + struct btrfs_path *path, u64 offset, u64 end) { - struct btrfs_fs_info *fs_info = 
btrfs_sb(inode->i_sb); - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); + struct btrfs_root *root = inode->root; struct extent_buffer *leaf; struct btrfs_file_extent_item *fi; struct extent_map *hole_em; - struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; + struct extent_map_tree *em_tree = &inode->extent_tree; struct btrfs_key key; int ret; if (btrfs_fs_incompat(fs_info, NO_HOLES)) goto out; - key.objectid = btrfs_ino(BTRFS_I(inode)); + key.objectid = btrfs_ino(inode); key.type = BTRFS_EXTENT_DATA_KEY; key.offset = offset; @@ -2253,7 +2258,7 @@ static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode, } leaf = path->nodes[0]; - if (hole_mergeable(inode, leaf, path->slots[0]-1, offset, end)) { + if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) { u64 num_bytes; path->slots[0]--; @@ -2285,7 +2290,7 @@ static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode, } btrfs_release_path(path); - ret = btrfs_insert_file_extent(trans, root, btrfs_ino(BTRFS_I(inode)), + ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset, 0, 0, end - offset, 0, end - offset, 0, 0, 0); if (ret) return ret; @@ -2296,8 +2301,7 @@ static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode, hole_em = alloc_extent_map(); if (!hole_em) { btrfs_drop_extent_cache(inode, offset, end - 1, 0); - set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, - &BTRFS_I(inode)->runtime_flags); + set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags); } else { hole_em->start = offset; hole_em->len = end - offset; @@ -2320,7 +2324,7 @@ static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode, free_extent_map(hole_em); if (ret) set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, - &BTRFS_I(inode)->runtime_flags); + &inode->runtime_flags); } return 0; @@ -2337,7 +2341,7 @@ static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len) struct extent_map *em; int ret = 0; - em = btrfs_get_extent(inode, NULL, 0, *start, *len, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, *start, *len, 0); if (IS_ERR_OR_NULL(em)) { if (!em) ret = -ENOMEM; @@ -2550,8 +2554,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) trans->block_rsv = &fs_info->trans_block_rsv; if (cur_offset < drop_end && cur_offset < ino_size) { - ret = fill_holes(trans, inode, path, cur_offset, - drop_end); + ret = fill_holes(trans, BTRFS_I(inode), path, + cur_offset, drop_end); if (ret) { /* * If we failed then we didn't insert our hole @@ -2622,7 +2626,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) * cur_offset == drop_end). */ if (cur_offset < ino_size && cur_offset < drop_end) { - ret = fill_holes(trans, inode, path, cur_offset, drop_end); + ret = fill_holes(trans, BTRFS_I(inode), path, + cur_offset, drop_end); if (ret) { /* Same comment as above. */ btrfs_abort_transaction(trans, ret); @@ -2747,7 +2752,8 @@ static long btrfs_fallocate(struct file *file, int mode, * * For qgroup space, it will be checked later. 
*/ - ret = btrfs_alloc_data_chunk_ondemand(inode, alloc_end - alloc_start); + ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode), + alloc_end - alloc_start); if (ret < 0) return ret; @@ -2827,7 +2833,7 @@ static long btrfs_fallocate(struct file *file, int mode, /* First, check if we exceed the qgroup limit */ INIT_LIST_HEAD(&reserve_list); while (1) { - em = btrfs_get_extent(inode, NULL, 0, cur_offset, + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset, alloc_end - cur_offset, 0); if (IS_ERR_OR_NULL(em)) { if (!em) @@ -2954,7 +2960,8 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence) &cached_state); while (start < inode->i_size) { - em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0); + em = btrfs_get_extent_fiemap(BTRFS_I(inode), NULL, 0, + start, len, 0); if (IS_ERR(em)) { ret = PTR_ERR(em); em = NULL; diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 1a131f7d6c1bed..da6841efac26b1 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -18,6 +18,7 @@ #include #include +#include #include #include #include @@ -260,7 +261,7 @@ int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans, btrfs_free_path(path); } - btrfs_i_size_write(inode, 0); + btrfs_i_size_write(BTRFS_I(inode), 0); truncate_pagecache(inode, 0); /* @@ -3545,7 +3546,8 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root, if (ret) { if (release_metadata) - btrfs_delalloc_release_metadata(inode, inode->i_size); + btrfs_delalloc_release_metadata(BTRFS_I(inode), + inode->i_size); #ifdef DEBUG btrfs_err(fs_info, "failed to write free ino cache for root %llu", diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index 3bbb8f09595352..5c6c20ec64d8a9 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -499,7 +499,7 @@ int btrfs_save_ino_cache(struct btrfs_root *root, ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc, prealloc, prealloc, &alloc_hint); if (ret) { - btrfs_delalloc_release_metadata(inode, prealloc); + btrfs_delalloc_release_metadata(BTRFS_I(inode), prealloc); goto out_put; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index f02823f088c2b1..a18510be76c141 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -316,8 +316,8 @@ static noinline int cow_file_range_inline(struct btrfs_root *root, } set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags); - btrfs_delalloc_release_metadata(inode, end + 1 - start); - btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0); + btrfs_delalloc_release_metadata(BTRFS_I(inode), end + 1 - start); + btrfs_drop_extent_cache(BTRFS_I(inode), start, aligned_end - 1, 0); out: /* * Don't forget to free the reserved space, as for inlined extent @@ -389,12 +389,12 @@ static inline int inode_need_compress(struct inode *inode) return 0; } -static inline void inode_should_defrag(struct inode *inode, +static inline void inode_should_defrag(struct btrfs_inode *inode, u64 start, u64 end, u64 num_bytes, u64 small_write) { /* If this is a small write inside eof, kick off a defrag */ if (num_bytes < small_write && - (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size)) + (start > 0 || end + 1 < inode->disk_i_size)) btrfs_add_inode_defrag(NULL, inode); } @@ -430,23 +430,23 @@ static noinline void compress_file_range(struct inode *inode, int ret = 0; struct page **pages = NULL; unsigned long nr_pages; - unsigned long nr_pages_ret = 0; unsigned long total_compressed = 0; unsigned long total_in = 0; - unsigned long max_compressed = SZ_128K; - 
unsigned long max_uncompressed = SZ_128K; int i; int will_compress; int compress_type = fs_info->compress_type; int redirty = 0; - inode_should_defrag(inode, start, end, end - start + 1, SZ_16K); + inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1, + SZ_16K); actual_end = min_t(u64, isize, end + 1); again: will_compress = 0; nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1; - nr_pages = min_t(unsigned long, nr_pages, SZ_128K / PAGE_SIZE); + BUILD_BUG_ON((BTRFS_MAX_COMPRESSED % PAGE_SIZE) != 0); + nr_pages = min_t(unsigned long, nr_pages, + BTRFS_MAX_COMPRESSED / PAGE_SIZE); /* * we don't want to send crud past the end of i_size through @@ -471,17 +471,8 @@ static noinline void compress_file_range(struct inode *inode, (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size)) goto cleanup_and_bail_uncompressed; - /* we want to make sure that amount of ram required to uncompress - * an extent is reasonable, so we limit the total size in ram - * of a compressed extent to 128k. This is a crucial number - * because it also controls how easily we can spread reads across - * cpus for decompression. - * - * We also want to make sure the amount of IO required to do - * a random read is reasonably small, so we limit the size of - * a compressed extent to 128k. - */ - total_compressed = min(total_compressed, max_uncompressed); + total_compressed = min_t(unsigned long, total_compressed, + BTRFS_MAX_UNCOMPRESSED); num_bytes = ALIGN(end - start + 1, blocksize); num_bytes = max(blocksize, num_bytes); total_in = 0; @@ -516,16 +507,15 @@ static noinline void compress_file_range(struct inode *inode, redirty = 1; ret = btrfs_compress_pages(compress_type, inode->i_mapping, start, - total_compressed, pages, - nr_pages, &nr_pages_ret, + pages, + &nr_pages, &total_in, - &total_compressed, - max_compressed); + &total_compressed); if (!ret) { unsigned long offset = total_compressed & (PAGE_SIZE - 1); - struct page *page = pages[nr_pages_ret - 1]; + struct page *page = pages[nr_pages - 1]; char *kaddr; /* zero the tail end of the last page, we might be @@ -606,7 +596,7 @@ static noinline void compress_file_range(struct inode *inode, * will submit them to the elevator. 
*/ add_async_extent(async_cow, start, num_bytes, - total_compressed, pages, nr_pages_ret, + total_compressed, pages, nr_pages, compress_type); if (start + num_bytes < end) { @@ -623,14 +613,14 @@ static noinline void compress_file_range(struct inode *inode, * the compression code ran but failed to make things smaller, * free any pages it allocated and our page pointer array */ - for (i = 0; i < nr_pages_ret; i++) { + for (i = 0; i < nr_pages; i++) { WARN_ON(pages[i]->mapping); put_page(pages[i]); } kfree(pages); pages = NULL; total_compressed = 0; - nr_pages_ret = 0; + nr_pages = 0; /* flag the file so we don't compress in the future */ if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) && @@ -659,7 +649,7 @@ static noinline void compress_file_range(struct inode *inode, return; free_pages_out: - for (i = 0; i < nr_pages_ret; i++) { + for (i = 0; i < nr_pages; i++) { WARN_ON(pages[i]->mapping); put_page(pages[i]); } @@ -806,7 +796,8 @@ static noinline void submit_compressed_extents(struct inode *inode, BTRFS_ORDERED_COMPRESSED, async_extent->compress_type); if (ret) { - btrfs_drop_extent_cache(inode, async_extent->start, + btrfs_drop_extent_cache(BTRFS_I(inode), + async_extent->start, async_extent->start + async_extent->ram_size - 1, 0); goto out_free_reserve; @@ -933,7 +924,7 @@ static noinline int cow_file_range(struct inode *inode, struct extent_map *em; int ret = 0; - if (btrfs_is_free_space_inode(inode)) { + if (btrfs_is_free_space_inode(BTRFS_I(inode))) { WARN_ON_ONCE(1); ret = -EINVAL; goto out_unlock; @@ -943,7 +934,7 @@ static noinline int cow_file_range(struct inode *inode, num_bytes = max(blocksize, num_bytes); disk_num_bytes = num_bytes; - inode_should_defrag(inode, start, end, num_bytes, SZ_64K); + inode_should_defrag(BTRFS_I(inode), start, end, num_bytes, SZ_64K); if (start == 0) { /* lets try to make an inline extent */ @@ -971,7 +962,8 @@ static noinline int cow_file_range(struct inode *inode, btrfs_super_total_bytes(fs_info->super_copy)); alloc_hint = get_extent_allocation_hint(inode, start, num_bytes); - btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0); + btrfs_drop_extent_cache(BTRFS_I(inode), start, + start + num_bytes - 1, 0); while (disk_num_bytes > 0) { unsigned long op; @@ -1039,7 +1031,7 @@ static noinline int cow_file_range(struct inode *inode, return ret; out_drop_extent_cache: - btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0); + btrfs_drop_extent_cache(BTRFS_I(inode), start, start + ram_size - 1, 0); out_reserve: btrfs_dec_block_group_reservations(fs_info, ins.objectid); btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1); @@ -1231,7 +1223,7 @@ static noinline int run_delalloc_nocow(struct inode *inode, return -ENOMEM; } - nolock = btrfs_is_free_space_inode(inode); + nolock = btrfs_is_free_space_inode(BTRFS_I(inode)); cow_start = (u64)-1; cur_offset = start; @@ -1331,10 +1323,16 @@ static noinline int run_delalloc_nocow(struct inode *inode, * either valid or do not exist. 
*/ if (csum_exist_in_range(fs_info, disk_bytenr, - num_bytes)) + num_bytes)) { + if (!nolock) + btrfs_end_write_no_snapshoting(root); goto out_check; - if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr)) + } + if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr)) { + if (!nolock) + btrfs_end_write_no_snapshoting(root); goto out_check; + } nocow = 1; } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { extent_end = found_key.offset + @@ -1629,15 +1627,15 @@ static void btrfs_add_delalloc_inodes(struct btrfs_root *root, } static void btrfs_del_delalloc_inode(struct btrfs_root *root, - struct inode *inode) + struct btrfs_inode *inode) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); spin_lock(&root->delalloc_lock); - if (!list_empty(&BTRFS_I(inode)->delalloc_inodes)) { - list_del_init(&BTRFS_I(inode)->delalloc_inodes); + if (!list_empty(&inode->delalloc_inodes)) { + list_del_init(&inode->delalloc_inodes); clear_bit(BTRFS_INODE_IN_DELALLOC_LIST, - &BTRFS_I(inode)->runtime_flags); + &inode->runtime_flags); root->nr_delalloc_inodes--; if (!root->nr_delalloc_inodes) { spin_lock(&fs_info->delalloc_root_lock); @@ -1670,7 +1668,7 @@ static void btrfs_set_bit_hook(struct inode *inode, if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { struct btrfs_root *root = BTRFS_I(inode)->root; u64 len = state->end + 1 - state->start; - bool do_list = !btrfs_is_free_space_inode(inode); + bool do_list = !btrfs_is_free_space_inode(BTRFS_I(inode)); if (*bits & EXTENT_FIRST_DELALLOC) { *bits &= ~EXTENT_FIRST_DELALLOC; @@ -1700,18 +1698,18 @@ static void btrfs_set_bit_hook(struct inode *inode, /* * extent_io.c clear_bit_hook, see set_bit_hook for why */ -static void btrfs_clear_bit_hook(struct inode *inode, +static void btrfs_clear_bit_hook(struct btrfs_inode *inode, struct extent_state *state, unsigned *bits) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); u64 len = state->end + 1 - state->start; u32 num_extents = count_max_extents(len); - spin_lock(&BTRFS_I(inode)->lock); + spin_lock(&inode->lock); if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG)) - BTRFS_I(inode)->defrag_bytes -= len; - spin_unlock(&BTRFS_I(inode)->lock); + inode->defrag_bytes -= len; + spin_unlock(&inode->lock); /* * set_bit and clear bit hooks normally require _irqsave/restore @@ -1719,15 +1717,15 @@ static void btrfs_clear_bit_hook(struct inode *inode, * bit, which is only set or cleared with irqs on */ if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_root *root = inode->root; bool do_list = !btrfs_is_free_space_inode(inode); if (*bits & EXTENT_FIRST_DELALLOC) { *bits &= ~EXTENT_FIRST_DELALLOC; } else if (!(*bits & EXTENT_DO_ACCOUNTING)) { - spin_lock(&BTRFS_I(inode)->lock); - BTRFS_I(inode)->outstanding_extents -= num_extents; - spin_unlock(&BTRFS_I(inode)->lock); + spin_lock(&inode->lock); + inode->outstanding_extents -= num_extents; + spin_unlock(&inode->lock); } /* @@ -1747,18 +1745,19 @@ static void btrfs_clear_bit_hook(struct inode *inode, && do_list && !(state->state & EXTENT_NORESERVE) && (*bits & (EXTENT_DO_ACCOUNTING | EXTENT_CLEAR_DATA_RESV))) - btrfs_free_reserved_data_space_noquota(inode, + btrfs_free_reserved_data_space_noquota( + &inode->vfs_inode, state->start, len); __percpu_counter_add(&fs_info->delalloc_bytes, -len, fs_info->delalloc_batch); - 
spin_lock(&BTRFS_I(inode)->lock); - BTRFS_I(inode)->delalloc_bytes -= len; - if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 && + spin_lock(&inode->lock); + inode->delalloc_bytes -= len; + if (do_list && inode->delalloc_bytes == 0 && test_bit(BTRFS_INODE_IN_DELALLOC_LIST, - &BTRFS_I(inode)->runtime_flags)) + &inode->runtime_flags)) btrfs_del_delalloc_inode(root, inode); - spin_unlock(&BTRFS_I(inode)->lock); + spin_unlock(&inode->lock); } } @@ -1854,7 +1853,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, struct bio *bio, skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; - if (btrfs_is_free_space_inode(inode)) + if (btrfs_is_free_space_inode(BTRFS_I(inode))) metadata = BTRFS_WQ_ENDIO_FREE_SPACE; if (bio_op(bio) != REQ_OP_WRITE) { @@ -1963,7 +1962,7 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work) if (PagePrivate2(page)) goto out; - ordered = btrfs_lookup_ordered_range(inode, page_start, + ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start, PAGE_SIZE); if (ordered) { unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, @@ -2793,16 +2792,17 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) bool nolock; bool truncated = false; - nolock = btrfs_is_free_space_inode(inode); + nolock = btrfs_is_free_space_inode(BTRFS_I(inode)); if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) { ret = -EIO; goto out; } - btrfs_free_io_failure_record(inode, ordered_extent->file_offset, - ordered_extent->file_offset + - ordered_extent->len - 1); + btrfs_free_io_failure_record(BTRFS_I(inode), + ordered_extent->file_offset, + ordered_extent->file_offset + + ordered_extent->len - 1); if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) { truncated = true; @@ -2873,7 +2873,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) compress_type = ordered_extent->compress_type; if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { BUG_ON(compress_type); - ret = btrfs_mark_extent_written(trans, inode, + ret = btrfs_mark_extent_written(trans, BTRFS_I(inode), ordered_extent->file_offset, ordered_extent->file_offset + logical_len); @@ -2914,7 +2914,8 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) ordered_extent->len - 1, &cached_state, GFP_NOFS); out: if (root != fs_info->tree_root) - btrfs_delalloc_release_metadata(inode, ordered_extent->len); + btrfs_delalloc_release_metadata(BTRFS_I(inode), + ordered_extent->len); if (trans) btrfs_end_transaction(trans); @@ -2929,7 +2930,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) clear_extent_uptodate(io_tree, start, end, NULL, GFP_NOFS); /* Drop the cache for the part of the extent we didn't write. 
*/ - btrfs_drop_extent_cache(inode, start, end, 0); + btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0); /* * If the ordered extent had an IOERR or something else went @@ -2977,7 +2978,7 @@ static void finish_ordered_fn(struct btrfs_work *work) btrfs_finish_ordered_io(ordered_extent); } -static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, +static void btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, struct extent_state *state, int uptodate) { struct inode *inode = page->mapping->host; @@ -2991,9 +2992,9 @@ static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, ClearPagePrivate2(page); if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start, end - start + 1, uptodate)) - return 0; + return; - if (btrfs_is_free_space_inode(inode)) { + if (btrfs_is_free_space_inode(BTRFS_I(inode))) { wq = fs_info->endio_freespace_worker; func = btrfs_freespace_write_helper; } else { @@ -3004,8 +3005,6 @@ static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL, NULL); btrfs_queue_work(wq, &ordered_extent->work); - - return 0; } static int __readpage_endio_check(struct inode *inode, @@ -3028,7 +3027,7 @@ static int __readpage_endio_check(struct inode *inode, kunmap_atomic(kaddr); return 0; zeroit: - btrfs_print_data_csum_error(inode, start, csum, csum_expected, + btrfs_print_data_csum_error(BTRFS_I(inode), start, csum, csum_expected, io_bio->mirror_num); memset(kaddr + pgoff, 1, len); flush_dcache_page(page); @@ -3167,10 +3166,11 @@ void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans, * NOTE: caller of this function should reserve 5 units of metadata for * this function. */ -int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) +int btrfs_orphan_add(struct btrfs_trans_handle *trans, + struct btrfs_inode *inode) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); + struct btrfs_root *root = inode->root; struct btrfs_block_rsv *block_rsv = NULL; int reserve = 0; int insert = 0; @@ -3192,7 +3192,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) } if (!test_and_set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, - &BTRFS_I(inode)->runtime_flags)) { + &inode->runtime_flags)) { #if 0 /* * For proper ENOSPC handling, we should do orphan @@ -3209,7 +3209,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) } if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED, - &BTRFS_I(inode)->runtime_flags)) + &inode->runtime_flags)) reserve = 1; spin_unlock(&root->orphan_lock); @@ -3220,28 +3220,27 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) if (ret) { atomic_dec(&root->orphan_inodes); clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED, - &BTRFS_I(inode)->runtime_flags); + &inode->runtime_flags); if (insert) clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, - &BTRFS_I(inode)->runtime_flags); + &inode->runtime_flags); return ret; } } /* insert an orphan item to track this unlinked/truncated file */ if (insert >= 1) { - ret = btrfs_insert_orphan_item(trans, root, - btrfs_ino(BTRFS_I(inode))); + ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode)); if (ret) { atomic_dec(&root->orphan_inodes); if (reserve) { clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED, - &BTRFS_I(inode)->runtime_flags); + &inode->runtime_flags); 
btrfs_orphan_release_metadata(inode); } if (ret != -EEXIST) { clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, - &BTRFS_I(inode)->runtime_flags); + &inode->runtime_flags); btrfs_abort_transaction(trans, ret); return ret; } @@ -3266,20 +3265,20 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) * item for this particular inode. */ static int btrfs_orphan_del(struct btrfs_trans_handle *trans, - struct inode *inode) + struct btrfs_inode *inode) { - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_root *root = inode->root; int delete_item = 0; int release_rsv = 0; int ret = 0; spin_lock(&root->orphan_lock); if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, - &BTRFS_I(inode)->runtime_flags)) + &inode->runtime_flags)) delete_item = 1; if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED, - &BTRFS_I(inode)->runtime_flags)) + &inode->runtime_flags)) release_rsv = 1; spin_unlock(&root->orphan_lock); @@ -3287,7 +3286,7 @@ static int btrfs_orphan_del(struct btrfs_trans_handle *trans, atomic_dec(&root->orphan_inodes); if (trans) ret = btrfs_del_orphan_item(trans, root, - btrfs_ino(BTRFS_I(inode))); + btrfs_ino(inode)); } if (release_rsv) @@ -3453,7 +3452,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root) ret = PTR_ERR(trans); goto out; } - ret = btrfs_orphan_add(trans, inode); + ret = btrfs_orphan_add(trans, BTRFS_I(inode)); btrfs_end_transaction(trans); if (ret) { iput(inode); @@ -3462,7 +3461,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root) ret = btrfs_truncate(inode); if (ret) - btrfs_orphan_del(NULL, inode); + btrfs_orphan_del(NULL, BTRFS_I(inode)); } else { nr_unlink++; } @@ -3617,7 +3616,7 @@ static int btrfs_read_locked_inode(struct inode *inode) set_nlink(inode, btrfs_inode_nlink(leaf, inode_item)); i_uid_write(inode, btrfs_inode_uid(leaf, inode_item)); i_gid_write(inode, btrfs_inode_gid(leaf, inode_item)); - btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item)); + btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item)); inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime); inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime); @@ -3865,7 +3864,7 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, * The data relocation inode should also be directly updated * without delay */ - if (!btrfs_is_free_space_inode(inode) + if (!btrfs_is_free_space_inode(BTRFS_I(inode)) && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) { btrfs_update_root_times(trans, root); @@ -3988,8 +3987,7 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, if (ret) goto out; - btrfs_i_size_write(&dir->vfs_inode, - dir->vfs_inode.i_size - name_len * 2); + btrfs_i_size_write(dir, dir->vfs_inode.i_size - name_len * 2); inode_inc_iversion(&inode->vfs_inode); inode_inc_iversion(&dir->vfs_inode); inode->vfs_inode.i_ctime = dir->vfs_inode.i_mtime = @@ -4056,7 +4054,7 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry) goto out; if (inode->i_nlink == 0) { - ret = btrfs_orphan_add(trans, inode); + ret = btrfs_orphan_add(trans, BTRFS_I(inode)); if (ret) goto out; } @@ -4137,7 +4135,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, goto out; } - btrfs_i_size_write(dir, dir->i_size - name_len * 2); + btrfs_i_size_write(BTRFS_I(dir), dir->i_size - name_len * 2); inode_inc_iversion(dir); dir->i_mtime = dir->i_ctime = current_time(dir); ret = btrfs_update_inode_fallback(trans, root, dir); @@ -4173,7 +4171,7 @@ 
static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) goto out; } - err = btrfs_orphan_add(trans, inode); + err = btrfs_orphan_add(trans, BTRFS_I(inode)); if (err) goto out; @@ -4184,7 +4182,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) BTRFS_I(d_inode(dentry)), dentry->d_name.name, dentry->d_name.len); if (!err) { - btrfs_i_size_write(inode, 0); + btrfs_i_size_write(BTRFS_I(inode), 0); /* * Propagate the last_unlink_trans value of the deleted dir to * its parent directory. This is to prevent an unrecoverable @@ -4320,7 +4318,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, * for non-free space inodes and ref cows, we want to back off from * time to time */ - if (!btrfs_is_free_space_inode(inode) && + if (!btrfs_is_free_space_inode(BTRFS_I(inode)) && test_bit(BTRFS_ROOT_REF_COWS, &root->state)) be_nice = 1; @@ -4336,7 +4334,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, */ if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) || root == fs_info->tree_root) - btrfs_drop_extent_cache(inode, ALIGN(new_size, + btrfs_drop_extent_cache(BTRFS_I(inode), ALIGN(new_size, fs_info->sectorsize), (u64)-1, 0); @@ -4412,19 +4410,8 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, if (found_type > min_type) { del_item = 1; } else { - if (item_end < new_size) { - /* - * With NO_HOLES mode, for the following mapping - * - * [0-4k][hole][8k-12k] - * - * if truncating isize down to 6k, it ends up - * isize being 8k. - */ - if (btrfs_fs_incompat(root->fs_info, NO_HOLES)) - last_size = new_size; + if (item_end < new_size) break; - } if (found_key.offset >= new_size) del_item = 1; else @@ -4607,8 +4594,12 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, btrfs_abort_transaction(trans, ret); } error: - if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) + if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { + ASSERT(last_size >= new_size); + if (!err && last_size > new_size) + last_size = new_size; btrfs_ordered_update_i_size(inode, last_size, NULL); + } btrfs_free_path(path); @@ -4835,7 +4826,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) lock_extent_bits(io_tree, hole_start, block_end - 1, &cached_state); - ordered = btrfs_lookup_ordered_range(inode, hole_start, + ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), hole_start, block_end - hole_start); if (!ordered) break; @@ -4847,7 +4838,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) cur_offset = hole_start; while (1) { - em = btrfs_get_extent(inode, NULL, 0, cur_offset, + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset, block_end - cur_offset, 0); if (IS_ERR(em)) { err = PTR_ERR(em); @@ -4864,7 +4855,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) hole_size); if (err) break; - btrfs_drop_extent_cache(inode, cur_offset, + btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset, cur_offset + hole_size - 1, 0); hole_em = alloc_extent_map(); if (!hole_em) { @@ -4890,7 +4881,8 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) write_unlock(&em_tree->lock); if (err != -EEXIST) break; - btrfs_drop_extent_cache(inode, cur_offset, + btrfs_drop_extent_cache(BTRFS_I(inode), + cur_offset, cur_offset + hole_size - 1, 0); } @@ -4987,7 +4979,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr) * so we need to guarantee from this point on that everything * will be consistent. 
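
The btrfs_truncate_inode_items() hunk above drops the in-loop NO_HOLES special case (the [0-4k][hole][8k-12k] comment) and instead clamps last_size once at the error label, so the ordered i_size update can never land past the requested truncation point. Below is a standalone restatement of that clamp, under the assumption that last_size tracks the end offset of the last item left in the tree; it is an illustrative helper, not code from the patch.

    #include <assert.h>
    #include <stdint.h>

    /* Hypothetical helper mirroring the clamp added at the "error:" label. */
    static uint64_t final_ordered_isize(uint64_t last_size, uint64_t new_size, int err)
    {
            assert(last_size >= new_size);
            if (!err && last_size > new_size)
                    return new_size;   /* e.g. 8k clamped to a requested 6k */
            return last_size;
    }
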
*/ - ret = btrfs_orphan_add(trans, inode); + ret = btrfs_orphan_add(trans, BTRFS_I(inode)); btrfs_end_transaction(trans); if (ret) return ret; @@ -4996,9 +4988,9 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr) truncate_setsize(inode, newsize); /* Disable nonlocked read DIO to avoid the end less truncate */ - btrfs_inode_block_unlocked_dio(inode); + btrfs_inode_block_unlocked_dio(BTRFS_I(inode)); inode_dio_wait(inode); - btrfs_inode_resume_unlocked_dio(inode); + btrfs_inode_resume_unlocked_dio(BTRFS_I(inode)); ret = btrfs_truncate(inode); if (ret && inode->i_nlink) { @@ -5007,7 +4999,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr) /* To get a stable disk_i_size */ err = btrfs_wait_ordered_range(inode, 0, (u64)-1); if (err) { - btrfs_orphan_del(NULL, inode); + btrfs_orphan_del(NULL, BTRFS_I(inode)); return err; } @@ -5019,11 +5011,11 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr) */ trans = btrfs_join_transaction(root); if (IS_ERR(trans)) { - btrfs_orphan_del(NULL, inode); + btrfs_orphan_del(NULL, BTRFS_I(inode)); return ret; } i_size_write(inode, BTRFS_I(inode)->disk_i_size); - err = btrfs_orphan_del(trans, inode); + err = btrfs_orphan_del(trans, BTRFS_I(inode)); if (err) btrfs_abort_transaction(trans, err); btrfs_end_transaction(trans); @@ -5181,18 +5173,18 @@ void btrfs_evict_inode(struct inode *inode) if (inode->i_nlink && ((btrfs_root_refs(&root->root_item) != 0 && root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) || - btrfs_is_free_space_inode(inode))) + btrfs_is_free_space_inode(BTRFS_I(inode)))) goto no_delete; if (is_bad_inode(inode)) { - btrfs_orphan_del(NULL, inode); + btrfs_orphan_del(NULL, BTRFS_I(inode)); goto no_delete; } /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? 
*/ if (!special_file(inode->i_mode)) btrfs_wait_ordered_range(inode, 0, (u64)-1); - btrfs_free_io_failure_record(inode, 0, (u64)-1); + btrfs_free_io_failure_record(BTRFS_I(inode), 0, (u64)-1); if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) { BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, @@ -5208,20 +5200,20 @@ void btrfs_evict_inode(struct inode *inode) ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode)); if (ret) { - btrfs_orphan_del(NULL, inode); + btrfs_orphan_del(NULL, BTRFS_I(inode)); goto no_delete; } rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP); if (!rsv) { - btrfs_orphan_del(NULL, inode); + btrfs_orphan_del(NULL, BTRFS_I(inode)); goto no_delete; } rsv->size = min_size; rsv->failfast = 1; global_rsv = &fs_info->global_block_rsv; - btrfs_i_size_write(inode, 0); + btrfs_i_size_write(BTRFS_I(inode), 0); /* * This is a bit simpler than btrfs_truncate since we've already @@ -5256,14 +5248,14 @@ void btrfs_evict_inode(struct inode *inode) btrfs_warn(fs_info, "Could not get space for a delete, will truncate on mount %d", ret); - btrfs_orphan_del(NULL, inode); + btrfs_orphan_del(NULL, BTRFS_I(inode)); btrfs_free_block_rsv(fs_info, rsv); goto no_delete; } trans = btrfs_join_transaction(root); if (IS_ERR(trans)) { - btrfs_orphan_del(NULL, inode); + btrfs_orphan_del(NULL, BTRFS_I(inode)); btrfs_free_block_rsv(fs_info, rsv); goto no_delete; } @@ -5289,7 +5281,7 @@ void btrfs_evict_inode(struct inode *inode) if (ret) { ret = btrfs_commit_transaction(trans); if (ret) { - btrfs_orphan_del(NULL, inode); + btrfs_orphan_del(NULL, BTRFS_I(inode)); btrfs_free_block_rsv(fs_info, rsv); goto no_delete; } @@ -5318,9 +5310,9 @@ void btrfs_evict_inode(struct inode *inode) */ if (ret == 0) { trans->block_rsv = root->orphan_block_rsv; - btrfs_orphan_del(trans, inode); + btrfs_orphan_del(trans, BTRFS_I(inode)); } else { - btrfs_orphan_del(NULL, inode); + btrfs_orphan_del(NULL, BTRFS_I(inode)); } trans->block_rsv = &fs_info->trans_block_rsv; @@ -5898,7 +5890,8 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc) if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags)) return 0; - if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(inode)) + if (btrfs_fs_closing(root->fs_info) && + btrfs_is_free_space_inode(BTRFS_I(inode))) nolock = true; if (wbc->sync_mode == WB_SYNC_ALL) { @@ -5978,15 +5971,15 @@ static int btrfs_update_time(struct inode *inode, struct timespec *now, * and then set the in-memory index_cnt variable to reflect * free sequence numbers */ -static int btrfs_set_inode_index_count(struct inode *inode) +static int btrfs_set_inode_index_count(struct btrfs_inode *inode) { - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_root *root = inode->root; struct btrfs_key key, found_key; struct btrfs_path *path; struct extent_buffer *leaf; int ret; - key.objectid = btrfs_ino(BTRFS_I(inode)); + key.objectid = btrfs_ino(inode); key.type = BTRFS_DIR_INDEX_KEY; key.offset = (u64)-1; @@ -6009,7 +6002,7 @@ static int btrfs_set_inode_index_count(struct inode *inode) * else has to start at 2 */ if (path->slots[0] == 0) { - BTRFS_I(inode)->index_cnt = 2; + inode->index_cnt = 2; goto out; } @@ -6018,13 +6011,13 @@ static int btrfs_set_inode_index_count(struct inode *inode) leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - if (found_key.objectid != btrfs_ino(BTRFS_I(inode)) || + if (found_key.objectid != btrfs_ino(inode) || found_key.type != BTRFS_DIR_INDEX_KEY) { - BTRFS_I(inode)->index_cnt = 2; + 
inode->index_cnt = 2; goto out; } - BTRFS_I(inode)->index_cnt = found_key.offset + 1; + inode->index_cnt = found_key.offset + 1; out: btrfs_free_path(path); return ret; @@ -6034,12 +6027,12 @@ static int btrfs_set_inode_index_count(struct inode *inode) * helper to find a free sequence number in a given directory. This current * code is very simple, later versions will do smarter things in the btree */ -int btrfs_set_inode_index(struct inode *dir, u64 *index) +int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index) { int ret = 0; - if (BTRFS_I(dir)->index_cnt == (u64)-1) { - ret = btrfs_inode_delayed_dir_index_count(BTRFS_I(dir)); + if (dir->index_cnt == (u64)-1) { + ret = btrfs_inode_delayed_dir_index_count(dir); if (ret) { ret = btrfs_set_inode_index_count(dir); if (ret) @@ -6047,8 +6040,8 @@ int btrfs_set_inode_index(struct inode *dir, u64 *index) } } - *index = BTRFS_I(dir)->index_cnt; - BTRFS_I(dir)->index_cnt++; + *index = dir->index_cnt; + dir->index_cnt++; return ret; } @@ -6109,7 +6102,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, if (dir && name) { trace_btrfs_inode_request(dir); - ret = btrfs_set_inode_index(dir, index); + ret = btrfs_set_inode_index(BTRFS_I(dir), index); if (ret) { btrfs_free_path(path); iput(inode); @@ -6244,18 +6237,18 @@ static inline u8 btrfs_inode_type(struct inode *inode) * inode to the parent directory. */ int btrfs_add_link(struct btrfs_trans_handle *trans, - struct inode *parent_inode, struct inode *inode, + struct btrfs_inode *parent_inode, struct btrfs_inode *inode, const char *name, int name_len, int add_backref, u64 index) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); int ret = 0; struct btrfs_key key; - struct btrfs_root *root = BTRFS_I(parent_inode)->root; - u64 ino = btrfs_ino(BTRFS_I(inode)); - u64 parent_ino = btrfs_ino(BTRFS_I(parent_inode)); + struct btrfs_root *root = parent_inode->root; + u64 ino = btrfs_ino(inode); + u64 parent_ino = btrfs_ino(parent_inode); if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { - memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key)); + memcpy(&key, &inode->root->root_key, sizeof(key)); } else { key.objectid = ino; key.type = BTRFS_INODE_ITEM_KEY; @@ -6277,7 +6270,7 @@ int btrfs_add_link(struct btrfs_trans_handle *trans, ret = btrfs_insert_dir_item(trans, root, name, name_len, parent_inode, &key, - btrfs_inode_type(inode), index); + btrfs_inode_type(&inode->vfs_inode), index); if (ret == -EEXIST || ret == -EOVERFLOW) goto fail_dir_item; else if (ret) { @@ -6285,12 +6278,12 @@ int btrfs_add_link(struct btrfs_trans_handle *trans, return ret; } - btrfs_i_size_write(parent_inode, parent_inode->i_size + + btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size + name_len * 2); - inode_inc_iversion(parent_inode); - parent_inode->i_mtime = parent_inode->i_ctime = - current_time(parent_inode); - ret = btrfs_update_inode(trans, root, parent_inode); + inode_inc_iversion(&parent_inode->vfs_inode); + parent_inode->vfs_inode.i_mtime = parent_inode->vfs_inode.i_ctime = + current_time(&parent_inode->vfs_inode); + ret = btrfs_update_inode(trans, root, &parent_inode->vfs_inode); if (ret) btrfs_abort_transaction(trans, ret); return ret; @@ -6314,8 +6307,8 @@ int btrfs_add_link(struct btrfs_trans_handle *trans, } static int btrfs_add_nondir(struct btrfs_trans_handle *trans, - struct inode *dir, struct dentry *dentry, - struct inode *inode, int backref, u64 index) + struct btrfs_inode *dir, struct 
dentry *dentry, + struct btrfs_inode *inode, int backref, u64 index) { int err = btrfs_add_link(trans, dir, inode, dentry->d_name.name, dentry->d_name.len, @@ -6371,7 +6364,8 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, if (err) goto out_unlock_inode; - err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); + err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode), + 0, index); if (err) { goto out_unlock_inode; } else { @@ -6448,7 +6442,8 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, if (err) goto out_unlock_inode; - err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); + err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode), + 0, index); if (err) goto out_unlock_inode; @@ -6490,7 +6485,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, if (inode->i_nlink >= BTRFS_LINK_MAX) return -EMLINK; - err = btrfs_set_inode_index(dir, &index); + err = btrfs_set_inode_index(BTRFS_I(dir), &index); if (err) goto fail; @@ -6514,7 +6509,8 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, ihold(inode); set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags); - err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index); + err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode), + 1, index); if (err) { drop_inode = 1; @@ -6528,7 +6524,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, * If new hard link count is 1, it's a file created * with open(2) O_TMPFILE flag. */ - err = btrfs_orphan_del(trans, inode); + err = btrfs_orphan_del(trans, BTRFS_I(inode)); if (err) goto fail; } @@ -6589,13 +6585,14 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) if (err) goto out_fail_inode; - btrfs_i_size_write(inode, 0); + btrfs_i_size_write(BTRFS_I(inode), 0); err = btrfs_update_inode(trans, root, inode); if (err) goto out_fail_inode; - err = btrfs_add_link(trans, dir, inode, dentry->d_name.name, - dentry->d_name.len, 0, index); + err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), + dentry->d_name.name, + dentry->d_name.len, 0, index); if (err) goto out_fail_inode; @@ -6712,6 +6709,20 @@ static noinline int uncompress_inline(struct btrfs_path *path, max_size = min_t(unsigned long, PAGE_SIZE, max_size); ret = btrfs_decompress(compress_type, tmp, page, extent_offset, inline_size, max_size); + + /* + * decompression code contains a memset to fill in any space between the end + * of the uncompressed data and the end of max_size in case the decompressed + * data ends up shorter than ram_bytes. That doesn't cover the hole between + * the end of an inline extent and the beginning of the next block, so we + * cover that region here. + */ + + if (max_size + pg_offset < PAGE_SIZE) { + char *map = kmap(page); + memset(map + pg_offset + max_size, 0, PAGE_SIZE - max_size - pg_offset); + kunmap(page); + } kfree(tmp); return ret; } @@ -6725,25 +6736,26 @@ static noinline int uncompress_inline(struct btrfs_path *path, * This also copies inline extents directly into the page. 
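
The uncompress_inline() hunk above zeroes the gap between the end of the decompressed inline data and the end of the page, since the decompression helpers only fill up to max_size. A userspace sketch of the same arithmetic on a plain buffer follows; PAGE_SIZE, pg_offset and max_size stand in for the values the kernel code uses, and the helper name is hypothetical.

    #include <stddef.h>
    #include <string.h>

    #define PAGE_SIZE 4096

    /* Zero everything in buf past the decompressed data, so stale bytes between
     * the end of an inline extent and the end of the block never leak out. */
    static void zero_inline_tail(char *buf, size_t pg_offset, size_t max_size)
    {
            if (max_size + pg_offset < PAGE_SIZE)
                    memset(buf + pg_offset + max_size, 0,
                           PAGE_SIZE - max_size - pg_offset);
    }

    /* Example: 100 bytes of inline data placed at offset 0 leave bytes
     * 100..4095 cleared rather than holding whatever was in the buffer. */
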
*/ -struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, - size_t pg_offset, u64 start, u64 len, - int create) +struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, + struct page *page, + size_t pg_offset, u64 start, u64 len, + int create) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); int ret; int err = 0; u64 extent_start = 0; u64 extent_end = 0; - u64 objectid = btrfs_ino(BTRFS_I(inode)); + u64 objectid = btrfs_ino(inode); u32 found_type; struct btrfs_path *path = NULL; - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_root *root = inode->root; struct btrfs_file_extent_item *item; struct extent_buffer *leaf; struct btrfs_key found_key; struct extent_map *em = NULL; - struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; - struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + struct extent_map_tree *em_tree = &inode->extent_tree; + struct extent_io_tree *io_tree = &inode->io_tree; struct btrfs_trans_handle *trans = NULL; const bool new_inline = !page || create; @@ -6856,7 +6868,8 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, goto not_found_em; } - btrfs_extent_item_to_extent_map(inode, path, item, new_inline, em); + btrfs_extent_item_to_extent_map(inode, path, item, + new_inline, em); if (found_type == BTRFS_FILE_EXTENT_REG || found_type == BTRFS_FILE_EXTENT_PREALLOC) { @@ -6992,7 +7005,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, write_unlock(&em_tree->lock); out: - trace_btrfs_get_extent(root, BTRFS_I(inode), em); + trace_btrfs_get_extent(root, inode, em); btrfs_free_path(path); if (trans) { @@ -7008,9 +7021,10 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, return em; } -struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page, - size_t pg_offset, u64 start, u64 len, - int create) +struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode, + struct page *page, + size_t pg_offset, u64 start, u64 len, + int create) { struct extent_map *em; struct extent_map *hole_em = NULL; @@ -7047,7 +7061,7 @@ struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *pag em = NULL; /* ok, we didn't find anything, lets look for delalloc */ - found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start, + found = count_range_bits(&inode->io_tree, &range_start, end, len, EXTENT_DELALLOC, 1); found_end = range_start + found; if (found_end < range_start) @@ -7162,7 +7176,7 @@ static struct extent_map *btrfs_create_dio_extent(struct inode *inode, if (ret) { if (em) { free_extent_map(em); - btrfs_drop_extent_cache(inode, start, + btrfs_drop_extent_cache(BTRFS_I(inode), start, start + len - 1, 0); } em = ERR_PTR(ret); @@ -7423,7 +7437,7 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend, * doing DIO to, so we need to make sure there's no ordered * extents in this range. 
*/ - ordered = btrfs_lookup_ordered_range(inode, lockstart, + ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart, lockend - lockstart + 1); /* @@ -7529,7 +7543,7 @@ static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len, } do { - btrfs_drop_extent_cache(inode, em->start, + btrfs_drop_extent_cache(BTRFS_I(inode), em->start, em->start + em->len - 1, 0); write_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em, 1); @@ -7617,7 +7631,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, goto err; } - em = btrfs_get_extent(inode, NULL, 0, start, len, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0); if (IS_ERR(em)) { ret = PTR_ERR(em); goto unlock_err; @@ -7854,7 +7868,7 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio, ret = btrfs_check_dio_repairable(inode, failed_bio, failrec, failed_mirror); if (!ret) { - free_io_failure(inode, failrec); + free_io_failure(BTRFS_I(inode), failrec); return -EIO; } @@ -7868,7 +7882,7 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio, bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page, pgoff, isector, repair_endio, repair_arg); if (!bio) { - free_io_failure(inode, failrec); + free_io_failure(BTRFS_I(inode), failrec); return -EIO; } bio_set_op_attrs(bio, REQ_OP_READ, read_mode); @@ -7879,7 +7893,7 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio, ret = submit_dio_repair_bio(inode, bio, failrec->this_mirror); if (ret) { - free_io_failure(inode, failrec); + free_io_failure(BTRFS_I(inode), failrec); bio_put(bio); } @@ -7909,7 +7923,7 @@ static void btrfs_retry_endio_nocsum(struct bio *bio) done->uptodate = 1; bio_for_each_segment_all(bvec, bio, i) - clean_io_failure(done->inode, done->start, bvec->bv_page, 0); + clean_io_failure(BTRFS_I(done->inode), done->start, bvec->bv_page, 0); end: complete(&done->done); bio_put(bio); @@ -7995,7 +8009,7 @@ static void btrfs_retry_endio(struct bio *bio) bvec->bv_page, bvec->bv_offset, done->start, bvec->bv_len); if (!ret) - clean_io_failure(done->inode, done->start, + clean_io_failure(BTRFS_I(done->inode), done->start, bvec->bv_page, bvec->bv_offset); else uptodate = 0; @@ -8796,7 +8810,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset, lock_extent_bits(tree, page_start, page_end, &cached_state); again: start = page_start; - ordered = btrfs_lookup_ordered_range(inode, start, + ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start, page_end - start + 1); if (ordered) { end = min(page_end, ordered->file_offset + ordered->len - 1); @@ -8962,7 +8976,8 @@ int btrfs_page_mkwrite(struct vm_fault *vmf) * we can't set the delalloc bits if there are pending ordered * extents. 
Drop our locks and wait for them to finish */ - ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE); + ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start, + PAGE_SIZE); if (ordered) { unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS); @@ -9160,7 +9175,7 @@ static int btrfs_truncate(struct inode *inode) if (ret == 0 && inode->i_nlink > 0) { trans->block_rsv = root->orphan_block_rsv; - ret = btrfs_orphan_del(trans, inode); + ret = btrfs_orphan_del(trans, BTRFS_I(inode)); if (ret) err = ret; } @@ -9205,7 +9220,7 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, inode->i_fop = &btrfs_dir_file_operations; set_nlink(inode, 1); - btrfs_i_size_write(inode, 0); + btrfs_i_size_write(BTRFS_I(inode), 0); unlock_new_inode(inode); err = btrfs_subvol_inherit_props(trans, new_root, parent_root); @@ -9278,7 +9293,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb) #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS void btrfs_test_destroy_inode(struct inode *inode) { - btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); + btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0); kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); } #endif @@ -9333,7 +9348,7 @@ void btrfs_destroy_inode(struct inode *inode) } btrfs_qgroup_check_reserved_leak(inode); inode_tree_del(inode); - btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); + btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0); free: call_rcu(&inode->i_rcu, btrfs_i_callback); } @@ -9412,11 +9427,11 @@ int btrfs_init_cachep(void) return -ENOMEM; } -static int btrfs_getattr(struct vfsmount *mnt, - struct dentry *dentry, struct kstat *stat) +static int btrfs_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { u64 delalloc_bytes; - struct inode *inode = d_inode(dentry); + struct inode *inode = d_inode(path->dentry); u32 blocksize = inode->i_sb->s_blocksize; generic_fillattr(inode, stat); @@ -9480,10 +9495,10 @@ static int btrfs_rename_exchange(struct inode *old_dir, * We need to find a free sequence number both in the source and * in the destination directory for the exchange. 
*/ - ret = btrfs_set_inode_index(new_dir, &old_idx); + ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx); if (ret) goto out_fail; - ret = btrfs_set_inode_index(old_dir, &new_idx); + ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx); if (ret) goto out_fail; @@ -9581,7 +9596,7 @@ static int btrfs_rename_exchange(struct inode *old_dir, goto out_fail; } - ret = btrfs_add_link(trans, new_dir, old_inode, + ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode), new_dentry->d_name.name, new_dentry->d_name.len, 0, old_idx); if (ret) { @@ -9589,7 +9604,7 @@ static int btrfs_rename_exchange(struct inode *old_dir, goto out_fail; } - ret = btrfs_add_link(trans, old_dir, new_inode, + ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode), old_dentry->d_name.name, old_dentry->d_name.len, 0, new_idx); if (ret) { @@ -9691,8 +9706,8 @@ static int btrfs_whiteout_for_rename(struct btrfs_trans_handle *trans, if (ret) goto out; - ret = btrfs_add_nondir(trans, dir, dentry, - inode, 0, index); + ret = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, + BTRFS_I(inode), 0, index); if (ret) goto out; @@ -9791,7 +9806,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, if (dest != root) btrfs_record_root_in_trans(trans, dest); - ret = btrfs_set_inode_index(new_dir, &index); + ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index); if (ret) goto out_fail; @@ -9858,14 +9873,15 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, new_dentry->d_name.len); } if (!ret && new_inode->i_nlink == 0) - ret = btrfs_orphan_add(trans, d_inode(new_dentry)); + ret = btrfs_orphan_add(trans, + BTRFS_I(d_inode(new_dentry))); if (ret) { btrfs_abort_transaction(trans, ret); goto out_fail; } } - ret = btrfs_add_link(trans, new_dir, old_inode, + ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode), new_dentry->d_name.name, new_dentry->d_name.len, 0, index); if (ret) { @@ -10232,7 +10248,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, inode_nohighmem(inode); inode->i_mapping->a_ops = &btrfs_symlink_aops; inode_set_bytes(inode, name_len); - btrfs_i_size_write(inode, name_len); + btrfs_i_size_write(BTRFS_I(inode), name_len); err = btrfs_update_inode(trans, root, inode); /* * Last step, add directory indexes for our symlink inode. This is the @@ -10240,7 +10256,8 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, * elsewhere above. 
*/ if (!err) - err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); + err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, + BTRFS_I(inode), 0, index); if (err) { drop_inode = 1; goto out_unlock_inode; @@ -10326,7 +10343,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode, break; } - btrfs_drop_extent_cache(inode, cur_offset, + btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset, cur_offset + ins.offset -1, 0); em = alloc_extent_map(); @@ -10353,7 +10370,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode, write_unlock(&em_tree->lock); if (ret != -EEXIST) break; - btrfs_drop_extent_cache(inode, cur_offset, + btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset, cur_offset + ins.offset - 1, 0); } @@ -10475,7 +10492,7 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) ret = btrfs_update_inode(trans, root, inode); if (ret) goto out_inode; - ret = btrfs_orphan_add(trans, inode); + ret = btrfs_orphan_add(trans, BTRFS_I(inode)); if (ret) goto out_inode; @@ -10505,6 +10522,12 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) } +__attribute__((const)) +static int btrfs_readpage_io_failed_hook(struct page *page, int failed_mirror) +{ + return -EAGAIN; +} + static const struct inode_operations btrfs_dir_inode_operations = { .getattr = btrfs_getattr, .lookup = btrfs_lookup, @@ -10543,10 +10566,14 @@ static const struct file_operations btrfs_dir_file_operations = { }; static const struct extent_io_ops btrfs_extent_io_ops = { - .fill_delalloc = run_delalloc_range, + /* mandatory callbacks */ .submit_bio_hook = btrfs_submit_bio_hook, - .merge_bio_hook = btrfs_merge_bio_hook, .readpage_end_io_hook = btrfs_readpage_end_io_hook, + .merge_bio_hook = btrfs_merge_bio_hook, + .readpage_io_failed_hook = btrfs_readpage_io_failed_hook, + + /* optional callbacks */ + .fill_delalloc = run_delalloc_range, .writepage_end_io_hook = btrfs_writepage_end_io_hook, .writepage_start_hook = btrfs_writepage_start_hook, .set_bit_hook = btrfs_set_bit_hook, diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index d8539979b44ff2..dabfc7ac48a674 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -434,7 +434,7 @@ int btrfs_is_empty_uuid(u8 *uuid) static noinline int create_subvol(struct inode *dir, struct dentry *dentry, - char *name, int namelen, + const char *name, int namelen, u64 *async_transid, struct btrfs_qgroup_inherit *inherit) { @@ -580,21 +580,21 @@ static noinline int create_subvol(struct inode *dir, /* * insert the directory item */ - ret = btrfs_set_inode_index(dir, &index); + ret = btrfs_set_inode_index(BTRFS_I(dir), &index); if (ret) { btrfs_abort_transaction(trans, ret); goto fail; } ret = btrfs_insert_dir_item(trans, root, - name, namelen, dir, &key, + name, namelen, BTRFS_I(dir), &key, BTRFS_FT_DIR, index); if (ret) { btrfs_abort_transaction(trans, ret); goto fail; } - btrfs_i_size_write(dir, dir->i_size + namelen * 2); + btrfs_i_size_write(BTRFS_I(dir), dir->i_size + namelen * 2); ret = btrfs_update_inode(trans, root, dir); BUG_ON(ret); @@ -832,7 +832,7 @@ static inline int btrfs_may_create(struct inode *dir, struct dentry *child) * inside this filesystem so it's quite a bit simpler. 
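
The extent_io_ops table above is regrouped into mandatory callbacks (always present, called without a NULL check) and optional ones (NULL-checked by the extent_io code), with a stub readpage_io_failed_hook that simply returns -EAGAIN. The sketch below illustrates that convention generically with hypothetical names; it is not the btrfs struct itself.

    #include <errno.h>
    #include <stddef.h>

    struct io_ops {
            /* mandatory: the caller may invoke these without a NULL check */
            int (*submit)(void *ctx);
            int (*read_failed)(void *ctx, int mirror);

            /* optional: the caller must check for NULL before calling */
            void (*write_done)(void *ctx, int uptodate);
    };

    static int noop_submit(void *ctx) { (void)ctx; return 0; }

    /* A do-nothing failure hook: report -EAGAIN so the generic retry path runs. */
    static int stub_read_failed(void *ctx, int mirror)
    {
            (void)ctx; (void)mirror;
            return -EAGAIN;
    }

    static const struct io_ops example_ops = {
            .submit      = noop_submit,       /* mandatory hooks are always set */
            .read_failed = stub_read_failed,
            .write_done  = NULL,              /* optional hook left unset       */
    };

    static int run_io(const struct io_ops *ops, void *ctx)
    {
            int ret = ops->submit(ctx);             /* mandatory, no check    */
            if (ret)
                    ret = ops->read_failed(ctx, 0); /* mandatory, no check    */
            if (ops->write_done)                    /* optional, NULL-checked */
                    ops->write_done(ctx, ret == 0);
            return ret;
    }
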
*/ static noinline int btrfs_mksubvol(const struct path *parent, - char *name, int namelen, + const char *name, int namelen, struct btrfs_root *snap_src, u64 *async_transid, bool readonly, struct btrfs_qgroup_inherit *inherit) @@ -1009,7 +1009,7 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start) /* get the big lock and read metadata off disk */ lock_extent_bits(io_tree, start, end, &cached); - em = btrfs_get_extent(inode, NULL, 0, start, len, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0); unlock_extent_cached(io_tree, start, end, &cached, GFP_NOFS); if (IS_ERR(em)) @@ -1625,7 +1625,7 @@ static noinline int btrfs_ioctl_resize(struct file *file, } static noinline int btrfs_ioctl_snap_create_transid(struct file *file, - char *name, unsigned long fd, int subvol, + const char *name, unsigned long fd, int subvol, u64 *transid, bool readonly, struct btrfs_qgroup_inherit *inherit) { @@ -3298,7 +3298,7 @@ static int clone_finish_inode_update(struct btrfs_trans_handle *trans, if (endoff > destoff + olen) endoff = destoff + olen; if (endoff > inode->i_size) - btrfs_i_size_write(inode, endoff); + btrfs_i_size_write(BTRFS_I(inode), endoff); ret = btrfs_update_inode(trans, root, inode); if (ret) { @@ -3311,20 +3311,19 @@ static int clone_finish_inode_update(struct btrfs_trans_handle *trans, return ret; } -static void clone_update_extent_map(struct inode *inode, +static void clone_update_extent_map(struct btrfs_inode *inode, const struct btrfs_trans_handle *trans, const struct btrfs_path *path, const u64 hole_offset, const u64 hole_len) { - struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; + struct extent_map_tree *em_tree = &inode->extent_tree; struct extent_map *em; int ret; em = alloc_extent_map(); if (!em) { - set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, - &BTRFS_I(inode)->runtime_flags); + set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags); return; } @@ -3338,7 +3337,7 @@ static void clone_update_extent_map(struct inode *inode, if (btrfs_file_extent_type(path->nodes[0], fi) == BTRFS_FILE_EXTENT_INLINE) set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, - &BTRFS_I(inode)->runtime_flags); + &inode->runtime_flags); } else { em->start = hole_offset; em->len = hole_len; @@ -3364,8 +3363,7 @@ static void clone_update_extent_map(struct inode *inode, } if (ret) - set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, - &BTRFS_I(inode)->runtime_flags); + set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags); } /* @@ -3791,11 +3789,12 @@ static int btrfs_clone(struct inode *src, struct inode *inode, /* If we have an implicit hole (NO_HOLES feature). 
*/ if (drop_start < new_key.offset) - clone_update_extent_map(inode, trans, + clone_update_extent_map(BTRFS_I(inode), trans, NULL, drop_start, new_key.offset - drop_start); - clone_update_extent_map(inode, trans, path, 0, 0); + clone_update_extent_map(BTRFS_I(inode), trans, + path, 0, 0); btrfs_mark_buffer_dirty(leaf); btrfs_release_path(path); @@ -3845,8 +3844,9 @@ static int btrfs_clone(struct inode *src, struct inode *inode, btrfs_end_transaction(trans); goto out; } - clone_update_extent_map(inode, trans, NULL, last_dest_end, - destoff + len - last_dest_end); + clone_update_extent_map(BTRFS_I(inode), trans, NULL, + last_dest_end, + destoff + len - last_dest_end); ret = clone_finish_inode_update(trans, inode, destoff + len, destoff, olen, no_time_update); } diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c index 45d26980caf97d..f48c8c14dc144b 100644 --- a/fs/btrfs/lzo.c +++ b/fs/btrfs/lzo.c @@ -76,7 +76,7 @@ static inline void write_compress_length(char *buf, size_t len) memcpy(buf, &dlen, LZO_LEN); } -static inline size_t read_compress_length(char *buf) +static inline size_t read_compress_length(const char *buf) { __le32 dlen; @@ -86,13 +86,11 @@ static inline size_t read_compress_length(char *buf) static int lzo_compress_pages(struct list_head *ws, struct address_space *mapping, - u64 start, unsigned long len, + u64 start, struct page **pages, - unsigned long nr_dest_pages, unsigned long *out_pages, unsigned long *total_in, - unsigned long *total_out, - unsigned long max_out) + unsigned long *total_out) { struct workspace *workspace = list_entry(ws, struct workspace, list); int ret = 0; @@ -102,7 +100,9 @@ static int lzo_compress_pages(struct list_head *ws, struct page *in_page = NULL; struct page *out_page = NULL; unsigned long bytes_left; - + unsigned long len = *total_out; + unsigned long nr_dest_pages = *out_pages; + const unsigned long max_out = nr_dest_pages * PAGE_SIZE; size_t in_len; size_t out_len; char *buf; diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index bc2aba8106293c..9a46878ba60fa9 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -879,15 +879,14 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode, /* Since the DIO code tries to lock a wide area we need to look for any ordered * extents that exist in the range, rather than just the start of the range. 
*/ -struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode, - u64 file_offset, - u64 len) +struct btrfs_ordered_extent *btrfs_lookup_ordered_range( + struct btrfs_inode *inode, u64 file_offset, u64 len) { struct btrfs_ordered_inode_tree *tree; struct rb_node *node; struct btrfs_ordered_extent *entry = NULL; - tree = &BTRFS_I(inode)->ordered_tree; + tree = &inode->ordered_tree; spin_lock_irq(&tree->lock); node = tree_search(tree, file_offset); if (!node) { @@ -923,7 +922,7 @@ bool btrfs_have_ordered_extents_in_range(struct inode *inode, { struct btrfs_ordered_extent *oe; - oe = btrfs_lookup_ordered_range(inode, file_offset, len); + oe = btrfs_lookup_ordered_range(BTRFS_I(inode), file_offset, len); if (oe) { btrfs_put_ordered_extent(oe); return true; diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index a8cb8efe6fae50..195c93b67fe002 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -189,9 +189,10 @@ void btrfs_start_ordered_extent(struct inode *inode, int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len); struct btrfs_ordered_extent * btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset); -struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode, - u64 file_offset, - u64 len); +struct btrfs_ordered_extent *btrfs_lookup_ordered_range( + struct btrfs_inode *inode, + u64 file_offset, + u64 len); bool btrfs_have_ordered_extents_in_range(struct inode *inode, u64 file_offset, u64 len); diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index a5da750c1087fd..a59801dc2a340b 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -2948,20 +2948,20 @@ int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes, ret = qgroup_reserve(root, num_bytes, enforce); if (ret < 0) return ret; - atomic_add(num_bytes, &root->qgroup_meta_rsv); + atomic64_add(num_bytes, &root->qgroup_meta_rsv); return ret; } void btrfs_qgroup_free_meta_all(struct btrfs_root *root) { struct btrfs_fs_info *fs_info = root->fs_info; - int reserved; + u64 reserved; if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) || !is_fstree(root->objectid)) return; - reserved = atomic_xchg(&root->qgroup_meta_rsv, 0); + reserved = atomic64_xchg(&root->qgroup_meta_rsv, 0); if (reserved == 0) return; btrfs_qgroup_free_refroot(fs_info, root->objectid, reserved); @@ -2976,8 +2976,8 @@ void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes) return; BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize)); - WARN_ON(atomic_read(&root->qgroup_meta_rsv) < num_bytes); - atomic_sub(num_bytes, &root->qgroup_meta_rsv); + WARN_ON(atomic64_read(&root->qgroup_meta_rsv) < num_bytes); + atomic64_sub(num_bytes, &root->qgroup_meta_rsv); btrfs_qgroup_free_refroot(fs_info, root->objectid, num_bytes); } diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index ddbde0f0836537..d60df51959f7ab 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -1714,8 +1714,8 @@ int replace_file_extents(struct btrfs_trans_handle *trans, if (!ret) continue; - btrfs_drop_extent_cache(inode, key.offset, end, - 1); + btrfs_drop_extent_cache(BTRFS_I(inode), + key.offset, end, 1); unlock_extent(&BTRFS_I(inode)->io_tree, key.offset, end); } @@ -2130,7 +2130,7 @@ static int invalidate_extent_cache(struct btrfs_root *root, /* the lock_extent waits for readpage to complete */ lock_extent(&BTRFS_I(inode)->io_tree, start, end); - btrfs_drop_extent_cache(inode, start, end, 1); + btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 1); 
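
The qgroup.c hunk above widens qgroup_meta_rsv from a 32-bit atomic to a 64-bit one, since it accumulates a byte count that can overflow an int under heavy metadata reservation. The following userspace sketch shows the same add/subtract/exchange pattern using C11 atomics; the names and helpers are illustrative stand-ins, not the kernel's atomic64_t API.

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic uint64_t meta_rsv;   /* bytes currently reserved */

    static void reserve_meta(uint64_t bytes)
    {
            atomic_fetch_add(&meta_rsv, bytes);
    }

    static void free_meta(uint64_t bytes)
    {
            atomic_fetch_sub(&meta_rsv, bytes);
    }

    /* Drop everything at once and return how much was held. */
    static uint64_t free_meta_all(void)
    {
            return atomic_exchange(&meta_rsv, 0);
    }

    int main(void)
    {
            reserve_meta(16384);
            reserve_meta(3ULL << 32);        /* would wrap a 32-bit counter */
            free_meta(16384);
            printf("released %llu bytes\n",
                   (unsigned long long)free_meta_all());
            return 0;
    }
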
unlock_extent(&BTRFS_I(inode)->io_tree, start, end); } return 0; @@ -3161,7 +3161,7 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end, free_extent_map(em); break; } - btrfs_drop_extent_cache(inode, start, end, 0); + btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0); } unlock_extent(&BTRFS_I(inode)->io_tree, start, end); return ret; @@ -3203,7 +3203,8 @@ static int relocate_file_extent_cluster(struct inode *inode, index = (cluster->start - offset) >> PAGE_SHIFT; last_index = (cluster->end - offset) >> PAGE_SHIFT; while (index <= last_index) { - ret = btrfs_delalloc_reserve_metadata(inode, PAGE_SIZE); + ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), + PAGE_SIZE); if (ret) goto out; @@ -3215,7 +3216,7 @@ static int relocate_file_extent_cluster(struct inode *inode, page = find_or_create_page(inode->i_mapping, index, mask); if (!page) { - btrfs_delalloc_release_metadata(inode, + btrfs_delalloc_release_metadata(BTRFS_I(inode), PAGE_SIZE); ret = -ENOMEM; goto out; @@ -3234,7 +3235,7 @@ static int relocate_file_extent_cluster(struct inode *inode, if (!PageUptodate(page)) { unlock_page(page); put_page(page); - btrfs_delalloc_release_metadata(inode, + btrfs_delalloc_release_metadata(BTRFS_I(inode), PAGE_SIZE); ret = -EIO; goto out; @@ -4245,7 +4246,7 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info, BUG_ON(IS_ERR(inode) || is_bad_inode(inode)); BTRFS_I(inode)->index_cnt = group->key.objectid; - err = btrfs_orphan_add(trans, inode); + err = btrfs_orphan_add(trans, BTRFS_I(inode)); out: btrfs_end_transaction(trans); btrfs_btree_balance_dirty(fs_info); diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index ff9a11c39f5e1d..b0251eb1239fce 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -731,7 +731,7 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx) ret = -EIO; goto out; } - ret = repair_io_failure(inode, offset, PAGE_SIZE, + ret = repair_io_failure(BTRFS_I(inode), offset, PAGE_SIZE, fixup->logical, page, offset - page_offset(page), fixup->mirror_num); @@ -4236,7 +4236,7 @@ static void copy_nocow_pages_worker(struct btrfs_work *work) scrub_pending_trans_workers_dec(sctx); } -static int check_extent_to_block(struct inode *inode, u64 start, u64 len, +static int check_extent_to_block(struct btrfs_inode *inode, u64 start, u64 len, u64 logical) { struct extent_state *cached_state = NULL; @@ -4246,7 +4246,7 @@ static int check_extent_to_block(struct inode *inode, u64 start, u64 len, u64 lockstart = start, lockend = start + len - 1; int ret = 0; - io_tree = &BTRFS_I(inode)->io_tree; + io_tree = &inode->io_tree; lock_extent_bits(io_tree, lockstart, lockend, &cached_state); ordered = btrfs_lookup_ordered_range(inode, lockstart, len); @@ -4325,7 +4325,8 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, io_tree = &BTRFS_I(inode)->io_tree; nocow_ctx_logical = nocow_ctx->logical; - ret = check_extent_to_block(inode, offset, len, nocow_ctx_logical); + ret = check_extent_to_block(BTRFS_I(inode), offset, len, + nocow_ctx_logical); if (ret) { ret = ret > 0 ? 0 : ret; goto out; @@ -4372,7 +4373,7 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, } } - ret = check_extent_to_block(inode, offset, len, + ret = check_extent_to_block(BTRFS_I(inode), offset, len, nocow_ctx_logical); if (ret) { ret = ret > 0 ? 
0 : ret; diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index d145ce80462021..a60d5bfb8a49e2 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -1681,6 +1681,9 @@ static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen) { int ret; + if (ino == BTRFS_FIRST_FREE_OBJECTID) + return 1; + ret = get_cur_inode_state(sctx, ino, gen); if (ret < 0) goto out; @@ -1866,7 +1869,7 @@ static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen, * not deleted and then re-created, if it was then we have no overwrite * and we can just unlink this entry. */ - if (sctx->parent_root) { + if (sctx->parent_root && dir != BTRFS_FIRST_FREE_OBJECTID) { ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL, NULL, NULL, NULL); if (ret < 0 && ret != -ENOENT) @@ -1934,6 +1937,19 @@ static int did_overwrite_ref(struct send_ctx *sctx, if (ret <= 0) goto out; + if (dir != BTRFS_FIRST_FREE_OBJECTID) { + ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL, + NULL, NULL, NULL); + if (ret < 0 && ret != -ENOENT) + goto out; + if (ret) { + ret = 0; + goto out; + } + if (gen != dir_gen) + goto out; + } + /* check if the ref was overwritten by another ref */ ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len, &ow_inode, &other_type); @@ -3556,6 +3572,7 @@ static int wait_for_parent_move(struct send_ctx *sctx, { int ret = 0; u64 ino = parent_ref->dir; + u64 ino_gen = parent_ref->dir_gen; u64 parent_ino_before, parent_ino_after; struct fs_path *path_before = NULL; struct fs_path *path_after = NULL; @@ -3576,6 +3593,8 @@ static int wait_for_parent_move(struct send_ctx *sctx, * at get_cur_path()). */ while (ino > BTRFS_FIRST_FREE_OBJECTID) { + u64 parent_ino_after_gen; + if (is_waiting_for_move(sctx, ino)) { /* * If the current inode is an ancestor of ino in the @@ -3598,7 +3617,7 @@ static int wait_for_parent_move(struct send_ctx *sctx, fs_path_reset(path_after); ret = get_first_ref(sctx->send_root, ino, &parent_ino_after, - NULL, path_after); + &parent_ino_after_gen, path_after); if (ret < 0) goto out; ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before, @@ -3615,10 +3634,20 @@ static int wait_for_parent_move(struct send_ctx *sctx, if (ino > sctx->cur_ino && (parent_ino_before != parent_ino_after || len1 != len2 || memcmp(path_before->start, path_after->start, len1))) { - ret = 1; - break; + u64 parent_ino_gen; + + ret = get_inode_info(sctx->parent_root, ino, NULL, + &parent_ino_gen, NULL, NULL, NULL, + NULL); + if (ret < 0) + goto out; + if (ino_gen == parent_ino_gen) { + ret = 1; + break; + } } ino = parent_ino_after; + ino_gen = parent_ino_after_gen; } out: @@ -5277,6 +5306,81 @@ static int get_last_extent(struct send_ctx *sctx, u64 offset) return ret; } +static int range_is_hole_in_parent(struct send_ctx *sctx, + const u64 start, + const u64 end) +{ + struct btrfs_path *path; + struct btrfs_key key; + struct btrfs_root *root = sctx->parent_root; + u64 search_start = start; + int ret; + + path = alloc_path_for_send(); + if (!path) + return -ENOMEM; + + key.objectid = sctx->cur_ino; + key.type = BTRFS_EXTENT_DATA_KEY; + key.offset = search_start; + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) + goto out; + if (ret > 0 && path->slots[0] > 0) + path->slots[0]--; + + while (search_start < end) { + struct extent_buffer *leaf = path->nodes[0]; + int slot = path->slots[0]; + struct btrfs_file_extent_item *fi; + u64 extent_end; + + if (slot >= btrfs_header_nritems(leaf)) { + ret = btrfs_next_leaf(root, path); + if (ret < 0) + goto out; + else 
if (ret > 0) + break; + continue; + } + + btrfs_item_key_to_cpu(leaf, &key, slot); + if (key.objectid < sctx->cur_ino || + key.type < BTRFS_EXTENT_DATA_KEY) + goto next; + if (key.objectid > sctx->cur_ino || + key.type > BTRFS_EXTENT_DATA_KEY || + key.offset >= end) + break; + + fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); + if (btrfs_file_extent_type(leaf, fi) == + BTRFS_FILE_EXTENT_INLINE) { + u64 size = btrfs_file_extent_inline_len(leaf, slot, fi); + + extent_end = ALIGN(key.offset + size, + root->fs_info->sectorsize); + } else { + extent_end = key.offset + + btrfs_file_extent_num_bytes(leaf, fi); + } + if (extent_end <= start) + goto next; + if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) { + search_start = extent_end; + goto next; + } + ret = 0; + goto out; +next: + path->slots[0]++; + } + ret = 1; +out: + btrfs_free_path(path); + return ret; +} + static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path, struct btrfs_key *key) { @@ -5321,8 +5425,17 @@ static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path, return ret; } - if (sctx->cur_inode_last_extent < key->offset) - ret = send_hole(sctx, key->offset); + if (sctx->cur_inode_last_extent < key->offset) { + ret = range_is_hole_in_parent(sctx, + sctx->cur_inode_last_extent, + key->offset); + if (ret < 0) + return ret; + else if (ret == 0) + ret = send_hole(sctx, key->offset); + else + ret = 0; + } sctx->cur_inode_last_extent = extent_end; return ret; } @@ -6192,8 +6305,13 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_) goto out; } + /* + * Check that we don't overflow at later allocations, we request + * clone_sources_count + 1 items, and compare to unsigned long inside + * access_ok. + */ if (arg->clone_sources_count > - ULLONG_MAX / sizeof(*arg->clone_sources)) { + ULONG_MAX / sizeof(struct clone_root) - 1) { ret = -EINVAL; goto out; } diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c index 4d0f038e14f1f7..8c91d03cc82d8a 100644 --- a/fs/btrfs/tests/inode-tests.c +++ b/fs/btrfs/tests/inode-tests.c @@ -278,7 +278,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) /* First with no extents */ BTRFS_I(inode)->root = root; - em = btrfs_get_extent(inode, NULL, 0, 0, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, 0, sectorsize, 0); if (IS_ERR(em)) { em = NULL; test_msg("Got an error when we shouldn't have\n"); @@ -293,7 +293,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) goto out; } free_extent_map(em); - btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); + btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0); /* * All of the magic numbers are based on the mapping setup in @@ -302,7 +302,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) */ setup_file_extents(root, sectorsize); - em = btrfs_get_extent(inode, NULL, 0, 0, (u64)-1, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, 0, (u64)-1, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -323,7 +323,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -350,7 +350,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; 
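
The btrfs_ioctl_send() hunk above tightens the clone_sources_count check: the code later needs room for clone_sources_count + 1 entries and access_ok() compares against an unsigned long, so the bound becomes ULONG_MAX divided by the element size, minus one, rather than ULLONG_MAX divided by the element size. A small standalone check of the same arithmetic, using a hypothetical element type:

    #include <limits.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct elem { unsigned long long a, b, c; };   /* hypothetical element */

    /* Reject counts where (count + 1) * sizeof(struct elem) would not fit in
     * an unsigned long, since that product is what gets allocated/validated:
     * count <= ULONG_MAX/size - 1  implies  (count + 1) * size <= ULONG_MAX. */
    static bool count_fits(unsigned long long count)
    {
            return count <= ULONG_MAX / sizeof(struct elem) - 1;
    }
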
free_extent_map(em); - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -372,7 +372,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) free_extent_map(em); /* Regular extent */ - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -399,7 +399,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) free_extent_map(em); /* The next 3 are split extents */ - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -428,7 +428,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -450,7 +450,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -484,7 +484,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) free_extent_map(em); /* Prealloc extent */ - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -513,7 +513,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) free_extent_map(em); /* The next 3 are a half written prealloc extent */ - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -543,7 +543,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -576,7 +576,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -611,7 +611,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) free_extent_map(em); /* Now for the compressed extent */ - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -645,7 +645,7 @@ static noinline int test_btrfs_get_extent(u32 
sectorsize, u32 nodesize) free_extent_map(em); /* Split compressed extent */ - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -680,7 +680,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -707,7 +707,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -742,7 +742,8 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) free_extent_map(em); /* A hole between regular extents but no hole extent */ - em = btrfs_get_extent(inode, NULL, 0, offset + 6, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset + 6, + sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -769,7 +770,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(inode, NULL, 0, offset, 4096 * 1024, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, 4096 * 1024, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -802,7 +803,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -885,7 +886,7 @@ static int test_hole_first(u32 sectorsize, u32 nodesize) insert_inode_item_key(root); insert_extent(root, sectorsize, sectorsize, sectorsize, 0, sectorsize, sectorsize, BTRFS_FILE_EXTENT_REG, 0, 1); - em = btrfs_get_extent(inode, NULL, 0, 0, 2 * sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, 0, 2 * sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -907,7 +908,8 @@ static int test_hole_first(u32 sectorsize, u32 nodesize) } free_extent_map(em); - em = btrfs_get_extent(inode, NULL, 0, sectorsize, 2 * sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, sectorsize, + 2 * sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 6b3e0fc2fe7ac2..61b807de3e164e 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -1505,7 +1505,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, /* * insert the directory item */ - ret = btrfs_set_inode_index(parent_inode, &index); + ret = btrfs_set_inode_index(BTRFS_I(parent_inode), &index); BUG_ON(ret); /* -ENOMEM */ /* check if there is a file/dir which has the same name. 
*/ @@ -1644,7 +1644,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, ret = btrfs_insert_dir_item(trans, parent_root, dentry->d_name.name, dentry->d_name.len, - parent_inode, &key, + BTRFS_I(parent_inode), &key, BTRFS_FT_DIR, index); /* We have check then name at the beginning, so it is impossible. */ BUG_ON(ret == -EEXIST || ret == -EOVERFLOW); @@ -1653,7 +1653,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, goto fail; } - btrfs_i_size_write(parent_inode, parent_inode->i_size + + btrfs_i_size_write(BTRFS_I(parent_inode), parent_inode->i_size + dentry->d_name.len * 2); parent_inode->i_mtime = parent_inode->i_ctime = current_time(parent_inode); diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 3806853cde08d8..a59674c3e69efb 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -673,6 +673,10 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, unsigned long dest_offset; struct btrfs_key ins; + if (btrfs_file_extent_disk_bytenr(eb, item) == 0 && + btrfs_fs_incompat(fs_info, NO_HOLES)) + goto update_inode; + ret = btrfs_insert_empty_item(trans, root, path, key, sizeof(*item)); if (ret) @@ -825,6 +829,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, } inode_add_bytes(inode, nbytes); +update_inode: ret = btrfs_update_inode(trans, root, inode); out: if (inode) @@ -1322,8 +1327,9 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, } /* insert our name */ - ret = btrfs_add_link(trans, dir, inode, name, namelen, - 0, ref_index); + ret = btrfs_add_link(trans, BTRFS_I(dir), + BTRFS_I(inode), + name, namelen, 0, ref_index); if (ret) goto out; @@ -1641,7 +1647,8 @@ static noinline int insert_one_name(struct btrfs_trans_handle *trans, return -EIO; } - ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index); + ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name, + name_len, 1, index); /* FIXME, put inode into FIXUP list */ @@ -1780,7 +1787,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, out: btrfs_release_path(path); if (!ret && update_size) { - btrfs_i_size_write(dir, dir->i_size + name_len * 2); + btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name_len * 2); ret = btrfs_update_inode(trans, root, dir); } kfree(name); @@ -5045,14 +5052,14 @@ static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans, * a full commit is required. */ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans, - struct inode *inode, + struct btrfs_inode *inode, struct dentry *parent, struct super_block *sb, u64 last_committed) { int ret = 0; struct dentry *old_parent = NULL; - struct inode *orig_inode = inode; + struct btrfs_inode *orig_inode = inode; /* * for regular files, if its inode is already on disk, we don't @@ -5060,15 +5067,15 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans, * we can use the last_unlink_trans field to record renames * and other fun in this file. 
*/ - if (S_ISREG(inode->i_mode) && - BTRFS_I(inode)->generation <= last_committed && - BTRFS_I(inode)->last_unlink_trans <= last_committed) - goto out; + if (S_ISREG(inode->vfs_inode.i_mode) && + inode->generation <= last_committed && + inode->last_unlink_trans <= last_committed) + goto out; - if (!S_ISDIR(inode->i_mode)) { + if (!S_ISDIR(inode->vfs_inode.i_mode)) { if (!parent || d_really_is_negative(parent) || sb != parent->d_sb) goto out; - inode = d_inode(parent); + inode = BTRFS_I(d_inode(parent)); } while (1) { @@ -5079,10 +5086,10 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans, * think this inode has already been logged. */ if (inode != orig_inode) - BTRFS_I(inode)->logged_trans = trans->transid; + inode->logged_trans = trans->transid; smp_mb(); - if (btrfs_must_commit_transaction(trans, BTRFS_I(inode))) { + if (btrfs_must_commit_transaction(trans, inode)) { ret = 1; break; } @@ -5091,8 +5098,8 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans, break; if (IS_ROOT(parent)) { - inode = d_inode(parent); - if (btrfs_must_commit_transaction(trans, BTRFS_I(inode))) + inode = BTRFS_I(d_inode(parent)); + if (btrfs_must_commit_transaction(trans, inode)) ret = 1; break; } @@ -5100,7 +5107,7 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans, parent = dget_parent(parent); dput(old_parent); old_parent = parent; - inode = d_inode(parent); + inode = BTRFS_I(d_inode(parent)); } dput(old_parent); @@ -5287,15 +5294,15 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans, } static int btrfs_log_all_parents(struct btrfs_trans_handle *trans, - struct inode *inode, + struct btrfs_inode *inode, struct btrfs_log_ctx *ctx) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); int ret; struct btrfs_path *path; struct btrfs_key key; - struct btrfs_root *root = BTRFS_I(inode)->root; - const u64 ino = btrfs_ino(BTRFS_I(inode)); + struct btrfs_root *root = inode->root; + const u64 ino = btrfs_ino(inode); path = btrfs_alloc_path(); if (!path) @@ -5390,7 +5397,8 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans, * the last committed transaction */ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, - struct btrfs_root *root, struct inode *inode, + struct btrfs_root *root, + struct btrfs_inode *inode, struct dentry *parent, const loff_t start, const loff_t end, @@ -5404,9 +5412,9 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, int ret = 0; u64 last_committed = fs_info->last_trans_committed; bool log_dentries = false; - struct inode *orig_inode = inode; + struct btrfs_inode *orig_inode = inode; - sb = inode->i_sb; + sb = inode->vfs_inode.i_sb; if (btrfs_test_opt(fs_info, NOTREELOG)) { ret = 1; @@ -5423,18 +5431,17 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, goto end_no_trans; } - if (root != BTRFS_I(inode)->root || - btrfs_root_refs(&root->root_item) == 0) { + if (root != inode->root || btrfs_root_refs(&root->root_item) == 0) { ret = 1; goto end_no_trans; } - ret = check_parent_dirs_for_sync(trans, inode, parent, - sb, last_committed); + ret = check_parent_dirs_for_sync(trans, inode, parent, sb, + last_committed); if (ret) goto end_no_trans; - if (btrfs_inode_in_log(BTRFS_I(inode), trans->transid)) { + if (btrfs_inode_in_log(inode, trans->transid)) { ret = BTRFS_NO_LOG_SYNC; goto end_no_trans; } @@ -5443,8 +5450,7 @@ static int 
btrfs_log_inode_parent(struct btrfs_trans_handle *trans, if (ret) goto end_no_trans; - ret = btrfs_log_inode(trans, root, BTRFS_I(inode), inode_only, - start, end, ctx); + ret = btrfs_log_inode(trans, root, inode, inode_only, start, end, ctx); if (ret) goto end_trans; @@ -5454,14 +5460,14 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, * we can use the last_unlink_trans field to record renames * and other fun in this file. */ - if (S_ISREG(inode->i_mode) && - BTRFS_I(inode)->generation <= last_committed && - BTRFS_I(inode)->last_unlink_trans <= last_committed) { + if (S_ISREG(inode->vfs_inode.i_mode) && + inode->generation <= last_committed && + inode->last_unlink_trans <= last_committed) { ret = 0; goto end_trans; } - if (S_ISDIR(inode->i_mode) && ctx && ctx->log_new_dentries) + if (S_ISDIR(inode->vfs_inode.i_mode) && ctx && ctx->log_new_dentries) log_dentries = true; /* @@ -5505,7 +5511,7 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, * but the file inode does not have a matching BTRFS_INODE_REF_KEY item * and has a link count of 2. */ - if (BTRFS_I(inode)->last_unlink_trans > last_committed) { + if (inode->last_unlink_trans > last_committed) { ret = btrfs_log_all_parents(trans, orig_inode, ctx); if (ret) goto end_trans; @@ -5515,14 +5521,13 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, if (!parent || d_really_is_negative(parent) || sb != parent->d_sb) break; - inode = d_inode(parent); - if (root != BTRFS_I(inode)->root) + inode = BTRFS_I(d_inode(parent)); + if (root != inode->root) break; - if (BTRFS_I(inode)->generation > last_committed) { - ret = btrfs_log_inode(trans, root, BTRFS_I(inode), - LOG_INODE_EXISTS, - 0, LLONG_MAX, ctx); + if (inode->generation > last_committed) { + ret = btrfs_log_inode(trans, root, inode, + LOG_INODE_EXISTS, 0, LLONG_MAX, ctx); if (ret) goto end_trans; } @@ -5534,7 +5539,7 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, old_parent = parent; } if (log_dentries) - ret = log_new_dir_dentries(trans, root, BTRFS_I(orig_inode), ctx); + ret = log_new_dir_dentries(trans, root, orig_inode, ctx); else ret = 0; end_trans: @@ -5566,8 +5571,8 @@ int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans, struct dentry *parent = dget_parent(dentry); int ret; - ret = btrfs_log_inode_parent(trans, root, d_inode(dentry), parent, - start, end, 0, ctx); + ret = btrfs_log_inode_parent(trans, root, BTRFS_I(d_inode(dentry)), + parent, start, end, 0, ctx); dput(parent); return ret; @@ -5829,7 +5834,7 @@ int btrfs_log_new_name(struct btrfs_trans_handle *trans, (!old_dir || old_dir->logged_trans <= fs_info->last_trans_committed)) return 0; - return btrfs_log_inode_parent(trans, root, &inode->vfs_inode, parent, 0, + return btrfs_log_inode_parent(trans, root, inode, parent, 0, LLONG_MAX, 1, NULL); } diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 13e55d13045d03..73d56eef5e60f3 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1725,7 +1725,7 @@ static int btrfs_add_device(struct btrfs_trans_handle *trans, * Function to update ctime/mtime for a given device path. * Mainly used for ctime/mtime based probe like libblkid. 
*/ -static void update_dev_time(char *path_name) +static void update_dev_time(const char *path_name) { struct file *filp; @@ -1851,7 +1851,8 @@ void btrfs_assign_next_active_device(struct btrfs_fs_info *fs_info, fs_info->fs_devices->latest_bdev = next_device->bdev; } -int btrfs_rm_device(struct btrfs_fs_info *fs_info, char *device_path, u64 devid) +int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path, + u64 devid) { struct btrfs_device *device; struct btrfs_fs_devices *cur_devices; @@ -2091,7 +2092,7 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info, } static int btrfs_find_device_by_path(struct btrfs_fs_info *fs_info, - char *device_path, + const char *device_path, struct btrfs_device **device) { int ret = 0; @@ -2118,7 +2119,7 @@ static int btrfs_find_device_by_path(struct btrfs_fs_info *fs_info, } int btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info, - char *device_path, + const char *device_path, struct btrfs_device **device) { *device = NULL; @@ -2151,7 +2152,8 @@ int btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info, * Lookup a device given by device id, or the path if the id is 0. */ int btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info, u64 devid, - char *devpath, struct btrfs_device **device) + const char *devpath, + struct btrfs_device **device) { int ret; @@ -2307,7 +2309,7 @@ static int btrfs_finish_sprout(struct btrfs_trans_handle *trans, return ret; } -int btrfs_init_new_device(struct btrfs_fs_info *fs_info, char *device_path) +int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path) { struct btrfs_root *root = fs_info->dev_root; struct request_queue *q; @@ -2515,7 +2517,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, char *device_path) } int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info, - char *device_path, + const char *device_path, struct btrfs_device *srcdev, struct btrfs_device **device_out) { @@ -6954,7 +6956,8 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans, key.offset = device->devid; path = btrfs_alloc_path(); - BUG_ON(!path); + if (!path) + return -ENOMEM; ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1); if (ret < 0) { btrfs_warn_in_rcu(fs_info, @@ -7102,7 +7105,7 @@ int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info, return 0; } -void btrfs_scratch_superblocks(struct block_device *bdev, char *device_path) +void btrfs_scratch_superblocks(struct block_device *bdev, const char *device_path) { struct buffer_head *bh; struct btrfs_super_block *disk_super; diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 24ba6bc3ec3466..59be81206dd7b9 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -422,16 +422,16 @@ void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices, int step); void btrfs_assign_next_active_device(struct btrfs_fs_info *fs_info, struct btrfs_device *device, struct btrfs_device *this_dev); int btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info, - char *device_path, + const char *device_path, struct btrfs_device **device); int btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info, u64 devid, - char *devpath, + const char *devpath, struct btrfs_device **device); struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info, const u64 *devid, const u8 *uuid); int btrfs_rm_device(struct btrfs_fs_info *fs_info, - char *device_path, u64 devid); + const char *device_path, u64 devid); void btrfs_cleanup_fs_uuids(void); int 
btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len); int btrfs_grow_device(struct btrfs_trans_handle *trans, @@ -439,9 +439,9 @@ int btrfs_grow_device(struct btrfs_trans_handle *trans, struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid, u8 *uuid, u8 *fsid); int btrfs_shrink_device(struct btrfs_device *device, u64 new_size); -int btrfs_init_new_device(struct btrfs_fs_info *fs_info, char *path); +int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *path); int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info, - char *device_path, + const char *device_path, struct btrfs_device *srcdev, struct btrfs_device **device_out); int btrfs_balance(struct btrfs_balance_control *bctl, @@ -474,7 +474,7 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info, struct btrfs_device *tgtdev); void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info, struct btrfs_device *tgtdev); -void btrfs_scratch_superblocks(struct block_device *bdev, char *device_path); +void btrfs_scratch_superblocks(struct block_device *bdev, const char *device_path); int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len, int mirror_num); unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info, diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index da497f184ff4d2..135b10823c6dbd 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -73,13 +73,11 @@ static struct list_head *zlib_alloc_workspace(void) static int zlib_compress_pages(struct list_head *ws, struct address_space *mapping, - u64 start, unsigned long len, + u64 start, struct page **pages, - unsigned long nr_dest_pages, unsigned long *out_pages, unsigned long *total_in, - unsigned long *total_out, - unsigned long max_out) + unsigned long *total_out) { struct workspace *workspace = list_entry(ws, struct workspace, list); int ret; @@ -89,6 +87,9 @@ static int zlib_compress_pages(struct list_head *ws, struct page *in_page = NULL; struct page *out_page = NULL; unsigned long bytes_left; + unsigned long len = *total_out; + unsigned long nr_dest_pages = *out_pages; + const unsigned long max_out = nr_dest_pages * PAGE_SIZE; *out_pages = 0; *total_out = 0; diff --git a/fs/buffer.c b/fs/buffer.c index 28484b3ebc98c7..9196f2a270daac 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -19,6 +19,7 @@ */ #include +#include #include #include #include diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h index cd1effee8a4912..9bf90bcc56acd6 100644 --- a/fs/cachefiles/internal.h +++ b/fs/cachefiles/internal.h @@ -19,6 +19,7 @@ #include #include #include +#include #include #include diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index f297a9e1864293..1a3e1b40799a08 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -8,6 +8,7 @@ #include #include #include +#include #include "super.h" #include "mds_client.h" diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index cd966f276a8d70..68c78be19d5b78 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -2,7 +2,7 @@ #include #include -#include +#include #include #include #include diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index fd8f771f99b7d7..d449e1c03cbd79 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -2187,10 +2187,10 @@ int ceph_permission(struct inode *inode, int mask) * Get all attributes. Hopefully somedata we'll have a statlite() * and can limit the fields we require to be accurate. 
*/ -int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +int ceph_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { - struct inode *inode = d_inode(dentry); + struct inode *inode = d_inode(path->dentry); struct ceph_inode_info *ci = ceph_inode(inode); int err; diff --git a/fs/ceph/super.h b/fs/ceph/super.h index e9410bcf41135b..fe6b9cfc4013e6 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -784,8 +784,8 @@ static inline int ceph_do_getattr(struct inode *inode, int mask, bool force) extern int ceph_permission(struct inode *inode, int mask); extern int __ceph_setattr(struct inode *inode, struct iattr *attr); extern int ceph_setattr(struct dentry *dentry, struct iattr *attr); -extern int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat); +extern int ceph_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags); /* xattr.c */ int __ceph_setxattr(struct inode *, const char *, const void *, size_t, int); diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c index 9156be545b0f10..6b61df117fd48c 100644 --- a/fs/cifs/cifs_dfs_ref.c +++ b/fs/cifs/cifs_dfs_ref.c @@ -303,7 +303,9 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt) * gives us the latter, so we must adjust the result. */ mnt = ERR_PTR(-ENOMEM); - full_path = build_path_from_dentry(mntpt); + + /* always use tree name prefix */ + full_path = build_path_from_dentry_optional_prefix(mntpt, true); if (full_path == NULL) goto cdda_exit; diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h index 479bc0a941f35f..3d7298cc0aeb35 100644 --- a/fs/cifs/cifs_unicode.h +++ b/fs/cifs/cifs_unicode.h @@ -130,10 +130,10 @@ wchar_t cifs_toupper(wchar_t in); * Returns: * Address of the first string */ -static inline wchar_t * -UniStrcat(wchar_t *ucs1, const wchar_t *ucs2) +static inline __le16 * +UniStrcat(__le16 *ucs1, const __le16 *ucs2) { - wchar_t *anchor = ucs1; /* save a pointer to start of ucs1 */ + __le16 *anchor = ucs1; /* save a pointer to start of ucs1 */ while (*ucs1++) ; /* To end of first string */ ucs1--; /* Return to the null */ diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index c9c00a862036f6..da717fee30260b 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -83,7 +83,7 @@ extern int cifs_revalidate_dentry(struct dentry *); extern int cifs_invalidate_mapping(struct inode *inode); extern int cifs_revalidate_mapping(struct inode *inode); extern int cifs_zap_mapping(struct inode *inode); -extern int cifs_getattr(struct vfsmount *, struct dentry *, struct kstat *); +extern int cifs_getattr(const struct path *, struct kstat *, u32, unsigned int); extern int cifs_setattr(struct dentry *, struct iattr *); extern const struct inode_operations cifs_file_inode_ops; diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 1a90bb3e29866c..d42dd328864780 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -443,6 +443,9 @@ struct smb_version_operations { int (*is_transform_hdr)(void *buf); int (*receive_transform)(struct TCP_Server_Info *, struct mid_q_entry **); + enum securityEnum (*select_sectype)(struct TCP_Server_Info *, + enum securityEnum); + }; struct smb_version_values { @@ -822,7 +825,7 @@ struct cifs_ses { int ses_count; /* reference counter */ enum statusEnum status; unsigned overrideSecFlg; /* if non-zero override global sec flags */ - __u16 ipc_tid; /* special tid for connection to IPC share */ + __u32 ipc_tid; /* special tid for connection to 
IPC share */ char *serverOS; /* name of operating system underlying server */ char *serverNOS; /* name of network operating system of server */ char *serverDomain; /* security realm of server */ diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h index f5b87303ce46d5..1ce733f3582f66 100644 --- a/fs/cifs/cifspdu.h +++ b/fs/cifs/cifspdu.h @@ -2086,17 +2086,21 @@ typedef struct dfs_referral_level_3 { /* version 4 is same, + one flag bit */ __u8 ServiceSiteGuid[16]; /* MBZ, ignored */ } __attribute__((packed)) REFERRAL3; -typedef struct smb_com_transaction_get_dfs_refer_rsp { - struct smb_hdr hdr; /* wct = 10 */ - struct trans2_resp t2; - __u16 ByteCount; - __u8 Pad; +struct get_dfs_referral_rsp { __le16 PathConsumed; __le16 NumberOfReferrals; __le32 DFSFlags; REFERRAL3 referrals[1]; /* array of level 3 dfs_referral structures */ /* followed by the strings pointed to by the referral structures */ -} __attribute__((packed)) TRANSACTION2_GET_DFS_REFER_RSP; +} __packed; + +typedef struct smb_com_transaction_get_dfs_refer_rsp { + struct smb_hdr hdr; /* wct = 10 */ + struct trans2_resp t2; + __u16 ByteCount; + __u8 Pad; + struct get_dfs_referral_rsp dfs_data; +} __packed TRANSACTION2_GET_DFS_REFER_RSP; /* DFS Flags */ #define DFSREF_REFERRAL_SERVER 0x00000001 /* all targets are DFS roots */ diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 406d2c10ba78d0..97e5d236d26559 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -61,6 +61,8 @@ extern void exit_cifs_idmap(void); extern int init_cifs_spnego(void); extern void exit_cifs_spnego(void); extern char *build_path_from_dentry(struct dentry *); +extern char *build_path_from_dentry_optional_prefix(struct dentry *direntry, + bool prefix); extern char *cifs_build_path_to_root(struct smb_vol *vol, struct cifs_sb_info *cifs_sb, struct cifs_tcon *tcon, @@ -284,6 +286,11 @@ extern int get_dfs_path(const unsigned int xid, struct cifs_ses *ses, const struct nls_table *nls_codepage, unsigned int *num_referrals, struct dfs_info3_param **referrals, int remap); +extern int parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size, + unsigned int *num_of_nodes, + struct dfs_info3_param **target_nodes, + const struct nls_table *nls_codepage, int remap, + const char *searchName, bool is_unicode); extern void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, struct smb_vol *vol); @@ -526,4 +533,6 @@ int cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon, int __cifs_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server, char *signature, struct shash_desc *shash); +enum securityEnum cifs_select_sectype(struct TCP_Server_Info *, + enum securityEnum); #endif /* _CIFSPROTO_H */ diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index f5099fb8a22f14..06695067192968 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -4786,117 +4786,6 @@ CIFSGetSrvInodeNumber(const unsigned int xid, struct cifs_tcon *tcon, return rc; } -/* parses DFS refferal V3 structure - * caller is responsible for freeing target_nodes - * returns: - * on success - 0 - * on failure - errno - */ -static int -parse_DFS_referrals(TRANSACTION2_GET_DFS_REFER_RSP *pSMBr, - unsigned int *num_of_nodes, - struct dfs_info3_param **target_nodes, - const struct nls_table *nls_codepage, int remap, - const char *searchName) -{ - int i, rc = 0; - char *data_end; - bool is_unicode; - struct dfs_referral_level_3 *ref; - - if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) - is_unicode = true; - else - is_unicode = false; - 
*num_of_nodes = le16_to_cpu(pSMBr->NumberOfReferrals); - - if (*num_of_nodes < 1) { - cifs_dbg(VFS, "num_referrals: must be at least > 0, but we get num_referrals = %d\n", - *num_of_nodes); - rc = -EINVAL; - goto parse_DFS_referrals_exit; - } - - ref = (struct dfs_referral_level_3 *) &(pSMBr->referrals); - if (ref->VersionNumber != cpu_to_le16(3)) { - cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n", - le16_to_cpu(ref->VersionNumber)); - rc = -EINVAL; - goto parse_DFS_referrals_exit; - } - - /* get the upper boundary of the resp buffer */ - data_end = (char *)(&(pSMBr->PathConsumed)) + - le16_to_cpu(pSMBr->t2.DataCount); - - cifs_dbg(FYI, "num_referrals: %d dfs flags: 0x%x ...\n", - *num_of_nodes, le32_to_cpu(pSMBr->DFSFlags)); - - *target_nodes = kcalloc(*num_of_nodes, sizeof(struct dfs_info3_param), - GFP_KERNEL); - if (*target_nodes == NULL) { - rc = -ENOMEM; - goto parse_DFS_referrals_exit; - } - - /* collect necessary data from referrals */ - for (i = 0; i < *num_of_nodes; i++) { - char *temp; - int max_len; - struct dfs_info3_param *node = (*target_nodes)+i; - - node->flags = le32_to_cpu(pSMBr->DFSFlags); - if (is_unicode) { - __le16 *tmp = kmalloc(strlen(searchName)*2 + 2, - GFP_KERNEL); - if (tmp == NULL) { - rc = -ENOMEM; - goto parse_DFS_referrals_exit; - } - cifsConvertToUTF16((__le16 *) tmp, searchName, - PATH_MAX, nls_codepage, remap); - node->path_consumed = cifs_utf16_bytes(tmp, - le16_to_cpu(pSMBr->PathConsumed), - nls_codepage); - kfree(tmp); - } else - node->path_consumed = le16_to_cpu(pSMBr->PathConsumed); - - node->server_type = le16_to_cpu(ref->ServerType); - node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags); - - /* copy DfsPath */ - temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset); - max_len = data_end - temp; - node->path_name = cifs_strndup_from_utf16(temp, max_len, - is_unicode, nls_codepage); - if (!node->path_name) { - rc = -ENOMEM; - goto parse_DFS_referrals_exit; - } - - /* copy link target UNC */ - temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset); - max_len = data_end - temp; - node->node_name = cifs_strndup_from_utf16(temp, max_len, - is_unicode, nls_codepage); - if (!node->node_name) { - rc = -ENOMEM; - goto parse_DFS_referrals_exit; - } - - ref++; - } - -parse_DFS_referrals_exit: - if (rc) { - free_dfs_info_array(*target_nodes, *num_of_nodes); - *target_nodes = NULL; - *num_of_nodes = 0; - } - return rc; -} - int CIFSGetDFSRefer(const unsigned int xid, struct cifs_ses *ses, const char *search_name, struct dfs_info3_param **target_nodes, @@ -4993,9 +4882,11 @@ CIFSGetDFSRefer(const unsigned int xid, struct cifs_ses *ses, get_bcc(&pSMBr->hdr), le16_to_cpu(pSMBr->t2.DataOffset)); /* parse returned result into more usable form */ - rc = parse_DFS_referrals(pSMBr, num_of_nodes, - target_nodes, nls_codepage, remap, - search_name); + rc = parse_dfs_referrals(&pSMBr->dfs_data, + le16_to_cpu(pSMBr->t2.DataCount), + num_of_nodes, target_nodes, nls_codepage, + remap, search_name, + (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) != 0); GetDFSRefExit: cifs_buf_release(pSMB); diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 777ad9f4fc3c84..9ae695ae3ed7be 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -2073,7 +2074,8 @@ match_security(struct TCP_Server_Info *server, struct smb_vol *vol) * that was specified, or "Unspecified" if that sectype was not * compatible with the given NEGOTIATE request. 
*/ - if (select_sectype(server, vol->sectype) == Unspecified) + if (server->ops->select_sectype(server, vol->sectype) + == Unspecified) return false; /* @@ -2455,7 +2457,7 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses) } down_read(&key->sem); - upayload = user_key_payload(key); + upayload = user_key_payload_locked(key); if (IS_ERR_OR_NULL(upayload)) { rc = upayload ? PTR_ERR(upayload) : -EINVAL; goto out_key_put; diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 2c227a99f369f0..56366e9840769d 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -80,6 +80,17 @@ cifs_build_path_to_root(struct smb_vol *vol, struct cifs_sb_info *cifs_sb, /* Note: caller must free return buffer */ char * build_path_from_dentry(struct dentry *direntry) +{ + struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); + struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); + bool prefix = tcon->Flags & SMB_SHARE_IS_IN_DFS; + + return build_path_from_dentry_optional_prefix(direntry, + prefix); +} + +char * +build_path_from_dentry_optional_prefix(struct dentry *direntry, bool prefix) { struct dentry *temp; int namelen; @@ -92,7 +103,7 @@ build_path_from_dentry(struct dentry *direntry) unsigned seq; dirsep = CIFS_DIR_SEP(cifs_sb); - if (tcon->Flags & SMB_SHARE_IS_IN_DFS) + if (prefix) dfsplen = strnlen(tcon->treeName, MAX_TREE_SIZE + 1); else dfsplen = 0; diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 7ab5be7944aa84..b261db34103ce9 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -23,6 +23,8 @@ #include #include #include +#include + #include #include "cifsfs.h" #include "cifspdu.h" @@ -1990,9 +1992,10 @@ int cifs_revalidate_dentry(struct dentry *dentry) return cifs_revalidate_mapping(inode); } -int cifs_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +int cifs_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { + struct dentry *dentry = path->dentry; struct cifs_sb_info *cifs_sb = CIFS_SB(dentry->d_sb); struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); struct inode *inode = d_inode(dentry); diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index c6729156f9a00c..d3fb11529ed960 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -640,3 +640,108 @@ cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink, cifs_add_pending_open_locked(fid, tlink, open); spin_unlock(&tlink_tcon(open->tlink)->open_file_lock); } + +/* parses DFS referral V3 structure + * caller is responsible for freeing target_nodes + * returns: + * - on success - 0 + * - on failure - errno + */ +int +parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size, + unsigned int *num_of_nodes, + struct dfs_info3_param **target_nodes, + const struct nls_table *nls_codepage, int remap, + const char *searchName, bool is_unicode) +{ + int i, rc = 0; + char *data_end; + struct dfs_referral_level_3 *ref; + + *num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals); + + if (*num_of_nodes < 1) { + cifs_dbg(VFS, "num_referrals: must be at least > 0, but we get num_referrals = %d\n", + *num_of_nodes); + rc = -EINVAL; + goto parse_DFS_referrals_exit; + } + + ref = (struct dfs_referral_level_3 *) &(rsp->referrals); + if (ref->VersionNumber != cpu_to_le16(3)) { + cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n", + le16_to_cpu(ref->VersionNumber)); + rc = -EINVAL; + goto parse_DFS_referrals_exit; + } + + /* get the upper boundary of the resp buffer */ + data_end = (char *)rsp + rsp_size; + + cifs_dbg(FYI, "num_referrals: %d dfs flags: 0x%x 
...\n", + *num_of_nodes, le32_to_cpu(rsp->DFSFlags)); + + *target_nodes = kcalloc(*num_of_nodes, sizeof(struct dfs_info3_param), + GFP_KERNEL); + if (*target_nodes == NULL) { + rc = -ENOMEM; + goto parse_DFS_referrals_exit; + } + + /* collect necessary data from referrals */ + for (i = 0; i < *num_of_nodes; i++) { + char *temp; + int max_len; + struct dfs_info3_param *node = (*target_nodes)+i; + + node->flags = le32_to_cpu(rsp->DFSFlags); + if (is_unicode) { + __le16 *tmp = kmalloc(strlen(searchName)*2 + 2, + GFP_KERNEL); + if (tmp == NULL) { + rc = -ENOMEM; + goto parse_DFS_referrals_exit; + } + cifsConvertToUTF16((__le16 *) tmp, searchName, + PATH_MAX, nls_codepage, remap); + node->path_consumed = cifs_utf16_bytes(tmp, + le16_to_cpu(rsp->PathConsumed), + nls_codepage); + kfree(tmp); + } else + node->path_consumed = le16_to_cpu(rsp->PathConsumed); + + node->server_type = le16_to_cpu(ref->ServerType); + node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags); + + /* copy DfsPath */ + temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset); + max_len = data_end - temp; + node->path_name = cifs_strndup_from_utf16(temp, max_len, + is_unicode, nls_codepage); + if (!node->path_name) { + rc = -ENOMEM; + goto parse_DFS_referrals_exit; + } + + /* copy link target UNC */ + temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset); + max_len = data_end - temp; + node->node_name = cifs_strndup_from_utf16(temp, max_len, + is_unicode, nls_codepage); + if (!node->node_name) { + rc = -ENOMEM; + goto parse_DFS_referrals_exit; + } + + ref++; + } + +parse_DFS_referrals_exit: + if (rc) { + free_dfs_info_array(*target_nodes, *num_of_nodes); + *target_nodes = NULL; + *num_of_nodes = 0; + } + return rc; +} diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index dcbcc927399a0f..8b0502cd39afb6 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -498,7 +498,7 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer, } enum securityEnum -select_sectype(struct TCP_Server_Info *server, enum securityEnum requested) +cifs_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested) { switch (server->negflavor) { case CIFS_NEGFLAVOR_EXTENDED: @@ -1391,7 +1391,7 @@ static int select_sec(struct cifs_ses *ses, struct sess_data *sess_data) { int type; - type = select_sectype(ses->server, ses->sectype); + type = cifs_select_sectype(ses->server, ses->sectype); cifs_dbg(FYI, "sess setup type %d\n", type); if (type == Unspecified) { cifs_dbg(VFS, diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index 67a987e4d026e0..cc93ba4da9b592 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c @@ -1087,6 +1087,7 @@ struct smb_version_operations smb1_operations = { .is_read_op = cifs_is_read_op, .wp_retry_size = cifs_wp_retry_size, .dir_needs_close = cifs_dir_needs_close, + .select_sectype = cifs_select_sectype, #ifdef CONFIG_CIFS_XATTR .query_all_EAs = CIFSSMBQAllEAs, .set_EA = CIFSSMBSetEA, diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c index b2aff0c6f22c52..b4b1f0305f2994 100644 --- a/fs/cifs/smb2file.c +++ b/fs/cifs/smb2file.c @@ -73,7 +73,8 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, nr_ioctl_req.Timeout = 0; /* use server default (120 seconds) */ nr_ioctl_req.Reserved = 0; rc = SMB2_ioctl(xid, oparms->tcon, fid->persistent_fid, - fid->volatile_fid, FSCTL_LMR_REQUEST_RESILIENCY, true, + fid->volatile_fid, FSCTL_LMR_REQUEST_RESILIENCY, + true /* is_fsctl */, false /* use_ipc */, (char *)&nr_ioctl_req, sizeof(nr_ioctl_req), NULL, NULL /* no return info */); if (rc == -EOPNOTSUPP) { diff --git 
a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index a44b4dbe4aaec9..0231108d9387a4 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -282,6 +282,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon) rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */, + false /* use_ipc */, NULL /* no data input */, 0 /* no data input */, (char **)&out_buf, &ret_data_len); if (rc != 0) @@ -571,6 +572,7 @@ SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon, rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */, + false /* use_ipc */, NULL, 0 /* no input */, (char **)&res_key, &ret_data_len); @@ -635,7 +637,8 @@ smb2_clone_range(const unsigned int xid, /* Request server copy to target from src identified by key */ rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid, trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE, - true /* is_fsctl */, (char *)pcchunk, + true /* is_fsctl */, false /* use_ipc */, + (char *)pcchunk, sizeof(struct copychunk_ioctl), (char **)&retbuf, &ret_data_len); if (rc == 0) { @@ -787,7 +790,8 @@ static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon, rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, FSCTL_SET_SPARSE, - true /* is_fctl */, &setsparse, 1, NULL, NULL); + true /* is_fctl */, false /* use_ipc */, + &setsparse, 1, NULL, NULL); if (rc) { tcon->broken_sparse_sup = true; cifs_dbg(FYI, "set sparse rc = %d\n", rc); @@ -857,7 +861,8 @@ smb2_duplicate_extents(const unsigned int xid, rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid, trgtfile->fid.volatile_fid, FSCTL_DUPLICATE_EXTENTS_TO_FILE, - true /* is_fsctl */, (char *)&dup_ext_buf, + true /* is_fsctl */, false /* use_ipc */, + (char *)&dup_ext_buf, sizeof(struct duplicate_extents_to_file), NULL, &ret_data_len); @@ -891,7 +896,8 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon, return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, FSCTL_SET_INTEGRITY_INFORMATION, - true /* is_fsctl */, (char *)&integr_info, + true /* is_fsctl */, false /* use_ipc */, + (char *)&integr_info, sizeof(struct fsctl_set_integrity_information_req), NULL, &ret_data_len); @@ -910,7 +916,8 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon, rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, FSCTL_SRV_ENUMERATE_SNAPSHOTS, - true /* is_fsctl */, NULL, 0 /* no input data */, + true /* is_fsctl */, false /* use_ipc */, + NULL, 0 /* no input data */, (char **)&retbuf, &ret_data_len); cifs_dbg(FYI, "enum snaphots ioctl returned %d and ret buflen is %d\n", @@ -1097,6 +1104,103 @@ smb2_new_lease_key(struct cifs_fid *fid) generate_random_uuid(fid->lease_key); } +static int +smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses, + const char *search_name, + struct dfs_info3_param **target_nodes, + unsigned int *num_of_nodes, + const struct nls_table *nls_codepage, int remap) +{ + int rc; + __le16 *utf16_path = NULL; + int utf16_path_len = 0; + struct cifs_tcon *tcon; + struct fsctl_get_dfs_referral_req *dfs_req = NULL; + struct get_dfs_referral_rsp *dfs_rsp = NULL; + u32 dfs_req_size = 0, dfs_rsp_size = 0; + + cifs_dbg(FYI, "smb2_get_dfs_refer path <%s>\n", search_name); + + /* + * Use any tcon from the current session. Here, the first one. 
+ */ + spin_lock(&cifs_tcp_ses_lock); + tcon = list_first_entry_or_null(&ses->tcon_list, struct cifs_tcon, + tcon_list); + if (tcon) + tcon->tc_count++; + spin_unlock(&cifs_tcp_ses_lock); + + if (!tcon) { + cifs_dbg(VFS, "session %p has no tcon available for a dfs referral request\n", + ses); + rc = -ENOTCONN; + goto out; + } + + utf16_path = cifs_strndup_to_utf16(search_name, PATH_MAX, + &utf16_path_len, + nls_codepage, remap); + if (!utf16_path) { + rc = -ENOMEM; + goto out; + } + + dfs_req_size = sizeof(*dfs_req) + utf16_path_len; + dfs_req = kzalloc(dfs_req_size, GFP_KERNEL); + if (!dfs_req) { + rc = -ENOMEM; + goto out; + } + + /* Highest DFS referral version understood */ + dfs_req->MaxReferralLevel = DFS_VERSION; + + /* Path to resolve in an UTF-16 null-terminated string */ + memcpy(dfs_req->RequestFileName, utf16_path, utf16_path_len); + + do { + /* try first with IPC */ + rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, + FSCTL_DFS_GET_REFERRALS, + true /* is_fsctl */, true /* use_ipc */, + (char *)dfs_req, dfs_req_size, + (char **)&dfs_rsp, &dfs_rsp_size); + if (rc == -ENOTCONN) { + /* try with normal tcon */ + rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, + FSCTL_DFS_GET_REFERRALS, + true /* is_fsctl */, false /*use_ipc*/, + (char *)dfs_req, dfs_req_size, + (char **)&dfs_rsp, &dfs_rsp_size); + } + } while (rc == -EAGAIN); + + if (rc) { + cifs_dbg(VFS, "ioctl error in smb2_get_dfs_refer rc=%d\n", rc); + goto out; + } + + rc = parse_dfs_referrals(dfs_rsp, dfs_rsp_size, + num_of_nodes, target_nodes, + nls_codepage, remap, search_name, + true /* is_unicode */); + if (rc) { + cifs_dbg(VFS, "parse error in smb2_get_dfs_refer rc=%d\n", rc); + goto out; + } + + out: + if (tcon) { + spin_lock(&cifs_tcp_ses_lock); + tcon->tc_count--; + spin_unlock(&cifs_tcp_ses_lock); + } + kfree(utf16_path); + kfree(dfs_req); + kfree(dfs_rsp); + return rc; +} #define SMB2_SYMLINK_STRUCT_SIZE \ (sizeof(struct smb2_err_rsp) - 1 + sizeof(struct smb2_symlink_err_rsp)) @@ -1220,7 +1324,8 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon, rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, - true /* is_fctl */, (char *)&fsctl_buf, + true /* is_fctl */, false /* use_ipc */, + (char *)&fsctl_buf, sizeof(struct file_zero_data_information), NULL, NULL); free_xid(xid); return rc; @@ -1254,7 +1359,8 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon, rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, - true /* is_fctl */, (char *)&fsctl_buf, + true /* is_fctl */, false /* use_ipc */, + (char *)&fsctl_buf, sizeof(struct file_zero_data_information), NULL, NULL); free_xid(xid); return rc; @@ -1609,6 +1715,26 @@ static void cifs_crypt_complete(struct crypto_async_request *req, int err) complete(&res->completion); } +static int +smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key) +{ + struct cifs_ses *ses; + u8 *ses_enc_key; + + spin_lock(&cifs_tcp_ses_lock); + list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { + if (ses->Suid != ses_id) + continue; + ses_enc_key = enc ? ses->smb3encryptionkey : + ses->smb3decryptionkey; + memcpy(key, ses_enc_key, SMB3_SIGN_KEY_SIZE); + spin_unlock(&cifs_tcp_ses_lock); + return 0; + } + spin_unlock(&cifs_tcp_ses_lock); + + return 1; +} /* * Encrypt or decrypt @rqst message. 
@rqst has the following format: * iov[0] - transform header (associate data), @@ -1622,10 +1748,10 @@ crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc) struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)rqst->rq_iov[0].iov_base; unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 24; - struct cifs_ses *ses; int rc = 0; struct scatterlist *sg; u8 sign[SMB2_SIGNATURE_SIZE] = {}; + u8 key[SMB3_SIGN_KEY_SIZE]; struct aead_request *req; char *iv; unsigned int iv_len; @@ -1635,9 +1761,10 @@ crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc) init_completion(&result.completion); - ses = smb2_find_smb_ses(server, tr_hdr->SessionId); - if (!ses) { - cifs_dbg(VFS, "%s: Could not find session\n", __func__); + rc = smb2_get_enc_key(server, tr_hdr->SessionId, enc, key); + if (rc) { + cifs_dbg(VFS, "%s: Could not get %scryption key\n", __func__, + enc ? "en" : "de"); return 0; } @@ -1649,8 +1776,7 @@ crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc) tfm = enc ? server->secmech.ccmaesencrypt : server->secmech.ccmaesdecrypt; - rc = crypto_aead_setkey(tfm, enc ? ses->smb3encryptionkey : - ses->smb3decryptionkey, SMB3_SIGN_KEY_SIZE); + rc = crypto_aead_setkey(tfm, key, SMB3_SIGN_KEY_SIZE); if (rc) { cifs_dbg(VFS, "%s: Failed to set aead key %d\n", __func__, rc); return rc; @@ -2254,6 +2380,8 @@ struct smb_version_operations smb20_operations = { .clone_range = smb2_clone_range, .wp_retry_size = smb2_wp_retry_size, .dir_needs_close = smb2_dir_needs_close, + .get_dfs_refer = smb2_get_dfs_refer, + .select_sectype = smb2_select_sectype, }; struct smb_version_operations smb21_operations = { @@ -2335,6 +2463,8 @@ struct smb_version_operations smb21_operations = { .wp_retry_size = smb2_wp_retry_size, .dir_needs_close = smb2_dir_needs_close, .enum_snapshots = smb3_enum_snapshots, + .get_dfs_refer = smb2_get_dfs_refer, + .select_sectype = smb2_select_sectype, }; struct smb_version_operations smb30_operations = { @@ -2426,6 +2556,8 @@ struct smb_version_operations smb30_operations = { .free_transform_rq = smb3_free_transform_rq, .is_transform_hdr = smb3_is_transform_hdr, .receive_transform = smb3_receive_transform, + .get_dfs_refer = smb2_get_dfs_refer, + .select_sectype = smb2_select_sectype, }; #ifdef CONFIG_CIFS_SMB311 @@ -2518,6 +2650,8 @@ struct smb_version_operations smb311_operations = { .free_transform_rq = smb3_free_transform_rq, .is_transform_hdr = smb3_is_transform_hdr, .receive_transform = smb3_receive_transform, + .get_dfs_refer = smb2_get_dfs_refer, + .select_sectype = smb2_select_sectype, }; #endif /* CIFS_SMB311 */ diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index ad83b3db284028..7446496850a3bd 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -620,6 +620,7 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon) rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */, + false /* use_ipc */, (char *)&vneg_inbuf, sizeof(struct validate_negotiate_info_req), (char **)&pneg_rsp, &rsplen); @@ -656,6 +657,28 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon) return -EIO; } +enum securityEnum +smb2_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested) +{ + switch (requested) { + case Kerberos: + case RawNTLMSSP: + return requested; + case NTLMv2: + return RawNTLMSSP; + case Unspecified: + if (server->sec_ntlmssp && + (global_secflags & CIFSSEC_MAY_NTLMSSP)) + return 
RawNTLMSSP; + if ((server->sec_kerberos || server->sec_mskerberos) && + (global_secflags & CIFSSEC_MAY_KRB5)) + return Kerberos; + /* Fallthrough */ + default: + return Unspecified; + } +} + struct SMB2_sess_data { unsigned int xid; struct cifs_ses *ses; @@ -1008,10 +1031,17 @@ SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data) static int SMB2_select_sec(struct cifs_ses *ses, struct SMB2_sess_data *sess_data) { - if (ses->sectype != Kerberos && ses->sectype != RawNTLMSSP) - ses->sectype = RawNTLMSSP; + int type; + + type = smb2_select_sectype(ses->server, ses->sectype); + cifs_dbg(FYI, "sess setup type %d\n", type); + if (type == Unspecified) { + cifs_dbg(VFS, + "Unable to select appropriate authentication method!"); + return -EINVAL; + } - switch (ses->sectype) { + switch (type) { case Kerberos: sess_data->func = SMB2_auth_kerberos; break; @@ -1019,7 +1049,7 @@ SMB2_select_sec(struct cifs_ses *ses, struct SMB2_sess_data *sess_data) sess_data->func = SMB2_sess_auth_rawntlmssp_negotiate; break; default: - cifs_dbg(VFS, "secType %d not supported!\n", ses->sectype); + cifs_dbg(VFS, "secType %d not supported!\n", type); return -EOPNOTSUPP; } @@ -1167,8 +1197,8 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, /* since no tcon, smb2_init can not do this, so do here */ req->hdr.sync_hdr.SessionId = ses->Suid; - /* if (ses->server->sec_mode & SECMODE_SIGN_REQUIRED) - req->hdr.Flags |= SMB2_FLAGS_SIGNED; */ + if (ses->server->sign) + req->hdr.sync_hdr.Flags |= SMB2_FLAGS_SIGNED; } else if (encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; @@ -1527,6 +1557,51 @@ add_durable_context(struct kvec *iov, unsigned int *num_iovec, return 0; } +static int +alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len, + const char *treename, const __le16 *path) +{ + int treename_len, path_len; + struct nls_table *cp; + const __le16 sep[] = {cpu_to_le16('\\'), cpu_to_le16(0x0000)}; + + /* + * skip leading "\\" + */ + treename_len = strlen(treename); + if (treename_len < 2 || !(treename[0] == '\\' && treename[1] == '\\')) + return -EINVAL; + + treename += 2; + treename_len -= 2; + + path_len = UniStrnlen((wchar_t *)path, PATH_MAX); + + /* + * make room for one path separator between the treename and + * path + */ + *out_len = treename_len + 1 + path_len; + + /* + * final path needs to be null-terminated UTF16 with a + * size aligned to 8 + */ + + *out_size = roundup((*out_len+1)*2, 8); + *out_path = kzalloc(*out_size, GFP_KERNEL); + if (!*out_path) + return -ENOMEM; + + cp = load_nls_default(); + cifs_strtoUTF16(*out_path, treename, treename_len, cp); + UniStrcat(*out_path, sep); + UniStrcat(*out_path, path); + unload_nls(cp); + + return 0; +} + int SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path, __u8 *oplock, struct smb2_file_all_info *buf, @@ -1575,30 +1650,49 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path, req->ShareAccess = FILE_SHARE_ALL_LE; req->CreateDisposition = cpu_to_le32(oparms->disposition); req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK); - uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2; - /* do not count rfc1001 len field */ - req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req) - 4); iov[0].iov_base = (char *)req; /* 4 for rfc1002 length field */ iov[0].iov_len = get_rfc1002_length(req) + 4; - - /* MUST set path len (NameLength) to 0 opening root of share */ - req->NameLength = cpu_to_le16(uni_path_len - 
2); /* -1 since last byte is buf[0] which is sent below (path) */ iov[0].iov_len--; - if (uni_path_len % 8 != 0) { - copy_size = uni_path_len / 8 * 8; - if (copy_size < uni_path_len) - copy_size += 8; - - copy_path = kzalloc(copy_size, GFP_KERNEL); - if (!copy_path) - return -ENOMEM; - memcpy((char *)copy_path, (const char *)path, - uni_path_len); + + req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req) - 4); + + /* [MS-SMB2] 2.2.13 NameOffset: + * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of + * the SMB2 header, the file name includes a prefix that will + * be processed during DFS name normalization as specified in + * section 3.3.5.9. Otherwise, the file name is relative to + * the share that is identified by the TreeId in the SMB2 + * header. + */ + if (tcon->share_flags & SHI1005_FLAGS_DFS) { + int name_len; + + req->hdr.sync_hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS; + rc = alloc_path_with_tree_prefix(©_path, ©_size, + &name_len, + tcon->treeName, path); + if (rc) + return rc; + req->NameLength = cpu_to_le16(name_len * 2); uni_path_len = copy_size; path = copy_path; + } else { + uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2; + /* MUST set path len (NameLength) to 0 opening root of share */ + req->NameLength = cpu_to_le16(uni_path_len - 2); + if (uni_path_len % 8 != 0) { + copy_size = roundup(uni_path_len, 8); + copy_path = kzalloc(copy_size, GFP_KERNEL); + if (!copy_path) + return -ENOMEM; + memcpy((char *)copy_path, (const char *)path, + uni_path_len); + uni_path_len = copy_size; + path = copy_path; + } } iov[1].iov_len = uni_path_len; @@ -1683,8 +1777,9 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path, */ int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, - u64 volatile_fid, u32 opcode, bool is_fsctl, char *in_data, - u32 indatalen, char **out_data, u32 *plen /* returned data len */) + u64 volatile_fid, u32 opcode, bool is_fsctl, bool use_ipc, + char *in_data, u32 indatalen, + char **out_data, u32 *plen /* returned data len */) { struct smb2_ioctl_req *req; struct smb2_ioctl_rsp *rsp; @@ -1721,6 +1816,16 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, if (rc) return rc; + if (use_ipc) { + if (ses->ipc_tid == 0) { + cifs_small_buf_release(req); + return -ENOTCONN; + } + + cifs_dbg(FYI, "replacing tid 0x%x with IPC tid 0x%x\n", + req->hdr.sync_hdr.TreeId, ses->ipc_tid); + req->hdr.sync_hdr.TreeId = ses->ipc_tid; + } if (encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; @@ -1843,6 +1948,7 @@ SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon, rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, FSCTL_SET_COMPRESSION, true /* is_fsctl */, + false /* use_ipc */, (char *)&fsctl_input /* data input */, 2 /* in data len */, &ret_data /* out data */, NULL); diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h index c03b252501a155..18700fd25a0b3f 100644 --- a/fs/cifs/smb2pdu.h +++ b/fs/cifs/smb2pdu.h @@ -695,6 +695,14 @@ struct fsctl_get_integrity_information_rsp { /* Integrity flags for above */ #define FSCTL_INTEGRITY_FLAG_CHECKSUM_ENFORCEMENT_OFF 0x00000001 +/* See MS-DFSC 2.2.2 */ +struct fsctl_get_dfs_referral_req { + __le16 MaxReferralLevel; + __u8 RequestFileName[]; +} __packed; + +/* DFS response is struct get_dfs_refer_rsp */ + /* See MS-SMB2 2.2.31.3 */ struct network_resiliency_req { __le32 Timeout; diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h index 85fc7a78933441..69e35873b1de73 100644 --- a/fs/cifs/smb2proto.h +++ 
b/fs/cifs/smb2proto.h @@ -121,7 +121,8 @@ extern int SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, struct smb2_err_rsp **err_buf); extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, u32 opcode, - bool is_fsctl, char *in_data, u32 indatalen, + bool is_fsctl, bool use_ipc, + char *in_data, u32 indatalen, char **out_data, u32 *plen /* returned data len */); extern int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_file_id, u64 volatile_file_id); @@ -180,4 +181,6 @@ extern int SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon, __u8 *lease_key, const __le32 lease_state); extern int smb3_validate_negotiate(const unsigned int, struct cifs_tcon *); +extern enum securityEnum smb2_select_sectype(struct TCP_Server_Info *, + enum securityEnum); #endif /* _SMB2PROTO_H */ diff --git a/fs/coda/coda_linux.h b/fs/coda/coda_linux.h index 5104d84c4f6425..d3c361883c2844 100644 --- a/fs/coda/coda_linux.h +++ b/fs/coda/coda_linux.h @@ -47,7 +47,7 @@ int coda_open(struct inode *i, struct file *f); int coda_release(struct inode *i, struct file *f); int coda_permission(struct inode *inode, int mask); int coda_revalidate_inode(struct inode *); -int coda_getattr(struct vfsmount *, struct dentry *, struct kstat *); +int coda_getattr(const struct path *, struct kstat *, u32, unsigned int); int coda_setattr(struct dentry *, struct iattr *); /* this file: heloers */ diff --git a/fs/coda/file.c b/fs/coda/file.c index 6e0154eb6fcc1c..9d956cd6d46f93 100644 --- a/fs/coda/file.c +++ b/fs/coda/file.c @@ -96,7 +96,7 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma) cfi->cfi_mapcount++; spin_unlock(&cii->c_lock); - return host_file->f_op->mmap(host_file, vma); + return call_mmap(host_file, vma); } int coda_open(struct inode *coda_inode, struct file *coda_file) diff --git a/fs/coda/inode.c b/fs/coda/inode.c index 71dbe7e287cef9..2dea594da19968 100644 --- a/fs/coda/inode.c +++ b/fs/coda/inode.c @@ -255,11 +255,12 @@ static void coda_evict_inode(struct inode *inode) coda_cache_clear_inode(inode); } -int coda_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) +int coda_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { - int err = coda_revalidate_inode(d_inode(dentry)); + int err = coda_revalidate_inode(d_inode(path->dentry)); if (!err) - generic_fillattr(d_inode(dentry), stat); + generic_fillattr(d_inode(path->dentry), stat); return err; } diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c index 822629126e89fd..f40e3953e7fe3c 100644 --- a/fs/coda/psdev.c +++ b/fs/coda/psdev.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c index f6c6c8adbc01ef..e82357c8997934 100644 --- a/fs/coda/upcall.c +++ b/fs/coda/upcall.c @@ -15,7 +15,7 @@ */ #include -#include +#include #include #include #include diff --git a/fs/compat.c b/fs/compat.c index e50a2114f47408..c61b506f5bc94b 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include diff --git a/fs/coredump.c b/fs/coredump.c index ae6b05629ca174..592683711c6432 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -16,6 +16,9 @@ #include #include #include +#include +#include +#include #include #include #include @@ -33,7 +36,6 @@ #include #include #include -#include #include #include #include diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c 
index 02a7a9286449d4..6d6eca394d4d41 100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c @@ -327,7 +327,6 @@ EXPORT_SYMBOL(fscrypt_decrypt_page); static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags) { struct dentry *dir; - struct fscrypt_info *ci; int dir_has_key, cached_with_key; if (flags & LOOKUP_RCU) @@ -339,18 +338,11 @@ static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags) return 0; } - ci = d_inode(dir)->i_crypt_info; - if (ci && ci->ci_keyring_key && - (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) | - (1 << KEY_FLAG_REVOKED) | - (1 << KEY_FLAG_DEAD)))) - ci = NULL; - /* this should eventually be an flag in d_flags */ spin_lock(&dentry->d_lock); cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY; spin_unlock(&dentry->d_lock); - dir_has_key = (ci != NULL); + dir_has_key = (d_inode(dir)->i_crypt_info != NULL); dput(dir); /* diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c index 13052b85c3930f..37b49894c76234 100644 --- a/fs/crypto/fname.c +++ b/fs/crypto/fname.c @@ -350,7 +350,7 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname, fname->disk_name.len = iname->len; return 0; } - ret = fscrypt_get_crypt_info(dir); + ret = fscrypt_get_encryption_info(dir); if (ret && ret != -EOPNOTSUPP) return ret; diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h index fdbb8af32eafdb..e39696e644942a 100644 --- a/fs/crypto/fscrypt_private.h +++ b/fs/crypto/fscrypt_private.h @@ -67,7 +67,6 @@ struct fscrypt_info { u8 ci_filename_mode; u8 ci_flags; struct crypto_skcipher *ci_ctfm; - struct key *ci_keyring_key; u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE]; }; @@ -101,7 +100,4 @@ extern int fscrypt_do_page_crypto(const struct inode *inode, extern struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx, gfp_t gfp_flags); -/* keyinfo.c */ -extern int fscrypt_get_crypt_info(struct inode *); - #endif /* _FSCRYPT_PRIVATE_H */ diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c index 02eb6b9e44387d..8cdfddce2b3486 100644 --- a/fs/crypto/keyinfo.c +++ b/fs/crypto/keyinfo.c @@ -95,6 +95,7 @@ static int validate_user_key(struct fscrypt_info *crypt_info, kfree(description); if (IS_ERR(keyring_key)) return PTR_ERR(keyring_key); + down_read(&keyring_key->sem); if (keyring_key->type != &key_type_logon) { printk_once(KERN_WARNING @@ -102,11 +103,9 @@ static int validate_user_key(struct fscrypt_info *crypt_info, res = -ENOKEY; goto out; } - down_read(&keyring_key->sem); - ukp = user_key_payload(keyring_key); + ukp = user_key_payload_locked(keyring_key); if (ukp->datalen != sizeof(struct fscrypt_key)) { res = -EINVAL; - up_read(&keyring_key->sem); goto out; } master_key = (struct fscrypt_key *)ukp->data; @@ -117,17 +116,11 @@ static int validate_user_key(struct fscrypt_info *crypt_info, "%s: key size incorrect: %d\n", __func__, master_key->size); res = -ENOKEY; - up_read(&keyring_key->sem); goto out; } res = derive_key_aes(ctx->nonce, master_key->raw, raw_key); - up_read(&keyring_key->sem); - if (res) - goto out; - - crypt_info->ci_keyring_key = keyring_key; - return 0; out: + up_read(&keyring_key->sem); key_put(keyring_key); return res; } @@ -169,12 +162,11 @@ static void put_crypt_info(struct fscrypt_info *ci) if (!ci) return; - key_put(ci->ci_keyring_key); crypto_free_skcipher(ci->ci_ctfm); kmem_cache_free(fscrypt_info_cachep, ci); } -int fscrypt_get_crypt_info(struct inode *inode) +int fscrypt_get_encryption_info(struct inode *inode) { struct fscrypt_info *crypt_info; struct fscrypt_context ctx; @@ 
-184,21 +176,15 @@ int fscrypt_get_crypt_info(struct inode *inode) u8 *raw_key = NULL; int res; + if (inode->i_crypt_info) + return 0; + res = fscrypt_initialize(inode->i_sb->s_cop->flags); if (res) return res; if (!inode->i_sb->s_cop->get_context) return -EOPNOTSUPP; -retry: - crypt_info = ACCESS_ONCE(inode->i_crypt_info); - if (crypt_info) { - if (!crypt_info->ci_keyring_key || - key_validate(crypt_info->ci_keyring_key) == 0) - return 0; - fscrypt_put_encryption_info(inode, crypt_info); - goto retry; - } res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); if (res < 0) { @@ -229,7 +215,6 @@ int fscrypt_get_crypt_info(struct inode *inode) crypt_info->ci_data_mode = ctx.contents_encryption_mode; crypt_info->ci_filename_mode = ctx.filenames_encryption_mode; crypt_info->ci_ctfm = NULL; - crypt_info->ci_keyring_key = NULL; memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor, sizeof(crypt_info->ci_master_key)); @@ -273,14 +258,8 @@ int fscrypt_get_crypt_info(struct inode *inode) if (res) goto out; - kzfree(raw_key); - raw_key = NULL; - if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) != NULL) { - put_crypt_info(crypt_info); - goto retry; - } - return 0; - + if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) == NULL) + crypt_info = NULL; out: if (res == -ENOKEY) res = 0; @@ -288,6 +267,7 @@ int fscrypt_get_crypt_info(struct inode *inode) kzfree(raw_key); return res; } +EXPORT_SYMBOL(fscrypt_get_encryption_info); void fscrypt_put_encryption_info(struct inode *inode, struct fscrypt_info *ci) { @@ -305,17 +285,3 @@ void fscrypt_put_encryption_info(struct inode *inode, struct fscrypt_info *ci) put_crypt_info(ci); } EXPORT_SYMBOL(fscrypt_put_encryption_info); - -int fscrypt_get_encryption_info(struct inode *inode) -{ - struct fscrypt_info *ci = inode->i_crypt_info; - - if (!ci || - (ci->ci_keyring_key && - (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) | - (1 << KEY_FLAG_REVOKED) | - (1 << KEY_FLAG_DEAD))))) - return fscrypt_get_crypt_info(inode); - return 0; -} -EXPORT_SYMBOL(fscrypt_get_encryption_info); diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c index 14b76da7126948..4908906d54d562 100644 --- a/fs/crypto/policy.c +++ b/fs/crypto/policy.c @@ -33,17 +33,10 @@ static int create_encryption_context_from_policy(struct inode *inode, const struct fscrypt_policy *policy) { struct fscrypt_context ctx; - int res; if (!inode->i_sb->s_cop->set_context) return -EOPNOTSUPP; - if (inode->i_sb->s_cop->prepare_context) { - res = inode->i_sb->s_cop->prepare_context(inode); - if (res) - return res; - } - ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1; memcpy(ctx.master_key_descriptor, policy->master_key_descriptor, FS_KEY_DESCRIPTOR_SIZE); diff --git a/fs/dax.c b/fs/dax.c index 7436c98b92c82d..de622d4282a650 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index 7d398d300e972c..9382db998ec954 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -743,7 +743,7 @@ static int tcp_accept_from_sock(struct connection *con) newsock->type = con->sock->type; newsock->ops = con->sock->ops; - result = con->sock->ops->accept(con->sock, newsock, O_NONBLOCK); + result = con->sock->ops->accept(con->sock, newsock, O_NONBLOCK, true); if (result < 0) goto accept_err; diff --git a/fs/dlm/user.c b/fs/dlm/user.c index 1ce908c2232c38..23488f559cf969 100644 --- a/fs/dlm/user.c +++ b/fs/dlm/user.c @@ -17,6 +17,7 @@ #include #include #include +#include #include 
"dlm_internal.h" #include "lockspace.h" diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h index 599a29237cfea5..95c1c8d3453922 100644 --- a/fs/ecryptfs/ecryptfs_kernel.h +++ b/fs/ecryptfs/ecryptfs_kernel.h @@ -117,7 +117,7 @@ ecryptfs_get_key_payload_data(struct key *key) auth_tok = ecryptfs_get_encrypted_key_payload_data(key); if (!auth_tok) - return (struct ecryptfs_auth_tok *)user_key_payload(key)->data; + return (struct ecryptfs_auth_tok *)user_key_payload_locked(key)->data; else return auth_tok; } diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index e7413f82d27bf3..efc2db42d17513 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -959,9 +959,10 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia) return rc; } -static int ecryptfs_getattr_link(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +static int ecryptfs_getattr_link(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { + struct dentry *dentry = path->dentry; struct ecryptfs_mount_crypt_stat *mount_crypt_stat; int rc = 0; @@ -983,13 +984,15 @@ static int ecryptfs_getattr_link(struct vfsmount *mnt, struct dentry *dentry, return rc; } -static int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +static int ecryptfs_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { + struct dentry *dentry = path->dentry; struct kstat lower_stat; int rc; - rc = vfs_getattr(ecryptfs_dentry_to_lower_path(dentry), &lower_stat); + rc = vfs_getattr(ecryptfs_dentry_to_lower_path(dentry), &lower_stat, + request_mask, flags); if (!rc) { fsstack_copy_attr_all(d_inode(dentry), ecryptfs_inode_to_lower(d_inode(dentry))); diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c index 158a3a39f82de7..039e627194a93b 100644 --- a/fs/ecryptfs/read_write.c +++ b/fs/ecryptfs/read_write.c @@ -22,6 +22,8 @@ #include #include +#include + #include "ecryptfs_kernel.h" /** diff --git a/fs/eventfd.c b/fs/eventfd.c index 1231cd1999d8fe..68b9fffcb2c8e7 100644 --- a/fs/eventfd.c +++ b/fs/eventfd.c @@ -9,7 +9,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 5ec16313da1a1c..341251421ced00 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -13,7 +13,7 @@ #include #include -#include +#include #include #include #include diff --git a/fs/exec.c b/fs/exec.c index 698a86094f7672..65145a3df06519 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -32,6 +32,11 @@ #include #include #include +#include +#include +#include +#include +#include #include #include #include @@ -1088,7 +1093,7 @@ static int de_thread(struct task_struct *tsk) struct task_struct *leader = tsk->group_leader; for (;;) { - threadgroup_change_begin(tsk); + cgroup_threadgroup_change_begin(tsk); write_lock_irq(&tasklist_lock); /* * Do this under tasklist_lock to ensure that @@ -1099,7 +1104,7 @@ static int de_thread(struct task_struct *tsk) break; __set_current_state(TASK_KILLABLE); write_unlock_irq(&tasklist_lock); - threadgroup_change_end(tsk); + cgroup_threadgroup_change_end(tsk); schedule(); if (unlikely(__fatal_signal_pending(tsk))) goto killed; @@ -1157,7 +1162,7 @@ static int de_thread(struct task_struct *tsk) if (unlikely(leader->ptrace)) __wake_up_parent(leader, leader->parent); write_unlock_irq(&tasklist_lock); - threadgroup_change_end(tsk); + cgroup_threadgroup_change_end(tsk); release_task(leader); } diff --git a/fs/exportfs/expfs.c 
b/fs/exportfs/expfs.c index a4b531be9168d5..329a5d10384614 100644 --- a/fs/exportfs/expfs.c +++ b/fs/exportfs/expfs.c @@ -15,6 +15,7 @@ #include #include #include +#include #define dprintk(fmt, args...) do{}while(0) @@ -299,7 +300,8 @@ static int get_name(const struct path *path, char *name, struct dentry *child) * filesystem supports 64-bit inode numbers. So we need to * actually call ->getattr, not just read i_ino: */ - error = vfs_getattr_nosec(&child_path, &stat); + error = vfs_getattr_nosec(&child_path, &stat, + STATX_INO, AT_STATX_SYNC_AS_STAT); if (error) return error; buffer.ino = stat.ino; diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c index 4c40c0786e168b..d0bdb74f0e151b 100644 --- a/fs/ext2/balloc.c +++ b/fs/ext2/balloc.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 2fd17e8e498416..f493af66659134 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -2462,8 +2463,7 @@ extern struct inode *ext4_iget(struct super_block *, unsigned long); extern struct inode *ext4_iget_normal(struct super_block *, unsigned long); extern int ext4_write_inode(struct inode *, struct writeback_control *); extern int ext4_setattr(struct dentry *, struct iattr *); -extern int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat); +extern int ext4_getattr(const struct path *, struct kstat *, u32, unsigned int); extern void ext4_evict_inode(struct inode *); extern void ext4_clear_inode(struct inode *); extern int ext4_sync_inode(handle_t *, struct inode *); diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index b14bae2598bc53..17bc043308f33b 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -21,6 +21,8 @@ #include #include #include +#include + #include #include "ext4.h" diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index 30a9f210d1e32c..375fb1c05d49ce 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -1169,10 +1169,9 @@ static int ext4_finish_convert_inline_dir(handle_t *handle, set_buffer_uptodate(dir_block); err = ext4_handle_dirty_dirent_node(handle, inode, dir_block); if (err) - goto out; + return err; set_buffer_verified(dir_block); -out: - return err; + return ext4_mark_inode_dirty(handle, inode); } static int ext4_convert_inline_data_nolock(handle_t *handle, diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 971f6634208032..4247d8d2568781 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -5387,20 +5387,20 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) return error; } -int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +int ext4_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) { struct inode *inode; unsigned long long delalloc_blocks; - inode = d_inode(dentry); + inode = d_inode(path->dentry); generic_fillattr(inode, stat); /* * If there is inline data in the inode, the inode will normally not * have data blocks allocated (it may have an external xattr block). * Report at least one sector for such files, so tools like tar, rsync, - * others doen't incorrectly think the file is completely sparse. + * others don't incorrectly think the file is completely sparse. 
*/ if (unlikely(ext4_has_inline_data(inode))) stat->blocks += (stat->size + 511) >> 9; diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c index 578f8c33fb44ad..c992ef2c2f94c0 100644 --- a/fs/ext4/move_extent.c +++ b/fs/ext4/move_extent.c @@ -511,7 +511,7 @@ mext_check_arguments(struct inode *orig_inode, if ((orig_start & ~(PAGE_MASK >> orig_inode->i_blkbits)) != (donor_start & ~(PAGE_MASK >> orig_inode->i_blkbits))) { ext4_debug("ext4 move extent: orig and donor's start " - "offset are not alligned [ino:orig %lu, donor %lu]\n", + "offsets are not aligned [ino:orig %lu, donor %lu]\n", orig_inode->i_ino, donor_inode->i_ino); return -EINVAL; } diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 2e03a0a88d92f7..a9448db1cf7e87 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1120,17 +1120,16 @@ static int ext4_get_context(struct inode *inode, void *ctx, size_t len) EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, len); } -static int ext4_prepare_context(struct inode *inode) -{ - return ext4_convert_inline_data(inode); -} - static int ext4_set_context(struct inode *inode, const void *ctx, size_t len, void *fs_data) { handle_t *handle = fs_data; int res, res2, retries = 0; + res = ext4_convert_inline_data(inode); + if (res) + return res; + /* * If a journal handle was specified, then the encryption context is * being set on a new inode via inheritance and is part of a larger @@ -1196,7 +1195,6 @@ static unsigned ext4_max_namelen(struct inode *inode) static const struct fscrypt_operations ext4_cryptops = { .key_prefix = "ext4:", .get_context = ext4_get_context, - .prepare_context = ext4_prepare_context, .set_context = ext4_set_context, .dummy_context = ext4_dummy_context, .is_encrypted = ext4_encrypted_inode, diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 67636acf762475..996e7900d4c8ea 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -131,31 +131,26 @@ static __le32 ext4_xattr_block_csum(struct inode *inode, } static int ext4_xattr_block_csum_verify(struct inode *inode, - sector_t block_nr, - struct ext4_xattr_header *hdr) + struct buffer_head *bh) { - if (ext4_has_metadata_csum(inode->i_sb) && - (hdr->h_checksum != ext4_xattr_block_csum(inode, block_nr, hdr))) - return 0; - return 1; -} - -static void ext4_xattr_block_csum_set(struct inode *inode, - sector_t block_nr, - struct ext4_xattr_header *hdr) -{ - if (!ext4_has_metadata_csum(inode->i_sb)) - return; + struct ext4_xattr_header *hdr = BHDR(bh); + int ret = 1; - hdr->h_checksum = ext4_xattr_block_csum(inode, block_nr, hdr); + if (ext4_has_metadata_csum(inode->i_sb)) { + lock_buffer(bh); + ret = (hdr->h_checksum == ext4_xattr_block_csum(inode, + bh->b_blocknr, hdr)); + unlock_buffer(bh); + } + return ret; } -static inline int ext4_handle_dirty_xattr_block(handle_t *handle, - struct inode *inode, - struct buffer_head *bh) +static void ext4_xattr_block_csum_set(struct inode *inode, + struct buffer_head *bh) { - ext4_xattr_block_csum_set(inode, bh->b_blocknr, BHDR(bh)); - return ext4_handle_dirty_metadata(handle, inode, bh); + if (ext4_has_metadata_csum(inode->i_sb)) + BHDR(bh)->h_checksum = ext4_xattr_block_csum(inode, + bh->b_blocknr, BHDR(bh)); } static inline const struct xattr_handler * @@ -233,7 +228,7 @@ ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh) if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) || BHDR(bh)->h_blocks != cpu_to_le32(1)) return -EFSCORRUPTED; - if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh))) + if (!ext4_xattr_block_csum_verify(inode, bh)) return -EFSBADCRC; 
error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size, bh->b_data); @@ -618,23 +613,22 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode, } } + ext4_xattr_block_csum_set(inode, bh); /* * Beware of this ugliness: Releasing of xattr block references * from different inodes can race and so we have to protect * from a race where someone else frees the block (and releases * its journal_head) before we are done dirtying the buffer. In * nojournal mode this race is harmless and we actually cannot - * call ext4_handle_dirty_xattr_block() with locked buffer as + * call ext4_handle_dirty_metadata() with locked buffer as * that function can call sync_dirty_buffer() so for that case * we handle the dirtying after unlocking the buffer. */ if (ext4_handle_valid(handle)) - error = ext4_handle_dirty_xattr_block(handle, inode, - bh); + error = ext4_handle_dirty_metadata(handle, inode, bh); unlock_buffer(bh); if (!ext4_handle_valid(handle)) - error = ext4_handle_dirty_xattr_block(handle, inode, - bh); + error = ext4_handle_dirty_metadata(handle, inode, bh); if (IS_SYNC(inode)) ext4_handle_sync(handle); dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1)); @@ -863,13 +857,14 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode, ext4_xattr_cache_insert(ext4_mb_cache, bs->bh); } + ext4_xattr_block_csum_set(inode, bs->bh); unlock_buffer(bs->bh); if (error == -EFSCORRUPTED) goto bad_block; if (!error) - error = ext4_handle_dirty_xattr_block(handle, - inode, - bs->bh); + error = ext4_handle_dirty_metadata(handle, + inode, + bs->bh); if (error) goto cleanup; goto inserted; @@ -967,10 +962,11 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode, ce->e_reusable = 0; ea_bdebug(new_bh, "reusing; refcount now=%d", ref); + ext4_xattr_block_csum_set(inode, new_bh); unlock_buffer(new_bh); - error = ext4_handle_dirty_xattr_block(handle, - inode, - new_bh); + error = ext4_handle_dirty_metadata(handle, + inode, + new_bh); if (error) goto cleanup_dquot; } @@ -1020,11 +1016,12 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode, goto getblk_failed; } memcpy(new_bh->b_data, s->base, new_bh->b_size); + ext4_xattr_block_csum_set(inode, new_bh); set_buffer_uptodate(new_bh); unlock_buffer(new_bh); ext4_xattr_cache_insert(ext4_mb_cache, new_bh); - error = ext4_handle_dirty_xattr_block(handle, - inode, new_bh); + error = ext4_handle_dirty_metadata(handle, inode, + new_bh); if (error) goto cleanup; } diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 1375fef11146d5..1602b4bccae61e 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -22,6 +22,7 @@ #include #include #include +#include #include "f2fs.h" #include "node.h" diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c index a77df377e2e819..ee2d0a485fc347 100644 --- a/fs/f2fs/debug.c +++ b/fs/f2fs/debug.c @@ -196,6 +196,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi) si->base_mem += (NM_I(sbi)->nat_bits_blocks << F2FS_BLKSIZE_BITS); si->base_mem += NM_I(sbi)->nat_blocks * NAT_ENTRY_BITMAP_SIZE; si->base_mem += NM_I(sbi)->nat_blocks / 8; + si->base_mem += NM_I(sbi)->nat_blocks * sizeof(unsigned short); get_cache: si->cache_mem = 0; diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index 4650c9b85de776..8d5c62b07b283f 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -750,7 +750,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page, dentry_blk = page_address(page); bit_pos = dentry - dentry_blk->dentry; for (i = 0; i < slots; i++) - clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap); + 
__clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap); /* Let's check and deallocate this dentry page */ bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap, diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index d1483136fed68e..0a6e115562f62e 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -561,6 +561,8 @@ struct f2fs_nm_info { struct mutex build_lock; /* lock for build free nids */ unsigned char (*free_nid_bitmap)[NAT_ENTRY_BITMAP_SIZE]; unsigned char *nat_block_bitmap; + unsigned short *free_nid_count; /* free nid count of NAT block */ + spinlock_t free_nid_lock; /* protect updating of nid count */ /* for checkpoint */ char *nat_bitmap; /* NAT bitmap pointer */ @@ -2040,8 +2042,8 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync); void truncate_data_blocks(struct dnode_of_data *dn); int truncate_blocks(struct inode *inode, u64 from, bool lock); int f2fs_truncate(struct inode *inode); -int f2fs_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat); +int f2fs_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags); int f2fs_setattr(struct dentry *dentry, struct iattr *attr); int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end); int truncate_data_blocks_range(struct dnode_of_data *dn, int count); diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 78e65288f2b280..5f7317875a6726 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -633,10 +633,10 @@ int f2fs_truncate(struct inode *inode) return 0; } -int f2fs_getattr(struct vfsmount *mnt, - struct dentry *dentry, struct kstat *stat) +int f2fs_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { - struct inode *inode = d_inode(dentry); + struct inode *inode = d_inode(path->dentry); generic_fillattr(inode, stat); stat->blocks <<= 3; return 0; diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 94967171dee87a..481aa8dc79f46f 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -338,9 +338,6 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni, set_nat_flag(e, IS_CHECKPOINTED, false); __set_nat_cache_dirty(nm_i, e); - if (enabled_nat_bits(sbi, NULL) && new_blkaddr == NEW_ADDR) - clear_bit_le(NAT_BLOCK_OFFSET(ni->nid), nm_i->empty_nat_bits); - /* update fsync_mark if its inode nat entry is still alive */ if (ni->nid != ni->ino) e = __lookup_nat_cache(nm_i, ni->ino); @@ -1823,7 +1820,8 @@ static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid) kmem_cache_free(free_nid_slab, i); } -void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid, bool set) +static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid, + bool set, bool build, bool locked) { struct f2fs_nm_info *nm_i = NM_I(sbi); unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid); @@ -1833,9 +1831,18 @@ void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid, bool set) return; if (set) - set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); + __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); else - clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); + __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); + + if (!locked) + spin_lock(&nm_i->free_nid_lock); + if (set) + nm_i->free_nid_count[nat_ofs]++; + else if (!build) + nm_i->free_nid_count[nat_ofs]--; + if (!locked) + spin_unlock(&nm_i->free_nid_lock); } static void scan_nat_page(struct f2fs_sb_info *sbi, @@ -1847,7 +1854,10 @@ static void scan_nat_page(struct f2fs_sb_info *sbi, unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid); 
int i; - set_bit_le(nat_ofs, nm_i->nat_block_bitmap); + if (test_bit_le(nat_ofs, nm_i->nat_block_bitmap)) + return; + + __set_bit_le(nat_ofs, nm_i->nat_block_bitmap); i = start_nid % NAT_ENTRY_PER_BLOCK; @@ -1861,7 +1871,7 @@ static void scan_nat_page(struct f2fs_sb_info *sbi, f2fs_bug_on(sbi, blk_addr == NEW_ADDR); if (blk_addr == NULL_ADDR) freed = add_free_nid(sbi, start_nid, true); - update_free_nid_bitmap(sbi, start_nid, freed); + update_free_nid_bitmap(sbi, start_nid, freed, true, false); } } @@ -1877,6 +1887,8 @@ static void scan_free_nid_bits(struct f2fs_sb_info *sbi) for (i = 0; i < nm_i->nat_blocks; i++) { if (!test_bit_le(i, nm_i->nat_block_bitmap)) continue; + if (!nm_i->free_nid_count[i]) + continue; for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) { nid_t nid; @@ -1907,58 +1919,6 @@ static void scan_free_nid_bits(struct f2fs_sb_info *sbi) up_read(&nm_i->nat_tree_lock); } -static int scan_nat_bits(struct f2fs_sb_info *sbi) -{ - struct f2fs_nm_info *nm_i = NM_I(sbi); - struct page *page; - unsigned int i = 0; - nid_t nid; - - if (!enabled_nat_bits(sbi, NULL)) - return -EAGAIN; - - down_read(&nm_i->nat_tree_lock); -check_empty: - i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i); - if (i >= nm_i->nat_blocks) { - i = 0; - goto check_partial; - } - - for (nid = i * NAT_ENTRY_PER_BLOCK; nid < (i + 1) * NAT_ENTRY_PER_BLOCK; - nid++) { - if (unlikely(nid >= nm_i->max_nid)) - break; - add_free_nid(sbi, nid, true); - } - - if (nm_i->nid_cnt[FREE_NID_LIST] >= MAX_FREE_NIDS) - goto out; - i++; - goto check_empty; - -check_partial: - i = find_next_zero_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i); - if (i >= nm_i->nat_blocks) { - disable_nat_bits(sbi, true); - up_read(&nm_i->nat_tree_lock); - return -EINVAL; - } - - nid = i * NAT_ENTRY_PER_BLOCK; - page = get_current_nat_page(sbi, nid); - scan_nat_page(sbi, page, nid); - f2fs_put_page(page, 1); - - if (nm_i->nid_cnt[FREE_NID_LIST] < MAX_FREE_NIDS) { - i++; - goto check_partial; - } -out: - up_read(&nm_i->nat_tree_lock); - return 0; -} - static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount) { struct f2fs_nm_info *nm_i = NM_I(sbi); @@ -1980,21 +1940,6 @@ static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount) if (nm_i->nid_cnt[FREE_NID_LIST]) return; - - /* try to find free nids with nat_bits */ - if (!scan_nat_bits(sbi) && nm_i->nid_cnt[FREE_NID_LIST]) - return; - } - - /* find next valid candidate */ - if (enabled_nat_bits(sbi, NULL)) { - int idx = find_next_zero_bit_le(nm_i->full_nat_bits, - nm_i->nat_blocks, 0); - - if (idx >= nm_i->nat_blocks) - set_sbi_flag(sbi, SBI_NEED_FSCK); - else - nid = idx * NAT_ENTRY_PER_BLOCK; } /* readahead nat pages to be scanned */ @@ -2081,7 +2026,7 @@ bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid) __insert_nid_to_list(sbi, i, ALLOC_NID_LIST, false); nm_i->available_nids--; - update_free_nid_bitmap(sbi, *nid, false); + update_free_nid_bitmap(sbi, *nid, false, false, false); spin_unlock(&nm_i->nid_list_lock); return true; @@ -2137,7 +2082,7 @@ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid) nm_i->available_nids++; - update_free_nid_bitmap(sbi, nid, true); + update_free_nid_bitmap(sbi, nid, true, false, false); spin_unlock(&nm_i->nid_list_lock); @@ -2383,7 +2328,7 @@ static void __adjust_nat_entry_set(struct nat_entry_set *nes, list_add_tail(&nes->set_list, head); } -void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid, +static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid, struct page *page) { 
struct f2fs_nm_info *nm_i = NM_I(sbi); @@ -2402,16 +2347,16 @@ void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid, valid++; } if (valid == 0) { - set_bit_le(nat_index, nm_i->empty_nat_bits); - clear_bit_le(nat_index, nm_i->full_nat_bits); + __set_bit_le(nat_index, nm_i->empty_nat_bits); + __clear_bit_le(nat_index, nm_i->full_nat_bits); return; } - clear_bit_le(nat_index, nm_i->empty_nat_bits); + __clear_bit_le(nat_index, nm_i->empty_nat_bits); if (valid == NAT_ENTRY_PER_BLOCK) - set_bit_le(nat_index, nm_i->full_nat_bits); + __set_bit_le(nat_index, nm_i->full_nat_bits); else - clear_bit_le(nat_index, nm_i->full_nat_bits); + __clear_bit_le(nat_index, nm_i->full_nat_bits); } static void __flush_nat_entry_set(struct f2fs_sb_info *sbi, @@ -2467,11 +2412,11 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi, add_free_nid(sbi, nid, false); spin_lock(&NM_I(sbi)->nid_list_lock); NM_I(sbi)->available_nids++; - update_free_nid_bitmap(sbi, nid, true); + update_free_nid_bitmap(sbi, nid, true, false, false); spin_unlock(&NM_I(sbi)->nid_list_lock); } else { spin_lock(&NM_I(sbi)->nid_list_lock); - update_free_nid_bitmap(sbi, nid, false); + update_free_nid_bitmap(sbi, nid, false, false, false); spin_unlock(&NM_I(sbi)->nid_list_lock); } } @@ -2577,6 +2522,40 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi) return 0; } +inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi) +{ + struct f2fs_nm_info *nm_i = NM_I(sbi); + unsigned int i = 0; + nid_t nid, last_nid; + + if (!enabled_nat_bits(sbi, NULL)) + return; + + for (i = 0; i < nm_i->nat_blocks; i++) { + i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i); + if (i >= nm_i->nat_blocks) + break; + + __set_bit_le(i, nm_i->nat_block_bitmap); + + nid = i * NAT_ENTRY_PER_BLOCK; + last_nid = (i + 1) * NAT_ENTRY_PER_BLOCK; + + spin_lock(&nm_i->free_nid_lock); + for (; nid < last_nid; nid++) + update_free_nid_bitmap(sbi, nid, true, true, true); + spin_unlock(&nm_i->free_nid_lock); + } + + for (i = 0; i < nm_i->nat_blocks; i++) { + i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i); + if (i >= nm_i->nat_blocks) + break; + + __set_bit_le(i, nm_i->nat_block_bitmap); + } +} + static int init_node_manager(struct f2fs_sb_info *sbi) { struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi); @@ -2638,7 +2617,7 @@ static int init_node_manager(struct f2fs_sb_info *sbi) return 0; } -int init_free_nid_cache(struct f2fs_sb_info *sbi) +static int init_free_nid_cache(struct f2fs_sb_info *sbi) { struct f2fs_nm_info *nm_i = NM_I(sbi); @@ -2651,6 +2630,14 @@ int init_free_nid_cache(struct f2fs_sb_info *sbi) GFP_KERNEL); if (!nm_i->nat_block_bitmap) return -ENOMEM; + + nm_i->free_nid_count = f2fs_kvzalloc(nm_i->nat_blocks * + sizeof(unsigned short), GFP_KERNEL); + if (!nm_i->free_nid_count) + return -ENOMEM; + + spin_lock_init(&nm_i->free_nid_lock); + return 0; } @@ -2670,6 +2657,9 @@ int build_node_manager(struct f2fs_sb_info *sbi) if (err) return err; + /* load free nid status from nat_bits table */ + load_free_nid_bitmap(sbi); + build_free_nids(sbi, true, true); return 0; } @@ -2730,6 +2720,7 @@ void destroy_node_manager(struct f2fs_sb_info *sbi) kvfree(nm_i->nat_block_bitmap); kvfree(nm_i->free_nid_bitmap); + kvfree(nm_i->free_nid_count); kfree(nm_i->nat_bitmap); kfree(nm_i->nat_bits); diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 4bd7a8b19332d1..29ef7088c5582a 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -1163,6 +1163,12 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int 
del) if (f2fs_discard_en(sbi) && !f2fs_test_and_set_bit(offset, se->discard_map)) sbi->discard_blks--; + + /* don't overwrite by SSR to keep node chain */ + if (se->type == CURSEG_WARM_NODE) { + if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map)) + se->ckpt_valid_blocks++; + } } else { if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map)) { #ifdef CONFIG_F2FS_CHECK_FS diff --git a/fs/fat/fat.h b/fs/fat/fat.h index e6b764a17a9c84..051dac1ce3be17 100644 --- a/fs/fat/fat.h +++ b/fs/fat/fat.h @@ -364,8 +364,8 @@ extern const struct file_operations fat_file_operations; extern const struct inode_operations fat_file_inode_operations; extern int fat_setattr(struct dentry *dentry, struct iattr *attr); extern void fat_truncate_blocks(struct inode *inode, loff_t offset); -extern int fat_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat); +extern int fat_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags); extern int fat_file_fsync(struct file *file, loff_t start, loff_t end, int datasync); diff --git a/fs/fat/file.c b/fs/fat/file.c index 3d04b124bce099..4724cc9ad65021 100644 --- a/fs/fat/file.c +++ b/fs/fat/file.c @@ -365,9 +365,10 @@ void fat_truncate_blocks(struct inode *inode, loff_t offset) fat_flush_inodes(inode->i_sb, inode, NULL); } -int fat_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) +int fat_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { - struct inode *inode = d_inode(dentry); + struct inode *inode = d_inode(path->dentry); generic_fillattr(inode, stat); stat->blksize = MSDOS_SB(inode->i_sb)->cluster_size; diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 338d2f73eb29c8..a2c05f2ada6dd8 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -1359,6 +1359,16 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat, return 0; } +static void fat_dummy_inode_init(struct inode *inode) +{ + /* Initialize this dummy inode to work as no-op. 
*/ + MSDOS_I(inode)->mmu_private = 0; + MSDOS_I(inode)->i_start = 0; + MSDOS_I(inode)->i_logstart = 0; + MSDOS_I(inode)->i_attrs = 0; + MSDOS_I(inode)->i_pos = 0; +} + static int fat_read_root(struct inode *inode) { struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb); @@ -1803,12 +1813,13 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat, fat_inode = new_inode(sb); if (!fat_inode) goto out_fail; - MSDOS_I(fat_inode)->i_pos = 0; + fat_dummy_inode_init(fat_inode); sbi->fat_inode = fat_inode; fsinfo_inode = new_inode(sb); if (!fsinfo_inode) goto out_fail; + fat_dummy_inode_init(fsinfo_inode); fsinfo_inode->i_ino = MSDOS_FSINFO_INO; sbi->fsinfo_inode = fsinfo_inode; insert_inode_hash(fsinfo_inode); diff --git a/fs/fcntl.c b/fs/fcntl.c index e1c54f20325ca8..be8fbe289087e6 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include diff --git a/fs/file.c b/fs/file.c index 69d6990e30210b..ad6f094f2eff2f 100644 --- a/fs/file.c +++ b/fs/file.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/fs/file_table.c b/fs/file_table.c index 6d982b57de9241..954d510b765af8 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index ef600591d96f9a..63ee2940775ce9 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -173,19 +173,33 @@ static void wb_wakeup(struct bdi_writeback *wb) spin_unlock_bh(&wb->work_lock); } +static void finish_writeback_work(struct bdi_writeback *wb, + struct wb_writeback_work *work) +{ + struct wb_completion *done = work->done; + + if (work->auto_free) + kfree(work); + if (done && atomic_dec_and_test(&done->cnt)) + wake_up_all(&wb->bdi->wb_waitq); +} + static void wb_queue_work(struct bdi_writeback *wb, struct wb_writeback_work *work) { trace_writeback_queue(wb, work); - spin_lock_bh(&wb->work_lock); - if (!test_bit(WB_registered, &wb->state)) - goto out_unlock; if (work->done) atomic_inc(&work->done->cnt); - list_add_tail(&work->list, &wb->work_list); - mod_delayed_work(bdi_wq, &wb->dwork, 0); -out_unlock: + + spin_lock_bh(&wb->work_lock); + + if (test_bit(WB_registered, &wb->state)) { + list_add_tail(&work->list, &wb->work_list); + mod_delayed_work(bdi_wq, &wb->dwork, 0); + } else + finish_writeback_work(wb, work); + spin_unlock_bh(&wb->work_lock); } @@ -1873,16 +1887,9 @@ static long wb_do_writeback(struct bdi_writeback *wb) set_bit(WB_writeback_running, &wb->state); while ((work = get_next_work_item(wb)) != NULL) { - struct wb_completion *done = work->done; - trace_writeback_exec(wb, work); - wrote += wb_writeback(wb, work); - - if (work->auto_free) - kfree(work); - if (done && atomic_dec_and_test(&done->cnt)) - wake_up_all(&wb->bdi->wb_waitq); + finish_writeback_work(wb, work); } /* diff --git a/fs/fs_struct.c b/fs/fs_struct.c index 7dca743b2ce1c8..be0250788b737c 100644 --- a/fs/fs_struct.c +++ b/fs/fs_struct.c @@ -1,5 +1,6 @@ #include -#include +#include +#include #include #include #include diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c index 5d5ddaa84b215f..67f940892ef810 100644 --- a/fs/fscache/object-list.c +++ b/fs/fscache/object-list.c @@ -329,7 +329,7 @@ static void fscache_objlist_config(struct fscache_objlist_data *data) config = 0; rcu_read_lock(); - confkey = user_key_payload(key); + confkey = user_key_payload_rcu(key); buf = confkey->data; for (len = confkey->datalen - 1; len 
>= 0; len--) { diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index f11792672977d2..b681b43c766e11 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 811fd8929a18c1..00800c07ba1c89 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -473,7 +473,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, if (err) { fuse_sync_release(ff, flags); } else { - file->private_data = fuse_file_get(ff); + file->private_data = ff; fuse_finish_open(inode, file); } return err; @@ -1777,10 +1777,10 @@ static int fuse_setattr(struct dentry *entry, struct iattr *attr) return ret; } -static int fuse_getattr(struct vfsmount *mnt, struct dentry *entry, - struct kstat *stat) +static int fuse_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { - struct inode *inode = d_inode(entry); + struct inode *inode = d_inode(path->dentry); struct fuse_conn *fc = get_fuse_conn(inode); if (!fuse_allow_current_process(fc)) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index e80bfd06daf5fc..ec238fb5a584b1 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -58,7 +58,7 @@ struct fuse_file *fuse_file_alloc(struct fuse_conn *fc) } INIT_LIST_HEAD(&ff->write_entry); - atomic_set(&ff->count, 0); + atomic_set(&ff->count, 1); RB_CLEAR_NODE(&ff->polled_node); init_waitqueue_head(&ff->poll_wait); @@ -75,7 +75,7 @@ void fuse_file_free(struct fuse_file *ff) kfree(ff); } -struct fuse_file *fuse_file_get(struct fuse_file *ff) +static struct fuse_file *fuse_file_get(struct fuse_file *ff) { atomic_inc(&ff->count); return ff; @@ -100,6 +100,7 @@ static void fuse_file_put(struct fuse_file *ff, bool sync) iput(req->misc.release.inode); fuse_put_request(ff->fc, req); } else if (sync) { + __set_bit(FR_FORCE, &req->flags); __clear_bit(FR_BACKGROUND, &req->flags); fuse_request_send(ff->fc, req); iput(req->misc.release.inode); @@ -146,7 +147,7 @@ int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file, ff->open_flags &= ~FOPEN_DIRECT_IO; ff->nodeid = nodeid; - file->private_data = fuse_file_get(ff); + file->private_data = ff; return 0; } @@ -245,14 +246,9 @@ static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode) void fuse_release_common(struct file *file, int opcode) { - struct fuse_file *ff; - struct fuse_req *req; - - ff = file->private_data; - if (unlikely(!ff)) - return; + struct fuse_file *ff = file->private_data; + struct fuse_req *req = ff->reserved_req; - req = ff->reserved_req; fuse_prepare_release(ff, file->f_flags, opcode); if (ff->flock) { @@ -297,13 +293,13 @@ static int fuse_release(struct inode *inode, struct file *file) void fuse_sync_release(struct fuse_file *ff, int flags) { - WARN_ON(atomic_read(&ff->count) > 1); + WARN_ON(atomic_read(&ff->count) != 1); fuse_prepare_release(ff, flags, FUSE_RELEASE); - __set_bit(FR_FORCE, &ff->reserved_req->flags); - __clear_bit(FR_BACKGROUND, &ff->reserved_req->flags); - fuse_request_send(ff->fc, ff->reserved_req); - fuse_put_request(ff->fc, ff->reserved_req); - kfree(ff); + /* + * iput(NULL) is a no-op and since the refcount is 1 and everything's + * synchronous, we are fine with not doing igrab() here" + */ + fuse_file_put(ff, true); } EXPORT_SYMBOL_GPL(fuse_sync_release); diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 052f8d3c41cb04..32ac2c9b09c030 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -732,7 +732,6 @@ void fuse_read_fill(struct fuse_req *req, struct file 
*file, int fuse_open_common(struct inode *inode, struct file *file, bool isdir); struct fuse_file *fuse_file_alloc(struct fuse_conn *fc); -struct fuse_file *fuse_file_get(struct fuse_file *ff); void fuse_file_free(struct fuse_file *ff); void fuse_finish_open(struct inode *inode, struct file *file); diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index c45084ac642d19..511e1ed7e2ded7 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h @@ -207,7 +207,7 @@ struct lm_lockname { struct gfs2_sbd *ln_sbd; u64 ln_number; unsigned int ln_type; -}; +} __packed __aligned(sizeof(int)); #define lm_name_equal(name1, name2) \ (((name1)->ln_number == (name2)->ln_number) && \ diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index eb7724b8578a04..e279c3ce27be3c 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -1959,9 +1960,10 @@ static int gfs2_setattr(struct dentry *dentry, struct iattr *attr) /** * gfs2_getattr - Read out an inode's attributes - * @mnt: The vfsmount the inode is being accessed from - * @dentry: The dentry to stat + * @path: Object to query * @stat: The inode's stats + * @request_mask: Mask of STATX_xxx flags indicating the caller's interests + * @flags: AT_STATX_xxx setting * * This may be called from the VFS directly, or from within GFS2 with the * inode locked, so we look to see if the glock is already locked and only @@ -1972,10 +1974,10 @@ static int gfs2_setattr(struct dentry *dentry, struct iattr *attr) * Returns: errno */ -static int gfs2_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +static int gfs2_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { - struct inode *inode = d_inode(dentry); + struct inode *inode = d_inode(path->dentry); struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_holder gh; int error; diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c index 8b907c5cc9135c..0515f0a686375f 100644 --- a/fs/gfs2/lock_dlm.c +++ b/fs/gfs2/lock_dlm.c @@ -15,6 +15,7 @@ #include #include #include +#include #include "incore.h" #include "glock.h" diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index e3ee387a6dfebf..361796a84fce4f 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -10,7 +10,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include -#include +#include #include #include #include diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c index f8d30e41d1d33b..7a515345610c28 100644 --- a/fs/gfs2/sys.c +++ b/fs/gfs2/sys.c @@ -10,6 +10,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include +#include #include #include #include diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c index 5de5c48b418da2..75b254280ff631 100644 --- a/fs/hfs/dir.c +++ b/fs/hfs/dir.c @@ -169,7 +169,7 @@ static int hfs_readdir(struct file *file, struct dir_context *ctx) * Can be done after the list insertion; exclusion with * hfs_delete_cat() is provided by directory lock. 
*/ - memcpy(&rd->key, &fd.key, sizeof(struct hfs_cat_key)); + memcpy(&rd->key, &fd.key->cat, sizeof(struct hfs_cat_key)); out: hfs_find_exit(&fd); return err; diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c index f776acf2378a1c..bfbba799430f15 100644 --- a/fs/hfs/inode.c +++ b/fs/hfs/inode.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index 2e796f8302ffac..e8638d52819519 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include "hfsplus_fs.h" diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h index aebb78f9e47f2a..d352f3a6af7f09 100644 --- a/fs/hpfs/hpfs_fn.h +++ b/fs/hpfs/hpfs_fn.h @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 54de77e78775ed..7163fe014b57f4 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -11,7 +11,7 @@ #include #include -#include /* remove ASAP */ +#include /* remove ASAP */ #include #include #include @@ -695,14 +695,11 @@ static struct inode *hugetlbfs_get_root(struct super_block *sb, inode = new_inode(sb); if (inode) { - struct hugetlbfs_inode_info *info; inode->i_ino = get_next_ino(); inode->i_mode = S_IFDIR | config->mode; inode->i_uid = config->uid; inode->i_gid = config->gid; inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); - info = HUGETLBFS_I(inode); - mpol_shared_policy_init(&info->policy, NULL); inode->i_op = &hugetlbfs_dir_inode_operations; inode->i_fop = &simple_dir_operations; /* directory inodes start off with i_nlink == 2 (for "." entry) */ @@ -733,7 +730,6 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, inode = new_inode(sb); if (inode) { - struct hugetlbfs_inode_info *info; inode->i_ino = get_next_ino(); inode_init_owner(inode, dir, mode); lockdep_set_class(&inode->i_mapping->i_mmap_rwsem, @@ -741,15 +737,6 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, inode->i_mapping->a_ops = &hugetlbfs_aops; inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); inode->i_mapping->private_data = resv_map; - info = HUGETLBFS_I(inode); - /* - * The policy is initialized here even if we are creating a - * private inode because initialization simply creates an - * an empty rb tree and calls rwlock_init(), later when we - * call mpol_free_shared_policy() it will just return because - * the rb tree will still be empty. - */ - mpol_shared_policy_init(&info->policy, NULL); switch (mode & S_IFMT) { default: init_special_inode(inode, mode, dev); @@ -937,6 +924,18 @@ static struct inode *hugetlbfs_alloc_inode(struct super_block *sb) hugetlbfs_inc_free_inodes(sbinfo); return NULL; } + + /* + * Any time after allocation, hugetlbfs_destroy_inode can be called + * for the inode. mpol_free_shared_policy is unconditionally called + * as part of hugetlbfs_destroy_inode. So, initialize policy here + * in case of a quick call to destroy. + * + * Note that the policy is initialized even if we are creating a + * private inode. This simplifies hugetlbfs_destroy_inode. 
+ */ + mpol_shared_policy_init(&p->policy, NULL); + return &p->vfs_inode; } diff --git a/fs/ioctl.c b/fs/ioctl.c index cb9b02940805cb..569db68d02b317 100644 --- a/fs/ioctl.c +++ b/fs/ioctl.c @@ -15,6 +15,8 @@ #include #include #include +#include + #include "internal.h" #include diff --git a/fs/iomap.c b/fs/iomap.c index 0f85f24106054a..141c3cd55a8b2d 100644 --- a/fs/iomap.c +++ b/fs/iomap.c @@ -26,6 +26,8 @@ #include #include #include +#include + #include "internal.h" /* @@ -844,7 +846,8 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, struct address_space *mapping = iocb->ki_filp->f_mapping; struct inode *inode = file_inode(iocb->ki_filp); size_t count = iov_iter_count(iter); - loff_t pos = iocb->ki_pos, end = iocb->ki_pos + count - 1, ret = 0; + loff_t pos = iocb->ki_pos, start = pos; + loff_t end = iocb->ki_pos + count - 1, ret = 0; unsigned int flags = IOMAP_DIRECT; struct blk_plug plug; struct iomap_dio *dio; @@ -885,12 +888,12 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, } if (mapping->nrpages) { - ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, end); + ret = filemap_write_and_wait_range(mapping, start, end); if (ret) goto out_free_dio; ret = invalidate_inode_pages2_range(mapping, - iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT); + start >> PAGE_SHIFT, end >> PAGE_SHIFT); WARN_ON_ONCE(ret); ret = 0; } @@ -939,6 +942,8 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, __set_current_state(TASK_RUNNING); } + ret = iomap_dio_complete(dio); + /* * Try again to invalidate clean pages which might have been cached by * non-direct readahead, or faulted in by get_user_pages() if the source @@ -947,12 +952,12 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, * this invalidation fails, tough, the write still worked... */ if (iov_iter_rw(iter) == WRITE && mapping->nrpages) { - ret = invalidate_inode_pages2_range(mapping, - iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT); - WARN_ON_ONCE(ret); + int err = invalidate_inode_pages2_range(mapping, + start >> PAGE_SHIFT, end >> PAGE_SHIFT); + WARN_ON_ONCE(err); } - return iomap_dio_complete(dio); + return ret; out_free_dio: kfree(dio); diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index 871c8b39209913..020ba093614641 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c @@ -15,6 +15,7 @@ #include #include +#include #include #include #include diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index a1a359bfcc9cd4..5adc2fb62b0fab 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -1125,10 +1125,8 @@ static journal_t *journal_init_common(struct block_device *bdev, /* Set up a default-sized revoke table for the new mount. 
*/ err = jbd2_journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH); - if (err) { - kfree(journal); - return NULL; - } + if (err) + goto err_cleanup; spin_lock_init(&journal->j_history_lock); @@ -1145,23 +1143,25 @@ static journal_t *journal_init_common(struct block_device *bdev, journal->j_wbufsize = n; journal->j_wbuf = kmalloc_array(n, sizeof(struct buffer_head *), GFP_KERNEL); - if (!journal->j_wbuf) { - kfree(journal); - return NULL; - } + if (!journal->j_wbuf) + goto err_cleanup; bh = getblk_unmovable(journal->j_dev, start, journal->j_blocksize); if (!bh) { pr_err("%s: Cannot get buffer for journal superblock\n", __func__); - kfree(journal->j_wbuf); - kfree(journal); - return NULL; + goto err_cleanup; } journal->j_sb_buffer = bh; journal->j_superblock = (journal_superblock_t *)bh->b_data; return journal; + +err_cleanup: + kfree(journal->j_wbuf); + jbd2_journal_destroy_revoke(journal); + kfree(journal); + return NULL; } /* jbd2_journal_init_dev and jbd2_journal_init_inode: diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c index cfc38b5521189f..f9aefcda585418 100644 --- a/fs/jbd2/revoke.c +++ b/fs/jbd2/revoke.c @@ -280,6 +280,7 @@ int jbd2_journal_init_revoke(journal_t *journal, int hash_size) fail1: jbd2_journal_destroy_revoke_table(journal->j_revoke_table[0]); + journal->j_revoke_table[0] = NULL; fail0: return -ENOMEM; } diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c index e5c1783ab64a05..453a6a1fff34ec 100644 --- a/fs/jffs2/background.c +++ b/fs/jffs2/background.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include #include "nodelist.h" diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c index 567653f7c0ce2e..76fa814df3d1bf 100644 --- a/fs/jffs2/fs.c +++ b/fs/jffs2/fs.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c index cda0774c2c9c53..a7bbe879cfc3dd 100644 --- a/fs/jffs2/nodemgmt.c +++ b/fs/jffs2/nodemgmt.c @@ -14,7 +14,7 @@ #include #include #include -#include /* For cond_resched() */ +#include #include "nodelist.h" #include "debug.h" diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c index 35043a8c452905..ac2dfe0c5a9c85 100644 --- a/fs/kernfs/file.c +++ b/fs/kernfs/file.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include "kernfs-internal.h" @@ -809,7 +809,8 @@ void kernfs_drain_open_files(struct kernfs_node *kn) if (kn->flags & KERNFS_HAS_MMAP) unmap_mapping_range(inode->i_mapping, 0, 0, 1); - kernfs_release_file(kn, of); + if (kn->flags & KERNFS_HAS_RELEASE) + kernfs_release_file(kn, of); } mutex_unlock(&kernfs_open_file_mutex); diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c index ac9e108ce1eacb..fb4b4a79a0d6b4 100644 --- a/fs/kernfs/inode.c +++ b/fs/kernfs/inode.c @@ -200,11 +200,11 @@ static void kernfs_refresh_inode(struct kernfs_node *kn, struct inode *inode) set_nlink(inode, kn->dir.subdirs + 2); } -int kernfs_iop_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +int kernfs_iop_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) { - struct kernfs_node *kn = dentry->d_fsdata; - struct inode *inode = d_inode(dentry); + struct kernfs_node *kn = path->dentry->d_fsdata; + struct inode *inode = d_inode(path->dentry); mutex_lock(&kernfs_mutex); kernfs_refresh_inode(kn, inode); diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h index 3100987cf8baf7..2d5144ab425159 100644 --- a/fs/kernfs/kernfs-internal.h +++ 
b/fs/kernfs/kernfs-internal.h @@ -80,8 +80,8 @@ extern const struct xattr_handler *kernfs_xattr_handlers[]; void kernfs_evict_inode(struct inode *inode); int kernfs_iop_permission(struct inode *inode, int mask); int kernfs_iop_setattr(struct dentry *dentry, struct iattr *iattr); -int kernfs_iop_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat); +int kernfs_iop_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags); ssize_t kernfs_iop_listxattr(struct dentry *dentry, char *buf, size_t size); /* diff --git a/fs/libfs.c b/fs/libfs.c index 28d6f35feed628..a8b62e5d43a972 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -20,10 +21,10 @@ #include "internal.h" -int simple_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +int simple_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) { - struct inode *inode = d_inode(dentry); + struct inode *inode = d_inode(path->dentry); generic_fillattr(inode, stat); stat->blocks = inode->i_mapping->nrpages << (PAGE_SHIFT - 9); return 0; @@ -1143,10 +1144,10 @@ static struct dentry *empty_dir_lookup(struct inode *dir, struct dentry *dentry, return ERR_PTR(-ENOENT); } -static int empty_dir_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +static int empty_dir_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) { - struct inode *inode = d_inode(dentry); + struct inode *inode = d_inode(path->dentry); generic_fillattr(inode, stat); return 0; } diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index 7e4ea3b9f4724f..e7c8b9c76e4857 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c @@ -17,7 +17,7 @@ #include #include -#include +#include #include #include #include diff --git a/fs/minix/inode.c b/fs/minix/inode.c index e7d9bf86d97595..6ac76b0434e937 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c @@ -622,11 +622,14 @@ static int minix_write_inode(struct inode *inode, struct writeback_control *wbc) return err; } -int minix_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) +int minix_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { - struct super_block *sb = dentry->d_sb; - generic_fillattr(d_inode(dentry), stat); - if (INODE_VERSION(d_inode(dentry)) == MINIX_V1) + struct super_block *sb = path->dentry->d_sb; + struct inode *inode = d_inode(path->dentry); + + generic_fillattr(inode, stat); + if (INODE_VERSION(inode) == MINIX_V1) stat->blocks = (BLOCK_SIZE / 512) * V1_minix_blocks(stat->size, sb); else stat->blocks = (sb->s_blocksize / 512) * V2_minix_blocks(stat->size, sb); diff --git a/fs/minix/minix.h b/fs/minix/minix.h index 01ad81dcacc5a4..663d66138d06df 100644 --- a/fs/minix/minix.h +++ b/fs/minix/minix.h @@ -51,7 +51,7 @@ extern unsigned long minix_count_free_inodes(struct super_block *sb); extern int minix_new_block(struct inode * inode); extern void minix_free_block(struct inode *inode, unsigned long block); extern unsigned long minix_count_free_blocks(struct super_block *sb); -extern int minix_getattr(struct vfsmount *, struct dentry *, struct kstat *); +extern int minix_getattr(const struct path *, struct kstat *, u32, unsigned int); extern int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len); extern void V1_minix_truncate(struct inode *); diff --git a/fs/namei.c b/fs/namei.c index 
da689c9c005ee2..d41fab78798b2e 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -672,52 +672,83 @@ static bool legitimize_links(struct nameidata *nd) /** * unlazy_walk - try to switch to ref-walk mode. * @nd: nameidata pathwalk data - * @dentry: child of nd->path.dentry or NULL - * @seq: seq number to check dentry against * Returns: 0 on success, -ECHILD on failure * - * unlazy_walk attempts to legitimize the current nd->path, nd->root and dentry - * for ref-walk mode. @dentry must be a path found by a do_lookup call on - * @nd or NULL. Must be called from rcu-walk context. + * unlazy_walk attempts to legitimize the current nd->path and nd->root + * for ref-walk mode. + * Must be called from rcu-walk context. * Nothing should touch nameidata between unlazy_walk() failure and * terminate_walk(). */ -static int unlazy_walk(struct nameidata *nd, struct dentry *dentry, unsigned seq) +static int unlazy_walk(struct nameidata *nd) { struct dentry *parent = nd->path.dentry; BUG_ON(!(nd->flags & LOOKUP_RCU)); + nd->flags &= ~LOOKUP_RCU; + if (unlikely(!legitimize_links(nd))) + goto out2; + if (unlikely(!legitimize_path(nd, &nd->path, nd->seq))) + goto out1; + if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) { + if (unlikely(!legitimize_path(nd, &nd->root, nd->root_seq))) + goto out; + } + rcu_read_unlock(); + BUG_ON(nd->inode != parent->d_inode); + return 0; + +out2: + nd->path.mnt = NULL; + nd->path.dentry = NULL; +out1: + if (!(nd->flags & LOOKUP_ROOT)) + nd->root.mnt = NULL; +out: + rcu_read_unlock(); + return -ECHILD; +} + +/** + * unlazy_child - try to switch to ref-walk mode. + * @nd: nameidata pathwalk data + * @dentry: child of nd->path.dentry + * @seq: seq number to check dentry against + * Returns: 0 on success, -ECHILD on failure + * + * unlazy_child attempts to legitimize the current nd->path, nd->root and dentry + * for ref-walk mode. @dentry must be a path found by a do_lookup call on + * @nd. Must be called from rcu-walk context. + * Nothing should touch nameidata between unlazy_child() failure and + * terminate_walk(). + */ +static int unlazy_child(struct nameidata *nd, struct dentry *dentry, unsigned seq) +{ + BUG_ON(!(nd->flags & LOOKUP_RCU)); + nd->flags &= ~LOOKUP_RCU; if (unlikely(!legitimize_links(nd))) goto out2; if (unlikely(!legitimize_mnt(nd->path.mnt, nd->m_seq))) goto out2; - if (unlikely(!lockref_get_not_dead(&parent->d_lockref))) + if (unlikely(!lockref_get_not_dead(&nd->path.dentry->d_lockref))) goto out1; /* - * For a negative lookup, the lookup sequence point is the parents - * sequence point, and it only needs to revalidate the parent dentry. - * - * For a positive lookup, we need to move both the parent and the - * dentry from the RCU domain to be properly refcounted. And the - * sequence number in the dentry validates *both* dentry counters, - * since we checked the sequence number of the parent after we got - * the child sequence number. So we know the parent must still - * be valid if the child sequence number is still valid. + * We need to move both the parent and the dentry from the RCU domain + * to be properly refcounted. And the sequence number in the dentry + * validates *both* dentry counters, since we checked the sequence + * number of the parent after we got the child sequence number. 
So we + * know the parent must still be valid if the child sequence number is */ - if (!dentry) { - if (read_seqcount_retry(&parent->d_seq, nd->seq)) - goto out; - BUG_ON(nd->inode != parent->d_inode); - } else { - if (!lockref_get_not_dead(&dentry->d_lockref)) - goto out; - if (read_seqcount_retry(&dentry->d_seq, seq)) - goto drop_dentry; + if (unlikely(!lockref_get_not_dead(&dentry->d_lockref))) + goto out; + if (unlikely(read_seqcount_retry(&dentry->d_seq, seq))) { + rcu_read_unlock(); + dput(dentry); + goto drop_root_mnt; } - /* * Sequence counts matched. Now make sure that the root is * still valid and get it if required. @@ -733,10 +764,6 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry, unsigned seq rcu_read_unlock(); return 0; -drop_dentry: - rcu_read_unlock(); - dput(dentry); - goto drop_root_mnt; out2: nd->path.mnt = NULL; out1: @@ -749,27 +776,12 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry, unsigned seq return -ECHILD; } -static int unlazy_link(struct nameidata *nd, struct path *link, unsigned seq) -{ - if (unlikely(!legitimize_path(nd, link, seq))) { - drop_links(nd); - nd->depth = 0; - nd->flags &= ~LOOKUP_RCU; - nd->path.mnt = NULL; - nd->path.dentry = NULL; - if (!(nd->flags & LOOKUP_ROOT)) - nd->root.mnt = NULL; - rcu_read_unlock(); - } else if (likely(unlazy_walk(nd, NULL, 0)) == 0) { - return 0; - } - path_put(link); - return -ECHILD; -} - static inline int d_revalidate(struct dentry *dentry, unsigned int flags) { - return dentry->d_op->d_revalidate(dentry, flags); + if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) + return dentry->d_op->d_revalidate(dentry, flags); + else + return 1; } /** @@ -790,7 +802,7 @@ static int complete_walk(struct nameidata *nd) if (nd->flags & LOOKUP_RCU) { if (!(nd->flags & LOOKUP_ROOT)) nd->root.mnt = NULL; - if (unlikely(unlazy_walk(nd, NULL, 0))) + if (unlikely(unlazy_walk(nd))) return -ECHILD; } @@ -1016,7 +1028,7 @@ const char *get_link(struct nameidata *nd) touch_atime(&last->link); cond_resched(); } else if (atime_needs_update_rcu(&last->link, inode)) { - if (unlikely(unlazy_walk(nd, NULL, 0))) + if (unlikely(unlazy_walk(nd))) return ERR_PTR(-ECHILD); touch_atime(&last->link); } @@ -1035,7 +1047,7 @@ const char *get_link(struct nameidata *nd) if (nd->flags & LOOKUP_RCU) { res = get(NULL, inode, &last->done); if (res == ERR_PTR(-ECHILD)) { - if (unlikely(unlazy_walk(nd, NULL, 0))) + if (unlikely(unlazy_walk(nd))) return ERR_PTR(-ECHILD); res = get(dentry, inode, &last->done); } @@ -1469,19 +1481,14 @@ static struct dentry *lookup_dcache(const struct qstr *name, struct dentry *dir, unsigned int flags) { - struct dentry *dentry; - int error; - - dentry = d_lookup(dir, name); + struct dentry *dentry = d_lookup(dir, name); if (dentry) { - if (dentry->d_flags & DCACHE_OP_REVALIDATE) { - error = d_revalidate(dentry, flags); - if (unlikely(error <= 0)) { - if (!error) - d_invalidate(dentry); - dput(dentry); - return ERR_PTR(error); - } + int error = d_revalidate(dentry, flags); + if (unlikely(error <= 0)) { + if (!error) + d_invalidate(dentry); + dput(dentry); + return ERR_PTR(error); } } return dentry; @@ -1546,7 +1553,7 @@ static int lookup_fast(struct nameidata *nd, bool negative; dentry = __d_lookup_rcu(parent, &nd->last, &seq); if (unlikely(!dentry)) { - if (unlazy_walk(nd, NULL, 0)) + if (unlazy_walk(nd)) return -ECHILD; return 0; } @@ -1571,14 +1578,8 @@ static int lookup_fast(struct nameidata *nd, return -ECHILD; *seqp = seq; - if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) - 
status = d_revalidate(dentry, nd->flags); - if (unlikely(status <= 0)) { - if (unlazy_walk(nd, dentry, seq)) - return -ECHILD; - if (status == -ECHILD) - status = d_revalidate(dentry, nd->flags); - } else { + status = d_revalidate(dentry, nd->flags); + if (likely(status > 0)) { /* * Note: do negative dentry check after revalidation in * case that drops it. @@ -1589,15 +1590,17 @@ static int lookup_fast(struct nameidata *nd, path->dentry = dentry; if (likely(__follow_mount_rcu(nd, path, inode, seqp))) return 1; - if (unlazy_walk(nd, dentry, seq)) - return -ECHILD; } + if (unlazy_child(nd, dentry, seq)) + return -ECHILD; + if (unlikely(status == -ECHILD)) + /* we'd been told to redo it in non-rcu mode */ + status = d_revalidate(dentry, nd->flags); } else { dentry = __d_lookup(parent, &nd->last); if (unlikely(!dentry)) return 0; - if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) - status = d_revalidate(dentry, nd->flags); + status = d_revalidate(dentry, nd->flags); } if (unlikely(status <= 0)) { if (!status) @@ -1636,8 +1639,7 @@ static struct dentry *lookup_slow(const struct qstr *name, if (IS_ERR(dentry)) goto out; if (unlikely(!d_in_lookup(dentry))) { - if ((dentry->d_flags & DCACHE_OP_REVALIDATE) && - !(flags & LOOKUP_NO_REVAL)) { + if (!(flags & LOOKUP_NO_REVAL)) { int error = d_revalidate(dentry, flags); if (unlikely(error <= 0)) { if (!error) { @@ -1668,7 +1670,7 @@ static inline int may_lookup(struct nameidata *nd) int err = inode_permission(nd->inode, MAY_EXEC|MAY_NOT_BLOCK); if (err != -ECHILD) return err; - if (unlazy_walk(nd, NULL, 0)) + if (unlazy_walk(nd)) return -ECHILD; } return inode_permission(nd->inode, MAY_EXEC); @@ -1703,9 +1705,17 @@ static int pick_link(struct nameidata *nd, struct path *link, error = nd_alloc_stack(nd); if (unlikely(error)) { if (error == -ECHILD) { - if (unlikely(unlazy_link(nd, link, seq))) - return -ECHILD; - error = nd_alloc_stack(nd); + if (unlikely(!legitimize_path(nd, link, seq))) { + drop_links(nd); + nd->depth = 0; + nd->flags &= ~LOOKUP_RCU; + nd->path.mnt = NULL; + nd->path.dentry = NULL; + if (!(nd->flags & LOOKUP_ROOT)) + nd->root.mnt = NULL; + rcu_read_unlock(); + } else if (likely(unlazy_walk(nd)) == 0) + error = nd_alloc_stack(nd); } if (error) { path_put(link); @@ -2122,7 +2132,7 @@ static int link_path_walk(const char *name, struct nameidata *nd) } if (unlikely(!d_can_lookup(nd->path.dentry))) { if (nd->flags & LOOKUP_RCU) { - if (unlazy_walk(nd, NULL, 0)) + if (unlazy_walk(nd)) return -ECHILD; } return -ENOTDIR; @@ -2579,7 +2589,7 @@ mountpoint_last(struct nameidata *nd) /* If we're in rcuwalk, drop out of it to handle last component */ if (nd->flags & LOOKUP_RCU) { - if (unlazy_walk(nd, NULL, 0)) + if (unlazy_walk(nd)) return -ECHILD; } @@ -3072,9 +3082,6 @@ static int lookup_open(struct nameidata *nd, struct path *path, if (d_in_lookup(dentry)) break; - if (!(dentry->d_flags & DCACHE_OP_REVALIDATE)) - break; - error = d_revalidate(dentry, nd->flags); if (likely(error > 0)) break; @@ -3356,13 +3363,50 @@ static int do_last(struct nameidata *nd, return error; } +struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode, int open_flag) +{ + static const struct qstr name = QSTR_INIT("/", 1); + struct dentry *child = NULL; + struct inode *dir = dentry->d_inode; + struct inode *inode; + int error; + + /* we want directory to be writable */ + error = inode_permission(dir, MAY_WRITE | MAY_EXEC); + if (error) + goto out_err; + error = -EOPNOTSUPP; + if (!dir->i_op->tmpfile) + goto out_err; + error = -ENOMEM; + child = 
d_alloc(dentry, &name); + if (unlikely(!child)) + goto out_err; + error = dir->i_op->tmpfile(dir, child, mode); + if (error) + goto out_err; + error = -ENOENT; + inode = child->d_inode; + if (unlikely(!inode)) + goto out_err; + if (!(open_flag & O_EXCL)) { + spin_lock(&inode->i_lock); + inode->i_state |= I_LINKABLE; + spin_unlock(&inode->i_lock); + } + return child; + +out_err: + dput(child); + return ERR_PTR(error); +} +EXPORT_SYMBOL(vfs_tmpfile); + static int do_tmpfile(struct nameidata *nd, unsigned flags, const struct open_flags *op, struct file *file, int *opened) { - static const struct qstr name = QSTR_INIT("/", 1); struct dentry *child; - struct inode *dir; struct path path; int error = path_lookupat(nd, flags | LOOKUP_DIRECTORY, &path); if (unlikely(error)) @@ -3370,25 +3414,12 @@ static int do_tmpfile(struct nameidata *nd, unsigned flags, error = mnt_want_write(path.mnt); if (unlikely(error)) goto out; - dir = path.dentry->d_inode; - /* we want directory to be writable */ - error = inode_permission(dir, MAY_WRITE | MAY_EXEC); - if (error) + child = vfs_tmpfile(path.dentry, op->mode, op->open_flag); + error = PTR_ERR(child); + if (unlikely(IS_ERR(child))) goto out2; - if (!dir->i_op->tmpfile) { - error = -EOPNOTSUPP; - goto out2; - } - child = d_alloc(path.dentry, &name); - if (unlikely(!child)) { - error = -ENOMEM; - goto out2; - } dput(path.dentry); path.dentry = child; - error = dir->i_op->tmpfile(dir, child, op->mode); - if (error) - goto out2; audit_inode(nd->name, child, 0); /* Don't check for other permissions, the inode was just created */ error = may_open(&path, 0, op->open_flag); @@ -3399,14 +3430,8 @@ static int do_tmpfile(struct nameidata *nd, unsigned flags, if (error) goto out2; error = open_check_o_direct(file); - if (error) { + if (error) fput(file); - } else if (!(op->open_flag & O_EXCL)) { - struct inode *inode = file_inode(file); - spin_lock(&inode->i_lock); - inode->i_state |= I_LINKABLE; - spin_unlock(&inode->i_lock); - } out2: mnt_drop_write(path.mnt); out: diff --git a/fs/namespace.c b/fs/namespace.c index 8bfad42c1ccf21..cc1375eff88c75 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include /* init_rootfs */ #include /* get_fs_root et.al. 
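vfs_tmpfile() above factors the O_TMPFILE creation path out of do_tmpfile() so other in-kernel callers (the overlayfs copy-up changes further down in this patch) can reuse it; the I_LINKABLE flag it sets, unless O_EXCL was requested, is what later lets linkat() give the anonymous inode a name. The same semantics seen from userspace, as a small sketch (not part of the patch; assumes a filesystem with O_TMPFILE support such as ext4 or xfs):

/* Demo: create an anonymous file with O_TMPFILE, write to it, then give
 * it a visible name with linkat(), the userspace view of I_LINKABLE.
 * Build: gcc -o tmpfile_demo tmpfile_demo.c
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char path[64];
	/* O_TMPFILE takes a directory; O_EXCL here would forbid linkat() later */
	int fd = open("/tmp", O_TMPFILE | O_RDWR, 0600);

	if (fd < 0) {
		perror("open(O_TMPFILE)");
		return 1;
	}
	if (write(fd, "hello\n", 6) != 6) {
		perror("write");
		return 1;
	}
	/* Materialize the inode under a name via /proc/self/fd */
	unlink("/tmp/tmpfile_demo.txt");	/* ignore errors on re-runs */
	snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
	if (linkat(AT_FDCWD, path, AT_FDCWD, "/tmp/tmpfile_demo.txt",
		   AT_SYMLINK_FOLLOW) < 0) {
		perror("linkat");
		return 1;
	}
	close(fd);
	return 0;
}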
*/ @@ -24,6 +25,8 @@ #include #include #include +#include + #include "pnode.h" #include "internal.h" diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c index 7eb89c23c8470b..d5606099712a4c 100644 --- a/fs/ncpfs/inode.c +++ b/fs/ncpfs/inode.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c index 4434e4977cf36c..12550c2320ccbb 100644 --- a/fs/ncpfs/ioctl.c +++ b/fs/ncpfs/ioctl.c @@ -19,6 +19,7 @@ #include #include #include +#include #include diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c index 97b111d79489c5..98b6db0ed63e03 100644 --- a/fs/ncpfs/sock.c +++ b/fs/ncpfs/sock.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -40,19 +41,12 @@ static int _recv(struct socket *sock, void *buf, int size, unsigned flags) return kernel_recvmsg(sock, &msg, &iov, 1, size, flags); } -static inline int do_send(struct socket *sock, struct kvec *vec, int count, - int len, unsigned flags) -{ - struct msghdr msg = { .msg_flags = flags }; - return kernel_sendmsg(sock, &msg, vec, count, len); -} - static int _send(struct socket *sock, const void *buff, int len) { - struct kvec vec; - vec.iov_base = (void *) buff; - vec.iov_len = len; - return do_send(sock, &vec, 1, len, 0); + struct msghdr msg = { .msg_flags = 0 }; + struct kvec vec = {.iov_base = (void *)buff, .iov_len = len}; + iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &vec, 1, len); + return sock_sendmsg(sock, &msg); } struct ncp_request_reply { @@ -63,9 +57,7 @@ struct ncp_request_reply { size_t datalen; int result; enum { RQ_DONE, RQ_INPROGRESS, RQ_QUEUED, RQ_IDLE, RQ_ABANDONED } status; - struct kvec* tx_ciov; - size_t tx_totallen; - size_t tx_iovlen; + struct iov_iter from; struct kvec tx_iov[3]; u_int16_t tx_type; u_int32_t sign[6]; @@ -205,28 +197,22 @@ static inline void __ncptcp_abort(struct ncp_server *server) static int ncpdgram_send(struct socket *sock, struct ncp_request_reply *req) { - struct kvec vec[3]; - /* sock_sendmsg updates iov pointers for us :-( */ - memcpy(vec, req->tx_ciov, req->tx_iovlen * sizeof(vec[0])); - return do_send(sock, vec, req->tx_iovlen, - req->tx_totallen, MSG_DONTWAIT); + struct msghdr msg = { .msg_iter = req->from, .msg_flags = MSG_DONTWAIT }; + return sock_sendmsg(sock, &msg); } static void __ncptcp_try_send(struct ncp_server *server) { struct ncp_request_reply *rq; - struct kvec *iov; - struct kvec iovc[3]; + struct msghdr msg = { .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT }; int result; rq = server->tx.creq; if (!rq) return; - /* sock_sendmsg updates iov pointers for us :-( */ - memcpy(iovc, rq->tx_ciov, rq->tx_iovlen * sizeof(iov[0])); - result = do_send(server->ncp_sock, iovc, rq->tx_iovlen, - rq->tx_totallen, MSG_NOSIGNAL | MSG_DONTWAIT); + msg.msg_iter = rq->from; + result = sock_sendmsg(server->ncp_sock, &msg); if (result == -EAGAIN) return; @@ -236,21 +222,12 @@ static void __ncptcp_try_send(struct ncp_server *server) __ncp_abort_request(server, rq, result); return; } - if (result >= rq->tx_totallen) { + if (!msg_data_left(&msg)) { server->rcv.creq = rq; server->tx.creq = NULL; return; } - rq->tx_totallen -= result; - iov = rq->tx_ciov; - while (iov->iov_len <= result) { - result -= iov->iov_len; - iov++; - rq->tx_iovlen--; - } - iov->iov_base += result; - iov->iov_len -= result; - rq->tx_ciov = iov; + rq->from = msg.msg_iter; } static inline void ncp_init_header(struct ncp_server *server, struct ncp_request_reply *req, struct ncp_request_header *h) @@ -263,22 +240,21 @@ static inline void 
ncp_init_header(struct ncp_server *server, struct ncp_request static void ncpdgram_start_request(struct ncp_server *server, struct ncp_request_reply *req) { - size_t signlen; - struct ncp_request_header* h; + size_t signlen, len = req->tx_iov[1].iov_len; + struct ncp_request_header *h = req->tx_iov[1].iov_base; - req->tx_ciov = req->tx_iov + 1; - - h = req->tx_iov[1].iov_base; ncp_init_header(server, req, h); - signlen = sign_packet(server, req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1, - req->tx_iov[1].iov_len - sizeof(struct ncp_request_header) + 1, - cpu_to_le32(req->tx_totallen), req->sign); + signlen = sign_packet(server, + req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1, + len - sizeof(struct ncp_request_header) + 1, + cpu_to_le32(len), req->sign); if (signlen) { - req->tx_ciov[1].iov_base = req->sign; - req->tx_ciov[1].iov_len = signlen; - req->tx_iovlen += 1; - req->tx_totallen += signlen; + /* NCP over UDP appends signature */ + req->tx_iov[2].iov_base = req->sign; + req->tx_iov[2].iov_len = signlen; } + iov_iter_kvec(&req->from, WRITE | ITER_KVEC, + req->tx_iov + 1, signlen ? 2 : 1, len + signlen); server->rcv.creq = req; server->timeout_last = server->m.time_out; server->timeout_retries = server->m.retry_count; @@ -292,24 +268,23 @@ static void ncpdgram_start_request(struct ncp_server *server, struct ncp_request static void ncptcp_start_request(struct ncp_server *server, struct ncp_request_reply *req) { - size_t signlen; - struct ncp_request_header* h; + size_t signlen, len = req->tx_iov[1].iov_len; + struct ncp_request_header *h = req->tx_iov[1].iov_base; - req->tx_ciov = req->tx_iov; - h = req->tx_iov[1].iov_base; ncp_init_header(server, req, h); signlen = sign_packet(server, req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1, - req->tx_iov[1].iov_len - sizeof(struct ncp_request_header) + 1, - cpu_to_be32(req->tx_totallen + 24), req->sign + 4) + 16; + len - sizeof(struct ncp_request_header) + 1, + cpu_to_be32(len + 24), req->sign + 4) + 16; req->sign[0] = htonl(NCP_TCP_XMIT_MAGIC); - req->sign[1] = htonl(req->tx_totallen + signlen); + req->sign[1] = htonl(len + signlen); req->sign[2] = htonl(NCP_TCP_XMIT_VERSION); req->sign[3] = htonl(req->datalen + 8); + /* NCP over TCP prepends signature */ req->tx_iov[0].iov_base = req->sign; req->tx_iov[0].iov_len = signlen; - req->tx_iovlen += 1; - req->tx_totallen += signlen; + iov_iter_kvec(&req->from, WRITE | ITER_KVEC, + req->tx_iov, 2, len + signlen); server->tx.creq = req; __ncptcp_try_send(server); @@ -364,18 +339,17 @@ static void __ncp_next_request(struct ncp_server *server) static void info_server(struct ncp_server *server, unsigned int id, const void * data, size_t len) { if (server->info_sock) { - struct kvec iov[2]; - __be32 hdr[2]; - - hdr[0] = cpu_to_be32(len + 8); - hdr[1] = cpu_to_be32(id); - - iov[0].iov_base = hdr; - iov[0].iov_len = 8; - iov[1].iov_base = (void *) data; - iov[1].iov_len = len; + struct msghdr msg = { .msg_flags = MSG_NOSIGNAL }; + __be32 hdr[2] = {cpu_to_be32(len + 8), cpu_to_be32(id)}; + struct kvec iov[2] = { + {.iov_base = hdr, .iov_len = 8}, + {.iov_base = (void *)data, .iov_len = len}, + }; + + iov_iter_kvec(&msg.msg_iter, ITER_KVEC | WRITE, + iov, 2, len + 8); - do_send(server->info_sock, iov, 2, len + 8, MSG_NOSIGNAL); + sock_sendmsg(server->info_sock, &msg); } } @@ -711,8 +685,6 @@ static int do_ncp_rpc_call(struct ncp_server *server, int size, req->datalen = max_reply_size; req->tx_iov[1].iov_base = server->packet; req->tx_iov[1].iov_len = size; - 
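The ncpfs changes above replace the hand-maintained tx_ciov/tx_iovlen/tx_totallen bookkeeping (and the loop that advanced the kvec array after a short send) with an iov_iter stored in the request, so sock_sendmsg() and msg_data_left() do the accounting. The pattern being deleted is essentially the classic "resume a short vectored write" idiom; in userspace C it looks like this (illustrative sketch, not kernel code):

#include <errno.h>
#include <sys/uio.h>
#include <unistd.h>

/* Write the whole iovec array to fd, resuming after short writes; the
 * inner loop is the manual equivalent of what iov_iter tracking removes. */
static int writev_all(int fd, struct iovec *iov, int iovcnt)
{
	while (iovcnt > 0) {
		ssize_t n = writev(fd, iov, iovcnt);

		if (n < 0) {
			if (errno == EINTR)
				continue;
			return -1;
		}
		/* advance the iovec array past the n bytes just written */
		while (iovcnt > 0 && (size_t)n >= iov->iov_len) {
			n -= iov->iov_len;
			iov++;
			iovcnt--;
		}
		if (iovcnt > 0) {
			iov->iov_base = (char *)iov->iov_base + n;
			iov->iov_len -= (size_t)n;
		}
	}
	return 0;
}

int main(void)
{
	struct iovec iov[2] = {
		{ .iov_base = "hello, ", .iov_len = 7 },
		{ .iov_base = "world\n",  .iov_len = 6 },
	};

	return writev_all(STDOUT_FILENO, iov, 2) ? 1 : 0;
}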
req->tx_iovlen = 1; - req->tx_totallen = size; req->tx_type = *(u_int16_t*)server->packet; result = ncp_add_request(server, req); diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index 484bebc20bca6a..773774531aff5f 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -231,12 +232,12 @@ static struct svc_serv_ops nfs41_cb_sv_ops = { .svo_module = THIS_MODULE, }; -struct svc_serv_ops *nfs4_cb_sv_ops[] = { +static struct svc_serv_ops *nfs4_cb_sv_ops[] = { [0] = &nfs40_cb_sv_ops, [1] = &nfs41_cb_sv_ops, }; #else -struct svc_serv_ops *nfs4_cb_sv_ops[] = { +static struct svc_serv_ops *nfs4_cb_sv_ops[] = { [0] = &nfs40_cb_sv_ops, [1] = NULL, }; diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 91a8d610ba0fa6..390ada8741bcbf 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -325,10 +325,33 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat return NULL; } -static bool nfs_client_init_is_complete(const struct nfs_client *clp) +/* + * Return true if @clp is done initializing, false if still working on it. + * + * Use nfs_client_init_status to check if it was successful. + */ +bool nfs_client_init_is_complete(const struct nfs_client *clp) { return clp->cl_cons_state <= NFS_CS_READY; } +EXPORT_SYMBOL_GPL(nfs_client_init_is_complete); + +/* + * Return 0 if @clp was successfully initialized, -errno otherwise. + * + * This must be called *after* nfs_client_init_is_complete() returns true, + * otherwise it will pop WARN_ON_ONCE and return -EINVAL + */ +int nfs_client_init_status(const struct nfs_client *clp) +{ + /* called without checking nfs_client_init_is_complete */ + if (clp->cl_cons_state > NFS_CS_READY) { + WARN_ON_ONCE(1); + return -EINVAL; + } + return clp->cl_cons_state; +} +EXPORT_SYMBOL_GPL(nfs_client_init_status); int nfs_wait_client_init_complete(const struct nfs_client *clp) { diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index fb499a3f21b58e..f92ba8d6c55690 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -2055,7 +2055,7 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry, { struct inode *old_inode = d_inode(old_dentry); struct inode *new_inode = d_inode(new_dentry); - struct dentry *dentry = NULL, *rehash = NULL; + struct dentry *dentry = NULL; struct rpc_task *task; int error = -EBUSY; @@ -2078,10 +2078,8 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry, * To prevent any new references to the target during the * rename, we unhash the dentry in advance. */ - if (!d_unhashed(new_dentry)) { + if (!d_unhashed(new_dentry)) d_drop(new_dentry); - rehash = new_dentry; - } if (d_count(new_dentry) > 2) { int err; @@ -2098,7 +2096,6 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry, goto out; new_dentry = dentry; - rehash = NULL; new_inode = NULL; } } @@ -2119,8 +2116,6 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry, error = task->tk_status; rpc_put_task(task); out: - if (rehash) - d_rehash(rehash); trace_nfs_rename_exit(old_dir, old_dentry, new_dir, new_dentry, error); /* new dentry created? 
*/ diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index 44347f4bdc1516..acd30baca46166 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c @@ -202,10 +202,10 @@ static int filelayout_async_handle_error(struct rpc_task *task, task->tk_status); nfs4_mark_deviceid_unavailable(devid); pnfs_error_mark_layout_for_return(inode, lseg); - pnfs_set_lo_fail(lseg); rpc_wake_up(&tbl->slot_tbl_waitq); /* fall through */ default: + pnfs_set_lo_fail(lseg); reset: dprintk("%s Retry through MDS. Error %d\n", __func__, task->tk_status); @@ -560,6 +560,50 @@ filelayout_write_pagelist(struct nfs_pgio_header *hdr, int sync) return PNFS_ATTEMPTED; } +static int +filelayout_check_deviceid(struct pnfs_layout_hdr *lo, + struct nfs4_filelayout_segment *fl, + gfp_t gfp_flags) +{ + struct nfs4_deviceid_node *d; + struct nfs4_file_layout_dsaddr *dsaddr; + int status = -EINVAL; + + /* find and reference the deviceid */ + d = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode), &fl->deviceid, + lo->plh_lc_cred, gfp_flags); + if (d == NULL) + goto out; + + dsaddr = container_of(d, struct nfs4_file_layout_dsaddr, id_node); + /* Found deviceid is unavailable */ + if (filelayout_test_devid_unavailable(&dsaddr->id_node)) + goto out_put; + + fl->dsaddr = dsaddr; + + if (fl->first_stripe_index >= dsaddr->stripe_count) { + dprintk("%s Bad first_stripe_index %u\n", + __func__, fl->first_stripe_index); + goto out_put; + } + + if ((fl->stripe_type == STRIPE_SPARSE && + fl->num_fh > 1 && fl->num_fh != dsaddr->ds_num) || + (fl->stripe_type == STRIPE_DENSE && + fl->num_fh != dsaddr->stripe_count)) { + dprintk("%s num_fh %u not valid for given packing\n", + __func__, fl->num_fh); + goto out_put; + } + status = 0; +out: + return status; +out_put: + nfs4_fl_put_deviceid(dsaddr); + goto out; +} + /* * filelayout_check_layout() * @@ -572,11 +616,8 @@ static int filelayout_check_layout(struct pnfs_layout_hdr *lo, struct nfs4_filelayout_segment *fl, struct nfs4_layoutget_res *lgr, - struct nfs4_deviceid *id, gfp_t gfp_flags) { - struct nfs4_deviceid_node *d; - struct nfs4_file_layout_dsaddr *dsaddr; int status = -EINVAL; dprintk("--> %s\n", __func__); @@ -601,41 +642,10 @@ filelayout_check_layout(struct pnfs_layout_hdr *lo, goto out; } - /* find and reference the deviceid */ - d = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode), id, - lo->plh_lc_cred, gfp_flags); - if (d == NULL) - goto out; - - dsaddr = container_of(d, struct nfs4_file_layout_dsaddr, id_node); - /* Found deviceid is unavailable */ - if (filelayout_test_devid_unavailable(&dsaddr->id_node)) - goto out_put; - - fl->dsaddr = dsaddr; - - if (fl->first_stripe_index >= dsaddr->stripe_count) { - dprintk("%s Bad first_stripe_index %u\n", - __func__, fl->first_stripe_index); - goto out_put; - } - - if ((fl->stripe_type == STRIPE_SPARSE && - fl->num_fh > 1 && fl->num_fh != dsaddr->ds_num) || - (fl->stripe_type == STRIPE_DENSE && - fl->num_fh != dsaddr->stripe_count)) { - dprintk("%s num_fh %u not valid for given packing\n", - __func__, fl->num_fh); - goto out_put; - } - status = 0; out: dprintk("--> %s returns %d\n", __func__, status); return status; -out_put: - nfs4_fl_put_deviceid(dsaddr); - goto out; } static void _filelayout_free_lseg(struct nfs4_filelayout_segment *fl) @@ -657,7 +667,6 @@ static int filelayout_decode_layout(struct pnfs_layout_hdr *flo, struct nfs4_filelayout_segment *fl, struct nfs4_layoutget_res *lgr, - struct nfs4_deviceid *id, gfp_t gfp_flags) { struct xdr_stream stream; @@ -682,9 +691,9 @@ 
filelayout_decode_layout(struct pnfs_layout_hdr *flo, if (unlikely(!p)) goto out_err; - memcpy(id, p, sizeof(*id)); + memcpy(&fl->deviceid, p, sizeof(fl->deviceid)); p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE); - nfs4_print_deviceid(id); + nfs4_print_deviceid(&fl->deviceid); nfl_util = be32_to_cpup(p++); if (nfl_util & NFL4_UFLG_COMMIT_THRU_MDS) @@ -831,15 +840,14 @@ filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid, { struct nfs4_filelayout_segment *fl; int rc; - struct nfs4_deviceid id; dprintk("--> %s\n", __func__); fl = kzalloc(sizeof(*fl), gfp_flags); if (!fl) return NULL; - rc = filelayout_decode_layout(layoutid, fl, lgr, &id, gfp_flags); - if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, &id, gfp_flags)) { + rc = filelayout_decode_layout(layoutid, fl, lgr, gfp_flags); + if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, gfp_flags)) { _filelayout_free_lseg(fl); return NULL; } @@ -888,18 +896,51 @@ filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev, return min(stripe_unit - (unsigned int)stripe_offset, size); } +static struct pnfs_layout_segment * +fl_pnfs_update_layout(struct inode *ino, + struct nfs_open_context *ctx, + loff_t pos, + u64 count, + enum pnfs_iomode iomode, + bool strict_iomode, + gfp_t gfp_flags) +{ + struct pnfs_layout_segment *lseg = NULL; + struct pnfs_layout_hdr *lo; + struct nfs4_filelayout_segment *fl; + int status; + + lseg = pnfs_update_layout(ino, ctx, pos, count, iomode, strict_iomode, + gfp_flags); + if (!lseg) + lseg = ERR_PTR(-ENOMEM); + if (IS_ERR(lseg)) + goto out; + + lo = NFS_I(ino)->layout; + fl = FILELAYOUT_LSEG(lseg); + + status = filelayout_check_deviceid(lo, fl, gfp_flags); + if (status) + lseg = ERR_PTR(status); +out: + if (IS_ERR(lseg)) + pnfs_put_lseg(lseg); + return lseg; +} + static void filelayout_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req) { if (!pgio->pg_lseg) { - pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, - req->wb_context, - 0, - NFS4_MAX_UINT64, - IOMODE_READ, - false, - GFP_KERNEL); + pgio->pg_lseg = fl_pnfs_update_layout(pgio->pg_inode, + req->wb_context, + 0, + NFS4_MAX_UINT64, + IOMODE_READ, + false, + GFP_KERNEL); if (IS_ERR(pgio->pg_lseg)) { pgio->pg_error = PTR_ERR(pgio->pg_lseg); pgio->pg_lseg = NULL; @@ -919,13 +960,13 @@ filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio, int status; if (!pgio->pg_lseg) { - pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, - req->wb_context, - 0, - NFS4_MAX_UINT64, - IOMODE_RW, - false, - GFP_NOFS); + pgio->pg_lseg = fl_pnfs_update_layout(pgio->pg_inode, + req->wb_context, + 0, + NFS4_MAX_UINT64, + IOMODE_RW, + false, + GFP_NOFS); if (IS_ERR(pgio->pg_lseg)) { pgio->pg_error = PTR_ERR(pgio->pg_lseg); pgio->pg_lseg = NULL; diff --git a/fs/nfs/filelayout/filelayout.h b/fs/nfs/filelayout/filelayout.h index 2896cb833a1137..79323b5dab0cb3 100644 --- a/fs/nfs/filelayout/filelayout.h +++ b/fs/nfs/filelayout/filelayout.h @@ -55,15 +55,16 @@ struct nfs4_file_layout_dsaddr { }; struct nfs4_filelayout_segment { - struct pnfs_layout_segment generic_hdr; - u32 stripe_type; - u32 commit_through_mds; - u32 stripe_unit; - u32 first_stripe_index; - u64 pattern_offset; - struct nfs4_file_layout_dsaddr *dsaddr; /* Point to GETDEVINFO data */ - unsigned int num_fh; - struct nfs_fh **fh_array; + struct pnfs_layout_segment generic_hdr; + u32 stripe_type; + u32 commit_through_mds; + u32 stripe_unit; + u32 first_stripe_index; + u64 pattern_offset; + struct nfs4_deviceid deviceid; + struct nfs4_file_layout_dsaddr *dsaddr; /* Point to 
GETDEVINFO data */ + unsigned int num_fh; + struct nfs_fh **fh_array; }; struct nfs4_filelayout { diff --git a/fs/nfs/filelayout/filelayoutdev.c b/fs/nfs/filelayout/filelayoutdev.c index f956ca20a8a359..d913e818858f3f 100644 --- a/fs/nfs/filelayout/filelayoutdev.c +++ b/fs/nfs/filelayout/filelayoutdev.c @@ -266,6 +266,7 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx) struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg); struct nfs4_pnfs_ds *ret = ds; struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode); + int status; if (ds == NULL) { printk(KERN_ERR "NFS: %s: No data server for offset index %d\n", @@ -277,9 +278,14 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx) if (ds->ds_clp) goto out_test_devid; - nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo, + status = nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo, dataserver_retrans, 4, s->nfs_client->cl_minorversion); + if (status) { + nfs4_mark_deviceid_unavailable(devid); + ret = NULL; + goto out; + } out_test_devid: if (ret->ds_clp == NULL || diff --git a/fs/nfs/flexfilelayout/flexfilelayout.h b/fs/nfs/flexfilelayout/flexfilelayout.h index f4f39b0ab09b25..98b34c9b0564b3 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.h +++ b/fs/nfs/flexfilelayout/flexfilelayout.h @@ -175,7 +175,19 @@ ff_layout_no_read_on_rw(struct pnfs_layout_segment *lseg) static inline bool ff_layout_test_devid_unavailable(struct nfs4_deviceid_node *node) { - return nfs4_test_deviceid_unavailable(node); + /* + * Flexfiles should never mark a DS unavailable, but if it does + * print a (ratelimited) warning as this can affect performance. + */ + if (nfs4_test_deviceid_unavailable(node)) { + u32 *p = (u32 *)node->deviceid.data; + + pr_warn_ratelimited("NFS: flexfiles layout referencing an " + "unavailable device [%x%x%x%x]\n", + p[0], p[1], p[2], p[3]); + return true; + } + return false; } static inline int diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c index e5a6f248697b36..457cfeb1d5c162 100644 --- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c +++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c @@ -208,6 +208,10 @@ static bool ff_layout_mirror_valid(struct pnfs_layout_segment *lseg, } else goto outerr; } + + if (IS_ERR(mirror->mirror_ds)) + goto outerr; + if (mirror->mirror_ds->ds == NULL) { struct nfs4_deviceid_node *devid; devid = &mirror->mirror_ds->id_node; @@ -384,6 +388,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx, struct inode *ino = lseg->pls_layout->plh_inode; struct nfs_server *s = NFS_SERVER(ino); unsigned int max_payload; + int status; if (!ff_layout_mirror_valid(lseg, mirror, true)) { pr_err_ratelimited("NFS: %s: No data server for offset index %d\n", @@ -404,7 +409,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx, /* FIXME: For now we assume the server sent only one version of NFS * to use for the DS. 
*/ - nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo, + status = nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo, dataserver_retrans, mirror->mirror_ds->ds_versions[0].version, mirror->mirror_ds->ds_versions[0].minor_version); @@ -420,11 +425,11 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx, mirror->mirror_ds->ds_versions[0].wsize = max_payload; goto out; } +out_fail: ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout), mirror, lseg->pls_range.offset, lseg->pls_range.length, NFS4ERR_NXIO, OP_ILLEGAL, GFP_NOIO); -out_fail: if (fail_return || !ff_layout_has_available_ds(lseg)) pnfs_error_mark_layout_for_return(ino, lseg); ds = NULL; diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 5ca4d96b194218..f489a5a71bd5cd 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -15,7 +15,7 @@ #include #include -#include +#include #include #include #include @@ -703,9 +703,10 @@ static bool nfs_need_revalidate_inode(struct inode *inode) return false; } -int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) +int nfs_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) { - struct inode *inode = d_inode(dentry); + struct inode *inode = d_inode(path->dentry); int need_atime = NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATIME; int err = 0; @@ -726,17 +727,17 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) * - NFS never sets MS_NOATIME or MS_NODIRATIME so there is * no point in checking those. */ - if ((mnt->mnt_flags & MNT_NOATIME) || - ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))) + if ((path->mnt->mnt_flags & MNT_NOATIME) || + ((path->mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))) need_atime = 0; if (need_atime || nfs_need_revalidate_inode(inode)) { struct nfs_server *server = NFS_SERVER(inode); - nfs_readdirplus_parent_cache_miss(dentry); + nfs_readdirplus_parent_cache_miss(path->dentry); err = __nfs_revalidate_inode(server, inode); } else - nfs_readdirplus_parent_cache_hit(dentry); + nfs_readdirplus_parent_cache_hit(path->dentry); if (!err) { generic_fillattr(inode, stat); stat->ino = nfs_compat_user_ino64(NFS_FILEID(inode)); diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 09ca5095c04e42..7b38fedb7e0328 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -186,6 +186,8 @@ extern struct nfs_server *nfs_clone_server(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, rpc_authflavor_t); +extern bool nfs_client_init_is_complete(const struct nfs_client *clp); +extern int nfs_client_init_status(const struct nfs_client *clp); extern int nfs_wait_client_init_complete(const struct nfs_client *clp); extern void nfs_mark_client_ready(struct nfs_client *clp, int state); extern struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv, diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c index e49d831c4e8531..786f175805827d 100644 --- a/fs/nfs/namespace.c +++ b/fs/nfs/namespace.c @@ -178,11 +178,12 @@ struct vfsmount *nfs_d_automount(struct path *path) } static int -nfs_namespace_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) +nfs_namespace_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) { - if (NFS_FH(d_inode(dentry))->size != 0) - return nfs_getattr(mnt, dentry, stat); - generic_fillattr(d_inode(dentry), stat); + if (NFS_FH(d_inode(path->dentry))->size != 0) + return nfs_getattr(path, stat, request_mask, query_flags); + 
generic_fillattr(d_inode(path->dentry), stat); return 0; } diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index 5ae9d64ea08bc8..8346ccbf2d52e5 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c @@ -1023,9 +1023,9 @@ static void nfs4_session_set_rwsize(struct nfs_server *server) server_resp_sz = sess->fc_attrs.max_resp_sz - nfs41_maxread_overhead; server_rqst_sz = sess->fc_attrs.max_rqst_sz - nfs41_maxwrite_overhead; - if (server->rsize > server_resp_sz) + if (!server->rsize || server->rsize > server_resp_sz) server->rsize = server_resp_sz; - if (server->wsize > server_rqst_sz) + if (!server->wsize || server->wsize > server_rqst_sz) server->wsize = server_rqst_sz; #endif /* CONFIG_NFS_V4_1 */ } diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c index c444285bb1b169..835c163f61af53 100644 --- a/fs/nfs/nfs4idmap.c +++ b/fs/nfs/nfs4idmap.c @@ -316,7 +316,7 @@ static ssize_t nfs_idmap_get_key(const char *name, size_t namelen, if (ret < 0) goto out_up; - payload = user_key_payload(rkey); + payload = user_key_payload_rcu(rkey); if (IS_ERR_OR_NULL(payload)) { ret = PTR_ERR(payload); goto out_up; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 1b183686c6d4f0..201ca3f2c4bac1 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -2258,8 +2258,6 @@ static int nfs4_opendata_access(struct rpc_cred *cred, if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0) return 0; - /* even though OPEN succeeded, access is denied. Close the file */ - nfs4_close_state(state, fmode); return -EACCES; } @@ -2444,17 +2442,14 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state) } nfs4_stateid_copy(&stateid, &delegation->stateid); - if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) { + if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) || + !test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED, + &delegation->flags)) { rcu_read_unlock(); nfs_finish_clear_delegation_stateid(state, &stateid); return; } - if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags)) { - rcu_read_unlock(); - return; - } - cred = get_rpccred(delegation->cred); rcu_read_unlock(); status = nfs41_test_and_free_expired_stateid(server, &stateid, cred); @@ -7427,11 +7422,11 @@ static void nfs4_exchange_id_release(void *data) struct nfs41_exchange_id_data *cdata = (struct nfs41_exchange_id_data *)data; - nfs_put_client(cdata->args.client); if (cdata->xprt) { xprt_put(cdata->xprt); rpc_clnt_xprt_switch_put(cdata->args.client->cl_rpcclient); } + nfs_put_client(cdata->args.client); kfree(cdata->res.impl_id); kfree(cdata->res.server_scope); kfree(cdata->res.server_owner); @@ -7538,10 +7533,8 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred, task_setup_data.callback_data = calldata; task = rpc_run_task(&task_setup_data); - if (IS_ERR(task)) { - status = PTR_ERR(task); - goto out_impl_id; - } + if (IS_ERR(task)) + return PTR_ERR(task); if (!xprt) { status = rpc_wait_for_completion_task(task); @@ -7569,6 +7562,7 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred, kfree(calldata->res.server_owner); out_calldata: kfree(calldata); + nfs_put_client(clp); goto out; } diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index f0369e36275341..80ce289eea0532 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -3942,7 +3942,7 @@ static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap, if (len <= 0) goto out; dprintk("%s: name=%s\n", __func__, group_name->data); - return NFS_ATTR_FATTR_OWNER_NAME; + return 
NFS_ATTR_FATTR_GROUP_NAME; } else { len = xdr_stream_decode_opaque_inline(xdr, (void **)&p, XDR_MAX_NETOBJ); diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 63f77b49a586a5..590e1e35781f0b 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -367,7 +367,7 @@ void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds); struct nfs4_pnfs_ds *nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags); void nfs4_pnfs_v3_ds_connect_unload(void); -void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds, +int nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds, struct nfs4_deviceid_node *devid, unsigned int timeo, unsigned int retrans, u32 version, u32 minor_version); struct nfs4_pnfs_ds_addr *nfs4_decode_mp_ds_addr(struct net *net, diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c index 9414b492439fbf..7250b95549ecc7 100644 --- a/fs/nfs/pnfs_nfs.c +++ b/fs/nfs/pnfs_nfs.c @@ -745,15 +745,17 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv, /* * Create an rpc connection to the nfs4_pnfs_ds data server. * Currently only supports IPv4 and IPv6 addresses. - * If connection fails, make devid unavailable. + * If connection fails, make devid unavailable and return a -errno. */ -void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds, +int nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds, struct nfs4_deviceid_node *devid, unsigned int timeo, unsigned int retrans, u32 version, u32 minor_version) { - if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) { - int err = 0; + int err; +again: + err = 0; + if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) { if (version == 3) { err = _nfs4_pnfs_v3_ds_connect(mds_srv, ds, timeo, retrans); @@ -766,12 +768,29 @@ void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds, err = -EPROTONOSUPPORT; } - if (err) - nfs4_mark_deviceid_unavailable(devid); nfs4_clear_ds_conn_bit(ds); } else { nfs4_wait_ds_connect(ds); + + /* what was waited on didn't connect AND didn't mark unavail */ + if (!ds->ds_clp && !nfs4_test_deviceid_unavailable(devid)) + goto again; } + + /* + * At this point the ds->ds_clp should be ready, but it might have + * hit an error. + */ + if (!err) { + if (!ds->ds_clp || !nfs_client_init_is_complete(ds->ds_clp)) { + WARN_ON_ONCE(ds->ds_clp || + !nfs4_test_deviceid_unavailable(devid)); + return -EINVAL; + } + err = nfs_client_init_status(ds->ds_clp); + } + + return err; } EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_connect); diff --git a/fs/nfs/write.c b/fs/nfs/write.c index e75b056f46f435..abb2c8a3be42e4 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1784,7 +1784,8 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data) (long long)req_offset(req)); if (status < 0) { nfs_context_set_write_error(req->wb_context, status); - nfs_inode_remove_request(req); + if (req->wb_page) + nfs_inode_remove_request(req); dprintk_cont(", error = %d\n", status); goto next; } @@ -1793,7 +1794,8 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data) * returned by the server against all stored verfs. 
*/ if (!nfs_write_verifier_cmp(&req->wb_verf, &data->verf.verifier)) { /* We have a match */ - nfs_inode_remove_request(req); + if (req->wb_page) + nfs_inode_remove_request(req); dprintk_cont(" OK\n"); goto next; } diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 382c1fd05b4c8d..33017d652b1da2 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -2301,7 +2301,7 @@ static int get_parent_attributes(struct svc_export *exp, struct kstat *stat) if (path.dentry != path.mnt->mnt_root) break; } - err = vfs_getattr(&path, stat); + err = vfs_getattr(&path, stat, STATX_BASIC_STATS, AT_STATX_SYNC_AS_STAT); path_put(&path); return err; } @@ -2385,7 +2385,7 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp, goto out; } - err = vfs_getattr(&path, &stat); + err = vfs_getattr(&path, &stat, STATX_BASIC_STATS, AT_STATX_SYNC_AS_STAT); if (err) goto out_nfserr; if ((bmval0 & (FATTR4_WORD0_FILES_AVAIL | FATTR4_WORD0_FILES_FREE | diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index 73e75ac905258c..8bf8f667a8cf2f 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -538,13 +538,21 @@ static ssize_t write_pool_threads(struct file *file, char *buf, size_t size) static ssize_t nfsd_print_version_support(char *buf, int remaining, const char *sep, - unsigned vers, unsigned minor) + unsigned vers, int minor) { - const char *format = (minor == 0) ? "%s%c%u" : "%s%c%u.%u"; + const char *format = minor < 0 ? "%s%c%u" : "%s%c%u.%u"; bool supported = !!nfsd_vers(vers, NFSD_TEST); - if (vers == 4 && !nfsd_minorversion(minor, NFSD_TEST)) + if (vers == 4 && minor >= 0 && + !nfsd_minorversion(minor, NFSD_TEST)) supported = false; + if (minor == 0 && supported) + /* + * special case for backward compatability. + * +4.0 is never reported, it is implied by + * +4, unless -4.0 is present. + */ + return 0; return snprintf(buf, remaining, format, sep, supported ? '+' : '-', vers, minor); } @@ -554,7 +562,6 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size) char *mesg = buf; char *vers, *minorp, sign; int len, num, remaining; - unsigned minor; ssize_t tlen = 0; char *sep; struct nfsd_net *nn = net_generic(netns(file), nfsd_net_id); @@ -575,6 +582,7 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size) if (len <= 0) return -EINVAL; do { enum vers_op cmd; + unsigned minor; sign = *vers; if (sign == '+' || sign == '-') num = simple_strtol((vers+1), &minorp, 0); @@ -585,8 +593,8 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size) return -EINVAL; if (kstrtouint(minorp+1, 0, &minor) < 0) return -EINVAL; - } else - minor = 0; + } + cmd = sign == '-' ? NFSD_CLEAR : NFSD_SET; switch(num) { case 2: @@ -594,8 +602,20 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size) nfsd_vers(num, cmd); break; case 4: - if (nfsd_minorversion(minor, cmd) >= 0) - break; + if (*minorp == '.') { + if (nfsd_minorversion(minor, cmd) < 0) + return -EINVAL; + } else if ((cmd == NFSD_SET) != nfsd_vers(num, NFSD_TEST)) { + /* + * Either we have +4 and no minors are enabled, + * or we have -4 and at least one minor is enabled. + * In either case, propagate 'cmd' to all minors. 
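Throughout this patch ->getattr() and vfs_getattr() gain a request mask and query flags; nfsd, as above, asks for STATX_BASIC_STATS with AT_STATX_SYNC_AS_STAT. The same two arguments are what userspace passes to statx(2); a small sketch for reference (assumes the glibc statx() wrapper, available since glibc 2.28):

#define _GNU_SOURCE
#include <fcntl.h>	/* AT_FDCWD, AT_STATX_SYNC_AS_STAT */
#include <stdio.h>
#include <sys/stat.h>	/* statx(), struct statx, STATX_BASIC_STATS */

int main(int argc, char **argv)
{
	struct statx stx;
	const char *path = argc > 1 ? argv[1] : ".";

	/* Request only the basic stats, with the default sync behaviour */
	if (statx(AT_FDCWD, path, AT_STATX_SYNC_AS_STAT,
		  STATX_BASIC_STATS, &stx) < 0) {
		perror("statx");
		return 1;
	}
	printf("%s: size=%llu mode=%o\n", path,
	       (unsigned long long)stx.stx_size, (unsigned)stx.stx_mode);
	return 0;
}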
+ */ + minor = 0; + while (nfsd_minorversion(minor, cmd) >= 0) + minor++; + } + break; default: return -EINVAL; } @@ -612,9 +632,11 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size) sep = ""; remaining = SIMPLE_TRANSACTION_LIMIT; for (num=2 ; num <= 4 ; num++) { + int minor; if (!nfsd_vers(num, NFSD_AVAIL)) continue; - minor = 0; + + minor = -1; do { len = nfsd_print_version_support(buf, remaining, sep, num, minor); @@ -624,7 +646,8 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size) buf += len; tlen += len; minor++; - sep = " "; + if (len) + sep = " "; } while (num == 4 && minor <= NFSD_SUPPORTED_MINOR_VERSION); } out: diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c index fa82b7707e8531..03a7e9da4da022 100644 --- a/fs/nfsd/nfsproc.c +++ b/fs/nfsd/nfsproc.c @@ -786,6 +786,7 @@ nfserrno (int errno) { nfserr_serverfault, -ESERVERFAULT }, { nfserr_serverfault, -ENFILE }, { nfserr_io, -EUCLEAN }, + { nfserr_perm, -ENOKEY }, }; int i; diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index efd66da992010f..31e1f959345715 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -6,7 +6,7 @@ * Copyright (C) 1995, 1996, 1997 Olaf Kirch */ -#include +#include #include #include #include @@ -167,7 +167,8 @@ nfsd_adjust_nfsd_versions4(void) int nfsd_minorversion(u32 minorversion, enum vers_op change) { - if (minorversion > NFSD_SUPPORTED_MINOR_VERSION) + if (minorversion > NFSD_SUPPORTED_MINOR_VERSION && + change != NFSD_AVAIL) return -1; switch(change) { case NFSD_SET: @@ -415,23 +416,20 @@ static void nfsd_last_thread(struct svc_serv *serv, struct net *net) void nfsd_reset_versions(void) { - int found_one = 0; int i; - for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) { - if (nfsd_program.pg_vers[i]) - found_one = 1; - } + for (i = 0; i < NFSD_NRVERS; i++) + if (nfsd_vers(i, NFSD_TEST)) + return; - if (!found_one) { - for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) - nfsd_program.pg_vers[i] = nfsd_version[i]; -#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) - for (i = NFSD_ACL_MINVERS; i < NFSD_ACL_NRVERS; i++) - nfsd_acl_program.pg_vers[i] = - nfsd_acl_version[i]; -#endif - } + for (i = 0; i < NFSD_NRVERS; i++) + if (i != 4) + nfsd_vers(i, NFSD_SET); + else { + int minor = 0; + while (nfsd_minorversion(minor, NFSD_SET) >= 0) + minor++; + } } /* diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h index db98c48c735aaa..1bbdccecbf3df8 100644 --- a/fs/nfsd/vfs.h +++ b/fs/nfsd/vfs.h @@ -135,7 +135,8 @@ static inline __be32 fh_getattr(struct svc_fh *fh, struct kstat *stat) { struct path p = {.mnt = fh->fh_export->ex_path.mnt, .dentry = fh->fh_dentry}; - return nfserrno(vfs_getattr(&p, stat)); + return nfserrno(vfs_getattr(&p, stat, STATX_BASIC_STATS, + AT_STATX_SYNC_AS_STAT)); } static inline int nfsd_create_is_exclusive(int createmode) diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 7d18d62e8e079c..febed1217b3fd7 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -30,6 +30,8 @@ #include #include #include +#include + #include "nilfs.h" #include "btnode.h" #include "page.h" diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index a4c46221755ea6..e5f7e47de68e4b 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c @@ -6,6 +6,7 @@ #include /* UINT_MAX */ #include #include +#include #include #include diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index 7ebfca6a14272e..2b37f27858345c 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ 
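With the __write_versions()/nfsd_minorversion() rework above, a bare "+4" or "-4" now propagates to every supported minor version, and "+4.0" is no longer printed separately since it is implied by "+4" unless 4.0 was explicitly disabled. The file being parsed is the usual nfsd control interface; for reference, enabling a single minor version from userspace is just a write to /proc/fs/nfsd/versions (sketch only; needs root, the nfsd filesystem mounted there, and no running nfsd threads):

#include <stdio.h>

/* Enable NFSv4.1 by writing "+4.1" to the nfsd versions control file. */
int main(void)
{
	FILE *f = fopen("/proc/fs/nfsd/versions", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fputs("+4.1", f) == EOF || fclose(f) == EOF) {
		perror("write versions");
		return 1;
	}
	return 0;
}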
b/fs/notify/fanotify/fanotify_user.c @@ -14,6 +14,7 @@ #include #include #include +#include #include diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c index f36c29398de371..1aeb837ae41405 100644 --- a/fs/notify/inotify/inotify_fsnotify.c +++ b/fs/notify/inotify/inotify_fsnotify.c @@ -30,6 +30,7 @@ #include /* kmem_* */ #include #include +#include #include "inotify.h" diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index 1cf41c623be1d1..498d609b26c7db 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c @@ -30,7 +30,7 @@ #include #include /* roundup() */ #include /* LOOKUP_FOLLOW */ -#include /* struct user */ +#include #include /* struct kmem_cache */ #include #include diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c index 358ed7e1195a5c..c4f68c338735b9 100644 --- a/fs/ntfs/file.c +++ b/fs/ntfs/file.c @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index d4ec0d8961a6e9..fb15a96df0b606 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -30,6 +30,7 @@ #include #include #include +#include #include diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index ec000575e86343..d0ab7e56d0b41a 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c @@ -54,6 +54,7 @@ */ #include +#include #include #include #include @@ -1862,7 +1863,7 @@ static int o2net_accept_one(struct socket *sock, int *more) new_sock->type = sock->type; new_sock->ops = sock->ops; - ret = sock->ops->accept(sock, new_sock, O_NONBLOCK); + ret = sock->ops->accept(sock, new_sock, O_NONBLOCK, false); if (ret < 0) goto out; diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index 32fd261ae13d02..a2b19fbdcf4695 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c @@ -33,6 +33,7 @@ #include #include #include +#include #include "cluster/heartbeat.h" #include "cluster/nodemanager.h" diff --git a/fs/ocfs2/dlmfs/userdlm.c b/fs/ocfs2/dlmfs/userdlm.c index f70cda2f090d54..9cecf4857195ce 100644 --- a/fs/ocfs2/dlmfs/userdlm.c +++ b/fs/ocfs2/dlmfs/userdlm.c @@ -28,6 +28,7 @@ */ #include +#include #include #include diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 8dce4099a6cae2..3b7c937a36b528 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -33,6 +33,7 @@ #include #include #include +#include #define MLOG_MASK_PREFIX ML_DLM_GLUE #include diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 8836305eb37865..bfeb647459d95e 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -1306,16 +1306,15 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) return status; } -int ocfs2_getattr(struct vfsmount *mnt, - struct dentry *dentry, - struct kstat *stat) +int ocfs2_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { - struct inode *inode = d_inode(dentry); - struct super_block *sb = dentry->d_sb; + struct inode *inode = d_inode(path->dentry); + struct super_block *sb = path->dentry->d_sb; struct ocfs2_super *osb = sb->s_fs_info; int err; - err = ocfs2_inode_revalidate(dentry); + err = ocfs2_inode_revalidate(path->dentry); if (err) { if (err != -ENOENT) mlog_errno(err); diff --git a/fs/ocfs2/file.h b/fs/ocfs2/file.h index 897fd9a2e51dbe..1fdc9839cd931d 100644 --- a/fs/ocfs2/file.h +++ b/fs/ocfs2/file.h @@ -68,8 +68,8 @@ int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh, int ocfs2_extend_allocation(struct inode *inode, u32 
logical_start, u32 clusters_to_add, int mark_unwritten); int ocfs2_setattr(struct dentry *dentry, struct iattr *attr); -int ocfs2_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat); +int ocfs2_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags); int ocfs2_permission(struct inode *inode, int mask); int ocfs2_should_update_atime(struct inode *inode, diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index a24e42f953418b..ca1646fbcaefe7 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -42,6 +42,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include "ocfs2_trace.h" diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c index df7ea8543a2ef0..8c9034ee7383a9 100644 --- a/fs/omfs/inode.c +++ b/fs/omfs/inode.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include diff --git a/fs/open.c b/fs/open.c index 9921f70bc5ca07..949cef29c3bba9 100644 --- a/fs/open.c +++ b/fs/open.c @@ -301,12 +301,10 @@ int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len) if (S_ISFIFO(inode->i_mode)) return -ESPIPE; - /* - * Let individual file system decide if it supports preallocation - * for directories or not. - */ - if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode) && - !S_ISBLK(inode->i_mode)) + if (S_ISDIR(inode->i_mode)) + return -EISDIR; + + if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode)) return -ENODEV; /* Check for wrap through zero too */ @@ -316,7 +314,7 @@ int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len) if (!file->f_op->fallocate) return -EOPNOTSUPP; - sb_start_write(inode->i_sb); + file_start_write(file); ret = file->f_op->fallocate(file, mode, offset, len); /* @@ -329,7 +327,7 @@ int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len) if (ret == 0) fsnotify_modify(file); - sb_end_write(inode->i_sb); + file_end_write(file); return ret; } EXPORT_SYMBOL_GPL(vfs_fallocate); diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c index 5cd617980fbfa2..a304bf34b2127d 100644 --- a/fs/orangefs/inode.c +++ b/fs/orangefs/inode.c @@ -245,25 +245,24 @@ int orangefs_setattr(struct dentry *dentry, struct iattr *iattr) /* * Obtain attributes of an object given a dentry */ -int orangefs_getattr(struct vfsmount *mnt, - struct dentry *dentry, - struct kstat *kstat) +int orangefs_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { int ret = -ENOENT; - struct inode *inode = dentry->d_inode; + struct inode *inode = path->dentry->d_inode; struct orangefs_inode_s *orangefs_inode = NULL; gossip_debug(GOSSIP_INODE_DEBUG, "orangefs_getattr: called on %pd\n", - dentry); + path->dentry); ret = orangefs_inode_getattr(inode, 0, 0); if (ret == 0) { - generic_fillattr(inode, kstat); + generic_fillattr(inode, stat); /* override block size reported to stat */ orangefs_inode = ORANGEFS_I(inode); - kstat->blksize = orangefs_inode->blksize; + stat->blksize = orangefs_inode->blksize; } return ret; } diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h index 70355a9a25969b..5e48a0be976194 100644 --- a/fs/orangefs/orangefs-kernel.h +++ b/fs/orangefs/orangefs-kernel.h @@ -41,7 +41,7 @@ #include #include #include -#include +#include #include #include #include @@ -439,9 +439,8 @@ struct inode *orangefs_new_inode(struct super_block *sb, int orangefs_setattr(struct dentry *dentry, struct iattr *iattr); -int orangefs_getattr(struct vfsmount *mnt, - struct dentry *dentry, - struct kstat *kstat); +int 
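The fs/open.c hunk above makes vfs_fallocate() reject directories with -EISDIR up front, rather than leaving that decision to the individual filesystem, and switches the write protection to file_start_write()/file_end_write(). Ordinary userspace use of the syscall is unaffected; a minimal preallocation example for reference (Linux-specific fallocate(2); sketch only):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/falloc_demo", O_RDWR | O_CREAT, 0600);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Preallocate 1 MiB of space without changing the visible file size */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20) < 0)
		perror("fallocate");
	close(fd);
	unlink("/tmp/falloc_demo");
	return 0;
}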
orangefs_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags); int orangefs_permission(struct inode *inode, int mask); diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c index c48859f16e7b1f..67c24351a67f8d 100644 --- a/fs/orangefs/super.c +++ b/fs/orangefs/super.c @@ -115,6 +115,13 @@ static struct inode *orangefs_alloc_inode(struct super_block *sb) return &orangefs_inode->vfs_inode; } +static void orangefs_i_callback(struct rcu_head *head) +{ + struct inode *inode = container_of(head, struct inode, i_rcu); + struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); + kmem_cache_free(orangefs_inode_cache, orangefs_inode); +} + static void orangefs_destroy_inode(struct inode *inode) { struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); @@ -123,7 +130,7 @@ static void orangefs_destroy_inode(struct inode *inode) "%s: deallocated %p destroying inode %pU\n", __func__, orangefs_inode, get_khandle_from_ino(inode)); - kmem_cache_free(orangefs_inode_cache, orangefs_inode); + call_rcu(&inode->i_rcu, orangefs_i_callback); } /* diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c index f57043dace6287..906ea6c9326017 100644 --- a/fs/overlayfs/copy_up.c +++ b/fs/overlayfs/copy_up.c @@ -15,11 +15,13 @@ #include #include #include -#include +#include +#include #include #include #include #include "overlayfs.h" +#include "ovl_entry.h" #define OVL_COPY_UP_CHUNK_SIZE (1 << 20) @@ -232,12 +234,14 @@ int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat) static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir, struct dentry *dentry, struct path *lowerpath, - struct kstat *stat, const char *link) + struct kstat *stat, const char *link, + struct kstat *pstat, bool tmpfile) { struct inode *wdir = workdir->d_inode; struct inode *udir = upperdir->d_inode; struct dentry *newdentry = NULL; struct dentry *upper = NULL; + struct dentry *temp = NULL; int err; const struct cred *old_creds = NULL; struct cred *new_creds = NULL; @@ -248,25 +252,30 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir, .link = link }; - newdentry = ovl_lookup_temp(workdir, dentry); - err = PTR_ERR(newdentry); - if (IS_ERR(newdentry)) - goto out; - upper = lookup_one_len(dentry->d_name.name, upperdir, dentry->d_name.len); err = PTR_ERR(upper); if (IS_ERR(upper)) - goto out1; + goto out; err = security_inode_copy_up(dentry, &new_creds); if (err < 0) - goto out2; + goto out1; if (new_creds) old_creds = override_creds(new_creds); - err = ovl_create_real(wdir, newdentry, &cattr, NULL, true); + if (tmpfile) + temp = ovl_do_tmpfile(upperdir, stat->mode); + else + temp = ovl_lookup_temp(workdir, dentry); + err = PTR_ERR(temp); + if (IS_ERR(temp)) + goto out1; + + err = 0; + if (!tmpfile) + err = ovl_create_real(wdir, temp, &cattr, NULL, true); if (new_creds) { revert_creds(old_creds); @@ -281,39 +290,55 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir, ovl_path_upper(dentry, &upperpath); BUG_ON(upperpath.dentry != NULL); - upperpath.dentry = newdentry; + upperpath.dentry = temp; + + if (tmpfile) { + inode_unlock(udir); + err = ovl_copy_up_data(lowerpath, &upperpath, + stat->size); + inode_lock_nested(udir, I_MUTEX_PARENT); + } else { + err = ovl_copy_up_data(lowerpath, &upperpath, + stat->size); + } - err = ovl_copy_up_data(lowerpath, &upperpath, stat->size); if (err) goto out_cleanup; } - err = ovl_copy_xattr(lowerpath->dentry, newdentry); + err = ovl_copy_xattr(lowerpath->dentry, temp); if (err) 
goto out_cleanup; - inode_lock(newdentry->d_inode); - err = ovl_set_attr(newdentry, stat); - inode_unlock(newdentry->d_inode); + inode_lock(temp->d_inode); + err = ovl_set_attr(temp, stat); + inode_unlock(temp->d_inode); if (err) goto out_cleanup; - err = ovl_do_rename(wdir, newdentry, udir, upper, 0); + if (tmpfile) + err = ovl_do_link(temp, udir, upper, true); + else + err = ovl_do_rename(wdir, temp, udir, upper, 0); if (err) goto out_cleanup; + newdentry = dget(tmpfile ? upper : temp); ovl_dentry_update(dentry, newdentry); ovl_inode_update(d_inode(dentry), d_inode(newdentry)); - newdentry = NULL; + + /* Restore timestamps on parent (best effort) */ + ovl_set_timestamps(upperdir, pstat); out2: - dput(upper); + dput(temp); out1: - dput(newdentry); + dput(upper); out: return err; out_cleanup: - ovl_cleanup(wdir, newdentry); + if (!tmpfile) + ovl_cleanup(wdir, temp); goto out2; } @@ -337,6 +362,7 @@ static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry, struct dentry *lowerdentry = lowerpath->dentry; struct dentry *upperdir; const char *link = NULL; + struct ovl_fs *ofs = dentry->d_sb->s_fs_info; if (WARN_ON(!workdir)) return -EROFS; @@ -346,7 +372,8 @@ static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry, ovl_path_upper(parent, &parentpath); upperdir = parentpath.dentry; - err = vfs_getattr(&parentpath, &pstat); + err = vfs_getattr(&parentpath, &pstat, + STATX_ATIME | STATX_MTIME, AT_STATX_SYNC_AS_STAT); if (err) return err; @@ -356,6 +383,25 @@ static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry, return PTR_ERR(link); } + /* Should we copyup with O_TMPFILE or with workdir? */ + if (S_ISREG(stat->mode) && ofs->tmpfile) { + err = ovl_copy_up_start(dentry); + /* err < 0: interrupted, err > 0: raced with another copy-up */ + if (unlikely(err)) { + pr_debug("ovl_copy_up_start(%pd2) = %i\n", dentry, err); + if (err > 0) + err = 0; + goto out_done; + } + + inode_lock_nested(upperdir->d_inode, I_MUTEX_PARENT); + err = ovl_copy_up_locked(workdir, upperdir, dentry, lowerpath, + stat, link, &pstat, true); + inode_unlock(upperdir->d_inode); + ovl_copy_up_end(dentry); + goto out_done; + } + err = -EIO; if (lock_rename(workdir, upperdir) != NULL) { pr_err("overlayfs: failed to lock workdir+upperdir\n"); @@ -368,13 +414,10 @@ static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry, } err = ovl_copy_up_locked(workdir, upperdir, dentry, lowerpath, - stat, link); - if (!err) { - /* Restore timestamps on parent (best effort) */ - ovl_set_timestamps(upperdir, &pstat); - } + stat, link, &pstat, false); out_unlock: unlock_rename(workdir, upperdir); +out_done: do_delayed_call(&done); return err; @@ -409,7 +452,8 @@ int ovl_copy_up_flags(struct dentry *dentry, int flags) } ovl_path_lower(next, &lowerpath); - err = vfs_getattr(&lowerpath, &stat); + err = vfs_getattr(&lowerpath, &stat, + STATX_BASIC_STATS, AT_STATX_SYNC_AS_STAT); /* maybe truncate regular file. 
this has no effect on dirs */ if (flags & O_TRUNC) stat.size = 0; diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index 16e06dd8945759..6515796460dfe1 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c @@ -138,9 +138,10 @@ static int ovl_set_opaque(struct dentry *dentry, struct dentry *upperdentry) return err; } -static int ovl_dir_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +static int ovl_dir_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { + struct dentry *dentry = path->dentry; int err; enum ovl_path_type type; struct path realpath; @@ -148,7 +149,7 @@ static int ovl_dir_getattr(struct vfsmount *mnt, struct dentry *dentry, type = ovl_path_real(dentry, &realpath); old_cred = ovl_override_creds(dentry->d_sb); - err = vfs_getattr(&realpath, stat); + err = vfs_getattr(&realpath, stat, request_mask, flags); revert_creds(old_cred); if (err) return err; @@ -264,7 +265,8 @@ static struct dentry *ovl_clear_empty(struct dentry *dentry, goto out; ovl_path_upper(dentry, &upperpath); - err = vfs_getattr(&upperpath, &stat); + err = vfs_getattr(&upperpath, &stat, + STATX_BASIC_STATS, AT_STATX_SYNC_AS_STAT); if (err) goto out_unlock; diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index 08643ac44a0278..f8fe6bf2036df3 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c @@ -9,6 +9,7 @@ #include #include +#include #include #include #include "overlayfs.h" @@ -56,16 +57,17 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr) return err; } -static int ovl_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +static int ovl_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { + struct dentry *dentry = path->dentry; struct path realpath; const struct cred *old_cred; int err; ovl_path_real(dentry, &realpath); old_cred = ovl_override_creds(dentry->d_sb); - err = vfs_getattr(&realpath, stat); + err = vfs_getattr(&realpath, stat, request_mask, flags); revert_creds(old_cred); return err; } diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c index 023bb0b03352f4..b8b077821fb03b 100644 --- a/fs/overlayfs/namei.c +++ b/fs/overlayfs/namei.c @@ -8,6 +8,7 @@ */ #include +#include #include #include #include diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h index 8af450b0e57a2d..741dc0b6931fe9 100644 --- a/fs/overlayfs/overlayfs.h +++ b/fs/overlayfs/overlayfs.h @@ -127,6 +127,15 @@ static inline int ovl_do_whiteout(struct inode *dir, struct dentry *dentry) return err; } +static inline struct dentry *ovl_do_tmpfile(struct dentry *dentry, umode_t mode) +{ + struct dentry *ret = vfs_tmpfile(dentry, mode, 0); + int err = IS_ERR(ret) ? 
PTR_ERR(ret) : 0; + + pr_debug("tmpfile(%pd2, 0%o) = %i\n", dentry, mode, err); + return ret; +} + static inline struct inode *ovl_inode_real(struct inode *inode, bool *is_upper) { unsigned long x = (unsigned long) READ_ONCE(inode->i_private); @@ -169,6 +178,8 @@ void ovl_dentry_version_inc(struct dentry *dentry); u64 ovl_dentry_version_get(struct dentry *dentry); bool ovl_is_whiteout(struct dentry *dentry); struct file *ovl_path_open(struct path *path, int flags); +int ovl_copy_up_start(struct dentry *dentry); +void ovl_copy_up_end(struct dentry *dentry); /* namei.c */ int ovl_path_next(int idx, struct dentry *dentry, struct path *path); diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h index d14bca1850d95a..59614faa14c315 100644 --- a/fs/overlayfs/ovl_entry.h +++ b/fs/overlayfs/ovl_entry.h @@ -27,6 +27,8 @@ struct ovl_fs { struct ovl_config config; /* creds of process who forced instantiation of super block */ const struct cred *creator_cred; + bool tmpfile; + wait_queue_head_t copyup_wq; }; /* private information held for every overlayfs dentry */ @@ -38,6 +40,7 @@ struct ovl_entry { u64 version; const char *redirect; bool opaque; + bool copying; }; struct rcu_head rcu; }; diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 20f48abbb82fd3..c9e70d39c1ea1c 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -7,6 +7,7 @@ * the Free Software Foundation. */ +#include #include #include #include @@ -160,6 +161,25 @@ static void ovl_put_super(struct super_block *sb) kfree(ufs); } +static int ovl_sync_fs(struct super_block *sb, int wait) +{ + struct ovl_fs *ufs = sb->s_fs_info; + struct super_block *upper_sb; + int ret; + + if (!ufs->upper_mnt) + return 0; + upper_sb = ufs->upper_mnt->mnt_sb; + if (!upper_sb->s_op->sync_fs) + return 0; + + /* real inodes have already been synced by sync_filesystem(ovl_sb) */ + down_read(&upper_sb->s_umount); + ret = upper_sb->s_op->sync_fs(upper_sb, wait); + up_read(&upper_sb->s_umount); + return ret; +} + /** * ovl_statfs * @sb: The overlayfs super block @@ -222,6 +242,7 @@ static int ovl_remount(struct super_block *sb, int *flags, char *data) static const struct super_operations ovl_super_operations = { .put_super = ovl_put_super, + .sync_fs = ovl_sync_fs, .statfs = ovl_statfs, .show_options = ovl_show_options, .remount_fs = ovl_remount, @@ -701,6 +722,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) unsigned int stacklen = 0; unsigned int i; bool remote = false; + struct cred *cred; int err; err = -ENOMEM; @@ -708,6 +730,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) if (!ufs) goto out; + init_waitqueue_head(&ufs->copyup_wq); ufs->config.redirect_dir = ovl_redirect_dir_def; err = ovl_parse_opt((char *) data, &ufs->config); if (err) @@ -825,6 +848,8 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) * creation of workdir in previous step. 
*/ if (ufs->workdir) { + struct dentry *temp; + err = ovl_check_d_type_supported(&workpath); if (err < 0) goto out_put_workdir; @@ -836,6 +861,14 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) */ if (!err) pr_warn("overlayfs: upper fs needs to support d_type.\n"); + + /* Check if upper/work fs supports O_TMPFILE */ + temp = ovl_do_tmpfile(ufs->workdir, S_IFREG | 0); + ufs->tmpfile = !IS_ERR(temp); + if (ufs->tmpfile) + dput(temp); + else + pr_warn("overlayfs: upper fs does not support tmpfile.\n"); } } @@ -870,10 +903,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) else sb->s_d_op = &ovl_dentry_operations; - ufs->creator_cred = prepare_creds(); - if (!ufs->creator_cred) + ufs->creator_cred = cred = prepare_creds(); + if (!cred) goto out_put_lower_mnt; + /* Never override disk quota limits or use reserved space */ + cap_lower(cred->cap_effective, CAP_SYS_RESOURCE); + err = -ENOMEM; oe = ovl_alloc_entry(numlower); if (!oe) diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c index 952286f4826cc5..6e610a205e1556 100644 --- a/fs/overlayfs/util.c +++ b/fs/overlayfs/util.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include "overlayfs.h" #include "ovl_entry.h" @@ -263,3 +264,33 @@ struct file *ovl_path_open(struct path *path, int flags) { return dentry_open(path, flags | O_NOATIME, current_cred()); } + +int ovl_copy_up_start(struct dentry *dentry) +{ + struct ovl_fs *ofs = dentry->d_sb->s_fs_info; + struct ovl_entry *oe = dentry->d_fsdata; + int err; + + spin_lock(&ofs->copyup_wq.lock); + err = wait_event_interruptible_locked(ofs->copyup_wq, !oe->copying); + if (!err) { + if (oe->__upperdentry) + err = 1; /* Already copied up */ + else + oe->copying = true; + } + spin_unlock(&ofs->copyup_wq.lock); + + return err; +} + +void ovl_copy_up_end(struct dentry *dentry) +{ + struct ovl_fs *ofs = dentry->d_sb->s_fs_info; + struct ovl_entry *oe = dentry->d_fsdata; + + spin_lock(&ofs->copyup_wq.lock); + oe->copying = false; + wake_up_locked(&ofs->copyup_wq); + spin_unlock(&ofs->copyup_wq.lock); +} diff --git a/fs/posix_acl.c b/fs/posix_acl.c index c9d48dc784953f..eebf5f6cf6d564 100644 --- a/fs/posix_acl.c +++ b/fs/posix_acl.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include diff --git a/fs/proc/array.c b/fs/proc/array.c index fe12b519d09b53..88c355574aa0af 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -60,6 +60,10 @@ #include #include #include +#include +#include +#include +#include #include #include #include diff --git a/fs/proc/base.c b/fs/proc/base.c index 1e1e182d571b4a..c87b6b9a8a76b0 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -85,6 +85,11 @@ #include #include #include +#include +#include +#include +#include +#include #include #include #ifdef CONFIG_HARDWALL @@ -1724,11 +1729,12 @@ struct inode *proc_pid_make_inode(struct super_block * sb, return NULL; } -int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) +int pid_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) { - struct inode *inode = d_inode(dentry); + struct inode *inode = d_inode(path->dentry); struct task_struct *task; - struct pid_namespace *pid = dentry->d_sb->s_fs_info; + struct pid_namespace *pid = path->dentry->d_sb->s_fs_info; generic_fillattr(inode, stat); @@ -3511,9 +3517,10 @@ static int proc_task_readdir(struct file *file, struct dir_context *ctx) return 0; } -static int proc_task_getattr(struct vfsmount *mnt, struct dentry 
*dentry, struct kstat *stat) +static int proc_task_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) { - struct inode *inode = d_inode(dentry); + struct inode *inode = d_inode(path->dentry); struct task_struct *p = get_proc_task(inode); generic_fillattr(inode, stat); diff --git a/fs/proc/fd.c b/fs/proc/fd.c index 00ce1531b2f5fd..c330495c3115ed 100644 --- a/fs/proc/fd.c +++ b/fs/proc/fd.c @@ -1,4 +1,4 @@ -#include +#include #include #include #include diff --git a/fs/proc/generic.c b/fs/proc/generic.c index 06c73904d497ad..ee27feb34cf4d5 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -118,10 +118,10 @@ static int proc_notify_change(struct dentry *dentry, struct iattr *iattr) return 0; } -static int proc_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +static int proc_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) { - struct inode *inode = d_inode(dentry); + struct inode *inode = d_inode(path->dentry); struct proc_dir_entry *de = PDE(inode); if (de && de->nlink) set_nlink(inode, de->nlink); diff --git a/fs/proc/internal.h b/fs/proc/internal.h index 5d6960f5f1c03c..c5ae09b6c726ab 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h @@ -14,6 +14,8 @@ #include #include #include +#include +#include struct ctl_table_header; struct mempolicy; @@ -149,7 +151,7 @@ extern int proc_pid_statm(struct seq_file *, struct pid_namespace *, * base.c */ extern const struct dentry_operations pid_dentry_operations; -extern int pid_getattr(struct vfsmount *, struct dentry *, struct kstat *); +extern int pid_getattr(const struct path *, struct kstat *, u32, unsigned int); extern int proc_setattr(struct dentry *, struct iattr *); extern struct inode *proc_pid_make_inode(struct super_block *, struct task_struct *, umode_t); extern int pid_revalidate(struct dentry *, unsigned int); diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index ea9f3d1ae83063..4ee55274f155fc 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include "internal.h" diff --git a/fs/proc/loadavg.c b/fs/proc/loadavg.c index aec66e6c2060b8..983fce5c24183d 100644 --- a/fs/proc/loadavg.c +++ b/fs/proc/loadavg.c @@ -3,6 +3,8 @@ #include #include #include +#include +#include #include #include #include diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c index ffd72a6c6e0446..d72fc40241d9c6 100644 --- a/fs/proc/proc_net.c +++ b/fs/proc/proc_net.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -140,10 +141,10 @@ static struct dentry *proc_tgid_net_lookup(struct inode *dir, return de; } -static int proc_tgid_net_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +static int proc_tgid_net_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) { - struct inode *inode = d_inode(dentry); + struct inode *inode = d_inode(path->dentry); struct net *net; net = get_proc_task_net(inode); diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 3e64c6502dc854..8f91ec66baa326 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -801,9 +802,10 @@ static int proc_sys_setattr(struct dentry *dentry, struct iattr *attr) return 0; } -static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) +static int proc_sys_getattr(const struct 
path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) { - struct inode *inode = d_inode(dentry); + struct inode *inode = d_inode(path->dentry); struct ctl_table_header *head = grab_header(inode); struct ctl_table *table = PROC_I(inode)->sysctl_entry; diff --git a/fs/proc/root.c b/fs/proc/root.c index b90da888b81a3a..deecb397daa30d 100644 --- a/fs/proc/root.c +++ b/fs/proc/root.c @@ -14,12 +14,14 @@ #include #include #include +#include #include #include #include #include #include #include +#include #include "internal.h" @@ -149,10 +151,10 @@ void __init proc_root_init(void) proc_sys_init(); } -static int proc_root_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat -) +static int proc_root_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) { - generic_fillattr(d_inode(dentry), stat); + generic_fillattr(d_inode(path->dentry), stat); stat->nlink = proc_root.nlink + nr_processes(); return 0; } diff --git a/fs/proc/stat.c b/fs/proc/stat.c index e47c3e8c4dfed5..bd4e55f4aa20b2 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -5,11 +5,12 @@ #include #include #include +#include #include #include #include #include -#include +#include #include #ifndef arch_irq_stat_cpu diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index ee3efb229ef6a6..f08bd31c1081cc 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c index 1ef97cfcf42287..23266694db1171 100644 --- a/fs/proc/task_nommu.c +++ b/fs/proc/task_nommu.c @@ -7,6 +7,8 @@ #include #include #include +#include + #include "internal.h" /* diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c index 3f1190d1899153..b5713fefb4c1b5 100644 --- a/fs/proc_namespace.c +++ b/fs/proc_namespace.c @@ -10,6 +10,8 @@ #include #include #include +#include + #include "proc/internal.h" /* only for get_proc_task() in ->open() */ #include "pnode.h" diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 406fed92362a3d..74b489e3714d51 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -72,6 +72,7 @@ #include #include #include +#include #include #include #include diff --git a/fs/read_write.c b/fs/read_write.c index 5816d4c4cab09c..c4f88afbc67f49 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -4,8 +4,9 @@ * Copyright (C) 1991, 1992 Linus Torvalds */ -#include +#include #include +#include #include #include #include @@ -23,9 +24,6 @@ #include #include -typedef ssize_t (*io_fn_t)(struct file *, char __user *, size_t, loff_t *); -typedef ssize_t (*iter_fn_t)(struct kiocb *, struct iov_iter *); - const struct file_operations generic_ro_fops = { .llseek = generic_file_llseek, .read_iter = generic_file_read_iter, @@ -370,7 +368,7 @@ ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos) kiocb.ki_pos = *ppos; iter->type |= READ; - ret = file->f_op->read_iter(&kiocb, iter); + ret = call_read_iter(file, &kiocb, iter); BUG_ON(ret == -EIOCBQUEUED); if (ret > 0) *ppos = kiocb.ki_pos; @@ -390,7 +388,7 @@ ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos) kiocb.ki_pos = *ppos; iter->type |= WRITE; - ret = file->f_op->write_iter(&kiocb, iter); + ret = call_write_iter(file, &kiocb, iter); BUG_ON(ret == -EIOCBQUEUED); if (ret > 0) *ppos = kiocb.ki_pos; @@ -439,7 +437,7 @@ static ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, lo kiocb.ki_pos = *ppos; iov_iter_init(&iter, READ, 
&iov, 1, len); - ret = filp->f_op->read_iter(&kiocb, &iter); + ret = call_read_iter(filp, &kiocb, &iter); BUG_ON(ret == -EIOCBQUEUED); *ppos = kiocb.ki_pos; return ret; @@ -496,7 +494,7 @@ static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t kiocb.ki_pos = *ppos; iov_iter_init(&iter, WRITE, &iov, 1, len); - ret = filp->f_op->write_iter(&kiocb, &iter); + ret = call_write_iter(filp, &kiocb, &iter); BUG_ON(ret == -EIOCBQUEUED); if (ret > 0) *ppos = kiocb.ki_pos; @@ -675,7 +673,7 @@ unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to) EXPORT_SYMBOL(iov_shorten); static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter, - loff_t *ppos, iter_fn_t fn, int flags) + loff_t *ppos, int type, int flags) { struct kiocb kiocb; ssize_t ret; @@ -692,7 +690,10 @@ static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter, kiocb.ki_flags |= (IOCB_DSYNC | IOCB_SYNC); kiocb.ki_pos = *ppos; - ret = fn(&kiocb, iter); + if (type == READ) + ret = call_read_iter(filp, &kiocb, iter); + else + ret = call_write_iter(filp, &kiocb, iter); BUG_ON(ret == -EIOCBQUEUED); *ppos = kiocb.ki_pos; return ret; @@ -700,7 +701,7 @@ static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter, /* Do it by hand, with file-ops */ static ssize_t do_loop_readv_writev(struct file *filp, struct iov_iter *iter, - loff_t *ppos, io_fn_t fn, int flags) + loff_t *ppos, int type, int flags) { ssize_t ret = 0; @@ -711,7 +712,13 @@ static ssize_t do_loop_readv_writev(struct file *filp, struct iov_iter *iter, struct iovec iovec = iov_iter_iovec(iter); ssize_t nr; - nr = fn(filp, iovec.iov_base, iovec.iov_len, ppos); + if (type == READ) { + nr = filp->f_op->read(filp, iovec.iov_base, + iovec.iov_len, ppos); + } else { + nr = filp->f_op->write(filp, iovec.iov_base, + iovec.iov_len, ppos); + } if (nr < 0) { if (!ret) @@ -834,50 +841,32 @@ ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector, return ret; } -static ssize_t do_readv_writev(int type, struct file *file, - const struct iovec __user * uvector, - unsigned long nr_segs, loff_t *pos, - int flags) +static ssize_t __do_readv_writev(int type, struct file *file, + struct iov_iter *iter, loff_t *pos, int flags) { size_t tot_len; - struct iovec iovstack[UIO_FASTIOV]; - struct iovec *iov = iovstack; - struct iov_iter iter; - ssize_t ret; - io_fn_t fn; - iter_fn_t iter_fn; - - ret = import_iovec(type, uvector, nr_segs, - ARRAY_SIZE(iovstack), &iov, &iter); - if (ret < 0) - return ret; + ssize_t ret = 0; - tot_len = iov_iter_count(&iter); + tot_len = iov_iter_count(iter); if (!tot_len) goto out; ret = rw_verify_area(type, file, pos, tot_len); if (ret < 0) goto out; - if (type == READ) { - fn = file->f_op->read; - iter_fn = file->f_op->read_iter; - } else { - fn = (io_fn_t)file->f_op->write; - iter_fn = file->f_op->write_iter; + if (type != READ) file_start_write(file); - } - if (iter_fn) - ret = do_iter_readv_writev(file, &iter, pos, iter_fn, flags); + if ((type == READ && file->f_op->read_iter) || + (type == WRITE && file->f_op->write_iter)) + ret = do_iter_readv_writev(file, iter, pos, type, flags); else - ret = do_loop_readv_writev(file, &iter, pos, fn, flags); + ret = do_loop_readv_writev(file, iter, pos, type, flags); if (type != READ) file_end_write(file); out: - kfree(iov); if ((ret + (type == READ)) > 0) { if (type == READ) fsnotify_access(file); @@ -887,6 +876,27 @@ static ssize_t do_readv_writev(int type, struct file *file, return ret; } +static ssize_t 
do_readv_writev(int type, struct file *file, + const struct iovec __user *uvector, + unsigned long nr_segs, loff_t *pos, + int flags) +{ + struct iovec iovstack[UIO_FASTIOV]; + struct iovec *iov = iovstack; + struct iov_iter iter; + ssize_t ret; + + ret = import_iovec(type, uvector, nr_segs, + ARRAY_SIZE(iovstack), &iov, &iter); + if (ret < 0) + return ret; + + ret = __do_readv_writev(type, file, &iter, pos, flags); + kfree(iov); + + return ret; +} + ssize_t vfs_readv(struct file *file, const struct iovec __user *vec, unsigned long vlen, loff_t *pos, int flags) { @@ -1064,51 +1074,19 @@ static ssize_t compat_do_readv_writev(int type, struct file *file, unsigned long nr_segs, loff_t *pos, int flags) { - compat_ssize_t tot_len; struct iovec iovstack[UIO_FASTIOV]; struct iovec *iov = iovstack; struct iov_iter iter; ssize_t ret; - io_fn_t fn; - iter_fn_t iter_fn; ret = compat_import_iovec(type, uvector, nr_segs, UIO_FASTIOV, &iov, &iter); if (ret < 0) return ret; - tot_len = iov_iter_count(&iter); - if (!tot_len) - goto out; - ret = rw_verify_area(type, file, pos, tot_len); - if (ret < 0) - goto out; - - if (type == READ) { - fn = file->f_op->read; - iter_fn = file->f_op->read_iter; - } else { - fn = (io_fn_t)file->f_op->write; - iter_fn = file->f_op->write_iter; - file_start_write(file); - } - - if (iter_fn) - ret = do_iter_readv_writev(file, &iter, pos, iter_fn, flags); - else - ret = do_loop_readv_writev(file, &iter, pos, fn, flags); - - if (type != READ) - file_end_write(file); - -out: + ret = __do_readv_writev(type, file, &iter, pos, flags); kfree(iov); - if ((ret + (type == READ)) > 0) { - if (type == READ) - fsnotify_access(file); - else - fsnotify_modify(file); - } + return ret; } @@ -1518,6 +1496,11 @@ ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in, if (flags != 0) return -EINVAL; + if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode)) + return -EISDIR; + if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode)) + return -EINVAL; + ret = rw_verify_area(READ, file_in, &pos_in, len); if (unlikely(ret)) return ret; @@ -1538,7 +1521,7 @@ ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in, if (len == 0) return 0; - sb_start_write(inode_out->i_sb); + file_start_write(file_out); /* * Try cloning first, this is supported by more file systems, and @@ -1574,7 +1557,7 @@ ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in, inc_syscr(current); inc_syscw(current); - sb_end_write(inode_out->i_sb); + file_end_write(file_out); return ret; } diff --git a/fs/select.c b/fs/select.c index 305c0daf5d678b..e2112270d75a5f 100644 --- a/fs/select.c +++ b/fs/select.c @@ -15,7 +15,8 @@ */ #include -#include +#include +#include #include #include #include @@ -26,7 +27,6 @@ #include #include #include -#include #include #include #include diff --git a/fs/splice.c b/fs/splice.c index 4ef78aa8ef61e8..006ba50f4ece67 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -33,6 +33,8 @@ #include #include #include +#include + #include "internal.h" /* @@ -307,7 +309,7 @@ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos, idx = to.idx; init_sync_kiocb(&kiocb, in); kiocb.ki_pos = *ppos; - ret = in->f_op->read_iter(&kiocb, &to); + ret = call_read_iter(in, &kiocb, &to); if (ret > 0) { *ppos = kiocb.ki_pos; file_accessed(in); diff --git a/fs/stat.c b/fs/stat.c index 3f14d1ef086805..fa0be59340cc91 100644 --- a/fs/stat.c +++ b/fs/stat.c @@ -12,12 +12,22 @@ #include #include #include +#include #include #include #include #include +/** + * generic_fillattr - Fill in the basic 
attributes from the inode struct + * @inode: Inode to use as the source + * @stat: Where to fill in the attributes + * + * Fill in the basic attributes in the kstat structure from data that's to be + * found on the VFS inode structure. This is the default if no getattr inode + * operation is supplied. + */ void generic_fillattr(struct inode *inode, struct kstat *stat) { stat->dev = inode->i_sb->s_dev; @@ -33,81 +43,147 @@ void generic_fillattr(struct inode *inode, struct kstat *stat) stat->ctime = inode->i_ctime; stat->blksize = i_blocksize(inode); stat->blocks = inode->i_blocks; -} + if (IS_NOATIME(inode)) + stat->result_mask &= ~STATX_ATIME; + if (IS_AUTOMOUNT(inode)) + stat->attributes |= STATX_ATTR_AUTOMOUNT; +} EXPORT_SYMBOL(generic_fillattr); /** * vfs_getattr_nosec - getattr without security checks * @path: file to get attributes from * @stat: structure to return attributes in + * @request_mask: STATX_xxx flags indicating what the caller wants + * @query_flags: Query mode (KSTAT_QUERY_FLAGS) * * Get attributes without calling security_inode_getattr. * * Currently the only caller other than vfs_getattr is internal to the - * filehandle lookup code, which uses only the inode number and returns - * no attributes to any user. Any other code probably wants - * vfs_getattr. + * filehandle lookup code, which uses only the inode number and returns no + * attributes to any user. Any other code probably wants vfs_getattr. */ -int vfs_getattr_nosec(struct path *path, struct kstat *stat) +int vfs_getattr_nosec(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) { struct inode *inode = d_backing_inode(path->dentry); + memset(stat, 0, sizeof(*stat)); + stat->result_mask |= STATX_BASIC_STATS; + request_mask &= STATX_ALL; + query_flags &= KSTAT_QUERY_FLAGS; if (inode->i_op->getattr) - return inode->i_op->getattr(path->mnt, path->dentry, stat); + return inode->i_op->getattr(path, stat, request_mask, + query_flags); generic_fillattr(inode, stat); return 0; } - EXPORT_SYMBOL(vfs_getattr_nosec); -int vfs_getattr(struct path *path, struct kstat *stat) +/* + * vfs_getattr - Get the enhanced basic attributes of a file + * @path: The file of interest + * @stat: Where to return the statistics + * @request_mask: STATX_xxx flags indicating what the caller wants + * @query_flags: Query mode (KSTAT_QUERY_FLAGS) + * + * Ask the filesystem for a file's attributes. The caller must indicate in + * request_mask and query_flags to indicate what they want. + * + * If the file is remote, the filesystem can be forced to update the attributes + * from the backing store by passing AT_STATX_FORCE_SYNC in query_flags or can + * suppress the update by passing AT_STATX_DONT_SYNC. + * + * Bits must have been set in request_mask to indicate which attributes the + * caller wants retrieving. Any such attribute not requested may be returned + * anyway, but the value may be approximate, and, if remote, may not have been + * synchronised with the server. + * + * 0 will be returned on success, and a -ve error code if unsuccessful. 
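/*
 * Illustrative sketch of the new ->getattr() calling convention that the
 * conversions above follow.  "foofs" is a hypothetical filesystem used only
 * for illustration; it is not part of this patch.
 */
static int foofs_getattr(const struct path *path, struct kstat *stat,
			 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);

	generic_fillattr(inode, stat);

	/* A filesystem that stores a creation time could also report it: */
	if (request_mask & STATX_BTIME) {
		stat->btime.tv_sec = 0;		/* placeholder value */
		stat->btime.tv_nsec = 0;
		stat->result_mask |= STATX_BTIME;
	}
	return 0;
}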
+ */ +int vfs_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) { int retval; retval = security_inode_getattr(path); if (retval) return retval; - return vfs_getattr_nosec(path, stat); + return vfs_getattr_nosec(path, stat, request_mask, query_flags); } - EXPORT_SYMBOL(vfs_getattr); -int vfs_fstat(unsigned int fd, struct kstat *stat) +/** + * vfs_statx_fd - Get the enhanced basic attributes by file descriptor + * @fd: The file descriptor referring to the file of interest + * @stat: The result structure to fill in. + * @request_mask: STATX_xxx flags indicating what the caller wants + * @query_flags: Query mode (KSTAT_QUERY_FLAGS) + * + * This function is a wrapper around vfs_getattr(). The main difference is + * that it uses a file descriptor to determine the file location. + * + * 0 will be returned on success, and a -ve error code if unsuccessful. + */ +int vfs_statx_fd(unsigned int fd, struct kstat *stat, + u32 request_mask, unsigned int query_flags) { struct fd f = fdget_raw(fd); int error = -EBADF; if (f.file) { - error = vfs_getattr(&f.file->f_path, stat); + error = vfs_getattr(&f.file->f_path, stat, + request_mask, query_flags); fdput(f); } return error; } -EXPORT_SYMBOL(vfs_fstat); +EXPORT_SYMBOL(vfs_statx_fd); -int vfs_fstatat(int dfd, const char __user *filename, struct kstat *stat, - int flag) +/** + * vfs_statx - Get basic and extra attributes by filename + * @dfd: A file descriptor representing the base dir for a relative filename + * @filename: The name of the file of interest + * @flags: Flags to control the query + * @stat: The result structure to fill in. + * @request_mask: STATX_xxx flags indicating what the caller wants + * + * This function is a wrapper around vfs_getattr(). The main difference is + * that it uses a filename and base directory to determine the file location. + * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink + * at the given name from being referenced. + * + * The caller must have preset stat->request_mask as for vfs_getattr(). The + * flags are also used to load up stat->query_flags. + * + * 0 will be returned on success, and a -ve error code if unsuccessful. 
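/*
 * The vfs_stat()/vfs_lstat()/vfs_fstat()/vfs_fstatat() helpers dropped from
 * this file presumably survive as thin wrappers around vfs_statx() and
 * vfs_statx_fd(), most likely as static inlines in a header not shown in
 * this hunk.  A sketch of the equivalent mapping, preserving the flag
 * handling of the removed code; the exact flags used by the real wrappers
 * are an assumption.
 */
static inline int vfs_stat_sketch(const char __user *name, struct kstat *stat)
{
	return vfs_statx(AT_FDCWD, name, 0, stat, STATX_BASIC_STATS);
}

static inline int vfs_lstat_sketch(const char __user *name, struct kstat *stat)
{
	return vfs_statx(AT_FDCWD, name, AT_SYMLINK_NOFOLLOW,
			 stat, STATX_BASIC_STATS);
}

static inline int vfs_fstat_sketch(unsigned int fd, struct kstat *stat)
{
	return vfs_statx_fd(fd, stat, STATX_BASIC_STATS, 0);
}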
+ */ +int vfs_statx(int dfd, const char __user *filename, int flags, + struct kstat *stat, u32 request_mask) { struct path path; int error = -EINVAL; - unsigned int lookup_flags = 0; + unsigned int lookup_flags = LOOKUP_FOLLOW | LOOKUP_AUTOMOUNT; - if ((flag & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT | - AT_EMPTY_PATH)) != 0) - goto out; + if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT | + AT_EMPTY_PATH | KSTAT_QUERY_FLAGS)) != 0) + return -EINVAL; - if (!(flag & AT_SYMLINK_NOFOLLOW)) - lookup_flags |= LOOKUP_FOLLOW; - if (flag & AT_EMPTY_PATH) + if (flags & AT_SYMLINK_NOFOLLOW) + lookup_flags &= ~LOOKUP_FOLLOW; + if (flags & AT_NO_AUTOMOUNT) + lookup_flags &= ~LOOKUP_AUTOMOUNT; + if (flags & AT_EMPTY_PATH) lookup_flags |= LOOKUP_EMPTY; + retry: error = user_path_at(dfd, filename, lookup_flags, &path); if (error) goto out; - error = vfs_getattr(&path, stat); + error = vfs_getattr(&path, stat, request_mask, flags); path_put(&path); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; @@ -116,19 +192,7 @@ int vfs_fstatat(int dfd, const char __user *filename, struct kstat *stat, out: return error; } -EXPORT_SYMBOL(vfs_fstatat); - -int vfs_stat(const char __user *name, struct kstat *stat) -{ - return vfs_fstatat(AT_FDCWD, name, stat, 0); -} -EXPORT_SYMBOL(vfs_stat); - -int vfs_lstat(const char __user *name, struct kstat *stat) -{ - return vfs_fstatat(AT_FDCWD, name, stat, AT_SYMLINK_NOFOLLOW); -} -EXPORT_SYMBOL(vfs_lstat); +EXPORT_SYMBOL(vfs_statx); #ifdef __ARCH_WANT_OLD_STAT @@ -141,7 +205,7 @@ static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * sta { static int warncount = 5; struct __old_kernel_stat tmp; - + if (warncount > 0) { warncount--; printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n", @@ -166,7 +230,7 @@ static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * sta #if BITS_PER_LONG == 32 if (stat->size > MAX_NON_LFS) return -EOVERFLOW; -#endif +#endif tmp.st_size = stat->size; tmp.st_atime = stat->atime.tv_sec; tmp.st_mtime = stat->mtime.tv_sec; @@ -445,6 +509,81 @@ SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename, } #endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */ +static inline int __put_timestamp(struct timespec *kts, + struct statx_timestamp __user *uts) +{ + return (__put_user(kts->tv_sec, &uts->tv_sec ) || + __put_user(kts->tv_nsec, &uts->tv_nsec ) || + __put_user(0, &uts->__reserved )); +} + +/* + * Set the statx results. 
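/*
 * Userspace sketch of exercising the new statx() system call wired up just
 * below.  Illustrative only: it assumes uapi headers that export struct statx
 * and the STATX_* constants and a <sys/syscall.h> that defines __NR_statx,
 * and it uses syscall(2) directly since a libc wrapper may not exist yet.
 */
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>		/* AT_FDCWD */
#include <sys/syscall.h>	/* __NR_statx */
#include <linux/stat.h>		/* struct statx, STATX_* */

int main(int argc, char **argv)
{
	const char *name = argc > 1 ? argv[1] : ".";
	struct statx stx;

	if (syscall(__NR_statx, AT_FDCWD, name, 0,
		    STATX_BASIC_STATS | STATX_BTIME, &stx) == -1) {
		perror("statx");
		return 1;
	}
	printf("mask=%#x mode=%o size=%llu\n",
	       stx.stx_mask, stx.stx_mode, (unsigned long long)stx.stx_size);
	if (stx.stx_mask & STATX_BTIME)
		printf("btime=%lld.%09u\n",
		       (long long)stx.stx_btime.tv_sec, stx.stx_btime.tv_nsec);
	return 0;
}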
+ */ +static long statx_set_result(struct kstat *stat, struct statx __user *buffer) +{ + uid_t uid = from_kuid_munged(current_user_ns(), stat->uid); + gid_t gid = from_kgid_munged(current_user_ns(), stat->gid); + + if (__put_user(stat->result_mask, &buffer->stx_mask ) || + __put_user(stat->mode, &buffer->stx_mode ) || + __clear_user(&buffer->__spare0, sizeof(buffer->__spare0)) || + __put_user(stat->nlink, &buffer->stx_nlink ) || + __put_user(uid, &buffer->stx_uid ) || + __put_user(gid, &buffer->stx_gid ) || + __put_user(stat->attributes, &buffer->stx_attributes ) || + __put_user(stat->blksize, &buffer->stx_blksize ) || + __put_user(MAJOR(stat->rdev), &buffer->stx_rdev_major ) || + __put_user(MINOR(stat->rdev), &buffer->stx_rdev_minor ) || + __put_user(MAJOR(stat->dev), &buffer->stx_dev_major ) || + __put_user(MINOR(stat->dev), &buffer->stx_dev_minor ) || + __put_timestamp(&stat->atime, &buffer->stx_atime ) || + __put_timestamp(&stat->btime, &buffer->stx_btime ) || + __put_timestamp(&stat->ctime, &buffer->stx_ctime ) || + __put_timestamp(&stat->mtime, &buffer->stx_mtime ) || + __put_user(stat->ino, &buffer->stx_ino ) || + __put_user(stat->size, &buffer->stx_size ) || + __put_user(stat->blocks, &buffer->stx_blocks ) || + __clear_user(&buffer->__spare1, sizeof(buffer->__spare1)) || + __clear_user(&buffer->__spare2, sizeof(buffer->__spare2))) + return -EFAULT; + + return 0; +} + +/** + * sys_statx - System call to get enhanced stats + * @dfd: Base directory to pathwalk from *or* fd to stat. + * @filename: File to stat *or* NULL. + * @flags: AT_* flags to control pathwalk. + * @mask: Parts of statx struct actually required. + * @buffer: Result buffer. + * + * Note that if filename is NULL, then it does the equivalent of fstat() using + * dfd to indicate the file of interest. + */ +SYSCALL_DEFINE5(statx, + int, dfd, const char __user *, filename, unsigned, flags, + unsigned int, mask, + struct statx __user *, buffer) +{ + struct kstat stat; + int error; + + if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE) + return -EINVAL; + if (!access_ok(VERIFY_WRITE, buffer, sizeof(*buffer))) + return -EFAULT; + + if (filename) + error = vfs_statx(dfd, filename, flags, &stat, mask); + else + error = vfs_statx_fd(dfd, &stat, mask, flags); + if (error) + return error; + return statx_set_result(&stat, buffer); +} + /* Caller is here responsible for sufficient locking (ie. 
inode->i_lock) */ void __inode_add_bytes(struct inode *inode, loff_t bytes) { diff --git a/fs/sync.c b/fs/sync.c index 2a54c1f2203595..11ba023434b14b 100644 --- a/fs/sync.c +++ b/fs/sync.c @@ -192,7 +192,7 @@ int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync) spin_unlock(&inode->i_lock); mark_inode_dirty_sync(inode); } - return file->f_op->fsync(file, start, end, datasync); + return call_fsync(file, start, end, datasync); } EXPORT_SYMBOL(vfs_fsync_range); diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c index 08d3e630b49c86..83809f5b5eca25 100644 --- a/fs/sysv/itree.c +++ b/fs/sysv/itree.c @@ -440,10 +440,11 @@ static unsigned sysv_nblocks(struct super_block *s, loff_t size) return blocks; } -int sysv_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) +int sysv_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { - struct super_block *s = dentry->d_sb; - generic_fillattr(d_inode(dentry), stat); + struct super_block *s = path->dentry->d_sb; + generic_fillattr(d_inode(path->dentry), stat); stat->blocks = (s->s_blocksize / 512) * sysv_nblocks(s, stat->size); stat->blksize = s->s_blocksize; return 0; diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h index 6c212288adcb09..1e7e27c729affb 100644 --- a/fs/sysv/sysv.h +++ b/fs/sysv/sysv.h @@ -142,7 +142,7 @@ extern struct inode *sysv_iget(struct super_block *, unsigned int); extern int sysv_write_inode(struct inode *, struct writeback_control *wbc); extern int sysv_sync_inode(struct inode *); extern void sysv_set_inode(struct inode *, dev_t); -extern int sysv_getattr(struct vfsmount *, struct dentry *, struct kstat *); +extern int sysv_getattr(const struct path *, struct kstat *, u32, unsigned int); extern int sysv_init_icache(void); extern void sysv_destroy_icache(void); diff --git a/fs/timerfd.c b/fs/timerfd.c index 384fa759a56334..c543cdb5f8ed9b 100644 --- a/fs/timerfd.c +++ b/fs/timerfd.c @@ -400,9 +400,9 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags) clockid != CLOCK_BOOTTIME_ALARM)) return -EINVAL; - if (!capable(CAP_WAKE_ALARM) && - (clockid == CLOCK_REALTIME_ALARM || - clockid == CLOCK_BOOTTIME_ALARM)) + if ((clockid == CLOCK_REALTIME_ALARM || + clockid == CLOCK_BOOTTIME_ALARM) && + !capable(CAP_WAKE_ALARM)) return -EPERM; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); @@ -449,7 +449,7 @@ static int do_timerfd_settime(int ufd, int flags, return ret; ctx = f.file->private_data; - if (!capable(CAP_WAKE_ALARM) && isalarm(ctx)) { + if (isalarm(ctx) && !capable(CAP_WAKE_ALARM)) { fdput(f); return -EPERM; } diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index 528369f3e47208..30825d882aa94a 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c @@ -1622,11 +1622,11 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry, return do_rename(old_dir, old_dentry, new_dir, new_dentry, flags); } -int ubifs_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +int ubifs_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { loff_t size; - struct inode *inode = d_inode(dentry); + struct inode *inode = d_inode(path->dentry); struct ubifs_inode *ui = ubifs_inode(inode); mutex_lock(&ui->ui_mutex); diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index f0c86f076535a6..4d57e488038e34 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -1749,8 +1749,8 @@ int ubifs_update_time(struct inode *inode, struct timespec *time, int flags); /* dir.c */ struct inode *ubifs_new_inode(struct ubifs_info *c, 
struct inode *dir, umode_t mode); -int ubifs_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat); +int ubifs_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags); int ubifs_check_dir_empty(struct inode *dir); /* xattr.c */ diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c index f7dfef53f73968..6023c97c6da2f2 100644 --- a/fs/udf/symlink.c +++ b/fs/udf/symlink.c @@ -152,9 +152,10 @@ static int udf_symlink_filler(struct file *file, struct page *page) return err; } -static int udf_symlink_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +static int udf_symlink_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) { + struct dentry *dentry = path->dentry; struct inode *inode = d_backing_inode(dentry); struct page *page; diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 3c421d06a18e6e..1d227b0fcf49ff 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -14,7 +14,8 @@ #include #include -#include +#include +#include #include #include #include @@ -137,8 +138,6 @@ static int userfaultfd_wake_function(wait_queue_t *wq, unsigned mode, * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd * context. * @ctx: [in] Pointer to the userfaultfd context. - * - * Returns: In case of success, returns not zero. */ static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx) { @@ -266,6 +265,7 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx, { struct mm_struct *mm = ctx->mm; pgd_t *pgd; + p4d_t *p4d; pud_t *pud; pmd_t *pmd, _pmd; pte_t *pte; @@ -276,7 +276,10 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx, pgd = pgd_offset(mm, address); if (!pgd_present(*pgd)) goto out; - pud = pud_offset(pgd, address); + p4d = p4d_offset(pgd, address); + if (!p4d_present(*p4d)) + goto out; + pud = pud_offset(p4d, address); if (!pud_present(*pud)) goto out; pmd = pmd_offset(pud, address); @@ -489,7 +492,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason) * in such case. */ down_read(&mm->mmap_sem); - ret = 0; + ret = VM_FAULT_NOPAGE; } } @@ -526,10 +529,11 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason) return ret; } -static int userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, - struct userfaultfd_wait_queue *ewq) +static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, + struct userfaultfd_wait_queue *ewq) { - int ret = 0; + if (WARN_ON_ONCE(current->flags & PF_EXITING)) + goto out; ewq->ctx = ctx; init_waitqueue_entry(&ewq->wq, current); @@ -546,8 +550,16 @@ static int userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, break; if (ACCESS_ONCE(ctx->released) || fatal_signal_pending(current)) { - ret = -1; __remove_wait_queue(&ctx->event_wqh, &ewq->wq); + if (ewq->msg.event == UFFD_EVENT_FORK) { + struct userfaultfd_ctx *new; + + new = (struct userfaultfd_ctx *) + (unsigned long) + ewq->msg.arg.reserved.reserved1; + + userfaultfd_ctx_put(new); + } break; } @@ -565,9 +577,8 @@ static int userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, * ctx may go away after this if the userfault pseudo fd is * already released. 
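/*
 * Sketch of the page-table walk after the p4d conversion seen in
 * userfaultfd_must_wait() above (fragment of the in-kernel idiom, not
 * compilable on its own).  The only changes from the 4-level walk are the
 * extra p4d_offset()/p4d_present() step and pud_offset() taking a p4d
 * pointer instead of a pgd pointer; on architectures that keep four levels
 * the p4d folds back into the pgd, so the extra step costs nothing.
 */
	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;
	p4d = p4d_offset(pgd, address);		/* new intermediate level */
	if (!p4d_present(*p4d))
		goto out;
	pud = pud_offset(p4d, address);		/* was: pud_offset(pgd, address) */
	if (!pud_present(*pud))
		goto out;
	pmd = pmd_offset(pud, address);
	/* pmd/pte handling continues unchanged */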
*/ - +out: userfaultfd_ctx_put(ctx); - return ret; } static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx, @@ -625,7 +636,7 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs) return 0; } -static int dup_fctx(struct userfaultfd_fork_ctx *fctx) +static void dup_fctx(struct userfaultfd_fork_ctx *fctx) { struct userfaultfd_ctx *ctx = fctx->orig; struct userfaultfd_wait_queue ewq; @@ -635,17 +646,15 @@ static int dup_fctx(struct userfaultfd_fork_ctx *fctx) ewq.msg.event = UFFD_EVENT_FORK; ewq.msg.arg.reserved.reserved1 = (unsigned long)fctx->new; - return userfaultfd_event_wait_completion(ctx, &ewq); + userfaultfd_event_wait_completion(ctx, &ewq); } void dup_userfaultfd_complete(struct list_head *fcs) { - int ret = 0; struct userfaultfd_fork_ctx *fctx, *n; list_for_each_entry_safe(fctx, n, fcs, list) { - if (!ret) - ret = dup_fctx(fctx); + dup_fctx(fctx); list_del(&fctx->list); kfree(fctx); } @@ -688,8 +697,7 @@ void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx, userfaultfd_event_wait_completion(ctx, &ewq); } -void userfaultfd_remove(struct vm_area_struct *vma, - struct vm_area_struct **prev, +bool userfaultfd_remove(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct mm_struct *mm = vma->vm_mm; @@ -698,13 +706,11 @@ void userfaultfd_remove(struct vm_area_struct *vma, ctx = vma->vm_userfaultfd_ctx.ctx; if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE)) - return; + return true; userfaultfd_ctx_get(ctx); up_read(&mm->mmap_sem); - *prev = NULL; /* We wait for ACK w/o the mmap semaphore */ - msg_init(&ewq.msg); ewq.msg.event = UFFD_EVENT_REMOVE; @@ -713,7 +719,7 @@ void userfaultfd_remove(struct vm_area_struct *vma, userfaultfd_event_wait_completion(ctx, &ewq); - down_read(&mm->mmap_sem); + return false; } static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps, @@ -774,34 +780,6 @@ void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf) } } -void userfaultfd_exit(struct mm_struct *mm) -{ - struct vm_area_struct *vma = mm->mmap; - - /* - * We can do the vma walk without locking because the caller - * (exit_mm) knows it now has exclusive access - */ - while (vma) { - struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx; - - if (ctx && (ctx->features & UFFD_FEATURE_EVENT_EXIT)) { - struct userfaultfd_wait_queue ewq; - - userfaultfd_ctx_get(ctx); - - msg_init(&ewq.msg); - ewq.msg.event = UFFD_EVENT_EXIT; - - userfaultfd_event_wait_completion(ctx, &ewq); - - ctx->features &= ~UFFD_FEATURE_EVENT_EXIT; - } - - vma = vma->vm_next; - } -} - static int userfaultfd_release(struct inode *inode, struct file *file) { struct userfaultfd_ctx *ctx = file->private_data; diff --git a/fs/xfs/kmem.c b/fs/xfs/kmem.c index 339c696bbc0186..70a5b55e0870a0 100644 --- a/fs/xfs/kmem.c +++ b/fs/xfs/kmem.c @@ -16,6 +16,7 @@ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include +#include #include #include #include @@ -24,24 +25,6 @@ #include "kmem.h" #include "xfs_message.h" -/* - * Greedy allocation. May fail and may return vmalloced memory. 
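/*
 * Sketch of the caller-side contract implied by the userfaultfd_remove()
 * change above (the real caller lives in mm/madvise.c and is not part of
 * this hunk).  A false return means the helper dropped mmap_sem to wait for
 * the userspace ACK, so the caller must re-take it and treat any cached vma
 * pointers as stale; on a true return mmap_sem was never released.
 */
	if (!userfaultfd_remove(vma, start, end)) {
		down_read(&mm->mmap_sem);	/* mm saved before the call */
		/* re-look up the vma before touching it again */
	}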
- */ -void * -kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize) -{ - void *ptr; - size_t kmsize = maxsize; - - while (!(ptr = vzalloc(kmsize))) { - if ((kmsize >>= 1) <= minsize) - kmsize = minsize; - } - if (ptr) - *size = kmsize; - return ptr; -} - void * kmem_alloc(size_t size, xfs_km_flags_t flags) { diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h index 689f746224e7ab..f0fc84fcaac255 100644 --- a/fs/xfs/kmem.h +++ b/fs/xfs/kmem.h @@ -69,8 +69,6 @@ static inline void kmem_free(const void *ptr) } -extern void *kmem_zalloc_greedy(size_t *, size_t, size_t); - static inline void * kmem_zalloc(size_t size, xfs_km_flags_t flags) { diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index a9c66d47757a75..9bd104f3290896 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c @@ -763,8 +763,8 @@ xfs_bmap_extents_to_btree( args.type = XFS_ALLOCTYPE_START_BNO; args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino); } else if (dfops->dop_low) { -try_another_ag: args.type = XFS_ALLOCTYPE_START_BNO; +try_another_ag: args.fsbno = *firstblock; } else { args.type = XFS_ALLOCTYPE_NEAR_BNO; @@ -790,13 +790,17 @@ xfs_bmap_extents_to_btree( if (xfs_sb_version_hasreflink(&cur->bc_mp->m_sb) && args.fsbno == NULLFSBLOCK && args.type == XFS_ALLOCTYPE_NEAR_BNO) { - dfops->dop_low = true; + args.type = XFS_ALLOCTYPE_FIRST_AG; goto try_another_ag; } + if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) { + xfs_iroot_realloc(ip, -1, whichfork); + xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); + return -ENOSPC; + } /* * Allocation can't fail, the space was reserved. */ - ASSERT(args.fsbno != NULLFSBLOCK); ASSERT(*firstblock == NULLFSBLOCK || args.agno >= XFS_FSB_TO_AGNO(mp, *firstblock)); *firstblock = cur->bc_private.b.firstblock = args.fsbno; @@ -4150,6 +4154,19 @@ xfs_bmapi_read( return 0; } +/* + * Add a delayed allocation extent to an inode. Blocks are reserved from the + * global pool and the extent inserted into the inode in-core extent tree. + * + * On entry, got refers to the first extent beyond the offset of the extent to + * allocate or eof is specified if no such extent exists. On return, got refers + * to the extent record that was inserted to the inode fork. + * + * Note that the allocated extent may have been merged with contiguous extents + * during insertion into the inode fork. Thus, got does not reflect the current + * state of the inode fork on return. If necessary, the caller can use lastx to + * look up the updated record in the inode fork. + */ int xfs_bmapi_reserve_delalloc( struct xfs_inode *ip, @@ -4236,13 +4253,8 @@ xfs_bmapi_reserve_delalloc( got->br_startblock = nullstartblock(indlen); got->br_blockcount = alen; got->br_state = XFS_EXT_NORM; - xfs_bmap_add_extent_hole_delay(ip, whichfork, lastx, got); - /* - * Update our extent pointer, given that xfs_bmap_add_extent_hole_delay - * might have merged it into one of the neighbouring ones. - */ - xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *lastx), got); + xfs_bmap_add_extent_hole_delay(ip, whichfork, lastx, got); /* * Tag the inode if blocks were preallocated. 
Note that COW fork @@ -4254,10 +4266,6 @@ xfs_bmapi_reserve_delalloc( if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len)) xfs_inode_set_cowblocks_tag(ip); - ASSERT(got->br_startoff <= aoff); - ASSERT(got->br_startoff + got->br_blockcount >= aoff + alen); - ASSERT(isnullstartblock(got->br_startblock)); - ASSERT(got->br_state == XFS_EXT_NORM); return 0; out_unreserve_blocks: diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c index f93072b58a5832..fd55db47938562 100644 --- a/fs/xfs/libxfs/xfs_bmap_btree.c +++ b/fs/xfs/libxfs/xfs_bmap_btree.c @@ -447,8 +447,8 @@ xfs_bmbt_alloc_block( if (args.fsbno == NULLFSBLOCK) { args.fsbno = be64_to_cpu(start->l); -try_another_ag: args.type = XFS_ALLOCTYPE_START_BNO; +try_another_ag: /* * Make sure there is sufficient room left in the AG to * complete a full tree split for an extent insert. If @@ -488,8 +488,8 @@ xfs_bmbt_alloc_block( if (xfs_sb_version_hasreflink(&cur->bc_mp->m_sb) && args.fsbno == NULLFSBLOCK && args.type == XFS_ALLOCTYPE_NEAR_BNO) { - cur->bc_private.b.dfops->dop_low = true; args.fsbno = cur->bc_private.b.firstblock; + args.type = XFS_ALLOCTYPE_FIRST_AG; goto try_another_ag; } @@ -506,7 +506,7 @@ xfs_bmbt_alloc_block( goto error0; cur->bc_private.b.dfops->dop_low = true; } - if (args.fsbno == NULLFSBLOCK) { + if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) { XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); *stat = 0; return 0; diff --git a/fs/xfs/libxfs/xfs_dir2_priv.h b/fs/xfs/libxfs/xfs_dir2_priv.h index d04547fcf274af..39f8604f764e13 100644 --- a/fs/xfs/libxfs/xfs_dir2_priv.h +++ b/fs/xfs/libxfs/xfs_dir2_priv.h @@ -125,6 +125,7 @@ extern int xfs_dir2_sf_create(struct xfs_da_args *args, xfs_ino_t pino); extern int xfs_dir2_sf_lookup(struct xfs_da_args *args); extern int xfs_dir2_sf_removename(struct xfs_da_args *args); extern int xfs_dir2_sf_replace(struct xfs_da_args *args); +extern int xfs_dir2_sf_verify(struct xfs_inode *ip); /* xfs_dir2_readdir.c */ extern int xfs_readdir(struct xfs_inode *dp, struct dir_context *ctx, diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c index c6809ff41197d9..e84af093b2ab99 100644 --- a/fs/xfs/libxfs/xfs_dir2_sf.c +++ b/fs/xfs/libxfs/xfs_dir2_sf.c @@ -629,6 +629,112 @@ xfs_dir2_sf_check( } #endif /* DEBUG */ +/* Verify the consistency of an inline directory. */ +int +xfs_dir2_sf_verify( + struct xfs_inode *ip) +{ + struct xfs_mount *mp = ip->i_mount; + struct xfs_dir2_sf_hdr *sfp; + struct xfs_dir2_sf_entry *sfep; + struct xfs_dir2_sf_entry *next_sfep; + char *endp; + const struct xfs_dir_ops *dops; + struct xfs_ifork *ifp; + xfs_ino_t ino; + int i; + int i8count; + int offset; + int size; + int error; + __uint8_t filetype; + + ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_LOCAL); + /* + * xfs_iread calls us before xfs_setup_inode sets up ip->d_ops, + * so we can only trust the mountpoint to have the right pointer. + */ + dops = xfs_dir_get_ops(mp, NULL); + + ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); + sfp = (struct xfs_dir2_sf_hdr *)ifp->if_u1.if_data; + size = ifp->if_bytes; + + /* + * Give up if the directory is way too short. + */ + if (size <= offsetof(struct xfs_dir2_sf_hdr, parent) || + size < xfs_dir2_sf_hdr_size(sfp->i8count)) + return -EFSCORRUPTED; + + endp = (char *)sfp + size; + + /* Check .. 
entry */ + ino = dops->sf_get_parent_ino(sfp); + i8count = ino > XFS_DIR2_MAX_SHORT_INUM; + error = xfs_dir_ino_validate(mp, ino); + if (error) + return error; + offset = dops->data_first_offset; + + /* Check all reported entries */ + sfep = xfs_dir2_sf_firstentry(sfp); + for (i = 0; i < sfp->count; i++) { + /* + * struct xfs_dir2_sf_entry has a variable length. + * Check the fixed-offset parts of the structure are + * within the data buffer. + */ + if (((char *)sfep + sizeof(*sfep)) >= endp) + return -EFSCORRUPTED; + + /* Don't allow names with known bad length. */ + if (sfep->namelen == 0) + return -EFSCORRUPTED; + + /* + * Check that the variable-length part of the structure is + * within the data buffer. The next entry starts after the + * name component, so nextentry is an acceptable test. + */ + next_sfep = dops->sf_nextentry(sfp, sfep); + if (endp < (char *)next_sfep) + return -EFSCORRUPTED; + + /* Check that the offsets always increase. */ + if (xfs_dir2_sf_get_offset(sfep) < offset) + return -EFSCORRUPTED; + + /* Check the inode number. */ + ino = dops->sf_get_ino(sfp, sfep); + i8count += ino > XFS_DIR2_MAX_SHORT_INUM; + error = xfs_dir_ino_validate(mp, ino); + if (error) + return error; + + /* Check the file type. */ + filetype = dops->sf_get_ftype(sfep); + if (filetype >= XFS_DIR3_FT_MAX) + return -EFSCORRUPTED; + + offset = xfs_dir2_sf_get_offset(sfep) + + dops->data_entsize(sfep->namelen); + + sfep = next_sfep; + } + if (i8count != sfp->i8count) + return -EFSCORRUPTED; + if ((void *)sfep != (void *)endp) + return -EFSCORRUPTED; + + /* Make sure this whole thing ought to be in local format. */ + if (offset + (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) + + (uint)sizeof(xfs_dir2_block_tail_t) > mp->m_dir_geo->blksize) + return -EFSCORRUPTED; + + return 0; +} + /* * Create a new (shortform) directory. */ diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c index 25c1e078aef6a5..8a37efe04de323 100644 --- a/fs/xfs/libxfs/xfs_inode_fork.c +++ b/fs/xfs/libxfs/xfs_inode_fork.c @@ -33,6 +33,8 @@ #include "xfs_trace.h" #include "xfs_attr_sf.h" #include "xfs_da_format.h" +#include "xfs_da_btree.h" +#include "xfs_dir2_priv.h" kmem_zone_t *xfs_ifork_zone; @@ -210,6 +212,16 @@ xfs_iformat_fork( if (error) return error; + /* Check inline dir contents. */ + if (S_ISDIR(VFS_I(ip)->i_mode) && + dip->di_format == XFS_DINODE_FMT_LOCAL) { + error = xfs_dir2_sf_verify(ip); + if (error) { + xfs_idestroy_fork(ip, XFS_DATA_FORK); + return error; + } + } + if (xfs_is_reflink_inode(ip)) { ASSERT(ip->i_cowfp == NULL); xfs_ifork_init_cow(ip); @@ -320,7 +332,6 @@ xfs_iformat_local( int whichfork, int size) { - /* * If the size is unreasonable, then something * is wrong and we just bail out rather than crash in diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index bf65a9ea864293..61494295d92fe1 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -274,54 +274,49 @@ xfs_end_io( struct xfs_ioend *ioend = container_of(work, struct xfs_ioend, io_work); struct xfs_inode *ip = XFS_I(ioend->io_inode); + xfs_off_t offset = ioend->io_offset; + size_t size = ioend->io_size; int error = ioend->io_bio->bi_error; /* - * Set an error if the mount has shut down and proceed with end I/O - * processing so it can perform whatever cleanups are necessary. + * Just clean up the in-memory strutures if the fs has been shut down. 
*/ - if (XFS_FORCED_SHUTDOWN(ip->i_mount)) + if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { error = -EIO; + goto done; + } /* - * For a CoW extent, we need to move the mapping from the CoW fork - * to the data fork. If instead an error happened, just dump the - * new blocks. + * Clean up any COW blocks on an I/O error. */ - if (ioend->io_type == XFS_IO_COW) { - if (error) - goto done; - if (ioend->io_bio->bi_error) { - error = xfs_reflink_cancel_cow_range(ip, - ioend->io_offset, ioend->io_size); - goto done; + if (unlikely(error)) { + switch (ioend->io_type) { + case XFS_IO_COW: + xfs_reflink_cancel_cow_range(ip, offset, size, true); + break; } - error = xfs_reflink_end_cow(ip, ioend->io_offset, - ioend->io_size); - if (error) - goto done; + + goto done; } /* - * For unwritten extents we need to issue transactions to convert a - * range to normal written extens after the data I/O has finished. - * Detecting and handling completion IO errors is done individually - * for each case as different cleanup operations need to be performed - * on error. + * Success: commit the COW or unwritten blocks if needed. */ - if (ioend->io_type == XFS_IO_UNWRITTEN) { - if (error) - goto done; - error = xfs_iomap_write_unwritten(ip, ioend->io_offset, - ioend->io_size); - } else if (ioend->io_append_trans) { - error = xfs_setfilesize_ioend(ioend, error); - } else { - ASSERT(!xfs_ioend_is_append(ioend) || - ioend->io_type == XFS_IO_COW); + switch (ioend->io_type) { + case XFS_IO_COW: + error = xfs_reflink_end_cow(ip, offset, size); + break; + case XFS_IO_UNWRITTEN: + error = xfs_iomap_write_unwritten(ip, offset, size); + break; + default: + ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans); + break; } done: + if (ioend->io_append_trans) + error = xfs_setfilesize_ioend(ioend, error); xfs_destroy_ioend(ioend, error); } diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c index 8b75dcea596680..828532ce0adca8 100644 --- a/fs/xfs/xfs_bmap_util.c +++ b/fs/xfs/xfs_bmap_util.c @@ -1311,8 +1311,16 @@ xfs_free_file_space( /* * Now that we've unmap all full blocks we'll have to zero out any * partial block at the beginning and/or end. xfs_zero_range is - * smart enough to skip any holes, including those we just created. + * smart enough to skip any holes, including those we just created, + * but we must take care not to zero beyond EOF and enlarge i_size. */ + + if (offset >= XFS_ISIZE(ip)) + return 0; + + if (offset + len > XFS_ISIZE(ip)) + len = XFS_ISIZE(ip) - offset; + return xfs_zero_range(ip, offset, len, NULL); } diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 8c7d01b759221b..b6208728ba3976 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -33,6 +33,7 @@ #include #include #include +#include #include "xfs_format.h" #include "xfs_log_format.h" diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c index 003a99b83bd884..ad9396e516f6e3 100644 --- a/fs/xfs/xfs_dir2_readdir.c +++ b/fs/xfs/xfs_dir2_readdir.c @@ -71,22 +71,11 @@ xfs_dir2_sf_getdents( struct xfs_da_geometry *geo = args->geo; ASSERT(dp->i_df.if_flags & XFS_IFINLINE); - /* - * Give up if the directory is way too short. 
- */ - if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) { - ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount)); - return -EIO; - } - ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); ASSERT(dp->i_df.if_u1.if_data != NULL); sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; - if (dp->i_d.di_size < xfs_dir2_sf_hdr_size(sfp->i8count)) - return -EFSCORRUPTED; - /* * If the block number in the offset is out of range, we're done. */ diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c index 7234b9748c36e0..3531f8f72fa5e1 100644 --- a/fs/xfs/xfs_icache.c +++ b/fs/xfs/xfs_icache.c @@ -1608,7 +1608,7 @@ xfs_inode_free_cowblocks( xfs_ilock(ip, XFS_IOLOCK_EXCL); xfs_ilock(ip, XFS_MMAPLOCK_EXCL); - ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF); + ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false); xfs_iunlock(ip, XFS_MMAPLOCK_EXCL); xfs_iunlock(ip, XFS_IOLOCK_EXCL); diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index edfa6a55b0646d..7605d839659635 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -50,6 +50,7 @@ #include "xfs_log.h" #include "xfs_bmap_btree.h" #include "xfs_reflink.h" +#include "xfs_dir2_priv.h" kmem_zone_t *xfs_inode_zone; @@ -1615,7 +1616,7 @@ xfs_itruncate_extents( /* Remove all pending CoW reservations. */ error = xfs_reflink_cancel_cow_blocks(ip, &tp, first_unmap_block, - last_block); + last_block, true); if (error) goto out; @@ -3546,6 +3547,12 @@ xfs_iflush_int( if (ip->i_d.di_version < 3) ip->i_d.di_flushiter++; + /* Check the inline directory data. */ + if (S_ISDIR(VFS_I(ip)->i_mode) && + ip->i_d.di_format == XFS_DINODE_FMT_LOCAL && + xfs_dir2_sf_verify(ip)) + goto corrupt_out; + /* * Copy the dirty parts of the inode into the on-disk inode. We always * copy out the core of the inode, because if the inode is dirty at all diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index cf1363dbf32b91..2fd7fdf5438f0b 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -43,6 +43,7 @@ #include "xfs_acl.h" #include +#include #include #include #include diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 41662fb14e87d8..288ee5b840d738 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -630,6 +630,11 @@ xfs_file_iomap_begin_delay( goto out_unlock; } + /* + * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch + * them out if the write happens to fail. + */ + iomap->flags = IOMAP_F_NEW; trace_xfs_iomap_alloc(ip, offset, count, 0, &got); done: if (isnullstartblock(got.br_startblock)) @@ -1071,16 +1076,22 @@ xfs_file_iomap_end_delalloc( struct xfs_inode *ip, loff_t offset, loff_t length, - ssize_t written) + ssize_t written, + struct iomap *iomap) { struct xfs_mount *mp = ip->i_mount; xfs_fileoff_t start_fsb; xfs_fileoff_t end_fsb; int error = 0; - /* behave as if the write failed if drop writes is enabled */ - if (xfs_mp_drop_writes(mp)) + /* + * Behave as if the write failed if drop writes is enabled. Set the NEW + * flag to force delalloc cleanup. + */ + if (xfs_mp_drop_writes(mp)) { + iomap->flags |= IOMAP_F_NEW; written = 0; + } /* * start_fsb refers to the first unused block after a short write. If @@ -1094,14 +1105,14 @@ xfs_file_iomap_end_delalloc( end_fsb = XFS_B_TO_FSB(mp, offset + length); /* - * Trim back delalloc blocks if we didn't manage to write the whole - * range reserved. + * Trim delalloc blocks if they were allocated by this write and we + * didn't manage to write the whole range. * * We don't need to care about racing delalloc as we hold i_mutex * across the reserve/allocate/unreserve calls. 
If there are delalloc * blocks in the range, they are ours. */ - if (start_fsb < end_fsb) { + if ((iomap->flags & IOMAP_F_NEW) && start_fsb < end_fsb) { truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb), XFS_FSB_TO_B(mp, end_fsb) - 1); @@ -1131,7 +1142,7 @@ xfs_file_iomap_end( { if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC) return xfs_file_iomap_end_delalloc(XFS_I(inode), offset, - length, written); + length, written, iomap); return 0; } diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index 22c16155f1b42a..229cc6a6d8ef03 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -489,11 +489,12 @@ xfs_vn_get_link_inline( STATIC int xfs_vn_getattr( - struct vfsmount *mnt, - struct dentry *dentry, - struct kstat *stat) + const struct path *path, + struct kstat *stat, + u32 request_mask, + unsigned int query_flags) { - struct inode *inode = d_inode(dentry); + struct inode *inode = d_inode(path->dentry); struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index 66e881790c1710..26d67ce3c18d90 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c @@ -361,7 +361,6 @@ xfs_bulkstat( xfs_agino_t agino; /* inode # in allocation group */ xfs_agnumber_t agno; /* allocation group number */ xfs_btree_cur_t *cur; /* btree cursor for ialloc btree */ - size_t irbsize; /* size of irec buffer in bytes */ xfs_inobt_rec_incore_t *irbuf; /* start of irec buffer */ int nirbuf; /* size of irbuf */ int ubcount; /* size of user's buffer */ @@ -388,11 +387,10 @@ xfs_bulkstat( *ubcountp = 0; *done = 0; - irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4); + irbuf = kmem_zalloc_large(PAGE_SIZE * 4, KM_SLEEP); if (!irbuf) return -ENOMEM; - - nirbuf = irbsize / sizeof(*irbuf); + nirbuf = (PAGE_SIZE * 4) / sizeof(*irbuf); /* * Loop over the allocation groups, starting from the last @@ -585,7 +583,7 @@ xfs_inumbers( return error; bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer))); - buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP); + buffer = kmem_zalloc(bcount * sizeof(*buffer), KM_SLEEP); do { struct xfs_inobt_rec_incore r; int stat; diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h index 7a989de224f4b7..592fdf7111cbfb 100644 --- a/fs/xfs/xfs_linux.h +++ b/fs/xfs/xfs_linux.h @@ -55,7 +55,7 @@ typedef __u32 xfs_nlink_t; #include #include #include -#include +#include #include #include #include diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 450bde68bb7528..688ebff1f66384 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -513,8 +513,7 @@ STATIC void xfs_set_inoalignment(xfs_mount_t *mp) { if (xfs_sb_version_hasalign(&mp->m_sb) && - mp->m_sb.sb_inoalignmt >= - XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size)) + mp->m_sb.sb_inoalignmt >= xfs_icluster_size_fsb(mp)) mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1; else mp->m_inoalign_mask = 0; diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c index da6d08fb359c8e..4a84c5ea266d8f 100644 --- a/fs/xfs/xfs_reflink.c +++ b/fs/xfs/xfs_reflink.c @@ -548,14 +548,18 @@ xfs_reflink_trim_irec_to_next_cow( } /* - * Cancel all pending CoW reservations for some block range of an inode. + * Cancel CoW reservations for some block range of an inode. + * + * If cancel_real is true this function cancels all COW fork extents for the + * inode; if cancel_real is false, real extents are not cleared. 
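/*
 * Illustration of how the new cancel_real flag splits the callers of
 * xfs_reflink_cancel_cow_range()/xfs_reflink_cancel_cow_blocks() into two
 * classes, matching the converted call sites elsewhere in this series:
 */
	/* background/speculative COW block reclaim: keep real allocations */
	error = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);

	/* inode eviction, truncate, clearing the reflink flag: drop everything */
	error = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);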
*/ int xfs_reflink_cancel_cow_blocks( struct xfs_inode *ip, struct xfs_trans **tpp, xfs_fileoff_t offset_fsb, - xfs_fileoff_t end_fsb) + xfs_fileoff_t end_fsb, + bool cancel_real) { struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK); struct xfs_bmbt_irec got, del; @@ -579,7 +583,7 @@ xfs_reflink_cancel_cow_blocks( &idx, &got, &del); if (error) break; - } else { + } else if (del.br_state == XFS_EXT_UNWRITTEN || cancel_real) { xfs_trans_ijoin(*tpp, ip, 0); xfs_defer_init(&dfops, &firstfsb); @@ -621,13 +625,17 @@ xfs_reflink_cancel_cow_blocks( } /* - * Cancel all pending CoW reservations for some byte range of an inode. + * Cancel CoW reservations for some byte range of an inode. + * + * If cancel_real is true this function cancels all COW fork extents for the + * inode; if cancel_real is false, real extents are not cleared. */ int xfs_reflink_cancel_cow_range( struct xfs_inode *ip, xfs_off_t offset, - xfs_off_t count) + xfs_off_t count, + bool cancel_real) { struct xfs_trans *tp; xfs_fileoff_t offset_fsb; @@ -653,7 +661,8 @@ xfs_reflink_cancel_cow_range( xfs_trans_ijoin(tp, ip, 0); /* Scrape out the old CoW reservations */ - error = xfs_reflink_cancel_cow_blocks(ip, &tp, offset_fsb, end_fsb); + error = xfs_reflink_cancel_cow_blocks(ip, &tp, offset_fsb, end_fsb, + cancel_real); if (error) goto out_cancel; @@ -1450,7 +1459,7 @@ xfs_reflink_clear_inode_flag( * We didn't find any shared blocks so turn off the reflink flag. * First, get rid of any leftover CoW mappings. */ - error = xfs_reflink_cancel_cow_blocks(ip, tpp, 0, NULLFILEOFF); + error = xfs_reflink_cancel_cow_blocks(ip, tpp, 0, NULLFILEOFF, true); if (error) return error; diff --git a/fs/xfs/xfs_reflink.h b/fs/xfs/xfs_reflink.h index 33ac9b8db68380..d29a7967f0290e 100644 --- a/fs/xfs/xfs_reflink.h +++ b/fs/xfs/xfs_reflink.h @@ -39,9 +39,9 @@ extern void xfs_reflink_trim_irec_to_next_cow(struct xfs_inode *ip, extern int xfs_reflink_cancel_cow_blocks(struct xfs_inode *ip, struct xfs_trans **tpp, xfs_fileoff_t offset_fsb, - xfs_fileoff_t end_fsb); + xfs_fileoff_t end_fsb, bool cancel_real); extern int xfs_reflink_cancel_cow_range(struct xfs_inode *ip, xfs_off_t offset, - xfs_off_t count); + xfs_off_t count, bool cancel_real); extern int xfs_reflink_end_cow(struct xfs_inode *ip, xfs_off_t offset, xfs_off_t count); extern int xfs_reflink_recover_cow(struct xfs_mount *mp); diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 890862f2447c19..685c042a120f16 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -953,7 +953,7 @@ xfs_fs_destroy_inode( XFS_STATS_INC(ip->i_mount, vn_remove); if (xfs_is_reflink_inode(ip)) { - error = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF); + error = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true); if (error && !XFS_FORCED_SHUTDOWN(ip->i_mount)) xfs_warn(ip->i_mount, "Error %d while evicting CoW blocks for inode %llu.", diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h index 5bdab6bffd23cf..928fd66b127122 100644 --- a/include/asm-generic/4level-fixup.h +++ b/include/asm-generic/4level-fixup.h @@ -15,7 +15,6 @@ ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? 
\ NULL: pmd_offset(pud, address)) -#define pud_alloc(mm, pgd, address) (pgd) #define pud_offset(pgd, start) (pgd) #define pud_none(pud) 0 #define pud_bad(pud) 0 @@ -35,4 +34,6 @@ #undef pud_addr_end #define pud_addr_end(addr, end) (end) +#include + #endif diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h new file mode 100644 index 00000000000000..b5ca82dc41753f --- /dev/null +++ b/include/asm-generic/5level-fixup.h @@ -0,0 +1,41 @@ +#ifndef _5LEVEL_FIXUP_H +#define _5LEVEL_FIXUP_H + +#define __ARCH_HAS_5LEVEL_HACK +#define __PAGETABLE_P4D_FOLDED + +#define P4D_SHIFT PGDIR_SHIFT +#define P4D_SIZE PGDIR_SIZE +#define P4D_MASK PGDIR_MASK +#define PTRS_PER_P4D 1 + +#define p4d_t pgd_t + +#define pud_alloc(mm, p4d, address) \ + ((unlikely(pgd_none(*(p4d))) && __pud_alloc(mm, p4d, address)) ? \ + NULL : pud_offset(p4d, address)) + +#define p4d_alloc(mm, pgd, address) (pgd) +#define p4d_offset(pgd, start) (pgd) +#define p4d_none(p4d) 0 +#define p4d_bad(p4d) 0 +#define p4d_present(p4d) 1 +#define p4d_ERROR(p4d) do { } while (0) +#define p4d_clear(p4d) pgd_clear(p4d) +#define p4d_val(p4d) pgd_val(p4d) +#define p4d_populate(mm, p4d, pud) pgd_populate(mm, p4d, pud) +#define p4d_page(p4d) pgd_page(p4d) +#define p4d_page_vaddr(p4d) pgd_page_vaddr(p4d) + +#define __p4d(x) __pgd(x) +#define set_p4d(p4dp, p4d) set_pgd(p4dp, p4d) + +#undef p4d_free_tlb +#define p4d_free_tlb(tlb, x, addr) do { } while (0) +#define p4d_free(mm, x) do { } while (0) +#define __p4d_free_tlb(tlb, x, addr) do { } while (0) + +#undef p4d_addr_end +#define p4d_addr_end(addr, end) (end) + +#endif diff --git a/include/asm-generic/pgtable-nop4d-hack.h b/include/asm-generic/pgtable-nop4d-hack.h new file mode 100644 index 00000000000000..752fb7511750e0 --- /dev/null +++ b/include/asm-generic/pgtable-nop4d-hack.h @@ -0,0 +1,62 @@ +#ifndef _PGTABLE_NOP4D_HACK_H +#define _PGTABLE_NOP4D_HACK_H + +#ifndef __ASSEMBLY__ +#include + +#define __PAGETABLE_PUD_FOLDED + +/* + * Having the pud type consist of a pgd gets the size right, and allows + * us to conceptually access the pgd entry that this pud is folded into + * without casting. + */ +typedef struct { pgd_t pgd; } pud_t; + +#define PUD_SHIFT PGDIR_SHIFT +#define PTRS_PER_PUD 1 +#define PUD_SIZE (1UL << PUD_SHIFT) +#define PUD_MASK (~(PUD_SIZE-1)) + +/* + * The "pgd_xxx()" functions here are trivial for a folded two-level + * setup: the pud is never bad, and a pud always exists (as it's folded + * into the pgd entry) + */ +static inline int pgd_none(pgd_t pgd) { return 0; } +static inline int pgd_bad(pgd_t pgd) { return 0; } +static inline int pgd_present(pgd_t pgd) { return 1; } +static inline void pgd_clear(pgd_t *pgd) { } +#define pud_ERROR(pud) (pgd_ERROR((pud).pgd)) + +#define pgd_populate(mm, pgd, pud) do { } while (0) +/* + * (puds are folded into pgds so this doesn't get actually called, + * but the define is needed for a generic inline function.) + */ +#define set_pgd(pgdptr, pgdval) set_pud((pud_t *)(pgdptr), (pud_t) { pgdval }) + +static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address) +{ + return (pud_t *)pgd; +} + +#define pud_val(x) (pgd_val((x).pgd)) +#define __pud(x) ((pud_t) { __pgd(x) }) + +#define pgd_page(pgd) (pud_page((pud_t){ pgd })) +#define pgd_page_vaddr(pgd) (pud_page_vaddr((pud_t){ pgd })) + +/* + * allocating and freeing a pud is trivial: the 1-entry pud is + * inside the pgd, so has no extra memory associated with it. 
+ */ +#define pud_alloc_one(mm, address) NULL +#define pud_free(mm, x) do { } while (0) +#define __pud_free_tlb(tlb, x, a) do { } while (0) + +#undef pud_addr_end +#define pud_addr_end(addr, end) (end) + +#endif /* __ASSEMBLY__ */ +#endif /* _PGTABLE_NOP4D_HACK_H */ diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h new file mode 100644 index 00000000000000..de364ecb8df68f --- /dev/null +++ b/include/asm-generic/pgtable-nop4d.h @@ -0,0 +1,56 @@ +#ifndef _PGTABLE_NOP4D_H +#define _PGTABLE_NOP4D_H + +#ifndef __ASSEMBLY__ + +#define __PAGETABLE_P4D_FOLDED + +typedef struct { pgd_t pgd; } p4d_t; + +#define P4D_SHIFT PGDIR_SHIFT +#define PTRS_PER_P4D 1 +#define P4D_SIZE (1UL << P4D_SHIFT) +#define P4D_MASK (~(P4D_SIZE-1)) + +/* + * The "pgd_xxx()" functions here are trivial for a folded two-level + * setup: the p4d is never bad, and a p4d always exists (as it's folded + * into the pgd entry) + */ +static inline int pgd_none(pgd_t pgd) { return 0; } +static inline int pgd_bad(pgd_t pgd) { return 0; } +static inline int pgd_present(pgd_t pgd) { return 1; } +static inline void pgd_clear(pgd_t *pgd) { } +#define p4d_ERROR(p4d) (pgd_ERROR((p4d).pgd)) + +#define pgd_populate(mm, pgd, p4d) do { } while (0) +/* + * (p4ds are folded into pgds so this doesn't get actually called, + * but the define is needed for a generic inline function.) + */ +#define set_pgd(pgdptr, pgdval) set_p4d((p4d_t *)(pgdptr), (p4d_t) { pgdval }) + +static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address) +{ + return (p4d_t *)pgd; +} + +#define p4d_val(x) (pgd_val((x).pgd)) +#define __p4d(x) ((p4d_t) { __pgd(x) }) + +#define pgd_page(pgd) (p4d_page((p4d_t){ pgd })) +#define pgd_page_vaddr(pgd) (p4d_page_vaddr((p4d_t){ pgd })) + +/* + * allocating and freeing a p4d is trivial: the 1-entry p4d is + * inside the pgd, so has no extra memory associated with it. + */ +#define p4d_alloc_one(mm, address) NULL +#define p4d_free(mm, x) do { } while (0) +#define __p4d_free_tlb(tlb, x, a) do { } while (0) + +#undef p4d_addr_end +#define p4d_addr_end(addr, end) (end) + +#endif /* __ASSEMBLY__ */ +#endif /* _PGTABLE_NOP4D_H */ diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h index 810431d8351b16..c2b9b96d6268f4 100644 --- a/include/asm-generic/pgtable-nopud.h +++ b/include/asm-generic/pgtable-nopud.h @@ -3,52 +3,57 @@ #ifndef __ASSEMBLY__ +#ifdef __ARCH_USE_5LEVEL_HACK +#include +#else +#include + #define __PAGETABLE_PUD_FOLDED /* - * Having the pud type consist of a pgd gets the size right, and allows - * us to conceptually access the pgd entry that this pud is folded into + * Having the pud type consist of a p4d gets the size right, and allows + * us to conceptually access the p4d entry that this pud is folded into * without casting. 
*/ -typedef struct { pgd_t pgd; } pud_t; +typedef struct { p4d_t p4d; } pud_t; -#define PUD_SHIFT PGDIR_SHIFT +#define PUD_SHIFT P4D_SHIFT #define PTRS_PER_PUD 1 #define PUD_SIZE (1UL << PUD_SHIFT) #define PUD_MASK (~(PUD_SIZE-1)) /* - * The "pgd_xxx()" functions here are trivial for a folded two-level + * The "p4d_xxx()" functions here are trivial for a folded two-level * setup: the pud is never bad, and a pud always exists (as it's folded - * into the pgd entry) + * into the p4d entry) */ -static inline int pgd_none(pgd_t pgd) { return 0; } -static inline int pgd_bad(pgd_t pgd) { return 0; } -static inline int pgd_present(pgd_t pgd) { return 1; } -static inline void pgd_clear(pgd_t *pgd) { } -#define pud_ERROR(pud) (pgd_ERROR((pud).pgd)) +static inline int p4d_none(p4d_t p4d) { return 0; } +static inline int p4d_bad(p4d_t p4d) { return 0; } +static inline int p4d_present(p4d_t p4d) { return 1; } +static inline void p4d_clear(p4d_t *p4d) { } +#define pud_ERROR(pud) (p4d_ERROR((pud).p4d)) -#define pgd_populate(mm, pgd, pud) do { } while (0) +#define p4d_populate(mm, p4d, pud) do { } while (0) /* - * (puds are folded into pgds so this doesn't get actually called, + * (puds are folded into p4ds so this doesn't get actually called, * but the define is needed for a generic inline function.) */ -#define set_pgd(pgdptr, pgdval) set_pud((pud_t *)(pgdptr), (pud_t) { pgdval }) +#define set_p4d(p4dptr, p4dval) set_pud((pud_t *)(p4dptr), (pud_t) { p4dval }) -static inline pud_t * pud_offset(pgd_t * pgd, unsigned long address) +static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) { - return (pud_t *)pgd; + return (pud_t *)p4d; } -#define pud_val(x) (pgd_val((x).pgd)) -#define __pud(x) ((pud_t) { __pgd(x) } ) +#define pud_val(x) (p4d_val((x).p4d)) +#define __pud(x) ((pud_t) { __p4d(x) }) -#define pgd_page(pgd) (pud_page((pud_t){ pgd })) -#define pgd_page_vaddr(pgd) (pud_page_vaddr((pud_t){ pgd })) +#define p4d_page(p4d) (pud_page((pud_t){ p4d })) +#define p4d_page_vaddr(p4d) (pud_page_vaddr((pud_t){ p4d })) /* * allocating and freeing a pud is trivial: the 1-entry pud is - * inside the pgd, so has no extra memory associated with it. + * inside the p4d, so has no extra memory associated with it. */ #define pud_alloc_one(mm, address) NULL #define pud_free(mm, x) do { } while (0) @@ -58,4 +63,5 @@ static inline pud_t * pud_offset(pgd_t * pgd, unsigned long address) #define pud_addr_end(addr, end) (end) #endif /* __ASSEMBLY__ */ +#endif /* !__ARCH_USE_5LEVEL_HACK */ #endif /* _PGTABLE_NOPUD_H */ diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index f4ca23b158b3b7..1fad160f35de8e 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -10,9 +10,9 @@ #include #include -#if 4 - defined(__PAGETABLE_PUD_FOLDED) - defined(__PAGETABLE_PMD_FOLDED) != \ - CONFIG_PGTABLE_LEVELS -#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{PUD,PMD}_FOLDED +#if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \ + defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS +#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED #endif /* @@ -424,6 +424,13 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) (__boundary - 1 < (end) - 1)? __boundary: (end); \ }) +#ifndef p4d_addr_end +#define p4d_addr_end(addr, end) \ +({ unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK; \ + (__boundary - 1 < (end) - 1)? 
__boundary: (end); \ +}) +#endif + #ifndef pud_addr_end #define pud_addr_end(addr, end) \ ({ unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \ @@ -444,6 +451,7 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) * Do the tests inline, but report and clear the bad entry in mm/memory.c. */ void pgd_clear_bad(pgd_t *); +void p4d_clear_bad(p4d_t *); void pud_clear_bad(pud_t *); void pmd_clear_bad(pmd_t *); @@ -458,6 +466,17 @@ static inline int pgd_none_or_clear_bad(pgd_t *pgd) return 0; } +static inline int p4d_none_or_clear_bad(p4d_t *p4d) +{ + if (p4d_none(*p4d)) + return 1; + if (unlikely(p4d_bad(*p4d))) { + p4d_clear_bad(p4d); + return 1; + } + return 0; +} + static inline int pud_none_or_clear_bad(pud_t *pud) { if (pud_none(*pud)) @@ -844,11 +863,30 @@ static inline int pmd_protnone(pmd_t pmd) #endif /* CONFIG_MMU */ #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP + +#ifndef __PAGETABLE_P4D_FOLDED +int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot); +int p4d_clear_huge(p4d_t *p4d); +#else +static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot) +{ + return 0; +} +static inline int p4d_clear_huge(p4d_t *p4d) +{ + return 0; +} +#endif /* !__PAGETABLE_P4D_FOLDED */ + int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot); int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot); int pud_clear_huge(pud_t *pud); int pmd_clear_huge(pmd_t *pmd); #else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ +static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot) +{ + return 0; +} static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) { return 0; @@ -857,6 +895,10 @@ static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot) { return 0; } +static inline int p4d_clear_huge(p4d_t *p4d) +{ + return 0; +} static inline int pud_clear_huge(pud_t *pud) { return 0; diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h index 4df64a1fc09e7a..532372c6cf15c8 100644 --- a/include/asm-generic/sections.h +++ b/include/asm-generic/sections.h @@ -14,8 +14,8 @@ * [_sdata, _edata]: contains .data.* sections, may also contain .rodata.* * and/or .init.* sections. * [__start_rodata, __end_rodata]: contains .rodata.* sections - * [__start_data_ro_after_init, __end_data_ro_after_init]: - * contains data.ro_after_init section + * [__start_ro_after_init, __end_ro_after_init]: + * contains .data..ro_after_init section * [__init_begin, __init_end]: contains .init.* sections, but .init.text.* * may be out of this range on some architectures. 
* [_sinittext, _einittext]: contains .init.text.* sections @@ -33,7 +33,7 @@ extern char _data[], _sdata[], _edata[]; extern char __bss_start[], __bss_stop[]; extern char __init_begin[], __init_end[]; extern char _sinittext[], _einittext[]; -extern char __start_data_ro_after_init[], __end_data_ro_after_init[]; +extern char __start_ro_after_init[], __end_ro_after_init[]; extern char _end[]; extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[]; extern char __kprobes_text_start[], __kprobes_text_end[]; diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 4329bc6ef04b7b..8afa4335e5b2bf 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -270,6 +270,12 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb, __pte_free_tlb(tlb, ptep, address); \ } while (0) +#define pmd_free_tlb(tlb, pmdp, address) \ + do { \ + __tlb_adjust_range(tlb, address, PAGE_SIZE); \ + __pmd_free_tlb(tlb, pmdp, address); \ + } while (0) + #ifndef __ARCH_HAS_4LEVEL_HACK #define pud_free_tlb(tlb, pudp, address) \ do { \ @@ -278,11 +284,13 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb, } while (0) #endif -#define pmd_free_tlb(tlb, pmdp, address) \ +#ifndef __ARCH_HAS_5LEVEL_HACK +#define p4d_free_tlb(tlb, pudp, address) \ do { \ - __tlb_adjust_range(tlb, address, PAGE_SIZE); \ - __pmd_free_tlb(tlb, pmdp, address); \ + __tlb_adjust_range(tlb, address, PAGE_SIZE); \ + __p4d_free_tlb(tlb, pudp, address); \ } while (0) +#endif #define tlb_migrate_finish(mm) do {} while (0) diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 0968d13b388591..7cdfe167074f87 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -173,6 +173,7 @@ KEEP(*(__##name##_of_table_end)) #define CLKSRC_OF_TABLES() OF_TABLE(CONFIG_CLKSRC_OF, clksrc) +#define CLKEVT_OF_TABLES() OF_TABLE(CONFIG_CLKEVT_OF, clkevt) #define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip) #define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk) #define IOMMU_OF_TABLES() OF_TABLE(CONFIG_OF_IOMMU, iommu) @@ -260,9 +261,9 @@ */ #ifndef RO_AFTER_INIT_DATA #define RO_AFTER_INIT_DATA \ - __start_data_ro_after_init = .; \ + __start_ro_after_init = .; \ *(.data..ro_after_init) \ - __end_data_ro_after_init = .; + __end_ro_after_init = .; #endif /* @@ -559,6 +560,7 @@ CLK_OF_TABLES() \ RESERVEDMEM_OF_TABLES() \ CLKSRC_OF_TABLES() \ + CLKEVT_OF_TABLES() \ IOMMU_OF_TABLES() \ CPU_METHOD_OF_TABLES() \ CPUIDLE_METHOD_OF_TABLES() \ diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index ebe4ded0c55d7f..436c4c2683c7dc 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -360,13 +360,18 @@ static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb, return crypto_attr_alg(tb[1], type, mask); } +static inline int crypto_requires_off(u32 type, u32 mask, u32 off) +{ + return (type ^ off) & mask & off; +} + /* * Returns CRYPTO_ALG_ASYNC if type/mask requires the use of sync algorithms. * Otherwise returns zero. 
*/ static inline int crypto_requires_sync(u32 type, u32 mask) { - return (type ^ CRYPTO_ALG_ASYNC) & mask & CRYPTO_ALG_ASYNC; + return crypto_requires_off(type, mask, CRYPTO_ALG_ASYNC); } noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size); diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h index a2bfd7843f18f6..e2b9c6fe271496 100644 --- a/include/crypto/if_alg.h +++ b/include/crypto/if_alg.h @@ -73,7 +73,7 @@ int af_alg_unregister_type(const struct af_alg_type *type); int af_alg_release(struct socket *sock); void af_alg_release_parent(struct sock *sk); -int af_alg_accept(struct sock *sk, struct socket *newsock); +int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern); int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len); void af_alg_free_sg(struct af_alg_sgl *sgl); diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h index d81b0ba9921fb5..2ef16bf258267e 100644 --- a/include/drm/drm_mm.h +++ b/include/drm/drm_mm.h @@ -40,6 +40,7 @@ #include #include #include +#include #include #include #ifdef CONFIG_DRM_DEBUG_MM diff --git a/include/drm/drm_os_linux.h b/include/drm/drm_os_linux.h index 86ab99bc0ac50e..35e1482ba8a155 100644 --- a/include/drm/drm_os_linux.h +++ b/include/drm/drm_os_linux.h @@ -4,6 +4,7 @@ */ #include /* For task queue support */ +#include #include #ifndef readq diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h index ed953f98f0e144..1487011fe057ba 100644 --- a/include/drm/ttm/ttm_object.h +++ b/include/drm/ttm/ttm_object.h @@ -229,6 +229,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base); * @ref_type: The type of reference. * @existed: Upon completion, indicates that an identical reference object * already existed, and the refcount was upped on that object instead. + * @require_existed: Fail with -EPERM if an identical ref object didn't + * already exist. * * Checks that the base object is shareable and adds a ref object to it. 
* @@ -243,7 +245,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base); */ extern int ttm_ref_object_add(struct ttm_object_file *tfile, struct ttm_base_object *base, - enum ttm_ref_type ref_type, bool *existed); + enum ttm_ref_type ref_type, bool *existed, + bool require_existed); extern bool ttm_ref_object_exists(struct ttm_object_file *tfile, struct ttm_base_object *base); diff --git a/include/dt-bindings/clock/gxbb-clkc.h b/include/dt-bindings/clock/gxbb-clkc.h index baade6f429d01a..692846c7941b53 100644 --- a/include/dt-bindings/clock/gxbb-clkc.h +++ b/include/dt-bindings/clock/gxbb-clkc.h @@ -14,15 +14,21 @@ #define CLKID_MPLL2 15 #define CLKID_SPI 34 #define CLKID_I2C 22 +#define CLKID_SAR_ADC 23 #define CLKID_ETH 36 #define CLKID_USB0 50 #define CLKID_USB1 51 #define CLKID_USB 55 +#define CLKID_HDMI_PCLK 63 #define CLKID_USB1_DDR_BRIDGE 64 #define CLKID_USB0_DDR_BRIDGE 65 +#define CLKID_SANA 69 +#define CLKID_GCLK_VENCI_INT0 77 #define CLKID_AO_I2C 93 #define CLKID_SD_EMMC_A 94 #define CLKID_SD_EMMC_B 95 #define CLKID_SD_EMMC_C 96 +#define CLKID_SAR_ADC_CLK 97 +#define CLKID_SAR_ADC_SEL 98 #endif /* __GXBB_CLKC_H */ diff --git a/include/dt-bindings/pinctrl/samsung.h b/include/dt-bindings/pinctrl/samsung.h index e0ebb20ffdd3a7..b7aa3646208b1c 100644 --- a/include/dt-bindings/pinctrl/samsung.h +++ b/include/dt-bindings/pinctrl/samsung.h @@ -68,4 +68,12 @@ #define EXYNOS_PIN_FUNC_6 6 #define EXYNOS_PIN_FUNC_F 0xf +/* Drive strengths for Exynos7 FSYS1 block */ +#define EXYNOS7_FSYS1_PIN_DRV_LV1 0 +#define EXYNOS7_FSYS1_PIN_DRV_LV2 4 +#define EXYNOS7_FSYS1_PIN_DRV_LV3 2 +#define EXYNOS7_FSYS1_PIN_DRV_LV4 6 +#define EXYNOS7_FSYS1_PIN_DRV_LV5 1 +#define EXYNOS7_FSYS1_PIN_DRV_LV6 5 + #endif /* __DT_BINDINGS_PINCTRL_SAMSUNG_H__ */ diff --git a/include/dt-bindings/sound/cs42l42.h b/include/dt-bindings/sound/cs42l42.h index 399a123aed5815..db69d84ed7d141 100644 --- a/include/dt-bindings/sound/cs42l42.h +++ b/include/dt-bindings/sound/cs42l42.h @@ -20,7 +20,7 @@ #define CS42L42_HPOUT_LOAD_1NF 0 #define CS42L42_HPOUT_LOAD_10NF 1 -/* HPOUT Clamp to GND Overide */ +/* HPOUT Clamp to GND Override */ #define CS42L42_HPOUT_CLAMP_EN 0 #define CS42L42_HPOUT_CLAMP_DIS 1 diff --git a/include/keys/user-type.h b/include/keys/user-type.h index c56fef40f53efa..e098cbe27db546 100644 --- a/include/keys/user-type.h +++ b/include/keys/user-type.h @@ -48,9 +48,14 @@ extern void user_describe(const struct key *user, struct seq_file *m); extern long user_read(const struct key *key, char __user *buffer, size_t buflen); -static inline const struct user_key_payload *user_key_payload(const struct key *key) +static inline const struct user_key_payload *user_key_payload_rcu(const struct key *key) { - return (struct user_key_payload *)rcu_dereference_key(key); + return (struct user_key_payload *)dereference_key_rcu(key); +} + +static inline struct user_key_payload *user_key_payload_locked(const struct key *key) +{ + return (struct user_key_payload *)dereference_key_locked((struct key *)key); } #endif /* CONFIG_KEYS */ diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 673acda012af44..9b05886f9773cd 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -287,18 +287,15 @@ static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id) } /* Validate the processor object's proc_id */ -bool acpi_processor_validate_proc_id(int proc_id); +bool acpi_duplicate_processor_id(int proc_id); #ifdef CONFIG_ACPI_HOTPLUG_CPU /* Arch dependent functions for cpu hotplug support */ int 
acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu); int acpi_unmap_cpu(int cpu); -int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid); #endif /* CONFIG_ACPI_HOTPLUG_CPU */ -void acpi_set_processor_mapping(void); - #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr); #endif diff --git a/include/linux/average.h b/include/linux/average.h index d04aa58280ded5..7ddaf340d2ac98 100644 --- a/include/linux/average.h +++ b/include/linux/average.h @@ -1,45 +1,66 @@ #ifndef _LINUX_AVERAGE_H #define _LINUX_AVERAGE_H -/* Exponentially weighted moving average (EWMA) */ +/* + * Exponentially weighted moving average (EWMA) + * + * This implements a fixed-precision EWMA algorithm, with both the + * precision and fall-off coefficient determined at compile-time + * and built into the generated helper funtions. + * + * The first argument to the macro is the name that will be used + * for the struct and helper functions. + * + * The second argument, the precision, expresses how many bits are + * used for the fractional part of the fixed-precision values. + * + * The third argument, the weight reciprocal, determines how the + * new values will be weighed vs. the old state, new values will + * get weight 1/weight_rcp and old values 1-1/weight_rcp. Note + * that this parameter must be a power of two for efficiency. + */ -#define DECLARE_EWMA(name, _factor, _weight) \ +#define DECLARE_EWMA(name, _precision, _weight_rcp) \ struct ewma_##name { \ unsigned long internal; \ }; \ static inline void ewma_##name##_init(struct ewma_##name *e) \ { \ - BUILD_BUG_ON(!__builtin_constant_p(_factor)); \ - BUILD_BUG_ON(!__builtin_constant_p(_weight)); \ - BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \ - BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \ + BUILD_BUG_ON(!__builtin_constant_p(_precision)); \ + BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \ + /* \ + * Even if you want to feed it just 0/1 you should have \ + * some bits for the non-fractional part... \ + */ \ + BUILD_BUG_ON((_precision) > 30); \ + BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \ e->internal = 0; \ } \ static inline unsigned long \ ewma_##name##_read(struct ewma_##name *e) \ { \ - BUILD_BUG_ON(!__builtin_constant_p(_factor)); \ - BUILD_BUG_ON(!__builtin_constant_p(_weight)); \ - BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \ - BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \ - return e->internal >> ilog2(_factor); \ + BUILD_BUG_ON(!__builtin_constant_p(_precision)); \ + BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \ + BUILD_BUG_ON((_precision) > 30); \ + BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \ + return e->internal >> (_precision); \ } \ static inline void ewma_##name##_add(struct ewma_##name *e, \ unsigned long val) \ { \ unsigned long internal = ACCESS_ONCE(e->internal); \ - unsigned long weight = ilog2(_weight); \ - unsigned long factor = ilog2(_factor); \ + unsigned long weight_rcp = ilog2(_weight_rcp); \ + unsigned long precision = _precision; \ \ - BUILD_BUG_ON(!__builtin_constant_p(_factor)); \ - BUILD_BUG_ON(!__builtin_constant_p(_weight)); \ - BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \ - BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \ + BUILD_BUG_ON(!__builtin_constant_p(_precision)); \ + BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \ + BUILD_BUG_ON((_precision) > 30); \ + BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \ \ ACCESS_ONCE(e->internal) = internal ? 
\ - (((internal << weight) - internal) + \ - (val << factor)) >> weight : \ - (val << factor); \ + (((internal << weight_rcp) - internal) + \ + (val << precision)) >> weight_rcp : \ + (val << precision); \ } #endif /* _LINUX_AVERAGE_H */ diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 1303b570b18cc9..05488da3aee9db 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -6,6 +6,8 @@ #include #include +struct filename; + #define CORENAME_MAX_SIZE 128 /* @@ -123,4 +125,12 @@ extern void install_exec_creds(struct linux_binprm *bprm); extern void set_binfmt(struct linux_binfmt *new); extern ssize_t read_code(struct file *, unsigned long, loff_t, size_t); +extern int do_execve(struct filename *, + const char __user * const __user *, + const char __user * const __user *); +extern int do_execveat(int, struct filename *, + const char __user * const __user *, + const char __user * const __user *, + int); + #endif /* _LINUX_BINFMTS_H */ diff --git a/include/linux/blk-mq-virtio.h b/include/linux/blk-mq-virtio.h new file mode 100644 index 00000000000000..b1ef6e14744f62 --- /dev/null +++ b/include/linux/blk-mq-virtio.h @@ -0,0 +1,10 @@ +#ifndef _LINUX_BLK_MQ_VIRTIO_H +#define _LINUX_BLK_MQ_VIRTIO_H + +struct blk_mq_tag_set; +struct virtio_device; + +int blk_mq_virtio_map_queues(struct blk_mq_tag_set *set, + struct virtio_device *vdev, int first_vec); + +#endif /* _LINUX_BLK_MQ_VIRTIO_H */ diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 001d30d727c56c..b296a900611790 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -245,6 +245,9 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset, void blk_mq_freeze_queue(struct request_queue *q); void blk_mq_unfreeze_queue(struct request_queue *q); void blk_mq_freeze_queue_start(struct request_queue *q); +void blk_mq_freeze_queue_wait(struct request_queue *q); +int blk_mq_freeze_queue_wait_timeout(struct request_queue *q, + unsigned long timeout); int blk_mq_reinit_tagset(struct blk_mq_tag_set *set); int blk_mq_map_queues(struct blk_mq_tag_set *set); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index aecca0e7d9cadb..5a7da607ca045f 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -2,6 +2,7 @@ #define _LINUX_BLKDEV_H #include +#include #ifdef CONFIG_BLOCK @@ -434,7 +435,6 @@ struct request_queue { struct delayed_work delay_work; struct backing_dev_info *backing_dev_info; - struct disk_devt *disk_devt; /* * The queue owner gets to use this for whatever they like. 
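[Editor's illustrative note, not part of the patch] The reworked DECLARE_EWMA() above now takes a precision (number of fractional bits) and a weight reciprocal instead of the old factor/weight pair. A minimal sketch of how a caller would use the generated helpers, assuming a hypothetical average named "rssi" with 10 fractional bits and a 1/8 weight for new samples (the struct and function names below are generated by the macro itself):

#include <linux/average.h>

DECLARE_EWMA(rssi, 10, 8)	/* struct ewma_rssi + ewma_rssi_{init,read,add}() */

struct rx_stats {
	struct ewma_rssi avg_rssi;
};

static void rx_stats_init(struct rx_stats *s)
{
	ewma_rssi_init(&s->avg_rssi);		/* internal state starts at 0 */
}

static void rx_stats_sample(struct rx_stats *s, unsigned long rssi)
{
	/* new sample is weighted 1/8, the previous average 7/8 */
	ewma_rssi_add(&s->avg_rssi, rssi);
}

static unsigned long rx_stats_read(struct rx_stats *s)
{
	/* returns the integer part; the 10 precision bits are shifted out */
	return ewma_rssi_read(&s->avg_rssi);
}

Note that ewma_rssi_read() drops the fractional bits, matching the ewma_##name##_read() definition above, and that the weight reciprocal must be a power of two, as enforced by BUILD_BUG_ON_NOT_POWER_OF_2().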
diff --git a/include/linux/ccp.h b/include/linux/ccp.h index c71dd8fa57640e..c41b8d99dd0e7f 100644 --- a/include/linux/ccp.h +++ b/include/linux/ccp.h @@ -556,7 +556,7 @@ enum ccp_engine { * struct ccp_cmd - CCP operation request * @entry: list element (ccp driver use only) * @work: work element used for callbacks (ccp driver use only) - * @ccp: CCP device to be run on (ccp driver use only) + * @ccp: CCP device to be run on * @ret: operation return code (ccp driver use only) * @flags: cmd processing flags * @engine: CCP operation to perform diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index 1816c5e2658171..88cd5dc8e238a2 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -48,6 +48,7 @@ struct ceph_options { unsigned long mount_timeout; /* jiffies */ unsigned long osd_idle_ttl; /* jiffies */ unsigned long osd_keepalive_timeout; /* jiffies */ + unsigned long osd_request_timeout; /* jiffies */ /* * any type that can't be simply compared or doesn't need need @@ -68,6 +69,7 @@ struct ceph_options { #define CEPH_MOUNT_TIMEOUT_DEFAULT msecs_to_jiffies(60 * 1000) #define CEPH_OSD_KEEPALIVE_DEFAULT msecs_to_jiffies(5 * 1000) #define CEPH_OSD_IDLE_TTL_DEFAULT msecs_to_jiffies(60 * 1000) +#define CEPH_OSD_REQUEST_TIMEOUT_DEFAULT 0 /* no timeout */ #define CEPH_MONC_HUNT_INTERVAL msecs_to_jiffies(3 * 1000) #define CEPH_MONC_PING_INTERVAL msecs_to_jiffies(10 * 1000) diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 2ea0c282f3dc93..c125b5d9e13ced 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -189,6 +189,7 @@ struct ceph_osd_request { /* internal */ unsigned long r_stamp; /* jiffies, send or check time */ + unsigned long r_start_stamp; /* jiffies */ int r_attempts; struct ceph_eversion r_replay_version; /* aka reassert_version */ u32 r_last_force_resend; diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 3c02404cfce9b2..6a3f850cababb6 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -531,8 +531,8 @@ extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem; * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups * @tsk: target task * - * Called from threadgroup_change_begin() and allows cgroup operations to - * synchronize against threadgroup changes using a percpu_rw_semaphore. + * Allows cgroup operations to synchronize against threadgroup changes + * using a percpu_rw_semaphore. */ static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) { @@ -543,8 +543,7 @@ static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups * @tsk: target task * - * Called from threadgroup_change_end(). Counterpart of - * cgroup_threadcgroup_change_begin(). + * Counterpart of cgroup_threadcgroup_change_begin(). 
*/ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) { @@ -555,7 +554,11 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) #define CGROUP_SUBSYS_COUNT 0 -static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) {} +static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) +{ + might_sleep(); +} + static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {} #endif /* CONFIG_CGROUPS */ diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h index 5d3053c34fb3d5..6d7edc3082f984 100644 --- a/include/linux/clockchips.h +++ b/include/linux/clockchips.h @@ -229,7 +229,7 @@ static inline void tick_setup_hrtimer_broadcast(void) { } #ifdef CONFIG_CLKEVT_PROBE extern int clockevent_probe(void); -#els +#else static inline int clockevent_probe(void) { return 0; } #endif diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 21f9c74496e75e..f92081234afd97 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -30,6 +30,8 @@ struct cpu { extern void boot_cpu_init(void); extern void boot_cpu_state_init(void); +extern void cpu_init(void); +extern void trap_init(void); extern int register_cpu(struct cpu *cpu, int num); extern struct device *get_cpu_device(unsigned cpu); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index bb790c4db0c519..62d240e962f036 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -26,7 +26,6 @@ enum cpuhp_state { CPUHP_ARM_OMAP_WAKE_DEAD, CPUHP_IRQ_POLL_DEAD, CPUHP_BLOCK_SOFTIRQ_DEAD, - CPUHP_VIRT_SCSI_DEAD, CPUHP_ACPI_CPUDRV_DEAD, CPUHP_S390_PFAULT_DEAD, CPUHP_BLK_MQ_DEAD, diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index bfc204e70338ab..611fce58d67039 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -9,6 +9,8 @@ */ #include +#include +#include #include #include #include diff --git a/include/linux/cputime.h b/include/linux/cputime.h deleted file mode 100644 index a691dc4ddc130a..00000000000000 --- a/include/linux/cputime.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef __LINUX_CPUTIME_H -#define __LINUX_CPUTIME_H - -#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE -#include - -#ifndef cputime_to_nsecs -# define cputime_to_nsecs(__ct) \ - (cputime_to_usecs(__ct) * NSEC_PER_USEC) -#endif - -#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ -#endif /* __LINUX_CPUTIME_H */ diff --git a/include/linux/cred.h b/include/linux/cred.h index f0e70a1bb3acfe..b03e7d049a64f4 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -18,8 +18,9 @@ #include #include #include +#include +#include -struct user_struct; struct cred; struct inode; diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 591b6c16f9c12e..d2e38dc6172c06 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -11,6 +11,7 @@ #include #include #include +#include struct path; struct vfsmount; diff --git a/include/linux/dccp.h b/include/linux/dccp.h index 61d042bbbf6072..68449293c4b623 100644 --- a/include/linux/dccp.h +++ b/include/linux/dccp.h @@ -163,6 +163,7 @@ struct dccp_request_sock { __u64 dreq_isr; __u64 dreq_gsr; __be32 dreq_service; + spinlock_t dreq_lock; struct list_head dreq_featneg; __u32 dreq_timestamp_echo; __u32 dreq_timestamp_time; diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h index 00e60f79a9cc7e..4178d24935477d 100644 --- a/include/linux/delayacct.h +++ b/include/linux/delayacct.h @@ -18,8 +18,6 @@ #define _LINUX_DELAYACCT_H #include -#include -#include /* * Per-task 
flags relevant to delay accounting @@ -30,7 +28,43 @@ #define DELAYACCT_PF_BLKIO 0x00000002 /* I am waiting on IO */ #ifdef CONFIG_TASK_DELAY_ACCT +struct task_delay_info { + spinlock_t lock; + unsigned int flags; /* Private per-task flags */ + + /* For each stat XXX, add following, aligned appropriately + * + * struct timespec XXX_start, XXX_end; + * u64 XXX_delay; + * u32 XXX_count; + * + * Atomicity of updates to XXX_delay, XXX_count protected by + * single lock above (split into XXX_lock if contention is an issue). + */ + + /* + * XXX_count is incremented on every XXX operation, the delay + * associated with the operation is added to XXX_delay. + * XXX_delay contains the accumulated delay time in nanoseconds. + */ + u64 blkio_start; /* Shared by blkio, swapin */ + u64 blkio_delay; /* wait for sync block io completion */ + u64 swapin_delay; /* wait for swapin block io completion */ + u32 blkio_count; /* total count of the number of sync block */ + /* io operations performed */ + u32 swapin_count; /* total count of the number of swapin block */ + /* io operations performed */ + + u64 freepages_start; + u64 freepages_delay; /* wait for memory reclaim */ + u32 freepages_count; /* total count of memory reclaim */ +}; +#endif +#include +#include + +#ifdef CONFIG_TASK_DELAY_ACCT extern int delayacct_on; /* Delay accounting turned on/off */ extern struct kmem_cache *delayacct_cache; extern void delayacct_init(void); diff --git a/include/linux/device.h b/include/linux/device.h index 30c4570e928dfe..9ef518af5515a0 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -1140,7 +1140,6 @@ static inline bool device_supports_offline(struct device *dev) extern void lock_device_hotplug(void); extern void unlock_device_hotplug(void); extern int lock_device_hotplug_sysfs(void); -void assert_held_device_hotplug(void); extern int device_offline(struct device *dev); extern int device_online(struct device *dev); extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode); diff --git a/include/linux/dmar.h b/include/linux/dmar.h index e9bc9292bd3a5e..e8ffba1052d3ac 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -26,7 +26,7 @@ #include #include #include -#include +#include struct acpi_dmar_header; diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h index 698d51a0eea3f3..c8240a12c42d98 100644 --- a/include/linux/elfcore.h +++ b/include/linux/elfcore.h @@ -3,6 +3,8 @@ #include #include +#include + #include #include diff --git a/include/linux/errqueue.h b/include/linux/errqueue.h index 9ca23fcfb5d731..6fdfc884fdeb3d 100644 --- a/include/linux/errqueue.h +++ b/include/linux/errqueue.h @@ -20,6 +20,8 @@ struct sock_exterr_skb { struct sock_extended_err ee; u16 addr_offset; __be16 port; + u8 opt_stats:1, + unused:7; }; #endif diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h index 9f4956d8601c11..728d4e0292aa77 100644 --- a/include/linux/fault-inject.h +++ b/include/linux/fault-inject.h @@ -61,6 +61,8 @@ static inline struct dentry *fault_create_debugfs_attr(const char *name, #endif /* CONFIG_FAULT_INJECTION */ +struct kmem_cache; + #ifdef CONFIG_FAILSLAB extern bool should_failslab(struct kmem_cache *s, gfp_t gfpflags); #else diff --git a/include/linux/filter.h b/include/linux/filter.h index 0c167fdee5f7d1..fbf7b39e810355 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -409,6 +409,7 @@ struct bpf_prog { u16 pages; /* Number of allocated pages */ kmemcheck_bitfield_begin(meta); u16 jited:1, /* Is our filter 
JIT'ed? */ + locked:1, /* Program image locked? */ gpl_compatible:1, /* Is filter GPL compatible? */ cb_access:1, /* Is control block accessed? */ dst_needed:1, /* Do we need dst entry? */ @@ -554,22 +555,29 @@ static inline bool bpf_prog_was_classic(const struct bpf_prog *prog) #ifdef CONFIG_ARCH_HAS_SET_MEMORY static inline void bpf_prog_lock_ro(struct bpf_prog *fp) { - set_memory_ro((unsigned long)fp, fp->pages); + fp->locked = 1; + WARN_ON_ONCE(set_memory_ro((unsigned long)fp, fp->pages)); } static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) { - set_memory_rw((unsigned long)fp, fp->pages); + if (fp->locked) { + WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages)); + /* In case set_memory_rw() fails, we want to be the first + * to crash here instead of some random place later on. + */ + fp->locked = 0; + } } static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr) { - set_memory_ro((unsigned long)hdr, hdr->pages); + WARN_ON_ONCE(set_memory_ro((unsigned long)hdr, hdr->pages)); } static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr) { - set_memory_rw((unsigned long)hdr, hdr->pages); + WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages)); } #else static inline void bpf_prog_lock_ro(struct bpf_prog *fp) diff --git a/include/linux/fs.h b/include/linux/fs.h index c64f2cb7d3647c..7251f7bb45e8b8 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1567,6 +1567,9 @@ extern int vfs_unlink(struct inode *, struct dentry *, struct inode **); extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **, unsigned int); extern int vfs_whiteout(struct inode *, struct dentry *); +extern struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode, + int open_flag); + /* * VFS file helper functions. 
*/ @@ -1706,7 +1709,7 @@ struct inode_operations { int (*rename) (struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); int (*setattr) (struct dentry *, struct iattr *); - int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *); + int (*getattr) (const struct path *, struct kstat *, u32, unsigned int); ssize_t (*listxattr) (struct dentry *, char *, size_t); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len); @@ -1718,6 +1721,29 @@ struct inode_operations { int (*set_acl)(struct inode *, struct posix_acl *, int); } ____cacheline_aligned; +static inline ssize_t call_read_iter(struct file *file, struct kiocb *kio, + struct iov_iter *iter) +{ + return file->f_op->read_iter(kio, iter); +} + +static inline ssize_t call_write_iter(struct file *file, struct kiocb *kio, + struct iov_iter *iter) +{ + return file->f_op->write_iter(kio, iter); +} + +static inline int call_mmap(struct file *file, struct vm_area_struct *vma) +{ + return file->f_op->mmap(file, vma); +} + +static inline int call_fsync(struct file *file, loff_t start, loff_t end, + int datasync) +{ + return file->f_op->fsync(file, start, end, datasync); +} + ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector, unsigned long nr_segs, unsigned long fast_segs, struct iovec *fast_pointer, @@ -1744,19 +1770,6 @@ extern int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff, extern int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same); -static inline int do_clone_file_range(struct file *file_in, loff_t pos_in, - struct file *file_out, loff_t pos_out, - u64 len) -{ - int ret; - - sb_start_write(file_inode(file_out)->i_sb); - ret = vfs_clone_file_range(file_in, pos_in, file_out, pos_out, len); - sb_end_write(file_inode(file_out)->i_sb); - - return ret; -} - struct super_operations { struct inode *(*alloc_inode)(struct super_block *sb); void (*destroy_inode)(struct inode *); @@ -2568,6 +2581,19 @@ static inline void file_end_write(struct file *file) __sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE); } +static inline int do_clone_file_range(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + u64 len) +{ + int ret; + + file_start_write(file_out); + ret = vfs_clone_file_range(file_in, pos_in, file_out, pos_out, len); + file_end_write(file_out); + + return ret; +} + /* * get_write_access() gets write permission for a file. * put_write_access() releases this write permission. 
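[Editor's illustrative note, not part of the patch] The ->getattr() method in struct inode_operations changes above from taking (struct vfsmount *, struct dentry *, struct kstat *) to (const struct path *, struct kstat *, u32 request_mask, unsigned int query_flags), matching the xfs_vn_getattr() and simple_getattr() conversions elsewhere in this diff. A minimal sketch of a converted implementation for a hypothetical filesystem ("foofs" is an assumed name, not taken from the patch):

#include <linux/fs.h>
#include <linux/dcache.h>

static int foofs_getattr(const struct path *path, struct kstat *stat,
			 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);

	/*
	 * Fill in the standard fields; request_mask and query_flags may be
	 * consulted to skip expensive attributes the caller did not ask for,
	 * or simply ignored by filesystems that only provide basic stat data.
	 */
	generic_fillattr(inode, stat);
	return 0;
}

static const struct inode_operations foofs_inode_operations = {
	.getattr	= foofs_getattr,
};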
@@ -2652,7 +2678,7 @@ static const char * const kernel_read_file_str[] = { static inline const char *kernel_read_file_id_str(enum kernel_read_file_id id) { - if (id < 0 || id >= READING_MAX_ID) + if ((unsigned)id >= READING_MAX_ID) return kernel_read_file_str[READING_UNKNOWN]; return kernel_read_file_str[id]; @@ -2876,8 +2902,8 @@ extern int page_symlink(struct inode *inode, const char *symname, int len); extern const struct inode_operations page_symlink_inode_operations; extern void kfree_link(void *); extern void generic_fillattr(struct inode *, struct kstat *); -int vfs_getattr_nosec(struct path *path, struct kstat *stat); -extern int vfs_getattr(struct path *, struct kstat *); +extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int); +extern int vfs_getattr(const struct path *, struct kstat *, u32, unsigned int); void __inode_add_bytes(struct inode *inode, loff_t bytes); void inode_add_bytes(struct inode *inode, loff_t bytes); void __inode_sub_bytes(struct inode *inode, loff_t bytes); @@ -2890,10 +2916,29 @@ extern const struct inode_operations simple_symlink_inode_operations; extern int iterate_dir(struct file *, struct dir_context *); -extern int vfs_stat(const char __user *, struct kstat *); -extern int vfs_lstat(const char __user *, struct kstat *); -extern int vfs_fstat(unsigned int, struct kstat *); -extern int vfs_fstatat(int , const char __user *, struct kstat *, int); +extern int vfs_statx(int, const char __user *, int, struct kstat *, u32); +extern int vfs_statx_fd(unsigned int, struct kstat *, u32, unsigned int); + +static inline int vfs_stat(const char __user *filename, struct kstat *stat) +{ + return vfs_statx(AT_FDCWD, filename, 0, stat, STATX_BASIC_STATS); +} +static inline int vfs_lstat(const char __user *name, struct kstat *stat) +{ + return vfs_statx(AT_FDCWD, name, AT_SYMLINK_NOFOLLOW, + stat, STATX_BASIC_STATS); +} +static inline int vfs_fstatat(int dfd, const char __user *filename, + struct kstat *stat, int flags) +{ + return vfs_statx(dfd, filename, flags, stat, STATX_BASIC_STATS); +} +static inline int vfs_fstat(int fd, struct kstat *stat) +{ + return vfs_statx_fd(fd, stat, STATX_BASIC_STATS, 0); +} + + extern const char *vfs_get_link(struct dentry *, struct delayed_call *); extern int vfs_readlink(struct dentry *, char __user *, int); @@ -2923,7 +2968,7 @@ extern int dcache_dir_close(struct inode *, struct file *); extern loff_t dcache_dir_lseek(struct file *, loff_t, int); extern int dcache_readdir(struct file *, struct dir_context *); extern int simple_setattr(struct dentry *, struct iattr *); -extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *); +extern int simple_getattr(const struct path *, struct kstat *, u32, unsigned int); extern int simple_statfs(struct dentry *, struct kstatfs *); extern int simple_open(struct inode *inode, struct file *file); extern int simple_link(struct dentry *, struct inode *, struct dentry *); diff --git a/include/linux/fscrypt_common.h b/include/linux/fscrypt_common.h index 547f81592ba134..10c1abfbac6c45 100644 --- a/include/linux/fscrypt_common.h +++ b/include/linux/fscrypt_common.h @@ -87,7 +87,6 @@ struct fscrypt_operations { unsigned int flags; const char *key_prefix; int (*get_context)(struct inode *, void *, size_t); - int (*prepare_context)(struct inode *); int (*set_context)(struct inode *, const void *, size_t, void *); int (*dummy_context)(struct inode *); bool (*is_encrypted)(struct inode *); diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 
a999d281a2f1e4..76f39754e7b029 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -167,13 +167,6 @@ struct blk_integrity { }; #endif /* CONFIG_BLK_DEV_INTEGRITY */ -struct disk_devt { - atomic_t count; - void (*release)(struct disk_devt *disk_devt); -}; - -void put_disk_devt(struct disk_devt *disk_devt); -void get_disk_devt(struct disk_devt *disk_devt); struct gendisk { /* major, first_minor and minors are input parameters only, @@ -183,7 +176,6 @@ struct gendisk { int first_minor; int minors; /* maximum number of minors, =1 for * disks that can't be partitioned. */ - struct disk_devt *disk_devt; char disk_name[DISK_NAME_LEN]; /* name of major driver */ char *(*devnode)(struct gendisk *gd, umode_t *mode); diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index 2484b2fcc6eb58..933d936566055d 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -143,15 +143,6 @@ struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev, struct fwnode_handle *child, enum gpiod_flags flags, const char *label); -/* FIXME: delete this helper when users are switched over */ -static inline struct gpio_desc *devm_get_gpiod_from_child(struct device *dev, - const char *con_id, struct fwnode_handle *child) -{ - return devm_fwnode_get_index_gpiod_from_child(dev, con_id, - 0, child, - GPIOD_ASIS, - "?"); -} #else /* CONFIG_GPIOLIB */ @@ -444,13 +435,6 @@ struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev, return ERR_PTR(-ENOSYS); } -/* FIXME: delete this when all users are switched over */ -static inline struct gpio_desc *devm_get_gpiod_from_child(struct device *dev, - const char *con_id, struct fwnode_handle *child) -{ - return ERR_PTR(-ENOSYS); -} - #endif /* CONFIG_GPIOLIB */ static inline diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index e52b427223baa8..249e579ecd4c4d 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 503099d8aada53..b857fc8cc2ecae 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -122,7 +122,7 @@ struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address, pud_t *pud, int flags); int pmd_huge(pmd_t pmd); -int pud_huge(pud_t pmd); +int pud_huge(pud_t pud); unsigned long hugetlb_change_protection(struct vm_area_struct *vma, unsigned long address, unsigned long end, pgprot_t newprot); @@ -197,6 +197,9 @@ static inline void __unmap_hugepage_range(struct mmu_gather *tlb, #ifndef pgd_huge #define pgd_huge(x) 0 #endif +#ifndef p4d_huge +#define p4d_huge(x) 0 +#endif #ifndef pgd_write static inline int pgd_write(pgd_t pgd) diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h index 78d59dba563e33..88b6737491210a 100644 --- a/include/linux/hwmon.h +++ b/include/linux/hwmon.h @@ -88,6 +88,7 @@ enum hwmon_temp_attributes { #define HWMON_T_CRIT_HYST BIT(hwmon_temp_crit_hyst) #define HWMON_T_EMERGENCY BIT(hwmon_temp_emergency) #define HWMON_T_EMERGENCY_HYST BIT(hwmon_temp_emergency_hyst) +#define HWMON_T_ALARM BIT(hwmon_temp_alarm) #define HWMON_T_MIN_ALARM BIT(hwmon_temp_min_alarm) #define HWMON_T_MAX_ALARM BIT(hwmon_temp_max_alarm) #define HWMON_T_CRIT_ALARM BIT(hwmon_temp_crit_alarm) diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 62bbf3c1aa4a04..970771a5f73902 100644 --- 
a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -845,6 +845,13 @@ struct vmbus_channel { * link up channels based on their CPU affinity. */ struct list_head percpu_list; + + /* + * Defer freeing channel until after all cpu's have + * gone through grace period. + */ + struct rcu_head rcu; + /* * For performance critical channels (storage, networking * etc,), Hyper-V has a mechanism to enhance the throughput @@ -1430,9 +1437,6 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf, const int *srv_version, int srv_vercnt, int *nego_fw_version, int *nego_srv_version); -void hv_event_tasklet_disable(struct vmbus_channel *channel); -void hv_event_tasklet_enable(struct vmbus_channel *channel); - void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid); void vmbus_setevent(struct vmbus_channel *channel); diff --git a/include/linux/i2c.h b/include/linux/i2c.h index bed8fbb45f31fb..6b183521c61697 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -30,6 +30,7 @@ #include /* for struct device */ #include /* for completion */ #include +#include #include /* for Host Notify IRQ */ #include /* for struct device_node */ #include /* for swab16 */ diff --git a/include/linux/iio/sw_device.h b/include/linux/iio/sw_device.h index 23ca4151552796..fa793193306798 100644 --- a/include/linux/iio/sw_device.h +++ b/include/linux/iio/sw_device.h @@ -62,7 +62,7 @@ void iio_swd_group_init_type_name(struct iio_sw_device *d, const char *name, struct config_item_type *type) { -#ifdef CONFIG_CONFIGFS_FS +#if IS_ENABLED(CONFIG_CONFIGFS_FS) config_group_init_type_name(&d->group, name, type); #endif } diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 3a85d61f761422..91d9049f003938 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -12,8 +12,10 @@ #include #include #include +#include #include #include +#include #include @@ -149,8 +151,6 @@ extern struct group_info init_groups; extern struct cred init_cred; -extern struct task_group root_task_group; - #ifdef CONFIG_CGROUP_SCHED # define INIT_CGROUP_SCHED(tsk) \ .sched_task_group = &root_task_group, diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 6a6de187ddc0ff..2e4de0deee531a 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -125,9 +125,16 @@ enum iommu_attr { }; /* These are the possible reserved region types */ -#define IOMMU_RESV_DIRECT (1 << 0) -#define IOMMU_RESV_RESERVED (1 << 1) -#define IOMMU_RESV_MSI (1 << 2) +enum iommu_resv_type { + /* Memory regions which must be mapped 1:1 at all times */ + IOMMU_RESV_DIRECT, + /* Arbitrary "never map this or give it to a device" address ranges */ + IOMMU_RESV_RESERVED, + /* Hardware MSI region (untranslated) */ + IOMMU_RESV_MSI, + /* Software-managed MSI translation window */ + IOMMU_RESV_SW_MSI, +}; /** * struct iommu_resv_region - descriptor for a reserved memory region @@ -142,7 +149,7 @@ struct iommu_resv_region { phys_addr_t start; size_t length; int prot; - int type; + enum iommu_resv_type type; }; #ifdef CONFIG_IOMMU_API @@ -288,7 +295,8 @@ extern void iommu_get_resv_regions(struct device *dev, struct list_head *list); extern void iommu_put_resv_regions(struct device *dev, struct list_head *list); extern int iommu_request_dm_for_dev(struct device *dev); extern struct iommu_resv_region * -iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, int type); +iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, + enum iommu_resv_type type); extern int 
iommu_get_group_resv_regions(struct iommu_group *group, struct list_head *head); diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 672cfef72fc85d..97cbca19430d82 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -373,6 +373,8 @@ #define ICC_IGRPEN0_EL1_MASK (1 << ICC_IGRPEN0_EL1_SHIFT) #define ICC_IGRPEN1_EL1_SHIFT 0 #define ICC_IGRPEN1_EL1_MASK (1 << ICC_IGRPEN1_EL1_SHIFT) +#define ICC_SRE_EL1_DIB (1U << 2) +#define ICC_SRE_EL1_DFB (1U << 1) #define ICC_SRE_EL1_SRE (1U << 0) /* diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 188eced6813edd..9f3616085423cf 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -524,6 +524,10 @@ static inline struct irq_domain *irq_find_matching_fwnode( { return NULL; } +static inline bool irq_domain_check_msi_remap(void) +{ + return false; +} #endif /* !CONFIG_IRQ_DOMAIN */ #endif /* _LINUX_IRQDOMAIN_H */ diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 8e06d758ee48a2..2afd74b9d84409 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -90,6 +90,13 @@ extern bool static_key_initialized; struct static_key { atomic_t enabled; /* + * Note: + * To make anonymous unions work with old compilers, the static + * initialization of them requires brackets. This creates a dependency + * on the order of the struct with the initializers. If any fields + * are added, STATIC_KEY_INIT_TRUE and STATIC_KEY_INIT_FALSE may need + * to be modified. + * * bit 0 => 1 if key is initially true * 0 if initially false * bit 1 => 1 if points to struct static_key_mod @@ -166,10 +173,10 @@ extern void static_key_disable(struct static_key *key); */ #define STATIC_KEY_INIT_TRUE \ { .enabled = { 1 }, \ - .entries = (void *)JUMP_TYPE_TRUE } + { .entries = (void *)JUMP_TYPE_TRUE } } #define STATIC_KEY_INIT_FALSE \ { .enabled = { 0 }, \ - .entries = (void *)JUMP_TYPE_FALSE } + { .entries = (void *)JUMP_TYPE_FALSE } } #else /* !HAVE_JUMP_LABEL */ diff --git a/include/linux/kasan.h b/include/linux/kasan.h index c908b25bf5a558..a5c7046f26b4b9 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -1,12 +1,12 @@ #ifndef _LINUX_KASAN_H #define _LINUX_KASAN_H -#include #include struct kmem_cache; struct page; struct vm_struct; +struct task_struct; #ifdef CONFIG_KASAN @@ -19,6 +19,7 @@ extern unsigned char kasan_zero_page[PAGE_SIZE]; extern pte_t kasan_zero_pte[PTRS_PER_PTE]; extern pmd_t kasan_zero_pmd[PTRS_PER_PMD]; extern pud_t kasan_zero_pud[PTRS_PER_PUD]; +extern p4d_t kasan_zero_p4d[PTRS_PER_P4D]; void kasan_populate_zero_shadow(const void *shadow_start, const void *shadow_end); @@ -30,16 +31,10 @@ static inline void *kasan_mem_to_shadow(const void *addr) } /* Enable reporting bugs after kasan_disable_current() */ -static inline void kasan_enable_current(void) -{ - current->kasan_depth++; -} +extern void kasan_enable_current(void); /* Disable reporting bugs for current task */ -static inline void kasan_disable_current(void) -{ - current->kasan_depth--; -} +extern void kasan_disable_current(void); void kasan_unpoison_shadow(const void *address, size_t size); @@ -81,6 +76,9 @@ size_t ksize(const void *); static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); } size_t kasan_metadata_size(struct kmem_cache *cache); +bool kasan_save_enable_multi_shot(void); +void kasan_restore_multi_shot(bool enabled); + #else /* CONFIG_KASAN */ static inline void kasan_unpoison_shadow(const void *address, size_t 
size) {} diff --git a/include/linux/key.h b/include/linux/key.h index 722914798f3749..e45212f2777e36 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -354,7 +354,10 @@ static inline bool key_is_instantiated(const struct key *key) !test_bit(KEY_FLAG_NEGATIVE, &key->flags); } -#define rcu_dereference_key(KEY) \ +#define dereference_key_rcu(KEY) \ + (rcu_dereference((KEY)->payload.rcu_data0)) + +#define dereference_key_locked(KEY) \ (rcu_dereference_protected((KEY)->payload.rcu_data0, \ rwsem_is_locked(&((struct key *)(KEY))->sem))) diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h index 1e032a1ddb3eaa..5d9a400af5091f 100644 --- a/include/linux/khugepaged.h +++ b/include/linux/khugepaged.h @@ -1,7 +1,8 @@ #ifndef _LINUX_KHUGEPAGED_H #define _LINUX_KHUGEPAGED_H -#include /* MMF_VM_HUGEPAGE */ +#include /* MMF_VM_HUGEPAGE */ + #ifdef CONFIG_TRANSPARENT_HUGEPAGE extern struct attribute_group khugepaged_attr_group; diff --git a/include/linux/ksm.h b/include/linux/ksm.h index 481c8c4627ca22..e1cfda4bee588d 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -12,6 +12,7 @@ #include #include #include +#include struct stable_node; struct mem_cgroup; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 8d69d515074838..d0250744507a28 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -161,8 +162,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, int len, void *val); int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len, struct kvm_io_device *dev); -int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, - struct kvm_io_device *dev); +void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, + struct kvm_io_device *dev); struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr); @@ -401,7 +402,7 @@ struct kvm { #endif struct kvm_vm_stat stat; struct kvm_arch arch; - atomic_t users_count; + refcount_t users_count; #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; spinlock_t ring_lock; diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 8458c5351e562e..77e7af32543f69 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -70,6 +70,8 @@ struct nd_cmd_desc { struct nd_interleave_set { u64 cookie; + /* compatibility with initial buggy Linux implementation */ + u64 altcookie; }; struct nd_mapping_desc { diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h index b01fe100908430..87ff4f58a2f018 100644 --- a/include/linux/list_nulls.h +++ b/include/linux/list_nulls.h @@ -29,6 +29,11 @@ struct hlist_nulls_node { ((ptr)->first = (struct hlist_nulls_node *) NULLS_MARKER(nulls)) #define hlist_nulls_entry(ptr, type, member) container_of(ptr,type,member) + +#define hlist_nulls_entry_safe(ptr, type, member) \ + ({ typeof(ptr) ____ptr = (ptr); \ + !is_a_nulls(____ptr) ? 
hlist_nulls_entry(____ptr, type, member) : NULL; \ + }) /** * ptr_is_a_nulls - Test if a ptr is a nulls * @ptr: ptr to be tested diff --git a/include/linux/log2.h b/include/linux/log2.h index ef3d4f67118ce0..c373295f359fa5 100644 --- a/include/linux/log2.h +++ b/include/linux/log2.h @@ -15,12 +15,6 @@ #include #include -/* - * deal with unrepresentable constant logarithms - */ -extern __attribute__((const, noreturn)) -int ____ilog2_NaN(void); - /* * non-constant log of base 2 calculators * - the arch may override these in asm/bitops.h if they can be implemented @@ -85,7 +79,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) #define ilog2(n) \ ( \ __builtin_constant_p(n) ? ( \ - (n) < 1 ? ____ilog2_NaN() : \ + (n) < 2 ? 0 : \ (n) & (1ULL << 63) ? 63 : \ (n) & (1ULL << 62) ? 62 : \ (n) & (1ULL << 61) ? 61 : \ @@ -148,10 +142,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) (n) & (1ULL << 4) ? 4 : \ (n) & (1ULL << 3) ? 3 : \ (n) & (1ULL << 2) ? 2 : \ - (n) & (1ULL << 1) ? 1 : \ - (n) & (1ULL << 0) ? 0 : \ - ____ilog2_NaN() \ - ) : \ + 1 ) : \ (sizeof(n) <= 4) ? \ __ilog2_u32(n) : \ __ilog2_u64(n) \ diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 5af37730388074..bb7250c45cb835 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -740,6 +740,12 @@ static inline bool mem_cgroup_oom_synchronize(bool wait) return false; } +static inline void mem_cgroup_update_page_stat(struct page *page, + enum mem_cgroup_stat_index idx, + int nr) +{ +} + static inline void mem_cgroup_inc_page_stat(struct page *page, enum mem_cgroup_stat_index idx) { diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h index 7a01c94496f14e..3eef9fb9968ae7 100644 --- a/include/linux/mfd/cros_ec.h +++ b/include/linux/mfd/cros_ec.h @@ -35,10 +35,11 @@ * Max bus-specific overhead incurred by request/responses. * I2C requires 1 additional byte for requests. * I2C requires 2 additional bytes for responses. + * SPI requires up to 32 additional bytes for responses. * */ #define EC_PROTO_VERSION_UNKNOWN 0 #define EC_MAX_REQUEST_OVERHEAD 1 -#define EC_MAX_RESPONSE_OVERHEAD 2 +#define EC_MAX_RESPONSE_OVERHEAD 32 /* * Command interface between EC and AP, for LPC, I2C and SPI interfaces. 
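/*
 * Aside on the log2.h hunk above (illustrative only, not part of the patch):
 * with ____ilog2_NaN() gone, a constant argument below 2 now folds to 0 at
 * compile time instead of producing a link error. The helper name below is
 * made up; the BUILD_BUG_ON()s only check the constant folding.
 */
#include <linux/bug.h>
#include <linux/log2.h>

static inline void ilog2_constant_folding_example(void)
{
	BUILD_BUG_ON(ilog2(1) != 0);		/* (n) < 2  ->  0               */
	BUILD_BUG_ON(ilog2(3) != 1);		/* falls through to the final 1 */
	BUILD_BUG_ON(ilog2(4096) != 12);	/* powers of two are unchanged  */
}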
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 7e66e4f62858f3..1beb1ec2fbdf33 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -476,6 +476,7 @@ enum { enum { MLX4_INTERFACE_STATE_UP = 1 << 0, MLX4_INTERFACE_STATE_DELETION = 1 << 1, + MLX4_INTERFACE_STATE_NOWAIT = 1 << 2, }; #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \ diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h index e965e5090d9622..a858bcb6220b5d 100644 --- a/include/linux/mlx4/driver.h +++ b/include/linux/mlx4/driver.h @@ -109,7 +109,7 @@ static inline void mlx4_u64_to_mac(u8 *addr, u64 mac) int i; for (i = ETH_ALEN; i > 0; i--) { - addr[i - 1] = mac && 0xFF; + addr[i - 1] = mac & 0xFF; mac >>= 8; } } diff --git a/include/linux/mm.h b/include/linux/mm.h index 0d65dd72c0f49e..00a8fa7e366a03 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -32,6 +32,8 @@ struct user_struct; struct writeback_control; struct bdi_writeback; +void init_mm_internals(void); + #ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */ extern unsigned long max_mapnr; @@ -1560,14 +1562,24 @@ static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, return ptep; } +#ifdef __PAGETABLE_P4D_FOLDED +static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, + unsigned long address) +{ + return 0; +} +#else +int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address); +#endif + #ifdef __PAGETABLE_PUD_FOLDED -static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, +static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address) { return 0; } #else -int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address); +int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address); #endif #if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU) @@ -1619,11 +1631,22 @@ int __pte_alloc_kernel(pmd_t *pmd, unsigned long address); * Remove it when 4level-fixup.h has been removed. */ #if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK) -static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) + +#ifndef __ARCH_HAS_5LEVEL_HACK +static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd, + unsigned long address) +{ + return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ? + NULL : p4d_offset(pgd, address); +} + +static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d, + unsigned long address) { - return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))? - NULL: pud_offset(pgd, address); + return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ? 
+ NULL : pud_offset(p4d, address); } +#endif /* !__ARCH_HAS_5LEVEL_HACK */ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) { @@ -2385,7 +2408,8 @@ void sparse_mem_maps_populate_node(struct page **map_map, struct page *sparse_mem_map_populate(unsigned long pnum, int nid); pgd_t *vmemmap_pgd_populate(unsigned long addr, int node); -pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node); +p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node); +pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node); pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node); pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node); void *vmemmap_alloc_block(unsigned long size, int node); diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 4f6d440ad78551..f60f45fe226fca 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -1,9 +1,9 @@ #ifndef _LINUX_MM_TYPES_H #define _LINUX_MM_TYPES_H +#include + #include -#include -#include #include #include #include @@ -13,7 +13,7 @@ #include #include #include -#include + #include #ifndef AT_VECTOR_SIZE_ARCH @@ -24,11 +24,6 @@ struct address_space; struct mem_cgroup; -#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) -#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \ - IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK)) -#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8) - /* * Each physical page in the system has a struct page associated with * it to keep track of whatever it is we are using the page for at the @@ -231,17 +226,6 @@ struct page { #endif ; -struct page_frag { - struct page *page; -#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) - __u32 offset; - __u32 size; -#else - __u16 offset; - __u16 size; -#endif -}; - #define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK) #define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE) @@ -371,27 +355,6 @@ struct core_state { struct completion startup; }; -enum { - MM_FILEPAGES, /* Resident file mapping pages */ - MM_ANONPAGES, /* Resident anonymous pages */ - MM_SWAPENTS, /* Anonymous swap entries */ - MM_SHMEMPAGES, /* Resident shared memory pages */ - NR_MM_COUNTERS -}; - -#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU) -#define SPLIT_RSS_COUNTING -/* per-thread cached information, */ -struct task_rss_stat { - int events; /* for synchronization threshold */ - int count[NR_MM_COUNTERS]; -}; -#endif /* USE_SPLIT_PTE_PTLOCKS */ - -struct mm_rss_stat { - atomic_long_t count[NR_MM_COUNTERS]; -}; - struct kioctx_table; struct mm_struct { struct vm_area_struct *mmap; /* list of VMAs */ @@ -534,6 +497,8 @@ struct mm_struct { struct work_struct async_put_work; }; +extern struct mm_struct init_mm; + static inline void mm_init_cpumask(struct mm_struct *mm) { #ifdef CONFIG_CPUMASK_OFFSTACK diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h new file mode 100644 index 00000000000000..136dfdf63ba10e --- /dev/null +++ b/include/linux/mm_types_task.h @@ -0,0 +1,87 @@ +#ifndef _LINUX_MM_TYPES_TASK_H +#define _LINUX_MM_TYPES_TASK_H + +/* + * Here are the definitions of the MM data types that are embedded in 'struct task_struct'. + * + * (These are defined separately to decouple sched.h from mm_types.h as much as possible.) 
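/*
 * Sketch for the mm.h hunk above (hedged; the helper name is invented):
 * with the new p4d level, a walk-and-allocate path gains one step between
 * pgd and pud, and pud_alloc() is now keyed off the p4d entry.
 */
#include <linux/mm.h>

static pmd_t *walk_alloc_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d = p4d_alloc(mm, pgd, addr);	/* new intermediate level */
	pud_t *pud;

	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, addr);		/* takes a p4d_t * now */
	if (!pud)
		return NULL;
	return pmd_alloc(mm, pud, addr);
}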
+ */ + +#include +#include +#include +#include + +#include + +#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) +#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \ + IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK)) +#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8) + +/* + * The per task VMA cache array: + */ +#define VMACACHE_BITS 2 +#define VMACACHE_SIZE (1U << VMACACHE_BITS) +#define VMACACHE_MASK (VMACACHE_SIZE - 1) + +struct vmacache { + u32 seqnum; + struct vm_area_struct *vmas[VMACACHE_SIZE]; +}; + +enum { + MM_FILEPAGES, /* Resident file mapping pages */ + MM_ANONPAGES, /* Resident anonymous pages */ + MM_SWAPENTS, /* Anonymous swap entries */ + MM_SHMEMPAGES, /* Resident shared memory pages */ + NR_MM_COUNTERS +}; + +#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU) +#define SPLIT_RSS_COUNTING +/* per-thread cached information, */ +struct task_rss_stat { + int events; /* for synchronization threshold */ + int count[NR_MM_COUNTERS]; +}; +#endif /* USE_SPLIT_PTE_PTLOCKS */ + +struct mm_rss_stat { + atomic_long_t count[NR_MM_COUNTERS]; +}; + +struct page_frag { + struct page *page; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + __u32 offset; + __u32 size; +#else + __u16 offset; + __u16 size; +#endif +}; + +/* Track pages that require TLB flushes */ +struct tlbflush_unmap_batch { +#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH + /* + * Each bit set is a CPU that potentially has a TLB entry for one of + * the PFNs being flushed. See set_tlb_ubc_flush_pending(). + */ + struct cpumask cpumask; + + /* True if any bit in cpumask is set */ + bool flush_required; + + /* + * If true then the PTE was dirty when unmapped. The entry must be + * flushed before IO is initiated or a stale TLB entry potentially + * allows an update without redirtying the page. 
+ */ + bool writable; +#endif +}; + +#endif /* _LINUX_MM_TYPES_TASK_H */ diff --git a/include/linux/net.h b/include/linux/net.h index cd0c8bd0a1dec0..0620f5e18c96b7 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -146,7 +146,7 @@ struct proto_ops { int (*socketpair)(struct socket *sock1, struct socket *sock2); int (*accept) (struct socket *sock, - struct socket *newsock, int flags); + struct socket *newsock, int flags, bool kern); int (*getname) (struct socket *sock, struct sockaddr *addr, int *sockaddr_len, int peer); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index f40f0ab3847a8c..97456b2539e46d 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -330,6 +330,7 @@ struct napi_struct { enum { NAPI_STATE_SCHED, /* Poll is scheduled */ + NAPI_STATE_MISSED, /* reschedule a napi */ NAPI_STATE_DISABLE, /* Disable pending */ NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */ NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */ @@ -338,12 +339,13 @@ enum { }; enum { - NAPIF_STATE_SCHED = (1UL << NAPI_STATE_SCHED), - NAPIF_STATE_DISABLE = (1UL << NAPI_STATE_DISABLE), - NAPIF_STATE_NPSVC = (1UL << NAPI_STATE_NPSVC), - NAPIF_STATE_HASHED = (1UL << NAPI_STATE_HASHED), - NAPIF_STATE_NO_BUSY_POLL = (1UL << NAPI_STATE_NO_BUSY_POLL), - NAPIF_STATE_IN_BUSY_POLL = (1UL << NAPI_STATE_IN_BUSY_POLL), + NAPIF_STATE_SCHED = BIT(NAPI_STATE_SCHED), + NAPIF_STATE_MISSED = BIT(NAPI_STATE_MISSED), + NAPIF_STATE_DISABLE = BIT(NAPI_STATE_DISABLE), + NAPIF_STATE_NPSVC = BIT(NAPI_STATE_NPSVC), + NAPIF_STATE_HASHED = BIT(NAPI_STATE_HASHED), + NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL), + NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL), }; enum gro_result { @@ -414,20 +416,7 @@ static inline bool napi_disable_pending(struct napi_struct *n) return test_bit(NAPI_STATE_DISABLE, &n->state); } -/** - * napi_schedule_prep - check if NAPI can be scheduled - * @n: NAPI context - * - * Test if NAPI routine is already running, and if not mark - * it as running. This is used as a condition variable to - * insure only one NAPI poll instance runs. We also make - * sure there is no pending NAPI disable. 
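/*
 * Sketch for the netdevice.h hunk above (hedged): napi_schedule_prep()
 * keeps its calling convention even though it is now out of line, so the
 * usual interrupt-handler pattern is unchanged. The private struct and
 * the handler below are invented for illustration.
 */
#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct example_priv {
	struct napi_struct napi;
	/* ... device state ... */
};

static irqreturn_t example_rx_irq(int irq, void *data)
{
	struct example_priv *priv = data;

	if (napi_schedule_prep(&priv->napi)) {
		/* mask device RX interrupts here, then hand off to NAPI */
		__napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}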
- */ -static inline bool napi_schedule_prep(struct napi_struct *n) -{ - return !napi_disable_pending(n) && - !test_and_set_bit(NAPI_STATE_SCHED, &n->state); -} +bool napi_schedule_prep(struct napi_struct *n); /** * napi_schedule - schedule NAPI poll diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index f1da8c8dd47386..287f341610864f 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -335,7 +335,7 @@ extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *); extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr); extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr); extern int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fattr *fattr); -extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *); +extern int nfs_getattr(const struct path *, struct kstat *, u32, unsigned int); extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *); extern void nfs_access_set_mask(struct nfs_access_entry *, u32); extern int nfs_permission(struct inode *, int); diff --git a/include/linux/nmi.h b/include/linux/nmi.h index 0a3fadc32693a9..aa3cd087827038 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -7,6 +7,43 @@ #include #include +#ifdef CONFIG_LOCKUP_DETECTOR +extern void touch_softlockup_watchdog_sched(void); +extern void touch_softlockup_watchdog(void); +extern void touch_softlockup_watchdog_sync(void); +extern void touch_all_softlockup_watchdogs(void); +extern int proc_dowatchdog_thresh(struct ctl_table *table, int write, + void __user *buffer, + size_t *lenp, loff_t *ppos); +extern unsigned int softlockup_panic; +extern unsigned int hardlockup_panic; +void lockup_detector_init(void); +#else +static inline void touch_softlockup_watchdog_sched(void) +{ +} +static inline void touch_softlockup_watchdog(void) +{ +} +static inline void touch_softlockup_watchdog_sync(void) +{ +} +static inline void touch_all_softlockup_watchdogs(void) +{ +} +static inline void lockup_detector_init(void) +{ +} +#endif + +#ifdef CONFIG_DETECT_HUNG_TASK +void reset_hung_task_detector(void); +#else +static inline void reset_hung_task_detector(void) +{ +} +#endif + /* * The run state of the lockup detectors is controlled by the content of the * 'watchdog_enabled' variable. 
Each lockup detector has its dedicated bit - diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h index 35d0fd7a4948e6..fd0de00c0d777e 100644 --- a/include/linux/omap-gpmc.h +++ b/include/linux/omap-gpmc.h @@ -76,22 +76,12 @@ struct gpmc_timings; struct omap_nand_platform_data; struct omap_onenand_platform_data; -#if IS_ENABLED(CONFIG_MTD_NAND_OMAP2) -extern int gpmc_nand_init(struct omap_nand_platform_data *d, - struct gpmc_timings *gpmc_t); -#else -static inline int gpmc_nand_init(struct omap_nand_platform_data *d, - struct gpmc_timings *gpmc_t) -{ - return 0; -} -#endif - #if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2) -extern void gpmc_onenand_init(struct omap_onenand_platform_data *d); +extern int gpmc_onenand_init(struct omap_onenand_platform_data *d); #else #define board_onenand_data NULL -static inline void gpmc_onenand_init(struct omap_onenand_platform_data *d) +static inline int gpmc_onenand_init(struct omap_onenand_platform_data *d) { + return 0; } #endif diff --git a/include/linux/oom.h b/include/linux/oom.h index b4e36e92bc878f..8a266e2be5a63a 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -2,7 +2,7 @@ #define __INCLUDE_LINUX_OOM_H -#include +#include #include #include #include diff --git a/include/linux/pci.h b/include/linux/pci.h index 282ed32244ce79..eb3da1a04e6cdc 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1323,6 +1323,7 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, void pci_free_irq_vectors(struct pci_dev *dev); int pci_irq_vector(struct pci_dev *dev, unsigned int nr); const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec); +int pci_irq_get_node(struct pci_dev *pdev, int vec); #else static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; } @@ -1370,6 +1371,11 @@ static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, { return cpu_possible_mask; } + +static inline int pci_irq_get_node(struct pci_dev *pdev, int vec) +{ + return first_online_node; +} #endif static inline int diff --git a/include/linux/perf_regs.h b/include/linux/perf_regs.h index a5f98d53d7325b..9b7dd59fe28d5f 100644 --- a/include/linux/perf_regs.h +++ b/include/linux/perf_regs.h @@ -1,6 +1,8 @@ #ifndef _LINUX_PERF_REGS_H #define _LINUX_PERF_REGS_H +#include + struct perf_regs { __u64 abi; struct pt_regs *regs; diff --git a/include/linux/phy.h b/include/linux/phy.h index 772476028a6507..43a774873aa96d 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -837,6 +837,10 @@ int genphy_read_status(struct phy_device *phydev); int genphy_suspend(struct phy_device *phydev); int genphy_resume(struct phy_device *phydev); int genphy_soft_reset(struct phy_device *phydev); +static inline int genphy_no_soft_reset(struct phy_device *phydev) +{ + return 0; +} void phy_driver_unregister(struct phy_driver *drv); void phy_drivers_unregister(struct phy_driver *drv, int n); int phy_driver_register(struct phy_driver *new_driver, struct module *owner); diff --git a/include/linux/pid.h b/include/linux/pid.h index 298ead5512e55d..4d179316e43108 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -1,7 +1,7 @@ #ifndef _LINUX_PID_H #define _LINUX_PID_H -#include +#include enum pid_type { diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h index d4d34791e4635f..032b559091450a 100644 --- a/include/linux/pm_qos.h +++ b/include/linux/pm_qos.h @@ -146,8 +146,6 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier); int 
dev_pm_qos_remove_notifier(struct device *dev, struct notifier_block *notifier); -int dev_pm_qos_add_global_notifier(struct notifier_block *notifier); -int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier); void dev_pm_qos_constraints_init(struct device *dev); void dev_pm_qos_constraints_destroy(struct device *dev); int dev_pm_qos_add_ancestor_request(struct device *dev, @@ -172,6 +170,12 @@ static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return dev->power.qos->flags_req->data.flr.flags; } + +static inline s32 dev_pm_qos_raw_read_value(struct device *dev) +{ + return IS_ERR_OR_NULL(dev->power.qos) ? + 0 : pm_qos_read_value(&dev->power.qos->resume_latency); +} #else static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask) @@ -199,12 +203,6 @@ static inline int dev_pm_qos_add_notifier(struct device *dev, static inline int dev_pm_qos_remove_notifier(struct device *dev, struct notifier_block *notifier) { return 0; } -static inline int dev_pm_qos_add_global_notifier( - struct notifier_block *notifier) - { return 0; } -static inline int dev_pm_qos_remove_global_notifier( - struct notifier_block *notifier) - { return 0; } static inline void dev_pm_qos_constraints_init(struct device *dev) { dev->power.power_state = PMSG_ON; @@ -236,6 +234,7 @@ static inline void dev_pm_qos_hide_latency_tolerance(struct device *dev) {} static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; } static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; } +static inline s32 dev_pm_qos_raw_read_value(struct device *dev) { return 0; } #endif #endif diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 7eeceac52dea25..cae461224948a7 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -55,6 +55,27 @@ /* We use the MSB mostly because its available */ #define PREEMPT_NEED_RESCHED 0x80000000 +#define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) + +/* + * Disable preemption until the scheduler is running -- use an unconditional + * value so that it also works on !PREEMPT_COUNT kernels. + * + * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count(). + */ +#define INIT_PREEMPT_COUNT PREEMPT_OFFSET + +/* + * Initial preempt_count value; reflects the preempt_count schedule invariant + * which states that during context switches: + * + * preempt_count() == 2*PREEMPT_DISABLE_OFFSET + * + * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels. + * Note: See finish_task_switch(). + */ +#define FORK_PREEMPT_COUNT (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) + /* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */ #include diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index e0e539321ab95c..422bc2e4cb6a6f 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -3,6 +3,7 @@ #include /* For unlikely. */ #include /* For struct task_struct. */ +#include /* For send_sig(), same_thread_group(), etc. */ #include /* for IS_ERR_VALUE */ #include /* For BUG_ON. */ #include /* For task_active_pid_ns. 
*/ diff --git a/include/linux/purgatory.h b/include/linux/purgatory.h new file mode 100644 index 00000000000000..d60d4e27860960 --- /dev/null +++ b/include/linux/purgatory.h @@ -0,0 +1,23 @@ +#ifndef _LINUX_PURGATORY_H +#define _LINUX_PURGATORY_H + +#include +#include +#include + +struct kexec_sha_region { + unsigned long start; + unsigned long len; +}; + +/* + * These forward declarations serve two purposes: + * + * 1) Make sparse happy when checking arch/purgatory + * 2) Document that these are required to be global so the symbol + * lookup in kexec works + */ +extern struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX]; +extern u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE]; + +#endif diff --git a/include/linux/random.h b/include/linux/random.h index 7bd2403e4fef1a..ed5c3838780de5 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -37,14 +37,26 @@ extern void get_random_bytes(void *buf, int nbytes); extern int add_random_ready_callback(struct random_ready_callback *rdy); extern void del_random_ready_callback(struct random_ready_callback *rdy); extern void get_random_bytes_arch(void *buf, int nbytes); -extern int random_int_secret_init(void); #ifndef MODULE extern const struct file_operations random_fops, urandom_fops; #endif -unsigned int get_random_int(void); -unsigned long get_random_long(void); +u32 get_random_u32(void); +u64 get_random_u64(void); +static inline unsigned int get_random_int(void) +{ + return get_random_u32(); +} +static inline unsigned long get_random_long(void) +{ +#if BITS_PER_LONG == 64 + return get_random_u64(); +#else + return get_random_u32(); +#endif +} + unsigned long randomize_page(unsigned long start, unsigned long range); u32 prandom_u32(void); diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h index 4ae95f7e8597b0..a23a3315318048 100644 --- a/include/linux/rculist_nulls.h +++ b/include/linux/rculist_nulls.h @@ -156,5 +156,19 @@ static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n, ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \ pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos))) +/** + * hlist_nulls_for_each_entry_safe - + * iterate over list of given type safe against removal of list entry + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_nulls_node to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the hlist_nulls_node within the struct. + */ +#define hlist_nulls_for_each_entry_safe(tpos, pos, head, member) \ + for (({barrier();}), \ + pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \ + (!is_a_nulls(pos)) && \ + ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); \ + pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)); 1; });) #endif #endif diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 6ade6a52d9d42b..de88b33c097487 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -40,7 +40,6 @@ #include #include #include -#include #include #include #include @@ -226,45 +225,6 @@ void call_rcu_sched(struct rcu_head *head, void synchronize_sched(void); -/* - * Structure allowing asynchronous waiting on RCU. - */ -struct rcu_synchronize { - struct rcu_head head; - struct completion completion; -}; -void wakeme_after_rcu(struct rcu_head *head); - -void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array, - struct rcu_synchronize *rs_array); - -#define _wait_rcu_gp(checktiny, ...) 
\ -do { \ - call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ - struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \ - __wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array), \ - __crcu_array, __rs_array); \ -} while (0) - -#define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__) - -/** - * synchronize_rcu_mult - Wait concurrently for multiple grace periods - * @...: List of call_rcu() functions for the flavors to wait on. - * - * This macro waits concurrently for multiple flavors of RCU grace periods. - * For example, synchronize_rcu_mult(call_rcu, call_rcu_bh) would wait - * on concurrent RCU and RCU-bh grace periods. Waiting on a give SRCU - * domain requires you to write a wrapper function for that SRCU domain's - * call_srcu() function, supplying the corresponding srcu_struct. - * - * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU - * or RCU-bh, given that anywhere synchronize_rcu_mult() can be called - * is automatically a grace period. - */ -#define synchronize_rcu_mult(...) \ - _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__) - /** * call_rcu_tasks() - Queue an RCU for invocation task-based grace period * @head: structure to be used for queueing the RCU updates. diff --git a/include/linux/rcupdate_wait.h b/include/linux/rcupdate_wait.h new file mode 100644 index 00000000000000..e774b4f5f220da --- /dev/null +++ b/include/linux/rcupdate_wait.h @@ -0,0 +1,50 @@ +#ifndef _LINUX_SCHED_RCUPDATE_WAIT_H +#define _LINUX_SCHED_RCUPDATE_WAIT_H + +/* + * RCU synchronization types and methods: + */ + +#include +#include + +/* + * Structure allowing asynchronous waiting on RCU. + */ +struct rcu_synchronize { + struct rcu_head head; + struct completion completion; +}; +void wakeme_after_rcu(struct rcu_head *head); + +void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array, + struct rcu_synchronize *rs_array); + +#define _wait_rcu_gp(checktiny, ...) \ +do { \ + call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ + struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \ + __wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array), \ + __crcu_array, __rs_array); \ +} while (0) + +#define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__) + +/** + * synchronize_rcu_mult - Wait concurrently for multiple grace periods + * @...: List of call_rcu() functions for the flavors to wait on. + * + * This macro waits concurrently for multiple flavors of RCU grace periods. + * For example, synchronize_rcu_mult(call_rcu, call_rcu_bh) would wait + * on concurrent RCU and RCU-bh grace periods. Waiting on a give SRCU + * domain requires you to write a wrapper function for that SRCU domain's + * call_srcu() function, supplying the corresponding srcu_struct. + * + * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU + * or RCU-bh, given that anywhere synchronize_rcu_mult() can be called + * is automatically a grace period. + */ +#define synchronize_rcu_mult(...) 
\ + _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__) + +#endif /* _LINUX_SCHED_RCUPDATE_WAIT_H */ diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 4f9b2fa2173d69..b452953e21c8ae 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -53,15 +53,8 @@ static inline void cond_synchronize_sched(unsigned long oldstate) might_sleep(); } -static inline void rcu_barrier_bh(void) -{ - wait_rcu_gp(call_rcu_bh); -} - -static inline void rcu_barrier_sched(void) -{ - wait_rcu_gp(call_rcu_sched); -} +extern void rcu_barrier_bh(void); +extern void rcu_barrier_sched(void); static inline void synchronize_rcu_expedited(void) { diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index ad3e5158e586dc..c9f795e9a2ee26 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h @@ -65,7 +65,7 @@ struct regulator_state { int uV; /* suspend voltage */ unsigned int mode; /* suspend regulator operating mode */ int enabled; /* is regulator enabled in this suspend state */ - int disabled; /* is the regulator disbled in this suspend state */ + int disabled; /* is the regulator disabled in this suspend state */ }; /** diff --git a/include/linux/reset.h b/include/linux/reset.h index 86b4ed75359e85..96fb139bdd08fd 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -31,31 +31,26 @@ static inline int device_reset_optional(struct device *dev) static inline int reset_control_reset(struct reset_control *rstc) { - WARN_ON(1); return 0; } static inline int reset_control_assert(struct reset_control *rstc) { - WARN_ON(1); return 0; } static inline int reset_control_deassert(struct reset_control *rstc) { - WARN_ON(1); return 0; } static inline int reset_control_status(struct reset_control *rstc) { - WARN_ON(1); return 0; } static inline void reset_control_put(struct reset_control *rstc) { - WARN_ON(1); } static inline int __must_check device_reset(struct device *dev) @@ -74,14 +69,14 @@ static inline struct reset_control *__of_reset_control_get( const char *id, int index, bool shared, bool optional) { - return ERR_PTR(-ENOTSUPP); + return optional ? NULL : ERR_PTR(-ENOTSUPP); } static inline struct reset_control *__devm_reset_control_get( struct device *dev, const char *id, int index, bool shared, bool optional) { - return ERR_PTR(-ENOTSUPP); + return optional ? NULL : ERR_PTR(-ENOTSUPP); } #endif /* CONFIG_RESET_CONTROLLER */ diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index f2e12a8459100e..092292b6675e2c 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -25,7 +25,7 @@ #include #include #include -#include +#include /* * The end of the chain is marked with a special nulls marks which has diff --git a/include/linux/sched.h b/include/linux/sched.h index 4a28deb5f210a1..d67eee84fd430f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1,197 +1,57 @@ #ifndef _LINUX_SCHED_H #define _LINUX_SCHED_H -#include - -#include - - -struct sched_param { - int sched_priority; -}; - -#include /* for HZ */ +/* + * Define 'struct task_struct' and provide the main scheduler + * APIs (schedule(), wakeup variants, etc.) 
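/*
 * Sketch for the reset.h hunk above (hedged; the probe body is invented
 * and assumes the optional_exclusive getter wrapper): an optional reset
 * line now behaves the same whether or not CONFIG_RESET_CONTROLLER is
 * built in -- the stubbed getter returns NULL rather than
 * ERR_PTR(-ENOTSUPP), real errors still propagate, and the stubbed
 * reset_control_*() calls quietly return 0 instead of warning.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

static int example_probe(struct device *dev)
{
	struct reset_control *rst;

	rst = devm_reset_control_get_optional_exclusive(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);	/* genuine failure */

	/* rst may be NULL here; asserting/deasserting is then a no-op */
	reset_control_assert(rst);
	reset_control_deassert(rst);
	return 0;
}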
+ */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include -#include -#include +#include -#include +#include #include #include -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include #include +#include #include -#include -#include - -#include -#include #include -#include -#include -#include -#include #include -#include -#include -#include -#include -#include -#include - -#include - -#define SCHED_ATTR_SIZE_VER0 48 /* sizeof first published struct */ - -/* - * Extended scheduling parameters data structure. - * - * This is needed because the original struct sched_param can not be - * altered without introducing ABI issues with legacy applications - * (e.g., in sched_getparam()). - * - * However, the possibility of specifying more than just a priority for - * the tasks may be useful for a wide variety of application fields, e.g., - * multimedia, streaming, automation and control, and many others. - * - * This variant (sched_attr) is meant at describing a so-called - * sporadic time-constrained task. In such model a task is specified by: - * - the activation period or minimum instance inter-arrival time; - * - the maximum (or average, depending on the actual scheduling - * discipline) computation time of all instances, a.k.a. runtime; - * - the deadline (relative to the actual activation time) of each - * instance. - * Very briefly, a periodic (sporadic) task asks for the execution of - * some specific computation --which is typically called an instance-- - * (at most) every period. Moreover, each instance typically lasts no more - * than the runtime and must be completed by time instant t equal to - * the instance activation time + the deadline. - * - * This is reflected by the actual fields of the sched_attr structure: - * - * @size size of the structure, for fwd/bwd compat. - * - * @sched_policy task's scheduling policy - * @sched_flags for customizing the scheduler behaviour - * @sched_nice task's nice value (SCHED_NORMAL/BATCH) - * @sched_priority task's static priority (SCHED_FIFO/RR) - * @sched_deadline representative of the task's deadline - * @sched_runtime representative of the task's runtime - * @sched_period representative of the task's period - * - * Given this task model, there are a multiplicity of scheduling algorithms - * and policies, that can be used to ensure all the tasks will make their - * timing constraints. - * - * As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the - * only user of this new interface. More information about the algorithm - * available in the scheduling class file or in Documentation/. 
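/*
 * Worked instance of the sporadic-task fields documented above (hedged
 * userspace sketch; the struct mirrors the field order shown, the values
 * are illustrative, and the libc is assumed to define SYS_sched_setattr):
 * at most 5 ms of runtime in every 20 ms period, with each instance due
 * 10 ms after activation. All three times are given in nanoseconds.
 */
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE 6
#endif

struct sched_attr_example {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;
	uint64_t sched_deadline;
	uint64_t sched_period;
};

static long set_deadline_self(void)
{
	struct sched_attr_example attr;

	memset(&attr, 0, sizeof(attr));
	attr.size           = sizeof(attr);
	attr.sched_policy   = SCHED_DEADLINE;
	attr.sched_runtime  =  5 * 1000 * 1000ULL;	/*  5 ms */
	attr.sched_deadline = 10 * 1000 * 1000ULL;	/* 10 ms */
	attr.sched_period   = 20 * 1000 * 1000ULL;	/* 20 ms */

	return syscall(SYS_sched_setattr, 0, &attr, 0);
}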
- */ -struct sched_attr { - u32 size; - - u32 sched_policy; - u64 sched_flags; - - /* SCHED_NORMAL, SCHED_BATCH */ - s32 sched_nice; - - /* SCHED_FIFO, SCHED_RR */ - u32 sched_priority; - - /* SCHED_DEADLINE */ - u64 sched_runtime; - u64 sched_deadline; - u64 sched_period; -}; +#include +#include +#include +#include -struct futex_pi_state; -struct robust_list_head; +/* task_struct member predeclarations (sorted alphabetically): */ +struct audit_context; +struct backing_dev_info; struct bio_list; -struct fs_struct; -struct perf_event_context; struct blk_plug; -struct filename; +struct cfs_rq; +struct fs_struct; +struct futex_pi_state; +struct io_context; +struct mempolicy; struct nameidata; - -#define VMACACHE_BITS 2 -#define VMACACHE_SIZE (1U << VMACACHE_BITS) -#define VMACACHE_MASK (VMACACHE_SIZE - 1) - -/* - * These are the constant used to fake the fixed-point load-average - * counting. Some notes: - * - 11 bit fractions expand to 22 bits by the multiplies: this gives - * a load-average precision of 10 bits integer + 11 bits fractional - * - if you want to count load-averages more often, you need more - * precision, or rounding will get you. With 2-second counting freq, - * the EXP_n values would be 1981, 2034 and 2043 if still using only - * 11 bit fractions. - */ -extern unsigned long avenrun[]; /* Load averages */ -extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift); - -#define FSHIFT 11 /* nr of bits of precision */ -#define FIXED_1 (1<>= FSHIFT; - -extern unsigned long total_forks; -extern int nr_threads; -DECLARE_PER_CPU(unsigned long, process_counts); -extern int nr_processes(void); -extern unsigned long nr_running(void); -extern bool single_task_running(void); -extern unsigned long nr_iowait(void); -extern unsigned long nr_iowait_cpu(int cpu); -extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load); - -extern void calc_global_load(unsigned long ticks); - -#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) -extern void cpu_load_update_nohz_start(void); -extern void cpu_load_update_nohz_stop(void); -#else -static inline void cpu_load_update_nohz_start(void) { } -static inline void cpu_load_update_nohz_stop(void) { } -#endif - -extern void dump_cpu_task(int cpu); - +struct nsproxy; +struct perf_event_context; +struct pid_namespace; +struct pipe_inode_info; +struct rcu_node; +struct reclaim_state; +struct robust_list_head; +struct sched_attr; +struct sched_param; struct seq_file; -struct cfs_rq; +struct sighand_struct; +struct signal_struct; +struct task_delay_info; struct task_group; -#ifdef CONFIG_SCHED_DEBUG -extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m); -extern void proc_sched_set_task(struct task_struct *p); -#endif /* * Task state bitmask. NOTE! These bits are also @@ -203,53 +63,53 @@ extern void proc_sched_set_task(struct task_struct *p); * modifying one set can't modify the other one by * mistake. 
*/ -#define TASK_RUNNING 0 -#define TASK_INTERRUPTIBLE 1 -#define TASK_UNINTERRUPTIBLE 2 -#define __TASK_STOPPED 4 -#define __TASK_TRACED 8 -/* in tsk->exit_state */ -#define EXIT_DEAD 16 -#define EXIT_ZOMBIE 32 -#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD) -/* in tsk->state again */ -#define TASK_DEAD 64 -#define TASK_WAKEKILL 128 -#define TASK_WAKING 256 -#define TASK_PARKED 512 -#define TASK_NOLOAD 1024 -#define TASK_NEW 2048 -#define TASK_STATE_MAX 4096 - -#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn" - -extern char ___assert_task_state[1 - 2*!!( - sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)]; - -/* Convenience macros for the sake of set_current_state */ -#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) -#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED) -#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED) - -#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD) - -/* Convenience macros for the sake of wake_up */ -#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE) -#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED) - -/* get_task_state() */ -#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \ - TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \ - __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD) - -#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) -#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) -#define task_is_stopped_or_traced(task) \ - ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) -#define task_contributes_to_load(task) \ - ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ - (task->flags & PF_FROZEN) == 0 && \ - (task->state & TASK_NOLOAD) == 0) + +/* Used in tsk->state: */ +#define TASK_RUNNING 0 +#define TASK_INTERRUPTIBLE 1 +#define TASK_UNINTERRUPTIBLE 2 +#define __TASK_STOPPED 4 +#define __TASK_TRACED 8 +/* Used in tsk->exit_state: */ +#define EXIT_DEAD 16 +#define EXIT_ZOMBIE 32 +#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD) +/* Used in tsk->state again: */ +#define TASK_DEAD 64 +#define TASK_WAKEKILL 128 +#define TASK_WAKING 256 +#define TASK_PARKED 512 +#define TASK_NOLOAD 1024 +#define TASK_NEW 2048 +#define TASK_STATE_MAX 4096 + +#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn" + +/* Convenience macros for the sake of set_current_state: */ +#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) +#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED) +#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED) + +#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD) + +/* Convenience macros for the sake of wake_up(): */ +#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE) +#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED) + +/* get_task_state(): */ +#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \ + TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \ + __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD) + +#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) + +#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) + +#define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) + +#define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ + (task->flags & PF_FROZEN) == 0 && \ + (task->state & TASK_NOLOAD) == 0) #ifdef CONFIG_DEBUG_ATOMIC_SLEEP @@ -299,139 +159,24 @@ extern char ___assert_task_state[1 - 2*!!( * * Also see the comments of try_to_wake_up(). 
*/ -#define __set_current_state(state_value) \ - do { current->state = (state_value); } while (0) -#define set_current_state(state_value) \ - smp_store_mb(current->state, (state_value)) - -#endif - -/* Task command name length */ -#define TASK_COMM_LEN 16 - -#include - -/* - * This serializes "schedule()" and also protects - * the run-queue from deletions/modifications (but - * _adding_ to the beginning of the run-queue has - * a separate lock). - */ -extern rwlock_t tasklist_lock; -extern spinlock_t mmlist_lock; - -struct task_struct; - -#ifdef CONFIG_PROVE_RCU -extern int lockdep_tasklist_lock_is_held(void); -#endif /* #ifdef CONFIG_PROVE_RCU */ - -extern void sched_init(void); -extern void sched_init_smp(void); -extern asmlinkage void schedule_tail(struct task_struct *prev); -extern void init_idle(struct task_struct *idle, int cpu); -extern void init_idle_bootup_task(struct task_struct *idle); - -extern cpumask_var_t cpu_isolated_map; - -extern int runqueue_is_locked(int cpu); - -#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) -extern void nohz_balance_enter_idle(int cpu); -extern void set_cpu_sd_state_idle(void); -extern int get_nohz_timer_target(void); -#else -static inline void nohz_balance_enter_idle(int cpu) { } -static inline void set_cpu_sd_state_idle(void) { } +#define __set_current_state(state_value) do { current->state = (state_value); } while (0) +#define set_current_state(state_value) smp_store_mb(current->state, (state_value)) #endif -/* - * Only dump TASK_* tasks. (0 for all tasks) - */ -extern void show_state_filter(unsigned long state_filter); - -static inline void show_state(void) -{ - show_state_filter(0); -} +/* Task command name length: */ +#define TASK_COMM_LEN 16 -extern void show_regs(struct pt_regs *); - -/* - * TASK is a pointer to the task whose backtrace we want to see (or NULL for current - * task), SP is the stack pointer of the first frame that should be shown in the back - * trace (or NULL if the entire call-chain of the task should be shown). 
- */ -extern void show_stack(struct task_struct *task, unsigned long *sp); +extern cpumask_var_t cpu_isolated_map; -extern void cpu_init (void); -extern void trap_init(void); -extern void update_process_times(int user); extern void scheduler_tick(void); -extern int sched_cpu_starting(unsigned int cpu); -extern int sched_cpu_activate(unsigned int cpu); -extern int sched_cpu_deactivate(unsigned int cpu); - -#ifdef CONFIG_HOTPLUG_CPU -extern int sched_cpu_dying(unsigned int cpu); -#else -# define sched_cpu_dying NULL -#endif - -extern void sched_show_task(struct task_struct *p); - -#ifdef CONFIG_LOCKUP_DETECTOR -extern void touch_softlockup_watchdog_sched(void); -extern void touch_softlockup_watchdog(void); -extern void touch_softlockup_watchdog_sync(void); -extern void touch_all_softlockup_watchdogs(void); -extern int proc_dowatchdog_thresh(struct ctl_table *table, int write, - void __user *buffer, - size_t *lenp, loff_t *ppos); -extern unsigned int softlockup_panic; -extern unsigned int hardlockup_panic; -void lockup_detector_init(void); -#else -static inline void touch_softlockup_watchdog_sched(void) -{ -} -static inline void touch_softlockup_watchdog(void) -{ -} -static inline void touch_softlockup_watchdog_sync(void) -{ -} -static inline void touch_all_softlockup_watchdogs(void) -{ -} -static inline void lockup_detector_init(void) -{ -} -#endif - -#ifdef CONFIG_DETECT_HUNG_TASK -void reset_hung_task_detector(void); -#else -static inline void reset_hung_task_detector(void) -{ -} -#endif - -/* Attach to any functions which should be ignored in wchan output. */ -#define __sched __attribute__((__section__(".sched.text"))) -/* Linker adds these: start and end of __sched functions */ -extern char __sched_text_start[], __sched_text_end[]; +#define MAX_SCHEDULE_TIMEOUT LONG_MAX -/* Is this address in the __sched functions? 
*/ -extern int in_sched_functions(unsigned long addr); - -#define MAX_SCHEDULE_TIMEOUT LONG_MAX -extern signed long schedule_timeout(signed long timeout); -extern signed long schedule_timeout_interruptible(signed long timeout); -extern signed long schedule_timeout_killable(signed long timeout); -extern signed long schedule_timeout_uninterruptible(signed long timeout); -extern signed long schedule_timeout_idle(signed long timeout); +extern long schedule_timeout(long timeout); +extern long schedule_timeout_interruptible(long timeout); +extern long schedule_timeout_killable(long timeout); +extern long schedule_timeout_uninterruptible(long timeout); +extern long schedule_timeout_idle(long timeout); asmlinkage void schedule(void); extern void schedule_preempt_disabled(void); @@ -440,112 +185,6 @@ extern void io_schedule_finish(int token); extern long io_schedule_timeout(long timeout); extern void io_schedule(void); -void __noreturn do_task_dead(void); - -struct nsproxy; -struct user_namespace; - -#ifdef CONFIG_MMU -extern void arch_pick_mmap_layout(struct mm_struct *mm); -extern unsigned long -arch_get_unmapped_area(struct file *, unsigned long, unsigned long, - unsigned long, unsigned long); -extern unsigned long -arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, - unsigned long len, unsigned long pgoff, - unsigned long flags); -#else -static inline void arch_pick_mmap_layout(struct mm_struct *mm) {} -#endif - -#define SUID_DUMP_DISABLE 0 /* No setuid dumping */ -#define SUID_DUMP_USER 1 /* Dump as user of process */ -#define SUID_DUMP_ROOT 2 /* Dump as root */ - -/* mm flags */ - -/* for SUID_DUMP_* above */ -#define MMF_DUMPABLE_BITS 2 -#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1) - -extern void set_dumpable(struct mm_struct *mm, int value); -/* - * This returns the actual value of the suid_dumpable flag. For things - * that are using this for checking for privilege transitions, it must - * test against SUID_DUMP_USER rather than treating it as a boolean - * value. 
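/*
 * Fragment illustrating the rule spelled out above (helper name invented,
 * surrounding definitions assumed): privilege-transition checks must
 * compare against SUID_DUMP_USER explicitly, because SUID_DUMP_ROOT is
 * also non-zero and a plain boolean test would misread it.
 */
static inline bool mm_dumpable_as_user(struct mm_struct *mm)
{
	return get_dumpable(mm) == SUID_DUMP_USER;
}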
- */ -static inline int __get_dumpable(unsigned long mm_flags) -{ - return mm_flags & MMF_DUMPABLE_MASK; -} - -static inline int get_dumpable(struct mm_struct *mm) -{ - return __get_dumpable(mm->flags); -} - -/* coredump filter bits */ -#define MMF_DUMP_ANON_PRIVATE 2 -#define MMF_DUMP_ANON_SHARED 3 -#define MMF_DUMP_MAPPED_PRIVATE 4 -#define MMF_DUMP_MAPPED_SHARED 5 -#define MMF_DUMP_ELF_HEADERS 6 -#define MMF_DUMP_HUGETLB_PRIVATE 7 -#define MMF_DUMP_HUGETLB_SHARED 8 -#define MMF_DUMP_DAX_PRIVATE 9 -#define MMF_DUMP_DAX_SHARED 10 - -#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS -#define MMF_DUMP_FILTER_BITS 9 -#define MMF_DUMP_FILTER_MASK \ - (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT) -#define MMF_DUMP_FILTER_DEFAULT \ - ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\ - (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF) - -#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS -# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS) -#else -# define MMF_DUMP_MASK_DEFAULT_ELF 0 -#endif - /* leave room for more dump flags */ -#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */ -#define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */ -/* - * This one-shot flag is dropped due to necessity of changing exe once again - * on NFS restore - */ -//#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */ - -#define MMF_HAS_UPROBES 19 /* has uprobes */ -#define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */ -#define MMF_OOM_SKIP 21 /* mm is of no interest for the OOM killer */ -#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */ -#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */ - -#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK) - -struct sighand_struct { - atomic_t count; - struct k_sigaction action[_NSIG]; - spinlock_t siglock; - wait_queue_head_t signalfd_wqh; -}; - -struct pacct_struct { - int ac_flag; - long ac_exitcode; - unsigned long ac_mem; - u64 ac_utime, ac_stime; - unsigned long ac_minflt, ac_majflt; -}; - -struct cpu_itimer { - u64 expires; - u64 incr; -}; - /** * struct prev_cputime - snaphsot of system and user cputime * @utime: time spent in user mode @@ -557,20 +196,12 @@ struct cpu_itimer { */ struct prev_cputime { #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE - u64 utime; - u64 stime; - raw_spinlock_t lock; + u64 utime; + u64 stime; + raw_spinlock_t lock; #endif }; -static inline void prev_cputime_init(struct prev_cputime *prev) -{ -#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE - prev->utime = prev->stime = 0; - raw_spin_lock_init(&prev->lock); -#endif -} - /** * struct task_cputime - collected CPU time counts * @utime: time spent in user mode, in nanoseconds @@ -582,2733 +213,1208 @@ static inline void prev_cputime_init(struct prev_cputime *prev) * these counts together and treat all three of them in parallel. */ struct task_cputime { - u64 utime; - u64 stime; - unsigned long long sum_exec_runtime; + u64 utime; + u64 stime; + unsigned long long sum_exec_runtime; }; -/* Alternate field names when used to cache expirations. */ -#define virt_exp utime -#define prof_exp stime -#define sched_exp sum_exec_runtime +/* Alternate field names when used on cache expirations: */ +#define virt_exp utime +#define prof_exp stime +#define sched_exp sum_exec_runtime -/* - * This is the atomic variant of task_cputime, which can be used for - * storing and updating task_cputime statistics without locking. 
- */ -struct task_cputime_atomic { - atomic64_t utime; - atomic64_t stime; - atomic64_t sum_exec_runtime; -}; +struct sched_info { +#ifdef CONFIG_SCHED_INFO + /* Cumulative counters: */ + + /* # of times we have run on this CPU: */ + unsigned long pcount; + + /* Time spent waiting on a runqueue: */ + unsigned long long run_delay; + + /* Timestamps: */ + + /* When did we last run on a CPU? */ + unsigned long long last_arrival; -#define INIT_CPUTIME_ATOMIC \ - (struct task_cputime_atomic) { \ - .utime = ATOMIC64_INIT(0), \ - .stime = ATOMIC64_INIT(0), \ - .sum_exec_runtime = ATOMIC64_INIT(0), \ - } + /* When were we last queued to run? */ + unsigned long long last_queued; -#define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) +#endif /* CONFIG_SCHED_INFO */ +}; /* - * Disable preemption until the scheduler is running -- use an unconditional - * value so that it also works on !PREEMPT_COUNT kernels. + * Integer metrics need fixed point arithmetic, e.g., sched/fair + * has a few: load, load_avg, util_avg, freq, and capacity. * - * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count(). + * We define a basic fixed point arithmetic range, and then formalize + * all these metrics based on that basic range. */ -#define INIT_PREEMPT_COUNT PREEMPT_OFFSET +# define SCHED_FIXEDPOINT_SHIFT 10 +# define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT) + +struct load_weight { + unsigned long weight; + u32 inv_weight; +}; /* - * Initial preempt_count value; reflects the preempt_count schedule invariant - * which states that during context switches: + * The load_avg/util_avg accumulates an infinite geometric series + * (see __update_load_avg() in kernel/sched/fair.c). * - * preempt_count() == 2*PREEMPT_DISABLE_OFFSET + * [load_avg definition] * - * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels. - * Note: See finish_task_switch(). - */ -#define FORK_PREEMPT_COUNT (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) - -/** - * struct thread_group_cputimer - thread group interval timer counts - * @cputime_atomic: atomic thread group interval timers. - * @running: true when there are timers running and - * @cputime_atomic receives updates. - * @checking_timer: true when a thread in the group is in the - * process of checking for thread group timers. + * load_avg = runnable% * scale_load_down(load) + * + * where runnable% is the time ratio that a sched_entity is runnable. + * For cfs_rq, it is the aggregated load_avg of all runnable and + * blocked sched_entities. + * + * load_avg may also take frequency scaling into account: + * + * load_avg = runnable% * scale_load_down(load) * freq% * - * This structure contains the version of task_cputime, above, that is - * used for thread group CPU timer calculations. + * where freq% is the CPU frequency normalized to the highest frequency. + * + * [util_avg definition] + * + * util_avg = running% * SCHED_CAPACITY_SCALE + * + * where running% is the time ratio that a sched_entity is running on + * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable + * and blocked sched_entities. + * + * util_avg may also factor frequency scaling and CPU capacity scaling: + * + * util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity% + * + * where freq% is the same as above, and capacity% is the CPU capacity + * normalized to the greatest capacity (due to uarch differences, etc). + * + * N.B., the above ratios (runnable%, running%, freq%, and capacity%) + * themselves are in the range of [0, 1]. 
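 * As a worked instance of the ratios above (illustrative numbers, not
 * from this patch): with SCHED_FIXEDPOINT_SCALE = 1024, a nice-0 entity
 * (scale_load_down(load) = 1024) that is runnable half the time settles
 * near load_avg = 0.5 * 1024 = 512, and an entity running a quarter of
 * the time on a full-capacity CPU settles near
 * util_avg = 0.25 * SCHED_CAPACITY_SCALE = 256.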
To do fixed point arithmetics, + * we therefore scale them to as large a range as necessary. This is for + * example reflected by util_avg's SCHED_CAPACITY_SCALE. + * + * [Overflow issue] + * + * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities + * with the highest load (=88761), always runnable on a single cfs_rq, + * and should not overflow as the number already hits PID_MAX_LIMIT. + * + * For all other cases (including 32-bit kernels), struct load_weight's + * weight will overflow first before we do, because: + * + * Max(load_avg) <= Max(load.weight) + * + * Then it is the load_weight's responsibility to consider overflow + * issues. */ -struct thread_group_cputimer { - struct task_cputime_atomic cputime_atomic; - bool running; - bool checking_timer; +struct sched_avg { + u64 last_update_time; + u64 load_sum; + u32 util_sum; + u32 period_contrib; + unsigned long load_avg; + unsigned long util_avg; }; -#include -struct autogroup; - -/* - * NOTE! "signal_struct" does not have its own - * locking, because a shared signal_struct always - * implies a shared sighand_struct, so locking - * sighand_struct is always a proper superset of - * the locking of signal_struct. - */ -struct signal_struct { - atomic_t sigcnt; - atomic_t live; - int nr_threads; - struct list_head thread_head; - - wait_queue_head_t wait_chldexit; /* for wait4() */ - - /* current thread group signal load-balancing target: */ - struct task_struct *curr_target; - - /* shared signal handling: */ - struct sigpending shared_pending; - - /* thread group exit support */ - int group_exit_code; - /* overloaded: - * - notify group_exit_task when ->count is equal to notify_count - * - everyone except group_exit_task is stopped during signal delivery - * of fatal signals, group_exit_task processes the signal. - */ - int notify_count; - struct task_struct *group_exit_task; - - /* thread group stop support, overloads group_exit_code too */ - int group_stop_count; - unsigned int flags; /* see SIGNAL_* flags below */ +struct sched_statistics { +#ifdef CONFIG_SCHEDSTATS + u64 wait_start; + u64 wait_max; + u64 wait_count; + u64 wait_sum; + u64 iowait_count; + u64 iowait_sum; + + u64 sleep_start; + u64 sleep_max; + s64 sum_sleep_runtime; + + u64 block_start; + u64 block_max; + u64 exec_max; + u64 slice_max; + + u64 nr_migrations_cold; + u64 nr_failed_migrations_affine; + u64 nr_failed_migrations_running; + u64 nr_failed_migrations_hot; + u64 nr_forced_migrations; + + u64 nr_wakeups; + u64 nr_wakeups_sync; + u64 nr_wakeups_migrate; + u64 nr_wakeups_local; + u64 nr_wakeups_remote; + u64 nr_wakeups_affine; + u64 nr_wakeups_affine_attempts; + u64 nr_wakeups_passive; + u64 nr_wakeups_idle; +#endif +}; - /* - * PR_SET_CHILD_SUBREAPER marks a process, like a service - * manager, to re-parent orphan (double-forking) child processes - * to this process instead of 'init'. The service manager is - * able to receive SIGCHLD signals and is able to investigate - * the process until it calls wait(). All children of this - * process will inherit a flag if they should look for a - * child_subreaper process at exit. 
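/*
 * Hedged userspace illustration of the sub-reaper behaviour described
 * above (function name invented): a service manager marks itself with
 * PR_SET_CHILD_SUBREAPER so that orphaned descendants are re-parented to
 * it rather than to init, where it can observe and reap them.
 */
#include <sys/prctl.h>
#include <sys/wait.h>

static int become_subreaper(void)
{
	if (prctl(PR_SET_CHILD_SUBREAPER, 1) != 0)
		return -1;

	/* ... fork services; later, reap whatever was re-parented here: */
	while (waitpid(-1, NULL, WNOHANG) > 0)
		;
	return 0;
}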
- */ - unsigned int is_child_subreaper:1; - unsigned int has_child_subreaper:1; +struct sched_entity { + /* For load-balancing: */ + struct load_weight load; + struct rb_node run_node; + struct list_head group_node; + unsigned int on_rq; -#ifdef CONFIG_POSIX_TIMERS + u64 exec_start; + u64 sum_exec_runtime; + u64 vruntime; + u64 prev_sum_exec_runtime; - /* POSIX.1b Interval Timers */ - int posix_timer_id; - struct list_head posix_timers; + u64 nr_migrations; - /* ITIMER_REAL timer for the process */ - struct hrtimer real_timer; - ktime_t it_real_incr; + struct sched_statistics statistics; - /* - * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use - * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these - * values are defined to 0 and 1 respectively - */ - struct cpu_itimer it[2]; +#ifdef CONFIG_FAIR_GROUP_SCHED + int depth; + struct sched_entity *parent; + /* rq on which this entity is (to be) queued: */ + struct cfs_rq *cfs_rq; + /* rq "owned" by this entity/group: */ + struct cfs_rq *my_q; +#endif +#ifdef CONFIG_SMP /* - * Thread group totals for process CPU timers. - * See thread_group_cputimer(), et al, for details. + * Per entity load average tracking. + * + * Put into separate cache line so it does not + * collide with read-mostly values above. */ - struct thread_group_cputimer cputimer; - - /* Earliest-expiration cache. */ - struct task_cputime cputime_expires; - - struct list_head cpu_timers[3]; - + struct sched_avg avg ____cacheline_aligned_in_smp; #endif +}; - struct pid *leader_pid; - -#ifdef CONFIG_NO_HZ_FULL - atomic_t tick_dep_mask; +struct sched_rt_entity { + struct list_head run_list; + unsigned long timeout; + unsigned long watchdog_stamp; + unsigned int time_slice; + unsigned short on_rq; + unsigned short on_list; + + struct sched_rt_entity *back; +#ifdef CONFIG_RT_GROUP_SCHED + struct sched_rt_entity *parent; + /* rq on which this entity is (to be) queued: */ + struct rt_rq *rt_rq; + /* rq "owned" by this entity/group: */ + struct rt_rq *my_q; #endif +}; - struct pid *tty_old_pgrp; - - /* boolean value for session group leader */ - int leader; - - struct tty_struct *tty; /* NULL if no tty */ +struct sched_dl_entity { + struct rb_node rb_node; -#ifdef CONFIG_SCHED_AUTOGROUP - struct autogroup *autogroup; -#endif /* - * Cumulative resource counters for dead threads in the group, - * and for reaped dead child processes forked by this group. - * Live threads maintain their own counters and add to these - * in __exit_signal, except for the group leader. + * Original scheduling parameters. Copied here from sched_attr + * during sched_setattr(), they will remain the same until + * the next sched_setattr(). */ - seqlock_t stats_lock; - u64 utime, stime, cutime, cstime; - u64 gtime; - u64 cgtime; - struct prev_cputime prev_cputime; - unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; - unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt; - unsigned long inblock, oublock, cinblock, coublock; - unsigned long maxrss, cmaxrss; - struct task_io_accounting ioac; + u64 dl_runtime; /* Maximum runtime for each instance */ + u64 dl_deadline; /* Relative deadline of each instance */ + u64 dl_period; /* Separation of two instances (period) */ + u64 dl_bw; /* dl_runtime / dl_deadline */ /* - * Cumulative ns of schedule CPU time fo dead threads in the - * group, not including a zombie group leader, (This only differs - * from jiffies_to_ns(utime + stime) if sched_clock uses something - * other than jiffies.) + * Actual scheduling parameters. 
Initialized with the values above, + * they are continously updated during task execution. Note that + * the remaining runtime could be < 0 in case we are in overrun. */ - unsigned long long sum_sched_runtime; + s64 runtime; /* Remaining runtime for this instance */ + u64 deadline; /* Absolute deadline for this instance */ + unsigned int flags; /* Specifying the scheduler behaviour */ /* - * We don't bother to synchronize most readers of this at all, - * because there is no reader checking a limit that actually needs - * to get both rlim_cur and rlim_max atomically, and either one - * alone is a single word that can safely be read normally. - * getrlimit/setrlimit use task_lock(current->group_leader) to - * protect this instead of the siglock, because they really - * have no need to disable irqs. + * Some bool flags: + * + * @dl_throttled tells if we exhausted the runtime. If so, the + * task has to wait for a replenishment to be performed at the + * next firing of dl_timer. + * + * @dl_boosted tells if we are boosted due to DI. If so we are + * outside bandwidth enforcement mechanism (but only until we + * exit the critical section); + * + * @dl_yielded tells if task gave up the CPU before consuming + * all its available runtime during the last job. */ - struct rlimit rlim[RLIM_NLIMITS]; - -#ifdef CONFIG_BSD_PROCESS_ACCT - struct pacct_struct pacct; /* per-process accounting information */ -#endif -#ifdef CONFIG_TASKSTATS - struct taskstats *stats; -#endif -#ifdef CONFIG_AUDIT - unsigned audit_tty; - struct tty_audit_buf *tty_audit_buf; -#endif + int dl_throttled; + int dl_boosted; + int dl_yielded; /* - * Thread is the potential origin of an oom condition; kill first on - * oom + * Bandwidth enforcement timer. Each -deadline task has its + * own bandwidth to be enforced, thus we need one timer per task. */ - bool oom_flag_origin; - short oom_score_adj; /* OOM kill score adjustment */ - short oom_score_adj_min; /* OOM kill score adjustment min value. - * Only settable by CAP_SYS_RESOURCE. */ - struct mm_struct *oom_mm; /* recorded mm when the thread group got - * killed by the oom killer */ - - struct mutex cred_guard_mutex; /* guard against foreign influences on - * credential calculations - * (notably. ptrace) */ + struct hrtimer dl_timer; }; -/* - * Bits in flags field of signal_struct. - */ -#define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */ -#define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */ -#define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */ -#define SIGNAL_GROUP_COREDUMP 0x00000008 /* coredump in progress */ -/* - * Pending notifications to parent. - */ -#define SIGNAL_CLD_STOPPED 0x00000010 -#define SIGNAL_CLD_CONTINUED 0x00000020 -#define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED) - -#define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */ +union rcu_special { + struct { + u8 blocked; + u8 need_qs; + u8 exp_need_qs; -#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \ - SIGNAL_STOP_CONTINUED) + /* Otherwise the compiler can store garbage here: */ + u8 pad; + } b; /* Bits. */ + u32 s; /* Set of bits. 
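The dl_runtime/dl_deadline/dl_period fields above are copied from the sched_attr passed to sched_setattr(). Below is a minimal userspace sketch of that hand-off, modelled on the sched_setattr(2) man page approach: glibc has no wrapper, so the structure is declared locally and the raw syscall is used. It assumes the libc headers expose SYS_sched_setattr, and CAP_SYS_NICE is normally required.

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

struct sched_attr {
        uint32_t size;
        uint32_t sched_policy;
        uint64_t sched_flags;
        int32_t  sched_nice;
        uint32_t sched_priority;
        /* SCHED_DEADLINE parameters, in nanoseconds: */
        uint64_t sched_runtime;
        uint64_t sched_deadline;
        uint64_t sched_period;
};

#define SCHED_DEADLINE  6

int main(void)
{
        struct sched_attr attr = {
                .size           = sizeof(attr),
                .sched_policy   = SCHED_DEADLINE,
                .sched_runtime  = 10 * 1000 * 1000,   /* 10 ms of budget...     */
                .sched_deadline = 30 * 1000 * 1000,   /* ...due within 30 ms... */
                .sched_period   = 30 * 1000 * 1000,   /* ...every 30 ms.        */
        };

        if (syscall(SYS_sched_setattr, 0, &attr, 0) == -1) {
                perror("sched_setattr");
                return 1;
        }
        printf("now running under SCHED_DEADLINE\n");
        return 0;
}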
*/ +}; -static inline void signal_set_stop_flags(struct signal_struct *sig, - unsigned int flags) -{ - WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP)); - sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags; -} +enum perf_event_task_context { + perf_invalid_context = -1, + perf_hw_context = 0, + perf_sw_context, + perf_nr_task_contexts, +}; -/* If true, all threads except ->group_exit_task have pending SIGKILL */ -static inline int signal_group_exit(const struct signal_struct *sig) -{ - return (sig->flags & SIGNAL_GROUP_EXIT) || - (sig->group_exit_task != NULL); -} +struct wake_q_node { + struct wake_q_node *next; +}; -/* - * Some day this will be a full-fledged user tracking system.. - */ -struct user_struct { - atomic_t __count; /* reference count */ - atomic_t processes; /* How many processes does this user have? */ - atomic_t sigpending; /* How many pending signals does this user have? */ -#ifdef CONFIG_FANOTIFY - atomic_t fanotify_listeners; +struct task_struct { +#ifdef CONFIG_THREAD_INFO_IN_TASK + /* + * For reasons of header soup (see current_thread_info()), this + * must be the first element of task_struct. + */ + struct thread_info thread_info; #endif -#ifdef CONFIG_EPOLL - atomic_long_t epoll_watches; /* The number of file descriptors currently watched */ + /* -1 unrunnable, 0 runnable, >0 stopped: */ + volatile long state; + void *stack; + atomic_t usage; + /* Per task flags (PF_*), defined further below: */ + unsigned int flags; + unsigned int ptrace; + +#ifdef CONFIG_SMP + struct llist_node wake_entry; + int on_cpu; +#ifdef CONFIG_THREAD_INFO_IN_TASK + /* Current CPU: */ + unsigned int cpu; #endif -#ifdef CONFIG_POSIX_MQUEUE - /* protected by mq_lock */ - unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */ + unsigned int wakee_flips; + unsigned long wakee_flip_decay_ts; + struct task_struct *last_wakee; + + int wake_cpu; #endif - unsigned long locked_shm; /* How many pages of mlocked shm ? 
*/ - unsigned long unix_inflight; /* How many files in flight in unix sockets */ - atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */ + int on_rq; + + int prio; + int static_prio; + int normal_prio; + unsigned int rt_priority; -#ifdef CONFIG_KEYS - struct key *uid_keyring; /* UID specific keyring */ - struct key *session_keyring; /* UID's default session keyring */ + const struct sched_class *sched_class; + struct sched_entity se; + struct sched_rt_entity rt; +#ifdef CONFIG_CGROUP_SCHED + struct task_group *sched_task_group; #endif + struct sched_dl_entity dl; - /* Hash table maintenance information */ - struct hlist_node uidhash_node; - kuid_t uid; +#ifdef CONFIG_PREEMPT_NOTIFIERS + /* List of struct preempt_notifier: */ + struct hlist_head preempt_notifiers; +#endif -#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL) - atomic_long_t locked_vm; +#ifdef CONFIG_BLK_DEV_IO_TRACE + unsigned int btrace_seq; #endif -}; -extern int uids_sysfs_init(void); + unsigned int policy; + int nr_cpus_allowed; + cpumask_t cpus_allowed; -extern struct user_struct *find_user(kuid_t); +#ifdef CONFIG_PREEMPT_RCU + int rcu_read_lock_nesting; + union rcu_special rcu_read_unlock_special; + struct list_head rcu_node_entry; + struct rcu_node *rcu_blocked_node; +#endif /* #ifdef CONFIG_PREEMPT_RCU */ -extern struct user_struct root_user; -#define INIT_USER (&root_user) +#ifdef CONFIG_TASKS_RCU + unsigned long rcu_tasks_nvcsw; + bool rcu_tasks_holdout; + struct list_head rcu_tasks_holdout_list; + int rcu_tasks_idle_cpu; +#endif /* #ifdef CONFIG_TASKS_RCU */ + struct sched_info sched_info; -struct backing_dev_info; -struct reclaim_state; + struct list_head tasks; +#ifdef CONFIG_SMP + struct plist_node pushable_tasks; + struct rb_node pushable_dl_tasks; +#endif -#ifdef CONFIG_SCHED_INFO -struct sched_info { - /* cumulative counters */ - unsigned long pcount; /* # of times run on this cpu */ - unsigned long long run_delay; /* time spent waiting on a runqueue */ + struct mm_struct *mm; + struct mm_struct *active_mm; - /* timestamps */ - unsigned long long last_arrival,/* when we last ran on a cpu */ - last_queued; /* when we were last queued to run */ -}; -#endif /* CONFIG_SCHED_INFO */ + /* Per-thread vma caching: */ + struct vmacache vmacache; -#ifdef CONFIG_TASK_DELAY_ACCT -struct task_delay_info { - spinlock_t lock; - unsigned int flags; /* Private per-task flags */ +#ifdef SPLIT_RSS_COUNTING + struct task_rss_stat rss_stat; +#endif + int exit_state; + int exit_code; + int exit_signal; + /* The signal sent when the parent dies: */ + int pdeath_signal; + /* JOBCTL_*, siglock protected: */ + unsigned long jobctl; - /* For each stat XXX, add following, aligned appropriately - * - * struct timespec XXX_start, XXX_end; - * u64 XXX_delay; - * u32 XXX_count; - * - * Atomicity of updates to XXX_delay, XXX_count protected by - * single lock above (split into XXX_lock if contention is an issue). - */ + /* Used for emulating ABI behavior of previous Linux versions: */ + unsigned int personality; - /* - * XXX_count is incremented on every XXX operation, the delay - * associated with the operation is added to XXX_delay. - * XXX_delay contains the accumulated delay time in nanoseconds. 
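The pdeath_signal field added above is the per-task "signal sent when the parent dies"; from userspace it is set with prctl(PR_SET_PDEATHSIG). A small illustrative sketch:

#include <signal.h>
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
        /* Ask the kernel to deliver SIGTERM to this task when its parent exits. */
        if (prctl(PR_SET_PDEATHSIG, SIGTERM) == -1) {
                perror("prctl(PR_SET_PDEATHSIG)");
                return 1;
        }
        printf("parent-death signal set to SIGTERM\n");
        return 0;
}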
- */ - u64 blkio_start; /* Shared by blkio, swapin */ - u64 blkio_delay; /* wait for sync block io completion */ - u64 swapin_delay; /* wait for swapin block io completion */ - u32 blkio_count; /* total count of the number of sync block */ - /* io operations performed */ - u32 swapin_count; /* total count of the number of swapin block */ - /* io operations performed */ - - u64 freepages_start; - u64 freepages_delay; /* wait for memory reclaim */ - u32 freepages_count; /* total count of memory reclaim */ -}; -#endif /* CONFIG_TASK_DELAY_ACCT */ + /* Scheduler bits, serialized by scheduler locks: */ + unsigned sched_reset_on_fork:1; + unsigned sched_contributes_to_load:1; + unsigned sched_migrated:1; + unsigned sched_remote_wakeup:1; + /* Force alignment to the next boundary: */ + unsigned :0; -static inline int sched_info_on(void) -{ -#ifdef CONFIG_SCHEDSTATS - return 1; -#elif defined(CONFIG_TASK_DELAY_ACCT) - extern int delayacct_on; - return delayacct_on; -#else - return 0; -#endif -} + /* Unserialized, strictly 'current' */ -#ifdef CONFIG_SCHEDSTATS -void force_schedstat_enabled(void); + /* Bit to tell LSMs we're in execve(): */ + unsigned in_execve:1; + unsigned in_iowait:1; +#ifndef TIF_RESTORE_SIGMASK + unsigned restore_sigmask:1; +#endif +#ifdef CONFIG_MEMCG + unsigned memcg_may_oom:1; +#ifndef CONFIG_SLOB + unsigned memcg_kmem_skip_account:1; +#endif +#endif +#ifdef CONFIG_COMPAT_BRK + unsigned brk_randomized:1; #endif -enum cpu_idle_type { - CPU_IDLE, - CPU_NOT_IDLE, - CPU_NEWLY_IDLE, - CPU_MAX_IDLE_TYPES -}; + unsigned long atomic_flags; /* Flags requiring atomic access. */ -/* - * Integer metrics need fixed point arithmetic, e.g., sched/fair - * has a few: load, load_avg, util_avg, freq, and capacity. - * - * We define a basic fixed point arithmetic range, and then formalize - * all these metrics based on that basic range. - */ -# define SCHED_FIXEDPOINT_SHIFT 10 -# define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT) + struct restart_block restart_block; -/* - * Increase resolution of cpu_capacity calculations - */ -#define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT -#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT) + pid_t pid; + pid_t tgid; -/* - * Wake-queues are lists of tasks with a pending wakeup, whose - * callers have already marked the task as woken internally, - * and can thus carry on. A common use case is being able to - * do the wakeups once the corresponding user lock as been - * released. - * - * We hold reference to each task in the list across the wakeup, - * thus guaranteeing that the memory is still valid by the time - * the actual wakeups are performed in wake_up_q(). - * - * One per task suffices, because there's never a need for a task to be - * in two wake queues simultaneously; it is forbidden to abandon a task - * in a wake queue (a call to wake_up_q() _must_ follow), so if a task is - * already in a wake queue, the wakeup will happen soon and the second - * waker can just skip it. - * - * The DEFINE_WAKE_Q macro declares and initializes the list head. - * wake_up_q() does NOT reinitialize the list; it's expected to be - * called near the end of a function. Otherwise, the list can be - * re-initialized for later re-use by wake_q_init(). - * - * Note that this can cause spurious wakeups. schedule() callers - * must ensure the call is done inside a loop, confirming that the - * wakeup condition has in fact occurred. 
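The wake-queue comment removed above describes the intended usage pattern: collect waiters while a lock is held, then perform the wakeups after dropping it. The kernel-style sketch below is not a standalone program; the example_lock type and example_pick_next_waiter() helper are invented for illustration, and only DEFINE_WAKE_Q(), wake_q_add() and wake_up_q() are the interfaces being described.

/* Illustrative only -- shape of a typical wake-queue user. */
static void example_release_and_wake(struct example_lock *lock)
{
        DEFINE_WAKE_Q(wake_q);
        struct task_struct *waiter;

        spin_lock(&lock->wait_lock);
        while ((waiter = example_pick_next_waiter(lock)) != NULL)
                wake_q_add(&wake_q, waiter);    /* holds a task reference */
        spin_unlock(&lock->wait_lock);

        /* Each queued task stays pinned until the wakeups happen here: */
        wake_up_q(&wake_q);
}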
- */ -struct wake_q_node { - struct wake_q_node *next; -}; +#ifdef CONFIG_CC_STACKPROTECTOR + /* Canary value for the -fstack-protector GCC feature: */ + unsigned long stack_canary; +#endif + /* + * Pointers to the (original) parent process, youngest child, younger sibling, + * older sibling, respectively. (p->father can be replaced with + * p->real_parent->pid) + */ -struct wake_q_head { - struct wake_q_node *first; - struct wake_q_node **lastp; -}; + /* Real parent process: */ + struct task_struct __rcu *real_parent; + + /* Recipient of SIGCHLD, wait4() reports: */ + struct task_struct __rcu *parent; -#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01) + /* + * Children/sibling form the list of natural children: + */ + struct list_head children; + struct list_head sibling; + struct task_struct *group_leader; -#define DEFINE_WAKE_Q(name) \ - struct wake_q_head name = { WAKE_Q_TAIL, &name.first } + /* + * 'ptraced' is the list of tasks this task is using ptrace() on. + * + * This includes both natural children and PTRACE_ATTACH targets. + * 'ptrace_entry' is this task's link on the p->parent->ptraced list. + */ + struct list_head ptraced; + struct list_head ptrace_entry; -static inline void wake_q_init(struct wake_q_head *head) -{ - head->first = WAKE_Q_TAIL; - head->lastp = &head->first; -} + /* PID/PID hash table linkage. */ + struct pid_link pids[PIDTYPE_MAX]; + struct list_head thread_group; + struct list_head thread_node; -extern void wake_q_add(struct wake_q_head *head, - struct task_struct *task); -extern void wake_up_q(struct wake_q_head *head); + struct completion *vfork_done; -/* - * sched-domains (multiprocessor balancing) declarations: - */ -#ifdef CONFIG_SMP -#define SD_LOAD_BALANCE 0x0001 /* Do load balancing on this domain. */ -#define SD_BALANCE_NEWIDLE 0x0002 /* Balance when about to become idle */ -#define SD_BALANCE_EXEC 0x0004 /* Balance on exec */ -#define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */ -#define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */ -#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */ -#define SD_ASYM_CPUCAPACITY 0x0040 /* Groups have different max cpu capacities */ -#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share cpu capacity */ -#define SD_SHARE_POWERDOMAIN 0x0100 /* Domain members share power domain */ -#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */ -#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ -#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */ -#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */ -#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */ -#define SD_NUMA 0x4000 /* cross-node balancing */ - -#ifdef CONFIG_SCHED_SMT -static inline int cpu_smt_flags(void) -{ - return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES; -} -#endif + /* CLONE_CHILD_SETTID: */ + int __user *set_child_tid; -#ifdef CONFIG_SCHED_MC -static inline int cpu_core_flags(void) -{ - return SD_SHARE_PKG_RESOURCES; -} + /* CLONE_CHILD_CLEARTID: */ + int __user *clear_child_tid; + + u64 utime; + u64 stime; +#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME + u64 utimescaled; + u64 stimescaled; +#endif + u64 gtime; + struct prev_cputime prev_cputime; +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN + seqcount_t vtime_seqcount; + unsigned long long vtime_snap; + enum { + /* Task is sleeping or running in a CPU with VTIME inactive: */ + VTIME_INACTIVE = 0, + /* Task runs in userspace in a CPU with VTIME active: */ + VTIME_USER, + /* Task 
runs in kernelspace in a CPU with VTIME active: */ + VTIME_SYS, + } vtime_snap_whence; #endif -#ifdef CONFIG_NUMA -static inline int cpu_numa_flags(void) -{ - return SD_NUMA; -} +#ifdef CONFIG_NO_HZ_FULL + atomic_t tick_dep_mask; #endif + /* Context switch counts: */ + unsigned long nvcsw; + unsigned long nivcsw; -extern int arch_asym_cpu_priority(int cpu); + /* Monotonic time in nsecs: */ + u64 start_time; -struct sched_domain_attr { - int relax_domain_level; -}; + /* Boot based time in nsecs: */ + u64 real_start_time; -#define SD_ATTR_INIT (struct sched_domain_attr) { \ - .relax_domain_level = -1, \ -} + /* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */ + unsigned long min_flt; + unsigned long maj_flt; -extern int sched_domain_level_max; +#ifdef CONFIG_POSIX_TIMERS + struct task_cputime cputime_expires; + struct list_head cpu_timers[3]; +#endif -struct sched_group; + /* Process credentials: */ -struct sched_domain_shared { - atomic_t ref; - atomic_t nr_busy_cpus; - int has_idle_cores; -}; + /* Tracer's credentials at attach: */ + const struct cred __rcu *ptracer_cred; -struct sched_domain { - /* These fields must be setup */ - struct sched_domain *parent; /* top domain must be null terminated */ - struct sched_domain *child; /* bottom domain must be null terminated */ - struct sched_group *groups; /* the balancing groups of the domain */ - unsigned long min_interval; /* Minimum balance interval ms */ - unsigned long max_interval; /* Maximum balance interval ms */ - unsigned int busy_factor; /* less balancing by factor if busy */ - unsigned int imbalance_pct; /* No balance until over watermark */ - unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */ - unsigned int busy_idx; - unsigned int idle_idx; - unsigned int newidle_idx; - unsigned int wake_idx; - unsigned int forkexec_idx; - unsigned int smt_gain; - - int nohz_idle; /* NOHZ IDLE status */ - int flags; /* See SD_* */ - int level; - - /* Runtime fields. */ - unsigned long last_balance; /* init to jiffies. units in jiffies */ - unsigned int balance_interval; /* initialise to 1. units in ms. 
*/ - unsigned int nr_balance_failed; /* initialise to 0 */ - - /* idle_balance() stats */ - u64 max_newidle_lb_cost; - unsigned long next_decay_max_lb_cost; - - u64 avg_scan_cost; /* select_idle_sibling */ + /* Objective and real subjective task credentials (COW): */ + const struct cred __rcu *real_cred; + + /* Effective (overridable) subjective task credentials (COW): */ + const struct cred __rcu *cred; -#ifdef CONFIG_SCHEDSTATS - /* load_balance() stats */ - unsigned int lb_count[CPU_MAX_IDLE_TYPES]; - unsigned int lb_failed[CPU_MAX_IDLE_TYPES]; - unsigned int lb_balanced[CPU_MAX_IDLE_TYPES]; - unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES]; - unsigned int lb_gained[CPU_MAX_IDLE_TYPES]; - unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES]; - unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES]; - unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES]; - - /* Active load balancing */ - unsigned int alb_count; - unsigned int alb_failed; - unsigned int alb_pushed; - - /* SD_BALANCE_EXEC stats */ - unsigned int sbe_count; - unsigned int sbe_balanced; - unsigned int sbe_pushed; - - /* SD_BALANCE_FORK stats */ - unsigned int sbf_count; - unsigned int sbf_balanced; - unsigned int sbf_pushed; - - /* try_to_wake_up() stats */ - unsigned int ttwu_wake_remote; - unsigned int ttwu_move_affine; - unsigned int ttwu_move_balance; -#endif -#ifdef CONFIG_SCHED_DEBUG - char *name; -#endif - union { - void *private; /* used during construction */ - struct rcu_head rcu; /* used during destruction */ - }; - struct sched_domain_shared *shared; - - unsigned int span_weight; /* - * Span of all CPUs in this domain. + * executable name, excluding path. * - * NOTE: this field is variable length. (Allocated dynamically - * by attaching extra space to the end of the structure, - * depending on how many CPUs the kernel has booted up with) + * - normally initialized setup_new_exec() + * - access it with [gs]et_task_comm() + * - lock it with task_lock() */ - unsigned long span[0]; -}; - -static inline struct cpumask *sched_domain_span(struct sched_domain *sd) -{ - return to_cpumask(sd->span); -} + char comm[TASK_COMM_LEN]; -extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], - struct sched_domain_attr *dattr_new); + struct nameidata *nameidata; -/* Allocate an array of sched domains, for partition_sched_domains(). 
*/ -cpumask_var_t *alloc_sched_domains(unsigned int ndoms); -void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms); +#ifdef CONFIG_SYSVIPC + struct sysv_sem sysvsem; + struct sysv_shm sysvshm; +#endif +#ifdef CONFIG_DETECT_HUNG_TASK + unsigned long last_switch_count; +#endif + /* Filesystem information: */ + struct fs_struct *fs; -bool cpus_share_cache(int this_cpu, int that_cpu); + /* Open file information: */ + struct files_struct *files; -typedef const struct cpumask *(*sched_domain_mask_f)(int cpu); -typedef int (*sched_domain_flags_f)(void); + /* Namespaces: */ + struct nsproxy *nsproxy; -#define SDTL_OVERLAP 0x01 + /* Signal handlers: */ + struct signal_struct *signal; + struct sighand_struct *sighand; + sigset_t blocked; + sigset_t real_blocked; + /* Restored if set_restore_sigmask() was used: */ + sigset_t saved_sigmask; + struct sigpending pending; + unsigned long sas_ss_sp; + size_t sas_ss_size; + unsigned int sas_ss_flags; -struct sd_data { - struct sched_domain **__percpu sd; - struct sched_domain_shared **__percpu sds; - struct sched_group **__percpu sg; - struct sched_group_capacity **__percpu sgc; -}; + struct callback_head *task_works; -struct sched_domain_topology_level { - sched_domain_mask_f mask; - sched_domain_flags_f sd_flags; - int flags; - int numa_level; - struct sd_data data; -#ifdef CONFIG_SCHED_DEBUG - char *name; + struct audit_context *audit_context; +#ifdef CONFIG_AUDITSYSCALL + kuid_t loginuid; + unsigned int sessionid; #endif -}; + struct seccomp seccomp; -extern void set_sched_topology(struct sched_domain_topology_level *tl); -extern void wake_up_if_idle(int cpu); + /* Thread group tracking: */ + u32 parent_exec_id; + u32 self_exec_id; -#ifdef CONFIG_SCHED_DEBUG -# define SD_INIT_NAME(type) .name = #type -#else -# define SD_INIT_NAME(type) -#endif + /* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */ + spinlock_t alloc_lock; -#else /* CONFIG_SMP */ + /* Protection of the PI data structures: */ + raw_spinlock_t pi_lock; -struct sched_domain_attr; + struct wake_q_node wake_q; -static inline void -partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], - struct sched_domain_attr *dattr_new) -{ -} +#ifdef CONFIG_RT_MUTEXES + /* PI waiters blocked on a rt_mutex held by this task: */ + struct rb_root pi_waiters; + struct rb_node *pi_waiters_leftmost; + /* Deadlock detection and priority inheritance handling: */ + struct rt_mutex_waiter *pi_blocked_on; +#endif -static inline bool cpus_share_cache(int this_cpu, int that_cpu) -{ - return true; -} +#ifdef CONFIG_DEBUG_MUTEXES + /* Mutex deadlock detection: */ + struct mutex_waiter *blocked_on; +#endif -#endif /* !CONFIG_SMP */ +#ifdef CONFIG_TRACE_IRQFLAGS + unsigned int irq_events; + unsigned long hardirq_enable_ip; + unsigned long hardirq_disable_ip; + unsigned int hardirq_enable_event; + unsigned int hardirq_disable_event; + int hardirqs_enabled; + int hardirq_context; + unsigned long softirq_disable_ip; + unsigned long softirq_enable_ip; + unsigned int softirq_disable_event; + unsigned int softirq_enable_event; + int softirqs_enabled; + int softirq_context; +#endif + +#ifdef CONFIG_LOCKDEP +# define MAX_LOCK_DEPTH 48UL + u64 curr_chain_key; + int lockdep_depth; + unsigned int lockdep_recursion; + struct held_lock held_locks[MAX_LOCK_DEPTH]; + gfp_t lockdep_reclaim_gfp; +#endif +#ifdef CONFIG_UBSAN + unsigned int in_ubsan; +#endif -struct io_context; /* See blkdev.h */ + /* Journalling filesystem info: */ + void *journal_info; + /* Stacked block 
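For the sched_domain_topology_level / set_sched_topology() interface moved out above, architecture code typically hands the scheduler a NULL-terminated table of topology levels. The sketch below is loosely modelled on the scheduler's default table and is illustrative only; it assumes the usual cpu_smt_mask/cpu_coregroup_mask/cpu_cpu_mask helpers from the topology headers.

/* Illustrative only -- not taken from any particular architecture. */
static struct sched_domain_topology_level example_topology[] = {
#ifdef CONFIG_SCHED_SMT
        { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_MC
        { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif
        { cpu_cpu_mask, SD_INIT_NAME(DIE) },
        { NULL, },
};

static void __init example_set_topology(void)
{
        set_sched_topology(example_topology);
}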
device info: */ + struct bio_list *bio_list; -#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK -extern void prefetch_stack(struct task_struct *t); -#else -static inline void prefetch_stack(struct task_struct *t) { } +#ifdef CONFIG_BLOCK + /* Stack plugging: */ + struct blk_plug *plug; #endif -struct audit_context; /* See audit.c */ -struct mempolicy; -struct pipe_inode_info; -struct uts_namespace; + /* VM state: */ + struct reclaim_state *reclaim_state; -struct load_weight { - unsigned long weight; - u32 inv_weight; -}; + struct backing_dev_info *backing_dev_info; -/* - * The load_avg/util_avg accumulates an infinite geometric series - * (see __update_load_avg() in kernel/sched/fair.c). - * - * [load_avg definition] - * - * load_avg = runnable% * scale_load_down(load) - * - * where runnable% is the time ratio that a sched_entity is runnable. - * For cfs_rq, it is the aggregated load_avg of all runnable and - * blocked sched_entities. - * - * load_avg may also take frequency scaling into account: - * - * load_avg = runnable% * scale_load_down(load) * freq% - * - * where freq% is the CPU frequency normalized to the highest frequency. - * - * [util_avg definition] - * - * util_avg = running% * SCHED_CAPACITY_SCALE - * - * where running% is the time ratio that a sched_entity is running on - * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable - * and blocked sched_entities. - * - * util_avg may also factor frequency scaling and CPU capacity scaling: - * - * util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity% - * - * where freq% is the same as above, and capacity% is the CPU capacity - * normalized to the greatest capacity (due to uarch differences, etc). - * - * N.B., the above ratios (runnable%, running%, freq%, and capacity%) - * themselves are in the range of [0, 1]. To do fixed point arithmetics, - * we therefore scale them to as large a range as necessary. This is for - * example reflected by util_avg's SCHED_CAPACITY_SCALE. - * - * [Overflow issue] - * - * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities - * with the highest load (=88761), always runnable on a single cfs_rq, - * and should not overflow as the number already hits PID_MAX_LIMIT. - * - * For all other cases (including 32-bit kernels), struct load_weight's - * weight will overflow first before we do, because: - * - * Max(load_avg) <= Max(load.weight) - * - * Then it is the load_weight's responsibility to consider overflow - * issues. 
- */ -struct sched_avg { - u64 last_update_time, load_sum; - u32 util_sum, period_contrib; - unsigned long load_avg, util_avg; -}; - -#ifdef CONFIG_SCHEDSTATS -struct sched_statistics { - u64 wait_start; - u64 wait_max; - u64 wait_count; - u64 wait_sum; - u64 iowait_count; - u64 iowait_sum; - - u64 sleep_start; - u64 sleep_max; - s64 sum_sleep_runtime; - - u64 block_start; - u64 block_max; - u64 exec_max; - u64 slice_max; - - u64 nr_migrations_cold; - u64 nr_failed_migrations_affine; - u64 nr_failed_migrations_running; - u64 nr_failed_migrations_hot; - u64 nr_forced_migrations; - - u64 nr_wakeups; - u64 nr_wakeups_sync; - u64 nr_wakeups_migrate; - u64 nr_wakeups_local; - u64 nr_wakeups_remote; - u64 nr_wakeups_affine; - u64 nr_wakeups_affine_attempts; - u64 nr_wakeups_passive; - u64 nr_wakeups_idle; -}; -#endif - -struct sched_entity { - struct load_weight load; /* for load-balancing */ - struct rb_node run_node; - struct list_head group_node; - unsigned int on_rq; - - u64 exec_start; - u64 sum_exec_runtime; - u64 vruntime; - u64 prev_sum_exec_runtime; - - u64 nr_migrations; - -#ifdef CONFIG_SCHEDSTATS - struct sched_statistics statistics; -#endif - -#ifdef CONFIG_FAIR_GROUP_SCHED - int depth; - struct sched_entity *parent; - /* rq on which this entity is (to be) queued: */ - struct cfs_rq *cfs_rq; - /* rq "owned" by this entity/group: */ - struct cfs_rq *my_q; -#endif - -#ifdef CONFIG_SMP - /* - * Per entity load average tracking. - * - * Put into separate cache line so it does not - * collide with read-mostly values above. - */ - struct sched_avg avg ____cacheline_aligned_in_smp; -#endif -}; - -struct sched_rt_entity { - struct list_head run_list; - unsigned long timeout; - unsigned long watchdog_stamp; - unsigned int time_slice; - unsigned short on_rq; - unsigned short on_list; - - struct sched_rt_entity *back; -#ifdef CONFIG_RT_GROUP_SCHED - struct sched_rt_entity *parent; - /* rq on which this entity is (to be) queued: */ - struct rt_rq *rt_rq; - /* rq "owned" by this entity/group: */ - struct rt_rq *my_q; -#endif -}; - -struct sched_dl_entity { - struct rb_node rb_node; - - /* - * Original scheduling parameters. Copied here from sched_attr - * during sched_setattr(), they will remain the same until - * the next sched_setattr(). - */ - u64 dl_runtime; /* maximum runtime for each instance */ - u64 dl_deadline; /* relative deadline of each instance */ - u64 dl_period; /* separation of two instances (period) */ - u64 dl_bw; /* dl_runtime / dl_deadline */ - - /* - * Actual scheduling parameters. Initialized with the values above, - * they are continously updated during task execution. Note that - * the remaining runtime could be < 0 in case we are in overrun. - */ - s64 runtime; /* remaining runtime for this instance */ - u64 deadline; /* absolute deadline for this instance */ - unsigned int flags; /* specifying the scheduler behaviour */ - - /* - * Some bool flags: - * - * @dl_throttled tells if we exhausted the runtime. If so, the - * task has to wait for a replenishment to be performed at the - * next firing of dl_timer. - * - * @dl_boosted tells if we are boosted due to DI. If so we are - * outside bandwidth enforcement mechanism (but only until we - * exit the critical section); - * - * @dl_yielded tells if task gave up the cpu before consuming - * all its available runtime during the last job. - */ - int dl_throttled, dl_boosted, dl_yielded; - - /* - * Bandwidth enforcement timer. Each -deadline task has its - * own bandwidth to be enforced, thus we need one timer per task. 
- */ - struct hrtimer dl_timer; -}; - -union rcu_special { - struct { - u8 blocked; - u8 need_qs; - u8 exp_need_qs; - u8 pad; /* Otherwise the compiler can store garbage here. */ - } b; /* Bits. */ - u32 s; /* Set of bits. */ -}; -struct rcu_node; - -enum perf_event_task_context { - perf_invalid_context = -1, - perf_hw_context = 0, - perf_sw_context, - perf_nr_task_contexts, -}; - -/* Track pages that require TLB flushes */ -struct tlbflush_unmap_batch { - /* - * Each bit set is a CPU that potentially has a TLB entry for one of - * the PFNs being flushed. See set_tlb_ubc_flush_pending(). - */ - struct cpumask cpumask; - - /* True if any bit in cpumask is set */ - bool flush_required; - - /* - * If true then the PTE was dirty when unmapped. The entry must be - * flushed before IO is initiated or a stale TLB entry potentially - * allows an update without redirtying the page. - */ - bool writable; -}; - -struct task_struct { -#ifdef CONFIG_THREAD_INFO_IN_TASK - /* - * For reasons of header soup (see current_thread_info()), this - * must be the first element of task_struct. - */ - struct thread_info thread_info; -#endif - volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ - void *stack; - atomic_t usage; - unsigned int flags; /* per process flags, defined below */ - unsigned int ptrace; - -#ifdef CONFIG_SMP - struct llist_node wake_entry; - int on_cpu; -#ifdef CONFIG_THREAD_INFO_IN_TASK - unsigned int cpu; /* current CPU */ -#endif - unsigned int wakee_flips; - unsigned long wakee_flip_decay_ts; - struct task_struct *last_wakee; - - int wake_cpu; -#endif - int on_rq; - - int prio, static_prio, normal_prio; - unsigned int rt_priority; - const struct sched_class *sched_class; - struct sched_entity se; - struct sched_rt_entity rt; -#ifdef CONFIG_CGROUP_SCHED - struct task_group *sched_task_group; -#endif - struct sched_dl_entity dl; - -#ifdef CONFIG_PREEMPT_NOTIFIERS - /* list of struct preempt_notifier: */ - struct hlist_head preempt_notifiers; -#endif - -#ifdef CONFIG_BLK_DEV_IO_TRACE - unsigned int btrace_seq; -#endif - - unsigned int policy; - int nr_cpus_allowed; - cpumask_t cpus_allowed; - -#ifdef CONFIG_PREEMPT_RCU - int rcu_read_lock_nesting; - union rcu_special rcu_read_unlock_special; - struct list_head rcu_node_entry; - struct rcu_node *rcu_blocked_node; -#endif /* #ifdef CONFIG_PREEMPT_RCU */ -#ifdef CONFIG_TASKS_RCU - unsigned long rcu_tasks_nvcsw; - bool rcu_tasks_holdout; - struct list_head rcu_tasks_holdout_list; - int rcu_tasks_idle_cpu; -#endif /* #ifdef CONFIG_TASKS_RCU */ - -#ifdef CONFIG_SCHED_INFO - struct sched_info sched_info; -#endif - - struct list_head tasks; -#ifdef CONFIG_SMP - struct plist_node pushable_tasks; - struct rb_node pushable_dl_tasks; -#endif - - struct mm_struct *mm, *active_mm; - /* per-thread vma caching */ - u32 vmacache_seqnum; - struct vm_area_struct *vmacache[VMACACHE_SIZE]; -#if defined(SPLIT_RSS_COUNTING) - struct task_rss_stat rss_stat; -#endif -/* task state */ - int exit_state; - int exit_code, exit_signal; - int pdeath_signal; /* The signal sent when the parent dies */ - unsigned long jobctl; /* JOBCTL_*, siglock protected */ - - /* Used for emulating ABI behavior of previous Linux versions */ - unsigned int personality; - - /* scheduler bits, serialized by scheduler locks */ - unsigned sched_reset_on_fork:1; - unsigned sched_contributes_to_load:1; - unsigned sched_migrated:1; - unsigned sched_remote_wakeup:1; - unsigned :0; /* force alignment to the next boundary */ - - /* unserialized, strictly 'current' */ - unsigned 
in_execve:1; /* bit to tell LSMs we're in execve */ - unsigned in_iowait:1; -#if !defined(TIF_RESTORE_SIGMASK) - unsigned restore_sigmask:1; -#endif -#ifdef CONFIG_MEMCG - unsigned memcg_may_oom:1; -#ifndef CONFIG_SLOB - unsigned memcg_kmem_skip_account:1; -#endif -#endif -#ifdef CONFIG_COMPAT_BRK - unsigned brk_randomized:1; -#endif - - unsigned long atomic_flags; /* Flags needing atomic access. */ - - struct restart_block restart_block; - - pid_t pid; - pid_t tgid; - -#ifdef CONFIG_CC_STACKPROTECTOR - /* Canary value for the -fstack-protector gcc feature */ - unsigned long stack_canary; -#endif - /* - * pointers to (original) parent process, youngest child, younger sibling, - * older sibling, respectively. (p->father can be replaced with - * p->real_parent->pid) - */ - struct task_struct __rcu *real_parent; /* real parent process */ - struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */ - /* - * children/sibling forms the list of my natural children - */ - struct list_head children; /* list of my children */ - struct list_head sibling; /* linkage in my parent's children list */ - struct task_struct *group_leader; /* threadgroup leader */ - - /* - * ptraced is the list of tasks this task is using ptrace on. - * This includes both natural children and PTRACE_ATTACH targets. - * p->ptrace_entry is p's link on the p->parent->ptraced list. - */ - struct list_head ptraced; - struct list_head ptrace_entry; - - /* PID/PID hash table linkage. */ - struct pid_link pids[PIDTYPE_MAX]; - struct list_head thread_group; - struct list_head thread_node; - - struct completion *vfork_done; /* for vfork() */ - int __user *set_child_tid; /* CLONE_CHILD_SETTID */ - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ - - u64 utime, stime; -#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME - u64 utimescaled, stimescaled; -#endif - u64 gtime; - struct prev_cputime prev_cputime; -#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN - seqcount_t vtime_seqcount; - unsigned long long vtime_snap; - enum { - /* Task is sleeping or running in a CPU with VTIME inactive */ - VTIME_INACTIVE = 0, - /* Task runs in userspace in a CPU with VTIME active */ - VTIME_USER, - /* Task runs in kernelspace in a CPU with VTIME active */ - VTIME_SYS, - } vtime_snap_whence; -#endif - -#ifdef CONFIG_NO_HZ_FULL - atomic_t tick_dep_mask; -#endif - unsigned long nvcsw, nivcsw; /* context switch counts */ - u64 start_time; /* monotonic time in nsec */ - u64 real_start_time; /* boot based time in nsec */ -/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */ - unsigned long min_flt, maj_flt; - -#ifdef CONFIG_POSIX_TIMERS - struct task_cputime cputime_expires; - struct list_head cpu_timers[3]; -#endif - -/* process credentials */ - const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */ - const struct cred __rcu *real_cred; /* objective and real subjective task - * credentials (COW) */ - const struct cred __rcu *cred; /* effective (overridable) subjective task - * credentials (COW) */ - char comm[TASK_COMM_LEN]; /* executable name excluding path - - access with [gs]et_task_comm (which lock - it with task_lock()) - - initialized normally by setup_new_exec */ -/* file system info */ - struct nameidata *nameidata; -#ifdef CONFIG_SYSVIPC -/* ipc stuff */ - struct sysv_sem sysvsem; - struct sysv_shm sysvshm; -#endif -#ifdef CONFIG_DETECT_HUNG_TASK -/* hung task detection */ - unsigned long last_switch_count; -#endif -/* filesystem information */ - struct fs_struct *fs; -/* open file 
information */ - struct files_struct *files; -/* namespaces */ - struct nsproxy *nsproxy; -/* signal handlers */ - struct signal_struct *signal; - struct sighand_struct *sighand; - - sigset_t blocked, real_blocked; - sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */ - struct sigpending pending; - - unsigned long sas_ss_sp; - size_t sas_ss_size; - unsigned sas_ss_flags; - - struct callback_head *task_works; - - struct audit_context *audit_context; -#ifdef CONFIG_AUDITSYSCALL - kuid_t loginuid; - unsigned int sessionid; -#endif - struct seccomp seccomp; - -/* Thread group tracking */ - u32 parent_exec_id; - u32 self_exec_id; -/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, - * mempolicy */ - spinlock_t alloc_lock; - - /* Protection of the PI data structures: */ - raw_spinlock_t pi_lock; - - struct wake_q_node wake_q; - -#ifdef CONFIG_RT_MUTEXES - /* PI waiters blocked on a rt_mutex held by this task */ - struct rb_root pi_waiters; - struct rb_node *pi_waiters_leftmost; - /* Deadlock detection and priority inheritance handling */ - struct rt_mutex_waiter *pi_blocked_on; -#endif - -#ifdef CONFIG_DEBUG_MUTEXES - /* mutex deadlock detection */ - struct mutex_waiter *blocked_on; -#endif -#ifdef CONFIG_TRACE_IRQFLAGS - unsigned int irq_events; - unsigned long hardirq_enable_ip; - unsigned long hardirq_disable_ip; - unsigned int hardirq_enable_event; - unsigned int hardirq_disable_event; - int hardirqs_enabled; - int hardirq_context; - unsigned long softirq_disable_ip; - unsigned long softirq_enable_ip; - unsigned int softirq_disable_event; - unsigned int softirq_enable_event; - int softirqs_enabled; - int softirq_context; -#endif -#ifdef CONFIG_LOCKDEP -# define MAX_LOCK_DEPTH 48UL - u64 curr_chain_key; - int lockdep_depth; - unsigned int lockdep_recursion; - struct held_lock held_locks[MAX_LOCK_DEPTH]; - gfp_t lockdep_reclaim_gfp; -#endif -#ifdef CONFIG_UBSAN - unsigned int in_ubsan; -#endif - -/* journalling filesystem info */ - void *journal_info; - -/* stacked block device info */ - struct bio_list *bio_list; - -#ifdef CONFIG_BLOCK -/* stack plugging */ - struct blk_plug *plug; -#endif - -/* VM state */ - struct reclaim_state *reclaim_state; + struct io_context *io_context; - struct backing_dev_info *backing_dev_info; + /* Ptrace state: */ + unsigned long ptrace_message; + siginfo_t *last_siginfo; - struct io_context *io_context; - - unsigned long ptrace_message; - siginfo_t *last_siginfo; /* For ptrace use. 
*/ - struct task_io_accounting ioac; -#if defined(CONFIG_TASK_XACCT) - u64 acct_rss_mem1; /* accumulated rss usage */ - u64 acct_vm_mem1; /* accumulated virtual memory usage */ - u64 acct_timexpd; /* stime + utime since last update */ + struct task_io_accounting ioac; +#ifdef CONFIG_TASK_XACCT + /* Accumulated RSS usage: */ + u64 acct_rss_mem1; + /* Accumulated virtual memory usage: */ + u64 acct_vm_mem1; + /* stime + utime since last update: */ + u64 acct_timexpd; #endif #ifdef CONFIG_CPUSETS - nodemask_t mems_allowed; /* Protected by alloc_lock */ - seqcount_t mems_allowed_seq; /* Seqence no to catch updates */ - int cpuset_mem_spread_rotor; - int cpuset_slab_spread_rotor; + /* Protected by ->alloc_lock: */ + nodemask_t mems_allowed; + /* Seqence number to catch updates: */ + seqcount_t mems_allowed_seq; + int cpuset_mem_spread_rotor; + int cpuset_slab_spread_rotor; #endif #ifdef CONFIG_CGROUPS - /* Control Group info protected by css_set_lock */ - struct css_set __rcu *cgroups; - /* cg_list protected by css_set_lock and tsk->alloc_lock */ - struct list_head cg_list; + /* Control Group info protected by css_set_lock: */ + struct css_set __rcu *cgroups; + /* cg_list protected by css_set_lock and tsk->alloc_lock: */ + struct list_head cg_list; #endif #ifdef CONFIG_INTEL_RDT_A - int closid; + int closid; #endif #ifdef CONFIG_FUTEX - struct robust_list_head __user *robust_list; + struct robust_list_head __user *robust_list; #ifdef CONFIG_COMPAT struct compat_robust_list_head __user *compat_robust_list; #endif - struct list_head pi_state_list; - struct futex_pi_state *pi_state_cache; + struct list_head pi_state_list; + struct futex_pi_state *pi_state_cache; #endif #ifdef CONFIG_PERF_EVENTS - struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts]; - struct mutex perf_event_mutex; - struct list_head perf_event_list; + struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts]; + struct mutex perf_event_mutex; + struct list_head perf_event_list; #endif #ifdef CONFIG_DEBUG_PREEMPT - unsigned long preempt_disable_ip; + unsigned long preempt_disable_ip; #endif #ifdef CONFIG_NUMA - struct mempolicy *mempolicy; /* Protected by alloc_lock */ - short il_next; - short pref_node_fork; + /* Protected by alloc_lock: */ + struct mempolicy *mempolicy; + short il_next; + short pref_node_fork; #endif #ifdef CONFIG_NUMA_BALANCING - int numa_scan_seq; - unsigned int numa_scan_period; - unsigned int numa_scan_period_max; - int numa_preferred_nid; - unsigned long numa_migrate_retry; - u64 node_stamp; /* migration stamp */ - u64 last_task_numa_placement; - u64 last_sum_exec_runtime; - struct callback_head numa_work; - - struct list_head numa_entry; - struct numa_group *numa_group; - - /* - * numa_faults is an array split into four regions: - * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer - * in this precise order. - * - * faults_memory: Exponential decaying average of faults on a per-node - * basis. Scheduling placement decisions are made based on these - * counts. The values remain static for the duration of a PTE scan. - * faults_cpu: Track the nodes the process was running on when a NUMA - * hinting fault was incurred. - * faults_memory_buffer and faults_cpu_buffer: Record faults per node - * during the current scan window. When the scan completes, the counts - * in faults_memory and faults_cpu decay and these values are copied. 
- */ - unsigned long *numa_faults; - unsigned long total_numa_faults; - - /* - * numa_faults_locality tracks if faults recorded during the last - * scan window were remote/local or failed to migrate. The task scan - * period is adapted based on the locality of the faults with different - * weights depending on whether they were shared or private faults - */ - unsigned long numa_faults_locality[3]; + int numa_scan_seq; + unsigned int numa_scan_period; + unsigned int numa_scan_period_max; + int numa_preferred_nid; + unsigned long numa_migrate_retry; + /* Migration stamp: */ + u64 node_stamp; + u64 last_task_numa_placement; + u64 last_sum_exec_runtime; + struct callback_head numa_work; + + struct list_head numa_entry; + struct numa_group *numa_group; - unsigned long numa_pages_migrated; -#endif /* CONFIG_NUMA_BALANCING */ - -#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH - struct tlbflush_unmap_batch tlb_ubc; -#endif - - struct rcu_head rcu; - - /* - * cache last used pipe for splice - */ - struct pipe_inode_info *splice_pipe; - - struct page_frag task_frag; - -#ifdef CONFIG_TASK_DELAY_ACCT - struct task_delay_info *delays; -#endif -#ifdef CONFIG_FAULT_INJECTION - int make_it_fail; -#endif - /* - * when (nr_dirtied >= nr_dirtied_pause), it's time to call - * balance_dirty_pages() for some dirty throttling pause - */ - int nr_dirtied; - int nr_dirtied_pause; - unsigned long dirty_paused_when; /* start of a write-and-pause period */ - -#ifdef CONFIG_LATENCYTOP - int latency_record_count; - struct latency_record latency_record[LT_SAVECOUNT]; -#endif - /* - * time slack values; these are used to round up poll() and - * select() etc timeout values. These are in nanoseconds. - */ - u64 timer_slack_ns; - u64 default_timer_slack_ns; - -#ifdef CONFIG_KASAN - unsigned int kasan_depth; -#endif -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - /* Index of current stored address in ret_stack */ - int curr_ret_stack; - /* Stack of return addresses for return function tracing */ - struct ftrace_ret_stack *ret_stack; - /* time stamp for last schedule */ - unsigned long long ftrace_timestamp; - /* - * Number of functions that haven't been traced - * because of depth overrun. - */ - atomic_t trace_overrun; - /* Pause for the tracing */ - atomic_t tracing_graph_pause; -#endif -#ifdef CONFIG_TRACING - /* state flags for use by tracers */ - unsigned long trace; - /* bitmask and counter of trace recursion */ - unsigned long trace_recursion; -#endif /* CONFIG_TRACING */ -#ifdef CONFIG_KCOV - /* Coverage collection mode enabled for this task (0 if disabled). */ - enum kcov_mode kcov_mode; - /* Size of the kcov_area. */ - unsigned kcov_size; - /* Buffer for coverage collection. */ - void *kcov_area; - /* kcov desciptor wired with this task or NULL. */ - struct kcov *kcov; -#endif -#ifdef CONFIG_MEMCG - struct mem_cgroup *memcg_in_oom; - gfp_t memcg_oom_gfp_mask; - int memcg_oom_order; - - /* number of pages to reclaim on returning to userland */ - unsigned int memcg_nr_pages_over_high; -#endif -#ifdef CONFIG_UPROBES - struct uprobe_task *utask; -#endif -#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE) - unsigned int sequential_io; - unsigned int sequential_io_avg; -#endif -#ifdef CONFIG_DEBUG_ATOMIC_SLEEP - unsigned long task_state_change; -#endif - int pagefault_disabled; -#ifdef CONFIG_MMU - struct task_struct *oom_reaper_list; -#endif -#ifdef CONFIG_VMAP_STACK - struct vm_struct *stack_vm_area; -#endif -#ifdef CONFIG_THREAD_INFO_IN_TASK - /* A live task holds one reference. 
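timer_slack_ns above is the per-task slack, in nanoseconds, used to round up poll()/select() style timeouts; userspace adjusts it with prctl(PR_SET_TIMERSLACK), as in this small sketch:

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
        /* Allow the kernel to round up this task's timeouts by up to 50 us. */
        if (prctl(PR_SET_TIMERSLACK, 50000UL) == -1) {
                perror("prctl(PR_SET_TIMERSLACK)");
                return 1;
        }
        printf("timer slack is now %d ns\n", prctl(PR_GET_TIMERSLACK));
        return 0;
}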
*/ - atomic_t stack_refcount; -#endif -/* CPU-specific state of this task */ - struct thread_struct thread; -/* - * WARNING: on x86, 'thread_struct' contains a variable-sized - * structure. It *MUST* be at the end of 'task_struct'. - * - * Do not put anything below here! - */ -}; - -#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT -extern int arch_task_struct_size __read_mostly; -#else -# define arch_task_struct_size (sizeof(struct task_struct)) -#endif - -#ifdef CONFIG_VMAP_STACK -static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) -{ - return t->stack_vm_area; -} -#else -static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) -{ - return NULL; -} -#endif - -/* Future-safe accessor for struct task_struct's cpus_allowed. */ -#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) - -static inline int tsk_nr_cpus_allowed(struct task_struct *p) -{ - return p->nr_cpus_allowed; -} - -#define TNF_MIGRATED 0x01 -#define TNF_NO_GROUP 0x02 -#define TNF_SHARED 0x04 -#define TNF_FAULT_LOCAL 0x08 -#define TNF_MIGRATE_FAIL 0x10 - -static inline bool in_vfork(struct task_struct *tsk) -{ - bool ret; - - /* - * need RCU to access ->real_parent if CLONE_VM was used along with - * CLONE_PARENT. - * - * We check real_parent->mm == tsk->mm because CLONE_VFORK does not - * imply CLONE_VM - * - * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus - * ->real_parent is not necessarily the task doing vfork(), so in - * theory we can't rely on task_lock() if we want to dereference it. - * - * And in this case we can't trust the real_parent->mm == tsk->mm - * check, it can be false negative. But we do not care, if init or - * another oom-unkillable task does this it should blame itself. - */ - rcu_read_lock(); - ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm; - rcu_read_unlock(); - - return ret; -} - -#ifdef CONFIG_NUMA_BALANCING -extern void task_numa_fault(int last_node, int node, int pages, int flags); -extern pid_t task_numa_group_id(struct task_struct *p); -extern void set_numabalancing_state(bool enabled); -extern void task_numa_free(struct task_struct *p); -extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page, - int src_nid, int dst_cpu); -#else -static inline void task_numa_fault(int last_node, int node, int pages, - int flags) -{ -} -static inline pid_t task_numa_group_id(struct task_struct *p) -{ - return 0; -} -static inline void set_numabalancing_state(bool enabled) -{ -} -static inline void task_numa_free(struct task_struct *p) -{ -} -static inline bool should_numa_migrate_memory(struct task_struct *p, - struct page *page, int src_nid, int dst_cpu) -{ - return true; -} -#endif - -static inline struct pid *task_pid(struct task_struct *task) -{ - return task->pids[PIDTYPE_PID].pid; -} - -static inline struct pid *task_tgid(struct task_struct *task) -{ - return task->group_leader->pids[PIDTYPE_PID].pid; -} - -/* - * Without tasklist or rcu lock it is not safe to dereference - * the result of task_pgrp/task_session even if task == current, - * we can race with another thread doing sys_setsid/sys_setpgid. 
- */ -static inline struct pid *task_pgrp(struct task_struct *task) -{ - return task->group_leader->pids[PIDTYPE_PGID].pid; -} - -static inline struct pid *task_session(struct task_struct *task) -{ - return task->group_leader->pids[PIDTYPE_SID].pid; -} - -struct pid_namespace; - -/* - * the helpers to get the task's different pids as they are seen - * from various namespaces - * - * task_xid_nr() : global id, i.e. the id seen from the init namespace; - * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of - * current. - * task_xid_nr_ns() : id seen from the ns specified; - * - * set_task_vxid() : assigns a virtual id to a task; - * - * see also pid_nr() etc in include/linux/pid.h - */ -pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, - struct pid_namespace *ns); - -static inline pid_t task_pid_nr(struct task_struct *tsk) -{ - return tsk->pid; -} - -static inline pid_t task_pid_nr_ns(struct task_struct *tsk, - struct pid_namespace *ns) -{ - return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns); -} - -static inline pid_t task_pid_vnr(struct task_struct *tsk) -{ - return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL); -} - - -static inline pid_t task_tgid_nr(struct task_struct *tsk) -{ - return tsk->tgid; -} - -pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); - -static inline pid_t task_tgid_vnr(struct task_struct *tsk) -{ - return pid_vnr(task_tgid(tsk)); -} - - -static inline int pid_alive(const struct task_struct *p); -static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns) -{ - pid_t pid = 0; - - rcu_read_lock(); - if (pid_alive(tsk)) - pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns); - rcu_read_unlock(); - - return pid; -} - -static inline pid_t task_ppid_nr(const struct task_struct *tsk) -{ - return task_ppid_nr_ns(tsk, &init_pid_ns); -} - -static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, - struct pid_namespace *ns) -{ - return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns); -} - -static inline pid_t task_pgrp_vnr(struct task_struct *tsk) -{ - return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL); -} - - -static inline pid_t task_session_nr_ns(struct task_struct *tsk, - struct pid_namespace *ns) -{ - return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns); -} - -static inline pid_t task_session_vnr(struct task_struct *tsk) -{ - return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL); -} - -/* obsolete, do not use */ -static inline pid_t task_pgrp_nr(struct task_struct *tsk) -{ - return task_pgrp_nr_ns(tsk, &init_pid_ns); -} - -/** - * pid_alive - check that a task structure is not stale - * @p: Task structure to be checked. - * - * Test if a process is not yet dead (at most zombie state) - * If pid_alive fails, then pointers within the task structure - * can be stale and must not be dereferenced. - * - * Return: 1 if the process is alive. 0 otherwise. - */ -static inline int pid_alive(const struct task_struct *p) -{ - return p->pids[PIDTYPE_PID].pid != NULL; -} - -/** - * is_global_init - check if a task structure is init. Since init - * is free to have sub-threads we need to check tgid. - * @tsk: Task structure to be checked. - * - * Check if a task structure is the first user space task the kernel created. - * - * Return: 1 if the task structure is init. 0 otherwise. 
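The pid helpers above distinguish the global id, the id in a caller-chosen namespace, and the id as seen from current's namespace. The kernel-style sketch below (not a standalone program; the function name is made up) shows how they are typically combined:

/* Illustrative only. */
static void example_report_ids(struct task_struct *tsk)
{
        pr_info("%s: global pid=%d tgid=%d, pid as seen by current=%d, pgrp=%d\n",
                tsk->comm,
                task_pid_nr(tsk), task_tgid_nr(tsk),
                task_pid_vnr(tsk), task_pgrp_vnr(tsk));
}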
- */ -static inline int is_global_init(struct task_struct *tsk) -{ - return task_tgid_nr(tsk) == 1; -} - -extern struct pid *cad_pid; - -extern void free_task(struct task_struct *tsk); -#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) - -extern void __put_task_struct(struct task_struct *t); - -static inline void put_task_struct(struct task_struct *t) -{ - if (atomic_dec_and_test(&t->usage)) - __put_task_struct(t); -} - -struct task_struct *task_rcu_dereference(struct task_struct **ptask); -struct task_struct *try_get_task_struct(struct task_struct **ptask); - -#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN -extern void task_cputime(struct task_struct *t, - u64 *utime, u64 *stime); -extern u64 task_gtime(struct task_struct *t); -#else -static inline void task_cputime(struct task_struct *t, - u64 *utime, u64 *stime) -{ - *utime = t->utime; - *stime = t->stime; -} - -static inline u64 task_gtime(struct task_struct *t) -{ - return t->gtime; -} -#endif - -#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME -static inline void task_cputime_scaled(struct task_struct *t, - u64 *utimescaled, - u64 *stimescaled) -{ - *utimescaled = t->utimescaled; - *stimescaled = t->stimescaled; -} -#else -static inline void task_cputime_scaled(struct task_struct *t, - u64 *utimescaled, - u64 *stimescaled) -{ - task_cputime(t, utimescaled, stimescaled); -} -#endif - -extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st); -extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st); - -/* - * Per process flags - */ -#define PF_IDLE 0x00000002 /* I am an IDLE thread */ -#define PF_EXITING 0x00000004 /* getting shut down */ -#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ -#define PF_VCPU 0x00000010 /* I'm a virtual CPU */ -#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ -#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */ -#define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */ -#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */ -#define PF_DUMPCORE 0x00000200 /* dumped core */ -#define PF_SIGNALED 0x00000400 /* killed by a signal */ -#define PF_MEMALLOC 0x00000800 /* Allocating memory */ -#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */ -#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */ -#define PF_USED_ASYNC 0x00004000 /* used async_schedule*(), used by module init */ -#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */ -#define PF_FROZEN 0x00010000 /* frozen for system suspend */ -#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ -#define PF_KSWAPD 0x00040000 /* I am kswapd */ -#define PF_MEMALLOC_NOIO 0x00080000 /* Allocating memory without IO involved */ -#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ -#define PF_KTHREAD 0x00200000 /* I am a kernel thread */ -#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */ -#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ -#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ -#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ -#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ -#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ -#define PF_SUSPEND_TASK 0x80000000 /* this thread called freeze_processes and should not be frozen */ - -/* - * Only the _current_ task can 
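get_task_struct()/put_task_struct() above implement plain reference counting on task_struct::usage, and task_cputime() is the accessor for utime/stime. A kernel-style sketch of the pairing (not standalone; the function name is made up):

/* Illustrative only. */
static u64 example_sample_cputime(struct task_struct *tsk)
{
        u64 utime, stime;

        get_task_struct(tsk);           /* atomic_inc(&tsk->usage) */
        task_cputime(tsk, &utime, &stime);
        put_task_struct(tsk);           /* frees the task if this was the last reference */

        return utime + stime;
}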
read/write to tsk->flags, but other - * tasks can access tsk->flags in readonly mode for example - * with tsk_used_math (like during threaded core dumping). - * There is however an exception to this rule during ptrace - * or during fork: the ptracer task is allowed to write to the - * child->flags of its traced child (same goes for fork, the parent - * can write to the child->flags), because we're guaranteed the - * child is not running and in turn not changing child->flags - * at the same time the parent does it. - */ -#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0) -#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0) -#define clear_used_math() clear_stopped_child_used_math(current) -#define set_used_math() set_stopped_child_used_math(current) -#define conditional_stopped_child_used_math(condition, child) \ - do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0) -#define conditional_used_math(condition) \ - conditional_stopped_child_used_math(condition, current) -#define copy_to_stopped_child_used_math(child) \ - do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0) -/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */ -#define tsk_used_math(p) ((p)->flags & PF_USED_MATH) -#define used_math() tsk_used_math(current) - -/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags - * __GFP_FS is also cleared as it implies __GFP_IO. - */ -static inline gfp_t memalloc_noio_flags(gfp_t flags) -{ - if (unlikely(current->flags & PF_MEMALLOC_NOIO)) - flags &= ~(__GFP_IO | __GFP_FS); - return flags; -} - -static inline unsigned int memalloc_noio_save(void) -{ - unsigned int flags = current->flags & PF_MEMALLOC_NOIO; - current->flags |= PF_MEMALLOC_NOIO; - return flags; -} - -static inline void memalloc_noio_restore(unsigned int flags) -{ - current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags; -} - -/* Per-process atomic flags. */ -#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. 
*/ -#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ -#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ -#define PFA_LMK_WAITING 3 /* Lowmemorykiller is waiting */ - - -#define TASK_PFA_TEST(name, func) \ - static inline bool task_##func(struct task_struct *p) \ - { return test_bit(PFA_##name, &p->atomic_flags); } -#define TASK_PFA_SET(name, func) \ - static inline void task_set_##func(struct task_struct *p) \ - { set_bit(PFA_##name, &p->atomic_flags); } -#define TASK_PFA_CLEAR(name, func) \ - static inline void task_clear_##func(struct task_struct *p) \ - { clear_bit(PFA_##name, &p->atomic_flags); } - -TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs) -TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs) - -TASK_PFA_TEST(SPREAD_PAGE, spread_page) -TASK_PFA_SET(SPREAD_PAGE, spread_page) -TASK_PFA_CLEAR(SPREAD_PAGE, spread_page) - -TASK_PFA_TEST(SPREAD_SLAB, spread_slab) -TASK_PFA_SET(SPREAD_SLAB, spread_slab) -TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab) - -TASK_PFA_TEST(LMK_WAITING, lmk_waiting) -TASK_PFA_SET(LMK_WAITING, lmk_waiting) - -/* - * task->jobctl flags - */ -#define JOBCTL_STOP_SIGMASK 0xffff /* signr of the last group stop */ - -#define JOBCTL_STOP_DEQUEUED_BIT 16 /* stop signal dequeued */ -#define JOBCTL_STOP_PENDING_BIT 17 /* task should stop for group stop */ -#define JOBCTL_STOP_CONSUME_BIT 18 /* consume group stop count */ -#define JOBCTL_TRAP_STOP_BIT 19 /* trap for STOP */ -#define JOBCTL_TRAP_NOTIFY_BIT 20 /* trap for NOTIFY */ -#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */ -#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */ - -#define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT) -#define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT) -#define JOBCTL_STOP_CONSUME (1UL << JOBCTL_STOP_CONSUME_BIT) -#define JOBCTL_TRAP_STOP (1UL << JOBCTL_TRAP_STOP_BIT) -#define JOBCTL_TRAP_NOTIFY (1UL << JOBCTL_TRAP_NOTIFY_BIT) -#define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT) -#define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT) - -#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY) -#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK) - -extern bool task_set_jobctl_pending(struct task_struct *task, - unsigned long mask); -extern void task_clear_jobctl_trapping(struct task_struct *task); -extern void task_clear_jobctl_pending(struct task_struct *task, - unsigned long mask); - -static inline void rcu_copy_process(struct task_struct *p) -{ -#ifdef CONFIG_PREEMPT_RCU - p->rcu_read_lock_nesting = 0; - p->rcu_read_unlock_special.s = 0; - p->rcu_blocked_node = NULL; - INIT_LIST_HEAD(&p->rcu_node_entry); -#endif /* #ifdef CONFIG_PREEMPT_RCU */ -#ifdef CONFIG_TASKS_RCU - p->rcu_tasks_holdout = false; - INIT_LIST_HEAD(&p->rcu_tasks_holdout_list); - p->rcu_tasks_idle_cpu = -1; -#endif /* #ifdef CONFIG_TASKS_RCU */ -} - -static inline void tsk_restore_flags(struct task_struct *task, - unsigned long orig_flags, unsigned long flags) -{ - task->flags &= ~flags; - task->flags |= orig_flags & flags; -} - -extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, - const struct cpumask *trial); -extern int task_can_attach(struct task_struct *p, - const struct cpumask *cs_cpus_allowed); -#ifdef CONFIG_SMP -extern void do_set_cpus_allowed(struct task_struct *p, - const struct cpumask *new_mask); - -extern int set_cpus_allowed_ptr(struct task_struct *p, - const struct cpumask *new_mask); -#else -static inline void do_set_cpus_allowed(struct task_struct *p, - const struct cpumask *new_mask) -{ -} -static inline 
int set_cpus_allowed_ptr(struct task_struct *p, - const struct cpumask *new_mask) -{ - if (!cpumask_test_cpu(0, new_mask)) - return -EINVAL; - return 0; -} -#endif - -#ifdef CONFIG_NO_HZ_COMMON -void calc_load_enter_idle(void); -void calc_load_exit_idle(void); -#else -static inline void calc_load_enter_idle(void) { } -static inline void calc_load_exit_idle(void) { } -#endif /* CONFIG_NO_HZ_COMMON */ - -#ifndef cpu_relax_yield -#define cpu_relax_yield() cpu_relax() -#endif - -/* - * Do not use outside of architecture code which knows its limitations. - * - * sched_clock() has no promise of monotonicity or bounded drift between - * CPUs, use (which you should not) requires disabling IRQs. - * - * Please use one of the three interfaces below. - */ -extern unsigned long long notrace sched_clock(void); -/* - * See the comment in kernel/sched/clock.c - */ -extern u64 running_clock(void); -extern u64 sched_clock_cpu(int cpu); - - -extern void sched_clock_init(void); - -#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK -static inline void sched_clock_init_late(void) -{ -} - -static inline void sched_clock_tick(void) -{ -} - -static inline void clear_sched_clock_stable(void) -{ -} - -static inline void sched_clock_idle_sleep_event(void) -{ -} - -static inline void sched_clock_idle_wakeup_event(u64 delta_ns) -{ -} - -static inline u64 cpu_clock(int cpu) -{ - return sched_clock(); -} - -static inline u64 local_clock(void) -{ - return sched_clock(); -} -#else -extern void sched_clock_init_late(void); -/* - * Architectures can set this to 1 if they have specified - * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig, - * but then during bootup it turns out that sched_clock() - * is reliable after all: - */ -extern int sched_clock_stable(void); -extern void clear_sched_clock_stable(void); - -extern void sched_clock_tick(void); -extern void sched_clock_idle_sleep_event(void); -extern void sched_clock_idle_wakeup_event(u64 delta_ns); - -/* - * As outlined in clock.c, provides a fast, high resolution, nanosecond - * time source that is monotonic per cpu argument and has bounded drift - * between cpus. - * - * ######################### BIG FAT WARNING ########################## - * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can # - * # go backwards !! # - * #################################################################### - */ -static inline u64 cpu_clock(int cpu) -{ - return sched_clock_cpu(cpu); -} - -static inline u64 local_clock(void) -{ - return sched_clock_cpu(raw_smp_processor_id()); -} -#endif - -#ifdef CONFIG_IRQ_TIME_ACCOUNTING -/* - * An i/f to runtime opt-in for irq time accounting based off of sched_clock. - * The reason for this explicit opt-in is not to have perf penalty with - * slow sched_clocks. 
- */ -extern void enable_sched_clock_irqtime(void); -extern void disable_sched_clock_irqtime(void); -#else -static inline void enable_sched_clock_irqtime(void) {} -static inline void disable_sched_clock_irqtime(void) {} -#endif - -extern unsigned long long -task_sched_runtime(struct task_struct *task); - -/* sched_exec is called by processes performing an exec */ -#ifdef CONFIG_SMP -extern void sched_exec(void); -#else -#define sched_exec() {} -#endif - -extern void sched_clock_idle_sleep_event(void); -extern void sched_clock_idle_wakeup_event(u64 delta_ns); - -#ifdef CONFIG_HOTPLUG_CPU -extern void idle_task_exit(void); -#else -static inline void idle_task_exit(void) {} -#endif - -#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP) -extern void wake_up_nohz_cpu(int cpu); -#else -static inline void wake_up_nohz_cpu(int cpu) { } -#endif - -#ifdef CONFIG_NO_HZ_FULL -extern u64 scheduler_tick_max_deferment(void); -#endif - -#ifdef CONFIG_SCHED_AUTOGROUP -extern void sched_autogroup_create_attach(struct task_struct *p); -extern void sched_autogroup_detach(struct task_struct *p); -extern void sched_autogroup_fork(struct signal_struct *sig); -extern void sched_autogroup_exit(struct signal_struct *sig); -extern void sched_autogroup_exit_task(struct task_struct *p); -#ifdef CONFIG_PROC_FS -extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m); -extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice); -#endif -#else -static inline void sched_autogroup_create_attach(struct task_struct *p) { } -static inline void sched_autogroup_detach(struct task_struct *p) { } -static inline void sched_autogroup_fork(struct signal_struct *sig) { } -static inline void sched_autogroup_exit(struct signal_struct *sig) { } -static inline void sched_autogroup_exit_task(struct task_struct *p) { } -#endif - -extern int yield_to(struct task_struct *p, bool preempt); -extern void set_user_nice(struct task_struct *p, long nice); -extern int task_prio(const struct task_struct *p); -/** - * task_nice - return the nice value of a given task. - * @p: the task in question. - * - * Return: The nice value [ -20 ... 0 ... 19 ]. - */ -static inline int task_nice(const struct task_struct *p) -{ - return PRIO_TO_NICE((p)->static_prio); -} -extern int can_nice(const struct task_struct *p, const int nice); -extern int task_curr(const struct task_struct *p); -extern int idle_cpu(int cpu); -extern int sched_setscheduler(struct task_struct *, int, - const struct sched_param *); -extern int sched_setscheduler_nocheck(struct task_struct *, int, - const struct sched_param *); -extern int sched_setattr(struct task_struct *, - const struct sched_attr *); -extern struct task_struct *idle_task(int cpu); -/** - * is_idle_task - is the specified task an idle task? - * @p: the task in question. - * - * Return: 1 if @p is an idle task. 0 otherwise. - */ -static inline bool is_idle_task(const struct task_struct *p) -{ - return !!(p->flags & PF_IDLE); -} -extern struct task_struct *curr_task(int cpu); -extern void ia64_set_curr_task(int cpu, struct task_struct *p); - -void yield(void); - -union thread_union { -#ifndef CONFIG_THREAD_INFO_IN_TASK - struct thread_info thread_info; -#endif - unsigned long stack[THREAD_SIZE/sizeof(long)]; -}; + /* + * numa_faults is an array split into four regions: + * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer + * in this precise order. + * + * faults_memory: Exponential decaying average of faults on a per-node + * basis. 
Scheduling placement decisions are made based on these + * counts. The values remain static for the duration of a PTE scan. + * faults_cpu: Track the nodes the process was running on when a NUMA + * hinting fault was incurred. + * faults_memory_buffer and faults_cpu_buffer: Record faults per node + * during the current scan window. When the scan completes, the counts + * in faults_memory and faults_cpu decay and these values are copied. + */ + unsigned long *numa_faults; + unsigned long total_numa_faults; -#ifndef __HAVE_ARCH_KSTACK_END -static inline int kstack_end(void *addr) -{ - /* Reliable end of stack detection: - * Some APM bios versions misalign the stack + /* + * numa_faults_locality tracks if faults recorded during the last + * scan window were remote/local or failed to migrate. The task scan + * period is adapted based on the locality of the faults with different + * weights depending on whether they were shared or private faults */ - return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*))); -} -#endif + unsigned long numa_faults_locality[3]; -extern union thread_union init_thread_union; -extern struct task_struct init_task; + unsigned long numa_pages_migrated; +#endif /* CONFIG_NUMA_BALANCING */ -extern struct mm_struct init_mm; + struct tlbflush_unmap_batch tlb_ubc; -extern struct pid_namespace init_pid_ns; + struct rcu_head rcu; -/* - * find a task by one of its numerical ids - * - * find_task_by_pid_ns(): - * finds a task by its pid in the specified namespace - * find_task_by_vpid(): - * finds a task by its virtual pid - * - * see also find_vpid() etc in include/linux/pid.h - */ + /* Cache last used pipe for splice(): */ + struct pipe_inode_info *splice_pipe; -extern struct task_struct *find_task_by_vpid(pid_t nr); -extern struct task_struct *find_task_by_pid_ns(pid_t nr, - struct pid_namespace *ns); + struct page_frag task_frag; -/* per-UID process charging. */ -extern struct user_struct * alloc_uid(kuid_t); -static inline struct user_struct *get_uid(struct user_struct *u) -{ - atomic_inc(&u->__count); - return u; -} -extern void free_uid(struct user_struct *); +#ifdef CONFIG_TASK_DELAY_ACCT + struct task_delay_info *delays; +#endif -#include +#ifdef CONFIG_FAULT_INJECTION + int make_it_fail; +#endif + /* + * When (nr_dirtied >= nr_dirtied_pause), it's time to call + * balance_dirty_pages() for a dirty throttling pause: + */ + int nr_dirtied; + int nr_dirtied_pause; + /* Start of a write-and-pause period: */ + unsigned long dirty_paused_when; -extern void xtime_update(unsigned long ticks); +#ifdef CONFIG_LATENCYTOP + int latency_record_count; + struct latency_record latency_record[LT_SAVECOUNT]; +#endif + /* + * Time slack values; these are used to round up poll() and + * select() etc timeout values. These are in nanoseconds. 
+ */ + u64 timer_slack_ns; + u64 default_timer_slack_ns; -extern int wake_up_state(struct task_struct *tsk, unsigned int state); -extern int wake_up_process(struct task_struct *tsk); -extern void wake_up_new_task(struct task_struct *tsk); -#ifdef CONFIG_SMP - extern void kick_process(struct task_struct *tsk); -#else - static inline void kick_process(struct task_struct *tsk) { } +#ifdef CONFIG_KASAN + unsigned int kasan_depth; #endif -extern int sched_fork(unsigned long clone_flags, struct task_struct *p); -extern void sched_dead(struct task_struct *p); -extern void proc_caches_init(void); -extern void flush_signals(struct task_struct *); -extern void ignore_signals(struct task_struct *); -extern void flush_signal_handlers(struct task_struct *, int force_default); -extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + /* Index of current stored address in ret_stack: */ + int curr_ret_stack; -static inline int kernel_dequeue_signal(siginfo_t *info) -{ - struct task_struct *tsk = current; - siginfo_t __info; - int ret; + /* Stack of return addresses for return function tracing: */ + struct ftrace_ret_stack *ret_stack; - spin_lock_irq(&tsk->sighand->siglock); - ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info); - spin_unlock_irq(&tsk->sighand->siglock); + /* Timestamp for last schedule: */ + unsigned long long ftrace_timestamp; - return ret; -} + /* + * Number of functions that haven't been traced + * because of depth overrun: + */ + atomic_t trace_overrun; -static inline void kernel_signal_stop(void) -{ - spin_lock_irq(&current->sighand->siglock); - if (current->jobctl & JOBCTL_STOP_DEQUEUED) - __set_current_state(TASK_STOPPED); - spin_unlock_irq(&current->sighand->siglock); + /* Pause tracing: */ + atomic_t tracing_graph_pause; +#endif - schedule(); -} +#ifdef CONFIG_TRACING + /* State flags for use by tracers: */ + unsigned long trace; -extern void release_task(struct task_struct * p); -extern int send_sig_info(int, struct siginfo *, struct task_struct *); -extern int force_sigsegv(int, struct task_struct *); -extern int force_sig_info(int, struct siginfo *, struct task_struct *); -extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp); -extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid); -extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *, - const struct cred *, u32); -extern int kill_pgrp(struct pid *pid, int sig, int priv); -extern int kill_pid(struct pid *pid, int sig, int priv); -extern int kill_proc_info(int, struct siginfo *, pid_t); -extern __must_check bool do_notify_parent(struct task_struct *, int); -extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); -extern void force_sig(int, struct task_struct *); -extern int send_sig(int, struct task_struct *, int); -extern int zap_other_threads(struct task_struct *p); -extern struct sigqueue *sigqueue_alloc(void); -extern void sigqueue_free(struct sigqueue *); -extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group); -extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *); - -#ifdef TIF_RESTORE_SIGMASK -/* - * Legacy restore_sigmask accessors. These are inefficient on - * SMP architectures because they require atomic operations. 
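Illustrative sketch (not from the patch): the wake_up_process()/wake_up_state() declarations removed above pair with the usual prepare-to-sleep pattern. Kernel context assumed; wakeup_pending and the waiter task pointer are placeholders.

static bool wakeup_pending;	/* placeholder shared flag */

static void wait_for_event(void)
{
	set_current_state(TASK_INTERRUPTIBLE);	/* publish the state before re-checking */
	if (!wakeup_pending)
		schedule();			/* sleep until a waker runs */
	__set_current_state(TASK_RUNNING);
}

static void fire_event(struct task_struct *waiter)
{
	wakeup_pending = true;
	wake_up_process(waiter);	/* nonzero return means the task was actually woken */
}
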
- */ + /* Bitmask and counter of trace recursion: */ + unsigned long trace_recursion; +#endif /* CONFIG_TRACING */ -/** - * set_restore_sigmask() - make sure saved_sigmask processing gets done - * - * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code - * will run before returning to user mode, to process the flag. For - * all callers, TIF_SIGPENDING is already set or it's no harm to set - * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the - * arch code will notice on return to user mode, in case those bits - * are scarce. We set TIF_SIGPENDING here to ensure that the arch - * signal code always gets run when TIF_RESTORE_SIGMASK is set. - */ -static inline void set_restore_sigmask(void) -{ - set_thread_flag(TIF_RESTORE_SIGMASK); - WARN_ON(!test_thread_flag(TIF_SIGPENDING)); -} -static inline void clear_restore_sigmask(void) -{ - clear_thread_flag(TIF_RESTORE_SIGMASK); -} -static inline bool test_restore_sigmask(void) -{ - return test_thread_flag(TIF_RESTORE_SIGMASK); -} -static inline bool test_and_clear_restore_sigmask(void) -{ - return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK); -} +#ifdef CONFIG_KCOV + /* Coverage collection mode enabled for this task (0 if disabled): */ + enum kcov_mode kcov_mode; -#else /* TIF_RESTORE_SIGMASK */ + /* Size of the kcov_area: */ + unsigned int kcov_size; -/* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */ -static inline void set_restore_sigmask(void) -{ - current->restore_sigmask = true; - WARN_ON(!test_thread_flag(TIF_SIGPENDING)); -} -static inline void clear_restore_sigmask(void) -{ - current->restore_sigmask = false; -} -static inline bool test_restore_sigmask(void) -{ - return current->restore_sigmask; -} -static inline bool test_and_clear_restore_sigmask(void) -{ - if (!current->restore_sigmask) - return false; - current->restore_sigmask = false; - return true; -} -#endif + /* Buffer for coverage collection: */ + void *kcov_area; -static inline void restore_saved_sigmask(void) -{ - if (test_and_clear_restore_sigmask()) - __set_current_blocked(&current->saved_sigmask); -} + /* KCOV descriptor wired with this task or NULL: */ + struct kcov *kcov; +#endif -static inline sigset_t *sigmask_to_save(void) -{ - sigset_t *res = &current->blocked; - if (unlikely(test_restore_sigmask())) - res = &current->saved_sigmask; - return res; -} +#ifdef CONFIG_MEMCG + struct mem_cgroup *memcg_in_oom; + gfp_t memcg_oom_gfp_mask; + int memcg_oom_order; -static inline int kill_cad_pid(int sig, int priv) -{ - return kill_pid(cad_pid, sig, priv); -} + /* Number of pages to reclaim on returning to userland: */ + unsigned int memcg_nr_pages_over_high; +#endif -/* These can be the second arg to send_sig_info/send_group_sig_info. */ -#define SEND_SIG_NOINFO ((struct siginfo *) 0) -#define SEND_SIG_PRIV ((struct siginfo *) 1) -#define SEND_SIG_FORCED ((struct siginfo *) 2) +#ifdef CONFIG_UPROBES + struct uprobe_task *utask; +#endif +#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE) + unsigned int sequential_io; + unsigned int sequential_io_avg; +#endif +#ifdef CONFIG_DEBUG_ATOMIC_SLEEP + unsigned long task_state_change; +#endif + int pagefault_disabled; +#ifdef CONFIG_MMU + struct task_struct *oom_reaper_list; +#endif +#ifdef CONFIG_VMAP_STACK + struct vm_struct *stack_vm_area; +#endif +#ifdef CONFIG_THREAD_INFO_IN_TASK + /* A live task holds one reference: */ + atomic_t stack_refcount; +#endif + /* CPU-specific state of this task: */ + struct thread_struct thread; -/* - * True if we are on the alternate signal stack. 
- */ -static inline int on_sig_stack(unsigned long sp) -{ /* - * If the signal stack is SS_AUTODISARM then, by construction, we - * can't be on the signal stack unless user code deliberately set - * SS_AUTODISARM when we were already on it. + * WARNING: on x86, 'thread_struct' contains a variable-sized + * structure. It *MUST* be at the end of 'task_struct'. * - * This improves reliability: if user state gets corrupted such that - * the stack pointer points very close to the end of the signal stack, - * then this check will enable the signal to be handled anyway. + * Do not put anything below here! */ - if (current->sas_ss_flags & SS_AUTODISARM) - return 0; +}; -#ifdef CONFIG_STACK_GROWSUP - return sp >= current->sas_ss_sp && - sp - current->sas_ss_sp < current->sas_ss_size; -#else - return sp > current->sas_ss_sp && - sp - current->sas_ss_sp <= current->sas_ss_size; -#endif +static inline struct pid *task_pid(struct task_struct *task) +{ + return task->pids[PIDTYPE_PID].pid; } -static inline int sas_ss_flags(unsigned long sp) +static inline struct pid *task_tgid(struct task_struct *task) { - if (!current->sas_ss_size) - return SS_DISABLE; - - return on_sig_stack(sp) ? SS_ONSTACK : 0; + return task->group_leader->pids[PIDTYPE_PID].pid; } -static inline void sas_ss_reset(struct task_struct *p) +/* + * Without tasklist or RCU lock it is not safe to dereference + * the result of task_pgrp/task_session even if task == current, + * we can race with another thread doing sys_setsid/sys_setpgid. + */ +static inline struct pid *task_pgrp(struct task_struct *task) { - p->sas_ss_sp = 0; - p->sas_ss_size = 0; - p->sas_ss_flags = SS_DISABLE; + return task->group_leader->pids[PIDTYPE_PGID].pid; } -static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig) +static inline struct pid *task_session(struct task_struct *task) { - if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp)) -#ifdef CONFIG_STACK_GROWSUP - return current->sas_ss_sp; -#else - return current->sas_ss_sp + current->sas_ss_size; -#endif - return sp; + return task->group_leader->pids[PIDTYPE_SID].pid; } /* - * Routines for handling mm_structs - */ -extern struct mm_struct * mm_alloc(void); - -/** - * mmgrab() - Pin a &struct mm_struct. - * @mm: The &struct mm_struct to pin. - * - * Make sure that @mm will not get freed even after the owning task - * exits. This doesn't guarantee that the associated address space - * will still exist later on and mmget_not_zero() has to be used before - * accessing it. + * the helpers to get the task's different pids as they are seen + * from various namespaces * - * This is a preferred way to to pin @mm for a longer/unbounded amount - * of time. + * task_xid_nr() : global id, i.e. the id seen from the init namespace; + * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of + * current. + * task_xid_nr_ns() : id seen from the ns specified; * - * Use mmdrop() to release the reference acquired by mmgrab(). + * set_task_vxid() : assigns a virtual id to a task; * - * See also for an in-depth explanation - * of &mm_struct.mm_count vs &mm_struct.mm_users. 
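Illustrative sketch (not from the patch): the mmgrab() kernel-doc above distinguishes the mm_count pin from the mm_users pin. A minimal kernel-context example of the pattern it describes:

static void keep_mm_struct_alive(struct mm_struct *mm)
{
	mmgrab(mm);	/* pin the struct itself: mm_count is incremented */
	/*
	 * The mm_struct cannot be freed here, although its address space
	 * may already have been torn down; use mmget_not_zero() before
	 * touching the mappings themselves.
	 */
	mmdrop(mm);	/* drop the pin; the last drop frees the mm_struct */
}
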
+ * see also pid_nr() etc in include/linux/pid.h */ -static inline void mmgrab(struct mm_struct *mm) +pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns); + +static inline pid_t task_pid_nr(struct task_struct *tsk) +{ + return tsk->pid; +} + +static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) { - atomic_inc(&mm->mm_count); + return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns); } -/* mmdrop drops the mm and the page tables */ -extern void __mmdrop(struct mm_struct *); -static inline void mmdrop(struct mm_struct *mm) +static inline pid_t task_pid_vnr(struct task_struct *tsk) { - if (unlikely(atomic_dec_and_test(&mm->mm_count))) - __mmdrop(mm); + return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL); } -static inline void mmdrop_async_fn(struct work_struct *work) + +static inline pid_t task_tgid_nr(struct task_struct *tsk) { - struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work); - __mmdrop(mm); + return tsk->tgid; } -static inline void mmdrop_async(struct mm_struct *mm) +extern pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); + +static inline pid_t task_tgid_vnr(struct task_struct *tsk) { - if (unlikely(atomic_dec_and_test(&mm->mm_count))) { - INIT_WORK(&mm->async_put_work, mmdrop_async_fn); - schedule_work(&mm->async_put_work); - } + return pid_vnr(task_tgid(tsk)); } /** - * mmget() - Pin the address space associated with a &struct mm_struct. - * @mm: The address space to pin. - * - * Make sure that the address space of the given &struct mm_struct doesn't - * go away. This does not protect against parts of the address space being - * modified or freed, however. - * - * Never use this function to pin this address space for an - * unbounded/indefinite amount of time. + * pid_alive - check that a task structure is not stale + * @p: Task structure to be checked. * - * Use mmput() to release the reference acquired by mmget(). + * Test if a process is not yet dead (at most zombie state) + * If pid_alive fails, then pointers within the task structure + * can be stale and must not be dereferenced. * - * See also for an in-depth explanation - * of &mm_struct.mm_count vs &mm_struct.mm_users. + * Return: 1 if the process is alive. 0 otherwise. */ -static inline void mmget(struct mm_struct *mm) +static inline int pid_alive(const struct task_struct *p) { - atomic_inc(&mm->mm_users); + return p->pids[PIDTYPE_PID].pid != NULL; } -static inline bool mmget_not_zero(struct mm_struct *mm) +static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns) { - return atomic_inc_not_zero(&mm->mm_users); -} + pid_t pid = 0; -/* mmput gets rid of the mappings and all user-space */ -extern void mmput(struct mm_struct *); -#ifdef CONFIG_MMU -/* same as above but performs the slow path from the async context. Can - * be called from the atomic context as well - */ -extern void mmput_async(struct mm_struct *); -#endif + rcu_read_lock(); + if (pid_alive(tsk)) + pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns); + rcu_read_unlock(); -/* Grab a reference to a task's mm, if it is not already going away */ -extern struct mm_struct *get_task_mm(struct task_struct *task); -/* - * Grab a reference to a task's mm, if it is not already going away - * and ptrace_may_access with the mode parameter passed to it - * succeeds. 
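Illustrative sketch (not from the patch): the mm_users side of the same distinction, using the get_task_mm()/mmput() pair declared above; task is a placeholder pointer.

struct mm_struct *mm = get_task_mm(task);	/* NULL for kernel threads */
if (mm) {
	/* mm_users is elevated: the address space stays alive here. */
	mmput(mm);	/* release; the last user tears the mappings down */
}
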
- */ -extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode); -/* Remove the current tasks stale references to the old mm_struct */ -extern void mm_release(struct task_struct *, struct mm_struct *); + return pid; +} -#ifdef CONFIG_HAVE_COPY_THREAD_TLS -extern int copy_thread_tls(unsigned long, unsigned long, unsigned long, - struct task_struct *, unsigned long); -#else -extern int copy_thread(unsigned long, unsigned long, unsigned long, - struct task_struct *); - -/* Architectures that haven't opted into copy_thread_tls get the tls argument - * via pt_regs, so ignore the tls argument passed via C. */ -static inline int copy_thread_tls( - unsigned long clone_flags, unsigned long sp, unsigned long arg, - struct task_struct *p, unsigned long tls) +static inline pid_t task_ppid_nr(const struct task_struct *tsk) { - return copy_thread(clone_flags, sp, arg, p); + return task_ppid_nr_ns(tsk, &init_pid_ns); } -#endif -extern void flush_thread(void); -#ifdef CONFIG_HAVE_EXIT_THREAD -extern void exit_thread(struct task_struct *tsk); -#else -static inline void exit_thread(struct task_struct *tsk) +static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) { + return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns); } -#endif - -extern void exit_files(struct task_struct *); -extern void __cleanup_sighand(struct sighand_struct *); -extern void exit_itimers(struct signal_struct *); -extern void flush_itimer_signals(void); - -extern void do_group_exit(int); +static inline pid_t task_pgrp_vnr(struct task_struct *tsk) +{ + return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL); +} -extern int do_execve(struct filename *, - const char __user * const __user *, - const char __user * const __user *); -extern int do_execveat(int, struct filename *, - const char __user * const __user *, - const char __user * const __user *, - int); -extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long); -extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); -struct task_struct *fork_idle(int); -extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); -extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec); -static inline void set_task_comm(struct task_struct *tsk, const char *from) +static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) { - __set_task_comm(tsk, from, false); + return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns); } -extern char *get_task_comm(char *to, struct task_struct *tsk); -#ifdef CONFIG_SMP -void scheduler_ipi(void); -extern unsigned long wait_task_inactive(struct task_struct *, long match_state); -#else -static inline void scheduler_ipi(void) { } -static inline unsigned long wait_task_inactive(struct task_struct *p, - long match_state) +static inline pid_t task_session_vnr(struct task_struct *tsk) { - return 1; + return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL); } -#endif -#define tasklist_empty() \ - list_empty(&init_task.tasks) +/* Obsolete, do not use: */ +static inline pid_t task_pgrp_nr(struct task_struct *tsk) +{ + return task_pgrp_nr_ns(tsk, &init_pid_ns); +} -#define next_task(p) \ - list_entry_rcu((p)->tasks.next, struct task_struct, tasks) +/** + * is_global_init - check if a task structure is init. Since init + * is free to have sub-threads we need to check tgid. + * @tsk: Task structure to be checked. + * + * Check if a task structure is the first user space task the kernel created. 
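Illustrative sketch (not from the patch): how the task_*_nr()/_vnr()/_nr_ns() helpers above differ, assuming kernel context with a task pointer tsk and a pid namespace ns.

pid_t nr    = task_pid_nr(tsk);        /* global id, as seen from the init pid namespace */
pid_t vnr   = task_pid_vnr(tsk);       /* virtual id, as seen from current's pid namespace */
pid_t ns_nr = task_pid_nr_ns(tsk, ns); /* id as seen from the namespace 'ns' */
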
+ * + * Return: 1 if the task structure is init. 0 otherwise. + */ +static inline int is_global_init(struct task_struct *tsk) +{ + return task_tgid_nr(tsk) == 1; +} -#define for_each_process(p) \ - for (p = &init_task ; (p = next_task(p)) != &init_task ; ) +extern struct pid *cad_pid; -extern bool current_is_single_threaded(void); +/* + * Per process flags + */ +#define PF_IDLE 0x00000002 /* I am an IDLE thread */ +#define PF_EXITING 0x00000004 /* Getting shut down */ +#define PF_EXITPIDONE 0x00000008 /* PI exit done on shut down */ +#define PF_VCPU 0x00000010 /* I'm a virtual CPU */ +#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ +#define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */ +#define PF_MCE_PROCESS 0x00000080 /* Process policy on mce errors */ +#define PF_SUPERPRIV 0x00000100 /* Used super-user privileges */ +#define PF_DUMPCORE 0x00000200 /* Dumped core */ +#define PF_SIGNALED 0x00000400 /* Killed by a signal */ +#define PF_MEMALLOC 0x00000800 /* Allocating memory */ +#define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */ +#define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */ +#define PF_USED_ASYNC 0x00004000 /* Used async_schedule*(), used by module init */ +#define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */ +#define PF_FROZEN 0x00010000 /* Frozen for system suspend */ +#define PF_FSTRANS 0x00020000 /* Inside a filesystem transaction */ +#define PF_KSWAPD 0x00040000 /* I am kswapd */ +#define PF_MEMALLOC_NOIO 0x00080000 /* Allocating memory without IO involved */ +#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ +#define PF_KTHREAD 0x00200000 /* I am a kernel thread */ +#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ +#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ +#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ +#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ +#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ +#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ +#define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */ /* - * Careful: do_each_thread/while_each_thread is a double loop so - * 'break' will not work as expected - use goto instead. + * Only the _current_ task can read/write to tsk->flags, but other + * tasks can access tsk->flags in readonly mode for example + * with tsk_used_math (like during threaded core dumping). + * There is however an exception to this rule during ptrace + * or during fork: the ptracer task is allowed to write to the + * child->flags of its traced child (same goes for fork, the parent + * can write to the child->flags), because we're guaranteed the + * child is not running and in turn not changing child->flags + * at the same time the parent does it. 
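Illustrative sketch (not from the patch): typical use of the PF_* flags defined above, honouring the rule in the comment that only the current task writes its own flags.

static bool skip_for_kthreads(void)
{
	/* Any context may read the flags, e.g. to tell kernel threads apart: */
	if (current->flags & PF_KTHREAD)
		return true;

	/* Only the _current_ task may write its own flags: */
	current->flags |= PF_NOFREEZE;
	return false;
}
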
*/ -#define do_each_thread(g, t) \ - for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do +#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0) +#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0) +#define clear_used_math() clear_stopped_child_used_math(current) +#define set_used_math() set_stopped_child_used_math(current) + +#define conditional_stopped_child_used_math(condition, child) \ + do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0) -#define while_each_thread(g, t) \ - while ((t = next_thread(t)) != g) +#define conditional_used_math(condition) conditional_stopped_child_used_math(condition, current) -#define __for_each_thread(signal, t) \ - list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node) +#define copy_to_stopped_child_used_math(child) \ + do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0) -#define for_each_thread(p, t) \ - __for_each_thread((p)->signal, t) +/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */ +#define tsk_used_math(p) ((p)->flags & PF_USED_MATH) +#define used_math() tsk_used_math(current) -/* Careful: this is a double loop, 'break' won't work as expected. */ -#define for_each_process_thread(p, t) \ - for_each_process(p) for_each_thread(p, t) +/* Per-process atomic flags. */ +#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */ +#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ +#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ +#define PFA_LMK_WAITING 3 /* Lowmemorykiller is waiting */ -typedef int (*proc_visitor)(struct task_struct *p, void *data); -void walk_process_tree(struct task_struct *top, proc_visitor, void *); -static inline int get_nr_threads(struct task_struct *tsk) -{ - return tsk->signal->nr_threads; -} +#define TASK_PFA_TEST(name, func) \ + static inline bool task_##func(struct task_struct *p) \ + { return test_bit(PFA_##name, &p->atomic_flags); } -static inline bool thread_group_leader(struct task_struct *p) -{ - return p->exit_signal >= 0; -} +#define TASK_PFA_SET(name, func) \ + static inline void task_set_##func(struct task_struct *p) \ + { set_bit(PFA_##name, &p->atomic_flags); } -/* Do to the insanities of de_thread it is possible for a process - * to have the pid of the thread group leader without actually being - * the thread group leader. For iteration through the pids in proc - * all we care about is that we have a task with the appropriate - * pid, we don't actually care if we have the right task. 
- */ -static inline bool has_group_leader_pid(struct task_struct *p) -{ - return task_pid(p) == p->signal->leader_pid; -} +#define TASK_PFA_CLEAR(name, func) \ + static inline void task_clear_##func(struct task_struct *p) \ + { clear_bit(PFA_##name, &p->atomic_flags); } -static inline -bool same_thread_group(struct task_struct *p1, struct task_struct *p2) -{ - return p1->signal == p2->signal; -} +TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs) +TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs) -static inline struct task_struct *next_thread(const struct task_struct *p) -{ - return list_entry_rcu(p->thread_group.next, - struct task_struct, thread_group); -} +TASK_PFA_TEST(SPREAD_PAGE, spread_page) +TASK_PFA_SET(SPREAD_PAGE, spread_page) +TASK_PFA_CLEAR(SPREAD_PAGE, spread_page) -static inline int thread_group_empty(struct task_struct *p) -{ - return list_empty(&p->thread_group); -} +TASK_PFA_TEST(SPREAD_SLAB, spread_slab) +TASK_PFA_SET(SPREAD_SLAB, spread_slab) +TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab) -#define delay_group_leader(p) \ - (thread_group_leader(p) && !thread_group_empty(p)) +TASK_PFA_TEST(LMK_WAITING, lmk_waiting) +TASK_PFA_SET(LMK_WAITING, lmk_waiting) -/* - * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring - * subscriptions and synchronises with wait4(). Also used in procfs. Also - * pins the final release of task.io_context. Also protects ->cpuset and - * ->cgroup.subsys[]. And ->vfork_done. - * - * Nests both inside and outside of read_lock(&tasklist_lock). - * It must not be nested with write_lock_irq(&tasklist_lock), - * neither inside nor outside. - */ -static inline void task_lock(struct task_struct *p) +static inline void +tsk_restore_flags(struct task_struct *task, unsigned long orig_flags, unsigned long flags) { - spin_lock(&p->alloc_lock); + task->flags &= ~flags; + task->flags |= orig_flags & flags; } -static inline void task_unlock(struct task_struct *p) +extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); +extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed); +#ifdef CONFIG_SMP +extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask); +extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask); +#else +static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) { - spin_unlock(&p->alloc_lock); } - -extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, - unsigned long *flags); - -static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk, - unsigned long *flags) +static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) { - struct sighand_struct *ret; - - ret = __lock_task_sighand(tsk, flags); - (void)__cond_lock(&tsk->sighand->siglock, ret); - return ret; + if (!cpumask_test_cpu(0, new_mask)) + return -EINVAL; + return 0; } +#endif -static inline void unlock_task_sighand(struct task_struct *tsk, - unsigned long *flags) -{ - spin_unlock_irqrestore(&tsk->sighand->siglock, *flags); -} +#ifndef cpu_relax_yield +#define cpu_relax_yield() cpu_relax() +#endif + +extern int yield_to(struct task_struct *p, bool preempt); +extern void set_user_nice(struct task_struct *p, long nice); +extern int task_prio(const struct task_struct *p); /** - * threadgroup_change_begin - mark the beginning of changes to a threadgroup - * @tsk: task causing the changes + * task_nice - return the nice value of a given task. + * @p: the task in question. 
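Illustrative sketch (not from the patch): the TASK_PFA_TEST/SET/CLEAR() instantiations above expand into per-flag helpers; for PFA_NO_NEW_PRIVS only the test and set variants are generated, e.g.:

if (!task_no_new_privs(current))	/* test_bit(PFA_NO_NEW_PRIVS, ...) */
	task_set_no_new_privs(current);	/* set_bit(); no clear helper is instantiated above */
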
* - * All operations which modify a threadgroup - a new thread joining the - * group, death of a member thread (the assertion of PF_EXITING) and - * exec(2) dethreading the process and replacing the leader - are wrapped - * by threadgroup_change_{begin|end}(). This is to provide a place which - * subsystems needing threadgroup stability can hook into for - * synchronization. + * Return: The nice value [ -20 ... 0 ... 19 ]. */ -static inline void threadgroup_change_begin(struct task_struct *tsk) +static inline int task_nice(const struct task_struct *p) { - might_sleep(); - cgroup_threadgroup_change_begin(tsk); + return PRIO_TO_NICE((p)->static_prio); } +extern int can_nice(const struct task_struct *p, const int nice); +extern int task_curr(const struct task_struct *p); +extern int idle_cpu(int cpu); +extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *); +extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *); +extern int sched_setattr(struct task_struct *, const struct sched_attr *); +extern struct task_struct *idle_task(int cpu); + /** - * threadgroup_change_end - mark the end of changes to a threadgroup - * @tsk: task causing the changes + * is_idle_task - is the specified task an idle task? + * @p: the task in question. * - * See threadgroup_change_begin(). + * Return: 1 if @p is an idle task. 0 otherwise. */ -static inline void threadgroup_change_end(struct task_struct *tsk) +static inline bool is_idle_task(const struct task_struct *p) { - cgroup_threadgroup_change_end(tsk); + return !!(p->flags & PF_IDLE); } -#ifdef CONFIG_THREAD_INFO_IN_TASK - -static inline struct thread_info *task_thread_info(struct task_struct *task) -{ - return &task->thread_info; -} +extern struct task_struct *curr_task(int cpu); +extern void ia64_set_curr_task(int cpu, struct task_struct *p); -/* - * When accessing the stack of a non-current task that might exit, use - * try_get_task_stack() instead. task_stack_page will return a pointer - * that could get freed out from under you. - */ -static inline void *task_stack_page(const struct task_struct *task) -{ - return task->stack; -} +void yield(void); -#define setup_thread_stack(new,old) do { } while(0) +union thread_union { +#ifndef CONFIG_THREAD_INFO_IN_TASK + struct thread_info thread_info; +#endif + unsigned long stack[THREAD_SIZE/sizeof(long)]; +}; -static inline unsigned long *end_of_stack(const struct task_struct *task) +#ifdef CONFIG_THREAD_INFO_IN_TASK +static inline struct thread_info *task_thread_info(struct task_struct *task) { - return task->stack; + return &task->thread_info; } - #elif !defined(__HAVE_THREAD_FUNCTIONS) - -#define task_thread_info(task) ((struct thread_info *)(task)->stack) -#define task_stack_page(task) ((void *)(task)->stack) - -static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org) -{ - *task_thread_info(p) = *task_thread_info(org); - task_thread_info(p)->task = p; -} +# define task_thread_info(task) ((struct thread_info *)(task)->stack) +#endif /* - * Return the address of the last usable long on the stack. + * find a task by one of its numerical ids * - * When the stack grows down, this is just above the thread - * info struct. Going any lower will corrupt the threadinfo. + * find_task_by_pid_ns(): + * finds a task by its pid in the specified namespace + * find_task_by_vpid(): + * finds a task by its virtual pid * - * When the stack grows up, this is the highest address. - * Beyond that position, we corrupt data on the next page. 
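Illustrative sketch (not from the patch): an in-kernel caller of the sched_setscheduler_nocheck() interface declared above; task is a placeholder and the priority value is arbitrary.

struct sched_param param = { .sched_priority = 1 };

/* In-kernel policy change; the _nocheck variant skips the permission checks. */
if (sched_setscheduler_nocheck(task, SCHED_FIFO, &param))
	pr_warn("could not switch task to SCHED_FIFO\n");
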
+ * see also find_vpid() etc in include/linux/pid.h */ -static inline unsigned long *end_of_stack(struct task_struct *p) -{ -#ifdef CONFIG_STACK_GROWSUP - return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1; -#else - return (unsigned long *)(task_thread_info(p) + 1); -#endif -} -#endif +extern struct task_struct *find_task_by_vpid(pid_t nr); +extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns); -#ifdef CONFIG_THREAD_INFO_IN_TASK -static inline void *try_get_task_stack(struct task_struct *tsk) -{ - return atomic_inc_not_zero(&tsk->stack_refcount) ? - task_stack_page(tsk) : NULL; -} +extern int wake_up_state(struct task_struct *tsk, unsigned int state); +extern int wake_up_process(struct task_struct *tsk); +extern void wake_up_new_task(struct task_struct *tsk); -extern void put_task_stack(struct task_struct *tsk); +#ifdef CONFIG_SMP +extern void kick_process(struct task_struct *tsk); #else -static inline void *try_get_task_stack(struct task_struct *tsk) -{ - return task_stack_page(tsk); -} - -static inline void put_task_stack(struct task_struct *tsk) {} +static inline void kick_process(struct task_struct *tsk) { } #endif -#define task_stack_end_corrupted(task) \ - (*(end_of_stack(task)) != STACK_END_MAGIC) +extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec); -static inline int object_is_on_stack(void *obj) +static inline void set_task_comm(struct task_struct *tsk, const char *from) { - void *stack = task_stack_page(current); - - return (obj >= stack) && (obj < (stack + THREAD_SIZE)); + __set_task_comm(tsk, from, false); } -extern void thread_stack_cache_init(void); +extern char *get_task_comm(char *to, struct task_struct *tsk); -#ifdef CONFIG_DEBUG_STACK_USAGE -static inline unsigned long stack_not_used(struct task_struct *p) +#ifdef CONFIG_SMP +void scheduler_ipi(void); +extern unsigned long wait_task_inactive(struct task_struct *, long match_state); +#else +static inline void scheduler_ipi(void) { } +static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state) { - unsigned long *n = end_of_stack(p); - - do { /* Skip over canary */ -# ifdef CONFIG_STACK_GROWSUP - n--; -# else - n++; -# endif - } while (!*n); - -# ifdef CONFIG_STACK_GROWSUP - return (unsigned long)end_of_stack(p) - (unsigned long)n; -# else - return (unsigned long)n - (unsigned long)end_of_stack(p); -# endif + return 1; } #endif -extern void set_task_stack_end_magic(struct task_struct *tsk); -/* set thread flags in other task's structures - * - see asm/thread_info.h for TIF_xxxx flags available +/* + * Set thread flags in other task's structures. 
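Illustrative sketch (not from the patch): find_task_by_vpid(), declared above, is conventionally called under rcu_read_lock(); nr is a placeholder pid.

struct task_struct *p;

rcu_read_lock();
p = find_task_by_vpid(nr);		/* lookup in current's pid namespace */
if (p)
	get_task_struct(p);		/* take a reference before dropping RCU */
rcu_read_unlock();

if (p) {
	wake_up_process(p);		/* safe: we hold our own reference */
	put_task_struct(p);
}
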
+ * See asm/thread_info.h for TIF_xxxx flags available: */ static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag) { @@ -3350,37 +1456,6 @@ static inline int test_tsk_need_resched(struct task_struct *tsk) return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); } -static inline int restart_syscall(void) -{ - set_tsk_thread_flag(current, TIF_SIGPENDING); - return -ERESTARTNOINTR; -} - -static inline int signal_pending(struct task_struct *p) -{ - return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING)); -} - -static inline int __fatal_signal_pending(struct task_struct *p) -{ - return unlikely(sigismember(&p->pending.signal, SIGKILL)); -} - -static inline int fatal_signal_pending(struct task_struct *p) -{ - return signal_pending(p) && __fatal_signal_pending(p); -} - -static inline int signal_pending_state(long state, struct task_struct *p) -{ - if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL))) - return 0; - if (!signal_pending(p)) - return 0; - - return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); -} - /* * cond_resched() and cond_resched_lock(): latency reduction via * explicit rescheduling in places that are safe. The return @@ -3422,15 +1497,6 @@ static inline void cond_resched_rcu(void) #endif } -static inline unsigned long get_preempt_disable_ip(struct task_struct *p) -{ -#ifdef CONFIG_DEBUG_PREEMPT - return p->preempt_disable_ip; -#else - return 0; -#endif -} - /* * Does a critical section need to be broken due to another * task waiting?: (technically does not depend on CONFIG_PREEMPT, @@ -3445,113 +1511,11 @@ static inline int spin_needbreak(spinlock_t *lock) #endif } -/* - * Idle thread specific functions to determine the need_resched - * polling state. - */ -#ifdef TIF_POLLING_NRFLAG -static inline int tsk_is_polling(struct task_struct *p) -{ - return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG); -} - -static inline void __current_set_polling(void) -{ - set_thread_flag(TIF_POLLING_NRFLAG); -} - -static inline bool __must_check current_set_polling_and_test(void) -{ - __current_set_polling(); - - /* - * Polling state must be visible before we test NEED_RESCHED, - * paired by resched_curr() - */ - smp_mb__after_atomic(); - - return unlikely(tif_need_resched()); -} - -static inline void __current_clr_polling(void) -{ - clear_thread_flag(TIF_POLLING_NRFLAG); -} - -static inline bool __must_check current_clr_polling_and_test(void) -{ - __current_clr_polling(); - - /* - * Polling state must be visible before we test NEED_RESCHED, - * paired by resched_curr() - */ - smp_mb__after_atomic(); - - return unlikely(tif_need_resched()); -} - -#else -static inline int tsk_is_polling(struct task_struct *p) { return 0; } -static inline void __current_set_polling(void) { } -static inline void __current_clr_polling(void) { } - -static inline bool __must_check current_set_polling_and_test(void) -{ - return unlikely(tif_need_resched()); -} -static inline bool __must_check current_clr_polling_and_test(void) -{ - return unlikely(tif_need_resched()); -} -#endif - -static inline void current_clr_polling(void) -{ - __current_clr_polling(); - - /* - * Ensure we check TIF_NEED_RESCHED after we clear the polling bit. - * Once the bit is cleared, we'll get IPIs with every new - * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also - * fold. - */ - smp_mb(); /* paired with resched_curr() */ - - preempt_fold_need_resched(); -} - static __always_inline bool need_resched(void) { return unlikely(tif_need_resched()); } -/* - * Thread group CPU time accounting. 
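Illustrative sketch (not from the patch): the cond_resched() latency-reduction comment above in practice; process_item() and nr_items are placeholders.

int i;

for (i = 0; i < nr_items; i++) {
	process_item(i);
	cond_resched();		/* voluntarily yield if another task needs the CPU */
}
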
- */ -void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times); -void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times); - -/* - * Reevaluate whether the task has signals pending delivery. - * Wake the task if so. - * This is required every time the blocked sigset_t changes. - * callers must hold sighand->siglock. - */ -extern void recalc_sigpending_and_wake(struct task_struct *t); -extern void recalc_sigpending(void); - -extern void signal_wake_up_state(struct task_struct *t, unsigned int state); - -static inline void signal_wake_up(struct task_struct *t, bool resume) -{ - signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0); -} -static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume) -{ - signal_wake_up_state(t, resume ? __TASK_TRACED : 0); -} - /* * Wrappers for p->thread_info->cpu access. No-op on UP. */ @@ -3566,11 +1530,6 @@ static inline unsigned int task_cpu(const struct task_struct *p) #endif } -static inline int task_node(const struct task_struct *p) -{ - return cpu_to_node(task_cpu(p)); -} - extern void set_task_cpu(struct task_struct *p, unsigned int cpu); #else @@ -3601,100 +1560,8 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); extern long sched_getaffinity(pid_t pid, struct cpumask *mask); -#ifdef CONFIG_CGROUP_SCHED -extern struct task_group root_task_group; -#endif /* CONFIG_CGROUP_SCHED */ - -extern int task_can_switch_user(struct user_struct *up, - struct task_struct *tsk); - -#ifdef CONFIG_TASK_XACCT -static inline void add_rchar(struct task_struct *tsk, ssize_t amt) -{ - tsk->ioac.rchar += amt; -} - -static inline void add_wchar(struct task_struct *tsk, ssize_t amt) -{ - tsk->ioac.wchar += amt; -} - -static inline void inc_syscr(struct task_struct *tsk) -{ - tsk->ioac.syscr++; -} - -static inline void inc_syscw(struct task_struct *tsk) -{ - tsk->ioac.syscw++; -} -#else -static inline void add_rchar(struct task_struct *tsk, ssize_t amt) -{ -} - -static inline void add_wchar(struct task_struct *tsk, ssize_t amt) -{ -} - -static inline void inc_syscr(struct task_struct *tsk) -{ -} - -static inline void inc_syscw(struct task_struct *tsk) -{ -} -#endif - #ifndef TASK_SIZE_OF #define TASK_SIZE_OF(tsk) TASK_SIZE #endif -#ifdef CONFIG_MEMCG -extern void mm_update_next_owner(struct mm_struct *mm); -#else -static inline void mm_update_next_owner(struct mm_struct *mm) -{ -} -#endif /* CONFIG_MEMCG */ - -static inline unsigned long task_rlimit(const struct task_struct *tsk, - unsigned int limit) -{ - return READ_ONCE(tsk->signal->rlim[limit].rlim_cur); -} - -static inline unsigned long task_rlimit_max(const struct task_struct *tsk, - unsigned int limit) -{ - return READ_ONCE(tsk->signal->rlim[limit].rlim_max); -} - -static inline unsigned long rlimit(unsigned int limit) -{ - return task_rlimit(current, limit); -} - -static inline unsigned long rlimit_max(unsigned int limit) -{ - return task_rlimit_max(current, limit); -} - -#define SCHED_CPUFREQ_RT (1U << 0) -#define SCHED_CPUFREQ_DL (1U << 1) -#define SCHED_CPUFREQ_IOWAIT (1U << 2) - -#define SCHED_CPUFREQ_RT_DL (SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL) - -#ifdef CONFIG_CPU_FREQ -struct update_util_data { - void (*func)(struct update_util_data *data, u64 time, unsigned int flags); -}; - -void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data, - void (*func)(struct update_util_data *data, u64 time, - unsigned int flags)); -void 
cpufreq_remove_update_util_hook(int cpu); -#endif /* CONFIG_CPU_FREQ */ - #endif diff --git a/include/linux/sched/autogroup.h b/include/linux/sched/autogroup.h new file mode 100644 index 00000000000000..55cd496df8849d --- /dev/null +++ b/include/linux/sched/autogroup.h @@ -0,0 +1,31 @@ +#ifndef _LINUX_SCHED_AUTOGROUP_H +#define _LINUX_SCHED_AUTOGROUP_H + +struct signal_struct; +struct task_struct; +struct task_group; +struct seq_file; + +#ifdef CONFIG_SCHED_AUTOGROUP +extern void sched_autogroup_create_attach(struct task_struct *p); +extern void sched_autogroup_detach(struct task_struct *p); +extern void sched_autogroup_fork(struct signal_struct *sig); +extern void sched_autogroup_exit(struct signal_struct *sig); +extern void sched_autogroup_exit_task(struct task_struct *p); +#ifdef CONFIG_PROC_FS +extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m); +extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice); +#endif +#else +static inline void sched_autogroup_create_attach(struct task_struct *p) { } +static inline void sched_autogroup_detach(struct task_struct *p) { } +static inline void sched_autogroup_fork(struct signal_struct *sig) { } +static inline void sched_autogroup_exit(struct signal_struct *sig) { } +static inline void sched_autogroup_exit_task(struct task_struct *p) { } +#endif + +#ifdef CONFIG_CGROUP_SCHED +extern struct task_group root_task_group; +#endif /* CONFIG_CGROUP_SCHED */ + +#endif /* _LINUX_SCHED_AUTOGROUP_H */ diff --git a/include/linux/sched/clock.h b/include/linux/sched/clock.h new file mode 100644 index 00000000000000..34fe92ce1ebd7c --- /dev/null +++ b/include/linux/sched/clock.h @@ -0,0 +1,105 @@ +#ifndef _LINUX_SCHED_CLOCK_H +#define _LINUX_SCHED_CLOCK_H + +#include + +/* + * Do not use outside of architecture code which knows its limitations. + * + * sched_clock() has no promise of monotonicity or bounded drift between + * CPUs, use (which you should not) requires disabling IRQs. + * + * Please use one of the three interfaces below. + */ +extern unsigned long long notrace sched_clock(void); + +/* + * See the comment in kernel/sched/clock.c + */ +extern u64 running_clock(void); +extern u64 sched_clock_cpu(int cpu); + + +extern void sched_clock_init(void); + +#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK +static inline void sched_clock_init_late(void) +{ +} + +static inline void sched_clock_tick(void) +{ +} + +static inline void clear_sched_clock_stable(void) +{ +} + +static inline void sched_clock_idle_sleep_event(void) +{ +} + +static inline void sched_clock_idle_wakeup_event(u64 delta_ns) +{ +} + +static inline u64 cpu_clock(int cpu) +{ + return sched_clock(); +} + +static inline u64 local_clock(void) +{ + return sched_clock(); +} +#else +extern void sched_clock_init_late(void); +extern int sched_clock_stable(void); +extern void clear_sched_clock_stable(void); + +/* + * When sched_clock_stable(), __sched_clock_offset provides the offset + * between local_clock() and sched_clock(). + */ +extern u64 __sched_clock_offset; + + +extern void sched_clock_tick(void); +extern void sched_clock_idle_sleep_event(void); +extern void sched_clock_idle_wakeup_event(u64 delta_ns); + +/* + * As outlined in clock.c, provides a fast, high resolution, nanosecond + * time source that is monotonic per cpu argument and has bounded drift + * between cpus. + * + * ######################### BIG FAT WARNING ########################## + * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can # + * # go backwards !! 
# + * #################################################################### + */ +static inline u64 cpu_clock(int cpu) +{ + return sched_clock_cpu(cpu); +} + +static inline u64 local_clock(void) +{ + return sched_clock_cpu(raw_smp_processor_id()); +} +#endif + +#ifdef CONFIG_IRQ_TIME_ACCOUNTING +/* + * An i/f to runtime opt-in for irq time accounting based off of sched_clock. + * The reason for this explicit opt-in is not to have perf penalty with + * slow sched_clocks. + */ +extern void enable_sched_clock_irqtime(void); +extern void disable_sched_clock_irqtime(void); +#else +static inline void enable_sched_clock_irqtime(void) {} +static inline void disable_sched_clock_irqtime(void) {} +#endif + +#endif /* _LINUX_SCHED_CLOCK_H */ diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h new file mode 100644 index 00000000000000..69eedcef8f03fb --- /dev/null +++ b/include/linux/sched/coredump.h @@ -0,0 +1,74 @@ +#ifndef _LINUX_SCHED_COREDUMP_H +#define _LINUX_SCHED_COREDUMP_H + +#include + +#define SUID_DUMP_DISABLE 0 /* No setuid dumping */ +#define SUID_DUMP_USER 1 /* Dump as user of process */ +#define SUID_DUMP_ROOT 2 /* Dump as root */ + +/* mm flags */ + +/* for SUID_DUMP_* above */ +#define MMF_DUMPABLE_BITS 2 +#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1) + +extern void set_dumpable(struct mm_struct *mm, int value); +/* + * This returns the actual value of the suid_dumpable flag. For things + * that are using this for checking for privilege transitions, it must + * test against SUID_DUMP_USER rather than treating it as a boolean + * value. + */ +static inline int __get_dumpable(unsigned long mm_flags) +{ + return mm_flags & MMF_DUMPABLE_MASK; +} + +static inline int get_dumpable(struct mm_struct *mm) +{ + return __get_dumpable(mm->flags); +} + +/* coredump filter bits */ +#define MMF_DUMP_ANON_PRIVATE 2 +#define MMF_DUMP_ANON_SHARED 3 +#define MMF_DUMP_MAPPED_PRIVATE 4 +#define MMF_DUMP_MAPPED_SHARED 5 +#define MMF_DUMP_ELF_HEADERS 6 +#define MMF_DUMP_HUGETLB_PRIVATE 7 +#define MMF_DUMP_HUGETLB_SHARED 8 +#define MMF_DUMP_DAX_PRIVATE 9 +#define MMF_DUMP_DAX_SHARED 10 + +#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS +#define MMF_DUMP_FILTER_BITS 9 +#define MMF_DUMP_FILTER_MASK \ + (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT) +#define MMF_DUMP_FILTER_DEFAULT \ + ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\ + (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF) + +#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS +# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS) +#else +# define MMF_DUMP_MASK_DEFAULT_ELF 0 +#endif + /* leave room for more dump flags */ +#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */ +#define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */ +/* + * This one-shot flag is dropped due to necessity of changing exe once again + * on NFS restore + */ +//#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */ + +#define MMF_HAS_UPROBES 19 /* has uprobes */ +#define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */ +#define MMF_OOM_SKIP 21 /* mm is of no interest for the OOM killer */ +#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */ +#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */ + +#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK) + +#endif /* _LINUX_SCHED_COREDUMP_H */ diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h new file mode 100644 index 
00000000000000..d2be2ccbb3729d --- /dev/null +++ b/include/linux/sched/cpufreq.h @@ -0,0 +1,27 @@ +#ifndef _LINUX_SCHED_CPUFREQ_H +#define _LINUX_SCHED_CPUFREQ_H + +#include + +/* + * Interface between cpufreq drivers and the scheduler: + */ + +#define SCHED_CPUFREQ_RT (1U << 0) +#define SCHED_CPUFREQ_DL (1U << 1) +#define SCHED_CPUFREQ_IOWAIT (1U << 2) + +#define SCHED_CPUFREQ_RT_DL (SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL) + +#ifdef CONFIG_CPU_FREQ +struct update_util_data { + void (*func)(struct update_util_data *data, u64 time, unsigned int flags); +}; + +void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data, + void (*func)(struct update_util_data *data, u64 time, + unsigned int flags)); +void cpufreq_remove_update_util_hook(int cpu); +#endif /* CONFIG_CPU_FREQ */ + +#endif /* _LINUX_SCHED_CPUFREQ_H */ diff --git a/include/linux/sched/cputime.h b/include/linux/sched/cputime.h new file mode 100644 index 00000000000000..4c5b9735c1ae1d --- /dev/null +++ b/include/linux/sched/cputime.h @@ -0,0 +1,187 @@ +#ifndef _LINUX_SCHED_CPUTIME_H +#define _LINUX_SCHED_CPUTIME_H + +#include + +/* + * cputime accounting APIs: + */ + +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE +#include + +#ifndef cputime_to_nsecs +# define cputime_to_nsecs(__ct) \ + (cputime_to_usecs(__ct) * NSEC_PER_USEC) +#endif +#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ + +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN +extern void task_cputime(struct task_struct *t, + u64 *utime, u64 *stime); +extern u64 task_gtime(struct task_struct *t); +#else +static inline void task_cputime(struct task_struct *t, + u64 *utime, u64 *stime) +{ + *utime = t->utime; + *stime = t->stime; +} + +static inline u64 task_gtime(struct task_struct *t) +{ + return t->gtime; +} +#endif + +#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME +static inline void task_cputime_scaled(struct task_struct *t, + u64 *utimescaled, + u64 *stimescaled) +{ + *utimescaled = t->utimescaled; + *stimescaled = t->stimescaled; +} +#else +static inline void task_cputime_scaled(struct task_struct *t, + u64 *utimescaled, + u64 *stimescaled) +{ + task_cputime(t, utimescaled, stimescaled); +} +#endif + +extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st); +extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st); + + +/* + * Thread group CPU time accounting. + */ +void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times); +void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times); + + +/* + * The following are functions that support scheduler-internal time accounting. + * These functions are generally called at the timer tick. None of this depends + * on CONFIG_SCHEDSTATS. + */ + +/** + * get_running_cputimer - return &tsk->signal->cputimer if cputimer is running + * + * @tsk: Pointer to target task. + */ +#ifdef CONFIG_POSIX_TIMERS +static inline +struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk) +{ + struct thread_group_cputimer *cputimer = &tsk->signal->cputimer; + + /* Check if cputimer isn't running. This is accessed without locking. */ + if (!READ_ONCE(cputimer->running)) + return NULL; + + /* + * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime + * in __exit_signal(), we won't account to the signal struct further + * cputime consumed by that task, even though the task can still be + * ticking after __exit_signal(). 
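[ Illustration, not part of the patch: the new <linux/sched/cpufreq.h> above declares the scheduler/cpufreq hook interface. A minimal sketch of a governor-style consumer registering a per-CPU callback; the my_gov_* names and the IOWAIT handling are purely hypothetical. ]

static DEFINE_PER_CPU(struct update_util_data, my_gov_data);

static void my_gov_update(struct update_util_data *data, u64 time,
                          unsigned int flags)
{
        /* Called from scheduler context whenever CPU utilization changes. */
        if (flags & SCHED_CPUFREQ_IOWAIT)
                pr_debug("cpu%d: iowait boost hint at %llu\n",
                         smp_processor_id(), (unsigned long long)time);
}

static void my_gov_start(int cpu)
{
        cpufreq_add_update_util_hook(cpu, &per_cpu(my_gov_data, cpu),
                                     my_gov_update);
}

static void my_gov_stop(int cpu)
{
        cpufreq_remove_update_util_hook(cpu);
}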
+ * + * In order to keep a consistent behaviour between thread group cputime + * and thread group cputimer accounting, lets also ignore the cputime + * elapsing after __exit_signal() in any thread group timer running. + * + * This makes sure that POSIX CPU clocks and timers are synchronized, so + * that a POSIX CPU timer won't expire while the corresponding POSIX CPU + * clock delta is behind the expiring timer value. + */ + if (unlikely(!tsk->sighand)) + return NULL; + + return cputimer; +} +#else +static inline +struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk) +{ + return NULL; +} +#endif + +/** + * account_group_user_time - Maintain utime for a thread group. + * + * @tsk: Pointer to task structure. + * @cputime: Time value by which to increment the utime field of the + * thread_group_cputime structure. + * + * If thread group time is being maintained, get the structure for the + * running CPU and update the utime field there. + */ +static inline void account_group_user_time(struct task_struct *tsk, + u64 cputime) +{ + struct thread_group_cputimer *cputimer = get_running_cputimer(tsk); + + if (!cputimer) + return; + + atomic64_add(cputime, &cputimer->cputime_atomic.utime); +} + +/** + * account_group_system_time - Maintain stime for a thread group. + * + * @tsk: Pointer to task structure. + * @cputime: Time value by which to increment the stime field of the + * thread_group_cputime structure. + * + * If thread group time is being maintained, get the structure for the + * running CPU and update the stime field there. + */ +static inline void account_group_system_time(struct task_struct *tsk, + u64 cputime) +{ + struct thread_group_cputimer *cputimer = get_running_cputimer(tsk); + + if (!cputimer) + return; + + atomic64_add(cputime, &cputimer->cputime_atomic.stime); +} + +/** + * account_group_exec_runtime - Maintain exec runtime for a thread group. + * + * @tsk: Pointer to task structure. + * @ns: Time value by which to increment the sum_exec_runtime field + * of the thread_group_cputime structure. + * + * If thread group time is being maintained, get the structure for the + * running CPU and update the sum_exec_runtime field there. 
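[ Illustration, not part of the patch: a hedged sketch of the cputime accessors declared above, reading a task's user/system time in nanoseconds. show_task_time() is a made-up helper. ]

static void show_task_time(struct task_struct *p)
{
        u64 utime, stime;

        task_cputime(p, &utime, &stime);                /* raw per-thread counters */
        pr_info("%s[%d]: utime=%llu stime=%llu (ns)\n",
                p->comm, task_pid_nr(p),
                (unsigned long long)utime, (unsigned long long)stime);

        task_cputime_adjusted(p, &utime, &stime);       /* smoothed for reporting */
        pr_info("%s[%d]: adjusted utime=%llu stime=%llu (ns)\n",
                p->comm, task_pid_nr(p),
                (unsigned long long)utime, (unsigned long long)stime);
}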
+ */ +static inline void account_group_exec_runtime(struct task_struct *tsk, + unsigned long long ns) +{ + struct thread_group_cputimer *cputimer = get_running_cputimer(tsk); + + if (!cputimer) + return; + + atomic64_add(ns, &cputimer->cputime_atomic.sum_exec_runtime); +} + +static inline void prev_cputime_init(struct prev_cputime *prev) +{ +#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE + prev->utime = prev->stime = 0; + raw_spin_lock_init(&prev->lock); +#endif +} + +extern unsigned long long +task_sched_runtime(struct task_struct *task); + +#endif /* _LINUX_SCHED_CPUTIME_H */ diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h index 9089a2ae913ddf..975be862e0835c 100644 --- a/include/linux/sched/deadline.h +++ b/include/linux/sched/deadline.h @@ -1,5 +1,7 @@ -#ifndef _SCHED_DEADLINE_H -#define _SCHED_DEADLINE_H +#ifndef _LINUX_SCHED_DEADLINE_H +#define _LINUX_SCHED_DEADLINE_H + +#include /* * SCHED_DEADLINE tasks has negative priorities, reflecting @@ -26,4 +28,4 @@ static inline bool dl_time_before(u64 a, u64 b) return (s64)(a - b) < 0; } -#endif /* _SCHED_DEADLINE_H */ +#endif /* _LINUX_SCHED_DEADLINE_H */ diff --git a/include/linux/sched/debug.h b/include/linux/sched/debug.h new file mode 100644 index 00000000000000..e0eaee54c5a4c4 --- /dev/null +++ b/include/linux/sched/debug.h @@ -0,0 +1,50 @@ +#ifndef _LINUX_SCHED_DEBUG_H +#define _LINUX_SCHED_DEBUG_H + +/* + * Various scheduler/task debugging interfaces: + */ + +struct task_struct; + +extern void dump_cpu_task(int cpu); + +/* + * Only dump TASK_* tasks. (0 for all tasks) + */ +extern void show_state_filter(unsigned long state_filter); + +static inline void show_state(void) +{ + show_state_filter(0); +} + +struct pt_regs; + +extern void show_regs(struct pt_regs *); + +/* + * TASK is a pointer to the task whose backtrace we want to see (or NULL for current + * task), SP is the stack pointer of the first frame that should be shown in the back + * trace (or NULL if the entire call-chain of the task should be shown). + */ +extern void show_stack(struct task_struct *task, unsigned long *sp); + +extern void sched_show_task(struct task_struct *p); + +#ifdef CONFIG_SCHED_DEBUG +struct seq_file; +extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m); +extern void proc_sched_set_task(struct task_struct *p); +#endif + +/* Attach to any functions which should be ignored in wchan output. */ +#define __sched __attribute__((__section__(".sched.text"))) + +/* Linker adds these: start and end of __sched functions */ +extern char __sched_text_start[], __sched_text_end[]; + +/* Is this address in the __sched functions? 
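[ Illustration, not part of the patch: the __sched section attribute defined above keeps scheduling wrappers out of wchan/backtrace output. my_wait_for_done() is a made-up helper; wait_for_completion() is assumed from <linux/completion.h>. ]

static int __sched my_wait_for_done(struct completion *done)
{
        /* Being __sched, this frame is skipped when userspace reads wchan. */
        wait_for_completion(done);
        return 0;
}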
*/ +extern int in_sched_functions(unsigned long addr); + +#endif /* _LINUX_SCHED_DEBUG_H */ diff --git a/include/linux/sched/hotplug.h b/include/linux/sched/hotplug.h new file mode 100644 index 00000000000000..752ac7e628d72e --- /dev/null +++ b/include/linux/sched/hotplug.h @@ -0,0 +1,24 @@ +#ifndef _LINUX_SCHED_HOTPLUG_H +#define _LINUX_SCHED_HOTPLUG_H + +/* + * Scheduler interfaces for hotplug CPU support: + */ + +extern int sched_cpu_starting(unsigned int cpu); +extern int sched_cpu_activate(unsigned int cpu); +extern int sched_cpu_deactivate(unsigned int cpu); + +#ifdef CONFIG_HOTPLUG_CPU +extern int sched_cpu_dying(unsigned int cpu); +#else +# define sched_cpu_dying NULL +#endif + +#ifdef CONFIG_HOTPLUG_CPU +extern void idle_task_exit(void); +#else +static inline void idle_task_exit(void) {} +#endif + +#endif /* _LINUX_SCHED_HOTPLUG_H */ diff --git a/include/linux/sched/idle.h b/include/linux/sched/idle.h new file mode 100644 index 00000000000000..5ca63ebad6b40e --- /dev/null +++ b/include/linux/sched/idle.h @@ -0,0 +1,86 @@ +#ifndef _LINUX_SCHED_IDLE_H +#define _LINUX_SCHED_IDLE_H + +#include + +enum cpu_idle_type { + CPU_IDLE, + CPU_NOT_IDLE, + CPU_NEWLY_IDLE, + CPU_MAX_IDLE_TYPES +}; + +extern void wake_up_if_idle(int cpu); + +/* + * Idle thread specific functions to determine the need_resched + * polling state. + */ +#ifdef TIF_POLLING_NRFLAG + +static inline void __current_set_polling(void) +{ + set_thread_flag(TIF_POLLING_NRFLAG); +} + +static inline bool __must_check current_set_polling_and_test(void) +{ + __current_set_polling(); + + /* + * Polling state must be visible before we test NEED_RESCHED, + * paired by resched_curr() + */ + smp_mb__after_atomic(); + + return unlikely(tif_need_resched()); +} + +static inline void __current_clr_polling(void) +{ + clear_thread_flag(TIF_POLLING_NRFLAG); +} + +static inline bool __must_check current_clr_polling_and_test(void) +{ + __current_clr_polling(); + + /* + * Polling state must be visible before we test NEED_RESCHED, + * paired by resched_curr() + */ + smp_mb__after_atomic(); + + return unlikely(tif_need_resched()); +} + +#else +static inline void __current_set_polling(void) { } +static inline void __current_clr_polling(void) { } + +static inline bool __must_check current_set_polling_and_test(void) +{ + return unlikely(tif_need_resched()); +} +static inline bool __must_check current_clr_polling_and_test(void) +{ + return unlikely(tif_need_resched()); +} +#endif + +static inline void current_clr_polling(void) +{ + __current_clr_polling(); + + /* + * Ensure we check TIF_NEED_RESCHED after we clear the polling bit. + * Once the bit is cleared, we'll get IPIs with every new + * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also + * fold. 
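[ Illustration, not part of the patch: the polling helpers above, loosely modelled on a polling idle state. While TIF_POLLING_NRFLAG is set, a remote CPU can set TIF_NEED_RESCHED without sending an IPI. my_poll_idle() is a made-up name. ]

static void my_poll_idle(void)
{
        if (!current_set_polling_and_test()) {
                /* Spin until a remote CPU asks for a reschedule: */
                while (!need_resched())
                        cpu_relax();
        }
        current_clr_polling();
}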
+ */ + smp_mb(); /* paired with resched_curr() */ + + preempt_fold_need_resched(); +} + +#endif /* _LINUX_SCHED_IDLE_H */ diff --git a/include/linux/sched/init.h b/include/linux/sched/init.h new file mode 100644 index 00000000000000..127215045285e6 --- /dev/null +++ b/include/linux/sched/init.h @@ -0,0 +1,11 @@ +#ifndef _LINUX_SCHED_INIT_H +#define _LINUX_SCHED_INIT_H + +/* + * Scheduler init related prototypes: + */ + +extern void sched_init(void); +extern void sched_init_smp(void); + +#endif /* _LINUX_SCHED_INIT_H */ diff --git a/include/linux/sched/jobctl.h b/include/linux/sched/jobctl.h new file mode 100644 index 00000000000000..016afa0fb3bbce --- /dev/null +++ b/include/linux/sched/jobctl.h @@ -0,0 +1,36 @@ +#ifndef _LINUX_SCHED_JOBCTL_H +#define _LINUX_SCHED_JOBCTL_H + +#include + +struct task_struct; + +/* + * task->jobctl flags + */ +#define JOBCTL_STOP_SIGMASK 0xffff /* signr of the last group stop */ + +#define JOBCTL_STOP_DEQUEUED_BIT 16 /* stop signal dequeued */ +#define JOBCTL_STOP_PENDING_BIT 17 /* task should stop for group stop */ +#define JOBCTL_STOP_CONSUME_BIT 18 /* consume group stop count */ +#define JOBCTL_TRAP_STOP_BIT 19 /* trap for STOP */ +#define JOBCTL_TRAP_NOTIFY_BIT 20 /* trap for NOTIFY */ +#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */ +#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */ + +#define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT) +#define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT) +#define JOBCTL_STOP_CONSUME (1UL << JOBCTL_STOP_CONSUME_BIT) +#define JOBCTL_TRAP_STOP (1UL << JOBCTL_TRAP_STOP_BIT) +#define JOBCTL_TRAP_NOTIFY (1UL << JOBCTL_TRAP_NOTIFY_BIT) +#define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT) +#define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT) + +#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY) +#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK) + +extern bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask); +extern void task_clear_jobctl_trapping(struct task_struct *task); +extern void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask); + +#endif /* _LINUX_SCHED_JOBCTL_H */ diff --git a/include/linux/sched/loadavg.h b/include/linux/sched/loadavg.h new file mode 100644 index 00000000000000..4264bc6b2c2767 --- /dev/null +++ b/include/linux/sched/loadavg.h @@ -0,0 +1,31 @@ +#ifndef _LINUX_SCHED_LOADAVG_H +#define _LINUX_SCHED_LOADAVG_H + +/* + * These are the constant used to fake the fixed-point load-average + * counting. Some notes: + * - 11 bit fractions expand to 22 bits by the multiplies: this gives + * a load-average precision of 10 bits integer + 11 bits fractional + * - if you want to count load-averages more often, you need more + * precision, or rounding will get you. With 2-second counting freq, + * the EXP_n values would be 1981, 2034 and 2043 if still using only + * 11 bit fractions. 
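[ Illustration, not part of the patch: the 11-bit fixed-point format described above, using the avenrun[]/get_avenrun() and FSHIFT/FIXED_1 definitions that follow in this header. Splitting a load value into its integer and two-digit fractional parts; LOAD_INT/LOAD_FRAC and show_loadavg_example() are local, made-up names here. ]

#define LOAD_INT(x)  ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1 - 1)) * 100)

static void show_loadavg_example(void)
{
        unsigned long loads[3];

        /* Offset by FIXED_1/200 to round to the nearest 1/100: */
        get_avenrun(loads, FIXED_1 / 200, 0);
        pr_info("load: %lu.%02lu %lu.%02lu %lu.%02lu\n",
                LOAD_INT(loads[0]), LOAD_FRAC(loads[0]),
                LOAD_INT(loads[1]), LOAD_FRAC(loads[1]),
                LOAD_INT(loads[2]), LOAD_FRAC(loads[2]));
}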
+ */ +extern unsigned long avenrun[]; /* Load averages */ +extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift); + +#define FSHIFT 11 /* nr of bits of precision */ +#define FIXED_1 (1<>= FSHIFT; + +extern void calc_global_load(unsigned long ticks); + +#endif /* _LINUX_SCHED_LOADAVG_H */ diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h new file mode 100644 index 00000000000000..830953ebb391fa --- /dev/null +++ b/include/linux/sched/mm.h @@ -0,0 +1,174 @@ +#ifndef _LINUX_SCHED_MM_H +#define _LINUX_SCHED_MM_H + +#include +#include +#include +#include +#include + +/* + * Routines for handling mm_structs + */ +extern struct mm_struct * mm_alloc(void); + +/** + * mmgrab() - Pin a &struct mm_struct. + * @mm: The &struct mm_struct to pin. + * + * Make sure that @mm will not get freed even after the owning task + * exits. This doesn't guarantee that the associated address space + * will still exist later on and mmget_not_zero() has to be used before + * accessing it. + * + * This is a preferred way to to pin @mm for a longer/unbounded amount + * of time. + * + * Use mmdrop() to release the reference acquired by mmgrab(). + * + * See also for an in-depth explanation + * of &mm_struct.mm_count vs &mm_struct.mm_users. + */ +static inline void mmgrab(struct mm_struct *mm) +{ + atomic_inc(&mm->mm_count); +} + +/* mmdrop drops the mm and the page tables */ +extern void __mmdrop(struct mm_struct *); +static inline void mmdrop(struct mm_struct *mm) +{ + if (unlikely(atomic_dec_and_test(&mm->mm_count))) + __mmdrop(mm); +} + +static inline void mmdrop_async_fn(struct work_struct *work) +{ + struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work); + __mmdrop(mm); +} + +static inline void mmdrop_async(struct mm_struct *mm) +{ + if (unlikely(atomic_dec_and_test(&mm->mm_count))) { + INIT_WORK(&mm->async_put_work, mmdrop_async_fn); + schedule_work(&mm->async_put_work); + } +} + +/** + * mmget() - Pin the address space associated with a &struct mm_struct. + * @mm: The address space to pin. + * + * Make sure that the address space of the given &struct mm_struct doesn't + * go away. This does not protect against parts of the address space being + * modified or freed, however. + * + * Never use this function to pin this address space for an + * unbounded/indefinite amount of time. + * + * Use mmput() to release the reference acquired by mmget(). + * + * See also for an in-depth explanation + * of &mm_struct.mm_count vs &mm_struct.mm_users. + */ +static inline void mmget(struct mm_struct *mm) +{ + atomic_inc(&mm->mm_users); +} + +static inline bool mmget_not_zero(struct mm_struct *mm) +{ + return atomic_inc_not_zero(&mm->mm_users); +} + +/* mmput gets rid of the mappings and all user-space */ +extern void mmput(struct mm_struct *); +#ifdef CONFIG_MMU +/* same as above but performs the slow path from the async context. Can + * be called from the atomic context as well + */ +extern void mmput_async(struct mm_struct *); +#endif + +/* Grab a reference to a task's mm, if it is not already going away */ +extern struct mm_struct *get_task_mm(struct task_struct *task); +/* + * Grab a reference to a task's mm, if it is not already going away + * and ptrace_may_access with the mode parameter passed to it + * succeeds. 
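[ Illustration, not part of the patch: the mm reference rules above distinguish mmgrab()/mmdrop(), which pin only the mm_struct, from get_task_mm()/mmput(), which pin the address space itself. inspect_task_mm() is a made-up helper. ]

static void inspect_task_mm(struct task_struct *task)
{
        struct mm_struct *mm;

        mm = get_task_mm(task); /* NULL if the task has no mm or is exiting */
        if (!mm)
                return;

        pr_info("%s[%d]: mm_users=%d\n", task->comm, task_pid_nr(task),
                atomic_read(&mm->mm_users));

        mmput(mm);              /* drop the mm_users reference taken above */
}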
+ */ +extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode); +/* Remove the current tasks stale references to the old mm_struct */ +extern void mm_release(struct task_struct *, struct mm_struct *); + +#ifdef CONFIG_MEMCG +extern void mm_update_next_owner(struct mm_struct *mm); +#else +static inline void mm_update_next_owner(struct mm_struct *mm) +{ +} +#endif /* CONFIG_MEMCG */ + +#ifdef CONFIG_MMU +extern void arch_pick_mmap_layout(struct mm_struct *mm); +extern unsigned long +arch_get_unmapped_area(struct file *, unsigned long, unsigned long, + unsigned long, unsigned long); +extern unsigned long +arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags); +#else +static inline void arch_pick_mmap_layout(struct mm_struct *mm) {} +#endif + +static inline bool in_vfork(struct task_struct *tsk) +{ + bool ret; + + /* + * need RCU to access ->real_parent if CLONE_VM was used along with + * CLONE_PARENT. + * + * We check real_parent->mm == tsk->mm because CLONE_VFORK does not + * imply CLONE_VM + * + * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus + * ->real_parent is not necessarily the task doing vfork(), so in + * theory we can't rely on task_lock() if we want to dereference it. + * + * And in this case we can't trust the real_parent->mm == tsk->mm + * check, it can be false negative. But we do not care, if init or + * another oom-unkillable task does this it should blame itself. + */ + rcu_read_lock(); + ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm; + rcu_read_unlock(); + + return ret; +} + +/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags + * __GFP_FS is also cleared as it implies __GFP_IO. + */ +static inline gfp_t memalloc_noio_flags(gfp_t flags) +{ + if (unlikely(current->flags & PF_MEMALLOC_NOIO)) + flags &= ~(__GFP_IO | __GFP_FS); + return flags; +} + +static inline unsigned int memalloc_noio_save(void) +{ + unsigned int flags = current->flags & PF_MEMALLOC_NOIO; + current->flags |= PF_MEMALLOC_NOIO; + return flags; +} + +static inline void memalloc_noio_restore(unsigned int flags) +{ + current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags; +} + +#endif /* _LINUX_SCHED_MM_H */ diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h new file mode 100644 index 00000000000000..4995b717500b67 --- /dev/null +++ b/include/linux/sched/nohz.h @@ -0,0 +1,43 @@ +#ifndef _LINUX_SCHED_NOHZ_H +#define _LINUX_SCHED_NOHZ_H + +/* + * This is the interface between the scheduler and nohz/dyntics: + */ + +#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) +extern void cpu_load_update_nohz_start(void); +extern void cpu_load_update_nohz_stop(void); +#else +static inline void cpu_load_update_nohz_start(void) { } +static inline void cpu_load_update_nohz_stop(void) { } +#endif + +#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) +extern void nohz_balance_enter_idle(int cpu); +extern void set_cpu_sd_state_idle(void); +extern int get_nohz_timer_target(void); +#else +static inline void nohz_balance_enter_idle(int cpu) { } +static inline void set_cpu_sd_state_idle(void) { } +#endif + +#ifdef CONFIG_NO_HZ_COMMON +void calc_load_enter_idle(void); +void calc_load_exit_idle(void); +#else +static inline void calc_load_enter_idle(void) { } +static inline void calc_load_exit_idle(void) { } +#endif /* CONFIG_NO_HZ_COMMON */ + +#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP) +extern void wake_up_nohz_cpu(int cpu); +#else +static 
inline void wake_up_nohz_cpu(int cpu) { } +#endif + +#ifdef CONFIG_NO_HZ_FULL +extern u64 scheduler_tick_max_deferment(void); +#endif + +#endif /* _LINUX_SCHED_NOHZ_H */ diff --git a/include/linux/sched/numa_balancing.h b/include/linux/sched/numa_balancing.h new file mode 100644 index 00000000000000..35d5fc77b4be9b --- /dev/null +++ b/include/linux/sched/numa_balancing.h @@ -0,0 +1,46 @@ +#ifndef _LINUX_SCHED_NUMA_BALANCING_H +#define _LINUX_SCHED_NUMA_BALANCING_H + +/* + * This is the interface between the scheduler and the MM that + * implements memory access pattern based NUMA-balancing: + */ + +#include + +#define TNF_MIGRATED 0x01 +#define TNF_NO_GROUP 0x02 +#define TNF_SHARED 0x04 +#define TNF_FAULT_LOCAL 0x08 +#define TNF_MIGRATE_FAIL 0x10 + +#ifdef CONFIG_NUMA_BALANCING +extern void task_numa_fault(int last_node, int node, int pages, int flags); +extern pid_t task_numa_group_id(struct task_struct *p); +extern void set_numabalancing_state(bool enabled); +extern void task_numa_free(struct task_struct *p); +extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page, + int src_nid, int dst_cpu); +#else +static inline void task_numa_fault(int last_node, int node, int pages, + int flags) +{ +} +static inline pid_t task_numa_group_id(struct task_struct *p) +{ + return 0; +} +static inline void set_numabalancing_state(bool enabled) +{ +} +static inline void task_numa_free(struct task_struct *p) +{ +} +static inline bool should_numa_migrate_memory(struct task_struct *p, + struct page *page, int src_nid, int dst_cpu) +{ + return true; +} +#endif + +#endif /* _LINUX_SCHED_NUMA_BALANCING_H */ diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h index d9cf5a5762d9d3..2cc450f6ec5423 100644 --- a/include/linux/sched/prio.h +++ b/include/linux/sched/prio.h @@ -1,5 +1,5 @@ -#ifndef _SCHED_PRIO_H -#define _SCHED_PRIO_H +#ifndef _LINUX_SCHED_PRIO_H +#define _LINUX_SCHED_PRIO_H #define MAX_NICE 19 #define MIN_NICE -20 @@ -57,4 +57,4 @@ static inline long rlimit_to_nice(long prio) return (MAX_NICE - prio + 1); } -#endif /* _SCHED_PRIO_H */ +#endif /* _LINUX_SCHED_PRIO_H */ diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h index a30b172df6e1a7..3bd668414f61a5 100644 --- a/include/linux/sched/rt.h +++ b/include/linux/sched/rt.h @@ -1,7 +1,9 @@ -#ifndef _SCHED_RT_H -#define _SCHED_RT_H +#ifndef _LINUX_SCHED_RT_H +#define _LINUX_SCHED_RT_H -#include +#include + +struct task_struct; static inline int rt_prio(int prio) { @@ -57,4 +59,4 @@ extern void normalize_rt_tasks(void); */ #define RR_TIMESLICE (100 * HZ / 1000) -#endif /* _SCHED_RT_H */ +#endif /* _LINUX_SCHED_RT_H */ diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h new file mode 100644 index 00000000000000..2cf446704cd4fc --- /dev/null +++ b/include/linux/sched/signal.h @@ -0,0 +1,613 @@ +#ifndef _LINUX_SCHED_SIGNAL_H +#define _LINUX_SCHED_SIGNAL_H + +#include +#include +#include +#include +#include +#include + +/* + * Types defining task->signal and task->sighand and APIs using them: + */ + +struct sighand_struct { + atomic_t count; + struct k_sigaction action[_NSIG]; + spinlock_t siglock; + wait_queue_head_t signalfd_wqh; +}; + +/* + * Per-process accounting stats: + */ +struct pacct_struct { + int ac_flag; + long ac_exitcode; + unsigned long ac_mem; + u64 ac_utime, ac_stime; + unsigned long ac_minflt, ac_majflt; +}; + +struct cpu_itimer { + u64 expires; + u64 incr; +}; + +/* + * This is the atomic variant of task_cputime, which can be used for + * storing and updating 
task_cputime statistics without locking. + */ +struct task_cputime_atomic { + atomic64_t utime; + atomic64_t stime; + atomic64_t sum_exec_runtime; +}; + +#define INIT_CPUTIME_ATOMIC \ + (struct task_cputime_atomic) { \ + .utime = ATOMIC64_INIT(0), \ + .stime = ATOMIC64_INIT(0), \ + .sum_exec_runtime = ATOMIC64_INIT(0), \ + } +/** + * struct thread_group_cputimer - thread group interval timer counts + * @cputime_atomic: atomic thread group interval timers. + * @running: true when there are timers running and + * @cputime_atomic receives updates. + * @checking_timer: true when a thread in the group is in the + * process of checking for thread group timers. + * + * This structure contains the version of task_cputime, above, that is + * used for thread group CPU timer calculations. + */ +struct thread_group_cputimer { + struct task_cputime_atomic cputime_atomic; + bool running; + bool checking_timer; +}; + +/* + * NOTE! "signal_struct" does not have its own + * locking, because a shared signal_struct always + * implies a shared sighand_struct, so locking + * sighand_struct is always a proper superset of + * the locking of signal_struct. + */ +struct signal_struct { + atomic_t sigcnt; + atomic_t live; + int nr_threads; + struct list_head thread_head; + + wait_queue_head_t wait_chldexit; /* for wait4() */ + + /* current thread group signal load-balancing target: */ + struct task_struct *curr_target; + + /* shared signal handling: */ + struct sigpending shared_pending; + + /* thread group exit support */ + int group_exit_code; + /* overloaded: + * - notify group_exit_task when ->count is equal to notify_count + * - everyone except group_exit_task is stopped during signal delivery + * of fatal signals, group_exit_task processes the signal. + */ + int notify_count; + struct task_struct *group_exit_task; + + /* thread group stop support, overloads group_exit_code too */ + int group_stop_count; + unsigned int flags; /* see SIGNAL_* flags below */ + + /* + * PR_SET_CHILD_SUBREAPER marks a process, like a service + * manager, to re-parent orphan (double-forking) child processes + * to this process instead of 'init'. The service manager is + * able to receive SIGCHLD signals and is able to investigate + * the process until it calls wait(). All children of this + * process will inherit a flag if they should look for a + * child_subreaper process at exit. + */ + unsigned int is_child_subreaper:1; + unsigned int has_child_subreaper:1; + +#ifdef CONFIG_POSIX_TIMERS + + /* POSIX.1b Interval Timers */ + int posix_timer_id; + struct list_head posix_timers; + + /* ITIMER_REAL timer for the process */ + struct hrtimer real_timer; + ktime_t it_real_incr; + + /* + * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use + * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these + * values are defined to 0 and 1 respectively + */ + struct cpu_itimer it[2]; + + /* + * Thread group totals for process CPU timers. + * See thread_group_cputimer(), et al, for details. + */ + struct thread_group_cputimer cputimer; + + /* Earliest-expiration cache. 
*/ + struct task_cputime cputime_expires; + + struct list_head cpu_timers[3]; + +#endif + + struct pid *leader_pid; + +#ifdef CONFIG_NO_HZ_FULL + atomic_t tick_dep_mask; +#endif + + struct pid *tty_old_pgrp; + + /* boolean value for session group leader */ + int leader; + + struct tty_struct *tty; /* NULL if no tty */ + +#ifdef CONFIG_SCHED_AUTOGROUP + struct autogroup *autogroup; +#endif + /* + * Cumulative resource counters for dead threads in the group, + * and for reaped dead child processes forked by this group. + * Live threads maintain their own counters and add to these + * in __exit_signal, except for the group leader. + */ + seqlock_t stats_lock; + u64 utime, stime, cutime, cstime; + u64 gtime; + u64 cgtime; + struct prev_cputime prev_cputime; + unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; + unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt; + unsigned long inblock, oublock, cinblock, coublock; + unsigned long maxrss, cmaxrss; + struct task_io_accounting ioac; + + /* + * Cumulative ns of schedule CPU time fo dead threads in the + * group, not including a zombie group leader, (This only differs + * from jiffies_to_ns(utime + stime) if sched_clock uses something + * other than jiffies.) + */ + unsigned long long sum_sched_runtime; + + /* + * We don't bother to synchronize most readers of this at all, + * because there is no reader checking a limit that actually needs + * to get both rlim_cur and rlim_max atomically, and either one + * alone is a single word that can safely be read normally. + * getrlimit/setrlimit use task_lock(current->group_leader) to + * protect this instead of the siglock, because they really + * have no need to disable irqs. + */ + struct rlimit rlim[RLIM_NLIMITS]; + +#ifdef CONFIG_BSD_PROCESS_ACCT + struct pacct_struct pacct; /* per-process accounting information */ +#endif +#ifdef CONFIG_TASKSTATS + struct taskstats *stats; +#endif +#ifdef CONFIG_AUDIT + unsigned audit_tty; + struct tty_audit_buf *tty_audit_buf; +#endif + + /* + * Thread is the potential origin of an oom condition; kill first on + * oom + */ + bool oom_flag_origin; + short oom_score_adj; /* OOM kill score adjustment */ + short oom_score_adj_min; /* OOM kill score adjustment min value. + * Only settable by CAP_SYS_RESOURCE. */ + struct mm_struct *oom_mm; /* recorded mm when the thread group got + * killed by the oom killer */ + + struct mutex cred_guard_mutex; /* guard against foreign influences on + * credential calculations + * (notably. ptrace) */ +}; + +/* + * Bits in flags field of signal_struct. + */ +#define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */ +#define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */ +#define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */ +#define SIGNAL_GROUP_COREDUMP 0x00000008 /* coredump in progress */ +/* + * Pending notifications to parent. 
+ */ +#define SIGNAL_CLD_STOPPED 0x00000010 +#define SIGNAL_CLD_CONTINUED 0x00000020 +#define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED) + +#define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */ + +#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \ + SIGNAL_STOP_CONTINUED) + +static inline void signal_set_stop_flags(struct signal_struct *sig, + unsigned int flags) +{ + WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP)); + sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags; +} + +/* If true, all threads except ->group_exit_task have pending SIGKILL */ +static inline int signal_group_exit(const struct signal_struct *sig) +{ + return (sig->flags & SIGNAL_GROUP_EXIT) || + (sig->group_exit_task != NULL); +} + +extern void flush_signals(struct task_struct *); +extern void ignore_signals(struct task_struct *); +extern void flush_signal_handlers(struct task_struct *, int force_default); +extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); + +static inline int kernel_dequeue_signal(siginfo_t *info) +{ + struct task_struct *tsk = current; + siginfo_t __info; + int ret; + + spin_lock_irq(&tsk->sighand->siglock); + ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info); + spin_unlock_irq(&tsk->sighand->siglock); + + return ret; +} + +static inline void kernel_signal_stop(void) +{ + spin_lock_irq(¤t->sighand->siglock); + if (current->jobctl & JOBCTL_STOP_DEQUEUED) + __set_current_state(TASK_STOPPED); + spin_unlock_irq(¤t->sighand->siglock); + + schedule(); +} +extern int send_sig_info(int, struct siginfo *, struct task_struct *); +extern int force_sigsegv(int, struct task_struct *); +extern int force_sig_info(int, struct siginfo *, struct task_struct *); +extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp); +extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid); +extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *, + const struct cred *, u32); +extern int kill_pgrp(struct pid *pid, int sig, int priv); +extern int kill_pid(struct pid *pid, int sig, int priv); +extern int kill_proc_info(int, struct siginfo *, pid_t); +extern __must_check bool do_notify_parent(struct task_struct *, int); +extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); +extern void force_sig(int, struct task_struct *); +extern int send_sig(int, struct task_struct *, int); +extern int zap_other_threads(struct task_struct *p); +extern struct sigqueue *sigqueue_alloc(void); +extern void sigqueue_free(struct sigqueue *); +extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group); +extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *); + +static inline int restart_syscall(void) +{ + set_tsk_thread_flag(current, TIF_SIGPENDING); + return -ERESTARTNOINTR; +} + +static inline int signal_pending(struct task_struct *p) +{ + return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING)); +} + +static inline int __fatal_signal_pending(struct task_struct *p) +{ + return unlikely(sigismember(&p->pending.signal, SIGKILL)); +} + +static inline int fatal_signal_pending(struct task_struct *p) +{ + return signal_pending(p) && __fatal_signal_pending(p); +} + +static inline int signal_pending_state(long state, struct task_struct *p) +{ + if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL))) + return 0; + if (!signal_pending(p)) + return 0; + + return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); +} + +/* + * Reevaluate 
whether the task has signals pending delivery. + * Wake the task if so. + * This is required every time the blocked sigset_t changes. + * callers must hold sighand->siglock. + */ +extern void recalc_sigpending_and_wake(struct task_struct *t); +extern void recalc_sigpending(void); + +extern void signal_wake_up_state(struct task_struct *t, unsigned int state); + +static inline void signal_wake_up(struct task_struct *t, bool resume) +{ + signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0); +} +static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume) +{ + signal_wake_up_state(t, resume ? __TASK_TRACED : 0); +} + +#ifdef TIF_RESTORE_SIGMASK +/* + * Legacy restore_sigmask accessors. These are inefficient on + * SMP architectures because they require atomic operations. + */ + +/** + * set_restore_sigmask() - make sure saved_sigmask processing gets done + * + * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code + * will run before returning to user mode, to process the flag. For + * all callers, TIF_SIGPENDING is already set or it's no harm to set + * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the + * arch code will notice on return to user mode, in case those bits + * are scarce. We set TIF_SIGPENDING here to ensure that the arch + * signal code always gets run when TIF_RESTORE_SIGMASK is set. + */ +static inline void set_restore_sigmask(void) +{ + set_thread_flag(TIF_RESTORE_SIGMASK); + WARN_ON(!test_thread_flag(TIF_SIGPENDING)); +} +static inline void clear_restore_sigmask(void) +{ + clear_thread_flag(TIF_RESTORE_SIGMASK); +} +static inline bool test_restore_sigmask(void) +{ + return test_thread_flag(TIF_RESTORE_SIGMASK); +} +static inline bool test_and_clear_restore_sigmask(void) +{ + return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK); +} + +#else /* TIF_RESTORE_SIGMASK */ + +/* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */ +static inline void set_restore_sigmask(void) +{ + current->restore_sigmask = true; + WARN_ON(!test_thread_flag(TIF_SIGPENDING)); +} +static inline void clear_restore_sigmask(void) +{ + current->restore_sigmask = false; +} +static inline bool test_restore_sigmask(void) +{ + return current->restore_sigmask; +} +static inline bool test_and_clear_restore_sigmask(void) +{ + if (!current->restore_sigmask) + return false; + current->restore_sigmask = false; + return true; +} +#endif + +static inline void restore_saved_sigmask(void) +{ + if (test_and_clear_restore_sigmask()) + __set_current_blocked(¤t->saved_sigmask); +} + +static inline sigset_t *sigmask_to_save(void) +{ + sigset_t *res = ¤t->blocked; + if (unlikely(test_restore_sigmask())) + res = ¤t->saved_sigmask; + return res; +} + +static inline int kill_cad_pid(int sig, int priv) +{ + return kill_pid(cad_pid, sig, priv); +} + +/* These can be the second arg to send_sig_info/send_group_sig_info. */ +#define SEND_SIG_NOINFO ((struct siginfo *) 0) +#define SEND_SIG_PRIV ((struct siginfo *) 1) +#define SEND_SIG_FORCED ((struct siginfo *) 2) + +/* + * True if we are on the alternate signal stack. + */ +static inline int on_sig_stack(unsigned long sp) +{ + /* + * If the signal stack is SS_AUTODISARM then, by construction, we + * can't be on the signal stack unless user code deliberately set + * SS_AUTODISARM when we were already on it. + * + * This improves reliability: if user state gets corrupted such that + * the stack pointer points very close to the end of the signal stack, + * then this check will enable the signal to be handled anyway. 
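[ Illustration, not part of the patch: the pending-signal helpers above in the classic interruptible wait pattern. my_wait_for_flag() and the flag it polls are made up. ]

static int my_wait_for_flag(bool *flag)
{
        int ret = 0;

        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (READ_ONCE(*flag))
                        break;
                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;     /* let the signal be delivered */
                        break;
                }
                schedule();
        }
        __set_current_state(TASK_RUNNING);

        return ret;
}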
+ */ + if (current->sas_ss_flags & SS_AUTODISARM) + return 0; + +#ifdef CONFIG_STACK_GROWSUP + return sp >= current->sas_ss_sp && + sp - current->sas_ss_sp < current->sas_ss_size; +#else + return sp > current->sas_ss_sp && + sp - current->sas_ss_sp <= current->sas_ss_size; +#endif +} + +static inline int sas_ss_flags(unsigned long sp) +{ + if (!current->sas_ss_size) + return SS_DISABLE; + + return on_sig_stack(sp) ? SS_ONSTACK : 0; +} + +static inline void sas_ss_reset(struct task_struct *p) +{ + p->sas_ss_sp = 0; + p->sas_ss_size = 0; + p->sas_ss_flags = SS_DISABLE; +} + +static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig) +{ + if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp)) +#ifdef CONFIG_STACK_GROWSUP + return current->sas_ss_sp; +#else + return current->sas_ss_sp + current->sas_ss_size; +#endif + return sp; +} + +extern void __cleanup_sighand(struct sighand_struct *); +extern void flush_itimer_signals(void); + +#define tasklist_empty() \ + list_empty(&init_task.tasks) + +#define next_task(p) \ + list_entry_rcu((p)->tasks.next, struct task_struct, tasks) + +#define for_each_process(p) \ + for (p = &init_task ; (p = next_task(p)) != &init_task ; ) + +extern bool current_is_single_threaded(void); + +/* + * Careful: do_each_thread/while_each_thread is a double loop so + * 'break' will not work as expected - use goto instead. + */ +#define do_each_thread(g, t) \ + for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do + +#define while_each_thread(g, t) \ + while ((t = next_thread(t)) != g) + +#define __for_each_thread(signal, t) \ + list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node) + +#define for_each_thread(p, t) \ + __for_each_thread((p)->signal, t) + +/* Careful: this is a double loop, 'break' won't work as expected. */ +#define for_each_process_thread(p, t) \ + for_each_process(p) for_each_thread(p, t) + +typedef int (*proc_visitor)(struct task_struct *p, void *data); +void walk_process_tree(struct task_struct *top, proc_visitor, void *); + +static inline int get_nr_threads(struct task_struct *tsk) +{ + return tsk->signal->nr_threads; +} + +static inline bool thread_group_leader(struct task_struct *p) +{ + return p->exit_signal >= 0; +} + +/* Do to the insanities of de_thread it is possible for a process + * to have the pid of the thread group leader without actually being + * the thread group leader. For iteration through the pids in proc + * all we care about is that we have a task with the appropriate + * pid, we don't actually care if we have the right task. 
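[ Illustration, not part of the patch: the iteration macros above walk the task lists under RCU or tasklist_lock. A minimal sketch counting every thread in the system; count_all_threads() is a made-up helper. ]

static int count_all_threads(void)
{
        struct task_struct *p, *t;
        int nr = 0;

        rcu_read_lock();
        for_each_process_thread(p, t)
                nr++;
        rcu_read_unlock();

        return nr;
}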
+ */ +static inline bool has_group_leader_pid(struct task_struct *p) +{ + return task_pid(p) == p->signal->leader_pid; +} + +static inline +bool same_thread_group(struct task_struct *p1, struct task_struct *p2) +{ + return p1->signal == p2->signal; +} + +static inline struct task_struct *next_thread(const struct task_struct *p) +{ + return list_entry_rcu(p->thread_group.next, + struct task_struct, thread_group); +} + +static inline int thread_group_empty(struct task_struct *p) +{ + return list_empty(&p->thread_group); +} + +#define delay_group_leader(p) \ + (thread_group_leader(p) && !thread_group_empty(p)) + +extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, + unsigned long *flags); + +static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk, + unsigned long *flags) +{ + struct sighand_struct *ret; + + ret = __lock_task_sighand(tsk, flags); + (void)__cond_lock(&tsk->sighand->siglock, ret); + return ret; +} + +static inline void unlock_task_sighand(struct task_struct *tsk, + unsigned long *flags) +{ + spin_unlock_irqrestore(&tsk->sighand->siglock, *flags); +} + +static inline unsigned long task_rlimit(const struct task_struct *tsk, + unsigned int limit) +{ + return READ_ONCE(tsk->signal->rlim[limit].rlim_cur); +} + +static inline unsigned long task_rlimit_max(const struct task_struct *tsk, + unsigned int limit) +{ + return READ_ONCE(tsk->signal->rlim[limit].rlim_max); +} + +static inline unsigned long rlimit(unsigned int limit) +{ + return task_rlimit(current, limit); +} + +static inline unsigned long rlimit_max(unsigned int limit) +{ + return task_rlimit_max(current, limit); +} + +#endif /* _LINUX_SCHED_SIGNAL_H */ diff --git a/include/linux/sched/stat.h b/include/linux/sched/stat.h new file mode 100644 index 00000000000000..141b74c53fad2d --- /dev/null +++ b/include/linux/sched/stat.h @@ -0,0 +1,40 @@ +#ifndef _LINUX_SCHED_STAT_H +#define _LINUX_SCHED_STAT_H + +#include + +/* + * Various counters maintained by the scheduler and fork(), + * exposed via /proc, sys.c or used by drivers via these APIs. + * + * ( Note that all these values are aquired without locking, + * so they can only be relied on in narrow circumstances. 
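[ Illustration, not part of the patch: lock_task_sighand() above only takes ->siglock if the sighand is still around, which matters for tasks past __exit_signal(). read_group_min_flt() is a made-up helper reading one of the signal_struct counters declared earlier. ]

static unsigned long read_group_min_flt(struct task_struct *task)
{
        unsigned long flags;
        unsigned long min_flt = 0;

        if (lock_task_sighand(task, &flags)) {
                min_flt = task->signal->min_flt;
                unlock_task_sighand(task, &flags);
        }

        return min_flt;
}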
) + */ + +extern unsigned long total_forks; +extern int nr_threads; +DECLARE_PER_CPU(unsigned long, process_counts); +extern int nr_processes(void); +extern unsigned long nr_running(void); +extern bool single_task_running(void); +extern unsigned long nr_iowait(void); +extern unsigned long nr_iowait_cpu(int cpu); +extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load); + +static inline int sched_info_on(void) +{ +#ifdef CONFIG_SCHEDSTATS + return 1; +#elif defined(CONFIG_TASK_DELAY_ACCT) + extern int delayacct_on; + return delayacct_on; +#else + return 0; +#endif +} + +#ifdef CONFIG_SCHEDSTATS +void force_schedstat_enabled(void); +#endif + +#endif /* _LINUX_SCHED_STAT_H */ diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index 49308e142aaeb1..0f5ecd4d298e2a 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -1,5 +1,9 @@ -#ifndef _SCHED_SYSCTL_H -#define _SCHED_SYSCTL_H +#ifndef _LINUX_SCHED_SYSCTL_H +#define _LINUX_SCHED_SYSCTL_H + +#include + +struct ctl_table; #ifdef CONFIG_DETECT_HUNG_TASK extern int sysctl_hung_task_check_count; @@ -78,4 +82,4 @@ extern int sysctl_schedstats(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); -#endif /* _SCHED_SYSCTL_H */ +#endif /* _LINUX_SCHED_SYSCTL_H */ diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h new file mode 100644 index 00000000000000..a978d7189cfddd --- /dev/null +++ b/include/linux/sched/task.h @@ -0,0 +1,139 @@ +#ifndef _LINUX_SCHED_TASK_H +#define _LINUX_SCHED_TASK_H + +/* + * Interface between the scheduler and various task lifetime (fork()/exit()) + * functionality: + */ + +#include + +struct task_struct; +union thread_union; + +/* + * This serializes "schedule()" and also protects + * the run-queue from deletions/modifications (but + * _adding_ to the beginning of the run-queue has + * a separate lock). + */ +extern rwlock_t tasklist_lock; +extern spinlock_t mmlist_lock; + +extern union thread_union init_thread_union; +extern struct task_struct init_task; + +#ifdef CONFIG_PROVE_RCU +extern int lockdep_tasklist_lock_is_held(void); +#endif /* #ifdef CONFIG_PROVE_RCU */ + +extern asmlinkage void schedule_tail(struct task_struct *prev); +extern void init_idle(struct task_struct *idle, int cpu); +extern void init_idle_bootup_task(struct task_struct *idle); + +extern int sched_fork(unsigned long clone_flags, struct task_struct *p); +extern void sched_dead(struct task_struct *p); + +void __noreturn do_task_dead(void); + +extern void proc_caches_init(void); + +extern void release_task(struct task_struct * p); + +#ifdef CONFIG_HAVE_COPY_THREAD_TLS +extern int copy_thread_tls(unsigned long, unsigned long, unsigned long, + struct task_struct *, unsigned long); +#else +extern int copy_thread(unsigned long, unsigned long, unsigned long, + struct task_struct *); + +/* Architectures that haven't opted into copy_thread_tls get the tls argument + * via pt_regs, so ignore the tls argument passed via C. 
*/ +static inline int copy_thread_tls( + unsigned long clone_flags, unsigned long sp, unsigned long arg, + struct task_struct *p, unsigned long tls) +{ + return copy_thread(clone_flags, sp, arg, p); +} +#endif +extern void flush_thread(void); + +#ifdef CONFIG_HAVE_EXIT_THREAD +extern void exit_thread(struct task_struct *tsk); +#else +static inline void exit_thread(struct task_struct *tsk) +{ +} +#endif +extern void do_group_exit(int); + +extern void exit_files(struct task_struct *); +extern void exit_itimers(struct signal_struct *); + +extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long); +extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); +struct task_struct *fork_idle(int); +extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); + +extern void free_task(struct task_struct *tsk); + +/* sched_exec is called by processes performing an exec */ +#ifdef CONFIG_SMP +extern void sched_exec(void); +#else +#define sched_exec() {} +#endif + +#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) + +extern void __put_task_struct(struct task_struct *t); + +static inline void put_task_struct(struct task_struct *t) +{ + if (atomic_dec_and_test(&t->usage)) + __put_task_struct(t); +} + +struct task_struct *task_rcu_dereference(struct task_struct **ptask); +struct task_struct *try_get_task_struct(struct task_struct **ptask); + + +#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT +extern int arch_task_struct_size __read_mostly; +#else +# define arch_task_struct_size (sizeof(struct task_struct)) +#endif + +#ifdef CONFIG_VMAP_STACK +static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) +{ + return t->stack_vm_area; +} +#else +static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) +{ + return NULL; +} +#endif + +/* + * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring + * subscriptions and synchronises with wait4(). Also used in procfs. Also + * pins the final release of task.io_context. Also protects ->cpuset and + * ->cgroup.subsys[]. And ->vfork_done. + * + * Nests both inside and outside of read_lock(&tasklist_lock). + * It must not be nested with write_lock_irq(&tasklist_lock), + * neither inside nor outside. + */ +static inline void task_lock(struct task_struct *p) +{ + spin_lock(&p->alloc_lock); +} + +static inline void task_unlock(struct task_struct *p) +{ + spin_unlock(&p->alloc_lock); +} + +#endif /* _LINUX_SCHED_TASK_H */ diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h new file mode 100644 index 00000000000000..df6ea6665b310e --- /dev/null +++ b/include/linux/sched/task_stack.h @@ -0,0 +1,121 @@ +#ifndef _LINUX_SCHED_TASK_STACK_H +#define _LINUX_SCHED_TASK_STACK_H + +/* + * task->stack (kernel stack) handling interfaces: + */ + +#include +#include + +#ifdef CONFIG_THREAD_INFO_IN_TASK + +/* + * When accessing the stack of a non-current task that might exit, use + * try_get_task_stack() instead. task_stack_page will return a pointer + * that could get freed out from under you. 
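[ Illustration, not part of the patch: the task reference helpers above, pinning a task found under RCU so it can be used after rcu_read_unlock(). find_and_pin_task() is a made-up helper; find_task_by_vpid() is assumed from the core sched headers. ]

static struct task_struct *find_and_pin_task(pid_t nr)
{
        struct task_struct *p;

        rcu_read_lock();
        p = find_task_by_vpid(nr);
        if (p)
                get_task_struct(p);
        rcu_read_unlock();

        return p;       /* caller drops the reference with put_task_struct() */
}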
+ */ +static inline void *task_stack_page(const struct task_struct *task) +{ + return task->stack; +} + +#define setup_thread_stack(new,old) do { } while(0) + +static inline unsigned long *end_of_stack(const struct task_struct *task) +{ + return task->stack; +} + +#elif !defined(__HAVE_THREAD_FUNCTIONS) + +#define task_stack_page(task) ((void *)(task)->stack) + +static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org) +{ + *task_thread_info(p) = *task_thread_info(org); + task_thread_info(p)->task = p; +} + +/* + * Return the address of the last usable long on the stack. + * + * When the stack grows down, this is just above the thread + * info struct. Going any lower will corrupt the threadinfo. + * + * When the stack grows up, this is the highest address. + * Beyond that position, we corrupt data on the next page. + */ +static inline unsigned long *end_of_stack(struct task_struct *p) +{ +#ifdef CONFIG_STACK_GROWSUP + return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1; +#else + return (unsigned long *)(task_thread_info(p) + 1); +#endif +} + +#endif + +#ifdef CONFIG_THREAD_INFO_IN_TASK +static inline void *try_get_task_stack(struct task_struct *tsk) +{ + return atomic_inc_not_zero(&tsk->stack_refcount) ? + task_stack_page(tsk) : NULL; +} + +extern void put_task_stack(struct task_struct *tsk); +#else +static inline void *try_get_task_stack(struct task_struct *tsk) +{ + return task_stack_page(tsk); +} + +static inline void put_task_stack(struct task_struct *tsk) {} +#endif + +#define task_stack_end_corrupted(task) \ + (*(end_of_stack(task)) != STACK_END_MAGIC) + +static inline int object_is_on_stack(void *obj) +{ + void *stack = task_stack_page(current); + + return (obj >= stack) && (obj < (stack + THREAD_SIZE)); +} + +extern void thread_stack_cache_init(void); + +#ifdef CONFIG_DEBUG_STACK_USAGE +static inline unsigned long stack_not_used(struct task_struct *p) +{ + unsigned long *n = end_of_stack(p); + + do { /* Skip over canary */ +# ifdef CONFIG_STACK_GROWSUP + n--; +# else + n++; +# endif + } while (!*n); + +# ifdef CONFIG_STACK_GROWSUP + return (unsigned long)end_of_stack(p) - (unsigned long)n; +# else + return (unsigned long)n - (unsigned long)end_of_stack(p); +# endif +} +#endif +extern void set_task_stack_end_magic(struct task_struct *tsk); + +#ifndef __HAVE_ARCH_KSTACK_END +static inline int kstack_end(void *addr) +{ + /* Reliable end of stack detection: + * Some APM bios versions misalign the stack + */ + return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*))); +} +#endif + +#endif /* _LINUX_SCHED_TASK_STACK_H */ diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h new file mode 100644 index 00000000000000..7d065abc7a470d --- /dev/null +++ b/include/linux/sched/topology.h @@ -0,0 +1,226 @@ +#ifndef _LINUX_SCHED_TOPOLOGY_H +#define _LINUX_SCHED_TOPOLOGY_H + +#include + +#include + +/* + * sched-domains (multiprocessor balancing) declarations: + */ +#ifdef CONFIG_SMP + +#define SD_LOAD_BALANCE 0x0001 /* Do load balancing on this domain. 
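[ Illustration, not part of the patch: with THREAD_INFO_IN_TASK (and vmap'd stacks) another task's stack can be freed at any time, hence try_get_task_stack()/put_task_stack() above. dump_other_stack() is a made-up helper. ]

static void dump_other_stack(struct task_struct *task)
{
        void *stack = try_get_task_stack(task);

        if (!stack)
                return;         /* the task has exited and freed its stack */

        pr_info("%s[%d]: stack at %p, end magic %s\n",
                task->comm, task_pid_nr(task), stack,
                task_stack_end_corrupted(task) ? "corrupted" : "intact");

        put_task_stack(task);
}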
*/ +#define SD_BALANCE_NEWIDLE 0x0002 /* Balance when about to become idle */ +#define SD_BALANCE_EXEC 0x0004 /* Balance on exec */ +#define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */ +#define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */ +#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */ +#define SD_ASYM_CPUCAPACITY 0x0040 /* Groups have different max cpu capacities */ +#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share cpu capacity */ +#define SD_SHARE_POWERDOMAIN 0x0100 /* Domain members share power domain */ +#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */ +#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ +#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */ +#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */ +#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */ +#define SD_NUMA 0x4000 /* cross-node balancing */ + +/* + * Increase resolution of cpu_capacity calculations + */ +#define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT +#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT) + +#ifdef CONFIG_SCHED_SMT +static inline int cpu_smt_flags(void) +{ + return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES; +} +#endif + +#ifdef CONFIG_SCHED_MC +static inline int cpu_core_flags(void) +{ + return SD_SHARE_PKG_RESOURCES; +} +#endif + +#ifdef CONFIG_NUMA +static inline int cpu_numa_flags(void) +{ + return SD_NUMA; +} +#endif + +extern int arch_asym_cpu_priority(int cpu); + +struct sched_domain_attr { + int relax_domain_level; +}; + +#define SD_ATTR_INIT (struct sched_domain_attr) { \ + .relax_domain_level = -1, \ +} + +extern int sched_domain_level_max; + +struct sched_group; + +struct sched_domain_shared { + atomic_t ref; + atomic_t nr_busy_cpus; + int has_idle_cores; +}; + +struct sched_domain { + /* These fields must be setup */ + struct sched_domain *parent; /* top domain must be null terminated */ + struct sched_domain *child; /* bottom domain must be null terminated */ + struct sched_group *groups; /* the balancing groups of the domain */ + unsigned long min_interval; /* Minimum balance interval ms */ + unsigned long max_interval; /* Maximum balance interval ms */ + unsigned int busy_factor; /* less balancing by factor if busy */ + unsigned int imbalance_pct; /* No balance until over watermark */ + unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */ + unsigned int busy_idx; + unsigned int idle_idx; + unsigned int newidle_idx; + unsigned int wake_idx; + unsigned int forkexec_idx; + unsigned int smt_gain; + + int nohz_idle; /* NOHZ IDLE status */ + int flags; /* See SD_* */ + int level; + + /* Runtime fields. */ + unsigned long last_balance; /* init to jiffies. units in jiffies */ + unsigned int balance_interval; /* initialise to 1. units in ms. 
*/ + unsigned int nr_balance_failed; /* initialise to 0 */ + + /* idle_balance() stats */ + u64 max_newidle_lb_cost; + unsigned long next_decay_max_lb_cost; + + u64 avg_scan_cost; /* select_idle_sibling */ + +#ifdef CONFIG_SCHEDSTATS + /* load_balance() stats */ + unsigned int lb_count[CPU_MAX_IDLE_TYPES]; + unsigned int lb_failed[CPU_MAX_IDLE_TYPES]; + unsigned int lb_balanced[CPU_MAX_IDLE_TYPES]; + unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES]; + unsigned int lb_gained[CPU_MAX_IDLE_TYPES]; + unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES]; + unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES]; + unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES]; + + /* Active load balancing */ + unsigned int alb_count; + unsigned int alb_failed; + unsigned int alb_pushed; + + /* SD_BALANCE_EXEC stats */ + unsigned int sbe_count; + unsigned int sbe_balanced; + unsigned int sbe_pushed; + + /* SD_BALANCE_FORK stats */ + unsigned int sbf_count; + unsigned int sbf_balanced; + unsigned int sbf_pushed; + + /* try_to_wake_up() stats */ + unsigned int ttwu_wake_remote; + unsigned int ttwu_move_affine; + unsigned int ttwu_move_balance; +#endif +#ifdef CONFIG_SCHED_DEBUG + char *name; +#endif + union { + void *private; /* used during construction */ + struct rcu_head rcu; /* used during destruction */ + }; + struct sched_domain_shared *shared; + + unsigned int span_weight; + /* + * Span of all CPUs in this domain. + * + * NOTE: this field is variable length. (Allocated dynamically + * by attaching extra space to the end of the structure, + * depending on how many CPUs the kernel has booted up with) + */ + unsigned long span[0]; +}; + +static inline struct cpumask *sched_domain_span(struct sched_domain *sd) +{ + return to_cpumask(sd->span); +} + +extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], + struct sched_domain_attr *dattr_new); + +/* Allocate an array of sched domains, for partition_sched_domains(). 
*/ +cpumask_var_t *alloc_sched_domains(unsigned int ndoms); +void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms); + +bool cpus_share_cache(int this_cpu, int that_cpu); + +typedef const struct cpumask *(*sched_domain_mask_f)(int cpu); +typedef int (*sched_domain_flags_f)(void); + +#define SDTL_OVERLAP 0x01 + +struct sd_data { + struct sched_domain **__percpu sd; + struct sched_domain_shared **__percpu sds; + struct sched_group **__percpu sg; + struct sched_group_capacity **__percpu sgc; +}; + +struct sched_domain_topology_level { + sched_domain_mask_f mask; + sched_domain_flags_f sd_flags; + int flags; + int numa_level; + struct sd_data data; +#ifdef CONFIG_SCHED_DEBUG + char *name; +#endif +}; + +extern void set_sched_topology(struct sched_domain_topology_level *tl); + +#ifdef CONFIG_SCHED_DEBUG +# define SD_INIT_NAME(type) .name = #type +#else +# define SD_INIT_NAME(type) +#endif + +#else /* CONFIG_SMP */ + +struct sched_domain_attr; + +static inline void +partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], + struct sched_domain_attr *dattr_new) +{ +} + +static inline bool cpus_share_cache(int this_cpu, int that_cpu) +{ + return true; +} + +#endif /* !CONFIG_SMP */ + +static inline int task_node(const struct task_struct *p) +{ + return cpu_to_node(task_cpu(p)); +} + +#endif /* _LINUX_SCHED_TOPOLOGY_H */ diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h new file mode 100644 index 00000000000000..5d5415e129d436 --- /dev/null +++ b/include/linux/sched/user.h @@ -0,0 +1,61 @@ +#ifndef _LINUX_SCHED_USER_H +#define _LINUX_SCHED_USER_H + +#include +#include + +struct key; + +/* + * Some day this will be a full-fledged user tracking system.. + */ +struct user_struct { + atomic_t __count; /* reference count */ + atomic_t processes; /* How many processes does this user have? */ + atomic_t sigpending; /* How many pending signals does this user have? */ +#ifdef CONFIG_FANOTIFY + atomic_t fanotify_listeners; +#endif +#ifdef CONFIG_EPOLL + atomic_long_t epoll_watches; /* The number of file descriptors currently watched */ +#endif +#ifdef CONFIG_POSIX_MQUEUE + /* protected by mq_lock */ + unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */ +#endif + unsigned long locked_shm; /* How many pages of mlocked shm ? */ + unsigned long unix_inflight; /* How many files in flight in unix sockets */ + atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */ + +#ifdef CONFIG_KEYS + struct key *uid_keyring; /* UID specific keyring */ + struct key *session_keyring; /* UID's default session keyring */ +#endif + + /* Hash table maintenance information */ + struct hlist_node uidhash_node; + kuid_t uid; + +#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL) + atomic_long_t locked_vm; +#endif +}; + +extern int uids_sysfs_init(void); + +extern struct user_struct *find_user(kuid_t); + +extern struct user_struct root_user; +#define INIT_USER (&root_user) + + +/* per-UID process charging. 
*/ +extern struct user_struct * alloc_uid(kuid_t); +static inline struct user_struct *get_uid(struct user_struct *u) +{ + atomic_inc(&u->__count); + return u; +} +extern void free_uid(struct user_struct *); + +#endif /* _LINUX_SCHED_USER_H */ diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h new file mode 100644 index 00000000000000..d03d8a9047dcb5 --- /dev/null +++ b/include/linux/sched/wake_q.h @@ -0,0 +1,53 @@ +#ifndef _LINUX_SCHED_WAKE_Q_H +#define _LINUX_SCHED_WAKE_Q_H + +/* + * Wake-queues are lists of tasks with a pending wakeup, whose + * callers have already marked the task as woken internally, + * and can thus carry on. A common use case is being able to + * do the wakeups once the corresponding user lock as been + * released. + * + * We hold reference to each task in the list across the wakeup, + * thus guaranteeing that the memory is still valid by the time + * the actual wakeups are performed in wake_up_q(). + * + * One per task suffices, because there's never a need for a task to be + * in two wake queues simultaneously; it is forbidden to abandon a task + * in a wake queue (a call to wake_up_q() _must_ follow), so if a task is + * already in a wake queue, the wakeup will happen soon and the second + * waker can just skip it. + * + * The DEFINE_WAKE_Q macro declares and initializes the list head. + * wake_up_q() does NOT reinitialize the list; it's expected to be + * called near the end of a function. Otherwise, the list can be + * re-initialized for later re-use by wake_q_init(). + * + * Note that this can cause spurious wakeups. schedule() callers + * must ensure the call is done inside a loop, confirming that the + * wakeup condition has in fact occurred. + */ + +#include + +struct wake_q_head { + struct wake_q_node *first; + struct wake_q_node **lastp; +}; + +#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01) + +#define DEFINE_WAKE_Q(name) \ + struct wake_q_head name = { WAKE_Q_TAIL, &name.first } + +static inline void wake_q_init(struct wake_q_head *head) +{ + head->first = WAKE_Q_TAIL; + head->lastp = &head->first; +} + +extern void wake_q_add(struct wake_q_head *head, + struct task_struct *task); +extern void wake_up_q(struct wake_q_head *head); + +#endif /* _LINUX_SCHED_WAKE_Q_H */ diff --git a/include/linux/sched/xacct.h b/include/linux/sched/xacct.h new file mode 100644 index 00000000000000..a28156a0d34a12 --- /dev/null +++ b/include/linux/sched/xacct.h @@ -0,0 +1,48 @@ +#ifndef _LINUX_SCHED_XACCT_H +#define _LINUX_SCHED_XACCT_H + +/* + * Extended task accounting methods: + */ + +#include + +#ifdef CONFIG_TASK_XACCT +static inline void add_rchar(struct task_struct *tsk, ssize_t amt) +{ + tsk->ioac.rchar += amt; +} + +static inline void add_wchar(struct task_struct *tsk, ssize_t amt) +{ + tsk->ioac.wchar += amt; +} + +static inline void inc_syscr(struct task_struct *tsk) +{ + tsk->ioac.syscr++; +} + +static inline void inc_syscw(struct task_struct *tsk) +{ + tsk->ioac.syscw++; +} +#else +static inline void add_rchar(struct task_struct *tsk, ssize_t amt) +{ +} + +static inline void add_wchar(struct task_struct *tsk, ssize_t amt) +{ +} + +static inline void inc_syscr(struct task_struct *tsk) +{ +} + +static inline void inc_syscw(struct task_struct *tsk) +{ +} +#endif + +#endif /* _LINUX_SCHED_XACCT_H */ diff --git a/include/linux/signal.h b/include/linux/signal.h index 5308304993bea5..94ad6eea955035 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -1,32 +1,13 @@ #ifndef _LINUX_SIGNAL_H #define _LINUX_SIGNAL_H -#include #include 
-#include +#include struct task_struct; /* for sysctl */ extern int print_fatal_signals; -/* - * Real Time signals may be queued. - */ - -struct sigqueue { - struct list_head list; - int flags; - siginfo_t info; - struct user_struct *user; -}; - -/* flags values. */ -#define SIGQUEUE_PREALLOC 1 - -struct sigpending { - struct list_head list; - sigset_t signal; -}; #ifndef HAVE_ARCH_COPY_SIGINFO @@ -272,42 +253,6 @@ extern void set_current_blocked(sigset_t *); extern void __set_current_blocked(const sigset_t *); extern int show_unhandled_signals; -struct sigaction { -#ifndef __ARCH_HAS_IRIX_SIGACTION - __sighandler_t sa_handler; - unsigned long sa_flags; -#else - unsigned int sa_flags; - __sighandler_t sa_handler; -#endif -#ifdef __ARCH_HAS_SA_RESTORER - __sigrestore_t sa_restorer; -#endif - sigset_t sa_mask; /* mask last for extensibility */ -}; - -struct k_sigaction { - struct sigaction sa; -#ifdef __ARCH_HAS_KA_RESTORER - __sigrestore_t ka_restorer; -#endif -}; - -#ifdef CONFIG_OLD_SIGACTION -struct old_sigaction { - __sighandler_t sa_handler; - old_sigset_t sa_mask; - unsigned long sa_flags; - __sigrestore_t sa_restorer; -}; -#endif - -struct ksignal { - struct k_sigaction ka; - siginfo_t info; - int sig; -}; - extern int get_signal(struct ksignal *ksig); extern void signal_setup_done(int failed, struct ksignal *ksig, int stepping); extern void exit_signals(struct task_struct *tsk); diff --git a/include/linux/signal_types.h b/include/linux/signal_types.h new file mode 100644 index 00000000000000..16d862a3d8f38d --- /dev/null +++ b/include/linux/signal_types.h @@ -0,0 +1,66 @@ +#ifndef _LINUX_SIGNAL_TYPES_H +#define _LINUX_SIGNAL_TYPES_H + +/* + * Basic signal handling related data type definitions: + */ + +#include +#include + +/* + * Real Time signals may be queued. + */ + +struct sigqueue { + struct list_head list; + int flags; + siginfo_t info; + struct user_struct *user; +}; + +/* flags values. 
*/ +#define SIGQUEUE_PREALLOC 1 + +struct sigpending { + struct list_head list; + sigset_t signal; +}; + +struct sigaction { +#ifndef __ARCH_HAS_IRIX_SIGACTION + __sighandler_t sa_handler; + unsigned long sa_flags; +#else + unsigned int sa_flags; + __sighandler_t sa_handler; +#endif +#ifdef __ARCH_HAS_SA_RESTORER + __sigrestore_t sa_restorer; +#endif + sigset_t sa_mask; /* mask last for extensibility */ +}; + +struct k_sigaction { + struct sigaction sa; +#ifdef __ARCH_HAS_KA_RESTORER + __sigrestore_t ka_restorer; +#endif +}; + +#ifdef CONFIG_OLD_SIGACTION +struct old_sigaction { + __sighandler_t sa_handler; + old_sigset_t sa_mask; + unsigned long sa_flags; + __sigrestore_t sa_restorer; +}; +#endif + +struct ksignal { + struct k_sigaction ka; + siginfo_t info; + int sig; +}; + +#endif /* _LINUX_SIGNAL_TYPES_H */ diff --git a/include/linux/signalfd.h b/include/linux/signalfd.h index eadbe227c25648..4985048640a731 100644 --- a/include/linux/signalfd.h +++ b/include/linux/signalfd.h @@ -8,7 +8,7 @@ #define _LINUX_SIGNALFD_H #include - +#include #ifdef CONFIG_SIGNALFD diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 69ccd263691120..c776abd86937f5 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include diff --git a/include/linux/stat.h b/include/linux/stat.h index 075cb0c7eb2ade..c76e524fb34b6a 100644 --- a/include/linux/stat.h +++ b/include/linux/stat.h @@ -18,20 +18,32 @@ #include #include +#define KSTAT_QUERY_FLAGS (AT_STATX_SYNC_TYPE) + struct kstat { - u64 ino; - dev_t dev; + u32 result_mask; /* What fields the user got */ umode_t mode; unsigned int nlink; + uint32_t blksize; /* Preferred I/O size */ + u64 attributes; +#define KSTAT_ATTR_FS_IOC_FLAGS \ + (STATX_ATTR_COMPRESSED | \ + STATX_ATTR_IMMUTABLE | \ + STATX_ATTR_APPEND | \ + STATX_ATTR_NODUMP | \ + STATX_ATTR_ENCRYPTED \ + )/* Attrs corresponding to FS_*_FL flags */ + u64 ino; + dev_t dev; + dev_t rdev; kuid_t uid; kgid_t gid; - dev_t rdev; loff_t size; - struct timespec atime; + struct timespec atime; struct timespec mtime; struct timespec ctime; - unsigned long blksize; - unsigned long long blocks; + struct timespec btime; /* File creation time */ + u64 blocks; }; #endif diff --git a/include/linux/sunrpc/types.h b/include/linux/sunrpc/types.h index d222f47550afa2..11a7536c0fd26d 100644 --- a/include/linux/sunrpc/types.h +++ b/include/linux/sunrpc/types.h @@ -10,6 +10,7 @@ #define _LINUX_SUNRPC_TYPES_H_ #include +#include #include #include #include diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 91a740f6b88423..980c3c9b06f881 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -48,6 +48,7 @@ struct stat; struct stat64; struct statfs; struct statfs64; +struct statx; struct __sysctl_args; struct sysinfo; struct timespec; @@ -902,5 +903,7 @@ asmlinkage long sys_pkey_mprotect(unsigned long start, size_t len, unsigned long prot, int pkey); asmlinkage long sys_pkey_alloc(unsigned long flags, unsigned long init_val); asmlinkage long sys_pkey_free(int pkey); +asmlinkage long sys_statx(int dfd, const char __user *path, unsigned flags, + unsigned mask, struct statx __user *buffer); #endif diff --git a/include/linux/taskstats_kern.h b/include/linux/taskstats_kern.h index 58de6edf751f5e..e2a5daf8d14f33 100644 --- a/include/linux/taskstats_kern.h +++ b/include/linux/taskstats_kern.h @@ -8,7 +8,7 @@ #define _LINUX_TASKSTATS_KERN_H #include -#include +#include #include #ifdef CONFIG_TASKSTATS diff 
--git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index d2e804e15c3e46..b598cbc7b57684 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -8,6 +8,10 @@ void timekeeping_init(void); extern int timekeeping_suspended; +/* Architecture timer tick functions: */ +extern void update_process_times(int user); +extern void xtime_update(unsigned long ticks); + /* * Get and set timeofday */ diff --git a/include/linux/timer.h b/include/linux/timer.h index c7bdf895179c92..e6789b8757d502 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -212,7 +212,7 @@ struct hrtimer; extern enum hrtimer_restart it_real_fn(struct hrtimer *); #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) -#include +struct ctl_table; extern unsigned int sysctl_timer_migration; int timer_migration_handler(struct ctl_table *table, int write, diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h index 1d0043dc34e427..de2a722fe3cf7c 100644 --- a/include/linux/usb/quirks.h +++ b/include/linux/usb/quirks.h @@ -50,4 +50,10 @@ /* device can't handle Link Power Management */ #define USB_QUIRK_NO_LPM BIT(10) +/* + * Device reports its bInterval as linear frames instead of the + * USB 2.0 calculation. + */ +#define USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL BIT(11) + #endif /* __LINUX_USB_QUIRKS_H */ diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index 363e0e8082a9d7..32354b4b4b2ba5 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h @@ -5,6 +5,9 @@ #include #include #include +#include +#include +#include #include #define UID_GID_MAP_MAX_EXTENTS 5 @@ -69,7 +72,7 @@ struct ucounts { struct hlist_node node; struct user_namespace *ns; kuid_t uid; - atomic_t count; + int count; atomic_t ucount[UCOUNT_COUNTS]; }; diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index 0468548acebfef..48a3483dccb123 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -61,8 +61,7 @@ extern void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *, unsigned long from, unsigned long to, unsigned long len); -extern void userfaultfd_remove(struct vm_area_struct *vma, - struct vm_area_struct **prev, +extern bool userfaultfd_remove(struct vm_area_struct *vma, unsigned long start, unsigned long end); @@ -72,8 +71,6 @@ extern int userfaultfd_unmap_prep(struct vm_area_struct *vma, extern void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf); -extern void userfaultfd_exit(struct mm_struct *mm); - #else /* CONFIG_USERFAULTFD */ /* mm helpers */ @@ -120,11 +117,11 @@ static inline void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *ctx, { } -static inline void userfaultfd_remove(struct vm_area_struct *vma, - struct vm_area_struct **prev, +static inline bool userfaultfd_remove(struct vm_area_struct *vma, unsigned long start, unsigned long end) { + return true; } static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma, @@ -139,10 +136,6 @@ static inline void userfaultfd_unmap_complete(struct mm_struct *mm, { } -static inline void userfaultfd_exit(struct mm_struct *mm) -{ -} - #endif /* CONFIG_USERFAULTFD */ #endif /* _LINUX_USERFAULTFD_K_H */ diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 26c155bb639b57..8355bab175e1d8 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -7,6 +7,8 @@ #include #include +struct irq_affinity; + /** * virtio_config_ops - operations for configuring a virtio 
device * @get: read the value of a configuration field @@ -56,6 +58,7 @@ * This returns a pointer to the bus name a la pci_name from which * the caller can then copy. * @set_vq_affinity: set the affinity for a virtqueue. + * @get_vq_affinity: get the affinity for a virtqueue (optional). */ typedef void vq_callback_t(struct virtqueue *); struct virtio_config_ops { @@ -68,14 +71,15 @@ struct virtio_config_ops { void (*set_status)(struct virtio_device *vdev, u8 status); void (*reset)(struct virtio_device *vdev); int (*find_vqs)(struct virtio_device *, unsigned nvqs, - struct virtqueue *vqs[], - vq_callback_t *callbacks[], - const char * const names[]); + struct virtqueue *vqs[], vq_callback_t *callbacks[], + const char * const names[], struct irq_affinity *desc); void (*del_vqs)(struct virtio_device *); u64 (*get_features)(struct virtio_device *vdev); int (*finalize_features)(struct virtio_device *vdev); const char *(*bus_name)(struct virtio_device *vdev); int (*set_vq_affinity)(struct virtqueue *vq, int cpu); + const struct cpumask *(*get_vq_affinity)(struct virtio_device *vdev, + int index); }; /* If driver didn't advertise the feature, it will never appear. */ @@ -169,7 +173,7 @@ struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev, vq_callback_t *callbacks[] = { c }; const char *names[] = { n }; struct virtqueue *vq; - int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names); + int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names, NULL); if (err < 0) return ERR_PTR(err); return vq; diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h index 9638bfeb0d1f63..584f9a647ad4ac 100644 --- a/include/linux/virtio_vsock.h +++ b/include/linux/virtio_vsock.h @@ -48,6 +48,8 @@ struct virtio_vsock_pkt { struct virtio_vsock_hdr hdr; struct work_struct work; struct list_head list; + /* socket refcnt not held, only use for cancellation */ + struct vsock_sock *vsk; void *buf; u32 len; u32 off; @@ -56,6 +58,7 @@ struct virtio_vsock_pkt { struct virtio_vsock_pkt_info { u32 remote_cid, remote_port; + struct vsock_sock *vsk; struct msghdr *msg; u32 pkt_len; u16 type; diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 6aa1b6cb58285d..a80b7b59cf3341 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -79,6 +79,9 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, THP_SPLIT_PAGE_FAILED, THP_DEFERRED_SPLIT_PAGE, THP_SPLIT_PMD, +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD + THP_SPLIT_PUD, +#endif THP_ZERO_PAGE_ALLOC, THP_ZERO_PAGE_ALLOC_FAILED, #endif diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h index c3fa0fd4394995..1081db987391d2 100644 --- a/include/linux/vmacache.h +++ b/include/linux/vmacache.h @@ -12,7 +12,7 @@ static inline void vmacache_flush(struct task_struct *tsk) { - memset(tsk->vmacache, 0, sizeof(tsk->vmacache)); + memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas)); } extern void vmacache_flush_all(struct mm_struct *mm); diff --git a/include/linux/wait.h b/include/linux/wait.h index 1421132e90861b..db076ca7f11da0 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -6,6 +6,7 @@ #include #include #include + #include #include @@ -619,30 +620,19 @@ do { \ __ret; \ }) +extern int do_wait_intr(wait_queue_head_t *, wait_queue_t *); +extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_t *); -#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \ +#define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \ ({ 
\ - int __ret = 0; \ + int __ret; \ DEFINE_WAIT(__wait); \ if (exclusive) \ __wait.flags |= WQ_FLAG_EXCLUSIVE; \ do { \ - if (likely(list_empty(&__wait.task_list))) \ - __add_wait_queue_tail(&(wq), &__wait); \ - set_current_state(TASK_INTERRUPTIBLE); \ - if (signal_pending(current)) { \ - __ret = -ERESTARTSYS; \ + __ret = fn(&(wq), &__wait); \ + if (__ret) \ break; \ - } \ - if (irq) \ - spin_unlock_irq(&(wq).lock); \ - else \ - spin_unlock(&(wq).lock); \ - schedule(); \ - if (irq) \ - spin_lock_irq(&(wq).lock); \ - else \ - spin_lock(&(wq).lock); \ } while (!(condition)); \ __remove_wait_queue(&(wq), &__wait); \ __set_current_state(TASK_RUNNING); \ @@ -675,7 +665,7 @@ do { \ */ #define wait_event_interruptible_locked(wq, condition) \ ((condition) \ - ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0)) + ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr)) /** * wait_event_interruptible_locked_irq - sleep until a condition gets true @@ -702,7 +692,7 @@ do { \ */ #define wait_event_interruptible_locked_irq(wq, condition) \ ((condition) \ - ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1)) + ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq)) /** * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true @@ -733,7 +723,7 @@ do { \ */ #define wait_event_interruptible_exclusive_locked(wq, condition) \ ((condition) \ - ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0)) + ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr)) /** * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true @@ -764,7 +754,7 @@ do { \ */ #define wait_event_interruptible_exclusive_locked_irq(wq, condition) \ ((condition) \ - ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1)) + ? 
0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq)) #define __wait_event_killable(wq, condition) \ diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h index 574ff2ae94beeb..6cd94e5ee113f0 100644 --- a/include/media/v4l2-ioctl.h +++ b/include/media/v4l2-ioctl.h @@ -12,6 +12,7 @@ #include #include #include +#include #include /* need __user */ #include diff --git a/include/media/vsp1.h b/include/media/vsp1.h index 458b400373d44d..38aac554dbbab6 100644 --- a/include/media/vsp1.h +++ b/include/media/vsp1.h @@ -20,8 +20,17 @@ struct device; int vsp1_du_init(struct device *dev); -int vsp1_du_setup_lif(struct device *dev, unsigned int width, - unsigned int height); +/** + * struct vsp1_du_lif_config - VSP LIF configuration + * @width: output frame width + * @height: output frame height + */ +struct vsp1_du_lif_config { + unsigned int width; + unsigned int height; +}; + +int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg); struct vsp1_du_atomic_config { u32 pixelformat; diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h index 27dfe85772b1b2..b8eb51a661e560 100644 --- a/include/net/9p/9p.h +++ b/include/net/9p/9p.h @@ -402,10 +402,10 @@ struct p9_wstat { u32 atime; u32 mtime; u64 length; - char *name; - char *uid; - char *gid; - char *muid; + const char *name; + const char *uid; + const char *gid; + const char *muid; char *extension; /* 9p2000.u extensions */ kuid_t n_uid; /* 9p2000.u extensions */ kgid_t n_gid; /* 9p2000.u extensions */ diff --git a/include/net/9p/client.h b/include/net/9p/client.h index c6b97e58cf8455..b582339ccef5c6 100644 --- a/include/net/9p/client.h +++ b/include/net/9p/client.h @@ -223,16 +223,16 @@ void p9_client_destroy(struct p9_client *clnt); void p9_client_disconnect(struct p9_client *clnt); void p9_client_begin_disconnect(struct p9_client *clnt); struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid, - char *uname, kuid_t n_uname, char *aname); + const char *uname, kuid_t n_uname, const char *aname); struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname, - char **wnames, int clone); + const unsigned char * const *wnames, int clone); int p9_client_open(struct p9_fid *fid, int mode); -int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode, +int p9_client_fcreate(struct p9_fid *fid, const char *name, u32 perm, int mode, char *extension); -int p9_client_link(struct p9_fid *fid, struct p9_fid *oldfid, char *newname); -int p9_client_symlink(struct p9_fid *fid, char *name, char *symname, kgid_t gid, - struct p9_qid *qid); -int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode, +int p9_client_link(struct p9_fid *fid, struct p9_fid *oldfid, const char *newname); +int p9_client_symlink(struct p9_fid *fid, const char *name, const char *symname, + kgid_t gid, struct p9_qid *qid); +int p9_client_create_dotl(struct p9_fid *ofid, const char *name, u32 flags, u32 mode, kgid_t gid, struct p9_qid *qid); int p9_client_clunk(struct p9_fid *fid); int p9_client_fsync(struct p9_fid *fid, int datasync); @@ -250,9 +250,9 @@ int p9_client_setattr(struct p9_fid *fid, struct p9_iattr_dotl *attr); struct p9_stat_dotl *p9_client_getattr_dotl(struct p9_fid *fid, u64 request_mask); -int p9_client_mknod_dotl(struct p9_fid *oldfid, char *name, int mode, +int p9_client_mknod_dotl(struct p9_fid *oldfid, const char *name, int mode, dev_t rdev, kgid_t gid, struct p9_qid *); -int p9_client_mkdir_dotl(struct p9_fid *fid, char *name, int mode, +int 
p9_client_mkdir_dotl(struct p9_fid *fid, const char *name, int mode, kgid_t gid, struct p9_qid *); int p9_client_lock_dotl(struct p9_fid *fid, struct p9_flock *flock, u8 *status); int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *fl); diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h index f2758964ce6f89..f32ed9ac181a47 100644 --- a/include/net/af_vsock.h +++ b/include/net/af_vsock.h @@ -100,6 +100,9 @@ struct vsock_transport { void (*destruct)(struct vsock_sock *); void (*release)(struct vsock_sock *); + /* Cancel all pending packets sent on vsock. */ + int (*cancel_pkt)(struct vsock_sock *vsk); + /* Connections. */ int (*connect)(struct vsock_sock *); diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 90708f68cc024e..95ccc1eef55845 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -26,6 +26,8 @@ #define __HCI_CORE_H #include +#include + #include #include diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h index b8d637225a07dd..c0452de83086e5 100644 --- a/include/net/busy_poll.h +++ b/include/net/busy_poll.h @@ -25,6 +25,8 @@ #define _LINUX_NET_BUSY_POLL_H #include +#include +#include #include #ifdef CONFIG_NET_RX_BUSY_POLL diff --git a/include/net/inet_common.h b/include/net/inet_common.h index b7952d55b9c000..f39ae697347f65 100644 --- a/include/net/inet_common.h +++ b/include/net/inet_common.h @@ -20,7 +20,8 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags, int is_sendmsg); int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags); -int inet_accept(struct socket *sock, struct socket *newsock, int flags); +int inet_accept(struct socket *sock, struct socket *newsock, int flags, + bool kern); int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size); ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags); diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 826f198374f809..c7a577976bec08 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -258,7 +258,7 @@ inet_csk_rto_backoff(const struct inet_connection_sock *icsk, return (unsigned long)min_t(u64, when, max_when); } -struct sock *inet_csk_accept(struct sock *sk, int flags, int *err); +struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern); int inet_csk_get_port(struct sock *sk, unsigned short snum); diff --git a/include/net/irda/timer.h b/include/net/irda/timer.h index cb2615ccf761d6..d784f242cf7b4d 100644 --- a/include/net/irda/timer.h +++ b/include/net/irda/timer.h @@ -59,7 +59,7 @@ struct lap_cb; * Slot timer must never exceed 85 ms, and must always be at least 25 ms, * suggested to 75-85 msec by IrDA lite. 
This doesn't work with a lot of * devices, and other stackes uses a lot more, so it's best we do it as well - * (Note : this is the default value and sysctl overides it - Jean II) + * (Note : this is the default value and sysctl overrides it - Jean II) */ #define SLOT_TIMEOUT (90*HZ/1000) diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index f540f9ad2af4f6..19605878da4739 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -244,7 +244,7 @@ extern s32 (*nf_ct_nat_offset)(const struct nf_conn *ct, u32 seq); /* Fake conntrack entry for untracked connections */ -DECLARE_PER_CPU(struct nf_conn, nf_conntrack_untracked); +DECLARE_PER_CPU_ALIGNED(struct nf_conn, nf_conntrack_untracked); static inline struct nf_conn *nf_ct_untracked_get(void) { return raw_cpu_ptr(&nf_conntrack_untracked); diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index ac84686aaafb0b..0136028652bdb8 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -103,6 +103,35 @@ struct nft_regs { }; }; +/* Store/load an u16 or u8 integer to/from the u32 data register. + * + * Note, when using concatenations, register allocation happens at 32-bit + * level. So for store instruction, pad the rest part with zero to avoid + * garbage values. + */ + +static inline void nft_reg_store16(u32 *dreg, u16 val) +{ + *dreg = 0; + *(u16 *)dreg = val; +} + +static inline void nft_reg_store8(u32 *dreg, u8 val) +{ + *dreg = 0; + *(u8 *)dreg = val; +} + +static inline u16 nft_reg_load16(u32 *sreg) +{ + return *(u16 *)sreg; +} + +static inline u8 nft_reg_load8(u32 *sreg) +{ + return *(u8 *)sreg; +} + static inline void nft_data_copy(u32 *dst, const struct nft_data *src, unsigned int len) { @@ -203,7 +232,6 @@ struct nft_set_elem { struct nft_set; struct nft_set_iter { u8 genmask; - bool flush; unsigned int count; unsigned int skip; int err; @@ -988,9 +1016,9 @@ struct nft_object *nf_tables_obj_lookup(const struct nft_table *table, const struct nlattr *nla, u32 objtype, u8 genmask); -int nft_obj_notify(struct net *net, struct nft_table *table, - struct nft_object *obj, u32 portid, u32 seq, - int event, int family, int report, gfp_t gfp); +void nft_obj_notify(struct net *net, struct nft_table *table, + struct nft_object *obj, u32 portid, u32 seq, + int event, int family, int report, gfp_t gfp); /** * struct nft_object_type - stateful object type diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h index d150b506620173..97983d1c05e4d3 100644 --- a/include/net/netfilter/nf_tables_ipv6.h +++ b/include/net/netfilter/nf_tables_ipv6.h @@ -9,12 +9,13 @@ nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt, struct sk_buff *skb, const struct nf_hook_state *state) { + unsigned int flags = IP6_FH_F_AUTH; int protohdr, thoff = 0; unsigned short frag_off; nft_set_pktinfo(pkt, skb, state); - protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL); + protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags); if (protohdr < 0) { nft_set_pktinfo_proto_unspec(pkt, skb); return; @@ -32,6 +33,7 @@ __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt, const struct nf_hook_state *state) { #if IS_ENABLED(CONFIG_IPV6) + unsigned int flags = IP6_FH_F_AUTH; struct ipv6hdr *ip6h, _ip6h; unsigned int thoff = 0; unsigned short frag_off; @@ -50,7 +52,7 @@ __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt, if (pkt_len + sizeof(*ip6h) > skb->len) return -1; - protohdr = 
ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL); + protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags); if (protohdr < 0) return -1; diff --git a/include/net/scm.h b/include/net/scm.h index 59fa93c01d2a16..142ea9e7a6d0d8 100644 --- a/include/net/scm.h +++ b/include/net/scm.h @@ -3,6 +3,7 @@ #include #include +#include #include #include #include diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 1f71ee5ab51841..069582ee5d7fd5 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -448,10 +448,9 @@ static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu) return frag; } -static inline void sctp_assoc_pending_pmtu(struct sock *sk, struct sctp_association *asoc) +static inline void sctp_assoc_pending_pmtu(struct sctp_association *asoc) { - - sctp_assoc_sync_pmtu(sk, asoc); + sctp_assoc_sync_pmtu(asoc); asoc->pmtu_pending = 0; } @@ -596,12 +595,23 @@ static inline void sctp_v4_map_v6(union sctp_addr *addr) */ static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t) { - if (t->dst && (!dst_check(t->dst, t->dst_cookie) || - t->pathmtu != max_t(size_t, SCTP_TRUNC4(dst_mtu(t->dst)), - SCTP_DEFAULT_MINSEGMENT))) + if (t->dst && !dst_check(t->dst, t->dst_cookie)) sctp_transport_dst_release(t); return t->dst; } +static inline bool sctp_transport_pmtu_check(struct sctp_transport *t) +{ + __u32 pmtu = max_t(size_t, SCTP_TRUNC4(dst_mtu(t->dst)), + SCTP_DEFAULT_MINSEGMENT); + + if (t->pathmtu == pmtu) + return true; + + t->pathmtu = pmtu; + + return false; +} + #endif /* __net_sctp_h__ */ diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index a244db5e5ff7fa..138f8615acf099 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -83,6 +83,7 @@ struct sctp_bind_addr; struct sctp_ulpq; struct sctp_ep_common; struct crypto_shash; +struct sctp_stream; #include @@ -376,7 +377,8 @@ typedef struct sctp_sender_hb_info { __u64 hb_nonce; } sctp_sender_hb_info_t; -struct sctp_stream *sctp_stream_new(__u16 incnt, __u16 outcnt, gfp_t gfp); +int sctp_stream_new(struct sctp_association *asoc, gfp_t gfp); +int sctp_stream_init(struct sctp_association *asoc, gfp_t gfp); void sctp_stream_free(struct sctp_stream *stream); void sctp_stream_clear(struct sctp_stream *stream); @@ -476,7 +478,8 @@ struct sctp_pf { int (*send_verify) (struct sctp_sock *, union sctp_addr *); int (*supported_addrs)(const struct sctp_sock *, __be16 *); struct sock *(*create_accept_sk) (struct sock *sk, - struct sctp_association *asoc); + struct sctp_association *asoc, + bool kern); int (*addr_to_user)(struct sctp_sock *sk, union sctp_addr *addr); void (*to_sk_saddr)(union sctp_addr *, struct sock *sk); void (*to_sk_daddr)(union sctp_addr *, struct sock *sk); @@ -497,7 +500,6 @@ struct sctp_datamsg { /* Did the messenge fail to send? */ int send_error; u8 send_failed:1, - force_delay:1, can_delay; /* should this message be Nagle delayed */ }; @@ -752,6 +754,8 @@ struct sctp_transport { /* Is the Path MTU update pending on this tranport */ pmtu_pending:1, + dst_pending_confirm:1, /* need to confirm neighbour */ + /* Has this transport moved the ctsn since we last sacked */ sack_generation:1; u32 dst_cookie; @@ -805,8 +809,6 @@ struct sctp_transport { __u32 burst_limited; /* Holds old cwnd when max.burst is applied */ - __u32 dst_pending_confirm; /* need to confirm neighbour */ - /* Destination */ struct dst_entry *dst; /* Source address. 
*/ @@ -950,8 +952,8 @@ void sctp_transport_lower_cwnd(struct sctp_transport *, sctp_lower_cwnd_t); void sctp_transport_burst_limited(struct sctp_transport *); void sctp_transport_burst_reset(struct sctp_transport *); unsigned long sctp_transport_timeout(struct sctp_transport *); -void sctp_transport_reset(struct sctp_transport *); -void sctp_transport_update_pmtu(struct sock *, struct sctp_transport *, u32); +void sctp_transport_reset(struct sctp_transport *t); +void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu); void sctp_transport_immediate_rtx(struct sctp_transport *); void sctp_transport_dst_release(struct sctp_transport *t); void sctp_transport_dst_confirm(struct sctp_transport *t); @@ -1876,6 +1878,7 @@ struct sctp_association { __u8 need_ecne:1, /* Need to send an ECNE Chunk? */ temp:1, /* Is it a temporary association? */ + force_delay:1, prsctp_enable:1, reconf_enable:1; @@ -1951,7 +1954,7 @@ void sctp_assoc_update(struct sctp_association *old, __u32 sctp_association_get_next_tsn(struct sctp_association *); -void sctp_assoc_sync_pmtu(struct sock *, struct sctp_association *); +void sctp_assoc_sync_pmtu(struct sctp_association *asoc); void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int); void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int); void sctp_assoc_set_primary(struct sctp_association *, diff --git a/include/net/sock.h b/include/net/sock.h index 9ccefa5c548786..03252d53975de7 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -236,6 +236,7 @@ struct sock_common { * @sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN * @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings * @sk_lock: synchronizer + * @sk_kern_sock: True if sock is using kernel lock classes * @sk_rcvbuf: size of receive buffer in bytes * @sk_wq: sock wait queue and async head * @sk_rx_dst: receive input route used by early demux @@ -430,7 +431,8 @@ struct sock { #endif kmemcheck_bitfield_begin(flags); - unsigned int sk_padding : 2, + unsigned int sk_padding : 1, + sk_kern_sock : 1, sk_no_check_tx : 1, sk_no_check_rx : 1, sk_userlocks : 4, @@ -1015,7 +1017,8 @@ struct proto { int addr_len); int (*disconnect)(struct sock *sk, int flags); - struct sock * (*accept)(struct sock *sk, int flags, int *err); + struct sock * (*accept)(struct sock *sk, int flags, int *err, + bool kern); int (*ioctl)(struct sock *sk, int cmd, unsigned long arg); @@ -1526,6 +1529,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority, void sk_free(struct sock *sk); void sk_destruct(struct sock *sk); struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority); +void sk_free_unlock_clone(struct sock *sk); struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, gfp_t priority); @@ -1572,7 +1576,7 @@ int sock_cmsg_send(struct sock *sk, struct msghdr *msg, int sock_no_bind(struct socket *, struct sockaddr *, int); int sock_no_connect(struct socket *, struct sockaddr *, int, int); int sock_no_socketpair(struct socket *, struct socket *); -int sock_no_accept(struct socket *, struct socket *, int); +int sock_no_accept(struct socket *, struct socket *, int, bool); int sock_no_getname(struct socket *, struct sockaddr *, int *, int); unsigned int sock_no_poll(struct file *, struct socket *, struct poll_table_struct *); diff --git a/include/rdma/ib.h b/include/rdma/ib.h index a6b93706b0fc96..9b4c22a3693188 100644 --- a/include/rdma/ib.h +++ b/include/rdma/ib.h @@ -35,6 +35,7 @@ #include #include +#include struct ib_addr { union { diff --git 
a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 0f1813c1368795..99e4423eb2b80b 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1863,6 +1863,9 @@ struct ib_port_immutable { }; struct ib_device { + /* Do not access @dma_device directly from ULP nor from HW drivers. */ + struct device *dma_device; + char name[IB_DEVICE_NAME_MAX]; struct list_head event_handler_list; @@ -3007,7 +3010,7 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt) */ static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) { - return dma_mapping_error(&dev->dev, dma_addr); + return dma_mapping_error(dev->dma_device, dma_addr); } /** @@ -3021,7 +3024,7 @@ static inline u64 ib_dma_map_single(struct ib_device *dev, void *cpu_addr, size_t size, enum dma_data_direction direction) { - return dma_map_single(&dev->dev, cpu_addr, size, direction); + return dma_map_single(dev->dma_device, cpu_addr, size, direction); } /** @@ -3035,7 +3038,7 @@ static inline void ib_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size, enum dma_data_direction direction) { - dma_unmap_single(&dev->dev, addr, size, direction); + dma_unmap_single(dev->dma_device, addr, size, direction); } /** @@ -3052,7 +3055,7 @@ static inline u64 ib_dma_map_page(struct ib_device *dev, size_t size, enum dma_data_direction direction) { - return dma_map_page(&dev->dev, page, offset, size, direction); + return dma_map_page(dev->dma_device, page, offset, size, direction); } /** @@ -3066,7 +3069,7 @@ static inline void ib_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size, enum dma_data_direction direction) { - dma_unmap_page(&dev->dev, addr, size, direction); + dma_unmap_page(dev->dma_device, addr, size, direction); } /** @@ -3080,7 +3083,7 @@ static inline int ib_dma_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction) { - return dma_map_sg(&dev->dev, sg, nents, direction); + return dma_map_sg(dev->dma_device, sg, nents, direction); } /** @@ -3094,7 +3097,7 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction) { - dma_unmap_sg(&dev->dev, sg, nents, direction); + dma_unmap_sg(dev->dma_device, sg, nents, direction); } static inline int ib_dma_map_sg_attrs(struct ib_device *dev, @@ -3102,7 +3105,8 @@ static inline int ib_dma_map_sg_attrs(struct ib_device *dev, enum dma_data_direction direction, unsigned long dma_attrs) { - return dma_map_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs); + return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, + dma_attrs); } static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, @@ -3110,7 +3114,7 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, enum dma_data_direction direction, unsigned long dma_attrs) { - dma_unmap_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs); + dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs); } /** * ib_sg_dma_address - Return the DMA address from a scatter/gather entry @@ -3152,7 +3156,7 @@ static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev, size_t size, enum dma_data_direction dir) { - dma_sync_single_for_cpu(&dev->dev, addr, size, dir); + dma_sync_single_for_cpu(dev->dma_device, addr, size, dir); } /** @@ -3167,7 +3171,7 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev, size_t size, enum dma_data_direction dir) { - dma_sync_single_for_device(&dev->dev, addr, size, dir); + 
dma_sync_single_for_device(dev->dma_device, addr, size, dir); } /** @@ -3182,7 +3186,7 @@ static inline void *ib_dma_alloc_coherent(struct ib_device *dev, dma_addr_t *dma_handle, gfp_t flag) { - return dma_alloc_coherent(&dev->dev, size, dma_handle, flag); + return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag); } /** @@ -3196,7 +3200,7 @@ static inline void ib_dma_free_coherent(struct ib_device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle) { - dma_free_coherent(&dev->dev, size, cpu_addr, dma_handle); + dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle); } /** diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h index b0e275de6dec0d..583875ea136ab2 100644 --- a/include/scsi/libiscsi.h +++ b/include/scsi/libiscsi.h @@ -196,6 +196,7 @@ struct iscsi_conn { struct iscsi_task *task; /* xmit task in progress */ /* xmit */ + spinlock_t taskqueuelock; /* protects the next three lists */ struct list_head mgmtqueue; /* mgmt (control) xmit queue */ struct list_head cmdqueue; /* data-path cmd queue */ struct list_head requeue; /* tasks needing another run */ diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 8990e580b278bd..080c7ce9bae889 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -315,6 +315,7 @@ extern void scsi_remove_device(struct scsi_device *); extern int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh); void scsi_attach_vpd(struct scsi_device *sdev); +extern struct scsi_device *scsi_device_from_queue(struct request_queue *q); extern int scsi_device_get(struct scsi_device *); extern void scsi_device_put(struct scsi_device *); extern struct scsi_device *scsi_device_lookup(struct Scsi_Host *, @@ -409,19 +410,16 @@ extern int scsi_is_target_device(const struct device *); extern void scsi_sanitize_inquiry_string(unsigned char *s, int len); extern int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, int data_direction, void *buffer, unsigned bufflen, - unsigned char *sense, int timeout, int retries, - u64 flags, int *resid); -extern int scsi_execute_req_flags(struct scsi_device *sdev, - const unsigned char *cmd, int data_direction, void *buffer, - unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout, - int retries, int *resid, u64 flags, req_flags_t rq_flags); + unsigned char *sense, struct scsi_sense_hdr *sshdr, + int timeout, int retries, u64 flags, + req_flags_t rq_flags, int *resid); static inline int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd, int data_direction, void *buffer, unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout, int retries, int *resid) { - return scsi_execute_req_flags(sdev, cmd, data_direction, buffer, - bufflen, sshdr, timeout, retries, resid, 0, 0); + return scsi_execute(sdev, cmd, data_direction, buffer, + bufflen, NULL, sshdr, timeout, retries, 0, 0, resid); } extern void sdev_disable_disk_events(struct scsi_device *sdev); extern void sdev_enable_disk_events(struct scsi_device *sdev); @@ -474,6 +472,10 @@ static inline int scsi_device_created(struct scsi_device *sdev) sdev->sdev_state == SDEV_CREATED_BLOCK; } +int scsi_internal_device_block(struct scsi_device *sdev, bool wait); +int scsi_internal_device_unblock(struct scsi_device *sdev, + enum scsi_device_state new_state); + /* accessor functions for the SCSI parameters */ static inline int scsi_device_sync(struct scsi_device *sdev) { diff --git a/include/sound/control.h b/include/sound/control.h index 21d047f229a1b5..bd7246de58e7c4 100644 --- 
a/include/sound/control.h +++ b/include/sound/control.h @@ -22,6 +22,7 @@ * */ +#include #include #define snd_kcontrol_chip(kcontrol) ((kcontrol)->private_data) diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h index 1277e9ba031818..ff1a4f4cd66d06 100644 --- a/include/target/iscsi/iscsi_transport.h +++ b/include/target/iscsi/iscsi_transport.h @@ -55,8 +55,12 @@ extern int iscsit_setup_scsi_cmd(struct iscsi_conn *, struct iscsi_cmd *, extern void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *); extern int iscsit_process_scsi_cmd(struct iscsi_conn *, struct iscsi_cmd *, struct iscsi_scsi_req *); -extern int iscsit_check_dataout_hdr(struct iscsi_conn *, unsigned char *, - struct iscsi_cmd **); +extern int +__iscsit_check_dataout_hdr(struct iscsi_conn *, void *, + struct iscsi_cmd *, u32, bool *); +extern int +iscsit_check_dataout_hdr(struct iscsi_conn *conn, void *buf, + struct iscsi_cmd **out_cmd); extern int iscsit_check_dataout_payload(struct iscsi_cmd *, struct iscsi_data *, bool); extern int iscsit_setup_nop_out(struct iscsi_conn *, struct iscsi_cmd *, @@ -125,6 +129,9 @@ extern void iscsit_release_cmd(struct iscsi_cmd *); extern void iscsit_free_cmd(struct iscsi_cmd *, bool); extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *, struct iscsi_conn *, u8); +extern struct iscsi_cmd * +iscsit_find_cmd_from_itt_or_dump(struct iscsi_conn *conn, + itt_t init_task_tag, u32 length); /* * From iscsi_target_nego.c diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index b54b98dc2d4a77..1b0f447ce850f0 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h @@ -4,7 +4,12 @@ #include #include -#define TRANSPORT_FLAG_PASSTHROUGH 1 +#define TRANSPORT_FLAG_PASSTHROUGH 0x1 +/* + * ALUA commands, state checks and setup operations are handled by the + * backend module. 
+ */ +#define TRANSPORT_FLAG_PASSTHROUGH_ALUA 0x2 struct request_queue; struct scatterlist; diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 878560e60c7527..4b784b6e21c0d9 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -4,7 +4,9 @@ #include /* struct config_group */ #include /* enum dma_data_direction */ #include /* struct percpu_ida */ +#include #include /* struct semaphore */ +#include #define TARGET_CORE_VERSION "v5.0" @@ -197,6 +199,7 @@ enum tcm_tmreq_table { TMR_LUN_RESET = 5, TMR_TARGET_WARM_RESET = 6, TMR_TARGET_COLD_RESET = 7, + TMR_UNKNOWN = 0xff, }; /* fabric independent task management response values */ @@ -296,7 +299,7 @@ struct t10_alua_tg_pt_gp { struct list_head tg_pt_gp_lun_list; struct se_lun *tg_pt_gp_alua_lun; struct se_node_acl *tg_pt_gp_alua_nacl; - struct delayed_work tg_pt_gp_transition_work; + struct work_struct tg_pt_gp_transition_work; struct completion *tg_pt_gp_transition_complete; }; @@ -397,7 +400,6 @@ struct se_tmr_req { void *fabric_tmr_ptr; struct se_cmd *task_cmd; struct se_device *tmr_dev; - struct se_lun *tmr_lun; struct list_head tmr_list; }; @@ -488,8 +490,6 @@ struct se_cmd { #define CMD_T_COMPLETE (1 << 2) #define CMD_T_SENT (1 << 4) #define CMD_T_STOP (1 << 5) -#define CMD_T_DEV_ACTIVE (1 << 7) -#define CMD_T_BUSY (1 << 9) #define CMD_T_TAS (1 << 10) #define CMD_T_FABRIC_STOP (1 << 11) spinlock_t t_state_lock; @@ -732,6 +732,7 @@ struct se_lun { struct config_group lun_group; struct se_port_stat_grps port_stat_grps; struct completion lun_ref_comp; + struct completion lun_shutdown_comp; struct percpu_ref lun_ref; struct list_head lun_dev_link; struct hlist_node link; @@ -767,6 +768,8 @@ struct se_device { u32 dev_index; u64 creation_time; atomic_long_t num_resets; + atomic_long_t aborts_complete; + atomic_long_t aborts_no_task; atomic_long_t num_cmds; atomic_long_t read_bytes; atomic_long_t write_bytes; diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h index 358041bad1da03..d7dd1427fe0de9 100644 --- a/include/target/target_core_fabric.h +++ b/include/target/target_core_fabric.h @@ -47,7 +47,7 @@ struct target_core_fabric_ops { u32 (*tpg_get_inst_index)(struct se_portal_group *); /* * Optional to release struct se_cmd and fabric dependent allocated - * I/O descriptor in transport_cmd_check_stop(). + * I/O descriptor after command execution has finished. * * Returning 1 will signal a descriptor has been released. * Returning 0 will signal a descriptor has not been released. 
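As an aside for readers of this hunk: the reworded comment above documents the optional descriptor-release callback of struct target_core_fabric_ops (check_stop_free() in the mainline tree, although the field itself lies outside the context lines shown here). A minimal, purely illustrative sketch of a fabric module honouring the "return 1 = descriptor released, return 0 = not released" convention might look like the following; the example_* names and the kref-based lifetime are assumptions for illustration only and are not part of this patch.

#include <linux/kref.h>
#include <linux/slab.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

/* Hypothetical per-command descriptor of an imaginary fabric module.
 * The fabric would kref_init(&cmd->ref) when it allocates this. */
struct example_fabric_cmd {
	struct se_cmd	se_cmd;		/* embedded core descriptor */
	struct kref	ref;
	/* ... fabric-private state ... */
};

static void example_fabric_cmd_free(struct kref *ref)
{
	struct example_fabric_cmd *cmd =
		container_of(ref, struct example_fabric_cmd, ref);

	kfree(cmd);
}

/*
 * Optional target_core_fabric_ops callback, per the comment above:
 * return 1 if the fabric released its descriptor here, 0 otherwise.
 */
static int example_check_stop_free(struct se_cmd *se_cmd)
{
	struct example_fabric_cmd *cmd =
		container_of(se_cmd, struct example_fabric_cmd, se_cmd);

	return kref_put(&cmd->ref, example_fabric_cmd_free);
}

Using kref_put() here makes the return value line up naturally with the documented convention, since it returns 1 exactly when the final reference was dropped and the release function ran.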
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index 593f586545eba9..39123c06a56613 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -119,6 +119,7 @@ enum rxrpc_recvmsg_trace { rxrpc_recvmsg_full, rxrpc_recvmsg_hole, rxrpc_recvmsg_next, + rxrpc_recvmsg_requeue, rxrpc_recvmsg_return, rxrpc_recvmsg_terminal, rxrpc_recvmsg_to_be_accepted, @@ -277,6 +278,7 @@ enum rxrpc_congest_change { EM(rxrpc_recvmsg_full, "FULL") \ EM(rxrpc_recvmsg_hole, "HOLE") \ EM(rxrpc_recvmsg_next, "NEXT") \ + EM(rxrpc_recvmsg_requeue, "REQU") \ EM(rxrpc_recvmsg_return, "RETN") \ EM(rxrpc_recvmsg_terminal, "TERM") \ EM(rxrpc_recvmsg_to_be_accepted, "TBAC") \ diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 9b90c57517a918..9e3ef6c99e4b0d 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -4,7 +4,7 @@ #if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_SCHED_H -#include +#include #include #include diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h index 14e49c7981359c..b35533b9427719 100644 --- a/include/trace/events/syscalls.h +++ b/include/trace/events/syscalls.h @@ -1,5 +1,6 @@ #undef TRACE_SYSTEM #define TRACE_SYSTEM raw_syscalls +#undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_FILE syscalls #if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ) diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index 9b1462e38b821a..a076cf1a3a23be 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -730,9 +730,11 @@ __SYSCALL(__NR_pkey_mprotect, sys_pkey_mprotect) __SYSCALL(__NR_pkey_alloc, sys_pkey_alloc) #define __NR_pkey_free 290 __SYSCALL(__NR_pkey_free, sys_pkey_free) +#define __NR_statx 291 +__SYSCALL(__NR_statx, sys_statx) #undef __NR_syscalls -#define __NR_syscalls 291 +#define __NR_syscalls 292 /* * All syscalls below here should go away really, diff --git a/include/uapi/drm/omap_drm.h b/include/uapi/drm/omap_drm.h index 407cb55df6ac17..7fb97863c94577 100644 --- a/include/uapi/drm/omap_drm.h +++ b/include/uapi/drm/omap_drm.h @@ -33,8 +33,8 @@ extern "C" { #define OMAP_PARAM_CHIPSET_ID 1 /* ie. 
0x3430, 0x4430, etc */ struct drm_omap_param { - uint64_t param; /* in */ - uint64_t value; /* in (set_param), out (get_param) */ + __u64 param; /* in */ + __u64 value; /* in (set_param), out (get_param) */ }; #define OMAP_BO_SCANOUT 0x00000001 /* scanout capable (phys contiguous) */ @@ -53,18 +53,18 @@ struct drm_omap_param { #define OMAP_BO_TILED (OMAP_BO_TILED_8 | OMAP_BO_TILED_16 | OMAP_BO_TILED_32) union omap_gem_size { - uint32_t bytes; /* (for non-tiled formats) */ + __u32 bytes; /* (for non-tiled formats) */ struct { - uint16_t width; - uint16_t height; + __u16 width; + __u16 height; } tiled; /* (for tiled formats) */ }; struct drm_omap_gem_new { union omap_gem_size size; /* in */ - uint32_t flags; /* in */ - uint32_t handle; /* out */ - uint32_t __pad; + __u32 flags; /* in */ + __u32 handle; /* out */ + __u32 __pad; }; /* mask of operations: */ @@ -74,33 +74,33 @@ enum omap_gem_op { }; struct drm_omap_gem_cpu_prep { - uint32_t handle; /* buffer handle (in) */ - uint32_t op; /* mask of omap_gem_op (in) */ + __u32 handle; /* buffer handle (in) */ + __u32 op; /* mask of omap_gem_op (in) */ }; struct drm_omap_gem_cpu_fini { - uint32_t handle; /* buffer handle (in) */ - uint32_t op; /* mask of omap_gem_op (in) */ + __u32 handle; /* buffer handle (in) */ + __u32 op; /* mask of omap_gem_op (in) */ /* TODO maybe here we pass down info about what regions are touched * by sw so we can be clever about cache ops? For now a placeholder, * set to zero and we just do full buffer flush.. */ - uint32_t nregions; - uint32_t __pad; + __u32 nregions; + __u32 __pad; }; struct drm_omap_gem_info { - uint32_t handle; /* buffer handle (in) */ - uint32_t pad; - uint64_t offset; /* mmap offset (out) */ + __u32 handle; /* buffer handle (in) */ + __u32 pad; + __u64 offset; /* mmap offset (out) */ /* note: in case of tiled buffers, the user virtual size can be * different from the physical size (ie. how many pages are needed * to back the object) which is returned in DRM_IOCTL_GEM_OPEN.. 
* This size here is the one that should be used if you want to * mmap() the buffer: */ - uint32_t size; /* virtual size for mmap'ing (out) */ - uint32_t __pad; + __u32 size; /* virtual size for mmap'ing (out) */ + __u32 __pad; }; #define DRM_OMAP_GET_PARAM 0x00 diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index 1c80efb67d109a..dd9820b1c7796b 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -466,6 +466,7 @@ header-y += virtio_console.h header-y += virtio_gpu.h header-y += virtio_ids.h header-y += virtio_input.h +header-y += virtio_mmio.h header-y += virtio_net.h header-y += virtio_pci.h header-y += virtio_ring.h diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h index db4c253f8011b2..dcfc3a5a9cb1d2 100644 --- a/include/uapi/linux/btrfs.h +++ b/include/uapi/linux/btrfs.h @@ -713,33 +713,6 @@ enum btrfs_err_code { BTRFS_ERROR_DEV_ONLY_WRITABLE, BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS }; -/* An error code to error string mapping for the kernel -* error codes -*/ -static inline char *btrfs_err_str(enum btrfs_err_code err_code) -{ - switch (err_code) { - case BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET: - return "unable to go below two devices on raid1"; - case BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET: - return "unable to go below four devices on raid10"; - case BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET: - return "unable to go below two devices on raid5"; - case BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET: - return "unable to go below three devices on raid6"; - case BTRFS_ERROR_DEV_TGT_REPLACE: - return "unable to remove the dev_replace target dev"; - case BTRFS_ERROR_DEV_MISSING_NOT_FOUND: - return "no missing devices found to remove"; - case BTRFS_ERROR_DEV_ONLY_WRITABLE: - return "unable to remove the only writeable device"; - case BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS: - return "add/delete/balance/replace/resize operation "\ - "in progress"; - default: - return NULL; - } -} #define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \ struct btrfs_ioctl_vol_args) diff --git a/include/uapi/linux/fcntl.h b/include/uapi/linux/fcntl.h index beed138bd35938..813afd6eee713e 100644 --- a/include/uapi/linux/fcntl.h +++ b/include/uapi/linux/fcntl.h @@ -63,5 +63,10 @@ #define AT_NO_AUTOMOUNT 0x800 /* Suppress terminal automount traversal */ #define AT_EMPTY_PATH 0x1000 /* Allow empty relative pathname */ +#define AT_STATX_SYNC_TYPE 0x6000 /* Type of synchronisation required from statx() */ +#define AT_STATX_SYNC_AS_STAT 0x0000 /* - Do whatever stat() does */ +#define AT_STATX_FORCE_SYNC 0x2000 /* - Force the attributes to be sync'd with the server */ +#define AT_STATX_DONT_SYNC 0x4000 /* - Don't sync attributes with the server */ + #endif /* _UAPI_LINUX_FCNTL_H */ diff --git a/include/uapi/linux/packet_diag.h b/include/uapi/linux/packet_diag.h index d08c63f3dd6ff4..0c5d5dd61b6ab1 100644 --- a/include/uapi/linux/packet_diag.h +++ b/include/uapi/linux/packet_diag.h @@ -64,7 +64,7 @@ struct packet_diag_mclist { __u32 pdmc_count; __u16 pdmc_type; __u16 pdmc_alen; - __u8 pdmc_addr[MAX_ADDR_LEN]; + __u8 pdmc_addr[32]; /* MAX_ADDR_LEN */ }; struct packet_diag_ring { diff --git a/include/uapi/linux/sched/types.h b/include/uapi/linux/sched/types.h new file mode 100644 index 00000000000000..307acbc82d800f --- /dev/null +++ b/include/uapi/linux/sched/types.h @@ -0,0 +1,74 @@ +#ifndef _UAPI_LINUX_SCHED_TYPES_H +#define _UAPI_LINUX_SCHED_TYPES_H + +#include + +struct sched_param { + int sched_priority; +}; + +#define SCHED_ATTR_SIZE_VER0 48 /* sizeof first published struct */ + +/* + * 
Extended scheduling parameters data structure. + * + * This is needed because the original struct sched_param can not be + * altered without introducing ABI issues with legacy applications + * (e.g., in sched_getparam()). + * + * However, the possibility of specifying more than just a priority for + * the tasks may be useful for a wide variety of application fields, e.g., + * multimedia, streaming, automation and control, and many others. + * + * This variant (sched_attr) is meant at describing a so-called + * sporadic time-constrained task. In such model a task is specified by: + * - the activation period or minimum instance inter-arrival time; + * - the maximum (or average, depending on the actual scheduling + * discipline) computation time of all instances, a.k.a. runtime; + * - the deadline (relative to the actual activation time) of each + * instance. + * Very briefly, a periodic (sporadic) task asks for the execution of + * some specific computation --which is typically called an instance-- + * (at most) every period. Moreover, each instance typically lasts no more + * than the runtime and must be completed by time instant t equal to + * the instance activation time + the deadline. + * + * This is reflected by the actual fields of the sched_attr structure: + * + * @size size of the structure, for fwd/bwd compat. + * + * @sched_policy task's scheduling policy + * @sched_flags for customizing the scheduler behaviour + * @sched_nice task's nice value (SCHED_NORMAL/BATCH) + * @sched_priority task's static priority (SCHED_FIFO/RR) + * @sched_deadline representative of the task's deadline + * @sched_runtime representative of the task's runtime + * @sched_period representative of the task's period + * + * Given this task model, there are a multiplicity of scheduling algorithms + * and policies, that can be used to ensure all the tasks will make their + * timing constraints. + * + * As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the + * only user of this new interface. More information about the algorithm + * available in the scheduling class file or in Documentation/. + */ +struct sched_attr { + u32 size; + + u32 sched_policy; + u64 sched_flags; + + /* SCHED_NORMAL, SCHED_BATCH */ + s32 sched_nice; + + /* SCHED_FIFO, SCHED_RR */ + u32 sched_priority; + + /* SCHED_DEADLINE */ + u64 sched_runtime; + u64 sched_deadline; + u64 sched_period; +}; + +#endif /* _UAPI_LINUX_SCHED_TYPES_H */ diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h index 7fec7e36d9217d..51a6b86e370043 100644 --- a/include/uapi/linux/stat.h +++ b/include/uapi/linux/stat.h @@ -1,6 +1,7 @@ #ifndef _UAPI_LINUX_STAT_H #define _UAPI_LINUX_STAT_H +#include #if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) @@ -41,5 +42,135 @@ #endif +/* + * Timestamp structure for the timestamps in struct statx. + * + * tv_sec holds the number of seconds before (negative) or after (positive) + * 00:00:00 1st January 1970 UTC. + * + * tv_nsec holds a number of nanoseconds before (0..-999,999,999 if tv_sec is + * negative) or after (0..999,999,999 if tv_sec is positive) the tv_sec time. + * + * Note that if both tv_sec and tv_nsec are non-zero, then the two values must + * either be both positive or both negative. + * + * __reserved is held in case we need a yet finer resolution. + */ +struct statx_timestamp { + __s64 tv_sec; + __s32 tv_nsec; + __s32 __reserved; +}; + +/* + * Structures for the extended file attribute retrieval system call + * (statx()). 
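[Editorial aside, not part of the patch] Referring back to the sched_attr structure documented above: a minimal userspace sketch of how a task might opt into SCHED_DEADLINE using those fields. It assumes a kernel providing the sched_setattr() system call and a libc that exposes SYS_sched_setattr (there is no glibc wrapper), and it redeclares the structure locally with stdint types so the sketch compiles outside the kernel; the numbers are placeholders.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/sched.h>		/* SCHED_DEADLINE */

struct my_sched_attr {			/* local mirror of struct sched_attr above */
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;		/* SCHED_NORMAL, SCHED_BATCH */
	uint32_t sched_priority;	/* SCHED_FIFO, SCHED_RR */
	uint64_t sched_runtime;		/* SCHED_DEADLINE, all in nanoseconds */
	uint64_t sched_deadline;
	uint64_t sched_period;
};

int main(void)
{
	struct my_sched_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.sched_policy   = SCHED_DEADLINE;
	attr.sched_runtime  =  10 * 1000 * 1000;	/* up to 10ms of CPU time...   */
	attr.sched_deadline =  30 * 1000 * 1000;	/* ...finished within 30ms...  */
	attr.sched_period   = 100 * 1000 * 1000;	/* ...of every 100ms activation */

	/* needs CAP_SYS_NICE (or root); pid 0 means the calling thread */
	if (syscall(SYS_sched_setattr, 0, &attr, 0) != 0) {
		perror("sched_setattr");
		return 1;
	}
	/* deadline-scheduled work would run here */
	return 0;
}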
+ * + * The caller passes a mask of what they're specifically interested in as a + * parameter to statx(). What statx() actually got will be indicated in + * st_mask upon return. + * + * For each bit in the mask argument: + * + * - if the datum is not supported: + * + * - the bit will be cleared, and + * + * - the datum will be set to an appropriate fabricated value if one is + * available (eg. CIFS can take a default uid and gid), otherwise + * + * - the field will be cleared; + * + * - otherwise, if explicitly requested: + * + * - the datum will be synchronised to the server if AT_STATX_FORCE_SYNC is + * set or if the datum is considered out of date, and + * + * - the field will be filled in and the bit will be set; + * + * - otherwise, if not requested, but available in approximate form without any + * effort, it will be filled in anyway, and the bit will be set upon return + * (it might not be up to date, however, and no attempt will be made to + * synchronise the internal state first); + * + * - otherwise the field and the bit will be cleared before returning. + * + * Items in STATX_BASIC_STATS may be marked unavailable on return, but they + * will have values installed for compatibility purposes so that stat() and + * co. can be emulated in userspace. + */ +struct statx { + /* 0x00 */ + __u32 stx_mask; /* What results were written [uncond] */ + __u32 stx_blksize; /* Preferred general I/O size [uncond] */ + __u64 stx_attributes; /* Flags conveying information about the file [uncond] */ + /* 0x10 */ + __u32 stx_nlink; /* Number of hard links */ + __u32 stx_uid; /* User ID of owner */ + __u32 stx_gid; /* Group ID of owner */ + __u16 stx_mode; /* File mode */ + __u16 __spare0[1]; + /* 0x20 */ + __u64 stx_ino; /* Inode number */ + __u64 stx_size; /* File size */ + __u64 stx_blocks; /* Number of 512-byte blocks allocated */ + __u64 __spare1[1]; + /* 0x40 */ + struct statx_timestamp stx_atime; /* Last access time */ + struct statx_timestamp stx_btime; /* File creation time */ + struct statx_timestamp stx_ctime; /* Last attribute change time */ + struct statx_timestamp stx_mtime; /* Last data modification time */ + /* 0x80 */ + __u32 stx_rdev_major; /* Device ID of special file [if bdev/cdev] */ + __u32 stx_rdev_minor; + __u32 stx_dev_major; /* ID of device containing file [uncond] */ + __u32 stx_dev_minor; + /* 0x90 */ + __u64 __spare2[14]; /* Spare space for future expansion */ + /* 0x100 */ +}; + +/* + * Flags to be stx_mask + * + * Query request/result mask for statx() and struct statx::stx_mask. + * + * These bits should be set in the mask argument of statx() to request + * particular items when calling statx(). 
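[Editorial aside, not part of the patch] A minimal userspace sketch of the request/result mask behaviour described above, using the STATX_* bits defined just below. It assumes the new uapi header is installed and that __NR_statx is known to the toolchain (there was no libc wrapper when this interface was introduced); the flags argument is passed as 0, which equals AT_STATX_SYNC_AS_STAT.

#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>		/* AT_FDCWD */
#include <sys/syscall.h>
#include <linux/stat.h>		/* struct statx, STATX_* */

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : ".";
	struct statx stx;

	/* ask for the basic stats plus the birth time */
	if (syscall(__NR_statx, AT_FDCWD, path, 0 /* AT_STATX_SYNC_AS_STAT */,
		    STATX_BASIC_STATS | STATX_BTIME, &stx) != 0) {
		perror("statx");
		return 1;
	}

	printf("size: %llu bytes\n", (unsigned long long)stx.stx_size);

	/* the kernel clears any bit it could not satisfy, so test stx_mask
	 * before trusting a field -- not every filesystem records btime */
	if (stx.stx_mask & STATX_BTIME)
		printf("born: %lld\n", (long long)stx.stx_btime.tv_sec);
	else
		printf("birth time not available\n");
	return 0;
}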
+ */ +#define STATX_TYPE 0x00000001U /* Want/got stx_mode & S_IFMT */ +#define STATX_MODE 0x00000002U /* Want/got stx_mode & ~S_IFMT */ +#define STATX_NLINK 0x00000004U /* Want/got stx_nlink */ +#define STATX_UID 0x00000008U /* Want/got stx_uid */ +#define STATX_GID 0x00000010U /* Want/got stx_gid */ +#define STATX_ATIME 0x00000020U /* Want/got stx_atime */ +#define STATX_MTIME 0x00000040U /* Want/got stx_mtime */ +#define STATX_CTIME 0x00000080U /* Want/got stx_ctime */ +#define STATX_INO 0x00000100U /* Want/got stx_ino */ +#define STATX_SIZE 0x00000200U /* Want/got stx_size */ +#define STATX_BLOCKS 0x00000400U /* Want/got stx_blocks */ +#define STATX_BASIC_STATS 0x000007ffU /* The stuff in the normal stat struct */ +#define STATX_BTIME 0x00000800U /* Want/got stx_btime */ +#define STATX_ALL 0x00000fffU /* All currently supported flags */ + +/* + * Attributes to be found in stx_attributes + * + * These give information about the features or the state of a file that might + * be of use to ordinary userspace programs such as GUIs or ls rather than + * specialised tools. + * + * Note that the flags marked [I] correspond to generic FS_IOC_FLAGS + * semantically. Where possible, the numerical value is picked to correspond + * also. + */ +#define STATX_ATTR_COMPRESSED 0x00000004 /* [I] File is compressed by the fs */ +#define STATX_ATTR_IMMUTABLE 0x00000010 /* [I] File is marked immutable */ +#define STATX_ATTR_APPEND 0x00000020 /* [I] File is append-only */ +#define STATX_ATTR_NODUMP 0x00000040 /* [I] File is not to be dumped */ +#define STATX_ATTR_ENCRYPTED 0x00000800 /* [I] File requires key to decrypt in fs */ + +#define STATX_ATTR_AUTOMOUNT 0x00001000 /* Dir: Automount trigger */ + #endif /* _UAPI_LINUX_STAT_H */ diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h index c506cddb8165cf..af17b4154ef607 100644 --- a/include/uapi/linux/target_core_user.h +++ b/include/uapi/linux/target_core_user.h @@ -105,26 +105,26 @@ struct tcmu_cmd_entry { union { struct { - uint32_t iov_cnt; - uint32_t iov_bidi_cnt; - uint32_t iov_dif_cnt; - uint64_t cdb_off; - uint64_t __pad1; - uint64_t __pad2; + __u32 iov_cnt; + __u32 iov_bidi_cnt; + __u32 iov_dif_cnt; + __u64 cdb_off; + __u64 __pad1; + __u64 __pad2; struct iovec iov[0]; } req; struct { - uint8_t scsi_status; - uint8_t __pad1; - uint16_t __pad2; - uint32_t __pad3; + __u8 scsi_status; + __u8 __pad1; + __u16 __pad2; + __u32 __pad3; char sense_buffer[TCMU_SENSE_BUFFERSIZE]; } rsp; }; } __packed; -#define TCMU_OP_ALIGN_SIZE sizeof(uint64_t) +#define TCMU_OP_ALIGN_SIZE sizeof(__u64) enum tcmu_genl_cmd { TCMU_CMD_UNSPEC, diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h index c055947c5c989f..3b059530dac95f 100644 --- a/include/uapi/linux/userfaultfd.h +++ b/include/uapi/linux/userfaultfd.h @@ -18,8 +18,7 @@ * means the userland is reading). 
*/ #define UFFD_API ((__u64)0xAA) -#define UFFD_API_FEATURES (UFFD_FEATURE_EVENT_EXIT | \ - UFFD_FEATURE_EVENT_FORK | \ +#define UFFD_API_FEATURES (UFFD_FEATURE_EVENT_FORK | \ UFFD_FEATURE_EVENT_REMAP | \ UFFD_FEATURE_EVENT_REMOVE | \ UFFD_FEATURE_EVENT_UNMAP | \ @@ -113,7 +112,6 @@ struct uffd_msg { #define UFFD_EVENT_REMAP 0x14 #define UFFD_EVENT_REMOVE 0x15 #define UFFD_EVENT_UNMAP 0x16 -#define UFFD_EVENT_EXIT 0x17 /* flags for UFFD_EVENT_PAGEFAULT */ #define UFFD_PAGEFAULT_FLAG_WRITE (1<<0) /* If this was a write fault */ @@ -163,7 +161,6 @@ struct uffdio_api { #define UFFD_FEATURE_MISSING_HUGETLBFS (1<<4) #define UFFD_FEATURE_MISSING_SHMEM (1<<5) #define UFFD_FEATURE_EVENT_UNMAP (1<<6) -#define UFFD_FEATURE_EVENT_EXIT (1<<7) __u64 features; __u64 ioctls; diff --git a/include/linux/virtio_mmio.h b/include/uapi/linux/virtio_mmio.h similarity index 100% rename from include/linux/virtio_mmio.h rename to include/uapi/linux/virtio_mmio.h diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h index 90007a1abcab14..15b4385a2be169 100644 --- a/include/uapi/linux/virtio_pci.h +++ b/include/uapi/linux/virtio_pci.h @@ -79,7 +79,7 @@ * configuration space */ #define VIRTIO_PCI_CONFIG_OFF(msix_enabled) ((msix_enabled) ? 24 : 20) /* Deprecated: please use VIRTIO_PCI_CONFIG_OFF instead */ -#define VIRTIO_PCI_CONFIG(dev) VIRTIO_PCI_CONFIG_OFF((dev)->msix_enabled) +#define VIRTIO_PCI_CONFIG(dev) VIRTIO_PCI_CONFIG_OFF((dev)->pci_dev->msix_enabled) /* Virtio ABI version, this must match exactly */ #define VIRTIO_PCI_ABI_VERSION 0 diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h index da7cd62bace746..0b3d30837a9f64 100644 --- a/include/uapi/rdma/mlx5-abi.h +++ b/include/uapi/rdma/mlx5-abi.h @@ -34,6 +34,7 @@ #define MLX5_ABI_USER_H #include +#include /* For ETH_ALEN. 
*/ enum { MLX5_QP_FLAG_SIGNATURE = 1 << 0, @@ -66,7 +67,7 @@ struct mlx5_ib_alloc_ucontext_req { }; enum mlx5_lib_caps { - MLX5_LIB_CAP_4K_UAR = (u64)1 << 0, + MLX5_LIB_CAP_4K_UAR = (__u64)1 << 0, }; struct mlx5_ib_alloc_ucontext_req_v2 { diff --git a/include/video/exynos5433_decon.h b/include/video/exynos5433_decon.h index ef8e2a8ad0afc8..6b083d327e982c 100644 --- a/include/video/exynos5433_decon.h +++ b/include/video/exynos5433_decon.h @@ -46,6 +46,7 @@ #define DECON_FRAMEFIFO_STATUS 0x0524 #define DECON_CMU 0x1404 #define DECON_UPDATE 0x1410 +#define DECON_CRFMID 0x1414 #define DECON_UPDATE_SCHEME 0x1438 #define DECON_VIDCON1 0x2000 #define DECON_VIDCON2 0x2004 @@ -126,6 +127,10 @@ /* VIDINTCON0 */ #define VIDINTCON0_FRAMEDONE (1 << 17) +#define VIDINTCON0_FRAMESEL_BP (0 << 15) +#define VIDINTCON0_FRAMESEL_VS (1 << 15) +#define VIDINTCON0_FRAMESEL_AC (2 << 15) +#define VIDINTCON0_FRAMESEL_FP (3 << 15) #define VIDINTCON0_INTFRMEN (1 << 12) #define VIDINTCON0_INTEN (1 << 0) @@ -142,6 +147,13 @@ #define STANDALONE_UPDATE_F (1 << 0) /* DECON_VIDCON1 */ +#define VIDCON1_LINECNT_MASK (0x0fff << 16) +#define VIDCON1_I80_ACTIVE (1 << 15) +#define VIDCON1_VSTATUS_MASK (0x3 << 13) +#define VIDCON1_VSTATUS_VS (0 << 13) +#define VIDCON1_VSTATUS_BP (1 << 13) +#define VIDCON1_VSTATUS_AC (2 << 13) +#define VIDCON1_VSTATUS_FP (3 << 13) #define VIDCON1_VCLK_MASK (0x3 << 9) #define VIDCON1_VCLK_RUN_VDEN_DISABLE (0x3 << 9) #define VIDCON1_VCLK_HOLD (0x0 << 9) diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h index a0083be5d52951..1f6d78f044b671 100644 --- a/include/xen/swiotlb-xen.h +++ b/include/xen/swiotlb-xen.h @@ -2,6 +2,7 @@ #define __LINUX_SWIOTLB_XEN_H #include +#include #include extern int xen_swiotlb_init(int verbose, bool early); @@ -55,4 +56,14 @@ xen_swiotlb_dma_supported(struct device *hwdev, u64 mask); extern int xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask); + +extern int +xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size, + unsigned long attrs); + +extern int +xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t handle, size_t size, + unsigned long attrs); #endif /* __LINUX_SWIOTLB_XEN_H */ diff --git a/init/init_task.c b/init/init_task.c index 53d4ce942a887f..66787e30a4191b 100644 --- a/init/init_task.c +++ b/init/init_task.c @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include diff --git a/init/main.c b/init/main.c index ae9f2008fb8683..b0c11cbf5ddf8a 100644 --- a/init/main.c +++ b/init/main.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -27,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -61,6 +63,7 @@ #include #include #include +#include #include #include #include @@ -75,6 +78,8 @@ #include #include #include +#include +#include #include #include #include @@ -877,7 +882,6 @@ static void __init do_basic_setup(void) do_ctors(); usermodehelper_enable(); do_initcalls(); - random_int_secret_init(); } static void __init do_pre_smp_initcalls(void) @@ -1018,6 +1022,8 @@ static noinline void __init kernel_init_freeable(void) workqueue_init(); + init_mm_internals(); + do_pre_smp_initcalls(); lockup_detector_init(); diff --git a/ipc/mqueue.c b/ipc/mqueue.c index 4fdd970314315a..e8d41ff57241d8 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -35,6 +35,9 @@ #include #include #include +#include +#include +#include #include #include "util.h" diff --git a/ipc/msg.c 
b/ipc/msg.c index e3e52ce01123c5..104926dc72be4e 100644 --- a/ipc/msg.c +++ b/ipc/msg.c @@ -30,7 +30,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/ipc/namespace.c b/ipc/namespace.c index 0abdea496493da..b4d80f9f724673 100644 --- a/ipc/namespace.c +++ b/ipc/namespace.c @@ -9,10 +9,12 @@ #include #include #include +#include #include #include #include #include +#include #include "util.h" diff --git a/ipc/sem.c b/ipc/sem.c index e468cd1c12f0d6..947dc2348271f9 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -82,6 +82,7 @@ #include #include #include +#include #include #include "util.h" diff --git a/ipc/shm.c b/ipc/shm.c index 06ea9ef7f54a77..481d2a9c298ab1 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -423,7 +423,7 @@ static int shm_mmap(struct file *file, struct vm_area_struct *vma) if (ret) return ret; - ret = sfd->file->f_op->mmap(sfd->file, vma); + ret = call_mmap(sfd->file, vma); if (ret) { shm_close(vma); return ret; @@ -452,7 +452,7 @@ static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync) if (!sfd->file->f_op->fsync) return -EINVAL; - return sfd->file->f_op->fsync(sfd->file, start, end, datasync); + return call_fsync(sfd->file, start, end, datasync); } static long shm_fallocate(struct file *file, int mode, loff_t offset, diff --git a/kernel/acct.c b/kernel/acct.c index ca9cb55b585599..5b1284370367aa 100644 --- a/kernel/acct.c +++ b/kernel/acct.c @@ -56,6 +56,8 @@ #include #include #include +#include + #include #include /* sector_div */ #include diff --git a/kernel/audit.c b/kernel/audit.c index e794544f5e6333..2f4964cfde0b4f 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -54,6 +54,10 @@ #include #include #include +#include +#include +#include +#include #include @@ -90,13 +94,34 @@ static u32 audit_default; /* If auditing cannot proceed, audit_failure selects what happens. */ static u32 audit_failure = AUDIT_FAIL_PRINTK; -/* - * If audit records are to be written to the netlink socket, audit_pid - * contains the pid of the auditd process and audit_nlk_portid contains - * the portid to use to send netlink messages to that process. +/* private audit network namespace index */ +static unsigned int audit_net_id; + +/** + * struct audit_net - audit private network namespace data + * @sk: communication socket + */ +struct audit_net { + struct sock *sk; +}; + +/** + * struct auditd_connection - kernel/auditd connection state + * @pid: auditd PID + * @portid: netlink portid + * @net: the associated network namespace + * @lock: spinlock to protect write access + * + * Description: + * This struct is RCU protected; you must either hold the RCU lock for reading + * or the included spinlock for writing. */ -int audit_pid; -static __u32 audit_nlk_portid; +static struct auditd_connection { + int pid; + u32 portid; + struct net *net; + spinlock_t lock; +} auditd_conn; /* If audit_rate_limit is non-zero, limit the rate of sending audit records * to that number per second. This prevents DoS attacks, but results in @@ -123,10 +148,6 @@ u32 audit_sig_sid = 0; */ static atomic_t audit_lost = ATOMIC_INIT(0); -/* The netlink socket. 
*/ -static struct sock *audit_sock; -static unsigned int audit_net_id; - /* Hash for inode-based rules */ struct list_head audit_inode_hash[AUDIT_INODE_BUCKETS]; @@ -139,6 +160,7 @@ static LIST_HEAD(audit_freelist); /* queue msgs to send via kauditd_task */ static struct sk_buff_head audit_queue; +static void kauditd_hold_skb(struct sk_buff *skb); /* queue msgs due to temporary unicast send problems */ static struct sk_buff_head audit_retry_queue; /* queue msgs waiting for new auditd connection */ @@ -192,6 +214,43 @@ struct audit_reply { struct sk_buff *skb; }; +/** + * auditd_test_task - Check to see if a given task is an audit daemon + * @task: the task to check + * + * Description: + * Return 1 if the task is a registered audit daemon, 0 otherwise. + */ +int auditd_test_task(const struct task_struct *task) +{ + int rc; + + rcu_read_lock(); + rc = (auditd_conn.pid && task->tgid == auditd_conn.pid ? 1 : 0); + rcu_read_unlock(); + + return rc; +} + +/** + * audit_get_sk - Return the audit socket for the given network namespace + * @net: the destination network namespace + * + * Description: + * Returns the sock pointer if valid, NULL otherwise. The caller must ensure + * that a reference is held for the network namespace while the sock is in use. + */ +static struct sock *audit_get_sk(const struct net *net) +{ + struct audit_net *aunet; + + if (!net) + return NULL; + + aunet = net_generic(net, audit_net_id); + return aunet->sk; +} + static void audit_set_portid(struct audit_buffer *ab, __u32 portid) { if (ab) { @@ -210,9 +269,7 @@ void audit_panic(const char *message) pr_err("%s\n", message); break; case AUDIT_FAIL_PANIC: - /* test audit_pid since printk is always losey, why bother? */ - if (audit_pid) - panic("audit: %s\n", message); + panic("audit: %s\n", message); break; } } @@ -370,21 +427,87 @@ static int audit_set_failure(u32 state) return audit_do_config_change("audit_failure", &audit_failure, state); } -/* - * For one reason or another this nlh isn't getting delivered to the userspace - * audit daemon, just send it to printk. +/** + * auditd_set - Set/Reset the auditd connection state + * @pid: auditd PID + * @portid: auditd netlink portid + * @net: auditd network namespace pointer + * + * Description: + * This function will obtain and drop network namespace references as + * necessary. + */ +static void auditd_set(int pid, u32 portid, struct net *net) +{ + unsigned long flags; + + spin_lock_irqsave(&auditd_conn.lock, flags); + auditd_conn.pid = pid; + auditd_conn.portid = portid; + if (auditd_conn.net) + put_net(auditd_conn.net); + if (net) + auditd_conn.net = get_net(net); + else + auditd_conn.net = NULL; + spin_unlock_irqrestore(&auditd_conn.lock, flags); +} + +/** + * auditd_reset - Disconnect the auditd connection + * + * Description: + * Break the auditd/kauditd connection and move all the queued records into the + * hold queue in case auditd reconnects. 
+ */ +static void auditd_reset(void) +{ + struct sk_buff *skb; + + /* if it isn't already broken, break the connection */ + rcu_read_lock(); + if (auditd_conn.pid) + auditd_set(0, 0, NULL); + rcu_read_unlock(); + + /* flush all of the main and retry queues to the hold queue */ + while ((skb = skb_dequeue(&audit_retry_queue))) + kauditd_hold_skb(skb); + while ((skb = skb_dequeue(&audit_queue))) + kauditd_hold_skb(skb); +} + +/** + * kauditd_print_skb - Print the audit record to the ring buffer + * @skb: audit record + * + * Whatever the reason, this packet may not make it to the auditd connection + * so write it via printk so the information isn't completely lost. */ static void kauditd_printk_skb(struct sk_buff *skb) { struct nlmsghdr *nlh = nlmsg_hdr(skb); char *data = nlmsg_data(nlh); - if (nlh->nlmsg_type != AUDIT_EOE) { - if (printk_ratelimit()) - pr_notice("type=%d %s\n", nlh->nlmsg_type, data); - else - audit_log_lost("printk limit exceeded"); - } + if (nlh->nlmsg_type != AUDIT_EOE && printk_ratelimit()) + pr_notice("type=%d %s\n", nlh->nlmsg_type, data); +} + +/** + * kauditd_rehold_skb - Handle a audit record send failure in the hold queue + * @skb: audit record + * + * Description: + * This should only be used by the kauditd_thread when it fails to flush the + * hold queue. + */ +static void kauditd_rehold_skb(struct sk_buff *skb) +{ + /* put the record back in the queue at the same place */ + skb_queue_head(&audit_hold_queue, skb); + + /* fail the auditd connection */ + auditd_reset(); } /** @@ -421,6 +544,9 @@ static void kauditd_hold_skb(struct sk_buff *skb) /* we have no other options - drop the message */ audit_log_lost("kauditd hold queue overflow"); kfree_skb(skb); + + /* fail the auditd connection */ + auditd_reset(); } /** @@ -441,51 +567,122 @@ static void kauditd_retry_skb(struct sk_buff *skb) } /** - * auditd_reset - Disconnect the auditd connection + * auditd_send_unicast_skb - Send a record via unicast to auditd + * @skb: audit record * * Description: - * Break the auditd/kauditd connection and move all the records in the retry - * queue into the hold queue in case auditd reconnects. The audit_cmd_mutex - * must be held when calling this function. + * Send a skb to the audit daemon, returns positive/zero values on success and + * negative values on failure; in all cases the skb will be consumed by this + * function. If the send results in -ECONNREFUSED the connection with auditd + * will be reset. This function may sleep so callers should not hold any locks + * where this would cause a problem. 
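[Editorial aside, not part of the patch] For orientation, the reader-side discipline that the auditd_connection comments above require, condensed into one kernel-context fragment from auditd_test_task() and auditd_send_unicast_skb(): readers copy what they need under rcu_read_lock() and pin the namespace before unlocking, anything that can sleep (such as the netlink send) happens outside the RCU read-side section, and writers instead go through auditd_set(), which takes auditd_conn.lock. The function name is hypothetical and the sketch is illustrative only.

/* illustrative condensation of the reader-side pattern used in this patch */
static int example_send_to_auditd(struct sk_buff *skb)
{
	u32 portid;
	struct net *net;
	int rc;

	rcu_read_lock();
	if (!auditd_conn.pid) {			/* no daemon registered */
		rcu_read_unlock();
		kfree_skb(skb);			/* honour "skb is always consumed" */
		return -ECONNREFUSED;
	}
	net = get_net(auditd_conn.net);		/* hold the netns across the send */
	portid = auditd_conn.portid;
	rcu_read_unlock();

	/* may sleep, so it must run outside the RCU read-side section */
	rc = netlink_unicast(audit_get_sk(net), skb, portid, 0);
	put_net(net);
	return rc;
}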
*/ -static void auditd_reset(void) +static int auditd_send_unicast_skb(struct sk_buff *skb) { - struct sk_buff *skb; - - /* break the connection */ - if (audit_sock) { - sock_put(audit_sock); - audit_sock = NULL; + int rc; + u32 portid; + struct net *net; + struct sock *sk; + + /* NOTE: we can't call netlink_unicast while in the RCU section so + * take a reference to the network namespace and grab local + * copies of the namespace, the sock, and the portid; the + * namespace and sock aren't going to go away while we hold a + * reference and if the portid does become invalid after the RCU + * section netlink_unicast() should safely return an error */ + + rcu_read_lock(); + if (!auditd_conn.pid) { + rcu_read_unlock(); + rc = -ECONNREFUSED; + goto err; } - audit_pid = 0; - audit_nlk_portid = 0; + net = auditd_conn.net; + get_net(net); + sk = audit_get_sk(net); + portid = auditd_conn.portid; + rcu_read_unlock(); - /* flush all of the retry queue to the hold queue */ - while ((skb = skb_dequeue(&audit_retry_queue))) - kauditd_hold_skb(skb); + rc = netlink_unicast(sk, skb, portid, 0); + put_net(net); + if (rc < 0) + goto err; + + return rc; + +err: + if (rc == -ECONNREFUSED) + auditd_reset(); + return rc; } /** - * kauditd_send_unicast_skb - Send a record via unicast to auditd - * @skb: audit record + * kauditd_send_queue - Helper for kauditd_thread to flush skb queues + * @sk: the sending sock + * @portid: the netlink destination + * @queue: the skb queue to process + * @retry_limit: limit on number of netlink unicast failures + * @skb_hook: per-skb hook for additional processing + * @err_hook: hook called if the skb fails the netlink unicast send + * + * Description: + * Run through the given queue and attempt to send the audit records to auditd, + * returns zero on success, negative values on failure. It is up to the caller + * to ensure that the @sk is valid for the duration of this function. + * */ -static int kauditd_send_unicast_skb(struct sk_buff *skb) +static int kauditd_send_queue(struct sock *sk, u32 portid, + struct sk_buff_head *queue, + unsigned int retry_limit, + void (*skb_hook)(struct sk_buff *skb), + void (*err_hook)(struct sk_buff *skb)) { - int rc; + int rc = 0; + struct sk_buff *skb; + static unsigned int failed = 0; - /* if we know nothing is connected, don't even try the netlink call */ - if (!audit_pid) - return -ECONNREFUSED; + /* NOTE: kauditd_thread takes care of all our locking, we just use + * the netlink info passed to us (e.g. sk and portid) */ + + while ((skb = skb_dequeue(queue))) { + /* call the skb_hook for each skb we touch */ + if (skb_hook) + (*skb_hook)(skb); + + /* can we send to anyone via unicast? */ + if (!sk) { + if (err_hook) + (*err_hook)(skb); + continue; + } - /* get an extra skb reference in case we fail to send */ - skb_get(skb); - rc = netlink_unicast(audit_sock, skb, audit_nlk_portid, 0); - if (rc >= 0) { - consume_skb(skb); - rc = 0; + /* grab an extra skb reference in case of error */ + skb_get(skb); + rc = netlink_unicast(sk, skb, portid, 0); + if (rc < 0) { + /* fatal failure for our queue flush attempt? 
*/ + if (++failed >= retry_limit || + rc == -ECONNREFUSED || rc == -EPERM) { + /* yes - error processing for the queue */ + sk = NULL; + if (err_hook) + (*err_hook)(skb); + if (!skb_hook) + goto out; + /* keep processing with the skb_hook */ + continue; + } else + /* no - requeue to preserve ordering */ + skb_queue_head(queue, skb); + } else { + /* it worked - drop the extra reference and continue */ + consume_skb(skb); + failed = 0; + } } - return rc; +out: + return (rc >= 0 ? 0 : rc); } /* @@ -493,16 +690,19 @@ static int kauditd_send_unicast_skb(struct sk_buff *skb) * @skb: audit record * * Description: - * This function doesn't consume an skb as might be expected since it has to - * copy it anyways. + * Write a multicast message to anyone listening in the initial network + * namespace. This function doesn't consume an skb as might be expected since + * it has to copy it anyways. */ static void kauditd_send_multicast_skb(struct sk_buff *skb) { struct sk_buff *copy; - struct audit_net *aunet = net_generic(&init_net, audit_net_id); - struct sock *sock = aunet->nlsk; + struct sock *sock = audit_get_sk(&init_net); struct nlmsghdr *nlh; + /* NOTE: we are not taking an additional reference for init_net since + * we don't have to worry about it going away */ + if (!netlink_has_listeners(sock, AUDIT_NLGRP_READLOG)) return; @@ -526,149 +726,75 @@ static void kauditd_send_multicast_skb(struct sk_buff *skb) } /** - * kauditd_wake_condition - Return true when it is time to wake kauditd_thread - * - * Description: - * This function is for use by the wait_event_freezable() call in - * kauditd_thread(). + * kauditd_thread - Worker thread to send audit records to userspace + * @dummy: unused */ -static int kauditd_wake_condition(void) -{ - static int pid_last = 0; - int rc; - int pid = audit_pid; - - /* wake on new messages or a change in the connected auditd */ - rc = skb_queue_len(&audit_queue) || (pid && pid != pid_last); - if (rc) - pid_last = pid; - - return rc; -} - static int kauditd_thread(void *dummy) { int rc; - int auditd = 0; - int reschedule = 0; - struct sk_buff *skb; - struct nlmsghdr *nlh; + u32 portid = 0; + struct net *net = NULL; + struct sock *sk = NULL; #define UNICAST_RETRIES 5 -#define AUDITD_BAD(x,y) \ - ((x) == -ECONNREFUSED || (x) == -EPERM || ++(y) >= UNICAST_RETRIES) - - /* NOTE: we do invalidate the auditd connection flag on any sending - * errors, but we only "restore" the connection flag at specific places - * in the loop in order to help ensure proper ordering of audit - * records */ set_freezable(); while (!kthread_should_stop()) { - /* NOTE: possible area for future improvement is to look at - * the hold and retry queues, since only this thread - * has access to these queues we might be able to do - * our own queuing and skip some/all of the locking */ - - /* NOTE: it might be a fun experiment to split the hold and - * retry queue handling to another thread, but the - * synchronization issues and other overhead might kill - * any performance gains */ + /* NOTE: see the lock comments in auditd_send_unicast_skb() */ + rcu_read_lock(); + if (!auditd_conn.pid) { + rcu_read_unlock(); + goto main_queue; + } + net = auditd_conn.net; + get_net(net); + sk = audit_get_sk(net); + portid = auditd_conn.portid; + rcu_read_unlock(); /* attempt to flush the hold queue */ - while (auditd && (skb = skb_dequeue(&audit_hold_queue))) { - rc = kauditd_send_unicast_skb(skb); - if (rc) { - /* requeue to the same spot */ - skb_queue_head(&audit_hold_queue, skb); - - auditd = 0; - if 
(AUDITD_BAD(rc, reschedule)) { - mutex_lock(&audit_cmd_mutex); - auditd_reset(); - mutex_unlock(&audit_cmd_mutex); - reschedule = 0; - } - } else - /* we were able to send successfully */ - reschedule = 0; + rc = kauditd_send_queue(sk, portid, + &audit_hold_queue, UNICAST_RETRIES, + NULL, kauditd_rehold_skb); + if (rc < 0) { + sk = NULL; + goto main_queue; } /* attempt to flush the retry queue */ - while (auditd && (skb = skb_dequeue(&audit_retry_queue))) { - rc = kauditd_send_unicast_skb(skb); - if (rc) { - auditd = 0; - if (AUDITD_BAD(rc, reschedule)) { - kauditd_hold_skb(skb); - mutex_lock(&audit_cmd_mutex); - auditd_reset(); - mutex_unlock(&audit_cmd_mutex); - reschedule = 0; - } else - /* temporary problem (we hope), queue - * to the same spot and retry */ - skb_queue_head(&audit_retry_queue, skb); - } else - /* we were able to send successfully */ - reschedule = 0; + rc = kauditd_send_queue(sk, portid, + &audit_retry_queue, UNICAST_RETRIES, + NULL, kauditd_hold_skb); + if (rc < 0) { + sk = NULL; + goto main_queue; } - /* standard queue processing, try to be as quick as possible */ -quick_loop: - skb = skb_dequeue(&audit_queue); - if (skb) { - /* setup the netlink header, see the comments in - * kauditd_send_multicast_skb() for length quirks */ - nlh = nlmsg_hdr(skb); - nlh->nlmsg_len = skb->len - NLMSG_HDRLEN; - - /* attempt to send to any multicast listeners */ - kauditd_send_multicast_skb(skb); - - /* attempt to send to auditd, queue on failure */ - if (auditd) { - rc = kauditd_send_unicast_skb(skb); - if (rc) { - auditd = 0; - if (AUDITD_BAD(rc, reschedule)) { - mutex_lock(&audit_cmd_mutex); - auditd_reset(); - mutex_unlock(&audit_cmd_mutex); - reschedule = 0; - } - - /* move to the retry queue */ - kauditd_retry_skb(skb); - } else - /* everything is working so go fast! */ - goto quick_loop; - } else if (reschedule) - /* we are currently having problems, move to - * the retry queue */ - kauditd_retry_skb(skb); - else - /* dump the message via printk and hold it */ - kauditd_hold_skb(skb); - } else { - /* we have flushed the backlog so wake everyone */ - wake_up(&audit_backlog_wait); - - /* if everything is okay with auditd (if present), go - * to sleep until there is something new in the queue - * or we have a change in the connected auditd; - * otherwise simply reschedule to give things a chance - * to recover */ - if (reschedule) { - set_current_state(TASK_INTERRUPTIBLE); - schedule(); - } else - wait_event_freezable(kauditd_wait, - kauditd_wake_condition()); - - /* update the auditd connection status */ - auditd = (audit_pid ? 1 : 0); +main_queue: + /* process the main queue - do the multicast send and attempt + * unicast, dump failed record sends to the retry queue; if + * sk == NULL due to previous failures we will just do the + * multicast send and move the record to the retry queue */ + kauditd_send_queue(sk, portid, &audit_queue, 1, + kauditd_send_multicast_skb, + kauditd_retry_skb); + + /* drop our netns reference, no auditd sends past this line */ + if (net) { + put_net(net); + net = NULL; } + sk = NULL; + + /* we have processed all the queues so wake everyone */ + wake_up(&audit_backlog_wait); + + /* NOTE: we want to wake up if there is anything on the queue, + * regardless of if an auditd is connected, as we need to + * do the multicast send and rotate records from the + * main queue to the retry/hold queues */ + wait_event_freezable(kauditd_wait, + (skb_queue_len(&audit_queue) ? 
1 : 0)); } return 0; @@ -678,17 +804,16 @@ int audit_send_list(void *_dest) { struct audit_netlink_list *dest = _dest; struct sk_buff *skb; - struct net *net = dest->net; - struct audit_net *aunet = net_generic(net, audit_net_id); + struct sock *sk = audit_get_sk(dest->net); /* wait for parent to finish and send an ACK */ mutex_lock(&audit_cmd_mutex); mutex_unlock(&audit_cmd_mutex); while ((skb = __skb_dequeue(&dest->q)) != NULL) - netlink_unicast(aunet->nlsk, skb, dest->portid, 0); + netlink_unicast(sk, skb, dest->portid, 0); - put_net(net); + put_net(dest->net); kfree(dest); return 0; @@ -722,16 +847,15 @@ struct sk_buff *audit_make_reply(__u32 portid, int seq, int type, int done, static int audit_send_reply_thread(void *arg) { struct audit_reply *reply = (struct audit_reply *)arg; - struct net *net = reply->net; - struct audit_net *aunet = net_generic(net, audit_net_id); + struct sock *sk = audit_get_sk(reply->net); mutex_lock(&audit_cmd_mutex); mutex_unlock(&audit_cmd_mutex); /* Ignore failure. It'll only happen if the sender goes away, because our timeout is set to infinite. */ - netlink_unicast(aunet->nlsk , reply->skb, reply->portid, 0); - put_net(net); + netlink_unicast(sk, reply->skb, reply->portid, 0); + put_net(reply->net); kfree(reply); return 0; } @@ -949,12 +1073,12 @@ static int audit_set_feature(struct sk_buff *skb) static int audit_replace(pid_t pid) { - struct sk_buff *skb = audit_make_reply(0, 0, AUDIT_REPLACE, 0, 0, - &pid, sizeof(pid)); + struct sk_buff *skb; + skb = audit_make_reply(0, 0, AUDIT_REPLACE, 0, 0, &pid, sizeof(pid)); if (!skb) return -ENOMEM; - return netlink_unicast(audit_sock, skb, audit_nlk_portid, 0); + return auditd_send_unicast_skb(skb); } static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) @@ -981,7 +1105,9 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) memset(&s, 0, sizeof(s)); s.enabled = audit_enabled; s.failure = audit_failure; - s.pid = audit_pid; + rcu_read_lock(); + s.pid = auditd_conn.pid; + rcu_read_unlock(); s.rate_limit = audit_rate_limit; s.backlog_limit = audit_backlog_limit; s.lost = atomic_read(&audit_lost); @@ -1014,30 +1140,44 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) * from the initial pid namespace, but something * to keep in mind if this changes */ int new_pid = s.pid; + pid_t auditd_pid; pid_t requesting_pid = task_tgid_vnr(current); - if ((!new_pid) && (requesting_pid != audit_pid)) { - audit_log_config_change("audit_pid", new_pid, audit_pid, 0); + /* test the auditd connection */ + audit_replace(requesting_pid); + + rcu_read_lock(); + auditd_pid = auditd_conn.pid; + /* only the current auditd can unregister itself */ + if ((!new_pid) && (requesting_pid != auditd_pid)) { + rcu_read_unlock(); + audit_log_config_change("audit_pid", new_pid, + auditd_pid, 0); return -EACCES; } - if (audit_pid && new_pid && - audit_replace(requesting_pid) != -ECONNREFUSED) { - audit_log_config_change("audit_pid", new_pid, audit_pid, 0); + /* replacing a healthy auditd is not allowed */ + if (auditd_pid && new_pid) { + rcu_read_unlock(); + audit_log_config_change("audit_pid", new_pid, + auditd_pid, 0); return -EEXIST; } + rcu_read_unlock(); + if (audit_enabled != AUDIT_OFF) - audit_log_config_change("audit_pid", new_pid, audit_pid, 1); + audit_log_config_change("audit_pid", new_pid, + auditd_pid, 1); + if (new_pid) { - if (audit_sock) - sock_put(audit_sock); - audit_pid = new_pid; - audit_nlk_portid = NETLINK_CB(skb).portid; - sock_hold(skb->sk); - audit_sock = skb->sk; 
- } else { + /* register a new auditd connection */ + auditd_set(new_pid, + NETLINK_CB(skb).portid, + sock_net(NETLINK_CB(skb).sk)); + /* try to process any backlog */ + wake_up_interruptible(&kauditd_wait); + } else + /* unregister the auditd connection */ auditd_reset(); - } - wake_up_interruptible(&kauditd_wait); } if (s.mask & AUDIT_STATUS_RATE_LIMIT) { err = audit_set_rate_limit(s.rate_limit); @@ -1090,7 +1230,6 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) if (err) break; } - mutex_unlock(&audit_cmd_mutex); audit_log_common_recv_msg(&ab, msg_type); if (msg_type != AUDIT_USER_TTY) audit_log_format(ab, " msg='%.*s'", @@ -1108,7 +1247,6 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) } audit_set_portid(ab, NETLINK_CB(skb).portid); audit_log_end(ab); - mutex_lock(&audit_cmd_mutex); } break; case AUDIT_ADD_RULE: @@ -1298,26 +1436,26 @@ static int __net_init audit_net_init(struct net *net) struct audit_net *aunet = net_generic(net, audit_net_id); - aunet->nlsk = netlink_kernel_create(net, NETLINK_AUDIT, &cfg); - if (aunet->nlsk == NULL) { + aunet->sk = netlink_kernel_create(net, NETLINK_AUDIT, &cfg); + if (aunet->sk == NULL) { audit_panic("cannot initialize netlink socket in namespace"); return -ENOMEM; } - aunet->nlsk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT; + aunet->sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT; + return 0; } static void __net_exit audit_net_exit(struct net *net) { struct audit_net *aunet = net_generic(net, audit_net_id); - struct sock *sock = aunet->nlsk; - mutex_lock(&audit_cmd_mutex); - if (sock == audit_sock) + + rcu_read_lock(); + if (net == auditd_conn.net) auditd_reset(); - mutex_unlock(&audit_cmd_mutex); + rcu_read_unlock(); - netlink_kernel_release(sock); - aunet->nlsk = NULL; + netlink_kernel_release(aunet->sk); } static struct pernet_operations audit_net_ops __net_initdata = { @@ -1335,20 +1473,24 @@ static int __init audit_init(void) if (audit_initialized == AUDIT_DISABLED) return 0; - pr_info("initializing netlink subsys (%s)\n", - audit_default ? "enabled" : "disabled"); - register_pernet_subsys(&audit_net_ops); + memset(&auditd_conn, 0, sizeof(auditd_conn)); + spin_lock_init(&auditd_conn.lock); skb_queue_head_init(&audit_queue); skb_queue_head_init(&audit_retry_queue); skb_queue_head_init(&audit_hold_queue); - audit_initialized = AUDIT_INITIALIZED; - audit_enabled = audit_default; - audit_ever_enabled |= !!audit_default; for (i = 0; i < AUDIT_INODE_BUCKETS; i++) INIT_LIST_HEAD(&audit_inode_hash[i]); + pr_info("initializing netlink subsys (%s)\n", + audit_default ? "enabled" : "disabled"); + register_pernet_subsys(&audit_net_ops); + + audit_initialized = AUDIT_INITIALIZED; + audit_enabled = audit_default; + audit_ever_enabled |= !!audit_default; + kauditd_task = kthread_run(kauditd_thread, NULL, "kauditd"); if (IS_ERR(kauditd_task)) { int err = PTR_ERR(kauditd_task); @@ -1519,20 +1661,16 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, if (unlikely(!audit_filter(type, AUDIT_FILTER_TYPE))) return NULL; - /* don't ever fail/sleep on these two conditions: + /* NOTE: don't ever fail/sleep on these two conditions: * 1. auditd generated record - since we need auditd to drain the * queue; also, when we are checking for auditd, compare PIDs using * task_tgid_vnr() since auditd_pid is set in audit_receive_msg() * using a PID anchored in the caller's namespace - * 2. 
audit command message - record types 1000 through 1099 inclusive - * are command messages/records used to manage the kernel subsystem - * and the audit userspace, blocking on these messages could cause - * problems under load so don't do it (note: not all of these - * command types are valid as record types, but it is quicker to - * just check two ints than a series of ints in a if/switch stmt) */ - if (!((audit_pid && audit_pid == task_tgid_vnr(current)) || - (type >= 1000 && type <= 1099))) { - long sleep_time = audit_backlog_wait_time; + * 2. generator holding the audit_cmd_mutex - we don't want to block + * while holding the mutex */ + if (!(auditd_test_task(current) || + (current == __mutex_owner(&audit_cmd_mutex)))) { + long stime = audit_backlog_wait_time; while (audit_backlog_limit && (skb_queue_len(&audit_queue) > audit_backlog_limit)) { @@ -1541,14 +1679,13 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, /* sleep if we are allowed and we haven't exhausted our * backlog wait limit */ - if ((gfp_mask & __GFP_DIRECT_RECLAIM) && - (sleep_time > 0)) { + if (gfpflags_allow_blocking(gfp_mask) && (stime > 0)) { DECLARE_WAITQUEUE(wait, current); add_wait_queue_exclusive(&audit_backlog_wait, &wait); set_current_state(TASK_UNINTERRUPTIBLE); - sleep_time = schedule_timeout(sleep_time); + stime = schedule_timeout(stime); remove_wait_queue(&audit_backlog_wait, &wait); } else { if (audit_rate_check() && printk_ratelimit()) @@ -2127,15 +2264,27 @@ void audit_log_link_denied(const char *operation, const struct path *link) */ void audit_log_end(struct audit_buffer *ab) { + struct sk_buff *skb; + struct nlmsghdr *nlh; + if (!ab) return; - if (!audit_rate_check()) { - audit_log_lost("rate limit exceeded"); - } else { - skb_queue_tail(&audit_queue, ab->skb); - wake_up_interruptible(&kauditd_wait); + + if (audit_rate_check()) { + skb = ab->skb; ab->skb = NULL; - } + + /* setup the netlink header, see the comments in + * kauditd_send_multicast_skb() for length quirks */ + nlh = nlmsg_hdr(skb); + nlh->nlmsg_len = skb->len - NLMSG_HDRLEN; + + /* queue the netlink packet and poke the kauditd thread */ + skb_queue_tail(&audit_queue, skb); + wake_up_interruptible(&kauditd_wait); + } else + audit_log_lost("rate limit exceeded"); + audit_buffer_free(ab); } diff --git a/kernel/audit.h b/kernel/audit.h index ca579880303ab4..0f1cf6d1878ab3 100644 --- a/kernel/audit.h +++ b/kernel/audit.h @@ -218,7 +218,7 @@ extern void audit_log_name(struct audit_context *context, struct audit_names *n, const struct path *path, int record_num, int *call_panic); -extern int audit_pid; +extern int auditd_test_task(const struct task_struct *task); #define AUDIT_INODE_BUCKETS 32 extern struct list_head audit_inode_hash[AUDIT_INODE_BUCKETS]; @@ -250,10 +250,6 @@ struct audit_netlink_list { int audit_send_list(void *); -struct audit_net { - struct sock *nlsk; -}; - extern int selinux_audit_rule_update(void); extern struct mutex audit_filter_mutex; @@ -340,8 +336,7 @@ extern int audit_filter(int msgtype, unsigned int listtype); extern int __audit_signal_info(int sig, struct task_struct *t); static inline int audit_signal_info(int sig, struct task_struct *t) { - if (unlikely((audit_pid && t->tgid == audit_pid) || - (audit_signals && !audit_dummy_context()))) + if (auditd_test_task(t) || (audit_signals && !audit_dummy_context())) return __audit_signal_info(sig, t); return 0; } diff --git a/kernel/auditsc.c b/kernel/auditsc.c index d6a8de5f8fa3d0..e59ffc7fc522ad 100644 --- a/kernel/auditsc.c +++ 
b/kernel/auditsc.c @@ -762,7 +762,7 @@ static enum audit_state audit_filter_syscall(struct task_struct *tsk, struct audit_entry *e; enum audit_state state; - if (audit_pid && tsk->tgid == audit_pid) + if (auditd_test_task(tsk)) return AUDIT_DISABLED; rcu_read_lock(); @@ -816,7 +816,7 @@ void audit_filter_inodes(struct task_struct *tsk, struct audit_context *ctx) { struct audit_names *n; - if (audit_pid && tsk->tgid == audit_pid) + if (auditd_test_task(tsk)) return; rcu_read_lock(); @@ -2256,7 +2256,7 @@ int __audit_signal_info(int sig, struct task_struct *t) struct audit_context *ctx = tsk->audit_context; kuid_t uid = current_uid(), t_uid = task_uid(t); - if (audit_pid && t->tgid == audit_pid) { + if (auditd_test_task(t)) { if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) { audit_sig_pid = task_tgid_nr(tsk); if (uid_valid(tsk->loginuid)) diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 3ea87fb19a9416..361a69dfe5434d 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -13,11 +13,12 @@ #include #include #include +#include #include "percpu_freelist.h" #include "bpf_lru_list.h" struct bucket { - struct hlist_head head; + struct hlist_nulls_head head; raw_spinlock_t lock; }; @@ -29,28 +30,26 @@ struct bpf_htab { struct pcpu_freelist freelist; struct bpf_lru lru; }; - void __percpu *extra_elems; + struct htab_elem *__percpu *extra_elems; atomic_t count; /* number of elements in this hashtable */ u32 n_buckets; /* number of hash buckets */ u32 elem_size; /* size of each element in bytes */ }; -enum extra_elem_state { - HTAB_NOT_AN_EXTRA_ELEM = 0, - HTAB_EXTRA_ELEM_FREE, - HTAB_EXTRA_ELEM_USED -}; - /* each htab element is struct htab_elem + key + value */ struct htab_elem { union { - struct hlist_node hash_node; - struct bpf_htab *htab; - struct pcpu_freelist_node fnode; + struct hlist_nulls_node hash_node; + struct { + void *padding; + union { + struct bpf_htab *htab; + struct pcpu_freelist_node fnode; + }; + }; }; union { struct rcu_head rcu; - enum extra_elem_state state; struct bpf_lru_node lru_node; }; u32 hash; @@ -71,6 +70,11 @@ static bool htab_is_percpu(const struct bpf_htab *htab) htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; } +static bool htab_is_prealloc(const struct bpf_htab *htab) +{ + return !(htab->map.map_flags & BPF_F_NO_PREALLOC); +} + static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size, void __percpu *pptr) { @@ -122,17 +126,20 @@ static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key, static int prealloc_init(struct bpf_htab *htab) { + u32 num_entries = htab->map.max_entries; int err = -ENOMEM, i; - htab->elems = bpf_map_area_alloc(htab->elem_size * - htab->map.max_entries); + if (!htab_is_percpu(htab) && !htab_is_lru(htab)) + num_entries += num_possible_cpus(); + + htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries); if (!htab->elems) return -ENOMEM; if (!htab_is_percpu(htab)) goto skip_percpu_elems; - for (i = 0; i < htab->map.max_entries; i++) { + for (i = 0; i < num_entries; i++) { u32 size = round_up(htab->map.value_size, 8); void __percpu *pptr; @@ -160,10 +167,11 @@ static int prealloc_init(struct bpf_htab *htab) if (htab_is_lru(htab)) bpf_lru_populate(&htab->lru, htab->elems, offsetof(struct htab_elem, lru_node), - htab->elem_size, htab->map.max_entries); + htab->elem_size, num_entries); else - pcpu_freelist_populate(&htab->freelist, htab->elems, - htab->elem_size, htab->map.max_entries); + pcpu_freelist_populate(&htab->freelist, + htab->elems + offsetof(struct 
htab_elem, fnode), + htab->elem_size, num_entries); return 0; @@ -184,16 +192,22 @@ static void prealloc_destroy(struct bpf_htab *htab) static int alloc_extra_elems(struct bpf_htab *htab) { - void __percpu *pptr; + struct htab_elem *__percpu *pptr, *l_new; + struct pcpu_freelist_node *l; int cpu; - pptr = __alloc_percpu_gfp(htab->elem_size, 8, GFP_USER | __GFP_NOWARN); + pptr = __alloc_percpu_gfp(sizeof(struct htab_elem *), 8, + GFP_USER | __GFP_NOWARN); if (!pptr) return -ENOMEM; for_each_possible_cpu(cpu) { - ((struct htab_elem *)per_cpu_ptr(pptr, cpu))->state = - HTAB_EXTRA_ELEM_FREE; + l = pcpu_freelist_pop(&htab->freelist); + /* pop will succeed, since prealloc_init() + * preallocated extra num_possible_cpus elements + */ + l_new = container_of(l, struct htab_elem, fnode); + *per_cpu_ptr(pptr, cpu) = l_new; } htab->extra_elems = pptr; return 0; @@ -217,6 +231,11 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) int err, i; u64 cost; + BUILD_BUG_ON(offsetof(struct htab_elem, htab) != + offsetof(struct htab_elem, hash_node.pprev)); + BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) != + offsetof(struct htab_elem, hash_node.pprev)); + if (lru && !capable(CAP_SYS_ADMIN)) /* LRU implementation is much complicated than other * maps. Hence, limit to CAP_SYS_ADMIN for now. @@ -326,29 +345,29 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) goto free_htab; for (i = 0; i < htab->n_buckets; i++) { - INIT_HLIST_HEAD(&htab->buckets[i].head); + INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i); raw_spin_lock_init(&htab->buckets[i].lock); } - if (!percpu && !lru) { - /* lru itself can remove the least used element, so - * there is no need for an extra elem during map_update. - */ - err = alloc_extra_elems(htab); - if (err) - goto free_buckets; - } - if (prealloc) { err = prealloc_init(htab); if (err) - goto free_extra_elems; + goto free_buckets; + + if (!percpu && !lru) { + /* lru itself can remove the least used element, so + * there is no need for an extra elem during map_update. + */ + err = alloc_extra_elems(htab); + if (err) + goto free_prealloc; + } } return &htab->map; -free_extra_elems: - free_percpu(htab->extra_elems); +free_prealloc: + prealloc_destroy(htab); free_buckets: bpf_map_area_free(htab->buckets); free_htab: @@ -366,20 +385,44 @@ static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash) return &htab->buckets[hash & (htab->n_buckets - 1)]; } -static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash) +static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash) { return &__select_bucket(htab, hash)->head; } -static struct htab_elem *lookup_elem_raw(struct hlist_head *head, u32 hash, +/* this lookup function can only be called with bucket lock taken */ +static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash, void *key, u32 key_size) { + struct hlist_nulls_node *n; + struct htab_elem *l; + + hlist_nulls_for_each_entry_rcu(l, n, head, hash_node) + if (l->hash == hash && !memcmp(&l->key, key, key_size)) + return l; + + return NULL; +} + +/* can be called without bucket lock. 
it will repeat the loop in + * the unlikely event when elements moved from one bucket into another + * while link list is being walked + */ +static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head, + u32 hash, void *key, + u32 key_size, u32 n_buckets) +{ + struct hlist_nulls_node *n; struct htab_elem *l; - hlist_for_each_entry_rcu(l, head, hash_node) +again: + hlist_nulls_for_each_entry_rcu(l, n, head, hash_node) if (l->hash == hash && !memcmp(&l->key, key, key_size)) return l; + if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1)))) + goto again; + return NULL; } @@ -387,7 +430,7 @@ static struct htab_elem *lookup_elem_raw(struct hlist_head *head, u32 hash, static void *__htab_map_lookup_elem(struct bpf_map *map, void *key) { struct bpf_htab *htab = container_of(map, struct bpf_htab, map); - struct hlist_head *head; + struct hlist_nulls_head *head; struct htab_elem *l; u32 hash, key_size; @@ -400,7 +443,7 @@ static void *__htab_map_lookup_elem(struct bpf_map *map, void *key) head = select_bucket(htab, hash); - l = lookup_elem_raw(head, hash, key, key_size); + l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets); return l; } @@ -433,8 +476,9 @@ static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key) static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node) { struct bpf_htab *htab = (struct bpf_htab *)arg; - struct htab_elem *l, *tgt_l; - struct hlist_head *head; + struct htab_elem *l = NULL, *tgt_l; + struct hlist_nulls_head *head; + struct hlist_nulls_node *n; unsigned long flags; struct bucket *b; @@ -444,9 +488,9 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node) raw_spin_lock_irqsave(&b->lock, flags); - hlist_for_each_entry_rcu(l, head, hash_node) + hlist_nulls_for_each_entry_rcu(l, n, head, hash_node) if (l == tgt_l) { - hlist_del_rcu(&l->hash_node); + hlist_nulls_del_rcu(&l->hash_node); break; } @@ -459,7 +503,7 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node) static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key) { struct bpf_htab *htab = container_of(map, struct bpf_htab, map); - struct hlist_head *head; + struct hlist_nulls_head *head; struct htab_elem *l, *next_l; u32 hash, key_size; int i; @@ -473,7 +517,7 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key) head = select_bucket(htab, hash); /* lookup the key */ - l = lookup_elem_raw(head, hash, key, key_size); + l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets); if (!l) { i = 0; @@ -481,7 +525,7 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key) } /* key was found, get next key in the same bucket */ - next_l = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&l->hash_node)), + next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)), struct htab_elem, hash_node); if (next_l) { @@ -500,7 +544,7 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key) head = select_bucket(htab, i); /* pick first element in the bucket */ - next_l = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)), + next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)), struct htab_elem, hash_node); if (next_l) { /* if it's not empty, just return it */ @@ -538,12 +582,7 @@ static void htab_elem_free_rcu(struct rcu_head *head) static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) { - if (l->state == 
HTAB_EXTRA_ELEM_USED) { - l->state = HTAB_EXTRA_ELEM_FREE; - return; - } - - if (!(htab->map.map_flags & BPF_F_NO_PREALLOC)) { + if (htab_is_prealloc(htab)) { pcpu_freelist_push(&htab->freelist, &l->fnode); } else { atomic_dec(&htab->count); @@ -573,43 +612,43 @@ static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr, static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, void *value, u32 key_size, u32 hash, bool percpu, bool onallcpus, - bool old_elem_exists) + struct htab_elem *old_elem) { u32 size = htab->map.value_size; - bool prealloc = !(htab->map.map_flags & BPF_F_NO_PREALLOC); - struct htab_elem *l_new; + bool prealloc = htab_is_prealloc(htab); + struct htab_elem *l_new, **pl_new; void __percpu *pptr; - int err = 0; if (prealloc) { - l_new = (struct htab_elem *)pcpu_freelist_pop(&htab->freelist); - if (!l_new) - err = -E2BIG; - } else { - if (atomic_inc_return(&htab->count) > htab->map.max_entries) { - atomic_dec(&htab->count); - err = -E2BIG; + if (old_elem) { + /* if we're updating the existing element, + * use per-cpu extra elems to avoid freelist_pop/push + */ + pl_new = this_cpu_ptr(htab->extra_elems); + l_new = *pl_new; + *pl_new = old_elem; } else { - l_new = kmalloc(htab->elem_size, - GFP_ATOMIC | __GFP_NOWARN); - if (!l_new) - return ERR_PTR(-ENOMEM); - } - } + struct pcpu_freelist_node *l; - if (err) { - if (!old_elem_exists) - return ERR_PTR(err); - - /* if we're updating the existing element and the hash table - * is full, use per-cpu extra elems - */ - l_new = this_cpu_ptr(htab->extra_elems); - if (l_new->state != HTAB_EXTRA_ELEM_FREE) - return ERR_PTR(-E2BIG); - l_new->state = HTAB_EXTRA_ELEM_USED; + l = pcpu_freelist_pop(&htab->freelist); + if (!l) + return ERR_PTR(-E2BIG); + l_new = container_of(l, struct htab_elem, fnode); + } } else { - l_new->state = HTAB_NOT_AN_EXTRA_ELEM; + if (atomic_inc_return(&htab->count) > htab->map.max_entries) + if (!old_elem) { + /* when map is full and update() is replacing + * old element, it's ok to allocate, since + * old element will be freed immediately. 
+ * Otherwise return an error + */ + atomic_dec(&htab->count); + return ERR_PTR(-E2BIG); + } + l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN); + if (!l_new) + return ERR_PTR(-ENOMEM); } memcpy(l_new->key, key, key_size); @@ -661,7 +700,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value, { struct bpf_htab *htab = container_of(map, struct bpf_htab, map); struct htab_elem *l_new = NULL, *l_old; - struct hlist_head *head; + struct hlist_nulls_head *head; unsigned long flags; struct bucket *b; u32 key_size, hash; @@ -690,7 +729,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value, goto err; l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false, - !!l_old); + l_old); if (IS_ERR(l_new)) { /* all pre-allocated elements are in use or memory exhausted */ ret = PTR_ERR(l_new); @@ -700,10 +739,11 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value, /* add new element to the head of the list, so that * concurrent search will find it before old elem */ - hlist_add_head_rcu(&l_new->hash_node, head); + hlist_nulls_add_head_rcu(&l_new->hash_node, head); if (l_old) { - hlist_del_rcu(&l_old->hash_node); - free_htab_elem(htab, l_old); + hlist_nulls_del_rcu(&l_old->hash_node); + if (!htab_is_prealloc(htab)) + free_htab_elem(htab, l_old); } ret = 0; err: @@ -716,7 +756,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value, { struct bpf_htab *htab = container_of(map, struct bpf_htab, map); struct htab_elem *l_new, *l_old = NULL; - struct hlist_head *head; + struct hlist_nulls_head *head; unsigned long flags; struct bucket *b; u32 key_size, hash; @@ -757,10 +797,10 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value, /* add new element to the head of the list, so that * concurrent search will find it before old elem */ - hlist_add_head_rcu(&l_new->hash_node, head); + hlist_nulls_add_head_rcu(&l_new->hash_node, head); if (l_old) { bpf_lru_node_set_ref(&l_new->lru_node); - hlist_del_rcu(&l_old->hash_node); + hlist_nulls_del_rcu(&l_old->hash_node); } ret = 0; @@ -781,7 +821,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key, { struct bpf_htab *htab = container_of(map, struct bpf_htab, map); struct htab_elem *l_new = NULL, *l_old; - struct hlist_head *head; + struct hlist_nulls_head *head; unsigned long flags; struct bucket *b; u32 key_size, hash; @@ -815,12 +855,12 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key, value, onallcpus); } else { l_new = alloc_htab_elem(htab, key, value, key_size, - hash, true, onallcpus, false); + hash, true, onallcpus, NULL); if (IS_ERR(l_new)) { ret = PTR_ERR(l_new); goto err; } - hlist_add_head_rcu(&l_new->hash_node, head); + hlist_nulls_add_head_rcu(&l_new->hash_node, head); } ret = 0; err: @@ -834,7 +874,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key, { struct bpf_htab *htab = container_of(map, struct bpf_htab, map); struct htab_elem *l_new = NULL, *l_old; - struct hlist_head *head; + struct hlist_nulls_head *head; unsigned long flags; struct bucket *b; u32 key_size, hash; @@ -882,7 +922,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key, } else { pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size), value, onallcpus); - hlist_add_head_rcu(&l_new->hash_node, head); + hlist_nulls_add_head_rcu(&l_new->hash_node, head); l_new = NULL; } ret = 0; @@ -910,7 +950,7 @@ static int 
htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key, static int htab_map_delete_elem(struct bpf_map *map, void *key) { struct bpf_htab *htab = container_of(map, struct bpf_htab, map); - struct hlist_head *head; + struct hlist_nulls_head *head; struct bucket *b; struct htab_elem *l; unsigned long flags; @@ -930,7 +970,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key) l = lookup_elem_raw(head, hash, key, key_size); if (l) { - hlist_del_rcu(&l->hash_node); + hlist_nulls_del_rcu(&l->hash_node); free_htab_elem(htab, l); ret = 0; } @@ -942,7 +982,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key) static int htab_lru_map_delete_elem(struct bpf_map *map, void *key) { struct bpf_htab *htab = container_of(map, struct bpf_htab, map); - struct hlist_head *head; + struct hlist_nulls_head *head; struct bucket *b; struct htab_elem *l; unsigned long flags; @@ -962,7 +1002,7 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key) l = lookup_elem_raw(head, hash, key, key_size); if (l) { - hlist_del_rcu(&l->hash_node); + hlist_nulls_del_rcu(&l->hash_node); ret = 0; } @@ -977,14 +1017,13 @@ static void delete_all_elements(struct bpf_htab *htab) int i; for (i = 0; i < htab->n_buckets; i++) { - struct hlist_head *head = select_bucket(htab, i); - struct hlist_node *n; + struct hlist_nulls_head *head = select_bucket(htab, i); + struct hlist_nulls_node *n; struct htab_elem *l; - hlist_for_each_entry_safe(l, n, head, hash_node) { - hlist_del_rcu(&l->hash_node); - if (l->state != HTAB_EXTRA_ELEM_USED) - htab_elem_free(htab, l); + hlist_nulls_for_each_entry_safe(l, n, head, hash_node) { + hlist_nulls_del_rcu(&l->hash_node); + htab_elem_free(htab, l); } } } @@ -1004,7 +1043,7 @@ static void htab_map_free(struct bpf_map *map) * not have executed. Wait for them. */ rcu_barrier(); - if (htab->map.map_flags & BPF_F_NO_PREALLOC) + if (!htab_is_prealloc(htab)) delete_all_elements(htab); else prealloc_destroy(htab); diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c index 8bfe0afaee1082..b37bd9ab7f5742 100644 --- a/kernel/bpf/lpm_trie.c +++ b/kernel/bpf/lpm_trie.c @@ -500,9 +500,15 @@ static void trie_free(struct bpf_map *map) raw_spin_unlock(&trie->lock); } +static int trie_get_next_key(struct bpf_map *map, void *key, void *next_key) +{ + return -ENOTSUPP; +} + static const struct bpf_map_ops trie_ops = { .map_alloc = trie_alloc, .map_free = trie_free, + .map_get_next_key = trie_get_next_key, .map_lookup_elem = trie_lookup_elem, .map_update_elem = trie_update_elem, .map_delete_elem = trie_delete_elem, diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 461eb1e66a0fdf..7af0dcc5d75556 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 3fc6e39b223e2c..a834068a400e27 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -33,7 +33,7 @@ * - out of bounds or malformed jumps * The second pass is all possible path descent from the 1st insn. * Since it's analyzing all pathes through the program, the length of the - * analysis is limited to 32k insn, which may be hit even if total number of + * analysis is limited to 64k insn, which may be hit even if total number of * insn is less then 4K, but there are too many branches that change stack/regs. 
* Number of 'branches to be analyzed' is limited to 1k * @@ -765,38 +765,56 @@ static bool is_pointer_value(struct bpf_verifier_env *env, int regno) } } -static int check_ptr_alignment(struct bpf_verifier_env *env, - struct bpf_reg_state *reg, int off, int size) +static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg, + int off, int size) { - if (reg->type != PTR_TO_PACKET && reg->type != PTR_TO_MAP_VALUE_ADJ) { - if (off % size != 0) { - verbose("misaligned access off %d size %d\n", - off, size); - return -EACCES; - } else { - return 0; - } - } - - if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) - /* misaligned access to packet is ok on x86,arm,arm64 */ - return 0; - if (reg->id && size != 1) { - verbose("Unknown packet alignment. Only byte-sized access allowed\n"); + verbose("Unknown alignment. Only byte-sized access allowed in packet access.\n"); return -EACCES; } /* skb->data is NET_IP_ALIGN-ed */ - if (reg->type == PTR_TO_PACKET && - (NET_IP_ALIGN + reg->off + off) % size != 0) { + if ((NET_IP_ALIGN + reg->off + off) % size != 0) { verbose("misaligned packet access off %d+%d+%d size %d\n", NET_IP_ALIGN, reg->off, off, size); return -EACCES; } + return 0; } +static int check_val_ptr_alignment(const struct bpf_reg_state *reg, + int size) +{ + if (size != 1) { + verbose("Unknown alignment. Only byte-sized access allowed in value access.\n"); + return -EACCES; + } + + return 0; +} + +static int check_ptr_alignment(const struct bpf_reg_state *reg, + int off, int size) +{ + switch (reg->type) { + case PTR_TO_PACKET: + return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ? 0 : + check_pkt_ptr_alignment(reg, off, size); + case PTR_TO_MAP_VALUE_ADJ: + return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ? 0 : + check_val_ptr_alignment(reg, size); + default: + if (off % size != 0) { + verbose("misaligned access off %d size %d\n", + off, size); + return -EACCES; + } + + return 0; + } +} + /* check whether memory at (regno + off) is accessible for t = (read | write) * if t==write, value_regno is a register which value is stored into memory * if t==read, value_regno is a register which will receive the value from memory @@ -818,7 +836,7 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off, if (size < 0) return size; - err = check_ptr_alignment(env, reg, off, size); + err = check_ptr_alignment(reg, off, size); if (err) return err; @@ -1925,6 +1943,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) * register as unknown. 
*/ if (env->allow_ptr_leaks && + BPF_CLASS(insn->code) == BPF_ALU64 && opcode == BPF_ADD && (dst_reg->type == PTR_TO_MAP_VALUE || dst_reg->type == PTR_TO_MAP_VALUE_ADJ)) dst_reg->type = PTR_TO_MAP_VALUE_ADJ; @@ -1973,14 +1992,15 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state, for (i = 0; i < MAX_BPF_REG; i++) if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id) - regs[i].range = dst_reg->off; + /* keep the maximum range already checked */ + regs[i].range = max(regs[i].range, dst_reg->off); for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { if (state->stack_slot_type[i] != STACK_SPILL) continue; reg = &state->spilled_regs[i / BPF_REG_SIZE]; if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id) - reg->range = dst_reg->off; + reg->range = max(reg->range, dst_reg->off); } } diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c index fc34bcf2329f4d..1dc22f6b49f5e0 100644 --- a/kernel/cgroup/cgroup-v1.c +++ b/kernel/cgroup/cgroup-v1.c @@ -5,6 +5,9 @@ #include #include #include +#include +#include +#include #include #include #include @@ -1326,7 +1329,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v) struct task_struct *task; int count = 0; - seq_printf(seq, "css_set %p\n", cset); + seq_printf(seq, "css_set %pK\n", cset); list_for_each_entry(task, &cset->tasks, cg_list) { if (count++ > MAX_TASKS_SHOWN_PER_CSS) diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index e8f87bf9840c0e..48851327a15e18 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include #include @@ -2668,7 +2669,7 @@ static bool css_visible(struct cgroup_subsys_state *css) * * Returns 0 on success, -errno on failure. On failure, csses which have * been processed already aren't cleaned up. The caller is responsible for - * cleaning up with cgroup_apply_control_disble(). + * cleaning up with cgroup_apply_control_disable(). */ static int cgroup_apply_control_enable(struct cgroup *cgrp) { diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index b3088886cd375b..0f41292be0fb7d 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -44,6 +44,8 @@ #include #include #include +#include +#include #include #include #include diff --git a/kernel/cgroup/namespace.c b/kernel/cgroup/namespace.c index cff7ea62c38f01..96d38dab6fb2f7 100644 --- a/kernel/cgroup/namespace.c +++ b/kernel/cgroup/namespace.c @@ -1,6 +1,6 @@ #include "cgroup-internal.h" -#include +#include #include #include #include diff --git a/kernel/cgroup/pids.c b/kernel/cgroup/pids.c index 2bd673783f1a95..2237201d66d5da 100644 --- a/kernel/cgroup/pids.c +++ b/kernel/cgroup/pids.c @@ -214,7 +214,7 @@ static void pids_cancel_attach(struct cgroup_taskset *tset) /* * task_css_check(true) in pids_can_fork() and pids_cancel_fork() relies - * on threadgroup_change_begin() held by the copy_process(). + * on cgroup_threadgroup_change_begin() held by the copy_process(). */ static int pids_can_fork(struct task_struct *task) { @@ -229,7 +229,7 @@ static int pids_can_fork(struct task_struct *task) /* Only log the first time events_limit is incremented. 
*/ if (atomic64_inc_return(&pids->events_limit) == 1) { pr_info("cgroup: fork rejected by pids controller in "); - pr_cont_cgroup_path(task_cgroup(current, pids_cgrp_id)); + pr_cont_cgroup_path(css->cgroup); pr_cont("\n"); } cgroup_file_notify(&pids->events_file); diff --git a/kernel/cpu.c b/kernel/cpu.c index 0a5f630f5c5430..37b223e4fc05b7 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -7,7 +7,9 @@ #include #include #include -#include +#include +#include +#include #include #include #include @@ -1333,26 +1335,21 @@ static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name, struct cpuhp_step *sp; int ret = 0; - mutex_lock(&cpuhp_state_mutex); - if (state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN) { ret = cpuhp_reserve_state(state); if (ret < 0) - goto out; + return ret; state = ret; } sp = cpuhp_get_step(state); - if (name && sp->name) { - ret = -EBUSY; - goto out; - } + if (name && sp->name) + return -EBUSY; + sp->startup.single = startup; sp->teardown.single = teardown; sp->name = name; sp->multi_instance = multi_instance; INIT_HLIST_HEAD(&sp->list); -out: - mutex_unlock(&cpuhp_state_mutex); return ret; } @@ -1426,6 +1423,7 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node, return -EINVAL; get_online_cpus(); + mutex_lock(&cpuhp_state_mutex); if (!invoke || !sp->startup.multi) goto add_node; @@ -1445,16 +1443,14 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node, if (ret) { if (sp->teardown.multi) cpuhp_rollback_install(cpu, state, node); - goto err; + goto unlock; } } add_node: ret = 0; - mutex_lock(&cpuhp_state_mutex); hlist_add_head(node, &sp->list); +unlock: mutex_unlock(&cpuhp_state_mutex); - -err: put_online_cpus(); return ret; } @@ -1489,6 +1485,7 @@ int __cpuhp_setup_state(enum cpuhp_state state, return -EINVAL; get_online_cpus(); + mutex_lock(&cpuhp_state_mutex); ret = cpuhp_store_callbacks(state, name, startup, teardown, multi_instance); @@ -1522,6 +1519,7 @@ int __cpuhp_setup_state(enum cpuhp_state state, } } out: + mutex_unlock(&cpuhp_state_mutex); put_online_cpus(); /* * If the requested state is CPUHP_AP_ONLINE_DYN, return the @@ -1545,6 +1543,8 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state, return -EINVAL; get_online_cpus(); + mutex_lock(&cpuhp_state_mutex); + if (!invoke || !cpuhp_get_teardown_cb(state)) goto remove; /* @@ -1561,7 +1561,6 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state, } remove: - mutex_lock(&cpuhp_state_mutex); hlist_del(node); mutex_unlock(&cpuhp_state_mutex); put_online_cpus(); @@ -1569,6 +1568,7 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state, return 0; } EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance); + /** * __cpuhp_remove_state - Remove the callbacks for an hotplug machine state * @state: The state to remove @@ -1587,6 +1587,7 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke) get_online_cpus(); + mutex_lock(&cpuhp_state_mutex); if (sp->multi_instance) { WARN(!hlist_empty(&sp->list), "Error: Removing state %d which has instances left.\n", @@ -1611,6 +1612,7 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke) } remove: cpuhp_store_callbacks(state, NULL, NULL, NULL, false); + mutex_unlock(&cpuhp_state_mutex); put_online_cpus(); } EXPORT_SYMBOL(__cpuhp_remove_state); diff --git a/kernel/cred.c b/kernel/cred.c index 5f264fb5737dcd..2bc66075740fdd 100644 --- a/kernel/cred.c +++ b/kernel/cred.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include diff --git 
a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index 79517e5549f119..65c0f13637882d 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c @@ -49,6 +49,7 @@ #include #include #include +#include #include #include #include @@ -232,9 +233,9 @@ static void kgdb_flush_swbreak_addr(unsigned long addr) int i; for (i = 0; i < VMACACHE_SIZE; i++) { - if (!current->vmacache[i]) + if (!current->vmacache.vmas[i]) continue; - flush_cache_range(current->vmacache[i], + flush_cache_range(current->vmacache.vmas[i], addr, addr + BREAK_INSTR_SIZE); } } diff --git a/kernel/debug/gdbstub.c b/kernel/debug/gdbstub.c index 19d9a578c75316..7510dc687c0dc1 100644 --- a/kernel/debug/gdbstub.c +++ b/kernel/debug/gdbstub.c @@ -29,6 +29,7 @@ */ #include +#include #include #include #include diff --git a/kernel/debug/kdb/kdb_bt.c b/kernel/debug/kdb/kdb_bt.c index fe15fff5df5340..6ad4a9fcbd6f70 100644 --- a/kernel/debug/kdb/kdb_bt.c +++ b/kernel/debug/kdb/kdb_bt.c @@ -12,7 +12,8 @@ #include #include #include -#include +#include +#include #include #include #include "kdb_private.h" diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index ca183919d3027a..c8146d53ca677a 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -18,6 +18,9 @@ #include #include #include +#include +#include +#include #include #include #include diff --git a/kernel/delayacct.c b/kernel/delayacct.c index 6605496569914d..4a1c33416b6a2d 100644 --- a/kernel/delayacct.c +++ b/kernel/delayacct.c @@ -14,6 +14,8 @@ */ #include +#include +#include #include #include #include diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c index e9fdb5203de5c0..c04917cad1bfdc 100644 --- a/kernel/events/callchain.c +++ b/kernel/events/callchain.c @@ -11,6 +11,8 @@ #include #include +#include + #include "internal.h" struct callchain_cpus_entries { diff --git a/kernel/events/core.c b/kernel/events/core.c index 1031bdf9f01251..ff01cba86f430f 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -46,6 +46,8 @@ #include #include #include +#include +#include #include "internal.h" @@ -996,7 +998,7 @@ list_update_cgroup_event(struct perf_event *event, */ #define PERF_CPU_HRTIMER (1000 / HZ) /* - * function must be called with interrupts disbled + * function must be called with interrupts disabled */ static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr) { @@ -4254,7 +4256,7 @@ int perf_event_release_kernel(struct perf_event *event) raw_spin_lock_irq(&ctx->lock); /* - * Mark this even as STATE_DEAD, there is no external reference to it + * Mark this event as STATE_DEAD, there is no external reference to it * anymore. * * Anybody acquiring event->child_mutex after the below loop _must_ @@ -10415,21 +10417,22 @@ void perf_event_free_task(struct task_struct *task) continue; mutex_lock(&ctx->mutex); -again: - list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, - group_entry) - perf_free_event(event, ctx); + raw_spin_lock_irq(&ctx->lock); + /* + * Destroy the task <-> ctx relation and mark the context dead. + * + * This is important because even though the task hasn't been + * exposed yet the context has been (through child_list). 
+ */ + RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], NULL); + WRITE_ONCE(ctx->task, TASK_TOMBSTONE); + put_task_struct(task); /* cannot be last */ + raw_spin_unlock_irq(&ctx->lock); - list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, - group_entry) + list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) perf_free_event(event, ctx); - if (!list_empty(&ctx->pinned_groups) || - !list_empty(&ctx->flexible_groups)) - goto again; - mutex_unlock(&ctx->mutex); - put_ctx(ctx); } } @@ -10467,7 +10470,12 @@ const struct perf_event_attr *perf_event_attrs(struct perf_event *event) } /* - * inherit a event from parent task to child task: + * Inherit a event from parent task to child task. + * + * Returns: + * - valid pointer on success + * - NULL for orphaned events + * - IS_ERR() on error */ static struct perf_event * inherit_event(struct perf_event *parent_event, @@ -10561,6 +10569,16 @@ inherit_event(struct perf_event *parent_event, return child_event; } +/* + * Inherits an event group. + * + * This will quietly suppress orphaned events; !inherit_event() is not an error. + * This matches with perf_event_release_kernel() removing all child events. + * + * Returns: + * - 0 on success + * - <0 on error + */ static int inherit_group(struct perf_event *parent_event, struct task_struct *parent, struct perf_event_context *parent_ctx, @@ -10575,6 +10593,11 @@ static int inherit_group(struct perf_event *parent_event, child, NULL, child_ctx); if (IS_ERR(leader)) return PTR_ERR(leader); + /* + * @leader can be NULL here because of is_orphaned_event(). In this + * case inherit_event() will create individual events, similar to what + * perf_group_detach() would do anyway. + */ list_for_each_entry(sub, &parent_event->sibling_list, group_entry) { child_ctr = inherit_event(sub, parent, parent_ctx, child, leader, child_ctx); @@ -10584,6 +10607,17 @@ static int inherit_group(struct perf_event *parent_event, return 0; } +/* + * Creates the child task context and tries to inherit the event-group. + * + * Clears @inherited_all on !attr.inherited or error. Note that we'll leave + * inherited_all set when we 'fail' to inherit an orphaned event; this is + * consistent with perf_event_release_kernel() removing all child events. + * + * Returns: + * - 0 on success + * - <0 on error + */ static int inherit_task_group(struct perf_event *event, struct task_struct *parent, struct perf_event_context *parent_ctx, @@ -10606,7 +10640,6 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent, * First allocate and initialize a context for the * child. 
*/ - child_ctx = alloc_perf_context(parent_ctx->pmu, child); if (!child_ctx) return -ENOMEM; @@ -10668,7 +10701,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn) ret = inherit_task_group(event, parent, parent_ctx, child, ctxn, &inherited_all); if (ret) - break; + goto out_unlock; } /* @@ -10684,7 +10717,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn) ret = inherit_task_group(event, parent, parent_ctx, child, ctxn, &inherited_all); if (ret) - break; + goto out_unlock; } raw_spin_lock_irqsave(&parent_ctx->lock, flags); @@ -10712,6 +10745,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn) } raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); +out_unlock: mutex_unlock(&parent_ctx->mutex); perf_unpin_context(parent_ctx); diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index d630f8ac4d2f21..0e137f98a50c30 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -27,6 +27,8 @@ #include /* read_mapping_page */ #include #include +#include +#include #include #include /* anon_vma_prepare */ #include /* set_pte_at_notify */ diff --git a/kernel/exit.c b/kernel/exit.c index 8a768a3672a555..516acdb0e0ec9b 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -6,6 +6,12 @@ #include #include +#include +#include +#include +#include +#include +#include #include #include #include @@ -548,7 +554,6 @@ static void exit_mm(void) enter_lazy_tlb(mm, current); task_unlock(current); mm_update_next_owner(mm); - userfaultfd_exit(mm); mmput(mm); if (test_thread_flag(TIF_MEMDIE)) exit_oom_victim(); diff --git a/kernel/fork.c b/kernel/fork.c index 246bf9aaf9dfdd..6c463c80e93de8 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -12,6 +12,16 @@ */ #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include #include #include @@ -1455,6 +1465,21 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid) task->pids[type].pid = pid; } +static inline void rcu_copy_process(struct task_struct *p) +{ +#ifdef CONFIG_PREEMPT_RCU + p->rcu_read_lock_nesting = 0; + p->rcu_read_unlock_special.s = 0; + p->rcu_blocked_node = NULL; + INIT_LIST_HEAD(&p->rcu_node_entry); +#endif /* #ifdef CONFIG_PREEMPT_RCU */ +#ifdef CONFIG_TASKS_RCU + p->rcu_tasks_holdout = false; + INIT_LIST_HEAD(&p->rcu_tasks_holdout_list); + p->rcu_tasks_idle_cpu = -1; +#endif /* #ifdef CONFIG_TASKS_RCU */ +} + /* * This creates a new process as a copy of the old one, * but does not actually start it yet. @@ -1746,7 +1771,7 @@ static __latent_entropy struct task_struct *copy_process( INIT_LIST_HEAD(&p->thread_group); p->task_works = NULL; - threadgroup_change_begin(current); + cgroup_threadgroup_change_begin(current); /* * Ensure that the cgroup subsystem policies allow the new process to be * forked. 
It should be noted the the new process's css_set can be changed @@ -1843,7 +1868,7 @@ static __latent_entropy struct task_struct *copy_process( proc_fork_connector(p); cgroup_post_fork(p); - threadgroup_change_end(current); + cgroup_threadgroup_change_end(current); perf_event_fork(p); trace_task_newtask(p, clone_flags); @@ -1854,7 +1879,7 @@ static __latent_entropy struct task_struct *copy_process( bad_fork_cancel_cgroup: cgroup_cancel_fork(p); bad_fork_free_pid: - threadgroup_change_end(current); + cgroup_threadgroup_change_end(current); if (pid != &init_struct_pid) free_pid(pid); bad_fork_cleanup_thread: diff --git a/kernel/futex.c b/kernel/futex.c index b687cb22301ce0..45858ec739411f 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -61,6 +61,8 @@ #include #include #include +#include +#include #include #include #include @@ -2813,7 +2815,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, { struct hrtimer_sleeper timeout, *to = NULL; struct rt_mutex_waiter rt_waiter; - struct rt_mutex *pi_mutex = NULL; struct futex_hash_bucket *hb; union futex_key key2 = FUTEX_KEY_INIT; struct futex_q q = futex_q_init; @@ -2897,6 +2898,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, if (q.pi_state && (q.pi_state->owner != current)) { spin_lock(q.lock_ptr); ret = fixup_pi_state_owner(uaddr2, &q, current); + if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) + rt_mutex_unlock(&q.pi_state->pi_mutex); /* * Drop the reference to the pi state which * the requeue_pi() code acquired for us. @@ -2905,6 +2908,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, spin_unlock(q.lock_ptr); } } else { + struct rt_mutex *pi_mutex; + /* * We have been woken up by futex_unlock_pi(), a timeout, or a * signal. futex_unlock_pi() will not destroy the lock_ptr nor @@ -2928,18 +2933,19 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, if (res) ret = (res < 0) ? res : 0; + /* + * If fixup_pi_state_owner() faulted and was unable to handle + * the fault, unlock the rt_mutex and return the fault to + * userspace. + */ + if (ret && rt_mutex_owner(pi_mutex) == current) + rt_mutex_unlock(pi_mutex); + /* Unqueue and drop the lock. */ unqueue_me_pi(&q); } - /* - * If fixup_pi_state_owner() faulted and was unable to handle the - * fault, unlock the rt_mutex and return the fault to userspace. - */ - if (ret == -EFAULT) { - if (pi_mutex && rt_mutex_owner(pi_mutex) == current) - rt_mutex_unlock(pi_mutex); - } else if (ret == -EINTR) { + if (ret == -EINTR) { /* * We've already been requeued, but cannot restart by calling * futex_lock_pi() directly. 
We could restart this syscall, but diff --git a/kernel/hung_task.c b/kernel/hung_task.c index 40c07e4fa116e0..f0f8e2a8496fea 100644 --- a/kernel/hung_task.c +++ b/kernel/hung_task.c @@ -16,6 +16,9 @@ #include #include #include +#include +#include + #include /* diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 944d068b6c4887..a4afe5cc5af182 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -17,6 +17,8 @@ #include #include #include +#include +#include #include #include "internals.h" diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c index b56a558e406db6..b118735fea9da4 100644 --- a/kernel/kexec_file.c +++ b/kernel/kexec_file.c @@ -614,13 +614,13 @@ static int kexec_calculate_store_digests(struct kimage *image) ret = crypto_shash_final(desc, digest); if (ret) goto out_free_digest; - ret = kexec_purgatory_get_set_symbol(image, "sha_regions", - sha_regions, sha_region_sz, 0); + ret = kexec_purgatory_get_set_symbol(image, "purgatory_sha_regions", + sha_regions, sha_region_sz, 0); if (ret) goto out_free_digest; - ret = kexec_purgatory_get_set_symbol(image, "sha256_digest", - digest, SHA256_DIGEST_SIZE, 0); + ret = kexec_purgatory_get_set_symbol(image, "purgatory_sha256_digest", + digest, SHA256_DIGEST_SIZE, 0); if (ret) goto out_free_digest; } diff --git a/kernel/kexec_internal.h b/kernel/kexec_internal.h index 4cef7e4706b098..799a8a4521870a 100644 --- a/kernel/kexec_internal.h +++ b/kernel/kexec_internal.h @@ -15,11 +15,7 @@ int kimage_is_destination_range(struct kimage *image, extern struct mutex kexec_mutex; #ifdef CONFIG_KEXEC_FILE -struct kexec_sha_region { - unsigned long start; - unsigned long len; -}; - +#include void kimage_file_post_load_cleanup(struct kimage *image); #else /* CONFIG_KEXEC_FILE */ static inline void kimage_file_post_load_cleanup(struct kimage *image) { } diff --git a/kernel/kmod.c b/kernel/kmod.c index 0c407f905ca4ef..563f97e2be3618 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c @@ -20,6 +20,8 @@ */ #include #include +#include +#include #include #include #include diff --git a/kernel/kthread.c b/kernel/kthread.c index 8461a4372e8aab..2f26adea0f84d2 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -5,7 +5,9 @@ * even if we're invoked from userspace (think modprobe, hotplug cpu, * etc.). */ +#include #include +#include #include #include #include diff --git a/kernel/latencytop.c b/kernel/latencytop.c index b5c30d9f46c508..96b4179cee6a76 100644 --- a/kernel/latencytop.c +++ b/kernel/latencytop.c @@ -55,6 +55,8 @@ #include #include #include +#include +#include #include #include diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 9812e5dd409e98..a95e5d1f4a9c44 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -28,6 +28,8 @@ #define DISABLE_BRANCH_PROFILING #include #include +#include +#include #include #include #include @@ -3260,10 +3262,17 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, if (depth) { hlock = curr->held_locks + depth - 1; if (hlock->class_idx == class_idx && nest_lock) { - if (hlock->references) + if (hlock->references) { + /* + * Check: unsigned int references:12, overflow. 
+ */ + if (DEBUG_LOCKS_WARN_ON(hlock->references == (1 << 12)-1)) + return 0; + hlock->references++; - else + } else { hlock->references = 2; + } return 1; } diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c index 28350dc8ecbb17..f24582d4dad37b 100644 --- a/kernel/locking/locktorture.c +++ b/kernel/locking/locktorture.c @@ -32,6 +32,8 @@ #include #include #include +#include +#include #include #include #include diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c index ad2d9e22697b92..198527a6214920 100644 --- a/kernel/locking/mutex.c +++ b/kernel/locking/mutex.c @@ -19,8 +19,10 @@ */ #include #include -#include +#include #include +#include +#include #include #include #include diff --git a/kernel/locking/qspinlock_stat.h b/kernel/locking/qspinlock_stat.h index e852be4851fc91..4a30ef63c60764 100644 --- a/kernel/locking/qspinlock_stat.h +++ b/kernel/locking/qspinlock_stat.h @@ -63,6 +63,7 @@ enum qlock_stats { */ #include #include +#include #include static const char * const qstat_names[qstat_num + 1] = { diff --git a/kernel/locking/rtmutex-debug.c b/kernel/locking/rtmutex-debug.c index 62b6cee8ea7f9e..97ee9df32e0f03 100644 --- a/kernel/locking/rtmutex-debug.c +++ b/kernel/locking/rtmutex-debug.c @@ -18,6 +18,7 @@ */ #include #include +#include #include #include #include diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index d340be3a488f7a..6edc32ecd9c544 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -12,9 +12,11 @@ */ #include #include -#include +#include #include #include +#include +#include #include #include "rtmutex_common.h" diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h index 990134617b4c02..856dfff5c33ab5 100644 --- a/kernel/locking/rtmutex_common.h +++ b/kernel/locking/rtmutex_common.h @@ -13,6 +13,7 @@ #define __KERNEL_RTMUTEX_COMMON_H #include +#include /* * This is the control structure for tasks blocked on a rt_mutex, diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c index 5eacab880f672c..c65f7989f850d1 100644 --- a/kernel/locking/rwsem-spinlock.c +++ b/kernel/locking/rwsem-spinlock.c @@ -6,7 +6,8 @@ * - Derived also from comments by Linus */ #include -#include +#include +#include #include enum rwsem_waiter_type { @@ -212,10 +213,9 @@ int __sched __down_write_common(struct rw_semaphore *sem, int state) */ if (sem->count == 0) break; - if (signal_pending_state(state, current)) { - ret = -EINTR; - goto out; - } + if (signal_pending_state(state, current)) + goto out_nolock; + set_current_state(state); raw_spin_unlock_irqrestore(&sem->wait_lock, flags); schedule(); @@ -223,12 +223,19 @@ int __sched __down_write_common(struct rw_semaphore *sem, int state) } /* got the lock */ sem->count = -1; -out: list_del(&waiter.list); raw_spin_unlock_irqrestore(&sem->wait_lock, flags); return ret; + +out_nolock: + list_del(&waiter.list); + if (!list_empty(&sem->wait_list)) + __rwsem_do_wake(sem, 1); + raw_spin_unlock_irqrestore(&sem->wait_lock, flags); + + return -EINTR; } void __sched __down_write(struct rw_semaphore *sem) diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c index 2ad8d8dc3bb19d..34e727f18e4945 100644 --- a/kernel/locking/rwsem-xadd.c +++ b/kernel/locking/rwsem-xadd.c @@ -10,10 +10,12 @@ * and Davidlohr Bueso . Based on mutexes. 
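The lockdep hunk above guards hlock->references, which the added comment notes is a 12-bit bitfield, against wrapping back to zero when the same nest lock is re-acquired many times. A minimal user-space sketch of the same saturating check, using an illustrative struct rather than the real held_lock:

#include <stdio.h>

struct held_lock_model {
	unsigned int references : 12;	/* same width as the lockdep field */
};

/* Refuse the increment instead of letting the bitfield wrap to zero. */
static int take_reference(struct held_lock_model *hl)
{
	if (hl->references == (1 << 12) - 1) {
		fprintf(stderr, "reference count would overflow\n");
		return 0;
	}
	hl->references++;
	return 1;
}

int main(void)
{
	struct held_lock_model hl = { .references = (1 << 12) - 2 };

	printf("%d\n", take_reference(&hl));	/* 1: 4094 -> 4095 */
	printf("%d\n", take_reference(&hl));	/* 0: refuses to wrap */
	return 0;
}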
*/ #include -#include #include #include +#include #include +#include +#include #include #include "rwsem.h" diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c index 45ba475d4be344..90a74ccd85a4b9 100644 --- a/kernel/locking/rwsem.c +++ b/kernel/locking/rwsem.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include diff --git a/kernel/locking/semaphore.c b/kernel/locking/semaphore.c index 9512e37637dc70..561acdd399605b 100644 --- a/kernel/locking/semaphore.c +++ b/kernel/locking/semaphore.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c index da6c9a34f62f5c..6b7abb334ca602 100644 --- a/kernel/locking/test-ww_mutex.c +++ b/kernel/locking/test-ww_mutex.c @@ -50,7 +50,7 @@ static void test_mutex_work(struct work_struct *work) if (mtx->flags & TEST_MTX_TRY) { while (!ww_mutex_trylock(&mtx->mutex)) - cpu_relax(); + cond_resched(); } else { ww_mutex_lock(&mtx->mutex, NULL); } @@ -88,7 +88,7 @@ static int __test_mutex(unsigned int flags) ret = -EINVAL; break; } - cpu_relax(); + cond_resched(); } while (time_before(jiffies, timeout)); } else { ret = wait_for_completion_timeout(&mtx.done, TIMEOUT); @@ -627,7 +627,7 @@ static int __init test_ww_mutex_init(void) if (ret) return ret; - ret = stress(4096, hweight32(STRESS_ALL)*ncpus, 1<<12, STRESS_ALL); + ret = stress(4095, hweight32(STRESS_ALL)*ncpus, 1<<12, STRESS_ALL); if (ret) return ret; diff --git a/kernel/memremap.c b/kernel/memremap.c index 06123234f1189c..07e85e5229da84 100644 --- a/kernel/memremap.c +++ b/kernel/memremap.c @@ -247,11 +247,9 @@ static void devm_memremap_pages_release(struct device *dev, void *data) align_start = res->start & ~(SECTION_SIZE - 1); align_size = ALIGN(resource_size(res), SECTION_SIZE); - lock_device_hotplug(); mem_hotplug_begin(); arch_remove_memory(align_start, align_size); mem_hotplug_done(); - unlock_device_hotplug(); untrack_pfn(NULL, PHYS_PFN(align_start), align_size); pgmap_radix_release(res); @@ -364,11 +362,9 @@ void *devm_memremap_pages(struct device *dev, struct resource *res, if (error) goto err_pfn_remap; - lock_device_hotplug(); mem_hotplug_begin(); error = arch_add_memory(nid, align_start, align_size, true); mem_hotplug_done(); - unlock_device_hotplug(); if (error) goto err_add_memory; diff --git a/kernel/padata.c b/kernel/padata.c index 05316c9f32da9d..3202aa17492c80 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -186,19 +186,20 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd) reorder = &next_queue->reorder; + spin_lock(&reorder->lock); if (!list_empty(&reorder->list)) { padata = list_entry(reorder->list.next, struct padata_priv, list); - spin_lock(&reorder->lock); list_del_init(&padata->list); atomic_dec(&pd->reorder_objects); - spin_unlock(&reorder->lock); pd->processed++; + spin_unlock(&reorder->lock); goto out; } + spin_unlock(&reorder->lock); if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) { padata = ERR_PTR(-ENODATA); diff --git a/kernel/panic.c b/kernel/panic.c index 3ec16e603e8828..a58932b41700a9 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -9,6 +9,7 @@ * to indicate a major problem. 
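The padata hunk above widens reorder->lock so that testing the reorder list, dequeuing the next object and bumping pd->processed all happen in one critical section; checking list_empty() before taking the lock left a window for another CPU to empty the list in between. A small pthread sketch of the corrected test-and-pop pattern (plain C model, not the padata API):

#include <pthread.h>
#include <stdio.h>

struct node {
	struct node *next;
	int seq;
};

static struct node *reorder_head;
static pthread_mutex_t reorder_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long processed;

/* Test and pop under a single critical section; testing the list while
 * unlocked and only then taking the lock (the old pattern) is racy.
 */
static struct node *pop_next(void)
{
	struct node *n = NULL;

	pthread_mutex_lock(&reorder_lock);
	if (reorder_head) {
		n = reorder_head;
		reorder_head = n->next;
		processed++;
	}
	pthread_mutex_unlock(&reorder_lock);
	return n;
}

int main(void)
{
	struct node a = { .next = NULL, .seq = 1 };

	reorder_head = &a;
	printf("%p %lu\n", (void *)pop_next(), processed);
	printf("%p %lu\n", (void *)pop_next(), processed);
	return 0;
}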
*/ #include +#include #include #include #include diff --git a/kernel/pid.c b/kernel/pid.c index 0291804151b587..0143ac0ddceb9c 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -38,6 +38,7 @@ #include #include #include +#include #define pid_hashfn(nr, ns) \ hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift) diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c index eef2ce9686366a..de461aa0bf9acc 100644 --- a/kernel/pid_namespace.c +++ b/kernel/pid_namespace.c @@ -12,12 +12,15 @@ #include #include #include +#include #include #include #include #include #include #include +#include +#include struct pid_cache { int nr_ids; diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 86385af1080f09..a8b978c35a6a93 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -10,6 +10,8 @@ * This file is released under the GPLv2. */ +#define pr_fmt(fmt) "PM: " fmt + #include #include #include @@ -21,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -104,7 +107,7 @@ EXPORT_SYMBOL(system_entering_hibernation); #ifdef CONFIG_PM_DEBUG static void hibernation_debug_sleep(void) { - printk(KERN_INFO "hibernation debug: Waiting for 5 seconds.\n"); + pr_info("hibernation debug: Waiting for 5 seconds.\n"); mdelay(5000); } @@ -250,10 +253,9 @@ void swsusp_show_speed(ktime_t start, ktime_t stop, centisecs = 1; /* avoid div-by-zero */ k = nr_pages * (PAGE_SIZE / 1024); kps = (k * 100) / centisecs; - printk(KERN_INFO "PM: %s %u kbytes in %u.%02u seconds (%u.%02u MB/s)\n", - msg, k, - centisecs / 100, centisecs % 100, - kps / 1000, (kps % 1000) / 10); + pr_info("%s %u kbytes in %u.%02u seconds (%u.%02u MB/s)\n", + msg, k, centisecs / 100, centisecs % 100, kps / 1000, + (kps % 1000) / 10); } /** @@ -271,8 +273,7 @@ static int create_image(int platform_mode) error = dpm_suspend_end(PMSG_FREEZE); if (error) { - printk(KERN_ERR "PM: Some devices failed to power down, " - "aborting hibernation\n"); + pr_err("Some devices failed to power down, aborting hibernation\n"); return error; } @@ -288,8 +289,7 @@ static int create_image(int platform_mode) error = syscore_suspend(); if (error) { - printk(KERN_ERR "PM: Some system devices failed to power down, " - "aborting hibernation\n"); + pr_err("Some system devices failed to power down, aborting hibernation\n"); goto Enable_irqs; } @@ -304,8 +304,8 @@ static int create_image(int platform_mode) restore_processor_state(); trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, false); if (error) - printk(KERN_ERR "PM: Error %d creating hibernation image\n", - error); + pr_err("Error %d creating hibernation image\n", error); + if (!in_suspend) { events_check_enabled = false; clear_free_pages(); @@ -432,8 +432,7 @@ static int resume_target_kernel(bool platform_mode) error = dpm_suspend_end(PMSG_QUIESCE); if (error) { - printk(KERN_ERR "PM: Some devices failed to power down, " - "aborting resume\n"); + pr_err("Some devices failed to power down, aborting resume\n"); return error; } @@ -608,6 +607,22 @@ static void power_down(void) { #ifdef CONFIG_SUSPEND int error; + + if (hibernation_mode == HIBERNATION_SUSPEND) { + error = suspend_devices_and_enter(PM_SUSPEND_MEM); + if (error) { + hibernation_mode = hibernation_ops ? + HIBERNATION_PLATFORM : + HIBERNATION_SHUTDOWN; + } else { + /* Restore swap signature. */ + error = swsusp_unmark(); + if (error) + pr_err("Swap will be unusable! 
Try swapon -a.\n"); + + return; + } + } #endif switch (hibernation_mode) { @@ -620,32 +635,13 @@ static void power_down(void) if (pm_power_off) kernel_power_off(); break; -#ifdef CONFIG_SUSPEND - case HIBERNATION_SUSPEND: - error = suspend_devices_and_enter(PM_SUSPEND_MEM); - if (error) { - if (hibernation_ops) - hibernation_mode = HIBERNATION_PLATFORM; - else - hibernation_mode = HIBERNATION_SHUTDOWN; - power_down(); - } - /* - * Restore swap signature. - */ - error = swsusp_unmark(); - if (error) - printk(KERN_ERR "PM: Swap will be unusable! " - "Try swapon -a.\n"); - return; -#endif } kernel_halt(); /* * Valid image is on the disk, if we continue we risk serious data * corruption after resume. */ - printk(KERN_CRIT "PM: Please power down manually\n"); + pr_crit("Power down manually\n"); while (1) cpu_relax(); } @@ -655,7 +651,7 @@ static int load_image_and_restore(void) int error; unsigned int flags; - pr_debug("PM: Loading hibernation image.\n"); + pr_debug("Loading hibernation image.\n"); lock_device_hotplug(); error = create_basic_memory_bitmaps(); @@ -667,7 +663,7 @@ static int load_image_and_restore(void) if (!error) hibernation_restore(flags & SF_PLATFORM_MODE); - printk(KERN_ERR "PM: Failed to load hibernation image, recovering.\n"); + pr_err("Failed to load hibernation image, recovering.\n"); swsusp_free(); free_basic_memory_bitmaps(); Unlock: @@ -685,7 +681,7 @@ int hibernate(void) bool snapshot_test = false; if (!hibernation_available()) { - pr_debug("PM: Hibernation not available.\n"); + pr_debug("Hibernation not available.\n"); return -EPERM; } @@ -703,9 +699,9 @@ int hibernate(void) goto Exit; } - printk(KERN_INFO "PM: Syncing filesystems ... "); + pr_info("Syncing filesystems ... \n"); sys_sync(); - printk("done.\n"); + pr_info("done.\n"); error = freeze_processes(); if (error) @@ -731,7 +727,7 @@ int hibernate(void) else flags |= SF_CRC32_MODE; - pr_debug("PM: writing image.\n"); + pr_debug("Writing image.\n"); error = swsusp_write(flags); swsusp_free(); if (!error) { @@ -743,7 +739,7 @@ int hibernate(void) in_suspend = 0; pm_restore_gfp_mask(); } else { - pr_debug("PM: Image restored successfully.\n"); + pr_debug("Image restored successfully.\n"); } Free_bitmaps: @@ -751,7 +747,7 @@ int hibernate(void) Thaw: unlock_device_hotplug(); if (snapshot_test) { - pr_debug("PM: Checking hibernation image\n"); + pr_debug("Checking hibernation image\n"); error = swsusp_check(); if (!error) error = load_image_and_restore(); @@ -815,10 +811,10 @@ static int software_resume(void) goto Unlock; } - pr_debug("PM: Checking hibernation image partition %s\n", resume_file); + pr_debug("Checking hibernation image partition %s\n", resume_file); if (resume_delay) { - printk(KERN_INFO "Waiting %dsec before reading resume device...\n", + pr_info("Waiting %dsec before reading resume device ...\n", resume_delay); ssleep(resume_delay); } @@ -857,10 +853,10 @@ static int software_resume(void) } Check_image: - pr_debug("PM: Hibernation image partition %d:%d present\n", + pr_debug("Hibernation image partition %d:%d present\n", MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device)); - pr_debug("PM: Looking for hibernation image.\n"); + pr_debug("Looking for hibernation image.\n"); error = swsusp_check(); if (error) goto Unlock; @@ -879,7 +875,7 @@ static int software_resume(void) goto Close_Finish; } - pr_debug("PM: Preparing processes for restore.\n"); + pr_debug("Preparing processes for restore.\n"); error = freeze_processes(); if (error) goto Close_Finish; @@ -892,7 +888,7 @@ static int 
software_resume(void) /* For success case, the suspend path will release the lock */ Unlock: mutex_unlock(&pm_mutex); - pr_debug("PM: Hibernation image not present or could not be loaded.\n"); + pr_debug("Hibernation image not present or could not be loaded.\n"); return error; Close_Finish: swsusp_close(FMODE_READ); @@ -1016,7 +1012,7 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr, error = -EINVAL; if (!error) - pr_debug("PM: Hibernation mode set to '%s'\n", + pr_debug("Hibernation mode set to '%s'\n", hibernation_modes[mode]); unlock_system_sleep(); return error ? error : n; @@ -1052,7 +1048,7 @@ static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr, lock_system_sleep(); swsusp_resume_device = res; unlock_system_sleep(); - printk(KERN_INFO "PM: Starting manual resume from disk\n"); + pr_info("Starting manual resume from disk\n"); noresume = 0; software_resume(); return n; diff --git a/kernel/power/process.c b/kernel/power/process.c index 2fba066e125fa9..c7209f060eeb7c 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -12,6 +12,8 @@ #include #include #include +#include +#include #include #include #include diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 905d5bbd595fa3..d79a38de425a0d 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 34da86e73d00b9..2984fb0f025742 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -45,6 +45,9 @@ #include #include #include +#include +#include +#include #include #include diff --git a/kernel/profile.c b/kernel/profile.c index f67ce0aa6bc449..9aa2a4445b0d2a 100644 --- a/kernel/profile.c +++ b/kernel/profile.c @@ -25,6 +25,8 @@ #include #include #include +#include + #include #include #include diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 49ba7c1ade9d07..0af9287121746d 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -10,6 +10,9 @@ #include #include #include +#include +#include +#include #include #include #include diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c index 123ccbd2244929..a4a86fb47e4a3c 100644 --- a/kernel/rcu/rcuperf.c +++ b/kernel/rcu/rcuperf.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index d81345be730ea5..cccc417a813502 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -32,7 +32,8 @@ #include #include #include -#include +#include +#include #include #include #include diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c index e773129c8b08d2..ef3bcfb15b39ec 100644 --- a/kernel/rcu/srcu.c +++ b/kernel/rcu/srcu.c @@ -30,7 +30,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c index fa6a48d3917bf2..6ad330dbbae2ec 100644 --- a/kernel/rcu/tiny.c +++ b/kernel/rcu/tiny.c @@ -25,7 +25,7 @@ #include #include #include -#include +#include #include #include #include @@ -47,6 +47,18 @@ static void __call_rcu(struct rcu_head *head, #include "tiny_plugin.h" +void rcu_barrier_bh(void) +{ + wait_rcu_gp(call_rcu_bh); +} +EXPORT_SYMBOL(rcu_barrier_bh); + +void rcu_barrier_sched(void) +{ + wait_rcu_gp(call_rcu_sched); +} +EXPORT_SYMBOL(rcu_barrier_sched); + #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) /* diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 
d80e0d2f68c675..50fee7689e7125 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -32,9 +32,10 @@ #include #include #include -#include +#include #include #include +#include #include #include #include @@ -49,6 +50,7 @@ #include #include #include +#include #include #include #include diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index b60f2b6caa1443..ec62a05bfdb3c8 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -24,6 +24,7 @@ #include #include +#include #include #include #include diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index a240f3308be61c..0a62a8f1caacfa 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -27,7 +27,9 @@ #include #include #include +#include #include +#include #include "../time/tick-internal.h" #ifdef CONFIG_RCU_BOOST diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c index 9e03db9ea9c09c..55c8530316c7ce 100644 --- a/kernel/rcu/update.c +++ b/kernel/rcu/update.c @@ -36,7 +36,8 @@ #include #include #include -#include +#include +#include #include #include #include @@ -49,6 +50,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS diff --git a/kernel/sched/autogroup.h b/kernel/sched/autogroup.h index 890c95f2587a4d..ce40c810cd5c34 100644 --- a/kernel/sched/autogroup.h +++ b/kernel/sched/autogroup.h @@ -2,6 +2,7 @@ #include #include +#include struct autogroup { /* diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c index ad64efe41722be..00a45c45beca09 100644 --- a/kernel/sched/clock.c +++ b/kernel/sched/clock.c @@ -58,6 +58,8 @@ #include #include #include +#include +#include #include #include #include @@ -94,10 +96,10 @@ static DEFINE_STATIC_KEY_FALSE(__sched_clock_stable); static int __sched_clock_stable_early = 1; /* - * We want: ktime_get_ns() + gtod_offset == sched_clock() + raw_offset + * We want: ktime_get_ns() + __gtod_offset == sched_clock() + __sched_clock_offset */ -static __read_mostly u64 raw_offset; -static __read_mostly u64 gtod_offset; +__read_mostly u64 __sched_clock_offset; +static __read_mostly u64 __gtod_offset; struct sched_clock_data { u64 tick_raw; @@ -129,17 +131,24 @@ static void __set_sched_clock_stable(void) /* * Attempt to make the (initial) unstable->stable transition continuous. */ - raw_offset = (scd->tick_gtod + gtod_offset) - (scd->tick_raw); + __sched_clock_offset = (scd->tick_gtod + __gtod_offset) - (scd->tick_raw); printk(KERN_INFO "sched_clock: Marking stable (%lld, %lld)->(%lld, %lld)\n", - scd->tick_gtod, gtod_offset, - scd->tick_raw, raw_offset); + scd->tick_gtod, __gtod_offset, + scd->tick_raw, __sched_clock_offset); static_branch_enable(&__sched_clock_stable); tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE); } -static void __clear_sched_clock_stable(struct work_struct *work) +static void __sched_clock_work(struct work_struct *work) +{ + static_branch_disable(&__sched_clock_stable); +} + +static DECLARE_WORK(sched_clock_work, __sched_clock_work); + +static void __clear_sched_clock_stable(void) { struct sched_clock_data *scd = this_scd(); @@ -152,17 +161,17 @@ static void __clear_sched_clock_stable(struct work_struct *work) * * Still do what we can. 
*/ - gtod_offset = (scd->tick_raw + raw_offset) - (scd->tick_gtod); + __gtod_offset = (scd->tick_raw + __sched_clock_offset) - (scd->tick_gtod); printk(KERN_INFO "sched_clock: Marking unstable (%lld, %lld)<-(%lld, %lld)\n", - scd->tick_gtod, gtod_offset, - scd->tick_raw, raw_offset); + scd->tick_gtod, __gtod_offset, + scd->tick_raw, __sched_clock_offset); - static_branch_disable(&__sched_clock_stable); tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE); -} -static DECLARE_WORK(sched_clock_work, __clear_sched_clock_stable); + if (sched_clock_stable()) + schedule_work(&sched_clock_work); +} void clear_sched_clock_stable(void) { @@ -171,7 +180,7 @@ void clear_sched_clock_stable(void) smp_mb(); /* matches sched_clock_init_late() */ if (sched_clock_running == 2) - schedule_work(&sched_clock_work); + __clear_sched_clock_stable(); } void sched_clock_init_late(void) @@ -212,7 +221,7 @@ static inline u64 wrap_max(u64 x, u64 y) */ static u64 sched_clock_local(struct sched_clock_data *scd) { - u64 now, clock, old_clock, min_clock, max_clock; + u64 now, clock, old_clock, min_clock, max_clock, gtod; s64 delta; again: @@ -229,9 +238,10 @@ static u64 sched_clock_local(struct sched_clock_data *scd) * scd->tick_gtod + TICK_NSEC); */ - clock = scd->tick_gtod + gtod_offset + delta; - min_clock = wrap_max(scd->tick_gtod, old_clock); - max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC); + gtod = scd->tick_gtod + __gtod_offset; + clock = gtod + delta; + min_clock = wrap_max(gtod, old_clock); + max_clock = wrap_max(old_clock, gtod + TICK_NSEC); clock = wrap_max(clock, min_clock); clock = wrap_min(clock, max_clock); @@ -315,7 +325,7 @@ u64 sched_clock_cpu(int cpu) u64 clock; if (sched_clock_stable()) - return sched_clock() + raw_offset; + return sched_clock() + __sched_clock_offset; if (unlikely(!sched_clock_running)) return 0ull; diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c index f063a25d44493f..53f9558fa925f3 100644 --- a/kernel/sched/completion.c +++ b/kernel/sched/completion.c @@ -11,7 +11,8 @@ * Waiting for completion is a typically sync point, but not an exclusion point. */ -#include +#include +#include #include /** diff --git a/kernel/sched/core.c b/kernel/sched/core.c index bbfb917a9b4998..3b31fc05a0f1e4 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6,10 +6,15 @@ * Copyright (C) 1991-2002 Linus Torvalds */ #include +#include +#include +#include +#include #include #include #include #include +#include #include #include @@ -981,7 +986,7 @@ static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_ return rq; /* Affinity changed (again). 
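The sched_clock hunks above maintain the invariant ktime_get_ns() + __gtod_offset == sched_clock() + __sched_clock_offset, recomputing one offset from the other at each stable/unstable transition so the reported clock does not jump when the stability state flips. A stand-alone arithmetic check with made-up sample values (user-space model, not kernel code):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* made-up snapshot of the two clocks at a tick */
	unsigned long long tick_raw = 1000;   /* sched_clock() at the tick  */
	unsigned long long tick_gtod = 5000;  /* ktime_get_ns() at the tick */
	unsigned long long gtod_offset = 0;
	unsigned long long clock_offset;

	/* unstable -> stable: pick __sched_clock_offset so both sides agree */
	clock_offset = (tick_gtod + gtod_offset) - tick_raw;
	assert(tick_gtod + gtod_offset == tick_raw + clock_offset);

	/* stable -> unstable: recompute __gtod_offset from the same identity */
	gtod_offset = (tick_raw + clock_offset) - tick_gtod;
	assert(tick_gtod + gtod_offset == tick_raw + clock_offset);

	printf("gtod_offset=%llu clock_offset=%llu\n", gtod_offset, clock_offset);
	return 0;
}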
*/ - if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p))) + if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) return rq; rq = move_queued_task(rq, p, dest_cpu); @@ -1259,10 +1264,10 @@ static int migrate_swap_stop(void *data) if (task_cpu(arg->src_task) != arg->src_cpu) goto unlock; - if (!cpumask_test_cpu(arg->dst_cpu, tsk_cpus_allowed(arg->src_task))) + if (!cpumask_test_cpu(arg->dst_cpu, &arg->src_task->cpus_allowed)) goto unlock; - if (!cpumask_test_cpu(arg->src_cpu, tsk_cpus_allowed(arg->dst_task))) + if (!cpumask_test_cpu(arg->src_cpu, &arg->dst_task->cpus_allowed)) goto unlock; __migrate_swap_task(arg->src_task, arg->dst_cpu); @@ -1303,10 +1308,10 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p) if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu)) goto out; - if (!cpumask_test_cpu(arg.dst_cpu, tsk_cpus_allowed(arg.src_task))) + if (!cpumask_test_cpu(arg.dst_cpu, &arg.src_task->cpus_allowed)) goto out; - if (!cpumask_test_cpu(arg.src_cpu, tsk_cpus_allowed(arg.dst_task))) + if (!cpumask_test_cpu(arg.src_cpu, &arg.dst_task->cpus_allowed)) goto out; trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu); @@ -1490,14 +1495,14 @@ static int select_fallback_rq(int cpu, struct task_struct *p) for_each_cpu(dest_cpu, nodemask) { if (!cpu_active(dest_cpu)) continue; - if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p))) + if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) return dest_cpu; } } for (;;) { /* Any allowed, online CPU? */ - for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) { + for_each_cpu(dest_cpu, &p->cpus_allowed) { if (!(p->flags & PF_KTHREAD) && !cpu_active(dest_cpu)) continue; if (!cpu_online(dest_cpu)) @@ -1549,10 +1554,10 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) { lockdep_assert_held(&p->pi_lock); - if (tsk_nr_cpus_allowed(p) > 1) + if (p->nr_cpus_allowed > 1) cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags); else - cpu = cpumask_any(tsk_cpus_allowed(p)); + cpu = cpumask_any(&p->cpus_allowed); /* * In order not to call set_task_cpu() on a blocking task we need @@ -1564,7 +1569,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) * [ this allows ->select_task() to simply return task_cpu(p) and * not worry about this generic constraint ] */ - if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) || + if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) || !cpu_online(cpu))) cpu = select_fallback_rq(task_cpu(p), p); @@ -3211,6 +3216,15 @@ static inline void preempt_latency_start(int val) { } static inline void preempt_latency_stop(int val) { } #endif +static inline unsigned long get_preempt_disable_ip(struct task_struct *p) +{ +#ifdef CONFIG_DEBUG_PREEMPT + return p->preempt_disable_ip; +#else + return 0; +#endif +} + /* * Print scheduling while atomic bug: */ @@ -3273,10 +3287,15 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) struct task_struct *p; /* - * Optimization: we know that if all tasks are in - * the fair class we can call that function directly: + * Optimization: we know that if all tasks are in the fair class we can + * call that function directly, but only if the @prev task wasn't of a + * higher scheduling class, because otherwise those loose the + * opportunity to pull in more work from other CPUs. 
*/ - if (likely(rq->nr_running == rq->cfs.h_nr_running)) { + if (likely((prev->sched_class == &idle_sched_class || + prev->sched_class == &fair_sched_class) && + rq->nr_running == rq->cfs.h_nr_running)) { + p = fair_sched_class.pick_next_task(rq, prev, rf); if (unlikely(p == RETRY_TASK)) goto again; @@ -5233,6 +5252,9 @@ void sched_show_task(struct task_struct *p) int ppid; unsigned long state = p->state; + /* Make sure the string lines up properly with the number of task states: */ + BUILD_BUG_ON(sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1); + if (!try_get_task_stack(p)) return; if (state) @@ -5461,7 +5483,7 @@ int migrate_task_to(struct task_struct *p, int target_cpu) if (curr_cpu == target_cpu) return 0; - if (!cpumask_test_cpu(target_cpu, tsk_cpus_allowed(p))) + if (!cpumask_test_cpu(target_cpu, &p->cpus_allowed)) return -EINVAL; /* TODO: This is not properly updating schedstats */ diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c index e73119013c5318..fba235c7d02679 100644 --- a/kernel/sched/cpudeadline.c +++ b/kernel/sched/cpudeadline.c @@ -128,10 +128,10 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p, const struct sched_dl_entity *dl_se = &p->dl; if (later_mask && - cpumask_and(later_mask, cp->free_cpus, tsk_cpus_allowed(p))) { + cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed)) { best_cpu = cpumask_any(later_mask); goto out; - } else if (cpumask_test_cpu(cpudl_maximum(cp), tsk_cpus_allowed(p)) && + } else if (cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) && dl_time_before(dl_se->deadline, cp->elements[0].dl)) { best_cpu = cpudl_maximum(cp); if (later_mask) diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index fd465931364053..54c577578da689 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -13,6 +13,7 @@ #include #include +#include #include #include @@ -35,6 +36,7 @@ struct sugov_policy { u64 last_freq_update_time; s64 freq_update_delay_ns; unsigned int next_freq; + unsigned int cached_raw_freq; /* The next fields are only needed if fast switch cannot be used. */ struct irq_work irq_work; @@ -51,7 +53,6 @@ struct sugov_cpu { struct update_util_data update_util; struct sugov_policy *sg_policy; - unsigned int cached_raw_freq; unsigned long iowait_boost; unsigned long iowait_boost_max; u64 last_update; @@ -115,7 +116,7 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time, /** * get_next_freq - Compute a new frequency for a given cpufreq policy. - * @sg_cpu: schedutil cpu object to compute the new frequency for. + * @sg_policy: schedutil policy object to compute the new frequency for. * @util: Current CPU utilization. * @max: CPU capacity. * @@ -135,19 +136,18 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time, * next_freq (as calculated above) is returned, subject to policy min/max and * cpufreq driver limitations. */ -static unsigned int get_next_freq(struct sugov_cpu *sg_cpu, unsigned long util, - unsigned long max) +static unsigned int get_next_freq(struct sugov_policy *sg_policy, + unsigned long util, unsigned long max) { - struct sugov_policy *sg_policy = sg_cpu->sg_policy; struct cpufreq_policy *policy = sg_policy->policy; unsigned int freq = arch_scale_freq_invariant() ? 
policy->cpuinfo.max_freq : policy->cur; freq = (freq + (freq >> 2)) * util / max; - if (freq == sg_cpu->cached_raw_freq && sg_policy->next_freq != UINT_MAX) + if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX) return sg_policy->next_freq; - sg_cpu->cached_raw_freq = freq; + sg_policy->cached_raw_freq = freq; return cpufreq_driver_resolve_freq(policy, freq); } @@ -212,7 +212,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time, } else { sugov_get_util(&util, &max); sugov_iowait_boost(sg_cpu, &util, &max); - next_f = get_next_freq(sg_cpu, util, max); + next_f = get_next_freq(sg_policy, util, max); } sugov_update_commit(sg_policy, time, next_f); } @@ -266,7 +266,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, sugov_iowait_boost(j_sg_cpu, &util, &max); } - return get_next_freq(sg_cpu, util, max); + return get_next_freq(sg_policy, util, max); } static void sugov_update_shared(struct update_util_data *hook, u64 time, @@ -579,25 +579,19 @@ static int sugov_start(struct cpufreq_policy *policy) sg_policy->next_freq = UINT_MAX; sg_policy->work_in_progress = false; sg_policy->need_freq_update = false; + sg_policy->cached_raw_freq = 0; for_each_cpu(cpu, policy->cpus) { struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu); + memset(sg_cpu, 0, sizeof(*sg_cpu)); sg_cpu->sg_policy = sg_policy; - if (policy_is_shared(policy)) { - sg_cpu->util = 0; - sg_cpu->max = 0; - sg_cpu->flags = SCHED_CPUFREQ_RT; - sg_cpu->last_update = 0; - sg_cpu->cached_raw_freq = 0; - sg_cpu->iowait_boost = 0; - sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq; - cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, - sugov_update_shared); - } else { - cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, - sugov_update_single); - } + sg_cpu->flags = SCHED_CPUFREQ_RT; + sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq; + cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, + policy_is_shared(policy) ? 
+ sugov_update_shared : + sugov_update_single); } return 0; } diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c index 11e9705bf9378d..981fcd7dc394eb 100644 --- a/kernel/sched/cpupri.c +++ b/kernel/sched/cpupri.c @@ -103,11 +103,11 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p, if (skip) continue; - if (cpumask_any_and(tsk_cpus_allowed(p), vec->mask) >= nr_cpu_ids) + if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids) continue; if (lowest_mask) { - cpumask_and(lowest_mask, tsk_cpus_allowed(p), vec->mask); + cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask); /* * We have to ensure that we have at least one bit diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index 2ecec3a4f1eecc..f3778e2b46c8dc 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -4,12 +4,8 @@ #include #include #include -#include +#include #include "sched.h" -#ifdef CONFIG_PARAVIRT -#include -#endif - #ifdef CONFIG_IRQ_TIME_ACCOUNTING diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 27737f34757d38..a2ce59015642c3 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -134,7 +134,7 @@ static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) { struct task_struct *p = dl_task_of(dl_se); - if (tsk_nr_cpus_allowed(p) > 1) + if (p->nr_cpus_allowed > 1) dl_rq->dl_nr_migratory++; update_dl_migration(dl_rq); @@ -144,7 +144,7 @@ static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) { struct task_struct *p = dl_task_of(dl_se); - if (tsk_nr_cpus_allowed(p) > 1) + if (p->nr_cpus_allowed > 1) dl_rq->dl_nr_migratory--; update_dl_migration(dl_rq); @@ -252,7 +252,7 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p * If we cannot preempt any rq, fall back to pick any * online cpu. */ - cpu = cpumask_any_and(cpu_active_mask, tsk_cpus_allowed(p)); + cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed); if (cpu >= nr_cpu_ids) { /* * Fail to find any suitable cpu. @@ -445,13 +445,13 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se, * * This function returns true if: * - * runtime / (deadline - t) > dl_runtime / dl_period , + * runtime / (deadline - t) > dl_runtime / dl_deadline , * * IOW we can't recycle current parameters. * - * Notice that the bandwidth check is done against the period. For + * Notice that the bandwidth check is done against the deadline. For * task with deadline equal to period this is the same of using - * dl_deadline instead of dl_period in the equation above. + * dl_period instead of dl_deadline in the equation above. */ static bool dl_entity_overflow(struct sched_dl_entity *dl_se, struct sched_dl_entity *pi_se, u64 t) @@ -476,7 +476,7 @@ static bool dl_entity_overflow(struct sched_dl_entity *dl_se, * of anything below microseconds resolution is actually fiction * (but still we want to give the user that illusion >;). 
*/ - left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE); + left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE); right = ((dl_se->deadline - t) >> DL_SCALE) * (pi_se->dl_runtime >> DL_SCALE); @@ -505,10 +505,15 @@ static void update_dl_entity(struct sched_dl_entity *dl_se, } } +static inline u64 dl_next_period(struct sched_dl_entity *dl_se) +{ + return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period; +} + /* * If the entity depleted all its runtime, and if we want it to sleep * while waiting for some new execution time to become available, we - * set the bandwidth enforcement timer to the replenishment instant + * set the bandwidth replenishment timer to the replenishment instant * and try to activate it. * * Notice that it is important for the caller to know if the timer @@ -530,7 +535,7 @@ static int start_dl_timer(struct task_struct *p) * that it is actually coming from rq->clock and not from * hrtimer's time base reading. */ - act = ns_to_ktime(dl_se->deadline); + act = ns_to_ktime(dl_next_period(dl_se)); now = hrtimer_cb_get_time(timer); delta = ktime_to_ns(now) - rq_clock(rq); act = ktime_add_ns(act, delta); @@ -638,6 +643,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer) lockdep_unpin_lock(&rq->lock, rf.cookie); rq = dl_task_offline_migration(rq, p); rf.cookie = lockdep_pin_lock(&rq->lock); + update_rq_clock(rq); /* * Now that the task has been migrated to the new RQ and we @@ -689,6 +695,37 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se) timer->function = dl_task_timer; } +/* + * During the activation, CBS checks if it can reuse the current task's + * runtime and period. If the deadline of the task is in the past, CBS + * cannot use the runtime, and so it replenishes the task. This rule + * works fine for implicit deadline tasks (deadline == period), and the + * CBS was designed for implicit deadline tasks. However, a task with + * constrained deadline (deadline < period) might be awakened after the + * deadline, but before the next period. In this case, replenishing the + * task would allow it to run for runtime / deadline. As in this case + * deadline < period, CBS enables a task to run for more than the + * runtime / period. In a very loaded system, this can cause a domino + * effect, making other tasks miss their deadlines. + * + * To avoid this problem, in the activation of a constrained deadline + * task after the deadline but before the next period, throttle the + * task and set the replenishing timer to the beginning of the next period, + * unless it is boosted. 
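+ *
+ * As a purely illustrative example (the numbers here are chosen for
+ * this note, they are not part of the change): take a constrained
+ * task with runtime = 5 ms, deadline = 10 ms and period = 100 ms,
+ * whose current absolute deadline sits at t = 10 ms; dl_next_period()
+ * then points at t = 100 ms. If the task wakes at t = 12 ms and is
+ * replenished immediately, it can consume about 5 ms every 10 ms
+ * (runtime / deadline = 50% of a CPU), although it was admitted for
+ * 5 ms per 100 ms (runtime / period = 5%). Throttling it until the
+ * next period keeps it within the admitted bandwidth.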
+ */ +static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se) +{ + struct task_struct *p = dl_task_of(dl_se); + struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se)); + + if (dl_time_before(dl_se->deadline, rq_clock(rq)) && + dl_time_before(rq_clock(rq), dl_next_period(dl_se))) { + if (unlikely(dl_se->dl_boosted || !start_dl_timer(p))) + return; + dl_se->dl_throttled = 1; + } +} + static int dl_runtime_exceeded(struct sched_dl_entity *dl_se) { @@ -922,6 +959,11 @@ static void dequeue_dl_entity(struct sched_dl_entity *dl_se) __dequeue_dl_entity(dl_se); } +static inline bool dl_is_constrained(struct sched_dl_entity *dl_se) +{ + return dl_se->dl_deadline < dl_se->dl_period; +} + static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) { struct task_struct *pi_task = rt_mutex_get_top_task(p); @@ -947,6 +989,15 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) return; } + /* + * Check if a constrained deadline task was activated + * after the deadline but before the next period. + * If that is the case, the task will be throttled and + * the replenishment timer will be set to the next period. + */ + if (!p->dl.dl_throttled && dl_is_constrained(&p->dl)) + dl_check_constrained_dl(&p->dl); + /* * If p is throttled, we do nothing. In fact, if it exhausted * its budget it needs a replenishment and, since it now is on @@ -958,7 +1009,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) enqueue_dl_entity(&p->dl, pi_se, flags); - if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1) + if (!task_current(rq, p) && p->nr_cpus_allowed > 1) enqueue_pushable_dl_task(rq, p); } @@ -1032,9 +1083,9 @@ select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags) * try to make it stay here, it might be important. */ if (unlikely(dl_task(curr)) && - (tsk_nr_cpus_allowed(curr) < 2 || + (curr->nr_cpus_allowed < 2 || !dl_entity_preempt(&p->dl, &curr->dl)) && - (tsk_nr_cpus_allowed(p) > 1)) { + (p->nr_cpus_allowed > 1)) { int target = find_later_rq(p); if (target != -1 && @@ -1055,7 +1106,7 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p) * Current can't be migrated, useless to reschedule, * let's hope p can move out. */ - if (tsk_nr_cpus_allowed(rq->curr) == 1 || + if (rq->curr->nr_cpus_allowed == 1 || cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1) return; @@ -1063,7 +1114,7 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p) * p is migratable, so let's not schedule it and * see if it is pushed or pulled somewhere else. 
*/ - if (tsk_nr_cpus_allowed(p) != 1 && + if (p->nr_cpus_allowed != 1 && cpudl_find(&rq->rd->cpudl, p, NULL) != -1) return; @@ -1178,7 +1229,7 @@ static void put_prev_task_dl(struct rq *rq, struct task_struct *p) { update_curr_dl(rq); - if (on_dl_rq(&p->dl) && tsk_nr_cpus_allowed(p) > 1) + if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1) enqueue_pushable_dl_task(rq, p); } @@ -1235,7 +1286,7 @@ static void set_curr_task_dl(struct rq *rq) static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu) { if (!task_running(rq, p) && - cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) + cpumask_test_cpu(cpu, &p->cpus_allowed)) return 1; return 0; } @@ -1279,7 +1330,7 @@ static int find_later_rq(struct task_struct *task) if (unlikely(!later_mask)) return -1; - if (tsk_nr_cpus_allowed(task) == 1) + if (task->nr_cpus_allowed == 1) return -1; /* @@ -1384,8 +1435,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq) /* Retry if something changed. */ if (double_lock_balance(rq, later_rq)) { if (unlikely(task_rq(task) != rq || - !cpumask_test_cpu(later_rq->cpu, - tsk_cpus_allowed(task)) || + !cpumask_test_cpu(later_rq->cpu, &task->cpus_allowed) || task_running(rq, task) || !dl_task(task) || !task_on_rq_queued(task))) { @@ -1425,7 +1475,7 @@ static struct task_struct *pick_next_pushable_dl_task(struct rq *rq) BUG_ON(rq->cpu != task_cpu(p)); BUG_ON(task_current(rq, p)); - BUG_ON(tsk_nr_cpus_allowed(p) <= 1); + BUG_ON(p->nr_cpus_allowed <= 1); BUG_ON(!task_on_rq_queued(p)); BUG_ON(!dl_task(p)); @@ -1464,7 +1514,7 @@ static int push_dl_task(struct rq *rq) */ if (dl_task(rq->curr) && dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) && - tsk_nr_cpus_allowed(rq->curr) > 1) { + rq->curr->nr_cpus_allowed > 1) { resched_curr(rq); return 0; } @@ -1611,9 +1661,9 @@ static void task_woken_dl(struct rq *rq, struct task_struct *p) { if (!task_running(rq, p) && !test_tsk_need_resched(rq->curr) && - tsk_nr_cpus_allowed(p) > 1 && + p->nr_cpus_allowed > 1 && dl_task(rq->curr) && - (tsk_nr_cpus_allowed(rq->curr) < 2 || + (rq->curr->nr_cpus_allowed < 2 || !dl_entity_preempt(&p->dl, &rq->curr->dl))) { push_dl_tasks(rq); } @@ -1727,7 +1777,7 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p) if (rq->curr != p) { #ifdef CONFIG_SMP - if (tsk_nr_cpus_allowed(p) > 1 && rq->dl.overloaded) + if (p->nr_cpus_allowed > 1 && rq->dl.overloaded) queue_push_tasks(rq); #endif if (dl_task(rq->curr)) diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 109adc0e9cb990..38f019324f1aaf 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -11,7 +11,8 @@ */ #include -#include +#include +#include #include #include #include diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 274c747a01ce48..dea138964b9107 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -20,7 +20,9 @@ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra */ -#include +#include +#include + #include #include #include @@ -1551,7 +1553,7 @@ static void task_numa_compare(struct task_numa_env *env, */ if (cur) { /* Skip this swap candidate if cannot move to the source cpu */ - if (!cpumask_test_cpu(env->src_cpu, tsk_cpus_allowed(cur))) + if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed)) goto unlock; /* @@ -1661,7 +1663,7 @@ static void task_numa_find_cpu(struct task_numa_env *env, for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) { /* Skip this CPU if the source task cannot migrate */ - if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(env->p))) + if (!cpumask_test_cpu(cpu, 
&env->p->cpus_allowed)) continue; env->dst_cpu = cpu; @@ -5458,7 +5460,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, /* Skip over this group if it has no CPUs allowed */ if (!cpumask_intersects(sched_group_cpus(group), - tsk_cpus_allowed(p))) + &p->cpus_allowed)) continue; local_group = cpumask_test_cpu(this_cpu, @@ -5578,7 +5580,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) return cpumask_first(sched_group_cpus(group)); /* Traverse only the allowed CPUs */ - for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) { + for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) { if (idle_cpu(i)) { struct rq *rq = cpu_rq(i); struct cpuidle_state *idle = idle_get_state(rq); @@ -5717,7 +5719,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int if (!test_idle_cores(target, false)) return -1; - cpumask_and(cpus, sched_domain_span(sd), tsk_cpus_allowed(p)); + cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed); for_each_cpu_wrap(core, cpus, target, wrap) { bool idle = true; @@ -5751,7 +5753,7 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t return -1; for_each_cpu(cpu, cpu_smt_mask(target)) { - if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) + if (!cpumask_test_cpu(cpu, &p->cpus_allowed)) continue; if (idle_cpu(cpu)) return cpu; @@ -5797,13 +5799,13 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t * Due to large variance we need a large fuzz factor; hackbench in * particularly is sensitive here. */ - if ((avg_idle / 512) < avg_cost) + if (sched_feat(SIS_AVG_CPU) && (avg_idle / 512) < avg_cost) return -1; time = local_clock(); for_each_cpu_wrap(cpu, sched_domain_span(sd), target, wrap) { - if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) + if (!cpumask_test_cpu(cpu, &p->cpus_allowed)) continue; if (idle_cpu(cpu)) break; @@ -5958,7 +5960,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f if (sd_flag & SD_BALANCE_WAKE) { record_wakee(p); want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu) - && cpumask_test_cpu(cpu, tsk_cpus_allowed(p)); + && cpumask_test_cpu(cpu, &p->cpus_allowed); } rcu_read_lock(); @@ -6698,7 +6700,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu)) return 0; - if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) { + if (!cpumask_test_cpu(env->dst_cpu, &p->cpus_allowed)) { int cpu; schedstat_inc(p->se.statistics.nr_failed_migrations_affine); @@ -6718,7 +6720,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) /* Prevent to re-select dst_cpu via env's cpus */ for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) { - if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) { + if (cpumask_test_cpu(cpu, &p->cpus_allowed)) { env->flags |= LBF_DST_PINNED; env->new_dst_cpu = cpu; break; @@ -7252,7 +7254,7 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd) /* * Group imbalance indicates (and tries to solve) the problem where balancing - * groups is inadequate due to tsk_cpus_allowed() constraints. + * groups is inadequate due to ->cpus_allowed constraints. * * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a * cpumask covering 1 cpu of the first group and 3 cpus of the second group. 
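The group-imbalance comment above describes an affinity pattern that defeats balancing on group weights alone. Purely as an illustration (the CPU numbering and the program below are hypothetical and not part of the patch; they assume CPUs 0-3 form the first group and CPUs 4-7 the second), a task can be given exactly that constraint from userspace:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);	/* one CPU of the first group */
	CPU_SET(4, &set);	/* three CPUs of the second group */
	CPU_SET(5, &set);
	CPU_SET(6, &set);

	/* After this call, p->cpus_allowed spans both groups unevenly. */
	if (sched_setaffinity(0, sizeof(set), &set))
		perror("sched_setaffinity");

	pause();		/* keep the task around for observation */
	return 0;
}

Running four copies of such a task reproduces the situation the comment sketches: the load cannot be spread evenly across the two groups, which is what the group-imbalance handling is meant to detect.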
@@ -8211,8 +8213,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, * if the curr task on busiest cpu can't be * moved to this_cpu */ - if (!cpumask_test_cpu(this_cpu, - tsk_cpus_allowed(busiest->curr))) { + if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) { raw_spin_unlock_irqrestore(&busiest->lock, flags); env.flags |= LBF_ALL_PINNED; diff --git a/kernel/sched/features.h b/kernel/sched/features.h index 69631fa46c2f84..1b3c8189b28656 100644 --- a/kernel/sched/features.h +++ b/kernel/sched/features.h @@ -51,6 +51,11 @@ SCHED_FEAT(NONTASK_CAPACITY, true) */ SCHED_FEAT(TTWU_QUEUE, true) +/* + * When doing wakeups, attempt to limit superfluous scans of the LLC domain. + */ +SCHED_FEAT(SIS_AVG_CPU, false) + #ifdef HAVE_RT_PUSH_IPI /* * In order to avoid a thundering herd attack of CPUs that are diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index 6a4bae0a649d9a..ac6d5176463dca 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -2,6 +2,7 @@ * Generic entry point for the idle threads */ #include +#include #include #include #include diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c index a2d6eb71f06b80..f15fb2bdbc0dee 100644 --- a/kernel/sched/loadavg.c +++ b/kernel/sched/loadavg.c @@ -7,6 +7,7 @@ */ #include +#include #include "sched.h" @@ -168,7 +169,7 @@ static inline int calc_load_write_idx(void) * If the folding window started, make sure we start writing in the * next idle-delta. */ - if (!time_before(jiffies, calc_load_update)) + if (!time_before(jiffies, READ_ONCE(calc_load_update))) idx++; return idx & 1; @@ -201,8 +202,9 @@ void calc_load_exit_idle(void) struct rq *this_rq = this_rq(); /* - * If we're still before the sample window, we're done. + * If we're still before the pending sample window, we're done. */ + this_rq->calc_load_update = READ_ONCE(calc_load_update); if (time_before(jiffies, this_rq->calc_load_update)) return; @@ -211,7 +213,6 @@ void calc_load_exit_idle(void) * accounted through the nohz accounting, so skip the entire deal and * sync up for the next window. 
*/ - this_rq->calc_load_update = calc_load_update; if (time_before(jiffies, this_rq->calc_load_update + 10)) this_rq->calc_load_update += LOAD_FREQ; } @@ -307,13 +308,15 @@ calc_load_n(unsigned long load, unsigned long exp, */ static void calc_global_nohz(void) { + unsigned long sample_window; long delta, active, n; - if (!time_before(jiffies, calc_load_update + 10)) { + sample_window = READ_ONCE(calc_load_update); + if (!time_before(jiffies, sample_window + 10)) { /* * Catch-up, fold however many we are behind still */ - delta = jiffies - calc_load_update - 10; + delta = jiffies - sample_window - 10; n = 1 + (delta / LOAD_FREQ); active = atomic_long_read(&calc_load_tasks); @@ -323,7 +326,7 @@ static void calc_global_nohz(void) avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n); avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n); - calc_load_update += n * LOAD_FREQ; + WRITE_ONCE(calc_load_update, sample_window + n * LOAD_FREQ); } /* @@ -351,9 +354,11 @@ static inline void calc_global_nohz(void) { } */ void calc_global_load(unsigned long ticks) { + unsigned long sample_window; long active, delta; - if (time_before(jiffies, calc_load_update + 10)) + sample_window = READ_ONCE(calc_load_update); + if (time_before(jiffies, sample_window + 10)) return; /* @@ -370,7 +375,7 @@ void calc_global_load(unsigned long ticks) avenrun[1] = calc_load(avenrun[1], EXP_5, active); avenrun[2] = calc_load(avenrun[2], EXP_15, active); - calc_load_update += LOAD_FREQ; + WRITE_ONCE(calc_load_update, sample_window + LOAD_FREQ); /* * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk. diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index e8836cfc4cdbee..9f3e40226dec87 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -335,7 +335,7 @@ static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) rt_rq = &rq_of_rt_rq(rt_rq)->rt; rt_rq->rt_nr_total++; - if (tsk_nr_cpus_allowed(p) > 1) + if (p->nr_cpus_allowed > 1) rt_rq->rt_nr_migratory++; update_rt_migration(rt_rq); @@ -352,7 +352,7 @@ static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) rt_rq = &rq_of_rt_rq(rt_rq)->rt; rt_rq->rt_nr_total--; - if (tsk_nr_cpus_allowed(p) > 1) + if (p->nr_cpus_allowed > 1) rt_rq->rt_nr_migratory--; update_rt_migration(rt_rq); @@ -1324,7 +1324,7 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags) enqueue_rt_entity(rt_se, flags); - if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1) + if (!task_current(rq, p) && p->nr_cpus_allowed > 1) enqueue_pushable_task(rq, p); } @@ -1413,7 +1413,7 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags) * will have to sort it out. */ if (curr && unlikely(rt_task(curr)) && - (tsk_nr_cpus_allowed(curr) < 2 || + (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio)) { int target = find_lowest_rq(p); @@ -1437,7 +1437,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) * Current can't be migrated, useless to reschedule, * let's hope p can move out. */ - if (tsk_nr_cpus_allowed(rq->curr) == 1 || + if (rq->curr->nr_cpus_allowed == 1 || !cpupri_find(&rq->rd->cpupri, rq->curr, NULL)) return; @@ -1445,7 +1445,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) * p is migratable, so let's not schedule it and * see if it is pushed or pulled somewhere else. 
*/ - if (tsk_nr_cpus_allowed(p) != 1 + if (p->nr_cpus_allowed != 1 && cpupri_find(&rq->rd->cpupri, p, NULL)) return; @@ -1579,7 +1579,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p) * The previous task needs to be made eligible for pushing * if it is still active */ - if (on_rt_rq(&p->rt) && tsk_nr_cpus_allowed(p) > 1) + if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1) enqueue_pushable_task(rq, p); } @@ -1591,7 +1591,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p) static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) { if (!task_running(rq, p) && - cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) + cpumask_test_cpu(cpu, &p->cpus_allowed)) return 1; return 0; } @@ -1629,7 +1629,7 @@ static int find_lowest_rq(struct task_struct *task) if (unlikely(!lowest_mask)) return -1; - if (tsk_nr_cpus_allowed(task) == 1) + if (task->nr_cpus_allowed == 1) return -1; /* No other targets possible */ if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask)) @@ -1726,8 +1726,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) * Also make sure that it wasn't scheduled on its rq. */ if (unlikely(task_rq(task) != rq || - !cpumask_test_cpu(lowest_rq->cpu, - tsk_cpus_allowed(task)) || + !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_allowed) || task_running(rq, task) || !rt_task(task) || !task_on_rq_queued(task))) { @@ -1762,7 +1761,7 @@ static struct task_struct *pick_next_pushable_task(struct rq *rq) BUG_ON(rq->cpu != task_cpu(p)); BUG_ON(task_current(rq, p)); - BUG_ON(tsk_nr_cpus_allowed(p) <= 1); + BUG_ON(p->nr_cpus_allowed <= 1); BUG_ON(!task_on_rq_queued(p)); BUG_ON(!rt_task(p)); @@ -2122,9 +2121,9 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p) { if (!task_running(rq, p) && !test_tsk_need_resched(rq->curr) && - tsk_nr_cpus_allowed(p) > 1 && + p->nr_cpus_allowed > 1 && (dl_task(rq->curr) || rt_task(rq->curr)) && - (tsk_nr_cpus_allowed(rq->curr) < 2 || + (rq->curr->nr_cpus_allowed < 2 || rq->curr->prio <= p->prio)) push_rt_tasks(rq); } @@ -2197,7 +2196,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p) */ if (task_on_rq_queued(p) && rq->curr != p) { #ifdef CONFIG_SMP - if (tsk_nr_cpus_allowed(p) > 1 && rq->rt.overloaded) + if (p->nr_cpus_allowed > 1 && rq->rt.overloaded) queue_push_tasks(rq); #endif /* CONFIG_SMP */ if (p->prio < rq->curr->prio) diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 71b10a9b73cfe2..5cbf92214ad892 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1,9 +1,26 @@ #include +#include #include +#include #include -#include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include #include #include #include @@ -13,6 +30,10 @@ #include #include +#ifdef CONFIG_PARAVIRT +#include +#endif + #include "cpupri.h" #include "cpudeadline.h" #include "cpuacct.h" @@ -1817,7 +1838,6 @@ extern void print_rt_stats(struct seq_file *m, int cpu); extern void print_dl_stats(struct seq_file *m, int cpu); extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq); - #ifdef CONFIG_NUMA_BALANCING extern void show_numa_stats(struct task_struct *p, struct seq_file *m); diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h index bf0da0aa0a1443..d5710651043b62 100644 --- a/kernel/sched/stats.h +++ b/kernel/sched/stats.h @@ -164,114 +164,3 @@ sched_info_switch(struct rq *rq, #define sched_info_arrive(rq, next) do { } while (0) #define 
sched_info_switch(rq, t, next) do { } while (0) #endif /* CONFIG_SCHED_INFO */ - -/* - * The following are functions that support scheduler-internal time accounting. - * These functions are generally called at the timer tick. None of this depends - * on CONFIG_SCHEDSTATS. - */ - -/** - * get_running_cputimer - return &tsk->signal->cputimer if cputimer is running - * - * @tsk: Pointer to target task. - */ -#ifdef CONFIG_POSIX_TIMERS -static inline -struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk) -{ - struct thread_group_cputimer *cputimer = &tsk->signal->cputimer; - - /* Check if cputimer isn't running. This is accessed without locking. */ - if (!READ_ONCE(cputimer->running)) - return NULL; - - /* - * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime - * in __exit_signal(), we won't account to the signal struct further - * cputime consumed by that task, even though the task can still be - * ticking after __exit_signal(). - * - * In order to keep a consistent behaviour between thread group cputime - * and thread group cputimer accounting, lets also ignore the cputime - * elapsing after __exit_signal() in any thread group timer running. - * - * This makes sure that POSIX CPU clocks and timers are synchronized, so - * that a POSIX CPU timer won't expire while the corresponding POSIX CPU - * clock delta is behind the expiring timer value. - */ - if (unlikely(!tsk->sighand)) - return NULL; - - return cputimer; -} -#else -static inline -struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk) -{ - return NULL; -} -#endif - -/** - * account_group_user_time - Maintain utime for a thread group. - * - * @tsk: Pointer to task structure. - * @cputime: Time value by which to increment the utime field of the - * thread_group_cputime structure. - * - * If thread group time is being maintained, get the structure for the - * running CPU and update the utime field there. - */ -static inline void account_group_user_time(struct task_struct *tsk, - u64 cputime) -{ - struct thread_group_cputimer *cputimer = get_running_cputimer(tsk); - - if (!cputimer) - return; - - atomic64_add(cputime, &cputimer->cputime_atomic.utime); -} - -/** - * account_group_system_time - Maintain stime for a thread group. - * - * @tsk: Pointer to task structure. - * @cputime: Time value by which to increment the stime field of the - * thread_group_cputime structure. - * - * If thread group time is being maintained, get the structure for the - * running CPU and update the stime field there. - */ -static inline void account_group_system_time(struct task_struct *tsk, - u64 cputime) -{ - struct thread_group_cputimer *cputimer = get_running_cputimer(tsk); - - if (!cputimer) - return; - - atomic64_add(cputime, &cputimer->cputime_atomic.stime); -} - -/** - * account_group_exec_runtime - Maintain exec runtime for a thread group. - * - * @tsk: Pointer to task structure. - * @ns: Time value by which to increment the sum_exec_runtime field - * of the thread_group_cputime structure. - * - * If thread group time is being maintained, get the structure for the - * running CPU and update the sum_exec_runtime field there. 
- */ -static inline void account_group_exec_runtime(struct task_struct *tsk, - unsigned long long ns) -{ - struct thread_group_cputimer *cputimer = get_running_cputimer(tsk); - - if (!cputimer) - return; - - atomic64_add(ns, &cputimer->cputime_atomic.sum_exec_runtime); -} diff --git a/kernel/sched/swait.c b/kernel/sched/swait.c index 82f0dff90030fc..3d5610dcce114d 100644 --- a/kernel/sched/swait.c +++ b/kernel/sched/swait.c @@ -1,4 +1,4 @@ -#include +#include #include void __init_swait_queue_head(struct swait_queue_head *q, const char *name, diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c index 9453efe9b25a64..b8c84c6dee64bd 100644 --- a/kernel/sched/wait.c +++ b/kernel/sched/wait.c @@ -5,7 +5,8 @@ */ #include #include -#include +#include +#include #include #include #include @@ -241,6 +242,45 @@ long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state) } EXPORT_SYMBOL(prepare_to_wait_event); +/* + * Note! These two wait functions are entered with the + * wait-queue lock held (and interrupts off in the _irq + * case), so there is no race with testing the wakeup + * condition in the caller before they add the wait + * entry to the wake queue. + */ +int do_wait_intr(wait_queue_head_t *wq, wait_queue_t *wait) +{ + if (likely(list_empty(&wait->task_list))) + __add_wait_queue_tail(wq, wait); + + set_current_state(TASK_INTERRUPTIBLE); + if (signal_pending(current)) + return -ERESTARTSYS; + + spin_unlock(&wq->lock); + schedule(); + spin_lock(&wq->lock); + return 0; +} +EXPORT_SYMBOL(do_wait_intr); + +int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_t *wait) +{ + if (likely(list_empty(&wait->task_list))) + __add_wait_queue_tail(wq, wait); + + set_current_state(TASK_INTERRUPTIBLE); + if (signal_pending(current)) + return -ERESTARTSYS; + + spin_unlock_irq(&wq->lock); + schedule(); + spin_lock_irq(&wq->lock); + return 0; +} +EXPORT_SYMBOL(do_wait_intr_irq); + /** * finish_wait - clean up after waiting in a queue * @q: waitqueue waited on diff --git a/kernel/seccomp.c b/kernel/seccomp.c index e15185c28de564..65f61077ad50d9 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include diff --git a/kernel/signal.c b/kernel/signal.c index 214a8feeb77124..7e59ebc2c25e66 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -13,7 +13,12 @@ #include #include #include -#include +#include +#include +#include +#include +#include +#include #include #include #include @@ -2395,11 +2400,11 @@ void exit_signals(struct task_struct *tsk) * @tsk is about to have PF_EXITING set - lock out users which * expect stable threadgroup. 
*/ - threadgroup_change_begin(tsk); + cgroup_threadgroup_change_begin(tsk); if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) { tsk->flags |= PF_EXITING; - threadgroup_change_end(tsk); + cgroup_threadgroup_change_end(tsk); return; } @@ -2410,7 +2415,7 @@ void exit_signals(struct task_struct *tsk) */ tsk->flags |= PF_EXITING; - threadgroup_change_end(tsk); + cgroup_threadgroup_change_end(tsk); if (!signal_pending(tsk)) goto out; diff --git a/kernel/smp.c b/kernel/smp.c index 77fcdb9f27756f..a817769b53c0e1 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include "smpboot.h" diff --git a/kernel/smpboot.c b/kernel/smpboot.c index 4a5c6e73ecd41e..1d71c051a9515c 100644 --- a/kernel/smpboot.c +++ b/kernel/smpboot.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include diff --git a/kernel/sys.c b/kernel/sys.c index b07adca97ea3d3..7ff6d1b10cecac 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -49,6 +49,13 @@ #include #include +#include +#include +#include +#include +#include +#include +#include #include #include #include diff --git a/kernel/sysctl.c b/kernel/sysctl.c index bb260ceb371847..acf0a5a06da7c0 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -63,6 +63,7 @@ #include #include #include +#include #include #include #include diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c index e6dc9a538efa21..ce3a31e8eb3687 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c @@ -19,6 +19,8 @@ #include #include #include +#include +#include #include #include #include diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 8e11d8d9f419e2..ec08f527d7ee91 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -43,10 +43,12 @@ #include #include #include -#include +#include #include #include #include +#include +#include #include #include diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c index a95f13c314645f..087d6a1279b833 100644 --- a/kernel/time/itimer.c +++ b/kernel/time/itimer.c @@ -10,6 +10,8 @@ #include #include #include +#include +#include #include #include #include diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c index 7906b3f0c41a1a..497719127bf9f6 100644 --- a/kernel/time/jiffies.c +++ b/kernel/time/jiffies.c @@ -125,7 +125,7 @@ int register_refined_jiffies(long cycles_per_second) shift_hz += cycles_per_tick/2; do_div(shift_hz, cycles_per_tick); /* Calculate nsec_per_tick using shift_hz */ - nsec_per_tick = (u64)TICK_NSEC << 8; + nsec_per_tick = (u64)NSEC_PER_SEC << 8; nsec_per_tick += (u32)shift_hz/2; do_div(nsec_per_tick, (u32)shift_hz); diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index b4377a5e426948..4513ad16a253f6 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c @@ -2,7 +2,8 @@ * Implement CPU time clocks for the POSIX clock interface. 
*/ -#include +#include +#include #include #include #include diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c index 1e6623d7675019..50a6a47020dea9 100644 --- a/kernel/time/posix-timers.c +++ b/kernel/time/posix-timers.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c index a26036d37a3895..ea6b610c4c57c3 100644 --- a/kernel/time/sched_clock.c +++ b/kernel/time/sched_clock.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 2c115fdab39765..7fe53be860778b 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -17,8 +17,12 @@ #include #include #include +#include #include -#include +#include +#include +#include +#include #include #include #include diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 95b258dd75dbb1..5b63a2102c2907 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -14,7 +14,9 @@ #include #include #include +#include #include +#include #include #include #include diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 82a6bfa0c30789..1dc0256bfb6e1f 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -38,8 +38,10 @@ #include #include #include -#include +#include #include +#include +#include #include #include diff --git a/kernel/torture.c b/kernel/torture.c index 01a99976f072e5..55de96529287a2 100644 --- a/kernel/torture.c +++ b/kernel/torture.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index d5038005eb5dc0..d4a06e714645df 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -429,7 +429,7 @@ config BLK_DEV_IO_TRACE If unsure, say N. -config KPROBE_EVENT +config KPROBE_EVENTS depends on KPROBES depends on HAVE_REGS_AND_STACK_ACCESS_API bool "Enable kprobes-based dynamic events" @@ -447,7 +447,7 @@ config KPROBE_EVENT This option is also required by perf-probe subcommand of perf tools. If you want to use perf tools, this option is strongly recommended. 
-config UPROBE_EVENT +config UPROBE_EVENTS bool "Enable uprobes-based dynamic events" depends on ARCH_SUPPORTS_UPROBES depends on MMU @@ -466,7 +466,7 @@ config UPROBE_EVENT config BPF_EVENTS depends on BPF_SYSCALL - depends on (KPROBE_EVENT || UPROBE_EVENT) && PERF_EVENTS + depends on (KPROBE_EVENTS || UPROBE_EVENTS) && PERF_EVENTS bool default y help diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index e5798084554911..90f2701d92a7ee 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -57,7 +57,7 @@ obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o obj-$(CONFIG_EVENT_TRACING) += trace_events_trigger.o obj-$(CONFIG_HIST_TRIGGERS) += trace_events_hist.o obj-$(CONFIG_BPF_EVENTS) += bpf_trace.o -obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o +obj-$(CONFIG_KPROBE_EVENTS) += trace_kprobe.o obj-$(CONFIG_TRACEPOINTS) += power-traces.o ifeq ($(CONFIG_PM),y) obj-$(CONFIG_TRACEPOINTS) += rpm-traces.o @@ -66,7 +66,7 @@ ifeq ($(CONFIG_TRACING),y) obj-$(CONFIG_KGDB_KDB) += trace_kdb.o endif obj-$(CONFIG_PROBE_EVENTS) += trace_probe.o -obj-$(CONFIG_UPROBE_EVENT) += trace_uprobe.o +obj-$(CONFIG_UPROBE_EVENTS) += trace_uprobe.o obj-$(CONFIG_TRACEPOINT_BENCHMARK) += trace_benchmark.o diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 0c060932639140..b9691ee8f6c182 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -15,6 +15,7 @@ #include #include +#include #include #include #include @@ -4415,16 +4416,24 @@ static int __init set_graph_notrace_function(char *str) } __setup("ftrace_graph_notrace=", set_graph_notrace_function); +static int __init set_graph_max_depth_function(char *str) +{ + if (!str) + return 0; + fgraph_max_depth = simple_strtoul(str, NULL, 0); + return 1; +} +__setup("ftrace_graph_max_depth=", set_graph_max_depth_function); + static void __init set_ftrace_early_graph(char *buf, int enable) { int ret; char *func; struct ftrace_hash *hash; - if (enable) - hash = ftrace_graph_hash; - else - hash = ftrace_graph_notrace_hash; + hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); + if (WARN_ON(!hash)) + return; while (buf) { func = strsep(&buf, ","); @@ -4434,6 +4443,11 @@ static void __init set_ftrace_early_graph(char *buf, int enable) printk(KERN_DEBUG "ftrace: function %s not " "traceable\n", func); } + + if (enable) + ftrace_graph_hash = hash; + else + ftrace_graph_notrace_hash = hash; } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ @@ -5487,7 +5501,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip, * Normally the mcount trampoline will call the ops->func, but there * are times that it should not. For example, if the ops does not * have its own recursion protection, then it should call the - * ftrace_ops_recurs_func() instead. + * ftrace_ops_assist_func() instead. * * Returns the function that the trampoline should call for @ops. */ diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index a85739efcc304b..54e7a90db848df 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -4825,9 +4826,9 @@ static __init int test_ringbuffer(void) rb_data[cpu].cnt = cpu; rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu], "rbtester/%d", cpu); - if (WARN_ON(!rb_threads[cpu])) { + if (WARN_ON(IS_ERR(rb_threads[cpu]))) { pr_cont("FAILED\n"); - ret = -1; + ret = PTR_ERR(rb_threads[cpu]); goto out_free; } @@ -4837,9 +4838,9 @@ static __init int test_ringbuffer(void) /* Now create the rb hammer! 
*/ rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer"); - if (WARN_ON(!rb_hammer)) { + if (WARN_ON(IS_ERR(rb_hammer))) { pr_cont("FAILED\n"); - ret = -1; + ret = PTR_ERR(rb_hammer); goto out_free; } diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c index 6df9a83e20d7eb..c190a4d5013c5e 100644 --- a/kernel/trace/ring_buffer_benchmark.c +++ b/kernel/trace/ring_buffer_benchmark.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 707445ceb7efd4..f35109514a015c 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -4341,22 +4341,22 @@ static const char readme_msg[] = "\t\t\t traces\n" #endif #endif /* CONFIG_STACK_TRACER */ -#ifdef CONFIG_KPROBE_EVENT +#ifdef CONFIG_KPROBE_EVENTS " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n" "\t\t\t Write into this file to define/undefine new trace events.\n" #endif -#ifdef CONFIG_UPROBE_EVENT +#ifdef CONFIG_UPROBE_EVENTS " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n" "\t\t\t Write into this file to define/undefine new trace events.\n" #endif -#if defined(CONFIG_KPROBE_EVENT) || defined(CONFIG_UPROBE_EVENT) +#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) "\t accepts: event-definitions (one definition per line)\n" "\t Format: p|r[:[/]] []\n" "\t -:[/]\n" -#ifdef CONFIG_KPROBE_EVENT +#ifdef CONFIG_KPROBE_EVENTS "\t place: [:][+]|\n" #endif -#ifdef CONFIG_UPROBE_EVENT +#ifdef CONFIG_UPROBE_EVENTS "\t place: :\n" #endif "\t args: =fetcharg[:type]\n" diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c index 0f06532a755b71..5fdc779f411d83 100644 --- a/kernel/trace/trace_clock.c +++ b/kernel/trace/trace_clock.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index f3a960ed75a197..1c21d0e2a145a6 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -19,6 +19,7 @@ #include #include #include +#include #include "tracing_map.h" #include "trace.h" diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index 6721a1e89f39c7..f2ac9d44f6c4b1 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c @@ -22,6 +22,7 @@ #include #include #include +#include #include "trace.h" diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c index edfacd954e1bb5..21ea6ae77d93fd 100644 --- a/kernel/trace/trace_hwlat.c +++ b/kernel/trace/trace_hwlat.c @@ -44,6 +44,7 @@ #include #include #include +#include #include "trace.h" static struct trace_array *hwlat_trace; diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index eadd96ef772f78..5f688cc724f00a 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -20,6 +20,7 @@ #include #include +#include #include "trace_probe.h" diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index 070866c32eb9d6..02a4aeb22c4785 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -8,6 +8,8 @@ #include #include #include +#include +#include #include "trace_output.h" diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index 0c0ae54d44c616..903273c93e6167 100644 --- a/kernel/trace/trace_probe.h +++ b/kernel/trace/trace_probe.h @@ -248,7 +248,7 @@ ASSIGN_FETCH_FUNC(file_offset, ftype), \ #define FETCH_TYPE_STRING 0 #define 
FETCH_TYPE_STRSIZE 1 -#ifdef CONFIG_KPROBE_EVENT +#ifdef CONFIG_KPROBE_EVENTS struct symbol_cache; unsigned long update_symbol_cache(struct symbol_cache *sc); void free_symbol_cache(struct symbol_cache *sc); @@ -278,7 +278,7 @@ alloc_symbol_cache(const char *sym, long offset) { return NULL; } -#endif /* CONFIG_KPROBE_EVENT */ +#endif /* CONFIG_KPROBE_EVENTS */ struct probe_arg { struct fetch_param fetch; diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index b0f86ea77881ec..cb917cebae291b 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c @@ -1,5 +1,6 @@ /* Include in trace.c */ +#include #include #include #include diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 2a1abbaca10ec9..5fb1f2c87e6b84 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c @@ -2,6 +2,7 @@ * Copyright (C) 2008 Steven Rostedt * */ +#include #include #include #include @@ -64,7 +65,7 @@ void stack_trace_print(void) } /* - * When arch-specific code overides this function, the following + * When arch-specific code overrides this function, the following * data should be filled up, assuming stack_trace_max_lock is held to * prevent concurrent updates. * stack_trace_index[] diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index f4379e772171dc..a7581fec96818e 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -24,6 +24,7 @@ #include #include #include +#include #include "trace_probe.h" diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index 1f9a31f934a417..685c50ae63000a 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c @@ -24,7 +24,8 @@ #include #include #include -#include +#include +#include #include extern struct tracepoint * const __start___tracepoints_ptrs[]; diff --git a/kernel/tsacct.c b/kernel/tsacct.c index 5c21f053505655..370724b4539185 100644 --- a/kernel/tsacct.c +++ b/kernel/tsacct.c @@ -17,7 +17,9 @@ */ #include -#include +#include +#include +#include #include #include #include diff --git a/kernel/ucount.c b/kernel/ucount.c index 8a11fc0cb45912..b4eeee03934fe8 100644 --- a/kernel/ucount.c +++ b/kernel/ucount.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include @@ -143,7 +144,7 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid) new->ns = ns; new->uid = uid; - atomic_set(&new->count, 0); + new->count = 0; spin_lock_irq(&ucounts_lock); ucounts = find_ucounts(ns, uid, hashent); @@ -154,8 +155,10 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid) ucounts = new; } } - if (!atomic_add_unless(&ucounts->count, 1, INT_MAX)) + if (ucounts->count == INT_MAX) ucounts = NULL; + else + ucounts->count += 1; spin_unlock_irq(&ucounts_lock); return ucounts; } @@ -164,13 +167,15 @@ static void put_ucounts(struct ucounts *ucounts) { unsigned long flags; - if (atomic_dec_and_test(&ucounts->count)) { - spin_lock_irqsave(&ucounts_lock, flags); + spin_lock_irqsave(&ucounts_lock, flags); + ucounts->count -= 1; + if (!ucounts->count) hlist_del_init(&ucounts->node); - spin_unlock_irqrestore(&ucounts_lock, flags); + else + ucounts = NULL; + spin_unlock_irqrestore(&ucounts_lock, flags); - kfree(ucounts); - } + kfree(ucounts); } static inline bool atomic_inc_below(atomic_t *v, int u) diff --git a/kernel/uid16.c b/kernel/uid16.c index 71645ae9303a52..5c2dc5b2bf4fe8 100644 --- a/kernel/uid16.c +++ b/kernel/uid16.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include diff --git a/kernel/user.c 
b/kernel/user.c index b069ccbfb0b037..00281add65b251 100644 --- a/kernel/user.c +++ b/kernel/user.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index 86b7854fec8ee0..2f735cbe05e8ac 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include diff --git a/kernel/utsname.c b/kernel/utsname.c index 6976cd47dcf602..913fe4336d2b75 100644 --- a/kernel/utsname.c +++ b/kernel/utsname.c @@ -14,8 +14,10 @@ #include #include #include +#include #include #include +#include static struct ucounts *inc_uts_namespaces(struct user_namespace *ns) { diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c index c8eac43267e90d..233cd8fc691082 100644 --- a/kernel/utsname_sysctl.c +++ b/kernel/utsname_sysctl.c @@ -14,6 +14,7 @@ #include #include #include +#include #ifdef CONFIG_PROC_SYSCTL diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 63177be0159e94..03e0b69bb5bfd6 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -19,8 +19,11 @@ #include #include #include +#include #include #include +#include +#include #include #include diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c index b5de262a9eb98c..54a427d1f34454 100644 --- a/kernel/watchdog_hld.c +++ b/kernel/watchdog_hld.c @@ -13,6 +13,8 @@ #include #include +#include + #include #include diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 072cbc9b175dc1..c0168b7da1eaf2 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1507,6 +1507,7 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq, struct timer_list *timer = &dwork->timer; struct work_struct *work = &dwork->work; + WARN_ON_ONCE(!wq); WARN_ON_ONCE(timer->function != delayed_work_timer_fn || timer->data != (unsigned long)dwork); WARN_ON_ONCE(timer_pending(timer)); diff --git a/lib/bug.c b/lib/bug.c index bc3656e944d29b..06edbbef062322 100644 --- a/lib/bug.c +++ b/lib/bug.c @@ -45,6 +45,7 @@ #include #include #include +#include extern const struct bug_entry __start___bug_table[], __stop___bug_table[]; diff --git a/lib/debugobjects.c b/lib/debugobjects.c index 8c28cbd7e104b6..17afb043016133 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include diff --git a/lib/digsig.c b/lib/digsig.c index 55b8b2f41a9e0a..03d7c63837aecb 100644 --- a/lib/digsig.c +++ b/lib/digsig.c @@ -85,7 +85,7 @@ static int digsig_verify_rsa(struct key *key, struct pubkey_hdr *pkh; down_read(&key->sem); - ukp = user_key_payload(key); + ukp = user_key_payload_locked(key); if (ukp->datalen < sizeof(*pkh)) goto err1; diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 60c57ec936dbdc..b157b46cc9a69c 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -17,8 +17,10 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#include #include #include +#include #include #include #include diff --git a/lib/dump_stack.c b/lib/dump_stack.c index c30d07e99dba4c..625375e7f11f98 100644 --- a/lib/dump_stack.c +++ b/lib/dump_stack.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include diff --git a/lib/ioremap.c b/lib/ioremap.c index a3e14ce92a5684..4bb30206b9426f 100644 --- a/lib/ioremap.c +++ b/lib/ioremap.c @@ -14,6 +14,7 @@ #include #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP +static int __read_mostly ioremap_p4d_capable; static int __read_mostly ioremap_pud_capable; static int __read_mostly 
ioremap_pmd_capable; static int __read_mostly ioremap_huge_disabled; @@ -35,6 +36,11 @@ void __init ioremap_huge_init(void) } } +static inline int ioremap_p4d_enabled(void) +{ + return ioremap_p4d_capable; +} + static inline int ioremap_pud_enabled(void) { return ioremap_pud_capable; @@ -46,6 +52,7 @@ static inline int ioremap_pmd_enabled(void) } #else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ +static inline int ioremap_p4d_enabled(void) { return 0; } static inline int ioremap_pud_enabled(void) { return 0; } static inline int ioremap_pmd_enabled(void) { return 0; } #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ @@ -94,14 +101,14 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr, return 0; } -static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr, +static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot) { pud_t *pud; unsigned long next; phys_addr -= addr; - pud = pud_alloc(&init_mm, pgd, addr); + pud = pud_alloc(&init_mm, p4d, addr); if (!pud) return -ENOMEM; do { @@ -120,6 +127,32 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr, return 0; } +static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr, + unsigned long end, phys_addr_t phys_addr, pgprot_t prot) +{ + p4d_t *p4d; + unsigned long next; + + phys_addr -= addr; + p4d = p4d_alloc(&init_mm, pgd, addr); + if (!p4d) + return -ENOMEM; + do { + next = p4d_addr_end(addr, end); + + if (ioremap_p4d_enabled() && + ((next - addr) == P4D_SIZE) && + IS_ALIGNED(phys_addr + addr, P4D_SIZE)) { + if (p4d_set_huge(p4d, phys_addr + addr, prot)) + continue; + } + + if (ioremap_pud_range(p4d, addr, next, phys_addr + addr, prot)) + return -ENOMEM; + } while (p4d++, addr = next, addr != end); + return 0; +} + int ioremap_page_range(unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot) { @@ -135,7 +168,7 @@ int ioremap_page_range(unsigned long addr, pgd = pgd_offset_k(addr); do { next = pgd_addr_end(addr, end); - err = ioremap_pud_range(pgd, addr, next, phys_addr+addr, prot); + err = ioremap_p4d_range(pgd, addr, next, phys_addr+addr, prot); if (err) break; } while (pgd++, addr = next, addr != end); diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c index 391fd23976a2c0..9c7d89df40ed9b 100644 --- a/lib/is_single_threaded.c +++ b/lib/is_single_threaded.c @@ -9,8 +9,9 @@ * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ - -#include +#include +#include +#include /* * Returns true if the task does not share ->mm with another thread/process. diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c index 5f7999eacad5da..4e8a30d1c22ff5 100644 --- a/lib/nmi_backtrace.c +++ b/lib/nmi_backtrace.c @@ -17,6 +17,7 @@ #include #include #include +#include #ifdef arch_trigger_cpumask_backtrace /* For reliability, we're prepared to waste bits here. */ diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c index 6d40944960de77..6016f1deb1f5f7 100644 --- a/lib/percpu_ida.c +++ b/lib/percpu_ida.c @@ -14,6 +14,7 @@ * General Public License for more details. 
*/ +#include #include #include #include @@ -22,7 +23,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/lib/plist.c b/lib/plist.c index 3a30c53db06158..199408f91057d5 100644 --- a/lib/plist.c +++ b/lib/plist.c @@ -175,6 +175,7 @@ void plist_requeue(struct plist_node *node, struct plist_head *head) #ifdef CONFIG_DEBUG_PI_LIST #include +#include #include #include diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 5ed506d648c4e5..691a9ad48497b0 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -2129,8 +2129,8 @@ int ida_pre_get(struct ida *ida, gfp_t gfp) struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp); if (!bitmap) return 0; - bitmap = this_cpu_cmpxchg(ida_bitmap, NULL, bitmap); - kfree(bitmap); + if (this_cpu_cmpxchg(ida_bitmap, NULL, bitmap)) + kfree(bitmap); } return 1; diff --git a/lib/refcount.c b/lib/refcount.c index 1d33366189d10c..aa09ad3c30b0dc 100644 --- a/lib/refcount.c +++ b/lib/refcount.c @@ -58,7 +58,7 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r) val = old; } - WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n"); + WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n"); return true; } @@ -66,7 +66,7 @@ EXPORT_SYMBOL_GPL(refcount_add_not_zero); void refcount_add(unsigned int i, refcount_t *r) { - WARN(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n"); + WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n"); } EXPORT_SYMBOL_GPL(refcount_add); @@ -97,7 +97,7 @@ bool refcount_inc_not_zero(refcount_t *r) val = old; } - WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n"); + WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n"); return true; } @@ -111,7 +111,7 @@ EXPORT_SYMBOL_GPL(refcount_inc_not_zero); */ void refcount_inc(refcount_t *r) { - WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n"); + WARN_ONCE(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n"); } EXPORT_SYMBOL_GPL(refcount_inc); @@ -125,7 +125,7 @@ bool refcount_sub_and_test(unsigned int i, refcount_t *r) new = val - i; if (new > val) { - WARN(new > val, "refcount_t: underflow; use-after-free.\n"); + WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n"); return false; } @@ -164,7 +164,7 @@ EXPORT_SYMBOL_GPL(refcount_dec_and_test); void refcount_dec(refcount_t *r) { - WARN(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n"); + WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n"); } EXPORT_SYMBOL_GPL(refcount_dec); @@ -204,7 +204,7 @@ bool refcount_dec_not_one(refcount_t *r) new = val - 1; if (new > val) { - WARN(new > val, "refcount_t: underflow; use-after-free.\n"); + WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n"); return true; } diff --git a/lib/rhashtable.c b/lib/rhashtable.c index c5b9b9351cec8a..f8635fd5744259 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include diff --git a/lib/sbitmap.c b/lib/sbitmap.c index 55e11c4b2f3b8e..60e800e0b5a0d9 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -15,6 +15,7 @@ * along with this program. If not, see . 
*/ +#include #include #include #include diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c index 1afec32de6f21c..690d75b132fa7c 100644 --- a/lib/smp_processor_id.c +++ b/lib/smp_processor_id.c @@ -22,7 +22,7 @@ notrace static unsigned int check_preemption_disabled(const char *what1, * Kernel threads bound to a single CPU can safely use * smp_processor_id(): */ - if (cpumask_equal(tsk_cpus_allowed(current), cpumask_of(this_cpu))) + if (cpumask_equal(¤t->cpus_allowed, cpumask_of(this_cpu))) goto out; /* diff --git a/lib/syscall.c b/lib/syscall.c index 63239e097b13a8..2c6cd1b5c3ea86 100644 --- a/lib/syscall.c +++ b/lib/syscall.c @@ -1,5 +1,6 @@ #include #include +#include #include #include @@ -11,6 +12,7 @@ static int collect_syscall(struct task_struct *target, long *callno, if (!try_get_task_stack(target)) { /* Task has no stack, so the task isn't in a syscall. */ + *sp = *pc = 0; *callno = -1; return 0; } diff --git a/lib/test_kasan.c b/lib/test_kasan.c index 0b1d3140fbb877..a25c9763fce19f 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c @@ -20,6 +20,7 @@ #include #include #include +#include /* * Note: test functions are marked noinline so that their names appear in @@ -474,6 +475,12 @@ static noinline void __init use_after_scope_test(void) static int __init kmalloc_tests_init(void) { + /* + * Temporarily enable multi-shot mode. Otherwise, we'd only get a + * report for the first case. + */ + bool multishot = kasan_save_enable_multi_shot(); + kmalloc_oob_right(); kmalloc_oob_left(); kmalloc_node_oob_right(); @@ -499,6 +506,9 @@ static int __init kmalloc_tests_init(void) ksize_unpoisons_memory(); copy_user_test(); use_after_scope_test(); + + kasan_restore_multi_shot(multishot); + return -EAGAIN; } diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 6d861d090e9fc7..c6f2a37028c205 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -683,33 +683,26 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi) static void cgwb_bdi_destroy(struct backing_dev_info *bdi) { struct radix_tree_iter iter; - struct rb_node *rbn; void **slot; WARN_ON(test_bit(WB_registered, &bdi->wb.state)); spin_lock_irq(&cgwb_lock); - radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0) cgwb_kill(*slot); - - while ((rbn = rb_first(&bdi->cgwb_congested_tree))) { - struct bdi_writeback_congested *congested = - rb_entry(rbn, struct bdi_writeback_congested, rb_node); - - rb_erase(rbn, &bdi->cgwb_congested_tree); - congested->bdi = NULL; /* mark @congested unlinked */ - } - spin_unlock_irq(&cgwb_lock); /* - * All cgwb's and their congested states must be shutdown and - * released before returning. Drain the usage counter to wait for - * all cgwb's and cgwb_congested's ever created on @bdi. + * All cgwb's must be shutdown and released before returning. Drain + * the usage counter to wait for all cgwb's ever created on @bdi. */ atomic_dec(&bdi->usage_cnt); wait_event(cgwb_release_wait, !atomic_read(&bdi->usage_cnt)); + /* + * Grab back our reference so that we hold it when @bdi gets + * re-registered. 
+ */ + atomic_inc(&bdi->usage_cnt); } /** @@ -749,6 +742,21 @@ void wb_blkcg_offline(struct blkcg *blkcg) spin_unlock_irq(&cgwb_lock); } +static void cgwb_bdi_exit(struct backing_dev_info *bdi) +{ + struct rb_node *rbn; + + spin_lock_irq(&cgwb_lock); + while ((rbn = rb_first(&bdi->cgwb_congested_tree))) { + struct bdi_writeback_congested *congested = + rb_entry(rbn, struct bdi_writeback_congested, rb_node); + + rb_erase(rbn, &bdi->cgwb_congested_tree); + congested->bdi = NULL; /* mark @congested unlinked */ + } + spin_unlock_irq(&cgwb_lock); +} + #else /* CONFIG_CGROUP_WRITEBACK */ static int cgwb_bdi_init(struct backing_dev_info *bdi) @@ -769,7 +777,9 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi) return 0; } -static void cgwb_bdi_destroy(struct backing_dev_info *bdi) +static void cgwb_bdi_destroy(struct backing_dev_info *bdi) { } + +static void cgwb_bdi_exit(struct backing_dev_info *bdi) { wb_congested_put(bdi->wb_congested); } @@ -857,6 +867,8 @@ int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner) MINOR(owner->devt)); if (rc) return rc; + /* Leaking owner reference... */ + WARN_ON(bdi->owner); bdi->owner = owner; get_device(owner); return 0; @@ -898,6 +910,7 @@ static void bdi_exit(struct backing_dev_info *bdi) { WARN_ON_ONCE(bdi->dev); wb_exit(&bdi->wb); + cgwb_bdi_exit(bdi); } static void release_bdi(struct kref *ref) diff --git a/mm/compaction.c b/mm/compaction.c index 0fdfde016ee283..81e1eaa2a2cf1b 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include diff --git a/mm/filemap.c b/mm/filemap.c index 1944c631e3e660..1694623a628902 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include diff --git a/mm/gup.c b/mm/gup.c index 94fab8fa432b2f..04aa405350dce8 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -10,7 +10,7 @@ #include #include -#include +#include #include #include @@ -226,6 +226,7 @@ struct page *follow_page_mask(struct vm_area_struct *vma, unsigned int *page_mask) { pgd_t *pgd; + p4d_t *p4d; pud_t *pud; pmd_t *pmd; spinlock_t *ptl; @@ -243,8 +244,13 @@ struct page *follow_page_mask(struct vm_area_struct *vma, pgd = pgd_offset(mm, address); if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) return no_page_table(vma, flags); - - pud = pud_offset(pgd, address); + p4d = p4d_offset(pgd, address); + if (p4d_none(*p4d)) + return no_page_table(vma, flags); + BUILD_BUG_ON(p4d_huge(*p4d)); + if (unlikely(p4d_bad(*p4d))) + return no_page_table(vma, flags); + pud = pud_offset(p4d, address); if (pud_none(*pud)) return no_page_table(vma, flags); if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) { @@ -325,6 +331,7 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address, struct page **page) { pgd_t *pgd; + p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *pte; @@ -338,7 +345,9 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address, else pgd = pgd_offset_gate(mm, address); BUG_ON(pgd_none(*pgd)); - pud = pud_offset(pgd, address); + p4d = p4d_offset(pgd, address); + BUG_ON(p4d_none(*p4d)); + pud = pud_offset(p4d, address); BUG_ON(pud_none(*pud)); pmd = pmd_offset(pud, address); if (pmd_none(*pmd)) @@ -1400,13 +1409,13 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, return 1; } -static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end, +static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { 
unsigned long next; pud_t *pudp; - pudp = pud_offset(&pgd, addr); + pudp = pud_offset(&p4d, addr); do { pud_t pud = READ_ONCE(*pudp); @@ -1428,6 +1437,31 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end, return 1; } +static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end, + int write, struct page **pages, int *nr) +{ + unsigned long next; + p4d_t *p4dp; + + p4dp = p4d_offset(&pgd, addr); + do { + p4d_t p4d = READ_ONCE(*p4dp); + + next = p4d_addr_end(addr, end); + if (p4d_none(p4d)) + return 0; + BUILD_BUG_ON(p4d_huge(p4d)); + if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) { + if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr, + P4D_SHIFT, next, write, pages, nr)) + return 0; + } else if (!gup_pud_range(p4d, addr, next, write, pages, nr)) + return 0; + } while (p4dp++, addr = next, addr != end); + + return 1; +} + /* * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to * the regular GUP. It will only return non-negative values. @@ -1478,7 +1512,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr, PGDIR_SHIFT, next, write, pages, &nr)) break; - } else if (!gup_pud_range(pgd, addr, next, write, pages, &nr)) + } else if (!gup_p4d_range(pgd, addr, next, write, pages, &nr)) break; } while (pgdp++, addr = next, addr != end); local_irq_restore(flags); diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 71e3dede95b424..1ebc93e179f3ea 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -9,6 +9,8 @@ #include #include +#include +#include #include #include #include @@ -1826,7 +1828,7 @@ static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud, VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma); VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud)); - count_vm_event(THP_SPLIT_PMD); + count_vm_event(THP_SPLIT_PUD); pudp_huge_clear_flush_notify(vma, haddr, pud); } @@ -2046,6 +2048,7 @@ void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, bool freeze, struct page *page) { pgd_t *pgd; + p4d_t *p4d; pud_t *pud; pmd_t *pmd; @@ -2053,7 +2056,11 @@ void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, if (!pgd_present(*pgd)) return; - pud = pud_offset(pgd, address); + p4d = p4d_offset(pgd, address); + if (!p4d_present(*p4d)) + return; + + pud = pud_offset(p4d, address); if (!pud_present(*pud)) return; diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 2e0e8159ce8e06..e5828875f7bbd7 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -4402,7 +4403,9 @@ int hugetlb_reserve_pages(struct inode *inode, return 0; out_err: if (!vma || vma->vm_flags & VM_MAYSHARE) - region_abort(resv_map, from, to); + /* Don't call region_abort if region_chg failed */ + if (chg >= 0) + region_abort(resv_map, from, to); if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) kref_put(&resv_map->refs, resv_map_release); return ret; @@ -4554,7 +4557,8 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) { pgd_t *pgd = pgd_offset(mm, *addr); - pud_t *pud = pud_offset(pgd, *addr); + p4d_t *p4d = p4d_offset(pgd, *addr); + pud_t *pud = pud_offset(p4d, *addr); BUG_ON(page_count(virt_to_page(ptep)) == 0); if (page_count(virt_to_page(ptep)) == 1) @@ -4585,11 +4589,13 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz) { 
pgd_t *pgd; + p4d_t *p4d; pud_t *pud; pte_t *pte = NULL; pgd = pgd_offset(mm, addr); - pud = pud_alloc(mm, pgd, addr); + p4d = p4d_offset(pgd, addr); + pud = pud_alloc(mm, p4d, addr); if (pud) { if (sz == PUD_SIZE) { pte = (pte_t *)pud; @@ -4609,18 +4615,22 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) { pgd_t *pgd; + p4d_t *p4d; pud_t *pud; - pmd_t *pmd = NULL; + pmd_t *pmd; pgd = pgd_offset(mm, addr); - if (pgd_present(*pgd)) { - pud = pud_offset(pgd, addr); - if (pud_present(*pud)) { - if (pud_huge(*pud)) - return (pte_t *)pud; - pmd = pmd_offset(pud, addr); - } - } + if (!pgd_present(*pgd)) + return NULL; + p4d = p4d_offset(pgd, addr); + if (!p4d_present(*p4d)) + return NULL; + pud = pud_offset(p4d, addr); + if (!pud_present(*pud)) + return NULL; + if (pud_huge(*pud)) + return (pte_t *)pud; + pmd = pmd_offset(pud, addr); return (pte_t *) pmd; } @@ -4643,6 +4653,7 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address, { struct page *page = NULL; spinlock_t *ptl; + pte_t pte; retry: ptl = pmd_lockptr(mm, pmd); spin_lock(ptl); @@ -4652,12 +4663,13 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address, */ if (!pmd_huge(*pmd)) goto out; - if (pmd_present(*pmd)) { + pte = huge_ptep_get((pte_t *)pmd); + if (pte_present(pte)) { page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT); if (flags & FOLL_GET) get_page(page); } else { - if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) { + if (is_hugetlb_entry_migration(pte)) { spin_unlock(ptl); __migration_entry_wait(mm, (pte_t *)pmd, ptl); goto retry; diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index 25f0e6521f36c6..98b27195e38b07 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -39,6 +40,16 @@ #include "kasan.h" #include "../slab.h" +void kasan_enable_current(void) +{ + current->kasan_depth++; +} + +void kasan_disable_current(void) +{ + current->kasan_depth--; +} + /* * Poisons the shadow memory for 'size' bytes starting from 'addr'. * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE. 
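[Editor's illustration, not part of the patch.] The mm/kasan/kasan.c hunk above adds kasan_enable_current()/kasan_disable_current(), which bump a per-task depth counter, and later hunks add a one-shot/multi-shot report gate. As a reading aid only, here is a standalone userspace C sketch of how such a depth counter and flag pair can gate reporting; all names are invented for the sketch, it is not the kernel implementation, and it omits the atomic bit operations the kernel uses.

/*
 * Toy model of report gating: a depth counter suppresses reports while
 * non-zero, a multi-shot flag allows repeated reports, otherwise only
 * the first report is printed. Not kernel code; no atomicity.
 */
#include <stdbool.h>
#include <stdio.h>

static int report_depth;       /* models current->kasan_depth */
static bool multi_shot;        /* models the multi-shot flag */
static bool already_reported;  /* models the "reported once" flag */

static void report_disable(void) { report_depth++; }
static void report_enable(void)  { report_depth--; }

static bool report_allowed(void)
{
	if (report_depth)        /* reporting suppressed in this "task" */
		return false;
	if (multi_shot)          /* report every bad access */
		return true;
	if (already_reported)    /* one-shot: only the first report */
		return false;
	already_reported = true;
	return true;
}

int main(void)
{
	printf("first report allowed:  %d\n", report_allowed()); /* 1 */
	printf("second report allowed: %d\n", report_allowed()); /* 0 */

	multi_shot = true;
	printf("multi-shot report:     %d\n", report_allowed()); /* 1 */

	report_disable();
	printf("while disabled:        %d\n", report_allowed()); /* 0 */
	report_enable();
	return 0;
}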
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index 1c260e6b3b3c6a..dd2dea8eb0771a 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -96,11 +96,6 @@ static inline const void *kasan_shadow_to_mem(const void *shadow_addr) << KASAN_SHADOW_SCALE_SHIFT); } -static inline bool kasan_report_enabled(void) -{ - return !current->kasan_depth; -} - void kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip); void kasan_report_double_free(struct kmem_cache *cache, void *object, diff --git a/mm/kasan/kasan_init.c b/mm/kasan/kasan_init.c index 31238dad85fbc6..b96a5f773d8808 100644 --- a/mm/kasan/kasan_init.c +++ b/mm/kasan/kasan_init.c @@ -30,6 +30,9 @@ */ unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss; +#if CONFIG_PGTABLE_LEVELS > 4 +p4d_t kasan_zero_p4d[PTRS_PER_P4D] __page_aligned_bss; +#endif #if CONFIG_PGTABLE_LEVELS > 3 pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss; #endif @@ -82,10 +85,10 @@ static void __init zero_pmd_populate(pud_t *pud, unsigned long addr, } while (pmd++, addr = next, addr != end); } -static void __init zero_pud_populate(pgd_t *pgd, unsigned long addr, +static void __init zero_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long end) { - pud_t *pud = pud_offset(pgd, addr); + pud_t *pud = pud_offset(p4d, addr); unsigned long next; do { @@ -107,6 +110,23 @@ static void __init zero_pud_populate(pgd_t *pgd, unsigned long addr, } while (pud++, addr = next, addr != end); } +static void __init zero_p4d_populate(pgd_t *pgd, unsigned long addr, + unsigned long end) +{ + p4d_t *p4d = p4d_offset(pgd, addr); + unsigned long next; + + do { + next = p4d_addr_end(addr, end); + + if (p4d_none(*p4d)) { + p4d_populate(&init_mm, p4d, + early_alloc(PAGE_SIZE, NUMA_NO_NODE)); + } + zero_pud_populate(p4d, addr, next); + } while (p4d++, addr = next, addr != end); +} + /** * kasan_populate_zero_shadow - populate shadow memory region with * kasan_zero_page @@ -125,6 +145,7 @@ void __init kasan_populate_zero_shadow(const void *shadow_start, next = pgd_addr_end(addr, end); if (IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) { + p4d_t *p4d; pud_t *pud; pmd_t *pmd; @@ -135,9 +156,22 @@ void __init kasan_populate_zero_shadow(const void *shadow_start, * 3,2 - level page tables where we don't have * puds,pmds, so pgd_populate(), pud_populate() * is noops. + * + * The ifndef is required to avoid build breakage. + * + * With 5level-fixup.h, pgd_populate() is not nop and + * we reference kasan_zero_p4d. It's not defined + * unless 5-level paging enabled. + * + * The ifndef can be dropped once all KASAN-enabled + * architectures will switch to pgtable-nop4d.h. 
*/ - pgd_populate(&init_mm, pgd, lm_alias(kasan_zero_pud)); - pud = pud_offset(pgd, addr); +#ifndef __ARCH_HAS_5LEVEL_HACK + pgd_populate(&init_mm, pgd, lm_alias(kasan_zero_p4d)); +#endif + p4d = p4d_offset(pgd, addr); + p4d_populate(&init_mm, p4d, lm_alias(kasan_zero_pud)); + pud = pud_offset(p4d, addr); pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd)); pmd = pmd_offset(pud, addr); pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte)); @@ -148,6 +182,6 @@ void __init kasan_populate_zero_shadow(const void *shadow_start, pgd_populate(&init_mm, pgd, early_alloc(PAGE_SIZE, NUMA_NO_NODE)); } - zero_pud_populate(pgd, addr, next); + zero_p4d_populate(pgd, addr, next); } while (pgd++, addr = next, addr != end); } diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c index 6f1ed163087369..3a8ddf8baf7dc3 100644 --- a/mm/kasan/quarantine.c +++ b/mm/kasan/quarantine.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -103,6 +104,7 @@ static int quarantine_tail; /* Total size of all objects in global_quarantine across all batches. */ static unsigned long quarantine_size; static DEFINE_SPINLOCK(quarantine_lock); +DEFINE_STATIC_SRCU(remove_cache_srcu); /* Maximum size of the global queue. */ static unsigned long quarantine_max_size; @@ -173,17 +175,22 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache) struct qlist_head *q; struct qlist_head temp = QLIST_INIT; + /* + * Note: irq must be disabled until after we move the batch to the + * global quarantine. Otherwise quarantine_remove_cache() can miss + * some objects belonging to the cache if they are in our local temp + * list. quarantine_remove_cache() executes on_each_cpu() at the + * beginning which ensures that it either sees the objects in per-cpu + * lists or in the global quarantine. + */ local_irq_save(flags); q = this_cpu_ptr(&cpu_quarantine); qlist_put(q, &info->quarantine_link, cache->size); - if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) + if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) { qlist_move_all(q, &temp); - local_irq_restore(flags); - - if (unlikely(!qlist_empty(&temp))) { - spin_lock_irqsave(&quarantine_lock, flags); + spin_lock(&quarantine_lock); WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes); qlist_move_all(&temp, &global_quarantine[quarantine_tail]); if (global_quarantine[quarantine_tail].bytes >= @@ -196,20 +203,33 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache) if (new_tail != quarantine_head) quarantine_tail = new_tail; } - spin_unlock_irqrestore(&quarantine_lock, flags); + spin_unlock(&quarantine_lock); } + + local_irq_restore(flags); } void quarantine_reduce(void) { size_t total_size, new_quarantine_size, percpu_quarantines; unsigned long flags; + int srcu_idx; struct qlist_head to_free = QLIST_INIT; if (likely(READ_ONCE(quarantine_size) <= READ_ONCE(quarantine_max_size))) return; + /* + * srcu critical section ensures that quarantine_remove_cache() + * will not miss objects belonging to the cache while they are in our + * local to_free list. srcu is chosen because (1) it gives us private + * grace period domain that does not interfere with anything else, + * and (2) it allows synchronize_srcu() to return without waiting + * if there are no pending read critical sections (which is the + * expected case). 
+ */ + srcu_idx = srcu_read_lock(&remove_cache_srcu); spin_lock_irqsave(&quarantine_lock, flags); /* @@ -237,6 +257,7 @@ void quarantine_reduce(void) spin_unlock_irqrestore(&quarantine_lock, flags); qlist_free_all(&to_free, NULL); + srcu_read_unlock(&remove_cache_srcu, srcu_idx); } static void qlist_move_cache(struct qlist_head *from, @@ -280,12 +301,28 @@ void quarantine_remove_cache(struct kmem_cache *cache) unsigned long flags, i; struct qlist_head to_free = QLIST_INIT; + /* + * Must be careful to not miss any objects that are being moved from + * per-cpu list to the global quarantine in quarantine_put(), + * nor objects being freed in quarantine_reduce(). on_each_cpu() + * achieves the first goal, while synchronize_srcu() achieves the + * second. + */ on_each_cpu(per_cpu_remove_cache, cache, 1); spin_lock_irqsave(&quarantine_lock, flags); - for (i = 0; i < QUARANTINE_BATCHES; i++) + for (i = 0; i < QUARANTINE_BATCHES; i++) { + if (qlist_empty(&global_quarantine[i])) + continue; qlist_move_cache(&global_quarantine[i], &to_free, cache); + /* Scanning whole quarantine can take a while. */ + spin_unlock_irqrestore(&quarantine_lock, flags); + cond_resched(); + spin_lock_irqsave(&quarantine_lock, flags); + } spin_unlock_irqrestore(&quarantine_lock, flags); qlist_free_all(&to_free, cache); + + synchronize_srcu(&remove_cache_srcu); } diff --git a/mm/kasan/report.c b/mm/kasan/report.c index f479365530b648..ab42a0803f161c 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -13,7 +13,9 @@ * */ +#include #include +#include #include #include #include @@ -293,6 +295,40 @@ static void kasan_report_error(struct kasan_access_info *info) kasan_end_report(&flags); } +static unsigned long kasan_flags; + +#define KASAN_BIT_REPORTED 0 +#define KASAN_BIT_MULTI_SHOT 1 + +bool kasan_save_enable_multi_shot(void) +{ + return test_and_set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags); +} +EXPORT_SYMBOL_GPL(kasan_save_enable_multi_shot); + +void kasan_restore_multi_shot(bool enabled) +{ + if (!enabled) + clear_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags); +} +EXPORT_SYMBOL_GPL(kasan_restore_multi_shot); + +static int __init kasan_set_multi_shot(char *str) +{ + set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags); + return 1; +} +__setup("kasan_multi_shot", kasan_set_multi_shot); + +static inline bool kasan_report_enabled(void) +{ + if (current->kasan_depth) + return false; + if (test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags)) + return true; + return !test_and_set_bit(KASAN_BIT_REPORTED, &kasan_flags); +} + void kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip) { diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 34bce5c308e3b1..ba40b7f673f4dd 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -2,6 +2,8 @@ #include #include +#include +#include #include #include #include diff --git a/mm/kmemleak.c b/mm/kmemleak.c index da343695302277..20036d4f9f13d4 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -73,7 +73,9 @@ #include #include #include -#include +#include +#include +#include #include #include #include @@ -1414,7 +1416,7 @@ static void kmemleak_scan(void) /* data/bss scanning */ scan_large_block(_sdata, _edata); scan_large_block(__bss_start, __bss_stop); - scan_large_block(__start_data_ro_after_init, __end_data_ro_after_init); + scan_large_block(__start_ro_after_init, __end_ro_after_init); #ifdef CONFIG_SMP /* per-cpu sections scanning */ diff --git a/mm/ksm.c b/mm/ksm.c index 520e4c37fec738..19b4f2dea7a591 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -19,6 +19,8 @@ #include #include #include 
+#include +#include #include #include #include diff --git a/mm/madvise.c b/mm/madvise.c index dc5927c812d3d1..7a2abf0127aef7 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -513,7 +513,43 @@ static long madvise_dontneed(struct vm_area_struct *vma, if (!can_madv_dontneed_vma(vma)) return -EINVAL; - userfaultfd_remove(vma, prev, start, end); + if (!userfaultfd_remove(vma, start, end)) { + *prev = NULL; /* mmap_sem has been dropped, prev is stale */ + + down_read(¤t->mm->mmap_sem); + vma = find_vma(current->mm, start); + if (!vma) + return -ENOMEM; + if (start < vma->vm_start) { + /* + * This "vma" under revalidation is the one + * with the lowest vma->vm_start where start + * is also < vma->vm_end. If start < + * vma->vm_start it means an hole materialized + * in the user address space within the + * virtual range passed to MADV_DONTNEED. + */ + return -ENOMEM; + } + if (!can_madv_dontneed_vma(vma)) + return -EINVAL; + if (end > vma->vm_end) { + /* + * Don't fail if end > vma->vm_end. If the old + * vma was splitted while the mmap_sem was + * released the effect of the concurrent + * operation may not cause MADV_DONTNEED to + * have an undefined result. There may be an + * adjacent next vma that we'll walk + * next. userfaultfd_remove() will generate an + * UFFD_EVENT_REMOVE repetition on the + * end-vma->vm_end range, but the manager can + * handle a repetition fine. + */ + end = vma->vm_end; + } + VM_WARN_ON(start >= end); + } zap_page_range(vma, start, end - start); return 0; } @@ -554,8 +590,10 @@ static long madvise_remove(struct vm_area_struct *vma, * mmap_sem. */ get_file(f); - userfaultfd_remove(vma, prev, start, end); - up_read(¤t->mm->mmap_sem); + if (userfaultfd_remove(vma, start, end)) { + /* mmap_sem was not released by userfaultfd_remove() */ + up_read(¤t->mm->mmap_sem); + } error = vfs_fallocate(f, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, offset, end - start); diff --git a/mm/memblock.c b/mm/memblock.c index b64b47803e529a..696f06d17c4e89 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -1118,7 +1118,10 @@ unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn, } } while (left < right); - return min(PHYS_PFN(type->regions[right].base), max_pfn); + if (right == type->cnt) + return max_pfn; + else + return min(PHYS_PFN(type->regions[right].base), max_pfn); } /** diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 45867e439d31d7..2bd7541d7c1123 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include #include @@ -465,6 +466,8 @@ static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page) struct mem_cgroup_tree_per_node *mctz; mctz = soft_limit_tree_from_page(page); + if (!mctz) + return; /* * Necessary to update all ancestors when hierarchy is used. * because their event counter is not touched. @@ -502,7 +505,8 @@ static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg) for_each_node(nid) { mz = mem_cgroup_nodeinfo(memcg, nid); mctz = soft_limit_tree_node(nid); - mem_cgroup_remove_exceeded(mz, mctz); + if (mctz) + mem_cgroup_remove_exceeded(mz, mctz); } } @@ -2557,7 +2561,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, * is empty. Do it lockless to prevent lock bouncing. Races * are acceptable as soft limit is best effort anyway. 
*/ - if (RB_EMPTY_ROOT(&mctz->rb_root)) + if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root)) return 0; /* @@ -4134,17 +4138,22 @@ static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) kfree(memcg->nodeinfo[node]); } -static void mem_cgroup_free(struct mem_cgroup *memcg) +static void __mem_cgroup_free(struct mem_cgroup *memcg) { int node; - memcg_wb_domain_exit(memcg); for_each_node(node) free_mem_cgroup_per_node_info(memcg, node); free_percpu(memcg->stat); kfree(memcg); } +static void mem_cgroup_free(struct mem_cgroup *memcg) +{ + memcg_wb_domain_exit(memcg); + __mem_cgroup_free(memcg); +} + static struct mem_cgroup *mem_cgroup_alloc(void) { struct mem_cgroup *memcg; @@ -4195,7 +4204,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void) fail: if (memcg->id.id > 0) idr_remove(&mem_cgroup_idr, memcg->id.id); - mem_cgroup_free(memcg); + __mem_cgroup_free(memcg); return NULL; } diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 3d0f2fd4bf73fe..27f7210e7fabd1 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -40,7 +40,8 @@ #include #include #include -#include +#include +#include #include #include #include diff --git a/mm/memory.c b/mm/memory.c index 14fc0b40f0bb6c..235ba51b2fbf07 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -40,6 +40,10 @@ #include #include +#include +#include +#include +#include #include #include #include @@ -441,7 +445,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud, mm_dec_nr_pmds(tlb->mm); } -static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, +static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) { @@ -450,7 +454,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, unsigned long start; start = addr; - pud = pud_offset(pgd, addr); + pud = pud_offset(p4d, addr); do { next = pud_addr_end(addr, end); if (pud_none_or_clear_bad(pud)) @@ -458,6 +462,39 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, free_pmd_range(tlb, pud, addr, next, floor, ceiling); } while (pud++, addr = next, addr != end); + start &= P4D_MASK; + if (start < floor) + return; + if (ceiling) { + ceiling &= P4D_MASK; + if (!ceiling) + return; + } + if (end - 1 > ceiling - 1) + return; + + pud = pud_offset(p4d, start); + p4d_clear(p4d); + pud_free_tlb(tlb, pud, start); +} + +static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd, + unsigned long addr, unsigned long end, + unsigned long floor, unsigned long ceiling) +{ + p4d_t *p4d; + unsigned long next; + unsigned long start; + + start = addr; + p4d = p4d_offset(pgd, addr); + do { + next = p4d_addr_end(addr, end); + if (p4d_none_or_clear_bad(p4d)) + continue; + free_pud_range(tlb, p4d, addr, next, floor, ceiling); + } while (p4d++, addr = next, addr != end); + start &= PGDIR_MASK; if (start < floor) return; @@ -469,9 +506,9 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, if (end - 1 > ceiling - 1) return; - pud = pud_offset(pgd, start); + p4d = p4d_offset(pgd, start); pgd_clear(pgd); - pud_free_tlb(tlb, pud, start); + p4d_free_tlb(tlb, p4d, start); } /* @@ -535,7 +572,7 @@ void free_pgd_range(struct mmu_gather *tlb, next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(pgd)) continue; - free_pud_range(tlb, pgd, addr, next, floor, ceiling); + free_p4d_range(tlb, pgd, addr, next, floor, ceiling); } while (pgd++, addr = next, addr != end); } @@ -654,7 +691,8 @@ static void print_bad_pte(struct 
vm_area_struct *vma, unsigned long addr, pte_t pte, struct page *page) { pgd_t *pgd = pgd_offset(vma->vm_mm, addr); - pud_t *pud = pud_offset(pgd, addr); + p4d_t *p4d = p4d_offset(pgd, addr); + pud_t *pud = pud_offset(p4d, addr); pmd_t *pmd = pmd_offset(pud, addr); struct address_space *mapping; pgoff_t index; @@ -1019,16 +1057,16 @@ static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src } static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, - pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma, + p4d_t *dst_p4d, p4d_t *src_p4d, struct vm_area_struct *vma, unsigned long addr, unsigned long end) { pud_t *src_pud, *dst_pud; unsigned long next; - dst_pud = pud_alloc(dst_mm, dst_pgd, addr); + dst_pud = pud_alloc(dst_mm, dst_p4d, addr); if (!dst_pud) return -ENOMEM; - src_pud = pud_offset(src_pgd, addr); + src_pud = pud_offset(src_p4d, addr); do { next = pud_addr_end(addr, end); if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) { @@ -1052,6 +1090,28 @@ static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src return 0; } +static inline int copy_p4d_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, + pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma, + unsigned long addr, unsigned long end) +{ + p4d_t *src_p4d, *dst_p4d; + unsigned long next; + + dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr); + if (!dst_p4d) + return -ENOMEM; + src_p4d = p4d_offset(src_pgd, addr); + do { + next = p4d_addr_end(addr, end); + if (p4d_none_or_clear_bad(src_p4d)) + continue; + if (copy_pud_range(dst_mm, src_mm, dst_p4d, src_p4d, + vma, addr, next)) + return -ENOMEM; + } while (dst_p4d++, src_p4d++, addr = next, addr != end); + return 0; +} + int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, struct vm_area_struct *vma) { @@ -1107,7 +1167,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(src_pgd)) continue; - if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd, + if (unlikely(copy_p4d_range(dst_mm, src_mm, dst_pgd, src_pgd, vma, addr, next))) { ret = -ENOMEM; break; @@ -1263,14 +1323,14 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb, } static inline unsigned long zap_pud_range(struct mmu_gather *tlb, - struct vm_area_struct *vma, pgd_t *pgd, + struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr, unsigned long end, struct zap_details *details) { pud_t *pud; unsigned long next; - pud = pud_offset(pgd, addr); + pud = pud_offset(p4d, addr); do { next = pud_addr_end(addr, end); if (pud_trans_huge(*pud) || pud_devmap(*pud)) { @@ -1291,6 +1351,25 @@ static inline unsigned long zap_pud_range(struct mmu_gather *tlb, return addr; } +static inline unsigned long zap_p4d_range(struct mmu_gather *tlb, + struct vm_area_struct *vma, pgd_t *pgd, + unsigned long addr, unsigned long end, + struct zap_details *details) +{ + p4d_t *p4d; + unsigned long next; + + p4d = p4d_offset(pgd, addr); + do { + next = p4d_addr_end(addr, end); + if (p4d_none_or_clear_bad(p4d)) + continue; + next = zap_pud_range(tlb, vma, p4d, addr, next, details); + } while (p4d++, addr = next, addr != end); + + return addr; +} + void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long addr, unsigned long end, @@ -1306,7 +1385,7 @@ void unmap_page_range(struct mmu_gather *tlb, next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(pgd)) continue; - next = zap_pud_range(tlb, vma, pgd, addr, 
next, details); + next = zap_p4d_range(tlb, vma, pgd, addr, next, details); } while (pgd++, addr = next, addr != end); tlb_end_vma(tlb, vma); } @@ -1461,16 +1540,24 @@ EXPORT_SYMBOL_GPL(zap_vma_ptes); pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl) { - pgd_t *pgd = pgd_offset(mm, addr); - pud_t *pud = pud_alloc(mm, pgd, addr); - if (pud) { - pmd_t *pmd = pmd_alloc(mm, pud, addr); - if (pmd) { - VM_BUG_ON(pmd_trans_huge(*pmd)); - return pte_alloc_map_lock(mm, pmd, addr, ptl); - } - } - return NULL; + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd; + + pgd = pgd_offset(mm, addr); + p4d = p4d_alloc(mm, pgd, addr); + if (!p4d) + return NULL; + pud = pud_alloc(mm, p4d, addr); + if (!pud) + return NULL; + pmd = pmd_alloc(mm, pud, addr); + if (!pmd) + return NULL; + + VM_BUG_ON(pmd_trans_huge(*pmd)); + return pte_alloc_map_lock(mm, pmd, addr, ptl); } /* @@ -1736,7 +1823,7 @@ static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud, return 0; } -static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd, +static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d, unsigned long addr, unsigned long end, unsigned long pfn, pgprot_t prot) { @@ -1744,7 +1831,7 @@ static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd, unsigned long next; pfn -= addr >> PAGE_SHIFT; - pud = pud_alloc(mm, pgd, addr); + pud = pud_alloc(mm, p4d, addr); if (!pud) return -ENOMEM; do { @@ -1756,6 +1843,26 @@ static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd, return 0; } +static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd, + unsigned long addr, unsigned long end, + unsigned long pfn, pgprot_t prot) +{ + p4d_t *p4d; + unsigned long next; + + pfn -= addr >> PAGE_SHIFT; + p4d = p4d_alloc(mm, pgd, addr); + if (!p4d) + return -ENOMEM; + do { + next = p4d_addr_end(addr, end); + if (remap_pud_range(mm, p4d, addr, next, + pfn + (addr >> PAGE_SHIFT), prot)) + return -ENOMEM; + } while (p4d++, addr = next, addr != end); + return 0; +} + /** * remap_pfn_range - remap kernel memory to userspace * @vma: user vma to map to @@ -1812,7 +1919,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, flush_cache_range(vma, addr, end); do { next = pgd_addr_end(addr, end); - err = remap_pud_range(mm, pgd, addr, next, + err = remap_p4d_range(mm, pgd, addr, next, pfn + (addr >> PAGE_SHIFT), prot); if (err) break; @@ -1928,7 +2035,7 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, return err; } -static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd, +static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d, unsigned long addr, unsigned long end, pte_fn_t fn, void *data) { @@ -1936,7 +2043,7 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd, unsigned long next; int err; - pud = pud_alloc(mm, pgd, addr); + pud = pud_alloc(mm, p4d, addr); if (!pud) return -ENOMEM; do { @@ -1948,6 +2055,26 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd, return err; } +static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd, + unsigned long addr, unsigned long end, + pte_fn_t fn, void *data) +{ + p4d_t *p4d; + unsigned long next; + int err; + + p4d = p4d_alloc(mm, pgd, addr); + if (!p4d) + return -ENOMEM; + do { + next = p4d_addr_end(addr, end); + err = apply_to_pud_range(mm, p4d, addr, next, fn, data); + if (err) + break; + } while (p4d++, addr = next, addr != end); + return err; +} + /* * Scan a region of virtual memory, filling in page tables as necessary * and calling 
a provided function on each leaf page table. @@ -1966,7 +2093,7 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr, pgd = pgd_offset(mm, addr); do { next = pgd_addr_end(addr, end); - err = apply_to_pud_range(mm, pgd, addr, next, fn, data); + err = apply_to_p4d_range(mm, pgd, addr, next, fn, data); if (err) break; } while (pgd++, addr = next, addr != end); @@ -3649,11 +3776,15 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address, }; struct mm_struct *mm = vma->vm_mm; pgd_t *pgd; + p4d_t *p4d; int ret; pgd = pgd_offset(mm, address); + p4d = p4d_alloc(mm, pgd, address); + if (!p4d) + return VM_FAULT_OOM; - vmf.pud = pud_alloc(mm, pgd, address); + vmf.pud = pud_alloc(mm, p4d, address); if (!vmf.pud) return VM_FAULT_OOM; if (pud_none(*vmf.pud) && transparent_hugepage_enabled(vma)) { @@ -3775,12 +3906,35 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address, } EXPORT_SYMBOL_GPL(handle_mm_fault); +#ifndef __PAGETABLE_P4D_FOLDED +/* + * Allocate p4d page table. + * We've already handled the fast-path in-line. + */ +int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) +{ + p4d_t *new = p4d_alloc_one(mm, address); + if (!new) + return -ENOMEM; + + smp_wmb(); /* See comment in __pte_alloc */ + + spin_lock(&mm->page_table_lock); + if (pgd_present(*pgd)) /* Another has populated it */ + p4d_free(mm, new); + else + pgd_populate(mm, pgd, new); + spin_unlock(&mm->page_table_lock); + return 0; +} +#endif /* __PAGETABLE_P4D_FOLDED */ + #ifndef __PAGETABLE_PUD_FOLDED /* * Allocate page upper directory. * We've already handled the fast-path in-line. */ -int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) +int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address) { pud_t *new = pud_alloc_one(mm, address); if (!new) @@ -3789,10 +3943,17 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) smp_wmb(); /* See comment in __pte_alloc */ spin_lock(&mm->page_table_lock); - if (pgd_present(*pgd)) /* Another has populated it */ +#ifndef __ARCH_HAS_5LEVEL_HACK + if (p4d_present(*p4d)) /* Another has populated it */ pud_free(mm, new); else - pgd_populate(mm, pgd, new); + p4d_populate(mm, p4d, new); +#else + if (pgd_present(*p4d)) /* Another has populated it */ + pud_free(mm, new); + else + pgd_populate(mm, p4d, new); +#endif /* __ARCH_HAS_5LEVEL_HACK */ spin_unlock(&mm->page_table_lock); return 0; } @@ -3835,6 +3996,7 @@ static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address, pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp) { pgd_t *pgd; + p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *ptep; @@ -3843,7 +4005,11 @@ static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address, if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) goto out; - pud = pud_offset(pgd, address); + p4d = p4d_offset(pgd, address); + if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d))) + goto out; + + pud = pud_offset(p4d, address); if (pud_none(*pud) || unlikely(pud_bad(*pud))) goto out; diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 1d3ed58f92abe1..6fa7208bcd564e 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -6,6 +6,7 @@ #include #include +#include #include #include #include @@ -124,9 +125,12 @@ void put_online_mems(void) } +/* Serializes write accesses to mem_hotplug.active_writer. 
*/ +static DEFINE_MUTEX(memory_add_remove_lock); + void mem_hotplug_begin(void) { - assert_held_device_hotplug(); + mutex_lock(&memory_add_remove_lock); mem_hotplug.active_writer = current; @@ -146,6 +150,7 @@ void mem_hotplug_done(void) mem_hotplug.active_writer = NULL; mutex_unlock(&mem_hotplug.lock); memhp_lock_release(); + mutex_unlock(&memory_add_remove_lock); } /* add this memory to iomem resource */ diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 1e7873e40c9a16..75b2745bac4145 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -73,6 +73,9 @@ #include #include #include +#include +#include +#include #include #include #include diff --git a/mm/migrate.c b/mm/migrate.c index 2c63ac06791bbd..ed97c2c14fa80b 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -40,6 +40,7 @@ #include #include #include +#include #include @@ -208,8 +209,11 @@ static int remove_migration_pte(struct page *page, struct vm_area_struct *vma, VM_BUG_ON_PAGE(PageTail(page), page); while (page_vma_mapped_walk(&pvmw)) { - new = page - pvmw.page->index + - linear_page_index(vma, pvmw.address); + if (PageKsm(page)) + new = page; + else + new = page - pvmw.page->index + + linear_page_index(vma, pvmw.address); get_page(new); pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot))); diff --git a/mm/mlock.c b/mm/mlock.c index cdbed8aaa4268c..0dd9ca18e19ed7 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -379,6 +380,7 @@ static unsigned long __munlock_pagevec_fill(struct pagevec *pvec, pte = get_locked_pte(vma->vm_mm, start, &ptl); /* Make sure we do not cross the page table boundary */ end = pgd_addr_end(start, end); + end = p4d_addr_end(start, end); end = pud_addr_end(start, end); end = pmd_addr_end(start, end); @@ -441,7 +443,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma, while (start < end) { struct page *page; - unsigned int page_mask; + unsigned int page_mask = 0; unsigned long page_increm; struct pagevec pvec; struct zone *zone; @@ -455,8 +457,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma, * suits munlock very well (and if somehow an abnormal page * has sneaked into the range, we won't oops here: great). */ - page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP, - &page_mask); + page = follow_page(vma, start, FOLL_GET | FOLL_DUMP); if (page && !IS_ERR(page)) { if (PageTransTail(page)) { @@ -467,8 +468,8 @@ void munlock_vma_pages_range(struct vm_area_struct *vma, /* * Any THP page found by follow_page_mask() may * have gotten split before reaching - * munlock_vma_page(), so we need to recompute - * the page_mask here. + * munlock_vma_page(), so we need to compute + * the page_mask here instead. */ page_mask = munlock_vma_page(page); unlock_page(page); diff --git a/mm/mmap.c b/mm/mmap.c index 499b988b1639ac..bfbe8856d134f3 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1672,7 +1672,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr, * new file must not have been exposed to user-space, yet. 
*/ vma->vm_file = get_file(file); - error = file->f_op->mmap(file, vma); + error = call_mmap(file, vma); if (error) goto unmap_and_free_vma; diff --git a/mm/mmu_context.c b/mm/mmu_context.c index daf67bb02b4af8..3e612ae748e966 100644 --- a/mm/mmu_context.c +++ b/mm/mmu_context.c @@ -5,6 +5,8 @@ #include #include +#include +#include #include #include diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c index 32bc9f2ff7eb93..a7652acd2ab93c 100644 --- a/mm/mmu_notifier.c +++ b/mm/mmu_notifier.c @@ -17,6 +17,7 @@ #include #include #include +#include #include /* global SRCU for all MMs */ diff --git a/mm/mprotect.c b/mm/mprotect.c index 848e946b08e58e..8edd0d576254d4 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -193,14 +193,14 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma, } static inline unsigned long change_pud_range(struct vm_area_struct *vma, - pgd_t *pgd, unsigned long addr, unsigned long end, + p4d_t *p4d, unsigned long addr, unsigned long end, pgprot_t newprot, int dirty_accountable, int prot_numa) { pud_t *pud; unsigned long next; unsigned long pages = 0; - pud = pud_offset(pgd, addr); + pud = pud_offset(p4d, addr); do { next = pud_addr_end(addr, end); if (pud_none_or_clear_bad(pud)) @@ -212,6 +212,26 @@ static inline unsigned long change_pud_range(struct vm_area_struct *vma, return pages; } +static inline unsigned long change_p4d_range(struct vm_area_struct *vma, + pgd_t *pgd, unsigned long addr, unsigned long end, + pgprot_t newprot, int dirty_accountable, int prot_numa) +{ + p4d_t *p4d; + unsigned long next; + unsigned long pages = 0; + + p4d = p4d_offset(pgd, addr); + do { + next = p4d_addr_end(addr, end); + if (p4d_none_or_clear_bad(p4d)) + continue; + pages += change_pud_range(vma, p4d, addr, next, newprot, + dirty_accountable, prot_numa); + } while (p4d++, addr = next, addr != end); + + return pages; +} + static unsigned long change_protection_range(struct vm_area_struct *vma, unsigned long addr, unsigned long end, pgprot_t newprot, int dirty_accountable, int prot_numa) @@ -230,7 +250,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma, next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(pgd)) continue; - pages += change_pud_range(vma, pgd, addr, next, newprot, + pages += change_p4d_range(vma, pgd, addr, next, newprot, dirty_accountable, prot_numa); } while (pgd++, addr = next, addr != end); diff --git a/mm/mremap.c b/mm/mremap.c index 8233b0105c8258..cd8a1b199ef949 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -32,6 +32,7 @@ static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr) { pgd_t *pgd; + p4d_t *p4d; pud_t *pud; pmd_t *pmd; @@ -39,7 +40,11 @@ static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr) if (pgd_none_or_clear_bad(pgd)) return NULL; - pud = pud_offset(pgd, addr); + p4d = p4d_offset(pgd, addr); + if (p4d_none_or_clear_bad(p4d)) + return NULL; + + pud = pud_offset(p4d, addr); if (pud_none_or_clear_bad(pud)) return NULL; @@ -54,11 +59,15 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr) { pgd_t *pgd; + p4d_t *p4d; pud_t *pud; pmd_t *pmd; pgd = pgd_offset(mm, addr); - pud = pud_alloc(mm, pgd, addr); + p4d = p4d_alloc(mm, pgd, addr); + if (!p4d) + return NULL; + pud = pud_alloc(mm, p4d, addr); if (!pud) return NULL; diff --git a/mm/nommu.c b/mm/nommu.c index fe9f4fa4a7a741..2d131b97a85169 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -17,6 +17,7 @@ #include #include +#include #include #include #include @@ -757,7 +758,7 @@ static void 
delete_vma_from_mm(struct vm_area_struct *vma) mm->map_count--; for (i = 0; i < VMACACHE_SIZE; i++) { /* if the vma is cached, invalidate the entire cache */ - if (curr->vmacache[i] == vma) { + if (curr->vmacache.vmas[i] == vma) { vmacache_invalidate(mm); break; } @@ -1084,7 +1085,7 @@ static int do_mmap_shared_file(struct vm_area_struct *vma) { int ret; - ret = vma->vm_file->f_op->mmap(vma->vm_file, vma); + ret = call_mmap(vma->vm_file, vma); if (ret == 0) { vma->vm_region->vm_top = vma->vm_region->vm_end; return 0; @@ -1115,7 +1116,7 @@ static int do_mmap_private(struct vm_area_struct *vma, * - VM_MAYSHARE will be set if it may attempt to share */ if (capabilities & NOMMU_MAP_DIRECT) { - ret = vma->vm_file->f_op->mmap(vma->vm_file, vma); + ret = call_mmap(vma->vm_file, vma); if (ret == 0) { /* shouldn't return success if we're not sharing */ BUG_ON(!(vma->vm_flags & VM_MAYSHARE)); diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 51c091849dcb65..d083714a2bb924 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -22,6 +22,9 @@ #include #include #include +#include +#include +#include #include #include #include diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 26a60818a8fcf7..d8ac2a7fb9e7b6 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include diff --git a/mm/page_alloc.c b/mm/page_alloc.c index a7a6aac95a6d15..6cbde310abed8d 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -61,6 +61,7 @@ #include #include #include +#include #include #include #include @@ -872,7 +873,8 @@ static inline void __free_one_page(struct page *page, higher_page = page + (combined_pfn - pfn); buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1); higher_buddy = higher_page + (buddy_pfn - combined_pfn); - if (page_is_buddy(higher_page, higher_buddy, order + 1)) { + if (pfn_valid_within(buddy_pfn) && + page_is_buddy(higher_page, higher_buddy, order + 1)) { list_add_tail(&page->lru, &zone->free_area[order].free_list[migratetype]); goto out; diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c index a23001a22c1518..c4c9def8ffea47 100644 --- a/mm/page_vma_mapped.c +++ b/mm/page_vma_mapped.c @@ -104,6 +104,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw) struct mm_struct *mm = pvmw->vma->vm_mm; struct page *page = pvmw->page; pgd_t *pgd; + p4d_t *p4d; pud_t *pud; /* The only possible pmd mapping has been handled on last iteration */ @@ -133,7 +134,10 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw) pgd = pgd_offset(mm, pvmw->address); if (!pgd_present(*pgd)) return false; - pud = pud_offset(pgd, pvmw->address); + p4d = p4d_offset(pgd, pvmw->address); + if (!p4d_present(*p4d)) + return false; + pud = pud_offset(p4d, pvmw->address); if (!pud_present(*pud)) return false; pvmw->pmd = pmd_offset(pud, pvmw->address); diff --git a/mm/pagewalk.c b/mm/pagewalk.c index 03761577ae86e4..60f7856e508fb9 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c @@ -69,14 +69,14 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, return err; } -static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end, +static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, struct mm_walk *walk) { pud_t *pud; unsigned long next; int err = 0; - pud = pud_offset(pgd, addr); + pud = pud_offset(p4d, addr); do { again: next = pud_addr_end(addr, end); @@ -113,6 +113,32 @@ static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end, return err; } +static int 
walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, + struct mm_walk *walk) +{ + p4d_t *p4d; + unsigned long next; + int err = 0; + + p4d = p4d_offset(pgd, addr); + do { + next = p4d_addr_end(addr, end); + if (p4d_none_or_clear_bad(p4d)) { + if (walk->pte_hole) + err = walk->pte_hole(addr, next, walk); + if (err) + break; + continue; + } + if (walk->pmd_entry || walk->pte_entry) + err = walk_pud_range(p4d, addr, next, walk); + if (err) + break; + } while (p4d++, addr = next, addr != end); + + return err; +} + static int walk_pgd_range(unsigned long addr, unsigned long end, struct mm_walk *walk) { @@ -131,7 +157,7 @@ static int walk_pgd_range(unsigned long addr, unsigned long end, continue; } if (walk->pmd_entry || walk->pte_entry) - err = walk_pud_range(pgd, addr, next, walk); + err = walk_p4d_range(pgd, addr, next, walk); if (err) break; } while (pgd++, addr = next, addr != end); diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c index 538998a137d24e..9ac639499bd114 100644 --- a/mm/percpu-vm.c +++ b/mm/percpu-vm.c @@ -21,7 +21,6 @@ static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk, /** * pcpu_get_pages - get temp pages array - * @chunk: chunk of interest * * Returns pointer to array of pointers to struct page which can be indexed * with pcpu_page_idx(). Note that there is only one array and accesses @@ -30,7 +29,7 @@ static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk, * RETURNS: * Pointer to temp pages array on success. */ -static struct page **pcpu_get_pages(struct pcpu_chunk *chunk_alloc) +static struct page **pcpu_get_pages(void) { static struct page **pages; size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]); @@ -275,7 +274,7 @@ static int pcpu_populate_chunk(struct pcpu_chunk *chunk, { struct page **pages; - pages = pcpu_get_pages(chunk); + pages = pcpu_get_pages(); if (!pages) return -ENOMEM; @@ -313,7 +312,7 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, * successful population attempt so the temp pages array must * be available now. 
*/ - pages = pcpu_get_pages(chunk); + pages = pcpu_get_pages(); BUG_ON(!pages); /* unmap and free */ diff --git a/mm/percpu.c b/mm/percpu.c index 5696039b5c0707..60a6488e9e6d49 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -1011,8 +1011,11 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved, mutex_unlock(&pcpu_alloc_mutex); } - if (chunk != pcpu_reserved_chunk) + if (chunk != pcpu_reserved_chunk) { + spin_lock_irqsave(&pcpu_lock, flags); pcpu_nr_empty_pop_pages -= occ_pages; + spin_unlock_irqrestore(&pcpu_lock, flags); + } if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW) pcpu_schedule_balance_work(); diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c index 4ed5908c65b0f1..c99d9512a45b8a 100644 --- a/mm/pgtable-generic.c +++ b/mm/pgtable-generic.c @@ -22,6 +22,12 @@ void pgd_clear_bad(pgd_t *pgd) pgd_clear(pgd); } +void p4d_clear_bad(p4d_t *p4d) +{ + p4d_ERROR(*p4d); + p4d_clear(p4d); +} + void pud_clear_bad(pud_t *pud) { pud_ERROR(*pud); diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c index 84d0c7eada2b50..8973cd231ecee9 100644 --- a/mm/process_vm_access.c +++ b/mm/process_vm_access.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include diff --git a/mm/rmap.c b/mm/rmap.c index 8774791e28099b..f6838015810f56 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -46,6 +46,8 @@ */ #include +#include +#include #include #include #include @@ -682,6 +684,7 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address) { pgd_t *pgd; + p4d_t *p4d; pud_t *pud; pmd_t *pmd = NULL; pmd_t pmde; @@ -690,7 +693,11 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address) if (!pgd_present(*pgd)) goto out; - pud = pud_offset(pgd, address); + p4d = p4d_offset(pgd, address); + if (!p4d_present(*p4d)) + goto out; + + pud = pud_offset(p4d, address); if (!pud_present(*pud)) goto out; @@ -1152,7 +1159,7 @@ void page_add_file_rmap(struct page *page, bool compound) goto out; } __mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, nr); - mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED); + mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, nr); out: unlock_page_memcg(page); } @@ -1192,7 +1199,7 @@ static void page_remove_file_rmap(struct page *page, bool compound) * pte lock(a spinlock) is held, which implies preemption disabled. */ __mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, -nr); - mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED); + mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, -nr); if (unlikely(PageMlocked(page))) clear_page_mlock(page); @@ -1314,12 +1321,6 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, } while (page_vma_mapped_walk(&pvmw)) { - subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte); - address = pvmw.address; - - /* Unexpected PMD-mapped THP? */ - VM_BUG_ON_PAGE(!pvmw.pte, page); - /* * If the page is mlock()d, we cannot swap it out. * If it's recently referenced (perhaps page_referenced @@ -1343,6 +1344,13 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, continue; } + /* Unexpected PMD-mapped THP? 
*/ + VM_BUG_ON_PAGE(!pvmw.pte, page); + + subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte); + address = pvmw.address; + + if (!(flags & TTU_IGNORE_ACCESS)) { if (ptep_clear_flush_young_notify(vma, address, pvmw.pte)) { diff --git a/mm/shmem.c b/mm/shmem.c index a26649a6633fbf..e67d6ba4e98e73 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -958,10 +959,10 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) } EXPORT_SYMBOL_GPL(shmem_truncate_range); -static int shmem_getattr(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) +static int shmem_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) { - struct inode *inode = dentry->d_inode; + struct inode *inode = path->dentry->d_inode; struct shmem_inode_info *info = SHMEM_I(inode); if (info->alloced - info->swapped != inode->i_mapping->nrpages) { diff --git a/mm/slab.c b/mm/slab.c index bd63450a9b167f..807d86c7690886 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -116,6 +116,7 @@ #include #include #include +#include #include diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c index 574c67b663fe8a..a56c3989f77312 100644 --- a/mm/sparse-vmemmap.c +++ b/mm/sparse-vmemmap.c @@ -196,9 +196,9 @@ pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node) return pmd; } -pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node) +pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node) { - pud_t *pud = pud_offset(pgd, addr); + pud_t *pud = pud_offset(p4d, addr); if (pud_none(*pud)) { void *p = vmemmap_alloc_block(PAGE_SIZE, node); if (!p) @@ -208,6 +208,18 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node) return pud; } +p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node) +{ + p4d_t *p4d = p4d_offset(pgd, addr); + if (p4d_none(*p4d)) { + void *p = vmemmap_alloc_block(PAGE_SIZE, node); + if (!p) + return NULL; + p4d_populate(&init_mm, p4d, p); + } + return p4d; +} + pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node) { pgd_t *pgd = pgd_offset_k(addr); @@ -225,6 +237,7 @@ int __meminit vmemmap_populate_basepages(unsigned long start, { unsigned long addr = start; pgd_t *pgd; + p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *pte; @@ -233,7 +246,10 @@ int __meminit vmemmap_populate_basepages(unsigned long start, pgd = vmemmap_pgd_populate(addr, node); if (!pgd) return -ENOMEM; - pud = vmemmap_pud_populate(pgd, addr, node); + p4d = vmemmap_p4d_populate(pgd, addr, node); + if (!p4d) + return -ENOMEM; + pud = vmemmap_pud_populate(p4d, addr, node); if (!pud) return -ENOMEM; pmd = vmemmap_pmd_populate(pud, addr, node); diff --git a/mm/swap_slots.c b/mm/swap_slots.c index 9b5bc86f96ad73..b1ccb58ad39740 100644 --- a/mm/swap_slots.c +++ b/mm/swap_slots.c @@ -267,8 +267,6 @@ int free_swap_slot(swp_entry_t entry) { struct swap_slots_cache *cache; - BUG_ON(!swap_slot_cache_initialized); - cache = &get_cpu_var(swp_slots); if (use_swap_slot_cache && cache->slots_ret) { spin_lock_irq(&cache->free_lock); diff --git a/mm/swapfile.c b/mm/swapfile.c index fadc6a1c0da0b2..178130880b9085 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -6,6 +6,8 @@ */ #include +#include +#include #include #include #include @@ -1515,7 +1517,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud, return 0; } -static inline int unuse_pud_range(struct vm_area_struct 
*vma, pgd_t *pgd, +static inline int unuse_pud_range(struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr, unsigned long end, swp_entry_t entry, struct page *page) { @@ -1523,7 +1525,7 @@ static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd, unsigned long next; int ret; - pud = pud_offset(pgd, addr); + pud = pud_offset(p4d, addr); do { next = pud_addr_end(addr, end); if (pud_none_or_clear_bad(pud)) @@ -1535,6 +1537,26 @@ static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd, return 0; } +static inline int unuse_p4d_range(struct vm_area_struct *vma, pgd_t *pgd, + unsigned long addr, unsigned long end, + swp_entry_t entry, struct page *page) +{ + p4d_t *p4d; + unsigned long next; + int ret; + + p4d = p4d_offset(pgd, addr); + do { + next = p4d_addr_end(addr, end); + if (p4d_none_or_clear_bad(p4d)) + continue; + ret = unuse_pud_range(vma, p4d, addr, next, entry, page); + if (ret) + return ret; + } while (p4d++, addr = next, addr != end); + return 0; +} + static int unuse_vma(struct vm_area_struct *vma, swp_entry_t entry, struct page *page) { @@ -1558,7 +1580,7 @@ static int unuse_vma(struct vm_area_struct *vma, next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(pgd)) continue; - ret = unuse_pud_range(vma, pgd, addr, next, entry, page); + ret = unuse_p4d_range(vma, pgd, addr, next, entry, page); if (ret) return ret; } while (pgd++, addr = next, addr != end); diff --git a/mm/usercopy.c b/mm/usercopy.c index 8345299e3e3b08..d155e12563b139 100644 --- a/mm/usercopy.c +++ b/mm/usercopy.c @@ -16,6 +16,9 @@ #include #include +#include +#include +#include #include enum { diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index 9f0ad2a4f10244..8bcb501bce60b8 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -8,6 +8,7 @@ */ #include +#include #include #include #include @@ -127,19 +128,22 @@ static int mfill_zeropage_pte(struct mm_struct *dst_mm, static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address) { pgd_t *pgd; + p4d_t *p4d; pud_t *pud; - pmd_t *pmd = NULL; pgd = pgd_offset(mm, address); - pud = pud_alloc(mm, pgd, address); - if (pud) - /* - * Note that we didn't run this because the pmd was - * missing, the *pmd may be already established and in - * turn it may also be a trans_huge_pmd. - */ - pmd = pmd_alloc(mm, pud, address); - return pmd; + p4d = p4d_alloc(mm, pgd, address); + if (!p4d) + return NULL; + pud = pud_alloc(mm, p4d, address); + if (!pud) + return NULL; + /* + * Note that we didn't run this because the pmd was + * missing, the *pmd may be already established and in + * turn it may also be a trans_huge_pmd. + */ + return pmd_alloc(mm, pud, address); } #ifdef CONFIG_HUGETLB_PAGE diff --git a/mm/util.c b/mm/util.c index b8f538863b5a19..656dc5e37a8721 100644 --- a/mm/util.c +++ b/mm/util.c @@ -5,6 +5,8 @@ #include #include #include +#include +#include #include #include #include diff --git a/mm/vmacache.c b/mm/vmacache.c index 035fdeb35b43b9..7ffa0ee341b5da 100644 --- a/mm/vmacache.c +++ b/mm/vmacache.c @@ -1,7 +1,8 @@ /* * Copyright (C) 2014 Davidlohr Bueso. 
*/ -#include +#include +#include #include #include @@ -60,7 +61,7 @@ static inline bool vmacache_valid_mm(struct mm_struct *mm) void vmacache_update(unsigned long addr, struct vm_area_struct *newvma) { if (vmacache_valid_mm(newvma->vm_mm)) - current->vmacache[VMACACHE_HASH(addr)] = newvma; + current->vmacache.vmas[VMACACHE_HASH(addr)] = newvma; } static bool vmacache_valid(struct mm_struct *mm) @@ -71,12 +72,12 @@ static bool vmacache_valid(struct mm_struct *mm) return false; curr = current; - if (mm->vmacache_seqnum != curr->vmacache_seqnum) { + if (mm->vmacache_seqnum != curr->vmacache.seqnum) { /* * First attempt will always be invalid, initialize * the new cache for this task here. */ - curr->vmacache_seqnum = mm->vmacache_seqnum; + curr->vmacache.seqnum = mm->vmacache_seqnum; vmacache_flush(curr); return false; } @@ -93,7 +94,7 @@ struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr) return NULL; for (i = 0; i < VMACACHE_SIZE; i++) { - struct vm_area_struct *vma = current->vmacache[i]; + struct vm_area_struct *vma = current->vmacache.vmas[i]; if (!vma) continue; @@ -121,7 +122,7 @@ struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm, return NULL; for (i = 0; i < VMACACHE_SIZE; i++) { - struct vm_area_struct *vma = current->vmacache[i]; + struct vm_area_struct *vma = current->vmacache.vmas[i]; if (vma && vma->vm_start == start && vma->vm_end == end) { count_vm_vmacache_event(VMACACHE_FIND_HITS); diff --git a/mm/vmalloc.c b/mm/vmalloc.c index be93949b488599..0b057628a7ba5c 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include #include #include @@ -86,12 +86,12 @@ static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end) } while (pmd++, addr = next, addr != end); } -static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end) +static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end) { pud_t *pud; unsigned long next; - pud = pud_offset(pgd, addr); + pud = pud_offset(p4d, addr); do { next = pud_addr_end(addr, end); if (pud_clear_huge(pud)) @@ -102,6 +102,22 @@ static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end) } while (pud++, addr = next, addr != end); } +static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end) +{ + p4d_t *p4d; + unsigned long next; + + p4d = p4d_offset(pgd, addr); + do { + next = p4d_addr_end(addr, end); + if (p4d_clear_huge(p4d)) + continue; + if (p4d_none_or_clear_bad(p4d)) + continue; + vunmap_pud_range(p4d, addr, next); + } while (p4d++, addr = next, addr != end); +} + static void vunmap_page_range(unsigned long addr, unsigned long end) { pgd_t *pgd; @@ -113,7 +129,7 @@ static void vunmap_page_range(unsigned long addr, unsigned long end) next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(pgd)) continue; - vunmap_pud_range(pgd, addr, next); + vunmap_p4d_range(pgd, addr, next); } while (pgd++, addr = next, addr != end); } @@ -160,13 +176,13 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr, return 0; } -static int vmap_pud_range(pgd_t *pgd, unsigned long addr, +static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, int *nr) { pud_t *pud; unsigned long next; - pud = pud_alloc(&init_mm, pgd, addr); + pud = pud_alloc(&init_mm, p4d, addr); if (!pud) return -ENOMEM; do { @@ -177,6 +193,23 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr, return 0; } +static int 
vmap_p4d_range(pgd_t *pgd, unsigned long addr, + unsigned long end, pgprot_t prot, struct page **pages, int *nr) +{ + p4d_t *p4d; + unsigned long next; + + p4d = p4d_alloc(&init_mm, pgd, addr); + if (!p4d) + return -ENOMEM; + do { + next = p4d_addr_end(addr, end); + if (vmap_pud_range(p4d, addr, next, prot, pages, nr)) + return -ENOMEM; + } while (p4d++, addr = next, addr != end); + return 0; +} + /* * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and * will have pfns corresponding to the "pages" array. @@ -196,7 +229,7 @@ static int vmap_page_range_noflush(unsigned long start, unsigned long end, pgd = pgd_offset_k(addr); do { next = pgd_addr_end(addr, end); - err = vmap_pud_range(pgd, addr, next, prot, pages, &nr); + err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr); if (err) return err; } while (pgd++, addr = next, addr != end); @@ -237,6 +270,10 @@ struct page *vmalloc_to_page(const void *vmalloc_addr) unsigned long addr = (unsigned long) vmalloc_addr; struct page *page = NULL; pgd_t *pgd = pgd_offset_k(addr); + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd; + pte_t *ptep, pte; /* * XXX we might need to change this if we add VIRTUAL_BUG_ON for @@ -244,21 +281,23 @@ struct page *vmalloc_to_page(const void *vmalloc_addr) */ VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr)); - if (!pgd_none(*pgd)) { - pud_t *pud = pud_offset(pgd, addr); - if (!pud_none(*pud)) { - pmd_t *pmd = pmd_offset(pud, addr); - if (!pmd_none(*pmd)) { - pte_t *ptep, pte; - - ptep = pte_offset_map(pmd, addr); - pte = *ptep; - if (pte_present(pte)) - page = pte_page(pte); - pte_unmap(ptep); - } - } - } + if (pgd_none(*pgd)) + return NULL; + p4d = p4d_offset(pgd, addr); + if (p4d_none(*p4d)) + return NULL; + pud = pud_offset(p4d, addr); + if (pud_none(*pud)) + return NULL; + pmd = pmd_offset(pud, addr); + if (pmd_none(*pmd)) + return NULL; + + ptep = pte_offset_map(pmd, addr); + pte = *ptep; + if (pte_present(pte)) + page = pte_page(pte); + pte_unmap(ptep); return page; } EXPORT_SYMBOL(vmalloc_to_page); @@ -1644,7 +1683,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, if (fatal_signal_pending(current)) { area->nr_pages = i; - goto fail; + goto fail_no_warn; } if (node == NUMA_NO_NODE) @@ -1670,6 +1709,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, warn_alloc(gfp_mask, NULL, "vmalloc: allocation failure, allocated %ld of %ld bytes", (area->nr_pages*PAGE_SIZE), area->size); +fail_no_warn: vfree(area->addr); return NULL; } diff --git a/mm/vmscan.c b/mm/vmscan.c index 70aa739c6b68be..bc8031ef994d57 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -14,6 +14,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include +#include #include #include #include diff --git a/mm/vmstat.c b/mm/vmstat.c index 69f9aff39a2eaf..89f95396ec46be 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1065,6 +1065,9 @@ const char * const vmstat_text[] = { "thp_split_page_failed", "thp_deferred_split_page", "thp_split_pmd", +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD + "thp_split_pud", +#endif "thp_zero_page_alloc", "thp_zero_page_alloc_failed", #endif @@ -1761,7 +1764,7 @@ static int vmstat_cpu_dead(unsigned int cpu) #endif -static int __init setup_vmstat(void) +void __init init_mm_internals(void) { #ifdef CONFIG_SMP int ret; @@ -1789,9 +1792,7 @@ static int __init setup_vmstat(void) proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations); proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations); #endif - return 0; } 
-module_init(setup_vmstat) #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION) diff --git a/mm/workingset.c b/mm/workingset.c index ac839fca0e76ae..eda05c71fa49e6 100644 --- a/mm/workingset.c +++ b/mm/workingset.c @@ -532,7 +532,7 @@ static int __init workingset_init(void) pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n", timestamp_bits, max_order, bucket_order); - ret = list_lru_init_key(&shadow_nodes, &shadow_nodes_key); + ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key); if (ret) goto err; ret = register_shrinker(&workingset_shadow_shrinker); diff --git a/mm/z3fold.c b/mm/z3fold.c index 8970a2fd3b1a53..f9492bccfd794a 100644 --- a/mm/z3fold.c +++ b/mm/z3fold.c @@ -667,6 +667,7 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries) z3fold_page_unlock(zhdr); spin_lock(&pool->lock); if (kref_put(&zhdr->refcount, release_z3fold_page)) { + spin_unlock(&pool->lock); atomic64_dec(&pool->pages_nr); return 0; } diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index b7b1fb6c8c21d4..b7ee9c34dbd678 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include diff --git a/net/9p/client.c b/net/9p/client.c index 3fc94a49ccd53c..3ce672af1596cf 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -32,7 +32,7 @@ #include #include #include -#include +#include #include #include #include @@ -1101,7 +1101,7 @@ void p9_client_begin_disconnect(struct p9_client *clnt) EXPORT_SYMBOL(p9_client_begin_disconnect); struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid, - char *uname, kuid_t n_uname, char *aname) + const char *uname, kuid_t n_uname, const char *aname) { int err = 0; struct p9_req_t *req; @@ -1149,7 +1149,7 @@ struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid, EXPORT_SYMBOL(p9_client_attach); struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname, - char **wnames, int clone) + const unsigned char * const *wnames, int clone) { int err; struct p9_client *clnt; @@ -1271,7 +1271,7 @@ int p9_client_open(struct p9_fid *fid, int mode) } EXPORT_SYMBOL(p9_client_open); -int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode, +int p9_client_create_dotl(struct p9_fid *ofid, const char *name, u32 flags, u32 mode, kgid_t gid, struct p9_qid *qid) { int err = 0; @@ -1316,7 +1316,7 @@ int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode, } EXPORT_SYMBOL(p9_client_create_dotl); -int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode, +int p9_client_fcreate(struct p9_fid *fid, const char *name, u32 perm, int mode, char *extension) { int err; @@ -1361,8 +1361,8 @@ int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode, } EXPORT_SYMBOL(p9_client_fcreate); -int p9_client_symlink(struct p9_fid *dfid, char *name, char *symtgt, kgid_t gid, - struct p9_qid *qid) +int p9_client_symlink(struct p9_fid *dfid, const char *name, + const char *symtgt, kgid_t gid, struct p9_qid *qid) { int err = 0; struct p9_client *clnt; @@ -1395,7 +1395,7 @@ int p9_client_symlink(struct p9_fid *dfid, char *name, char *symtgt, kgid_t gid, } EXPORT_SYMBOL(p9_client_symlink); -int p9_client_link(struct p9_fid *dfid, struct p9_fid *oldfid, char *newname) +int p9_client_link(struct p9_fid *dfid, struct p9_fid *oldfid, const char *newname) { struct p9_client *clnt; struct p9_req_t *req; @@ -2117,7 +2117,7 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 
offset) } EXPORT_SYMBOL(p9_client_readdir); -int p9_client_mknod_dotl(struct p9_fid *fid, char *name, int mode, +int p9_client_mknod_dotl(struct p9_fid *fid, const char *name, int mode, dev_t rdev, kgid_t gid, struct p9_qid *qid) { int err; @@ -2148,7 +2148,7 @@ int p9_client_mknod_dotl(struct p9_fid *fid, char *name, int mode, } EXPORT_SYMBOL(p9_client_mknod_dotl); -int p9_client_mkdir_dotl(struct p9_fid *fid, char *name, int mode, +int p9_client_mkdir_dotl(struct p9_fid *fid, const char *name, int mode, kgid_t gid, struct p9_qid *qid) { int err; diff --git a/net/atm/common.c b/net/atm/common.c index a3ca922d307b0a..9613381f5db04e 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -13,7 +13,7 @@ #include /* error codes */ #include #include -#include +#include #include /* struct timeval */ #include #include diff --git a/net/atm/svc.c b/net/atm/svc.c index 878563a8354d10..5589de7086af4e 100644 --- a/net/atm/svc.c +++ b/net/atm/svc.c @@ -10,7 +10,7 @@ #include /* printk */ #include #include -#include /* jiffies and HZ */ +#include #include /* O_NONBLOCK */ #include #include /* ATM stuff */ @@ -318,7 +318,8 @@ static int svc_listen(struct socket *sock, int backlog) return error; } -static int svc_accept(struct socket *sock, struct socket *newsock, int flags) +static int svc_accept(struct socket *sock, struct socket *newsock, int flags, + bool kern) { struct sock *sk = sock->sk; struct sk_buff *skb; @@ -329,7 +330,7 @@ static int svc_accept(struct socket *sock, struct socket *newsock, int flags) lock_sock(sk); - error = svc_create(sock_net(sk), newsock, 0, 0); + error = svc_create(sock_net(sk), newsock, 0, kern); if (error) goto out; diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 90fcf5fc2e0ac8..b7c486752b3acf 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include #include @@ -1320,7 +1320,8 @@ static int __must_check ax25_connect(struct socket *sock, return err; } -static int ax25_accept(struct socket *sock, struct socket *newsock, int flags) +static int ax25_accept(struct socket *sock, struct socket *newsock, int flags, + bool kern) { struct sk_buff *skb; struct sock *newsk; diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 7c3d994e90d87b..71343d0fec94b5 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -2477,6 +2477,16 @@ static void batadv_iv_iface_activate(struct batadv_hard_iface *hard_iface) batadv_iv_ogm_schedule(hard_iface); } +/** + * batadv_iv_init_sel_class - initialize GW selection class + * @bat_priv: the bat priv with all the soft interface information + */ +static void batadv_iv_init_sel_class(struct batadv_priv *bat_priv) +{ + /* set default TQ difference threshold to 20 */ + atomic_set(&bat_priv->gw.sel_class, 20); +} + static struct batadv_gw_node * batadv_iv_gw_get_best_gw_node(struct batadv_priv *bat_priv) { @@ -2823,6 +2833,7 @@ static struct batadv_algo_ops batadv_batman_iv __read_mostly = { .del_if = batadv_iv_ogm_orig_del_if, }, .gw = { + .init_sel_class = batadv_iv_init_sel_class, .get_best_gw_node = batadv_iv_gw_get_best_gw_node, .is_eligible = batadv_iv_gw_is_eligible, #ifdef CONFIG_BATMAN_ADV_DEBUGFS diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c index 0acd081dd28699..a36c8e7291d61f 100644 --- a/net/batman-adv/bat_v.c +++ b/net/batman-adv/bat_v.c @@ -668,6 +668,16 @@ static bool batadv_v_neigh_is_sob(struct batadv_neigh_node *neigh1, return ret; } +/** + * batadv_v_init_sel_class - initialize GW 
selection class + * @bat_priv: the bat priv with all the soft interface information + */ +static void batadv_v_init_sel_class(struct batadv_priv *bat_priv) +{ + /* set default throughput difference threshold to 5Mbps */ + atomic_set(&bat_priv->gw.sel_class, 50); +} + static ssize_t batadv_v_store_sel_class(struct batadv_priv *bat_priv, char *buff, size_t count) { @@ -1052,6 +1062,7 @@ static struct batadv_algo_ops batadv_batman_v __read_mostly = { .dump = batadv_v_orig_dump, }, .gw = { + .init_sel_class = batadv_v_init_sel_class, .store_sel_class = batadv_v_store_sel_class, .show_sel_class = batadv_v_show_sel_class, .get_best_gw_node = batadv_v_gw_get_best_gw_node, @@ -1092,9 +1103,6 @@ int batadv_v_mesh_init(struct batadv_priv *bat_priv) if (ret < 0) return ret; - /* set default throughput difference threshold to 5Mbps */ - atomic_set(&bat_priv->gw.sel_class, 50); - return 0; } diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c index ead18ca836de7b..8f964beaac2849 100644 --- a/net/batman-adv/fragmentation.c +++ b/net/batman-adv/fragmentation.c @@ -239,8 +239,10 @@ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node, spin_unlock_bh(&chain->lock); err: - if (!ret) + if (!ret) { kfree(frag_entry_new); + kfree_skb(skb); + } return ret; } @@ -313,7 +315,7 @@ batadv_frag_merge_packets(struct hlist_head *chain) * * There are three possible outcomes: 1) Packet is merged: Return true and * set *skb to merged packet; 2) Packet is buffered: Return true and set *skb - * to NULL; 3) Error: Return false and leave skb as is. + * to NULL; 3) Error: Return false and free skb. * * Return: true when packet is merged or buffered, false when skb is not not * used. @@ -338,9 +340,9 @@ bool batadv_frag_skb_buffer(struct sk_buff **skb, goto out_err; out: - *skb = skb_out; ret = true; out_err: + *skb = skb_out; return ret; } @@ -402,7 +404,7 @@ bool batadv_frag_skb_fwd(struct sk_buff *skb, * batadv_frag_create - create a fragment from skb * @skb: skb to create fragment from * @frag_head: header to use in new fragment - * @mtu: size of new fragment + * @fragment_size: size of new fragment * * Split the passed skb into two fragments: A new one with size matching the * passed mtu and the old one with the rest. 
The new skb contains data from the @@ -412,11 +414,11 @@ bool batadv_frag_skb_fwd(struct sk_buff *skb, */ static struct sk_buff *batadv_frag_create(struct sk_buff *skb, struct batadv_frag_packet *frag_head, - unsigned int mtu) + unsigned int fragment_size) { struct sk_buff *skb_fragment; unsigned int header_size = sizeof(*frag_head); - unsigned int fragment_size = mtu - header_size; + unsigned int mtu = fragment_size + header_size; skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN); if (!skb_fragment) @@ -454,7 +456,7 @@ int batadv_frag_send_packet(struct sk_buff *skb, struct sk_buff *skb_fragment; unsigned int mtu = neigh_node->if_incoming->net_dev->mtu; unsigned int header_size = sizeof(frag_header); - unsigned int max_fragment_size, max_packet_size; + unsigned int max_fragment_size, num_fragments; int ret; /* To avoid merge and refragmentation at next-hops we never send @@ -462,10 +464,15 @@ int batadv_frag_send_packet(struct sk_buff *skb, */ mtu = min_t(unsigned int, mtu, BATADV_FRAG_MAX_FRAG_SIZE); max_fragment_size = mtu - header_size; - max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS; + + if (skb->len == 0 || max_fragment_size == 0) + return -EINVAL; + + num_fragments = (skb->len - 1) / max_fragment_size + 1; + max_fragment_size = (skb->len - 1) / num_fragments + 1; /* Don't even try to fragment, if we need more than 16 fragments */ - if (skb->len > max_packet_size) { + if (num_fragments > BATADV_FRAG_MAX_FRAGMENTS) { ret = -EAGAIN; goto free_skb; } @@ -499,7 +506,14 @@ int batadv_frag_send_packet(struct sk_buff *skb, /* Eat and send fragments from the tail of skb */ while (skb->len > max_fragment_size) { - skb_fragment = batadv_frag_create(skb, &frag_header, mtu); + /* The initial check in this function should cover this case */ + if (unlikely(frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1)) { + ret = -EINVAL; + goto put_primary_if; + } + + skb_fragment = batadv_frag_create(skb, &frag_header, + max_fragment_size); if (!skb_fragment) { ret = -ENOMEM; goto put_primary_if; @@ -515,12 +529,6 @@ int batadv_frag_send_packet(struct sk_buff *skb, } frag_header.no++; - - /* The initial check in this function should cover this case */ - if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1) { - ret = -EINVAL; - goto put_primary_if; - } } /* Make room for the fragment header. 
*/ diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c index 5db2e43e3775ef..33940c5c74a873 100644 --- a/net/batman-adv/gateway_common.c +++ b/net/batman-adv/gateway_common.c @@ -253,6 +253,11 @@ static void batadv_gw_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv, */ void batadv_gw_init(struct batadv_priv *bat_priv) { + if (bat_priv->algo_ops->gw.init_sel_class) + bat_priv->algo_ops->gw.init_sel_class(bat_priv); + else + atomic_set(&bat_priv->gw.sel_class, 1); + batadv_tvlv_handler_register(bat_priv, batadv_gw_tvlv_ogm_handler_v1, NULL, BATADV_TVLV_GW, 1, BATADV_TVLV_HANDLER_OGM_CIFNOTFND); diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 5d099b2e6cfccb..d042c99af028e2 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -819,7 +819,6 @@ static int batadv_softif_init_late(struct net_device *dev) atomic_set(&bat_priv->mcast.num_want_all_ipv6, 0); #endif atomic_set(&bat_priv->gw.mode, BATADV_GW_MODE_OFF); - atomic_set(&bat_priv->gw.sel_class, 20); atomic_set(&bat_priv->gw.bandwidth_down, 100); atomic_set(&bat_priv->gw.bandwidth_up, 20); atomic_set(&bat_priv->orig_interval, 1000); diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index 8f64a5c013454a..246f21b4973bc3 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -402,7 +402,7 @@ struct batadv_gw_node { struct rcu_head rcu; }; -DECLARE_EWMA(throughput, 1024, 8) +DECLARE_EWMA(throughput, 10, 8) /** * struct batadv_hardif_neigh_node_bat_v - B.A.T.M.A.N. V private neighbor @@ -1489,6 +1489,7 @@ struct batadv_algo_orig_ops { /** * struct batadv_algo_gw_ops - mesh algorithm callbacks (GW specific) + * @init_sel_class: initialize GW selection class (optional) * @store_sel_class: parse and stores a new GW selection class (optional) * @show_sel_class: prints the current GW selection class (optional) * @get_best_gw_node: select the best GW from the list of available nodes @@ -1499,6 +1500,7 @@ struct batadv_algo_orig_ops { * @dump: dump gateways to a netlink socket (optional) */ struct batadv_algo_gw_ops { + void (*init_sel_class)(struct batadv_priv *bat_priv); ssize_t (*store_sel_class)(struct batadv_priv *bat_priv, char *buff, size_t count); ssize_t (*show_sel_class)(struct batadv_priv *bat_priv, char *buff); diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index cfb2faba46ded9..69e1f7d362a8b7 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -27,6 +27,8 @@ #include #include #include +#include + #include #include diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c index 46ac686c8911e7..bb308224099c47 100644 --- a/net/bluetooth/cmtp/capi.c +++ b/net/bluetooth/cmtp/capi.c @@ -26,7 +26,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 1015d9c8d97ddb..b5faff458d8bea 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -21,6 +21,8 @@ SOFTWARE IS DISCLAIMED. 
*/ +#include + #include #include #include diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index a8ba752732c985..507b80d59dec4f 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -29,6 +29,7 @@ #include #include +#include #include #include @@ -300,7 +301,7 @@ static int l2cap_sock_listen(struct socket *sock, int backlog) } static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, - int flags) + int flags, bool kern) { DEFINE_WAIT_FUNC(wait, woken_wake_function); struct sock *sk = sock->sk, *nsk; diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 7511df72347f30..ac3c650cb234f9 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -27,6 +27,7 @@ #include #include +#include #include #include @@ -470,7 +471,8 @@ static int rfcomm_sock_listen(struct socket *sock, int backlog) return err; } -static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags) +static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags, + bool kern) { DEFINE_WAIT_FUNC(wait, woken_wake_function); struct sock *sk = sock->sk, *nsk; diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 3125ce670c2f24..728e0c8dc8e74c 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -626,7 +627,7 @@ static int sco_sock_listen(struct socket *sock, int backlog) } static int sco_sock_accept(struct socket *sock, struct socket *newsock, - int flags) + int flags, bool kern) { DEFINE_WAIT_FUNC(wait, woken_wake_function); struct sock *sk = sock->sk, *ch; diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 4f598dc2d9168c..6e08b7199dd744 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -106,7 +106,7 @@ static struct net_bridge_fdb_entry *br_fdb_find(struct net_bridge *br, struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)]; struct net_bridge_fdb_entry *fdb; - WARN_ON_ONCE(!br_hash_lock_held(br)); + lockdep_assert_held_once(&br->hash_lock); rcu_read_lock(); fdb = fdb_find_rcu(head, addr, vid); diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 6bfac29318f21e..902af6ba481c99 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -186,8 +186,9 @@ void br_flood(struct net_bridge *br, struct sk_buff *skb, /* Do not flood unicast traffic to ports that turn it off */ if (pkt_type == BR_PKT_UNICAST && !(p->flags & BR_FLOOD)) continue; + /* Do not flood if mc off, except for traffic we originate */ if (pkt_type == BR_PKT_MULTICAST && - !(p->flags & BR_MCAST_FLOOD)) + !(p->flags & BR_MCAST_FLOOD) && skb->dev != br->dev) continue; /* Do not flood to ports that enable proxy ARP */ diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 236f34244dbe1f..013f2290bfa56d 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -30,6 +30,7 @@ EXPORT_SYMBOL(br_should_route_hook); static int br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb) { + br_drop_fake_rtable(skb); return netif_receive_skb(skb); } diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index 95087e6e825836..1f1e62095464f9 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c @@ -521,21 +521,6 @@ static unsigned int br_nf_pre_routing(void *priv, } -/* PF_BRIDGE/LOCAL_IN ************************************************/ -/* The packet is locally destined, which requires a real - * dst_entry, so 
detach the fake one. On the way up, the - * packet would pass through PRE_ROUTING again (which already - * took place when the packet entered the bridge), but we - * register an IPv4 PRE_ROUTING 'sabotage' hook that will - * prevent this from happening. */ -static unsigned int br_nf_local_in(void *priv, - struct sk_buff *skb, - const struct nf_hook_state *state) -{ - br_drop_fake_rtable(skb); - return NF_ACCEPT; -} - /* PF_BRIDGE/FORWARD *************************************************/ static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb) { @@ -721,18 +706,20 @@ static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb) static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) { - struct nf_bridge_info *nf_bridge; - unsigned int mtu_reserved; + struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); + unsigned int mtu, mtu_reserved; mtu_reserved = nf_bridge_mtu_reduction(skb); + mtu = skb->dev->mtu; - if (skb_is_gso(skb) || skb->len + mtu_reserved <= skb->dev->mtu) { + if (nf_bridge->frag_max_size && nf_bridge->frag_max_size < mtu) + mtu = nf_bridge->frag_max_size; + + if (skb_is_gso(skb) || skb->len + mtu_reserved <= mtu) { nf_bridge_info_free(skb); return br_dev_queue_push_xmit(net, sk, skb); } - nf_bridge = nf_bridge_info_get(skb); - /* This is wrong! We should preserve the original fragment * boundaries by preserving frag_list rather than refragmenting. */ @@ -907,12 +894,6 @@ static struct nf_hook_ops br_nf_ops[] __read_mostly = { .hooknum = NF_BR_PRE_ROUTING, .priority = NF_BR_PRI_BRNF, }, - { - .hook = br_nf_local_in, - .pf = NFPROTO_BRIDGE, - .hooknum = NF_BR_LOCAL_IN, - .priority = NF_BR_PRI_BRNF, - }, { .hook = br_nf_forward_ip, .pf = NFPROTO_BRIDGE, diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 2288fca7756c51..61368186edea53 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -531,15 +531,6 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p, int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p, const unsigned char *addr, u16 vid); -static inline bool br_hash_lock_held(struct net_bridge *br) -{ -#ifdef CONFIG_LOCKDEP - return lockdep_is_held(&br->hash_lock); -#else - return true; -#endif -} - /* br_forward.c */ enum br_pkt_type { BR_PKT_UNICAST, diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c index 0f4034934d56f7..0b5dd607444c71 100644 --- a/net/bridge/br_sysfs_br.c +++ b/net/bridge/br_sysfs_br.c @@ -19,6 +19,7 @@ #include #include #include +#include #include "br_private.h" diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c index 05e8946ccc0355..79aee759aba590 100644 --- a/net/bridge/br_sysfs_if.c +++ b/net/bridge/br_sysfs_if.c @@ -17,6 +17,7 @@ #include #include #include +#include #include "br_private.h" diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index 62e68c0dc68740..b838213c408e24 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -997,10 +997,10 @@ int nbp_vlan_init(struct net_bridge_port *p) RCU_INIT_POINTER(p->vlgrp, NULL); synchronize_rcu(); vlan_tunnel_deinit(vg); -err_vlan_enabled: err_tunnel_init: rhashtable_destroy(&vg->vlan_hash); err_rhtbl: +err_vlan_enabled: kfree(vg); goto out; diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 92cbbd2afddbf1..adcad344c84398 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -9,7 +9,7 @@ #include #include #include -#include +#include #include #include #include diff 
--git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c index 464e88599b9d29..108533859a5329 100644 --- a/net/ceph/ceph_common.c +++ b/net/ceph/ceph_common.c @@ -230,6 +230,7 @@ enum { Opt_osdkeepalivetimeout, Opt_mount_timeout, Opt_osd_idle_ttl, + Opt_osd_request_timeout, Opt_last_int, /* int args above */ Opt_fsid, @@ -256,6 +257,7 @@ static match_table_t opt_tokens = { {Opt_osdkeepalivetimeout, "osdkeepalive=%d"}, {Opt_mount_timeout, "mount_timeout=%d"}, {Opt_osd_idle_ttl, "osd_idle_ttl=%d"}, + {Opt_osd_request_timeout, "osd_request_timeout=%d"}, /* int args above */ {Opt_fsid, "fsid=%s"}, {Opt_name, "name=%s"}, @@ -361,6 +363,7 @@ ceph_parse_options(char *options, const char *dev_name, opt->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT; opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT; + opt->osd_request_timeout = CEPH_OSD_REQUEST_TIMEOUT_DEFAULT; /* get mon ip(s) */ /* ip1[:port1][,ip2[:port2]...] */ @@ -473,6 +476,15 @@ ceph_parse_options(char *options, const char *dev_name, } opt->mount_timeout = msecs_to_jiffies(intval * 1000); break; + case Opt_osd_request_timeout: + /* 0 is "wait forever" (i.e. infinite timeout) */ + if (intval < 0 || intval > INT_MAX / 1000) { + pr_err("osd_request_timeout out of range\n"); + err = -EINVAL; + goto out; + } + opt->osd_request_timeout = msecs_to_jiffies(intval * 1000); + break; case Opt_share: opt->flags &= ~CEPH_OPT_NOSHARE; @@ -557,6 +569,9 @@ int ceph_print_client_options(struct seq_file *m, struct ceph_client *client) if (opt->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT) seq_printf(m, "osdkeepalivetimeout=%d,", jiffies_to_msecs(opt->osd_keepalive_timeout) / 1000); + if (opt->osd_request_timeout != CEPH_OSD_REQUEST_TIMEOUT_DEFAULT) + seq_printf(m, "osd_request_timeout=%d,", + jiffies_to_msecs(opt->osd_request_timeout) / 1000); /* drop redundant comma */ if (m->count != pos) diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c index 85747b7f91a918..46008d5ac504cd 100644 --- a/net/ceph/crypto.c +++ b/net/ceph/crypto.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index bad3d4ae43f6e9..f76bb333261384 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -469,11 +470,16 @@ static int ceph_tcp_connect(struct ceph_connection *con) { struct sockaddr_storage *paddr = &con->peer_addr.in_addr; struct socket *sock; + unsigned int noio_flag; int ret; BUG_ON(con->sock); + + /* sock_create_kern() allocates with GFP_KERNEL */ + noio_flag = memalloc_noio_save(); ret = sock_create_kern(read_pnet(&con->msgr->net), paddr->ss_family, SOCK_STREAM, IPPROTO_TCP, &sock); + memalloc_noio_restore(noio_flag); if (ret) return ret; sock->sk->sk_allocation = GFP_NOFS; @@ -520,7 +526,8 @@ static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len) struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; int r; - r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags); + iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, len); + r = sock_recvmsg(sock, &msg, msg.msg_flags); if (r == -EAGAIN) r = 0; return r; @@ -529,17 +536,20 @@ static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len) static int ceph_tcp_recvpage(struct socket *sock, struct page *page, int page_offset, size_t length) { - void *kaddr; - int ret; + struct bio_vec bvec = { + .bv_page = page, + .bv_offset = page_offset, + .bv_len = length + }; 
+ struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; + int r; BUG_ON(page_offset + length > PAGE_SIZE); - - kaddr = kmap(page); - BUG_ON(!kaddr); - ret = ceph_tcp_recvmsg(sock, kaddr + page_offset, length); - kunmap(page); - - return ret; + iov_iter_bvec(&msg.msg_iter, READ | ITER_BVEC, &bvec, 1, length); + r = sock_recvmsg(sock, &msg, msg.msg_flags); + if (r == -EAGAIN) + r = 0; + return r; } /* @@ -579,18 +589,28 @@ static int __ceph_tcp_sendpage(struct socket *sock, struct page *page, static int ceph_tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, bool more) { + struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; + struct bio_vec bvec; int ret; - struct kvec iov; /* sendpage cannot properly handle pages with page_count == 0, * we need to fallback to sendmsg if that's the case */ if (page_count(page) >= 1) return __ceph_tcp_sendpage(sock, page, offset, size, more); - iov.iov_base = kmap(page) + offset; - iov.iov_len = size; - ret = ceph_tcp_sendmsg(sock, &iov, 1, size, more); - kunmap(page); + bvec.bv_page = page; + bvec.bv_offset = offset; + bvec.bv_len = size; + + if (more) + msg.msg_flags |= MSG_MORE; + else + msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */ + + iov_iter_bvec(&msg.msg_iter, WRITE | ITER_BVEC, &bvec, 1, size); + ret = sock_sendmsg(sock, &msg); + if (ret == -EAGAIN) + ret = 0; return ret; } diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index b65bbf9f45ebb2..e15ea9e4c4955f 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -1709,6 +1709,8 @@ static void account_request(struct ceph_osd_request *req) req->r_flags |= CEPH_OSD_FLAG_ONDISK; atomic_inc(&req->r_osdc->num_requests); + + req->r_start_stamp = jiffies; } static void submit_request(struct ceph_osd_request *req, bool wrlocked) @@ -1789,6 +1791,14 @@ static void cancel_request(struct ceph_osd_request *req) ceph_osdc_put_request(req); } +static void abort_request(struct ceph_osd_request *req, int err) +{ + dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err); + + cancel_map_check(req); + complete_request(req, err); +} + static void check_pool_dne(struct ceph_osd_request *req) { struct ceph_osd_client *osdc = req->r_osdc; @@ -2487,6 +2497,7 @@ static void handle_timeout(struct work_struct *work) container_of(work, struct ceph_osd_client, timeout_work.work); struct ceph_options *opts = osdc->client->options; unsigned long cutoff = jiffies - opts->osd_keepalive_timeout; + unsigned long expiry_cutoff = jiffies - opts->osd_request_timeout; LIST_HEAD(slow_osds); struct rb_node *n, *p; @@ -2502,15 +2513,23 @@ static void handle_timeout(struct work_struct *work) struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); bool found = false; - for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) { + for (p = rb_first(&osd->o_requests); p; ) { struct ceph_osd_request *req = rb_entry(p, struct ceph_osd_request, r_node); + p = rb_next(p); /* abort_request() */ + if (time_before(req->r_stamp, cutoff)) { dout(" req %p tid %llu on osd%d is laggy\n", req, req->r_tid, osd->o_osd); found = true; } + if (opts->osd_request_timeout && + time_before(req->r_start_stamp, expiry_cutoff)) { + pr_err_ratelimited("tid %llu on osd%d timeout\n", + req->r_tid, osd->o_osd); + abort_request(req, -ETIMEDOUT); + } } for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) { struct ceph_osd_linger_request *lreq = @@ -2530,6 +2549,21 @@ static void handle_timeout(struct work_struct *work) list_move_tail(&osd->o_keepalive_item, 
&slow_osds); } + if (opts->osd_request_timeout) { + for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) { + struct ceph_osd_request *req = + rb_entry(p, struct ceph_osd_request, r_node); + + p = rb_next(p); /* abort_request() */ + + if (time_before(req->r_start_stamp, expiry_cutoff)) { + pr_err_ratelimited("tid %llu on osd%d timeout\n", + req->r_tid, osdc->homeless_osd.o_osd); + abort_request(req, -ETIMEDOUT); + } + } + } + if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds)) maybe_request_map(osdc); diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c index 6824c0ec8373e7..ffe9e904d4d1d1 100644 --- a/net/ceph/osdmap.c +++ b/net/ceph/osdmap.c @@ -390,9 +390,8 @@ static struct crush_map *crush_decode(void *pbyval, void *end) dout("crush decode tunable chooseleaf_stable = %d\n", c->chooseleaf_stable); - crush_finalize(c); - done: + crush_finalize(c); dout("crush_decode success\n"); return c; @@ -1380,7 +1379,6 @@ static int decode_new_up_state_weight(void **p, void *end, if ((map->osd_state[osd] & CEPH_OSD_EXISTS) && (xorstate & CEPH_OSD_EXISTS)) { pr_info("osd%d does not exist\n", osd); - map->osd_weight[osd] = CEPH_OSD_IN; ret = set_primary_affinity(map, osd, CEPH_OSD_DEFAULT_PRIMARY_AFFINITY); if (ret) diff --git a/net/core/dev.c b/net/core/dev.c index 304f2deae5f989..7869ae3837ca74 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1304,6 +1304,7 @@ void netdev_notify_peers(struct net_device *dev) { rtnl_lock(); call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev); + call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev); rtnl_unlock(); } EXPORT_SYMBOL(netdev_notify_peers); @@ -1698,27 +1699,54 @@ EXPORT_SYMBOL_GPL(net_dec_egress_queue); static struct static_key netstamp_needed __read_mostly; #ifdef HAVE_JUMP_LABEL static atomic_t netstamp_needed_deferred; +static atomic_t netstamp_wanted; static void netstamp_clear(struct work_struct *work) { int deferred = atomic_xchg(&netstamp_needed_deferred, 0); + int wanted; - while (deferred--) - static_key_slow_dec(&netstamp_needed); + wanted = atomic_add_return(deferred, &netstamp_wanted); + if (wanted > 0) + static_key_enable(&netstamp_needed); + else + static_key_disable(&netstamp_needed); } static DECLARE_WORK(netstamp_work, netstamp_clear); #endif void net_enable_timestamp(void) { +#ifdef HAVE_JUMP_LABEL + int wanted; + + while (1) { + wanted = atomic_read(&netstamp_wanted); + if (wanted <= 0) + break; + if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted) + return; + } + atomic_inc(&netstamp_needed_deferred); + schedule_work(&netstamp_work); +#else static_key_slow_inc(&netstamp_needed); +#endif } EXPORT_SYMBOL(net_enable_timestamp); void net_disable_timestamp(void) { #ifdef HAVE_JUMP_LABEL - /* net_disable_timestamp() can be called from non process context */ - atomic_inc(&netstamp_needed_deferred); + int wanted; + + while (1) { + wanted = atomic_read(&netstamp_wanted); + if (wanted <= 1) + break; + if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted) + return; + } + atomic_dec(&netstamp_needed_deferred); schedule_work(&netstamp_work); #else static_key_slow_dec(&netstamp_needed); @@ -4883,6 +4911,39 @@ void __napi_schedule(struct napi_struct *n) } EXPORT_SYMBOL(__napi_schedule); +/** + * napi_schedule_prep - check if napi can be scheduled + * @n: napi context + * + * Test if NAPI routine is already running, and if not mark + * it as running. This is used as a condition variable + * insure only one NAPI poll instance runs. We also make + * sure there is no pending NAPI disable. 
+ */ +bool napi_schedule_prep(struct napi_struct *n) +{ + unsigned long val, new; + + do { + val = READ_ONCE(n->state); + if (unlikely(val & NAPIF_STATE_DISABLE)) + return false; + new = val | NAPIF_STATE_SCHED; + + /* Sets STATE_MISSED bit if STATE_SCHED was already set + * This was suggested by Alexander Duyck, as compiler + * emits better code than : + * if (val & NAPIF_STATE_SCHED) + * new |= NAPIF_STATE_MISSED; + */ + new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED * + NAPIF_STATE_MISSED; + } while (cmpxchg(&n->state, val, new) != val); + + return !(val & NAPIF_STATE_SCHED); +} +EXPORT_SYMBOL(napi_schedule_prep); + /** * __napi_schedule_irqoff - schedule for receive * @n: entry to schedule @@ -4897,7 +4958,7 @@ EXPORT_SYMBOL(__napi_schedule_irqoff); bool napi_complete_done(struct napi_struct *n, int work_done) { - unsigned long flags; + unsigned long flags, val, new; /* * 1) Don't let napi dequeue from the cpu poll list @@ -4927,7 +4988,27 @@ bool napi_complete_done(struct napi_struct *n, int work_done) list_del_init(&n->poll_list); local_irq_restore(flags); } - WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state)); + + do { + val = READ_ONCE(n->state); + + WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED)); + + new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED); + + /* If STATE_MISSED was set, leave STATE_SCHED set, + * because we will call napi->poll() one more time. + * This C code was suggested by Alexander Duyck to help gcc. + */ + new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED * + NAPIF_STATE_SCHED; + } while (cmpxchg(&n->state, val, new) != val); + + if (unlikely(val & NAPIF_STATE_MISSED)) { + __napi_schedule(n); + return false; + } + return true; } EXPORT_SYMBOL(napi_complete_done); @@ -4953,6 +5034,16 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock) { int rc; + /* Busy polling means there is a high chance device driver hard irq + * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was + * set in napi_schedule_prep(). + * Since we are about to call napi->poll() once more, we can safely + * clear NAPI_STATE_MISSED. + * + * Note: x86 could use a single "lock and ..." instruction + * to perform these two clear_bit() + */ + clear_bit(NAPI_STATE_MISSED, &napi->state); clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state); local_bh_disable(); @@ -5088,8 +5179,13 @@ static enum hrtimer_restart napi_watchdog(struct hrtimer *timer) struct napi_struct *napi; napi = container_of(timer, struct napi_struct, timer); - if (napi->gro_list) - napi_schedule_irqoff(napi); + + /* Note : we use a relaxed variant of napi_schedule_prep() not setting + * NAPI_STATE_MISSED, since we do not react to a device IRQ. 
+ */ + if (napi->gro_list && !napi_disable_pending(napi) && + !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) + __napi_schedule_irqoff(napi); return HRTIMER_NORESTART; } diff --git a/net/core/ethtool.c b/net/core/ethtool.c index be7bab1adcde3d..aecb2c7241b697 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include /* diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index c35aae13c8d226..d98d4998213da6 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -390,7 +390,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb, unsigned char ar_tip[4]; } *arp_eth, _arp_eth; const struct arphdr *arp; - struct arphdr *_arp; + struct arphdr _arp; arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data, hlen, &_arp); diff --git a/net/core/neighbour.c b/net/core/neighbour.c index e7c12caa20c88a..4526cbd7e28a1f 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -860,7 +860,8 @@ static void neigh_probe(struct neighbour *neigh) if (skb) skb = skb_clone(skb, GFP_ATOMIC); write_unlock(&neigh->lock); - neigh->ops->solicit(neigh, skb); + if (neigh->ops->solicit) + neigh->ops->solicit(neigh, skb); atomic_inc(&neigh->probes); kfree_skb(skb); } diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index b0c04cf4851d67..65ea0ff4017c16 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -952,7 +953,7 @@ net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num) while (--i >= new_num) { struct kobject *kobj = &dev->_rx[i].kobj; - if (!list_empty(&dev_net(dev)->exit_list)) + if (!atomic_read(&dev_net(dev)->count)) kobj->uevent_suppress = 1; if (dev->sysfs_rx_queue_group) sysfs_remove_group(kobj, dev->sysfs_rx_queue_group); @@ -1370,7 +1371,7 @@ netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num) while (--i >= new_num) { struct netdev_queue *queue = dev->_tx + i; - if (!list_empty(&dev_net(dev)->exit_list)) + if (!atomic_read(&dev_net(dev)->count)) queue->kobj.uevent_suppress = 1; #ifdef CONFIG_BQL sysfs_remove_group(&queue->kobj, &dql_group); @@ -1557,7 +1558,7 @@ void netdev_unregister_kobject(struct net_device *ndev) { struct device *dev = &(ndev->dev); - if (!list_empty(&dev_net(ndev)->exit_list)) + if (!atomic_read(&dev_net(ndev)->count)) dev_set_uevent_suppress(dev, 1); kobject_get(&dev->kobj); diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 3c4bbec3971309..652468ff65b79d 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -16,6 +16,8 @@ #include #include #include +#include + #include #include #include diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c index 11fce17274f6ce..029a61ac6cdd8a 100644 --- a/net/core/netclassid_cgroup.c +++ b/net/core/netclassid_cgroup.c @@ -12,6 +12,8 @@ #include #include #include +#include + #include #include @@ -69,27 +71,17 @@ static int update_classid_sock(const void *v, struct file *file, unsigned n) return 0; } -static void update_classid(struct cgroup_subsys_state *css, void *v) +static void cgrp_attach(struct cgroup_taskset *tset) { - struct css_task_iter it; + struct cgroup_subsys_state *css; struct task_struct *p; - css_task_iter_start(css, &it); - while ((p = css_task_iter_next(&it))) { + cgroup_taskset_for_each(p, css, tset) { task_lock(p); - iterate_fd(p->files, 0, update_classid_sock, v); + iterate_fd(p->files, 0, update_classid_sock, 
+ (void *)(unsigned long)css_cls_state(css)->classid); task_unlock(p); } - css_task_iter_end(&it); -} - -static void cgrp_attach(struct cgroup_taskset *tset) -{ - struct cgroup_subsys_state *css; - - cgroup_taskset_first(tset, &css); - update_classid(css, - (void *)(unsigned long)css_cls_state(css)->classid); } static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft) @@ -101,12 +93,22 @@ static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft, u64 value) { struct cgroup_cls_state *cs = css_cls_state(css); + struct css_task_iter it; + struct task_struct *p; cgroup_sk_alloc_disable(); cs->classid = (u32)value; - update_classid(css, (void *)(unsigned long)cs->classid); + css_task_iter_start(css, &it); + while ((p = css_task_iter_next(&it))) { + task_lock(p); + iterate_fd(p->files, 0, update_classid_sock, + (void *)(unsigned long)cs->classid); + task_unlock(p); + } + css_task_iter_end(&it); + return 0; } diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c index 756637dc7a5769..0f9275ee559581 100644 --- a/net/core/netprio_cgroup.c +++ b/net/core/netprio_cgroup.c @@ -20,6 +20,8 @@ #include #include #include +#include + #include #include #include diff --git a/net/core/scm.c b/net/core/scm.c index b6d83686e1496d..b1ff8a4417489f 100644 --- a/net/core/scm.c +++ b/net/core/scm.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c index 758f140b6bedc5..d28da7d363f170 100644 --- a/net/core/secure_seq.c +++ b/net/core/secure_seq.c @@ -20,9 +20,11 @@ #include static siphash_key_t net_secret __read_mostly; +static siphash_key_t ts_secret __read_mostly; static __always_inline void net_secret_init(void) { + net_get_random_once(&ts_secret, sizeof(ts_secret)); net_get_random_once(&net_secret, sizeof(net_secret)); } #endif @@ -45,6 +47,23 @@ static u32 seq_scale(u32 seq) #endif #if IS_ENABLED(CONFIG_IPV6) +static u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr) +{ + const struct { + struct in6_addr saddr; + struct in6_addr daddr; + } __aligned(SIPHASH_ALIGNMENT) combined = { + .saddr = *(struct in6_addr *)saddr, + .daddr = *(struct in6_addr *)daddr, + }; + + if (sysctl_tcp_timestamps != 1) + return 0; + + return siphash(&combined, offsetofend(typeof(combined), daddr), + &ts_secret); +} + u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr, __be16 sport, __be16 dport, u32 *tsoff) { @@ -63,7 +82,7 @@ u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr, net_secret_init(); hash = siphash(&combined, offsetofend(typeof(combined), dport), &net_secret); - *tsoff = sysctl_tcp_timestamps == 1 ? (hash >> 32) : 0; + *tsoff = secure_tcpv6_ts_off(saddr, daddr); return seq_scale(hash); } EXPORT_SYMBOL(secure_tcpv6_sequence_number); @@ -88,6 +107,14 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral); #endif #ifdef CONFIG_INET +static u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr) +{ + if (sysctl_tcp_timestamps != 1) + return 0; + + return siphash_2u32((__force u32)saddr, (__force u32)daddr, + &ts_secret); +} /* secure_tcp_sequence_number(a, b, 0, d) == secure_ipv4_port_ephemeral(a, b, d), * but fortunately, `sport' cannot be 0 in any circumstances. If this changes, @@ -103,7 +130,7 @@ u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr, hash = siphash_3u32((__force u32)saddr, (__force u32)daddr, (__force u32)sport << 16 | (__force u32)dport, &net_secret); - *tsoff = sysctl_tcp_timestamps == 1 ? 
(hash >> 32) : 0; + *tsoff = secure_tcp_ts_off(saddr, daddr); return seq_scale(hash); } diff --git a/net/core/skbuff.c b/net/core/skbuff.c index f3557958e9bf14..9f781092fda9cb 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -3694,6 +3694,15 @@ static void sock_rmem_free(struct sk_buff *skb) atomic_sub(skb->truesize, &sk->sk_rmem_alloc); } +static void skb_set_err_queue(struct sk_buff *skb) +{ + /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING. + * So, it is safe to (mis)use it to mark skbs on the error queue. + */ + skb->pkt_type = PACKET_OUTGOING; + BUILD_BUG_ON(PACKET_OUTGOING == 0); +} + /* * Note: We dont mem charge error packets (no sk_forward_alloc changes) */ @@ -3707,6 +3716,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) skb->sk = sk; skb->destructor = sock_rmem_free; atomic_add(skb->truesize, &sk->sk_rmem_alloc); + skb_set_err_queue(skb); /* before exiting rcu section, make sure dst is refcounted */ skb_dst_force(skb); @@ -3783,16 +3793,20 @@ EXPORT_SYMBOL(skb_clone_sk); static void __skb_complete_tx_timestamp(struct sk_buff *skb, struct sock *sk, - int tstype) + int tstype, + bool opt_stats) { struct sock_exterr_skb *serr; int err; + BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb)); + serr = SKB_EXT_ERR(skb); memset(serr, 0, sizeof(*serr)); serr->ee.ee_errno = ENOMSG; serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; serr->ee.ee_info = tstype; + serr->opt_stats = opt_stats; if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) { serr->ee.ee_data = skb_shinfo(skb)->tskey; if (sk->sk_protocol == IPPROTO_TCP && @@ -3828,13 +3842,14 @@ void skb_complete_tx_timestamp(struct sk_buff *skb, if (!skb_may_tx_timestamp(sk, false)) return; - /* take a reference to prevent skb_orphan() from freeing the socket */ - sock_hold(sk); - - *skb_hwtstamps(skb) = *hwtstamps; - __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND); - - sock_put(sk); + /* Take a reference to prevent skb_orphan() from freeing the socket, + * but only if the socket refcount is not zero. 
+ */ + if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) { + *skb_hwtstamps(skb) = *hwtstamps; + __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); + sock_put(sk); + } } EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); @@ -3843,7 +3858,7 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb, struct sock *sk, int tstype) { struct sk_buff *skb; - bool tsonly; + bool tsonly, opt_stats = false; if (!sk) return; @@ -3856,9 +3871,10 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb, #ifdef CONFIG_INET if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) && sk->sk_protocol == IPPROTO_TCP && - sk->sk_type == SOCK_STREAM) + sk->sk_type == SOCK_STREAM) { skb = tcp_get_timestamping_opt_stats(sk); - else + opt_stats = true; + } else #endif skb = alloc_skb(0, GFP_ATOMIC); } else { @@ -3877,7 +3893,7 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb, else skb->tstamp = ktime_get_real(); - __skb_complete_tx_timestamp(skb, sk, tstype); + __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats); } EXPORT_SYMBOL_GPL(__skb_tstamp_tx); @@ -3893,7 +3909,7 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) { struct sock *sk = skb->sk; struct sock_exterr_skb *serr; - int err; + int err = 1; skb->wifi_acked_valid = 1; skb->wifi_acked = acked; @@ -3903,14 +3919,15 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) serr->ee.ee_errno = ENOMSG; serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; - /* take a reference to prevent skb_orphan() from freeing the socket */ - sock_hold(sk); - - err = sock_queue_err_skb(sk, skb); + /* Take a reference to prevent skb_orphan() from freeing the socket, + * but only if the socket refcount is not zero. + */ + if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) { + err = sock_queue_err_skb(sk, skb); + sock_put(sk); + } if (err) kfree_skb(skb); - - sock_put(sk); } EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); diff --git a/net/core/sock.c b/net/core/sock.c index e7d74940e8637d..2c4f574168fbdc 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -197,66 +197,55 @@ EXPORT_SYMBOL(sk_net_capable); /* * Each address family might have different locking rules, so we have - * one slock key per address family: + * one slock key per address family and separate keys for internal and + * userspace sockets. */ static struct lock_class_key af_family_keys[AF_MAX]; +static struct lock_class_key af_family_kern_keys[AF_MAX]; static struct lock_class_key af_family_slock_keys[AF_MAX]; +static struct lock_class_key af_family_kern_slock_keys[AF_MAX]; /* * Make lock validator output more readable. 
(we pre-construct these * strings build-time, so that runtime initialization of socket * locks is fast): */ + +#define _sock_locks(x) \ + x "AF_UNSPEC", x "AF_UNIX" , x "AF_INET" , \ + x "AF_AX25" , x "AF_IPX" , x "AF_APPLETALK", \ + x "AF_NETROM", x "AF_BRIDGE" , x "AF_ATMPVC" , \ + x "AF_X25" , x "AF_INET6" , x "AF_ROSE" , \ + x "AF_DECnet", x "AF_NETBEUI" , x "AF_SECURITY" , \ + x "AF_KEY" , x "AF_NETLINK" , x "AF_PACKET" , \ + x "AF_ASH" , x "AF_ECONET" , x "AF_ATMSVC" , \ + x "AF_RDS" , x "AF_SNA" , x "AF_IRDA" , \ + x "AF_PPPOX" , x "AF_WANPIPE" , x "AF_LLC" , \ + x "27" , x "28" , x "AF_CAN" , \ + x "AF_TIPC" , x "AF_BLUETOOTH", x "IUCV" , \ + x "AF_RXRPC" , x "AF_ISDN" , x "AF_PHONET" , \ + x "AF_IEEE802154", x "AF_CAIF" , x "AF_ALG" , \ + x "AF_NFC" , x "AF_VSOCK" , x "AF_KCM" , \ + x "AF_QIPCRTR", x "AF_SMC" , x "AF_MAX" + static const char *const af_family_key_strings[AF_MAX+1] = { - "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX" , "sk_lock-AF_INET" , - "sk_lock-AF_AX25" , "sk_lock-AF_IPX" , "sk_lock-AF_APPLETALK", - "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE" , "sk_lock-AF_ATMPVC" , - "sk_lock-AF_X25" , "sk_lock-AF_INET6" , "sk_lock-AF_ROSE" , - "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI" , "sk_lock-AF_SECURITY" , - "sk_lock-AF_KEY" , "sk_lock-AF_NETLINK" , "sk_lock-AF_PACKET" , - "sk_lock-AF_ASH" , "sk_lock-AF_ECONET" , "sk_lock-AF_ATMSVC" , - "sk_lock-AF_RDS" , "sk_lock-AF_SNA" , "sk_lock-AF_IRDA" , - "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" , - "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" , - "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" , - "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" , - "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" , - "sk_lock-AF_NFC" , "sk_lock-AF_VSOCK" , "sk_lock-AF_KCM" , - "sk_lock-AF_QIPCRTR", "sk_lock-AF_SMC" , "sk_lock-AF_MAX" + _sock_locks("sk_lock-") }; static const char *const af_family_slock_key_strings[AF_MAX+1] = { - "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" , - "slock-AF_AX25" , "slock-AF_IPX" , "slock-AF_APPLETALK", - "slock-AF_NETROM", "slock-AF_BRIDGE" , "slock-AF_ATMPVC" , - "slock-AF_X25" , "slock-AF_INET6" , "slock-AF_ROSE" , - "slock-AF_DECnet", "slock-AF_NETBEUI" , "slock-AF_SECURITY" , - "slock-AF_KEY" , "slock-AF_NETLINK" , "slock-AF_PACKET" , - "slock-AF_ASH" , "slock-AF_ECONET" , "slock-AF_ATMSVC" , - "slock-AF_RDS" , "slock-AF_SNA" , "slock-AF_IRDA" , - "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" , - "slock-27" , "slock-28" , "slock-AF_CAN" , - "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" , - "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" , - "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" , - "slock-AF_NFC" , "slock-AF_VSOCK" ,"slock-AF_KCM" , - "slock-AF_QIPCRTR", "slock-AF_SMC" , "slock-AF_MAX" + _sock_locks("slock-") }; static const char *const af_family_clock_key_strings[AF_MAX+1] = { - "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" , - "clock-AF_AX25" , "clock-AF_IPX" , "clock-AF_APPLETALK", - "clock-AF_NETROM", "clock-AF_BRIDGE" , "clock-AF_ATMPVC" , - "clock-AF_X25" , "clock-AF_INET6" , "clock-AF_ROSE" , - "clock-AF_DECnet", "clock-AF_NETBEUI" , "clock-AF_SECURITY" , - "clock-AF_KEY" , "clock-AF_NETLINK" , "clock-AF_PACKET" , - "clock-AF_ASH" , "clock-AF_ECONET" , "clock-AF_ATMSVC" , - "clock-AF_RDS" , "clock-AF_SNA" , "clock-AF_IRDA" , - "clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC" , - "clock-27" , "clock-28" , "clock-AF_CAN" , - "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" , - 
"clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" , - "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" , - "clock-AF_NFC" , "clock-AF_VSOCK" , "clock-AF_KCM" , - "clock-AF_QIPCRTR", "clock-AF_SMC" , "clock-AF_MAX" + _sock_locks("clock-") +}; + +static const char *const af_family_kern_key_strings[AF_MAX+1] = { + _sock_locks("k-sk_lock-") +}; +static const char *const af_family_kern_slock_key_strings[AF_MAX+1] = { + _sock_locks("k-slock-") +}; +static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = { + _sock_locks("k-clock-") }; /* @@ -264,6 +253,7 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = { * so split the lock classes by using a per-AF key: */ static struct lock_class_key af_callback_keys[AF_MAX]; +static struct lock_class_key af_kern_callback_keys[AF_MAX]; /* Take into consideration the size of the struct sk_buff overhead in the * determination of these values, since that is non-constant across @@ -1293,7 +1283,16 @@ int sock_getsockopt(struct socket *sock, int level, int optname, */ static inline void sock_lock_init(struct sock *sk) { - sock_lock_init_class_and_name(sk, + if (sk->sk_kern_sock) + sock_lock_init_class_and_name( + sk, + af_family_kern_slock_key_strings[sk->sk_family], + af_family_kern_slock_keys + sk->sk_family, + af_family_kern_key_strings[sk->sk_family], + af_family_kern_keys + sk->sk_family); + else + sock_lock_init_class_and_name( + sk, af_family_slock_key_strings[sk->sk_family], af_family_slock_keys + sk->sk_family, af_family_key_strings[sk->sk_family], @@ -1399,6 +1398,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority, * why we need sk_prot_creator -acme */ sk->sk_prot = sk->sk_prot_creator = prot; + sk->sk_kern_sock = kern; sock_lock_init(sk); sk->sk_net_refcnt = kern ? 0 : 1; if (likely(sk->sk_net_refcnt)) @@ -1442,6 +1442,11 @@ static void __sk_destruct(struct rcu_head *head) pr_debug("%s: optmem leakage (%d bytes) detected\n", __func__, atomic_read(&sk->sk_omem_alloc)); + if (sk->sk_frag.page) { + put_page(sk->sk_frag.page); + sk->sk_frag.page = NULL; + } + if (sk->sk_peer_cred) put_cred(sk->sk_peer_cred); put_pid(sk->sk_peer_pid); @@ -1539,11 +1544,13 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) is_charged = sk_filter_charge(newsk, filter); if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) { - /* It is still raw copy of parent, so invalidate - * destructor and make plain sk_free() */ - newsk->sk_destruct = NULL; - bh_unlock_sock(newsk); - sk_free(newsk); + /* We need to make sure that we don't uncharge the new + * socket if we couldn't charge it in the first place + * as otherwise we uncharge the parent's filter. 
+ */ + if (!is_charged) + RCU_INIT_POINTER(newsk->sk_filter, NULL); + sk_free_unlock_clone(newsk); newsk = NULL; goto out; } @@ -1592,6 +1599,16 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) } EXPORT_SYMBOL_GPL(sk_clone_lock); +void sk_free_unlock_clone(struct sock *sk) +{ + /* It is still raw copy of parent, so invalidate + * destructor and make plain sk_free() */ + sk->sk_destruct = NULL; + bh_unlock_sock(sk); + sk_free(sk); +} +EXPORT_SYMBOL_GPL(sk_free_unlock_clone); + void sk_setup_caps(struct sock *sk, struct dst_entry *dst) { u32 max_segs = 1; @@ -2271,7 +2288,8 @@ int sock_no_socketpair(struct socket *sock1, struct socket *sock2) } EXPORT_SYMBOL(sock_no_socketpair); -int sock_no_accept(struct socket *sock, struct socket *newsock, int flags) +int sock_no_accept(struct socket *sock, struct socket *newsock, int flags, + bool kern) { return -EOPNOTSUPP; } @@ -2475,7 +2493,14 @@ void sock_init_data(struct socket *sock, struct sock *sk) } rwlock_init(&sk->sk_callback_lock); - lockdep_set_class_and_name(&sk->sk_callback_lock, + if (sk->sk_kern_sock) + lockdep_set_class_and_name( + &sk->sk_callback_lock, + af_kern_callback_keys + sk->sk_family, + af_family_kern_clock_key_strings[sk->sk_family]); + else + lockdep_set_class_and_name( + &sk->sk_callback_lock, af_callback_keys + sk->sk_family, af_family_clock_key_strings[sk->sk_family]); @@ -2773,11 +2798,6 @@ void sk_common_release(struct sock *sk) sk_refcnt_debug_release(sk); - if (sk->sk_frag.page) { - put_page(sk->sk_frag.page); - sk->sk_frag.page = NULL; - } - sock_put(sk); } EXPORT_SYMBOL(sk_common_release); diff --git a/net/core/stream.c b/net/core/stream.c index f575bcf64af2c3..20231dbb1da0c6 100644 --- a/net/core/stream.c +++ b/net/core/stream.c @@ -13,6 +13,7 @@ */ #include +#include #include #include #include diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 4ead336e14ea0b..7f9cc400eca08c 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -408,14 +408,16 @@ static struct ctl_table net_core_table[] = { .data = &sysctl_net_busy_poll, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = proc_dointvec + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, }, { .procname = "busy_read", .data = &sysctl_net_busy_read, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = proc_dointvec + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, }, #endif #ifdef CONFIG_NET_SCHED diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c index f053198e730c48..5e3a7302f7747e 100644 --- a/net/dccp/ccids/ccid2.c +++ b/net/dccp/ccids/ccid2.c @@ -749,6 +749,7 @@ static void ccid2_hc_tx_exit(struct sock *sk) for (i = 0; i < hc->tx_seqbufc; i++) kfree(hc->tx_seqbuf[i]); hc->tx_seqbufc = 0; + dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks); } static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) diff --git a/net/dccp/input.c b/net/dccp/input.c index 8fedc2d497709b..4a05d78768502d 100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c @@ -577,6 +577,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, struct dccp_sock *dp = dccp_sk(sk); struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); const int old_state = sk->sk_state; + bool acceptable; int queued = 0; /* @@ -603,8 +604,13 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, */ if (sk->sk_state == DCCP_LISTEN) { if (dh->dccph_type == DCCP_PKT_REQUEST) { - if (inet_csk(sk)->icsk_af_ops->conn_request(sk, - skb) < 0) + /* It is possible that we process SYN 
packets from backlog, + * so we need to make sure to disable BH right there. + */ + local_bh_disable(); + acceptable = inet_csk(sk)->icsk_af_ops->conn_request(sk, skb) >= 0; + local_bh_enable(); + if (!acceptable) return 1; consume_skb(skb); return 0; diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 409d0cfd344748..b99168b0fabf2a 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -289,7 +289,8 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) switch (type) { case ICMP_REDIRECT: - dccp_do_redirect(skb, sk); + if (!sock_owned_by_user(sk)) + dccp_do_redirect(skb, sk); goto out; case ICMP_SOURCE_QUENCH: /* Just silently ignore these. */ diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 233b57367758c6..d9b6a4e403e701 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -122,10 +122,12 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, np = inet6_sk(sk); if (type == NDISC_REDIRECT) { - struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); + if (!sock_owned_by_user(sk)) { + struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); - if (dst) - dst->ops->redirect(dst, sk, skb); + if (dst) + dst->ops->redirect(dst, sk, skb); + } goto out; } diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c index 53eddf99e4f6eb..abd07a44321985 100644 --- a/net/dccp/minisocks.c +++ b/net/dccp/minisocks.c @@ -119,10 +119,7 @@ struct sock *dccp_create_openreq_child(const struct sock *sk, * Activate features: initialise CCIDs, sequence windows etc. */ if (dccp_feat_activate_values(newsk, &dreq->dreq_featneg)) { - /* It is still raw copy of parent, so invalidate - * destructor and make plain sk_free() */ - newsk->sk_destruct = NULL; - sk_free(newsk); + sk_free_unlock_clone(newsk); return NULL; } dccp_init_xmit_timers(newsk); @@ -145,6 +142,13 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb, struct dccp_request_sock *dreq = dccp_rsk(req); bool own_req; + /* TCP/DCCP listeners became lockless. + * DCCP stores complex state in its request_sock, so we need + * a protection for them, now this code runs without being protected + * by the parent (listener) lock. 
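Because listener sockets are now lockless, two packets aimed at the same DCCP request socket can be processed concurrently, so dccp_check_req() takes the new per-request dreq_lock for the whole state check and funnels every exit path through a single unlock. A sketch of that single-exit locking shape in plain C, with a pthread mutex standing in for spin_lock_bh() (the request structure, states and helper are invented for illustration):

    #include <pthread.h>
    #include <stddef.h>

    struct child;                           /* opaque handshake result */

    struct request {
            pthread_mutex_t lock;           /* models dreq->dreq_lock */
            int retransmits;
            /* ... per-request protocol state ... */
    };

    static struct child *complete_handshake(struct request *req)
    {
            (void)req;
            return NULL;                    /* stub for the sketch */
    }

    static struct child *check_request(struct request *req, int is_retransmit)
    {
            struct child *child = NULL;

            pthread_mutex_lock(&req->lock); /* everything below is protected */

            if (is_retransmit) {
                    req->retransmits++;     /* duplicate request, just account */
                    goto out;
            }

            child = complete_handshake(req);
    out:
            pthread_mutex_unlock(&req->lock);
            return child;                   /* NULL means dropped, as in dccp_check_req() */
    }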
+ */ + spin_lock_bh(&dreq->dreq_lock); + /* Check for retransmitted REQUEST */ if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) { @@ -159,7 +163,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb, inet_rtx_syn_ack(sk, req); } /* Network Duplicate, discard packet */ - return NULL; + goto out; } DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR; @@ -185,20 +189,20 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb, child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL, req, &own_req); - if (!child) - goto listen_overflow; - - return inet_csk_complete_hashdance(sk, child, req, own_req); + if (child) { + child = inet_csk_complete_hashdance(sk, child, req, own_req); + goto out; + } -listen_overflow: - dccp_pr_debug("listen_overflow!\n"); DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY; drop: if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET) req->rsk_ops->send_reset(sk, skb); inet_csk_reqsk_queue_drop(sk, req); - return NULL; +out: + spin_unlock_bh(&dreq->dreq_lock); + return child; } EXPORT_SYMBOL_GPL(dccp_check_req); @@ -249,6 +253,7 @@ int dccp_reqsk_init(struct request_sock *req, { struct dccp_request_sock *dreq = dccp_rsk(req); + spin_lock_init(&dreq->dreq_lock); inet_rsk(req)->ir_rmt_port = dccp_hdr(skb)->dccph_sport; inet_rsk(req)->ir_num = ntohs(dccp_hdr(skb)->dccph_dport); inet_rsk(req)->acked = 0; diff --git a/net/dccp/output.c b/net/dccp/output.c index b66c84db0766f5..91a15b3c4915a3 100644 --- a/net/dccp/output.c +++ b/net/dccp/output.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index a90ed67027b0cf..7de5b40a5d0d12 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -106,7 +106,7 @@ Version 0.0.6 2.1.110 07-aug-98 Eduardo Marcelo Serrat #include #include #include -#include +#include #include #include #include @@ -1070,7 +1070,8 @@ static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo) return skb == NULL ? 
ERR_PTR(err) : skb; } -static int dn_accept(struct socket *sock, struct socket *newsock, int flags) +static int dn_accept(struct socket *sock, struct socket *newsock, int flags, + bool kern) { struct sock *sk = sock->sk, *newsk; struct sk_buff *skb = NULL; @@ -1099,7 +1100,7 @@ static int dn_accept(struct socket *sock, struct socket *newsock, int flags) cb = DN_SKB_CB(skb); sk->sk_ack_backlog--; - newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation, 0); + newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation, kern); if (newsk == NULL) { release_sock(sk); kfree_skb(skb); diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c index ecc28cff08ab81..af781010753b0f 100644 --- a/net/dns_resolver/dns_query.c +++ b/net/dns_resolver/dns_query.c @@ -37,8 +37,10 @@ #include #include +#include #include #include + #include #include @@ -70,7 +72,7 @@ int dns_query(const char *type, const char *name, size_t namelen, const char *options, char **_result, time64_t *_expiry) { struct key *rkey; - const struct user_key_payload *upayload; + struct user_key_payload *upayload; const struct cred *saved_cred; size_t typelen, desclen; char *desc, *cp; @@ -141,7 +143,7 @@ int dns_query(const char *type, const char *name, size_t namelen, if (ret) goto put; - upayload = user_key_payload(rkey); + upayload = user_key_payload_locked(rkey); len = upayload->datalen; ret = -ENOMEM; diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 602d40f43687c9..6b1fc6e4278ef4 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -689,11 +689,12 @@ EXPORT_SYMBOL(inet_stream_connect); * Accept a pending connection. The TCP layer now gives BSD semantics. */ -int inet_accept(struct socket *sock, struct socket *newsock, int flags) +int inet_accept(struct socket *sock, struct socket *newsock, int flags, + bool kern) { struct sock *sk1 = sock->sk; int err = -EINVAL; - struct sock *sk2 = sk1->sk_prot->accept(sk1, flags, &err); + struct sock *sk2 = sk1->sk_prot->accept(sk1, flags, &err, kern); if (!sk2) goto do_err; @@ -1487,8 +1488,10 @@ int inet_gro_complete(struct sk_buff *skb, int nhoff) int proto = iph->protocol; int err = -ENOSYS; - if (skb->encapsulation) + if (skb->encapsulation) { + skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IP)); skb_set_inner_network_header(skb, nhoff); + } csum_replace2(&iph->check, iph->tot_len, newlen); iph->tot_len = newlen; diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 5d367b7ff542c0..cebedd545e5e28 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index b39a791f6756fc..8f2133ffc2ff1b 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -622,6 +622,7 @@ const struct nla_policy rtm_ipv4_policy[RTA_MAX + 1] = { [RTA_ENCAP_TYPE] = { .type = NLA_U16 }, [RTA_ENCAP] = { .type = NLA_NESTED }, [RTA_UID] = { .type = NLA_U32 }, + [RTA_MARK] = { .type = NLA_U32 }, }; static int rtm_to_fib_config(struct net *net, struct sk_buff *skb, @@ -1082,7 +1083,8 @@ static void nl_fib_input(struct sk_buff *skb) net = sock_net(skb->sk); nlh = nlmsg_hdr(skb); - if (skb->len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len || + if (skb->len < nlmsg_total_size(sizeof(*frn)) || + skb->len < nlh->nlmsg_len || nlmsg_len(nlh) < sizeof(*frn)) return; diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index b4d5980ade3b58..5e313c1ac94fc8 100644 --- a/net/ipv4/inet_connection_sock.c +++ 
b/net/ipv4/inet_connection_sock.c @@ -424,7 +424,7 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo) /* * This will accept the next outstanding connection. */ -struct sock *inet_csk_accept(struct sock *sk, int flags, int *err) +struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern) { struct inet_connection_sock *icsk = inet_csk(sk); struct request_sock_queue *queue = &icsk->icsk_accept_queue; diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index bbe7f72db9c157..b3cdeec85f1f2c 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -198,6 +198,7 @@ static void ip_expire(unsigned long arg) qp = container_of((struct inet_frag_queue *) arg, struct ipq, q); net = container_of(qp->q.net, struct net, ipv4.frags); + rcu_read_lock(); spin_lock(&qp->q.lock); if (qp->q.flags & INET_FRAG_COMPLETE) @@ -207,7 +208,7 @@ static void ip_expire(unsigned long arg) __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS); if (!inet_frag_evicting(&qp->q)) { - struct sk_buff *head = qp->q.fragments; + struct sk_buff *clone, *head = qp->q.fragments; const struct iphdr *iph; int err; @@ -216,32 +217,40 @@ static void ip_expire(unsigned long arg) if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments) goto out; - rcu_read_lock(); head->dev = dev_get_by_index_rcu(net, qp->iif); if (!head->dev) - goto out_rcu_unlock; + goto out; + /* skb has no dst, perform route lookup again */ iph = ip_hdr(head); err = ip_route_input_noref(head, iph->daddr, iph->saddr, iph->tos, head->dev); if (err) - goto out_rcu_unlock; + goto out; /* Only an end host needs to send an ICMP * "Fragment Reassembly Timeout" message, per RFC792. */ if (frag_expire_skip_icmp(qp->user) && (skb_rtable(head)->rt_type != RTN_LOCAL)) - goto out_rcu_unlock; + goto out; + + clone = skb_clone(head, GFP_ATOMIC); /* Send an ICMP "Fragment Reassembly Timeout" message. 
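The ip_expire() rework clones the head skb under qp->q.lock, then drops the lock before handing the clone to icmp_send() and freeing it, so the ICMP "Fragment Reassembly Timeout" is generated without holding the fragment queue spinlock (and the whole function now runs under rcu_read_lock() taken up front). A rough userspace rendition of that "copy what you need, unlock, then do the slow work" shape, with a pthread mutex and malloc/memcpy standing in for the queue lock and skb_clone() (types and the send helper are stand-ins):

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    struct queue {
            pthread_mutex_t lock;
            char *head;                     /* data owned by the queue */
            size_t head_len;
    };

    static void slow_send_error(const char *buf, size_t len)
    {
            (void)buf; (void)len;           /* would build and emit the ICMP error */
    }

    static void queue_expire(struct queue *q)
    {
            char *clone = NULL;
            size_t len = 0;

            pthread_mutex_lock(&q->lock);
            if (q->head) {
                    len = q->head_len;
                    clone = malloc(len);    /* private copy, like skb_clone() */
                    if (clone)
                            memcpy(clone, q->head, len);
            }
            pthread_mutex_unlock(&q->lock); /* never call the slow path locked */

            if (clone) {
                    slow_send_error(clone, len);
                    free(clone);            /* like consume_skb(clone) */
            }
    }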
*/ - icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); -out_rcu_unlock: - rcu_read_unlock(); + if (clone) { + spin_unlock(&qp->q.lock); + icmp_send(clone, ICMP_TIME_EXCEEDED, + ICMP_EXC_FRAGTIME, 0); + consume_skb(clone); + goto out_rcu_unlock; + } } out: spin_unlock(&qp->q.lock); +out_rcu_unlock: + rcu_read_unlock(); ipq_put(qp); } diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 737ce826d7ecfa..7a3fd25e8913a9 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -966,7 +966,7 @@ static int __ip_append_data(struct sock *sk, cork->length += length; if ((((length + fragheaderlen) > mtu) || (skb && skb_is_gso(skb))) && (sk->sk_protocol == IPPROTO_UDP) && - (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len && + (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) && (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) { err = ip_ufo_append_data(sk, queue, getfrag, from, length, hh_len, fragheaderlen, transhdrlen, diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index fd9f34bbd7408a..dfb2ab2dd3c84d 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -306,7 +306,7 @@ static void __init ic_close_devs(void) while ((d = next)) { next = d->next; dev = d->dev; - if ((!ic_dev || dev != ic_dev->dev) && !netdev_uses_dsa(dev)) { + if (d != ic_dev && !netdev_uses_dsa(dev)) { pr_debug("IP-Config: Downing %s\n", dev->name); dev_change_flags(dev, d->flags); } diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index b3cc1335adbc1a..c0cc6aa8cfaa9c 100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c @@ -23,7 +23,8 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t struct rtable *rt; struct flowi4 fl4 = {}; __be32 saddr = iph->saddr; - __u8 flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0; + const struct sock *sk = skb_to_full_sk(skb); + __u8 flags = sk ? inet_sk_flowi_flags(sk) : 0; struct net_device *dev = skb_dst(skb)->dev; unsigned int hh_len; @@ -40,7 +41,7 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t fl4.daddr = iph->daddr; fl4.saddr = saddr; fl4.flowi4_tos = RT_TOS(iph->tos); - fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0; + fl4.flowi4_oif = sk ? 
sk->sk_bound_dev_if : 0; if (!fl4.flowi4_oif) fl4.flowi4_oif = l3mdev_master_ifindex(dev); fl4.flowi4_mark = skb->mark; @@ -61,7 +62,7 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t xfrm_decode_session(skb, flowi4_to_flowi(&fl4), AF_INET) == 0) { struct dst_entry *dst = skb_dst(skb); skb_dst_set(skb, NULL); - dst = xfrm_lookup(net, dst, flowi4_to_flowi(&fl4), skb->sk, 0); + dst = xfrm_lookup(net, dst, flowi4_to_flowi(&fl4), sk, 0); if (IS_ERR(dst)) return PTR_ERR(dst); skb_dst_set(skb, dst); diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index bc1486f2c06433..2e14ed11a35cfc 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c @@ -165,6 +165,10 @@ static unsigned int ipv4_conntrack_local(void *priv, if (skb->len < sizeof(struct iphdr) || ip_hdrlen(skb) < sizeof(struct iphdr)) return NF_ACCEPT; + + if (ip_is_fragment(ip_hdr(skb))) /* IP_NODEFRAG setsockopt set */ + return NF_ACCEPT; + return nf_conntrack_in(state->net, PF_INET, state->hook, skb); } diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c index f8aad03d674b05..6f5e8d01b87693 100644 --- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c @@ -255,11 +255,6 @@ nf_nat_ipv4_fn(void *priv, struct sk_buff *skb, /* maniptype == SRC for postrouting. */ enum nf_nat_manip_type maniptype = HOOK2MANIP(state->hook); - /* We never see fragments: conntrack defrags on pre-routing - * and local-out, and nf_nat_out protects post-routing. - */ - NF_CT_ASSERT(!ip_is_fragment(ip_hdr(skb))); - ct = nf_ct_get(skb, &ctinfo); /* Can't track? It's not due to stress, or conntrack would * have dropped it. 
Hence it's the user's responsibilty to diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c index c9b52c361da2e6..53e49f5011d3ce 100644 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c @@ -1260,16 +1260,6 @@ static const struct nf_conntrack_expect_policy snmp_exp_policy = { .timeout = 180, }; -static struct nf_conntrack_helper snmp_helper __read_mostly = { - .me = THIS_MODULE, - .help = help, - .expect_policy = &snmp_exp_policy, - .name = "snmp", - .tuple.src.l3num = AF_INET, - .tuple.src.u.udp.port = cpu_to_be16(SNMP_PORT), - .tuple.dst.protonum = IPPROTO_UDP, -}; - static struct nf_conntrack_helper snmp_trap_helper __read_mostly = { .me = THIS_MODULE, .help = help, @@ -1288,22 +1278,16 @@ static struct nf_conntrack_helper snmp_trap_helper __read_mostly = { static int __init nf_nat_snmp_basic_init(void) { - int ret = 0; - BUG_ON(nf_nat_snmp_hook != NULL); RCU_INIT_POINTER(nf_nat_snmp_hook, help); - ret = nf_conntrack_helper_register(&snmp_trap_helper); - if (ret < 0) { - nf_conntrack_helper_unregister(&snmp_helper); - return ret; - } - return ret; + return nf_conntrack_helper_register(&snmp_trap_helper); } static void __exit nf_nat_snmp_basic_fini(void) { RCU_INIT_POINTER(nf_nat_snmp_hook, NULL); + synchronize_rcu(); nf_conntrack_helper_unregister(&snmp_trap_helper); } diff --git a/net/ipv4/netfilter/nft_masq_ipv4.c b/net/ipv4/netfilter/nft_masq_ipv4.c index a0ea8aad1bf150..f1867727711930 100644 --- a/net/ipv4/netfilter/nft_masq_ipv4.c +++ b/net/ipv4/netfilter/nft_masq_ipv4.c @@ -26,10 +26,10 @@ static void nft_masq_ipv4_eval(const struct nft_expr *expr, memset(&range, 0, sizeof(range)); range.flags = priv->flags; if (priv->sreg_proto_min) { - range.min_proto.all = - *(__be16 *)®s->data[priv->sreg_proto_min]; - range.max_proto.all = - *(__be16 *)®s->data[priv->sreg_proto_max]; + range.min_proto.all = (__force __be16)nft_reg_load16( + ®s->data[priv->sreg_proto_min]); + range.max_proto.all = (__force __be16)nft_reg_load16( + ®s->data[priv->sreg_proto_max]); } regs->verdict.code = nf_nat_masquerade_ipv4(pkt->skb, nft_hook(pkt), &range, nft_out(pkt)); diff --git a/net/ipv4/netfilter/nft_redir_ipv4.c b/net/ipv4/netfilter/nft_redir_ipv4.c index 1650ed23c15dd0..5120be1d31185d 100644 --- a/net/ipv4/netfilter/nft_redir_ipv4.c +++ b/net/ipv4/netfilter/nft_redir_ipv4.c @@ -26,10 +26,10 @@ static void nft_redir_ipv4_eval(const struct nft_expr *expr, memset(&mr, 0, sizeof(mr)); if (priv->sreg_proto_min) { - mr.range[0].min.all = - *(__be16 *)®s->data[priv->sreg_proto_min]; - mr.range[0].max.all = - *(__be16 *)®s->data[priv->sreg_proto_max]; + mr.range[0].min.all = (__force __be16)nft_reg_load16( + ®s->data[priv->sreg_proto_min]); + mr.range[0].max.all = (__force __be16)nft_reg_load16( + ®s->data[priv->sreg_proto_max]); mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED; } diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 2af6244b83e27a..ccfbce13a6333a 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -156,17 +156,18 @@ int ping_hash(struct sock *sk) void ping_unhash(struct sock *sk) { struct inet_sock *isk = inet_sk(sk); + pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num); + write_lock_bh(&ping_table.lock); if (sk_hashed(sk)) { - write_lock_bh(&ping_table.lock); hlist_nulls_del(&sk->sk_nulls_node); sk_nulls_node_init(&sk->sk_nulls_node); sock_put(sk); isk->inet_num = 0; isk->inet_sport = 0; sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); - write_unlock_bh(&ping_table.lock); } + 
write_unlock_bh(&ping_table.lock); } EXPORT_SYMBOL_GPL(ping_unhash); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index da385ae997a3d6..1e319a525d51b0 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1110,9 +1110,14 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0; err = __inet_stream_connect(sk->sk_socket, msg->msg_name, msg->msg_namelen, flags, 1); - inet->defer_connect = 0; - *copied = tp->fastopen_req->copied; - tcp_free_fastopen_req(tp); + /* fastopen_req could already be freed in __inet_stream_connect + * if the connection times out or gets rst + */ + if (tp->fastopen_req) { + *copied = tp->fastopen_req->copied; + tcp_free_fastopen_req(tp); + inet->defer_connect = 0; + } return err; } @@ -2318,6 +2323,10 @@ int tcp_disconnect(struct sock *sk, int flags) memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); __sk_dst_reset(sk); + /* Clean up fastopen related fields */ + tcp_free_fastopen_req(tp); + inet->defer_connect = 0; + WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); sk->sk_error_report(sk); @@ -2761,7 +2770,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) { const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */ const struct inet_connection_sock *icsk = inet_csk(sk); - u32 now = tcp_time_stamp, intv; + u32 now, intv; u64 rate64; bool slow; u32 rate; @@ -2830,6 +2839,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) info->tcpi_retrans = tp->retrans_out; info->tcpi_fackets = tp->fackets_out; + now = tcp_time_stamp; info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime); info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime); info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp); diff --git a/net/ipv4/tcp_cdg.c b/net/ipv4/tcp_cdg.c index 35b280361cb20f..50a0f3e51d5ba3 100644 --- a/net/ipv4/tcp_cdg.c +++ b/net/ipv4/tcp_cdg.c @@ -27,6 +27,8 @@ #include #include #include +#include + #include #define HYSTART_ACK_TRAIN 1 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 2c0ff327b6dfe6..2c1f59386a7bac 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -126,7 +126,8 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2; #define REXMIT_LOST 1 /* retransmit packets marked lost */ #define REXMIT_NEW 2 /* FRTO-style transmit of unsent/new packets */ -static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb) +static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb, + unsigned int len) { static bool __once __read_mostly; @@ -137,8 +138,9 @@ static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb) rcu_read_lock(); dev = dev_get_by_index_rcu(sock_net(sk), skb->skb_iif); - pr_warn("%s: Driver has suspect GRO implementation, TCP performance may be compromised.\n", - dev ? dev->name : "Unknown driver"); + if (!dev || len >= dev->mtu) + pr_warn("%s: Driver has suspect GRO implementation, TCP performance may be compromised.\n", + dev ? 
dev->name : "Unknown driver"); rcu_read_unlock(); } } @@ -161,8 +163,10 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb) if (len >= icsk->icsk_ack.rcv_mss) { icsk->icsk_ack.rcv_mss = min_t(unsigned int, len, tcp_sk(sk)->advmss); - if (unlikely(icsk->icsk_ack.rcv_mss != len)) - tcp_gro_dev_warn(sk, skb); + /* Account for possibly-removed options */ + if (unlikely(len > icsk->icsk_ack.rcv_mss + + MAX_TCP_OPTION_SPACE)) + tcp_gro_dev_warn(sk, skb, len); } else { /* Otherwise, we make more careful check taking into account, * that SACKs block is variable. @@ -874,22 +878,11 @@ static void tcp_update_reordering(struct sock *sk, const int metric, const int ts) { struct tcp_sock *tp = tcp_sk(sk); - if (metric > tp->reordering) { - int mib_idx; + int mib_idx; + if (metric > tp->reordering) { tp->reordering = min(sysctl_tcp_max_reordering, metric); - /* This exciting event is worth to be remembered. 8) */ - if (ts) - mib_idx = LINUX_MIB_TCPTSREORDER; - else if (tcp_is_reno(tp)) - mib_idx = LINUX_MIB_TCPRENOREORDER; - else if (tcp_is_fack(tp)) - mib_idx = LINUX_MIB_TCPFACKREORDER; - else - mib_idx = LINUX_MIB_TCPSACKREORDER; - - NET_INC_STATS(sock_net(sk), mib_idx); #if FASTRETRANS_DEBUG > 1 pr_debug("Disorder%d %d %u f%u s%u rr%d\n", tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state, @@ -902,6 +895,18 @@ static void tcp_update_reordering(struct sock *sk, const int metric, } tp->rack.reord = 1; + + /* This exciting event is worth to be remembered. 8) */ + if (ts) + mib_idx = LINUX_MIB_TCPTSREORDER; + else if (tcp_is_reno(tp)) + mib_idx = LINUX_MIB_TCPRENOREORDER; + else if (tcp_is_fack(tp)) + mib_idx = LINUX_MIB_TCPFACKREORDER; + else + mib_idx = LINUX_MIB_TCPSACKREORDER; + + NET_INC_STATS(sock_net(sk), mib_idx); } /* This must be called before lost_out is incremented */ @@ -5541,6 +5546,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb) struct inet_connection_sock *icsk = inet_csk(sk); tcp_set_state(sk, TCP_ESTABLISHED); + icsk->icsk_ack.lrcvtime = tcp_time_stamp; if (skb) { icsk->icsk_af_ops->sk_rx_dst_set(sk, skb); @@ -5759,7 +5765,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, * to stand against the temptation 8) --ANK */ inet_csk_schedule_ack(sk); - icsk->icsk_ack.lrcvtime = tcp_time_stamp; tcp_enter_quickack_mode(sk); inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, TCP_DELACK_MAX, TCP_RTO_MAX); @@ -5886,9 +5891,15 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) if (th->syn) { if (th->fin) goto discard; - if (icsk->icsk_af_ops->conn_request(sk, skb) < 0) - return 1; + /* It is possible that we process SYN packets from backlog, + * so we need to make sure to disable BH right there. 
+ */ + local_bh_disable(); + acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0; + local_bh_enable(); + if (!acceptable) + return 1; consume_skb(skb); return 0; } diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 9a89b8deafae1e..575e19dcc01763 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -279,10 +279,13 @@ EXPORT_SYMBOL(tcp_v4_connect); */ void tcp_v4_mtu_reduced(struct sock *sk) { - struct dst_entry *dst; struct inet_sock *inet = inet_sk(sk); - u32 mtu = tcp_sk(sk)->mtu_info; + struct dst_entry *dst; + u32 mtu; + if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) + return; + mtu = tcp_sk(sk)->mtu_info; dst = inet_csk_update_pmtu(sk, mtu); if (!dst) return; @@ -428,7 +431,8 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) switch (type) { case ICMP_REDIRECT: - do_redirect(icmp_skb, sk); + if (!sock_owned_by_user(sk)) + do_redirect(icmp_skb, sk); goto out; case ICMP_SOURCE_QUENCH: /* Just silently ignore these. */ diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 7e16243cdb58c8..65c0f3d13eca47 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -460,6 +460,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk, newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); minmax_reset(&newtp->rtt_min, tcp_time_stamp, ~0U); newicsk->icsk_rto = TCP_TIMEOUT_INIT; + newicsk->icsk_ack.lrcvtime = tcp_time_stamp; newtp->packets_out = 0; newtp->retrans_out = 0; diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c index 4ecb38ae85042d..d8acbd9f477a2a 100644 --- a/net/ipv4/tcp_recovery.c +++ b/net/ipv4/tcp_recovery.c @@ -12,7 +12,8 @@ static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb) /* Account for retransmits that are lost again */ TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; tp->retrans_out -= tcp_skb_pcount(skb); - NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT); + NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT, + tcp_skb_pcount(skb)); } } diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 40d893556e6701..b2ab411c6d3728 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -249,7 +249,8 @@ void tcp_delack_timer_handler(struct sock *sk) sk_mem_reclaim_partial(sk); - if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) + if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) || + !(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) goto out; if (time_after(icsk->icsk_ack.timeout, jiffies)) { @@ -552,7 +553,8 @@ void tcp_write_timer_handler(struct sock *sk) struct inet_connection_sock *icsk = inet_csk(sk); int event; - if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending) + if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) || + !icsk->icsk_pending) goto out; if (time_after(icsk->icsk_timeout, jiffies)) { diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 3a2025f5bf2c33..363172527e433e 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -43,6 +43,7 @@ #include #include #include +#include #include #include #include @@ -5692,13 +5693,18 @@ static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write, struct inet6_dev *idev = (struct inet6_dev *)ctl->extra1; struct net *net = (struct net *)ctl->extra2; + if (!rtnl_trylock()) + return restart_syscall(); + ret = proc_dointvec(ctl, write, buffer, lenp, ppos); if (write) { new_val = *((int *)ctl->data); - if (check_addr_gen_mode(new_val) < 0) - return -EINVAL; + if (check_addr_gen_mode(new_val) < 0) { + ret = -EINVAL; + goto out; + } /* request 
for default */ if (&net->ipv6.devconf_dflt->addr_gen_mode == ctl->data) { @@ -5707,20 +5713,23 @@ static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write, /* request for individual net device */ } else { if (!idev) - return ret; + goto out; - if (check_stable_privacy(idev, net, new_val) < 0) - return -EINVAL; + if (check_stable_privacy(idev, net, new_val) < 0) { + ret = -EINVAL; + goto out; + } if (idev->cnf.addr_gen_mode != new_val) { idev->cnf.addr_gen_mode = new_val; - rtnl_lock(); addrconf_dev_config(idev->dev); - rtnl_unlock(); } } } +out: + rtnl_unlock(); + return ret; } diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 04db40620ea65c..a9a9553ee63df8 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -920,12 +920,12 @@ static int __init inet6_init(void) err = register_pernet_subsys(&inet6_net_ops); if (err) goto register_pernet_fail; - err = icmpv6_init(); - if (err) - goto icmp_fail; err = ip6_mr_init(); if (err) goto ipmr_fail; + err = icmpv6_init(); + if (err) + goto icmp_fail; err = ndisc_init(); if (err) goto ndisc_fail; @@ -1061,10 +1061,10 @@ static int __init inet6_init(void) ndisc_cleanup(); ndisc_fail: ip6_mr_cleanup(); -ipmr_fail: - icmpv6_cleanup(); icmp_fail: unregister_pernet_subsys(&inet6_net_ops); +ipmr_fail: + icmpv6_cleanup(); register_pernet_fail: sock_unregister(PF_INET6); rtnl_unregister_all(PF_INET6); diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index e4266746e4a2af..d4bf2c68a545b4 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -923,6 +923,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, ins = &rt->dst.rt6_next; iter = *ins; while (iter) { + if (iter->rt6i_metric > rt->rt6i_metric) + break; if (rt6_qualify_for_ecmp(iter)) { *ins = iter->dst.rt6_next; fib6_purge_rt(iter, fn, info->nl_net); diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 0838e6d01d2e49..93e58a5e18374b 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -294,8 +294,10 @@ static int ipv6_gro_complete(struct sk_buff *skb, int nhoff) struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff); int err = -ENOSYS; - if (skb->encapsulation) + if (skb->encapsulation) { + skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IPV6)); skb_set_inner_network_header(skb, nhoff); + } iph->payload_len = htons(skb->len - nhoff - sizeof(*iph)); diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 528b3c1f3fdee4..58f6288e9ba53e 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -768,13 +768,14 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, * Fragment the datagram. */ - *prevhdr = NEXTHDR_FRAGMENT; troom = rt->dst.dev->needed_tailroom; /* * Keep copying data until we run out. */ while (left > 0) { + u8 *fragnexthdr_offset; + len = left; /* IF: it doesn't fit, use 'mtu' - the data space left */ if (len > mtu) @@ -819,6 +820,10 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, */ skb_copy_from_linear_data(skb, skb_network_header(frag), hlen); + fragnexthdr_offset = skb_network_header(frag); + fragnexthdr_offset += prevhdr - skb_network_header(skb); + *fragnexthdr_offset = NEXTHDR_FRAGMENT; + /* * Build fragment header. 
*/ @@ -1385,7 +1390,7 @@ static int __ip6_append_data(struct sock *sk, if ((((length + fragheaderlen) > mtu) || (skb && skb_is_gso(skb))) && (sk->sk_protocol == IPPROTO_UDP) && - (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len && + (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) && (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) { err = ip6_ufo_append_data(sk, queue, getfrag, from, length, hh_len, fragheaderlen, exthdrlen, diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 644ba59fbd9d5e..3d8a3b63b4fdbe 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -485,11 +485,15 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) if (!skb->ignore_df && skb->len > mtu) { skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu); - if (skb->protocol == htons(ETH_P_IPV6)) + if (skb->protocol == htons(ETH_P_IPV6)) { + if (mtu < IPV6_MIN_MTU) + mtu = IPV6_MIN_MTU; + icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); - else + } else { icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); + } return -EMSGSIZE; } diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 9948b5ce52dad3..986d4ca38832b1 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -589,6 +589,7 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user) hdr = ipv6_hdr(skb); fhdr = (struct frag_hdr *)skb_transport_header(skb); + skb_orphan(skb); fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr, skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr)); if (fq == NULL) { diff --git a/net/ipv6/netfilter/nft_masq_ipv6.c b/net/ipv6/netfilter/nft_masq_ipv6.c index 6c5b5b1830a74f..4146536e9c1517 100644 --- a/net/ipv6/netfilter/nft_masq_ipv6.c +++ b/net/ipv6/netfilter/nft_masq_ipv6.c @@ -27,10 +27,10 @@ static void nft_masq_ipv6_eval(const struct nft_expr *expr, memset(&range, 0, sizeof(range)); range.flags = priv->flags; if (priv->sreg_proto_min) { - range.min_proto.all = - *(__be16 *)®s->data[priv->sreg_proto_min]; - range.max_proto.all = - *(__be16 *)®s->data[priv->sreg_proto_max]; + range.min_proto.all = (__force __be16)nft_reg_load16( + ®s->data[priv->sreg_proto_min]); + range.max_proto.all = (__force __be16)nft_reg_load16( + ®s->data[priv->sreg_proto_max]); } regs->verdict.code = nf_nat_masquerade_ipv6(pkt->skb, &range, nft_out(pkt)); diff --git a/net/ipv6/netfilter/nft_redir_ipv6.c b/net/ipv6/netfilter/nft_redir_ipv6.c index f5ac080fc0849b..a27e424f690d69 100644 --- a/net/ipv6/netfilter/nft_redir_ipv6.c +++ b/net/ipv6/netfilter/nft_redir_ipv6.c @@ -26,10 +26,10 @@ static void nft_redir_ipv6_eval(const struct nft_expr *expr, memset(&range, 0, sizeof(range)); if (priv->sreg_proto_min) { - range.min_proto.all = - *(__be16 *)®s->data[priv->sreg_proto_min], - range.max_proto.all = - *(__be16 *)®s->data[priv->sreg_proto_max], + range.min_proto.all = (__force __be16)nft_reg_load16( + ®s->data[priv->sreg_proto_min]); + range.max_proto.all = (__force __be16)nft_reg_load16( + ®s->data[priv->sreg_proto_max]); range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; } diff --git a/net/ipv6/route.c b/net/ipv6/route.c index f54f4265b37f29..9db1418993f2b8 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -2169,10 +2169,13 @@ int ip6_del_rt(struct rt6_info *rt) static int __ip6_del_rt_siblings(struct rt6_info *rt, struct fib6_config *cfg) { struct nl_info *info = &cfg->fc_nlinfo; + struct net *net = info->nl_net; struct sk_buff *skb = NULL; struct fib6_table 
*table; - int err; + int err = -ENOENT; + if (rt == net->ipv6.ip6_null_entry) + goto out_put; table = rt->rt6i_table; write_lock_bh(&table->tb6_lock); @@ -2184,7 +2187,7 @@ static int __ip6_del_rt_siblings(struct rt6_info *rt, struct fib6_config *cfg) if (skb) { u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0; - if (rt6_fill_node(info->nl_net, skb, rt, + if (rt6_fill_node(net, skb, rt, NULL, NULL, 0, RTM_DELROUTE, info->portid, seq, 0) < 0) { kfree_skb(skb); @@ -2198,17 +2201,18 @@ static int __ip6_del_rt_siblings(struct rt6_info *rt, struct fib6_config *cfg) rt6i_siblings) { err = fib6_del(sibling, info); if (err) - goto out; + goto out_unlock; } } err = fib6_del(rt, info); -out: +out_unlock: write_unlock_bh(&table->tb6_lock); +out_put: ip6_rt_put(rt); if (skb) { - rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_IPV6_ROUTE, + rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE, info->nlh, gfp_any()); } return err; @@ -2891,6 +2895,7 @@ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = { [RTA_ENCAP] = { .type = NLA_NESTED }, [RTA_EXPIRES] = { .type = NLA_U32 }, [RTA_UID] = { .type = NLA_U32 }, + [RTA_MARK] = { .type = NLA_U32 }, }; static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh, @@ -3294,7 +3299,6 @@ static size_t rt6_nlmsg_size(struct rt6_info *rt) nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */ + NLA_ALIGN(sizeof(struct rtnexthop)) + nla_total_size(16) /* RTA_GATEWAY */ - + nla_total_size(4) /* RTA_OIF */ + lwtunnel_get_encap_size(rt->dst.lwtstate); nexthop_len *= rt->rt6i_nsiblings; @@ -3318,7 +3322,7 @@ static size_t rt6_nlmsg_size(struct rt6_info *rt) } static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt, - unsigned int *flags) + unsigned int *flags, bool skip_oif) { if (!netif_running(rt->dst.dev) || !netif_carrier_ok(rt->dst.dev)) { *flags |= RTNH_F_LINKDOWN; @@ -3331,7 +3335,8 @@ static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt, goto nla_put_failure; } - if (rt->dst.dev && + /* not needed for multipath encoding b/c it has a rtnexthop struct */ + if (!skip_oif && rt->dst.dev && nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex)) goto nla_put_failure; @@ -3345,6 +3350,7 @@ static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt, return -EMSGSIZE; } +/* add multipath next hop */ static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt) { struct rtnexthop *rtnh; @@ -3357,7 +3363,7 @@ static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt) rtnh->rtnh_hops = 0; rtnh->rtnh_ifindex = rt->dst.dev ? 
rt->dst.dev->ifindex : 0; - if (rt6_nexthop_info(skb, rt, &flags) < 0) + if (rt6_nexthop_info(skb, rt, &flags, true) < 0) goto nla_put_failure; rtnh->rtnh_flags = flags; @@ -3417,6 +3423,8 @@ static int rt6_fill_node(struct net *net, } else if (rt->rt6i_flags & RTF_LOCAL) rtm->rtm_type = RTN_LOCAL; + else if (rt->rt6i_flags & RTF_ANYCAST) + rtm->rtm_type = RTN_ANYCAST; else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK)) rtm->rtm_type = RTN_LOCAL; else @@ -3510,7 +3518,7 @@ static int rt6_fill_node(struct net *net, nla_nest_end(skb, mp); } else { - if (rt6_nexthop_info(skb, rt, &rtm->rtm_flags) < 0) + if (rt6_nexthop_info(skb, rt, &rtm->rtm_flags, false) < 0) goto nla_put_failure; } @@ -3627,6 +3635,12 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh) rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6); } + if (rt == net->ipv6.ip6_null_entry) { + err = rt->dst.error; + ip6_rt_put(rt); + goto errout; + } + skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb) { ip6_rt_put(rt); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 60a5295a7de6e8..49fa2e8c3fa921 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -391,10 +391,12 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, np = inet6_sk(sk); if (type == NDISC_REDIRECT) { - struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); + if (!sock_owned_by_user(sk)) { + struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); - if (dst) - dst->ops->redirect(dst, sk, skb); + if (dst) + dst->ops->redirect(dst, sk, skb); + } goto out; } diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 4e4c401e3bc690..e28082f0a307eb 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1035,6 +1035,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) ipc6.hlimit = -1; ipc6.tclass = -1; ipc6.dontfrag = -1; + sockc.tsflags = sk->sk_tsflags; /* destination address check */ if (sin6) { @@ -1159,7 +1160,6 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) fl6.flowi6_mark = sk->sk_mark; fl6.flowi6_uid = sk->sk_uid; - sockc.tsflags = sk->sk_tsflags; if (msg->msg_controllen) { opt = &opt_space; diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index ab254041dab7f6..8d77ad5cadaff3 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c @@ -46,6 +46,7 @@ #include #include #include +#include #include #include #include @@ -827,7 +828,8 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) * Wait for incoming connection * */ -static int irda_accept(struct socket *sock, struct socket *newsock, int flags) +static int irda_accept(struct socket *sock, struct socket *newsock, int flags, + bool kern) { struct sock *sk = sock->sk; struct irda_sock *new, *self = irda_sk(sk); @@ -835,7 +837,7 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags) struct sk_buff *skb = NULL; int err; - err = irda_create(sock_net(sk), newsock, sk->sk_protocol, 0); + err = irda_create(sock_net(sk), newsock, sk->sk_protocol, kern); if (err) return err; diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c index 817b1b186aff78..f6061c4bb0a805 100644 --- a/net/irda/ircomm/ircomm_tty.c +++ b/net/irda/ircomm/ircomm_tty.c @@ -32,7 +32,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c index 35dbf3dc3d2831..7025dcb853d06b 100644 --- a/net/irda/irnet/irnet_ppp.c +++ b/net/irda/irnet/irnet_ppp.c @@ -13,8 +13,9 @@ 
* 2) as a control channel (write commands, read events) */ -#include +#include #include + #include "irnet_ppp.h" /* Private header */ /* Please put other headers in irnet.h - Thanks */ diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 13190b38f22ee5..84de7b6326dcdf 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include #include @@ -938,7 +938,7 @@ static int iucv_sock_listen(struct socket *sock, int backlog) /* Accept a pending connection */ static int iucv_sock_accept(struct socket *sock, struct socket *newsock, - int flags) + int flags, bool kern) { DECLARE_WAITQUEUE(wait, current); struct sock *sk = sock->sk, *nsk; diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index a646f348124095..31762f76cdb5f2 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c @@ -24,6 +24,8 @@ #include #include #include +#include + #include #include #include @@ -1685,7 +1687,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) struct kcm_attach info; if (copy_from_user(&info, (void __user *)arg, sizeof(info))) - err = -EFAULT; + return -EFAULT; err = kcm_attach_ioctl(sock, &info); @@ -1695,7 +1697,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) struct kcm_unattach info; if (copy_from_user(&info, (void __user *)arg, sizeof(info))) - err = -EFAULT; + return -EFAULT; err = kcm_unattach_ioctl(sock, &info); @@ -1706,7 +1708,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) struct socket *newsock = NULL; if (copy_from_user(&info, (void __user *)arg, sizeof(info))) - err = -EFAULT; + return -EFAULT; err = kcm_clone(sock, &info, &newsock); diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 8adab6335ced9f..e37d9554da7b47 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -278,7 +278,57 @@ struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunn } EXPORT_SYMBOL_GPL(l2tp_session_find); -struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth) +/* Like l2tp_session_find() but takes a reference on the returned session. + * Optionally calls session->ref() too if do_ref is true. 
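l2tp_session_get() replaces the bare l2tp_session_find() lookups: the refcount (and, when do_ref is set, session->ref()) is taken while still under the hash lock, and every caller that receives a non-NULL session must drop that reference when it is done. The general shape, modelled in userspace with a mutex-protected list and a C11 atomic refcount (types and helpers are invented for illustration):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    struct session {
            struct session *next;
            unsigned int id;
            atomic_int refcnt;
    };

    static pthread_mutex_t hash_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct session *sessions;        /* single bucket, for brevity */

    /* Look the session up and take a reference before the lock is dropped,
     * so it cannot disappear between the lookup and the caller using it.
     */
    static struct session *session_get(unsigned int id)
    {
            struct session *s;

            pthread_mutex_lock(&hash_lock);
            for (s = sessions; s; s = s->next) {
                    if (s->id == id) {
                            atomic_fetch_add(&s->refcnt, 1);
                            break;
                    }
            }
            pthread_mutex_unlock(&hash_lock);
            return s;                       /* NULL if not found */
    }

    static void session_put(struct session *s)
    {
            if (atomic_fetch_sub(&s->refcnt, 1) == 1)
                    free(s);                /* last user frees it */
    }

    /* Caller pattern, as in l2tp_udp_recv_core() or l2tp_ip_recv():
     *
     *      s = session_get(id);
     *      if (s) {
     *              handle(s);
     *              session_put(s);
     *      }
     */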
+ */ +struct l2tp_session *l2tp_session_get(struct net *net, + struct l2tp_tunnel *tunnel, + u32 session_id, bool do_ref) +{ + struct hlist_head *session_list; + struct l2tp_session *session; + + if (!tunnel) { + struct l2tp_net *pn = l2tp_pernet(net); + + session_list = l2tp_session_id_hash_2(pn, session_id); + + rcu_read_lock_bh(); + hlist_for_each_entry_rcu(session, session_list, global_hlist) { + if (session->session_id == session_id) { + l2tp_session_inc_refcount(session); + if (do_ref && session->ref) + session->ref(session); + rcu_read_unlock_bh(); + + return session; + } + } + rcu_read_unlock_bh(); + + return NULL; + } + + session_list = l2tp_session_id_hash(tunnel, session_id); + read_lock_bh(&tunnel->hlist_lock); + hlist_for_each_entry(session, session_list, hlist) { + if (session->session_id == session_id) { + l2tp_session_inc_refcount(session); + if (do_ref && session->ref) + session->ref(session); + read_unlock_bh(&tunnel->hlist_lock); + + return session; + } + } + read_unlock_bh(&tunnel->hlist_lock); + + return NULL; +} +EXPORT_SYMBOL_GPL(l2tp_session_get); + +struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth, + bool do_ref) { int hash; struct l2tp_session *session; @@ -288,6 +338,9 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth) for (hash = 0; hash < L2TP_HASH_SIZE; hash++) { hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) { if (++count > nth) { + l2tp_session_inc_refcount(session); + if (do_ref && session->ref) + session->ref(session); read_unlock_bh(&tunnel->hlist_lock); return session; } @@ -298,12 +351,13 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth) return NULL; } -EXPORT_SYMBOL_GPL(l2tp_session_find_nth); +EXPORT_SYMBOL_GPL(l2tp_session_get_nth); /* Lookup a session by interface name. * This is very inefficient but is only used by management interfaces. 
*/ -struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname) +struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname, + bool do_ref) { struct l2tp_net *pn = l2tp_pernet(net); int hash; @@ -313,7 +367,11 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname) for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) { hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) { if (!strcmp(session->ifname, ifname)) { + l2tp_session_inc_refcount(session); + if (do_ref && session->ref) + session->ref(session); rcu_read_unlock_bh(); + return session; } } @@ -323,7 +381,49 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname) return NULL; } -EXPORT_SYMBOL_GPL(l2tp_session_find_by_ifname); +EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname); + +static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel, + struct l2tp_session *session) +{ + struct l2tp_session *session_walk; + struct hlist_head *g_head; + struct hlist_head *head; + struct l2tp_net *pn; + + head = l2tp_session_id_hash(tunnel, session->session_id); + + write_lock_bh(&tunnel->hlist_lock); + hlist_for_each_entry(session_walk, head, hlist) + if (session_walk->session_id == session->session_id) + goto exist; + + if (tunnel->version == L2TP_HDR_VER_3) { + pn = l2tp_pernet(tunnel->l2tp_net); + g_head = l2tp_session_id_hash_2(l2tp_pernet(tunnel->l2tp_net), + session->session_id); + + spin_lock_bh(&pn->l2tp_session_hlist_lock); + hlist_for_each_entry(session_walk, g_head, global_hlist) + if (session_walk->session_id == session->session_id) + goto exist_glob; + + hlist_add_head_rcu(&session->global_hlist, g_head); + spin_unlock_bh(&pn->l2tp_session_hlist_lock); + } + + hlist_add_head(&session->hlist, head); + write_unlock_bh(&tunnel->hlist_lock); + + return 0; + +exist_glob: + spin_unlock_bh(&pn->l2tp_session_hlist_lock); +exist: + write_unlock_bh(&tunnel->hlist_lock); + + return -EEXIST; +} /* Lookup a tunnel by id */ @@ -633,6 +733,9 @@ static int l2tp_recv_data_seq(struct l2tp_session *session, struct sk_buff *skb) * a data (not control) frame before coming here. Fields up to the * session-id have already been parsed and ptr points to the data * after the session-id. + * + * session->ref() must have been called prior to l2tp_recv_common(). + * session->deref() will be called automatically after skb is processed. */ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, @@ -642,14 +745,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, int offset; u32 ns, nr; - /* The ref count is increased since we now hold a pointer to - * the session. Take care to decrement the refcnt when exiting - * this function from now on... - */ - l2tp_session_inc_refcount(session); - if (session->ref) - (*session->ref)(session); - /* Parse and check optional cookie */ if (session->peer_cookie_len > 0) { if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) { @@ -802,8 +897,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, /* Try to dequeue as many skbs from reorder_q as we can. 
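The new l2tp_session_add_to_tunnel() closes a race in session creation: the duplicate-ID check and the hash insertion now happen under the same lock(s), so a second creator gets -EEXIST instead of silently adding a colliding session. A compact sketch of that check-then-insert-under-one-lock pattern (plain list instead of hlist, EEXIST from <errno.h>):

    #include <errno.h>
    #include <pthread.h>
    #include <stddef.h>

    struct node {
            struct node *next;
            unsigned int id;
    };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *head;

    /* Insert n unless an entry with the same id already exists.  Both the
     * lookup and the insertion run under list_lock, so two concurrent
     * creators cannot both succeed for the same id.
     */
    static int insert_unique(struct node *n)
    {
            struct node *walk;
            int err = 0;

            pthread_mutex_lock(&list_lock);
            for (walk = head; walk; walk = walk->next) {
                    if (walk->id == n->id) {
                            err = -EEXIST;
                            goto out;
                    }
            }
            n->next = head;
            head = n;
    out:
            pthread_mutex_unlock(&list_lock);
            return err;
    }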
*/ l2tp_recv_dequeue(session); - l2tp_session_dec_refcount(session); - return; discard: @@ -812,8 +905,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, if (session->deref) (*session->deref)(session); - - l2tp_session_dec_refcount(session); } EXPORT_SYMBOL(l2tp_recv_common); @@ -920,8 +1011,14 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, } /* Find the session context */ - session = l2tp_session_find(tunnel->l2tp_net, tunnel, session_id); + session = l2tp_session_get(tunnel->l2tp_net, tunnel, session_id, true); if (!session || !session->recv_skb) { + if (session) { + if (session->deref) + session->deref(session); + l2tp_session_dec_refcount(session); + } + /* Not found? Pass to userspace to deal with */ l2tp_info(tunnel, L2TP_MSG_DATA, "%s: no session found (%u/%u). Passing up.\n", @@ -930,6 +1027,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, } l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook); + l2tp_session_dec_refcount(session); return 0; @@ -1738,6 +1836,7 @@ EXPORT_SYMBOL_GPL(l2tp_session_set_header_len); struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg) { struct l2tp_session *session; + int err; session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL); if (session != NULL) { @@ -1793,6 +1892,13 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn l2tp_session_set_header_len(session, tunnel->version); + err = l2tp_session_add_to_tunnel(tunnel, session); + if (err) { + kfree(session); + + return ERR_PTR(err); + } + /* Bump the reference count. The session context is deleted * only when this drops to zero. 
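l2tp_session_create() now reports failure through the error-pointer convention instead of returning NULL, which is why l2tp_eth_create() below switches from a NULL check to IS_ERR()/PTR_ERR(). A minimal userspace rendition of that idiom (the real helpers live in include/linux/err.h; these reimplementations are only for illustration):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error)      { return (void *)error; }
    static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline bool  IS_ERR(const void *ptr)
    {
            return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
    }

    struct session { int id; };

    /* The error code travels in the pointer itself, so callers do not
     * need a separate out-parameter to distinguish ENOMEM from EEXIST.
     */
    static struct session *session_create(int id)
    {
            struct session *s;

            if (id == 0)
                    return ERR_PTR(-EEXIST);        /* e.g. duplicate ID */

            s = malloc(sizeof(*s));
            if (!s)
                    return ERR_PTR(-ENOMEM);

            s->id = id;
            return s;
    }

    /* Caller, mirroring the l2tp_eth_create() hunk:
     *
     *      s = session_create(id);
     *      if (IS_ERR(s))
     *              return PTR_ERR(s);
     */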
*/ @@ -1802,28 +1908,14 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn /* Ensure tunnel socket isn't deleted */ sock_hold(tunnel->sock); - /* Add session to the tunnel's hash list */ - write_lock_bh(&tunnel->hlist_lock); - hlist_add_head(&session->hlist, - l2tp_session_id_hash(tunnel, session_id)); - write_unlock_bh(&tunnel->hlist_lock); - - /* And to the global session list if L2TPv3 */ - if (tunnel->version != L2TP_HDR_VER_2) { - struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); - - spin_lock_bh(&pn->l2tp_session_hlist_lock); - hlist_add_head_rcu(&session->global_hlist, - l2tp_session_id_hash_2(pn, session_id)); - spin_unlock_bh(&pn->l2tp_session_hlist_lock); - } - /* Ignore management session in session count value */ if (session->session_id != 0) atomic_inc(&l2tp_session_count); + + return session; } - return session; + return ERR_PTR(-ENOMEM); } EXPORT_SYMBOL_GPL(l2tp_session_create); diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index aebf281d09eeb3..8ce7818c7a9d05 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -230,11 +230,16 @@ static inline struct l2tp_tunnel *l2tp_sock_to_tunnel(struct sock *sk) return tunnel; } +struct l2tp_session *l2tp_session_get(struct net *net, + struct l2tp_tunnel *tunnel, + u32 session_id, bool do_ref); struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id); -struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth); -struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname); +struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth, + bool do_ref); +struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname, + bool do_ref); struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id); struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth); diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c index 2d6760a2ae347b..d100aed3d06fb6 100644 --- a/net/l2tp/l2tp_debugfs.c +++ b/net/l2tp/l2tp_debugfs.c @@ -53,7 +53,7 @@ static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd) static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd) { - pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx); + pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true); pd->session_idx++; if (pd->session == NULL) { @@ -238,10 +238,14 @@ static int l2tp_dfs_seq_show(struct seq_file *m, void *v) } /* Show the tunnel or session context */ - if (pd->session == NULL) + if (!pd->session) { l2tp_dfs_seq_tunnel_show(m, pd->tunnel); - else + } else { l2tp_dfs_seq_session_show(m, pd->session); + if (pd->session->deref) + pd->session->deref(pd->session); + l2tp_session_dec_refcount(pd->session); + } out: return 0; diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c index 8bf18a5f66e0c4..6fd41d7afe1ef2 100644 --- a/net/l2tp/l2tp_eth.c +++ b/net/l2tp/l2tp_eth.c @@ -221,12 +221,6 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p goto out; } - session = l2tp_session_find(net, tunnel, session_id); - if (session) { - rc = -EEXIST; - goto out; - } - if (cfg->ifname) { dev = dev_get_by_name(net, cfg->ifname); if (dev) { @@ -240,8 +234,8 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p session = l2tp_session_create(sizeof(*spriv), tunnel, session_id, peer_session_id, cfg); - if (!session) { - rc = -ENOMEM; + if (IS_ERR(session)) { + rc = PTR_ERR(session); goto out; } diff 
--git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index d25038cfd64e1a..4d322c1b7233e5 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -143,19 +143,19 @@ static int l2tp_ip_recv(struct sk_buff *skb) } /* Ok, this is a data packet. Lookup the session. */ - session = l2tp_session_find(net, NULL, session_id); - if (session == NULL) + session = l2tp_session_get(net, NULL, session_id, true); + if (!session) goto discard; tunnel = session->tunnel; - if (tunnel == NULL) - goto discard; + if (!tunnel) + goto discard_sess; /* Trace packet contents, if enabled */ if (tunnel->debug & L2TP_MSG_DATA) { length = min(32u, skb->len); if (!pskb_may_pull(skb, length)) - goto discard; + goto discard_sess; /* Point to L2TP header */ optr = ptr = skb->data; @@ -165,6 +165,7 @@ static int l2tp_ip_recv(struct sk_buff *skb) } l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook); + l2tp_session_dec_refcount(session); return 0; @@ -178,9 +179,10 @@ static int l2tp_ip_recv(struct sk_buff *skb) tunnel_id = ntohl(*(__be32 *) &skb->data[4]); tunnel = l2tp_tunnel_find(net, tunnel_id); - if (tunnel != NULL) + if (tunnel) { sk = tunnel->sock; - else { + sock_hold(sk); + } else { struct iphdr *iph = (struct iphdr *) skb_network_header(skb); read_lock_bh(&l2tp_ip_lock); @@ -202,6 +204,12 @@ static int l2tp_ip_recv(struct sk_buff *skb) return sk_receive_skb(sk, skb, 1); +discard_sess: + if (session->deref) + session->deref(session); + l2tp_session_dec_refcount(session); + goto discard; + discard_put: sock_put(sk); diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index a4abcbc4c09ae6..88b397c30d86af 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -156,19 +156,19 @@ static int l2tp_ip6_recv(struct sk_buff *skb) } /* Ok, this is a data packet. Lookup the session. 
*/ - session = l2tp_session_find(net, NULL, session_id); - if (session == NULL) + session = l2tp_session_get(net, NULL, session_id, true); + if (!session) goto discard; tunnel = session->tunnel; - if (tunnel == NULL) - goto discard; + if (!tunnel) + goto discard_sess; /* Trace packet contents, if enabled */ if (tunnel->debug & L2TP_MSG_DATA) { length = min(32u, skb->len); if (!pskb_may_pull(skb, length)) - goto discard; + goto discard_sess; /* Point to L2TP header */ optr = ptr = skb->data; @@ -179,6 +179,8 @@ static int l2tp_ip6_recv(struct sk_buff *skb) l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook); + l2tp_session_dec_refcount(session); + return 0; pass_up: @@ -191,9 +193,10 @@ static int l2tp_ip6_recv(struct sk_buff *skb) tunnel_id = ntohl(*(__be32 *) &skb->data[4]); tunnel = l2tp_tunnel_find(net, tunnel_id); - if (tunnel != NULL) + if (tunnel) { sk = tunnel->sock; - else { + sock_hold(sk); + } else { struct ipv6hdr *iph = ipv6_hdr(skb); read_lock_bh(&l2tp_ip6_lock); @@ -215,6 +218,12 @@ static int l2tp_ip6_recv(struct sk_buff *skb) return sk_receive_skb(sk, skb, 1); +discard_sess: + if (session->deref) + session->deref(session); + l2tp_session_dec_refcount(session); + goto discard; + discard_put: sock_put(sk); diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index 3620fba317863d..7e3e669baac42d 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -48,7 +48,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, /* Accessed under genl lock */ static const struct l2tp_nl_cmd_ops *l2tp_nl_cmd_ops[__L2TP_PWTYPE_MAX]; -static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info) +static struct l2tp_session *l2tp_nl_session_get(struct genl_info *info, + bool do_ref) { u32 tunnel_id; u32 session_id; @@ -59,14 +60,15 @@ static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info) if (info->attrs[L2TP_ATTR_IFNAME]) { ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]); - session = l2tp_session_find_by_ifname(net, ifname); + session = l2tp_session_get_by_ifname(net, ifname, do_ref); } else if ((info->attrs[L2TP_ATTR_SESSION_ID]) && (info->attrs[L2TP_ATTR_CONN_ID])) { tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]); tunnel = l2tp_tunnel_find(net, tunnel_id); if (tunnel) - session = l2tp_session_find(net, tunnel, session_id); + session = l2tp_session_get(net, tunnel, session_id, + do_ref); } return session; @@ -642,10 +644,12 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf session_id, peer_session_id, &cfg); if (ret >= 0) { - session = l2tp_session_find(net, tunnel, session_id); - if (session) + session = l2tp_session_get(net, tunnel, session_id, false); + if (session) { ret = l2tp_session_notify(&l2tp_nl_family, info, session, L2TP_CMD_SESSION_CREATE); + l2tp_session_dec_refcount(session); + } } out: @@ -658,7 +662,7 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf struct l2tp_session *session; u16 pw_type; - session = l2tp_nl_session_find(info); + session = l2tp_nl_session_get(info, true); if (session == NULL) { ret = -ENODEV; goto out; @@ -672,6 +676,10 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete) ret = (*l2tp_nl_cmd_ops[pw_type]->session_delete)(session); + if (session->deref) + session->deref(session); + 
l2tp_session_dec_refcount(session); + out: return ret; } @@ -681,7 +689,7 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf int ret = 0; struct l2tp_session *session; - session = l2tp_nl_session_find(info); + session = l2tp_nl_session_get(info, false); if (session == NULL) { ret = -ENODEV; goto out; @@ -716,6 +724,8 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf ret = l2tp_session_notify(&l2tp_nl_family, info, session, L2TP_CMD_SESSION_MODIFY); + l2tp_session_dec_refcount(session); + out: return ret; } @@ -811,29 +821,34 @@ static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info) struct sk_buff *msg; int ret; - session = l2tp_nl_session_find(info); + session = l2tp_nl_session_get(info, false); if (session == NULL) { ret = -ENODEV; - goto out; + goto err; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { ret = -ENOMEM; - goto out; + goto err_ref; } ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq, 0, session, L2TP_CMD_SESSION_GET); if (ret < 0) - goto err_out; + goto err_ref_msg; - return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid); + ret = genlmsg_unicast(genl_info_net(info), msg, info->snd_portid); -err_out: - nlmsg_free(msg); + l2tp_session_dec_refcount(session); -out: + return ret; + +err_ref_msg: + nlmsg_free(msg); +err_ref: + l2tp_session_dec_refcount(session); +err: return ret; } @@ -852,7 +867,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback goto out; } - session = l2tp_session_find_nth(tunnel, si); + session = l2tp_session_get_nth(tunnel, si, false); if (session == NULL) { ti++; tunnel = NULL; @@ -862,8 +877,11 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, - session, L2TP_CMD_SESSION_GET) < 0) + session, L2TP_CMD_SESSION_GET) < 0) { + l2tp_session_dec_refcount(session); break; + } + l2tp_session_dec_refcount(session); si++; } diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 36cc56fd041871..861b255a2d5195 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -450,6 +450,10 @@ static void pppol2tp_session_close(struct l2tp_session *session) static void pppol2tp_session_destruct(struct sock *sk) { struct l2tp_session *session = sk->sk_user_data; + + skb_queue_purge(&sk->sk_receive_queue); + skb_queue_purge(&sk->sk_write_queue); + if (session) { sk->sk_user_data = NULL; BUG_ON(session->magic != L2TP_SESSION_MAGIC); @@ -488,9 +492,6 @@ static int pppol2tp_release(struct socket *sock) l2tp_session_queue_purge(session); sock_put(sk); } - skb_queue_purge(&sk->sk_receive_queue); - skb_queue_purge(&sk->sk_write_queue); - release_sock(sk); /* This will delete the session context via @@ -582,6 +583,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, int error = 0; u32 tunnel_id, peer_tunnel_id; u32 session_id, peer_session_id; + bool drop_refcnt = false; int ver = 2; int fd; @@ -683,36 +685,36 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, if (tunnel->peer_tunnel_id == 0) tunnel->peer_tunnel_id = peer_tunnel_id; - /* Create session if it doesn't already exist. We handle the - * case where a session was previously created by the netlink - * interface by checking that the session doesn't already have - * a socket and its tunnel socket are what we expect. If any - * of those checks fail, return EEXIST to the caller. 
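
Throughout the l2tp changes above, the plain l2tp_session_find() lookups are replaced by l2tp_session_get(..., do_ref), and every caller now drops its reference (plus the optional ->deref hook) on each exit path, so a session cannot be freed while it is still in use. The sketch below shows the same get/use/put discipline in a self-contained userspace form; the names (struct session, session_get(), session_put()) and the mutex-protected table are illustrative stand-ins, not the kernel helpers.

/* Minimal userspace sketch of the get/use/put pattern the patch adopts.
 * Names (session_get/session_put, struct session) are illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct session {
	int id;
	int refcount;			/* protected by table_lock */
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct session *table[16];

/* Look up a session and take a reference so it cannot vanish under us. */
static struct session *session_get(int id)
{
	struct session *s = NULL;

	pthread_mutex_lock(&table_lock);
	if (id >= 0 && id < 16 && table[id]) {
		s = table[id];
		s->refcount++;		/* the "do_ref" step of the lookup */
	}
	pthread_mutex_unlock(&table_lock);
	return s;
}

/* Drop the reference taken by session_get(); free on the last put. */
static void session_put(struct session *s)
{
	int free_it;

	pthread_mutex_lock(&table_lock);
	free_it = (--s->refcount == 0);
	if (free_it)
		table[s->id] = NULL;
	pthread_mutex_unlock(&table_lock);
	if (free_it)
		free(s);
}

int main(void)
{
	struct session *s = calloc(1, sizeof(*s));

	s->id = 3;
	s->refcount = 1;		/* reference held by the table itself */
	table[3] = s;

	struct session *found = session_get(3);
	if (found) {
		printf("using session %d\n", found->id);
		session_put(found);	/* caller drops its ref on every path */
	}
	session_put(s);			/* drop the table's reference */
	return 0;
}
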
- */ - session = l2tp_session_find(sock_net(sk), tunnel, session_id); - if (session == NULL) { - /* Default MTU must allow space for UDP/L2TP/PPP - * headers. + session = l2tp_session_get(sock_net(sk), tunnel, session_id, false); + if (session) { + drop_refcnt = true; + ps = l2tp_session_priv(session); + + /* Using a pre-existing session is fine as long as it hasn't + * been connected yet. */ - cfg.mtu = cfg.mru = 1500 - PPPOL2TP_HEADER_OVERHEAD; + if (ps->sock) { + error = -EEXIST; + goto end; + } - /* Allocate and initialize a new session context. */ - session = l2tp_session_create(sizeof(struct pppol2tp_session), - tunnel, session_id, - peer_session_id, &cfg); - if (session == NULL) { - error = -ENOMEM; + /* consistency checks */ + if (ps->tunnel_sock != tunnel->sock) { + error = -EEXIST; goto end; } } else { - ps = l2tp_session_priv(session); - error = -EEXIST; - if (ps->sock != NULL) - goto end; + /* Default MTU must allow space for UDP/L2TP/PPP headers */ + cfg.mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD; + cfg.mru = cfg.mtu; - /* consistency checks */ - if (ps->tunnel_sock != tunnel->sock) + session = l2tp_session_create(sizeof(struct pppol2tp_session), + tunnel, session_id, + peer_session_id, &cfg); + if (IS_ERR(session)) { + error = PTR_ERR(session); goto end; + } } /* Associate session with its PPPoL2TP socket */ @@ -777,6 +779,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, session->name); end: + if (drop_refcnt) + l2tp_session_dec_refcount(session); release_sock(sk); return error; @@ -804,12 +808,6 @@ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_i if (tunnel->sock == NULL) goto out; - /* Check that this session doesn't already exist */ - error = -EEXIST; - session = l2tp_session_find(net, tunnel, session_id); - if (session != NULL) - goto out; - /* Default MTU values. */ if (cfg->mtu == 0) cfg->mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD; @@ -817,12 +815,13 @@ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_i cfg->mru = cfg->mtu; /* Allocate and initialize a new session context. */ - error = -ENOMEM; session = l2tp_session_create(sizeof(struct pppol2tp_session), tunnel, session_id, peer_session_id, cfg); - if (session == NULL) + if (IS_ERR(session)) { + error = PTR_ERR(session); goto out; + } ps = l2tp_session_priv(session); ps->tunnel_sock = tunnel->sock; @@ -1140,11 +1139,18 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel, if (stats.session_id != 0) { /* resend to session ioctl handler */ struct l2tp_session *session = - l2tp_session_find(sock_net(sk), tunnel, stats.session_id); - if (session != NULL) - err = pppol2tp_session_ioctl(session, cmd, arg); - else + l2tp_session_get(sock_net(sk), tunnel, + stats.session_id, true); + + if (session) { + err = pppol2tp_session_ioctl(session, cmd, + arg); + if (session->deref) + session->deref(session); + l2tp_session_dec_refcount(session); + } else { err = -EBADR; + } break; } #ifdef CONFIG_XFRM @@ -1554,7 +1560,7 @@ static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd) static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd) { - pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx); + pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true); pd->session_idx++; if (pd->session == NULL) { @@ -1681,10 +1687,14 @@ static int pppol2tp_seq_show(struct seq_file *m, void *v) /* Show the tunnel or session context. 
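
l2tp_session_create() and its pppol2tp callers above switch from returning and checking NULL to the ERR_PTR()/IS_ERR()/PTR_ERR() convention, so a single pointer return can carry either a valid session or a precise negative errno. A minimal userspace sketch of that encoding follows; err_ptr()/ptr_err()/is_err() are simplified stand-ins for the kernel's <linux/err.h> macros.

/* Userspace sketch of the ERR_PTR/PTR_ERR convention used by the patch.
 * Real kernel code uses the helpers from <linux/err.h>; these are
 * simplified stand-ins for illustration.
 */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static inline void *err_ptr(long error)	/* encode -errno as a pointer */
{
	return (void *)(intptr_t)error;
}

static inline long ptr_err(const void *ptr)	/* decode it again */
{
	return (long)(intptr_t)ptr;
}

static inline int is_err(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

struct session { int id; };

static struct session *session_create(int id)
{
	if (id < 0)
		return err_ptr(-EINVAL);	/* precise error, no NULL */

	struct session *s = calloc(1, sizeof(*s));
	if (!s)
		return err_ptr(-ENOMEM);
	s->id = id;
	return s;
}

int main(void)
{
	struct session *s = session_create(-1);

	if (is_err(s)) {
		printf("create failed: %ld\n", ptr_err(s)); /* negative errno */
		return 1;
	}
	free(s);
	return 0;
}
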
*/ - if (pd->session == NULL) + if (!pd->session) { pppol2tp_seq_tunnel_show(m, pd->tunnel); - else + } else { pppol2tp_seq_session_show(m, pd->session); + if (pd->session->deref) + pd->session->deref(pd->session); + l2tp_session_dec_refcount(pd->session); + } out: return 0; @@ -1843,4 +1853,4 @@ MODULE_DESCRIPTION("PPP over L2TP over UDP"); MODULE_LICENSE("GPL"); MODULE_VERSION(PPPOL2TP_DRV_VERSION); MODULE_ALIAS_NET_PF_PROTO(PF_PPPOX, PX_PROTO_OL2TP); -MODULE_ALIAS_L2TP_PWTYPE(11); +MODULE_ALIAS_L2TP_PWTYPE(7); diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 5e929638242028..cb4fff785cbf5a 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -26,6 +26,8 @@ #include #include #include +#include + #include #include #include @@ -639,11 +641,13 @@ static void llc_cmsg_rcv(struct msghdr *msg, struct sk_buff *skb) * @sock: Socket which connections arrive on. * @newsock: Socket to move incoming connection to. * @flags: User specified operational flags. + * @kern: If the socket is kernel internal * * Accept a new incoming connection. * Returns 0 upon success, negative otherwise. */ -static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags) +static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags, + bool kern) { struct sock *sk = sock->sk, *newsk; struct llc_sock *llc, *newllc; diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index 3b5fd4188f2ac7..4456559cb056d1 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c @@ -85,7 +85,7 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, ht_dbg(sta->sdata, "Rx BA session stop requested for %pM tid %u %s reason: %d\n", sta->sta.addr, tid, - initiator == WLAN_BACK_RECIPIENT ? "recipient" : "inititator", + initiator == WLAN_BACK_RECIPIENT ? 
"recipient" : "initiator", (int)reason); if (drv_ampdu_action(local, sta->sdata, ¶ms)) @@ -398,6 +398,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta, tid_agg_rx->timeout = timeout; tid_agg_rx->stored_mpdu_num = 0; tid_agg_rx->auto_seq = auto_seq; + tid_agg_rx->started = false; tid_agg_rx->reorder_buf_filtered = 0; status = WLAN_STATUS_SUCCESS; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 159a1a73372506..0e718437d080e7 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -428,7 +428,7 @@ struct ieee80211_sta_tx_tspec { bool downgraded; }; -DECLARE_EWMA(beacon_signal, 16, 4) +DECLARE_EWMA(beacon_signal, 4, 4) struct ieee80211_if_managed { struct timer_list timer; diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 40813dd3301c60..5bb0c501281954 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -718,7 +718,8 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up) ieee80211_recalc_ps(local); if (sdata->vif.type == NL80211_IFTYPE_MONITOR || - sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { + sdata->vif.type == NL80211_IFTYPE_AP_VLAN || + local->ops->wake_tx_queue) { /* XXX: for AP_VLAN, actually track AP queues */ netif_tx_start_all_queues(dev); } else if (dev) { diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index fcba70e57073f3..953d71e784a9ab 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -9,6 +9,8 @@ #include #include #include +#include + #include "ieee80211_i.h" #include "rate.h" #include "mesh.h" diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c index 28a3a0957c9e35..76a8bcd8ef1123 100644 --- a/net/mac80211/pm.c +++ b/net/mac80211/pm.c @@ -168,6 +168,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) break; } + flush_delayed_work(&sdata->dec_tailroom_needed_wk); drv_remove_interface(local, sdata); } diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 50ca3828b1242e..e48724a6725e32 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -4,7 +4,7 @@ * Copyright 2006-2007 Jiri Benc * Copyright 2007-2010 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2016 Intel Deutschland GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -1034,6 +1034,18 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata buf_size = tid_agg_rx->buf_size; head_seq_num = tid_agg_rx->head_seq_num; + /* + * If the current MPDU's SN is smaller than the SSN, it shouldn't + * be reordered. 
+ */ + if (unlikely(!tid_agg_rx->started)) { + if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) { + ret = false; + goto out; + } + tid_agg_rx->started = true; + } + /* frame with out of date sequence number */ if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) { dev_kfree_skb(skb); @@ -3880,6 +3892,7 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx, stats->last_rate = sta_stats_encode_rate(status); stats->fragments++; + stats->packets++; if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { stats->last_signal = status->signal; @@ -4073,15 +4086,17 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, ieee80211_is_beacon(hdr->frame_control))) ieee80211_scan_rx(local, skb); - if (pubsta) { - rx.sta = container_of(pubsta, struct sta_info, sta); - rx.sdata = rx.sta->sdata; - if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) - return; - goto out; - } else if (ieee80211_is_data(fc)) { + if (ieee80211_is_data(fc)) { struct sta_info *sta, *prev_sta; + if (pubsta) { + rx.sta = container_of(pubsta, struct sta_info, sta); + rx.sdata = rx.sta->sdata; + if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) + return; + goto out; + } + prev_sta = NULL; for_each_sta_info(local, hdr->addr2, sta, tmp) { diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 4774e663a4112f..3323a2fb289bd0 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -688,7 +688,7 @@ static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending) } /* No need to do anything if the driver does all */ - if (ieee80211_hw_check(&local->hw, AP_LINK_PS)) + if (ieee80211_hw_check(&local->hw, AP_LINK_PS) && !local->ops->set_tim) return; if (sta->dead) @@ -1264,7 +1264,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) sta_info_recalc_tim(sta); ps_dbg(sdata, - "STA %pM aid %d sending %d filtered/%d PS frames since STA not sleeping anymore\n", + "STA %pM aid %d sending %d filtered/%d PS frames since STA woke up\n", sta->sta.addr, sta->sta.aid, filtered, buffered); ieee80211_check_fast_xmit(sta); diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index dd06ef0b886145..e65cda34d2bc00 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -189,6 +189,7 @@ struct tid_ampdu_tx { * @auto_seq: used for offloaded BA sessions to automatically pick head_seq_and * and ssn. * @removed: this session is removed (but might have been found due to RCU) + * @started: this session has started (head ssn or higher was received) * * This structure's lifetime is managed by RCU, assignments to * the array holding it must hold the aggregation mutex. 
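
The reorder-buffer change above only starts a BA session's tracking once a frame at or past the starting sequence number has been seen, using ieee80211_sn_less() to decide whether an MPDU precedes the head sequence number. Because 802.11 sequence numbers are 12-bit modular counters, that comparison has to be evaluated modulo the counter width rather than with a plain integer compare; the sketch below shows one wraparound-safe form of such a test, written in the spirit of (but not copied from) the kernel helper.

/* Sketch of a wraparound-safe "is a before b" test for 12-bit sequence
 * numbers, in the spirit of ieee80211_sn_less(); not the kernel code itself.
 */
#include <assert.h>
#include <stdbool.h>

#define SN_BITS  12
#define SN_MASK  ((1 << SN_BITS) - 1)
#define SN_HALF  (1 << (SN_BITS - 1))

/* True if sequence number a comes before b, modulo 2^12. */
static bool sn_less(unsigned int a, unsigned int b)
{
	return ((b - a) & SN_MASK) != 0 &&	/* not equal            */
	       ((b - a) & SN_MASK) < SN_HALF;	/* within half the space */
}

int main(void)
{
	assert(sn_less(10, 20));	/* plain ordering             */
	assert(!sn_less(20, 10));
	assert(sn_less(4090, 3));	/* ordering across wraparound */
	assert(!sn_less(3, 4090));
	return 0;
}
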
@@ -212,8 +213,9 @@ struct tid_ampdu_rx { u16 ssn; u16 buf_size; u16 timeout; - bool auto_seq; - bool removed; + u8 auto_seq:1, + removed:1, + started:1; }; /** @@ -370,7 +372,7 @@ struct mesh_sta { unsigned int fail_avg; }; -DECLARE_EWMA(signal, 1024, 8) +DECLARE_EWMA(signal, 10, 8) struct ieee80211_sta_rx_stats { unsigned long packets; diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 0dd7c351002dbe..83b8b11f24ea1d 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -51,7 +51,8 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local, struct ieee80211_hdr *hdr = (void *)skb->data; int ac; - if (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER) { + if (info->flags & (IEEE80211_TX_CTL_NO_PS_BUFFER | + IEEE80211_TX_CTL_AMPDU)) { ieee80211_free_txskb(&local->hw, skb); return; } diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c index 6a3e1c2181d3a9..1e1c9b20bab7f4 100644 --- a/net/mac802154/llsec.c +++ b/net/mac802154/llsec.c @@ -18,6 +18,8 @@ #include #include #include +#include + #include #include diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index 3818686182b210..6414079aa7297e 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -1269,6 +1269,8 @@ static void mpls_ifdown(struct net_device *dev, int event) { struct mpls_route __rcu **platform_label; struct net *net = dev_net(dev); + unsigned int nh_flags = RTNH_F_DEAD | RTNH_F_LINKDOWN; + unsigned int alive; unsigned index; platform_label = rtnl_dereference(net->mpls.platform_label); @@ -1278,9 +1280,11 @@ static void mpls_ifdown(struct net_device *dev, int event) if (!rt) continue; + alive = 0; change_nexthops(rt) { if (rtnl_dereference(nh->nh_dev) != dev) - continue; + goto next; + switch (event) { case NETDEV_DOWN: case NETDEV_UNREGISTER: @@ -1288,12 +1292,16 @@ static void mpls_ifdown(struct net_device *dev, int event) /* fall through */ case NETDEV_CHANGE: nh->nh_flags |= RTNH_F_LINKDOWN; - ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1; break; } if (event == NETDEV_UNREGISTER) RCU_INIT_POINTER(nh->nh_dev, NULL); +next: + if (!(nh->nh_flags & nh_flags)) + alive++; } endfor_nexthops(rt); + + WRITE_ONCE(rt->rt_nhn_alive, alive); } } @@ -2028,6 +2036,7 @@ static void mpls_net_exit(struct net *net) for (index = 0; index < platform_labels; index++) { struct mpls_route *rt = rtnl_dereference(platform_label[index]); RCU_INIT_POINTER(platform_label[index], NULL); + mpls_notify_route(net, index, rt, NULL, NULL); mpls_rt_free(rt); } rtnl_unlock(); diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 071b97fcbefb08..ffb78e5f7b7091 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -181,7 +181,11 @@ EXPORT_SYMBOL_GPL(nf_conntrack_htable_size); unsigned int nf_conntrack_max __read_mostly; seqcount_t nf_conntrack_generation __read_mostly; -DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked); +/* nf_conn must be 8 bytes aligned, as the 3 LSB bits are used + * for the nfctinfo. We cheat by (ab)using the PER CPU cache line + * alignment to enforce this. 
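
The DEFINE_PER_CPU_ALIGNED change above exists so that struct nf_conn pointers are guaranteed to have their three low bits clear, letting those bits carry the nfctinfo value, as the new comment says. Packing a small tag into the unused low bits of an aligned pointer is a general technique; here is a self-contained sketch with made-up names (struct conn, pack()/unpack()), assuming 8-byte alignment from the allocator.

/* Sketch of tagging the low bits of an aligned pointer with a small value,
 * as the nf_conn/nfctinfo comment above describes.  Illustrative names only.
 */
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define TAG_MASK 0x7UL			/* 3 low bits free with 8-byte alignment */

struct conn {
	long state;
} __attribute__((aligned(8)));

static uintptr_t pack(struct conn *c, unsigned int info)
{
	assert(((uintptr_t)c & TAG_MASK) == 0);	/* alignment guarantees this */
	assert(info <= TAG_MASK);
	return (uintptr_t)c | info;
}

static struct conn *unpack(uintptr_t v, unsigned int *info)
{
	*info = v & TAG_MASK;
	return (struct conn *)(v & ~TAG_MASK);
}

int main(void)
{
	struct conn *c = aligned_alloc(8, sizeof(*c));
	unsigned int info;

	uintptr_t packed = pack(c, 5);		/* pointer + 3-bit tag in one word */
	struct conn *back = unpack(packed, &info);

	assert(back == c && info == 5);
	free(c);
	return 0;
}
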
+ */ +DEFINE_PER_CPU_ALIGNED(struct nf_conn, nf_conntrack_untracked); EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked); static unsigned int nf_conntrack_hash_rnd __read_mostly; diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c index da9df2d56e669e..22fc32143e9c4a 100644 --- a/net/netfilter/nf_conntrack_ecache.c +++ b/net/netfilter/nf_conntrack_ecache.c @@ -290,6 +290,7 @@ void nf_conntrack_unregister_notifier(struct net *net, BUG_ON(notify != new); RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL); mutex_unlock(&nf_ct_ecache_mutex); + /* synchronize_rcu() is called from ctnetlink_exit. */ } EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier); @@ -326,6 +327,7 @@ void nf_ct_expect_unregister_notifier(struct net *net, BUG_ON(notify != new); RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL); mutex_unlock(&nf_ct_ecache_mutex); + /* synchronize_rcu() is called from ctnetlink_exit. */ } EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier); diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c index 02bcf00c24920b..008299b7f78fe3 100644 --- a/net/netfilter/nf_conntrack_extend.c +++ b/net/netfilter/nf_conntrack_extend.c @@ -53,7 +53,11 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, rcu_read_lock(); t = rcu_dereference(nf_ct_ext_types[id]); - BUG_ON(t == NULL); + if (!t) { + rcu_read_unlock(); + return NULL; + } + off = ALIGN(sizeof(struct nf_ct_ext), t->align); len = off + t->len + var_alloc_len; alloc_size = t->alloc_size + var_alloc_len; @@ -88,7 +92,10 @@ void *__nf_ct_ext_add_length(struct nf_conn *ct, enum nf_ct_ext_id id, rcu_read_lock(); t = rcu_dereference(nf_ct_ext_types[id]); - BUG_ON(t == NULL); + if (!t) { + rcu_read_unlock(); + return NULL; + } newoff = ALIGN(old->len, t->align); newlen = newoff + t->len + var_alloc_len; @@ -175,6 +182,6 @@ void nf_ct_extend_unregister(struct nf_ct_ext_type *type) RCU_INIT_POINTER(nf_ct_ext_types[type->id], NULL); update_alloc_size(type); mutex_unlock(&nf_ct_ext_type_mutex); - rcu_barrier(); /* Wait for completion of call_rcu()'s */ + synchronize_rcu(); } EXPORT_SYMBOL_GPL(nf_ct_extend_unregister); diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 6806b5e73567bb..908d858034e4f4 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -3442,6 +3442,7 @@ static void __exit ctnetlink_exit(void) #ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT RCU_INIT_POINTER(nfnl_ct_hook, NULL); #endif + synchronize_rcu(); } module_init(ctnetlink_init); diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index 24174c5202398f..0d17894798b5ca 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c @@ -1628,8 +1628,6 @@ static int __init nf_conntrack_sip_init(void) ports[ports_c++] = SIP_PORT; for (i = 0; i < ports_c; i++) { - memset(&sip[i], 0, sizeof(sip[i])); - nf_ct_helper_init(&sip[4 * i], AF_INET, IPPROTO_UDP, "sip", SIP_PORT, ports[i], i, sip_exp_policy, SIP_EXPECT_MAX, diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index 94b14c5a8b1772..82802e4a664081 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c @@ -903,6 +903,8 @@ static void __exit nf_nat_cleanup(void) #ifdef CONFIG_XFRM RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL); #endif + synchronize_rcu(); + for (i = 0; i < NFPROTO_NUMPROTO; i++) kfree(nf_nat_l4protos[i]); diff --git a/net/netfilter/nf_nat_proto_sctp.c 
b/net/netfilter/nf_nat_proto_sctp.c index 31d358691af096..804e8a0ab36ef5 100644 --- a/net/netfilter/nf_nat_proto_sctp.c +++ b/net/netfilter/nf_nat_proto_sctp.c @@ -33,8 +33,16 @@ sctp_manip_pkt(struct sk_buff *skb, enum nf_nat_manip_type maniptype) { sctp_sctphdr_t *hdr; + int hdrsize = 8; - if (!skb_make_writable(skb, hdroff + sizeof(*hdr))) + /* This could be an inner header returned in imcp packet; in such + * cases we cannot update the checksum field since it is outside + * of the 8 bytes of transport layer headers we are guaranteed. + */ + if (skb->len >= hdroff + sizeof(*hdr)) + hdrsize = sizeof(*hdr); + + if (!skb_make_writable(skb, hdroff + hdrsize)) return false; hdr = (struct sctphdr *)(skb->data + hdroff); @@ -47,6 +55,9 @@ sctp_manip_pkt(struct sk_buff *skb, hdr->dest = tuple->dst.u.sctp.port; } + if (hdrsize < sizeof(*hdr)) + return true; + if (skb->ip_summed != CHECKSUM_PARTIAL) { hdr->checksum = sctp_compute_cksum(skb, hdroff); skb->ip_summed = CHECKSUM_NONE; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index ff7304ae58ac4f..434c739dfecaa8 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -461,16 +461,15 @@ static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net, return -1; } -static int nf_tables_table_notify(const struct nft_ctx *ctx, int event) +static void nf_tables_table_notify(const struct nft_ctx *ctx, int event) { struct sk_buff *skb; int err; if (!ctx->report && !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES)) - return 0; + return; - err = -ENOBUFS; skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); if (skb == NULL) goto err; @@ -482,14 +481,11 @@ static int nf_tables_table_notify(const struct nft_ctx *ctx, int event) goto err; } - err = nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES, - ctx->report, GFP_KERNEL); + nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES, + ctx->report, GFP_KERNEL); + return; err: - if (err < 0) { - nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, - err); - } - return err; + nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, -ENOBUFS); } static int nf_tables_dump_tables(struct sk_buff *skb, @@ -1050,16 +1046,15 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net, return -1; } -static int nf_tables_chain_notify(const struct nft_ctx *ctx, int event) +static void nf_tables_chain_notify(const struct nft_ctx *ctx, int event) { struct sk_buff *skb; int err; if (!ctx->report && !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES)) - return 0; + return; - err = -ENOBUFS; skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); if (skb == NULL) goto err; @@ -1072,14 +1067,11 @@ static int nf_tables_chain_notify(const struct nft_ctx *ctx, int event) goto err; } - err = nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES, - ctx->report, GFP_KERNEL); + nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES, + ctx->report, GFP_KERNEL); + return; err: - if (err < 0) { - nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, - err); - } - return err; + nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, -ENOBUFS); } static int nf_tables_dump_chains(struct sk_buff *skb, @@ -1934,18 +1926,16 @@ static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net, return -1; } -static int nf_tables_rule_notify(const struct nft_ctx *ctx, - const struct nft_rule *rule, - int event) +static void nf_tables_rule_notify(const struct nft_ctx *ctx, + const struct nft_rule *rule, int event) { struct sk_buff 
*skb; int err; if (!ctx->report && !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES)) - return 0; + return; - err = -ENOBUFS; skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); if (skb == NULL) goto err; @@ -1958,14 +1948,11 @@ static int nf_tables_rule_notify(const struct nft_ctx *ctx, goto err; } - err = nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES, - ctx->report, GFP_KERNEL); + nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES, + ctx->report, GFP_KERNEL); + return; err: - if (err < 0) { - nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, - err); - } - return err; + nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, -ENOBUFS); } struct nft_rule_dump_ctx { @@ -2696,9 +2683,9 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx, return -1; } -static int nf_tables_set_notify(const struct nft_ctx *ctx, - const struct nft_set *set, - int event, gfp_t gfp_flags) +static void nf_tables_set_notify(const struct nft_ctx *ctx, + const struct nft_set *set, int event, + gfp_t gfp_flags) { struct sk_buff *skb; u32 portid = ctx->portid; @@ -2706,9 +2693,8 @@ static int nf_tables_set_notify(const struct nft_ctx *ctx, if (!ctx->report && !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES)) - return 0; + return; - err = -ENOBUFS; skb = nlmsg_new(NLMSG_GOODSIZE, gfp_flags); if (skb == NULL) goto err; @@ -2719,12 +2705,11 @@ static int nf_tables_set_notify(const struct nft_ctx *ctx, goto err; } - err = nfnetlink_send(skb, ctx->net, portid, NFNLGRP_NFTABLES, - ctx->report, gfp_flags); + nfnetlink_send(skb, ctx->net, portid, NFNLGRP_NFTABLES, ctx->report, + gfp_flags); + return; err: - if (err < 0) - nfnetlink_set_err(ctx->net, portid, NFNLGRP_NFTABLES, err); - return err; + nfnetlink_set_err(ctx->net, portid, NFNLGRP_NFTABLES, -ENOBUFS); } static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb) @@ -3160,7 +3145,6 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, iter.count = 0; iter.err = 0; iter.fn = nf_tables_bind_check_setelem; - iter.flush = false; set->ops->walk(ctx, set, &iter); if (iter.err < 0) @@ -3414,7 +3398,6 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) args.iter.count = 0; args.iter.err = 0; args.iter.fn = nf_tables_dump_setelem; - args.iter.flush = false; set->ops->walk(&ctx, set, &args.iter); nla_nest_end(skb, nest); @@ -3504,10 +3487,10 @@ static int nf_tables_fill_setelem_info(struct sk_buff *skb, return -1; } -static int nf_tables_setelem_notify(const struct nft_ctx *ctx, - const struct nft_set *set, - const struct nft_set_elem *elem, - int event, u16 flags) +static void nf_tables_setelem_notify(const struct nft_ctx *ctx, + const struct nft_set *set, + const struct nft_set_elem *elem, + int event, u16 flags) { struct net *net = ctx->net; u32 portid = ctx->portid; @@ -3515,9 +3498,8 @@ static int nf_tables_setelem_notify(const struct nft_ctx *ctx, int err; if (!ctx->report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES)) - return 0; + return; - err = -ENOBUFS; skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); if (skb == NULL) goto err; @@ -3529,12 +3511,11 @@ static int nf_tables_setelem_notify(const struct nft_ctx *ctx, goto err; } - err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, ctx->report, - GFP_KERNEL); + nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, ctx->report, + GFP_KERNEL); + return; err: - if (err < 0) - nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err); - return err; + nfnetlink_set_err(net, portid, 
NFNLGRP_NFTABLES, -ENOBUFS); } static struct nft_trans *nft_trans_elem_alloc(struct nft_ctx *ctx, @@ -3980,7 +3961,6 @@ static int nf_tables_delsetelem(struct net *net, struct sock *nlsk, struct nft_set_iter iter = { .genmask = genmask, .fn = nft_flush_set, - .flush = true, }; set->ops->walk(&ctx, set, &iter); @@ -4476,18 +4456,17 @@ static int nf_tables_delobj(struct net *net, struct sock *nlsk, return nft_delobj(&ctx, obj); } -int nft_obj_notify(struct net *net, struct nft_table *table, - struct nft_object *obj, u32 portid, u32 seq, int event, - int family, int report, gfp_t gfp) +void nft_obj_notify(struct net *net, struct nft_table *table, + struct nft_object *obj, u32 portid, u32 seq, int event, + int family, int report, gfp_t gfp) { struct sk_buff *skb; int err; if (!report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES)) - return 0; + return; - err = -ENOBUFS; skb = nlmsg_new(NLMSG_GOODSIZE, gfp); if (skb == NULL) goto err; @@ -4499,21 +4478,18 @@ int nft_obj_notify(struct net *net, struct nft_table *table, goto err; } - err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, report, gfp); + nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, report, gfp); + return; err: - if (err < 0) { - nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err); - } - return err; + nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, -ENOBUFS); } EXPORT_SYMBOL_GPL(nft_obj_notify); -static int nf_tables_obj_notify(const struct nft_ctx *ctx, - struct nft_object *obj, int event) +static void nf_tables_obj_notify(const struct nft_ctx *ctx, + struct nft_object *obj, int event) { - return nft_obj_notify(ctx->net, ctx->table, obj, ctx->portid, - ctx->seq, event, ctx->afi->family, ctx->report, - GFP_KERNEL); + nft_obj_notify(ctx->net, ctx->table, obj, ctx->portid, ctx->seq, event, + ctx->afi->family, ctx->report, GFP_KERNEL); } static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net, @@ -4543,7 +4519,8 @@ static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net, return -EMSGSIZE; } -static int nf_tables_gen_notify(struct net *net, struct sk_buff *skb, int event) +static void nf_tables_gen_notify(struct net *net, struct sk_buff *skb, + int event) { struct nlmsghdr *nlh = nlmsg_hdr(skb); struct sk_buff *skb2; @@ -4551,9 +4528,8 @@ static int nf_tables_gen_notify(struct net *net, struct sk_buff *skb, int event) if (nlmsg_report(nlh) && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES)) - return 0; + return; - err = -ENOBUFS; skb2 = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); if (skb2 == NULL) goto err; @@ -4565,14 +4541,12 @@ static int nf_tables_gen_notify(struct net *net, struct sk_buff *skb, int event) goto err; } - err = nfnetlink_send(skb2, net, NETLINK_CB(skb).portid, - NFNLGRP_NFTABLES, nlmsg_report(nlh), GFP_KERNEL); + nfnetlink_send(skb2, net, NETLINK_CB(skb).portid, NFNLGRP_NFTABLES, + nlmsg_report(nlh), GFP_KERNEL); + return; err: - if (err < 0) { - nfnetlink_set_err(net, NETLINK_CB(skb).portid, NFNLGRP_NFTABLES, - err); - } - return err; + nfnetlink_set_err(net, NETLINK_CB(skb).portid, NFNLGRP_NFTABLES, + -ENOBUFS); } static int nf_tables_getgen(struct net *net, struct sock *nlsk, @@ -5137,7 +5111,6 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx, iter.count = 0; iter.err = 0; iter.fn = nf_tables_loop_check_setelem; - iter.flush = false; set->ops->walk(ctx, set, &iter); if (iter.err < 0) diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c index de8782345c8637..d45558178da5b6 100644 --- a/net/netfilter/nfnetlink_cthelper.c +++ 
b/net/netfilter/nfnetlink_cthelper.c @@ -32,6 +32,13 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Pablo Neira Ayuso "); MODULE_DESCRIPTION("nfnl_cthelper: User-space connection tracking helpers"); +struct nfnl_cthelper { + struct list_head list; + struct nf_conntrack_helper helper; +}; + +static LIST_HEAD(nfnl_cthelper_list); + static int nfnl_userspace_cthelper(struct sk_buff *skb, unsigned int protoff, struct nf_conn *ct, enum ip_conntrack_info ctinfo) @@ -161,6 +168,7 @@ nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper, int i, ret; struct nf_conntrack_expect_policy *expect_policy; struct nlattr *tb[NFCTH_POLICY_SET_MAX+1]; + unsigned int class_max; ret = nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr, nfnl_cthelper_expect_policy_set); @@ -170,19 +178,18 @@ nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper, if (!tb[NFCTH_POLICY_SET_NUM]) return -EINVAL; - helper->expect_class_max = - ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM])); - - if (helper->expect_class_max != 0 && - helper->expect_class_max > NF_CT_MAX_EXPECT_CLASSES) + class_max = ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM])); + if (class_max == 0) + return -EINVAL; + if (class_max > NF_CT_MAX_EXPECT_CLASSES) return -EOVERFLOW; expect_policy = kzalloc(sizeof(struct nf_conntrack_expect_policy) * - helper->expect_class_max, GFP_KERNEL); + class_max, GFP_KERNEL); if (expect_policy == NULL) return -ENOMEM; - for (i=0; iexpect_class_max; i++) { + for (i = 0; i < class_max; i++) { if (!tb[NFCTH_POLICY_SET+i]) goto err; @@ -191,6 +198,8 @@ nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper, if (ret < 0) goto err; } + + helper->expect_class_max = class_max - 1; helper->expect_policy = expect_policy; return 0; err: @@ -203,18 +212,20 @@ nfnl_cthelper_create(const struct nlattr * const tb[], struct nf_conntrack_tuple *tuple) { struct nf_conntrack_helper *helper; + struct nfnl_cthelper *nfcth; int ret; if (!tb[NFCTH_TUPLE] || !tb[NFCTH_POLICY] || !tb[NFCTH_PRIV_DATA_LEN]) return -EINVAL; - helper = kzalloc(sizeof(struct nf_conntrack_helper), GFP_KERNEL); - if (helper == NULL) + nfcth = kzalloc(sizeof(*nfcth), GFP_KERNEL); + if (nfcth == NULL) return -ENOMEM; + helper = &nfcth->helper; ret = nfnl_cthelper_parse_expect_policy(helper, tb[NFCTH_POLICY]); if (ret < 0) - goto err; + goto err1; strncpy(helper->name, nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN); helper->data_len = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN])); @@ -245,14 +256,100 @@ nfnl_cthelper_create(const struct nlattr * const tb[], ret = nf_conntrack_helper_register(helper); if (ret < 0) - goto err; + goto err2; + list_add_tail(&nfcth->list, &nfnl_cthelper_list); return 0; -err: - kfree(helper); +err2: + kfree(helper->expect_policy); +err1: + kfree(nfcth); return ret; } +static int +nfnl_cthelper_update_policy_one(const struct nf_conntrack_expect_policy *policy, + struct nf_conntrack_expect_policy *new_policy, + const struct nlattr *attr) +{ + struct nlattr *tb[NFCTH_POLICY_MAX + 1]; + int err; + + err = nla_parse_nested(tb, NFCTH_POLICY_MAX, attr, + nfnl_cthelper_expect_pol); + if (err < 0) + return err; + + if (!tb[NFCTH_POLICY_NAME] || + !tb[NFCTH_POLICY_EXPECT_MAX] || + !tb[NFCTH_POLICY_EXPECT_TIMEOUT]) + return -EINVAL; + + if (nla_strcmp(tb[NFCTH_POLICY_NAME], policy->name)) + return -EBUSY; + + new_policy->max_expected = + ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX])); + new_policy->timeout = + ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_TIMEOUT])); + + return 0; +} + +static int 
nfnl_cthelper_update_policy_all(struct nlattr *tb[], + struct nf_conntrack_helper *helper) +{ + struct nf_conntrack_expect_policy new_policy[helper->expect_class_max + 1]; + struct nf_conntrack_expect_policy *policy; + int i, err; + + /* Check first that all policy attributes are well-formed, so we don't + * leave things in inconsistent state on errors. + */ + for (i = 0; i < helper->expect_class_max + 1; i++) { + + if (!tb[NFCTH_POLICY_SET + i]) + return -EINVAL; + + err = nfnl_cthelper_update_policy_one(&helper->expect_policy[i], + &new_policy[i], + tb[NFCTH_POLICY_SET + i]); + if (err < 0) + return err; + } + /* Now we can safely update them. */ + for (i = 0; i < helper->expect_class_max + 1; i++) { + policy = (struct nf_conntrack_expect_policy *) + &helper->expect_policy[i]; + policy->max_expected = new_policy->max_expected; + policy->timeout = new_policy->timeout; + } + + return 0; +} + +static int nfnl_cthelper_update_policy(struct nf_conntrack_helper *helper, + const struct nlattr *attr) +{ + struct nlattr *tb[NFCTH_POLICY_SET_MAX + 1]; + unsigned int class_max; + int err; + + err = nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr, + nfnl_cthelper_expect_policy_set); + if (err < 0) + return err; + + if (!tb[NFCTH_POLICY_SET_NUM]) + return -EINVAL; + + class_max = ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM])); + if (helper->expect_class_max + 1 != class_max) + return -EBUSY; + + return nfnl_cthelper_update_policy_all(tb, helper); +} + static int nfnl_cthelper_update(const struct nlattr * const tb[], struct nf_conntrack_helper *helper) @@ -263,8 +360,7 @@ nfnl_cthelper_update(const struct nlattr * const tb[], return -EBUSY; if (tb[NFCTH_POLICY]) { - ret = nfnl_cthelper_parse_expect_policy(helper, - tb[NFCTH_POLICY]); + ret = nfnl_cthelper_update_policy(helper, tb[NFCTH_POLICY]); if (ret < 0) return ret; } @@ -293,7 +389,8 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl, const char *helper_name; struct nf_conntrack_helper *cur, *helper = NULL; struct nf_conntrack_tuple tuple; - int ret = 0, i; + struct nfnl_cthelper *nlcth; + int ret = 0; if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE]) return -EINVAL; @@ -304,31 +401,22 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl, if (ret < 0) return ret; - rcu_read_lock(); - for (i = 0; i < nf_ct_helper_hsize && !helper; i++) { - hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) { + list_for_each_entry(nlcth, &nfnl_cthelper_list, list) { + cur = &nlcth->helper; - /* skip non-userspace conntrack helpers. 
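
nfnl_cthelper_update_policy_all() above deliberately parses every policy attribute into a scratch array before touching the live expect policies, so a malformed entry in the middle of a request cannot leave the helper half-updated. The same validate-everything-then-commit shape is reduced here to a runnable sketch, with plain strings standing in for netlink attributes and hypothetical parse_one()/update_all() helpers.

/* Sketch of the validate-all-then-apply pattern used when updating the
 * expect policies: parse every entry into a scratch copy first, and only
 * touch the live state once all entries have been accepted.
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>

#define NPOLICY 4

struct policy { unsigned int max_expected; unsigned int timeout; };

static struct policy live[NPOLICY] = {
	{ 8, 300 }, { 8, 300 }, { 8, 300 }, { 8, 300 },
};

static int parse_one(const char *attr, struct policy *out)
{
	/* Stand-in for nla_parse_nested(): reject obviously bad input. */
	if (sscanf(attr, "%u:%u", &out->max_expected, &out->timeout) != 2)
		return -EINVAL;
	return 0;
}

static int update_all(const char *const attrs[NPOLICY])
{
	struct policy scratch[NPOLICY];
	int i, err;

	/* Phase 1: validate everything; bail out without side effects. */
	for (i = 0; i < NPOLICY; i++) {
		err = parse_one(attrs[i], &scratch[i]);
		if (err < 0)
			return err;
	}

	/* Phase 2: all inputs are good, now copy them over the live state. */
	memcpy(live, scratch, sizeof(live));
	return 0;
}

int main(void)
{
	const char *good[NPOLICY] = { "16:600", "16:600", "16:600", "16:600" };
	const char *bad[NPOLICY]  = { "16:600", "oops",   "16:600", "16:600" };

	printf("bad update: %d (live untouched: %u)\n",
	       update_all(bad), live[0].max_expected);
	printf("good update: %d (live now: %u)\n",
	       update_all(good), live[0].max_expected);
	return 0;
}
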
*/ - if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) - continue; + if (strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN)) + continue; - if (strncmp(cur->name, helper_name, - NF_CT_HELPER_NAME_LEN) != 0) - continue; + if ((tuple.src.l3num != cur->tuple.src.l3num || + tuple.dst.protonum != cur->tuple.dst.protonum)) + continue; - if ((tuple.src.l3num != cur->tuple.src.l3num || - tuple.dst.protonum != cur->tuple.dst.protonum)) - continue; + if (nlh->nlmsg_flags & NLM_F_EXCL) + return -EEXIST; - if (nlh->nlmsg_flags & NLM_F_EXCL) { - ret = -EEXIST; - goto err; - } - helper = cur; - break; - } + helper = cur; + break; } - rcu_read_unlock(); if (helper == NULL) ret = nfnl_cthelper_create(tb, &tuple); @@ -336,9 +424,6 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl, ret = nfnl_cthelper_update(tb, helper); return ret; -err: - rcu_read_unlock(); - return ret; } static int @@ -377,10 +462,10 @@ nfnl_cthelper_dump_policy(struct sk_buff *skb, goto nla_put_failure; if (nla_put_be32(skb, NFCTH_POLICY_SET_NUM, - htonl(helper->expect_class_max))) + htonl(helper->expect_class_max + 1))) goto nla_put_failure; - for (i=0; iexpect_class_max; i++) { + for (i = 0; i < helper->expect_class_max + 1; i++) { nest_parms2 = nla_nest_start(skb, (NFCTH_POLICY_SET+i) | NLA_F_NESTED); if (nest_parms2 == NULL) @@ -502,11 +587,12 @@ static int nfnl_cthelper_get(struct net *net, struct sock *nfnl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const tb[]) { - int ret = -ENOENT, i; + int ret = -ENOENT; struct nf_conntrack_helper *cur; struct sk_buff *skb2; char *helper_name = NULL; struct nf_conntrack_tuple tuple; + struct nfnl_cthelper *nlcth; bool tuple_set = false; if (nlh->nlmsg_flags & NLM_F_DUMP) { @@ -527,45 +613,39 @@ static int nfnl_cthelper_get(struct net *net, struct sock *nfnl, tuple_set = true; } - for (i = 0; i < nf_ct_helper_hsize; i++) { - hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) { + list_for_each_entry(nlcth, &nfnl_cthelper_list, list) { + cur = &nlcth->helper; + if (helper_name && + strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN)) + continue; - /* skip non-userspace conntrack helpers. */ - if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) - continue; + if (tuple_set && + (tuple.src.l3num != cur->tuple.src.l3num || + tuple.dst.protonum != cur->tuple.dst.protonum)) + continue; - if (helper_name && strncmp(cur->name, helper_name, - NF_CT_HELPER_NAME_LEN) != 0) { - continue; - } - if (tuple_set && - (tuple.src.l3num != cur->tuple.src.l3num || - tuple.dst.protonum != cur->tuple.dst.protonum)) - continue; - - skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); - if (skb2 == NULL) { - ret = -ENOMEM; - break; - } + skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (skb2 == NULL) { + ret = -ENOMEM; + break; + } - ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid, - nlh->nlmsg_seq, - NFNL_MSG_TYPE(nlh->nlmsg_type), - NFNL_MSG_CTHELPER_NEW, cur); - if (ret <= 0) { - kfree_skb(skb2); - break; - } + ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid, + nlh->nlmsg_seq, + NFNL_MSG_TYPE(nlh->nlmsg_type), + NFNL_MSG_CTHELPER_NEW, cur); + if (ret <= 0) { + kfree_skb(skb2); + break; + } - ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid, - MSG_DONTWAIT); - if (ret > 0) - ret = 0; + ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid, + MSG_DONTWAIT); + if (ret > 0) + ret = 0; - /* this avoids a loop in nfnetlink. */ - return ret == -EAGAIN ? -ENOBUFS : ret; - } + /* this avoids a loop in nfnetlink. */ + return ret == -EAGAIN ? 
-ENOBUFS : ret; } return ret; } @@ -576,10 +656,10 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl, { char *helper_name = NULL; struct nf_conntrack_helper *cur; - struct hlist_node *tmp; struct nf_conntrack_tuple tuple; bool tuple_set = false, found = false; - int i, j = 0, ret; + struct nfnl_cthelper *nlcth, *n; + int j = 0, ret; if (tb[NFCTH_NAME]) helper_name = nla_data(tb[NFCTH_NAME]); @@ -592,28 +672,27 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl, tuple_set = true; } - for (i = 0; i < nf_ct_helper_hsize; i++) { - hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i], - hnode) { - /* skip non-userspace conntrack helpers. */ - if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) - continue; + list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) { + cur = &nlcth->helper; + j++; - j++; + if (helper_name && + strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN)) + continue; - if (helper_name && strncmp(cur->name, helper_name, - NF_CT_HELPER_NAME_LEN) != 0) { - continue; - } - if (tuple_set && - (tuple.src.l3num != cur->tuple.src.l3num || - tuple.dst.protonum != cur->tuple.dst.protonum)) - continue; + if (tuple_set && + (tuple.src.l3num != cur->tuple.src.l3num || + tuple.dst.protonum != cur->tuple.dst.protonum)) + continue; - found = true; - nf_conntrack_helper_unregister(cur); - } + found = true; + nf_conntrack_helper_unregister(cur); + kfree(cur->expect_policy); + + list_del(&nlcth->list); + kfree(nlcth); } + /* Make sure we return success if we flush and there is no helpers */ return (found || j == 0) ? 0 : -ENOENT; } @@ -662,20 +741,16 @@ static int __init nfnl_cthelper_init(void) static void __exit nfnl_cthelper_exit(void) { struct nf_conntrack_helper *cur; - struct hlist_node *tmp; - int i; + struct nfnl_cthelper *nlcth, *n; nfnetlink_subsys_unregister(&nfnl_cthelper_subsys); - for (i=0; iflags & NF_CT_HELPER_F_USERSPACE)) - continue; + list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) { + cur = &nlcth->helper; - nf_conntrack_helper_unregister(cur); - } + nf_conntrack_helper_unregister(cur); + kfree(cur->expect_policy); + kfree(nlcth); } } diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c index 139e0867e56e9e..47d6656c9119fd 100644 --- a/net/netfilter/nfnetlink_cttimeout.c +++ b/net/netfilter/nfnetlink_cttimeout.c @@ -646,8 +646,8 @@ static void __exit cttimeout_exit(void) #ifdef CONFIG_NF_CONNTRACK_TIMEOUT RCU_INIT_POINTER(nf_ct_timeout_find_get_hook, NULL); RCU_INIT_POINTER(nf_ct_timeout_put_hook, NULL); + synchronize_rcu(); #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ - rcu_barrier(); } module_init(cttimeout_init); diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 3ee0b8a000a41e..933509ebf3d3e2 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -443,7 +443,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, skb = alloc_skb(size, GFP_ATOMIC); if (!skb) { skb_tx_error(entskb); - return NULL; + goto nlmsg_failure; } nlh = nlmsg_put(skb, 0, 0, @@ -452,7 +452,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, if (!nlh) { skb_tx_error(entskb); kfree_skb(skb); - return NULL; + goto nlmsg_failure; } nfmsg = nlmsg_data(nlh); nfmsg->nfgen_family = entry->state.pf; @@ -598,12 +598,17 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, } nlh->nlmsg_len = skb->len; + if (seclen) + security_release_secctx(secdata, seclen); return skb; nla_put_failure: 
skb_tx_error(entskb); kfree_skb(skb); net_err_ratelimited("nf_queue: error creating packet message\n"); +nlmsg_failure: + if (seclen) + security_release_secctx(secdata, seclen); return NULL; } diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index bf548a7a71ec9b..0264258c46feb5 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c @@ -83,7 +83,7 @@ static void nft_ct_get_eval(const struct nft_expr *expr, switch (priv->key) { case NFT_CT_DIRECTION: - *dest = CTINFO2DIR(ctinfo); + nft_reg_store8(dest, CTINFO2DIR(ctinfo)); return; case NFT_CT_STATUS: *dest = ct->status; @@ -151,20 +151,22 @@ static void nft_ct_get_eval(const struct nft_expr *expr, return; } case NFT_CT_L3PROTOCOL: - *dest = nf_ct_l3num(ct); + nft_reg_store8(dest, nf_ct_l3num(ct)); return; case NFT_CT_PROTOCOL: - *dest = nf_ct_protonum(ct); + nft_reg_store8(dest, nf_ct_protonum(ct)); return; #ifdef CONFIG_NF_CONNTRACK_ZONES case NFT_CT_ZONE: { const struct nf_conntrack_zone *zone = nf_ct_zone(ct); + u16 zoneid; if (priv->dir < IP_CT_DIR_MAX) - *dest = nf_ct_zone_id(zone, priv->dir); + zoneid = nf_ct_zone_id(zone, priv->dir); else - *dest = zone->id; + zoneid = zone->id; + nft_reg_store16(dest, zoneid); return; } #endif @@ -183,10 +185,10 @@ static void nft_ct_get_eval(const struct nft_expr *expr, nf_ct_l3num(ct) == NFPROTO_IPV4 ? 4 : 16); return; case NFT_CT_PROTO_SRC: - *dest = (__force __u16)tuple->src.u.all; + nft_reg_store16(dest, (__force u16)tuple->src.u.all); return; case NFT_CT_PROTO_DST: - *dest = (__force __u16)tuple->dst.u.all; + nft_reg_store16(dest, (__force u16)tuple->dst.u.all); return; default: break; @@ -205,7 +207,7 @@ static void nft_ct_set_zone_eval(const struct nft_expr *expr, const struct nft_ct *priv = nft_expr_priv(expr); struct sk_buff *skb = pkt->skb; enum ip_conntrack_info ctinfo; - u16 value = regs->data[priv->sreg]; + u16 value = nft_reg_load16(®s->data[priv->sreg]); struct nf_conn *ct; ct = nf_ct_get(skb, &ctinfo); @@ -542,7 +544,8 @@ static int nft_ct_set_init(const struct nft_ctx *ctx, case IP_CT_DIR_REPLY: break; default: - return -EINVAL; + err = -EINVAL; + goto err1; } } diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c index e1f5ca9b423b5f..7b60e01f38ff9f 100644 --- a/net/netfilter/nft_meta.c +++ b/net/netfilter/nft_meta.c @@ -45,16 +45,15 @@ void nft_meta_get_eval(const struct nft_expr *expr, *dest = skb->len; break; case NFT_META_PROTOCOL: - *dest = 0; - *(__be16 *)dest = skb->protocol; + nft_reg_store16(dest, (__force u16)skb->protocol); break; case NFT_META_NFPROTO: - *dest = nft_pf(pkt); + nft_reg_store8(dest, nft_pf(pkt)); break; case NFT_META_L4PROTO: if (!pkt->tprot_set) goto err; - *dest = pkt->tprot; + nft_reg_store8(dest, pkt->tprot); break; case NFT_META_PRIORITY: *dest = skb->priority; @@ -85,14 +84,12 @@ void nft_meta_get_eval(const struct nft_expr *expr, case NFT_META_IIFTYPE: if (in == NULL) goto err; - *dest = 0; - *(u16 *)dest = in->type; + nft_reg_store16(dest, in->type); break; case NFT_META_OIFTYPE: if (out == NULL) goto err; - *dest = 0; - *(u16 *)dest = out->type; + nft_reg_store16(dest, out->type); break; case NFT_META_SKUID: sk = skb_to_full_sk(skb); @@ -142,19 +139,19 @@ void nft_meta_get_eval(const struct nft_expr *expr, #endif case NFT_META_PKTTYPE: if (skb->pkt_type != PACKET_LOOPBACK) { - *dest = skb->pkt_type; + nft_reg_store8(dest, skb->pkt_type); break; } switch (nft_pf(pkt)) { case NFPROTO_IPV4: if (ipv4_is_multicast(ip_hdr(skb)->daddr)) - *dest = PACKET_MULTICAST; + nft_reg_store8(dest, PACKET_MULTICAST); else - *dest = 
PACKET_BROADCAST; + nft_reg_store8(dest, PACKET_BROADCAST); break; case NFPROTO_IPV6: - *dest = PACKET_MULTICAST; + nft_reg_store8(dest, PACKET_MULTICAST); break; case NFPROTO_NETDEV: switch (skb->protocol) { @@ -168,14 +165,14 @@ void nft_meta_get_eval(const struct nft_expr *expr, goto err; if (ipv4_is_multicast(iph->daddr)) - *dest = PACKET_MULTICAST; + nft_reg_store8(dest, PACKET_MULTICAST); else - *dest = PACKET_BROADCAST; + nft_reg_store8(dest, PACKET_BROADCAST); break; } case htons(ETH_P_IPV6): - *dest = PACKET_MULTICAST; + nft_reg_store8(dest, PACKET_MULTICAST); break; default: WARN_ON_ONCE(1); @@ -230,7 +227,9 @@ void nft_meta_set_eval(const struct nft_expr *expr, { const struct nft_meta *meta = nft_expr_priv(expr); struct sk_buff *skb = pkt->skb; - u32 value = regs->data[meta->sreg]; + u32 *sreg = ®s->data[meta->sreg]; + u32 value = *sreg; + u8 pkt_type; switch (meta->key) { case NFT_META_MARK: @@ -240,9 +239,12 @@ void nft_meta_set_eval(const struct nft_expr *expr, skb->priority = value; break; case NFT_META_PKTTYPE: - if (skb->pkt_type != value && - skb_pkt_type_ok(value) && skb_pkt_type_ok(skb->pkt_type)) - skb->pkt_type = value; + pkt_type = nft_reg_load8(sreg); + + if (skb->pkt_type != pkt_type && + skb_pkt_type_ok(pkt_type) && + skb_pkt_type_ok(skb->pkt_type)) + skb->pkt_type = pkt_type; break; case NFT_META_NFTRACE: skb->nf_trace = !!value; diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c index 19a7bf3236f968..439e0bd152a004 100644 --- a/net/netfilter/nft_nat.c +++ b/net/netfilter/nft_nat.c @@ -65,10 +65,10 @@ static void nft_nat_eval(const struct nft_expr *expr, } if (priv->sreg_proto_min) { - range.min_proto.all = - *(__be16 *)®s->data[priv->sreg_proto_min]; - range.max_proto.all = - *(__be16 *)®s->data[priv->sreg_proto_max]; + range.min_proto.all = (__force __be16)nft_reg_load16( + ®s->data[priv->sreg_proto_min]); + range.max_proto.all = (__force __be16)nft_reg_load16( + ®s->data[priv->sreg_proto_max]); range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; } diff --git a/net/netfilter/nft_set_bitmap.c b/net/netfilter/nft_set_bitmap.c index 152d226552c174..8ebbc2940f4c59 100644 --- a/net/netfilter/nft_set_bitmap.c +++ b/net/netfilter/nft_set_bitmap.c @@ -15,6 +15,11 @@ #include #include +struct nft_bitmap_elem { + struct list_head head; + struct nft_set_ext ext; +}; + /* This bitmap uses two bits to represent one element. These two bits determine * the element state in the current and the future generation. * @@ -41,13 +46,22 @@ * restore its previous state. 
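
The comment above struct nft_bitmap describes the core trick of this set backend: each element owns two adjacent bits that encode whether it is active in the current and in the next generation, so an update can be staged and then committed or rolled back simply by switching which generation mask is consulted. A minimal sketch of that encoding follows (generic names; the real code additionally keeps the elements on a list, as the hunks after this add).

/* Sketch of the two-bits-per-element generation encoding described above:
 * one bit of the pair per generation mask.  Illustrative only; the kernel
 * derives the bit position from the element's key value.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define NELEMS 16
static uint8_t bitmap[(NELEMS * 2 + 7) / 8];

static void locate(unsigned int elem, unsigned int *idx, unsigned int *off)
{
	unsigned int bit = elem * 2;	/* two bits per element */

	*idx = bit / 8;
	*off = bit % 8;
}

static bool active(unsigned int elem, uint8_t genmask)
{
	unsigned int idx, off;

	locate(elem, &idx, &off);
	return bitmap[idx] & (genmask << off);
}

static void set_gen(unsigned int elem, uint8_t genmask, bool on)
{
	unsigned int idx, off;

	locate(elem, &idx, &off);
	if (on)
		bitmap[idx] |= (genmask << off);
	else
		bitmap[idx] &= ~(genmask << off);
}

int main(void)
{
	uint8_t cur = 0x1, next = 0x2;	/* generation masks */

	set_gen(5, next, true);		/* staged for the next generation only */
	assert(!active(5, cur) && active(5, next));

	cur = 0x2;			/* commit: the next generation becomes current */
	assert(active(5, cur));		/* element is now live */
	return 0;
}
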
*/ struct nft_bitmap { - u16 bitmap_size; - u8 bitmap[]; + struct list_head list; + u16 bitmap_size; + u8 bitmap[]; }; -static inline void nft_bitmap_location(u32 key, u32 *idx, u32 *off) +static inline void nft_bitmap_location(const struct nft_set *set, + const void *key, + u32 *idx, u32 *off) { - u32 k = (key << 1); + u32 k; + + if (set->klen == 2) + k = *(u16 *)key; + else + k = *(u8 *)key; + k <<= 1; *idx = k / BITS_PER_BYTE; *off = k % BITS_PER_BYTE; @@ -69,26 +83,48 @@ static bool nft_bitmap_lookup(const struct net *net, const struct nft_set *set, u8 genmask = nft_genmask_cur(net); u32 idx, off; - nft_bitmap_location(*key, &idx, &off); + nft_bitmap_location(set, key, &idx, &off); return nft_bitmap_active(priv->bitmap, idx, off, genmask); } +static struct nft_bitmap_elem * +nft_bitmap_elem_find(const struct nft_set *set, struct nft_bitmap_elem *this, + u8 genmask) +{ + const struct nft_bitmap *priv = nft_set_priv(set); + struct nft_bitmap_elem *be; + + list_for_each_entry_rcu(be, &priv->list, head) { + if (memcmp(nft_set_ext_key(&be->ext), + nft_set_ext_key(&this->ext), set->klen) || + !nft_set_elem_active(&be->ext, genmask)) + continue; + + return be; + } + return NULL; +} + static int nft_bitmap_insert(const struct net *net, const struct nft_set *set, const struct nft_set_elem *elem, - struct nft_set_ext **_ext) + struct nft_set_ext **ext) { struct nft_bitmap *priv = nft_set_priv(set); - struct nft_set_ext *ext = elem->priv; + struct nft_bitmap_elem *new = elem->priv, *be; u8 genmask = nft_genmask_next(net); u32 idx, off; - nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off); - if (nft_bitmap_active(priv->bitmap, idx, off, genmask)) + be = nft_bitmap_elem_find(set, new, genmask); + if (be) { + *ext = &be->ext; return -EEXIST; + } + nft_bitmap_location(set, nft_set_ext_key(&new->ext), &idx, &off); /* Enter 01 state. */ priv->bitmap[idx] |= (genmask << off); + list_add_tail_rcu(&new->head, &priv->list); return 0; } @@ -98,13 +134,14 @@ static void nft_bitmap_remove(const struct net *net, const struct nft_set_elem *elem) { struct nft_bitmap *priv = nft_set_priv(set); - struct nft_set_ext *ext = elem->priv; + struct nft_bitmap_elem *be = elem->priv; u8 genmask = nft_genmask_next(net); u32 idx, off; - nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off); + nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off); /* Enter 00 state. */ priv->bitmap[idx] &= ~(genmask << off); + list_del_rcu(&be->head); } static void nft_bitmap_activate(const struct net *net, @@ -112,74 +149,52 @@ static void nft_bitmap_activate(const struct net *net, const struct nft_set_elem *elem) { struct nft_bitmap *priv = nft_set_priv(set); - struct nft_set_ext *ext = elem->priv; + struct nft_bitmap_elem *be = elem->priv; u8 genmask = nft_genmask_next(net); u32 idx, off; - nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off); + nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off); /* Enter 11 state. */ priv->bitmap[idx] |= (genmask << off); + nft_set_elem_change_active(net, set, &be->ext); } static bool nft_bitmap_flush(const struct net *net, - const struct nft_set *set, void *ext) + const struct nft_set *set, void *_be) { struct nft_bitmap *priv = nft_set_priv(set); u8 genmask = nft_genmask_next(net); + struct nft_bitmap_elem *be = _be; u32 idx, off; - nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off); + nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off); /* Enter 10 state, similar to deactivation. 
*/ priv->bitmap[idx] &= ~(genmask << off); + nft_set_elem_change_active(net, set, &be->ext); return true; } -static struct nft_set_ext *nft_bitmap_ext_alloc(const struct nft_set *set, - const struct nft_set_elem *elem) -{ - struct nft_set_ext_tmpl tmpl; - struct nft_set_ext *ext; - - nft_set_ext_prepare(&tmpl); - nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen); - - ext = kzalloc(tmpl.len, GFP_KERNEL); - if (!ext) - return NULL; - - nft_set_ext_init(ext, &tmpl); - memcpy(nft_set_ext_key(ext), elem->key.val.data, set->klen); - - return ext; -} - static void *nft_bitmap_deactivate(const struct net *net, const struct nft_set *set, const struct nft_set_elem *elem) { struct nft_bitmap *priv = nft_set_priv(set); + struct nft_bitmap_elem *this = elem->priv, *be; u8 genmask = nft_genmask_next(net); - struct nft_set_ext *ext; - u32 idx, off, key = 0; - - memcpy(&key, elem->key.val.data, set->klen); - nft_bitmap_location(key, &idx, &off); + u32 idx, off; - if (!nft_bitmap_active(priv->bitmap, idx, off, genmask)) - return NULL; + nft_bitmap_location(set, elem->key.val.data, &idx, &off); - /* We have no real set extension since this is a bitmap, allocate this - * dummy object that is released from the commit/abort path. - */ - ext = nft_bitmap_ext_alloc(set, elem); - if (!ext) + be = nft_bitmap_elem_find(set, this, genmask); + if (!be) return NULL; /* Enter 10 state. */ priv->bitmap[idx] &= ~(genmask << off); + nft_set_elem_change_active(net, set, &be->ext); - return ext; + return be; } static void nft_bitmap_walk(const struct nft_ctx *ctx, @@ -187,47 +202,23 @@ static void nft_bitmap_walk(const struct nft_ctx *ctx, struct nft_set_iter *iter) { const struct nft_bitmap *priv = nft_set_priv(set); - struct nft_set_ext_tmpl tmpl; + struct nft_bitmap_elem *be; struct nft_set_elem elem; - struct nft_set_ext *ext; - int idx, off; - u16 key; - - nft_set_ext_prepare(&tmpl); - nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen); - - for (idx = 0; idx < priv->bitmap_size; idx++) { - for (off = 0; off < BITS_PER_BYTE; off += 2) { - if (iter->count < iter->skip) - goto cont; - - if (!nft_bitmap_active(priv->bitmap, idx, off, - iter->genmask)) - goto cont; - - ext = kzalloc(tmpl.len, GFP_KERNEL); - if (!ext) { - iter->err = -ENOMEM; - return; - } - nft_set_ext_init(ext, &tmpl); - key = ((idx * BITS_PER_BYTE) + off) >> 1; - memcpy(nft_set_ext_key(ext), &key, set->klen); - - elem.priv = ext; - iter->err = iter->fn(ctx, set, iter, &elem); - - /* On set flush, this dummy extension object is released - * from the commit/abort path. 
- */ - if (!iter->flush) - kfree(ext); - - if (iter->err < 0) - return; + + list_for_each_entry_rcu(be, &priv->list, head) { + if (iter->count < iter->skip) + goto cont; + if (!nft_set_elem_active(&be->ext, iter->genmask)) + goto cont; + + elem.priv = be; + + iter->err = iter->fn(ctx, set, iter, &elem); + + if (iter->err < 0) + return; cont: - iter->count++; - } + iter->count++; } } @@ -258,6 +249,7 @@ static int nft_bitmap_init(const struct nft_set *set, { struct nft_bitmap *priv = nft_set_priv(set); + INIT_LIST_HEAD(&priv->list); priv->bitmap_size = nft_bitmap_size(set->klen); return 0; @@ -283,6 +275,7 @@ static bool nft_bitmap_estimate(const struct nft_set_desc *desc, u32 features, static struct nft_set_ops nft_bitmap_ops __read_mostly = { .privsize = nft_bitmap_privsize, + .elemsize = offsetof(struct nft_bitmap_elem, ext), .estimate = nft_bitmap_estimate, .init = nft_bitmap_init, .destroy = nft_bitmap_destroy, diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index 71e8fb886a73b7..78dfbf9588b368 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c @@ -60,11 +60,10 @@ static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set, d = memcmp(this, key, set->klen); if (d < 0) { parent = parent->rb_left; - /* In case of adjacent ranges, we always see the high - * part of the range in first place, before the low one. - * So don't update interval if the keys are equal. - */ - if (interval && nft_rbtree_equal(set, this, interval)) + if (interval && + nft_rbtree_equal(set, this, interval) && + nft_rbtree_interval_end(this) && + !nft_rbtree_interval_end(interval)) continue; interval = rbe; } else if (d > 0) diff --git a/net/netfilter/xt_owner.c b/net/netfilter/xt_owner.c index 16477df45b3bf1..3d705c688a27b5 100644 --- a/net/netfilter/xt_owner.c +++ b/net/netfilter/xt_owner.c @@ -13,6 +13,8 @@ #include #include #include +#include + #include #include #include diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 7b73c7c161a968..596eaff66649e5 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -96,6 +96,44 @@ EXPORT_SYMBOL_GPL(nl_table); static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait); +static struct lock_class_key nlk_cb_mutex_keys[MAX_LINKS]; + +static const char *const nlk_cb_mutex_key_strings[MAX_LINKS + 1] = { + "nlk_cb_mutex-ROUTE", + "nlk_cb_mutex-1", + "nlk_cb_mutex-USERSOCK", + "nlk_cb_mutex-FIREWALL", + "nlk_cb_mutex-SOCK_DIAG", + "nlk_cb_mutex-NFLOG", + "nlk_cb_mutex-XFRM", + "nlk_cb_mutex-SELINUX", + "nlk_cb_mutex-ISCSI", + "nlk_cb_mutex-AUDIT", + "nlk_cb_mutex-FIB_LOOKUP", + "nlk_cb_mutex-CONNECTOR", + "nlk_cb_mutex-NETFILTER", + "nlk_cb_mutex-IP6_FW", + "nlk_cb_mutex-DNRTMSG", + "nlk_cb_mutex-KOBJECT_UEVENT", + "nlk_cb_mutex-GENERIC", + "nlk_cb_mutex-17", + "nlk_cb_mutex-SCSITRANSPORT", + "nlk_cb_mutex-ECRYPTFS", + "nlk_cb_mutex-RDMA", + "nlk_cb_mutex-CRYPTO", + "nlk_cb_mutex-SMC", + "nlk_cb_mutex-23", + "nlk_cb_mutex-24", + "nlk_cb_mutex-25", + "nlk_cb_mutex-26", + "nlk_cb_mutex-27", + "nlk_cb_mutex-28", + "nlk_cb_mutex-29", + "nlk_cb_mutex-30", + "nlk_cb_mutex-31", + "nlk_cb_mutex-MAX_LINKS" +}; + static int netlink_dump(struct sock *sk); static void netlink_skb_destructor(struct sk_buff *skb); @@ -585,6 +623,9 @@ static int __netlink_create(struct net *net, struct socket *sock, } else { nlk->cb_mutex = &nlk->cb_def_mutex; mutex_init(nlk->cb_mutex); + lockdep_set_class_and_name(nlk->cb_mutex, + nlk_cb_mutex_keys + protocol, + nlk_cb_mutex_key_strings[protocol]); } 
init_waitqueue_head(&nlk->wait); diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index fb6e10fdb21743..92e0981f74040d 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c @@ -783,8 +783,10 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb) if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, - skb, CTRL_CMD_NEWFAMILY) < 0) + skb, CTRL_CMD_NEWFAMILY) < 0) { + n--; break; + } } cb->args[0] = n; diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index ed212ffc1d9d31..ebf16f7f90892d 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include #include @@ -765,7 +765,8 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr, return err; } -static int nr_accept(struct socket *sock, struct socket *newsock, int flags) +static int nr_accept(struct socket *sock, struct socket *newsock, int flags, + bool kern) { struct sk_buff *skb; struct sock *newsk; diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c index b9edf5fae6ae97..2ffb18e73df6c0 100644 --- a/net/nfc/llcp_sock.c +++ b/net/nfc/llcp_sock.c @@ -21,6 +21,7 @@ #include #include #include +#include #include "nfc.h" #include "llcp.h" @@ -440,7 +441,7 @@ struct sock *nfc_llcp_accept_dequeue(struct sock *parent, } static int llcp_sock_accept(struct socket *sock, struct socket *newsock, - int flags) + int flags, bool kern) { DECLARE_WAITQUEUE(wait, current); struct sock *sk = sock->sk, *new_sk; diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index b1beb2b94ec76c..c82301ce3fffb6 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -796,9 +796,8 @@ static void ovs_fragment(struct net *net, struct vport *vport, unsigned long orig_dst; struct rt6_info ovs_rt; - if (!v6ops) { + if (!v6ops) goto err; - } prepare_frag(vport, skb, orig_network_offset, ovs_key_mac_proto(key)); diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 85cd5952667068..7b2c2fce408a02 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -485,7 +485,6 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key, } else if (key->eth.type == htons(ETH_P_IPV6)) { enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone; - skb_orphan(skb); memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm)); err = nf_ct_frag6_gather(net, skb, user); if (err) { @@ -644,8 +643,8 @@ static bool skb_nfct_cached(struct net *net, */ if (nf_ct_is_confirmed(ct)) nf_ct_delete(ct, 0, 0); - else - nf_conntrack_put(&ct->ct_general); + + nf_conntrack_put(&ct->ct_general); nf_ct_set(skb, NULL, 0); return false; } diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 9d4bb8eb63f25c..3f76cb765e5bb7 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -527,7 +527,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key) /* Link layer. 
*/ clear_vlan(key); - if (key->mac_proto == MAC_PROTO_NONE) { + if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) { if (unlikely(eth_type_vlan(skb->protocol))) return -EINVAL; @@ -745,7 +745,13 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key) int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key) { - return key_extract(skb, key); + int res; + + res = key_extract(skb, key); + if (!res) + key->mac_proto &= ~SW_FLOW_KEY_INVALID; + + return res; } static int key_extract_mac_proto(struct sk_buff *skb) diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 6f5fa50f716d06..1105a838bab83f 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -604,7 +604,7 @@ static int ip_tun_from_nlattr(const struct nlattr *attr, ipv4 = true; break; case OVS_TUNNEL_KEY_ATTR_IPV6_SRC: - SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.dst, + SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.src, nla_get_in6_addr(a), is_mask); ipv6 = true; break; @@ -665,6 +665,8 @@ static int ip_tun_from_nlattr(const struct nlattr *attr, tun_flags |= TUNNEL_VXLAN_OPT; opts_type = type; break; + case OVS_TUNNEL_KEY_ATTR_PAD: + break; default: OVS_NLERR(log, "Unknown IP tunnel attribute %d", type); diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 2bd0d1949312c3..8489beff5c25c9 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -3103,7 +3103,7 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sock *sk = sock->sk; - char name[15]; + char name[sizeof(uaddr->sa_data) + 1]; /* * Check legality @@ -3111,7 +3111,11 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, if (addr_len != sizeof(struct sockaddr)) return -EINVAL; - strlcpy(name, uaddr->sa_data, sizeof(name)); + /* uaddr->sa_data comes from userspace; it's not guaranteed to be + * zero-terminated.
+ */ + memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data)); + name[sizeof(uaddr->sa_data)] = 0; return packet_do_bind(sk, name, 0, pkt_sk(sk)->num); } @@ -3661,6 +3665,8 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv return -EBUSY; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; + if (val > INT_MAX) + return -EINVAL; po->tp_reserve = val; return 0; } @@ -4189,8 +4195,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, if (unlikely(!PAGE_ALIGNED(req->tp_block_size))) goto out; if (po->tp_version >= TPACKET_V3 && - (int)(req->tp_block_size - - BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0) + req->tp_block_size <= + BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv)) goto out; if (unlikely(req->tp_frame_size < po->tp_hdrlen + po->tp_reserve)) @@ -4201,6 +4207,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, rb->frames_per_block = req->tp_block_size / req->tp_frame_size; if (unlikely(rb->frames_per_block == 0)) goto out; + if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr)) + goto out; if (unlikely((rb->frames_per_block * req->tp_block_nr) != req->tp_frame_nr)) goto out; diff --git a/net/phonet/pep.c b/net/phonet/pep.c index 8bad5624a27a9f..e81537991ddf0d 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c @@ -23,6 +23,7 @@ */ #include +#include #include #include #include @@ -771,7 +772,8 @@ static void pep_sock_close(struct sock *sk, long timeout) sock_put(sk); } -static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp) +static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp, + bool kern) { struct pep_sock *pn = pep_sk(sk), *newpn; struct sock *newsk = NULL; @@ -845,7 +847,8 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp) } /* Create a new to-be-accepted sock */ - newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot, 0); + newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot, + kern); if (!newsk) { pep_reject_conn(sk, skb, PN_PIPE_ERR_OVERLOAD, GFP_KERNEL); err = -ENOBUFS; diff --git a/net/phonet/socket.c b/net/phonet/socket.c index ffd5f229758487..64634e3ec2fc78 100644 --- a/net/phonet/socket.c +++ b/net/phonet/socket.c @@ -27,6 +27,8 @@ #include #include #include +#include + #include #include @@ -303,7 +305,7 @@ static int pn_socket_connect(struct socket *sock, struct sockaddr *addr, } static int pn_socket_accept(struct socket *sock, struct socket *newsock, - int flags) + int flags, bool kern) { struct sock *sk = sock->sk; struct sock *newsk; @@ -312,7 +314,7 @@ static int pn_socket_accept(struct socket *sock, struct socket *newsock, if (unlikely(sk->sk_state != TCP_LISTEN)) return -EINVAL; - newsk = sk->sk_prot->accept(sk, flags, &err); + newsk = sk->sk_prot->accept(sk, flags, &err, kern); if (!newsk) return err; diff --git a/net/rds/connection.c b/net/rds/connection.c index 0e04dcceb1d416..1fa75ab7b73323 100644 --- a/net/rds/connection.c +++ b/net/rds/connection.c @@ -429,6 +429,7 @@ void rds_conn_destroy(struct rds_connection *conn) */ rds_cong_remove_conn(conn); + put_net(conn->c_net); kmem_cache_free(rds_conn_slab, conn); spin_lock_irqsave(&rds_conn_lock, flags); diff --git a/net/rds/ib.c b/net/rds/ib.c index 91fe46f1e4ccf0..7a64c8db81abdc 100644 --- a/net/rds/ib.c +++ b/net/rds/ib.c @@ -45,8 +45,8 @@ #include "ib.h" #include "ib_mr.h" -unsigned int rds_ib_mr_1m_pool_size = RDS_MR_1M_POOL_SIZE; -unsigned int rds_ib_mr_8k_pool_size = RDS_MR_8K_POOL_SIZE; +static unsigned int 
rds_ib_mr_1m_pool_size = RDS_MR_1M_POOL_SIZE; +static unsigned int rds_ib_mr_8k_pool_size = RDS_MR_8K_POOL_SIZE; unsigned int rds_ib_retry_count = RDS_IB_DEFAULT_RETRY_COUNT; module_param(rds_ib_mr_1m_pool_size, int, 0444); @@ -438,16 +438,12 @@ int rds_ib_init(void) if (ret) goto out_sysctl; - ret = rds_trans_register(&rds_ib_transport); - if (ret) - goto out_recv; + rds_trans_register(&rds_ib_transport); rds_info_register_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info); goto out; -out_recv: - rds_ib_recv_exit(); out_sysctl: rds_ib_sysctl_exit(); out_ibreg: diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c index ce3775abc6e7a1..1c38d2c7caa8e9 100644 --- a/net/rds/ib_cm.c +++ b/net/rds/ib_cm.c @@ -442,7 +442,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn) ic->i_send_cq = NULL; ibdev_put_vector(rds_ibdev, ic->i_scq_vector); rdsdebug("ib_create_cq send failed: %d\n", ret); - goto out; + goto rds_ibdev_out; } ic->i_rcq_vector = ibdev_get_unused_vector(rds_ibdev); @@ -456,19 +456,19 @@ static int rds_ib_setup_qp(struct rds_connection *conn) ic->i_recv_cq = NULL; ibdev_put_vector(rds_ibdev, ic->i_rcq_vector); rdsdebug("ib_create_cq recv failed: %d\n", ret); - goto out; + goto send_cq_out; } ret = ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP); if (ret) { rdsdebug("ib_req_notify_cq send failed: %d\n", ret); - goto out; + goto recv_cq_out; } ret = ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED); if (ret) { rdsdebug("ib_req_notify_cq recv failed: %d\n", ret); - goto out; + goto recv_cq_out; } /* XXX negotiate max send/recv with remote? */ @@ -494,7 +494,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn) ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr); if (ret) { rdsdebug("rdma_create_qp failed: %d\n", ret); - goto out; + goto recv_cq_out; } ic->i_send_hdrs = ib_dma_alloc_coherent(dev, @@ -504,7 +504,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn) if (!ic->i_send_hdrs) { ret = -ENOMEM; rdsdebug("ib_dma_alloc_coherent send failed\n"); - goto out; + goto qp_out; } ic->i_recv_hdrs = ib_dma_alloc_coherent(dev, @@ -514,7 +514,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn) if (!ic->i_recv_hdrs) { ret = -ENOMEM; rdsdebug("ib_dma_alloc_coherent recv failed\n"); - goto out; + goto send_hdrs_dma_out; } ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header), @@ -522,7 +522,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn) if (!ic->i_ack) { ret = -ENOMEM; rdsdebug("ib_dma_alloc_coherent ack failed\n"); - goto out; + goto recv_hdrs_dma_out; } ic->i_sends = vzalloc_node(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work), @@ -530,7 +530,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn) if (!ic->i_sends) { ret = -ENOMEM; rdsdebug("send allocation failed\n"); - goto out; + goto ack_dma_out; } ic->i_recvs = vzalloc_node(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work), @@ -538,7 +538,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn) if (!ic->i_recvs) { ret = -ENOMEM; rdsdebug("recv allocation failed\n"); - goto out; + goto sends_out; } rds_ib_recv_init_ack(ic); @@ -546,8 +546,33 @@ static int rds_ib_setup_qp(struct rds_connection *conn) rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd, ic->i_send_cq, ic->i_recv_cq); -out: + return ret; + +sends_out: + vfree(ic->i_sends); +ack_dma_out: + ib_dma_free_coherent(dev, sizeof(struct rds_header), + ic->i_ack, ic->i_ack_dma); +recv_hdrs_dma_out: + ib_dma_free_coherent(dev, ic->i_recv_ring.w_nr * + sizeof(struct rds_header), + ic->i_recv_hdrs, ic->i_recv_hdrs_dma); 
+send_hdrs_dma_out: + ib_dma_free_coherent(dev, ic->i_send_ring.w_nr * + sizeof(struct rds_header), + ic->i_send_hdrs, ic->i_send_hdrs_dma); +qp_out: + rdma_destroy_qp(ic->i_cm_id); +recv_cq_out: + if (!ib_destroy_cq(ic->i_recv_cq)) + ic->i_recv_cq = NULL; +send_cq_out: + if (!ib_destroy_cq(ic->i_send_cq)) + ic->i_send_cq = NULL; +rds_ibdev_out: + rds_ib_remove_conn(rds_ibdev, conn); rds_ib_dev_put(rds_ibdev); + return ret; } diff --git a/net/rds/ib_mr.h b/net/rds/ib_mr.h index 24c086db4511d2..5d6e98a79a5e4b 100644 --- a/net/rds/ib_mr.h +++ b/net/rds/ib_mr.h @@ -107,8 +107,6 @@ struct rds_ib_mr_pool { }; extern struct workqueue_struct *rds_ib_mr_wq; -extern unsigned int rds_ib_mr_1m_pool_size; -extern unsigned int rds_ib_mr_8k_pool_size; extern bool prefer_frmr; struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_dev, diff --git a/net/rds/page.c b/net/rds/page.c index e2b5a5832d3d52..7cc57e098ddb98 100644 --- a/net/rds/page.c +++ b/net/rds/page.c @@ -45,35 +45,6 @@ struct rds_page_remainder { static DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_page_remainder, rds_page_remainders); -/* - * returns 0 on success or -errno on failure. - * - * We don't have to worry about flush_dcache_page() as this only works - * with private pages. If, say, we were to do directed receive to pinned - * user pages we'd have to worry more about cache coherence. (Though - * the flush_dcache_page() in get_user_pages() would probably be enough). - */ -int rds_page_copy_user(struct page *page, unsigned long offset, - void __user *ptr, unsigned long bytes, - int to_user) -{ - unsigned long ret; - void *addr; - - addr = kmap(page); - if (to_user) { - rds_stats_add(s_copy_to_user, bytes); - ret = copy_to_user(ptr, addr + offset, bytes); - } else { - rds_stats_add(s_copy_from_user, bytes); - ret = copy_from_user(addr + offset, ptr, bytes); - } - kunmap(page); - - return ret ? -EFAULT : 0; -} -EXPORT_SYMBOL_GPL(rds_page_copy_user); - /** * rds_page_remainder_alloc - build up regions of a message. 
* diff --git a/net/rds/rds.h b/net/rds/rds.h index 07fff73dd4f3f9..82d38ccf5e8bcf 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h @@ -147,7 +147,7 @@ struct rds_connection { /* Protocol version */ unsigned int c_version; - possible_net_t c_net; + struct net *c_net; struct list_head c_map_item; unsigned long c_map_queued; @@ -162,13 +162,13 @@ struct rds_connection { static inline struct net *rds_conn_net(struct rds_connection *conn) { - return read_pnet(&conn->c_net); + return conn->c_net; } static inline void rds_conn_net_set(struct rds_connection *conn, struct net *net) { - write_pnet(&conn->c_net, net); + conn->c_net = get_net(net); } #define RDS_FLAG_CONG_BITMAP 0x01 @@ -798,13 +798,6 @@ static inline int rds_message_verify_checksum(const struct rds_header *hdr) /* page.c */ int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes, gfp_t gfp); -int rds_page_copy_user(struct page *page, unsigned long offset, - void __user *ptr, unsigned long bytes, - int to_user); -#define rds_page_copy_to_user(page, offset, ptr, bytes) \ - rds_page_copy_user(page, offset, ptr, bytes, 1) -#define rds_page_copy_from_user(page, offset, ptr, bytes) \ - rds_page_copy_user(page, offset, ptr, bytes, 0) void rds_page_exit(void); /* recv.c */ @@ -910,7 +903,7 @@ void rds_connect_path_complete(struct rds_conn_path *conn, int curr); void rds_connect_complete(struct rds_connection *conn); /* transport.c */ -int rds_trans_register(struct rds_transport *trans); +void rds_trans_register(struct rds_transport *trans); void rds_trans_unregister(struct rds_transport *trans); struct rds_transport *rds_trans_get_preferred(struct net *net, __be32 addr); void rds_trans_put(struct rds_transport *trans); diff --git a/net/rds/tcp.c b/net/rds/tcp.c index 5438f6725092b7..22569007677357 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c @@ -484,9 +484,10 @@ static void __net_exit rds_tcp_exit_net(struct net *net) * we do need to clean up the listen socket here. 
*/ if (rtn->rds_tcp_listen_sock) { - rds_tcp_listen_stop(rtn->rds_tcp_listen_sock); + struct socket *lsock = rtn->rds_tcp_listen_sock; + rtn->rds_tcp_listen_sock = NULL; - flush_work(&rtn->rds_tcp_accept_w); + rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w); } } @@ -523,13 +524,13 @@ static void rds_tcp_kill_sock(struct net *net) struct rds_tcp_connection *tc, *_tc; LIST_HEAD(tmp_list); struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid); + struct socket *lsock = rtn->rds_tcp_listen_sock; - rds_tcp_listen_stop(rtn->rds_tcp_listen_sock); rtn->rds_tcp_listen_sock = NULL; - flush_work(&rtn->rds_tcp_accept_w); + rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w); spin_lock_irq(&rds_tcp_conn_lock); list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) { - struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net); + struct net *c_net = tc->t_cpath->cp_conn->c_net; if (net != c_net || !tc->t_sock) continue; @@ -546,8 +547,12 @@ static void rds_tcp_kill_sock(struct net *net) void *rds_tcp_listen_sock_def_readable(struct net *net) { struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid); + struct socket *lsock = rtn->rds_tcp_listen_sock; + + if (!lsock) + return NULL; - return rtn->rds_tcp_listen_sock->sk->sk_user_data; + return lsock->sk->sk_user_data; } static int rds_tcp_dev_event(struct notifier_block *this, @@ -584,7 +589,7 @@ static void rds_tcp_sysctl_reset(struct net *net) spin_lock_irq(&rds_tcp_conn_lock); list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) { - struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net); + struct net *c_net = tc->t_cpath->cp_conn->c_net; if (net != c_net || !tc->t_sock) continue; @@ -638,35 +643,30 @@ static int rds_tcp_init(void) goto out; } - ret = register_netdevice_notifier(&rds_tcp_dev_notifier); - if (ret) { - pr_warn("could not register rds_tcp_dev_notifier\n"); + ret = rds_tcp_recv_init(); + if (ret) goto out_slab; - } ret = register_pernet_subsys(&rds_tcp_net_ops); if (ret) - goto out_notifier; + goto out_recv; - ret = rds_tcp_recv_init(); - if (ret) + ret = register_netdevice_notifier(&rds_tcp_dev_notifier); + if (ret) { + pr_warn("could not register rds_tcp_dev_notifier\n"); goto out_pernet; + } - ret = rds_trans_register(&rds_tcp_transport); - if (ret) - goto out_recv; + rds_trans_register(&rds_tcp_transport); rds_info_register_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info); goto out; -out_recv: - rds_tcp_recv_exit(); out_pernet: unregister_pernet_subsys(&rds_tcp_net_ops); -out_notifier: - if (unregister_netdevice_notifier(&rds_tcp_dev_notifier)) - pr_warn("could not unregister rds_tcp_dev_notifier\n"); +out_recv: + rds_tcp_recv_exit(); out_slab: kmem_cache_destroy(rds_tcp_conn_slab); out: diff --git a/net/rds/tcp.h b/net/rds/tcp.h index 9a1cc890657679..56ea6620fcf97c 100644 --- a/net/rds/tcp.h +++ b/net/rds/tcp.h @@ -66,7 +66,7 @@ void rds_tcp_state_change(struct sock *sk); /* tcp_listen.c */ struct socket *rds_tcp_listen_init(struct net *); -void rds_tcp_listen_stop(struct socket *); +void rds_tcp_listen_stop(struct socket *sock, struct work_struct *acceptor); void rds_tcp_listen_data_ready(struct sock *sk); int rds_tcp_accept_one(struct socket *sock); int rds_tcp_keepalive(struct socket *sock); diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c index 67d0929c7d3d0c..507678853e6cb3 100644 --- a/net/rds/tcp_listen.c +++ b/net/rds/tcp_listen.c @@ -133,7 +133,7 @@ int rds_tcp_accept_one(struct socket *sock) new_sock->type = sock->type; new_sock->ops = sock->ops; - ret = sock->ops->accept(sock, new_sock, 
O_NONBLOCK); + ret = sock->ops->accept(sock, new_sock, O_NONBLOCK, true); if (ret < 0) goto out; @@ -223,6 +223,9 @@ void rds_tcp_listen_data_ready(struct sock *sk) * before it has been accepted and the accepter has set up their * data_ready.. we only want to queue listen work for our listening * socket + * + * (*ready)() may be null if we are racing with netns delete, and + * the listen socket is being torn down. */ if (sk->sk_state == TCP_LISTEN) rds_tcp_accept_work(sk); @@ -231,7 +234,8 @@ void rds_tcp_listen_data_ready(struct sock *sk) out: read_unlock_bh(&sk->sk_callback_lock); - ready(sk); + if (ready) + ready(sk); } struct socket *rds_tcp_listen_init(struct net *net) @@ -271,7 +275,7 @@ struct socket *rds_tcp_listen_init(struct net *net) return NULL; } -void rds_tcp_listen_stop(struct socket *sock) +void rds_tcp_listen_stop(struct socket *sock, struct work_struct *acceptor) { struct sock *sk; @@ -292,5 +296,6 @@ void rds_tcp_listen_stop(struct socket *sock) /* wait for accepts to stop and close the socket */ flush_workqueue(rds_wq); + flush_work(acceptor); sock_release(sock); } diff --git a/net/rds/transport.c b/net/rds/transport.c index 2ffd3e30c6434e..0b188dd0a344cb 100644 --- a/net/rds/transport.c +++ b/net/rds/transport.c @@ -40,7 +40,7 @@ static struct rds_transport *transports[RDS_TRANS_COUNT]; static DECLARE_RWSEM(rds_trans_sem); -int rds_trans_register(struct rds_transport *trans) +void rds_trans_register(struct rds_transport *trans) { BUG_ON(strlen(trans->t_name) + 1 > TRANSNAMSIZ); @@ -55,8 +55,6 @@ int rds_trans_register(struct rds_transport *trans) } up_write(&rds_trans_sem); - - return 0; } EXPORT_SYMBOL_GPL(rds_trans_register); diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index 9ad301c46b888f..4a972925702367 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include #include @@ -871,7 +871,8 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le return err; } -static int rose_accept(struct socket *sock, struct socket *newsock, int flags) +static int rose_accept(struct socket *sock, struct socket *newsock, int flags, + bool kern) { struct sk_buff *skb; struct sock *newsk; diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index 199b46e93e64ee..7fb59c3f1542af 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -290,10 +290,11 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock, cp.exclusive = false; cp.service_id = srx->srx_service; call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, gfp); + /* The socket has been unlocked. 
*/ if (!IS_ERR(call)) call->notify_rx = notify_rx; - release_sock(&rx->sk); + mutex_unlock(&call->user_mutex); _leave(" = %p", call); return call; } @@ -310,7 +311,10 @@ EXPORT_SYMBOL(rxrpc_kernel_begin_call); void rxrpc_kernel_end_call(struct socket *sock, struct rxrpc_call *call) { _enter("%d{%d}", call->debug_id, atomic_read(&call->usage)); + + mutex_lock(&call->user_mutex); rxrpc_release_call(rxrpc_sk(sock->sk), call); + mutex_unlock(&call->user_mutex); rxrpc_put_call(call, rxrpc_call_put_kernel); } EXPORT_SYMBOL(rxrpc_kernel_end_call); @@ -450,14 +454,16 @@ static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len) case RXRPC_SERVER_BOUND: case RXRPC_SERVER_LISTENING: ret = rxrpc_do_sendmsg(rx, m, len); - break; + /* The socket has been unlocked */ + goto out; default: ret = -EINVAL; - break; + goto error_unlock; } error_unlock: release_sock(&rx->sk); +out: _leave(" = %d", ret); return ret; } diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 12be432be9b2fe..26a7b1db1361e5 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -467,6 +467,7 @@ struct rxrpc_call { struct rxrpc_connection *conn; /* connection carrying call */ struct rxrpc_peer *peer; /* Peer record for remote address */ struct rxrpc_sock __rcu *socket; /* socket responsible */ + struct mutex user_mutex; /* User access mutex */ ktime_t ack_at; /* When deferred ACK needs to happen */ ktime_t resend_at; /* When next resend needs to happen */ ktime_t ping_at; /* When next to send a ping */ diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c index 7c4c64ab8da2e2..0ed181f53f32a0 100644 --- a/net/rxrpc/call_accept.c +++ b/net/rxrpc/call_accept.c @@ -323,6 +323,8 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx, * * If we want to report an error, we mark the skb with the packet type and * abort code and return NULL. + * + * The call is returned with the user access mutex held. */ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, struct rxrpc_connection *conn, @@ -371,6 +373,18 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, trace_rxrpc_receive(call, rxrpc_receive_incoming, sp->hdr.serial, sp->hdr.seq); + /* Lock the call to prevent rxrpc_kernel_send/recv_data() and + * sendmsg()/recvmsg() inconveniently stealing the mutex once the + * notification is generated. + * + * The BUG should never happen because the kernel should be well + * behaved enough not to access the call before the first notification + * event and userspace is prevented from doing so until the state is + * appropriate. + */ + if (!mutex_trylock(&call->user_mutex)) + BUG(); + /* Make the call live. */ rxrpc_incoming_call(rx, call, skb); conn = call->conn; @@ -429,10 +443,12 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, /* * handle acceptance of a call by userspace * - assign the user call ID to the call at the front of the queue + * - called with the socket locked. 
*/ struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx, unsigned long user_call_ID, rxrpc_notify_rx_t notify_rx) + __releases(&rx->sk.sk_lock.slock) { struct rxrpc_call *call; struct rb_node *parent, **pp; @@ -446,6 +462,7 @@ struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx, if (list_empty(&rx->to_be_accepted)) { write_unlock(&rx->call_lock); + release_sock(&rx->sk); kleave(" = -ENODATA [empty]"); return ERR_PTR(-ENODATA); } @@ -470,10 +487,39 @@ struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx, */ call = list_entry(rx->to_be_accepted.next, struct rxrpc_call, accept_link); + write_unlock(&rx->call_lock); + + /* We need to gain the mutex from the interrupt handler without + * upsetting lockdep, so we have to release it there and take it here. + * We are, however, still holding the socket lock, so other accepts + * must wait for us and no one can add the user ID behind our backs. + */ + if (mutex_lock_interruptible(&call->user_mutex) < 0) { + release_sock(&rx->sk); + kleave(" = -ERESTARTSYS"); + return ERR_PTR(-ERESTARTSYS); + } + + write_lock(&rx->call_lock); list_del_init(&call->accept_link); sk_acceptq_removed(&rx->sk); rxrpc_see_call(call); + /* Find the user ID insertion point. */ + pp = &rx->calls.rb_node; + parent = NULL; + while (*pp) { + parent = *pp; + call = rb_entry(parent, struct rxrpc_call, sock_node); + + if (user_call_ID < call->user_call_ID) + pp = &(*pp)->rb_left; + else if (user_call_ID > call->user_call_ID) + pp = &(*pp)->rb_right; + else + BUG(); + } + write_lock_bh(&call->state_lock); switch (call->state) { case RXRPC_CALL_SERVER_ACCEPTING: @@ -499,6 +545,7 @@ struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx, write_unlock(&rx->call_lock); rxrpc_notify_socket(call); rxrpc_service_prealloc(rx, GFP_KERNEL); + release_sock(&rx->sk); _leave(" = %p{%d}", call, call->debug_id); return call; @@ -515,6 +562,7 @@ struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx, write_unlock(&rx->call_lock); out: rxrpc_service_prealloc(rx, GFP_KERNEL); + release_sock(&rx->sk); _leave(" = %d", ret); return ERR_PTR(ret); } diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index 8b94db3c9b2ecb..d79cd36987a95b 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -115,6 +115,7 @@ struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp) if (!call->rxtx_annotations) goto nomem_2; + mutex_init(&call->user_mutex); setup_timer(&call->timer, rxrpc_call_timer_expired, (unsigned long)call); INIT_WORK(&call->processor, &rxrpc_process_call); @@ -194,14 +195,16 @@ static void rxrpc_start_call_timer(struct rxrpc_call *call) } /* - * set up a call for the given data - * - called in process context with IRQs enabled + * Set up a call for the given parameters. + * - Called with the socket lock held, which it must release. + * - If it returns a call, the call's lock will need releasing by the caller. 
*/ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, struct rxrpc_conn_parameters *cp, struct sockaddr_rxrpc *srx, unsigned long user_call_ID, gfp_t gfp) + __releases(&rx->sk.sk_lock.slock) { struct rxrpc_call *call, *xcall; struct rb_node *parent, **pp; @@ -212,6 +215,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, call = rxrpc_alloc_client_call(srx, gfp); if (IS_ERR(call)) { + release_sock(&rx->sk); _leave(" = %ld", PTR_ERR(call)); return call; } @@ -219,6 +223,11 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage), here, (const void *)user_call_ID); + /* We need to protect a partially set up call against the user as we + * will be acting outside the socket lock. + */ + mutex_lock(&call->user_mutex); + /* Publish the call, even though it is incompletely set up as yet */ write_lock(&rx->call_lock); @@ -250,6 +259,9 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, list_add_tail(&call->link, &rxrpc_calls); write_unlock(&rxrpc_call_lock); + /* From this point on, the call is protected by its own lock. */ + release_sock(&rx->sk); + /* Set up or get a connection record and set the protocol parameters, * including channel number and call ID. */ @@ -279,6 +291,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, */ error_dup_user_ID: write_unlock(&rx->call_lock); + release_sock(&rx->sk); ret = -EEXIST; error: @@ -287,6 +300,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage), here, ERR_PTR(ret)); rxrpc_release_call(rx, call); + mutex_unlock(&call->user_mutex); rxrpc_put_call(call, rxrpc_call_put); _leave(" = %d", ret); return ERR_PTR(ret); diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c index 40a1ef2adeb45c..c3be03e8d09821 100644 --- a/net/rxrpc/conn_client.c +++ b/net/rxrpc/conn_client.c @@ -76,6 +76,8 @@ #include #include #include +#include + #include "ar-internal.h" __read_mostly unsigned int rxrpc_max_client_connections = 1000; diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c index 3f9d8d7ec6323a..b099b64366f356 100644 --- a/net/rxrpc/conn_event.c +++ b/net/rxrpc/conn_event.c @@ -275,6 +275,10 @@ static int rxrpc_process_event(struct rxrpc_connection *conn, rxrpc_conn_retransmit_call(conn, skb); return 0; + case RXRPC_PACKET_TYPE_BUSY: + /* Just ignore BUSY packets for now. */ + return 0; + case RXRPC_PACKET_TYPE_ABORT: if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header), &wtmp, sizeof(wtmp)) < 0) diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 78ec33477adf6c..18b2ad8be8e2b5 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -420,6 +420,7 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb, u16 skew) { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); + enum rxrpc_call_state state; unsigned int offset = sizeof(struct rxrpc_wire_header); unsigned int ix; rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0; @@ -434,14 +435,15 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb, _proto("Rx DATA %%%u { #%u f=%02x }", sp->hdr.serial, seq, sp->hdr.flags); - if (call->state >= RXRPC_CALL_COMPLETE) + state = READ_ONCE(call->state); + if (state >= RXRPC_CALL_COMPLETE) return; /* Received data implicitly ACKs all of the request packets we sent * when we're acting as a client. 
*/ - if ((call->state == RXRPC_CALL_CLIENT_SEND_REQUEST || - call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY) && + if ((state == RXRPC_CALL_CLIENT_SEND_REQUEST || + state == RXRPC_CALL_CLIENT_AWAIT_REPLY) && !rxrpc_receiving_reply(call)) return; @@ -650,6 +652,7 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb, struct rxrpc_skb_priv *sp = rxrpc_skb(skb); struct rxrpc_peer *peer; unsigned int mtu; + bool wake = false; u32 rwind = ntohl(ackinfo->rwind); _proto("Rx ACK %%%u Info { rx=%u max=%u rwin=%u jm=%u }", @@ -657,9 +660,14 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb, ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU), rwind, ntohl(ackinfo->jumbo_max)); - if (rwind > RXRPC_RXTX_BUFF_SIZE - 1) - rwind = RXRPC_RXTX_BUFF_SIZE - 1; - call->tx_winsize = rwind; + if (call->tx_winsize != rwind) { + if (rwind > RXRPC_RXTX_BUFF_SIZE - 1) + rwind = RXRPC_RXTX_BUFF_SIZE - 1; + if (rwind > call->tx_winsize) + wake = true; + call->tx_winsize = rwind; + } + if (call->cong_ssthresh > rwind) call->cong_ssthresh = rwind; @@ -673,6 +681,9 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb, spin_unlock_bh(&peer->lock); _net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata); } + + if (wake) + wake_up(&call->waitq); } /* @@ -799,7 +810,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb, return rxrpc_proto_abort("AK0", call, 0); /* Ignore ACKs unless we are or have just been transmitting. */ - switch (call->state) { + switch (READ_ONCE(call->state)) { case RXRPC_CALL_CLIENT_SEND_REQUEST: case RXRPC_CALL_CLIENT_AWAIT_REPLY: case RXRPC_CALL_SERVER_SEND_REPLY: @@ -940,7 +951,7 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call, static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn, struct rxrpc_call *call) { - switch (call->state) { + switch (READ_ONCE(call->state)) { case RXRPC_CALL_SERVER_AWAIT_ACK: rxrpc_call_completed(call); break; @@ -1194,6 +1205,7 @@ void rxrpc_data_ready(struct sock *udp_sk) goto reject_packet; } rxrpc_send_ping(call, skb, skew); + mutex_unlock(&call->user_mutex); } rxrpc_input_call_packet(call, skb, skew); diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index f3a688e108430a..3e2f1a8e9c5b51 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c @@ -14,6 +14,8 @@ #include #include #include +#include + #include #include #include "ar-internal.h" @@ -487,6 +489,20 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, trace_rxrpc_recvmsg(call, rxrpc_recvmsg_dequeue, 0, 0, 0, 0); + /* We're going to drop the socket lock, so we need to lock the call + * against interference by sendmsg. 
+ */ + if (!mutex_trylock(&call->user_mutex)) { + ret = -EWOULDBLOCK; + if (flags & MSG_DONTWAIT) + goto error_requeue_call; + ret = -ERESTARTSYS; + if (mutex_lock_interruptible(&call->user_mutex) < 0) + goto error_requeue_call; + } + + release_sock(&rx->sk); + if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) BUG(); @@ -502,7 +518,7 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, &call->user_call_ID); } if (ret < 0) - goto error; + goto error_unlock_call; } if (msg->msg_name) { @@ -511,7 +527,7 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, msg->msg_namelen = len; } - switch (call->state) { + switch (READ_ONCE(call->state)) { case RXRPC_CALL_SERVER_ACCEPTING: ret = rxrpc_recvmsg_new_call(rx, call, msg, flags); break; @@ -533,12 +549,12 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, } if (ret < 0) - goto error; + goto error_unlock_call; if (call->state == RXRPC_CALL_COMPLETE) { ret = rxrpc_recvmsg_term(call, msg); if (ret < 0) - goto error; + goto error_unlock_call; if (!(flags & MSG_PEEK)) rxrpc_release_call(rx, call); msg->msg_flags |= MSG_EOR; @@ -551,8 +567,21 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, msg->msg_flags &= ~MSG_MORE; ret = copied; -error: +error_unlock_call: + mutex_unlock(&call->user_mutex); rxrpc_put_call(call, rxrpc_call_put); + trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret); + return ret; + +error_requeue_call: + if (!(flags & MSG_PEEK)) { + write_lock_bh(&rx->recvmsg_lock); + list_add(&call->recvmsg_link, &rx->recvmsg_q); + write_unlock_bh(&rx->recvmsg_lock); + trace_rxrpc_recvmsg(call, rxrpc_recvmsg_requeue, 0, 0, 0, 0); + } else { + rxrpc_put_call(call, rxrpc_call_put); + } error_no_call: release_sock(&rx->sk); trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret); @@ -609,9 +638,9 @@ int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call, iov.iov_len = size - *_offset; iov_iter_kvec(&iter, ITER_KVEC | READ, &iov, 1, size - *_offset); - lock_sock(sock->sk); + mutex_lock(&call->user_mutex); - switch (call->state) { + switch (READ_ONCE(call->state)) { case RXRPC_CALL_CLIENT_RECV_REPLY: case RXRPC_CALL_SERVER_RECV_REQUEST: case RXRPC_CALL_SERVER_ACK_REQUEST: @@ -648,7 +677,7 @@ int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call, read_phase_complete: ret = 1; out: - release_sock(sock->sk); + mutex_unlock(&call->user_mutex); _leave(" = %d [%zu,%d]", ret, *_offset, *_abort); return ret; diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index 0a6ef217aa8ada..97ab214ca4118d 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c @@ -15,6 +15,8 @@ #include #include #include +#include + #include #include #include "ar-internal.h" @@ -59,9 +61,12 @@ static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx, } trace_rxrpc_transmit(call, rxrpc_transmit_wait); - release_sock(&rx->sk); + mutex_unlock(&call->user_mutex); *timeo = schedule_timeout(*timeo); - lock_sock(&rx->sk); + if (mutex_lock_interruptible(&call->user_mutex) < 0) { + ret = sock_intr_errno(*timeo); + break; + } } remove_wait_queue(&call->waitq, &myself); @@ -171,7 +176,7 @@ static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb, /* * send data through a socket * - must be called in process context - * - caller holds the socket locked + * - The caller holds the call user access mutex, but not the socket lock. 
*/ static int rxrpc_send_data(struct rxrpc_sock *rx, struct rxrpc_call *call, @@ -437,10 +442,13 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg, /* * Create a new client call for sendmsg(). + * - Called with the socket lock held, which it must release. + * - If it returns a call, the call's lock will need releasing by the caller. */ static struct rxrpc_call * rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, unsigned long user_call_ID, bool exclusive) + __releases(&rx->sk.sk_lock.slock) { struct rxrpc_conn_parameters cp; struct rxrpc_call *call; @@ -450,8 +458,10 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, _enter(""); - if (!msg->msg_name) + if (!msg->msg_name) { + release_sock(&rx->sk); return ERR_PTR(-EDESTADDRREQ); + } key = rx->key; if (key && !rx->key->payload.data[0]) @@ -464,6 +474,7 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, cp.exclusive = rx->exclusive | exclusive; cp.service_id = srx->srx_service; call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, GFP_KERNEL); + /* The socket is now unlocked */ _leave(" = %p\n", call); return call; @@ -475,7 +486,9 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, * - the socket may be either a client socket or a server socket */ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) + __releases(&rx->sk.sk_lock.slock) { + enum rxrpc_call_state state; enum rxrpc_command cmd; struct rxrpc_call *call; unsigned long user_call_ID = 0; @@ -488,12 +501,14 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) ret = rxrpc_sendmsg_cmsg(msg, &user_call_ID, &cmd, &abort_code, &exclusive); if (ret < 0) - return ret; + goto error_release_sock; if (cmd == RXRPC_CMD_ACCEPT) { + ret = -EINVAL; if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) - return -EINVAL; + goto error_release_sock; call = rxrpc_accept_call(rx, user_call_ID, NULL); + /* The socket is now unlocked. */ if (IS_ERR(call)) return PTR_ERR(call); rxrpc_put_call(call, rxrpc_call_put); @@ -502,18 +517,41 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) call = rxrpc_find_call_by_user_ID(rx, user_call_ID); if (!call) { + ret = -EBADSLT; if (cmd != RXRPC_CMD_SEND_DATA) - return -EBADSLT; + goto error_release_sock; call = rxrpc_new_client_call_for_sendmsg(rx, msg, user_call_ID, exclusive); + /* The socket is now unlocked... */ if (IS_ERR(call)) return PTR_ERR(call); + /* ... and we have the call lock. 
*/ + } else { + switch (READ_ONCE(call->state)) { + case RXRPC_CALL_UNINITIALISED: + case RXRPC_CALL_CLIENT_AWAIT_CONN: + case RXRPC_CALL_SERVER_PREALLOC: + case RXRPC_CALL_SERVER_SECURING: + case RXRPC_CALL_SERVER_ACCEPTING: + ret = -EBUSY; + goto error_release_sock; + default: + break; + } + + ret = mutex_lock_interruptible(&call->user_mutex); + release_sock(&rx->sk); + if (ret < 0) { + ret = -ERESTARTSYS; + goto error_put; + } } + state = READ_ONCE(call->state); _debug("CALL %d USR %lx ST %d on CONN %p", - call->debug_id, call->user_call_ID, call->state, call->conn); + call->debug_id, call->user_call_ID, state, call->conn); - if (call->state >= RXRPC_CALL_COMPLETE) { + if (state >= RXRPC_CALL_COMPLETE) { /* it's too late for this call */ ret = -ESHUTDOWN; } else if (cmd == RXRPC_CMD_SEND_ABORT) { @@ -523,21 +561,27 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) } else if (cmd != RXRPC_CMD_SEND_DATA) { ret = -EINVAL; } else if (rxrpc_is_client_call(call) && - call->state != RXRPC_CALL_CLIENT_SEND_REQUEST) { + state != RXRPC_CALL_CLIENT_SEND_REQUEST) { /* request phase complete for this client call */ ret = -EPROTO; } else if (rxrpc_is_service_call(call) && - call->state != RXRPC_CALL_SERVER_ACK_REQUEST && - call->state != RXRPC_CALL_SERVER_SEND_REPLY) { + state != RXRPC_CALL_SERVER_ACK_REQUEST && + state != RXRPC_CALL_SERVER_SEND_REPLY) { /* Reply phase not begun or not complete for service call. */ ret = -EPROTO; } else { ret = rxrpc_send_data(rx, call, msg, len); } + mutex_unlock(&call->user_mutex); +error_put: rxrpc_put_call(call, rxrpc_call_put); _leave(" = %d", ret); return ret; + +error_release_sock: + release_sock(&rx->sk); + return ret; } /** @@ -562,22 +606,29 @@ int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call, ASSERTCMP(msg->msg_name, ==, NULL); ASSERTCMP(msg->msg_control, ==, NULL); - lock_sock(sock->sk); + mutex_lock(&call->user_mutex); _debug("CALL %d USR %lx ST %d on CONN %p", call->debug_id, call->user_call_ID, call->state, call->conn); - if (call->state >= RXRPC_CALL_COMPLETE) { - ret = -ESHUTDOWN; /* it's too late for this call */ - } else if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST && - call->state != RXRPC_CALL_SERVER_ACK_REQUEST && - call->state != RXRPC_CALL_SERVER_SEND_REPLY) { - ret = -EPROTO; /* request phase complete for this client call */ - } else { + switch (READ_ONCE(call->state)) { + case RXRPC_CALL_CLIENT_SEND_REQUEST: + case RXRPC_CALL_SERVER_ACK_REQUEST: + case RXRPC_CALL_SERVER_SEND_REPLY: ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len); + break; + case RXRPC_CALL_COMPLETE: + read_lock_bh(&call->state_lock); + ret = -call->error; + read_unlock_bh(&call->state_lock); + break; + default: + /* Request phase complete for this client call */ + ret = -EPROTO; + break; } - release_sock(sock->sk); + mutex_unlock(&call->user_mutex); _leave(" = %d", ret); return ret; } @@ -598,12 +649,12 @@ void rxrpc_kernel_abort_call(struct socket *sock, struct rxrpc_call *call, { _enter("{%d},%d,%d,%s", call->debug_id, abort_code, error, why); - lock_sock(sock->sk); + mutex_lock(&call->user_mutex); if (rxrpc_abort_call(why, call, 0, abort_code, error)) rxrpc_send_abort_packet(call); - release_sock(sock->sk); + mutex_unlock(&call->user_mutex); _leave(""); } diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c index ab80629099622c..f9bb43c25697e7 100644 --- a/net/sched/act_connmark.c +++ b/net/sched/act_connmark.c @@ -113,6 +113,9 @@ static int tcf_connmark_init(struct net *net, struct nlattr 
*nla, if (ret < 0) return ret; + if (!tb[TCA_CONNMARK_PARMS]) + return -EINVAL; + parm = nla_data(tb[TCA_CONNMARK_PARMS]); if (!tcf_hash_check(tn, parm->index, a, bind)) { diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c index 3b7074e2302487..c736627f8f4a0e 100644 --- a/net/sched/act_skbmod.c +++ b/net/sched/act_skbmod.c @@ -228,7 +228,6 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a, return skb->len; nla_put_failure: - rcu_read_unlock(); nlmsg_trim(skb, b); return -1; } diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c index 41c80b6c39063a..ae7e4f5b348b86 100644 --- a/net/sched/em_meta.c +++ b/net/sched/em_meta.c @@ -63,6 +63,7 @@ #include #include #include +#include #include #include #include diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index 802ac7c2e5e87e..5334e309f17f0e 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c @@ -201,9 +201,13 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch, pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p); if (p->set_tc_index) { + int wlen = skb_network_offset(skb); + switch (tc_skb_protocol(skb)) { case htons(ETH_P_IP): - if (skb_cow_head(skb, sizeof(struct iphdr))) + wlen += sizeof(struct iphdr); + if (!pskb_may_pull(skb, wlen) || + skb_try_make_writable(skb, wlen)) goto drop; skb->tc_index = ipv4_get_dsfield(ip_hdr(skb)) @@ -211,7 +215,9 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch, break; case htons(ETH_P_IPV6): - if (skb_cow_head(skb, sizeof(struct ipv6hdr))) + wlen += sizeof(struct ipv6hdr); + if (!pskb_may_pull(skb, wlen) || + skb_try_make_writable(skb, wlen)) goto drop; skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb)) diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 2a6835b4562b61..a9708da28eb53f 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -71,9 +71,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a { struct net *net = sock_net(sk); struct sctp_sock *sp; - int i; sctp_paramhdr_t *p; - int err; + int i; /* Retrieve the SCTP per socket area. */ sp = sctp_sk((struct sock *)sk); @@ -247,6 +246,9 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a if (!sctp_ulpq_init(&asoc->ulpq, asoc)) goto fail_init; + if (sctp_stream_new(asoc, gfp)) + goto fail_init; + /* Assume that peer would support both address types unless we are * told otherwise. */ @@ -264,9 +266,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a /* AUTH related initializations */ INIT_LIST_HEAD(&asoc->endpoint_shared_keys); - err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp); - if (err) - goto fail_init; + if (sctp_auth_asoc_copy_shkeys(ep, asoc, gfp)) + goto stream_free; asoc->active_key_id = ep->active_key_id; asoc->prsctp_enable = ep->prsctp_enable; @@ -289,6 +290,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a return asoc; +stream_free: + sctp_stream_free(asoc->stream); fail_init: sock_put(asoc->base.sk); sctp_endpoint_put(asoc->ep); @@ -1409,7 +1412,7 @@ sctp_assoc_choose_alter_transport(struct sctp_association *asoc, /* Update the association's pmtu and frag_point by going through all the * transports. This routine is called when a transport's PMTU has changed. 
*/ -void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc) +void sctp_assoc_sync_pmtu(struct sctp_association *asoc) { struct sctp_transport *t; __u32 pmtu = 0; @@ -1421,8 +1424,8 @@ void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc) list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) { if (t->pmtu_pending && t->dst) { - sctp_transport_update_pmtu(sk, t, - SCTP_TRUNC4(dst_mtu(t->dst))); + sctp_transport_update_pmtu( + t, SCTP_TRUNC4(dst_mtu(t->dst))); t->pmtu_pending = 0; } if (!pmtu || (t->pathmtu < pmtu)) diff --git a/net/sctp/input.c b/net/sctp/input.c index fc458968fe4bd8..0e06a278d2a911 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -401,10 +401,10 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc, if (t->param_flags & SPP_PMTUD_ENABLE) { /* Update transports view of the MTU */ - sctp_transport_update_pmtu(sk, t, pmtu); + sctp_transport_update_pmtu(t, pmtu); /* Update association pmtu. */ - sctp_assoc_sync_pmtu(sk, asoc); + sctp_assoc_sync_pmtu(asoc); } /* Retransmit with the new pmtu setting. @@ -884,14 +884,17 @@ int sctp_hash_transport(struct sctp_transport *t) arg.paddr = &t->ipaddr; arg.lport = htons(t->asoc->base.bind_addr.port); + rcu_read_lock(); list = rhltable_lookup(&sctp_transport_hashtable, &arg, sctp_hash_params); rhl_for_each_entry_rcu(transport, tmp, list, node) if (transport->asoc->ep == t->asoc->ep) { + rcu_read_unlock(); err = -EEXIST; goto out; } + rcu_read_unlock(); err = rhltable_insert_key(&sctp_transport_hashtable, &arg, &t->node, sctp_hash_params); diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 063baac5b9fe40..961ee59f696a0b 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -640,14 +640,15 @@ static sctp_scope_t sctp_v6_scope(union sctp_addr *addr) /* Create and initialize a new sk for the socket to be returned by accept(). 
*/ static struct sock *sctp_v6_create_accept_sk(struct sock *sk, - struct sctp_association *asoc) + struct sctp_association *asoc, + bool kern) { struct sock *newsk; struct ipv6_pinfo *newnp, *np = inet6_sk(sk); struct sctp6_sock *newsctp6sk; struct ipv6_txoptions *opt; - newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot, 0); + newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot, kern); if (!newsk) goto out; diff --git a/net/sctp/output.c b/net/sctp/output.c index 71ce6b945dcb54..1409a875ad8e22 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -86,43 +86,53 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag, { struct sctp_transport *tp = packet->transport; struct sctp_association *asoc = tp->asoc; + struct sock *sk; pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag); - packet->vtag = vtag; - if (asoc && tp->dst) { - struct sock *sk = asoc->base.sk; - - rcu_read_lock(); - if (__sk_dst_get(sk) != tp->dst) { - dst_hold(tp->dst); - sk_setup_caps(sk, tp->dst); - } - - if (sk_can_gso(sk)) { - struct net_device *dev = tp->dst->dev; + /* do the following jobs only once for a flush schedule */ + if (!sctp_packet_empty(packet)) + return; - packet->max_size = dev->gso_max_size; - } else { - packet->max_size = asoc->pathmtu; - } - rcu_read_unlock(); + /* set packet max_size with pathmtu */ + packet->max_size = tp->pathmtu; + if (!asoc) + return; - } else { - packet->max_size = tp->pathmtu; + /* update dst or transport pathmtu if needed */ + sk = asoc->base.sk; + if (!sctp_transport_dst_check(tp)) { + sctp_transport_route(tp, NULL, sctp_sk(sk)); + if (asoc->param_flags & SPP_PMTUD_ENABLE) + sctp_assoc_sync_pmtu(asoc); + } else if (!sctp_transport_pmtu_check(tp)) { + if (asoc->param_flags & SPP_PMTUD_ENABLE) + sctp_assoc_sync_pmtu(asoc); } - if (ecn_capable && sctp_packet_empty(packet)) { - struct sctp_chunk *chunk; + /* If there is a prepend chunk, stick it on the list before + * any other chunks get appended. + */ + if (ecn_capable) { + struct sctp_chunk *chunk = sctp_get_ecne_prepend(asoc); - /* If there a is a prepend chunk stick it on the list before - * any other chunks get appended. - */ - chunk = sctp_get_ecne_prepend(asoc); if (chunk) sctp_packet_append_chunk(packet, chunk); } + + if (!tp->dst) + return; + + /* set packet max_size with gso_max_size if gso is enabled */ + rcu_read_lock(); + if (__sk_dst_get(sk) != tp->dst) { + dst_hold(tp->dst); + sk_setup_caps(sk, tp->dst); + } + packet->max_size = sk_can_gso(sk) ? tp->dst->dev->gso_max_size + : asoc->pathmtu; + rcu_read_unlock(); } /* Initialize the packet structure.
*/ @@ -546,7 +556,6 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp) struct sctp_association *asoc = tp->asoc; struct sctp_chunk *chunk, *tmp; int pkt_count, gso = 0; - int confirm; struct dst_entry *dst; struct sk_buff *head; struct sctphdr *sh; @@ -583,12 +592,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp) sh->vtag = htonl(packet->vtag); sh->checksum = 0; - /* update dst if in need */ - if (!sctp_transport_dst_check(tp)) { - sctp_transport_route(tp, NULL, sctp_sk(sk)); - if (asoc && asoc->param_flags & SPP_PMTUD_ENABLE) - sctp_assoc_sync_pmtu(sk, asoc); - } + /* drop packet if no dst */ dst = dst_clone(tp->dst); if (!dst) { IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); @@ -625,13 +629,13 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp) asoc->peer.last_sent_to = tp; } head->ignore_df = packet->ipfragok; - confirm = tp->dst_pending_confirm; - if (confirm) + if (tp->dst_pending_confirm) skb_set_dst_pending_confirm(head, 1); /* neighbour should be confirmed on successful transmission or * positive error */ - if (tp->af_specific->sctp_xmit(head, tp) >= 0 && confirm) + if (tp->af_specific->sctp_xmit(head, tp) >= 0 && + tp->dst_pending_confirm) tp->dst_pending_confirm = 0; out: @@ -705,7 +709,7 @@ static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet, */ if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) && - !chunk->msg->force_delay) + !asoc->force_delay) /* Nothing unacked */ return SCTP_XMIT_OK; diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index db352e5d61f898..8081476ed313cc 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -382,17 +382,18 @@ static int sctp_prsctp_prune_sent(struct sctp_association *asoc, } static int sctp_prsctp_prune_unsent(struct sctp_association *asoc, - struct sctp_sndrcvinfo *sinfo, - struct list_head *queue, int msg_len) + struct sctp_sndrcvinfo *sinfo, int msg_len) { + struct sctp_outq *q = &asoc->outqueue; struct sctp_chunk *chk, *temp; - list_for_each_entry_safe(chk, temp, queue, list) { + list_for_each_entry_safe(chk, temp, &q->out_chunk_list, list) { if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) || chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive) continue; list_del_init(&chk->list); + q->out_qlen -= chk->skb->len; asoc->sent_cnt_removable--; asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++; @@ -431,9 +432,7 @@ void sctp_prsctp_prune(struct sctp_association *asoc, return; } - sctp_prsctp_prune_unsent(asoc, sinfo, - &asoc->outqueue.out_chunk_list, - msg_len); + sctp_prsctp_prune_unsent(asoc, sinfo, msg_len); } /* Mark all the eligible packets on a transport for retransmission. */ @@ -1027,8 +1026,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) /* RFC 2960 6.5 Every DATA chunk MUST carry a valid * stream identifier. */ - if (chunk->sinfo.sinfo_stream >= - asoc->c.sinit_num_ostreams) { + if (chunk->sinfo.sinfo_stream >= asoc->stream->outcnt) { /* Mark as failed send. 
*/ sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM); diff --git a/net/sctp/proc.c b/net/sctp/proc.c index 206377fe91ec4d..a0b29d43627f48 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c @@ -361,8 +361,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v) sctp_seq_dump_remote_addrs(seq, assoc); seq_printf(seq, "\t%8lu %5d %5d %4d %4d %4d %8d " "%8d %8d %8d %8d", - assoc->hbinterval, assoc->c.sinit_max_instreams, - assoc->c.sinit_num_ostreams, assoc->max_retrans, + assoc->hbinterval, assoc->stream->incnt, + assoc->stream->outcnt, assoc->max_retrans, assoc->init_retries, assoc->shutdown_retries, assoc->rtx_data_chunks, atomic_read(&sk->sk_wmem_alloc), diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 1b6d4574d2b02a..989a900383b57c 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -575,10 +575,11 @@ static int sctp_v4_is_ce(const struct sk_buff *skb) /* Create and initialize a new sk for the socket returned by accept(). */ static struct sock *sctp_v4_create_accept_sk(struct sock *sk, - struct sctp_association *asoc) + struct sctp_association *asoc, + bool kern) { struct sock *newsk = sk_alloc(sock_net(sk), PF_INET, GFP_KERNEL, - sk->sk_prot, 0); + sk->sk_prot, kern); struct inet_sock *newinet; if (!newsk) diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 969a30c7bb5431..118faff6a332ee 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -2460,15 +2460,10 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk, * association. */ if (!asoc->temp) { - int error; - - asoc->stream = sctp_stream_new(asoc->c.sinit_max_instreams, - asoc->c.sinit_num_ostreams, gfp); - if (!asoc->stream) + if (sctp_stream_init(asoc, gfp)) goto clean_up; - error = sctp_assoc_set_id(asoc, gfp); - if (error) + if (sctp_assoc_set_id(asoc, gfp)) goto clean_up; } diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index e03bb1aab4d095..24c6ccce753909 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -3946,7 +3946,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn(struct net *net, /* Silently discard the chunk if stream-id is not valid */ sctp_walk_fwdtsn(skip, chunk) { - if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams) + if (ntohs(skip->stream) >= asoc->stream->incnt) goto discard_noforce; } @@ -4017,7 +4017,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn_fast( /* Silently discard the chunk if stream-id is not valid */ sctp_walk_fwdtsn(skip, chunk) { - if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams) + if (ntohs(skip->stream) >= asoc->stream->incnt) goto gen_shutdown; } @@ -6353,7 +6353,7 @@ static int sctp_eat_data(const struct sctp_association *asoc, * and discard the DATA chunk. */ sid = ntohs(data_hdr->stream); - if (sid >= asoc->c.sinit_max_instreams) { + if (sid >= asoc->stream->incnt) { /* Mark tsn as received even though we drop it */ sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn)); diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 465a9c8464f947..c1401f43d40fc5 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -57,6 +57,7 @@ #include #include #include +#include #include #include #include @@ -1906,7 +1907,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) } if (asoc->pmtu_pending) - sctp_assoc_pending_pmtu(sk, asoc); + sctp_assoc_pending_pmtu(asoc); /* If fragmentation is disabled and the message length exceeds the * association fragmentation point, return EMSGSIZE. 
The I-D @@ -1919,7 +1920,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) } /* Check for invalid stream. */ - if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) { + if (sinfo->sinfo_stream >= asoc->stream->outcnt) { err = -EINVAL; goto out_free; } @@ -1964,7 +1965,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) err = PTR_ERR(datamsg); goto out_free; } - datamsg->force_delay = !!(msg->msg_flags & MSG_MORE); + asoc->force_delay = !!(msg->msg_flags & MSG_MORE); /* Now send the (possibly) fragmented message. */ list_for_each_entry(chunk, &datamsg->chunks, frag_list) { @@ -2434,7 +2435,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) { if (trans) { trans->pathmtu = params->spp_pathmtu; - sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc); + sctp_assoc_sync_pmtu(asoc); } else if (asoc) { asoc->pathmtu = params->spp_pathmtu; } else { @@ -2450,7 +2451,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, (trans->param_flags & ~SPP_PMTUD) | pmtud_change; if (update) { sctp_transport_pmtu(trans, sctp_opt2sk(sp)); - sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc); + sctp_assoc_sync_pmtu(asoc); } } else if (asoc) { asoc->param_flags = @@ -4115,7 +4116,7 @@ static int sctp_disconnect(struct sock *sk, int flags) * descriptor will be returned from accept() to represent the newly * formed association. */ -static struct sock *sctp_accept(struct sock *sk, int flags, int *err) +static struct sock *sctp_accept(struct sock *sk, int flags, int *err, bool kern) { struct sctp_sock *sp; struct sctp_endpoint *ep; @@ -4150,7 +4151,7 @@ static struct sock *sctp_accept(struct sock *sk, int flags, int *err) */ asoc = list_entry(ep->asocs.next, struct sctp_association, asocs); - newsk = sp->pf->create_accept_sk(sk, asoc); + newsk = sp->pf->create_accept_sk(sk, asoc, kern); if (!newsk) { error = -ENOMEM; goto out; @@ -4460,8 +4461,8 @@ int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc, info->sctpi_rwnd = asoc->a_rwnd; info->sctpi_unackdata = asoc->unack_data; info->sctpi_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); - info->sctpi_instrms = asoc->c.sinit_max_instreams; - info->sctpi_outstrms = asoc->c.sinit_num_ostreams; + info->sctpi_instrms = asoc->stream->incnt; + info->sctpi_outstrms = asoc->stream->outcnt; list_for_each(pos, &asoc->base.inqueue.in_chunk_list) info->sctpi_inqueue++; list_for_each(pos, &asoc->outqueue.out_chunk_list) @@ -4690,8 +4691,8 @@ static int sctp_getsockopt_sctp_status(struct sock *sk, int len, status.sstat_unackdata = asoc->unack_data; status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); - status.sstat_instrms = asoc->c.sinit_max_instreams; - status.sstat_outstrms = asoc->c.sinit_num_ostreams; + status.sstat_instrms = asoc->stream->incnt; + status.sstat_outstrms = asoc->stream->outcnt; status.sstat_fragmentation_point = asoc->frag_point; status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc); memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr, diff --git a/net/sctp/stream.c b/net/sctp/stream.c index 1c6cc04fa3a41f..bbed997e1c5f01 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -35,33 +35,60 @@ #include #include -struct sctp_stream *sctp_stream_new(__u16 incnt, __u16 outcnt, gfp_t gfp) +int sctp_stream_new(struct sctp_association *asoc, gfp_t gfp) { struct sctp_stream *stream; int i; stream = kzalloc(sizeof(*stream), gfp); if (!stream) - 
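Editorial note: the datamsg->force_delay to asoc->force_delay change above is what records MSG_MORE from sendmsg(), so that small DATA chunks are held back per association rather than per message. From userspace the knob is simply the flag on send(); a minimal sketch, assuming fd is a connected SCTP socket on a kernel whose SCTP honours MSG_MORE:

#include <string.h>
#include <sys/socket.h>

/* Queue a small header without forcing it onto the wire, then let the
 * final send() without MSG_MORE flush everything. */
static ssize_t send_in_two_pieces(int fd, const char *hdr, const char *body)
{
        if (send(fd, hdr, strlen(hdr), MSG_MORE) < 0)
                return -1;
        return send(fd, body, strlen(body), 0);
}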
return NULL; + return -ENOMEM; - stream->outcnt = outcnt; + stream->outcnt = asoc->c.sinit_num_ostreams; stream->out = kcalloc(stream->outcnt, sizeof(*stream->out), gfp); if (!stream->out) { kfree(stream); - return NULL; + return -ENOMEM; } for (i = 0; i < stream->outcnt; i++) stream->out[i].state = SCTP_STREAM_OPEN; - stream->incnt = incnt; + asoc->stream = stream; + + return 0; +} + +int sctp_stream_init(struct sctp_association *asoc, gfp_t gfp) +{ + struct sctp_stream *stream = asoc->stream; + int i; + + /* Initial stream->out size may be very big, so free it and alloc + * a new one with new outcnt to save memory. + */ + kfree(stream->out); + stream->outcnt = asoc->c.sinit_num_ostreams; + stream->out = kcalloc(stream->outcnt, sizeof(*stream->out), gfp); + if (!stream->out) + goto nomem; + + for (i = 0; i < stream->outcnt; i++) + stream->out[i].state = SCTP_STREAM_OPEN; + + stream->incnt = asoc->c.sinit_max_instreams; stream->in = kcalloc(stream->incnt, sizeof(*stream->in), gfp); if (!stream->in) { kfree(stream->out); - kfree(stream); - return NULL; + goto nomem; } - return stream; + return 0; + +nomem: + asoc->stream = NULL; + kfree(stream); + + return -ENOMEM; } void sctp_stream_free(struct sctp_stream *stream) diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 3379668af3686d..721eeebfcd8a50 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -251,14 +251,13 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk) transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT; } -void sctp_transport_update_pmtu(struct sock *sk, struct sctp_transport *t, u32 pmtu) +void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu) { - struct dst_entry *dst; + struct dst_entry *dst = sctp_transport_dst_check(t); if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) { pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n", - __func__, pmtu, - SCTP_DEFAULT_MINSEGMENT); + __func__, pmtu, SCTP_DEFAULT_MINSEGMENT); /* Use default minimum segment size and disable * pmtu discovery on this transport. 
*/ @@ -267,17 +266,13 @@ void sctp_transport_update_pmtu(struct sock *sk, struct sctp_transport *t, u32 p t->pathmtu = pmtu; } - dst = sctp_transport_dst_check(t); - if (!dst) - t->af_specific->get_dst(t, &t->saddr, &t->fl, sk); - if (dst) { - dst->ops->update_pmtu(dst, sk, NULL, pmtu); - + dst->ops->update_pmtu(dst, t->asoc->base.sk, NULL, pmtu); dst = sctp_transport_dst_check(t); - if (!dst) - t->af_specific->get_dst(t, &t->saddr, &t->fl, sk); } + + if (!dst) + t->af_specific->get_dst(t, &t->saddr, &t->fl, t->asoc->base.sk); } /* Caches the dst entry and source address for a transport's destination diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 5d4208ad029e27..093803786eacf3 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -27,6 +27,8 @@ #include #include #include +#include + #include #include #include @@ -942,7 +944,7 @@ static int smc_listen(struct socket *sock, int backlog) } static int smc_accept(struct socket *sock, struct socket *new_sock, - int flags) + int flags, bool kern) { struct sock *sk = sock->sk, *nsk; DECLARE_WAITQUEUE(wait, current); diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c index cc6b6f8651ebc0..e41f594a1e1d0c 100644 --- a/net/smc/smc_clc.c +++ b/net/smc/smc_clc.c @@ -11,6 +11,8 @@ #include #include +#include + #include #include diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c index 03dfcc6b76614a..67a71d170bedb4 100644 --- a/net/smc/smc_close.c +++ b/net/smc/smc_close.c @@ -9,6 +9,8 @@ */ #include +#include + #include #include "smc.h" diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c index 5d1878732f4647..c4ef9a4ec56971 100644 --- a/net/smc/smc_rx.c +++ b/net/smc/smc_rx.c @@ -11,6 +11,8 @@ #include #include +#include + #include #include "smc.h" diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c index 6e73b28915ea6d..69a0013dd25cec 100644 --- a/net/smc/smc_tx.c +++ b/net/smc/smc_tx.c @@ -15,6 +15,8 @@ #include #include #include +#include + #include #include "smc.h" diff --git a/net/socket.c b/net/socket.c index 2c1e8677ff2d4f..985ef06792d6e5 100644 --- a/net/socket.c +++ b/net/socket.c @@ -652,6 +652,16 @@ int kernel_sendmsg(struct socket *sock, struct msghdr *msg, } EXPORT_SYMBOL(kernel_sendmsg); +static bool skb_is_err_queue(const struct sk_buff *skb) +{ + /* pkt_type of skbs enqueued on the error queue are set to + * PACKET_OUTGOING in skb_set_err_queue(). This is only safe to do + * in recvmsg, since skbs received on a local socket will never + * have a pkt_type of PACKET_OUTGOING. 
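Editorial note: the skb_is_err_queue() helper introduced here (together with the tightened check in __sock_recv_timestamp() that follows) ensures the SCM_TIMESTAMPING_OPT_STATS control message is only attached to packets read from the socket error queue. The consumer side of that is a plain recvmsg(MSG_ERRQUEUE) loop; a minimal sketch, assuming a socket fd with SO_TIMESTAMPING already enabled for software TX timestamps plus SOF_TIMESTAMPING_OPT_STATS (constants from linux/net_tstamp.h and linux/errqueue.h):

#include <stdio.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>

static void drain_tx_timestamps(int fd)
{
        char data[256], control[512];
        struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
        struct msghdr msg = {
                .msg_iov = &iov, .msg_iovlen = 1,
                .msg_control = control, .msg_controllen = sizeof(control),
        };
        struct cmsghdr *cm;

        if (recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT) < 0)
                return;

        for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
                if (cm->cmsg_level != SOL_SOCKET)
                        continue;
                if (cm->cmsg_type == SCM_TIMESTAMPING) {
                        struct scm_timestamping *ts = (void *)CMSG_DATA(cm);

                        printf("tx sw timestamp: %lld.%09ld\n",
                               (long long)ts->ts[0].tv_sec, ts->ts[0].tv_nsec);
                } else if (cm->cmsg_type == SCM_TIMESTAMPING_OPT_STATS) {
                        /* Opaque run of TCP_NLA_* netlink attributes. */
                        printf("got %zu bytes of OPT_STATS\n",
                               (size_t)(cm->cmsg_len - CMSG_LEN(0)));
                }
        }
}

The OPT_STATS payload is a run of TCP_NLA_* netlink attributes; tools normally hand it to a netlink attribute parser rather than decoding it by hand.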
+ */ + return skb->pkt_type == PACKET_OUTGOING; +} + /* * called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP) */ @@ -695,7 +705,8 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING, sizeof(tss), &tss); - if (skb->len && (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS)) + if (skb_is_err_queue(skb) && skb->len && + SKB_EXT_ERR(skb)->opt_stats) put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING_OPT_STATS, skb->len, skb->data); } @@ -1506,7 +1517,7 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr, if (err) goto out_fd; - err = sock->ops->accept(sock, newsock, sock->file->f_flags); + err = sock->ops->accept(sock, newsock, sock->file->f_flags, false); if (err < 0) goto out_fd; @@ -1731,6 +1742,7 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size, /* We assume all kernel code knows the size of sockaddr_storage */ msg.msg_namelen = 0; msg.msg_iocb = NULL; + msg.msg_flags = 0; if (sock->file->f_flags & O_NONBLOCK) flags |= MSG_DONTWAIT; err = sock_recvmsg(sock, &msg, flags); @@ -3238,7 +3250,7 @@ int kernel_accept(struct socket *sock, struct socket **newsock, int flags) if (err < 0) goto done; - err = sock->ops->accept(sock, *newsock, flags); + err = sock->ops->accept(sock, *newsock, flags, true); if (err < 0) { sock_release(*newsock); *newsock = NULL; diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c index 41adf362936d7d..b5c279b2268017 100644 --- a/net/strparser/strparser.c +++ b/net/strparser/strparser.c @@ -504,6 +504,7 @@ static int __init strp_mod_init(void) static void __exit strp_mod_exit(void) { + destroy_workqueue(strp_wq); } module_init(strp_mod_init); module_exit(strp_mod_exit); diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index a1ee933e3029b3..d2623b9f23d66c 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c @@ -8,6 +8,7 @@ #include #include +#include #include #include #include diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index b94efd93d3e498..a08aeb56b8e457 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -11,7 +11,7 @@ */ #include -#include +#include #include #include #include diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 8931e33b65412d..2b720fa35c4ff7 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -1635,6 +1635,7 @@ static struct svc_xprt *svc_bc_create_socket(struct svc_serv *serv, xprt = &svsk->sk_xprt; svc_xprt_init(net, &svc_tcp_bc_class, xprt, serv); + set_bit(XPT_CONG_CTRL, &svsk->sk_xprt.xpt_flags); serv->sv_bc_xprt = xprt; diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index c13a5c35ce14d9..fc8f14c7bfec60 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c @@ -127,6 +127,7 @@ static struct svc_xprt *svc_rdma_bc_create(struct svc_serv *serv, xprt = &cma_xprt->sc_xprt; svc_xprt_init(net, &svc_rdma_bc_class, xprt, serv); + set_bit(XPT_CONG_CTRL, &xprt->xpt_flags); serv->sv_bc_xprt = xprt; dprintk("svcrdma: %s(%p)\n", __func__, xprt); diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 81cd31acf690f4..3b332b395045b5 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -503,7 +503,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia, struct ib_cq *sendcq, *recvcq; int rc; - max_sge = min(ia->ri_device->attrs.max_sge, RPCRDMA_MAX_SEND_SGES); + max_sge = min_t(unsigned int, ia->ri_device->attrs.max_sge, + RPCRDMA_MAX_SEND_SGES); if 
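Editorial note: the one-line strparser change above plugs a leak on module unload - a workqueue allocated in the init path was never destroyed in the exit path. The general shape of the rule, as a kernel-style sketch with made-up names (not part of the patch):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static int __init example_init(void)
{
        example_wq = alloc_workqueue("example_wq", WQ_UNBOUND, 0);
        if (!example_wq)
                return -ENOMEM;
        return 0;
}

static void __exit example_exit(void)
{
        /* Drains pending work, then frees the workqueue. */
        destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");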
(max_sge < RPCRDMA_MIN_SEND_SGES) { pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge); return -ENOMEM; diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 6b09a778cc71fa..7130e73bd42c21 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -35,6 +35,8 @@ */ #include +#include + #include "core.h" #include "name_table.h" #include "node.h" @@ -113,7 +115,8 @@ static void tipc_data_ready(struct sock *sk); static void tipc_write_space(struct sock *sk); static void tipc_sock_destruct(struct sock *sk); static int tipc_release(struct socket *sock); -static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags); +static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags, + bool kern); static void tipc_sk_timeout(unsigned long data); static int tipc_sk_publish(struct tipc_sock *tsk, uint scope, struct tipc_name_seq const *seq); @@ -2027,7 +2030,8 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo) * * Returns 0 on success, errno otherwise */ -static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags) +static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags, + bool kern) { struct sock *new_sk, *sk = sock->sk; struct sk_buff *buf; @@ -2049,7 +2053,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags) buf = skb_peek(&sk->sk_receive_queue); - res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 0); + res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern); if (res) goto exit; security_sk_clone(sock->sk, new_sock->sk); diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index 9d94e65d089418..271cd66e4b3b66 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c @@ -141,6 +141,11 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower, static void tipc_subscrp_timeout(unsigned long data) { struct tipc_subscription *sub = (struct tipc_subscription *)data; + struct tipc_subscriber *subscriber = sub->subscriber; + + spin_lock_bh(&subscriber->lock); + tipc_nametbl_unsubscribe(sub); + spin_unlock_bh(&subscriber->lock); /* Notify subscriber of timeout */ tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper, @@ -173,7 +178,6 @@ static void tipc_subscrp_kref_release(struct kref *kref) struct tipc_subscriber *subscriber = sub->subscriber; spin_lock_bh(&subscriber->lock); - tipc_nametbl_unsubscribe(sub); list_del(&sub->subscrp_list); atomic_dec(&tn->subscription_count); spin_unlock_bh(&subscriber->lock); @@ -205,6 +209,7 @@ static void tipc_subscrb_subscrp_delete(struct tipc_subscriber *subscriber, if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) continue; + tipc_nametbl_unsubscribe(sub); tipc_subscrp_get(sub); spin_unlock_bh(&subscriber->lock); tipc_subscrp_delete(sub); diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index e2d18b9f910fd1..928691c434087e 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -85,7 +85,7 @@ #include #include #include -#include +#include #include #include #include @@ -636,7 +636,7 @@ static int unix_bind(struct socket *, struct sockaddr *, int); static int unix_stream_connect(struct socket *, struct sockaddr *, int addr_len, int flags); static int unix_socketpair(struct socket *, struct socket *); -static int unix_accept(struct socket *, struct socket *, int); +static int unix_accept(struct socket *, struct socket *, int, bool); static int unix_getname(struct socket *, struct sockaddr *, int *, int); static unsigned int unix_poll(struct file *, struct socket *, 
poll_table *); static unsigned int unix_dgram_poll(struct file *, struct socket *, @@ -1402,7 +1402,8 @@ static void unix_sock_inherit_flags(const struct socket *old, set_bit(SOCK_PASSSEC, &new->flags); } -static int unix_accept(struct socket *sock, struct socket *newsock, int flags) +static int unix_accept(struct socket *sock, struct socket *newsock, int flags, + bool kern) { struct sock *sk = sock->sk; struct sock *tsk; diff --git a/net/unix/garbage.c b/net/unix/garbage.c index 6a0d48525fcf9a..c36757e728442b 100644 --- a/net/unix/garbage.c +++ b/net/unix/garbage.c @@ -146,6 +146,7 @@ void unix_notinflight(struct user_struct *user, struct file *fp) if (s) { struct unix_sock *u = unix_sk(s); + BUG_ON(!atomic_long_read(&u->inflight)); BUG_ON(list_empty(&u->link)); if (atomic_long_dec_and_test(&u->inflight)) @@ -341,6 +342,14 @@ void unix_gc(void) } list_del(&cursor); + /* Now gc_candidates contains only garbage. Restore original + * inflight counters for these as well, and remove the skbuffs + * which are creating the cycle(s). + */ + skb_queue_head_init(&hitlist); + list_for_each_entry(u, &gc_candidates, link) + scan_children(&u->sk, inc_inflight, &hitlist); + /* not_cycle_list contains those sockets which do not make up a * cycle. Restore these to the inflight list. */ @@ -350,14 +359,6 @@ void unix_gc(void) list_move_tail(&u->link, &gc_inflight_list); } - /* Now gc_candidates contains only garbage. Restore original - * inflight counters for these as well, and remove the skbuffs - * which are creating the cycle(s). - */ - skb_queue_head_init(&hitlist); - list_for_each_entry(u, &gc_candidates, link) - scan_children(&u->sk, inc_inflight, &hitlist); - spin_unlock(&unix_gc_lock); /* Here we are. Hitlist is filled. Die. */ diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index 8a398b3fb532aa..6f7f6757ceefb5 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -90,6 +90,7 @@ #include #include #include +#include #include #include #include @@ -1101,10 +1102,19 @@ static const struct proto_ops vsock_dgram_ops = { .sendpage = sock_no_sendpage, }; +static int vsock_transport_cancel_pkt(struct vsock_sock *vsk) +{ + if (!transport->cancel_pkt) + return -EOPNOTSUPP; + + return transport->cancel_pkt(vsk); +} + static void vsock_connect_timeout(struct work_struct *work) { struct sock *sk; struct vsock_sock *vsk; + int cancel = 0; vsk = container_of(work, struct vsock_sock, dwork.work); sk = sk_vsock(vsk); @@ -1115,8 +1125,11 @@ static void vsock_connect_timeout(struct work_struct *work) sk->sk_state = SS_UNCONNECTED; sk->sk_err = ETIMEDOUT; sk->sk_error_report(sk); + cancel = 1; } release_sock(sk); + if (cancel) + vsock_transport_cancel_pkt(vsk); sock_put(sk); } @@ -1223,11 +1236,13 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr, err = sock_intr_errno(timeout); sk->sk_state = SS_UNCONNECTED; sock->state = SS_UNCONNECTED; + vsock_transport_cancel_pkt(vsk); goto out_wait; } else if (timeout == 0) { err = -ETIMEDOUT; sk->sk_state = SS_UNCONNECTED; sock->state = SS_UNCONNECTED; + vsock_transport_cancel_pkt(vsk); goto out_wait; } @@ -1249,7 +1264,8 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr, return err; } -static int vsock_accept(struct socket *sock, struct socket *newsock, int flags) +static int vsock_accept(struct socket *sock, struct socket *newsock, int flags, + bool kern) { struct sock *listener; int err; diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c index 
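Editorial note: the unix/garbage.c reordering above concerns AF_UNIX sockets that are "in flight", i.e. carried inside an SCM_RIGHTS control message queued on another AF_UNIX socket; cycles of such references are what the hitlist pass reclaims. For reference, this is the userspace operation that creates an in-flight reference - a minimal sketch, assuming sock is a connected AF_UNIX socket:

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static int send_fd(int sock, int fd_to_pass)
{
        char byte = 0;
        struct iovec iov = { .iov_base = &byte, .iov_len = 1 };
        union {
                char buf[CMSG_SPACE(sizeof(int))];
                struct cmsghdr align;
        } u;
        struct msghdr msg = {
                .msg_iov = &iov, .msg_iovlen = 1,
                .msg_control = u.buf, .msg_controllen = sizeof(u.buf),
        };
        struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);

        cm->cmsg_level = SOL_SOCKET;
        cm->cmsg_type = SCM_RIGHTS;
        cm->cmsg_len = CMSG_LEN(sizeof(int));
        memcpy(CMSG_DATA(cm), &fd_to_pass, sizeof(int));

        return sendmsg(sock, &msg, 0) == 1 ? 0 : -1;
}

A cycle arises when, for example, one end of a socketpair is sent over itself or its peer and the last ordinary descriptor to it is then closed; only the garbage collector can free it afterwards, which is why the inflight accounting and hitlist ordering above matter.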
6788264acc632d..68675a151f22b8 100644 --- a/net/vmw_vsock/virtio_transport.c +++ b/net/vmw_vsock/virtio_transport.c @@ -213,6 +213,47 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt) return len; } +static int +virtio_transport_cancel_pkt(struct vsock_sock *vsk) +{ + struct virtio_vsock *vsock; + struct virtio_vsock_pkt *pkt, *n; + int cnt = 0; + LIST_HEAD(freeme); + + vsock = virtio_vsock_get(); + if (!vsock) { + return -ENODEV; + } + + spin_lock_bh(&vsock->send_pkt_list_lock); + list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) { + if (pkt->vsk != vsk) + continue; + list_move(&pkt->list, &freeme); + } + spin_unlock_bh(&vsock->send_pkt_list_lock); + + list_for_each_entry_safe(pkt, n, &freeme, list) { + if (pkt->reply) + cnt++; + list_del(&pkt->list); + virtio_transport_free_pkt(pkt); + } + + if (cnt) { + struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX]; + int new_cnt; + + new_cnt = atomic_sub_return(cnt, &vsock->queued_replies); + if (new_cnt + cnt >= virtqueue_get_vring_size(rx_vq) && + new_cnt < virtqueue_get_vring_size(rx_vq)) + queue_work(virtio_vsock_workqueue, &vsock->rx_work); + } + + return 0; +} + static void virtio_vsock_rx_fill(struct virtio_vsock *vsock) { int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE; @@ -462,6 +503,7 @@ static struct virtio_transport virtio_transport = { .release = virtio_transport_release, .connect = virtio_transport_connect, .shutdown = virtio_transport_shutdown, + .cancel_pkt = virtio_transport_cancel_pkt, .dgram_bind = virtio_transport_dgram_bind, .dgram_dequeue = virtio_transport_dgram_dequeue, @@ -532,7 +574,8 @@ static int virtio_vsock_probe(struct virtio_device *vdev) vsock->vdev = vdev; ret = vsock->vdev->config->find_vqs(vsock->vdev, VSOCK_VQ_MAX, - vsock->vqs, callbacks, names); + vsock->vqs, callbacks, names, + NULL); if (ret < 0) goto out; diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c index 849c4ad0411ee2..af087b44ceea23 100644 --- a/net/vmw_vsock/virtio_transport_common.c +++ b/net/vmw_vsock/virtio_transport_common.c @@ -9,6 +9,7 @@ */ #include #include +#include #include #include #include @@ -57,6 +58,7 @@ virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info, pkt->len = len; pkt->hdr.len = cpu_to_le32(len); pkt->reply = info->reply; + pkt->vsk = info->vsk; if (info->msg && len > 0) { pkt->buf = kmalloc(len, GFP_KERNEL); @@ -179,6 +181,7 @@ static int virtio_transport_send_credit_update(struct vsock_sock *vsk, struct virtio_vsock_pkt_info info = { .op = VIRTIO_VSOCK_OP_CREDIT_UPDATE, .type = type, + .vsk = vsk, }; return virtio_transport_send_pkt_info(vsk, &info); @@ -518,6 +521,7 @@ int virtio_transport_connect(struct vsock_sock *vsk) struct virtio_vsock_pkt_info info = { .op = VIRTIO_VSOCK_OP_REQUEST, .type = VIRTIO_VSOCK_TYPE_STREAM, + .vsk = vsk, }; return virtio_transport_send_pkt_info(vsk, &info); @@ -533,6 +537,7 @@ int virtio_transport_shutdown(struct vsock_sock *vsk, int mode) VIRTIO_VSOCK_SHUTDOWN_RCV : 0) | (mode & SEND_SHUTDOWN ? 
VIRTIO_VSOCK_SHUTDOWN_SEND : 0), + .vsk = vsk, }; return virtio_transport_send_pkt_info(vsk, &info); @@ -559,6 +564,7 @@ virtio_transport_stream_enqueue(struct vsock_sock *vsk, .type = VIRTIO_VSOCK_TYPE_STREAM, .msg = msg, .pkt_len = len, + .vsk = vsk, }; return virtio_transport_send_pkt_info(vsk, &info); @@ -580,6 +586,7 @@ static int virtio_transport_reset(struct vsock_sock *vsk, .op = VIRTIO_VSOCK_OP_RST, .type = VIRTIO_VSOCK_TYPE_STREAM, .reply = !!pkt, + .vsk = vsk, }; /* Send RST only if the original pkt is not a RST pkt */ @@ -825,6 +832,7 @@ virtio_transport_send_response(struct vsock_sock *vsk, .remote_cid = le64_to_cpu(pkt->hdr.src_cid), .remote_port = le32_to_cpu(pkt->hdr.src_port), .reply = true, + .vsk = vsk, }; return virtio_transport_send_pkt_info(vsk, &info); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index d7f8be4e321a32..2312dc2ffdb98b 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -545,22 +545,18 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb, { int err; - rtnl_lock(); - if (!cb->args[0]) { err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, genl_family_attrbuf(&nl80211_fam), nl80211_fam.maxattr, nl80211_policy); if (err) - goto out_unlock; + return err; *wdev = __cfg80211_wdev_from_attrs( sock_net(skb->sk), genl_family_attrbuf(&nl80211_fam)); - if (IS_ERR(*wdev)) { - err = PTR_ERR(*wdev); - goto out_unlock; - } + if (IS_ERR(*wdev)) + return PTR_ERR(*wdev); *rdev = wiphy_to_rdev((*wdev)->wiphy); /* 0 is the first index - add 1 to parse only once */ cb->args[0] = (*rdev)->wiphy_idx + 1; @@ -570,10 +566,8 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb, struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1); struct wireless_dev *tmp; - if (!wiphy) { - err = -ENODEV; - goto out_unlock; - } + if (!wiphy) + return -ENODEV; *rdev = wiphy_to_rdev(wiphy); *wdev = NULL; @@ -584,21 +578,11 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb, } } - if (!*wdev) { - err = -ENODEV; - goto out_unlock; - } + if (!*wdev) + return -ENODEV; } return 0; - out_unlock: - rtnl_unlock(); - return err; -} - -static void nl80211_finish_wdev_dump(struct cfg80211_registered_device *rdev) -{ - rtnl_unlock(); } /* IE validation */ @@ -2608,17 +2592,17 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback * int filter_wiphy = -1; struct cfg80211_registered_device *rdev; struct wireless_dev *wdev; + int ret; rtnl_lock(); if (!cb->args[2]) { struct nl80211_dump_wiphy_state state = { .filter_wiphy = -1, }; - int ret; ret = nl80211_dump_wiphy_parse(skb, cb, &state); if (ret) - return ret; + goto out_unlock; filter_wiphy = state.filter_wiphy; @@ -2663,12 +2647,14 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback * wp_idx++; } out: - rtnl_unlock(); - cb->args[0] = wp_idx; cb->args[1] = if_idx; - return skb->len; + ret = skb->len; + out_unlock: + rtnl_unlock(); + + return ret; } static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info) @@ -4452,9 +4438,10 @@ static int nl80211_dump_station(struct sk_buff *skb, int sta_idx = cb->args[2]; int err; + rtnl_lock(); err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); if (err) - return err; + goto out_err; if (!wdev->netdev) { err = -EINVAL; @@ -4489,7 +4476,7 @@ static int nl80211_dump_station(struct sk_buff *skb, cb->args[2] = sta_idx; err = skb->len; out_err: - nl80211_finish_wdev_dump(rdev); + rtnl_unlock(); return err; } @@ -5275,9 +5262,10 @@ static int nl80211_dump_mpath(struct sk_buff *skb, 
int path_idx = cb->args[2]; int err; + rtnl_lock(); err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); if (err) - return err; + goto out_err; if (!rdev->ops->dump_mpath) { err = -EOPNOTSUPP; @@ -5310,7 +5298,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb, cb->args[2] = path_idx; err = skb->len; out_err: - nl80211_finish_wdev_dump(rdev); + rtnl_unlock(); return err; } @@ -5470,9 +5458,10 @@ static int nl80211_dump_mpp(struct sk_buff *skb, int path_idx = cb->args[2]; int err; + rtnl_lock(); err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); if (err) - return err; + goto out_err; if (!rdev->ops->dump_mpp) { err = -EOPNOTSUPP; @@ -5505,7 +5494,7 @@ static int nl80211_dump_mpp(struct sk_buff *skb, cb->args[2] = path_idx; err = skb->len; out_err: - nl80211_finish_wdev_dump(rdev); + rtnl_unlock(); return err; } @@ -7674,9 +7663,12 @@ static int nl80211_dump_scan(struct sk_buff *skb, struct netlink_callback *cb) int start = cb->args[2], idx = 0; int err; + rtnl_lock(); err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); - if (err) + if (err) { + rtnl_unlock(); return err; + } wdev_lock(wdev); spin_lock_bh(&rdev->bss_lock); @@ -7699,7 +7691,7 @@ static int nl80211_dump_scan(struct sk_buff *skb, struct netlink_callback *cb) wdev_unlock(wdev); cb->args[2] = idx; - nl80211_finish_wdev_dump(rdev); + rtnl_unlock(); return skb->len; } @@ -7784,9 +7776,10 @@ static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb) int res; bool radio_stats; + rtnl_lock(); res = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); if (res) - return res; + goto out_err; /* prepare_wdev_dump parsed the attributes */ radio_stats = attrbuf[NL80211_ATTR_SURVEY_RADIO_STATS]; @@ -7827,7 +7820,7 @@ static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb) cb->args[2] = survey_idx; res = skb->len; out_err: - nl80211_finish_wdev_dump(rdev); + rtnl_unlock(); return res; } @@ -11508,17 +11501,13 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb, void *data = NULL; unsigned int data_len = 0; - rtnl_lock(); - if (cb->args[0]) { /* subtract the 1 again here */ struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1); struct wireless_dev *tmp; - if (!wiphy) { - err = -ENODEV; - goto out_unlock; - } + if (!wiphy) + return -ENODEV; *rdev = wiphy_to_rdev(wiphy); *wdev = NULL; @@ -11538,23 +11527,19 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb, err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, attrbuf, nl80211_fam.maxattr, nl80211_policy); if (err) - goto out_unlock; + return err; if (!attrbuf[NL80211_ATTR_VENDOR_ID] || - !attrbuf[NL80211_ATTR_VENDOR_SUBCMD]) { - err = -EINVAL; - goto out_unlock; - } + !attrbuf[NL80211_ATTR_VENDOR_SUBCMD]) + return -EINVAL; *wdev = __cfg80211_wdev_from_attrs(sock_net(skb->sk), attrbuf); if (IS_ERR(*wdev)) *wdev = NULL; *rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk), attrbuf); - if (IS_ERR(*rdev)) { - err = PTR_ERR(*rdev); - goto out_unlock; - } + if (IS_ERR(*rdev)) + return PTR_ERR(*rdev); vid = nla_get_u32(attrbuf[NL80211_ATTR_VENDOR_ID]); subcmd = nla_get_u32(attrbuf[NL80211_ATTR_VENDOR_SUBCMD]); @@ -11567,19 +11552,15 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb, if (vcmd->info.vendor_id != vid || vcmd->info.subcmd != subcmd) continue; - if (!vcmd->dumpit) { - err = -EOPNOTSUPP; - goto out_unlock; - } + if (!vcmd->dumpit) + return -EOPNOTSUPP; vcmd_idx = i; break; } - if (vcmd_idx < 0) { - err = -EOPNOTSUPP; - goto out_unlock; - } + if (vcmd_idx < 0) + return -EOPNOTSUPP; if 
(attrbuf[NL80211_ATTR_VENDOR_DATA]) { data = nla_data(attrbuf[NL80211_ATTR_VENDOR_DATA]); @@ -11596,9 +11577,6 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb, /* keep rtnl locked in successful case */ return 0; - out_unlock: - rtnl_unlock(); - return err; } static int nl80211_vendor_cmd_dump(struct sk_buff *skb, @@ -11613,9 +11591,10 @@ static int nl80211_vendor_cmd_dump(struct sk_buff *skb, int err; struct nlattr *vendor_data; + rtnl_lock(); err = nl80211_prepare_vendor_dump(skb, cb, &rdev, &wdev); if (err) - return err; + goto out; vcmd_idx = cb->args[2]; data = (void *)cb->args[3]; @@ -11624,15 +11603,21 @@ static int nl80211_vendor_cmd_dump(struct sk_buff *skb, if (vcmd->flags & (WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV)) { - if (!wdev) - return -EINVAL; + if (!wdev) { + err = -EINVAL; + goto out; + } if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_NETDEV && - !wdev->netdev) - return -EINVAL; + !wdev->netdev) { + err = -EINVAL; + goto out; + } if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_RUNNING) { - if (!wdev_running(wdev)) - return -ENETDOWN; + if (!wdev_running(wdev)) { + err = -ENETDOWN; + goto out; + } } } diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c index 16b6b5988be969..570a2b67ca1036 100644 --- a/net/wireless/sysfs.c +++ b/net/wireless/sysfs.c @@ -132,12 +132,10 @@ static int wiphy_resume(struct device *dev) /* Age scan results with time spent in suspend */ cfg80211_bss_age(rdev, get_seconds() - rdev->suspend_at); - if (rdev->ops->resume) { - rtnl_lock(); - if (rdev->wiphy.registered) - ret = rdev_resume(rdev); - rtnl_unlock(); - } + rtnl_lock(); + if (rdev->wiphy.registered && rdev->ops->resume) + ret = rdev_resume(rdev); + rtnl_unlock(); return ret; } diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index 079c883aa96e5a..8b911c29860e79 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -41,7 +41,7 @@ #include #include #include -#include +#include #include #include #include @@ -852,7 +852,8 @@ static int x25_wait_for_data(struct sock *sk, long timeout) return rc; } -static int x25_accept(struct socket *sock, struct socket *newsock, int flags) +static int x25_accept(struct socket *sock, struct socket *newsock, int flags, + bool kern) { struct sock *sk = sock->sk; struct sock *newsk; diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 0806dccdf50784..236cbbc0ab9cff 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1243,7 +1243,7 @@ static inline int policy_to_flow_dir(int dir) } static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir, - const struct flowi *fl) + const struct flowi *fl, u16 family) { struct xfrm_policy *pol; @@ -1251,8 +1251,7 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir, again: pol = rcu_dereference(sk->sk_policy[dir]); if (pol != NULL) { - bool match = xfrm_selector_match(&pol->selector, fl, - sk->sk_family); + bool match = xfrm_selector_match(&pol->selector, fl, family); int err = 0; if (match) { @@ -2239,7 +2238,7 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, sk = sk_const_to_full_sk(sk); if (sk && sk->sk_policy[XFRM_POLICY_OUT]) { num_pols = 1; - pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl); + pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family); err = xfrm_expand_policies(fl, family, pols, &num_pols, &num_xfrms); if (err < 0) @@ -2518,7 +2517,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, pol = NULL; sk = sk_to_full_sk(sk); if (sk && 
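Editorial note: the nl80211 hunks above invert the locking convention for dumps - rtnl_lock()/rtnl_unlock() move out of nl80211_prepare_wdev_dump() and nl80211_prepare_vendor_dump() and into every dump caller, so the helpers run entirely under a lock they neither take nor drop (the vendor-dump path alone still "keeps rtnl locked in the successful case"). The shape of that convention, as a small sketch with a pthread mutex standing in for the rtnl:

#include <pthread.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Must be called with table_lock held; never drops it. */
static int prepare_dump(int idx, int *out)
{
        if (idx < 0)
                return -1;      /* caller still owns the lock */
        *out = idx * 2;
        return 0;
}

static int do_dump(int idx)
{
        int val, err;

        pthread_mutex_lock(&table_lock);
        err = prepare_dump(idx, &val);
        if (err)
                goto out_unlock;
        /* ... walk state protected by table_lock ... */
        err = val;
out_unlock:
        pthread_mutex_unlock(&table_lock);
        return err;
}

Keeping the lock/unlock pair in one function makes the error paths in the dump callers above (goto out_err / out_unlock followed by rtnl_unlock()) much easier to audit.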
sk->sk_policy[dir]) { - pol = xfrm_sk_policy_lookup(sk, dir, &fl); + pol = xfrm_sk_policy_lookup(sk, dir, &fl, family); if (IS_ERR(pol)) { XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); return 0; @@ -3069,6 +3068,11 @@ static int __net_init xfrm_net_init(struct net *net) { int rv; + /* Initialize the per-net locks here */ + spin_lock_init(&net->xfrm.xfrm_state_lock); + spin_lock_init(&net->xfrm.xfrm_policy_lock); + mutex_init(&net->xfrm.xfrm_cfg_mutex); + rv = xfrm_statistics_init(net); if (rv < 0) goto out_statistics; @@ -3085,11 +3089,6 @@ static int __net_init xfrm_net_init(struct net *net) if (rv < 0) goto out; - /* Initialize the per-net locks here */ - spin_lock_init(&net->xfrm.xfrm_state_lock); - spin_lock_init(&net->xfrm.xfrm_policy_lock); - mutex_init(&net->xfrm.xfrm_cfg_mutex); - return 0; out: diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 9705c279494b24..40a8aa39220d67 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -412,7 +412,14 @@ static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_es up = nla_data(rp); ulen = xfrm_replay_state_esn_len(up); - if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen) + /* Check the overall length and the internal bitmap length to avoid + * potential overflow. */ + if (nla_len(rp) < ulen || + xfrm_replay_state_esn_len(replay_esn) != ulen || + replay_esn->bmp_len != up->bmp_len) + return -EINVAL; + + if (up->replay_window > up->bmp_len * sizeof(__u32) * 8) return -EINVAL; return 0; diff --git a/samples/Kconfig b/samples/Kconfig index b124f62ed6cb30..9cb63188d3ef2f 100644 --- a/samples/Kconfig +++ b/samples/Kconfig @@ -112,4 +112,10 @@ config SAMPLE_VFIO_MDEV_MTTY Build a virtual tty sample driver for use as a VFIO mediated device +config SAMPLE_STATX + bool "Build example extended-stat using code" + depends on BROKEN + help + Build example userspace program to use the new extended-stat syscall. + endif # SAMPLES diff --git a/samples/Makefile b/samples/Makefile index 86a137e451d978..db54e766ddb1a3 100644 --- a/samples/Makefile +++ b/samples/Makefile @@ -3,4 +3,4 @@ obj-$(CONFIG_SAMPLES) += kobject/ kprobes/ trace_events/ livepatch/ \ hw_breakpoint/ kfifo/ kdb/ hidraw/ rpmsg/ seccomp/ \ configfs/ connector/ v4l/ trace_printk/ blackfin/ \ - vfio-mdev/ + vfio-mdev/ statx/ diff --git a/samples/statx/Makefile b/samples/statx/Makefile new file mode 100644 index 00000000000000..1f80a3d8cf45ca --- /dev/null +++ b/samples/statx/Makefile @@ -0,0 +1,10 @@ +# kbuild trick to avoid linker error. Can be omitted if a module is built. +obj- := dummy.o + +# List of programs to build +hostprogs-$(CONFIG_SAMPLE_STATX) := test-statx + +# Tell kbuild to always build the programs +always := $(hostprogs-y) + +HOSTCFLAGS_test-statx.o += -I$(objtree)/usr/include diff --git a/samples/statx/test-statx.c b/samples/statx/test-statx.c new file mode 100644 index 00000000000000..8571d766331dd1 --- /dev/null +++ b/samples/statx/test-statx.c @@ -0,0 +1,254 @@ +/* Test the statx() system call. + * + * Note that the output of this program is intended to look like the output of + * /bin/stat where possible. + * + * Copyright (C) 2015 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. 
+ */ + +#define _GNU_SOURCE +#define _ATFILE_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define AT_STATX_SYNC_TYPE 0x6000 +#define AT_STATX_SYNC_AS_STAT 0x0000 +#define AT_STATX_FORCE_SYNC 0x2000 +#define AT_STATX_DONT_SYNC 0x4000 + +static __attribute__((unused)) +ssize_t statx(int dfd, const char *filename, unsigned flags, + unsigned int mask, struct statx *buffer) +{ + return syscall(__NR_statx, dfd, filename, flags, mask, buffer); +} + +static void print_time(const char *field, struct statx_timestamp *ts) +{ + struct tm tm; + time_t tim; + char buffer[100]; + int len; + + tim = ts->tv_sec; + if (!localtime_r(&tim, &tm)) { + perror("localtime_r"); + exit(1); + } + len = strftime(buffer, 100, "%F %T", &tm); + if (len == 0) { + perror("strftime"); + exit(1); + } + printf("%s", field); + fwrite(buffer, 1, len, stdout); + printf(".%09u", ts->tv_nsec); + len = strftime(buffer, 100, "%z", &tm); + if (len == 0) { + perror("strftime2"); + exit(1); + } + fwrite(buffer, 1, len, stdout); + printf("\n"); +} + +static void dump_statx(struct statx *stx) +{ + char buffer[256], ft = '?'; + + printf("results=%x\n", stx->stx_mask); + + printf(" "); + if (stx->stx_mask & STATX_SIZE) + printf(" Size: %-15llu", (unsigned long long)stx->stx_size); + if (stx->stx_mask & STATX_BLOCKS) + printf(" Blocks: %-10llu", (unsigned long long)stx->stx_blocks); + printf(" IO Block: %-6llu", (unsigned long long)stx->stx_blksize); + if (stx->stx_mask & STATX_TYPE) { + switch (stx->stx_mode & S_IFMT) { + case S_IFIFO: printf(" FIFO\n"); ft = 'p'; break; + case S_IFCHR: printf(" character special file\n"); ft = 'c'; break; + case S_IFDIR: printf(" directory\n"); ft = 'd'; break; + case S_IFBLK: printf(" block special file\n"); ft = 'b'; break; + case S_IFREG: printf(" regular file\n"); ft = '-'; break; + case S_IFLNK: printf(" symbolic link\n"); ft = 'l'; break; + case S_IFSOCK: printf(" socket\n"); ft = 's'; break; + default: + printf(" unknown type (%o)\n", stx->stx_mode & S_IFMT); + break; + } + } else { + printf(" no type\n"); + } + + sprintf(buffer, "%02x:%02x", stx->stx_dev_major, stx->stx_dev_minor); + printf("Device: %-15s", buffer); + if (stx->stx_mask & STATX_INO) + printf(" Inode: %-11llu", (unsigned long long) stx->stx_ino); + if (stx->stx_mask & STATX_NLINK) + printf(" Links: %-5u", stx->stx_nlink); + if (stx->stx_mask & STATX_TYPE) { + switch (stx->stx_mode & S_IFMT) { + case S_IFBLK: + case S_IFCHR: + printf(" Device type: %u,%u", + stx->stx_rdev_major, stx->stx_rdev_minor); + break; + } + } + printf("\n"); + + if (stx->stx_mask & STATX_MODE) + printf("Access: (%04o/%c%c%c%c%c%c%c%c%c%c) ", + stx->stx_mode & 07777, + ft, + stx->stx_mode & S_IRUSR ? 'r' : '-', + stx->stx_mode & S_IWUSR ? 'w' : '-', + stx->stx_mode & S_IXUSR ? 'x' : '-', + stx->stx_mode & S_IRGRP ? 'r' : '-', + stx->stx_mode & S_IWGRP ? 'w' : '-', + stx->stx_mode & S_IXGRP ? 'x' : '-', + stx->stx_mode & S_IROTH ? 'r' : '-', + stx->stx_mode & S_IWOTH ? 'w' : '-', + stx->stx_mode & S_IXOTH ? 
'x' : '-'); + if (stx->stx_mask & STATX_UID) + printf("Uid: %5d ", stx->stx_uid); + if (stx->stx_mask & STATX_GID) + printf("Gid: %5d\n", stx->stx_gid); + + if (stx->stx_mask & STATX_ATIME) + print_time("Access: ", &stx->stx_atime); + if (stx->stx_mask & STATX_MTIME) + print_time("Modify: ", &stx->stx_mtime); + if (stx->stx_mask & STATX_CTIME) + print_time("Change: ", &stx->stx_ctime); + if (stx->stx_mask & STATX_BTIME) + print_time(" Birth: ", &stx->stx_btime); + + if (stx->stx_attributes) { + unsigned char bits; + int loop, byte; + + static char attr_representation[64 + 1] = + /* STATX_ATTR_ flags: */ + "????????" /* 63-56 */ + "????????" /* 55-48 */ + "????????" /* 47-40 */ + "????????" /* 39-32 */ + "????????" /* 31-24 0x00000000-ff000000 */ + "????????" /* 23-16 0x00000000-00ff0000 */ + "???me???" /* 15- 8 0x00000000-0000ff00 */ + "?dai?c??" /* 7- 0 0x00000000-000000ff */ + ; + + printf("Attributes: %016llx (", stx->stx_attributes); + for (byte = 64 - 8; byte >= 0; byte -= 8) { + bits = stx->stx_attributes >> byte; + for (loop = 7; loop >= 0; loop--) { + int bit = byte + loop; + + if (bits & 0x80) + putchar(attr_representation[63 - bit]); + else + putchar('-'); + bits <<= 1; + } + if (byte) + putchar(' '); + } + printf(")\n"); + } +} + +static void dump_hex(unsigned long long *data, int from, int to) +{ + unsigned offset, print_offset = 1, col = 0; + + from /= 8; + to = (to + 7) / 8; + + for (offset = from; offset < to; offset++) { + if (print_offset) { + printf("%04x: ", offset * 8); + print_offset = 0; + } + printf("%016llx", data[offset]); + col++; + if ((col & 3) == 0) { + printf("\n"); + print_offset = 1; + } else { + printf(" "); + } + } + + if (!print_offset) + printf("\n"); +} + +int main(int argc, char **argv) +{ + struct statx stx; + int ret, raw = 0, atflag = AT_SYMLINK_NOFOLLOW; + + unsigned int mask = STATX_ALL; + + for (argv++; *argv; argv++) { + if (strcmp(*argv, "-F") == 0) { + atflag &= ~AT_STATX_SYNC_TYPE; + atflag |= AT_STATX_FORCE_SYNC; + continue; + } + if (strcmp(*argv, "-D") == 0) { + atflag &= ~AT_STATX_SYNC_TYPE; + atflag |= AT_STATX_DONT_SYNC; + continue; + } + if (strcmp(*argv, "-L") == 0) { + atflag &= ~AT_SYMLINK_NOFOLLOW; + continue; + } + if (strcmp(*argv, "-O") == 0) { + mask &= ~STATX_BASIC_STATS; + continue; + } + if (strcmp(*argv, "-A") == 0) { + atflag |= AT_NO_AUTOMOUNT; + continue; + } + if (strcmp(*argv, "-R") == 0) { + raw = 1; + continue; + } + + memset(&stx, 0xbf, sizeof(stx)); + ret = statx(AT_FDCWD, *argv, atflag, mask, &stx); + printf("statx(%s) = %d\n", *argv, ret); + if (ret < 0) { + perror(*argv); + exit(1); + } + + if (raw) + dump_hex((unsigned long long *)&stx, 0, sizeof(stx)); + + dump_statx(&stx); + } + return 0; +} diff --git a/samples/trace_events/trace-events-sample.c b/samples/trace_events/trace-events-sample.c index 30e282d33d4dc5..bc7fcf010a5b4c 100644 --- a/samples/trace_events/trace-events-sample.c +++ b/samples/trace_events/trace-events-sample.c @@ -33,7 +33,7 @@ static void simple_thread_func(int cnt) /* Silly tracepoints */ trace_foo_bar("hello", cnt, array, random_strings[len], - tsk_cpus_allowed(current)); + ¤t->cpus_allowed); trace_foo_with_template_simple("HELLO", cnt); diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index d6ca649cb0e96d..afe3fd3af1e406 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include @@ -148,6 +148,10 @@ cc-fullversion = $(shell $(CONFIG_SHELL) \ # Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1) cc-ifversion = $(shell [ $(cc-version) $(1) $(2) ] && echo $(3) || 
echo $(4)) +# cc-if-fullversion +# Usage: EXTRA_CFLAGS += $(call cc-if-fullversion, -lt, 040502, -O1) +cc-if-fullversion = $(shell [ $(cc-fullversion) $(1) $(2) ] && echo $(3) || echo $(4)) + # cc-ldoption # Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both) cc-ldoption = $(call try-run,\ diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index 0a07f9014944ed..7234e61e7ce370 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -155,7 +155,7 @@ else # $(call addtree,-I$(obj)) locates .h files in srctree, from generated .c files # and locates generated .h files # FIXME: Replace both with specific CFLAGS* statements in the makefiles -__c_flags = $(if $(obj),-I$(srctree)/$(src) -I$(obj)) \ +__c_flags = $(if $(obj),$(call addtree,-I$(src)) -I$(obj)) \ $(call flags,_c_flags) __a_flags = $(call flags,_a_flags) __cpp_flags = $(call flags,_cpp_flags) diff --git a/scripts/gcc-plugins/sancov_plugin.c b/scripts/gcc-plugins/sancov_plugin.c index 9b0b5cbc5b899b..0f98634c20a097 100644 --- a/scripts/gcc-plugins/sancov_plugin.c +++ b/scripts/gcc-plugins/sancov_plugin.c @@ -133,7 +133,7 @@ __visible int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gc #if BUILDING_GCC_VERSION < 6000 register_callback(plugin_name, PLUGIN_START_UNIT, &sancov_start_unit, NULL); register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL, (void *)>_ggc_r_gt_sancov); - register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &sancov_plugin_pass_info); + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &sancov_pass_info); #endif return 0; diff --git a/scripts/kconfig/gconf.c b/scripts/kconfig/gconf.c index 26d208b435a0d3..cfddddb9c9d722 100644 --- a/scripts/kconfig/gconf.c +++ b/scripts/kconfig/gconf.c @@ -914,7 +914,7 @@ on_treeview2_button_press_event(GtkWidget * widget, current = menu; display_tree_part(); gtk_widget_set_sensitive(back_btn, TRUE); - } else if ((col == COL_OPTION)) { + } else if (col == COL_OPTION) { toggle_sym_value(menu); gtk_tree_view_expand_row(view, path, TRUE); } diff --git a/scripts/module-common.lds b/scripts/module-common.lds index cf7e52e4781b9b..9b6e246a45d09f 100644 --- a/scripts/module-common.lds +++ b/scripts/module-common.lds @@ -22,4 +22,6 @@ SECTIONS { . 
= ALIGN(8); .init_array 0 : { *(SORT(.init_array.*)) *(.init_array) } + + __jump_table 0 : ALIGN(8) { KEEP(*(__jump_table)) } } diff --git a/scripts/spelling.txt b/scripts/spelling.txt index 0458b037c8a137..0545f5a8cabed7 100644 --- a/scripts/spelling.txt +++ b/scripts/spelling.txt @@ -372,6 +372,8 @@ disassocation||disassociation disapear||disappear disapeared||disappeared disappared||disappeared +disble||disable +disbled||disabled disconnet||disconnect discontinous||discontinuous dispertion||dispersion @@ -732,6 +734,7 @@ oustanding||outstanding overaall||overall overhread||overhead overlaping||overlapping +overide||override overrided||overridden overriden||overridden overun||overrun diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c index f44312a19522b6..def1fbd6bdfd81 100644 --- a/security/apparmor/policy.c +++ b/security/apparmor/policy.c @@ -76,6 +76,8 @@ #include #include #include +#include +#include #include #include "include/apparmor.h" diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c index e2ed498c0f5f59..063d38aef64e71 100644 --- a/security/integrity/evm/evm_main.c +++ b/security/integrity/evm/evm_main.c @@ -22,6 +22,8 @@ #include #include #include +#include + #include #include #include "evm.h" diff --git a/security/keys/dh.c b/security/keys/dh.c index 531ed2ec132f4f..893af4c450382a 100644 --- a/security/keys/dh.c +++ b/security/keys/dh.c @@ -55,7 +55,7 @@ static ssize_t mpi_from_key(key_serial_t keyid, size_t maxlen, MPI *mpi) if (status == 0) { const struct user_key_payload *payload; - payload = user_key_payload(key); + payload = user_key_payload_locked(key); if (maxlen == 0) { *mpi = NULL; diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c index 4fb315cddf5b00..0010955d7876c2 100644 --- a/security/keys/encrypted-keys/encrypted.c +++ b/security/keys/encrypted-keys/encrypted.c @@ -314,7 +314,7 @@ static struct key *request_user_key(const char *master_desc, const u8 **master_k goto error; down_read(&ukey->sem); - upayload = user_key_payload(ukey); + upayload = user_key_payload_locked(ukey); *master_key = upayload->data; *master_keylen = upayload->datalen; error: @@ -926,7 +926,7 @@ static long encrypted_read(const struct key *key, char __user *buffer, size_t asciiblob_len; int ret; - epayload = rcu_dereference_key(key); + epayload = dereference_key_locked(key); /* returns the hex encoded iv, encrypted-data, and hmac as ascii */ asciiblob_len = epayload->datablob_len + ivsize + 1 diff --git a/security/keys/internal.h b/security/keys/internal.h index a705a7d92ad7a9..a2f4c0abb8d847 100644 --- a/security/keys/internal.h +++ b/security/keys/internal.h @@ -13,6 +13,7 @@ #define _INTERNAL_H #include +#include #include #include #include diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c index 04a764f71ec88e..52c34532c78562 100644 --- a/security/keys/keyctl.c +++ b/security/keys/keyctl.c @@ -12,12 +12,14 @@ #include #include #include +#include #include #include #include #include #include #include +#include #include #include #include diff --git a/security/keys/persistent.c b/security/keys/persistent.c index 1edc1f0a0ce2c4..d0cb5b32eff7ba 100644 --- a/security/keys/persistent.c +++ b/security/keys/persistent.c @@ -10,6 +10,8 @@ */ #include +#include + #include "internal.h" unsigned persistent_keyring_expiry = 3 * 24 * 3600; /* Expire after 3 days of non-use */ diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c index 918cddcd4516ae..b6fdd22205b169 100644 --- 
a/security/keys/process_keys.c +++ b/security/keys/process_keys.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include diff --git a/security/keys/trusted.c b/security/keys/trusted.c index 90d61751ff12f3..2ae31c5a87de9e 100644 --- a/security/keys/trusted.c +++ b/security/keys/trusted.c @@ -1140,12 +1140,12 @@ static int trusted_update(struct key *key, struct key_preparsed_payload *prep) static long trusted_read(const struct key *key, char __user *buffer, size_t buflen) { - struct trusted_key_payload *p; + const struct trusted_key_payload *p; char *ascii_buf; char *bufp; int i; - p = rcu_dereference_key(key); + p = dereference_key_locked(key); if (!p) return -EINVAL; if (!buffer || buflen <= 0) diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c index e187c8909d9db1..26605134f17a8a 100644 --- a/security/keys/user_defined.c +++ b/security/keys/user_defined.c @@ -107,7 +107,7 @@ int user_update(struct key *key, struct key_preparsed_payload *prep) /* attach the new data, displacing the old */ key->expiry = prep->expiry; if (!test_bit(KEY_FLAG_NEGATIVE, &key->flags)) - zap = rcu_dereference_key(key); + zap = dereference_key_locked(key); rcu_assign_keypointer(key, prep->payload.data[0]); prep->payload.data[0] = NULL; @@ -123,7 +123,7 @@ EXPORT_SYMBOL_GPL(user_update); */ void user_revoke(struct key *key) { - struct user_key_payload *upayload = key->payload.data[0]; + struct user_key_payload *upayload = user_key_payload_locked(key); /* clear the quota */ key_payload_reserve(key, 0); @@ -169,7 +169,7 @@ long user_read(const struct key *key, char __user *buffer, size_t buflen) const struct user_key_payload *upayload; long ret; - upayload = user_key_payload(key); + upayload = user_key_payload_locked(key); ret = upayload->datalen; /* we can return the data as is */ diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 9a8f12f8d5b7ff..0c2ac318aa7fb8 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -28,7 +28,8 @@ #include #include #include -#include +#include +#include #include #include #include @@ -480,12 +481,13 @@ static int selinux_is_sblabel_mnt(struct super_block *sb) sbsec->behavior == SECURITY_FS_USE_NATIVE || /* Special handling. 
Genfs but also in-core setxattr handler */ !strcmp(sb->s_type->name, "sysfs") || - !strcmp(sb->s_type->name, "cgroup") || - !strcmp(sb->s_type->name, "cgroup2") || !strcmp(sb->s_type->name, "pstore") || !strcmp(sb->s_type->name, "debugfs") || !strcmp(sb->s_type->name, "tracefs") || - !strcmp(sb->s_type->name, "rootfs"); + !strcmp(sb->s_type->name, "rootfs") || + (selinux_policycap_cgroupseclabel && + (!strcmp(sb->s_type->name, "cgroup") || + !strcmp(sb->s_type->name, "cgroup2"))); } static int sb_finish_set_opts(struct super_block *sb) diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h index beaa14b8b6cf57..f979c35e037ec4 100644 --- a/security/selinux/include/security.h +++ b/security/selinux/include/security.h @@ -71,6 +71,7 @@ enum { POLICYDB_CAPABILITY_OPENPERM, POLICYDB_CAPABILITY_EXTSOCKCLASS, POLICYDB_CAPABILITY_ALWAYSNETWORK, + POLICYDB_CAPABILITY_CGROUPSECLABEL, __POLICYDB_CAPABILITY_MAX }; #define POLICYDB_CAPABILITY_MAX (__POLICYDB_CAPABILITY_MAX - 1) @@ -79,6 +80,7 @@ extern int selinux_policycap_netpeer; extern int selinux_policycap_openperm; extern int selinux_policycap_extsockclass; extern int selinux_policycap_alwaysnetwork; +extern int selinux_policycap_cgroupseclabel; /* * type_datum properties diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index c9e8a9898ce481..cb3fd98fb05ae7 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c @@ -46,7 +46,8 @@ static char *policycap_names[] = { "network_peer_controls", "open_perms", "extended_socket_class", - "always_check_network" + "always_check_network", + "cgroup_seclabel" }; unsigned int selinux_checkreqprot = CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE; diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index a70fcee9824ba3..b4aa491a0a23d8 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c @@ -74,6 +74,7 @@ int selinux_policycap_netpeer; int selinux_policycap_openperm; int selinux_policycap_extsockclass; int selinux_policycap_alwaysnetwork; +int selinux_policycap_cgroupseclabel; static DEFINE_RWLOCK(policy_rwlock); @@ -1993,6 +1994,9 @@ static void security_load_policycaps(void) POLICYDB_CAPABILITY_EXTSOCKCLASS); selinux_policycap_alwaysnetwork = ebitmap_get_bit(&policydb.policycaps, POLICYDB_CAPABILITY_ALWAYSNETWORK); + selinux_policycap_cgroupseclabel = + ebitmap_get_bit(&policydb.policycaps, + POLICYDB_CAPABILITY_CGROUPSECLABEL); } static int security_preserve_bools(struct policydb *p); diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c index 838ffa78cfdac1..00d223e9fb37ca 100644 --- a/security/tomoyo/domain.c +++ b/security/tomoyo/domain.c @@ -5,8 +5,10 @@ */ #include "common.h" + #include #include +#include /* Variables definitions.*/ diff --git a/security/tomoyo/group.c b/security/tomoyo/group.c index 50092534ec5408..944ad77d8fbac2 100644 --- a/security/tomoyo/group.c +++ b/security/tomoyo/group.c @@ -5,6 +5,8 @@ */ #include +#include + #include "common.h" /** diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c index 5fe3679137aeb7..848317fea704fe 100644 --- a/security/tomoyo/util.c +++ b/security/tomoyo/util.c @@ -5,6 +5,8 @@ */ #include +#include + #include "common.h" /* Lock for protecting policy. 
*/ diff --git a/sound/core/control.c b/sound/core/control.c index fb096cb20a80d1..c109b82eef4bd4 100644 --- a/sound/core/control.c +++ b/sound/core/control.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include diff --git a/sound/core/hwdep.c b/sound/core/hwdep.c index 36d2416f90d994..9602a7e38d8a81 100644 --- a/sound/core/hwdep.c +++ b/sound/core/hwdep.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c index 698a014195152a..36baf962f9b081 100644 --- a/sound/core/oss/pcm_oss.c +++ b/sound/core/oss/pcm_oss.c @@ -28,6 +28,7 @@ #include #include +#include #include #include #include diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index bb1261591a1f30..5088d4b8db2222 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c @@ -21,6 +21,7 @@ */ #include +#include #include #include #include diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index aec9c92250fd72..13dec5ec93f20d 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c index 8da9cb245d0150..ab890336175fbc 100644 --- a/sound/core/rawmidi.c +++ b/sound/core/rawmidi.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/sound/core/seq/oss/seq_oss_device.h b/sound/core/seq/oss/seq_oss_device.h index d7b4d016b54758..afa007c0cc2d45 100644 --- a/sound/core/seq/oss/seq_oss_device.h +++ b/sound/core/seq/oss/seq_oss_device.h @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/sound/core/seq/oss/seq_oss_writeq.c b/sound/core/seq/oss/seq_oss_writeq.c index 1f6788a1844411..5e04f4df10e416 100644 --- a/sound/core/seq/oss/seq_oss_writeq.c +++ b/sound/core/seq/oss/seq_oss_writeq.c @@ -28,6 +28,7 @@ #include "../seq_clientmgr.h" #include #include +#include /* diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c index 4c935202ce23be..f3b1d7f50b8115 100644 --- a/sound/core/seq/seq_clientmgr.c +++ b/sound/core/seq/seq_clientmgr.c @@ -1832,6 +1832,7 @@ static int snd_seq_ioctl_set_client_pool(struct snd_seq_client *client, info->output_pool != client->pool->size)) { if (snd_seq_write_pool_allocated(client)) { /* remove all existing cells */ + snd_seq_pool_mark_closing(client->pool); snd_seq_queue_client_leave_cells(client->number); snd_seq_pool_done(client->pool); } diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c index 86240d02b53077..01c4cfe30c9fef 100644 --- a/sound/core/seq/seq_fifo.c +++ b/sound/core/seq/seq_fifo.c @@ -21,6 +21,8 @@ #include #include +#include + #include "seq_fifo.h" #include "seq_lock.h" @@ -70,6 +72,9 @@ void snd_seq_fifo_delete(struct snd_seq_fifo **fifo) return; *fifo = NULL; + if (f->pool) + snd_seq_pool_mark_closing(f->pool); + snd_seq_fifo_clear(f); /* wake up clients if any */ @@ -262,6 +267,10 @@ int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize) /* NOTE: overflow flag is not cleared */ spin_unlock_irqrestore(&f->lock, flags); + /* close the old pool and wait until all users are gone */ + snd_seq_pool_mark_closing(oldpool); + snd_use_lock_sync(&f->use_lock); + /* release cells in old pool */ for (cell = oldhead; cell; cell = next) { next = cell->next; diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c index dfa5156f358563..d4c61ec9be13d7 100644 --- 
a/sound/core/seq/seq_memory.c +++ b/sound/core/seq/seq_memory.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -414,6 +415,18 @@ int snd_seq_pool_init(struct snd_seq_pool *pool) return 0; } +/* refuse the further insertion to the pool */ +void snd_seq_pool_mark_closing(struct snd_seq_pool *pool) +{ + unsigned long flags; + + if (snd_BUG_ON(!pool)) + return; + spin_lock_irqsave(&pool->lock, flags); + pool->closing = 1; + spin_unlock_irqrestore(&pool->lock, flags); +} + /* remove events */ int snd_seq_pool_done(struct snd_seq_pool *pool) { @@ -424,10 +437,6 @@ int snd_seq_pool_done(struct snd_seq_pool *pool) return -EINVAL; /* wait for closing all threads */ - spin_lock_irqsave(&pool->lock, flags); - pool->closing = 1; - spin_unlock_irqrestore(&pool->lock, flags); - if (waitqueue_active(&pool->output_sleep)) wake_up(&pool->output_sleep); @@ -484,6 +493,7 @@ int snd_seq_pool_delete(struct snd_seq_pool **ppool) *ppool = NULL; if (pool == NULL) return 0; + snd_seq_pool_mark_closing(pool); snd_seq_pool_done(pool); kfree(pool); return 0; diff --git a/sound/core/seq/seq_memory.h b/sound/core/seq/seq_memory.h index 4a2ec779b8a701..32f959c17786d9 100644 --- a/sound/core/seq/seq_memory.h +++ b/sound/core/seq/seq_memory.h @@ -84,6 +84,7 @@ static inline int snd_seq_total_cells(struct snd_seq_pool *pool) int snd_seq_pool_init(struct snd_seq_pool *pool); /* done pool - free events */ +void snd_seq_pool_mark_closing(struct snd_seq_pool *pool); int snd_seq_pool_done(struct snd_seq_pool *pool); /* create pool */ diff --git a/sound/core/timer.c b/sound/core/timer.c index ad153149b23167..6d4fbc43924692 100644 --- a/sound/core/timer.c +++ b/sound/core/timer.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include diff --git a/sound/firewire/bebob/bebob.h b/sound/firewire/bebob/bebob.h index 175da875162d83..17678d6ab5a2d9 100644 --- a/sound/firewire/bebob/bebob.h +++ b/sound/firewire/bebob/bebob.h @@ -17,6 +17,7 @@ #include #include #include +#include #include #include diff --git a/sound/firewire/dice/dice.h b/sound/firewire/dice/dice.h index e6c07857f4755f..da00e75e09d4ae 100644 --- a/sound/firewire/dice/dice.h +++ b/sound/firewire/dice/dice.h @@ -23,6 +23,7 @@ #include #include #include +#include #include #include diff --git a/sound/firewire/digi00x/digi00x.h b/sound/firewire/digi00x/digi00x.h index 2cd465c0caae84..9dc761bdacca71 100644 --- a/sound/firewire/digi00x/digi00x.h +++ b/sound/firewire/digi00x/digi00x.h @@ -16,6 +16,7 @@ #include #include #include +#include #include #include diff --git a/sound/firewire/fireworks/fireworks.h b/sound/firewire/fireworks/fireworks.h index d73c12b8753da2..9b19c7f05d5791 100644 --- a/sound/firewire/fireworks/fireworks.h +++ b/sound/firewire/fireworks/fireworks.h @@ -17,6 +17,7 @@ #include #include #include +#include #include #include diff --git a/sound/firewire/oxfw/oxfw.h b/sound/firewire/oxfw/oxfw.h index 2047dcb2762516..d54d4a9ac4a159 100644 --- a/sound/firewire/oxfw/oxfw.h +++ b/sound/firewire/oxfw/oxfw.h @@ -13,6 +13,7 @@ #include #include #include +#include #include #include diff --git a/sound/firewire/tascam/tascam.h b/sound/firewire/tascam/tascam.h index 1f61011579a7fc..d3cd4065722b33 100644 --- a/sound/firewire/tascam/tascam.h +++ b/sound/firewire/tascam/tascam.h @@ -17,6 +17,7 @@ #include #include #include +#include #include #include diff --git a/sound/isa/gus/gus_pcm.c b/sound/isa/gus/gus_pcm.c index 25f6788ccef36b..06505999155faf 100644 --- a/sound/isa/gus/gus_pcm.c +++ b/sound/isa/gus/gus_pcm.c @@ -27,6 
+27,8 @@ #include #include +#include + #include #include #include diff --git a/sound/isa/msnd/msnd.c b/sound/isa/msnd/msnd.c index 835d4aa26761e5..8109ab3d29d1be 100644 --- a/sound/isa/msnd/msnd.c +++ b/sound/isa/msnd/msnd.c @@ -36,6 +36,7 @@ ********************************************************************/ #include +#include #include #include #include diff --git a/sound/isa/sb/emu8000.c b/sound/isa/sb/emu8000.c index 94c411299e5a0d..ec180708f160ca 100644 --- a/sound/isa/sb/emu8000.c +++ b/sound/isa/sb/emu8000.c @@ -21,7 +21,7 @@ */ #include -#include +#include #include #include #include diff --git a/sound/isa/sb/emu8000_patch.c b/sound/isa/sb/emu8000_patch.c index 71d13c0bb7463c..c2e41d2762f70a 100644 --- a/sound/isa/sb/emu8000_patch.c +++ b/sound/isa/sb/emu8000_patch.c @@ -20,6 +20,8 @@ */ #include "emu8000_local.h" + +#include #include #include diff --git a/sound/isa/sb/emu8000_pcm.c b/sound/isa/sb/emu8000_pcm.c index 250fd0006b5360..32f234f494e573 100644 --- a/sound/isa/sb/emu8000_pcm.c +++ b/sound/isa/sb/emu8000_pcm.c @@ -19,6 +19,8 @@ */ #include "emu8000_local.h" + +#include #include #include #include diff --git a/sound/isa/wavefront/wavefront_synth.c b/sound/isa/wavefront/wavefront_synth.c index 718d5e3b7806f0..4dae9ff9ef5afd 100644 --- a/sound/isa/wavefront/wavefront_synth.c +++ b/sound/isa/wavefront/wavefront_synth.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include diff --git a/sound/oss/dmabuf.c b/sound/oss/dmabuf.c index e3f29132d3acef..c5dd396c66a226 100644 --- a/sound/oss/dmabuf.c +++ b/sound/oss/dmabuf.c @@ -27,6 +27,8 @@ #include #include +#include + #include "sound_config.h" #include "sleep.h" diff --git a/sound/oss/dmasound/dmasound_core.c b/sound/oss/dmasound/dmasound_core.c index 5f248fb41beac9..fb3bbceb1fefdc 100644 --- a/sound/oss/dmasound/dmasound_core.c +++ b/sound/oss/dmasound/dmasound_core.c @@ -182,6 +182,7 @@ #include #include #include +#include #include diff --git a/sound/oss/midibuf.c b/sound/oss/midibuf.c index 8f45cd999965cd..701c7625c9713a 100644 --- a/sound/oss/midibuf.c +++ b/sound/oss/midibuf.c @@ -16,6 +16,8 @@ #include #include #include +#include + #define MIDIBUF_C #include "sound_config.h" diff --git a/sound/oss/msnd_pinnacle.c b/sound/oss/msnd_pinnacle.c index a8bb4a06ba6f83..f34ec01d22394d 100644 --- a/sound/oss/msnd_pinnacle.c +++ b/sound/oss/msnd_pinnacle.c @@ -41,6 +41,8 @@ #include #include #include +#include + #include #include #include "sound_config.h" diff --git a/sound/oss/sound_config.h b/sound/oss/sound_config.h index f2554ab78f5e5d..5253b0a704379e 100644 --- a/sound/oss/sound_config.h +++ b/sound/oss/sound_config.h @@ -16,6 +16,7 @@ #include #include +#include #include "os.h" #include "soundvers.h" diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c index f3af63e58b363f..97899352b15fb0 100644 --- a/sound/oss/swarm_cs4297a.c +++ b/sound/oss/swarm_cs4297a.c @@ -64,7 +64,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/sound/pci/ctxfi/cthw20k1.c b/sound/pci/ctxfi/cthw20k1.c index ab4cdab5cfa57a..79edd88d5cd083 100644 --- a/sound/pci/ctxfi/cthw20k1.c +++ b/sound/pci/ctxfi/cthw20k1.c @@ -1905,7 +1905,7 @@ static int hw_card_start(struct hw *hw) return err; /* Set DMA transfer mask */ - if (dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) { + if (!dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) { dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(dma_bits)); } else { dma_set_mask(&pci->dev, DMA_BIT_MASK(32)); diff --git a/sound/pci/hda/patch_conexant.c 
b/sound/pci/hda/patch_conexant.c index c15c51bea26d0a..69266b8ea2ad7b 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -261,6 +261,7 @@ enum { CXT_FIXUP_HP_530, CXT_FIXUP_CAP_MIX_AMP_5047, CXT_FIXUP_MUTE_LED_EAPD, + CXT_FIXUP_HP_DOCK, CXT_FIXUP_HP_SPECTRE, CXT_FIXUP_HP_GATE_MIC, }; @@ -778,6 +779,14 @@ static const struct hda_fixup cxt_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = cxt_fixup_mute_led_eapd, }, + [CXT_FIXUP_HP_DOCK] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x16, 0x21011020 }, /* line-out */ + { 0x18, 0x2181103f }, /* line-in */ + { } + } + }, [CXT_FIXUP_HP_SPECTRE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { @@ -839,6 +848,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC), SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC), + SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK), SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE), SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC), SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN), @@ -871,6 +881,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = { { .id = CXT_PINCFG_LEMOTE_A1205, .name = "lemote-a1205" }, { .id = CXT_FIXUP_OLPC_XO, .name = "olpc-xo" }, { .id = CXT_FIXUP_MUTE_LED_EAPD, .name = "mute-led-eapd" }, + { .id = CXT_FIXUP_HP_DOCK, .name = "hp-dock" }, {} }; diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 4e112221d82546..299835d1fbaadb 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -4847,6 +4847,7 @@ enum { ALC286_FIXUP_HP_GPIO_LED, ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY, ALC280_FIXUP_HP_DOCK_PINS, + ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED, ALC280_FIXUP_HP_9480M, ALC288_FIXUP_DELL_HEADSET_MODE, ALC288_FIXUP_DELL1_MIC_NO_PRESENCE, @@ -4857,6 +4858,7 @@ enum { ALC292_FIXUP_DISABLE_AAMIX, ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK, ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, + ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE, ALC275_FIXUP_DELL_XPS, ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE, ALC293_FIXUP_LENOVO_SPK_NOISE, @@ -5388,6 +5390,16 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC280_FIXUP_HP_GPIO4 }, + [ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x1b, 0x21011020 }, /* line-out */ + { 0x18, 0x2181103f }, /* line-in */ + { }, + }, + .chained = true, + .chain_id = ALC269_FIXUP_HP_GPIO_MIC1_LED + }, [ALC280_FIXUP_HP_9480M] = { .type = HDA_FIXUP_FUNC, .v.func = alc280_fixup_hp_9480m, @@ -5459,6 +5471,15 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MODE }, + [ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x18, 0x01a1913c }, /* use as headset mic, without its own jack detect */ + { } + }, + .chained = true, + .chain_id = ALC269_FIXUP_HEADSET_MODE + }, [ALC275_FIXUP_DELL_XPS] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { @@ -5531,7 +5552,7 @@ static const struct hda_fixup alc269_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = alc298_fixup_speaker_volume, .chained = true, - .chain_id = ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, + .chain_id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE, }, 
[ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = { .type = HDA_FIXUP_PINS, @@ -5647,7 +5668,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x2256, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x2257, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x2259, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), - SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), + SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x2260, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x2263, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x2264, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), @@ -5816,6 +5837,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = { {.id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC, .name = "headset-mode-no-hp-mic"}, {.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"}, {.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"}, + {.id = ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED, .name = "hp-dock-gpio-mic1-led"}, {.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"}, {.id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, .name = "dell-headset-dock"}, {.id = ALC283_FIXUP_CHROME_BOOK, .name = "alc283-dac-wcaps"}, @@ -6090,6 +6112,8 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { ALC295_STANDARD_PINS, {0x17, 0x21014040}, {0x18, 0x21a19050}), + SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, + ALC295_STANDARD_PINS), SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, ALC298_STANDARD_PINS, {0x17, 0x90170110}), diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c index ec1067a679da40..08b1399d1da2b8 100644 --- a/sound/soc/amd/acp-pcm-dma.c +++ b/sound/soc/amd/acp-pcm-dma.c @@ -89,7 +89,7 @@ static void acp_reg_write(u32 val, void __iomem *acp_mmio, u32 reg) writel(val, acp_mmio + (reg * 4)); } -/* Configure a given dma channel parameters - enable/disble, +/* Configure a given dma channel parameters - enable/disable, * number of descriptors, priority */ static void config_acp_dma_channel(void __iomem *acp_mmio, u8 ch_num, diff --git a/sound/soc/atmel/atmel-classd.c b/sound/soc/atmel/atmel-classd.c index 89ac5f5a93eb31..7ae46c2647d453 100644 --- a/sound/soc/atmel/atmel-classd.c +++ b/sound/soc/atmel/atmel-classd.c @@ -349,7 +349,7 @@ static int atmel_classd_codec_dai_digital_mute(struct snd_soc_dai *codec_dai, } #define CLASSD_ACLK_RATE_11M2896_MPY_8 (112896 * 100 * 8) -#define CLASSD_ACLK_RATE_12M288_MPY_8 (12228 * 1000 * 8) +#define CLASSD_ACLK_RATE_12M288_MPY_8 (12288 * 1000 * 8) static struct { int rate; diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c index 78fca8acd3ec0a..fd272a40485b07 100644 --- a/sound/soc/codecs/hdac_hdmi.c +++ b/sound/soc/codecs/hdac_hdmi.c @@ -1534,21 +1534,20 @@ static void hdac_hdmi_eld_notify_cb(void *aptr, int port, int pipe) pin->mst_capable = false; /* if not MST, default is port[0] */ hport = &pin->ports[0]; - goto out; } else { for (i = 0; i < pin->num_ports; i++) { pin->mst_capable = true; if (pin->ports[i].id == pipe) { hport = &pin->ports[i]; - goto out; + break; } } } + + if (hport) + hdac_hdmi_present_sense(pin, hport); } -out: - if (pin && hport) - hdac_hdmi_present_sense(pin, hport); } static struct i915_audio_component_audio_ops aops = { @@ -1998,7 +1997,7 @@ static int hdac_hdmi_dev_remove(struct hdac_ext_device *edev) struct hdac_hdmi_pin *pin, *pin_next; struct hdac_hdmi_cvt *cvt, 
*cvt_next; struct hdac_hdmi_pcm *pcm, *pcm_next; - struct hdac_hdmi_port *port; + struct hdac_hdmi_port *port, *port_next; int i; snd_soc_unregister_codec(&edev->hdac.dev); @@ -2008,8 +2007,9 @@ static int hdac_hdmi_dev_remove(struct hdac_ext_device *edev) if (list_empty(&pcm->port_list)) continue; - list_for_each_entry(port, &pcm->port_list, head) - port = NULL; + list_for_each_entry_safe(port, port_next, + &pcm->port_list, head) + list_del(&port->head); list_del(&pcm->head); kfree(pcm); diff --git a/sound/soc/codecs/rt5665.c b/sound/soc/codecs/rt5665.c index 324461e985b391..476135ec57268c 100644 --- a/sound/soc/codecs/rt5665.c +++ b/sound/soc/codecs/rt5665.c @@ -1241,7 +1241,7 @@ static irqreturn_t rt5665_irq(int irq, void *data) static void rt5665_jd_check_handler(struct work_struct *work) { struct rt5665_priv *rt5665 = container_of(work, struct rt5665_priv, - calibrate_work.work); + jd_check_work.work); if (snd_soc_read(rt5665->codec, RT5665_AJD1_CTRL) & 0x0010) { /* jack out */ @@ -2252,7 +2252,7 @@ static const char * const rt5665_if2_1_adc_in_src[] = { static const SOC_ENUM_SINGLE_DECL( rt5665_if2_1_adc_in_enum, RT5665_DIG_INF2_DATA, - RT5665_IF3_ADC_IN_SFT, rt5665_if2_1_adc_in_src); + RT5665_IF2_1_ADC_IN_SFT, rt5665_if2_1_adc_in_src); static const struct snd_kcontrol_new rt5665_if2_1_adc_in_mux = SOC_DAPM_ENUM("IF2_1 ADC IN Source", rt5665_if2_1_adc_in_enum); @@ -3178,6 +3178,9 @@ static const struct snd_soc_dapm_route rt5665_dapm_routes[] = { {"DAC Mono Right Filter", NULL, "DAC Mono R ASRC", is_using_asrc}, {"DAC Stereo1 Filter", NULL, "DAC STO1 ASRC", is_using_asrc}, {"DAC Stereo2 Filter", NULL, "DAC STO2 ASRC", is_using_asrc}, + {"I2S1 ASRC", NULL, "CLKDET"}, + {"I2S2 ASRC", NULL, "CLKDET"}, + {"I2S3 ASRC", NULL, "CLKDET"}, /*Vref*/ {"Mic Det Power", NULL, "Vref2"}, @@ -3912,6 +3915,7 @@ static const struct snd_soc_dapm_route rt5665_dapm_routes[] = { {"Mono MIX", "MONOVOL Switch", "MONOVOL"}, {"Mono Amp", NULL, "Mono MIX"}, {"Mono Amp", NULL, "Vref2"}, + {"Mono Amp", NULL, "Vref3"}, {"Mono Amp", NULL, "CLKDET SYS"}, {"Mono Amp", NULL, "CLKDET MONO"}, {"Mono Playback", "Switch", "Mono Amp"}, @@ -4798,7 +4802,7 @@ static int rt5665_i2c_probe(struct i2c_client *i2c, /* Enhance performance*/ regmap_update_bits(rt5665->regmap, RT5665_PWR_ANLG_1, RT5665_HP_DRIVER_MASK | RT5665_LDO1_DVO_MASK, - RT5665_HP_DRIVER_5X | RT5665_LDO1_DVO_09); + RT5665_HP_DRIVER_5X | RT5665_LDO1_DVO_12); INIT_DELAYED_WORK(&rt5665->jack_detect_work, rt5665_jack_detect_handler); diff --git a/sound/soc/codecs/rt5665.h b/sound/soc/codecs/rt5665.h index 12f7080a0d3c3f..a30f5e6d062882 100644 --- a/sound/soc/codecs/rt5665.h +++ b/sound/soc/codecs/rt5665.h @@ -1106,7 +1106,7 @@ #define RT5665_HP_DRIVER_MASK (0x3 << 2) #define RT5665_HP_DRIVER_1X (0x0 << 2) #define RT5665_HP_DRIVER_3X (0x1 << 2) -#define RT5665_HP_DRIVER_5X (0x2 << 2) +#define RT5665_HP_DRIVER_5X (0x3 << 2) #define RT5665_LDO1_DVO_MASK (0x3) #define RT5665_LDO1_DVO_09 (0x0) #define RT5665_LDO1_DVO_10 (0x1) diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c index d151224ffcca41..bbdb72f73df19d 100644 --- a/sound/soc/codecs/wm_adsp.c +++ b/sound/soc/codecs/wm_adsp.c @@ -899,7 +899,10 @@ static int wm_coeff_put(struct snd_kcontrol *kctl, mutex_lock(&ctl->dsp->pwr_lock); - memcpy(ctl->cache, p, ctl->len); + if (ctl->flags & WMFW_CTL_FLAG_VOLATILE) + ret = -EPERM; + else + memcpy(ctl->cache, p, ctl->len); ctl->set = 1; if (ctl->enabled && ctl->dsp->running) @@ -926,6 +929,8 @@ static int wm_coeff_tlv_put(struct snd_kcontrol *kctl, 
ctl->set = 1; if (ctl->enabled && ctl->dsp->running) ret = wm_coeff_write_control(ctl, ctl->cache, size); + else if (ctl->flags & WMFW_CTL_FLAG_VOLATILE) + ret = -EPERM; } mutex_unlock(&ctl->dsp->pwr_lock); @@ -947,7 +952,7 @@ static int wm_coeff_put_acked(struct snd_kcontrol *kctl, mutex_lock(&ctl->dsp->pwr_lock); - if (ctl->enabled) + if (ctl->enabled && ctl->dsp->running) ret = wm_coeff_write_acked_control(ctl, val); else ret = -EPERM; diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c index 4924575d2e95d3..343b291fc3725f 100644 --- a/sound/soc/generic/simple-card-utils.c +++ b/sound/soc/generic/simple-card-utils.c @@ -115,6 +115,7 @@ int asoc_simple_card_parse_clk(struct device *dev, clk = devm_get_clk_from_child(dev, node, NULL); if (!IS_ERR(clk)) { simple_dai->sysclk = clk_get_rate(clk); + simple_dai->clk = clk; } else if (!of_property_read_u32(node, "system-clock-frequency", &val)) { simple_dai->sysclk = val; } else { diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c index ed58b5b3555a86..2dbfb1b24ef4a6 100644 --- a/sound/soc/intel/skylake/skl-topology.c +++ b/sound/soc/intel/skylake/skl-topology.c @@ -512,7 +512,7 @@ static int skl_tplg_set_module_init_data(struct snd_soc_dapm_widget *w) if (bc->set_params != SKL_PARAM_INIT) continue; - mconfig->formats_config.caps = (u32 *)&bc->params; + mconfig->formats_config.caps = (u32 *)bc->params; mconfig->formats_config.caps_size = bc->size; break; diff --git a/sound/soc/mediatek/Kconfig b/sound/soc/mediatek/Kconfig index 05cf809cf9e146..d7013bde6f45fc 100644 --- a/sound/soc/mediatek/Kconfig +++ b/sound/soc/mediatek/Kconfig @@ -13,7 +13,7 @@ config SND_SOC_MT2701 config SND_SOC_MT2701_CS42448 tristate "ASoc Audio driver for MT2701 with CS42448 codec" - depends on SND_SOC_MT2701 + depends on SND_SOC_MT2701 && I2C select SND_SOC_CS42XX8_I2C select SND_SOC_BT_SCO help diff --git a/sound/soc/sh/rcar/cmd.c b/sound/soc/sh/rcar/cmd.c index abb5eaac854a9b..7d92a24b7cfa55 100644 --- a/sound/soc/sh/rcar/cmd.c +++ b/sound/soc/sh/rcar/cmd.c @@ -31,23 +31,24 @@ static int rsnd_cmd_init(struct rsnd_mod *mod, struct rsnd_mod *mix = rsnd_io_to_mod_mix(io); struct device *dev = rsnd_priv_to_dev(priv); u32 data; + u32 path[] = { + [1] = 1 << 0, + [5] = 1 << 8, + [6] = 1 << 12, + [9] = 1 << 15, + }; if (!mix && !dvc) return 0; + if (ARRAY_SIZE(path) < rsnd_mod_id(mod) + 1) + return -ENXIO; + if (mix) { struct rsnd_dai *rdai; struct rsnd_mod *src; struct rsnd_dai_stream *tio; int i; - u32 path[] = { - [0] = 0, - [1] = 1 << 0, - [2] = 0, - [3] = 0, - [4] = 0, - [5] = 1 << 8 - }; /* * it is assuming that integrater is well understanding about @@ -70,16 +71,19 @@ static int rsnd_cmd_init(struct rsnd_mod *mod, } else { struct rsnd_mod *src = rsnd_io_to_mod_src(io); - u32 path[] = { - [0] = 0x30000, - [1] = 0x30001, - [2] = 0x40000, - [3] = 0x10000, - [4] = 0x20000, - [5] = 0x40100 + u8 cmd_case[] = { + [0] = 0x3, + [1] = 0x3, + [2] = 0x4, + [3] = 0x1, + [4] = 0x2, + [5] = 0x4, + [6] = 0x1, + [9] = 0x2, }; - data = path[rsnd_mod_id(src)]; + data = path[rsnd_mod_id(src)] | + cmd_case[rsnd_mod_id(src)] << 16; } dev_dbg(dev, "ctu/mix path = 0x%08x", data); diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c index 1f405c83386759..241cb3b08a0755 100644 --- a/sound/soc/sh/rcar/dma.c +++ b/sound/soc/sh/rcar/dma.c @@ -454,6 +454,20 @@ static u32 rsnd_dmapp_read(struct rsnd_dma *dma, u32 reg) return ioread32(rsnd_dmapp_addr(dmac, dma, reg)); } +static void rsnd_dmapp_bset(struct 
rsnd_dma *dma, u32 data, u32 mask, u32 reg) +{ + struct rsnd_mod *mod = rsnd_mod_get(dma); + struct rsnd_priv *priv = rsnd_mod_to_priv(mod); + struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv); + void __iomem *addr = rsnd_dmapp_addr(dmac, dma, reg); + u32 val = ioread32(addr); + + val &= ~mask; + val |= (data & mask); + + iowrite32(val, addr); +} + static int rsnd_dmapp_stop(struct rsnd_mod *mod, struct rsnd_dai_stream *io, struct rsnd_priv *priv) @@ -461,10 +475,10 @@ static int rsnd_dmapp_stop(struct rsnd_mod *mod, struct rsnd_dma *dma = rsnd_mod_to_dma(mod); int i; - rsnd_dmapp_write(dma, 0, PDMACHCR); + rsnd_dmapp_bset(dma, 0, PDMACHCR_DE, PDMACHCR); for (i = 0; i < 1024; i++) { - if (0 == rsnd_dmapp_read(dma, PDMACHCR)) + if (0 == (rsnd_dmapp_read(dma, PDMACHCR) & PDMACHCR_DE)) return 0; udelay(1); } diff --git a/sound/soc/sh/rcar/ssiu.c b/sound/soc/sh/rcar/ssiu.c index 4e817c8a18c0bb..14fafdaf1395f9 100644 --- a/sound/soc/sh/rcar/ssiu.c +++ b/sound/soc/sh/rcar/ssiu.c @@ -64,7 +64,11 @@ static int rsnd_ssiu_init(struct rsnd_mod *mod, mask1 = (1 << 4) | (1 << 20); /* mask sync bit */ mask2 = (1 << 4); /* mask sync bit */ val1 = val2 = 0; - if (rsnd_ssi_is_pin_sharing(io)) { + if (id == 8) { + /* + * SSI8 pin is sharing with SSI7, nothing to do. + */ + } else if (rsnd_ssi_is_pin_sharing(io)) { int shift = -1; switch (id) { diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 6dca408faae334..2722bb0c557310 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -3326,7 +3326,10 @@ static int snd_soc_platform_drv_pcm_new(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_platform *platform = rtd->platform; - return platform->driver->pcm_new(rtd); + if (platform->driver->pcm_new) + return platform->driver->pcm_new(rtd); + else + return 0; } static void snd_soc_platform_drv_pcm_free(struct snd_pcm *pcm) @@ -3334,7 +3337,8 @@ static void snd_soc_platform_drv_pcm_free(struct snd_pcm *pcm) struct snd_soc_pcm_runtime *rtd = pcm->private_data; struct snd_soc_platform *platform = rtd->platform; - platform->driver->pcm_free(pcm); + if (platform->driver->pcm_free) + platform->driver->pcm_free(pcm); } /** diff --git a/sound/soc/sti/uniperif_reader.c b/sound/soc/sti/uniperif_reader.c index 5992c6ab3833ef..93a8df6ed880ea 100644 --- a/sound/soc/sti/uniperif_reader.c +++ b/sound/soc/sti/uniperif_reader.c @@ -349,6 +349,8 @@ static int uni_reader_startup(struct snd_pcm_substream *substream, struct uniperif *reader = priv->dai_data.uni; int ret; + reader->substream = substream; + if (!UNIPERIF_TYPE_IS_TDM(reader)) return 0; @@ -378,6 +380,7 @@ static void uni_reader_shutdown(struct snd_pcm_substream *substream, /* Stop the reader */ uni_reader_stop(reader); } + reader->substream = NULL; } static const struct snd_soc_dai_ops uni_reader_dai_ops = { diff --git a/sound/soc/sunxi/sun8i-codec.c b/sound/soc/sunxi/sun8i-codec.c index b92bdc8361af3a..7527ba29a5a0ea 100644 --- a/sound/soc/sunxi/sun8i-codec.c +++ b/sound/soc/sunxi/sun8i-codec.c @@ -259,25 +259,20 @@ static int sun8i_codec_hw_params(struct snd_pcm_substream *substream, return 0; } -static const struct snd_kcontrol_new sun8i_output_left_mixer_controls[] = { - SOC_DAPM_SINGLE("LSlot 0", SUN8I_DAC_MXR_SRC, - SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF1DA0L, 1, 0), - SOC_DAPM_SINGLE("LSlot 1", SUN8I_DAC_MXR_SRC, - SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF1DA1L, 1, 0), - SOC_DAPM_SINGLE("DACL", SUN8I_DAC_MXR_SRC, - SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF2DACL, 1, 0), - SOC_DAPM_SINGLE("ADCL", SUN8I_DAC_MXR_SRC, - SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_ADCL, 1, 0), 
-}; - -static const struct snd_kcontrol_new sun8i_output_right_mixer_controls[] = { - SOC_DAPM_SINGLE("RSlot 0", SUN8I_DAC_MXR_SRC, +static const struct snd_kcontrol_new sun8i_dac_mixer_controls[] = { + SOC_DAPM_DOUBLE("AIF1 Slot 0 Digital DAC Playback Switch", + SUN8I_DAC_MXR_SRC, + SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF1DA0L, SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_AIF1DA0R, 1, 0), - SOC_DAPM_SINGLE("RSlot 1", SUN8I_DAC_MXR_SRC, + SOC_DAPM_DOUBLE("AIF1 Slot 1 Digital DAC Playback Switch", + SUN8I_DAC_MXR_SRC, + SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF1DA1L, SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_AIF1DA1R, 1, 0), - SOC_DAPM_SINGLE("DACR", SUN8I_DAC_MXR_SRC, + SOC_DAPM_DOUBLE("AIF2 Digital DAC Playback Switch", SUN8I_DAC_MXR_SRC, + SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF2DACL, SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_AIF2DACR, 1, 0), - SOC_DAPM_SINGLE("ADCR", SUN8I_DAC_MXR_SRC, + SOC_DAPM_DOUBLE("ADC Digital DAC Playback Switch", SUN8I_DAC_MXR_SRC, + SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_ADCL, SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_ADCR, 1, 0), }; @@ -286,19 +281,21 @@ static const struct snd_soc_dapm_widget sun8i_codec_dapm_widgets[] = { SND_SOC_DAPM_SUPPLY("DAC", SUN8I_DAC_DIG_CTRL, SUN8I_DAC_DIG_CTRL_ENDA, 0, NULL, 0), - /* Analog DAC */ - SND_SOC_DAPM_DAC("Digital Left DAC", "Playback", SUN8I_AIF1_DACDAT_CTRL, - SUN8I_AIF1_DACDAT_CTRL_AIF1_DA0L_ENA, 0), - SND_SOC_DAPM_DAC("Digital Right DAC", "Playback", SUN8I_AIF1_DACDAT_CTRL, - SUN8I_AIF1_DACDAT_CTRL_AIF1_DA0R_ENA, 0), + /* Analog DAC AIF */ + SND_SOC_DAPM_AIF_IN("AIF1 Slot 0 Left", "Playback", 0, + SUN8I_AIF1_DACDAT_CTRL, + SUN8I_AIF1_DACDAT_CTRL_AIF1_DA0L_ENA, 0), + SND_SOC_DAPM_AIF_IN("AIF1 Slot 0 Right", "Playback", 0, + SUN8I_AIF1_DACDAT_CTRL, + SUN8I_AIF1_DACDAT_CTRL_AIF1_DA0R_ENA, 0), /* DAC Mixers */ - SND_SOC_DAPM_MIXER("Left DAC Mixer", SND_SOC_NOPM, 0, 0, - sun8i_output_left_mixer_controls, - ARRAY_SIZE(sun8i_output_left_mixer_controls)), - SND_SOC_DAPM_MIXER("Right DAC Mixer", SND_SOC_NOPM, 0, 0, - sun8i_output_right_mixer_controls, - ARRAY_SIZE(sun8i_output_right_mixer_controls)), + SND_SOC_DAPM_MIXER("Left Digital DAC Mixer", SND_SOC_NOPM, 0, 0, + sun8i_dac_mixer_controls, + ARRAY_SIZE(sun8i_dac_mixer_controls)), + SND_SOC_DAPM_MIXER("Right Digital DAC Mixer", SND_SOC_NOPM, 0, 0, + sun8i_dac_mixer_controls, + ARRAY_SIZE(sun8i_dac_mixer_controls)), /* Clocks */ SND_SOC_DAPM_SUPPLY("MODCLK AFI1", SUN8I_MOD_CLK_ENA, @@ -321,8 +318,6 @@ static const struct snd_soc_dapm_widget sun8i_codec_dapm_widgets[] = { SUN8I_MOD_RST_CTL_AIF1, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("RST DAC", SUN8I_MOD_RST_CTL, SUN8I_MOD_RST_CTL_DAC, 0, NULL, 0), - - SND_SOC_DAPM_OUTPUT("HP"), }; static const struct snd_soc_dapm_route sun8i_codec_dapm_routes[] = { @@ -338,16 +333,14 @@ static const struct snd_soc_dapm_route sun8i_codec_dapm_routes[] = { { "DAC", NULL, "MODCLK DAC" }, /* DAC Routes */ - { "Digital Left DAC", NULL, "DAC" }, - { "Digital Right DAC", NULL, "DAC" }, + { "AIF1 Slot 0 Right", NULL, "DAC" }, + { "AIF1 Slot 0 Left", NULL, "DAC" }, /* DAC Mixer Routes */ - { "Left DAC Mixer", "LSlot 0", "Digital Left DAC"}, - { "Right DAC Mixer", "RSlot 0", "Digital Right DAC"}, - - /* End of route : HP out */ - { "HP", NULL, "Left DAC Mixer" }, - { "HP", NULL, "Right DAC Mixer" }, + { "Left Digital DAC Mixer", "AIF1 Slot 0 Digital DAC Playback Switch", + "AIF1 Slot 0 Left"}, + { "Right Digital DAC Mixer", "AIF1 Slot 0 Digital DAC Playback Switch", + "AIF1 Slot 0 Right"}, }; static struct snd_soc_dai_ops sun8i_codec_dai_ops = { diff --git a/sound/x86/Kconfig b/sound/x86/Kconfig index 
84c8f8fc597cd6..8adf4d1bd46e71 100644 --- a/sound/x86/Kconfig +++ b/sound/x86/Kconfig @@ -1,6 +1,7 @@ menuconfig SND_X86 - tristate "X86 sound devices" + bool "X86 sound devices" depends on X86 + default y ---help--- X86 sound devices that don't fall under SoC or PCI categories diff --git a/tools/include/linux/filter.h b/tools/include/linux/filter.h index 122153b16ea4ee..390d7c9685fd61 100644 --- a/tools/include/linux/filter.h +++ b/tools/include/linux/filter.h @@ -168,6 +168,16 @@ .off = OFF, \ .imm = 0 }) +/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */ + +#define BPF_STX_XADD(SIZE, DST, SRC, OFF) \ + ((struct bpf_insn) { \ + .code = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = OFF, \ + .imm = 0 }) + /* Memory store, *(uint *) (dst_reg + off16) = imm32 */ #define BPF_ST_MEM(SIZE, DST, OFF, IMM) \ diff --git a/tools/include/linux/log2.h b/tools/include/linux/log2.h index 41446668ccce18..d5677d39c1e4c8 100644 --- a/tools/include/linux/log2.h +++ b/tools/include/linux/log2.h @@ -12,12 +12,6 @@ #ifndef _TOOLS_LINUX_LOG2_H #define _TOOLS_LINUX_LOG2_H -/* - * deal with unrepresentable constant logarithms - */ -extern __attribute__((const, noreturn)) -int ____ilog2_NaN(void); - /* * non-constant log of base 2 calculators * - the arch may override these in asm/bitops.h if they can be implemented @@ -78,7 +72,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) #define ilog2(n) \ ( \ __builtin_constant_p(n) ? ( \ - (n) < 1 ? ____ilog2_NaN() : \ + (n) < 2 ? 0 : \ (n) & (1ULL << 63) ? 63 : \ (n) & (1ULL << 62) ? 62 : \ (n) & (1ULL << 61) ? 61 : \ @@ -141,10 +135,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) (n) & (1ULL << 4) ? 4 : \ (n) & (1ULL << 3) ? 3 : \ (n) & (1ULL << 2) ? 2 : \ - (n) & (1ULL << 1) ? 1 : \ - (n) & (1ULL << 0) ? 0 : \ - ____ilog2_NaN() \ - ) : \ + 1 ) : \ (sizeof(n) <= 4) ? \ __ilog2_u32(n) : \ __ilog2_u64(n) \ diff --git a/tools/include/uapi/linux/bpf_perf_event.h b/tools/include/uapi/linux/bpf_perf_event.h new file mode 100644 index 00000000000000..0674272598205c --- /dev/null +++ b/tools/include/uapi/linux/bpf_perf_event.h @@ -0,0 +1,18 @@ +/* Copyright (c) 2016 Facebook + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + */ +#ifndef _UAPI__LINUX_BPF_PERF_EVENT_H__ +#define _UAPI__LINUX_BPF_PERF_EVENT_H__ + +#include +#include + +struct bpf_perf_event_data { + struct pt_regs regs; + __u64 sample_period; +}; + +#endif /* _UAPI__LINUX_BPF_PERF_EVENT_H__ */ diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c index 11c8d9bc762ef0..5d19fdf80292c2 100644 --- a/tools/lguest/lguest.c +++ b/tools/lguest/lguest.c @@ -1387,7 +1387,7 @@ static bool pci_data_iowrite(u16 port, u32 mask, u32 val) /* Allow writing to any other BAR, or expansion ROM */ iowrite(portoff, val, mask, &d->config_words[reg]); return true; - /* We let them overide latency timer and cacheline size */ + /* We let them override latency timer and cacheline size */ } else if (&d->config_words[reg] == (void *)&d->config.cacheline_size) { /* Only let them change the first two fields. 
*/ if (mask == 0xFFFFFFFF) diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile index e2efddf1023177..1f5300e56b44dc 100644 --- a/tools/lib/bpf/Makefile +++ b/tools/lib/bpf/Makefile @@ -132,7 +132,7 @@ else Q = @ endif -# Disable command line variables (CFLAGS) overide from top +# Disable command line variables (CFLAGS) override from top # level Makefile (perf), otherwise build Makefile will get # the same command line setup. MAKEOVERRIDES= diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile index 47076b15eebeaa..9b8555ea3459c8 100644 --- a/tools/lib/traceevent/Makefile +++ b/tools/lib/traceevent/Makefile @@ -135,7 +135,7 @@ else Q = @ endif -# Disable command line variables (CFLAGS) overide from top +# Disable command line variables (CFLAGS) override from top # level Makefile (perf), otherwise build Makefile will get # the same command line setup. MAKEOVERRIDES= diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h index 66342804161c80..0c03538df74c01 100644 --- a/tools/lib/traceevent/event-parse.h +++ b/tools/lib/traceevent/event-parse.h @@ -140,7 +140,7 @@ struct pevent_plugin_option { * struct pevent_plugin_option PEVENT_PLUGIN_OPTIONS[] = { * { * .name = "option-name", - * .plugin_alias = "overide-file-name", (optional) + * .plugin_alias = "override-file-name", (optional) * .description = "description of option to show users", * }, * { diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c index 4cfdbb5b696783..066086dd59a801 100644 --- a/tools/objtool/builtin-check.c +++ b/tools/objtool/builtin-check.c @@ -805,11 +805,20 @@ static struct rela *find_switch_table(struct objtool_file *file, insn->jump_dest->offset > orig_insn->offset)) break; + /* look for a relocation which references .rodata */ text_rela = find_rela_by_dest_range(insn->sec, insn->offset, insn->len); - if (text_rela && text_rela->sym == file->rodata->sym) - return find_rela_by_dest(file->rodata, - text_rela->addend); + if (!text_rela || text_rela->sym != file->rodata->sym) + continue; + + /* + * Make sure the .rodata address isn't associated with a + * symbol. gcc jump tables are anonymous data. 
+ */ + if (find_symbol_containing(file->rodata, text_rela->addend)) + continue; + + return find_rela_by_dest(file->rodata, text_rela->addend); } return NULL; diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c index 0d7983ac63ef9e..d897702ce74278 100644 --- a/tools/objtool/elf.c +++ b/tools/objtool/elf.c @@ -85,6 +85,18 @@ struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset) return NULL; } +struct symbol *find_symbol_containing(struct section *sec, unsigned long offset) +{ + struct symbol *sym; + + list_for_each_entry(sym, &sec->symbol_list, list) + if (sym->type != STT_SECTION && + offset >= sym->offset && offset < sym->offset + sym->len) + return sym; + + return NULL; +} + struct rela *find_rela_by_dest_range(struct section *sec, unsigned long offset, unsigned int len) { diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h index aa1ff6596684f9..731973e1a3f5eb 100644 --- a/tools/objtool/elf.h +++ b/tools/objtool/elf.h @@ -79,6 +79,7 @@ struct elf { struct elf *elf_open(const char *name); struct section *find_section_by_name(struct elf *elf, const char *name); struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset); +struct symbol *find_symbol_containing(struct section *sec, unsigned long offset); struct rela *find_rela_by_dest(struct section *sec, unsigned long offset); struct rela *find_rela_by_dest_range(struct section *sec, unsigned long offset, unsigned int len); diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c index 7913363bde5c04..4f3c758d875d6c 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c +++ b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c @@ -31,7 +31,7 @@ #error Instruction buffer size too small #endif -/* Based on branch_type() from perf_event_intel_lbr.c */ +/* Based on branch_type() from arch/x86/events/intel/lbr.c */ static void intel_pt_insn_decoder(struct insn *insn, struct intel_pt_insn *intel_pt_insn) { diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 70e389bc4af71a..9b4d8ba22fed85 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -202,7 +202,7 @@ void symbols__fixup_end(struct rb_root *symbols) /* Last entry */ if (curr->end == curr->start) - curr->end = roundup(curr->start, 4096); + curr->end = roundup(curr->start, 4096) + 4096; } void __map_groups__fixup_end(struct map_groups *mg, enum map_type type) diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8 index 03cb639b292ecc..fedca328532621 100644 --- a/tools/power/x86/turbostat/turbostat.8 +++ b/tools/power/x86/turbostat/turbostat.8 @@ -16,9 +16,9 @@ idle power-state statistics, temperature and power on X86 processors. There are two ways to invoke turbostat. The first method is to supply a \fBcommand\fP, which is forked and statistics are printed -upon its completion. +in one-shot upon its completion. The second method is to omit the command, -and turbostat displays statistics every 5 seconds. +and turbostat displays statistics every 5 seconds interval. The 5-second interval can be changed using the --interval option. .PP Some information is not available on older processors. @@ -28,9 +28,10 @@ name as necessary to disambiguate it from others is necessary. Note that option .PP \fB--add attributes\fP add column with counter having specified 'attributes'. The 'location' attribute is required, all others are optional. 
.nf - location: {\fBmsrDDD\fP | \fBmsr0xXXX\fP} + location: {\fBmsrDDD\fP | \fBmsr0xXXX\fP | \fB/sys/path...\fP} msrDDD is a decimal offset, eg. msr16 msr0xXXX is a hex offset, eg. msr0x10 + /sys/path... is an absolute path to a sysfs attribute scope: {\fBcpu\fP | \fBcore\fP | \fBpackage\fP} sample and print the counter for every cpu, core, or package. @@ -45,12 +46,21 @@ name as necessary to disambiguate it from others is necessary. Note that option 'delta' shows the difference in values during the measurement interval. 'percent' shows the delta as a percentage of the cycles elapsed. default: delta + + name: "name_string" + Any string that does not match a key-word above is used + as the column header. .fi .PP +\fB--cpu cpu-set\fP limit output to system summary plus the specified cpu-set. If cpu-set is the string "core", then the system summary plus the first CPU in each core are printed -- eg. subsequent HT siblings are not printed. Or if cpu-set is the string "package", then the system summary plus the first CPU in each package is printed. Otherwise, the system summary plus the specified set of CPUs are printed. The cpu-set is ordered from low to high, comma delimited with ".." and "-" permitted to denote a range. eg. 1,2,8,14..17,21-44 +.PP +\fB--hide column\fP do not show the specified columns. May be invoked multiple times, or with a comma-separated list of column names. Use "--hide sysfs" to hide the sysfs statistics columns as a group. +.PP +\fB--show column\fP show only the specified columns. May be invoked multiple times, or with a comma-separated list of column names. Use "--show sysfs" to show the sysfs statistics columns as a group. +.PP \fB--Dump\fP displays the raw counter values. .PP -\fB--debug\fP displays additional system configuration information. Invoking this parameter -more than once may also enable internal turbostat debug information. +\fB--quiet\fP Do not decode and print the system configuration header information. .PP \fB--interval seconds\fP overrides the default 5.0 second measurement interval. .PP @@ -61,9 +71,7 @@ The file is truncated if it already exists, and it is created if it does not exi .PP \fB--Joules\fP displays energy in Joules, rather than dividing Joules by time to print power in Watts. .PP -\fB--Package\fP limits output to the system summary plus the 1st thread in each Package. -.PP -\fB--processor\fP limits output to the system summary plus the 1st thread in each processor of each package. Ie. it skips hyper-threaded siblings. +\fB--list\fP display column header names available for use by --show and --hide, then exit. .PP \fB--Summary\fP limits output to a 1-line System Summary for each interval. .PP @@ -74,24 +82,25 @@ The file is truncated if it already exists, and it is created if it does not exi The \fBcommand\fP parameter forks \fBcommand\fP, and upon its exit, displays the statistics gathered since it was forked. .PP -.SH DEFAULT FIELD DESCRIPTIONS +.SH ROW DESCRIPTIONS +The system configuration dump (if --quiet is not used) is followed by statistics. The first row of the statistics labels the content of each column (below). The second row of statistics is the system summary line. The system summary line has a '-' in the columns for the Package, Core, and CPU. The contents of the system summary line depends on the type of column. Columns that count items (eg. IRQ) show the sum across all CPUs in the system. Columns that show a percentage show the average across all CPUs in the system. 
Columns that dump raw MSR values simply show 0 in the summary. After the system summary row, each row describes a specific Package/Core/CPU. Note that if the --cpu parameter is used to limit which specific CPUs are displayed, turbostat will still collect statistics for all CPUs in the system and will still show the system summary for all CPUs in the system. +.SH COLUMN DESCRIPTIONS .nf +\fBCore\fP processor core number. Note that multiple CPUs per core indicate support for Intel(R) Hyper-Threading Technology (HT). \fBCPU\fP Linux CPU (logical processor) number. Yes, it is okay that on many systems the CPUs are not listed in numerical order -- for efficiency reasons, turbostat runs in topology order, so HT siblings appear together. -\fBAVG_MHz\fP number of cycles executed divided by time elapsed. -\fBBusy%\fP percent of the interval that the CPU retired instructions, aka. % of time in "C0" state. -\fBBzy_MHz\fP average clock rate while the CPU was busy (in "c0" state). +\fBPackage\fP processor package number -- not present on systems with a single processor package. +\fBAvg_MHz\fP number of cycles executed divided by time elapsed. Note that this includes idle-time when 0 instructions are executed. +\fBBusy%\fP percent of the measurement interval that the CPU executes instructions, aka. % of time in "C0" state. +\fBBzy_MHz\fP average clock rate while the CPU was not idle (ie. in "c0" state). \fBTSC_MHz\fP average MHz that the TSC ran during the entire interval. -.fi -.PP -.SH DEBUG FIELD DESCRIPTIONS -.nf -\fBPackage\fP processor package number. -\fBCore\fP processor core number. -Note that multiple CPUs per core indicate support for Intel(R) Hyper-Threading Technology (HT). -\fBCPU%c1, CPU%c3, CPU%c6, CPU%c7\fP show the percentage residency in hardware core idle states. +\fBIRQ\fP The number of interrupts serviced by that CPU during the measurement interval. The system total line is the sum of interrupts serviced across all CPUs. turbostat parses /proc/interrupts to generate this summary. +\fBSMI\fP The number of System Management Interrupts serviced by that CPU during the measurement interval. While this counter is actually per-CPU, SMI are triggered on all processors, so the number should be the same for all CPUs. +\fBC1, C2, C3...\fP The number of times Linux requested the C1, C2, C3 idle state during the measurement interval. The system summary line shows the sum for all CPUs. These are C-state names as exported in /sys/devices/system/cpu/cpu*/cpuidle/state*/name. While their names are generic, their attributes are processor specific. The system description section of the output shows what MWAIT sub-states they are mapped to on each system. +\fBC1%, C2%, C3%\fP The residency percentage that Linux requested C1, C2, C3.... The system summary is the average of all CPUs in the system. Note that these are software, reflecting what was requested. The hardware counters reflect what was actually achieved. +\fBCPU%c1, CPU%c3, CPU%c6, CPU%c7\fP show the percentage residency in hardware core idle states. These numbers are from hardware residency counters. \fBCoreTmp\fP Degrees Celsius reported by the per-core Digital Thermal Sensor. \fBPkgTtmp\fP Degrees Celsius reported by the per-package Package Thermal Monitor. -\fBPkg%pc2, Pkg%pc3, Pkg%pc6, Pkg%pc7\fP percentage residency in hardware package idle states. +\fBPkg%pc2, Pkg%pc3, Pkg%pc6, Pkg%pc7\fP percentage residency in hardware package idle states. These numbers are from hardware residency counters. \fBPkgWatt\fP Watts consumed by the whole package.
\fBCorWatt\fP Watts consumed by the core part of the package. \fBGFXWatt\fP Watts consumed by the Graphics part of the package -- available only on client processors. @@ -99,51 +108,110 @@ Note that multiple CPUs per core indicate support for Intel(R) Hyper-Threading T \fBPKG_%\fP percent of the interval that RAPL throttling was active on the Package. \fBRAM_%\fP percent of the interval that RAPL throttling was active on DRAM. .fi +.SH TOO MUCH INFORMATION EXAMPLE +By default, turbostat dumps all possible information -- a system configuration header, followed by columns for all counters. +This is ideal for remote debugging, use the "--out" option to save everything to a text file, and get that file to the expert helping you debug. .PP -.SH PERIODIC EXAMPLE -Without any parameters, turbostat displays statistics ever 5 seconds. -Periodic output goes to stdout, by default, unless --out is used to specify an output file. -The 5-second interval can be changed with th "-i sec" option. -Or a command may be specified as in "FORK EXAMPLE" below. +When you are not interested in all that information, and there are several ways to see only what you want. First the "--quiet" option will skip the configuration information, and turbostat will show only the counter columns. Second, you can reduce the columns with the "--hide" and "--show" options. If you use the "--show" option, then turbostat will show only the columns you list. If you use the "--hide" option, turbostat will show all columns, except the ones you list. +.PP +To find out what columns are available for --show and --hide, the "--list" option is available. For convenience, the special strings "sysfs" can be used to refer to all of the sysfs C-state counters at once: +.nf +sudo ./turbostat --show sysfs --quiet sleep 10 +10.003837 sec + C1 C1E C3 C6 C7s C1% C1E% C3% C6% C7s% + 4 21 2 2 459 0.14 0.82 0.00 0.00 98.93 + 1 17 2 2 130 0.00 0.02 0.00 0.00 99.80 + 0 0 0 0 31 0.00 0.00 0.00 0.00 99.95 + 2 1 0 0 52 1.14 6.49 0.00 0.00 92.21 + 1 2 0 0 52 0.00 0.08 0.00 0.00 99.86 + 0 0 0 0 71 0.00 0.00 0.00 0.00 99.89 + 0 0 0 0 25 0.00 0.00 0.00 0.00 99.96 + 0 0 0 0 74 0.00 0.00 0.00 0.00 99.94 + 0 1 0 0 24 0.00 0.00 0.00 0.00 99.84 +.fi +.PP +.SH ONE SHOT COMMAND EXAMPLE +If turbostat is invoked with a command, it will fork that command +and output the statistics gathered after the command exits. +In this case, turbostat output goes to stderr, by default. +Output can instead be saved to a file using the --out option. +In this example, the "sleep 10" command is forked, and turbostat waits for it to complete before saving all statistics into "ts.out". Note that "sleep 10" is not part of turbostat, but is simply an example of a command that turbostat can fork. The "ts.out" file is what you want to edit in a very wide window, paste into a spreadsheet, or attach to a bugzilla entry. + .nf -[root@hsw]# ./turbostat - CPU Avg_MHz Busy% Bzy_MHz TSC_MHz - - 488 12.51 3898 3498 - 0 0 0.01 3885 3498 - 4 3897 99.99 3898 3498 - 1 0 0.00 3861 3498 - 5 0 0.00 3882 3498 - 2 1 0.02 3894 3498 - 6 2 0.06 3898 3498 - 3 0 0.00 3849 3498 - 7 0 0.00 3877 3498 +[root@hsw]# ./turbostat -o ts.out sleep 10 +[root@hsw]# +.fi +.SH PERIODIC INTERVAL EXAMPLE +Without a command to fork, turbostat displays statistics ever 5 seconds. +Periodic output goes to stdout, by default, unless --out is used to specify an output file. +The 5-second interval can be changed with the "-i sec" option. 
+.nf +sudo ./turbostat --quiet --hide sysfs,IRQ,SMI,CoreTmp,PkgTmp,GFX%rc6,GFXMHz,PkgWatt,CorWatt,GFXWatt + Core CPU Avg_MHz Busy% Bzy_MHz TSC_MHz CPU%c1 CPU%c3 CPU%c6 CPU%c7 + - - 488 12.52 3900 3498 12.50 0.00 0.00 74.98 + 0 0 5 0.13 3900 3498 99.87 0.00 0.00 0.00 + 0 4 3897 99.99 3900 3498 0.01 + 1 1 0 0.00 3856 3498 0.01 0.00 0.00 99.98 + 1 5 0 0.00 3861 3498 0.01 + 2 2 1 0.02 3889 3498 0.03 0.00 0.00 99.95 + 2 6 0 0.00 3863 3498 0.05 + 3 3 0 0.01 3869 3498 0.02 0.00 0.00 99.97 + 3 7 0 0.00 3878 3498 0.03 + Core CPU Avg_MHz Busy% Bzy_MHz TSC_MHz CPU%c1 CPU%c3 CPU%c6 CPU%c7 + - - 491 12.59 3900 3498 12.42 0.00 0.00 74.99 + 0 0 27 0.69 3900 3498 99.31 0.00 0.00 0.00 + 0 4 3898 99.99 3900 3498 0.01 + 1 1 0 0.00 3883 3498 0.01 0.00 0.00 99.99 + 1 5 0 0.00 3898 3498 0.01 + 2 2 0 0.01 3889 3498 0.02 0.00 0.00 99.98 + 2 6 0 0.00 3889 3498 0.02 + 3 3 0 0.00 3856 3498 0.01 0.00 0.00 99.99 + 3 7 0 0.00 3897 3498 0.01 .fi -.SH DEBUG EXAMPLE -The "--debug" option prints additional system information before measurements: +This example also shows the use of the --hide option to skip columns that are not wanted. +Note that cpu4 in this example is 99.99% busy, while the other CPUs are all under 1% busy. +Notice that cpu4's HT sibling is cpu0, which is under 1% busy, but can get into CPU%c1 only, +because its cpu4's activity on shared hardware keeps it from entering a deeper C-state. -The first row of statistics is a summary for the entire system. -For residency % columns, the summary is a weighted average. -For Temperature columns, the summary is the column maximum. -For Watts columns, the summary is a system total. -Subsequent rows show per-CPU statistics. +.SH SYSTEM CONFIGURATION INFORMATION EXAMPLE + +By default, turbostat always dumps system configuration information +before taking measurements. In the example above, "--quiet" is used +to suppress that output. Here is an example of the configuration information: .nf -turbostat version 4.1 10-Feb, 2015 - Len Brown +turbostat version 2017.02.15 - Len Brown CPUID(0): GenuineIntel 13 CPUID levels; family:model:stepping 0x6:3c:3 (6:60:3) -CPUID(6): APERF, DTS, PTM, EPB +CPUID(1): SSE3 MONITOR - EIST TM2 TSC MSR ACPI-TM TM +CPUID(6): APERF, TURBO, DTS, PTM, No-HWP, No-HWPnotify, No-HWPwindow, No-HWPepp, No-HWPpkg, EPB +cpu4: MSR_IA32_MISC_ENABLE: 0x00850089 (TCC EIST No-MWAIT PREFETCH TURBO) +CPUID(7): No-SGX +cpu4: MSR_MISC_PWR_MGMT: 0x00400000 (ENable-EIST_Coordination DISable-EPB DISable-OOB) RAPL: 3121 sec. 
Joule Counter Range, at 84 Watts -cpu0: MSR_NHM_PLATFORM_INFO: 0x80838f3012300 -8 * 100 = 800 MHz max efficiency -35 * 100 = 3500 MHz TSC frequency -cpu0: MSR_IA32_POWER_CTL: 0x0004005d (C1E auto-promotion: DISabled) -cpu0: MSR_NHM_SNB_PKG_CST_CFG_CTL: 0x1e000400 (UNdemote-C3, UNdemote-C1, demote-C3, demote-C1, UNlocked: pkg-cstate-limit=0: pc0) -cpu0: MSR_TURBO_RATIO_LIMIT: 0x25262727 -37 * 100 = 3700 MHz max turbo 4 active cores -38 * 100 = 3800 MHz max turbo 3 active cores -39 * 100 = 3900 MHz max turbo 2 active cores -39 * 100 = 3900 MHz max turbo 1 active cores +cpu4: MSR_PLATFORM_INFO: 0x80838f3012300 +8 * 100.0 = 800.0 MHz max efficiency frequency +35 * 100.0 = 3500.0 MHz base frequency +cpu4: MSR_IA32_POWER_CTL: 0x0004005d (C1E auto-promotion: DISabled) +cpu4: MSR_TURBO_RATIO_LIMIT: 0x25262727 +37 * 100.0 = 3700.0 MHz max turbo 4 active cores +38 * 100.0 = 3800.0 MHz max turbo 3 active cores +39 * 100.0 = 3900.0 MHz max turbo 2 active cores +39 * 100.0 = 3900.0 MHz max turbo 1 active cores +cpu4: MSR_CONFIG_TDP_NOMINAL: 0x00000023 (base_ratio=35) +cpu4: MSR_CONFIG_TDP_LEVEL_1: 0x00000000 () +cpu4: MSR_CONFIG_TDP_LEVEL_2: 0x00000000 () +cpu4: MSR_CONFIG_TDP_CONTROL: 0x80000000 ( lock=1) +cpu4: MSR_TURBO_ACTIVATION_RATIO: 0x00000000 (MAX_NON_TURBO_RATIO=0 lock=0) +cpu4: MSR_PKG_CST_CONFIG_CONTROL: 0x1e000400 (UNdemote-C3, UNdemote-C1, demote-C3, demote-C1, UNlocked: pkg-cstate-limit=0: pc0) +cpu4: POLL: CPUIDLE CORE POLL IDLE +cpu4: C1: MWAIT 0x00 +cpu4: C1E: MWAIT 0x01 +cpu4: C3: MWAIT 0x10 +cpu4: C6: MWAIT 0x20 +cpu4: C7s: MWAIT 0x32 +cpu4: MSR_MISC_FEATURE_CONTROL: 0x00000000 (L2-Prefetch L2-Prefetch-pair L1-Prefetch L1-IP-Prefetch) cpu0: MSR_IA32_ENERGY_PERF_BIAS: 0x00000006 (balanced) -cpu0: MSR_CORE_PERF_LIMIT_REASONS, 0x31200000 (Active: ) (Logged: Auto-HWP, Amps, MultiCoreTurbo, Transitions, ) +cpu0: MSR_CORE_PERF_LIMIT_REASONS, 0x31200000 (Active: ) (Logged: Transitions, MultiCoreTurbo, Amps, Auto-HWP, ) cpu0: MSR_GFX_PERF_LIMIT_REASONS, 0x00000000 (Active: ) (Logged: ) cpu0: MSR_RING_PERF_LIMIT_REASONS, 0x0d000000 (Active: ) (Logged: Amps, PkgPwrL1, PkgPwrL2, ) cpu0: MSR_RAPL_POWER_UNIT: 0x000a0e03 (0.125000 Watts, 0.000061 Joules, 0.000977 sec.) 
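The units on the MSR_RAPL_POWER_UNIT line above follow from the raw value 0x000a0e03: each bit-field holds an exponent N, and the corresponding unit is 1/2^N. A minimal sketch of that arithmetic, assuming the usual MSR_RAPL_POWER_UNIT field layout (bits 3:0 power, 12:8 energy, 19:16 time), which this patch does not itself spell out:

#include <stdio.h>

int main(void)
{
	unsigned long long msr = 0x000a0e03ULL;	/* raw MSR_RAPL_POWER_UNIT from the dump above */

	/* Each field is an exponent N; the unit is 1/2^N. */
	double power_unit  = 1.0 / (1 << (msr & 0xF));		/* bits  3:0  -> 1/2^3  = 0.125000 W */
	double energy_unit = 1.0 / (1 << ((msr >> 8) & 0x1F));	/* bits 12:8  -> 1/2^14 = 0.000061 J */
	double time_unit   = 1.0 / (1 << ((msr >> 16) & 0xF));	/* bits 19:16 -> 1/2^10 = 0.000977 s */

	printf("%.6f Watts, %.6f Joules, %.6f sec.\n",
	       power_unit, energy_unit, time_unit);

	/*
	 * The 32-bit energy counter wraps after 2^32 ticks, so the
	 * "Joule Counter Range" is 2^32 * energy_unit Joules; at the
	 * 84 Watts quoted above that is roughly 3121 seconds.
	 */
	printf("%.0f sec. Joule Counter Range, at 84 Watts\n",
	       4294967296.0 * energy_unit / 84.0);

	return 0;
}

Running it reproduces the three unit values printed in the example, and the derived 3121 second figure matches the Joule Counter Range reported on the RAPL line.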
@@ -158,23 +226,14 @@ cpu0: MSR_PP1_POLICY: 0
 cpu0: MSR_PP1_POWER_LIMIT: 0x00000000 (UNlocked)
 cpu0: GFX Limit: DISabled (0.000000 Watts, 0.000977 sec, clamp DISabled)
 cpu0: MSR_IA32_TEMPERATURE_TARGET: 0x00641400 (100 C)
-cpu0: MSR_IA32_PACKAGE_THERM_STATUS: 0x88340800 (48 C)
-cpu0: MSR_IA32_THERM_STATUS: 0x88340000 (48 C +/- 1)
-cpu1: MSR_IA32_THERM_STATUS: 0x88440000 (32 C +/- 1)
-cpu2: MSR_IA32_THERM_STATUS: 0x88450000 (31 C +/- 1)
-cpu3: MSR_IA32_THERM_STATUS: 0x88490000 (27 C +/- 1)
-     Core     CPU Avg_MHz   Busy% Bzy_MHz TSC_MHz     SMI  CPU%c1  CPU%c3  CPU%c6  CPU%c7 CoreTmp  PkgTmp PkgWatt CorWatt GFXWatt
-       -       - 493 12.64 3898 3498 0 12.64 0.00 0.00 74.72 47 47 21.62 13.74 0.00
-       0       0 4 0.11 3894 3498 0 99.89 0.00 0.00 0.00 47 47 21.62 13.74 0.00
-       0       4 3897 99.98 3898 3498 0 0.02
-       1       1 7 0.17 3887 3498 0 0.04 0.00 0.00 99.79 32
-       1       5 0 0.00 3885 3498 0 0.21
-       2       2 29 0.76 3895 3498 0 0.10 0.01 0.01 99.13 32
-       2       6 2 0.06 3896 3498 0 0.80
-       3       3 1 0.02 3832 3498 0 0.03 0.00 0.00 99.95 28
-       3       7 0 0.00 3879 3498 0 0.04
-^C
-
+cpu0: MSR_IA32_PACKAGE_THERM_STATUS: 0x884c0800 (24 C)
+cpu0: MSR_IA32_THERM_STATUS: 0x884c0000 (24 C +/- 1)
+cpu1: MSR_IA32_THERM_STATUS: 0x88510000 (19 C +/- 1)
+cpu2: MSR_IA32_THERM_STATUS: 0x884e0000 (22 C +/- 1)
+cpu3: MSR_IA32_THERM_STATUS: 0x88510000 (19 C +/- 1)
+cpu4: MSR_PKGC3_IRTL: 0x00008842 (valid, 67584 ns)
+cpu4: MSR_PKGC6_IRTL: 0x00008873 (valid, 117760 ns)
+cpu4: MSR_PKGC7_IRTL: 0x00008891 (valid, 148480 ns)
 .fi
 The \fBmax efficiency\fP frequency, a.k.a. Low Frequency Mode, is the frequency
 available at the minimum package voltage.  The \fBTSC frequency\fP is the base
@@ -184,42 +243,22 @@ should be sustainable on all CPUs indefinitely,
 given nominal power and cooling.
 The remaining rows show what maximum turbo frequency is possible
 depending on the number of idle cores.  Note that not all information is
 available on all processors.
-.PP
-The --debug option adds additional columns to the measurement ouput, including CPU idle power-state residency processor temperature sensor readinds.
-See the field definitions above.
-.SH FORK EXAMPLE
-If turbostat is invoked with a command, it will fork that command
-and output the statistics gathered after the command exits.
-In this case, turbostat output goes to stderr, by default.
-Output can instead be saved to a file using the --out option.
-eg. Here a cycle soaker is run on 1 CPU (see %c0) for a few seconds
-until ^C while the other CPUs are mostly idle:
-
+.SH ADD COUNTER EXAMPLE
+Here we limit turbostat to showing just the CPU number for cpu0 - cpu3.
+We add a counter showing the 32-bit raw value of MSR 0x199 (MSR_IA32_PERF_CTL),
+labeling it with the column header "PRF_CTRL", and displaying it only once,
+after the conclusion of a 0.1 second sleep.
 .nf
-root@hsw: turbostat cat /dev/zero > /dev/null
-^C
-     CPU Avg_MHz   Busy% Bzy_MHz TSC_MHz
-       - 482 12.51 3854 3498
-       0 0 0.01 1960 3498
-       4 0 0.00 2128 3498
-       1 0 0.00 3003 3498
-       5 3854 99.98 3855 3498
-       2 0 0.01 3504 3498
-       6 3 0.08 3884 3498
-       3 0 0.00 2553 3498
-       7 0 0.00 2126 3498
-10.783983 sec
+sudo ./turbostat --quiet --cpu 0-3 --show CPU --add msr0x199,u32,raw,PRF_CTRL sleep .1
+0.101604 sec
+CPU PRF_CTRL
+- 0x00000000
+0 0x00000c00
+1 0x00000800
+2 0x00000a00
+3 0x00000800
 .fi
-Above the cycle soaker drives cpu5 up its 3.9 GHz turbo limit.
-The first row shows the average MHz and Busy% across all the processors in the system.
-
-Note that the Avg_MHz column reflects the total number of cycles executed
-divided by the measurement interval.
If the Busy% column is 100%, -then the processor was running at that speed the entire interval. -The Avg_MHz multiplied by the Busy% results in the Bzy_MHz -- -which is the average frequency while the processor was executing -- -not including any non-busy idle time. .SH NOTES diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index f13f61b065c699..828dccd3f01eaf 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -49,17 +49,14 @@ FILE *outf; int *fd_percpu; struct timespec interval_ts = {5, 0}; unsigned int debug; +unsigned int quiet; +unsigned int sums_need_wide_columns; unsigned int rapl_joules; unsigned int summary_only; +unsigned int list_header_only; unsigned int dump_only; -unsigned int do_nhm_cstates; unsigned int do_snb_cstates; unsigned int do_knl_cstates; -unsigned int do_pc2; -unsigned int do_pc3; -unsigned int do_pc6; -unsigned int do_pc7; -unsigned int do_c8_c9_c10; unsigned int do_skl_residency; unsigned int do_slm_cstates; unsigned int use_c1_residency_msr; @@ -71,25 +68,19 @@ unsigned int units = 1000000; /* MHz etc */ unsigned int genuine_intel; unsigned int has_invariant_tsc; unsigned int do_nhm_platform_info; +unsigned int no_MSR_MISC_PWR_MGMT; unsigned int aperf_mperf_multiplier = 1; -int do_irq = 1; -int do_smi; double bclk; double base_hz; unsigned int has_base_hz; double tsc_tweak = 1.0; -unsigned int show_pkg; -unsigned int show_core; -unsigned int show_cpu; unsigned int show_pkg_only; unsigned int show_core_only; char *output_buffer, *outp; unsigned int do_rapl; unsigned int do_dts; unsigned int do_ptm; -unsigned int do_gfx_rc6_ms; unsigned long long gfx_cur_rc6_ms; -unsigned int do_gfx_mhz; unsigned int gfx_cur_mhz; unsigned int tcc_activation_temp; unsigned int tcc_activation_temp_override; @@ -109,6 +100,7 @@ unsigned int has_hwp_notify; /* IA32_HWP_INTERRUPT */ unsigned int has_hwp_activity_window; /* IA32_HWP_REQUEST[bits 41:32] */ unsigned int has_hwp_epp; /* IA32_HWP_REQUEST[bits 31:24] */ unsigned int has_hwp_pkg; /* IA32_HWP_REQUEST_PKG */ +unsigned int has_misc_feature_control; #define RAPL_PKG (1 << 0) /* 0x610 MSR_PKG_POWER_LIMIT */ @@ -148,34 +140,38 @@ unsigned int has_hwp_pkg; /* IA32_HWP_REQUEST_PKG */ * Usually truncated to 7 characters, but also handles 18 columns for raw 64-bit counters */ #define NAME_BYTES 20 +#define PATH_BYTES 128 int backwards_count; char *progname; -cpu_set_t *cpu_present_set, *cpu_affinity_set; -size_t cpu_present_setsize, cpu_affinity_setsize; +#define CPU_SUBSET_MAXCPUS 1024 /* need to use before probe... 
*/ +cpu_set_t *cpu_present_set, *cpu_affinity_set, *cpu_subset; +size_t cpu_present_setsize, cpu_affinity_setsize, cpu_subset_size; +#define MAX_ADDED_COUNTERS 16 struct thread_data { unsigned long long tsc; unsigned long long aperf; unsigned long long mperf; unsigned long long c1; - unsigned int irq_count; + unsigned long long irq_count; unsigned int smi_count; unsigned int cpu_id; unsigned int flags; #define CPU_IS_FIRST_THREAD_IN_CORE 0x2 #define CPU_IS_FIRST_CORE_IN_PACKAGE 0x4 - unsigned long long counter[1]; + unsigned long long counter[MAX_ADDED_COUNTERS]; } *thread_even, *thread_odd; struct core_data { unsigned long long c3; unsigned long long c6; unsigned long long c7; + unsigned long long mc6_us; /* duplicate as per-core for now, even though per module */ unsigned int core_temp_c; unsigned int core_id; - unsigned long long counter[1]; + unsigned long long counter[MAX_ADDED_COUNTERS]; } *core_even, *core_odd; struct pkg_data { @@ -200,7 +196,7 @@ struct pkg_data { unsigned int rapl_pkg_perf_status; /* MSR_PKG_PERF_STATUS */ unsigned int rapl_dram_perf_status; /* MSR_DRAM_PERF_STATUS */ unsigned int pkg_temp_c; - unsigned long long counter[1]; + unsigned long long counter[MAX_ADDED_COUNTERS]; } *package_even, *package_odd; #define ODD_COUNTERS thread_odd, core_odd, package_odd @@ -215,22 +211,27 @@ struct pkg_data { #define GET_PKG(pkg_base, pkg_no) (pkg_base + pkg_no) enum counter_scope {SCOPE_CPU, SCOPE_CORE, SCOPE_PACKAGE}; -enum counter_type {COUNTER_CYCLES, COUNTER_SECONDS}; +enum counter_type {COUNTER_ITEMS, COUNTER_CYCLES, COUNTER_SECONDS, COUNTER_USEC}; enum counter_format {FORMAT_RAW, FORMAT_DELTA, FORMAT_PERCENT}; struct msr_counter { unsigned int msr_num; char name[NAME_BYTES]; + char path[PATH_BYTES]; unsigned int width; enum counter_type type; enum counter_format format; struct msr_counter *next; + unsigned int flags; +#define FLAGS_HIDE (1 << 0) +#define FLAGS_SHOW (1 << 1) +#define SYSFS_PERCPU (1 << 1) }; struct sys_counters { - unsigned int thread_counter_bytes; - unsigned int core_counter_bytes; - unsigned int package_counter_bytes; + unsigned int added_thread_counters; + unsigned int added_core_counters; + unsigned int added_package_counters; struct msr_counter *tp; struct msr_counter *cp; struct msr_counter *pp; @@ -334,147 +335,333 @@ int get_msr(int cpu, off_t offset, unsigned long long *msr) retval = pread(get_msr_fd(cpu), msr, sizeof(*msr), offset); if (retval != sizeof *msr) - err(-1, "msr %d offset 0x%llx read failed", cpu, (unsigned long long)offset); + err(-1, "cpu%d: msr offset 0x%llx read failed", cpu, (unsigned long long)offset); return 0; } /* - * Example Format w/ field column widths: - * - * Package Core CPU Avg_MHz Bzy_MHz TSC_MHz IRQ SMI Busy% CPU_%c1 CPU_%c3 CPU_%c6 CPU_%c7 ThreadC CoreTmp CoreCnt PkgTmp GFXMHz Pkg%pc2 Pkg%pc3 Pkg%pc6 Pkg%pc7 PkgWatt CorWatt GFXWatt PkgCnt - * 12345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678 + * Each string in this array is compared in --show and --hide cmdline. + * Thus, strings that are proper sub-sets must follow their more specific peers. 
+ */ +struct msr_counter bic[] = { + { 0x0, "Package" }, + { 0x0, "Avg_MHz" }, + { 0x0, "Bzy_MHz" }, + { 0x0, "TSC_MHz" }, + { 0x0, "IRQ" }, + { 0x0, "SMI", "", 32, 0, FORMAT_DELTA, NULL}, + { 0x0, "Busy%" }, + { 0x0, "CPU%c1" }, + { 0x0, "CPU%c3" }, + { 0x0, "CPU%c6" }, + { 0x0, "CPU%c7" }, + { 0x0, "ThreadC" }, + { 0x0, "CoreTmp" }, + { 0x0, "CoreCnt" }, + { 0x0, "PkgTmp" }, + { 0x0, "GFX%rc6" }, + { 0x0, "GFXMHz" }, + { 0x0, "Pkg%pc2" }, + { 0x0, "Pkg%pc3" }, + { 0x0, "Pkg%pc6" }, + { 0x0, "Pkg%pc7" }, + { 0x0, "Pkg%pc8" }, + { 0x0, "Pkg%pc9" }, + { 0x0, "Pkg%pc10" }, + { 0x0, "PkgWatt" }, + { 0x0, "CorWatt" }, + { 0x0, "GFXWatt" }, + { 0x0, "PkgCnt" }, + { 0x0, "RAMWatt" }, + { 0x0, "PKG_%" }, + { 0x0, "RAM_%" }, + { 0x0, "Pkg_J" }, + { 0x0, "Cor_J" }, + { 0x0, "GFX_J" }, + { 0x0, "RAM_J" }, + { 0x0, "Core" }, + { 0x0, "CPU" }, + { 0x0, "Mod%c6" }, + { 0x0, "sysfs" }, +}; + +#define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter)) +#define BIC_Package (1ULL << 0) +#define BIC_Avg_MHz (1ULL << 1) +#define BIC_Bzy_MHz (1ULL << 2) +#define BIC_TSC_MHz (1ULL << 3) +#define BIC_IRQ (1ULL << 4) +#define BIC_SMI (1ULL << 5) +#define BIC_Busy (1ULL << 6) +#define BIC_CPU_c1 (1ULL << 7) +#define BIC_CPU_c3 (1ULL << 8) +#define BIC_CPU_c6 (1ULL << 9) +#define BIC_CPU_c7 (1ULL << 10) +#define BIC_ThreadC (1ULL << 11) +#define BIC_CoreTmp (1ULL << 12) +#define BIC_CoreCnt (1ULL << 13) +#define BIC_PkgTmp (1ULL << 14) +#define BIC_GFX_rc6 (1ULL << 15) +#define BIC_GFXMHz (1ULL << 16) +#define BIC_Pkgpc2 (1ULL << 17) +#define BIC_Pkgpc3 (1ULL << 18) +#define BIC_Pkgpc6 (1ULL << 19) +#define BIC_Pkgpc7 (1ULL << 20) +#define BIC_Pkgpc8 (1ULL << 21) +#define BIC_Pkgpc9 (1ULL << 22) +#define BIC_Pkgpc10 (1ULL << 23) +#define BIC_PkgWatt (1ULL << 24) +#define BIC_CorWatt (1ULL << 25) +#define BIC_GFXWatt (1ULL << 26) +#define BIC_PkgCnt (1ULL << 27) +#define BIC_RAMWatt (1ULL << 28) +#define BIC_PKG__ (1ULL << 29) +#define BIC_RAM__ (1ULL << 30) +#define BIC_Pkg_J (1ULL << 31) +#define BIC_Cor_J (1ULL << 32) +#define BIC_GFX_J (1ULL << 33) +#define BIC_RAM_J (1ULL << 34) +#define BIC_Core (1ULL << 35) +#define BIC_CPU (1ULL << 36) +#define BIC_Mod_c6 (1ULL << 37) +#define BIC_sysfs (1ULL << 38) + +unsigned long long bic_enabled = 0xFFFFFFFFFFFFFFFFULL; +unsigned long long bic_present = BIC_sysfs; + +#define DO_BIC(COUNTER_NAME) (bic_enabled & bic_present & COUNTER_NAME) +#define BIC_PRESENT(COUNTER_BIT) (bic_present |= COUNTER_BIT) +#define BIC_NOT_PRESENT(COUNTER_BIT) (bic_present &= ~COUNTER_BIT) + +#define MAX_DEFERRED 16 +char *deferred_skip_names[MAX_DEFERRED]; +int deferred_skip_index; + +/* + * HIDE_LIST - hide this list of counters, show the rest [default] + * SHOW_LIST - show this list of counters, hide the rest */ +enum show_hide_mode { SHOW_LIST, HIDE_LIST } global_show_hide_mode = HIDE_LIST; -void print_header(void) +void help(void) { - struct msr_counter *mp; + fprintf(outf, + "Usage: turbostat [OPTIONS][(--interval seconds) | COMMAND ...]\n" + "\n" + "Turbostat forks the specified COMMAND and prints statistics\n" + "when COMMAND completes.\n" + "If no COMMAND is specified, turbostat wakes every 5-seconds\n" + "to print statistics, until interrupted.\n" + "--add add a counter\n" + " eg. 
--add msr0x10,u64,cpu,delta,MY_TSC\n" + "--cpu cpu-set limit output to summary plus cpu-set:\n" + " {core | package | j,k,l..m,n-p }\n" + "--quiet skip decoding system configuration header\n" + "--interval sec Override default 5-second measurement interval\n" + "--help print this help message\n" + "--list list column headers only\n" + "--out file create or truncate \"file\" for all output\n" + "--version print version information\n" + "\n" + "For more help, run \"man turbostat\"\n"); +} - if (show_pkg) - outp += sprintf(outp, "\tPackage"); - if (show_core) - outp += sprintf(outp, "\tCore"); - if (show_cpu) - outp += sprintf(outp, "\tCPU"); - if (has_aperf) - outp += sprintf(outp, "\tAvg_MHz"); - if (has_aperf) - outp += sprintf(outp, "\tBusy%%"); - if (has_aperf) - outp += sprintf(outp, "\tBzy_MHz"); - outp += sprintf(outp, "\tTSC_MHz"); +/* + * bic_lookup + * for all the strings in comma separate name_list, + * set the approprate bit in return value. + */ +unsigned long long bic_lookup(char *name_list, enum show_hide_mode mode) +{ + int i; + unsigned long long retval = 0; - if (!debug) - goto done; + while (name_list) { + char *comma; - if (do_irq) - outp += sprintf(outp, "\tIRQ"); - if (do_smi) - outp += sprintf(outp, "\tSMI"); - - if (do_nhm_cstates) - outp += sprintf(outp, "\tCPU%%c1"); - if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates) - outp += sprintf(outp, "\tCPU%%c3"); - if (do_nhm_cstates) - outp += sprintf(outp, "\tCPU%%c6"); - if (do_snb_cstates) - outp += sprintf(outp, "\tCPU%%c7"); + comma = strchr(name_list, ','); + + if (comma) + *comma = '\0'; + + for (i = 0; i < MAX_BIC; ++i) { + if (!strcmp(name_list, bic[i].name)) { + retval |= (1ULL << i); + break; + } + } + if (i == MAX_BIC) { + if (mode == SHOW_LIST) { + fprintf(stderr, "Invalid counter name: %s\n", name_list); + exit(-1); + } + deferred_skip_names[deferred_skip_index++] = name_list; + if (debug) + fprintf(stderr, "deferred \"%s\"\n", name_list); + if (deferred_skip_index >= MAX_DEFERRED) { + fprintf(stderr, "More than max %d un-recognized --skip options '%s'\n", + MAX_DEFERRED, name_list); + help(); + exit(1); + } + } + + name_list = comma; + if (name_list) + name_list++; + + } + return retval; +} + + +void print_header(char *delim) +{ + struct msr_counter *mp; + int printed = 0; + + if (DO_BIC(BIC_Package)) + outp += sprintf(outp, "%sPackage", (printed++ ? delim : "")); + if (DO_BIC(BIC_Core)) + outp += sprintf(outp, "%sCore", (printed++ ? delim : "")); + if (DO_BIC(BIC_CPU)) + outp += sprintf(outp, "%sCPU", (printed++ ? delim : "")); + if (DO_BIC(BIC_Avg_MHz)) + outp += sprintf(outp, "%sAvg_MHz", (printed++ ? delim : "")); + if (DO_BIC(BIC_Busy)) + outp += sprintf(outp, "%sBusy%%", (printed++ ? delim : "")); + if (DO_BIC(BIC_Bzy_MHz)) + outp += sprintf(outp, "%sBzy_MHz", (printed++ ? delim : "")); + if (DO_BIC(BIC_TSC_MHz)) + outp += sprintf(outp, "%sTSC_MHz", (printed++ ? delim : "")); + + if (DO_BIC(BIC_IRQ)) { + if (sums_need_wide_columns) + outp += sprintf(outp, "%s IRQ", (printed++ ? delim : "")); + else + outp += sprintf(outp, "%sIRQ", (printed++ ? delim : "")); + } + + if (DO_BIC(BIC_SMI)) + outp += sprintf(outp, "%sSMI", (printed++ ? delim : "")); for (mp = sys.tp; mp; mp = mp->next) { + if (mp->format == FORMAT_RAW) { if (mp->width == 64) - outp += sprintf(outp, "\t%18.18s", mp->name); + outp += sprintf(outp, "%s%18.18s", (printed++ ? delim : ""), mp->name); else - outp += sprintf(outp, "\t%10.10s", mp->name); + outp += sprintf(outp, "%s%10.10s", (printed++ ? 
delim : ""), mp->name); } else { - outp += sprintf(outp, "\t%-7.7s", mp->name); + if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns) + outp += sprintf(outp, "%s%8s", (printed++ ? delim : ""), mp->name); + else + outp += sprintf(outp, "%s%s", (printed++ ? delim : ""), mp->name); } } - if (do_dts) - outp += sprintf(outp, "\tCoreTmp"); + if (DO_BIC(BIC_CPU_c1)) + outp += sprintf(outp, "%sCPU%%c1", (printed++ ? delim : "")); + if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates) + outp += sprintf(outp, "%sCPU%%c3", (printed++ ? delim : "")); + if (DO_BIC(BIC_CPU_c6)) + outp += sprintf(outp, "%sCPU%%c6", (printed++ ? delim : "")); + if (DO_BIC(BIC_CPU_c7)) + outp += sprintf(outp, "%sCPU%%c7", (printed++ ? delim : "")); + + if (DO_BIC(BIC_Mod_c6)) + outp += sprintf(outp, "%sMod%%c6", (printed++ ? delim : "")); + + if (DO_BIC(BIC_CoreTmp)) + outp += sprintf(outp, "%sCoreTmp", (printed++ ? delim : "")); for (mp = sys.cp; mp; mp = mp->next) { if (mp->format == FORMAT_RAW) { if (mp->width == 64) - outp += sprintf(outp, "\t%18.18s", mp->name); + outp += sprintf(outp, "%s%18.18s", delim, mp->name); else - outp += sprintf(outp, "\t%10.10s", mp->name); + outp += sprintf(outp, "%s%10.10s", delim, mp->name); } else { - outp += sprintf(outp, "\t%-7.7s", mp->name); + if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns) + outp += sprintf(outp, "%s%8s", delim, mp->name); + else + outp += sprintf(outp, "%s%s", delim, mp->name); } } - if (do_ptm) - outp += sprintf(outp, "\tPkgTmp"); + if (DO_BIC(BIC_PkgTmp)) + outp += sprintf(outp, "%sPkgTmp", (printed++ ? delim : "")); - if (do_gfx_rc6_ms) - outp += sprintf(outp, "\tGFX%%rc6"); + if (DO_BIC(BIC_GFX_rc6)) + outp += sprintf(outp, "%sGFX%%rc6", (printed++ ? delim : "")); - if (do_gfx_mhz) - outp += sprintf(outp, "\tGFXMHz"); + if (DO_BIC(BIC_GFXMHz)) + outp += sprintf(outp, "%sGFXMHz", (printed++ ? delim : "")); if (do_skl_residency) { - outp += sprintf(outp, "\tTotl%%C0"); - outp += sprintf(outp, "\tAny%%C0"); - outp += sprintf(outp, "\tGFX%%C0"); - outp += sprintf(outp, "\tCPUGFX%%"); - } - - if (do_pc2) - outp += sprintf(outp, "\tPkg%%pc2"); - if (do_pc3) - outp += sprintf(outp, "\tPkg%%pc3"); - if (do_pc6) - outp += sprintf(outp, "\tPkg%%pc6"); - if (do_pc7) - outp += sprintf(outp, "\tPkg%%pc7"); - if (do_c8_c9_c10) { - outp += sprintf(outp, "\tPkg%%pc8"); - outp += sprintf(outp, "\tPkg%%pc9"); - outp += sprintf(outp, "\tPk%%pc10"); + outp += sprintf(outp, "%sTotl%%C0", (printed++ ? delim : "")); + outp += sprintf(outp, "%sAny%%C0", (printed++ ? delim : "")); + outp += sprintf(outp, "%sGFX%%C0", (printed++ ? delim : "")); + outp += sprintf(outp, "%sCPUGFX%%", (printed++ ? delim : "")); } + if (DO_BIC(BIC_Pkgpc2)) + outp += sprintf(outp, "%sPkg%%pc2", (printed++ ? delim : "")); + if (DO_BIC(BIC_Pkgpc3)) + outp += sprintf(outp, "%sPkg%%pc3", (printed++ ? delim : "")); + if (DO_BIC(BIC_Pkgpc6)) + outp += sprintf(outp, "%sPkg%%pc6", (printed++ ? delim : "")); + if (DO_BIC(BIC_Pkgpc7)) + outp += sprintf(outp, "%sPkg%%pc7", (printed++ ? delim : "")); + if (DO_BIC(BIC_Pkgpc8)) + outp += sprintf(outp, "%sPkg%%pc8", (printed++ ? delim : "")); + if (DO_BIC(BIC_Pkgpc9)) + outp += sprintf(outp, "%sPkg%%pc9", (printed++ ? delim : "")); + if (DO_BIC(BIC_Pkgpc10)) + outp += sprintf(outp, "%sPk%%pc10", (printed++ ? 
delim : "")); + if (do_rapl && !rapl_joules) { - if (do_rapl & RAPL_PKG) - outp += sprintf(outp, "\tPkgWatt"); - if (do_rapl & RAPL_CORES_ENERGY_STATUS) - outp += sprintf(outp, "\tCorWatt"); - if (do_rapl & RAPL_GFX) - outp += sprintf(outp, "\tGFXWatt"); - if (do_rapl & RAPL_DRAM) - outp += sprintf(outp, "\tRAMWatt"); - if (do_rapl & RAPL_PKG_PERF_STATUS) - outp += sprintf(outp, "\tPKG_%%"); - if (do_rapl & RAPL_DRAM_PERF_STATUS) - outp += sprintf(outp, "\tRAM_%%"); + if (DO_BIC(BIC_PkgWatt)) + outp += sprintf(outp, "%sPkgWatt", (printed++ ? delim : "")); + if (DO_BIC(BIC_CorWatt)) + outp += sprintf(outp, "%sCorWatt", (printed++ ? delim : "")); + if (DO_BIC(BIC_GFXWatt)) + outp += sprintf(outp, "%sGFXWatt", (printed++ ? delim : "")); + if (DO_BIC(BIC_RAMWatt)) + outp += sprintf(outp, "%sRAMWatt", (printed++ ? delim : "")); + if (DO_BIC(BIC_PKG__)) + outp += sprintf(outp, "%sPKG_%%", (printed++ ? delim : "")); + if (DO_BIC(BIC_RAM__)) + outp += sprintf(outp, "%sRAM_%%", (printed++ ? delim : "")); } else if (do_rapl && rapl_joules) { - if (do_rapl & RAPL_PKG) - outp += sprintf(outp, "\tPkg_J"); - if (do_rapl & RAPL_CORES_ENERGY_STATUS) - outp += sprintf(outp, "\tCor_J"); - if (do_rapl & RAPL_GFX) - outp += sprintf(outp, "\tGFX_J"); - if (do_rapl & RAPL_DRAM) - outp += sprintf(outp, "\tRAM_J"); - if (do_rapl & RAPL_PKG_PERF_STATUS) - outp += sprintf(outp, "\tPKG_%%"); - if (do_rapl & RAPL_DRAM_PERF_STATUS) - outp += sprintf(outp, "\tRAM_%%"); + if (DO_BIC(BIC_Pkg_J)) + outp += sprintf(outp, "%sPkg_J", (printed++ ? delim : "")); + if (DO_BIC(BIC_Cor_J)) + outp += sprintf(outp, "%sCor_J", (printed++ ? delim : "")); + if (DO_BIC(BIC_GFX_J)) + outp += sprintf(outp, "%sGFX_J", (printed++ ? delim : "")); + if (DO_BIC(BIC_RAM_J)) + outp += sprintf(outp, "%sRAM_J", (printed++ ? delim : "")); + if (DO_BIC(BIC_PKG__)) + outp += sprintf(outp, "%sPKG_%%", (printed++ ? delim : "")); + if (DO_BIC(BIC_RAM__)) + outp += sprintf(outp, "%sRAM_%%", (printed++ ? 
delim : "")); } for (mp = sys.pp; mp; mp = mp->next) { if (mp->format == FORMAT_RAW) { if (mp->width == 64) - outp += sprintf(outp, "\t%18.18s", mp->name); + outp += sprintf(outp, "%s%18.18s", delim, mp->name); else - outp += sprintf(outp, "\t%10.10s", mp->name); + outp += sprintf(outp, "%s%10.10s", delim, mp->name); } else { - outp += sprintf(outp, "\t%-7.7s", mp->name); + if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns) + outp += sprintf(outp, "%s%8s", delim, mp->name); + else + outp += sprintf(outp, "%s%s", delim, mp->name); } } -done: outp += sprintf(outp, "\n"); } @@ -494,10 +681,10 @@ int dump_counters(struct thread_data *t, struct core_data *c, outp += sprintf(outp, "mperf: %016llX\n", t->mperf); outp += sprintf(outp, "c1: %016llX\n", t->c1); - if (do_irq) - outp += sprintf(outp, "IRQ: %08X\n", t->irq_count); - if (do_smi) - outp += sprintf(outp, "SMI: %08X\n", t->smi_count); + if (DO_BIC(BIC_IRQ)) + outp += sprintf(outp, "IRQ: %lld\n", t->irq_count); + if (DO_BIC(BIC_SMI)) + outp += sprintf(outp, "SMI: %d\n", t->smi_count); for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) { outp += sprintf(outp, "tADDED [%d] msr0x%x: %08llX\n", @@ -516,6 +703,7 @@ int dump_counters(struct thread_data *t, struct core_data *c, outp += sprintf(outp, "cADDED [%d] msr0x%x: %08llX\n", i, mp->msr_num, c->counter[i]); } + outp += sprintf(outp, "mc6_us: %016llX\n", c->mc6_us); } if (p) { @@ -527,11 +715,11 @@ int dump_counters(struct thread_data *t, struct core_data *c, outp += sprintf(outp, "CPU + GFX: %016llX\n", p->pkg_both_core_gfxe_c0); outp += sprintf(outp, "pc2: %016llX\n", p->pc2); - if (do_pc3) + if (DO_BIC(BIC_Pkgpc3)) outp += sprintf(outp, "pc3: %016llX\n", p->pc3); - if (do_pc6) + if (DO_BIC(BIC_Pkgpc6)) outp += sprintf(outp, "pc6: %016llX\n", p->pc6); - if (do_pc7) + if (DO_BIC(BIC_Pkgpc7)) outp += sprintf(outp, "pc7: %016llX\n", p->pc7); outp += sprintf(outp, "pc8: %016llX\n", p->pc8); outp += sprintf(outp, "pc9: %016llX\n", p->pc9); @@ -563,10 +751,12 @@ int dump_counters(struct thread_data *t, struct core_data *c, int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) { - double interval_float; + double interval_float, tsc; char *fmt8; int i; struct msr_counter *mp; + char *delim = "\t"; + int printed = 0; /* if showing only 1st thread in core and this isn't one, bail out */ if (show_core_only && !(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) @@ -576,106 +766,126 @@ int format_counters(struct thread_data *t, struct core_data *c, if (show_pkg_only && !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) return 0; + /*if not summary line and --cpu is used */ + if ((t != &average.threads) && + (cpu_subset && !CPU_ISSET_S(t->cpu_id, cpu_subset_size, cpu_subset))) + return 0; + interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0; + tsc = t->tsc * tsc_tweak; + /* topo columns, print blanks on 1st (average) line */ if (t == &average.threads) { - if (show_pkg) - outp += sprintf(outp, "\t-"); - if (show_core) - outp += sprintf(outp, "\t-"); - if (show_cpu) - outp += sprintf(outp, "\t-"); + if (DO_BIC(BIC_Package)) + outp += sprintf(outp, "%s-", (printed++ ? delim : "")); + if (DO_BIC(BIC_Core)) + outp += sprintf(outp, "%s-", (printed++ ? delim : "")); + if (DO_BIC(BIC_CPU)) + outp += sprintf(outp, "%s-", (printed++ ? delim : "")); } else { - if (show_pkg) { + if (DO_BIC(BIC_Package)) { if (p) - outp += sprintf(outp, "\t%d", p->package_id); + outp += sprintf(outp, "%s%d", (printed++ ? 
delim : ""), p->package_id); else - outp += sprintf(outp, "\t-"); + outp += sprintf(outp, "%s-", (printed++ ? delim : "")); } - if (show_core) { + if (DO_BIC(BIC_Core)) { if (c) - outp += sprintf(outp, "\t%d", c->core_id); + outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), c->core_id); else - outp += sprintf(outp, "\t-"); + outp += sprintf(outp, "%s-", (printed++ ? delim : "")); } - if (show_cpu) - outp += sprintf(outp, "\t%d", t->cpu_id); + if (DO_BIC(BIC_CPU)) + outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->cpu_id); } - /* Avg_MHz */ - if (has_aperf) - outp += sprintf(outp, "\t%.0f", + if (DO_BIC(BIC_Avg_MHz)) + outp += sprintf(outp, "%s%.0f", (printed++ ? delim : ""), 1.0 / units * t->aperf / interval_float); - /* Busy% */ - if (has_aperf) - outp += sprintf(outp, "\t%.2f", 100.0 * t->mperf/t->tsc/tsc_tweak); + if (DO_BIC(BIC_Busy)) + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * t->mperf/tsc); - /* Bzy_MHz */ - if (has_aperf) { + if (DO_BIC(BIC_Bzy_MHz)) { if (has_base_hz) - outp += sprintf(outp, "\t%.0f", base_hz / units * t->aperf / t->mperf); + outp += sprintf(outp, "%s%.0f", (printed++ ? delim : ""), base_hz / units * t->aperf / t->mperf); else - outp += sprintf(outp, "\t%.0f", - 1.0 * t->tsc / units * t->aperf / t->mperf / interval_float); + outp += sprintf(outp, "%s%.0f", (printed++ ? delim : ""), + tsc / units * t->aperf / t->mperf / interval_float); } - /* TSC_MHz */ - outp += sprintf(outp, "\t%.0f", 1.0 * t->tsc/units/interval_float); - - if (!debug) - goto done; + if (DO_BIC(BIC_TSC_MHz)) + outp += sprintf(outp, "%s%.0f", (printed++ ? delim : ""), 1.0 * t->tsc/units/interval_float); /* IRQ */ - if (do_irq) - outp += sprintf(outp, "\t%d", t->irq_count); + if (DO_BIC(BIC_IRQ)) { + if (sums_need_wide_columns) + outp += sprintf(outp, "%s%8lld", (printed++ ? delim : ""), t->irq_count); + else + outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), t->irq_count); + } /* SMI */ - if (do_smi) - outp += sprintf(outp, "\t%d", t->smi_count); - - if (do_nhm_cstates) - outp += sprintf(outp, "\t%.2f", 100.0 * t->c1/t->tsc); - - /* print per-core data only for 1st thread in core */ - if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) - goto done; - - if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates) - outp += sprintf(outp, "\t%.2f", 100.0 * c->c3/t->tsc); - if (do_nhm_cstates) - outp += sprintf(outp, "\t%.2f", 100.0 * c->c6/t->tsc); - if (do_snb_cstates) - outp += sprintf(outp, "\t%.2f", 100.0 * c->c7/t->tsc); + if (DO_BIC(BIC_SMI)) + outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->smi_count); + /* Added counters */ for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) { if (mp->format == FORMAT_RAW) { if (mp->width == 32) - outp += sprintf(outp, "\t0x%08lx", (unsigned long) t->counter[i]); + outp += sprintf(outp, "%s0x%08x", (printed++ ? delim : ""), (unsigned int) t->counter[i]); else - outp += sprintf(outp, "\t0x%016llx", t->counter[i]); + outp += sprintf(outp, "%s0x%016llx", (printed++ ? delim : ""), t->counter[i]); } else if (mp->format == FORMAT_DELTA) { - outp += sprintf(outp, "\t%8lld", t->counter[i]); + if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns) + outp += sprintf(outp, "%s%8lld", (printed++ ? delim : ""), t->counter[i]); + else + outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), t->counter[i]); } else if (mp->format == FORMAT_PERCENT) { - outp += sprintf(outp, "\t%.2f", 100.0 * t->counter[i]/t->tsc); + if (mp->type == COUNTER_USEC) + outp += sprintf(outp, "%s%.2f", (printed++ ? 
delim : ""), t->counter[i]/interval_float/10000); + else + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * t->counter[i]/tsc); } } + /* C1 */ + if (DO_BIC(BIC_CPU_c1)) + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * t->c1/tsc); - if (do_dts) - outp += sprintf(outp, "\t%d", c->core_temp_c); + + /* print per-core data only for 1st thread in core */ + if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) + goto done; + + if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates) + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c3/tsc); + if (DO_BIC(BIC_CPU_c6)) + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c6/tsc); + if (DO_BIC(BIC_CPU_c7)) + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c7/tsc); + + /* Mod%c6 */ + if (DO_BIC(BIC_Mod_c6)) + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->mc6_us / tsc); + + if (DO_BIC(BIC_CoreTmp)) + outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), c->core_temp_c); for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { if (mp->format == FORMAT_RAW) { if (mp->width == 32) - outp += sprintf(outp, "\t0x%08lx", (unsigned long) c->counter[i]); + outp += sprintf(outp, "%s0x%08x", (printed++ ? delim : ""), (unsigned int) c->counter[i]); else - outp += sprintf(outp, "\t0x%016llx", c->counter[i]); + outp += sprintf(outp, "%s0x%016llx", (printed++ ? delim : ""), c->counter[i]); } else if (mp->format == FORMAT_DELTA) { - outp += sprintf(outp, "\t%8lld", c->counter[i]); + if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns) + outp += sprintf(outp, "%s%8lld", (printed++ ? delim : ""), c->counter[i]); + else + outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), c->counter[i]); } else if (mp->format == FORMAT_PERCENT) { - outp += sprintf(outp, "\t%.2f", 100.0 * c->counter[i]/t->tsc); + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->counter[i]/tsc); } } @@ -684,95 +894,89 @@ int format_counters(struct thread_data *t, struct core_data *c, goto done; /* PkgTmp */ - if (do_ptm) - outp += sprintf(outp, "\t%d", p->pkg_temp_c); + if (DO_BIC(BIC_PkgTmp)) + outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), p->pkg_temp_c); /* GFXrc6 */ - if (do_gfx_rc6_ms) { + if (DO_BIC(BIC_GFX_rc6)) { if (p->gfx_rc6_ms == -1) { /* detect GFX counter reset */ - outp += sprintf(outp, "\t**.**"); + outp += sprintf(outp, "%s**.**", (printed++ ? delim : "")); } else { - outp += sprintf(outp, "\t%.2f", + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), p->gfx_rc6_ms / 10.0 / interval_float); } } /* GFXMHz */ - if (do_gfx_mhz) - outp += sprintf(outp, "\t%d", p->gfx_mhz); + if (DO_BIC(BIC_GFXMHz)) + outp += sprintf(outp, "%s%d", (printed++ ? 
delim : ""), p->gfx_mhz); /* Totl%C0, Any%C0 GFX%C0 CPUGFX% */ if (do_skl_residency) { - outp += sprintf(outp, "\t%.2f", 100.0 * p->pkg_wtd_core_c0/t->tsc); - outp += sprintf(outp, "\t%.2f", 100.0 * p->pkg_any_core_c0/t->tsc); - outp += sprintf(outp, "\t%.2f", 100.0 * p->pkg_any_gfxe_c0/t->tsc); - outp += sprintf(outp, "\t%.2f", 100.0 * p->pkg_both_core_gfxe_c0/t->tsc); - } - - if (do_pc2) - outp += sprintf(outp, "\t%.2f", 100.0 * p->pc2/t->tsc); - if (do_pc3) - outp += sprintf(outp, "\t%.2f", 100.0 * p->pc3/t->tsc); - if (do_pc6) - outp += sprintf(outp, "\t%.2f", 100.0 * p->pc6/t->tsc); - if (do_pc7) - outp += sprintf(outp, "\t%.2f", 100.0 * p->pc7/t->tsc); - if (do_c8_c9_c10) { - outp += sprintf(outp, "\t%.2f", 100.0 * p->pc8/t->tsc); - outp += sprintf(outp, "\t%.2f", 100.0 * p->pc9/t->tsc); - outp += sprintf(outp, "\t%.2f", 100.0 * p->pc10/t->tsc); + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_wtd_core_c0/tsc); + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_any_core_c0/tsc); + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_any_gfxe_c0/tsc); + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_both_core_gfxe_c0/tsc); } + if (DO_BIC(BIC_Pkgpc2)) + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc2/tsc); + if (DO_BIC(BIC_Pkgpc3)) + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc3/tsc); + if (DO_BIC(BIC_Pkgpc6)) + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc6/tsc); + if (DO_BIC(BIC_Pkgpc7)) + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc7/tsc); + if (DO_BIC(BIC_Pkgpc8)) + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc8/tsc); + if (DO_BIC(BIC_Pkgpc9)) + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc9/tsc); + if (DO_BIC(BIC_Pkgpc10)) + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc10/tsc); + /* * If measurement interval exceeds minimum RAPL Joule Counter range, * indicate that results are suspect by printing "**" in fraction place. 
*/ if (interval_float < rapl_joule_counter_range) - fmt8 = "\t%.2f"; + fmt8 = "%s%.2f"; else fmt8 = "%6.0f**"; - if (do_rapl && !rapl_joules) { - if (do_rapl & RAPL_PKG) - outp += sprintf(outp, fmt8, p->energy_pkg * rapl_energy_units / interval_float); - if (do_rapl & RAPL_CORES_ENERGY_STATUS) - outp += sprintf(outp, fmt8, p->energy_cores * rapl_energy_units / interval_float); - if (do_rapl & RAPL_GFX) - outp += sprintf(outp, fmt8, p->energy_gfx * rapl_energy_units / interval_float); - if (do_rapl & RAPL_DRAM) - outp += sprintf(outp, fmt8, p->energy_dram * rapl_dram_energy_units / interval_float); - if (do_rapl & RAPL_PKG_PERF_STATUS) - outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float); - if (do_rapl & RAPL_DRAM_PERF_STATUS) - outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float); - } else if (do_rapl && rapl_joules) { - if (do_rapl & RAPL_PKG) - outp += sprintf(outp, fmt8, - p->energy_pkg * rapl_energy_units); - if (do_rapl & RAPL_CORES) - outp += sprintf(outp, fmt8, - p->energy_cores * rapl_energy_units); - if (do_rapl & RAPL_GFX) - outp += sprintf(outp, fmt8, - p->energy_gfx * rapl_energy_units); - if (do_rapl & RAPL_DRAM) - outp += sprintf(outp, fmt8, - p->energy_dram * rapl_dram_energy_units); - if (do_rapl & RAPL_PKG_PERF_STATUS) - outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float); - if (do_rapl & RAPL_DRAM_PERF_STATUS) - outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float); - } + if (DO_BIC(BIC_PkgWatt)) + outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units / interval_float); + if (DO_BIC(BIC_CorWatt)) + outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units / interval_float); + if (DO_BIC(BIC_GFXWatt)) + outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units / interval_float); + if (DO_BIC(BIC_RAMWatt)) + outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_dram * rapl_dram_energy_units / interval_float); + if (DO_BIC(BIC_Pkg_J)) + outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units); + if (DO_BIC(BIC_Cor_J)) + outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units); + if (DO_BIC(BIC_GFX_J)) + outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units); + if (DO_BIC(BIC_RAM_J)) + outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_dram * rapl_dram_energy_units); + if (DO_BIC(BIC_PKG__)) + outp += sprintf(outp, fmt8, (printed++ ? delim : ""), 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float); + if (DO_BIC(BIC_RAM__)) + outp += sprintf(outp, fmt8, (printed++ ? delim : ""), 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float); + for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) { if (mp->format == FORMAT_RAW) { if (mp->width == 32) - outp += sprintf(outp, "\t0x%08lx", (unsigned long) p->counter[i]); + outp += sprintf(outp, "%s0x%08x", (printed++ ? delim : ""), (unsigned int) p->counter[i]); else - outp += sprintf(outp, "\t0x%016llx", p->counter[i]); + outp += sprintf(outp, "%s0x%016llx", (printed++ ? delim : ""), p->counter[i]); } else if (mp->format == FORMAT_DELTA) { - outp += sprintf(outp, "\t%8lld", p->counter[i]); + if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns) + outp += sprintf(outp, "%s%8lld", (printed++ ? 
delim : ""), p->counter[i]); + else + outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), p->counter[i]); } else if (mp->format == FORMAT_PERCENT) { - outp += sprintf(outp, "\t%.2f", 100.0 * p->counter[i]/t->tsc); + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->counter[i]/tsc); } } @@ -807,7 +1011,7 @@ void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_ static int printed; if (!printed || !summary_only) - print_header(); + print_header("\t"); if (topo.num_cpus > 1) format_counters(&average.threads, &average.cores, @@ -841,11 +1045,11 @@ delta_package(struct pkg_data *new, struct pkg_data *old) old->pkg_both_core_gfxe_c0 = new->pkg_both_core_gfxe_c0 - old->pkg_both_core_gfxe_c0; } old->pc2 = new->pc2 - old->pc2; - if (do_pc3) + if (DO_BIC(BIC_Pkgpc3)) old->pc3 = new->pc3 - old->pc3; - if (do_pc6) + if (DO_BIC(BIC_Pkgpc6)) old->pc6 = new->pc6 - old->pc6; - if (do_pc7) + if (DO_BIC(BIC_Pkgpc7)) old->pc7 = new->pc7 - old->pc7; old->pc8 = new->pc8 - old->pc8; old->pc9 = new->pc9 - old->pc9; @@ -887,6 +1091,7 @@ delta_core(struct core_data *new, struct core_data *old) old->c6 = new->c6 - old->c6; old->c7 = new->c7 - old->c7; old->core_temp_c = new->core_temp_c; + old->mc6_us = new->mc6_us - old->mc6_us; for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { if (mp->format == FORMAT_RAW) @@ -916,7 +1121,7 @@ delta_thread(struct thread_data *new, struct thread_data *old, old->c1 = new->c1 - old->c1; - if (has_aperf) { + if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz)) { if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) { old->aperf = new->aperf - old->aperf; old->mperf = new->mperf - old->mperf; @@ -941,7 +1146,7 @@ delta_thread(struct thread_data *new, struct thread_data *old, old->c1 = 0; else { /* normal case, derive c1 */ - old->c1 = old->tsc - old->mperf - core_delta->c3 + old->c1 = (old->tsc * tsc_tweak) - old->mperf - core_delta->c3 - core_delta->c6 - core_delta->c7; } } @@ -952,10 +1157,10 @@ delta_thread(struct thread_data *new, struct thread_data *old, old->mperf = 1; /* divide by 0 protection */ } - if (do_irq) + if (DO_BIC(BIC_IRQ)) old->irq_count = new->irq_count - old->irq_count; - if (do_smi) + if (DO_BIC(BIC_SMI)) old->smi_count = new->smi_count - old->smi_count; for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) { @@ -1008,6 +1213,7 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data c->c3 = 0; c->c6 = 0; c->c7 = 0; + c->mc6_us = 0; c->core_temp_c = 0; p->pkg_wtd_core_c0 = 0; @@ -1016,11 +1222,11 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data p->pkg_both_core_gfxe_c0 = 0; p->pc2 = 0; - if (do_pc3) + if (DO_BIC(BIC_Pkgpc3)) p->pc3 = 0; - if (do_pc6) + if (DO_BIC(BIC_Pkgpc6)) p->pc6 = 0; - if (do_pc7) + if (DO_BIC(BIC_Pkgpc7)) p->pc7 = 0; p->pc8 = 0; p->pc9 = 0; @@ -1036,7 +1242,6 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data p->gfx_rc6_ms = 0; p->gfx_mhz = 0; - for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) t->counter[i] = 0; @@ -1073,6 +1278,7 @@ int sum_counters(struct thread_data *t, struct core_data *c, average.cores.c3 += c->c3; average.cores.c6 += c->c6; average.cores.c7 += c->c7; + average.cores.mc6_us += c->mc6_us; average.cores.core_temp_c = MAX(average.cores.core_temp_c, c->core_temp_c); @@ -1094,11 +1300,11 @@ int sum_counters(struct thread_data *t, struct core_data *c, } average.packages.pc2 += p->pc2; - if (do_pc3) + if (DO_BIC(BIC_Pkgpc3)) average.packages.pc3 += p->pc3; - if (do_pc6) 
+ if (DO_BIC(BIC_Pkgpc6)) average.packages.pc6 += p->pc6; - if (do_pc7) + if (DO_BIC(BIC_Pkgpc7)) average.packages.pc7 += p->pc7; average.packages.pc8 += p->pc8; average.packages.pc9 += p->pc9; @@ -1143,9 +1349,13 @@ void compute_average(struct thread_data *t, struct core_data *c, average.threads.mperf /= topo.num_cpus; average.threads.c1 /= topo.num_cpus; + if (average.threads.irq_count > 9999999) + sums_need_wide_columns = 1; + average.cores.c3 /= topo.num_cores; average.cores.c6 /= topo.num_cores; average.cores.c7 /= topo.num_cores; + average.cores.mc6_us /= topo.num_cores; if (do_skl_residency) { average.packages.pkg_wtd_core_c0 /= topo.num_packages; @@ -1155,11 +1365,11 @@ void compute_average(struct thread_data *t, struct core_data *c, } average.packages.pc2 /= topo.num_packages; - if (do_pc3) + if (DO_BIC(BIC_Pkgpc3)) average.packages.pc3 /= topo.num_packages; - if (do_pc6) + if (DO_BIC(BIC_Pkgpc6)) average.packages.pc6 /= topo.num_packages; - if (do_pc7) + if (DO_BIC(BIC_Pkgpc7)) average.packages.pc7 /= topo.num_packages; average.packages.pc8 /= topo.num_packages; @@ -1169,16 +1379,29 @@ void compute_average(struct thread_data *t, struct core_data *c, for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) { if (mp->format == FORMAT_RAW) continue; + if (mp->type == COUNTER_ITEMS) { + if (average.threads.counter[i] > 9999999) + sums_need_wide_columns = 1; + continue; + } average.threads.counter[i] /= topo.num_cpus; } for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { if (mp->format == FORMAT_RAW) continue; + if (mp->type == COUNTER_ITEMS) { + if (average.cores.counter[i] > 9999999) + sums_need_wide_columns = 1; + } average.cores.counter[i] /= topo.num_cores; } for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) { if (mp->format == FORMAT_RAW) continue; + if (mp->type == COUNTER_ITEMS) { + if (average.packages.counter[i] > 9999999) + sums_need_wide_columns = 1; + } average.packages.counter[i] /= topo.num_packages; } } @@ -1192,6 +1415,60 @@ static unsigned long long rdtsc(void) return low | ((unsigned long long)high) << 32; } +/* + * Open a file, and exit on failure + */ +FILE *fopen_or_die(const char *path, const char *mode) +{ + FILE *filep = fopen(path, mode); + + if (!filep) + err(1, "%s: open failed", path); + return filep; +} +/* + * snapshot_sysfs_counter() + * + * return snapshot of given counter + */ +unsigned long long snapshot_sysfs_counter(char *path) +{ + FILE *fp; + int retval; + unsigned long long counter; + + fp = fopen_or_die(path, "r"); + + retval = fscanf(fp, "%lld", &counter); + if (retval != 1) + err(1, "snapshot_sysfs_counter(%s)", path); + + fclose(fp); + + return counter; +} + +int get_mp(int cpu, struct msr_counter *mp, unsigned long long *counterp) +{ + if (mp->msr_num != 0) { + if (get_msr(cpu, mp->msr_num, counterp)) + return -1; + } else { + char path[128]; + + if (mp->flags & SYSFS_PERCPU) { + sprintf(path, "/sys/devices/system/cpu/cpu%d/%s", + cpu, mp->path); + + *counterp = snapshot_sysfs_counter(path); + } else { + *counterp = snapshot_sysfs_counter(mp->path); + } + } + + return 0; +} + /* * get_counters(...) 
* migrate to cpu @@ -1213,7 +1490,7 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) retry: t->tsc = rdtsc(); /* we are running on local CPU of interest */ - if (has_aperf) { + if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz)) { unsigned long long tsc_before, tsc_between, tsc_after, aperf_time, mperf_time; /* @@ -1269,35 +1546,33 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) t->mperf = t->mperf * aperf_mperf_multiplier; } - if (do_irq) + if (DO_BIC(BIC_IRQ)) t->irq_count = irqs_per_cpu[cpu]; - if (do_smi) { + if (DO_BIC(BIC_SMI)) { if (get_msr(cpu, MSR_SMI_COUNT, &msr)) return -5; t->smi_count = msr & 0xFFFFFFFF; } - - if (use_c1_residency_msr) { + if (DO_BIC(BIC_CPU_c1) && use_c1_residency_msr) { if (get_msr(cpu, MSR_CORE_C1_RES, &t->c1)) return -6; } for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) { - if (get_msr(cpu, mp->msr_num, &t->counter[i])) + if (get_mp(cpu, mp, &t->counter[i])) return -10; } - /* collect core counters only for 1st thread in core */ if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) return 0; - if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates) { + if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates) { if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3)) return -6; } - if (do_nhm_cstates && !do_knl_cstates) { + if (DO_BIC(BIC_CPU_c6) && !do_knl_cstates) { if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6)) return -7; } else if (do_knl_cstates) { @@ -1305,18 +1580,22 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) return -7; } - if (do_snb_cstates) + if (DO_BIC(BIC_CPU_c7)) if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7)) return -8; - if (do_dts) { + if (DO_BIC(BIC_Mod_c6)) + if (get_msr(cpu, MSR_MODULE_C6_RES_MS, &c->mc6_us)) + return -8; + + if (DO_BIC(BIC_CoreTmp)) { if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr)) return -9; c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F); } for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { - if (get_msr(cpu, mp->msr_num, &c->counter[i])) + if (get_mp(cpu, mp, &c->counter[i])) return -10; } @@ -1334,26 +1613,35 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) if (get_msr(cpu, MSR_PKG_BOTH_CORE_GFXE_C0_RES, &p->pkg_both_core_gfxe_c0)) return -13; } - if (do_pc3) + if (DO_BIC(BIC_Pkgpc3)) if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3)) return -9; - if (do_pc6) - if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6)) - return -10; - if (do_pc2) + if (DO_BIC(BIC_Pkgpc6)) { + if (do_slm_cstates) { + if (get_msr(cpu, MSR_ATOM_PKG_C6_RESIDENCY, &p->pc6)) + return -10; + } else { + if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6)) + return -10; + } + } + + if (DO_BIC(BIC_Pkgpc2)) if (get_msr(cpu, MSR_PKG_C2_RESIDENCY, &p->pc2)) return -11; - if (do_pc7) + if (DO_BIC(BIC_Pkgpc7)) if (get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7)) return -12; - if (do_c8_c9_c10) { + if (DO_BIC(BIC_Pkgpc8)) if (get_msr(cpu, MSR_PKG_C8_RESIDENCY, &p->pc8)) return -13; + if (DO_BIC(BIC_Pkgpc9)) if (get_msr(cpu, MSR_PKG_C9_RESIDENCY, &p->pc9)) return -13; + if (DO_BIC(BIC_Pkgpc10)) if (get_msr(cpu, MSR_PKG_C10_RESIDENCY, &p->pc10)) return -13; - } + if (do_rapl & RAPL_PKG) { if (get_msr(cpu, MSR_PKG_ENERGY_STATUS, &msr)) return -13; @@ -1384,20 +1672,20 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) return -16; p->rapl_dram_perf_status = msr & 0xFFFFFFFF; } - if (do_ptm) { + if (DO_BIC(BIC_PkgTmp)) { if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr)) 
return -17; p->pkg_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F); } - if (do_gfx_rc6_ms) + if (DO_BIC(BIC_GFX_rc6)) p->gfx_rc6_ms = gfx_cur_rc6_ms; - if (do_gfx_mhz) + if (DO_BIC(BIC_GFXMHz)) p->gfx_mhz = gfx_cur_mhz; for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) { - if (get_msr(cpu, mp->msr_num, &p->counter[i])) + if (get_mp(cpu, mp, &p->counter[i])) return -10; } @@ -1433,8 +1721,8 @@ char *pkg_cstate_limit_strings[] = { "reserved", "unknown", "pc0", "pc1", "pc2", int nhm_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__3, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; int snb_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCL__7, PCL_7S, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; int hsw_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL__3, PCL__6, PCL__7, PCL_7S, PCL__8, PCL__9, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; -int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; -int amt_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; +int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7}; +int amt_pkg_cstate_limits[16] = {PCLUNL, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; int bxt_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; int skx_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; @@ -1457,11 +1745,11 @@ dump_nhm_platform_info(void) fprintf(outf, "cpu%d: MSR_PLATFORM_INFO: 0x%08llx\n", base_cpu, msr); ratio = (msr >> 40) & 0xFF; - fprintf(outf, "%d * %.0f = %.0f MHz max efficiency frequency\n", + fprintf(outf, "%d * %.1f = %.1f MHz max efficiency frequency\n", ratio, bclk, ratio * bclk); ratio = (msr >> 8) & 0xFF; - fprintf(outf, "%d * %.0f = %.0f MHz base frequency\n", + fprintf(outf, "%d * %.1f = %.1f MHz base frequency\n", ratio, bclk, ratio * bclk); get_msr(base_cpu, MSR_IA32_POWER_CTL, &msr); @@ -1483,12 +1771,12 @@ dump_hsw_turbo_ratio_limits(void) ratio = (msr >> 8) & 0xFF; if (ratio) - fprintf(outf, "%d * %.0f = %.0f MHz max turbo 18 active cores\n", + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 18 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 0) & 0xFF; if (ratio) - fprintf(outf, "%d * %.0f = %.0f MHz max turbo 17 active cores\n", + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 17 active cores\n", ratio, bclk, ratio * bclk); return; } @@ -1505,98 +1793,174 @@ dump_ivt_turbo_ratio_limits(void) ratio = (msr >> 56) & 0xFF; if (ratio) - fprintf(outf, "%d * %.0f = %.0f MHz max turbo 16 active cores\n", + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 16 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 48) & 0xFF; if (ratio) - fprintf(outf, "%d * %.0f = %.0f MHz max turbo 15 active cores\n", + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 15 active cores\n", ratio, bclk, 
ratio * bclk); ratio = (msr >> 40) & 0xFF; if (ratio) - fprintf(outf, "%d * %.0f = %.0f MHz max turbo 14 active cores\n", + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 14 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 32) & 0xFF; if (ratio) - fprintf(outf, "%d * %.0f = %.0f MHz max turbo 13 active cores\n", + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 13 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 24) & 0xFF; if (ratio) - fprintf(outf, "%d * %.0f = %.0f MHz max turbo 12 active cores\n", + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 12 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 16) & 0xFF; if (ratio) - fprintf(outf, "%d * %.0f = %.0f MHz max turbo 11 active cores\n", + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 11 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 8) & 0xFF; if (ratio) - fprintf(outf, "%d * %.0f = %.0f MHz max turbo 10 active cores\n", + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 10 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 0) & 0xFF; if (ratio) - fprintf(outf, "%d * %.0f = %.0f MHz max turbo 9 active cores\n", + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 9 active cores\n", ratio, bclk, ratio * bclk); return; } +int has_turbo_ratio_group_limits(int family, int model) +{ + + if (!genuine_intel) + return 0; + + switch (model) { + case INTEL_FAM6_ATOM_GOLDMONT: + case INTEL_FAM6_SKYLAKE_X: + case INTEL_FAM6_ATOM_DENVERTON: + return 1; + } + return 0; +} static void -dump_nhm_turbo_ratio_limits(void) +dump_turbo_ratio_limits(int family, int model) { - unsigned long long msr; - unsigned int ratio; + unsigned long long msr, core_counts; + unsigned int ratio, group_size; get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr); - fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", base_cpu, msr); + if (has_turbo_ratio_group_limits(family, model)) { + get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT1, &core_counts); + fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", base_cpu, core_counts); + } else { + core_counts = 0x0807060504030201; + } + ratio = (msr >> 56) & 0xFF; + group_size = (core_counts >> 56) & 0xFF; if (ratio) - fprintf(outf, "%d * %.0f = %.0f MHz max turbo 8 active cores\n", - ratio, bclk, ratio * bclk); + fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n", + ratio, bclk, ratio * bclk, group_size); ratio = (msr >> 48) & 0xFF; + group_size = (core_counts >> 48) & 0xFF; if (ratio) - fprintf(outf, "%d * %.0f = %.0f MHz max turbo 7 active cores\n", - ratio, bclk, ratio * bclk); + fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n", + ratio, bclk, ratio * bclk, group_size); ratio = (msr >> 40) & 0xFF; + group_size = (core_counts >> 40) & 0xFF; if (ratio) - fprintf(outf, "%d * %.0f = %.0f MHz max turbo 6 active cores\n", - ratio, bclk, ratio * bclk); + fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n", + ratio, bclk, ratio * bclk, group_size); ratio = (msr >> 32) & 0xFF; + group_size = (core_counts >> 32) & 0xFF; if (ratio) - fprintf(outf, "%d * %.0f = %.0f MHz max turbo 5 active cores\n", - ratio, bclk, ratio * bclk); + fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n", + ratio, bclk, ratio * bclk, group_size); ratio = (msr >> 24) & 0xFF; + group_size = (core_counts >> 24) & 0xFF; if (ratio) - fprintf(outf, "%d * %.0f = %.0f MHz max turbo 4 active cores\n", - ratio, bclk, ratio * bclk); + fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n", + ratio, bclk, ratio * bclk, group_size); ratio = (msr >> 16) & 0xFF; + 
group_size = (core_counts >> 16) & 0xFF; if (ratio) - fprintf(outf, "%d * %.0f = %.0f MHz max turbo 3 active cores\n", - ratio, bclk, ratio * bclk); + fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n", + ratio, bclk, ratio * bclk, group_size); ratio = (msr >> 8) & 0xFF; + group_size = (core_counts >> 8) & 0xFF; if (ratio) - fprintf(outf, "%d * %.0f = %.0f MHz max turbo 2 active cores\n", - ratio, bclk, ratio * bclk); + fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n", + ratio, bclk, ratio * bclk, group_size); ratio = (msr >> 0) & 0xFF; + group_size = (core_counts >> 0) & 0xFF; if (ratio) - fprintf(outf, "%d * %.0f = %.0f MHz max turbo 1 active cores\n", - ratio, bclk, ratio * bclk); + fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n", + ratio, bclk, ratio * bclk, group_size); return; } +static void +dump_atom_turbo_ratio_limits(void) +{ + unsigned long long msr; + unsigned int ratio; + + get_msr(base_cpu, MSR_ATOM_CORE_RATIOS, &msr); + fprintf(outf, "cpu%d: MSR_ATOM_CORE_RATIOS: 0x%08llx\n", base_cpu, msr & 0xFFFFFFFF); + + ratio = (msr >> 0) & 0x3F; + if (ratio) + fprintf(outf, "%d * %.1f = %.1f MHz minimum operating frequency\n", + ratio, bclk, ratio * bclk); + + ratio = (msr >> 8) & 0x3F; + if (ratio) + fprintf(outf, "%d * %.1f = %.1f MHz low frequency mode (LFM)\n", + ratio, bclk, ratio * bclk); + + ratio = (msr >> 16) & 0x3F; + if (ratio) + fprintf(outf, "%d * %.1f = %.1f MHz base frequency\n", + ratio, bclk, ratio * bclk); + + get_msr(base_cpu, MSR_ATOM_CORE_TURBO_RATIOS, &msr); + fprintf(outf, "cpu%d: MSR_ATOM_CORE_TURBO_RATIOS: 0x%08llx\n", base_cpu, msr & 0xFFFFFFFF); + + ratio = (msr >> 24) & 0x3F; + if (ratio) + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 4 active cores\n", + ratio, bclk, ratio * bclk); + + ratio = (msr >> 16) & 0x3F; + if (ratio) + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 3 active cores\n", + ratio, bclk, ratio * bclk); + + ratio = (msr >> 8) & 0x3F; + if (ratio) + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 2 active cores\n", + ratio, bclk, ratio * bclk); + + ratio = (msr >> 0) & 0x3F; + if (ratio) + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 1 active core\n", + ratio, bclk, ratio * bclk); +} + static void dump_knl_turbo_ratio_limits(void) { @@ -1652,7 +2016,7 @@ dump_knl_turbo_ratio_limits(void) for (i = buckets_no - 1; i >= 0; i--) if (i > 0 ? ratio[i] != ratio[i - 1] : 1) fprintf(outf, - "%d * %.0f = %.0f MHz max turbo %d active cores\n", + "%d * %.1f = %.1f MHz max turbo %d active cores\n", ratio[i], bclk, ratio[i] * bclk, cores[i]); } @@ -1661,12 +2025,12 @@ dump_nhm_cst_cfg(void) { unsigned long long msr; - get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr); + get_msr(base_cpu, MSR_PKG_CST_CONFIG_CONTROL, &msr); #define SNB_C1_AUTO_UNDEMOTE (1UL << 27) #define SNB_C3_AUTO_UNDEMOTE (1UL << 28) - fprintf(outf, "cpu%d: MSR_NHM_SNB_PKG_CST_CFG_CTL: 0x%08llx", base_cpu, msr); + fprintf(outf, "cpu%d: MSR_PKG_CST_CONFIG_CONTROL: 0x%08llx", base_cpu, msr); fprintf(outf, " (%s%s%s%s%slocked: pkg-cstate-limit=%d: %s)\n", (msr & SNB_C3_AUTO_UNDEMOTE) ? "UNdemote-C3, " : "", @@ -1810,16 +2174,6 @@ void free_all_buffers(void) free(irqs_per_cpu); } -/* - * Open a file, and exit on failure - */ -FILE *fopen_or_die(const char *path, const char *mode) -{ - FILE *filep = fopen(path, mode); - if (!filep) - err(1, "%s: open failed", path); - return filep; -} /* * Parse a file containing a single int. 
@@ -2148,13 +2502,14 @@ int snapshot_gfx_mhz(void) */ int snapshot_proc_sysfs_files(void) { - if (snapshot_proc_interrupts()) - return 1; + if (DO_BIC(BIC_IRQ)) + if (snapshot_proc_interrupts()) + return 1; - if (do_gfx_rc6_ms) + if (DO_BIC(BIC_GFX_rc6)) snapshot_gfx_rc6_ms(); - if (do_gfx_mhz) + if (DO_BIC(BIC_GFXMHz)) snapshot_gfx_mhz(); return 0; @@ -2283,7 +2638,9 @@ void check_permissions() * MSR_SMI_COUNT 0x00000034 * * MSR_PLATFORM_INFO 0x000000ce - * MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2 + * MSR_PKG_CST_CONFIG_CONTROL 0x000000e2 + * + * MSR_MISC_PWR_MGMT 0x000001aa * * MSR_PKG_C3_RESIDENCY 0x000003f8 * MSR_PKG_C6_RESIDENCY 0x000003f9 @@ -2291,7 +2648,8 @@ void check_permissions() * MSR_CORE_C6_RESIDENCY 0x000003fd * * Side effect: - * sets global pkg_cstate_limit to decode MSR_NHM_SNB_PKG_CST_CFG_CTL + * sets global pkg_cstate_limit to decode MSR_PKG_CST_CONFIG_CONTROL + * sets has_misc_feature_control */ int probe_nhm_msrs(unsigned int family, unsigned int model) { @@ -2322,6 +2680,7 @@ int probe_nhm_msrs(unsigned int family, unsigned int model) case INTEL_FAM6_IVYBRIDGE: /* IVB */ case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */ pkg_cstate_limits = snb_pkg_cstate_limits; + has_misc_feature_control = 1; break; case INTEL_FAM6_HASWELL_CORE: /* HSW */ case INTEL_FAM6_HASWELL_X: /* HSX */ @@ -2336,29 +2695,34 @@ int probe_nhm_msrs(unsigned int family, unsigned int model) case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */ case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */ pkg_cstate_limits = hsw_pkg_cstate_limits; + has_misc_feature_control = 1; break; case INTEL_FAM6_SKYLAKE_X: /* SKX */ pkg_cstate_limits = skx_pkg_cstate_limits; + has_misc_feature_control = 1; break; case INTEL_FAM6_ATOM_SILVERMONT1: /* BYT */ + no_MSR_MISC_PWR_MGMT = 1; case INTEL_FAM6_ATOM_SILVERMONT2: /* AVN */ pkg_cstate_limits = slv_pkg_cstate_limits; break; case INTEL_FAM6_ATOM_AIRMONT: /* AMT */ pkg_cstate_limits = amt_pkg_cstate_limits; + no_MSR_MISC_PWR_MGMT = 1; break; case INTEL_FAM6_XEON_PHI_KNL: /* PHI */ case INTEL_FAM6_XEON_PHI_KNM: pkg_cstate_limits = phi_pkg_cstate_limits; break; case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ + case INTEL_FAM6_ATOM_GEMINI_LAKE: case INTEL_FAM6_ATOM_DENVERTON: /* DNV */ pkg_cstate_limits = bxt_pkg_cstate_limits; break; default: return 0; } - get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr); + get_msr(base_cpu, MSR_PKG_CST_CONFIG_CONTROL, &msr); pkg_cstate_limit = pkg_cstate_limits[msr & 0xF]; get_msr(base_cpu, MSR_PLATFORM_INFO, &msr); @@ -2368,8 +2732,69 @@ int probe_nhm_msrs(unsigned int family, unsigned int model) has_base_hz = 1; return 1; } -int has_nhm_turbo_ratio_limit(unsigned int family, unsigned int model) +/* + * SLV client has support for unique MSRs: + * + * MSR_CC6_DEMOTION_POLICY_CONFIG + * MSR_MC6_DEMOTION_POLICY_CONFIG + */ + +int has_slv_msrs(unsigned int family, unsigned int model) { + if (!genuine_intel) + return 0; + + switch (model) { + case INTEL_FAM6_ATOM_SILVERMONT1: + case INTEL_FAM6_ATOM_MERRIFIELD: + case INTEL_FAM6_ATOM_MOOREFIELD: + return 1; + } + return 0; +} +int is_dnv(unsigned int family, unsigned int model) +{ + + if (!genuine_intel) + return 0; + + switch (model) { + case INTEL_FAM6_ATOM_DENVERTON: + return 1; + } + return 0; +} +int is_bdx(unsigned int family, unsigned int model) +{ + + if (!genuine_intel) + return 0; + + switch (model) { + case INTEL_FAM6_BROADWELL_X: + case INTEL_FAM6_BROADWELL_XEON_D: + return 1; + } + return 0; +} +int is_skx(unsigned int family, unsigned int model) +{ + + if (!genuine_intel) + return 0; + + switch (model) { + 
case INTEL_FAM6_SKYLAKE_X: + return 1; + } + return 0; +} + +int has_turbo_ratio_limit(unsigned int family, unsigned int model) +{ + if (has_slv_msrs(family, model)) + return 0; + switch (model) { /* Nehalem compatible, but do not include turbo-ratio limit support */ case INTEL_FAM6_NEHALEM_EX: /* Nehalem-EX Xeon - Beckton */ @@ -2381,6 +2806,13 @@ int has_nhm_turbo_ratio_limit(unsigned int family, unsigned int model) return 1; } } +int has_atom_turbo_ratio_limit(unsigned int family, unsigned int model) +{ + if (has_slv_msrs(family, model)) + return 1; + + return 0; +} int has_ivt_turbo_ratio_limit(unsigned int family, unsigned int model) { if (!genuine_intel) @@ -2429,6 +2861,22 @@ int has_knl_turbo_ratio_limit(unsigned int family, unsigned int model) return 0; } } +int has_glm_turbo_ratio_limit(unsigned int family, unsigned int model) +{ + if (!genuine_intel) + return 0; + + if (family != 6) + return 0; + + switch (model) { + case INTEL_FAM6_ATOM_GOLDMONT: + case INTEL_FAM6_SKYLAKE_X: + return 1; + default: + return 0; + } +} int has_config_tdp(unsigned int family, unsigned int model) { if (!genuine_intel) @@ -2475,8 +2923,11 @@ dump_cstate_pstate_config_info(unsigned int family, unsigned int model) if (has_ivt_turbo_ratio_limit(family, model)) dump_ivt_turbo_ratio_limits(); - if (has_nhm_turbo_ratio_limit(family, model)) - dump_nhm_turbo_ratio_limits(); + if (has_turbo_ratio_limit(family, model)) + dump_turbo_ratio_limits(family, model); + + if (has_atom_turbo_ratio_limit(family, model)) + dump_atom_turbo_ratio_limits(); if (has_knl_turbo_ratio_limit(family, model)) dump_knl_turbo_ratio_limits(); @@ -2487,6 +2938,96 @@ dump_cstate_pstate_config_info(unsigned int family, unsigned int model) dump_nhm_cst_cfg(); } +static void +dump_sysfs_cstate_config(void) +{ + char path[64]; + char name_buf[16]; + char desc[64]; + FILE *input; + int state; + char *sp; + + if (!DO_BIC(BIC_sysfs)) + return; + + for (state = 0; state < 10; ++state) { + + sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/name", + base_cpu, state); + input = fopen(path, "r"); + if (input == NULL) + continue; + fgets(name_buf, sizeof(name_buf), input); + + /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */ + sp = strchr(name_buf, '-'); + if (!sp) + sp = strchrnul(name_buf, '\n'); + *sp = '\0'; + + fclose(input); + + sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/desc", + base_cpu, state); + input = fopen(path, "r"); + if (input == NULL) + continue; + fgets(desc, sizeof(desc), input); + + fprintf(outf, "cpu%d: %s: %s", base_cpu, name_buf, desc); + fclose(input); + } +} +static void +dump_sysfs_pstate_config(void) +{ + char path[64]; + char driver_buf[64]; + char governor_buf[64]; + FILE *input; + int turbo; + + sprintf(path, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_driver", + base_cpu); + input = fopen(path, "r"); + if (input == NULL) { + fprintf(stderr, "NSFOD %s\n", path); + return; + } + fgets(driver_buf, sizeof(driver_buf), input); + fclose(input); + + sprintf(path, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor", + base_cpu); + input = fopen(path, "r"); + if (input == NULL) { + fprintf(stderr, "NSFOD %s\n", path); + return; + } + fgets(governor_buf, sizeof(governor_buf), input); + fclose(input); + + fprintf(outf, "cpu%d: cpufreq driver: %s", base_cpu, driver_buf); + fprintf(outf, "cpu%d: cpufreq governor: %s", base_cpu, governor_buf); + + sprintf(path, "/sys/devices/system/cpu/cpufreq/boost"); + input = fopen(path, "r"); + if (input != NULL) { + fscanf(input, "%d", 
&turbo); + fprintf(outf, "cpufreq boost: %d\n", turbo); + fclose(input); + } + + sprintf(path, "/sys/devices/system/cpu/intel_pstate/no_turbo"); + input = fopen(path, "r"); + if (input != NULL) { + fscanf(input, "%d", &turbo); + fprintf(outf, "cpufreq intel_pstate no_turbo: %d\n", turbo); + fclose(input); + } +} + /* * print_epb() @@ -2790,15 +3331,40 @@ void rapl_probe(unsigned int family, unsigned int model) case INTEL_FAM6_BROADWELL_CORE: /* BDW */ case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO; + if (rapl_joules) { + BIC_PRESENT(BIC_Pkg_J); + BIC_PRESENT(BIC_Cor_J); + BIC_PRESENT(BIC_GFX_J); + } else { + BIC_PRESENT(BIC_PkgWatt); + BIC_PRESENT(BIC_CorWatt); + BIC_PRESENT(BIC_GFXWatt); + } break; case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ + case INTEL_FAM6_ATOM_GEMINI_LAKE: do_rapl = RAPL_PKG | RAPL_PKG_POWER_INFO; + if (rapl_joules) + BIC_PRESENT(BIC_Pkg_J); + else + BIC_PRESENT(BIC_PkgWatt); break; case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */ case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */ case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */ case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */ do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO; + BIC_PRESENT(BIC_PKG__); + BIC_PRESENT(BIC_RAM__); + if (rapl_joules) { + BIC_PRESENT(BIC_Pkg_J); + BIC_PRESENT(BIC_Cor_J); + BIC_PRESENT(BIC_RAM_J); + } else { + BIC_PRESENT(BIC_PkgWatt); + BIC_PRESENT(BIC_CorWatt); + BIC_PRESENT(BIC_RAMWatt); + } break; case INTEL_FAM6_HASWELL_X: /* HSX */ case INTEL_FAM6_BROADWELL_X: /* BDX */ @@ -2807,17 +3373,55 @@ void rapl_probe(unsigned int family, unsigned int model) case INTEL_FAM6_XEON_PHI_KNL: /* KNL */ case INTEL_FAM6_XEON_PHI_KNM: do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO; + BIC_PRESENT(BIC_PKG__); + BIC_PRESENT(BIC_RAM__); + if (rapl_joules) { + BIC_PRESENT(BIC_Pkg_J); + BIC_PRESENT(BIC_RAM_J); + } else { + BIC_PRESENT(BIC_PkgWatt); + BIC_PRESENT(BIC_RAMWatt); + } break; case INTEL_FAM6_SANDYBRIDGE_X: case INTEL_FAM6_IVYBRIDGE_X: do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS | RAPL_PKG_POWER_INFO; + BIC_PRESENT(BIC_PKG__); + BIC_PRESENT(BIC_RAM__); + if (rapl_joules) { + BIC_PRESENT(BIC_Pkg_J); + BIC_PRESENT(BIC_Cor_J); + BIC_PRESENT(BIC_RAM_J); + } else { + BIC_PRESENT(BIC_PkgWatt); + BIC_PRESENT(BIC_CorWatt); + BIC_PRESENT(BIC_RAMWatt); + } break; case INTEL_FAM6_ATOM_SILVERMONT1: /* BYT */ case INTEL_FAM6_ATOM_SILVERMONT2: /* AVN */ do_rapl = RAPL_PKG | RAPL_CORES; + if (rapl_joules) { + BIC_PRESENT(BIC_Pkg_J); + BIC_PRESENT(BIC_Cor_J); + } else { + BIC_PRESENT(BIC_PkgWatt); + BIC_PRESENT(BIC_CorWatt); + } break; case INTEL_FAM6_ATOM_DENVERTON: /* DNV */ do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO | RAPL_CORES_ENERGY_STATUS; + BIC_PRESENT(BIC_PKG__); + BIC_PRESENT(BIC_RAM__); + if (rapl_joules) { + BIC_PRESENT(BIC_Pkg_J); + BIC_PRESENT(BIC_Cor_J); + BIC_PRESENT(BIC_RAM_J); + } else { + BIC_PRESENT(BIC_PkgWatt); + BIC_PRESENT(BIC_CorWatt); + BIC_PRESENT(BIC_RAMWatt); + } break; default: return; @@ -2844,7 +3448,7 @@ void rapl_probe(unsigned int family, unsigned int model) tdp = get_tdp(model); rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp; - if (debug) + if (!quiet) fprintf(outf, "RAPL: %.0f sec. 
Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp); return; @@ -2969,11 +3573,9 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr)) return -1; - if (debug) { - fprintf(outf, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx " - "(%f Watts, %f Joules, %f sec.)\n", cpu, msr, - rapl_power_units, rapl_energy_units, rapl_time_units); - } + fprintf(outf, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx (%f Watts, %f Joules, %f sec.)\n", cpu, msr, + rapl_power_units, rapl_energy_units, rapl_time_units); + if (do_rapl & RAPL_PKG_POWER_INFO) { if (get_msr(cpu, MSR_PKG_POWER_INFO, &msr)) @@ -2994,7 +3596,7 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) return -9; fprintf(outf, "cpu%d: MSR_PKG_POWER_LIMIT: 0x%08llx (%slocked)\n", - cpu, msr, (msr >> 63) & 1 ? "": "UN"); + cpu, msr, (msr >> 63) & 1 ? "" : "UN"); print_power_limit_msr(cpu, msr, "PKG Limit #1"); fprintf(outf, "cpu%d: PKG Limit #2: %sabled (%f Watts, %f* sec, clamp %sabled)\n", @@ -3020,40 +3622,34 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) if (get_msr(cpu, MSR_DRAM_POWER_LIMIT, &msr)) return -9; fprintf(outf, "cpu%d: MSR_DRAM_POWER_LIMIT: 0x%08llx (%slocked)\n", - cpu, msr, (msr >> 31) & 1 ? "": "UN"); + cpu, msr, (msr >> 31) & 1 ? "" : "UN"); print_power_limit_msr(cpu, msr, "DRAM Limit"); } if (do_rapl & RAPL_CORE_POLICY) { - if (debug) { - if (get_msr(cpu, MSR_PP0_POLICY, &msr)) - return -7; + if (get_msr(cpu, MSR_PP0_POLICY, &msr)) + return -7; - fprintf(outf, "cpu%d: MSR_PP0_POLICY: %lld\n", cpu, msr & 0xF); - } + fprintf(outf, "cpu%d: MSR_PP0_POLICY: %lld\n", cpu, msr & 0xF); } if (do_rapl & RAPL_CORES_POWER_LIMIT) { - if (debug) { - if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr)) - return -9; - fprintf(outf, "cpu%d: MSR_PP0_POWER_LIMIT: 0x%08llx (%slocked)\n", - cpu, msr, (msr >> 31) & 1 ? "": "UN"); - print_power_limit_msr(cpu, msr, "Cores Limit"); - } + if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr)) + return -9; + fprintf(outf, "cpu%d: MSR_PP0_POWER_LIMIT: 0x%08llx (%slocked)\n", + cpu, msr, (msr >> 31) & 1 ? "" : "UN"); + print_power_limit_msr(cpu, msr, "Cores Limit"); } if (do_rapl & RAPL_GFX) { - if (debug) { - if (get_msr(cpu, MSR_PP1_POLICY, &msr)) - return -8; + if (get_msr(cpu, MSR_PP1_POLICY, &msr)) + return -8; - fprintf(outf, "cpu%d: MSR_PP1_POLICY: %lld\n", cpu, msr & 0xF); + fprintf(outf, "cpu%d: MSR_PP1_POLICY: %lld\n", cpu, msr & 0xF); - if (get_msr(cpu, MSR_PP1_POWER_LIMIT, &msr)) - return -9; - fprintf(outf, "cpu%d: MSR_PP1_POWER_LIMIT: 0x%08llx (%slocked)\n", - cpu, msr, (msr >> 31) & 1 ? "": "UN"); - print_power_limit_msr(cpu, msr, "GFX Limit"); - } + if (get_msr(cpu, MSR_PP1_POWER_LIMIT, &msr)) + return -9; + fprintf(outf, "cpu%d: MSR_PP1_POWER_LIMIT: 0x%08llx (%slocked)\n", + cpu, msr, (msr >> 31) & 1 ? 
"" : "UN"); + print_power_limit_msr(cpu, msr, "GFX Limit"); } return 0; } @@ -3090,6 +3686,7 @@ int has_snb_msrs(unsigned int family, unsigned int model) case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */ case INTEL_FAM6_SKYLAKE_X: /* SKX */ case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ + case INTEL_FAM6_ATOM_GEMINI_LAKE: case INTEL_FAM6_ATOM_DENVERTON: /* DNV */ return 1; } @@ -3121,6 +3718,7 @@ int has_hsw_msrs(unsigned int family, unsigned int model) case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */ case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */ case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ + case INTEL_FAM6_ATOM_GEMINI_LAKE: return 1; } return 0; @@ -3149,8 +3747,6 @@ int has_skl_msrs(unsigned int family, unsigned int model) return 0; } - - int is_slm(unsigned int family, unsigned int model) { if (!genuine_intel) @@ -3201,7 +3797,8 @@ double slm_bclk(void) } freq = slm_freq_table[i]; - fprintf(outf, "SLM BCLK: %.1f Mhz\n", freq); + if (!quiet) + fprintf(outf, "SLM BCLK: %.1f Mhz\n", freq); return freq; } @@ -3264,7 +3861,7 @@ int set_temperature_target(struct thread_data *t, struct core_data *c, struct pk target_c_local = (msr >> 16) & 0xFF; - if (debug) + if (!quiet) fprintf(outf, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n", cpu, msr, target_c_local); @@ -3299,13 +3896,30 @@ void decode_misc_enable_msr(void) unsigned long long msr; if (!get_msr(base_cpu, MSR_IA32_MISC_ENABLE, &msr)) - fprintf(outf, "cpu%d: MSR_IA32_MISC_ENABLE: 0x%08llx (%s %s %s)\n", + fprintf(outf, "cpu%d: MSR_IA32_MISC_ENABLE: 0x%08llx (%sTCC %sEIST %sMWAIT %sPREFETCH %sTURBO)\n", base_cpu, msr, - msr & (1 << 3) ? "TCC" : "", - msr & (1 << 16) ? "EIST" : "", - msr & (1 << 18) ? "MONITOR" : ""); + msr & MSR_IA32_MISC_ENABLE_TM1 ? "" : "No-", + msr & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP ? "" : "No-", + msr & MSR_IA32_MISC_ENABLE_MWAIT ? "No-" : "", + msr & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE ? "No-" : "", + msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ? "No-" : ""); } +void decode_misc_feature_control(void) +{ + unsigned long long msr; + + if (!has_misc_feature_control) + return; + + if (!get_msr(base_cpu, MSR_MISC_FEATURE_CONTROL, &msr)) + fprintf(outf, "cpu%d: MSR_MISC_FEATURE_CONTROL: 0x%08llx (%sL2-Prefetch %sL2-Prefetch-pair %sL1-Prefetch %sL1-IP-Prefetch)\n", + base_cpu, msr, + msr & (0 << 0) ? "No-" : "", + msr & (1 << 0) ? "No-" : "", + msr & (2 << 0) ? "No-" : "", + msr & (3 << 0) ? "No-" : ""); +} /* * Decode MSR_MISC_PWR_MGMT * @@ -3320,6 +3934,9 @@ void decode_misc_pwr_mgmt_msr(void) if (!do_nhm_platform_info) return; + if (no_MSR_MISC_PWR_MGMT) + return; + if (!get_msr(base_cpu, MSR_MISC_PWR_MGMT, &msr)) fprintf(outf, "cpu%d: MSR_MISC_PWR_MGMT: 0x%08llx (%sable-EIST_Coordination %sable-EPB %sable-OOB)\n", base_cpu, msr, @@ -3327,11 +3944,30 @@ void decode_misc_pwr_mgmt_msr(void) msr & (1 << 1) ? "EN" : "DIS", msr & (1 << 8) ? "EN" : "DIS"); } +/* + * Decode MSR_CC6_DEMOTION_POLICY_CONFIG, MSR_MC6_DEMOTION_POLICY_CONFIG + * + * This MSRs are present on Silvermont processors, + * Intel Atom processor E3000 series (Baytrail), and friends. + */ +void decode_c6_demotion_policy_msr(void) +{ + unsigned long long msr; + + if (!get_msr(base_cpu, MSR_CC6_DEMOTION_POLICY_CONFIG, &msr)) + fprintf(outf, "cpu%d: MSR_CC6_DEMOTION_POLICY_CONFIG: 0x%08llx (%sable-CC6-Demotion)\n", + base_cpu, msr, msr & (1 << 0) ? "EN" : "DIS"); + + if (!get_msr(base_cpu, MSR_MC6_DEMOTION_POLICY_CONFIG, &msr)) + fprintf(outf, "cpu%d: MSR_MC6_DEMOTION_POLICY_CONFIG: 0x%08llx (%sable-MC6-Demotion)\n", + base_cpu, msr, msr & (1 << 0) ? 
"EN" : "DIS"); +} void process_cpuid() { unsigned int eax, ebx, ecx, edx, max_level, max_extended_level; unsigned int fms, family, model, stepping; + unsigned int has_turbo; eax = ebx = ecx = edx = 0; @@ -3340,7 +3976,7 @@ void process_cpuid() if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e) genuine_intel = 1; - if (debug) + if (!quiet) fprintf(outf, "CPUID(0): %.4s%.4s%.4s ", (char *)&ebx, (char *)&edx, (char *)&ecx); @@ -3351,7 +3987,7 @@ void process_cpuid() if (family == 6 || family == 0xf) model += ((fms >> 16) & 0xf) << 4; - if (debug) { + if (!quiet) { fprintf(outf, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n", max_level, family, model, stepping, family, model, stepping); fprintf(outf, "CPUID(1): %s %s %s %s %s %s %s %s %s\n", @@ -3394,8 +4030,18 @@ void process_cpuid() __cpuid(0x6, eax, ebx, ecx, edx); has_aperf = ecx & (1 << 0); + if (has_aperf) { + BIC_PRESENT(BIC_Avg_MHz); + BIC_PRESENT(BIC_Busy); + BIC_PRESENT(BIC_Bzy_MHz); + } do_dts = eax & (1 << 0); + if (do_dts) + BIC_PRESENT(BIC_CoreTmp); + has_turbo = eax & (1 << 1); do_ptm = eax & (1 << 6); + if (do_ptm) + BIC_PRESENT(BIC_PkgTmp); has_hwp = eax & (1 << 7); has_hwp_notify = eax & (1 << 8); has_hwp_activity_window = eax & (1 << 9); @@ -3403,10 +4049,11 @@ void process_cpuid() has_hwp_pkg = eax & (1 << 11); has_epb = ecx & (1 << 3); - if (debug) - fprintf(outf, "CPUID(6): %sAPERF, %sDTS, %sPTM, %sHWP, " + if (!quiet) + fprintf(outf, "CPUID(6): %sAPERF, %sTURBO, %sDTS, %sPTM, %sHWP, " "%sHWPnotify, %sHWPwindow, %sHWPepp, %sHWPpkg, %sEPB\n", has_aperf ? "" : "No-", + has_turbo ? "" : "No-", do_dts ? "" : "No-", do_ptm ? "" : "No-", has_hwp ? "" : "No-", @@ -3416,10 +4063,11 @@ void process_cpuid() has_hwp_pkg ? "" : "No-", has_epb ? "" : "No-"); - if (debug) + if (!quiet) decode_misc_enable_msr(); - if (max_level >= 0x7 && debug) { + + if (max_level >= 0x7 && !quiet) { int has_sgx; ecx = 0; @@ -3445,7 +4093,7 @@ void process_cpuid() if (ebx_tsc != 0) { - if (debug && (ebx != 0)) + if (!quiet && (ebx != 0)) fprintf(outf, "CPUID(0x15): eax_crystal: %d ebx_tsc: %d ecx_crystal_hz: %d\n", eax_crystal, ebx_tsc, crystal_hz); @@ -3462,6 +4110,7 @@ void process_cpuid() crystal_hz = 25000000; /* 25.0 MHz */ break; case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ + case INTEL_FAM6_ATOM_GEMINI_LAKE: crystal_hz = 19200000; /* 19.2 MHz */ break; default: @@ -3470,7 +4119,7 @@ void process_cpuid() if (crystal_hz) { tsc_hz = (unsigned long long) crystal_hz * ebx_tsc / eax_crystal; - if (debug) + if (!quiet) fprintf(outf, "TSC: %lld MHz (%d Hz * %d / %d / 1000000)\n", tsc_hz / 1000000, crystal_hz, ebx_tsc, eax_crystal); } @@ -3485,7 +4134,7 @@ void process_cpuid() base_mhz = max_mhz = bus_mhz = edx = 0; __cpuid(0x16, base_mhz, max_mhz, bus_mhz, edx); - if (debug) + if (!quiet) fprintf(outf, "CPUID(0x16): base_mhz: %d max_mhz: %d bus_mhz: %d\n", base_mhz, max_mhz, bus_mhz); } @@ -3493,56 +4142,96 @@ void process_cpuid() if (has_aperf) aperf_mperf_multiplier = get_aperf_mperf_multiplier(family, model); - do_nhm_platform_info = do_nhm_cstates = do_smi = probe_nhm_msrs(family, model); + BIC_PRESENT(BIC_IRQ); + BIC_PRESENT(BIC_TSC_MHz); + + if (probe_nhm_msrs(family, model)) { + do_nhm_platform_info = 1; + BIC_PRESENT(BIC_CPU_c1); + BIC_PRESENT(BIC_CPU_c3); + BIC_PRESENT(BIC_CPU_c6); + BIC_PRESENT(BIC_SMI); + } do_snb_cstates = has_snb_msrs(family, model); + + if (do_snb_cstates) + BIC_PRESENT(BIC_CPU_c7); + do_irtl_snb = has_snb_msrs(family, model); - do_pc2 = do_snb_cstates && (pkg_cstate_limit >= PCL__2); - do_pc3 = 
(pkg_cstate_limit >= PCL__3); - do_pc6 = (pkg_cstate_limit >= PCL__6); - do_pc7 = do_snb_cstates && (pkg_cstate_limit >= PCL__7); - do_c8_c9_c10 = has_hsw_msrs(family, model); + if (do_snb_cstates && (pkg_cstate_limit >= PCL__2)) + BIC_PRESENT(BIC_Pkgpc2); + if (pkg_cstate_limit >= PCL__3) + BIC_PRESENT(BIC_Pkgpc3); + if (pkg_cstate_limit >= PCL__6) + BIC_PRESENT(BIC_Pkgpc6); + if (do_snb_cstates && (pkg_cstate_limit >= PCL__7)) + BIC_PRESENT(BIC_Pkgpc7); + if (has_slv_msrs(family, model)) { + BIC_NOT_PRESENT(BIC_Pkgpc2); + BIC_NOT_PRESENT(BIC_Pkgpc3); + BIC_PRESENT(BIC_Pkgpc6); + BIC_NOT_PRESENT(BIC_Pkgpc7); + BIC_PRESENT(BIC_Mod_c6); + use_c1_residency_msr = 1; + } + if (is_dnv(family, model)) { + BIC_PRESENT(BIC_CPU_c1); + BIC_NOT_PRESENT(BIC_CPU_c3); + BIC_NOT_PRESENT(BIC_Pkgpc3); + BIC_NOT_PRESENT(BIC_CPU_c7); + BIC_NOT_PRESENT(BIC_Pkgpc7); + use_c1_residency_msr = 1; + } + if (is_skx(family, model)) { + BIC_NOT_PRESENT(BIC_CPU_c3); + BIC_NOT_PRESENT(BIC_Pkgpc3); + BIC_NOT_PRESENT(BIC_CPU_c7); + BIC_NOT_PRESENT(BIC_Pkgpc7); + } + if (is_bdx(family, model)) { + BIC_NOT_PRESENT(BIC_CPU_c7); + BIC_NOT_PRESENT(BIC_Pkgpc7); + } + if (has_hsw_msrs(family, model)) { + BIC_PRESENT(BIC_Pkgpc8); + BIC_PRESENT(BIC_Pkgpc9); + BIC_PRESENT(BIC_Pkgpc10); + } do_irtl_hsw = has_hsw_msrs(family, model); do_skl_residency = has_skl_msrs(family, model); do_slm_cstates = is_slm(family, model); do_knl_cstates = is_knl(family, model); - if (debug) + if (!quiet) decode_misc_pwr_mgmt_msr(); + if (!quiet && has_slv_msrs(family, model)) + decode_c6_demotion_policy_msr(); + rapl_probe(family, model); perf_limit_reasons_probe(family, model); - if (debug) + if (!quiet) dump_cstate_pstate_config_info(family, model); + if (!quiet) + dump_sysfs_cstate_config(); + if (!quiet) + dump_sysfs_pstate_config(); + if (has_skl_msrs(family, model)) calculate_tsc_tweak(); - do_gfx_rc6_ms = !access("/sys/class/drm/card0/power/rc6_residency_ms", R_OK); + if (!access("/sys/class/drm/card0/power/rc6_residency_ms", R_OK)) + BIC_PRESENT(BIC_GFX_rc6); - do_gfx_mhz = !access("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", R_OK); + if (!access("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", R_OK)) + BIC_PRESENT(BIC_GFXMHz); - return; -} + if (!quiet) + decode_misc_feature_control(); -void help() -{ - fprintf(outf, - "Usage: turbostat [OPTIONS][(--interval seconds) | COMMAND ...]\n" - "\n" - "Turbostat forks the specified COMMAND and prints statistics\n" - "when COMMAND completes.\n" - "If no COMMAND is specified, turbostat wakes every 5-seconds\n" - "to print statistics, until interrupted.\n" - "--add add a counter\n" - " eg. 
--add msr0x10,u64,cpu,delta,MY_TSC\n" - "--debug run in \"debug\" mode\n" - "--interval sec Override default 5-second measurement interval\n" - "--help print this help message\n" - "--out file create or truncate \"file\" for all output\n" - "--version print version information\n" - "\n" - "For more help, run \"man turbostat\"\n"); + return; } @@ -3579,7 +4268,7 @@ void topology_probe() topo.max_cpu_num = 0; for_all_proc_cpus(count_cpus); if (!summary_only && topo.num_cpus > 1) - show_cpu = 1; + BIC_PRESENT(BIC_CPU); if (debug > 1) fprintf(outf, "num_cpus %d max_cpu_num %d\n", topo.num_cpus, topo.max_cpu_num); @@ -3598,6 +4287,15 @@ void topology_probe() CPU_ZERO_S(cpu_present_setsize, cpu_present_set); for_all_proc_cpus(mark_cpu_present); + /* + * Validate that all cpus in cpu_subset are also in cpu_present_set + */ + for (i = 0; i < CPU_SUBSET_MAXCPUS; ++i) { + if (CPU_ISSET_S(i, cpu_subset_size, cpu_subset)) + if (!CPU_ISSET_S(i, cpu_present_setsize, cpu_present_set)) + err(1, "cpu%d not present", i); + } + /* * Allocate and initialize cpu_affinity_set */ @@ -3639,15 +4337,15 @@ void topology_probe() if (debug > 1) fprintf(outf, "max_core_id %d, sizing for %d cores per package\n", max_core_id, topo.num_cores_per_pkg); - if (debug && !summary_only && topo.num_cores_per_pkg > 1) - show_core = 1; + if (!summary_only && topo.num_cores_per_pkg > 1) + BIC_PRESENT(BIC_Core); topo.num_packages = max_package_id + 1; if (debug > 1) fprintf(outf, "max_package_id %d, sizing for %d packages\n", max_package_id, topo.num_packages); - if (debug && !summary_only && topo.num_packages > 1) - show_pkg = 1; + if (!summary_only && topo.num_packages > 1) + BIC_PRESENT(BIC_Package); topo.num_threads_per_core = max_siblings; if (debug > 1) @@ -3662,7 +4360,7 @@ allocate_counters(struct thread_data **t, struct core_data **c, struct pkg_data int i; *t = calloc(topo.num_threads_per_core * topo.num_cores_per_pkg * - topo.num_packages, sizeof(struct thread_data) + sys.thread_counter_bytes); + topo.num_packages, sizeof(struct thread_data)); if (*t == NULL) goto error; @@ -3671,14 +4369,14 @@ allocate_counters(struct thread_data **t, struct core_data **c, struct pkg_data (*t)[i].cpu_id = -1; *c = calloc(topo.num_cores_per_pkg * topo.num_packages, - sizeof(struct core_data) + sys.core_counter_bytes); + sizeof(struct core_data)); if (*c == NULL) goto error; for (i = 0; i < topo.num_cores_per_pkg * topo.num_packages; i++) (*c)[i].core_id = -1; - *p = calloc(topo.num_packages, sizeof(struct pkg_data) + sys.package_counter_bytes); + *p = calloc(topo.num_packages, sizeof(struct pkg_data)); if (*p == NULL) goto error; @@ -3789,24 +4487,24 @@ void turbostat_init() process_cpuid(); - if (debug) + if (!quiet) for_all_cpus(print_hwp, ODD_COUNTERS); - if (debug) + if (!quiet) for_all_cpus(print_epb, ODD_COUNTERS); - if (debug) + if (!quiet) for_all_cpus(print_perf_limit, ODD_COUNTERS); - if (debug) + if (!quiet) for_all_cpus(print_rapl, ODD_COUNTERS); for_all_cpus(set_temperature_target, ODD_COUNTERS); - if (debug) + if (!quiet) for_all_cpus(print_thermal, ODD_COUNTERS); - if (debug && do_irtl_snb) + if (!quiet && do_irtl_snb) print_irtl(); } @@ -3815,6 +4513,7 @@ int fork_it(char **argv) pid_t child_pid; int status; + snapshot_proc_sysfs_files(); status = for_all_cpus(get_counters, EVEN_COUNTERS); if (status) exit(status); @@ -3826,6 +4525,7 @@ int fork_it(char **argv) if (!child_pid) { /* child */ execvp(argv[0], argv); + err(errno, "exec %s", argv[0]); } else { /* parent */ @@ -3841,6 +4541,7 @@ int fork_it(char **argv) * n.b. 
fork_it() does not check for errors from for_all_cpus() * because re-starting is problematic when forking */ + snapshot_proc_sysfs_files(); for_all_cpus(get_counters, ODD_COUNTERS); gettimeofday(&tv_odd, (struct timezone *)NULL); timersub(&tv_odd, &tv_even, &tv_delta); @@ -3862,6 +4563,7 @@ int get_and_dump_counters(void) { int status; + snapshot_proc_sysfs_files(); status = for_all_cpus(get_counters, ODD_COUNTERS); if (status) return status; @@ -3876,13 +4578,13 @@ int get_and_dump_counters(void) } void print_version() { - fprintf(outf, "turbostat version 4.16 24 Dec 2016" + fprintf(outf, "turbostat version 17.02.24" " - Len Brown \n"); } -int add_counter(unsigned int msr_num, char *name, unsigned int width, - enum counter_scope scope, enum counter_type type, - enum counter_format format) +int add_counter(unsigned int msr_num, char *path, char *name, + unsigned int width, enum counter_scope scope, + enum counter_type type, enum counter_format format, int flags) { struct msr_counter *msrp; @@ -3894,31 +4596,46 @@ int add_counter(unsigned int msr_num, char *name, unsigned int width, msrp->msr_num = msr_num; strncpy(msrp->name, name, NAME_BYTES); + if (path) + strncpy(msrp->path, path, PATH_BYTES); msrp->width = width; msrp->type = type; msrp->format = format; + msrp->flags = flags; switch (scope) { case SCOPE_CPU: - sys.thread_counter_bytes += 64; msrp->next = sys.tp; sys.tp = msrp; - sys.thread_counter_bytes += sizeof(unsigned long long); + sys.added_thread_counters++; + if (sys.added_thread_counters > MAX_ADDED_COUNTERS) { + fprintf(stderr, "exceeded max %d added thread counters\n", + MAX_ADDED_COUNTERS); + exit(-1); + } break; case SCOPE_CORE: - sys.core_counter_bytes += 64; msrp->next = sys.cp; sys.cp = msrp; - sys.core_counter_bytes += sizeof(unsigned long long); + sys.added_core_counters++; + if (sys.added_core_counters > MAX_ADDED_COUNTERS) { + fprintf(stderr, "exceeded max %d added core counters\n", + MAX_ADDED_COUNTERS); + exit(-1); + } break; case SCOPE_PACKAGE: - sys.package_counter_bytes += 64; msrp->next = sys.pp; sys.pp = msrp; - sys.package_counter_bytes += sizeof(unsigned long long); + sys.added_package_counters++; + if (sys.added_package_counters > MAX_ADDED_COUNTERS) { + fprintf(stderr, "exceeded max %d added package counters\n", + MAX_ADDED_COUNTERS); + exit(-1); + } break; } @@ -3928,7 +4645,8 @@ int add_counter(unsigned int msr_num, char *name, unsigned int width, void parse_add_command(char *add_command) { int msr_num = 0; - char name_buffer[NAME_BYTES]; + char *path = NULL; + char name_buffer[NAME_BYTES] = ""; int width = 64; int fail = 0; enum counter_scope scope = SCOPE_CPU; @@ -3943,6 +4661,11 @@ void parse_add_command(char *add_command) if (sscanf(add_command, "msr%d", &msr_num) == 1) goto next; + if (*add_command == '/') { + path = add_command; + goto next; + } + if (sscanf(add_command, "u%d", &width) == 1) { if ((width == 32) || (width == 64)) goto next; @@ -3968,6 +4691,10 @@ void parse_add_command(char *add_command) type = COUNTER_SECONDS; goto next; } + if (!strncmp(add_command, "usec", strlen("usec"))) { + type = COUNTER_USEC; + goto next; + } if (!strncmp(add_command, "raw", strlen("raw"))) { format = FORMAT_RAW; goto next; @@ -3992,36 +4719,26 @@ void parse_add_command(char *add_command) next: add_command = strchr(add_command, ','); - if (add_command) + if (add_command) { + *add_command = '\0'; add_command++; + } } - if (msr_num == 0) { - fprintf(stderr, "--add: (msrDDD | msr0xXXX) required\n"); + if ((msr_num == 0) && (path == NULL)) { + fprintf(stderr, 
"--add: (msrDDD | msr0xXXX | /path_to_counter ) required\n"); fail++; } /* generate default column header */ if (*name_buffer == '\0') { - if (format == FORMAT_RAW) { - if (width == 32) - sprintf(name_buffer, "msr%d", msr_num); - else - sprintf(name_buffer, "MSR%d", msr_num); - } else if (format == FORMAT_DELTA) { - if (width == 32) - sprintf(name_buffer, "cnt%d", msr_num); - else - sprintf(name_buffer, "CNT%d", msr_num); - } else if (format == FORMAT_PERCENT) { - if (width == 32) - sprintf(name_buffer, "msr%d%%", msr_num); - else - sprintf(name_buffer, "MSR%d%%", msr_num); - } + if (width == 32) + sprintf(name_buffer, "M0x%x%s", msr_num, format == FORMAT_PERCENT ? "%" : ""); + else + sprintf(name_buffer, "M0X%x%s", msr_num, format == FORMAT_PERCENT ? "%" : ""); } - if (add_counter(msr_num, name_buffer, width, scope, type, format)) + if (add_counter(msr_num, path, name_buffer, width, scope, type, format, 0)) fail++; if (fail) { @@ -4029,20 +4746,214 @@ void parse_add_command(char *add_command) exit(1); } } + +int is_deferred_skip(char *name) +{ + int i; + + for (i = 0; i < deferred_skip_index; ++i) + if (!strcmp(name, deferred_skip_names[i])) + return 1; + return 0; +} + +void probe_sysfs(void) +{ + char path[64]; + char name_buf[16]; + FILE *input; + int state; + char *sp; + + if (!DO_BIC(BIC_sysfs)) + return; + + for (state = 10; state > 0; --state) { + + sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/name", + base_cpu, state); + input = fopen(path, "r"); + if (input == NULL) + continue; + fgets(name_buf, sizeof(name_buf), input); + + /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */ + sp = strchr(name_buf, '-'); + if (!sp) + sp = strchrnul(name_buf, '\n'); + *sp = '%'; + *(sp + 1) = '\0'; + + fclose(input); + + sprintf(path, "cpuidle/state%d/time", state); + + if (is_deferred_skip(name_buf)) + continue; + + add_counter(0, path, name_buf, 64, SCOPE_CPU, COUNTER_USEC, + FORMAT_PERCENT, SYSFS_PERCPU); + } + + for (state = 10; state > 0; --state) { + + sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/name", + base_cpu, state); + input = fopen(path, "r"); + if (input == NULL) + continue; + fgets(name_buf, sizeof(name_buf), input); + /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */ + sp = strchr(name_buf, '-'); + if (!sp) + sp = strchrnul(name_buf, '\n'); + *sp = '\0'; + fclose(input); + + sprintf(path, "cpuidle/state%d/usage", state); + + if (is_deferred_skip(name_buf)) + continue; + + add_counter(0, path, name_buf, 64, SCOPE_CPU, COUNTER_ITEMS, + FORMAT_DELTA, SYSFS_PERCPU); + } + +} + + +/* + * parse cpuset with following syntax + * 1,2,4..6,8-10 and set bits in cpu_subset + */ +void parse_cpu_command(char *optarg) +{ + unsigned int start, end; + char *next; + + if (!strcmp(optarg, "core")) { + if (cpu_subset) + goto error; + show_core_only++; + return; + } + if (!strcmp(optarg, "package")) { + if (cpu_subset) + goto error; + show_pkg_only++; + return; + } + if (show_core_only || show_pkg_only) + goto error; + + cpu_subset = CPU_ALLOC(CPU_SUBSET_MAXCPUS); + if (cpu_subset == NULL) + err(3, "CPU_ALLOC"); + cpu_subset_size = CPU_ALLOC_SIZE(CPU_SUBSET_MAXCPUS); + + CPU_ZERO_S(cpu_subset_size, cpu_subset); + + next = optarg; + + while (next && *next) { + + if (*next == '-') /* no negative cpu numbers */ + goto error; + + start = strtoul(next, &next, 10); + + if (start >= CPU_SUBSET_MAXCPUS) + goto error; + CPU_SET_S(start, cpu_subset_size, cpu_subset); + + if (*next == '\0') + break; + + if (*next == ',') { + next += 1; + continue; + } + + if 
(*next == '-') { + next += 1; /* start range */ + } else if (*next == '.') { + next += 1; + if (*next == '.') + next += 1; /* start range */ + else + goto error; + } + + end = strtoul(next, &next, 10); + if (end <= start) + goto error; + + while (++start <= end) { + if (start >= CPU_SUBSET_MAXCPUS) + goto error; + CPU_SET_S(start, cpu_subset_size, cpu_subset); + } + + if (*next == ',') + next += 1; + else if (*next != '\0') + goto error; + } + + return; + +error: + fprintf(stderr, "\"--cpu %s\" malformed\n", optarg); + help(); + exit(-1); +} + +int shown; +/* + * parse_show_hide() - process cmdline to set default counter action + */ +void parse_show_hide(char *optarg, enum show_hide_mode new_mode) +{ + /* + * --show: show only those specified + * The 1st invocation will clear and replace the enabled mask + * subsequent invocations can add to it. + */ + if (new_mode == SHOW_LIST) { + if (shown == 0) + bic_enabled = bic_lookup(optarg, new_mode); + else + bic_enabled |= bic_lookup(optarg, new_mode); + shown = 1; + + return; + } + + /* + * --hide: do not show those specified + * multiple invocations simply clear more bits in enabled mask + */ + bic_enabled &= ~bic_lookup(optarg, new_mode); + +} + void cmdline(int argc, char **argv) { int opt; int option_index = 0; static struct option long_options[] = { {"add", required_argument, 0, 'a'}, + {"cpu", required_argument, 0, 'c'}, {"Dump", no_argument, 0, 'D'}, - {"debug", no_argument, 0, 'd'}, + {"debug", no_argument, 0, 'd'}, /* internal, not documented */ {"interval", required_argument, 0, 'i'}, {"help", no_argument, 0, 'h'}, + {"hide", required_argument, 0, 'H'}, // meh, -h taken by --help {"Joules", no_argument, 0, 'J'}, + {"list", no_argument, 0, 'l'}, {"out", required_argument, 0, 'o'}, - {"Package", no_argument, 0, 'p'}, - {"processor", no_argument, 0, 'p'}, + {"quiet", no_argument, 0, 'q'}, + {"show", required_argument, 0, 's'}, {"Summary", no_argument, 0, 'S'}, {"TCC", required_argument, 0, 'T'}, {"version", no_argument, 0, 'v' }, @@ -4051,18 +4962,24 @@ void cmdline(int argc, char **argv) progname = argv[0]; - while ((opt = getopt_long_only(argc, argv, "+C:c:Ddhi:JM:m:o:PpST:v", + while ((opt = getopt_long_only(argc, argv, "+C:c:Ddhi:JM:m:o:qST:v", long_options, &option_index)) != -1) { switch (opt) { case 'a': parse_add_command(optarg); break; + case 'c': + parse_cpu_command(optarg); + break; case 'D': dump_only++; break; case 'd': debug++; break; + case 'H': + parse_show_hide(optarg, HIDE_LIST); + break; case 'h': default: help(); @@ -4084,14 +5001,18 @@ void cmdline(int argc, char **argv) case 'J': rapl_joules++; break; + case 'l': + list_header_only++; + quiet++; + break; case 'o': outf = fopen_or_die(optarg, "w"); break; - case 'P': - show_pkg_only++; + case 'q': + quiet = 1; break; - case 'p': - show_core_only++; + case 's': + parse_show_hide(optarg, SHOW_LIST); break; case 'S': summary_only++; @@ -4113,15 +5034,24 @@ int main(int argc, char **argv) cmdline(argc, argv); - if (debug) + if (!quiet) print_version(); + probe_sysfs(); + turbostat_init(); /* dump counters and exit */ if (dump_only) return get_and_dump_counters(); + /* list header and exit */ + if (list_header_only) { + print_header(","); + flush_output_stdout(); + return 0; + } + /* * if any params left, it must be a command to fork */ diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index 6e4eb2fc2d1e78..0c8b61f8398eda 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -1880,6 +1880,7 @@ sub get_grub_index { sub 
wait_for_input { my ($fp, $time) = @_; + my $start_time; my $rin; my $rout; my $nr; @@ -1895,17 +1896,22 @@ sub wait_for_input vec($rin, fileno($fp), 1) = 1; vec($rin, fileno(\*STDIN), 1) = 1; + $start_time = time; + while (1) { $nr = select($rout=$rin, undef, undef, $time); - if ($nr <= 0) { - return undef; - } + last if ($nr <= 0); # copy data from stdin to the console if (vec($rout, fileno(\*STDIN), 1) == 1) { - sysread(\*STDIN, $buf, 1000); - syswrite($fp, $buf, 1000); + $nr = sysread(\*STDIN, $buf, 1000); + syswrite($fp, $buf, $nr) if ($nr > 0); + } + + # The timeout is based on time waiting for the fp data + if (vec($rout, fileno($fp), 1) != 1) { + last if (defined($time) && (time - $start_time > $time)); next; } @@ -1917,12 +1923,11 @@ sub wait_for_input last if ($ch eq "\n"); } - if (!length($line)) { - return undef; - } + last if (!length($line)); return $line; } + return undef; } sub reboot_to { diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c index 45be8b55a66345..798f176554338b 100644 --- a/tools/testing/nvdimm/test/nfit.c +++ b/tools/testing/nvdimm/test/nfit.c @@ -887,7 +887,7 @@ static void nfit_test0_setup(struct nfit_test *t) memdev->range_index = 0+1; memdev->region_index = 4+1; memdev->region_size = SPA0_SIZE/2; - memdev->region_offset = t->spa_set_dma[0]; + memdev->region_offset = 1; memdev->address = 0; memdev->interleave_index = 0; memdev->interleave_ways = 2; @@ -902,7 +902,7 @@ static void nfit_test0_setup(struct nfit_test *t) memdev->range_index = 0+1; memdev->region_index = 5+1; memdev->region_size = SPA0_SIZE/2; - memdev->region_offset = t->spa_set_dma[0] + SPA0_SIZE/2; + memdev->region_offset = (1 << 8); memdev->address = 0; memdev->interleave_index = 0; memdev->interleave_ways = 2; @@ -917,7 +917,7 @@ static void nfit_test0_setup(struct nfit_test *t) memdev->range_index = 1+1; memdev->region_index = 4+1; memdev->region_size = SPA1_SIZE/4; - memdev->region_offset = t->spa_set_dma[1]; + memdev->region_offset = (1 << 16); memdev->address = SPA0_SIZE/2; memdev->interleave_index = 0; memdev->interleave_ways = 4; @@ -932,7 +932,7 @@ static void nfit_test0_setup(struct nfit_test *t) memdev->range_index = 1+1; memdev->region_index = 5+1; memdev->region_size = SPA1_SIZE/4; - memdev->region_offset = t->spa_set_dma[1] + SPA1_SIZE/4; + memdev->region_offset = (1 << 24); memdev->address = SPA0_SIZE/2; memdev->interleave_index = 0; memdev->interleave_ways = 4; @@ -947,7 +947,7 @@ static void nfit_test0_setup(struct nfit_test *t) memdev->range_index = 1+1; memdev->region_index = 6+1; memdev->region_size = SPA1_SIZE/4; - memdev->region_offset = t->spa_set_dma[1] + 2*SPA1_SIZE/4; + memdev->region_offset = (1ULL << 32); memdev->address = SPA0_SIZE/2; memdev->interleave_index = 0; memdev->interleave_ways = 4; @@ -962,7 +962,7 @@ static void nfit_test0_setup(struct nfit_test *t) memdev->range_index = 1+1; memdev->region_index = 7+1; memdev->region_size = SPA1_SIZE/4; - memdev->region_offset = t->spa_set_dma[1] + 3*SPA1_SIZE/4; + memdev->region_offset = (1ULL << 40); memdev->address = SPA0_SIZE/2; memdev->interleave_index = 0; memdev->interleave_ways = 4; @@ -1380,7 +1380,7 @@ static void nfit_test0_setup(struct nfit_test *t) memdev->range_index = 11+1; memdev->region_index = 9+1; memdev->region_size = SPA0_SIZE; - memdev->region_offset = t->spa_set_dma[2]; + memdev->region_offset = (1ULL << 48); memdev->address = 0; memdev->interleave_index = 0; memdev->interleave_ways = 1; diff --git a/tools/testing/radix-tree/Makefile 
b/tools/testing/radix-tree/Makefile index f11315bedefc3d..6a9480c03cbdfc 100644 --- a/tools/testing/radix-tree/Makefile +++ b/tools/testing/radix-tree/Makefile @@ -1,6 +1,7 @@ CFLAGS += -I. -I../../include -g -O2 -Wall -D_LGPL_SOURCE -fsanitize=address -LDFLAGS += -lpthread -lurcu +LDFLAGS += -fsanitize=address +LDLIBS+= -lpthread -lurcu TARGETS = main idr-test multiorder CORE_OFILES := radix-tree.o idr.o linux.o test.o find_bit.o OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \ @@ -10,23 +11,25 @@ ifndef SHIFT SHIFT=3 endif +ifeq ($(BUILD), 32) + CFLAGS += -m32 + LDFLAGS += -m32 +endif + targets: mapshift $(TARGETS) main: $(OFILES) - $(CC) $(CFLAGS) $(LDFLAGS) $^ -o main idr-test: idr-test.o $(CORE_OFILES) - $(CC) $(CFLAGS) $(LDFLAGS) $^ -o idr-test multiorder: multiorder.o $(CORE_OFILES) - $(CC) $(CFLAGS) $(LDFLAGS) $^ -o multiorder clean: $(RM) $(TARGETS) *.o radix-tree.c idr.c generated/map-shift.h vpath %.c ../../lib -$(OFILES): *.h */*.h generated/map-shift.h \ +$(OFILES): Makefile *.h */*.h generated/map-shift.h \ ../../include/linux/*.h \ ../../include/asm/*.h \ ../../../include/linux/radix-tree.h \ @@ -41,7 +44,7 @@ idr.c: ../../../lib/idr.c .PHONY: mapshift mapshift: - @if ! grep -qw $(SHIFT) generated/map-shift.h; then \ + @if ! grep -qws $(SHIFT) generated/map-shift.h; then \ echo "#define RADIX_TREE_MAP_SHIFT $(SHIFT)" > \ generated/map-shift.h; \ fi diff --git a/tools/testing/radix-tree/benchmark.c b/tools/testing/radix-tree/benchmark.c index 9b09ddfe462fd3..99c40f3ed1337f 100644 --- a/tools/testing/radix-tree/benchmark.c +++ b/tools/testing/radix-tree/benchmark.c @@ -17,6 +17,9 @@ #include #include "test.h" +#define for_each_index(i, base, order) \ + for (i = base; i < base + (1 << order); i++) + #define NSEC_PER_SEC 1000000000L static long long benchmark_iter(struct radix_tree_root *root, bool tagged) @@ -57,27 +60,176 @@ static long long benchmark_iter(struct radix_tree_root *root, bool tagged) return nsec; } +static void benchmark_insert(struct radix_tree_root *root, + unsigned long size, unsigned long step, int order) +{ + struct timespec start, finish; + unsigned long index; + long long nsec; + + clock_gettime(CLOCK_MONOTONIC, &start); + + for (index = 0 ; index < size ; index += step) + item_insert_order(root, index, order); + + clock_gettime(CLOCK_MONOTONIC, &finish); + + nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC + + (finish.tv_nsec - start.tv_nsec); + + printv(2, "Size: %8ld, step: %8ld, order: %d, insertion: %15lld ns\n", + size, step, order, nsec); +} + +static void benchmark_tagging(struct radix_tree_root *root, + unsigned long size, unsigned long step, int order) +{ + struct timespec start, finish; + unsigned long index; + long long nsec; + + clock_gettime(CLOCK_MONOTONIC, &start); + + for (index = 0 ; index < size ; index += step) + radix_tree_tag_set(root, index, 0); + + clock_gettime(CLOCK_MONOTONIC, &finish); + + nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC + + (finish.tv_nsec - start.tv_nsec); + + printv(2, "Size: %8ld, step: %8ld, order: %d, tagging: %17lld ns\n", + size, step, order, nsec); +} + +static void benchmark_delete(struct radix_tree_root *root, + unsigned long size, unsigned long step, int order) +{ + struct timespec start, finish; + unsigned long index, i; + long long nsec; + + clock_gettime(CLOCK_MONOTONIC, &start); + + for (index = 0 ; index < size ; index += step) + for_each_index(i, index, order) + item_delete(root, i); + + clock_gettime(CLOCK_MONOTONIC, &finish); + + nsec = (finish.tv_sec - 
start.tv_sec) * NSEC_PER_SEC + + (finish.tv_nsec - start.tv_nsec); + + printv(2, "Size: %8ld, step: %8ld, order: %d, deletion: %16lld ns\n", + size, step, order, nsec); +} + static void benchmark_size(unsigned long size, unsigned long step, int order) { RADIX_TREE(tree, GFP_KERNEL); long long normal, tagged; - unsigned long index; - for (index = 0 ; index < size ; index += step) { - item_insert_order(&tree, index, order); - radix_tree_tag_set(&tree, index, 0); - } + benchmark_insert(&tree, size, step, order); + benchmark_tagging(&tree, size, step, order); tagged = benchmark_iter(&tree, true); normal = benchmark_iter(&tree, false); - printv(2, "Size %ld, step %6ld, order %d tagged %10lld ns, normal %10lld ns\n", - size, step, order, tagged, normal); + printv(2, "Size: %8ld, step: %8ld, order: %d, tagged iteration: %8lld ns\n", + size, step, order, tagged); + printv(2, "Size: %8ld, step: %8ld, order: %d, normal iteration: %8lld ns\n", + size, step, order, normal); + + benchmark_delete(&tree, size, step, order); item_kill_tree(&tree); rcu_barrier(); } +static long long __benchmark_split(unsigned long index, + int old_order, int new_order) +{ + struct timespec start, finish; + long long nsec; + RADIX_TREE(tree, GFP_ATOMIC); + + item_insert_order(&tree, index, old_order); + + clock_gettime(CLOCK_MONOTONIC, &start); + radix_tree_split(&tree, index, new_order); + clock_gettime(CLOCK_MONOTONIC, &finish); + nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC + + (finish.tv_nsec - start.tv_nsec); + + item_kill_tree(&tree); + + return nsec; + +} + +static void benchmark_split(unsigned long size, unsigned long step) +{ + int i, j, idx; + long long nsec = 0; + + + for (idx = 0; idx < size; idx += step) { + for (i = 3; i < 11; i++) { + for (j = 0; j < i; j++) { + nsec += __benchmark_split(idx, i, j); + } + } + } + + printv(2, "Size %8ld, step %8ld, split time %10lld ns\n", + size, step, nsec); + +} + +static long long __benchmark_join(unsigned long index, + unsigned order1, unsigned order2) +{ + unsigned long loc; + struct timespec start, finish; + long long nsec; + void *item, *item2 = item_create(index + 1, order1); + RADIX_TREE(tree, GFP_KERNEL); + + item_insert_order(&tree, index, order2); + item = radix_tree_lookup(&tree, index); + + clock_gettime(CLOCK_MONOTONIC, &start); + radix_tree_join(&tree, index + 1, order1, item2); + clock_gettime(CLOCK_MONOTONIC, &finish); + nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC + + (finish.tv_nsec - start.tv_nsec); + + loc = find_item(&tree, item); + if (loc == -1) + free(item); + + item_kill_tree(&tree); + + return nsec; +} + +static void benchmark_join(unsigned long step) +{ + int i, j, idx; + long long nsec = 0; + + for (idx = 0; idx < 1 << 10; idx += step) { + for (i = 1; i < 15; i++) { + for (j = 0; j < i; j++) { + nsec += __benchmark_join(idx, i, j); + } + } + } + + printv(2, "Size %8d, step %8ld, join time %10lld ns\n", + 1 << 10, step, nsec); +} + void benchmark(void) { unsigned long size[] = {1 << 10, 1 << 20, 0}; @@ -95,4 +247,11 @@ void benchmark(void) for (c = 0; size[c]; c++) for (s = 0; step[s]; s++) benchmark_size(size[c], step[s] << 9, 9); + + for (c = 0; size[c]; c++) + for (s = 0; step[s]; s++) + benchmark_split(size[c], step[s]); + + for (s = 0; step[s]; s++) + benchmark_join(step[s]); } diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c index a26098c6123d1c..30cd0b296f1a76 100644 --- a/tools/testing/radix-tree/idr-test.c +++ b/tools/testing/radix-tree/idr-test.c @@ -153,6 +153,30 @@ void 
idr_nowait_test(void) idr_destroy(&idr); } +void idr_get_next_test(void) +{ + unsigned long i; + int nextid; + DEFINE_IDR(idr); + + int indices[] = {4, 7, 9, 15, 65, 128, 1000, 99999, 0}; + + for(i = 0; indices[i]; i++) { + struct item *item = item_create(indices[i], 0); + assert(idr_alloc(&idr, item, indices[i], indices[i+1], + GFP_KERNEL) == indices[i]); + } + + for(i = 0, nextid = 0; indices[i]; i++) { + idr_get_next(&idr, &nextid); + assert(nextid == indices[i]); + nextid++; + } + + idr_for_each(&idr, item_idr_free, &idr); + idr_destroy(&idr); +} + void idr_checks(void) { unsigned long i; @@ -202,6 +226,7 @@ void idr_checks(void) idr_alloc_test(); idr_null_test(); idr_nowait_test(); + idr_get_next_test(); } /* @@ -338,7 +363,7 @@ void ida_check_random(void) { DEFINE_IDA(ida); DECLARE_BITMAP(bitmap, 2048); - int id; + int id, err; unsigned int i; time_t s = time(NULL); @@ -352,8 +377,11 @@ void ida_check_random(void) ida_remove(&ida, bit); } else { __set_bit(bit, bitmap); - ida_pre_get(&ida, GFP_KERNEL); - assert(!ida_get_new_above(&ida, bit, &id)); + do { + ida_pre_get(&ida, GFP_KERNEL); + err = ida_get_new_above(&ida, bit, &id); + } while (err == -ENOMEM); + assert(!err); assert(id == bit); } } @@ -362,6 +390,24 @@ void ida_check_random(void) goto repeat; } +void ida_simple_get_remove_test(void) +{ + DEFINE_IDA(ida); + unsigned long i; + + for (i = 0; i < 10000; i++) { + assert(ida_simple_get(&ida, 0, 20000, GFP_KERNEL) == i); + } + assert(ida_simple_get(&ida, 5, 30, GFP_KERNEL) < 0); + + for (i = 0; i < 10000; i++) { + ida_simple_remove(&ida, i); + } + assert(ida_is_empty(&ida)); + + ida_destroy(&ida); +} + void ida_checks(void) { DEFINE_IDA(ida); @@ -428,15 +474,41 @@ void ida_checks(void) ida_check_max(); ida_check_conv(); ida_check_random(); + ida_simple_get_remove_test(); radix_tree_cpu_dead(1); } +static void *ida_random_fn(void *arg) +{ + rcu_register_thread(); + ida_check_random(); + rcu_unregister_thread(); + return NULL; +} + +void ida_thread_tests(void) +{ + pthread_t threads[10]; + int i; + + for (i = 0; i < ARRAY_SIZE(threads); i++) + if (pthread_create(&threads[i], NULL, ida_random_fn, NULL)) { + perror("creating ida thread"); + exit(1); + } + + while (i--) + pthread_join(threads[i], NULL); +} + int __weak main(void) { radix_tree_init(); idr_checks(); ida_checks(); + ida_thread_tests(); + radix_tree_cpu_dead(1); rcu_barrier(); if (nr_allocated) printf("nr_allocated = %d\n", nr_allocated); diff --git a/tools/testing/radix-tree/main.c b/tools/testing/radix-tree/main.c index b829127d567057..bc9a78449572f1 100644 --- a/tools/testing/radix-tree/main.c +++ b/tools/testing/radix-tree/main.c @@ -368,6 +368,7 @@ int main(int argc, char **argv) iteration_test(0, 10 + 90 * long_run); iteration_test(7, 10 + 90 * long_run); single_thread_tests(long_run); + ida_thread_tests(); /* Free any remaining preallocated nodes */ radix_tree_cpu_dead(0); diff --git a/tools/testing/radix-tree/tag_check.c b/tools/testing/radix-tree/tag_check.c index d4ff009892456a..36dcf7d6945dc6 100644 --- a/tools/testing/radix-tree/tag_check.c +++ b/tools/testing/radix-tree/tag_check.c @@ -330,6 +330,34 @@ static void single_check(void) item_kill_tree(&tree); } +void radix_tree_clear_tags_test(void) +{ + unsigned long index; + struct radix_tree_node *node; + struct radix_tree_iter iter; + void **slot; + + RADIX_TREE(tree, GFP_KERNEL); + + item_insert(&tree, 0); + item_tag_set(&tree, 0, 0); + __radix_tree_lookup(&tree, 0, &node, &slot); + radix_tree_clear_tags(&tree, node, slot); + assert(item_tag_get(&tree, 0, 
0) == 0); + + for (index = 0; index < 1000; index++) { + item_insert(&tree, index); + item_tag_set(&tree, index, 0); + } + + radix_tree_for_each_slot(slot, &tree, &iter, 0) { + radix_tree_clear_tags(&tree, iter.node, slot); + assert(item_tag_get(&tree, iter.index, 0) == 0); + } + + item_kill_tree(&tree); +} + void tag_check(void) { single_check(); @@ -347,4 +375,5 @@ void tag_check(void) thrash_tags(); rcu_barrier(); printv(2, "after thrash_tags: %d allocated\n", nr_allocated); + radix_tree_clear_tags_test(); } diff --git a/tools/testing/radix-tree/test.h b/tools/testing/radix-tree/test.h index b30e11d9d271c3..0f8220cc61663f 100644 --- a/tools/testing/radix-tree/test.h +++ b/tools/testing/radix-tree/test.h @@ -36,6 +36,7 @@ void iteration_test(unsigned order, unsigned duration); void benchmark(void); void idr_checks(void); void ida_checks(void); +void ida_thread_tests(void); struct item * item_tag_set(struct radix_tree_root *root, unsigned long index, int tag); diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index e8b79a7b50bd52..d8593f1251ecce 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile @@ -26,6 +26,7 @@ TARGETS += ptrace TARGETS += seccomp TARGETS += sigaltstack TARGETS += size +TARGETS += splice TARGETS += static_keys TARGETS += sync TARGETS += sysctl diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 4b498265dae6dc..9af09e8099c0aa 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -1,20 +1,30 @@ LIBDIR := ../../../lib -BPFOBJ := $(LIBDIR)/bpf/bpf.o +BPFDIR := $(LIBDIR)/bpf +APIDIR := ../../../include/uapi +GENDIR := ../../../../include/generated +GENHDR := $(GENDIR)/autoconf.h -CFLAGS += -Wall -O2 -lcap -I../../../include/uapi -I$(LIBDIR) +ifneq ($(wildcard $(GENHDR)),) + GENFLAGS := -DHAVE_GENHDR +endif + +CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) +LDLIBS += -lcap TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map TEST_PROGS := test_kmod.sh -.PHONY: all clean force +include ../lib.mk + +BPFOBJ := $(OUTPUT)/bpf.o + +$(TEST_GEN_PROGS): $(BPFOBJ) + +.PHONY: force # force a rebuild of BPFOBJ when its dependencies are updated force: $(BPFOBJ): force - $(MAKE) -C $(dir $(BPFOBJ)) - -$(test_objs): $(BPFOBJ) - -include ../lib.mk + $(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/ diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index cada17ac00b8e6..a0aa2009b0e0a8 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c @@ -80,8 +80,9 @@ static void test_hashmap(int task, void *data) assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == 0); key = 2; assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0); - key = 1; - assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0); + key = 3; + assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 && + errno == E2BIG); /* Check that key = 0 doesn't exist. 
*/ key = 0; @@ -110,6 +111,24 @@ static void test_hashmap(int task, void *data) close(fd); } +static void test_hashmap_sizes(int task, void *data) +{ + int fd, i, j; + + for (i = 1; i <= 512; i <<= 1) + for (j = 1; j <= 1 << 18; j <<= 1) { + fd = bpf_create_map(BPF_MAP_TYPE_HASH, i, j, + 2, map_flags); + if (fd < 0) { + printf("Failed to create hashmap key=%d value=%d '%s'\n", + i, j, strerror(errno)); + exit(1); + } + close(fd); + usleep(10); /* give kernel time to destroy */ + } +} + static void test_hashmap_percpu(int task, void *data) { unsigned int nr_cpus = bpf_num_possible_cpus(); @@ -317,7 +336,10 @@ static void test_arraymap_percpu(int task, void *data) static void test_arraymap_percpu_many_keys(void) { unsigned int nr_cpus = bpf_num_possible_cpus(); - unsigned int nr_keys = 20000; + /* nr_keys is not too large otherwise the test stresses percpu + * allocator more than anything else + */ + unsigned int nr_keys = 2000; long values[nr_cpus]; int key, fd, i; @@ -419,6 +441,7 @@ static void test_map_stress(void) { run_parallel(100, test_hashmap, NULL); run_parallel(100, test_hashmap_percpu, NULL); + run_parallel(100, test_hashmap_sizes, NULL); run_parallel(100, test_arraymap, NULL); run_parallel(100, test_arraymap_percpu, NULL); diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index e1f5b9eea1e874..c848e90b642131 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -8,6 +8,8 @@ * License as published by the Free Software Foundation. */ +#include +#include #include #include #include @@ -28,6 +30,14 @@ #include +#ifdef HAVE_GENHDR +# include "autoconf.h" +#else +# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__) +# define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1 +# endif +#endif + #include "../../../include/linux/filter.h" #ifndef ARRAY_SIZE @@ -37,6 +47,8 @@ #define MAX_INSNS 512 #define MAX_FIXUPS 8 +#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0) + struct bpf_test { const char *descr; struct bpf_insn insns[MAX_INSNS]; @@ -51,6 +63,7 @@ struct bpf_test { REJECT } result, result_unpriv; enum bpf_prog_type prog_type; + uint8_t flags; }; /* Note we want this to be 64 bit aligned so that the end of our array is @@ -2429,6 +2442,30 @@ static struct bpf_test tests[] = { .result = ACCEPT, .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, + { + "direct packet access: test15 (spill with xadd)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8), + BPF_MOV64_IMM(BPF_REG_5, 4096), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), + BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0), + BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R2 invalid mem access 'inv'", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, { "helper access to packet: test1, valid packet_ptr range", .insns = { @@ -2932,6 +2969,7 @@ static struct bpf_test tests[] = { .errstr_unpriv = "R0 pointer arithmetic prohibited", .result_unpriv = REJECT, .result = ACCEPT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "valid map access into an 
array with a variable", @@ -2955,6 +2993,7 @@ static struct bpf_test tests[] = { .errstr_unpriv = "R0 pointer arithmetic prohibited", .result_unpriv = REJECT, .result = ACCEPT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "valid map access into an array with a signed variable", @@ -2982,6 +3021,7 @@ static struct bpf_test tests[] = { .errstr_unpriv = "R0 pointer arithmetic prohibited", .result_unpriv = REJECT, .result = ACCEPT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "invalid map access into an array with a constant", @@ -3023,6 +3063,7 @@ static struct bpf_test tests[] = { .errstr = "R0 min value is outside of the array range", .result_unpriv = REJECT, .result = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "invalid map access into an array with a variable", @@ -3046,6 +3087,7 @@ static struct bpf_test tests[] = { .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", .result_unpriv = REJECT, .result = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "invalid map access into an array with no floor check", @@ -3072,6 +3114,7 @@ static struct bpf_test tests[] = { .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", .result_unpriv = REJECT, .result = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "invalid map access into an array with a invalid max check", @@ -3098,6 +3141,7 @@ static struct bpf_test tests[] = { .errstr = "invalid access to map value, value_size=48 off=44 size=8", .result_unpriv = REJECT, .result = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "invalid map access into an array with a invalid max check", @@ -3127,6 +3171,7 @@ static struct bpf_test tests[] = { .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", .result_unpriv = REJECT, .result = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "multiple registers share map_lookup_elem result", @@ -3250,6 +3295,7 @@ static struct bpf_test tests[] = { .result = REJECT, .errstr_unpriv = "R0 pointer arithmetic prohibited", .result_unpriv = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "constant register |= constant should keep constant type", @@ -3415,6 +3461,26 @@ static struct bpf_test tests[] = { .result = ACCEPT, .prog_type = BPF_PROG_TYPE_LWT_XMIT, }, + { + "overlapping checks for direct packet access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_LWT_XMIT, + }, { "invalid access of tc_classid for LWT_IN", .insns = { @@ -3959,7 +4025,208 @@ static struct bpf_test tests[] = { .result_unpriv = REJECT, }, { - "map element value (adjusted) is preserved across register spilling", + "map element value or null is marked on register spilling", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + 
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0), + BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr_unpriv = "R0 leaks addr", + .result = ACCEPT, + .result_unpriv = REJECT, + }, + { + "map element value store of cleared call register", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr_unpriv = "R1 !read_ok", + .errstr = "R1 !read_ok", + .result = REJECT, + .result_unpriv = REJECT, + }, + { + "map element value with unaligned store", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43), + BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44), + BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), + BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32), + BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33), + BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5), + BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22), + BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23), + BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24), + BPF_MOV64_REG(BPF_REG_7, BPF_REG_8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3), + BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22), + BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23), + BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr_unpriv = "R0 pointer arithmetic prohibited", + .result = ACCEPT, + .result_unpriv = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, + }, + { + "map element value with unaligned load", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3), + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2), + BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5), + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr_unpriv = "R0 pointer arithmetic prohibited", + .result = ACCEPT, + .result_unpriv = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, + }, + { + "map element value illegal alu op, 1", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr_unpriv = "R0 pointer arithmetic prohibited", + .errstr = "invalid mem 
access 'inv'", + .result = REJECT, + .result_unpriv = REJECT, + }, + { + "map element value illegal alu op, 2", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr_unpriv = "R0 pointer arithmetic prohibited", + .errstr = "invalid mem access 'inv'", + .result = REJECT, + .result_unpriv = REJECT, + }, + { + "map element value illegal alu op, 3", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr_unpriv = "R0 pointer arithmetic prohibited", + .errstr = "invalid mem access 'inv'", + .result = REJECT, + .result_unpriv = REJECT, + }, + { + "map element value illegal alu op, 4", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr_unpriv = "R0 pointer arithmetic prohibited", + .errstr = "invalid mem access 'inv'", + .result = REJECT, + .result_unpriv = REJECT, + }, + { + "map element value illegal alu op, 5", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), + BPF_MOV64_IMM(BPF_REG_3, 4096), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), + BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr_unpriv = "R0 invalid mem access 'inv'", + .errstr = "R0 invalid mem access 'inv'", + .result = REJECT, + .result_unpriv = REJECT, + }, + { + "map element value is preserved across register spilling", .insns = { BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), @@ -3981,6 +4248,7 @@ static struct bpf_test tests[] = { .errstr_unpriv = "R0 pointer arithmetic prohibited", .result = ACCEPT, .result_unpriv = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "helper access to variable memory: stack, bitwise AND + JMP, correct bounds", @@ -4419,6 +4687,7 @@ static struct bpf_test tests[] = { .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", .result = REJECT, .result_unpriv = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "invalid range check", @@ -4450,6 +4719,7 @@ static struct bpf_test tests[] = { .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", .result = REJECT, .result_unpriv = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, } }; @@ -4528,11 +4798,11 @@ static void 
do_test_fixup(struct bpf_test *test, struct bpf_insn *prog, static void do_test_single(struct bpf_test *test, bool unpriv, int *passes, int *errors) { + int fd_prog, expected_ret, reject_from_alignment; struct bpf_insn *prog = test->insns; int prog_len = probe_filter_length(prog); int prog_type = test->prog_type; int fd_f1 = -1, fd_f2 = -1, fd_f3 = -1; - int fd_prog, expected_ret; const char *expected_err; do_test_fixup(test, prog, &fd_f1, &fd_f2, &fd_f3); @@ -4545,8 +4815,19 @@ static void do_test_single(struct bpf_test *test, bool unpriv, test->result_unpriv : test->result; expected_err = unpriv && test->errstr_unpriv ? test->errstr_unpriv : test->errstr; + + reject_from_alignment = fd_prog < 0 && + (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) && + strstr(bpf_vlog, "Unknown alignment."); +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + if (reject_from_alignment) { + printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n", + strerror(errno)); + goto fail_log; + } +#endif if (expected_ret == ACCEPT) { - if (fd_prog < 0) { + if (fd_prog < 0 && !reject_from_alignment) { printf("FAIL\nFailed to load prog '%s'!\n", strerror(errno)); goto fail_log; @@ -4556,14 +4837,15 @@ static void do_test_single(struct bpf_test *test, bool unpriv, printf("FAIL\nUnexpected success to load!\n"); goto fail_log; } - if (!strstr(bpf_vlog, expected_err)) { + if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) { printf("FAIL\nUnexpected error message!\n"); goto fail_log; } } (*passes)++; - printf("OK\n"); + printf("OK%s\n", reject_from_alignment ? + " (NOTE: reject due to unknown alignment)" : ""); close_fds: close(fd_prog); close(fd_f1); @@ -4583,10 +4865,12 @@ static bool is_admin(void) cap_flag_value_t sysadmin = CAP_CLEAR; const cap_value_t cap_val = CAP_SYS_ADMIN; +#ifdef CAP_IS_SUPPORTED if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) { perror("cap_get_flag"); return false; } +#endif caps = cap_get_proc(); if (!caps) { perror("cap_get_proc"); diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk index ce96d80ad64f4d..775c589ac3c0a2 100644 --- a/tools/testing/selftests/lib.mk +++ b/tools/testing/selftests/lib.mk @@ -2,6 +2,10 @@ # Makefile can operate with or without the kbuild infrastructure. 
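For reference, the gist of the F_NEEDS_EFFICIENT_UNALIGNED_ACCESS handling added to test_verifier.c above can be reduced to the standalone sketch below. It is not the selftest itself; the struct and function names are illustrative only, and only the idea is taken from the patch: a test marked as needing efficient unaligned access may be rejected with "Unknown alignment." on strict-alignment architectures, and that rejection is not counted as a failure there.

/*
 * Illustrative sketch of the alignment-flag pattern (names are made up).
 */
#include <stdbool.h>
#include <string.h>

struct case_result {
	bool loaded;            /* the verifier accepted the program */
	const char *log;        /* verifier log captured by the harness */
	bool needs_unaligned;   /* test relies on efficient unaligned access */
};

static bool rejected_for_alignment(const struct case_result *r)
{
	return !r->loaded && r->needs_unaligned &&
	       strstr(r->log, "Unknown alignment.") != NULL;
}

static bool case_passed(const struct case_result *r, bool expect_accept)
{
#ifdef HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* With efficient unaligned access, an alignment reject is a real bug. */
	if (rejected_for_alignment(r))
		return false;
#endif
	if (expect_accept)
		return r->loaded || rejected_for_alignment(r);
	/* Rejection expected: the program must not have loaded. */
	return !r->loaded;
}

In the patch itself the equivalent check additionally compares the verifier log against the expected error string; the sketch only captures how the alignment-based rejection is tolerated on architectures without CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS.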
CC := $(CROSS_COMPILE)gcc +ifeq (0,$(MAKELEVEL)) +OUTPUT := $(shell pwd) +endif + TEST_GEN_PROGS := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS)) TEST_GEN_FILES := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_FILES)) diff --git a/tools/testing/selftests/powerpc/harness.c b/tools/testing/selftests/powerpc/harness.c index 248a820048dfe8..66d31de60b9ae9 100644 --- a/tools/testing/selftests/powerpc/harness.c +++ b/tools/testing/selftests/powerpc/harness.c @@ -114,9 +114,11 @@ int test_harness(int (test_function)(void), char *name) rc = run_test(test_function, name); - if (rc == MAGIC_SKIP_RETURN_VALUE) + if (rc == MAGIC_SKIP_RETURN_VALUE) { test_skip(name); - else + /* so that skipped test is not marked as failed */ + rc = 0; + } else test_finish(name, rc); return rc; diff --git a/tools/testing/selftests/powerpc/include/vsx_asm.h b/tools/testing/selftests/powerpc/include/vsx_asm.h index d828bfb6ef2d9a..54064ced9e95b3 100644 --- a/tools/testing/selftests/powerpc/include/vsx_asm.h +++ b/tools/testing/selftests/powerpc/include/vsx_asm.h @@ -16,56 +16,56 @@ */ FUNC_START(load_vsx) li r5,0 - lxvx vs20,r5,r3 + lxvd2x vs20,r5,r3 addi r5,r5,16 - lxvx vs21,r5,r3 + lxvd2x vs21,r5,r3 addi r5,r5,16 - lxvx vs22,r5,r3 + lxvd2x vs22,r5,r3 addi r5,r5,16 - lxvx vs23,r5,r3 + lxvd2x vs23,r5,r3 addi r5,r5,16 - lxvx vs24,r5,r3 + lxvd2x vs24,r5,r3 addi r5,r5,16 - lxvx vs25,r5,r3 + lxvd2x vs25,r5,r3 addi r5,r5,16 - lxvx vs26,r5,r3 + lxvd2x vs26,r5,r3 addi r5,r5,16 - lxvx vs27,r5,r3 + lxvd2x vs27,r5,r3 addi r5,r5,16 - lxvx vs28,r5,r3 + lxvd2x vs28,r5,r3 addi r5,r5,16 - lxvx vs29,r5,r3 + lxvd2x vs29,r5,r3 addi r5,r5,16 - lxvx vs30,r5,r3 + lxvd2x vs30,r5,r3 addi r5,r5,16 - lxvx vs31,r5,r3 + lxvd2x vs31,r5,r3 blr FUNC_END(load_vsx) FUNC_START(store_vsx) li r5,0 - stxvx vs20,r5,r3 + stxvd2x vs20,r5,r3 addi r5,r5,16 - stxvx vs21,r5,r3 + stxvd2x vs21,r5,r3 addi r5,r5,16 - stxvx vs22,r5,r3 + stxvd2x vs22,r5,r3 addi r5,r5,16 - stxvx vs23,r5,r3 + stxvd2x vs23,r5,r3 addi r5,r5,16 - stxvx vs24,r5,r3 + stxvd2x vs24,r5,r3 addi r5,r5,16 - stxvx vs25,r5,r3 + stxvd2x vs25,r5,r3 addi r5,r5,16 - stxvx vs26,r5,r3 + stxvd2x vs26,r5,r3 addi r5,r5,16 - stxvx vs27,r5,r3 + stxvd2x vs27,r5,r3 addi r5,r5,16 - stxvx vs28,r5,r3 + stxvd2x vs28,r5,r3 addi r5,r5,16 - stxvx vs29,r5,r3 + stxvd2x vs29,r5,r3 addi r5,r5,16 - stxvx vs30,r5,r3 + stxvd2x vs30,r5,r3 addi r5,r5,16 - stxvx vs31,r5,r3 + stxvd2x vs31,r5,r3 blr FUNC_END(store_vsx) diff --git a/tools/testing/selftests/splice/Makefile b/tools/testing/selftests/splice/Makefile new file mode 100644 index 00000000000000..de51f439d4a6a3 --- /dev/null +++ b/tools/testing/selftests/splice/Makefile @@ -0,0 +1,8 @@ +TEST_PROGS := default_file_splice_read.sh +EXTRA := default_file_splice_read +all: $(TEST_PROGS) $(EXTRA) + +include ../lib.mk + +clean: + rm -fr $(TEST_PROGS) $(EXTRA) diff --git a/tools/testing/selftests/splice/default_file_splice_read.c b/tools/testing/selftests/splice/default_file_splice_read.c new file mode 100644 index 00000000000000..01dd6091554c3d --- /dev/null +++ b/tools/testing/selftests/splice/default_file_splice_read.c @@ -0,0 +1,8 @@ +#define _GNU_SOURCE +#include + +int main(int argc, char **argv) +{ + splice(0, 0, 1, 0, 1<<30, 0); + return 0; +} diff --git a/tools/testing/selftests/splice/default_file_splice_read.sh b/tools/testing/selftests/splice/default_file_splice_read.sh new file mode 100755 index 00000000000000..1ea2adeabc946d --- /dev/null +++ b/tools/testing/selftests/splice/default_file_splice_read.sh @@ -0,0 +1,7 @@ +#!/bin/sh +n=`./default_file_splice_read +#include +#include 
+#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int nerrs = 0; + +static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *), + int flags) +{ + struct sigaction sa; + memset(&sa, 0, sizeof(sa)); + sa.sa_sigaction = handler; + sa.sa_flags = SA_SIGINFO | flags; + sigemptyset(&sa.sa_mask); + if (sigaction(sig, &sa, 0)) + err(1, "sigaction"); + +} + +static void clearhandler(int sig) +{ + struct sigaction sa; + memset(&sa, 0, sizeof(sa)); + sa.sa_handler = SIG_DFL; + sigemptyset(&sa.sa_mask); + if (sigaction(sig, &sa, 0)) + err(1, "sigaction"); +} + +static jmp_buf jmpbuf; + +static void sigsegv(int sig, siginfo_t *si, void *ctx_void) +{ + siglongjmp(jmpbuf, 1); +} + +static bool try_outb(unsigned short port) +{ + sethandler(SIGSEGV, sigsegv, SA_RESETHAND); + if (sigsetjmp(jmpbuf, 1) != 0) { + return false; + } else { + asm volatile ("outb %%al, %w[port]" + : : [port] "Nd" (port), "a" (0)); + return true; + } + clearhandler(SIGSEGV); +} + +static void expect_ok(unsigned short port) +{ + if (!try_outb(port)) { + printf("[FAIL]\toutb to 0x%02hx failed\n", port); + exit(1); + } + + printf("[OK]\toutb to 0x%02hx worked\n", port); +} + +static void expect_gp(unsigned short port) +{ + if (try_outb(port)) { + printf("[FAIL]\toutb to 0x%02hx worked\n", port); + exit(1); + } + + printf("[OK]\toutb to 0x%02hx failed\n", port); +} + +int main(void) +{ + cpu_set_t cpuset; + CPU_ZERO(&cpuset); + CPU_SET(0, &cpuset); + if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0) + err(1, "sched_setaffinity to CPU 0"); + + expect_gp(0x80); + expect_gp(0xed); + + /* + * Probe for ioperm support. Note that clearing ioperm bits + * works even as nonroot. + */ + printf("[RUN]\tenable 0x80\n"); + if (ioperm(0x80, 1, 1) != 0) { + printf("[OK]\tioperm(0x80, 1, 1) failed (%d) -- try running as root\n", + errno); + return 0; + } + expect_ok(0x80); + expect_gp(0xed); + + printf("[RUN]\tdisable 0x80\n"); + if (ioperm(0x80, 1, 0) != 0) { + printf("[FAIL]\tioperm(0x80, 1, 0) failed (%d)", errno); + return 1; + } + expect_gp(0x80); + expect_gp(0xed); + + /* Make sure that fork() preserves ioperm. */ + if (ioperm(0x80, 1, 1) != 0) { + printf("[FAIL]\tioperm(0x80, 1, 0) failed (%d)", errno); + return 1; + } + + pid_t child = fork(); + if (child == -1) + err(1, "fork"); + + if (child == 0) { + printf("[RUN]\tchild: check that we inherited permissions\n"); + expect_ok(0x80); + expect_gp(0xed); + return 0; + } else { + int status; + if (waitpid(child, &status, 0) != child || + !WIFEXITED(status)) { + printf("[FAIL]\tChild died\n"); + nerrs++; + } else if (WEXITSTATUS(status) != 0) { + printf("[FAIL]\tChild failed\n"); + nerrs++; + } else { + printf("[OK]\tChild succeeded\n"); + } + } + + /* Test the capability checks. 
*/ + + printf("\tDrop privileges\n"); + if (setresuid(1, 1, 1) != 0) { + printf("[WARN]\tDropping privileges failed\n"); + return 0; + } + + printf("[RUN]\tdisable 0x80\n"); + if (ioperm(0x80, 1, 0) != 0) { + printf("[FAIL]\tioperm(0x80, 1, 0) failed (%d)", errno); + return 1; + } + printf("[OK]\tit worked\n"); + + printf("[RUN]\tenable 0x80 again\n"); + if (ioperm(0x80, 1, 1) == 0) { + printf("[FAIL]\tit succeeded but should have failed.\n"); + return 1; + } + printf("[OK]\tit failed\n"); + return 0; +} diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c index 4af47079cf0430..f6121612e769f5 100644 --- a/tools/testing/selftests/x86/ldt_gdt.c +++ b/tools/testing/selftests/x86/ldt_gdt.c @@ -45,6 +45,12 @@ #define AR_DB (1 << 22) #define AR_G (1 << 23) +#ifdef __x86_64__ +# define INT80_CLOBBERS "r8", "r9", "r10", "r11" +#else +# define INT80_CLOBBERS +#endif + static int nerrs; /* Points to an array of 1024 ints, each holding its own index. */ @@ -588,7 +594,7 @@ static int invoke_set_thread_area(void) asm volatile ("int $0x80" : "=a" (ret), "+m" (low_user_desc) : "a" (243), "b" (low_user_desc) - : "flags"); + : INT80_CLOBBERS); return ret; } @@ -657,7 +663,7 @@ static void test_gdt_invalidation(void) "+a" (eax) : "m" (low_user_desc_clear), [arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear) - : "flags"); + : INT80_CLOBBERS); if (sel != 0) { result = "FAIL"; @@ -688,7 +694,7 @@ static void test_gdt_invalidation(void) "+a" (eax) : "m" (low_user_desc_clear), [arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear) - : "flags"); + : INT80_CLOBBERS); if (sel != 0) { result = "FAIL"; @@ -721,7 +727,7 @@ static void test_gdt_invalidation(void) "+a" (eax) : "m" (low_user_desc_clear), [arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear) - : "flags"); + : INT80_CLOBBERS); #ifdef __x86_64__ syscall(SYS_arch_prctl, ARCH_GET_FS, &new_base); @@ -774,7 +780,7 @@ static void test_gdt_invalidation(void) "+a" (eax) : "m" (low_user_desc_clear), [arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear) - : "flags"); + : INT80_CLOBBERS); #ifdef __x86_64__ syscall(SYS_arch_prctl, ARCH_GET_GS, &new_base); diff --git a/tools/testing/selftests/x86/ptrace_syscall.c b/tools/testing/selftests/x86/ptrace_syscall.c index b037ce9cf116b1..eaea9243970840 100644 --- a/tools/testing/selftests/x86/ptrace_syscall.c +++ b/tools/testing/selftests/x86/ptrace_syscall.c @@ -58,7 +58,8 @@ static void do_full_int80(struct syscall_args32 *args) asm volatile ("int $0x80" : "+a" (args->nr), "+b" (args->arg0), "+c" (args->arg1), "+d" (args->arg2), - "+S" (args->arg3), "+D" (args->arg4), "+r" (bp)); + "+S" (args->arg3), "+D" (args->arg4), "+r" (bp) + : : "r8", "r9", "r10", "r11"); args->arg5 = bp; #else sys32_helper(args, int80_and_ret); diff --git a/tools/testing/selftests/x86/single_step_syscall.c b/tools/testing/selftests/x86/single_step_syscall.c index 50c26358e8b7ec..a48da95c18fdf1 100644 --- a/tools/testing/selftests/x86/single_step_syscall.c +++ b/tools/testing/selftests/x86/single_step_syscall.c @@ -56,9 +56,11 @@ static volatile sig_atomic_t sig_traps; #ifdef __x86_64__ # define REG_IP REG_RIP # define WIDTH "q" +# define INT80_CLOBBERS "r8", "r9", "r10", "r11" #else # define REG_IP REG_EIP # define WIDTH "l" +# define INT80_CLOBBERS #endif static unsigned long get_eflags(void) @@ -140,7 +142,8 @@ int main() printf("[RUN]\tSet TF and check int80\n"); set_eflags(get_eflags() | X86_EFLAGS_TF); - asm volatile ("int $0x80" : "=a" (tmp) : "a" (SYS_getpid)); + asm 
volatile ("int $0x80" : "=a" (tmp) : "a" (SYS_getpid) + : INT80_CLOBBERS); check_result(); /* diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c index 571b64a01c5097..8d1da1af4b09e4 100644 --- a/virt/kvm/arm/vgic/vgic-its.c +++ b/virt/kvm/arm/vgic/vgic-its.c @@ -360,29 +360,6 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu) return ret; } -static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu, - struct vgic_its *its, - gpa_t addr, unsigned int len) -{ - u32 reg = 0; - - mutex_lock(&its->cmd_lock); - if (its->creadr == its->cwriter) - reg |= GITS_CTLR_QUIESCENT; - if (its->enabled) - reg |= GITS_CTLR_ENABLE; - mutex_unlock(&its->cmd_lock); - - return reg; -} - -static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its, - gpa_t addr, unsigned int len, - unsigned long val) -{ - its->enabled = !!(val & GITS_CTLR_ENABLE); -} - static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm, struct vgic_its *its, gpa_t addr, unsigned int len) @@ -1161,33 +1138,16 @@ static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its, #define ITS_CMD_SIZE 32 #define ITS_CMD_OFFSET(reg) ((reg) & GENMASK(19, 5)) -/* - * By writing to CWRITER the guest announces new commands to be processed. - * To avoid any races in the first place, we take the its_cmd lock, which - * protects our ring buffer variables, so that there is only one user - * per ITS handling commands at a given time. - */ -static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its, - gpa_t addr, unsigned int len, - unsigned long val) +/* Must be called with the cmd_lock held. */ +static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its) { gpa_t cbaser; u64 cmd_buf[4]; - u32 reg; - if (!its) - return; - - mutex_lock(&its->cmd_lock); - - reg = update_64bit_reg(its->cwriter, addr & 7, len, val); - reg = ITS_CMD_OFFSET(reg); - if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) { - mutex_unlock(&its->cmd_lock); + /* Commands are only processed when the ITS is enabled. */ + if (!its->enabled) return; - } - its->cwriter = reg; cbaser = CBASER_ADDRESS(its->cbaser); while (its->cwriter != its->creadr) { @@ -1207,6 +1167,34 @@ static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its, if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser)) its->creadr = 0; } +} + +/* + * By writing to CWRITER the guest announces new commands to be processed. + * To avoid any races in the first place, we take the its_cmd lock, which + * protects our ring buffer variables, so that there is only one user + * per ITS handling commands at a given time. 
+ */ +static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its, + gpa_t addr, unsigned int len, + unsigned long val) +{ + u64 reg; + + if (!its) + return; + + mutex_lock(&its->cmd_lock); + + reg = update_64bit_reg(its->cwriter, addr & 7, len, val); + reg = ITS_CMD_OFFSET(reg); + if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) { + mutex_unlock(&its->cmd_lock); + return; + } + its->cwriter = reg; + + vgic_its_process_commands(kvm, its); mutex_unlock(&its->cmd_lock); } @@ -1287,6 +1275,39 @@ static void vgic_mmio_write_its_baser(struct kvm *kvm, *regptr = reg; } +static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu, + struct vgic_its *its, + gpa_t addr, unsigned int len) +{ + u32 reg = 0; + + mutex_lock(&its->cmd_lock); + if (its->creadr == its->cwriter) + reg |= GITS_CTLR_QUIESCENT; + if (its->enabled) + reg |= GITS_CTLR_ENABLE; + mutex_unlock(&its->cmd_lock); + + return reg; +} + +static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its, + gpa_t addr, unsigned int len, + unsigned long val) +{ + mutex_lock(&its->cmd_lock); + + its->enabled = !!(val & GITS_CTLR_ENABLE); + + /* + * Try to process any pending commands. This function bails out early + * if the ITS is disabled or no commands have been queued. + */ + vgic_its_process_commands(kvm, its); + + mutex_unlock(&its->cmd_lock); +} + #define REGISTER_ITS_DESC(off, rd, wr, length, acc) \ { \ .reg_offset = off, \ diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c index 3654b4c835ef73..2a5db135272215 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.c +++ b/virt/kvm/arm/vgic/vgic-mmio.c @@ -180,21 +180,37 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu, static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, bool new_active_state) { + struct kvm_vcpu *requester_vcpu; spin_lock(&irq->irq_lock); + + /* + * The vcpu parameter here can mean multiple things depending on how + * this function is called; when handling a trap from the kernel it + * depends on the GIC version, and these functions are also called as + * part of save/restore from userspace. + * + * Therefore, we have to figure out the requester in a reliable way. + * + * When accessing VGIC state from user space, the requester_vcpu is + * NULL, which is fine, because we guarantee that no VCPUs are running + * when accessing VGIC state from user space so irq->vcpu->cpu is + * always -1. + */ + requester_vcpu = kvm_arm_get_running_vcpu(); + /* * If this virtual IRQ was written into a list register, we * have to make sure the CPU that runs the VCPU thread has - * synced back LR state to the struct vgic_irq. We can only - * know this for sure, when either this irq is not assigned to - * anyone's AP list anymore, or the VCPU thread is not - * running on any CPUs. + * synced back the LR state to the struct vgic_irq. * - * In the opposite case, we know the VCPU thread may be on its - * way back from the guest and still has to sync back this - * IRQ, so we release and re-acquire the spin_lock to let the - * other thread sync back the IRQ. + * As long as the conditions below are true, we know the VCPU thread + * may be on its way back from the guest (we kicked the VCPU thread in + * vgic_change_active_prepare) and still has to sync back this IRQ, + * so we release and re-acquire the spin_lock to let the other thread + * sync back the IRQ. 
*/ while (irq->vcpu && /* IRQ may have state in an LR somewhere */ + irq->vcpu != requester_vcpu && /* Current thread is not the VCPU thread */ irq->vcpu->cpu != -1) /* VCPU thread is running */ cond_resched_lock(&irq->irq_lock); diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c index edc6ee2dc852e9..be0f4c3e0142e0 100644 --- a/virt/kvm/arm/vgic/vgic-v3.c +++ b/virt/kvm/arm/vgic/vgic-v3.c @@ -229,10 +229,13 @@ void vgic_v3_enable(struct kvm_vcpu *vcpu) /* * If we are emulating a GICv3, we do it in an non-GICv2-compatible * way, so we force SRE to 1 to demonstrate this to the guest. + * Also, we don't support any form of IRQ/FIQ bypass. * This goes with the spec allowing the value to be RAO/WI. */ if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) { - vgic_v3->vgic_sre = ICC_SRE_EL1_SRE; + vgic_v3->vgic_sre = (ICC_SRE_EL1_DIB | + ICC_SRE_EL1_DFB | + ICC_SRE_EL1_SRE); vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE; } else { vgic_v3->vgic_sre = 0; diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c index 2366177172f67c..bb298a200cd3f2 100644 --- a/virt/kvm/async_pf.c +++ b/virt/kvm/async_pf.c @@ -24,6 +24,7 @@ #include #include #include +#include #include "async_pf.h" #include diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c index a29786dd952210..4d28a9ddbee010 100644 --- a/virt/kvm/eventfd.c +++ b/virt/kvm/eventfd.c @@ -870,7 +870,8 @@ kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx, continue; kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev); - kvm->buses[bus_idx]->ioeventfd_count--; + if (kvm->buses[bus_idx]) + kvm->buses[bus_idx]->ioeventfd_count--; ioeventfd_release(p); ret = 0; break; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 35f71409d9ee4d..88257b311cb579 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -32,7 +32,9 @@ #include #include #include -#include +#include +#include +#include #include #include #include @@ -617,7 +619,7 @@ static struct kvm *kvm_create_vm(unsigned long type) mutex_init(&kvm->lock); mutex_init(&kvm->irq_lock); mutex_init(&kvm->slots_lock); - atomic_set(&kvm->users_count, 1); + refcount_set(&kvm->users_count, 1); INIT_LIST_HEAD(&kvm->devices); r = kvm_arch_init_vm(kvm, type); @@ -725,8 +727,11 @@ static void kvm_destroy_vm(struct kvm *kvm) list_del(&kvm->vm_list); spin_unlock(&kvm_lock); kvm_free_irq_routing(kvm); - for (i = 0; i < KVM_NR_BUSES; i++) - kvm_io_bus_destroy(kvm->buses[i]); + for (i = 0; i < KVM_NR_BUSES; i++) { + if (kvm->buses[i]) + kvm_io_bus_destroy(kvm->buses[i]); + kvm->buses[i] = NULL; + } kvm_coalesced_mmio_free(kvm); #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); @@ -747,13 +752,13 @@ static void kvm_destroy_vm(struct kvm *kvm) void kvm_get_kvm(struct kvm *kvm) { - atomic_inc(&kvm->users_count); + refcount_inc(&kvm->users_count); } EXPORT_SYMBOL_GPL(kvm_get_kvm); void kvm_put_kvm(struct kvm *kvm) { - if (atomic_dec_and_test(&kvm->users_count)) + if (refcount_dec_and_test(&kvm->users_count)) kvm_destroy_vm(kvm); } EXPORT_SYMBOL_GPL(kvm_put_kvm); @@ -1060,7 +1065,7 @@ int __kvm_set_memory_region(struct kvm *kvm, * changes) is disallowed above, so any other attribute changes getting * here can be skipped. 
*/ - if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { + if (as_id == 0 && (change == KVM_MR_CREATE || change == KVM_MR_MOVE)) { r = kvm_iommu_map_pages(kvm, &new); return r; } @@ -3472,6 +3477,8 @@ int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, }; bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); + if (!bus) + return -ENOMEM; r = __kvm_io_bus_write(vcpu, bus, &range, val); return r < 0 ? r : 0; } @@ -3489,6 +3496,8 @@ int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, }; bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); + if (!bus) + return -ENOMEM; /* First try the device referenced by cookie. */ if ((cookie >= 0) && (cookie < bus->dev_count) && @@ -3539,6 +3548,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, }; bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); + if (!bus) + return -ENOMEM; r = __kvm_io_bus_read(vcpu, bus, &range, val); return r < 0 ? r : 0; } @@ -3551,6 +3562,9 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, struct kvm_io_bus *new_bus, *bus; bus = kvm->buses[bus_idx]; + if (!bus) + return -ENOMEM; + /* exclude ioeventfd which is limited by maximum fd */ if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1) return -ENOSPC; @@ -3570,37 +3584,41 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, } /* Caller must hold slots_lock. */ -int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, - struct kvm_io_device *dev) +void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, + struct kvm_io_device *dev) { - int i, r; + int i; struct kvm_io_bus *new_bus, *bus; bus = kvm->buses[bus_idx]; - r = -ENOENT; + if (!bus) + return; + for (i = 0; i < bus->dev_count; i++) if (bus->range[i].dev == dev) { - r = 0; break; } - if (r) - return r; + if (i == bus->dev_count) + return; new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) * sizeof(struct kvm_io_range)), GFP_KERNEL); - if (!new_bus) - return -ENOMEM; + if (!new_bus) { + pr_err("kvm: failed to shrink bus, removing it completely\n"); + goto broken; + } memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); new_bus->dev_count--; memcpy(new_bus->range + i, bus->range + i + 1, (new_bus->dev_count - i) * sizeof(struct kvm_io_range)); +broken: rcu_assign_pointer(kvm->buses[bus_idx], new_bus); synchronize_srcu_expedited(&kvm->srcu); kfree(bus); - return r; + return; } struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, @@ -3613,6 +3631,8 @@ struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, srcu_idx = srcu_read_lock(&kvm->srcu); bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); + if (!bus) + goto out_unlock; dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1); if (dev_idx < 0) @@ -3639,7 +3659,7 @@ static int kvm_debugfs_open(struct inode *inode, struct file *file, * To avoid the race between open and the removal of the debugfs * directory we test against the users count. */ - if (!atomic_add_unless(&stat_data->kvm->users_count, 1, 0)) + if (!refcount_inc_not_zero(&stat_data->kvm->users_count)) return -ENOENT; if (simple_attr_open(inode, file, get, set, fmt)) {
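The kvm_main.c hunks above convert kvm->users_count from atomic_t to refcount_t, with kvm_get_kvm(), kvm_put_kvm() and the debugfs open path using refcount_inc(), refcount_dec_and_test() and refcount_inc_not_zero() respectively. As a rough, standalone illustration of those three primitives (not the kernel implementation), a userspace analogue might look like this:

/*
 * Userspace analogue of the refcount_t pattern the patch converts
 * kvm->users_count to.  This is only an illustration: the kernel's
 * refcount_t additionally saturates and warns on overflow/underflow,
 * which is omitted here.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct obj {
	atomic_int users;                       /* plays the role of refcount_t */
};

static void obj_get(struct obj *o)              /* ~ refcount_inc() */
{
	atomic_fetch_add(&o->users, 1);
}

static bool obj_get_unless_zero(struct obj *o)  /* ~ refcount_inc_not_zero() */
{
	int old = atomic_load(&o->users);

	while (old != 0) {
		/* Never resurrect an object whose count already hit zero. */
		if (atomic_compare_exchange_weak(&o->users, &old, old + 1))
			return true;
	}
	return false;
}

static bool obj_put(struct obj *o)              /* ~ refcount_dec_and_test() */
{
	/* Returns true for the final reference; the caller then destroys. */
	return atomic_fetch_sub(&o->users, 1) == 1;
}

The "inc_not_zero" form is what lets the debugfs open path above take a reference only if the VM is still alive, rather than reviving a kvm that is already being torn down.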