-rw-r--r--  Documentation/ABI/testing/sysfs-devices-system-cpu  11
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt  6
-rw-r--r--  Documentation/devicetree/bindings/ipmi/aspeed,ast2400-ibt-bmc.txt  28
-rw-r--r--  Documentation/devicetree/bindings/ipmi/aspeed,ast2400-ibt-bmc.yaml  44
-rw-r--r--  Documentation/devicetree/bindings/ipmi/npcm7xx-kcs-bmc.txt  40
-rw-r--r--  Documentation/devicetree/bindings/ipmi/nuvoton,npcm750-kcs-bmc.yaml  55
-rw-r--r--  Documentation/devicetree/bindings/phy/mediatek,ufs-phy.yaml  3
-rw-r--r--  Documentation/devicetree/bindings/ufs/amd,versal2-ufs.yaml  61
-rw-r--r--  Documentation/devicetree/bindings/ufs/mediatek,ufs.yaml  3
-rw-r--r--  Documentation/devicetree/bindings/ufs/qcom,ufs.yaml  3
-rw-r--r--  Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml  3
-rw-r--r--  Documentation/driver-api/driver-model/devres.rst  1
-rw-r--r--  MAINTAINERS  21
-rw-r--r--  arch/arm64/include/asm/topology.h  3
-rw-r--r--  arch/arm64/kernel/topology.c  101
-rw-r--r--  drivers/ata/libata-acpi.c  67
-rw-r--r--  drivers/ata/libata-core.c  13
-rw-r--r--  drivers/ata/libata-scsi.c  1
-rw-r--r--  drivers/ata/libata.h  4
-rw-r--r--  drivers/base/arch_topology.c  96
-rw-r--r--  drivers/base/base.h  16
-rw-r--r--  drivers/base/bus.c  3
-rw-r--r--  drivers/base/core.c  2
-rw-r--r--  drivers/base/cpu.c  26
-rw-r--r--  drivers/base/dd.c  12
-rw-r--r--  drivers/base/devres.c  25
-rw-r--r--  drivers/base/firmware_loader/sysfs.c  10
-rw-r--r--  drivers/base/firmware_loader/sysfs_upload.c  6
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c  24
-rw-r--r--  drivers/char/tpm/tpm2-cmd.c  42
-rw-r--r--  drivers/char/tpm/tpm2-sessions.c  199
-rw-r--r--  drivers/cpufreq/rcpufreq_dt.rs  4
-rw-r--r--  drivers/firmware/xilinx/Makefile  2
-rw-r--r--  drivers/firmware/xilinx/zynqmp-ufs.c  118
-rw-r--r--  drivers/firmware/xilinx/zynqmp.c  46
-rw-r--r--  drivers/gpu/drm/nova/driver.rs  4
-rw-r--r--  drivers/gpu/drm/nova/file.rs  2
-rw-r--r--  drivers/gpu/drm/tyr/driver.rs  4
-rw-r--r--  drivers/gpu/nova-core/driver.rs  50
-rw-r--r--  drivers/message/fusion/mptbase.c  7
-rw-r--r--  drivers/misc/Kconfig  2
-rw-r--r--  drivers/pwm/pwm_th1520.rs  4
-rw-r--r--  drivers/scsi/aacraid/linit.c  2
-rw-r--r--  drivers/scsi/advansys.c  3
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_init.c  3
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c  3
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c  2
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c  2
-rw-r--r--  drivers/scsi/fcoe/fcoe.c  2
-rw-r--r--  drivers/scsi/fnic/fnic_res.c  1
-rw-r--r--  drivers/scsi/hosts.c  19
-rw-r--r--  drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c  3
-rw-r--r--  drivers/scsi/isci/task.h  10
-rw-r--r--  drivers/scsi/lpfc/lpfc.h  4
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c  36
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h  3
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c  249
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c  6
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h  25
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c  14
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c  21
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c  79
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h  2
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.h  17
-rw-r--r--  drivers/scsi/pm8001/pm8001_init.c  2
-rw-r--r--  drivers/scsi/qedf/qedf_main.c  15
-rw-r--r--  drivers/scsi/qedi/qedi_main.c  2
-rw-r--r--  drivers/scsi/qla1280.c  35
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c  3
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h  1
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h  2
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c  1
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c  32
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c  2
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c  4
-rw-r--r--  drivers/scsi/qla2xxx/qla_nvme.c  2
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c  39
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c  1777
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.h  112
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c  17
-rw-r--r--  drivers/scsi/qla4xxx/ql4_mbx.c  4
-rw-r--r--  drivers/scsi/scsi.c  12
-rw-r--r--  drivers/scsi/scsi_debug.c  132
-rw-r--r--  drivers/scsi/scsi_error.c  3
-rw-r--r--  drivers/scsi/scsi_lib.c  104
-rw-r--r--  drivers/scsi/scsi_logging.c  21
-rw-r--r--  drivers/scsi/scsi_pm.c  1
-rw-r--r--  drivers/scsi/scsi_priv.h  1
-rw-r--r--  drivers/scsi/scsi_scan.c  74
-rw-r--r--  drivers/scsi/scsi_sysfs.c  79
-rw-r--r--  drivers/scsi/scsi_transport_fc.c  5
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c  2
-rw-r--r--  drivers/scsi/sd.c  34
-rw-r--r--  drivers/scsi/sim710.c  2
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_init.c  49
-rw-r--r--  drivers/scsi/st.c  89
-rw-r--r--  drivers/scsi/stex.c  1
-rw-r--r--  drivers/target/sbp/sbp_target.c  8
-rw-r--r--  drivers/target/target_core_configfs.c  38
-rw-r--r--  drivers/target/target_core_device.c  24
-rw-r--r--  drivers/target/target_core_fabric_configfs.c  2
-rw-r--r--  drivers/target/target_core_file.c  4
-rw-r--r--  drivers/target/target_core_iblock.c  9
-rw-r--r--  drivers/target/target_core_internal.h  1
-rw-r--r--  drivers/target/target_core_sbc.c  51
-rw-r--r--  drivers/target/target_core_spc.c  49
-rw-r--r--  drivers/target/target_core_stat.c  268
-rw-r--r--  drivers/target/target_core_tpg.c  23
-rw-r--r--  drivers/target/target_core_transport.c  26
-rw-r--r--  drivers/target/target_core_xcopy.c  2
-rw-r--r--  drivers/target/tcm_fc/tfc_conf.c  2
-rw-r--r--  drivers/ufs/core/Makefile  1
-rw-r--r--  drivers/ufs/core/ufs-mcq.c  62
-rw-r--r--  drivers/ufs/core/ufs-rpmb.c  254
-rw-r--r--  drivers/ufs/core/ufs-sysfs.c  3
-rw-r--r--  drivers/ufs/core/ufs_bsg.c  2
-rw-r--r--  drivers/ufs/core/ufs_trace.h  1
-rw-r--r--  drivers/ufs/core/ufs_trace_types.h  1
-rw-r--r--  drivers/ufs/core/ufshcd-crypto.h  18
-rw-r--r--  drivers/ufs/core/ufshcd-priv.h  54
-rw-r--r--  drivers/ufs/core/ufshcd.c  934
-rw-r--r--  drivers/ufs/host/Kconfig  13
-rw-r--r--  drivers/ufs/host/Makefile  1
-rw-r--r--  drivers/ufs/host/ti-j721e-ufs.c  37
-rw-r--r--  drivers/ufs/host/ufs-amd-versal2.c  564
-rw-r--r--  drivers/ufs/host/ufs-mediatek.c  130
-rw-r--r--  drivers/ufs/host/ufs-mediatek.h  4
-rw-r--r--  drivers/ufs/host/ufs-qcom.c  3
-rw-r--r--  drivers/ufs/host/ufs-rockchip.c  20
-rw-r--r--  drivers/ufs/host/ufshcd-dwc.h  46
-rw-r--r--  fs/debugfs/inode.c  21
-rw-r--r--  fs/debugfs/internal.h  13
-rw-r--r--  fs/kernfs/dir.c  5
-rw-r--r--  fs/kernfs/mount.c  1
-rw-r--r--  fs/nfs/localio.c  48
-rw-r--r--  fs/sysfs/group.c  10
-rw-r--r--  include/linux/arch_topology.h  5
-rw-r--r--  include/linux/ata.h  1
-rw-r--r--  include/linux/device.h  19
-rw-r--r--  include/linux/device/devres.h  17
-rw-r--r--  include/linux/firmware/xlnx-zynqmp-ufs.h  38
-rw-r--r--  include/linux/firmware/xlnx-zynqmp.h  16
-rw-r--r--  include/linux/libata.h  76
-rw-r--r--  include/linux/mod_devicetable.h  2
-rw-r--r--  include/linux/platform_device.h  6
-rw-r--r--  include/linux/sysfs.h  48
-rw-r--r--  include/linux/tpm.h  38
-rw-r--r--  include/scsi/scsi_dbg.h  4
-rw-r--r--  include/scsi/scsi_device.h  30
-rw-r--r--  include/scsi/scsi_host.h  33
-rw-r--r--  include/target/target_core_backend.h  6
-rw-r--r--  include/target/target_core_base.h  26
-rw-r--r--  include/uapi/linux/media/amlogic/c3-isp-config.h  2
-rw-r--r--  include/ufs/ufs.h  5
-rw-r--r--  include/ufs/ufs_quirks.h  7
-rw-r--r--  include/ufs/ufshcd.h  29
-rw-r--r--  include/ufs/ufshci.h  25
-rw-r--r--  include/ufs/unipro.h  8
-rw-r--r--  lib/Kconfig.debug  9
-rw-r--r--  rust/bindings/bindings_helper.h  7
-rw-r--r--  rust/helpers/pci.c  14
-rw-r--r--  rust/helpers/time.c  5
-rw-r--r--  rust/kernel/auxiliary.rs  120
-rw-r--r--  rust/kernel/cpufreq.rs  4
-rw-r--r--  rust/kernel/debugfs.rs  110
-rw-r--r--  rust/kernel/debugfs/file_ops.rs  140
-rw-r--r--  rust/kernel/debugfs/traits.rs  238
-rw-r--r--  rust/kernel/device.rs  130
-rw-r--r--  rust/kernel/devres.rs  18
-rw-r--r--  rust/kernel/dma.rs  29
-rw-r--r--  rust/kernel/driver.rs  4
-rw-r--r--  rust/kernel/fs/file.rs  5
-rw-r--r--  rust/kernel/i2c.rs  586
-rw-r--r--  rust/kernel/io.rs  32
-rw-r--r--  rust/kernel/io/mem.rs  36
-rw-r--r--  rust/kernel/io/poll.rs  93
-rw-r--r--  rust/kernel/io/resource.rs  31
-rw-r--r--  rust/kernel/lib.rs  2
-rw-r--r--  rust/kernel/pci.rs  231
-rw-r--r--  rust/kernel/pci/id.rs  6
-rw-r--r--  rust/kernel/pci/io.rs  144
-rw-r--r--  rust/kernel/pci/irq.rs  252
-rw-r--r--  rust/kernel/platform.rs  63
-rw-r--r--  rust/kernel/scatterlist.rs  2
-rw-r--r--  rust/kernel/time/delay.rs  37
-rw-r--r--  rust/kernel/uaccess.rs  85
-rw-r--r--  rust/kernel/usb.rs  25
-rw-r--r--  rust/pin-init/src/lib.rs  87
-rw-r--r--  samples/kobject/kset-example.c  44
-rw-r--r--  samples/rust/Kconfig  25
-rw-r--r--  samples/rust/Makefile  2
-rw-r--r--  samples/rust/rust_debugfs.rs  34
-rw-r--r--  samples/rust/rust_debugfs_scoped.rs  14
-rw-r--r--  samples/rust/rust_dma.rs  37
-rw-r--r--  samples/rust/rust_driver_auxiliary.rs  59
-rw-r--r--  samples/rust/rust_driver_i2c.rs  74
-rw-r--r--  samples/rust/rust_driver_pci.rs  53
-rw-r--r--  samples/rust/rust_driver_platform.rs  6
-rw-r--r--  samples/rust/rust_driver_usb.rs  5
-rw-r--r--  samples/rust/rust_i2c_client.rs  147
-rw-r--r--  security/keys/trusted-keys/trusted_tpm2.c  41
201 files changed, 8200 insertions, 2417 deletions
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 8aed6d94c4cd..3a05604c21bf 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -764,6 +764,17 @@ Description:
participate in load balancing. These CPUs are set by
boot parameter "isolcpus=".
+What: /sys/devices/system/cpu/housekeeping
+Date: Oct 2025
+Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description:
+ (RO) the list of logical CPUs that are designated by the kernel as
+	"housekeeping". These CPUs are responsible for handling essential
+ system-wide background tasks, including RCU callbacks, delayed
+ timer callbacks, and unbound workqueues, minimizing scheduling
+ jitter on low-latency, isolated CPUs. These CPUs are set when boot
+ parameter "isolcpus=nohz" or "nohz_full=" is specified.
+
What: /sys/devices/system/cpu/crash_hotplug
Date: Aug 2023
Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 5692e19199b3..b242519f57da 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1211,12 +1211,8 @@ Kernel parameters
debugfs= [KNL,EARLY] This parameter enables what is exposed to
userspace and debugfs internal clients.
- Format: { on, no-mount, off }
+ Format: { on, off }
on: All functions are enabled.
- no-mount:
- Filesystem is not registered but kernel clients can
- access APIs and a crashkernel can be used to read
- its content. There is nothing to mount.
off: Filesystem is not registered and clients
get a -EPERM as result when trying to register files
or directories within debugfs.
diff --git a/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-ibt-bmc.txt b/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-ibt-bmc.txt
deleted file mode 100644
index 25f86da804b7..000000000000
--- a/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-ibt-bmc.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-* Aspeed BT (Block Transfer) IPMI interface
-
-The Aspeed SOCs (AST2400 and AST2500) are commonly used as BMCs
-(BaseBoard Management Controllers) and the BT interface can be used to
-perform in-band IPMI communication with their host.
-
-Required properties:
-
-- compatible : should be one of
- "aspeed,ast2400-ibt-bmc"
- "aspeed,ast2500-ibt-bmc"
- "aspeed,ast2600-ibt-bmc"
-- reg: physical address and size of the registers
-- clocks: clock for the device
-
-Optional properties:
-
-- interrupts: interrupt generated by the BT interface. without an
- interrupt, the driver will operate in poll mode.
-
-Example:
-
- ibt@1e789140 {
- compatible = "aspeed,ast2400-ibt-bmc";
- reg = <0x1e789140 0x18>;
- interrupts = <8>;
- clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
- };
diff --git a/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-ibt-bmc.yaml b/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-ibt-bmc.yaml
new file mode 100644
index 000000000000..c4f7cdbbe16b
--- /dev/null
+++ b/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-ibt-bmc.yaml
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ipmi/aspeed,ast2400-ibt-bmc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Aspeed Block Transfer (BT) IPMI interface
+
+maintainers:
+ - Joel Stanley <joel@jms.id.au>
+
+properties:
+ compatible:
+ enum:
+ - aspeed,ast2400-ibt-bmc
+ - aspeed,ast2500-ibt-bmc
+ - aspeed,ast2600-ibt-bmc
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/aspeed-clock.h>
+
+ bt@1e789140 {
+ compatible = "aspeed,ast2400-ibt-bmc";
+ reg = <0x1e789140 0x18>;
+ interrupts = <8>;
+ clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
+ };
diff --git a/Documentation/devicetree/bindings/ipmi/npcm7xx-kcs-bmc.txt b/Documentation/devicetree/bindings/ipmi/npcm7xx-kcs-bmc.txt
deleted file mode 100644
index 4fda76e63396..000000000000
--- a/Documentation/devicetree/bindings/ipmi/npcm7xx-kcs-bmc.txt
+++ /dev/null
@@ -1,40 +0,0 @@
-* Nuvoton NPCM KCS (Keyboard Controller Style) IPMI interface
-
-The Nuvoton SOCs (NPCM) are commonly used as BMCs
-(Baseboard Management Controllers) and the KCS interface can be
-used to perform in-band IPMI communication with their host.
-
-Required properties:
-- compatible : should be one of
- "nuvoton,npcm750-kcs-bmc"
- "nuvoton,npcm845-kcs-bmc", "nuvoton,npcm750-kcs-bmc"
-- interrupts : interrupt generated by the controller
-- kcs_chan : The KCS channel number in the controller
-
-Example:
-
- lpc_kcs: lpc_kcs@f0007000 {
- compatible = "nuvoton,npcm750-lpc-kcs", "simple-mfd", "syscon";
- reg = <0xf0007000 0x40>;
- reg-io-width = <1>;
-
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x0 0xf0007000 0x40>;
-
- kcs1: kcs1@0 {
- compatible = "nuvoton,npcm750-kcs-bmc";
- reg = <0x0 0x40>;
- interrupts = <0 9 4>;
- kcs_chan = <1>;
- status = "disabled";
- };
-
- kcs2: kcs2@0 {
- compatible = "nuvoton,npcm750-kcs-bmc";
- reg = <0x0 0x40>;
- interrupts = <0 9 4>;
- kcs_chan = <2>;
- status = "disabled";
- };
- };
diff --git a/Documentation/devicetree/bindings/ipmi/nuvoton,npcm750-kcs-bmc.yaml b/Documentation/devicetree/bindings/ipmi/nuvoton,npcm750-kcs-bmc.yaml
new file mode 100644
index 000000000000..fc5df1c5e3bc
--- /dev/null
+++ b/Documentation/devicetree/bindings/ipmi/nuvoton,npcm750-kcs-bmc.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ipmi/nuvoton,npcm750-kcs-bmc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Nuvoton NPCM KCS BMC
+
+maintainers:
+ - Avi Fishman <avifishman70@gmail.com>
+ - Tomer Maimon <tmaimon77@gmail.com>
+ - Tali Perry <tali.perry1@gmail.com>
+
+description:
+ The Nuvoton SOCs (NPCM) are commonly used as BMCs (Baseboard Management
+ Controllers) and the KCS interface can be used to perform in-band IPMI
+ communication with their host.
+
+properties:
+ compatible:
+ oneOf:
+ - const: nuvoton,npcm750-kcs-bmc
+ - items:
+ - enum:
+ - nuvoton,npcm845-kcs-bmc
+ - const: nuvoton,npcm750-kcs-bmc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ kcs_chan:
+ description: The KCS channel number in the controller
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 3
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - kcs_chan
+
+additionalProperties: false
+
+examples:
+ - |
+ kcs@0 {
+ compatible = "nuvoton,npcm750-kcs-bmc";
+ reg = <0x0 0x40>;
+ interrupts = <9 4>;
+ kcs_chan = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/mediatek,ufs-phy.yaml b/Documentation/devicetree/bindings/phy/mediatek,ufs-phy.yaml
index 3e62b5d4da61..6e2edd43fc2a 100644
--- a/Documentation/devicetree/bindings/phy/mediatek,ufs-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/mediatek,ufs-phy.yaml
@@ -8,8 +8,9 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: MediaTek Universal Flash Storage (UFS) M-PHY
maintainers:
- - Stanley Chu <stanley.chu@mediatek.com>
- Chunfeng Yun <chunfeng.yun@mediatek.com>
+ - Peter Wang <peter.wang@mediatek.com>
+ - Chaotian Jing <chaotian.jing@mediatek.com>
description: |
UFS M-PHY nodes are defined to describe on-chip UFS M-PHY hardware macro.
diff --git a/Documentation/devicetree/bindings/ufs/amd,versal2-ufs.yaml b/Documentation/devicetree/bindings/ufs/amd,versal2-ufs.yaml
new file mode 100644
index 000000000000..c00ec342d574
--- /dev/null
+++ b/Documentation/devicetree/bindings/ufs/amd,versal2-ufs.yaml
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ufs/amd,versal2-ufs.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: AMD Versal Gen 2 UFS Host Controller
+
+maintainers:
+ - Sai Krishna Potthuri <sai.krishna.potthuri@amd.com>
+
+allOf:
+ - $ref: ufs-common.yaml
+
+properties:
+ compatible:
+ const: amd,versal2-ufs
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: core
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 2
+
+ reset-names:
+ items:
+ - const: host
+ - const: phy
+
+required:
+ - reg
+ - clocks
+ - clock-names
+ - resets
+ - reset-names
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ ufs@f10b0000 {
+ compatible = "amd,versal2-ufs";
+ reg = <0xf10b0000 0x1000>;
+ clocks = <&ufs_core_clk>;
+ clock-names = "core";
+ resets = <&scmi_reset 4>, <&scmi_reset 35>;
+ reset-names = "host", "phy";
+ interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_HIGH>;
+ freq-table-hz = <0 0>;
+ };
diff --git a/Documentation/devicetree/bindings/ufs/mediatek,ufs.yaml b/Documentation/devicetree/bindings/ufs/mediatek,ufs.yaml
index 1dec54fb00f3..15c347f5e660 100644
--- a/Documentation/devicetree/bindings/ufs/mediatek,ufs.yaml
+++ b/Documentation/devicetree/bindings/ufs/mediatek,ufs.yaml
@@ -7,7 +7,8 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Mediatek Universal Flash Storage (UFS) Controller
maintainers:
- - Stanley Chu <stanley.chu@mediatek.com>
+ - Peter Wang <peter.wang@mediatek.com>
+ - Chaotian Jing <chaotian.jing@mediatek.com>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml b/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml
index 1dd41f6d5258..516bb61a4624 100644
--- a/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml
+++ b/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml
@@ -88,7 +88,6 @@ allOf:
- const: ice_core_clk
reg:
minItems: 2
- maxItems: 2
reg-names:
minItems: 2
required:
@@ -117,7 +116,6 @@ allOf:
- const: tx_lane0_sync_clk
- const: rx_lane0_sync_clk
reg:
- minItems: 1
maxItems: 1
reg-names:
maxItems: 1
@@ -147,7 +145,6 @@ allOf:
- const: ice_core_clk
reg:
minItems: 2
- maxItems: 2
reg-names:
minItems: 2
required:
diff --git a/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml b/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml
index b4e744ebffd1..a7eb7ad85a94 100644
--- a/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml
+++ b/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml
@@ -61,6 +61,9 @@ properties:
phy-names:
const: ufs-phy
+ power-domains:
+ maxItems: 1
+
samsung,sysreg:
$ref: /schemas/types.yaml#/definitions/phandle-array
items:
diff --git a/Documentation/driver-api/driver-model/devres.rst b/Documentation/driver-api/driver-model/devres.rst
index 2b36ebde9cec..0198ac65e874 100644
--- a/Documentation/driver-api/driver-model/devres.rst
+++ b/Documentation/driver-api/driver-model/devres.rst
@@ -383,7 +383,6 @@ NET
PER-CPU MEM
devm_alloc_percpu()
- devm_free_percpu()
PCI
devm_pci_alloc_host_bridge() : managed PCI host bridge allocation
diff --git a/MAINTAINERS b/MAINTAINERS
index 15259aad7374..074d4351714c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -11821,6 +11821,16 @@ F: include/linux/i2c.h
F: include/uapi/linux/i2c-*.h
F: include/uapi/linux/i2c.h
+I2C SUBSYSTEM [RUST]
+M: Igor Korotin <igor.korotin.linux@gmail.com>
+R: Danilo Krummrich <dakr@kernel.org>
+R: Daniel Almeida <daniel.almeida@collabora.com>
+L: rust-for-linux@vger.kernel.org
+S: Maintained
+F: rust/kernel/i2c.rs
+F: samples/rust/rust_driver_i2c.rs
+F: samples/rust/rust_i2c_client.rs
+
I2C SUBSYSTEM HOST DRIVERS
M: Andi Shyti <andi.shyti@kernel.org>
L: linux-i2c@vger.kernel.org
@@ -14767,6 +14777,7 @@ LOONGSON-2K Board Management Controller (BMC) DRIVER
M: Binbin Zhou <zhoubinbin@loongson.cn>
M: Chong Qiao <qiaochong@loongson.cn>
S: Maintained
+F: drivers/char/ipmi/ipmi_si_ls2k.c
F: drivers/mfd/ls2k-bmc-core.c
LOONGSON EDAC DRIVER
@@ -23313,6 +23324,7 @@ F: drivers/scsi/
F: drivers/ufs/
F: include/scsi/
F: include/uapi/scsi/
+F: include/ufs/
SCSI TAPE DRIVER
M: Kai Mäkisara <Kai.Makisara@kolumbus.fi>
@@ -26628,6 +26640,14 @@ S: Supported
F: Documentation/devicetree/bindings/ufs/
F: Documentation/scsi/ufs.rst
F: drivers/ufs/core/
+F: include/ufs/
+
+UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER AMD VERSAL2
+M: Sai Krishna Potthuri <sai.krishna.potthuri@amd.com>
+M: Ajay Neeli <ajay.neeli@amd.com>
+S: Maintained
+F: Documentation/devicetree/bindings/ufs/amd,versal2-ufs.yaml
+F: drivers/ufs/host/ufs-amd-versal2.c
UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER DWC HOOKS
M: Pedro Sousa <pedrom.sousa@synopsys.com>
@@ -26645,6 +26665,7 @@ F: drivers/ufs/host/ufs-exynos*
UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER MEDIATEK HOOKS
M: Peter Wang <peter.wang@mediatek.com>
+M: Chaotian Jing <chaotian.jing@mediatek.com>
R: Stanley Jhu <chu.stanley@gmail.com>
L: linux-scsi@vger.kernel.org
L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index 341174bf9106..b9eaf4ad7085 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -36,6 +36,9 @@ void update_freq_counters_refs(void);
#define arch_scale_hw_pressure topology_get_hw_pressure
#define arch_update_hw_pressure topology_update_hw_pressure
+#undef arch_cpu_is_threaded
+#define arch_cpu_is_threaded() (read_cpuid_mpidr() & MPIDR_MT_BITMASK)
+
#include <asm-generic/topology.h>
#endif /* _ASM_ARM_TOPOLOGY_H */
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 5d07ee85bdae..5d24dc53799b 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -25,107 +25,6 @@
#include <asm/cputype.h>
#include <asm/topology.h>
-#ifdef CONFIG_ACPI
-static bool __init acpi_cpu_is_threaded(int cpu)
-{
- int is_threaded = acpi_pptt_cpu_is_thread(cpu);
-
- /*
- * if the PPTT doesn't have thread information, assume a homogeneous
- * machine and return the current CPU's thread state.
- */
- if (is_threaded < 0)
- is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;
-
- return !!is_threaded;
-}
-
-struct cpu_smt_info {
- unsigned int thread_num;
- int core_id;
-};
-
-/*
- * Propagate the topology information of the processor_topology_node tree to the
- * cpu_topology array.
- */
-int __init parse_acpi_topology(void)
-{
- unsigned int max_smt_thread_num = 1;
- struct cpu_smt_info *entry;
- struct xarray hetero_cpu;
- unsigned long hetero_id;
- int cpu, topology_id;
-
- if (acpi_disabled)
- return 0;
-
- xa_init(&hetero_cpu);
-
- for_each_possible_cpu(cpu) {
- topology_id = find_acpi_cpu_topology(cpu, 0);
- if (topology_id < 0)
- return topology_id;
-
- if (acpi_cpu_is_threaded(cpu)) {
- cpu_topology[cpu].thread_id = topology_id;
- topology_id = find_acpi_cpu_topology(cpu, 1);
- cpu_topology[cpu].core_id = topology_id;
-
- /*
- * In the PPTT, CPUs below a node with the 'identical
- * implementation' flag have the same number of threads.
- * Count the number of threads for only one CPU (i.e.
- * one core_id) among those with the same hetero_id.
- * See the comment of find_acpi_cpu_topology_hetero_id()
- * for more details.
- *
- * One entry is created for each node having:
- * - the 'identical implementation' flag
- * - its parent not having the flag
- */
- hetero_id = find_acpi_cpu_topology_hetero_id(cpu);
- entry = xa_load(&hetero_cpu, hetero_id);
- if (!entry) {
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- WARN_ON_ONCE(!entry);
-
- if (entry) {
- entry->core_id = topology_id;
- entry->thread_num = 1;
- xa_store(&hetero_cpu, hetero_id,
- entry, GFP_KERNEL);
- }
- } else if (entry->core_id == topology_id) {
- entry->thread_num++;
- }
- } else {
- cpu_topology[cpu].thread_id = -1;
- cpu_topology[cpu].core_id = topology_id;
- }
- topology_id = find_acpi_cpu_topology_cluster(cpu);
- cpu_topology[cpu].cluster_id = topology_id;
- topology_id = find_acpi_cpu_topology_package(cpu);
- cpu_topology[cpu].package_id = topology_id;
- }
-
- /*
- * This is a short loop since the number of XArray elements is the
- * number of heterogeneous CPU clusters. On a homogeneous system
- * there's only one entry in the XArray.
- */
- xa_for_each(&hetero_cpu, hetero_id, entry) {
- max_smt_thread_num = max(max_smt_thread_num, entry->thread_num);
- xa_erase(&hetero_cpu, hetero_id);
- kfree(entry);
- }
-
- cpu_smt_set_num_threads(max_smt_thread_num, max_smt_thread_num);
- xa_destroy(&hetero_cpu);
- return 0;
-}
-#endif
-
#ifdef CONFIG_ARM64_AMU_EXTN
#define read_corecnt() read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0)
#define read_constcnt() read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0)
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index f2140fc06ba0..15e18d50dcc6 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -246,6 +246,73 @@ void ata_acpi_bind_dev(struct ata_device *dev)
}
/**
+ * ata_acpi_dev_manage_restart - check whether the disk should be stopped
+ *			      (spun down) on system restart
+ * @dev: target ATA device
+ *
+ * RETURNS:
+ * true if the disk should be stopped, otherwise false.
+ */
+bool ata_acpi_dev_manage_restart(struct ata_device *dev)
+{
+ struct device *tdev;
+
+ /*
+ * If ATA_FLAG_ACPI_SATA is set, the acpi fwnode is attached to the
+ * ata_device instead of the ata_port.
+ */
+ if (dev->link->ap->flags & ATA_FLAG_ACPI_SATA)
+ tdev = &dev->tdev;
+ else
+ tdev = &dev->link->ap->tdev;
+
+ if (!is_acpi_device_node(tdev->fwnode))
+ return false;
+ return acpi_bus_power_manageable(ACPI_HANDLE(tdev));
+}
+
+/**
+ * ata_acpi_port_power_on - set the power state of the ata port to D0
+ * @ap: target ATA port
+ *
+ * This function is called at the beginning of ata_port_probe().
+ */
+void ata_acpi_port_power_on(struct ata_port *ap)
+{
+ acpi_handle handle;
+ int i;
+
+ /*
+ * If ATA_FLAG_ACPI_SATA is set, the acpi fwnode is attached to the
+ * ata_device instead of the ata_port.
+ */
+ if (ap->flags & ATA_FLAG_ACPI_SATA) {
+ for (i = 0; i < ATA_MAX_DEVICES; i++) {
+ struct ata_device *dev = &ap->link.device[i];
+
+ if (!is_acpi_device_node(dev->tdev.fwnode))
+ continue;
+ handle = ACPI_HANDLE(&dev->tdev);
+ if (!acpi_bus_power_manageable(handle))
+ continue;
+ if (acpi_bus_set_power(handle, ACPI_STATE_D0))
+ ata_dev_err(dev,
+ "acpi: failed to set power state to D0\n");
+ }
+ return;
+ }
+
+ if (!is_acpi_device_node(ap->tdev.fwnode))
+ return;
+ handle = ACPI_HANDLE(&ap->tdev);
+ if (!acpi_bus_power_manageable(handle))
+ return;
+
+ if (acpi_bus_set_power(handle, ACPI_STATE_D0))
+ ata_port_err(ap, "acpi: failed to set power state to D0\n");
+}
+
+/**
* ata_acpi_dissociate - dissociate ATA host from ACPI objects
* @host: target ATA host
*
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 462217935558..0b24bd169d61 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -3146,6 +3146,10 @@ int ata_dev_configure(struct ata_device *dev)
dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
dev->max_sectors);
+ if (dev->quirks & ATA_QUIRK_MAX_SEC_8191)
+ dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_8191,
+ dev->max_sectors);
+
if (dev->quirks & ATA_QUIRK_MAX_SEC_LBA48)
dev->max_sectors = ATA_MAX_SECTORS_LBA48;
@@ -3998,6 +4002,7 @@ static const char * const ata_quirk_names[] = {
[__ATA_QUIRK_NO_DMA_LOG] = "nodmalog",
[__ATA_QUIRK_NOTRIM] = "notrim",
[__ATA_QUIRK_MAX_SEC_1024] = "maxsec1024",
+ [__ATA_QUIRK_MAX_SEC_8191] = "maxsec8191",
[__ATA_QUIRK_MAX_TRIM_128M] = "maxtrim128m",
[__ATA_QUIRK_NO_NCQ_ON_ATI] = "noncqonati",
[__ATA_QUIRK_NO_LPM_ON_ATI] = "nolpmonati",
@@ -4104,6 +4109,12 @@ static const struct ata_dev_quirks_entry __ata_dev_quirks[] = {
{ "LITEON CX1-JB*-HP", NULL, ATA_QUIRK_MAX_SEC_1024 },
{ "LITEON EP1-*", NULL, ATA_QUIRK_MAX_SEC_1024 },
+ /*
+ * These devices time out with higher max sects.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=220693
+ */
+ { "DELLBOSS VD", "MV.R00-0", ATA_QUIRK_MAX_SEC_8191 },
+
/* Devices we expect to fail diagnostics */
/* Devices where NCQ should be avoided */
@@ -5915,6 +5926,8 @@ void ata_port_probe(struct ata_port *ap)
struct ata_eh_info *ehi = &ap->link.eh_info;
unsigned long flags;
+ ata_acpi_port_power_on(ap);
+
/* kick EH for boot probing */
spin_lock_irqsave(ap->lock, flags);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 434774e71fe6..721d3f270c8e 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1102,6 +1102,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim,
*/
sdev->manage_runtime_start_stop = 1;
sdev->manage_shutdown = 1;
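+		/* Stop the disk on restart only when its ACPI node is power manageable. */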
+ sdev->manage_restart = ata_acpi_dev_manage_restart(dev);
sdev->force_runtime_start_on_system_start = 1;
}
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index e5b977a8d3e1..0e7ecac73680 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -130,6 +130,8 @@ extern void ata_acpi_on_disable(struct ata_device *dev);
extern void ata_acpi_set_state(struct ata_port *ap, pm_message_t state);
extern void ata_acpi_bind_port(struct ata_port *ap);
extern void ata_acpi_bind_dev(struct ata_device *dev);
+extern void ata_acpi_port_power_on(struct ata_port *ap);
+extern bool ata_acpi_dev_manage_restart(struct ata_device *dev);
extern acpi_handle ata_dev_acpi_handle(struct ata_device *dev);
#else
static inline void ata_acpi_dissociate(struct ata_host *host) { }
@@ -140,6 +142,8 @@ static inline void ata_acpi_set_state(struct ata_port *ap,
pm_message_t state) { }
static inline void ata_acpi_bind_port(struct ata_port *ap) {}
static inline void ata_acpi_bind_dev(struct ata_device *dev) {}
+static inline void ata_acpi_port_power_on(struct ata_port *ap) {}
+static inline bool ata_acpi_dev_manage_restart(struct ata_device *dev) { return false; }
#endif
/* libata-scsi.c */
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index e1eff05bea4a..84ec92bff642 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -823,12 +823,106 @@ void remove_cpu_topology(unsigned int cpu)
clear_cpu_topology(cpu);
}
+#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
+struct cpu_smt_info {
+ unsigned int thread_num;
+ int core_id;
+};
+
+static bool __init acpi_cpu_is_threaded(int cpu)
+{
+ int is_threaded = acpi_pptt_cpu_is_thread(cpu);
+
+ /*
+	 * If the PPTT doesn't have thread information, fall back to the
+	 * architecture-specific check, if available.
+ */
+ if (is_threaded < 0)
+ is_threaded = arch_cpu_is_threaded();
+
+ return !!is_threaded;
+}
+
+/*
+ * Propagate the topology information of the processor_topology_node tree to the
+ * cpu_topology array.
+ */
__weak int __init parse_acpi_topology(void)
{
+ unsigned int max_smt_thread_num = 1;
+ struct cpu_smt_info *entry;
+ struct xarray hetero_cpu;
+ unsigned long hetero_id;
+ int cpu, topology_id;
+
+ if (acpi_disabled)
+ return 0;
+
+ xa_init(&hetero_cpu);
+
+ for_each_possible_cpu(cpu) {
+ topology_id = find_acpi_cpu_topology(cpu, 0);
+ if (topology_id < 0)
+ return topology_id;
+
+ if (acpi_cpu_is_threaded(cpu)) {
+ cpu_topology[cpu].thread_id = topology_id;
+ topology_id = find_acpi_cpu_topology(cpu, 1);
+ cpu_topology[cpu].core_id = topology_id;
+
+ /*
+ * In the PPTT, CPUs below a node with the 'identical
+ * implementation' flag have the same number of threads.
+ * Count the number of threads for only one CPU (i.e.
+ * one core_id) among those with the same hetero_id.
+ * See the comment of find_acpi_cpu_topology_hetero_id()
+ * for more details.
+ *
+ * One entry is created for each node having:
+ * - the 'identical implementation' flag
+ * - its parent not having the flag
+ */
+ hetero_id = find_acpi_cpu_topology_hetero_id(cpu);
+ entry = xa_load(&hetero_cpu, hetero_id);
+ if (!entry) {
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ WARN_ON_ONCE(!entry);
+
+ if (entry) {
+ entry->core_id = topology_id;
+ entry->thread_num = 1;
+ xa_store(&hetero_cpu, hetero_id,
+ entry, GFP_KERNEL);
+ }
+ } else if (entry->core_id == topology_id) {
+ entry->thread_num++;
+ }
+ } else {
+ cpu_topology[cpu].thread_id = -1;
+ cpu_topology[cpu].core_id = topology_id;
+ }
+ topology_id = find_acpi_cpu_topology_cluster(cpu);
+ cpu_topology[cpu].cluster_id = topology_id;
+ topology_id = find_acpi_cpu_topology_package(cpu);
+ cpu_topology[cpu].package_id = topology_id;
+ }
+
+ /*
+ * This is a short loop since the number of XArray elements is the
+ * number of heterogeneous CPU clusters. On a homogeneous system
+ * there's only one entry in the XArray.
+ */
+ xa_for_each(&hetero_cpu, hetero_id, entry) {
+ max_smt_thread_num = max(max_smt_thread_num, entry->thread_num);
+ xa_erase(&hetero_cpu, hetero_id);
+ kfree(entry);
+ }
+
+ cpu_smt_set_num_threads(max_smt_thread_num, max_smt_thread_num);
+ xa_destroy(&hetero_cpu);
return 0;
}
-#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
void __init init_cpu_topology(void)
{
int cpu, ret;
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 86fa7fbb3548..430cbefbc97f 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -85,6 +85,18 @@ struct driver_private {
};
#define to_driver(obj) container_of(obj, struct driver_private, kobj)
+#ifdef CONFIG_RUST
+/**
+ * struct driver_type - Representation of a Rust driver type.
+ */
+struct driver_type {
+ /**
+ * @id: Representation of core::any::TypeId.
+ */
+ u8 id[16];
+} __packed;
+#endif
+
/**
* struct device_private - structure to hold the private to the driver core portions of the device structure.
*
@@ -100,6 +112,7 @@ struct driver_private {
* @async_driver - pointer to device driver awaiting probe via async_probe
* @device - pointer back to the struct device that this structure is
* associated with.
+ * @driver_type - The type of the bound Rust driver.
* @dead - This device is currently either in the process of or has been
* removed from the system. Any asynchronous events scheduled for this
* device should exit without taking any action.
@@ -116,6 +129,9 @@ struct device_private {
const struct device_driver *async_driver;
char *deferred_probe_reason;
struct device *device;
+#ifdef CONFIG_RUST
+ struct driver_type driver_type;
+#endif
u8 dead:1;
};
#define to_device_private_parent(obj) \
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 5e75e1bce551..320e155c6be7 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -533,8 +533,7 @@ void bus_probe_device(struct device *dev)
if (!sp)
return;
- if (sp->drivers_autoprobe)
- device_initial_probe(dev);
+ device_initial_probe(dev);
mutex_lock(&sp->mutex);
list_for_each_entry(sif, &sp->interfaces, node)
diff --git a/drivers/base/core.c b/drivers/base/core.c
index f69dc9c85954..40de2f51a1b1 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -4138,7 +4138,7 @@ int __init devices_init(void)
sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
if (!sysfs_dev_char_kobj)
goto char_kobj_err;
- device_link_wq = alloc_workqueue("device_link_wq", 0, 0);
+ device_link_wq = alloc_workqueue("device_link_wq", WQ_PERCPU, 0);
if (!device_link_wq)
goto wq_err;
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index fa0a2eef93ac..c6c57b6f61c6 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -300,13 +300,30 @@ static ssize_t print_cpus_isolated(struct device *dev,
}
static DEVICE_ATTR(isolated, 0444, print_cpus_isolated, NULL);
+static ssize_t housekeeping_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct cpumask *hk_mask;
+
+ hk_mask = housekeeping_cpumask(HK_TYPE_KERNEL_NOISE);
+
+ if (housekeeping_enabled(HK_TYPE_KERNEL_NOISE))
+ return sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(hk_mask));
+ return sysfs_emit(buf, "\n");
+}
+static DEVICE_ATTR_RO(housekeeping);
+
#ifdef CONFIG_NO_HZ_FULL
-static ssize_t print_cpus_nohz_full(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t nohz_full_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
- return sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(tick_nohz_full_mask));
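+	/* tick_nohz_full_mask may not be allocated when nohz_full is unused. */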
+ if (cpumask_available(tick_nohz_full_mask))
+ return sysfs_emit(buf, "%*pbl\n",
+ cpumask_pr_args(tick_nohz_full_mask));
+ return sysfs_emit(buf, "\n");
}
-static DEVICE_ATTR(nohz_full, 0444, print_cpus_nohz_full, NULL);
+static DEVICE_ATTR_RO(nohz_full);
#endif
#ifdef CONFIG_CRASH_HOTPLUG
@@ -505,6 +522,7 @@ static struct attribute *cpu_root_attrs[] = {
&dev_attr_offline.attr,
&dev_attr_enabled.attr,
&dev_attr_isolated.attr,
+ &dev_attr_housekeeping.attr,
#ifdef CONFIG_NO_HZ_FULL
&dev_attr_nohz_full.attr,
#endif
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 13ab98e033ea..349f31bedfa1 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -193,7 +193,7 @@ void driver_deferred_probe_trigger(void)
* Kick the re-probe thread. It may already be scheduled, but it is
* safe to kick it again.
*/
- queue_work(system_unbound_wq, &deferred_probe_work);
+ queue_work(system_dfl_wq, &deferred_probe_work);
}
/**
@@ -1077,7 +1077,15 @@ EXPORT_SYMBOL_GPL(device_attach);
void device_initial_probe(struct device *dev)
{
- __device_attach(dev, true);
+ struct subsys_private *sp = bus_to_subsys(dev->bus);
+
+ if (!sp)
+ return;
+
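+	/* Honor the bus's drivers_autoprobe setting, previously checked in bus_probe_device(). */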
+ if (sp->drivers_autoprobe)
+ __device_attach(dev, true);
+
+ subsys_put(sp);
}
/*
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index c948c88d3956..f54db6d138ab 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -1222,13 +1222,6 @@ static void devm_percpu_release(struct device *dev, void *pdata)
free_percpu(p);
}
-static int devm_percpu_match(struct device *dev, void *data, void *p)
-{
- struct devres *devr = container_of(data, struct devres, data);
-
- return *(void **)devr->data == p;
-}
-
/**
* __devm_alloc_percpu - Resource-managed alloc_percpu
* @dev: Device to allocate per-cpu memory for
@@ -1264,21 +1257,3 @@ void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
return pcpu;
}
EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
-
-/**
- * devm_free_percpu - Resource-managed free_percpu
- * @dev: Device this memory belongs to
- * @pdata: Per-cpu memory to free
- *
- * Free memory allocated with devm_alloc_percpu().
- */
-void devm_free_percpu(struct device *dev, void __percpu *pdata)
-{
- /*
- * Use devres_release() to prevent memory leakage as
- * devm_free_pages() does.
- */
- WARN_ON(devres_release(dev, devm_percpu_release, devm_percpu_match,
- (void *)(__force unsigned long)pdata));
-}
-EXPORT_SYMBOL_GPL(devm_free_percpu);
diff --git a/drivers/base/firmware_loader/sysfs.c b/drivers/base/firmware_loader/sysfs.c
index add0b9b75edd..92e91050f96a 100644
--- a/drivers/base/firmware_loader/sysfs.c
+++ b/drivers/base/firmware_loader/sysfs.c
@@ -47,7 +47,10 @@ static ssize_t timeout_show(const struct class *class, const struct class_attrib
static ssize_t timeout_store(const struct class *class, const struct class_attribute *attr,
const char *buf, size_t count)
{
- int tmp_loading_timeout = simple_strtol(buf, NULL, 10);
+ int tmp_loading_timeout;
+
+ if (kstrtoint(buf, 10, &tmp_loading_timeout))
+ return -EINVAL;
if (tmp_loading_timeout < 0)
tmp_loading_timeout = 0;
@@ -157,7 +160,10 @@ static ssize_t firmware_loading_store(struct device *dev,
struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
struct fw_priv *fw_priv;
ssize_t written = count;
- int loading = simple_strtol(buf, NULL, 10);
+ int loading;
+
+ if (kstrtoint(buf, 10, &loading))
+ return -EINVAL;
mutex_lock(&fw_lock);
fw_priv = fw_sysfs->fw_priv;
diff --git a/drivers/base/firmware_loader/sysfs_upload.c b/drivers/base/firmware_loader/sysfs_upload.c
index 829270067d16..c3797b93c5f5 100644
--- a/drivers/base/firmware_loader/sysfs_upload.c
+++ b/drivers/base/firmware_loader/sysfs_upload.c
@@ -100,8 +100,10 @@ static ssize_t cancel_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
mutex_lock(&fwlp->lock);
- if (fwlp->progress == FW_UPLOAD_PROG_IDLE)
- ret = -ENODEV;
+ if (fwlp->progress == FW_UPLOAD_PROG_IDLE) {
+ mutex_unlock(&fwlp->lock);
+ return -ENODEV;
+ }
fwlp->ops->cancel(fwlp->fw_upload);
mutex_unlock(&fwlp->lock);
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 3700ab4eba3e..3f48fc6ab596 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -599,7 +599,8 @@ static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
static int __ipmi_bmc_register(struct ipmi_smi *intf,
struct ipmi_device_id *id,
bool guid_set, guid_t *guid, int intf_num);
-static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);
+static int __scan_channels(struct ipmi_smi *intf,
+ struct ipmi_device_id *id, bool rescan);
static void free_ipmi_user(struct kref *ref)
{
@@ -2668,7 +2669,7 @@ retry_bmc_lock:
if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num))
need_waiter(intf); /* Retry later on an error. */
else
- __scan_channels(intf, &id);
+ __scan_channels(intf, &id, false);
if (!intf_set) {
@@ -2688,7 +2689,7 @@ retry_bmc_lock:
goto out_noprocessing;
} else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id)))
/* Version info changes, scan the channels again. */
- __scan_channels(intf, &bmc->fetch_id);
+ __scan_channels(intf, &bmc->fetch_id, true);
bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
@@ -3417,8 +3418,6 @@ channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
intf->channels_ready = true;
wake_up(&intf->waitq);
} else {
- intf->channel_list = intf->wchannels + set;
- intf->channels_ready = true;
rv = send_channel_info_cmd(intf, intf->curr_channel);
}
@@ -3440,10 +3439,21 @@ channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
/*
* Must be holding intf->bmc_reg_mutex to call this.
*/
-static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id)
+static int __scan_channels(struct ipmi_smi *intf,
+ struct ipmi_device_id *id,
+ bool rescan)
{
int rv;
+ if (rescan) {
+ /* Clear channels_ready to force channels rescan. */
+ intf->channels_ready = false;
+ }
+
+ /* Skip channel scan if channels are already marked ready */
+ if (intf->channels_ready)
+ return 0;
+
if (ipmi_version_major(id) > 1
|| (ipmi_version_major(id) == 1
&& ipmi_version_minor(id) >= 5)) {
@@ -3658,7 +3668,7 @@ int ipmi_add_smi(struct module *owner,
}
mutex_lock(&intf->bmc_reg_mutex);
- rv = __scan_channels(intf, &id);
+ rv = __scan_channels(intf, &id, false);
mutex_unlock(&intf->bmc_reg_mutex);
if (rv)
goto out_err_bmc_reg;
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index dd502322f499..3a77be7ebf4a 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -11,8 +11,11 @@
* used by the kernel internally.
*/
+#include "linux/dev_printk.h"
+#include "linux/tpm.h"
#include "tpm.h"
#include <crypto/hash_info.h>
+#include <linux/unaligned.h>
static bool disable_pcr_integrity;
module_param(disable_pcr_integrity, bool, 0444);
@@ -199,11 +202,15 @@ int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
}
if (!disable_pcr_integrity) {
- tpm_buf_append_name(chip, &buf, pcr_idx, NULL);
+ rc = tpm_buf_append_name(chip, &buf, pcr_idx, NULL);
+ if (rc) {
+ tpm_buf_destroy(&buf);
+ return rc;
+ }
tpm_buf_append_hmac_session(chip, &buf, 0, NULL, 0);
} else {
tpm_buf_append_handle(chip, &buf, pcr_idx);
- tpm_buf_append_auth(chip, &buf, 0, NULL, 0);
+ tpm_buf_append_auth(chip, &buf, NULL, 0);
}
tpm_buf_append_u32(&buf, chip->nr_allocated_banks);
@@ -214,8 +221,14 @@ int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
chip->allocated_banks[i].digest_size);
}
- if (!disable_pcr_integrity)
- tpm_buf_fill_hmac_session(chip, &buf);
+ if (!disable_pcr_integrity) {
+ rc = tpm_buf_fill_hmac_session(chip, &buf);
+ if (rc) {
+ tpm_buf_destroy(&buf);
+ return rc;
+ }
+ }
+
rc = tpm_transmit_cmd(chip, &buf, 0, "attempting extend a PCR value");
if (!disable_pcr_integrity)
rc = tpm_buf_check_hmac_response(chip, &buf, rc);
@@ -269,11 +282,24 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
do {
tpm_buf_reset(&buf, TPM2_ST_SESSIONS, TPM2_CC_GET_RANDOM);
- tpm_buf_append_hmac_session_opt(chip, &buf, TPM2_SA_ENCRYPT
- | TPM2_SA_CONTINUE_SESSION,
- NULL, 0);
+ if (tpm2_chip_auth(chip)) {
+ tpm_buf_append_hmac_session(chip, &buf,
+ TPM2_SA_ENCRYPT |
+ TPM2_SA_CONTINUE_SESSION,
+ NULL, 0);
+ } else {
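+			/*
+			 * No HMAC support: if nothing follows the handle
+			 * area, mark the command as session-less.
+			 */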
+ offset = buf.handles * 4 + TPM_HEADER_SIZE;
+ head = (struct tpm_header *)buf.data;
+ if (tpm_buf_length(&buf) == offset)
+ head->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
+ }
tpm_buf_append_u16(&buf, num_bytes);
- tpm_buf_fill_hmac_session(chip, &buf);
+ err = tpm_buf_fill_hmac_session(chip, &buf);
+ if (err) {
+ tpm_buf_destroy(&buf);
+ return err;
+ }
+
err = tpm_transmit_cmd(chip, &buf,
offsetof(struct tpm2_get_random_out,
buffer),
diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c
index 6d03c224e6b2..4149379665c4 100644
--- a/drivers/char/tpm/tpm2-sessions.c
+++ b/drivers/char/tpm/tpm2-sessions.c
@@ -144,59 +144,80 @@ struct tpm2_auth {
/*
* Name Size based on TPM algorithm (assumes no hash bigger than 255)
*/
-static u8 name_size(const u8 *name)
+static int name_size(const u8 *name)
{
- static u8 size_map[] = {
- [TPM_ALG_SHA1] = SHA1_DIGEST_SIZE,
- [TPM_ALG_SHA256] = SHA256_DIGEST_SIZE,
- [TPM_ALG_SHA384] = SHA384_DIGEST_SIZE,
- [TPM_ALG_SHA512] = SHA512_DIGEST_SIZE,
- };
- u16 alg = get_unaligned_be16(name);
- return size_map[alg] + 2;
-}
-
-static int tpm2_parse_read_public(char *name, struct tpm_buf *buf)
-{
- struct tpm_header *head = (struct tpm_header *)buf->data;
- off_t offset = TPM_HEADER_SIZE;
- u32 tot_len = be32_to_cpu(head->length);
- u32 val;
-
- /* we're starting after the header so adjust the length */
- tot_len -= TPM_HEADER_SIZE;
-
- /* skip public */
- val = tpm_buf_read_u16(buf, &offset);
- if (val > tot_len)
- return -EINVAL;
- offset += val;
- /* name */
- val = tpm_buf_read_u16(buf, &offset);
- if (val != name_size(&buf->data[offset]))
+ u16 hash_alg = get_unaligned_be16(name);
+
+ switch (hash_alg) {
+ case TPM_ALG_SHA1:
+ return SHA1_DIGEST_SIZE + 2;
+ case TPM_ALG_SHA256:
+ return SHA256_DIGEST_SIZE + 2;
+ case TPM_ALG_SHA384:
+ return SHA384_DIGEST_SIZE + 2;
+ case TPM_ALG_SHA512:
+ return SHA512_DIGEST_SIZE + 2;
+ default:
+ pr_warn("tpm: unsupported name algorithm: 0x%04x\n", hash_alg);
return -EINVAL;
- memcpy(name, &buf->data[offset], val);
- /* forget the rest */
- return 0;
+ }
}
-static int tpm2_read_public(struct tpm_chip *chip, u32 handle, char *name)
+static int tpm2_read_public(struct tpm_chip *chip, u32 handle, void *name)
{
+ u32 mso = tpm2_handle_mso(handle);
+ off_t offset = TPM_HEADER_SIZE;
+ int rc, name_size_alg;
struct tpm_buf buf;
- int rc;
+
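+	/*
+	 * Handles with no public area (e.g. PCR or permanent handles) have
+	 * no TPM2B_NAME; the raw handle value serves as the name.
+	 */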
+ if (mso != TPM2_MSO_PERSISTENT && mso != TPM2_MSO_VOLATILE &&
+ mso != TPM2_MSO_NVRAM) {
+ memcpy(name, &handle, sizeof(u32));
+ return sizeof(u32);
+ }
rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_READ_PUBLIC);
if (rc)
return rc;
tpm_buf_append_u32(&buf, handle);
- rc = tpm_transmit_cmd(chip, &buf, 0, "read public");
- if (rc == TPM2_RC_SUCCESS)
- rc = tpm2_parse_read_public(name, &buf);
- tpm_buf_destroy(&buf);
+ rc = tpm_transmit_cmd(chip, &buf, 0, "TPM2_ReadPublic");
+ if (rc) {
+ tpm_buf_destroy(&buf);
+ return tpm_ret_to_err(rc);
+ }
- return rc;
+ /* Skip TPMT_PUBLIC: */
+ offset += tpm_buf_read_u16(&buf, &offset);
+
+ /*
+ * Ensure space for the length field of TPM2B_NAME and hashAlg field of
+ * TPMT_HA (the extra four bytes).
+ */
+ if (offset + 4 > tpm_buf_length(&buf)) {
+ tpm_buf_destroy(&buf);
+ return -EIO;
+ }
+
+ rc = tpm_buf_read_u16(&buf, &offset);
+ name_size_alg = name_size(&buf.data[offset]);
+
+	if (name_size_alg < 0) {
+		tpm_buf_destroy(&buf);
+		return name_size_alg;
+	}
+
+ if (rc != name_size_alg) {
+ tpm_buf_destroy(&buf);
+ return -EIO;
+ }
+
+ if (offset + rc > tpm_buf_length(&buf)) {
+ tpm_buf_destroy(&buf);
+ return -EIO;
+ }
+
+	memcpy(name, &buf.data[offset], rc);
+	tpm_buf_destroy(&buf);
+	return name_size_alg;
}
#endif /* CONFIG_TCG_TPM2_HMAC */
@@ -221,52 +242,76 @@ static int tpm2_read_public(struct tpm_chip *chip, u32 handle, char *name)
* As with most tpm_buf operations, success is assumed because failure
* will be caused by an incorrect programming model and indicated by a
* kernel message.
+ *
+ * Ends the authorization session on failure.
*/
-void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf,
- u32 handle, u8 *name)
+int tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf,
+ u32 handle, u8 *name)
{
#ifdef CONFIG_TCG_TPM2_HMAC
enum tpm2_mso_type mso = tpm2_handle_mso(handle);
struct tpm2_auth *auth;
+ u16 name_size_alg;
int slot;
+ int ret;
#endif
if (!tpm2_chip_auth(chip)) {
tpm_buf_append_handle(chip, buf, handle);
- return;
+ return 0;
}
#ifdef CONFIG_TCG_TPM2_HMAC
slot = (tpm_buf_length(buf) - TPM_HEADER_SIZE) / 4;
if (slot >= AUTH_MAX_NAMES) {
- dev_err(&chip->dev, "TPM: too many handles\n");
- return;
+ dev_err(&chip->dev, "too many handles\n");
+ ret = -EIO;
+ goto err;
}
auth = chip->auth;
- WARN(auth->session != tpm_buf_length(buf),
- "name added in wrong place\n");
+		dev_err(&chip->dev, "session state malformed\n");
+ dev_err(&chip->dev, "session state malformed");
+ ret = -EIO;
+ goto err;
+ }
tpm_buf_append_u32(buf, handle);
auth->session += 4;
if (mso == TPM2_MSO_PERSISTENT ||
mso == TPM2_MSO_VOLATILE ||
mso == TPM2_MSO_NVRAM) {
- if (!name)
- tpm2_read_public(chip, handle, auth->name[slot]);
+		if (!name) {
+			ret = tpm2_read_public(chip, handle, auth->name[slot]);
+			if (ret < 0)
+				goto err;
+
+			name_size_alg = ret;
+		} else {
+			/* Caller-supplied name: compute its size for the copy below. */
+			ret = name_size(name);
+			if (ret < 0)
+				goto err;
+
+			name_size_alg = ret;
+		}
} else {
- if (name)
- dev_err(&chip->dev, "TPM: Handle does not require name but one is specified\n");
+ if (name) {
+ dev_err(&chip->dev, "handle 0x%08x does not use a name\n",
+ handle);
+ ret = -EIO;
+ goto err;
+ }
}
auth->name_h[slot] = handle;
if (name)
- memcpy(auth->name[slot], name, name_size(name));
+ memcpy(auth->name[slot], name, name_size_alg);
+#endif
+ return 0;
+
+#ifdef CONFIG_TCG_TPM2_HMAC
+err:
+ tpm2_end_auth_session(chip);
+ return tpm_ret_to_err(ret);
#endif
}
EXPORT_SYMBOL_GPL(tpm_buf_append_name);
void tpm_buf_append_auth(struct tpm_chip *chip, struct tpm_buf *buf,
- u8 attributes, u8 *passphrase, int passphrase_len)
+ u8 *passphrase, int passphrase_len)
{
/* offset tells us where the sessions area begins */
int offset = buf->handles * 4 + TPM_HEADER_SIZE;
@@ -327,8 +372,7 @@ void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf,
#endif
if (!tpm2_chip_auth(chip)) {
- tpm_buf_append_auth(chip, buf, attributes, passphrase,
- passphrase_len);
+ tpm_buf_append_auth(chip, buf, passphrase, passphrase_len);
return;
}
@@ -533,11 +577,9 @@ static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip,
* encryption key and encrypts the first parameter of the command
* buffer with it.
*
- * As with most tpm_buf operations, success is assumed because failure
- * will be caused by an incorrect programming model and indicated by a
- * kernel message.
+ * Ends the authorization session on failure.
*/
-void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
+int tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
{
u32 cc, handles, val;
struct tpm2_auth *auth = chip->auth;
@@ -549,9 +591,12 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
u8 cphash[SHA256_DIGEST_SIZE];
struct sha256_ctx sctx;
struct hmac_sha256_ctx hctx;
+ int ret;
- if (!auth)
- return;
+ if (!auth) {
+ ret = -EIO;
+ goto err;
+ }
/* save the command code in BE format */
auth->ordinal = head->ordinal;
@@ -560,9 +605,11 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
i = tpm2_find_cc(chip, cc);
if (i < 0) {
- dev_err(&chip->dev, "Command 0x%x not found in TPM\n", cc);
- return;
+ dev_err(&chip->dev, "command 0x%08x not found\n", cc);
+ ret = -EIO;
+ goto err;
}
+
attrs = chip->cc_attrs_tbl[i];
handles = (attrs >> TPM2_CC_ATTR_CHANDLES) & GENMASK(2, 0);
@@ -576,9 +623,9 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
u32 handle = tpm_buf_read_u32(buf, &offset_s);
if (auth->name_h[i] != handle) {
- dev_err(&chip->dev, "TPM: handle %d wrong for name\n",
- i);
- return;
+ dev_err(&chip->dev, "invalid handle 0x%08x\n", handle);
+ ret = -EIO;
+ goto err;
}
}
/* point offset_s to the start of the sessions */
@@ -609,12 +656,14 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
offset_s += len;
}
if (offset_s != offset_p) {
- dev_err(&chip->dev, "TPM session length is incorrect\n");
- return;
+ dev_err(&chip->dev, "session length is incorrect\n");
+ ret = -EIO;
+ goto err;
}
if (!hmac) {
- dev_err(&chip->dev, "TPM could not find HMAC session\n");
- return;
+ dev_err(&chip->dev, "could not find HMAC session\n");
+ ret = -EIO;
+ goto err;
}
/* encrypt before HMAC */
@@ -646,8 +695,11 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
if (mso == TPM2_MSO_PERSISTENT ||
mso == TPM2_MSO_VOLATILE ||
mso == TPM2_MSO_NVRAM) {
- sha256_update(&sctx, auth->name[i],
- name_size(auth->name[i]));
+ ret = name_size(auth->name[i]);
+ if (ret < 0)
+ goto err;
+
+ sha256_update(&sctx, auth->name[i], ret);
} else {
__be32 h = cpu_to_be32(auth->name_h[i]);
@@ -668,6 +720,11 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
hmac_sha256_update(&hctx, auth->tpm_nonce, sizeof(auth->tpm_nonce));
hmac_sha256_update(&hctx, &auth->attrs, 1);
hmac_sha256_final(&hctx, hmac);
+ return 0;
+
+err:
+ tpm2_end_auth_session(chip);
+ return ret;
}
EXPORT_SYMBOL(tpm_buf_fill_hmac_session);
diff --git a/drivers/cpufreq/rcpufreq_dt.rs b/drivers/cpufreq/rcpufreq_dt.rs
index 53923b8ef7a1..31e07f0279db 100644
--- a/drivers/cpufreq/rcpufreq_dt.rs
+++ b/drivers/cpufreq/rcpufreq_dt.rs
@@ -207,9 +207,9 @@ impl platform::Driver for CPUFreqDTDriver {
fn probe(
pdev: &platform::Device<Core>,
_id_info: Option<&Self::IdInfo>,
- ) -> Result<Pin<KBox<Self>>> {
+ ) -> impl PinInit<Self, Error> {
cpufreq::Registration::<CPUFreqDTDriver>::new_foreign_owned(pdev.as_ref())?;
- Ok(KBox::new(Self {}, GFP_KERNEL)?.into())
+ Ok(Self {})
}
}
diff --git a/drivers/firmware/xilinx/Makefile b/drivers/firmware/xilinx/Makefile
index 875a53703c82..70f8f02f14a3 100644
--- a/drivers/firmware/xilinx/Makefile
+++ b/drivers/firmware/xilinx/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for Xilinx firmwares
-obj-$(CONFIG_ZYNQMP_FIRMWARE) += zynqmp.o
+obj-$(CONFIG_ZYNQMP_FIRMWARE) += zynqmp.o zynqmp-ufs.o
obj-$(CONFIG_ZYNQMP_FIRMWARE_DEBUG) += zynqmp-debug.o
diff --git a/drivers/firmware/xilinx/zynqmp-ufs.c b/drivers/firmware/xilinx/zynqmp-ufs.c
new file mode 100644
index 000000000000..85da8a822f3a
--- /dev/null
+++ b/drivers/firmware/xilinx/zynqmp-ufs.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Firmware Layer for UFS APIs
+ *
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/module.h>
+
+/* Register Node IDs */
+#define PM_REGNODE_PMC_IOU_SLCR 0x30000002 /* PMC IOU SLCR */
+#define PM_REGNODE_EFUSE_CACHE 0x30000003 /* EFUSE Cache */
+
+/* Register Offsets for PMC IOU SLCR */
+#define SRAM_CSR_OFFSET 0x104C /* SRAM Control and Status */
+#define TXRX_CFGRDY_OFFSET 0x1054 /* M-PHY TX-RX Config ready */
+
+/* Masks for SRAM Control and Status Register */
+#define SRAM_CSR_INIT_DONE_MASK BIT(0) /* SRAM initialization done */
+#define SRAM_CSR_EXT_LD_DONE_MASK BIT(1) /* SRAM External load done */
+#define SRAM_CSR_BYPASS_MASK BIT(2) /* Bypass SRAM interface */
+
+/* Mask to check M-PHY TX-RX configuration readiness */
+#define TX_RX_CFG_RDY_MASK GENMASK(3, 0)
+
+/* Register Offsets for EFUSE Cache */
+#define UFS_CAL_1_OFFSET 0xBE8 /* UFS Calibration Value */
+
+/**
+ * zynqmp_pm_is_mphy_tx_rx_config_ready - check M-PHY TX-RX config readiness
+ * @is_ready: Store output status (true/false)
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_is_mphy_tx_rx_config_ready(bool *is_ready)
+{
+ u32 regval;
+ int ret;
+
+ if (!is_ready)
+ return -EINVAL;
+
+ ret = zynqmp_pm_sec_read_reg(PM_REGNODE_PMC_IOU_SLCR, TXRX_CFGRDY_OFFSET, &regval);
+ if (ret)
+ return ret;
+
+	*is_ready = !!(regval & TX_RX_CFG_RDY_MASK);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_is_mphy_tx_rx_config_ready);
+
+/**
+ * zynqmp_pm_is_sram_init_done - check SRAM initialization
+ * @is_done: Store output status (true/false)
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_is_sram_init_done(bool *is_done)
+{
+ u32 regval;
+ int ret;
+
+ if (!is_done)
+ return -EINVAL;
+
+ ret = zynqmp_pm_sec_read_reg(PM_REGNODE_PMC_IOU_SLCR, SRAM_CSR_OFFSET, &regval);
+ if (ret)
+ return ret;
+
+ *is_done = !!(regval & SRAM_CSR_INIT_DONE_MASK);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_is_sram_init_done);
+
+/**
+ * zynqmp_pm_set_sram_bypass - Set SRAM bypass Control
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_set_sram_bypass(void)
+{
+ u32 sram_csr;
+ int ret;
+
+ ret = zynqmp_pm_sec_read_reg(PM_REGNODE_PMC_IOU_SLCR, SRAM_CSR_OFFSET, &sram_csr);
+ if (ret)
+ return ret;
+
+ sram_csr &= ~SRAM_CSR_EXT_LD_DONE_MASK;
+ sram_csr |= SRAM_CSR_BYPASS_MASK;
+
+ return zynqmp_pm_sec_mask_write_reg(PM_REGNODE_PMC_IOU_SLCR, SRAM_CSR_OFFSET,
+ GENMASK(2, 1), sram_csr);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_sram_bypass);
+
+/**
+ * zynqmp_pm_get_ufs_calibration_values - Read UFS calibration values
+ * @val: Store the calibration value
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_get_ufs_calibration_values(u32 *val)
+{
+ return zynqmp_pm_sec_read_reg(PM_REGNODE_EFUSE_CACHE, UFS_CAL_1_OFFSET, val);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_ufs_calibration_values);
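A hedged sketch of how a UFS host-controller driver might consume these helpers during M-PHY bring-up; the polling interval, retry budget, and function name are illustrative, not part of this patch:

	#include <linux/delay.h>
	#include <linux/device.h>
	#include <linux/errno.h>
	#include <linux/firmware/xlnx-zynqmp.h>

	static int example_wait_for_mphy(struct device *dev)
	{
		bool ready = false;
		int retries = 100;
		u32 cal;
		int ret;

		while (retries--) {
			ret = zynqmp_pm_is_mphy_tx_rx_config_ready(&ready);
			if (ret)
				return ret;
			if (ready)
				break;
			usleep_range(10, 20);
		}
		if (!ready)
			return -ETIMEDOUT;

		ret = zynqmp_pm_get_ufs_calibration_values(&cal);
		if (ret)
			return ret;

		dev_dbg(dev, "UFS calibration word: 0x%08x\n", cal);
		return 0;
	}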
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 835a50c5af46..ad811f40e059 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -1654,6 +1654,52 @@ int zynqmp_pm_get_feature_config(enum pm_feature_config_id id,
}
/**
+ * zynqmp_pm_sec_read_reg - PM call to securely read from given offset
+ * of the node
+ * @node_id: Node Id of the device
+ * @offset: Offset to be used (20-bit)
+ * @ret_value: Output data read from the given offset after
+ * firmware access policy is successfully enforced
+ *
+ * Return: Returns 0 on success or error value on failure
+ */
+int zynqmp_pm_sec_read_reg(u32 node_id, u32 offset, u32 *ret_value)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ u32 count = 1;
+ int ret;
+
+ if (!ret_value)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_IOCTL, ret_payload, 4, node_id, IOCTL_READ_REG,
+ offset, count);
+ if (ret)
+ return ret;
+
+ *ret_value = ret_payload[1];
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_sec_read_reg);
+
+/**
+ * zynqmp_pm_sec_mask_write_reg - PM call to securely write to given offset
+ * of the node
+ * @node_id: Node Id of the device
+ * @offset: Offset to be used (20-bit)
+ * @mask: Mask to be used
+ * @value: Value to be written
+ *
+ * Return: Returns 0 on success or error value on failure
+ */
+int zynqmp_pm_sec_mask_write_reg(const u32 node_id, const u32 offset, u32 mask,
+ u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 5, node_id, IOCTL_MASK_WRITE_REG,
+ offset, mask, value);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_sec_mask_write_reg);
+
+/**
* zynqmp_pm_set_sd_config - PM call to set value of SD config registers
* @node: SD node ID
* @config: The config type of SD registers
diff --git a/drivers/gpu/drm/nova/driver.rs b/drivers/gpu/drm/nova/driver.rs
index 91b7380f83ab..2246d8e104e0 100644
--- a/drivers/gpu/drm/nova/driver.rs
+++ b/drivers/gpu/drm/nova/driver.rs
@@ -45,13 +45,13 @@ impl auxiliary::Driver for NovaDriver {
type IdInfo = ();
const ID_TABLE: auxiliary::IdTable<Self::IdInfo> = &AUX_TABLE;
- fn probe(adev: &auxiliary::Device<Core>, _info: &Self::IdInfo) -> Result<Pin<KBox<Self>>> {
+ fn probe(adev: &auxiliary::Device<Core>, _info: &Self::IdInfo) -> impl PinInit<Self, Error> {
let data = try_pin_init!(NovaData { adev: adev.into() });
let drm = drm::Device::<Self>::new(adev.as_ref(), data)?;
drm::Registration::new_foreign_owned(&drm, adev.as_ref(), 0)?;
- Ok(KBox::new(Self { drm }, GFP_KERNEL)?.into())
+ Ok(Self { drm })
}
}
diff --git a/drivers/gpu/drm/nova/file.rs b/drivers/gpu/drm/nova/file.rs
index 90b9d2d0ec4a..a3b7bd36792c 100644
--- a/drivers/gpu/drm/nova/file.rs
+++ b/drivers/gpu/drm/nova/file.rs
@@ -28,7 +28,7 @@ impl File {
_file: &drm::File<File>,
) -> Result<u32> {
let adev = &dev.adev;
- let parent = adev.parent().ok_or(ENOENT)?;
+ let parent = adev.parent();
let pdev: &pci::Device = parent.try_into()?;
let value = match getparam.param as u32 {
diff --git a/drivers/gpu/drm/tyr/driver.rs b/drivers/gpu/drm/tyr/driver.rs
index d5625dd1e41c..0389c558c036 100644
--- a/drivers/gpu/drm/tyr/driver.rs
+++ b/drivers/gpu/drm/tyr/driver.rs
@@ -103,7 +103,7 @@ impl platform::Driver for TyrDriver {
fn probe(
pdev: &platform::Device<Core>,
_info: Option<&Self::IdInfo>,
- ) -> Result<Pin<KBox<Self>>> {
+ ) -> impl PinInit<Self, Error> {
let core_clk = Clk::get(pdev.as_ref(), Some(c_str!("core")))?;
let stacks_clk = OptionalClk::get(pdev.as_ref(), Some(c_str!("stacks")))?;
let coregroup_clk = OptionalClk::get(pdev.as_ref(), Some(c_str!("coregroup")))?;
@@ -143,7 +143,7 @@ impl platform::Driver for TyrDriver {
let tdev: ARef<TyrDevice> = drm::Device::new(pdev.as_ref(), data)?;
drm::driver::Registration::new_foreign_owned(&tdev, pdev.as_ref(), 0)?;
- let driver = KBox::pin_init(try_pin_init!(TyrDriver { device: tdev }), GFP_KERNEL)?;
+ let driver = TyrDriver { device: tdev };
// We need this to be dev_info!() because dev_dbg!() does not work at
// all in Rust for now, and we need to see whether probe succeeded.
diff --git a/drivers/gpu/nova-core/driver.rs b/drivers/gpu/nova-core/driver.rs
index d91bbc50cde7..b8b0cc0f2d93 100644
--- a/drivers/gpu/nova-core/driver.rs
+++ b/drivers/gpu/nova-core/driver.rs
@@ -4,6 +4,7 @@ use kernel::{
auxiliary,
c_str,
device::Core,
+ devres::Devres,
dma::Device,
dma::DmaMask,
pci,
@@ -23,7 +24,8 @@ use crate::gpu::Gpu;
pub(crate) struct NovaCore {
#[pin]
pub(crate) gpu: Gpu,
- _reg: auxiliary::Registration,
+ #[pin]
+ _reg: Devres<auxiliary::Registration>,
}
const BAR0_SIZE: usize = SZ_16M;
@@ -67,41 +69,33 @@ impl pci::Driver for NovaCore {
type IdInfo = ();
const ID_TABLE: pci::IdTable<Self::IdInfo> = &PCI_TABLE;
- fn probe(pdev: &pci::Device<Core>, _info: &Self::IdInfo) -> Result<Pin<KBox<Self>>> {
- dev_dbg!(pdev.as_ref(), "Probe Nova Core GPU driver.\n");
-
- pdev.enable_device_mem()?;
- pdev.set_master();
+ fn probe(pdev: &pci::Device<Core>, _info: &Self::IdInfo) -> impl PinInit<Self, Error> {
+ pin_init::pin_init_scope(move || {
+ dev_dbg!(pdev.as_ref(), "Probe Nova Core GPU driver.\n");
- // SAFETY: No concurrent DMA allocations or mappings can be made because
- // the device is still being probed and therefore isn't being used by
- // other threads of execution.
- unsafe { pdev.dma_set_mask_and_coherent(DmaMask::new::<GPU_DMA_BITS>())? };
+ pdev.enable_device_mem()?;
+ pdev.set_master();
- let devres_bar = Arc::pin_init(
- pdev.iomap_region_sized::<BAR0_SIZE>(0, c_str!("nova-core/bar0")),
- GFP_KERNEL,
- )?;
+ // SAFETY: No concurrent DMA allocations or mappings can be made because
+ // the device is still being probed and therefore isn't being used by
+ // other threads of execution.
+ unsafe { pdev.dma_set_mask_and_coherent(DmaMask::new::<GPU_DMA_BITS>())? };
- // Used to provided a `&Bar0` to `Gpu::new` without tying it to the lifetime of
- // `devres_bar`.
- let bar_clone = Arc::clone(&devres_bar);
- let bar = bar_clone.access(pdev.as_ref())?;
+ let bar = Arc::pin_init(
+ pdev.iomap_region_sized::<BAR0_SIZE>(0, c_str!("nova-core/bar0")),
+ GFP_KERNEL,
+ )?;
- let this = KBox::pin_init(
- try_pin_init!(Self {
- gpu <- Gpu::new(pdev, devres_bar, bar),
- _reg: auxiliary::Registration::new(
+ Ok(try_pin_init!(Self {
+ gpu <- Gpu::new(pdev, bar.clone(), bar.access(pdev.as_ref())?),
+ _reg <- auxiliary::Registration::new(
pdev.as_ref(),
c_str!("nova-drm"),
0, // TODO[XARR]: Once it lands, use XArray; for now we don't use the ID.
crate::MODULE_NAME
- )?,
- }),
- GFP_KERNEL,
- )?;
-
- Ok(this)
+ ),
+ }))
+ })
}
fn unbind(pdev: &pci::Device<Core>, this: Pin<&Self>) {
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 738bc4e60a18..e60a8d3947c9 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1857,7 +1857,8 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_DELAYED_WORK(&ioc->fault_reset_work, mpt_fault_reset_work);
ioc->reset_work_q =
- alloc_workqueue("mpt_poll_%d", WQ_MEM_RECLAIM, 0, ioc->id);
+ alloc_workqueue("mpt_poll_%d", WQ_MEM_RECLAIM | WQ_PERCPU, 0,
+ ioc->id);
if (!ioc->reset_work_q) {
printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n",
ioc->name);
@@ -1984,7 +1985,9 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&ioc->fw_event_list);
spin_lock_init(&ioc->fw_event_lock);
- ioc->fw_event_q = alloc_workqueue("mpt/%d", WQ_MEM_RECLAIM, 0, ioc->id);
+ ioc->fw_event_q = alloc_workqueue("mpt/%d",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0,
+ ioc->id);
if (!ioc->fw_event_q) {
printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n",
ioc->name);
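The WQ_PERCPU additions here (and in the SCSI hunks below) make the historical per-CPU default of alloc_workqueue() explicit, apparently so the implicit default can later be switched to unbound. Reduced to the two spellings (queue names hypothetical):

	/* Explicitly per-CPU: matches today's implicit default. */
	wq = alloc_workqueue("example_percpu", WQ_MEM_RECLAIM | WQ_PERCPU, 0);

	/* Explicitly unbound: for work with no CPU-locality requirement. */
	wq = alloc_workqueue("example_unbound", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);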
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index b9c11f67315f..9d1de68dee27 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -106,7 +106,7 @@ config PHANTOM
config RPMB
tristate "RPMB partition interface"
- depends on MMC
+ depends on MMC || SCSI_UFSHCD
help
Unified RPMB unit interface for RPMB capable devices such as eMMC and
UFS. Provides interface for in-kernel security controllers to access
diff --git a/drivers/pwm/pwm_th1520.rs b/drivers/pwm/pwm_th1520.rs
index 955c359b07fb..e3b7e77356fc 100644
--- a/drivers/pwm/pwm_th1520.rs
+++ b/drivers/pwm/pwm_th1520.rs
@@ -337,7 +337,7 @@ impl platform::Driver for Th1520PwmPlatformDriver {
fn probe(
pdev: &platform::Device<Core>,
_id_info: Option<&Self::IdInfo>,
- ) -> Result<Pin<KBox<Self>>> {
+ ) -> impl PinInit<Self, Error> {
let dev = pdev.as_ref();
let request = pdev.io_request_by_index(0).ok_or(ENODEV)?;
@@ -374,7 +374,7 @@ impl platform::Driver for Th1520PwmPlatformDriver {
pwm::Registration::register(dev, chip)?;
- Ok(KBox::new(Th1520PwmPlatformDriver, GFP_KERNEL)?.into())
+ Ok(Th1520PwmPlatformDriver)
}
}
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index ea66196ef7c7..82c6e7c7cdaf 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -242,7 +242,7 @@ static int aac_queuecommand(struct Scsi_Host *shost,
{
aac_priv(cmd)->owner = AAC_OWNER_LOWLEVEL;
- return aac_scsi_cmd(cmd) ? FAILED : 0;
+ return aac_scsi_cmd(cmd) ? SCSI_MLQUEUE_HOST_BUSY : 0;
}
/**
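FAILED is a SCSI error-handler return value, not a valid ->queuecommand() code; SCSI_MLQUEUE_HOST_BUSY instead asks the midlayer to requeue and retry the command. A hedged sketch of the convention (helpers hypothetical):

	static int example_queuecommand(struct Scsi_Host *shost,
					struct scsi_cmnd *cmd)
	{
		if (example_hw_busy(shost))		/* hypothetical helper */
			return SCSI_MLQUEUE_HOST_BUSY;	/* midlayer requeues */

		if (example_hw_submit(shost, cmd))	/* hypothetical helper */
			return SCSI_MLQUEUE_HOST_BUSY;

		return 0;	/* accepted; completion arrives via scsi_done() */
	}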
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 063e1b5818d3..06223b5ee6da 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -2401,8 +2401,7 @@ static void asc_prt_scsi_host(struct Scsi_Host *s)
struct asc_board *boardp = shost_priv(s);
printk("Scsi_Host at addr 0x%p, device %s\n", s, dev_name(boardp->dev));
- printk(" host_busy %d, host_no %d,\n",
- scsi_host_busy(s), s->host_no);
+ printk(" host_no %d,\n", s->host_no);
printk(" base 0x%lx, io_port 0x%lx, irq %d,\n",
(ulong)s->base, (ulong)s->io_port, boardp->irq);
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index adf3d9145606..95f3620059f7 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -882,6 +882,9 @@ static void asd_pci_remove(struct pci_dev *dev)
asd_disable_ints(asd_ha);
+ /* Ensure all scheduled tasklets complete before freeing resources */
+ tasklet_kill(&asd_ha->seq.dl_tasklet);
+
asd_remove_dev_attrs(asd_ha);
/* XXX more here as needed */
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index dc88bc46dcc0..a0e794ffc980 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -5633,7 +5633,8 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
- phba->wq = alloc_workqueue("beiscsi_%02x_wq", WQ_MEM_RECLAIM, 1,
+ phba->wq = alloc_workqueue("beiscsi_%02x_wq",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 1,
phba->shost->host_no);
if (!phba->wq) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 58da993251e9..0f68739d380a 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -2695,7 +2695,7 @@ static int __init bnx2fc_mod_init(void)
if (rc)
goto detach_ft;
- bnx2fc_wq = alloc_workqueue("bnx2fc", 0, 0);
+ bnx2fc_wq = alloc_workqueue("bnx2fc", WQ_PERCPU, 0);
if (!bnx2fc_wq) {
rc = -ENOMEM;
goto release_bt;
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 1bf5948d1188..6fd89ae33059 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -1300,7 +1300,7 @@ static int __init alua_init(void)
{
int r;
- kaluad_wq = alloc_workqueue("kaluad", WQ_MEM_RECLAIM, 0);
+ kaluad_wq = alloc_workqueue("kaluad", WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!kaluad_wq)
return -ENOMEM;
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 4912087de10d..c8c5dfb3ba9a 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -2438,7 +2438,7 @@ static int __init fcoe_init(void)
unsigned int cpu;
int rc = 0;
- fcoe_wq = alloc_workqueue("fcoe", 0, 0);
+ fcoe_wq = alloc_workqueue("fcoe", WQ_PERCPU, 0);
if (!fcoe_wq)
return -ENOMEM;
diff --git a/drivers/scsi/fnic/fnic_res.c b/drivers/scsi/fnic/fnic_res.c
index 763475587b7f..9801e5fbb0dd 100644
--- a/drivers/scsi/fnic/fnic_res.c
+++ b/drivers/scsi/fnic/fnic_res.c
@@ -134,7 +134,6 @@ int fnic_get_vnic_config(struct fnic *fnic)
c->luns_per_tgt));
c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer);
- c->intr_timer_type = c->intr_timer_type;
/* for older firmware, GET_CONFIG will not return anything */
if (c->wq_copy_count == 0)
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 17173239301e..1b3fbd328277 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -231,6 +231,12 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
goto fail;
}
+ if (shost->nr_reserved_cmds && !sht->queue_reserved_command) {
+ shost_printk(KERN_ERR, shost,
+ "nr_reserved_cmds set but no method to queue\n");
+ goto fail;
+ }
+
/* Use min_t(int, ...) in case shost->can_queue exceeds SHRT_MAX */
shost->cmd_per_lun = min_t(int, shost->cmd_per_lun,
shost->can_queue);
@@ -307,6 +313,14 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
if (error)
goto out_del_dev;
+ if (shost->nr_reserved_cmds) {
+ shost->pseudo_sdev = scsi_get_pseudo_sdev(shost);
+ if (!shost->pseudo_sdev) {
+ error = -ENOMEM;
+ goto out_del_dev;
+ }
+ }
+
scsi_proc_host_add(shost);
scsi_autopm_put_host(shost);
return error;
@@ -436,6 +450,7 @@ struct Scsi_Host *scsi_host_alloc(const struct scsi_host_template *sht, int priv
shost->hostt = sht;
shost->this_id = sht->this_id;
shost->can_queue = sht->can_queue;
+ shost->nr_reserved_cmds = sht->nr_reserved_cmds;
shost->sg_tablesize = sht->sg_tablesize;
shost->sg_prot_tablesize = sht->sg_prot_tablesize;
shost->cmd_per_lun = sht->cmd_per_lun;
@@ -604,8 +619,8 @@ static bool scsi_host_check_in_flight(struct request *rq, void *data)
}
/**
- * scsi_host_busy - Return the host busy counter
- * @shost: Pointer to Scsi_Host to inc.
+ * scsi_host_busy - Return the count of in-flight commands
+ * @shost: Pointer to Scsi_Host
**/
int scsi_host_busy(struct Scsi_Host *shost)
{
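Taken together, the hosts.c checks mean an LLD that sets nr_reserved_cmds must also supply a queue_reserved_command() method, and scsi_add_host() then allocates a pseudo sdev to issue the reserved commands through. A hedged sketch of the opt-in (the fields come from the checks above; the method's exact signature is not shown in this patch and is left out):

	static const struct scsi_host_template example_sht = {
		.name			= "example",
		.can_queue		= 128,
		.nr_reserved_cmds	= 4,	/* carved out for internal commands */
		.queuecommand		= example_queuecommand,
		/* Required whenever nr_reserved_cmds != 0, per the check above. */
		.queue_reserved_command	= example_queue_reserved_command,
	};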
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 5a3787f27369..f259746bc804 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -3533,7 +3533,8 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
init_completion(&vscsi->wait_idle);
init_completion(&vscsi->unconfig);
- vscsi->work_q = alloc_workqueue("ibmvscsis%s", WQ_MEM_RECLAIM, 1,
+ vscsi->work_q = alloc_workqueue("ibmvscsis%s",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 1,
dev_name(&vdev->dev));
if (!vscsi->work_q) {
rc = -ENOMEM;
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
index f96633fa6939..d05d09c1263d 100644
--- a/drivers/scsi/isci/task.h
+++ b/drivers/scsi/isci/task.h
@@ -85,15 +85,17 @@ struct isci_tmf {
struct completion *complete;
enum sas_protocol proto;
+ unsigned char lun[8];
+ u16 io_tag;
+ enum isci_tmf_function_codes tmf_code;
+ int status;
+
+ /* Must be last --ends in a flexible-array member. */
union {
struct ssp_response_iu resp_iu;
struct dev_to_host_fis d2h_fis;
u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
} resp;
- unsigned char lun[8];
- u16 io_tag;
- enum isci_tmf_function_codes tmf_code;
- int status;
};
static inline void isci_print_tmf(struct isci_host *ihost, struct isci_tmf *tmf)
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 224edacf2d8e..689793d03c20 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -311,7 +311,6 @@ struct lpfc_defer_flogi_acc {
u16 rx_id;
u16 ox_id;
struct lpfc_nodelist *ndlp;
-
};
#define LPFC_VMID_TIMER 300 /* timer interval in seconds */
@@ -634,6 +633,7 @@ struct lpfc_vport {
#define FC_CT_RSPN_ID 0x8 /* RSPN_ID accepted by switch */
#define FC_CT_RFT_ID 0x10 /* RFT_ID accepted by switch */
#define FC_CT_RPRT_DEFER 0x20 /* Defer issuing FDMI RPRT */
+#define FC_CT_RSPNI_PNI 0x40 /* RSPNI_PNI accepted by switch */
struct list_head fc_nodes;
spinlock_t fc_nodes_list_lock; /* spinlock for fc_nodes list */
@@ -1078,6 +1078,8 @@ struct lpfc_hba {
uint32_t nport_event_cnt; /* timestamp for nlplist entry */
+ unsigned long pni; /* 64-bit Platform Name Identifier */
+
uint8_t wwnn[8];
uint8_t wwpn[8];
uint32_t RandomData[7];
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index f93f8dca65bd..d3caac394291 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1743,6 +1743,28 @@ lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
static void
+lpfc_cmpl_ct_cmd_rspni_pni(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport;
+ struct lpfc_dmabuf *outp;
+ struct lpfc_sli_ct_request *ctrsp;
+ u32 ulp_status;
+
+ vport = cmdiocb->vport;
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+
+ if (ulp_status == IOSTAT_SUCCESS) {
+ outp = cmdiocb->rsp_dmabuf;
+ ctrsp = (struct lpfc_sli_ct_request *)outp->virt;
+ if (be16_to_cpu(ctrsp->CommandResponse.bits.CmdRsp) ==
+ SLI_CT_RESPONSE_FS_ACC)
+ vport->ct_flags |= FC_CT_RSPNI_PNI;
+ }
+ lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
+}
+
+static void
lpfc_cmpl_ct_cmd_da_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
@@ -1956,6 +1978,8 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
bpl->tus.f.bdeSize = RSPN_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_RSNN_NN)
bpl->tus.f.bdeSize = RSNN_REQUEST_SZ;
+ else if (cmdcode == SLI_CTNS_RSPNI_PNI)
+ bpl->tus.f.bdeSize = RSPNI_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_DA_ID)
bpl->tus.f.bdeSize = DA_ID_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_RFF_ID)
@@ -2077,6 +2101,18 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
CtReq->un.rsnn.symbname, size);
cmpl = lpfc_cmpl_ct_cmd_rsnn_nn;
break;
+ case SLI_CTNS_RSPNI_PNI:
+ vport->ct_flags &= ~FC_CT_RSPNI_PNI;
+ CtReq->CommandResponse.bits.CmdRsp =
+ cpu_to_be16(SLI_CTNS_RSPNI_PNI);
+ CtReq->un.rspni.pni = cpu_to_be64(phba->pni);
+ scnprintf(CtReq->un.rspni.symbname,
+ sizeof(CtReq->un.rspni.symbname), "OS Host Name::%s",
+ phba->os_host_name);
+ CtReq->un.rspni.len = strnlen(CtReq->un.rspni.symbname,
+ sizeof(CtReq->un.rspni.symbname));
+ cmpl = lpfc_cmpl_ct_cmd_rspni_pni;
+ break;
case SLI_CTNS_DA_ID:
/* Implement DA_ID Nameserver request */
CtReq->CommandResponse.bits.CmdRsp =
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 3d47dc7458d1..51cb8571c049 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2013 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -208,6 +208,7 @@ enum lpfc_nlp_flag {
NPR list */
NLP_RM_DFLT_RPI = 26, /* need to remove leftover dflt RPI */
NLP_NODEV_REMOVE = 27, /* Defer removal till discovery ends */
+ NLP_FLOGI_DFR_ACC = 28, /* FLOGI LS_ACC was Deferred */
NLP_SC_REQ = 29, /* Target requires authentication */
NLP_FIRSTBURST = 30, /* Target supports FirstBurst */
NLP_RPI_REGISTERED = 31 /* nlp_rpi is valid */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index b71db7d7d747..02b6d31b9ad9 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -650,8 +650,6 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_class_sup |= FC_COS_CLASS2;
if (sp->cls3.classValid)
ndlp->nlp_class_sup |= FC_COS_CLASS3;
- if (sp->cls4.classValid)
- ndlp->nlp_class_sup |= FC_COS_CLASS4;
ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
sp->cmn.bbRcvSizeLsb;
@@ -934,10 +932,15 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Check to see if link went down during discovery */
if (lpfc_els_chk_latt(vport)) {
/* One additional decrement on node reference count to
- * trigger the release of the node
+ * trigger the release of the node. Make sure the ndlp
+ * is marked NLP_DROPPED.
*/
- if (!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD))
+ if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag) &&
+ !test_bit(NLP_DROPPED, &ndlp->nlp_flag) &&
+ !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) {
+ set_bit(NLP_DROPPED, &ndlp->nlp_flag);
lpfc_nlp_put(ndlp);
+ }
goto out;
}
@@ -995,9 +998,10 @@ stop_rr_fcf_flogi:
IOERR_LOOP_OPEN_FAILURE)))
lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
"2858 FLOGI Status:x%x/x%x TMO"
- ":x%x Data x%lx x%x\n",
+ ":x%x Data x%lx x%x x%lx x%x\n",
ulp_status, ulp_word4, tmo,
- phba->hba_flag, phba->fcf.fcf_flag);
+ phba->hba_flag, phba->fcf.fcf_flag,
+ ndlp->nlp_flag, ndlp->fc4_xpt_flags);
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
@@ -1015,14 +1019,17 @@ stop_rr_fcf_flogi:
* reference to trigger node release.
*/
if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag) &&
- !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD))
+ !test_bit(NLP_DROPPED, &ndlp->nlp_flag) &&
+ !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) {
+ set_bit(NLP_DROPPED, &ndlp->nlp_flag);
lpfc_nlp_put(ndlp);
+ }
lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
"0150 FLOGI Status:x%x/x%x "
- "xri x%x TMO:x%x refcnt %d\n",
+ "xri x%x iotag x%x TMO:x%x refcnt %d\n",
ulp_status, ulp_word4, cmdiocb->sli4_xritag,
- tmo, kref_read(&ndlp->kref));
+ cmdiocb->iotag, tmo, kref_read(&ndlp->kref));
/* If this is not a loop open failure, bail out */
if (!(ulp_status == IOSTAT_LOCAL_REJECT &&
@@ -1279,6 +1286,19 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint32_t tmo, did;
int rc;
+ /* It's possible for lpfc to reissue a FLOGI on an ndlp that is marked
+ * NLP_DROPPED. This happens when the FLOGI completed with the XB bit
+ * set causing lpfc to reference the ndlp until the XRI_ABORTED CQE is
+ * issued. The time window for the XRI_ABORTED CQE can be as much as
+ * 2*2*RA_TOV allowing for ndlp reuse of this type when the link is
+ * cycling quickly. If so, restore the initial reference and clear
+ * the NLP_DROPPED flag because lpfc is retrying.
+ */
+ if (test_and_clear_bit(NLP_DROPPED, &ndlp->nlp_flag)) {
+ if (!lpfc_nlp_get(ndlp))
+ return 1;
+ }
+
cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_FLOGI);
@@ -1334,6 +1354,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Can't do SLI4 class2 without support sequence coalescing */
sp->cls2.classValid = 0;
sp->cls2.seqDelivery = 0;
+
+ /* Fill out Auxiliary Parameter Data */
+ if (phba->pni) {
+ sp->aux.flags =
+ AUX_PARM_DATA_VALID | AUX_PARM_PNI_VALID;
+ sp->aux.pni = cpu_to_be64(phba->pni);
+ sp->aux.npiv_cnt = cpu_to_be16(phba->max_vpi - 1);
+ }
} else {
/* Historical, setting sequential-delivery bit for SLI3 */
sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
@@ -1413,11 +1441,12 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
phba->defer_flogi_acc.ox_id;
}
- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
- " ox_id: x%x, hba_flag x%lx\n",
- phba->defer_flogi_acc.rx_id,
- phba->defer_flogi_acc.ox_id, phba->hba_flag);
+ /* The LS_ACC completion needs to drop the initial reference.
+ * This is a special case for Pt2Pt because both FLOGIs need
+ * to complete and lpfc defers the LS_ACC when the remote
+ * FLOGI arrives before the driver's FLOGI.
+ */
+ set_bit(NLP_FLOGI_DFR_ACC, &ndlp->nlp_flag);
/* Send deferred FLOGI ACC */
lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc,
@@ -1433,6 +1462,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
phba->defer_flogi_acc.ndlp = NULL;
}
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
+ " ox_id: x%x, ndlp x%px hba_flag x%lx\n",
+ phba->defer_flogi_acc.rx_id,
+ phba->defer_flogi_acc.ox_id,
+ phba->defer_flogi_acc.ndlp,
+ phba->hba_flag);
+
vport->fc_myDID = did;
}
@@ -2248,7 +2285,8 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
sp->cmn.valid_vendor_ver_level = 0;
memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
- sp->cmn.bbRcvSizeMsb &= 0xF;
+ if (!test_bit(FC_PT2PT, &vport->fc_flag))
+ sp->cmn.bbRcvSizeMsb &= 0xF;
/* Check if the destination port supports VMID */
ndlp->vmid_support = 0;
@@ -2367,7 +2405,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
mode = KERN_INFO;
/* Warn PRLI status */
- lpfc_printf_vlog(vport, mode, LOG_ELS,
+ lpfc_vlog_msg(vport, mode, LOG_ELS,
"2754 PRLI DID:%06X Status:x%x/x%x, "
"data: x%x x%x x%lx\n",
ndlp->nlp_DID, ulp_status,
@@ -3024,6 +3062,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ndlp->nlp_DID, ulp_status,
ulp_word4);
+ /* Call NLP_EVT_DEVICE_RM if link is down or LOGO is aborted */
if (lpfc_error_lost_link(vport, ulp_status, ulp_word4))
skip_recovery = 1;
}
@@ -3262,7 +3301,7 @@ lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp)
return -ENOMEM;
}
rc = lpfc_reg_rpi(phba, vport->vpi, fc_ndlp->nlp_DID,
- (u8 *)&vport->fc_sparam, mbox, fc_ndlp->nlp_rpi);
+ (u8 *)&ns_ndlp->fc_sparam, mbox, fc_ndlp->nlp_rpi);
if (rc) {
rc = -EACCES;
goto out;
@@ -3306,7 +3345,8 @@ lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp)
*
* This routine is a generic completion callback function for Discovery ELS cmd.
* Currently used by the ELS command issuing routines for the ELS State Change
- * Request (SCR), lpfc_issue_els_scr() and the ELS RDF, lpfc_issue_els_rdf().
+ * Request (SCR), lpfc_issue_els_scr(), Exchange Diagnostic Capabilities (EDC),
+ * lpfc_issue_els_edc() and the ELS RDF, lpfc_issue_els_rdf().
* These commands will be retried once only for ELS timeout errors.
**/
static void
@@ -3379,11 +3419,21 @@ lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_cmpl_els_edc(phba, cmdiocb, rspiocb);
return;
}
+
if (ulp_status) {
/* ELS discovery cmd completes with error */
lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT,
"4203 ELS cmd x%x error: x%x x%X\n", cmd,
ulp_status, ulp_word4);
+
+ /* If the ELS cmd completed with an error and the node is not
+ * registered with the transport (SCSI_XPT_REGD), nothing else
+ * will release the node, so put its initial reference here.
+ */
+ if ((cmd == ELS_CMD_SCR || cmd == ELS_CMD_RDF) &&
+ !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD) &&
+ !test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag))
+ lpfc_nlp_put(ndlp);
goto out;
}
@@ -3452,6 +3502,7 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
uint8_t *pcmd;
uint16_t cmdsize;
struct lpfc_nodelist *ndlp;
+ bool node_created = false;
cmdsize = (sizeof(uint32_t) + sizeof(SCR));
@@ -3461,21 +3512,21 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
if (!ndlp)
return 1;
lpfc_enqueue_node(vport, ndlp);
+ node_created = true;
}
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_SCR);
if (!elsiocb)
- return 1;
+ goto out_node_created;
if (phba->sli_rev == LPFC_SLI_REV4) {
rc = lpfc_reg_fab_ctrl_node(vport, ndlp);
if (rc) {
- lpfc_els_free_iocb(phba, elsiocb);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
"0937 %s: Failed to reg fc node, rc %d\n",
__func__, rc);
- return 1;
+ goto out_free_iocb;
}
}
pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
@@ -3494,23 +3545,27 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
phba->fc_stat.elsXmitSCR++;
elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
elsiocb->ndlp = lpfc_nlp_get(ndlp);
- if (!elsiocb->ndlp) {
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
- }
+ if (!elsiocb->ndlp)
+ goto out_free_iocb;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue SCR: did:x%x refcnt %d",
ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR) {
- lpfc_els_free_iocb(phba, elsiocb);
- lpfc_nlp_put(ndlp);
- return 1;
- }
+ if (rc == IOCB_ERROR)
+ goto out_iocb_error;
return 0;
+
+out_iocb_error:
+ lpfc_nlp_put(ndlp);
+out_free_iocb:
+ lpfc_els_free_iocb(phba, elsiocb);
+out_node_created:
+ if (node_created)
+ lpfc_nlp_put(ndlp);
+ return 1;
}
/**
@@ -3597,8 +3652,8 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
}
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
- "Issue RSCN: did:x%x",
- ndlp->nlp_DID, 0, 0);
+ "Issue RSCN: did:x%x refcnt %d",
+ ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
@@ -3705,10 +3760,7 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
lpfc_nlp_put(ndlp);
return 1;
}
- /* This will cause the callback-function lpfc_cmpl_els_cmd to
- * trigger the release of the node.
- */
- /* Don't release reference count as RDF is likely outstanding */
+
return 0;
}
@@ -3726,7 +3778,12 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
*
* Return code
* 0 - Successfully issued rdf command
- * 1 - Failed to issue rdf command
+ * < 0 - Failed to issue rdf command
+ * -EACCES - RDF not required for NPIV_PORT
+ * -ENODEV - No fabric controller device available
+ * -ENOMEM - No available memory
+ * -EIO - Failed to get a node reference or to issue the RDF ELS command.
+ *
**/
int
lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
@@ -3737,25 +3794,30 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
struct lpfc_nodelist *ndlp;
uint16_t cmdsize;
int rc;
+ bool node_created = false;
+ int err;
cmdsize = sizeof(*prdf);
+ /* RDF ELS is not required on an NPIV VN_Port. */
+ if (vport->port_type == LPFC_NPIV_PORT)
+ return -EACCES;
+
ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID);
if (!ndlp) {
ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID);
if (!ndlp)
return -ENODEV;
lpfc_enqueue_node(vport, ndlp);
+ node_created = true;
}
- /* RDF ELS is not required on an NPIV VN_Port. */
- if (vport->port_type == LPFC_NPIV_PORT)
- return -EACCES;
-
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_RDF);
- if (!elsiocb)
- return -ENOMEM;
+ if (!elsiocb) {
+ err = -ENOMEM;
+ goto out_node_created;
+ }
/* Configure the payload for the supported FPIN events. */
prdf = (struct lpfc_els_rdf_req *)elsiocb->cmd_dmabuf->virt;
@@ -3781,8 +3843,8 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
elsiocb->ndlp = lpfc_nlp_get(ndlp);
if (!elsiocb->ndlp) {
- lpfc_els_free_iocb(phba, elsiocb);
- return -EIO;
+ err = -EIO;
+ goto out_free_iocb;
}
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
@@ -3791,11 +3853,19 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
- lpfc_els_free_iocb(phba, elsiocb);
- lpfc_nlp_put(ndlp);
- return -EIO;
+ err = -EIO;
+ goto out_iocb_error;
}
return 0;
+
+out_iocb_error:
+ lpfc_nlp_put(ndlp);
+out_free_iocb:
+ lpfc_els_free_iocb(phba, elsiocb);
+out_node_created:
+ if (node_created)
+ lpfc_nlp_put(ndlp);
+ return err;
}
/**
@@ -3816,19 +3886,23 @@ static int
lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *ndlp)
{
+ int rc;
+
+ rc = lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL);
/* Send LS_ACC */
- if (lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL)) {
+ if (rc) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
- "1623 Failed to RDF_ACC from x%x for x%x\n",
- ndlp->nlp_DID, vport->fc_myDID);
+ "1623 Failed to RDF_ACC from x%x for x%x Data: %d\n",
+ ndlp->nlp_DID, vport->fc_myDID, rc);
return -EIO;
}
+ rc = lpfc_issue_els_rdf(vport, 0);
/* Issue new RDF for reregistering */
- if (lpfc_issue_els_rdf(vport, 0)) {
+ if (rc) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
- "2623 Failed to re register RDF for x%x\n",
- vport->fc_myDID);
+ "2623 Failed to re register RDF for x%x Data: %d\n",
+ vport->fc_myDID, rc);
return -EIO;
}
@@ -4299,7 +4373,7 @@ lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry)
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
/* The additional lpfc_nlp_put will cause the following
- * lpfc_els_free_iocb routine to trigger the rlease of
+ * lpfc_els_free_iocb routine to trigger the release of
* the node.
*/
lpfc_els_free_iocb(phba, elsiocb);
@@ -5127,7 +5201,7 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
{
struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
- /* The I/O iocb is complete. Clear the node and first dmbuf */
+ /* The I/O iocb is complete. Clear the node and first dmabuf */
elsiocb->ndlp = NULL;
/* cmd_dmabuf = cmd, cmd_dmabuf->next = rsp, bpl_dmabuf = bpl */
@@ -5160,14 +5234,12 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
} else {
buf_ptr1 = elsiocb->cmd_dmabuf;
lpfc_els_free_data(phba, buf_ptr1);
- elsiocb->cmd_dmabuf = NULL;
}
}
if (elsiocb->bpl_dmabuf) {
buf_ptr = elsiocb->bpl_dmabuf;
lpfc_els_free_bpl(phba, buf_ptr);
- elsiocb->bpl_dmabuf = NULL;
}
lpfc_sli_release_iocbq(phba, elsiocb);
return 0;
@@ -5305,11 +5377,12 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
IOCB_t *irsp;
LPFC_MBOXQ_t *mbox = NULL;
u32 ulp_status, ulp_word4, tmo, did, iotag;
+ u32 cmd;
if (!vport) {
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
"3177 null vport in ELS rsp\n");
- goto out;
+ goto release;
}
if (cmdiocb->context_un.mbox)
mbox = cmdiocb->context_un.mbox;
@@ -5419,7 +5492,7 @@ out:
* these conditions because it doesn't need the login.
*/
if (phba->sli_rev == LPFC_SLI_REV4 &&
- vport && vport->port_type == LPFC_NPIV_PORT &&
+ vport->port_type == LPFC_NPIV_PORT &&
!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) {
if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE &&
ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE &&
@@ -5435,6 +5508,27 @@ out:
}
}
+ /* The driver's unsolicited deferred FLOGI ACC in Pt2Pt needs to
+ * release the initial reference because the put after the free_iocb
+ * call removes only the reference from the defer logic. This FLOGI
+ * is never registered with the SCSI transport.
+ */
+ if (test_bit(FC_PT2PT, &vport->fc_flag) &&
+ test_and_clear_bit(NLP_FLOGI_DFR_ACC, &ndlp->nlp_flag)) {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_ELS | LOG_NODE | LOG_DISCOVERY,
+ "3357 Pt2Pt Defer FLOGI ACC ndlp x%px, "
+ "nflags x%lx, fc_flag x%lx\n",
+ ndlp, ndlp->nlp_flag,
+ vport->fc_flag);
+ cmd = *((u32 *)cmdiocb->cmd_dmabuf->virt);
+ if (cmd == ELS_CMD_ACC) {
+ if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag))
+ lpfc_nlp_put(ndlp);
+ }
+ }
+
+release:
/* Release the originating I/O reference. */
lpfc_els_free_iocb(phba, cmdiocb);
lpfc_nlp_put(ndlp);
@@ -5569,7 +5663,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
sp->cls1.classValid = 0;
sp->cls2.classValid = 0;
sp->cls3.classValid = 0;
- sp->cls4.classValid = 0;
/* Copy our worldwide names */
memcpy(&sp->portName, &vport->fc_sparam.portName,
@@ -5583,7 +5676,8 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
sp->cmn.valid_vendor_ver_level = 0;
memset(sp->un.vendorVersion, 0,
sizeof(sp->un.vendorVersion));
- sp->cmn.bbRcvSizeMsb &= 0xF;
+ if (!test_bit(FC_PT2PT, &vport->fc_flag))
+ sp->cmn.bbRcvSizeMsb &= 0xF;
/* If our firmware supports this feature, convey that
* info to the target using the vendor specific field.
@@ -8402,13 +8496,6 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
&wqe->xmit_els_rsp.wqe_com);
vport->fc_myDID = did;
-
- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "3344 Deferring FLOGI ACC: rx_id: x%x,"
- " ox_id: x%x, hba_flag x%lx\n",
- phba->defer_flogi_acc.rx_id,
- phba->defer_flogi_acc.ox_id, phba->hba_flag);
-
phba->defer_flogi_acc.flag = true;
/* This nlp_get is paired with nlp_puts that reset the
@@ -8417,6 +8504,14 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
* processed or cancelled.
*/
phba->defer_flogi_acc.ndlp = lpfc_nlp_get(ndlp);
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "3344 Deferring FLOGI ACC: rx_id: x%x,"
+ " ox_id: x%x, ndlp x%px, hba_flag x%lx\n",
+ phba->defer_flogi_acc.rx_id,
+ phba->defer_flogi_acc.ox_id,
+ phba->defer_flogi_acc.ndlp,
+ phba->hba_flag);
return 0;
}
@@ -8734,7 +8829,7 @@ reject_out:
* @cmdiocb: pointer to lpfc command iocb data structure.
* @ndlp: pointer to a node-list data structure.
*
- * This routine processes Read Timout Value (RTV) IOCB received as an
+ * This routine processes Read Timeout Value (RTV) IOCB received as an
* ELS unsolicited event. It first checks the remote port state. If the
* remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
* state, it invokes the lpfc_els_rsl_reject() routine to send the reject
@@ -10357,11 +10452,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
* Do not process any unsolicited ELS commands
* if the ndlp is in DEV_LOSS
*/
- if (test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) {
- if (newnode)
- lpfc_nlp_put(ndlp);
+ if (test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag))
goto dropit;
- }
elsiocb->ndlp = lpfc_nlp_get(ndlp);
if (!elsiocb->ndlp)
@@ -10843,7 +10935,7 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
/*
* The different unsolicited event handlers would tell us
- * if they are done with "mp" by setting cmd_dmabuf to NULL.
+ * if they are done with "mp" by setting cmd_dmabuf/bpl_dmabuf to NULL.
*/
if (elsiocb->cmd_dmabuf) {
lpfc_in_buf_free(phba, elsiocb->cmd_dmabuf);
@@ -11423,6 +11515,13 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
sp->cls2.seqDelivery = 1;
sp->cls3.seqDelivery = 1;
+ /* Fill out Auxiliary Parameter Data */
+ if (phba->pni) {
+ sp->aux.flags =
+ AUX_PARM_DATA_VALID | AUX_PARM_PNI_VALID;
+ sp->aux.pni = cpu_to_be64(phba->pni);
+ }
+
pcmd += sizeof(uint32_t); /* CSP Word 2 */
pcmd += sizeof(uint32_t); /* CSP Word 3 */
pcmd += sizeof(uint32_t); /* CSP Word 4 */
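The common thread in these lpfc_els.c hunks is using the NLP_DROPPED bit as a once-only guard around the node's initial kref. Reduced to its essentials (a sketch of the pattern, not lpfc code):

	/* Drop the initial reference only if this path is the first to do so. */
	if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag))
		lpfc_nlp_put(ndlp);

	/* On reuse (e.g. a FLOGI retry), restore the reference exactly once. */
	if (test_and_clear_bit(NLP_DROPPED, &ndlp->nlp_flag)) {
		if (!lpfc_nlp_get(ndlp))
			return 1;	/* node is already being torn down */
	}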
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 43d246c5c049..bb803f32bc1b 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -424,6 +424,7 @@ lpfc_check_nlp_post_devloss(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp)
{
if (test_and_clear_bit(NLP_IN_RECOV_POST_DEV_LOSS, &ndlp->save_flags)) {
+ clear_bit(NLP_DROPPED, &ndlp->nlp_flag);
lpfc_nlp_get(ndlp);
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE,
"8438 Devloss timeout reversed on DID x%x "
@@ -566,7 +567,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
return fcf_inuse;
}
- lpfc_nlp_put(ndlp);
+ if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag))
+ lpfc_nlp_put(ndlp);
return fcf_inuse;
}
@@ -4371,6 +4373,8 @@ out:
lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
+ if (phba->pni)
+ lpfc_ns_cmd(vport, SLI_CTNS_RSPNI_PNI, 0, 0);
lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 3bc0efa7453e..b2e353590ebb 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -168,6 +168,11 @@ struct lpfc_sli_ct_request {
uint8_t len;
uint8_t symbname[255];
} rspn;
+ struct rspni { /* For RSPNI_PNI requests */
+ __be64 pni;
+ u8 len;
+ u8 symbname[255];
+ } rspni;
struct gff {
uint32_t PortId;
} gff;
@@ -213,6 +218,8 @@ struct lpfc_sli_ct_request {
sizeof(struct da_id))
#define RSPN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
sizeof(struct rspn))
+#define RSPNI_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
+ sizeof(struct rspni))
/*
* FsType Definitions
@@ -309,6 +316,7 @@ struct lpfc_sli_ct_request {
#define SLI_CTNS_RIP_NN 0x0235
#define SLI_CTNS_RIPA_NN 0x0236
#define SLI_CTNS_RSNN_NN 0x0239
+#define SLI_CTNS_RSPNI_PNI 0x0240
#define SLI_CTNS_DA_ID 0x0300
/*
@@ -512,6 +520,21 @@ struct class_parms {
uint8_t word3Reserved2; /* Fc Word 3, bit 0: 7 */
};
+enum aux_parm_flags {
+ AUX_PARM_PNI_VALID = 0x20, /* FC Word 0, bit 29 */
+ AUX_PARM_DATA_VALID = 0x40, /* FC Word 0, bit 30 */
+};
+
+struct aux_parm {
+ u8 flags; /* FC Word 0, bit 31:24 */
+ u8 ext_feat[3]; /* FC Word 0, bit 23:0 */
+
+ __be64 pni; /* FC Word 1 and 2, platform name identifier */
+
+ __be16 rsvd; /* FC Word 3, bit 31:16 */
+ __be16 npiv_cnt; /* FC Word 3, bit 15:0 */
+} __packed;
+
struct serv_parm { /* Structure is in Big Endian format */
struct csp cmn;
struct lpfc_name portName;
@@ -519,7 +542,7 @@ struct serv_parm { /* Structure is in Big Endian format */
struct class_parms cls1;
struct class_parms cls2;
struct class_parms cls3;
- struct class_parms cls4;
+ struct aux_parm aux;
union {
uint8_t vendorVersion[16];
struct {
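Because aux_parm takes the slot formerly held by cls4, it must occupy exactly the 16 bytes struct class_parms did, or every later serv_parm field shifts on the wire. A compile-time check along these lines would catch a regression (a sketch, not part of this patch):

	/* 1 + 3 + 8 + 2 + 2 == 16 bytes, the size of struct class_parms. */
	static_assert(sizeof(struct aux_parm) == sizeof(struct class_parms));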
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 065eb91de9c0..b1460b16dd91 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3057,12 +3057,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
lpfc_vmid_vport_cleanup(vport);
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (ndlp->nlp_DID == Fabric_Cntl_DID &&
- ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
- lpfc_nlp_put(ndlp);
- continue;
- }
-
/* Fabric Ports not in UNMAPPED state are cleaned up in the
* DEVICE_RM event.
*/
@@ -7950,7 +7944,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Allocate all driver workqueues here */
/* The lpfc_wq workqueue for deferred irq use */
- phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
+ phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!phba->wq)
return -ENOMEM;
@@ -9082,9 +9076,9 @@ lpfc_setup_fdmi_mask(struct lpfc_vport *vport)
vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
}
- lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
- "6077 Setup FDMI mask: hba x%x port x%x\n",
- vport->fdmi_hba_mask, vport->fdmi_port_mask);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6077 Setup FDMI mask: hba x%x port x%x\n",
+ vport->fdmi_hba_mask, vport->fdmi_port_mask);
}
/**
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 1e5ef93e67e3..8240d59f4120 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -432,8 +432,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_class_sup |= FC_COS_CLASS2;
if (sp->cls3.classValid)
ndlp->nlp_class_sup |= FC_COS_CLASS3;
- if (sp->cls4.classValid)
- ndlp->nlp_class_sup |= FC_COS_CLASS4;
ndlp->nlp_maxframe =
((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
/* if already logged in, do implicit logout */
@@ -452,18 +450,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
*/
if (!(ndlp->nlp_type & NLP_FABRIC) &&
!(phba->nvmet_support)) {
- /* Clear ndlp info, since follow up PRLI may have
- * updated ndlp information
- */
- ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
- ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
- ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
- ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER;
- clear_bit(NLP_FIRSTBURST, &ndlp->nlp_flag);
-
- lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
- ndlp, NULL);
- return 1;
+ break;
}
if (nlp_portwwn != 0 &&
nlp_portwwn != wwn_to_u64(sp->portName.u.wwn))
@@ -485,7 +472,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
break;
}
-
+ /* Clear ndlp info, since follow up processes may have
+ * updated ndlp information
+ */
ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
@@ -1426,8 +1415,6 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
ndlp->nlp_class_sup |= FC_COS_CLASS2;
if (sp->cls3.classValid)
ndlp->nlp_class_sup |= FC_COS_CLASS3;
- if (sp->cls4.classValid)
- ndlp->nlp_class_sup |= FC_COS_CLASS4;
ndlp->nlp_maxframe =
((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 7ea7c4245c69..73d77cfab5f8 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -27,6 +27,8 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>
+#include <linux/dmi.h>
+#include <linux/of.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -8447,6 +8449,70 @@ lpfc_set_host_tm(struct lpfc_hba *phba)
}
/**
+ * lpfc_get_platform_uuid - Attempts to extract a platform uuid
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine first attempts to read the System UUID (offset 08h of the
+ * SMBIOS DMI System Information structure). If no valid UUID is found, no
+ * platform UUID is advertised.
+ **/
+static void
+lpfc_get_platform_uuid(struct lpfc_hba *phba)
+{
+ int rc;
+ const char *uuid;
+ char pni[17] = {0}; /* 16 characters + '\0' */
+ bool is_ff = true, is_00 = true;
+ u8 i;
+
+ /* First attempt SMBIOS DMI */
+ uuid = dmi_get_system_info(DMI_PRODUCT_UUID);
+ if (uuid) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2088 SMBIOS UUID %s\n",
+ uuid);
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2099 Could not extract UUID\n");
+ }
+
+ if (uuid && uuid_is_valid(uuid)) {
+ /* Generate PNI from UUID format.
+ *
+ * 1.) Extract lower 64 bits from UUID format.
+ * 2.) Set 3h for NAA Locally Assigned Name Identifier format.
+ *
+ * e.g. xxxxxxxx-xxxx-xxxx-yyyy-yyyyyyyyyyyy
+ *
+ * extract the yyyy-yyyyyyyyyyyy portion
+ * final PNI 3yyyyyyyyyyyyyyy
+ */
+ scnprintf(pni, sizeof(pni), "3%c%c%c%s",
+ uuid[20], uuid[21], uuid[22], &uuid[24]);
+
+ /* Sanitize the converted PNI */
+ for (i = 1; i < 16 && (is_ff || is_00); i++) {
+ if (pni[i] != '0')
+ is_00 = false;
+ if (pni[i] != 'f' && pni[i] != 'F')
+ is_ff = false;
+ }
+
+ /* Convert from char* to unsigned long */
+ rc = kstrtoul(pni, 16, &phba->pni);
+ if (!rc && !is_ff && !is_00) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2100 PNI 0x%016lx\n", phba->pni);
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2101 PNI %s generation status %d\n",
+ pni, rc);
+ phba->pni = 0;
+ }
+ }
+}
+
+/**
* lpfc_sli4_hba_setup - SLI4 device initialization PCI function
* @phba: Pointer to HBA context object.
*
@@ -8529,6 +8595,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
clear_bit(HBA_FCOE_MODE, &phba->hba_flag);
}
+ /* Obtain platform UUID, only for SLI4 FC adapters */
+ if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag))
+ lpfc_get_platform_uuid(phba);
+
if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
LPFC_DCBX_CEE_MODE)
set_bit(HBA_FIP_SUPPORT, &phba->hba_flag);
@@ -19858,13 +19928,15 @@ lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
}
/**
- * lpfc_sli4_resume_rpi - Remove the rpi bitmask region
+ * lpfc_sli4_resume_rpi - Resume traffic relative to an RPI
* @ndlp: pointer to lpfc nodelist data structure.
* @cmpl: completion call-back.
* @iocbq: data to load as mbox ctx_u information
*
- * This routine is invoked to remove the memory region that
- * provided rpi via a bitmask.
+ * Return codes
+ * 0 - successful
+ * -ENOMEM - No available memory
+ * -EIO - The mailbox failed to complete successfully.
**/
int
lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
@@ -19894,7 +19966,6 @@ lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
return -EIO;
}
- /* Post all rpi memory regions to the port. */
lpfc_resume_rpi(mboxq, ndlp);
if (cmpl) {
mboxq->mbox_cmpl = cmpl;
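Worked through on a concrete value, the UUID-to-PNI conversion in lpfc_get_platform_uuid() behaves as follows (the UUID is invented for illustration):

	UUID:        12345678-9abc-def0-1234-56789abcdef0
	lower 64:    1234-56789abcdef0   (chars 19-22 and 24-35)
	PNI string:  "3" + "234" + "56789abcdef0" = "323456789abcdef0"
	             (first nibble replaced by 3h: NAA Locally Assigned)
	phba->pni:   kstrtoul("323456789abcdef0", 16, ...) -> 0x323456789abcdef0

The all-zero/all-F scan then rejects degenerate UUIDs whose lower 64 bits carry no identity, leaving phba->pni at 0 so no PNI is advertised.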
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 31c3c5abdca6..f3dada5bf7c1 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.4.0.11"
+#define LPFC_DRIVER_VERSION "14.4.0.12"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index b677d80e5874..ddeea0ee2834 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -1150,9 +1150,13 @@ typedef struct LOG_BLOCK_SPAN_INFO {
} LD_SPAN_INFO, *PLD_SPAN_INFO;
struct MR_FW_RAID_MAP_ALL {
- struct MR_FW_RAID_MAP raidMap;
- struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES];
+ /* Must be last --ends in a flexible-array member. */
+ TRAILING_OVERLAP(struct MR_FW_RAID_MAP, raidMap, ldSpanMap,
+ struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES];
+ );
} __attribute__ ((packed));
+static_assert(offsetof(struct MR_FW_RAID_MAP_ALL, raidMap.ldSpanMap) ==
+ offsetof(struct MR_FW_RAID_MAP_ALL, ldSpanMap));
struct MR_DRV_RAID_MAP {
/* total size of this structure, including this field.
@@ -1194,10 +1198,13 @@ struct MR_DRV_RAID_MAP {
* And it is mainly for code re-use purpose.
*/
struct MR_DRV_RAID_MAP_ALL {
-
- struct MR_DRV_RAID_MAP raidMap;
- struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_DYN];
+ /* Must be last --ends in a flexible-array member. */
+ TRAILING_OVERLAP(struct MR_DRV_RAID_MAP, raidMap, ldSpanMap,
+ struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_DYN];
+ );
} __packed;
+static_assert(offsetof(struct MR_DRV_RAID_MAP_ALL, raidMap.ldSpanMap) ==
+ offsetof(struct MR_DRV_RAID_MAP_ALL, ldSpanMap));
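TRAILING_OVERLAP() lets a containing struct keep its fixed-size trailing array while overlaying it on the inner struct's flexible-array member, so offsets stay unchanged but the compiler sees a well-formed flexible array for bounds checking. A toy sketch mirroring the usage above (types and names hypothetical):

	struct fw_map {
		u32 count;
		struct span entries[];		/* flexible array at the tail */
	};

	struct fw_map_all {
		/* Overlays all_entries[] onto fw_map's trailing entries[]. */
		TRAILING_OVERLAP(struct fw_map, map, entries,
			struct span all_entries[MAX_ENTRIES];
		);
	};
	static_assert(offsetof(struct fw_map_all, map.entries) ==
		      offsetof(struct fw_map_all, all_entries));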
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 8ff4b89ff81e..9acca83d6958 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -1534,7 +1534,7 @@ static int __init pm8001_init(void)
if (pm8001_use_tasklet && !pm8001_use_msix)
pm8001_use_tasklet = false;
- pm8001_wq = alloc_workqueue("pm80xx", 0, 0);
+ pm8001_wq = alloc_workqueue("pm80xx", WQ_PERCPU, 0);
if (!pm8001_wq)
goto err;
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 6b1ebab36fa3..7792e00800ae 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -3374,7 +3374,8 @@ retry_probe:
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, "qedf->io_mempool=%p.\n",
qedf->io_mempool);
- qedf->link_update_wq = alloc_workqueue("qedf_%u_link", WQ_MEM_RECLAIM,
+ qedf->link_update_wq = alloc_workqueue("qedf_%u_link",
+ WQ_MEM_RECLAIM | WQ_PERCPU,
1, qedf->lport->host->host_no);
INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update);
INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery);
@@ -3585,7 +3586,8 @@ retry_probe:
ether_addr_copy(params.ll2_mac_address, qedf->mac);
/* Start LL2 processing thread */
- qedf->ll2_recv_wq = alloc_workqueue("qedf_%d_ll2", WQ_MEM_RECLAIM, 1,
+ qedf->ll2_recv_wq = alloc_workqueue("qedf_%d_ll2",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 1,
host->host_no);
if (!qedf->ll2_recv_wq) {
QEDF_ERR(&(qedf->dbg_ctx), "Failed to LL2 workqueue.\n");
@@ -3628,7 +3630,8 @@ retry_probe:
}
qedf->timer_work_queue = alloc_workqueue("qedf_%u_timer",
- WQ_MEM_RECLAIM, 1, qedf->lport->host->host_no);
+ WQ_MEM_RECLAIM | WQ_PERCPU, 1,
+ qedf->lport->host->host_no);
if (!qedf->timer_work_queue) {
QEDF_ERR(&(qedf->dbg_ctx), "Failed to start timer "
"workqueue.\n");
@@ -3641,7 +3644,8 @@ retry_probe:
sprintf(host_buf, "qedf_%u_dpc",
qedf->lport->host->host_no);
qedf->dpc_wq =
- alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, host_buf);
+ alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_PERCPU, 1,
+ host_buf);
}
INIT_DELAYED_WORK(&qedf->recovery_work, qedf_recovery_handler);
@@ -4177,7 +4181,8 @@ static int __init qedf_init(void)
goto err3;
}
- qedf_io_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, "qedf_io_wq");
+ qedf_io_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_PERCPU, 1,
+ "qedf_io_wq");
if (!qedf_io_wq) {
QEDF_ERR(NULL, "Could not create qedf_io_wq.\n");
goto err4;
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index b168bb2178e9..56685ee22fdf 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -2768,7 +2768,7 @@ retry_probe:
}
qedi->offload_thread = alloc_workqueue("qedi_ofld%d",
- WQ_MEM_RECLAIM,
+ WQ_MEM_RECLAIM | WQ_PERCPU,
1, qedi->shost->host_no);
if (!qedi->offload_thread) {
QEDI_ERR(&qedi->dbg_ctx,
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index ef841f643171..26c312a48a19 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -2799,7 +2799,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
dprintk(2, "start: cmd=%p sp=%p CDB=%xm, handle %lx\n", cmd, sp,
cmd->cmnd[0], (long)CMD_HANDLE(sp->cmd));
- dprintk(2, " bus %i, target %i, lun %i\n",
+ dprintk(2, " bus %i, target %i, lun %llu\n",
SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
qla1280_dump_buffer(2, cmd->cmnd, MAX_COMMAND_SIZE);
@@ -2871,7 +2871,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
remseg--;
}
dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather "
- "command packet data - b %i, t %i, l %i \n",
+ "command packet data - b %i, t %i, l %llu\n",
SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
SCSI_LUN_32(cmd));
qla1280_dump_buffer(5, (char *)pkt,
@@ -2929,14 +2929,14 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
remseg -= cnt;
dprintk(5, "qla1280_64bit_start_scsi: "
"continuation packet data - b %i, t "
- "%i, l %i \n", SCSI_BUS_32(cmd),
+ "%i, l %llu\n", SCSI_BUS_32(cmd),
SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
qla1280_dump_buffer(5, (char *)pkt,
REQUEST_ENTRY_SIZE);
}
} else { /* No data transfer */
dprintk(5, "qla1280_64bit_start_scsi: No data, command "
- "packet data - b %i, t %i, l %i \n",
+ "packet data - b %i, t %i, l %llu\n",
SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
}
@@ -3655,7 +3655,7 @@ qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
dprintk(2, "qla1280_status_entry: Check "
"condition Sense data, b %i, t %i, "
- "l %i\n", SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
+ "l %llu\n", SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
SCSI_LUN_32(cmd));
if (sense_sz)
qla1280_dump_buffer(2,
@@ -3955,7 +3955,7 @@ __qla1280_print_scsi_cmd(struct scsi_cmnd *cmd)
sp = scsi_cmd_priv(cmd);
printk("SCSI Command @= 0x%p, Handle=0x%p\n", cmd, CMD_HANDLE(cmd));
- printk(" chan=%d, target = 0x%02x, lun = 0x%02x, cmd_len = 0x%02x\n",
+ printk(" chan=%d, target = 0x%02x, lun = 0x%02llx, cmd_len = 0x%02x\n",
SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd),
CMD_CDBLEN(cmd));
printk(" CDB = ");
@@ -3976,29 +3976,6 @@ __qla1280_print_scsi_cmd(struct scsi_cmnd *cmd)
printk(" underflow size = 0x%x, direction=0x%x\n",
cmd->underflow, cmd->sc_data_direction);
}
-
-/**************************************************************************
- * ql1280_dump_device
- *
- **************************************************************************/
-static void
-ql1280_dump_device(struct scsi_qla_host *ha)
-{
-
- struct scsi_cmnd *cp;
- struct srb *sp;
- int i;
-
- printk(KERN_DEBUG "Outstanding Commands on controller:\n");
-
- for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
- if ((sp = ha->outstanding_cmds[i]) == NULL)
- continue;
- if ((cp = sp->cmd) == NULL)
- continue;
- qla1280_print_scsi_cmd(1, cp);
- }
-}
#endif
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 5136549005e7..a7e3ec9bba47 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -54,10 +54,11 @@
* | Misc | 0xd303 | 0xd031-0xd0ff |
* | | | 0xd101-0xd1fe |
* | | | 0xd214-0xd2fe |
- * | Target Mode | 0xe081 | |
+ * | Target Mode | 0xe089 | |
* | Target Mode Management | 0xf09b | 0xf002 |
* | | | 0xf046-0xf049 |
* | Target Mode Task Management | 0x1000d | |
+ * | Target Mode SRR | 0x11038 | |
* ----------------------------------------------------------------------
*/
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index cb95b7b12051..b3265952c4be 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -3503,7 +3503,6 @@ struct isp_operations {
#define QLA_MSIX_RSP_Q 0x01
#define QLA_ATIO_VECTOR 0x02
#define QLA_MSIX_QPAIR_MULTIQ_RSP_Q 0x03
-#define QLA_MSIX_QPAIR_MULTIQ_RSP_Q_HS 0x04
#define QLA_MIDX_DEFAULT 0
#define QLA_MIDX_RSP_Q 1
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 145defc420f2..55d531c19e6b 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -766,7 +766,7 @@ extern int qla2x00_dfs_remove(scsi_qla_host_t *);
/* Global function prototypes for multi-q */
extern int qla25xx_request_irq(struct qla_hw_data *, struct qla_qpair *,
- struct qla_msix_entry *, int);
+ struct qla_msix_entry *);
extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *);
extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *);
extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t,
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 6a2e1c7fd125..d395cbfe6802 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -4369,6 +4369,7 @@ enable_82xx_npiv:
ha->max_npiv_vports =
MIN_MULTI_ID_FABRIC - 1;
}
+ qlt_config_nvram_with_fw_version(vha);
qla2x00_get_resource_cnts(vha);
qla_init_iocb_limit(vha);
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index c4c6b5c6658c..a3971afc2dd1 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -4467,32 +4467,6 @@ qla2xxx_msix_rsp_q(int irq, void *dev_id)
return IRQ_HANDLED;
}
-irqreturn_t
-qla2xxx_msix_rsp_q_hs(int irq, void *dev_id)
-{
- struct qla_hw_data *ha;
- struct qla_qpair *qpair;
- struct device_reg_24xx __iomem *reg;
- unsigned long flags;
-
- qpair = dev_id;
- if (!qpair) {
- ql_log(ql_log_info, NULL, 0x505b,
- "%s: NULL response queue pointer.\n", __func__);
- return IRQ_NONE;
- }
- ha = qpair->hw;
-
- reg = &ha->iobase->isp24;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
- queue_work(ha->wq, &qpair->q_work);
-
- return IRQ_HANDLED;
-}
-
/* Interrupt handling helpers. */
struct qla_init_msix_entry {
@@ -4505,7 +4479,6 @@ static const struct qla_init_msix_entry msix_entries[] = {
{ "rsp_q", qla24xx_msix_rsp_q },
{ "atio_q", qla83xx_msix_atio_q },
{ "qpair_multiq", qla2xxx_msix_rsp_q },
- { "qpair_multiq_hs", qla2xxx_msix_rsp_q_hs },
};
static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
@@ -4792,9 +4765,10 @@ free_irqs:
}
int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
- struct qla_msix_entry *msix, int vector_type)
+ struct qla_msix_entry *msix)
{
- const struct qla_init_msix_entry *intr = &msix_entries[vector_type];
+ const struct qla_init_msix_entry *intr =
+ &msix_entries[QLA_MSIX_QPAIR_MULTIQ_RSP_Q];
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
int ret;
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 32eb0ce8b170..1f01576f044b 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -253,6 +253,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
/* Issue set host interrupt command to send cmd out. */
ha->flags.mbox_int = 0;
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+ reinit_completion(&ha->mbx_intr_comp);
/* Unlock mbx registers and wait for interrupt */
ql_dbg(ql_dbg_mbx, vha, 0x100f,
@@ -279,6 +280,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
"cmd=%x Timeout.\n", command);
spin_lock_irqsave(&ha->hardware_lock, flags);
clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+ reinit_completion(&ha->mbx_intr_comp);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (chip_reset != ha->chip_reset) {
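The two reinit_completion() calls guard against a stale completion: if an earlier mailbox command timed out and its interrupt fired late, ha->mbx_intr_comp could already be in the completed state when the next command starts to wait, so that wait would return instantly. The general reuse pattern, as a sketch:

static DECLARE_COMPLETION(cmd_done);

static int send_and_wait(void)
{
	/*
	 * Reset the done counter before arming the hardware; otherwise
	 * a late complete() from an earlier, timed-out command would
	 * satisfy this wait immediately.
	 */
	reinit_completion(&cmd_done);

	/* ... write mailbox registers / set host interrupt here ... */

	if (!wait_for_completion_timeout(&cmd_done, 30 * HZ))
		return -ETIMEDOUT;
	return 0;
}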
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 8b71ac0b1d99..0abc47e72e0b 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -899,9 +899,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
rsp->options, rsp->id, rsp->rsp_q_in,
rsp->rsp_q_out);
- ret = qla25xx_request_irq(ha, qpair, qpair->msix,
- ha->flags.disable_msix_handshake ?
- QLA_MSIX_QPAIR_MULTIQ_RSP_Q : QLA_MSIX_QPAIR_MULTIQ_RSP_Q_HS);
+ ret = qla25xx_request_irq(ha, qpair, qpair->msix);
if (ret)
goto que_failed;
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 316594aa40cc..42eb65a62f1f 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -1292,7 +1292,7 @@ void qla2xxx_process_purls_iocb(void **pkt, struct rsp_que **rsp)
a.reason = FCNVME_RJT_RC_LOGIC;
a.explanation = FCNVME_RJT_EXP_NONE;
xmt_reject = true;
- kfree(item);
+ qla24xx_free_purex_item(item);
goto out;
}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 9007533e36e0..3a57f07d73f5 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1862,12 +1862,6 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
- if (qla2x00_chip_is_down(vha)) {
- req->outstanding_cmds[cnt] = NULL;
- sp->done(sp, res);
- continue;
- }
-
switch (sp->cmd_type) {
case TYPE_SRB:
qla2x00_abort_srb(qp, sp, res, &flags);
@@ -1881,10 +1875,26 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
continue;
}
cmd = (struct qla_tgt_cmd *)sp;
- cmd->aborted = 1;
+
+ if (cmd->sg_mapped)
+ qlt_unmap_sg(vha, cmd);
+
+ if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
+ cmd->aborted = 1;
+ cmd->write_data_transferred = 0;
+ cmd->state = QLA_TGT_STATE_DATA_IN;
+ ha->tgt.tgt_ops->handle_data(cmd);
+ } else {
+ ha->tgt.tgt_ops->free_cmd(cmd);
+ }
break;
case TYPE_TGT_TMCMD:
- /* Skip task management functions. */
+ /*
+ * Currently, only ABTS responses are placed on
+ * outstanding_cmds[].
+ */
+ qlt_free_ul_mcmd(ha,
+ (struct qla_tgt_mgmt_cmd *) sp);
break;
default:
break;
@@ -3397,7 +3407,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
"req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
- ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 0);
+ ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (unlikely(!ha->wq)) {
ret = -ENOMEM;
goto probe_failed;
@@ -3444,13 +3454,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->mqenable = 0;
if (ha->mqenable) {
- bool startit = false;
-
- if (QLA_TGT_MODE_ENABLED())
- startit = false;
-
- if (ql2x_ini_mode == QLA2XXX_INI_MODE_ENABLED)
- startit = true;
+ bool startit = !!(host->active_mode & MODE_INITIATOR);
/* Create start of day qpairs for Block MQ */
for (i = 0; i < ha->max_qpairs; i++)
@@ -5280,7 +5284,7 @@ void qla24xx_sched_upd_fcport(fc_port_t *fcport)
qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
spin_unlock_irqrestore(&fcport->vha->work_lock, flags);
- queue_work(system_unbound_wq, &fcport->reg_work);
+ queue_work(system_dfl_wq, &fcport->reg_work);
}
static
@@ -7244,6 +7248,7 @@ qla2xxx_wake_dpc(struct scsi_qla_host *vha)
if (!test_bit(UNLOADING, &vha->dpc_flags) && t)
wake_up_process(t);
}
+EXPORT_SYMBOL(qla2xxx_wake_dpc);
/*
* qla2x00_rst_aen
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 1e81582085e3..d772136984c9 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -104,8 +104,6 @@ static void qlt_response_pkt(struct scsi_qla_host *ha, struct rsp_que *rsp,
response_t *pkt);
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
int fn, void *iocb, int flags);
-static void qlt_send_term_exchange(struct qla_qpair *, struct qla_tgt_cmd
- *cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
struct atio_from_isp *atio, uint16_t status, int qfull);
static void qlt_disable_vha(struct scsi_qla_host *vha);
@@ -136,20 +134,6 @@ static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);
-static const char *prot_op_str(u32 prot_op)
-{
- switch (prot_op) {
- case TARGET_PROT_NORMAL: return "NORMAL";
- case TARGET_PROT_DIN_INSERT: return "DIN_INSERT";
- case TARGET_PROT_DOUT_INSERT: return "DOUT_INSERT";
- case TARGET_PROT_DIN_STRIP: return "DIN_STRIP";
- case TARGET_PROT_DOUT_STRIP: return "DOUT_STRIP";
- case TARGET_PROT_DIN_PASS: return "DIN_PASS";
- case TARGET_PROT_DOUT_PASS: return "DOUT_PASS";
- default: return "UNKNOWN";
- }
-}
-
/* This API intentionally takes dest as a parameter, rather than returning
* int value to avoid caller forgetting to issue wmb() after the store */
void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
@@ -226,6 +210,10 @@ static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
struct qla_tgt_sess_op *u;
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
unsigned long flags;
+ unsigned int add_cdb_len = 0;
+
+ /* atio must be the last member of qla_tgt_sess_op for add_cdb_len */
+ BUILD_BUG_ON(offsetof(struct qla_tgt_sess_op, atio) + sizeof(u->atio) != sizeof(*u));
if (tgt->tgt_stop) {
ql_dbg(ql_dbg_async, vha, 0x502c,
@@ -234,12 +222,17 @@ static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
goto out_term;
}
- u = kzalloc(sizeof(*u), GFP_ATOMIC);
+ if (atio->u.raw.entry_type == ATIO_TYPE7 &&
+ atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)
+ add_cdb_len =
+ ((unsigned int) atio->u.isp24.fcp_cmnd.add_cdb_len) * 4;
+
+ u = kzalloc(sizeof(*u) + add_cdb_len, GFP_ATOMIC);
if (u == NULL)
goto out_term;
u->vha = vha;
- memcpy(&u->atio, atio, sizeof(*atio));
+ memcpy(&u->atio, atio, sizeof(*atio) + add_cdb_len);
INIT_LIST_HEAD(&u->cmd_list);
spin_lock_irqsave(&vha->cmd_list_lock, flags);
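The unknown-ATIO hunk above sizes the allocation by the extra CDB bytes and copies past the end of the fixed struct, which is safe only because atio is the last member; the BUILD_BUG_ON proves that at compile time. The same trailing-payload idiom in isolation, with placeholder types:

struct fixed_hdr { u32 len; u8 data[16]; };

struct op {
	struct list_head entry;
	struct fixed_hdr hdr;	/* must stay last for the tail copy */
};

static struct op *queue_op(const struct fixed_hdr *src, size_t extra)
{
	struct op *u;

	/* Compile-time proof that nothing follows 'hdr'. */
	BUILD_BUG_ON(offsetof(struct op, hdr) + sizeof(u->hdr) != sizeof(*u));

	u = kzalloc(sizeof(*u) + extra, GFP_ATOMIC);
	if (!u)
		return NULL;
	/* src must really have 'extra' valid bytes after it. */
	memcpy(&u->hdr, src, sizeof(*src) + extra);
	return u;
}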
@@ -252,7 +245,7 @@ out:
return;
out_term:
- qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio, ha_locked, 0);
+ qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio, ha_locked);
goto out;
}
@@ -271,7 +264,7 @@ static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
"Freeing unknown %s %p, because of Abort\n",
"ATIO_TYPE7", u);
qlt_send_term_exchange(vha->hw->base_qpair, NULL,
- &u->atio, ha_locked, 0);
+ &u->atio, ha_locked);
goto abort;
}
@@ -285,7 +278,7 @@ static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
"Freeing unknown %s %p, because tgt is being stopped\n",
"ATIO_TYPE7", u);
qlt_send_term_exchange(vha->hw->base_qpair, NULL,
- &u->atio, ha_locked, 0);
+ &u->atio, ha_locked);
} else {
ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503d,
"Reschedule u %p, vha %p, host %p\n", u, vha, host);
@@ -1909,6 +1902,10 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
* ABTS response. So, in it ID fields are reversed.
*/
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe082,
+ "qla_target(%d): tag %u: Sending TERM EXCH CTIO for ABTS\n",
+ vha->vp_idx, le32_to_cpu(entry->exchange_addr_to_abort));
+
ctio->entry_type = CTIO_TYPE7;
ctio->entry_count = 1;
ctio->nport_handle = entry->nport_handle;
@@ -1987,8 +1984,12 @@ static void abort_cmds_for_lun(struct scsi_qla_host *vha, u64 lun, be_id_t s_id)
cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
cmd_lun = scsilun_to_int(
(struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
- if (cmd_key == key && cmd_lun == lun)
+ if (cmd_key == key && cmd_lun == lun) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe085,
+ "qla_target(%d): tag %lld: aborted by TMR\n",
+ vha->vp_idx, cmd->se_cmd.tag);
cmd->aborted = 1;
+ }
}
spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
}
@@ -2017,7 +2018,6 @@ static void qlt_do_tmr_work(struct work_struct *work)
struct qla_hw_data *ha = mcmd->vha->hw;
int rc;
uint32_t tag;
- unsigned long flags;
switch (mcmd->tmr_func) {
case QLA_TGT_ABTS:
@@ -2032,34 +2032,12 @@ static void qlt_do_tmr_work(struct work_struct *work)
mcmd->tmr_func, tag);
if (rc != 0) {
- spin_lock_irqsave(mcmd->qpair->qp_lock_ptr, flags);
- switch (mcmd->tmr_func) {
- case QLA_TGT_ABTS:
- mcmd->fc_tm_rsp = FCP_TMF_REJECTED;
- qlt_build_abts_resp_iocb(mcmd);
- break;
- case QLA_TGT_LUN_RESET:
- case QLA_TGT_CLEAR_TS:
- case QLA_TGT_ABORT_TS:
- case QLA_TGT_CLEAR_ACA:
- case QLA_TGT_TARGET_RESET:
- qlt_send_busy(mcmd->qpair, &mcmd->orig_iocb.atio,
- qla_sam_status);
- break;
-
- case QLA_TGT_ABORT_ALL:
- case QLA_TGT_NEXUS_LOSS_SESS:
- case QLA_TGT_NEXUS_LOSS:
- qlt_send_notify_ack(mcmd->qpair,
- &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
- break;
- }
- spin_unlock_irqrestore(mcmd->qpair->qp_lock_ptr, flags);
-
ql_dbg(ql_dbg_tgt_mgt, mcmd->vha, 0xf052,
"qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
mcmd->vha->vp_idx, rc);
- mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+ mcmd->flags |= QLA24XX_MGMT_LLD_OWNED;
+ mcmd->fc_tm_rsp = FCP_TMF_FAILED;
+ qlt_xmit_tm_rsp(mcmd);
}
}
@@ -2247,6 +2225,21 @@ void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
EXPORT_SYMBOL(qlt_free_mcmd);
/*
+ * If the upper layer knows about this mgmt cmd, then call its ->free_cmd()
+ * callback, which will eventually call qlt_free_mcmd(). Otherwise, call
+ * qlt_free_mcmd() directly.
+ */
+void qlt_free_ul_mcmd(struct qla_hw_data *ha, struct qla_tgt_mgmt_cmd *mcmd)
+{
+ if (!mcmd)
+ return;
+ if (mcmd->flags & QLA24XX_MGMT_LLD_OWNED)
+ qlt_free_mcmd(mcmd);
+ else
+ ha->tgt.tgt_ops->free_mcmd(mcmd);
+}
+
+/*
* ha->hardware_lock supposed to be held on entry. Might drop it, then
* reacquire
*/
@@ -2338,12 +2331,12 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
"RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n",
vha->flags.online, qla2x00_reset_active(vha),
mcmd->reset_count, qpair->chip_reset);
- ha->tgt.tgt_ops->free_mcmd(mcmd);
+ qlt_free_ul_mcmd(ha, mcmd);
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
return;
}
- if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) {
+ if (mcmd->flags & QLA24XX_MGMT_SEND_NACK) {
switch (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode) {
case ELS_LOGO:
case ELS_PRLO:
@@ -2376,7 +2369,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
* qlt_xmit_tm_rsp() returns here..
*/
if (free_mcmd)
- ha->tgt.tgt_ops->free_mcmd(mcmd);
+ qlt_free_ul_mcmd(ha, mcmd);
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
}
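The == to & change above is load-bearing: mcmd->flags is now a bit mask that can carry QLA24XX_MGMT_LLD_OWNED in addition to QLA24XX_MGMT_SEND_NACK, and an equality test goes false as soon as a second bit is set. In isolation, with illustrative values:

#define MGMT_SEND_NACK	BIT(0)	/* illustrative values only */
#define MGMT_LLD_OWNED	BIT(1)

static bool wants_nack(unsigned int flags)
{
	/*
	 * Test the bit, not the whole word: with
	 * flags == (MGMT_SEND_NACK | MGMT_LLD_OWNED), the old check
	 * (flags == MGMT_SEND_NACK) was false even though a NACK
	 * had been requested.
	 */
	return flags & MGMT_SEND_NACK;
}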
@@ -2443,7 +2436,7 @@ out_err:
return -1;
}
-static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
+void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
{
struct qla_hw_data *ha;
struct qla_qpair *qpair;
@@ -3218,12 +3211,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
uint32_t full_req_cnt = 0;
unsigned long flags = 0;
int res;
-
- if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
- (cmd->sess && cmd->sess->deleted)) {
- cmd->state = QLA_TGT_STATE_PROCESSED;
- return 0;
- }
+ int pre_xmit_res;
ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018,
"is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p] qp %d\n",
@@ -3231,33 +3219,43 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
&cmd->se_cmd, qpair->id);
- res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
+ pre_xmit_res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
&full_req_cnt);
- if (unlikely(res != 0)) {
- return res;
- }
+ /*
+ * Check pre_xmit_res later because we want to check other errors
+ * first.
+ */
+
+ /* Begin timer on the first call, not on SRR retry. */
+ if (likely(cmd->jiffies_at_hw_st_entry == 0))
+ cmd->jiffies_at_hw_st_entry = get_jiffies_64();
spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+ if (unlikely(cmd->sent_term_exchg ||
+ cmd->sess->deleted ||
+ !qpair->fw_started ||
+ cmd->reset_count != qpair->chip_reset)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe101,
+ "qla_target(%d): tag %lld: skipping send response for aborted cmd\n",
+ vha->vp_idx, cmd->se_cmd.tag);
+ qlt_unmap_sg(vha, cmd);
+ cmd->state = QLA_TGT_STATE_PROCESSED;
+ vha->hw->tgt.tgt_ops->free_cmd(cmd);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+ return 0;
+ }
+
+ /* Check for errors from qlt_pre_xmit_response(). */
+ res = pre_xmit_res;
+ if (unlikely(res))
+ goto out_unmap_unlock;
+
if (xmit_type == QLA_TGT_XMIT_STATUS)
qpair->tgt_counters.core_qla_snd_status++;
else
qpair->tgt_counters.core_qla_que_buf++;
- if (!qpair->fw_started || cmd->reset_count != qpair->chip_reset) {
- /*
- * Either the port is not online or this request was from
- * previous life, just abort the processing.
- */
- cmd->state = QLA_TGT_STATE_PROCESSED;
- ql_dbg_qp(ql_dbg_async, qpair, 0xe101,
- "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
- vha->flags.online, qla2x00_reset_active(vha),
- cmd->reset_count, qpair->chip_reset);
- res = 0;
- goto out_unmap_unlock;
- }
-
/* Does F/W have an IOCBs for this request */
res = qlt_check_reserve_free_req(qpair, full_req_cnt);
if (unlikely(res))
@@ -3372,36 +3370,50 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
struct qla_tgt_prm prm;
unsigned long flags = 0;
int res = 0;
+ int pci_map_res;
struct qla_qpair *qpair = cmd->qpair;
+ /* Begin timer on the first call, not on SRR retry. */
+ if (likely(cmd->jiffies_at_hw_st_entry == 0))
+ cmd->jiffies_at_hw_st_entry = get_jiffies_64();
+
memset(&prm, 0, sizeof(prm));
prm.cmd = cmd;
prm.tgt = tgt;
prm.sg = NULL;
prm.req_cnt = 1;
- if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
- (cmd->sess && cmd->sess->deleted)) {
- /*
- * Either the port is not online or this request was from
- * previous life, just abort the processing.
- */
+ /* Calculate number of entries and segments required */
+ pci_map_res = qlt_pci_map_calc_cnt(&prm);
+ /*
+ * Check pci_map_res later because we want to check other errors first.
+ */
+
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+
+ if (unlikely(cmd->sent_term_exchg ||
+ cmd->sess->deleted ||
+ !qpair->fw_started ||
+ cmd->reset_count != qpair->chip_reset)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe102,
+ "qla_target(%d): tag %lld: skipping data-out for aborted cmd\n",
+ vha->vp_idx, cmd->se_cmd.tag);
+ qlt_unmap_sg(vha, cmd);
cmd->aborted = 1;
cmd->write_data_transferred = 0;
cmd->state = QLA_TGT_STATE_DATA_IN;
+ cmd->jiffies_at_hw_st_entry = 0;
vha->hw->tgt.tgt_ops->handle_data(cmd);
- ql_dbg_qp(ql_dbg_async, qpair, 0xe102,
- "RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n",
- vha->flags.online, qla2x00_reset_active(vha),
- cmd->reset_count, qpair->chip_reset);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
return 0;
}
- /* Calculate number of entries and segments required */
- if (qlt_pci_map_calc_cnt(&prm) != 0)
- return -EAGAIN;
+ /* Check for errors from qlt_pci_map_calc_cnt(). */
+ if (unlikely(pci_map_res != 0)) {
+ res = -EAGAIN;
+ goto out_unlock_free_unmap;
+ }
- spin_lock_irqsave(qpair->qp_lock_ptr, flags);
/* Does F/W have an IOCBs for this request */
res = qlt_check_reserve_free_req(qpair, prm.req_cnt);
if (res != 0)
@@ -3438,6 +3450,7 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
return res;
out_unlock_free_unmap:
+ cmd->jiffies_at_hw_st_entry = 0;
qlt_unmap_sg(vha, cmd);
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
@@ -3457,7 +3470,6 @@ qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
uint8_t *ep = &sts->expected_dif[0];
uint64_t lba = cmd->se_cmd.t_task_lba;
uint8_t scsi_status, sense_key, asc, ascq;
- unsigned long flags;
struct scsi_qla_host *vha = cmd->vha;
cmd->trc_flags |= TRC_DIF_ERR;
@@ -3528,16 +3540,14 @@ out:
case QLA_TGT_STATE_NEED_DATA:
/* handle_data will load DIF error code */
cmd->state = QLA_TGT_STATE_DATA_IN;
+ cmd->jiffies_at_hw_st_entry = 0;
vha->hw->tgt.tgt_ops->handle_data(cmd);
break;
default:
- spin_lock_irqsave(&cmd->cmd_lock, flags);
- if (cmd->aborted) {
- spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+ if (cmd->sent_term_exchg) {
vha->hw->tgt.tgt_ops->free_cmd(cmd);
break;
}
- spin_unlock_irqrestore(&cmd->cmd_lock, flags);
qlt_send_resp_ctio(qpair, cmd, scsi_status, sense_key, asc,
ascq);
@@ -3611,6 +3621,62 @@ static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
}
/*
+ * Handle an SRR that was previously associated with a command when the
+ * command has been aborted or otherwise cannot process the SRR.
+ *
+ * If reject is true, then attempt to reject the SRR. Otherwise abort the
+ * immediate notify exchange.
+ */
+void qlt_srr_abort(struct qla_tgt_cmd *cmd, bool reject)
+{
+ struct scsi_qla_host *vha = cmd->vha;
+ struct qla_tgt_srr *srr = cmd->srr;
+
+ if (srr->imm_ntfy_recvd) {
+ if (reject)
+ srr->reject = true;
+ else
+ srr->aborted = true;
+
+ if (srr->ctio_recvd) {
+ /*
+ * The SRR should already be scheduled for processing,
+ * and the SRR processing code should see that the cmd
+ * has been aborted and take appropriate action. In
+ * addition, the cmd refcount should have been
+ * incremented, preventing the cmd from being freed
+ * until SRR processing is done.
+ */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1102e,
+ "qla_target(%d): tag %lld: %s: SRR already scheduled\n",
+ vha->vp_idx, cmd->se_cmd.tag, __func__);
+ } else {
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ unsigned long flags;
+
+ /* Schedule processing for the SRR immediate notify. */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1102f,
+ "qla_target(%d): tag %lld: %s: schedule SRR %s\n",
+ vha->vp_idx, cmd->se_cmd.tag, __func__,
+ reject ? "reject" : "abort");
+ cmd->srr = NULL;
+ srr->cmd = NULL;
+ spin_lock_irqsave(&tgt->srr_lock, flags);
+ list_add_tail(&srr->srr_list_entry, &tgt->srr_list);
+ queue_work(qla_tgt_wq, &tgt->srr_work);
+ spin_unlock_irqrestore(&tgt->srr_lock, flags);
+ }
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11030,
+ "qla_target(%d): tag %lld: %s: no IMM SRR; free SRR\n",
+ vha->vp_idx, cmd->se_cmd.tag, __func__);
+ cmd->srr = NULL;
+ kfree(srr);
+ }
+}
+EXPORT_SYMBOL(qlt_srr_abort);
+
+/*
 * If hardware_lock held on entry, might drop it, then reacquire
* This function sends the appropriate CTIO to ISP 2xxx or 24xx
*/
@@ -3618,43 +3684,61 @@ static int __qlt_send_term_exchange(struct qla_qpair *qpair,
struct qla_tgt_cmd *cmd,
struct atio_from_isp *atio)
{
- struct scsi_qla_host *vha = qpair->vha;
struct ctio7_to_24xx *ctio24;
- struct qla_hw_data *ha = vha->hw;
- request_t *pkt;
- int ret = 0;
+ struct scsi_qla_host *vha;
+ uint16_t loop_id;
uint16_t temp;
- ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
-
- if (cmd)
+ if (cmd) {
vha = cmd->vha;
+ loop_id = cmd->loop_id;
+ } else {
+ port_id_t id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id);
+ struct qla_hw_data *ha;
+ struct fc_port *sess;
+ unsigned long flags;
- pkt = (request_t *)qla2x00_alloc_iocbs_ready(qpair, NULL);
- if (pkt == NULL) {
+ vha = qpair->vha;
+ ha = vha->hw;
+
+ /*
+ * CTIO7_NHANDLE_UNRECOGNIZED works when aborting an idle
+ * command but not when aborting a command with an active CTIO
+ * exchange.
+ */
+ loop_id = CTIO7_NHANDLE_UNRECOGNIZED;
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ sess = qla2x00_find_fcport_by_nportid(vha, &id, 1);
+ if (sess)
+ loop_id = sess->loop_id;
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ }
+
+ if (cmd) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe009,
+ "qla_target(%d): tag %lld: Sending TERM EXCH CTIO state %d cmd_sent_to_fw %u\n",
+ vha->vp_idx, cmd->se_cmd.tag, cmd->state,
+ cmd->cmd_sent_to_fw);
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe009,
+ "qla_target(%d): tag %u: Sending TERM EXCH CTIO (no cmd)\n",
+ vha->vp_idx, le32_to_cpu(atio->u.isp24.exchange_addr));
+ }
+
+ ctio24 = qla2x00_alloc_iocbs_ready(qpair, NULL);
+ if (!ctio24) {
ql_dbg(ql_dbg_tgt, vha, 0xe050,
"qla_target(%d): %s failed: unable to allocate "
"request packet\n", vha->vp_idx, __func__);
return -ENOMEM;
}
- if (cmd != NULL) {
- if (cmd->state < QLA_TGT_STATE_PROCESSED) {
- ql_dbg(ql_dbg_tgt, vha, 0xe051,
- "qla_target(%d): Terminating cmd %p with "
- "incorrect state %d\n", vha->vp_idx, cmd,
- cmd->state);
- } else
- ret = 1;
- }
-
qpair->tgt_counters.num_term_xchg_sent++;
- pkt->entry_count = 1;
- pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
- ctio24 = (struct ctio7_to_24xx *)pkt;
ctio24->entry_type = CTIO_TYPE7;
- ctio24->nport_handle = cpu_to_le16(CTIO7_NHANDLE_UNRECOGNIZED);
+ ctio24->entry_count = 1;
+ ctio24->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+ ctio24->nport_handle = cpu_to_le16(loop_id);
ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
ctio24->vp_index = vha->vp_idx;
ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
@@ -3671,12 +3755,25 @@ static int __qlt_send_term_exchange(struct qla_qpair *qpair,
qpair->reqq_start_iocbs(qpair);
else
qla2x00_start_iocbs(vha, qpair->req);
- return ret;
+ return 0;
}
-static void qlt_send_term_exchange(struct qla_qpair *qpair,
- struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked,
- int ul_abort)
+/*
+ * Aborting a command that is active in the FW (i.e. cmd->cmd_sent_to_fw == 1)
+ * will usually trigger the FW to send a completion CTIO with error status,
+ * and the driver will then call the ->handle_data() or ->free_cmd() callbacks.
+ * This can be used to clear a command that is locked up in the FW unless there
+ * is something more seriously wrong.
+ *
+ * Aborting a command that is not active in the FW (i.e.
+ * cmd->cmd_sent_to_fw == 0) will not directly trigger any callbacks. Instead,
+ * when the target mode midlevel calls qlt_rdy_to_xfer() or
+ * qlt_xmit_response(), the driver will see that the cmd has been aborted and
+ * call the appropriate callback immediately without performing the requested
+ * operation.
+ */
+void qlt_send_term_exchange(struct qla_qpair *qpair,
+ struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
{
struct scsi_qla_host *vha;
unsigned long flags = 0;
@@ -3700,10 +3797,14 @@ static void qlt_send_term_exchange(struct qla_qpair *qpair,
qlt_alloc_qfull_cmd(vha, atio, 0, 0);
done:
- if (cmd && !ul_abort && !cmd->aborted) {
- if (cmd->sg_mapped)
- qlt_unmap_sg(vha, cmd);
- vha->hw->tgt.tgt_ops->free_cmd(cmd);
+ if (cmd) {
+ /*
+ * Set these even if -ENOMEM above, since the term exchange will
+ * still be sent eventually...
+ */
+ cmd->sent_term_exchg = 1;
+ cmd->aborted = 1;
+ cmd->jiffies_at_term_exchg = jiffies;
}
if (!ha_locked)
@@ -3711,6 +3812,7 @@ done:
return;
}
+EXPORT_SYMBOL(qlt_send_term_exchange);
static void qlt_init_term_exchange(struct scsi_qla_host *vha)
{
@@ -3761,38 +3863,35 @@ static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
{
- struct qla_tgt *tgt = cmd->tgt;
- struct scsi_qla_host *vha = tgt->vha;
- struct se_cmd *se_cmd = &cmd->se_cmd;
+ struct scsi_qla_host *vha = cmd->vha;
+ struct qla_qpair *qpair = cmd->qpair;
unsigned long flags;
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
- "qla_target(%d): terminating exchange for aborted cmd=%p "
- "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
- se_cmd->tag);
-
- spin_lock_irqsave(&cmd->cmd_lock, flags);
- if (cmd->aborted) {
- if (cmd->sg_mapped)
- qlt_unmap_sg(vha, cmd);
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
- spin_unlock_irqrestore(&cmd->cmd_lock, flags);
- /*
- * It's normal to see 2 calls in this path:
- * 1) XFER Rdy completion + CMD_T_ABORT
- * 2) TCM TMR - drain_state_list
- */
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf016,
- "multiple abort. %p transport_state %x, t_state %x, "
- "se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state,
- cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags);
- return -EIO;
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
+ "qla_target(%d): tag %lld: cmd being aborted (state %d) %s; %s\n",
+ vha->vp_idx, cmd->se_cmd.tag, cmd->state,
+ cmd->cmd_sent_to_fw ? "sent to fw" : "not sent to fw",
+ cmd->aborted ? "aborted" : "not aborted");
+
+ if (cmd->state != QLA_TGT_STATE_DONE && !cmd->sent_term_exchg) {
+ if (!qpair->fw_started ||
+ cmd->reset_count != qpair->chip_reset) {
+ /*
+ * Chip was reset; just pretend that we sent the term
+ * exchange.
+ */
+ cmd->sent_term_exchg = 1;
+ cmd->aborted = 1;
+ cmd->jiffies_at_term_exchg = jiffies;
+ } else {
+ qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1);
+ }
}
- cmd->aborted = 1;
- cmd->trc_flags |= TRC_ABORT;
- spin_unlock_irqrestore(&cmd->cmd_lock, flags);
- qlt_send_term_exchange(cmd->qpair, cmd, &cmd->atio, 0, 1);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
return 0;
}
EXPORT_SYMBOL(qlt_abort_cmd);
@@ -3812,54 +3911,99 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd)
qlt_decr_num_pend_cmds(cmd->vha);
BUG_ON(cmd->sg_mapped);
+ if (unlikely(cmd->free_sg)) {
+ cmd->free_sg = 0;
+ qlt_free_sg(cmd);
+ }
+ if (unlikely(cmd->srr))
+ qlt_srr_abort(cmd, false);
+
+ if (unlikely(cmd->aborted ||
+ (cmd->trc_flags & (TRC_CTIO_STRANGE | TRC_CTIO_ERR |
+ TRC_SRR_CTIO | TRC_SRR_IMM)))) {
+ ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xe086,
+ "qla_target(%d): tag %lld: free cmd (trc_flags %x, aborted %u, sent_term_exchg %u, rsp_sent %u)\n",
+ cmd->vha->vp_idx, cmd->se_cmd.tag,
+ cmd->trc_flags, cmd->aborted, cmd->sent_term_exchg,
+ cmd->rsp_sent);
+ }
+
+ if (unlikely(cmd->cdb != &cmd->atio.u.isp24.fcp_cmnd.cdb[0])) {
+ kfree(cmd->cdb);
+ cmd->cdb = &cmd->atio.u.isp24.fcp_cmnd.cdb[0];
+ cmd->cdb_len = 16;
+ }
+
cmd->jiffies_at_free = get_jiffies_64();
if (!sess || !sess->se_sess) {
WARN_ON(1);
return;
}
- cmd->jiffies_at_free = get_jiffies_64();
cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
}
EXPORT_SYMBOL(qlt_free_cmd);
/*
- * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
+ * Process a CTIO response for a SCSI command that failed due to SRR.
+ *
+ * qpair->qp_lock_ptr supposed to be held on entry
*/
-static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
- struct qla_tgt_cmd *cmd, uint32_t status)
+static int qlt_prepare_srr_ctio(struct qla_qpair *qpair,
+ struct qla_tgt_cmd *cmd)
{
- int term = 0;
- struct scsi_qla_host *vha = qpair->vha;
+ struct scsi_qla_host *vha = cmd->vha;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ struct qla_tgt_srr *srr;
- if (cmd->se_cmd.prot_op)
- ql_dbg(ql_dbg_tgt_dif, vha, 0xe013,
- "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] "
- "se_cmd=%p tag[%x] op %#x/%s",
- cmd->lba, cmd->lba,
- cmd->num_blks, &cmd->se_cmd,
- cmd->atio.u.isp24.exchange_addr,
- cmd->se_cmd.prot_op,
- prot_op_str(cmd->se_cmd.prot_op));
-
- if (ctio != NULL) {
- struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
-
- term = !(c->flags &
- cpu_to_le16(OF_TERM_EXCH));
- } else
- term = 1;
+ cmd->trc_flags |= TRC_SRR_CTIO;
- if (term)
- qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0);
+ srr = cmd->srr;
+ if (srr != NULL) {
+ /* qlt_prepare_srr_imm() was called first. */
- return term;
-}
+ WARN_ON(srr->ctio_recvd);
+ WARN_ON(!srr->imm_ntfy_recvd);
+ if (vha->hw->tgt.tgt_ops->get_cmd_ref(cmd)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11037,
+ "qla_target(%d): tag %lld: unable to get cmd ref for SRR processing\n",
+ vha->vp_idx, cmd->se_cmd.tag);
+ qlt_srr_abort(cmd, true);
+ return -ESHUTDOWN;
+ }
+
+ srr->ctio_recvd = true;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1100f,
+ "qla_target(%d): tag %lld: Scheduling SRR work\n",
+ vha->vp_idx, cmd->se_cmd.tag);
+
+ /* Schedule the srr for processing in qlt_handle_srr(). */
+ /* IRQ is already OFF */
+ spin_lock(&tgt->srr_lock);
+ list_add_tail(&srr->srr_list_entry, &tgt->srr_list);
+ queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &tgt->srr_work);
+ spin_unlock(&tgt->srr_lock);
+ return 0;
+ }
+
+ srr = kzalloc(sizeof(*srr), GFP_ATOMIC);
+ if (!srr)
+ return -ENOMEM;
+
+ /* Expect qlt_prepare_srr_imm() to be called. */
+ srr->ctio_recvd = true;
+ srr->cmd = cmd;
+ srr->reset_count = cmd->reset_count;
+ cmd->srr = srr;
+ return 0;
+}
/* ha->hardware_lock supposed to be held on entry */
static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
- struct rsp_que *rsp, uint32_t handle, void *ctio)
+ struct rsp_que *rsp, uint32_t handle, uint8_t cmd_type,
+ const void *ctio)
{
void *cmd = NULL;
struct req_que *req;
@@ -3882,29 +4026,97 @@ static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
h &= QLA_CMD_HANDLE_MASK;
- if (h != QLA_TGT_NULL_HANDLE) {
- if (unlikely(h >= req->num_outstanding_cmds)) {
- ql_dbg(ql_dbg_tgt, vha, 0xe052,
- "qla_target(%d): Wrong handle %x received\n",
- vha->vp_idx, handle);
- return NULL;
- }
-
- cmd = req->outstanding_cmds[h];
- if (unlikely(cmd == NULL)) {
- ql_dbg(ql_dbg_async, vha, 0xe053,
- "qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n",
- vha->vp_idx, handle, req->id, rsp->id);
- return NULL;
- }
- req->outstanding_cmds[h] = NULL;
- } else if (ctio != NULL) {
+ if (h == QLA_TGT_NULL_HANDLE) {
/* We can't get loop ID from CTIO7 */
ql_dbg(ql_dbg_tgt, vha, 0xe054,
"qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
"support NULL handles\n", vha->vp_idx);
return NULL;
}
+ if (unlikely(h >= req->num_outstanding_cmds)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe052,
+ "qla_target(%d): Wrong handle %x received\n",
+ vha->vp_idx, handle);
+ return NULL;
+ }
+
+ /*
+ * We passed a numeric handle for a cmd to the hardware, and the
+ * hardware passed the handle back to us. Look up the associated cmd,
+ * and validate that the cmd_type and exchange address match what the
+ * caller expects. This guards against buggy HBA firmware that returns
+ * the same CTIO multiple times.
+ */
+
+ cmd = req->outstanding_cmds[h];
+
+ if (unlikely(cmd == NULL)) {
+ if (cmd_type == TYPE_TGT_CMD) {
+ __le32 ctio_exchange_addr =
+ ((const struct ctio7_from_24xx *)ctio)->
+ exchange_address;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe053,
+ "qla_target(%d): tag %u: handle %x: cmd detached; ignoring CTIO (handle %x req->id %d rsp->id %d)\n",
+ vha->vp_idx, le32_to_cpu(ctio_exchange_addr), h,
+ handle, req->id, rsp->id);
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe053,
+ "qla_target(%d): cmd detached; ignoring CTIO (handle %x req->id %d rsp->id %d)\n",
+ vha->vp_idx, handle, req->id, rsp->id);
+ }
+ return NULL;
+ }
+
+ if (unlikely(((srb_t *)cmd)->cmd_type != cmd_type)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe087,
+ "qla_target(%d): handle %x: cmd detached; ignoring CTIO (cmd_type mismatch)\n",
+ vha->vp_idx, h);
+ return NULL;
+ }
+
+ switch (cmd_type) {
+ case TYPE_TGT_CMD: {
+ __le32 ctio_exchange_addr =
+ ((const struct ctio7_from_24xx *)ctio)->
+ exchange_address;
+ __le32 cmd_exchange_addr =
+ ((struct qla_tgt_cmd *)cmd)->
+ atio.u.isp24.exchange_addr;
+
+ BUILD_BUG_ON(offsetof(struct ctio7_from_24xx,
+ exchange_address) !=
+ offsetof(struct ctio_crc_from_fw,
+ exchange_address));
+
+ if (unlikely(ctio_exchange_addr != cmd_exchange_addr)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe088,
+ "qla_target(%d): tag %u: handle %x: cmd detached; ignoring CTIO (exchange address mismatch)\n",
+ vha->vp_idx, le32_to_cpu(ctio_exchange_addr), h);
+ return NULL;
+ }
+ break;
+ }
+
+ case TYPE_TGT_TMCMD: {
+ __le32 ctio_exchange_addr =
+ ((const struct abts_resp_from_24xx_fw *)ctio)->
+ exchange_address;
+ __le32 cmd_exchange_addr =
+ ((struct qla_tgt_mgmt_cmd *)cmd)->
+ orig_iocb.abts.exchange_address;
+
+ if (unlikely(ctio_exchange_addr != cmd_exchange_addr)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe089,
+ "qla_target(%d): ABTS: handle %x: cmd detached; ignoring CTIO (exchange address mismatch)\n",
+ vha->vp_idx, h);
+ return NULL;
+ }
+ break;
+ }
+ }
+
+ req->outstanding_cmds[h] = NULL;
return cmd;
}
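qlt_ctio_to_cmd() now treats the firmware-supplied handle as untrusted: beyond the range and NULL checks, the slot's cmd_type and exchange address must both match before the slot is claimed, which defuses duplicate or stale CTIOs from buggy firmware. The lookup discipline reduces to something like this sketch (placeholder types, not driver structures):

struct slot { void *cmd; u32 exchange_addr; u8 type; };

static void *claim_slot(struct slot *tbl, u32 nslots, u32 h,
			u8 want_type, u32 want_xchg)
{
	void *cmd;

	if (h >= nslots)
		return NULL;		/* out-of-range handle */
	cmd = tbl[h].cmd;
	if (!cmd)
		return NULL;		/* already completed (duplicate CTIO) */
	if (tbl[h].type != want_type || tbl[h].exchange_addr != want_xchg)
		return NULL;		/* slot reused for another command */

	tbl[h].cmd = NULL;		/* claim exactly once */
	return cmd;
}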
@@ -3913,12 +4125,13 @@ static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
*/
static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
- struct rsp_que *rsp, uint32_t handle, uint32_t status, void *ctio)
+ struct rsp_que *rsp, uint32_t handle, uint32_t status,
+ struct ctio7_from_24xx *ctio)
{
struct qla_hw_data *ha = vha->hw;
- struct se_cmd *se_cmd;
struct qla_tgt_cmd *cmd;
struct qla_qpair *qpair = rsp->qpair;
+ uint16_t ctio_flags;
if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
/* That could happen only in case of an error/reset/abort */
@@ -3930,45 +4143,92 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
return;
}
- cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio);
- if (cmd == NULL)
- return;
+ ctio_flags = le16_to_cpu(ctio->flags);
+
+ cmd = qlt_ctio_to_cmd(vha, rsp, handle, TYPE_TGT_CMD, ctio);
+ if (unlikely(cmd == NULL)) {
+ if ((handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE &&
+ (ctio_flags & 0xe1ff) == (CTIO7_FLAGS_STATUS_MODE_1 |
+ CTIO7_FLAGS_TERMINATE)) {
+ u32 tag = le32_to_cpu(ctio->exchange_address);
- if ((le16_to_cpu(((struct ctio7_from_24xx *)ctio)->flags) & CTIO7_FLAGS_DATA_OUT) &&
- cmd->sess) {
- qlt_chk_edif_rx_sa_delete_pending(vha, cmd->sess,
- (struct ctio7_from_24xx *)ctio);
+ if (status == CTIO_SUCCESS)
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe083,
+ "qla_target(%d): tag %u: term exchange successful\n",
+ vha->vp_idx, tag);
+ else
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe084,
+ "qla_target(%d): tag %u: term exchange failed; status = 0x%x\n",
+ vha->vp_idx, tag, status);
+ }
+ return;
}
- se_cmd = &cmd->se_cmd;
+ if ((ctio_flags & CTIO7_FLAGS_DATA_OUT) && cmd->sess)
+ qlt_chk_edif_rx_sa_delete_pending(vha, cmd->sess, ctio);
+
cmd->cmd_sent_to_fw = 0;
qlt_unmap_sg(vha, cmd);
if (unlikely(status != CTIO_SUCCESS)) {
+ u8 op = cmd->cdb ? cmd->cdb[0] : 0;
+ bool term_exchg = false;
+
+ /*
+ * If the hardware terminated the exchange, then we don't need
+ * to send an explicit term exchange message.
+ */
+ if (ctio_flags & OF_TERM_EXCH) {
+ cmd->sent_term_exchg = 1;
+ cmd->aborted = 1;
+ cmd->jiffies_at_term_exchg = jiffies;
+ }
+
switch (status & 0xFFFF) {
case CTIO_INVALID_RX_ID:
+ term_exchg = true;
if (printk_ratelimit())
dev_info(&vha->hw->pdev->dev,
- "qla_target(%d): CTIO with INVALID_RX_ID ATIO attr %x CTIO Flags %x|%x\n",
- vha->vp_idx, cmd->atio.u.isp24.attr,
+ "qla_target(%d): tag %lld, op %x: CTIO with INVALID_RX_ID status 0x%x received (state %d, port %8phC, LUN %lld, ATIO attr %x, CTIO Flags %x|%x)\n",
+ vha->vp_idx, cmd->se_cmd.tag, op,
+ status, cmd->state, cmd->sess->port_name,
+ cmd->unpacked_lun, cmd->atio.u.isp24.attr,
((cmd->ctio_flags >> 9) & 0xf),
cmd->ctio_flags);
-
break;
+
case CTIO_LIP_RESET:
case CTIO_TARGET_RESET:
case CTIO_ABORTED:
- /* driver request abort via Terminate exchange */
+ term_exchg = true;
+ fallthrough;
case CTIO_TIMEOUT:
- /* They are OK */
+ {
+ const char *status_str;
+
+ switch (status & 0xFFFF) {
+ case CTIO_LIP_RESET:
+ status_str = "LIP_RESET";
+ break;
+ case CTIO_TARGET_RESET:
+ status_str = "TARGET_RESET";
+ break;
+ case CTIO_ABORTED:
+ status_str = "ABORTED";
+ break;
+ case CTIO_TIMEOUT:
+ default:
+ status_str = "TIMEOUT";
+ break;
+ }
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
- "qla_target(%d): CTIO with "
- "status %#x received, state %x, se_cmd %p, "
- "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
- "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
- status, cmd->state, se_cmd);
+ "qla_target(%d): tag %lld, op %x: CTIO with %s status 0x%x received (state %d, port %8phC, LUN %lld)\n",
+ vha->vp_idx, cmd->se_cmd.tag, op,
+ status_str, status, cmd->state,
+ cmd->sess->port_name, cmd->unpacked_lun);
break;
+ }
case CTIO_PORT_LOGGED_OUT:
case CTIO_PORT_UNAVAILABLE:
@@ -3977,11 +4237,13 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
(status & 0xFFFF) == CTIO_PORT_LOGGED_OUT;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
- "qla_target(%d): CTIO with %s status %x "
- "received (state %x, se_cmd %p)\n", vha->vp_idx,
+ "qla_target(%d): tag %lld, op %x: CTIO with %s status 0x%x received (state %d, port %8phC, LUN %lld)\n",
+ vha->vp_idx, cmd->se_cmd.tag, op,
logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE",
- status, cmd->state, se_cmd);
+ status, cmd->state, cmd->sess->port_name,
+ cmd->unpacked_lun);
+ term_exchg = true;
if (logged_out && cmd->sess) {
/*
* Session is already logged out, but we need
@@ -3996,18 +4258,30 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
}
break;
}
+
+ case CTIO_SRR_RECEIVED:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1100e,
+ "qla_target(%d): tag %lld, op %x: CTIO with SRR status 0x%x received (state %d, port %8phC, LUN %lld, bufflen %d)\n",
+ vha->vp_idx, cmd->se_cmd.tag, op, status,
+ cmd->state, cmd->sess->port_name,
+ cmd->unpacked_lun, cmd->bufflen);
+
+ if (qlt_prepare_srr_ctio(qpair, cmd) == 0)
+ return;
+ break;
+
case CTIO_DIF_ERROR: {
struct ctio_crc_from_fw *crc =
(struct ctio_crc_from_fw *)ctio;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
- "qla_target(%d): CTIO with DIF_ERROR status %x "
- "received (state %x, ulp_cmd %p) actual_dif[0x%llx] "
- "expect_dif[0x%llx]\n",
- vha->vp_idx, status, cmd->state, se_cmd,
+ "qla_target(%d): tag %lld, op %x: CTIO with DIF_ERROR status 0x%x received (state %d, port %8phC, LUN %lld, actual_dif[0x%llx] expect_dif[0x%llx])\n",
+ vha->vp_idx, cmd->se_cmd.tag, op, status,
+ cmd->state, cmd->sess->port_name,
+ cmd->unpacked_lun,
*((u64 *)&crc->actual_dif[0]),
*((u64 *)&crc->expected_dif[0]));
- qlt_handle_dif_error(qpair, cmd, ctio);
+ qlt_handle_dif_error(qpair, cmd, crc);
return;
}
@@ -4016,51 +4290,72 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
case CTIO_FAST_INVALID_REQ:
case CTIO_FAST_SPI_ERR:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
- "qla_target(%d): CTIO with EDIF error status 0x%x received (state %x, se_cmd %p\n",
- vha->vp_idx, status, cmd->state, se_cmd);
+ "qla_target(%d): tag %lld, op %x: CTIO with EDIF error status 0x%x received (state %d, port %8phC, LUN %lld)\n",
+ vha->vp_idx, cmd->se_cmd.tag, op, status,
+ cmd->state, cmd->sess->port_name,
+ cmd->unpacked_lun);
break;
default:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
- "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
- vha->vp_idx, status, cmd->state, se_cmd);
+ "qla_target(%d): tag %lld, op %x: CTIO with error status 0x%x received (state %d, port %8phC, LUN %lld)\n",
+ vha->vp_idx, cmd->se_cmd.tag, op, status,
+ cmd->state, cmd->sess->port_name,
+ cmd->unpacked_lun);
break;
}
+ cmd->trc_flags |= TRC_CTIO_ERR;
- /* "cmd->aborted" means
- * cmd is already aborted/terminated, we don't
- * need to terminate again. The exchange is already
- * cleaned up/freed at FW level. Just cleanup at driver
- * level.
+ /*
+ * In state QLA_TGT_STATE_NEED_DATA the failed CTIO was for
+ * Data-Out, so either abort the exchange or try sending check
+ * condition with sense data depending on the severity of
+ * the error. In state QLA_TGT_STATE_PROCESSED the failed CTIO
+ * was for status (and possibly Data-In), so don't try sending
+ * an error status again in that case (if the error was for
+ * Data-In with status, we could try sending status without
+ * Data-In, but we don't do that currently).
*/
- if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
- (!cmd->aborted)) {
- cmd->trc_flags |= TRC_CTIO_ERR;
- if (qlt_term_ctio_exchange(qpair, ctio, cmd, status))
- return;
- }
+ if (!cmd->sent_term_exchg &&
+ (term_exchg || cmd->state != QLA_TGT_STATE_NEED_DATA))
+ qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1);
+ }
+
+ if (unlikely(cmd->srr != NULL)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11031,
+ "qla_target(%d): tag %lld, op %x: expected CTIO with SRR status; got status 0x%x: state %d, bufflen %d\n",
+ vha->vp_idx, cmd->se_cmd.tag,
+ cmd->cdb ? cmd->cdb[0] : 0, status, cmd->state,
+ cmd->bufflen);
+ qlt_srr_abort(cmd, true);
}
if (cmd->state == QLA_TGT_STATE_PROCESSED) {
cmd->trc_flags |= TRC_CTIO_DONE;
+
+ if (likely(status == CTIO_SUCCESS))
+ cmd->rsp_sent = 1;
+
} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
cmd->state = QLA_TGT_STATE_DATA_IN;
if (status == CTIO_SUCCESS)
cmd->write_data_transferred = 1;
+ cmd->jiffies_at_hw_st_entry = 0;
ha->tgt.tgt_ops->handle_data(cmd);
return;
} else if (cmd->aborted) {
cmd->trc_flags |= TRC_CTIO_ABORTED;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
- "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
+ "qla_target(%d): tag %lld: Aborted command finished\n",
+ vha->vp_idx, cmd->se_cmd.tag);
} else {
cmd->trc_flags |= TRC_CTIO_STRANGE;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
- "qla_target(%d): A command in state (%d) should "
- "not return a CTIO complete\n", vha->vp_idx, cmd->state);
+ "qla_target(%d): tag %lld: A command in state (%d) should not return a CTIO complete\n",
+ vha->vp_idx, cmd->se_cmd.tag, cmd->state);
}
if (unlikely(status != CTIO_SUCCESS) &&
@@ -4113,7 +4408,6 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
struct qla_hw_data *ha = vha->hw;
struct fc_port *sess = cmd->sess;
struct atio_from_isp *atio = &cmd->atio;
- unsigned char *cdb;
unsigned long flags;
uint32_t data_length;
int ret, fcp_task_attr, data_dir, bidi = 0;
@@ -4129,8 +4423,6 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
goto out_term;
}
- spin_lock_init(&cmd->cmd_lock);
- cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
cmd->se_cmd.tag = le32_to_cpu(atio->u.isp24.exchange_addr);
if (atio->u.isp24.fcp_cmnd.rddata &&
@@ -4148,7 +4440,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
atio->u.isp24.fcp_cmnd.task_attr);
data_length = get_datalen_for_atio(atio);
- ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
+ ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cmd->cdb, data_length,
fcp_task_attr, data_dir, bidi);
if (ret != 0)
goto out_term;
@@ -4166,9 +4458,14 @@ out_term:
*/
cmd->trc_flags |= TRC_DO_WORK_ERR;
spin_lock_irqsave(qpair->qp_lock_ptr, flags);
- qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0);
+ qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1);
qlt_decr_num_pend_cmds(vha);
+ if (unlikely(cmd->cdb != &cmd->atio.u.isp24.fcp_cmnd.cdb[0])) {
+ kfree(cmd->cdb);
+ cmd->cdb = &cmd->atio.u.isp24.fcp_cmnd.cdb[0];
+ cmd->cdb_len = 16;
+ }
cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
@@ -4292,18 +4589,43 @@ out:
cmd->se_cmd.cpuid = h->cpuid;
}
+/*
+ * Safely make a fixed-length copy of a variable-length atio by truncating the
+ * CDB if necessary.
+ */
+static void memcpy_atio(struct atio_from_isp *dst,
+ const struct atio_from_isp *src)
+{
+ int len;
+
+ memcpy(dst, src, sizeof(*dst));
+
+ /*
+ * If the CDB was truncated, prevent get_datalen_for_atio() from
+ * accessing invalid memory.
+ */
+ len = src->u.isp24.fcp_cmnd.add_cdb_len;
+ if (unlikely(len != 0)) {
+ dst->u.isp24.fcp_cmnd.add_cdb_len = 0;
+ memcpy(&dst->u.isp24.fcp_cmnd.add_cdb[0],
+ &src->u.isp24.fcp_cmnd.add_cdb[len * 4],
+ 4);
+ }
+}
+
static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
struct fc_port *sess,
struct atio_from_isp *atio)
{
struct qla_tgt_cmd *cmd;
+ int add_cdb_len;
cmd = vha->hw->tgt.tgt_ops->get_cmd(sess);
if (!cmd)
return NULL;
cmd->cmd_type = TYPE_TGT_CMD;
- memcpy(&cmd->atio, atio, sizeof(*atio));
+ memcpy_atio(&cmd->atio, atio);
INIT_LIST_HEAD(&cmd->sess_cmd_list);
cmd->state = QLA_TGT_STATE_NEW;
cmd->tgt = vha->vha_tgt.qla_tgt;
@@ -4323,6 +4645,29 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
cmd->vp_idx = vha->vp_idx;
cmd->edif = sess->edif.enable;
+ cmd->cdb = &cmd->atio.u.isp24.fcp_cmnd.cdb[0];
+ cmd->cdb_len = 16;
+
+ /*
+ * NOTE: memcpy_atio() set cmd->atio.u.isp24.fcp_cmnd.add_cdb_len to 0,
+ * so use the original value here.
+ */
+ add_cdb_len = atio->u.isp24.fcp_cmnd.add_cdb_len;
+ if (unlikely(add_cdb_len != 0)) {
+ int cdb_len = 16 + add_cdb_len * 4;
+ u8 *cdb;
+
+ cdb = kmalloc(cdb_len, GFP_ATOMIC);
+ if (unlikely(!cdb)) {
+ vha->hw->tgt.tgt_ops->free_cmd(cmd);
+ return NULL;
+ }
+ /* CAUTION: copy CDB from atio not cmd->atio */
+ memcpy(cdb, atio->u.isp24.fcp_cmnd.cdb, cdb_len);
+ cmd->cdb = cdb;
+ cmd->cdb_len = cdb_len;
+ }
+
return cmd;
}
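qlt_get_tag() keeps cmd->cdb pointing at the embedded 16-byte ATIO array in the common case and kmallocs only when add_cdb_len (in 4-byte units) reports a longer CDB; the cleanup in qlt_free_cmd() and __qlt_do_work() frees by comparing the pointer against the embedded array rather than tracking a flag. Condensed from the hunks above, as a sketch of the pairing:

/* Allocation side (GFP_ATOMIC: may run with a spinlock held). */
if (atio->u.isp24.fcp_cmnd.add_cdb_len) {
	int cdb_len = 16 + atio->u.isp24.fcp_cmnd.add_cdb_len * 4;
	u8 *cdb = kmalloc(cdb_len, GFP_ATOMIC);

	if (!cdb)
		goto fail;	/* caller frees the cmd */
	memcpy(cdb, atio->u.isp24.fcp_cmnd.cdb, cdb_len);
	cmd->cdb = cdb;
	cmd->cdb_len = cdb_len;
}

/* Free side: only a heap-allocated CDB may be kfree()d. */
if (cmd->cdb != &cmd->atio.u.isp24.fcp_cmnd.cdb[0]) {
	kfree(cmd->cdb);
	cmd->cdb = &cmd->atio.u.isp24.fcp_cmnd.cdb[0];
	cmd->cdb_len = 16;
}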
@@ -4900,6 +5245,863 @@ out:
}
/*
+ * Return true if the HBA firmware version is known to have bugs that
+ * prevent Sequence Level Error Recovery (SLER) / Sequence Retransmission
+ * Request (SRR) from working.
+ *
+ * Some bad versions are known from testing and others from the "Marvell
+ * Fibre Channel Firmware Release Notes".
+ */
+static bool qlt_has_sler_fw_bug(struct qla_hw_data *ha)
+{
+ bool has_sler_fw_bug = false;
+
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ /*
+ * In the fw release notes:
+ * ER147301 was added to v9.05.00 causing SLER regressions
+ * FCD-259 was fixed in v9.08.00
+ * FCD-371 was fixed in v9.08.00
+ * FCD-1183 was fixed in v9.09.00
+ *
+ * QLE2694L (ISP2071) known bad firmware (tested):
+ * 9.06.02
+ * 9.07.00
+ * 9.08.02
+ * SRRs trigger hundreds of bogus entries in the response
+ * queue and various other problems.
+ *
+ * QLE2694L known good firmware (tested):
+ * 8.08.05
+ * 9.09.00
+ *
+ * Suspected bad firmware (not confirmed by testing):
+ * v9.05.xx
+ *
+ * Unknown firmware:
+ * 9.00.00 - 9.04.xx
+ */
+ if (ha->fw_major_version == 9 &&
+ ha->fw_minor_version >= 5 &&
+ ha->fw_minor_version <= 8)
+ has_sler_fw_bug = true;
+ }
+
+ return has_sler_fw_bug;
+}
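A predicate like qlt_has_sler_fw_bug() is most useful consulted once, when target mode decides whether to advertise sequence-level error recovery. A hedged sketch of such a gate; the wrapper name and message id are illustrative, not part of this patch:

static bool qlt_sler_allowed(struct qla_hw_data *ha)
{
	if (qlt_has_sler_fw_bug(ha)) {
		ql_log(ql_log_warn, NULL, 0x11039,
		       "fw %d.%02d.%02d has known SRR bugs; not enabling SLER\n",
		       ha->fw_major_version, ha->fw_minor_version,
		       ha->fw_subminor_version);
		return false;
	}
	return true;
}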
+
+/*
+ * Return true and print a message if the HA has been reset since the SRR
+ * immediate notify was received; else return false.
+ */
+static bool qlt_srr_is_chip_reset(struct scsi_qla_host *vha,
+ struct qla_qpair *qpair, struct qla_tgt_srr *srr)
+{
+ if (!vha->flags.online ||
+ !qpair->fw_started ||
+ srr->reset_count != qpair->chip_reset) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1100d,
+ "qla_target(%d): chip reset; discarding IMM SRR\n",
+ vha->vp_idx);
+ return true;
+ }
+ return false;
+}
+
+/* Find and return the command associated with a SRR immediate notify. */
+static struct qla_tgt_cmd *qlt_srr_to_cmd(struct scsi_qla_host *vha,
+ const struct imm_ntfy_from_isp *iocb)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct fc_port *sess;
+ struct qla_tgt_cmd *cmd;
+ uint32_t tag = le32_to_cpu(iocb->u.isp24.exchange_address);
+ uint16_t loop_id;
+ be_id_t s_id;
+ unsigned long flags;
+
+ if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11009,
+ "qla_target(%d): IMM SRR with unknown exchange address; reject SRR\n",
+ vha->vp_idx);
+ return NULL;
+ }
+
+ loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
+
+ s_id.domain = iocb->u.isp24.port_id[2];
+ s_id.area = iocb->u.isp24.port_id[1];
+ s_id.al_pa = iocb->u.isp24.port_id[0];
+
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
+ if (!sess)
+ sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
+ if (!sess || sess->deleted) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1100a,
+ "qla_target(%d): could not find session for IMM SRR; reject SRR\n",
+ vha->vp_idx);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ return NULL;
+ }
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+ cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess, tag);
+ if (!cmd) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1100b,
+ "qla_target(%d): could not find cmd for IMM SRR; reject SRR\n",
+ vha->vp_idx);
+ } else {
+ u16 srr_ox_id = le16_to_cpu(iocb->u.isp24.srr_ox_id);
+ u16 cmd_ox_id = be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id);
+
+ if (srr_ox_id != cmd_ox_id) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1100c,
+ "qla_target(%d): tag %lld: IMM SRR: srr_ox_id[%04x] != cmd_ox_id[%04x]; reject SRR\n",
+ vha->vp_idx, cmd->se_cmd.tag,
+ srr_ox_id, cmd_ox_id);
+ cmd = NULL;
+ }
+ }
+
+ return cmd;
+}
+
+/*
+ * Handle an immediate notify SRR (Sequence Retransmission Request) message from
+ * the hardware. The hardware will also send a CTIO with CTIO_SRR_RECEIVED status
+ * for the affected command.
+ *
+ * This may be called a second time for the same immediate notify SRR if
+ * CTIO_SRR_RECEIVED is never received and qlt_srr_abort() is called.
+ *
+ * Process context, no locks
+ */
+static void qlt_handle_srr_imm(struct scsi_qla_host *vha,
+ struct qla_tgt_srr *srr)
+{
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_qpair *qpair;
+ struct qla_tgt_cmd *cmd;
+ uint8_t srr_explain = NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL;
+
+ /* handle qlt_srr_abort() */
+ if (srr->aborted) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11004,
+ "qla_target(%d): IMM SRR: terminating SRR for aborted cmd\n",
+ vha->vp_idx);
+ spin_lock_irq(&ha->hardware_lock);
+ if (!qlt_srr_is_chip_reset(vha, ha->base_qpair, srr))
+ qlt_send_term_imm_notif(vha, &srr->imm_ntfy, 1);
+ spin_unlock_irq(&ha->hardware_lock);
+ kfree(srr);
+ return;
+ }
+ if (srr->reject) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11005,
+ "qla_target(%d): IMM SRR: rejecting SRR for unknown cmd\n",
+ vha->vp_idx);
+ goto out_reject;
+ }
+
+ /* Find the command associated with the SRR. */
+ cmd = qlt_srr_to_cmd(vha, &srr->imm_ntfy);
+ if (cmd == NULL) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11005,
+ "qla_target(%d): IMM SRR: rejecting SRR for unknown cmd\n",
+ vha->vp_idx);
+ srr_explain = NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_INVALID_OX_ID_RX_ID;
+ goto out_reject;
+ }
+
+ if (ha->tgt.tgt_ops->get_cmd_ref(cmd)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11038,
+ "qla_target(%d): IMM SRR: unable to get cmd ref; rejecting SRR\n",
+ vha->vp_idx);
+ cmd = NULL;
+ goto out_reject;
+ }
+
+ qpair = cmd->qpair;
+
+ spin_lock_irq(qpair->qp_lock_ptr);
+
+ if (cmd->reset_count != srr->reset_count) {
+ /* force a miscompare */
+ srr->reset_count = qpair->chip_reset ^ 1;
+ }
+ if (qlt_srr_is_chip_reset(vha, qpair, srr)) {
+ spin_unlock_irq(qpair->qp_lock_ptr);
+ ha->tgt.tgt_ops->put_cmd_ref(cmd);
+ kfree(srr);
+ return;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11001,
+ "qla_target(%d): tag %lld, op %x: received IMM SRR\n",
+ vha->vp_idx, cmd->se_cmd.tag, cmd->cdb ? cmd->cdb[0] : 0);
+
+ cmd->trc_flags |= TRC_SRR_IMM;
+
+ if (cmd->srr != NULL) {
+ if (cmd->srr->imm_ntfy_recvd) {
+ /*
+ * Received another immediate notify SRR message for
+ * this command before the previous one could be processed
+ * (not expected to happen).
+ */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11006,
+ "qla_target(%d): tag %lld: received multiple IMM SRR; reject SRR\n",
+ vha->vp_idx, cmd->se_cmd.tag);
+ spin_unlock_irq(qpair->qp_lock_ptr);
+ ha->tgt.tgt_ops->put_cmd_ref(cmd);
+ goto out_reject;
+ }
+
+ /* qlt_prepare_srr_ctio() was called first. */
+ WARN_ON(!cmd->srr->ctio_recvd);
+
+ /*
+ * The immediate notify and CTIO handlers both allocated
+ * separate srr structs; combine them.
+ */
+ memcpy(&cmd->srr->imm_ntfy, &srr->imm_ntfy,
+ sizeof(srr->imm_ntfy));
+ kfree(srr);
+ srr = cmd->srr;
+ srr->imm_ntfy_recvd = true;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11002,
+ "qla_target(%d): tag %lld: schedule SRR work\n",
+ vha->vp_idx, cmd->se_cmd.tag);
+
+ /* Schedule the srr for processing in qlt_handle_srr(). */
+ spin_lock(&tgt->srr_lock);
+ list_add_tail(&srr->srr_list_entry, &tgt->srr_list);
+ /*
+ * Already running the work function; no need to schedule
+ * tgt->srr_work.
+ */
+ spin_unlock(&tgt->srr_lock);
+ spin_unlock_irq(qpair->qp_lock_ptr);
+ /* return with cmd refcount incremented */
+ return;
+ }
+
+ /* The CTIO SRR for this command has not yet been received. */
+
+ if (cmd->sent_term_exchg) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11007,
+ "qla_target(%d): tag %lld: IMM SRR: cmd already aborted\n",
+ vha->vp_idx, cmd->se_cmd.tag);
+ spin_unlock_irq(qpair->qp_lock_ptr);
+ spin_lock_irq(&ha->hardware_lock);
+ if (!qlt_srr_is_chip_reset(vha, ha->base_qpair, srr))
+ qlt_send_term_imm_notif(vha, &srr->imm_ntfy, 1);
+ spin_unlock_irq(&ha->hardware_lock);
+ kfree(srr);
+ ha->tgt.tgt_ops->put_cmd_ref(cmd);
+ return;
+ }
+
+ /* If not expecting a CTIO, then reject IMM SRR. */
+ if (!cmd->cmd_sent_to_fw) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11008,
+ "qla_target(%d): tag %lld: IMM SRR but !cmd_sent_to_fw (state %d); reject SRR\n",
+ vha->vp_idx, cmd->se_cmd.tag, cmd->state);
+ spin_unlock_irq(qpair->qp_lock_ptr);
+ ha->tgt.tgt_ops->put_cmd_ref(cmd);
+ goto out_reject;
+ }
+
+ /* Expect qlt_prepare_srr_ctio() to be called. */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11003,
+ "qla_target(%d): tag %lld: wait for CTIO SRR (state %d)\n",
+ vha->vp_idx, cmd->se_cmd.tag, cmd->state);
+ srr->cmd = cmd;
+ cmd->srr = srr;
+
+ spin_unlock_irq(qpair->qp_lock_ptr);
+
+ ha->tgt.tgt_ops->put_cmd_ref(cmd);
+ return;
+
+out_reject:
+ qpair = vha->hw->base_qpair;
+ spin_lock_irq(qpair->qp_lock_ptr);
+ if (!qlt_srr_is_chip_reset(vha, qpair, srr))
+ qlt_send_notify_ack(qpair, &srr->imm_ntfy, 0, 0, 0,
+ NOTIFY_ACK_SRR_FLAGS_REJECT,
+ NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
+ srr_explain);
+ spin_unlock_irq(qpair->qp_lock_ptr);
+ kfree(srr);
+}
+
+/*
+ * Handle an immediate notify SRR (Sequence Retransmission Request) message from
+ * the hardware. The hardware will also send a CTIO with CTIO_SRR_RECEIVED status
+ * for the affected command.
+ *
+ * ha->hardware_lock supposed to be held on entry
+ */
+static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *iocb)
+{
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ struct qla_tgt_srr *srr;
+
+ ql_log(ql_log_warn, vha, 0x11000, "qla_target(%d): received IMM SRR\n",
+ vha->vp_idx);
+
+ /*
+ * Need cmd->qpair->qp_lock_ptr, but have ha->hardware_lock. Defer
+ * processing to a workqueue so that the right lock can be acquired
+ * safely.
+ */
+
+ srr = kzalloc(sizeof(*srr), GFP_ATOMIC);
+ if (!srr)
+ goto out_reject;
+
+ memcpy(&srr->imm_ntfy, iocb, sizeof(srr->imm_ntfy));
+ srr->imm_ntfy_recvd = true;
+ srr->reset_count = vha->hw->base_qpair->chip_reset;
+ spin_lock(&tgt->srr_lock);
+ list_add_tail(&srr->srr_list_entry, &tgt->srr_list);
+ queue_work(qla_tgt_wq, &tgt->srr_work);
+ spin_unlock(&tgt->srr_lock);
+ /* resume processing in qlt_handle_srr_imm() */
+ return;
+
+out_reject:
+ qlt_send_notify_ack(vha->hw->base_qpair, iocb, 0, 0, 0,
+ NOTIFY_ACK_SRR_FLAGS_REJECT,
+ NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
+ NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
+}
+
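+/*
+ * SRR message-flow summary (reading aid only; the two hardware messages may
+ * arrive in either order):
+ *
+ *	IMM_NTFY_SRR -------> qlt_prepare_srr_imm() --> srr_work -->
+ *	                      qlt_handle_srr_imm()
+ *	CTIO_SRR_RECEIVED --> qlt_prepare_srr_ctio()
+ *	both received ------> qlt_handle_srr() accepts or rejects the retry
+ */
+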
+/*
+ * If possible, undo the effect of qlt_set_data_offset() and restore the cmd
+ * data buffer back to its full size.
+ */
+static int qlt_restore_orig_sg(struct qla_tgt_cmd *cmd)
+{
+ struct scsi_qla_host *vha = cmd->vha;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+
+ WARN_ON(cmd->sg_mapped);
+
+ if (cmd->offset == 0) {
+ /* qlt_set_data_offset() has not been called. */
+ return 0;
+ }
+
+ if (se_cmd->t_data_sg == NULL ||
+ se_cmd->t_data_nents == 0 ||
+ se_cmd->data_length == 0) {
+ /* The original scatterlist is not available. */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1102c,
+ "qla_target(%d): tag %lld: cannot restore original cmd buffer; keep modified buffer at offset %d\n",
+ vha->vp_idx, cmd->se_cmd.tag, cmd->offset);
+ return -ENOENT;
+ }
+
+ /* Restore the original scatterlist. */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1102d,
+ "qla_target(%d): tag %lld: restore original cmd buffer: offset %d -> 0\n",
+ vha->vp_idx, cmd->se_cmd.tag, cmd->offset);
+ if (cmd->free_sg) {
+ cmd->free_sg = 0;
+ qlt_free_sg(cmd);
+ }
+ cmd->offset = 0;
+ cmd->sg = se_cmd->t_data_sg;
+ cmd->sg_cnt = se_cmd->t_data_nents;
+ cmd->bufflen = se_cmd->data_length;
+ return 0;
+}
+
+/*
+ * Adjust the data buffer of the given command to skip over offset bytes from
+ * the beginning while also reducing the length by offset bytes.
+ *
+ * This may be called multiple times for a single command if there are multiple
+ * SRRs, with each call reducing the buffer size further relative to the
+ * previous call. Note that the buffer may be reset back to its original size
+ * by calling qlt_restore_orig_sg().
+ */
+static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
+{
+ struct scsi_qla_host *vha = cmd->vha;
+ struct scatterlist *sg_srr_start = NULL, *sg;
+ uint32_t first_offset = offset;
+ int sg_srr_cnt, i;
+ int bufflen = 0;
+
+ WARN_ON(cmd->sg_mapped);
+
+ ql_dbg(ql_dbg_tgt, vha, 0x11020,
+ "qla_target(%d): tag %lld: %s: sg %p sg_cnt %d dir %d cmd->offset %d cmd->bufflen %d add offset %u\n",
+ vha->vp_idx, cmd->se_cmd.tag, __func__, cmd->sg,
+ cmd->sg_cnt, cmd->dma_data_direction, cmd->offset, cmd->bufflen,
+ offset);
+
+ if (cmd->se_cmd.prot_op != TARGET_PROT_NORMAL) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11021,
+ "qla_target(%d): tag %lld: %s: SRR with protection information at nonzero offset not implemented\n",
+ vha->vp_idx, cmd->se_cmd.tag, __func__);
+ return -EINVAL;
+ }
+
+ if (!cmd->sg || !cmd->sg_cnt) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11022,
+ "qla_target(%d): tag %lld: %s: Missing cmd->sg or zero cmd->sg_cnt\n",
+ vha->vp_idx, cmd->se_cmd.tag, __func__);
+ return -EINVAL;
+ }
+
+ /*
+ * Walk the current cmd->sg list until we locate the new sg_srr_start
+ */
+ for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) {
+ ql_dbg(ql_dbg_tgt, vha, 0x11023,
+ "sg[%d]: %p page: %p, length: %d, offset: %d\n",
+ i, sg, sg_page(sg), sg->length, sg->offset);
+
+ if (first_offset < sg->length) {
+ sg_srr_start = sg;
+ break;
+ }
+ first_offset -= sg->length;
+ }
+
+ if (!sg_srr_start) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11024,
+ "qla_target(%d): tag %lld: Unable to locate sg_srr_start for offset: %u\n",
+ vha->vp_idx, cmd->se_cmd.tag, offset);
+ return -EINVAL;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11025,
+ "qla_target(%d): tag %lld: prepare SRR sgl at sg index %d of %d byte offset %u of %u\n",
+ vha->vp_idx, cmd->se_cmd.tag, i, cmd->sg_cnt,
+ first_offset, sg_srr_start->length);
+
+ sg_srr_cnt = cmd->sg_cnt - i;
+
+ if (first_offset == 0 && !cmd->free_sg) {
+ /*
+ * The offset points to the beginning of a scatterlist element.
+ * In this case there is no need to modify the first scatterlist
+ * element, so we can just point directly inside the original
+ * unmodified scatterlist.
+ */
+ ql_dbg(ql_dbg_tgt, vha, 0x11026, "point directly to old sgl\n");
+ cmd->sg = sg_srr_start;
+ } else {
+ /*
+ * Allocate at most 2 new scatterlist elements to reduce memory
+ * requirements.
+ */
+ int n_alloc_sg = min(sg_srr_cnt, 2);
+ struct scatterlist *sg_srr =
+ kmalloc_array(n_alloc_sg, sizeof(*sg_srr), GFP_ATOMIC);
+ if (!sg_srr) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11027,
+ "qla_target(%d): tag %lld: Unable to allocate SRR scatterlist\n",
+ vha->vp_idx, cmd->se_cmd.tag);
+ return -ENOMEM;
+ }
+ sg_init_table(sg_srr, n_alloc_sg);
+
+ /* Init the first sg element to skip over the unneeded data. */
+ sg_set_page(&sg_srr[0], sg_page(sg_srr_start),
+ sg_srr_start->length - first_offset,
+ sg_srr_start->offset + first_offset);
+ if (sg_srr_cnt == 1) {
+ ql_dbg(ql_dbg_tgt, vha, 0x11028,
+ "single-element array\n");
+ } else if (sg_srr_cnt == 2) {
+ /* Only two elements; copy the last element. */
+ ql_dbg(ql_dbg_tgt, vha, 0x11029,
+ "complete two-element array\n");
+ sg = sg_next(sg_srr_start);
+ sg_set_page(&sg_srr[1], sg_page(sg), sg->length,
+ sg->offset);
+ } else {
+ /*
+ * Three or more elements; chain our newly-allocated
+ * 2-entry array to the rest of the original
+ * scatterlist at the splice point.
+ */
+ ql_dbg(ql_dbg_tgt, vha, 0x1102a,
+ "chain to original scatterlist\n");
+ sg = sg_next(sg_srr_start);
+ sg_chain(sg_srr, 2, sg);
+ }
+
+ /*
+ * If the previous scatterlist was allocated here on a previous
+ * call, then it should be safe to free now.
+ */
+ if (cmd->free_sg)
+ qlt_free_sg(cmd);
+ cmd->sg = sg_srr;
+ cmd->free_sg = 1;
+ }
+
+ /* Note that sg_cnt doesn't include any extra chain elements. */
+ cmd->sg_cnt = sg_srr_cnt;
+ cmd->offset += offset;
+ cmd->bufflen -= offset;
+
+ /* Check the scatterlist length for consistency. */
+ for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) {
+ bufflen += sg->length;
+ }
+ if (bufflen != cmd->bufflen) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1102b,
+ "qla_target(%d): tag %lld: %s: bad sgl length: expected %d got %d\n",
+ vha->vp_idx, cmd->se_cmd.tag, __func__, cmd->bufflen, bufflen);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
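+/*
+ * Worked example for qlt_set_data_offset() (hypothetical numbers): for a
+ * command with three 4096-byte scatterlist elements (cmd->bufflen == 12288)
+ * and offset == 6144, the walk stops at sg index 1 with first_offset == 2048.
+ * Then sg_srr_cnt == 2, so a two-element array is allocated: sg_srr[0] maps
+ * the last 2048 bytes of the middle element and sg_srr[1] copies the last
+ * element. The result is cmd->offset == 6144 and cmd->bufflen == 6144, which
+ * the consistency check at the end of the function verifies.
+ */
+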
+/*
+ * Given the "SRR relative offset" (offset of data to retry), determine what
+ * needs to be retransmitted (data and/or status) and return the mask in
+ * xmit_type. If retrying data, adjust the command buffer to cover only the
+ * data that needs to be retried, skipping over the data that does not.
+ *
+ * Returns 0 for success or a negative error number.
+ */
+static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd,
+ uint32_t srr_rel_offs, int *xmit_type)
+{
+ struct scsi_qla_host *vha = cmd->vha;
+ int res = 0, rel_offs;
+
+ if (srr_rel_offs < cmd->offset ||
+ srr_rel_offs > cmd->offset + cmd->bufflen) {
+ *xmit_type = 0;
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1101e,
+ "qla_target(%d): tag %lld: srr_rel_offs %u outside accepted range %u - %u\n",
+ vha->vp_idx, cmd->se_cmd.tag, srr_rel_offs,
+ cmd->offset, cmd->offset + cmd->bufflen);
+ return -EINVAL;
+ }
+
+ /*
+ * srr_rel_offs is the offset of the data we need from the beginning of
+ * the *original* buffer.
+ *
+ * cmd->offset is the offset of the current cmd scatterlist from the
+ * beginning of the *original* buffer, which might be nonzero if there
+ * was a previous SRR and the buffer could not be reset back to its
+ * original size.
+ *
+ * rel_offs is the offset of the data we need from the beginning of the
+ * current cmd scatterlist.
+ */
+ rel_offs = srr_rel_offs - cmd->offset;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1101f,
+ "qla_target(%d): tag %lld: current buffer [%u - %u); srr_rel_offs=%d, rel_offs=%d\n",
+ vha->vp_idx, cmd->se_cmd.tag, cmd->offset,
+ cmd->offset + cmd->bufflen, srr_rel_offs, rel_offs);
+
+ *xmit_type = QLA_TGT_XMIT_ALL;
+
+ if (rel_offs == cmd->bufflen)
+ *xmit_type = QLA_TGT_XMIT_STATUS;
+ else if (rel_offs > 0)
+ res = qlt_set_data_offset(cmd, rel_offs);
+
+ return res;
+}
+
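+/*
+ * Example for qlt_srr_adjust_data() (hypothetical numbers): with
+ * cmd->offset == 4096 and cmd->bufflen == 8192, srr_rel_offs == 4096 gives
+ * rel_offs == 0, so data and status are both retransmitted unchanged;
+ * srr_rel_offs == 12288 gives rel_offs == cmd->bufflen, so only the status
+ * is retransmitted; and srr_rel_offs == 2048 falls outside [4096, 12288],
+ * so the SRR is rejected with -EINVAL.
+ */
+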
+/*
+ * Process an SRR (Sequence Retransmission Request) for a SCSI command once both
+ * the immediate notify SRR and CTIO SRR have been received from the hw.
+ *
+ * Process context, no locks
+ */
+static void qlt_handle_srr(struct scsi_qla_host *vha, struct qla_tgt_srr *srr)
+{
+ struct qla_tgt_cmd *cmd = srr->cmd;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ struct qla_qpair *qpair = cmd->qpair;
+ struct qla_hw_data *ha = vha->hw;
+ uint8_t op = cmd->cdb ? cmd->cdb[0] : 0;
+ uint32_t srr_rel_offs = le32_to_cpu(srr->imm_ntfy.u.isp24.srr_rel_offs);
+ uint16_t srr_ui = le16_to_cpu(srr->imm_ntfy.u.isp24.srr_ui);
+ int xmit_type = 0;
+ bool xmit_response = false;
+ bool rdy_to_xfer = false;
+ bool did_timeout;
+ bool send_term_exch = false;
+
+ spin_lock_irq(qpair->qp_lock_ptr);
+
+ WARN_ON(cmd->cmd_sent_to_fw);
+
+ cmd->srr = NULL;
+
+ if (qlt_srr_is_chip_reset(vha, qpair, srr))
+ goto out_advance_cmd;
+
+ if (cmd->sent_term_exchg || cmd->sess->deleted || srr->aborted) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11010,
+ "qla_target(%d): tag %lld: IMM SRR: cmd already aborted\n",
+ vha->vp_idx, cmd->se_cmd.tag);
+
+ spin_unlock_irq(qpair->qp_lock_ptr);
+
+ spin_lock_irq(&ha->hardware_lock);
+ if (!qlt_srr_is_chip_reset(vha, ha->base_qpair, srr))
+ qlt_send_term_imm_notif(vha, &srr->imm_ntfy, 1);
+ spin_unlock_irq(&ha->hardware_lock);
+
+ send_term_exch = true;
+
+ spin_lock_irq(qpair->qp_lock_ptr);
+ goto out_advance_cmd;
+ }
+
+ if (srr->reject)
+ goto out_reject;
+
+ /*
+ * If we receive multiple SRRs for the same command, place a time limit
+ * on how long we are willing to retry. This timeout should be less
+ * than SQA_MAX_HW_PENDING_TIME in scst_qla2xxx.c.
+ */
+ did_timeout = time_is_before_jiffies64((cmd->jiffies_at_hw_st_entry ? :
+ cmd->jiffies_at_alloc) + 30 * HZ);
+
+ qlt_restore_orig_sg(cmd);
+
+ switch (srr_ui) {
+ case SRR_IU_STATUS:
+ if (cmd->state != QLA_TGT_STATE_PROCESSED) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11011,
+ "qla_target(%d): tag %lld, op %x: reject SRR_IU_STATUS due to unexpected state %d\n",
+ vha->vp_idx, se_cmd->tag, op,
+ cmd->state);
+ goto out_reject;
+ }
+
+ if (did_timeout) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11033,
+ "qla_target(%d): tag %lld, op %x: reject SRR_IU_STATUS due to timeout\n",
+ vha->vp_idx, se_cmd->tag, op);
+ goto out_reject;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11012,
+ "qla_target(%d): tag %lld, op %x: accept SRR_IU_STATUS and retransmit scsi_status=%x\n",
+ vha->vp_idx, se_cmd->tag, op,
+ se_cmd->scsi_status);
+ xmit_type = QLA_TGT_XMIT_STATUS;
+ xmit_response = true;
+ cmd->trc_flags |= TRC_SRR_RSP;
+ break;
+
+ case SRR_IU_DATA_IN:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11013,
+ "qla_target(%d): tag %lld, op %x: process SRR_IU_DATA_IN: bufflen=%d, sg_cnt=%d, offset=%d, srr_offset=%d, scsi_status=%x\n",
+ vha->vp_idx, se_cmd->tag, op, cmd->bufflen,
+ cmd->sg_cnt, cmd->offset, srr_rel_offs,
+ se_cmd->scsi_status);
+
+ if (cmd->state != QLA_TGT_STATE_PROCESSED) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11014,
+ "qla_target(%d): tag %lld: reject SRR_IU_DATA_IN due to unexpected state %d\n",
+ vha->vp_idx, se_cmd->tag, cmd->state);
+ goto out_reject;
+ }
+
+ /*
+ * QLA_TGT_STATE_PROCESSED does not necessarily imply data-in
+ */
+ if (!qlt_has_data(cmd)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11015,
+ "qla_target(%d): tag %lld: reject SRR_IU_DATA_IN because cmd has no data to send\n",
+ vha->vp_idx, se_cmd->tag);
+ goto out_reject;
+ }
+
+ if (!cmd->sg || !cmd->sg_cnt) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11016,
+ "qla_target(%d): tag %lld: reject SRR_IU_DATA_IN because buffer is missing\n",
+ vha->vp_idx, se_cmd->tag);
+ goto out_reject;
+ }
+
+ if (did_timeout) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11034,
+ "qla_target(%d): tag %lld, op %x: reject SRR_IU_DATA_IN due to timeout\n",
+ vha->vp_idx, se_cmd->tag, op);
+ goto out_reject;
+ }
+
+ if (qlt_srr_adjust_data(cmd, srr_rel_offs, &xmit_type) != 0)
+ goto out_reject;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11017,
+ "qla_target(%d): tag %lld: accept SRR_IU_DATA_IN and retransmit data: bufflen=%d, offset=%d\n",
+ vha->vp_idx, se_cmd->tag, cmd->bufflen,
+ cmd->offset);
+ xmit_response = true;
+ cmd->trc_flags |= TRC_SRR_RSP;
+ break;
+
+ case SRR_IU_DATA_OUT:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11018,
+ "qla_target(%d): tag %lld, op %x: process SRR_IU_DATA_OUT: bufflen=%d, sg_cnt=%d, offset=%d, srr_offset=%d\n",
+ vha->vp_idx, se_cmd->tag, op, cmd->bufflen,
+ cmd->sg_cnt, cmd->offset, srr_rel_offs);
+
+ if (cmd->state != QLA_TGT_STATE_NEED_DATA) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11019,
+ "qla_target(%d): tag %lld: reject SRR_IU_DATA_OUT due to unexpected state %d\n",
+ vha->vp_idx, se_cmd->tag, cmd->state);
+ goto out_reject;
+ }
+
+ /*
+ * QLA_TGT_STATE_NEED_DATA implies there should be data-out
+ */
+ if (!qlt_has_data(cmd) || !cmd->sg || !cmd->sg_cnt) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1101a,
+ "qla_target(%d): tag %lld: reject SRR_IU_DATA_OUT because buffer is missing\n",
+ vha->vp_idx, se_cmd->tag);
+ goto out_reject;
+ }
+
+ if (did_timeout) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11035,
+ "qla_target(%d): tag %lld, op %x: reject SRR_IU_DATA_OUT due to timeout\n",
+ vha->vp_idx, se_cmd->tag, op);
+ goto out_reject;
+ }
+
+ if (qlt_srr_adjust_data(cmd, srr_rel_offs, &xmit_type) != 0)
+ goto out_reject;
+
+ if (!(xmit_type & QLA_TGT_XMIT_DATA)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1101b,
+ "qla_target(%d): tag %lld: reject SRR_IU_DATA_OUT: bad offset\n",
+ vha->vp_idx, se_cmd->tag);
+ goto out_reject;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1101c,
+ "qla_target(%d): tag %lld: accept SRR_IU_DATA_OUT and receive data again: bufflen=%d, offset=%d\n",
+ vha->vp_idx, se_cmd->tag, cmd->bufflen,
+ cmd->offset);
+ cmd->trc_flags |= TRC_SRR_XRDY;
+ rdy_to_xfer = true;
+ break;
+
+ default:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1101d,
+ "qla_target(%d): tag %lld, op %x: reject unknown srr_ui value 0x%x: state=%d, bufflen=%d, offset=%d, srr_offset=%d\n",
+ vha->vp_idx, se_cmd->tag, op, srr_ui, cmd->state,
+ cmd->bufflen, cmd->offset, srr_rel_offs);
+ goto out_reject;
+ }
+
+ qlt_send_notify_ack(qpair, &srr->imm_ntfy, 0, 0, 0,
+ NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
+
+ spin_unlock_irq(qpair->qp_lock_ptr);
+
+ if (xmit_response) {
+ /* For status and data-in, retransmit the response. */
+ if (qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status)) {
+ send_term_exch = true;
+ spin_lock_irq(qpair->qp_lock_ptr);
+ goto out_advance_cmd;
+ }
+ } else if (rdy_to_xfer) {
+ /* For data-out, receive data again. */
+ if (qlt_rdy_to_xfer(cmd)) {
+ send_term_exch = true;
+ spin_lock_irq(qpair->qp_lock_ptr);
+ goto out_advance_cmd;
+ }
+ }
+
+ return;
+
+out_reject:
+ qlt_send_notify_ack(qpair, &srr->imm_ntfy, 0, 0, 0,
+ NOTIFY_ACK_SRR_FLAGS_REJECT,
+ NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
+ NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
+
+out_advance_cmd:
+ if (!cmd->sent_term_exchg &&
+ (send_term_exch || cmd->state != QLA_TGT_STATE_NEED_DATA) &&
+ !qlt_srr_is_chip_reset(vha, qpair, srr)) {
+ cmd->trc_flags |= TRC_SRR_TERM;
+ qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1);
+ }
+ if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
+ /*
+ * The initiator should abort the command, but if not, try to
+ * return an error.
+ */
+ cmd->srr_failed = 1;
+ cmd->write_data_transferred = 0;
+ cmd->state = QLA_TGT_STATE_DATA_IN;
+ cmd->jiffies_at_hw_st_entry = 0;
+ vha->hw->tgt.tgt_ops->handle_data(cmd);
+ } else {
+ vha->hw->tgt.tgt_ops->free_cmd(cmd);
+ }
+ spin_unlock_irq(qpair->qp_lock_ptr);
+}
+
+/* Workqueue function for processing SRR work in process context. */
+static void qlt_handle_srr_work(struct work_struct *work)
+{
+ struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work);
+ struct scsi_qla_host *vha = tgt->vha;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x11032,
+ "qla_target(%d): Entering SRR work\n", vha->vp_idx);
+
+ for (;;) {
+ struct qla_tgt_srr *srr;
+
+ spin_lock_irq(&tgt->srr_lock);
+ srr = list_first_entry_or_null(&tgt->srr_list, typeof(*srr),
+ srr_list_entry);
+ if (!srr) {
+ spin_unlock_irq(&tgt->srr_lock);
+ break;
+ }
+ list_del(&srr->srr_list_entry);
+ spin_unlock_irq(&tgt->srr_lock);
+
+ if (!srr->cmd) {
+ qlt_handle_srr_imm(vha, srr);
+ } else {
+ qlt_handle_srr(vha, srr);
+ vha->hw->tgt.tgt_ops->put_cmd_ref(srr->cmd);
+ kfree(srr);
+ }
+ }
+}
+
+/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
*/
static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
@@ -5325,6 +6527,12 @@ static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
if (qlt_24xx_handle_els(vha, iocb) == 0)
send_notify_ack = 0;
break;
+
+ case IMM_NTFY_SRR:
+ qlt_prepare_srr_imm(vha, iocb);
+ send_notify_ack = 0;
+ break;
+
default:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
"qla_target(%d): Received unknown immediate "
@@ -5359,7 +6567,7 @@ static int __qlt_send_busy(struct qla_qpair *qpair,
sess = qla2x00_find_fcport_by_nportid(vha, &id, 1);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
if (!sess) {
- qlt_send_term_exchange(qpair, NULL, atio, 1, 0);
+ qlt_send_term_exchange(qpair, NULL, atio, 1);
return 0;
}
/* Sending marker isn't necessary, since we called from ISR */
@@ -5469,13 +6677,15 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
qlt_incr_num_pend_cmds(vha);
INIT_LIST_HEAD(&cmd->cmd_list);
- memcpy(&cmd->atio, atio, sizeof(*atio));
+ memcpy_atio(&cmd->atio, atio);
cmd->tgt = vha->vha_tgt.qla_tgt;
cmd->vha = vha;
cmd->reset_count = ha->base_qpair->chip_reset;
cmd->q_full = 1;
cmd->qpair = ha->base_qpair;
+ cmd->cdb = &cmd->atio.u.isp24.fcp_cmnd.cdb[0];
+ cmd->cdb_len = 16;
if (qfull) {
cmd->q_full = 1;
@@ -5588,7 +6798,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt, vha, 0xe05f,
"qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
qlt_send_term_exchange(ha->base_qpair, NULL,
- atio, 1, 0);
+ atio, 1);
break;
case -EBUSY:
ql_dbg(ql_dbg_tgt, vha, 0xe060,
@@ -5697,7 +6907,7 @@ static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
struct qla_tgt_mgmt_cmd *mcmd;
struct qla_hw_data *ha = vha->hw;
- mcmd = qlt_ctio_to_cmd(vha, rsp, pkt->handle, pkt);
+ mcmd = qlt_ctio_to_cmd(vha, rsp, pkt->handle, TYPE_TGT_TMCMD, pkt);
if (mcmd == NULL && h != QLA_TGT_SKIP_HANDLE) {
ql_dbg(ql_dbg_async, vha, 0xe064,
"qla_target(%d): ABTS Comp without mcmd\n",
@@ -5717,7 +6927,7 @@ static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
if (le32_to_cpu(entry->error_subcode1) == 0x1E &&
le32_to_cpu(entry->error_subcode2) == 0) {
if (qlt_chk_unresolv_exchg(vha, rsp->qpair, entry)) {
- ha->tgt.tgt_ops->free_mcmd(mcmd);
+ qlt_free_ul_mcmd(ha, mcmd);
return;
}
qlt_24xx_retry_term_exchange(vha, rsp->qpair,
@@ -5728,10 +6938,10 @@ static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
vha->vp_idx, entry->compl_status,
entry->error_subcode1,
entry->error_subcode2);
- ha->tgt.tgt_ops->free_mcmd(mcmd);
+ qlt_free_ul_mcmd(ha, mcmd);
}
} else if (mcmd) {
- ha->tgt.tgt_ops->free_mcmd(mcmd);
+ qlt_free_ul_mcmd(ha, mcmd);
}
}
@@ -5795,7 +7005,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt, vha, 0xe05f,
"qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
qlt_send_term_exchange(rsp->qpair, NULL,
- atio, 1, 0);
+ atio, 1);
break;
case -EBUSY:
ql_dbg(ql_dbg_tgt, vha, 0xe060,
@@ -5816,26 +7026,6 @@ static void qlt_response_pkt(struct scsi_qla_host *vha,
}
break;
- case CONTINUE_TGT_IO_TYPE:
- {
- struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
-
- qlt_do_ctio_completion(vha, rsp, entry->handle,
- le16_to_cpu(entry->status)|(pkt->entry_status << 16),
- entry);
- break;
- }
-
- case CTIO_A64_TYPE:
- {
- struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
-
- qlt_do_ctio_completion(vha, rsp, entry->handle,
- le16_to_cpu(entry->status)|(pkt->entry_status << 16),
- entry);
- break;
- }
-
case IMMED_NOTIFY_TYPE:
ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
@@ -6323,6 +7513,9 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
spin_lock_init(&tgt->sess_work_lock);
INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
INIT_LIST_HEAD(&tgt->sess_works_list);
+ spin_lock_init(&tgt->srr_lock);
+ INIT_LIST_HEAD(&tgt->srr_list);
+ INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
atomic_set(&tgt->tgt_global_resets_count, 0);
base_vha->vha_tgt.qla_tgt = tgt;
@@ -6705,7 +7898,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
adjust_corrupted_atio(pkt);
qlt_send_term_exchange(ha->base_qpair, NULL, pkt,
- ha_locked, 0);
+ ha_locked);
} else {
qlt_24xx_atio_pkt_all_vps(vha,
(struct atio_from_isp *)pkt, ha_locked);
@@ -6971,6 +8164,32 @@ qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
}
}
+/* Update any settings that depend on ha->fw_*_version. */
+void
+qlt_config_nvram_with_fw_version(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
+
+ if (ql2xtgt_tape_enable && qlt_has_sler_fw_bug(ha)) {
+ ql_log(ql_log_warn, vha, 0x11036,
+ "WARNING: ignoring ql2xtgt_tape_enable due to buggy HBA firmware; please upgrade FW\n");
+
+ /* Disable FC Tape support */
+ if (ha->isp_ops->nvram_config == qla81xx_nvram_config) {
+ struct init_cb_81xx *icb =
+ (struct init_cb_81xx *)ha->init_cb;
+ icb->firmware_options_2 &= cpu_to_le32(~BIT_12);
+ } else {
+ struct init_cb_24xx *icb =
+ (struct init_cb_24xx *)ha->init_cb;
+ icb->firmware_options_2 &= cpu_to_le32(~BIT_12);
+ }
+ }
+}
+
void
qlt_modify_vp_config(struct scsi_qla_host *vha,
struct vp_config_entry_24xx *vpmod)
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 15a59c125c53..61072fb41b29 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -184,6 +184,7 @@ struct nack_to_isp {
#define NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM 0x9
#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL 0
+#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_INVALID_OX_ID_RX_ID 0x17
#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_UNABLE_TO_SUPPLY_DATA 0x2a
#define NOTIFY_ACK_SUCCESS 0x01
@@ -686,6 +687,8 @@ struct qla_tgt_func_tmpl {
int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, u64, uint16_t,
uint32_t);
struct qla_tgt_cmd *(*get_cmd)(struct fc_port *);
+ int (*get_cmd_ref)(struct qla_tgt_cmd *cmd);
+ void (*put_cmd_ref)(struct qla_tgt_cmd *cmd);
void (*rel_cmd)(struct qla_tgt_cmd *);
void (*free_cmd)(struct qla_tgt_cmd *);
void (*free_mcmd)(struct qla_tgt_mgmt_cmd *);
@@ -754,6 +757,7 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
#define QLA_TGT_STATE_NEED_DATA 1 /* target needs data to continue */
#define QLA_TGT_STATE_DATA_IN 2 /* Data arrived + target processing */
#define QLA_TGT_STATE_PROCESSED 3 /* target done processing */
+#define QLA_TGT_STATE_DONE 4 /* cmd being freed */
/* ATIO task_codes field */
#define ATIO_SIMPLE_QUEUE 0
@@ -822,18 +826,26 @@ struct qla_tgt {
int notify_ack_expected;
int abts_resp_expected;
int modify_lun_expected;
+
+ spinlock_t srr_lock;
+ struct list_head srr_list;
+ struct work_struct srr_work;
+
atomic_t tgt_global_resets_count;
+
struct list_head tgt_list_entry;
};
struct qla_tgt_sess_op {
struct scsi_qla_host *vha;
uint32_t chip_reset;
- struct atio_from_isp atio;
struct work_struct work;
struct list_head cmd_list;
bool aborted;
struct rsp_que *rsp;
+
+ struct atio_from_isp atio;
+ /* DO NOT ADD ANYTHING ELSE HERE - atio must be last member */
};
enum trace_flags {
@@ -858,6 +870,7 @@ enum trace_flags {
TRC_DATA_IN = BIT_18,
TRC_ABORT = BIT_19,
TRC_DIF_ERR = BIT_20,
+ TRC_SRR_IMM = BIT_21,
};
struct qla_tgt_cmd {
@@ -876,25 +889,36 @@ struct qla_tgt_cmd {
/* Sense buffer that will be mapped into outgoing status */
unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
- spinlock_t cmd_lock;
- /* to save extra sess dereferences */
unsigned int conf_compl_supported:1;
unsigned int sg_mapped:1;
+
+ /* Call qlt_free_sg() if set. */
+ unsigned int free_sg:1;
+
unsigned int write_data_transferred:1;
+
+ /* Set if the SCSI status was sent successfully. */
+ unsigned int rsp_sent:1;
+
unsigned int q_full:1;
unsigned int term_exchg:1;
unsigned int cmd_sent_to_fw:1;
unsigned int cmd_in_wq:1;
unsigned int edif:1;
+	/* Set if an SRR was rejected. */
+ unsigned int srr_failed:1;
+
+ /* Set if the exchange has been terminated. */
+ unsigned int sent_term_exchg:1;
+
/*
- * This variable may be set from outside the LIO and I/O completion
- * callback functions. Do not declare this member variable as a
- * bitfield to avoid a read-modify-write operation when this variable
- * is set.
+ * Set if sent_term_exchg is set, or if the cmd was aborted by a TMR,
+ * or if some other error prevents normal processing of the command.
*/
- unsigned int aborted;
+ unsigned int aborted:1;
+ struct qla_tgt_srr *srr;
struct scatterlist *sg; /* cmd data buffer SG vector */
int sg_cnt; /* SG segments count */
int bufflen; /* cmd buffer length */
@@ -925,13 +949,23 @@ struct qla_tgt_cmd {
uint8_t scsi_status, sense_key, asc, ascq;
struct crc_context *ctx;
- const uint8_t *cdb;
+ uint8_t *cdb;
uint64_t lba;
+ int cdb_len;
uint16_t a_guard, e_guard, a_app_tag, e_app_tag;
uint32_t a_ref_tag, e_ref_tag;
#define DIF_BUNDL_DMA_VALID 1
uint16_t prot_flags;
+ unsigned long jiffies_at_term_exchg;
+
+ /*
+ * jiffies64 when qlt_rdy_to_xfer() or qlt_xmit_response() first
+	 * jiffies64 value at which qlt_rdy_to_xfer() or qlt_xmit_response()
+	 * was first called, or 0 when not in those states. Used to limit the
+	 * number of SRR retries.
+ uint64_t jiffies_at_hw_st_entry;
+
uint64_t jiffies_at_alloc;
uint64_t jiffies_at_free;
@@ -965,6 +999,7 @@ struct qla_tgt_mgmt_cmd {
unsigned int flags;
#define QLA24XX_MGMT_SEND_NACK BIT_0
#define QLA24XX_MGMT_ABORT_IO_ATTR_VALID BIT_1
+#define QLA24XX_MGMT_LLD_OWNED BIT_2
uint32_t reset_count;
struct work_struct work;
uint64_t unpacked_lun;
@@ -993,6 +1028,45 @@ struct qla_tgt_prm {
uint16_t tot_dsds;
};
+/*
+ * SRR (Sequence Retransmission Request) - resend or re-receive some or all
+ * data or status to recover from a transient I/O error.
+ */
+struct qla_tgt_srr {
+ /*
+ * Copy of immediate notify SRR message received from hw; valid only if
+ * imm_ntfy_recvd is true.
+ */
+ struct imm_ntfy_from_isp imm_ntfy;
+
+ struct list_head srr_list_entry;
+
+ /* The command affected by this SRR, or NULL if not yet determined. */
+ struct qla_tgt_cmd *cmd;
+
+ /* Used to detect if the HBA has been reset since receiving the SRR. */
+ uint32_t reset_count;
+
+ /*
+ * The hardware sends two messages for each SRR - an immediate notify
+ * and a CTIO with CTIO_SRR_RECEIVED status. These keep track of which
+ * messages have been received. The SRR can be processed once both of
+ * these are true.
+ */
+ bool imm_ntfy_recvd;
+ bool ctio_recvd;
+
+ /*
+ * This is set to true if the affected command was aborted (cmd may be
+ * set to NULL), in which case the immediate notify exchange also needs
+ * to be aborted.
+ */
+ bool aborted;
+
+ /* This is set to true to force the SRR to be rejected. */
+ bool reject;
+};
+
/* Check for Switch reserved address */
#define IS_SW_RESV_ADDR(_s_id) \
((_s_id.b.domain == 0xff) && ((_s_id.b.area & 0xf0) == 0xf0))
@@ -1048,6 +1122,20 @@ static inline uint32_t sid_to_key(const be_id_t s_id)
}
/*
+ * Free the scatterlist allocated by qlt_set_data_offset(). Call this only if
+ * cmd->free_sg is set.
+ */
+static inline void qlt_free_sg(struct qla_tgt_cmd *cmd)
+{
+ /*
+ * The scatterlist may be chained to the original scatterlist, but we
+ * only need to free the first segment here since that is the only part
+ * allocated by qlt_set_data_offset().
+ */
+ kfree(cmd->sg);
+}
+
+/*
* Exported symbols from qla_target.c LLD logic used by qla2xxx code..
*/
extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, struct rsp_que *,
@@ -1055,9 +1143,14 @@ extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, struct rsp_que *,
extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
extern int qlt_abort_cmd(struct qla_tgt_cmd *);
+void qlt_srr_abort(struct qla_tgt_cmd *cmd, bool reject);
+void qlt_send_term_exchange(struct qla_qpair *qpair,
+ struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked);
extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
+void qlt_free_ul_mcmd(struct qla_hw_data *ha, struct qla_tgt_mgmt_cmd *mcmd);
extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
+extern void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd);
extern void qlt_async_event(uint16_t, struct scsi_qla_host *, uint16_t *);
extern void qlt_enable_vha(struct scsi_qla_host *);
extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *);
@@ -1073,6 +1166,7 @@ extern void qlt_81xx_config_nvram_stage2(struct scsi_qla_host *,
struct init_cb_81xx *);
extern void qlt_81xx_config_nvram_stage1(struct scsi_qla_host *,
struct nvram_81xx *);
+void qlt_config_nvram_with_fw_version(struct scsi_qla_host *vha);
extern void qlt_modify_vp_config(struct scsi_qla_host *,
struct vp_config_entry_24xx *);
extern void qlt_probe_one_stage1(struct scsi_qla_host *, struct qla_hw_data *);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index ceaf1c7b1d17..2fff68935338 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -291,6 +291,16 @@ static struct qla_tgt_cmd *tcm_qla2xxx_get_cmd(struct fc_port *sess)
return cmd;
}
+static int tcm_qla2xxx_get_cmd_ref(struct qla_tgt_cmd *cmd)
+{
+ return target_get_sess_cmd(&cmd->se_cmd, true);
+}
+
+static void tcm_qla2xxx_put_cmd_ref(struct qla_tgt_cmd *cmd)
+{
+ target_put_sess_cmd(&cmd->se_cmd);
+}
+
static void tcm_qla2xxx_rel_cmd(struct qla_tgt_cmd *cmd)
{
target_free_tag(cmd->sess->se_sess, &cmd->se_cmd);
@@ -303,6 +313,8 @@ static void tcm_qla2xxx_rel_cmd(struct qla_tgt_cmd *cmd)
*/
static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
{
+ cmd->state = QLA_TGT_STATE_DONE;
+
cmd->qpair->tgt_counters.core_qla_free_cmd++;
cmd->cmd_in_wq = 1;
@@ -529,6 +541,9 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
if (cmd->se_cmd.pi_err)
transport_generic_request_failure(&cmd->se_cmd,
cmd->se_cmd.pi_err);
+ else if (cmd->srr_failed)
+ transport_generic_request_failure(&cmd->se_cmd,
+ TCM_SNACK_REJECTED);
else
transport_generic_request_failure(&cmd->se_cmd,
TCM_CHECK_CONDITION_ABORT_CMD);
@@ -1524,6 +1539,8 @@ static const struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
.handle_data = tcm_qla2xxx_handle_data,
.handle_tmr = tcm_qla2xxx_handle_tmr,
.get_cmd = tcm_qla2xxx_get_cmd,
+ .get_cmd_ref = tcm_qla2xxx_get_cmd_ref,
+ .put_cmd_ref = tcm_qla2xxx_put_cmd_ref,
.rel_cmd = tcm_qla2xxx_rel_cmd,
.free_cmd = tcm_qla2xxx_free_cmd,
.free_mcmd = tcm_qla2xxx_free_mcmd,
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 75125d2021f5..7febc0baa9d6 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -1016,7 +1016,7 @@ void qla4xxx_get_crash_record(struct scsi_qla_host * ha)
uint32_t crash_record_size = 0;
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
- memset(&mbox_sts, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
/* Get size of crash record. */
mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD;
@@ -1099,7 +1099,7 @@ void qla4xxx_get_conn_event_log(struct scsi_qla_host * ha)
/* Get Crash Record. */
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
- memset(&mbox_sts, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG;
mbox_cmd[2] = LSDW(event_log_dma);
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 9a0f467264b3..76cdad063f7b 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -216,6 +216,9 @@ int scsi_device_max_queue_depth(struct scsi_device *sdev)
*/
int scsi_change_queue_depth(struct scsi_device *sdev, int depth)
{
+ if (!sdev->budget_map.map)
+ return -EINVAL;
+
depth = min_t(int, depth, scsi_device_max_queue_depth(sdev));
if (depth > 0) {
@@ -255,6 +258,8 @@ EXPORT_SYMBOL(scsi_change_queue_depth);
*/
int scsi_track_queue_full(struct scsi_device *sdev, int depth)
{
+ if (!sdev->budget_map.map)
+ return 0;
/*
* Don't let QUEUE_FULLs on the same
@@ -826,8 +831,11 @@ struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
spin_lock_irqsave(shost->host_lock, flags);
while (list->next != &shost->__devices) {
next = list_entry(list->next, struct scsi_device, siblings);
- /* skip devices that we can't get a reference to */
- if (!scsi_device_get(next))
+ /*
+ * Skip pseudo devices and also devices we can't get a
+ * reference to.
+ */
+ if (!scsi_device_is_pseudo_dev(next) && !scsi_device_get(next))
break;
next = NULL;
list = list->next;
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index b2ab97be5db3..1f2a53ba5dd9 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -230,6 +230,7 @@ struct tape_block {
#define SDEBUG_OPT_NO_CDB_NOISE 0x4000
#define SDEBUG_OPT_HOST_BUSY 0x8000
#define SDEBUG_OPT_CMD_ABORT 0x10000
+#define SDEBUG_OPT_UNALIGNED_WRITE 0x20000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
@@ -237,7 +238,8 @@ struct tape_block {
SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
SDEBUG_OPT_SHORT_TRANSFER | \
SDEBUG_OPT_HOST_BUSY | \
- SDEBUG_OPT_CMD_ABORT)
+ SDEBUG_OPT_CMD_ABORT | \
+ SDEBUG_OPT_UNALIGNED_WRITE)
#define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
@@ -2961,11 +2963,11 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
int target_dev_id;
int target = scp->device->id;
unsigned char *ap;
- unsigned char *arr __free(kfree);
unsigned char *cmd = scp->cmnd;
bool dbd, llbaa, msense_6, is_disk, is_zbc, is_tape;
- arr = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);
+ unsigned char *arr __free(kfree) = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);
+
if (!arr)
return -ENOMEM;
dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
@@ -4932,6 +4934,14 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
u8 *cmd = scp->cmnd;
bool meta_data_locked = false;
+ if (unlikely(sdebug_opts & SDEBUG_OPT_UNALIGNED_WRITE &&
+ atomic_read(&sdeb_inject_pending))) {
+ atomic_set(&sdeb_inject_pending, 0);
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
+ UNALIGNED_WRITE_ASCQ);
+ return check_condition_result;
+ }
+
switch (cmd[0]) {
case WRITE_16:
ei_lba = 0;
@@ -6752,20 +6762,59 @@ static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
return false;
}
+struct sdebug_abort_cmd {
+ u32 unique_tag;
+};
+
+enum sdebug_internal_cmd_type {
+ SCSI_DEBUG_ABORT_CMD,
+};
+
+struct sdebug_internal_cmd {
+ enum sdebug_internal_cmd_type type;
+
+ union {
+ struct sdebug_abort_cmd abort_cmd;
+ };
+};
+
+union sdebug_priv {
+ struct sdebug_scsi_cmd cmd;
+ struct sdebug_internal_cmd internal_cmd;
+};
+
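+/*
+ * Note: whether a request's private data holds a struct sdebug_scsi_cmd or a
+ * struct sdebug_internal_cmd is decided by blk_mq_is_reserved_rq(); see
+ * sdebug_init_cmd_priv() and scsi_debug_process_reserved_command() below.
+ */
+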
/*
- * Called from scsi_debug_abort() only, which is for timed-out cmd.
+ * Abort SCSI command @cmnd. Only called from scsi_debug_abort(). Although
+ * it would be possible to call scsi_debug_stop_cmnd() directly, an internal
+ * command is allocated and submitted to trigger the reserved command
+ * infrastructure.
*/
static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
{
- struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
- unsigned long flags;
- bool res;
-
- spin_lock_irqsave(&sdsc->lock, flags);
- res = scsi_debug_stop_cmnd(cmnd);
- spin_unlock_irqrestore(&sdsc->lock, flags);
-
- return res;
+ struct Scsi_Host *shost = cmnd->device->host;
+ struct request *rq = scsi_cmd_to_rq(cmnd);
+ u32 unique_tag = blk_mq_unique_tag(rq);
+ struct sdebug_internal_cmd *internal_cmd;
+ struct scsi_cmnd *abort_cmd;
+ struct request *abort_rq;
+ blk_status_t res;
+
+ abort_cmd = scsi_get_internal_cmd(shost->pseudo_sdev, DMA_NONE,
+ BLK_MQ_REQ_RESERVED);
+ if (!abort_cmd)
+ return false;
+ internal_cmd = scsi_cmd_priv(abort_cmd);
+ *internal_cmd = (struct sdebug_internal_cmd) {
+ .type = SCSI_DEBUG_ABORT_CMD,
+ .abort_cmd = {
+ .unique_tag = unique_tag,
+ },
+ };
+ abort_rq = scsi_cmd_to_rq(abort_cmd);
+ abort_rq->timeout = secs_to_jiffies(3);
+ res = blk_execute_rq(abort_rq, true);
+ scsi_put_internal_cmd(abort_cmd);
+ return res == BLK_STS_OK;
}
/*
@@ -9220,6 +9269,56 @@ out_handle:
return ret;
}
+/* Process @scp, a request to abort a SCSI command by tag. */
+static void scsi_debug_abort_cmd(struct Scsi_Host *shost, struct scsi_cmnd *scp)
+{
+ struct sdebug_internal_cmd *internal_cmd = scsi_cmd_priv(scp);
+ struct sdebug_abort_cmd *abort_cmd = &internal_cmd->abort_cmd;
+ const u32 unique_tag = abort_cmd->unique_tag;
+ struct scsi_cmnd *to_be_aborted_scmd =
+ scsi_host_find_tag(shost, unique_tag);
+	struct sdebug_scsi_cmd *to_be_aborted_sdsc;
+	bool res = false;
+
+	if (!to_be_aborted_scmd) {
+		pr_err("%s: command with tag %#x not found\n", __func__,
+			unique_tag);
+		set_host_byte(scp, DID_ERROR);
+		return;
+	}
+
+	/* Dereference the command private data only after the NULL check. */
+	to_be_aborted_sdsc = scsi_cmd_priv(to_be_aborted_scmd);
+
+ scoped_guard(spinlock_irqsave, &to_be_aborted_sdsc->lock)
+ res = scsi_debug_stop_cmnd(to_be_aborted_scmd);
+
+ if (res)
+ pr_info("%s: aborted command with tag %#x\n",
+ __func__, unique_tag);
+ else
+ pr_err("%s: failed to abort command with tag %#x\n",
+ __func__, unique_tag);
+
+ set_host_byte(scp, res ? DID_OK : DID_ERROR);
+}
+
+static int scsi_debug_process_reserved_command(struct Scsi_Host *shost,
+ struct scsi_cmnd *scp)
+{
+ struct sdebug_internal_cmd *internal_cmd = scsi_cmd_priv(scp);
+
+ switch (internal_cmd->type) {
+ case SCSI_DEBUG_ABORT_CMD:
+ scsi_debug_abort_cmd(shost, scp);
+ break;
+ default:
+ WARN_ON_ONCE(true);
+ set_host_byte(scp, DID_ERROR);
+ break;
+ }
+
+ scsi_done(scp);
+ return 0;
+}
+
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
struct scsi_cmnd *scp)
{
@@ -9420,6 +9519,9 @@ static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
struct sdebug_defer *sd_dp = &sdsc->sd_dp;
+ if (blk_mq_is_reserved_rq(scsi_cmd_to_rq(cmd)))
+ return 0;
+
spin_lock_init(&sdsc->lock);
hrtimer_setup(&sd_dp->hrt, sdebug_q_cmd_hrt_complete, CLOCK_MONOTONIC,
HRTIMER_MODE_REL_PINNED);
@@ -9439,6 +9541,7 @@ static const struct scsi_host_template sdebug_driver_template = {
.sdev_destroy = scsi_debug_sdev_destroy,
.ioctl = scsi_debug_ioctl,
.queuecommand = scsi_debug_queuecommand,
+ .queue_reserved_command = scsi_debug_process_reserved_command,
.change_queue_depth = sdebug_change_qdepth,
.map_queues = sdebug_map_queues,
.mq_poll = sdebug_blk_mq_poll,
@@ -9448,6 +9551,7 @@ static const struct scsi_host_template sdebug_driver_template = {
.eh_bus_reset_handler = scsi_debug_bus_reset,
.eh_host_reset_handler = scsi_debug_host_reset,
.can_queue = SDEBUG_CANQUEUE,
+ .nr_reserved_cmds = 1,
.this_id = 7,
.sg_tablesize = SG_MAX_SEGMENTS,
.cmd_per_lun = DEF_CMD_PER_LUN,
@@ -9456,7 +9560,7 @@ static const struct scsi_host_template sdebug_driver_template = {
.module = THIS_MODULE,
.skip_settle_delay = 1,
.track_queue_depth = 1,
- .cmd_size = sizeof(struct sdebug_scsi_cmd),
+ .cmd_size = sizeof(union sdebug_priv),
.init_cmd_priv = sdebug_init_cmd_priv,
.target_alloc = sdebug_target_alloc,
.target_destroy = sdebug_target_destroy,
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 1c13812a3f03..f869108fd969 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -749,6 +749,9 @@ static void scsi_handle_queue_ramp_up(struct scsi_device *sdev)
const struct scsi_host_template *sht = sdev->host->hostt;
struct scsi_device *tmp_sdev;
+ if (!sdev->budget_map.map)
+ return;
+
if (!sht->track_queue_depth ||
sdev->queue_depth >= sdev->max_queue_depth)
return;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index d7e42293b864..51ad2ad07e43 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -396,7 +396,8 @@ void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd)
if (starget->can_queue > 0)
atomic_dec(&starget->target_busy);
- sbitmap_put(&sdev->budget_map, cmd->budget_token);
+ if (sdev->budget_map.map)
+ sbitmap_put(&sdev->budget_map, cmd->budget_token);
cmd->budget_token = -1;
}
@@ -1360,6 +1361,9 @@ static inline int scsi_dev_queue_ready(struct request_queue *q,
{
int token;
+ if (!sdev->budget_map.map)
+ return INT_MAX;
+
token = sbitmap_get(&sdev->budget_map);
if (token < 0)
return -1;
@@ -1530,6 +1534,14 @@ static void scsi_complete(struct request *rq)
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
enum scsi_disposition disposition;
+ if (blk_mq_is_reserved_rq(rq)) {
+ /* Only pass-through requests are supported in this code path. */
+ WARN_ON_ONCE(!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)));
+ scsi_mq_uninit_cmd(cmd);
+ __blk_mq_end_request(rq, scsi_result_to_blk_status(cmd->result));
+ return;
+ }
+
INIT_LIST_HEAD(&cmd->eh_entry);
atomic_inc(&cmd->device->iodone_cnt);
@@ -1749,7 +1761,8 @@ static void scsi_mq_put_budget(struct request_queue *q, int budget_token)
{
struct scsi_device *sdev = q->queuedata;
- sbitmap_put(&sdev->budget_map, budget_token);
+ if (sdev->budget_map.map)
+ sbitmap_put(&sdev->budget_map, budget_token);
}
/*
@@ -1818,25 +1831,31 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
WARN_ON_ONCE(cmd->budget_token < 0);
/*
- * If the device is not in running state we will reject some or all
- * commands.
+ * Bypass the SCSI device, SCSI target and SCSI host checks for
+ * reserved commands.
*/
- if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
- ret = scsi_device_state_check(sdev, req);
- if (ret != BLK_STS_OK)
- goto out_put_budget;
- }
+ if (!blk_mq_is_reserved_rq(req)) {
+ /*
+ * If the device is not in running state we will reject some or
+ * all commands.
+ */
+ if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
+ ret = scsi_device_state_check(sdev, req);
+ if (ret != BLK_STS_OK)
+ goto out_put_budget;
+ }
- ret = BLK_STS_RESOURCE;
- if (!scsi_target_queue_ready(shost, sdev))
- goto out_put_budget;
- if (unlikely(scsi_host_in_recovery(shost))) {
- if (cmd->flags & SCMD_FAIL_IF_RECOVERING)
- ret = BLK_STS_OFFLINE;
- goto out_dec_target_busy;
+ ret = BLK_STS_RESOURCE;
+ if (!scsi_target_queue_ready(shost, sdev))
+ goto out_put_budget;
+ if (unlikely(scsi_host_in_recovery(shost))) {
+ if (cmd->flags & SCMD_FAIL_IF_RECOVERING)
+ ret = BLK_STS_OFFLINE;
+ goto out_dec_target_busy;
+ }
+ if (!scsi_host_queue_ready(q, shost, sdev, cmd))
+ goto out_dec_target_busy;
}
- if (!scsi_host_queue_ready(q, shost, sdev, cmd))
- goto out_dec_target_busy;
/*
* Only clear the driver-private command data if the LLD does not supply
@@ -1865,6 +1884,14 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
cmd->submitter = SUBMITTED_BY_BLOCK_LAYER;
blk_mq_start_request(req);
+ if (blk_mq_is_reserved_rq(req)) {
+ reason = shost->hostt->queue_reserved_command(shost, cmd);
+ if (reason) {
+ ret = BLK_STS_RESOURCE;
+ goto out_put_budget;
+ }
+ return BLK_STS_OK;
+ }
reason = scsi_dispatch_cmd(cmd);
if (reason) {
scsi_set_blocked(cmd, reason);
@@ -2083,7 +2110,8 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
tag_set->ops = &scsi_mq_ops_no_commit;
tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1;
tag_set->nr_maps = shost->nr_maps ? : 1;
- tag_set->queue_depth = shost->can_queue;
+ tag_set->queue_depth = shost->can_queue + shost->nr_reserved_cmds;
+ tag_set->reserved_tags = shost->nr_reserved_cmds;
tag_set->cmd_size = cmd_size;
tag_set->numa_node = dev_to_node(shost->dma_dev);
if (shost->hostt->tag_alloc_policy_rr)
@@ -2107,6 +2135,44 @@ void scsi_mq_free_tags(struct kref *kref)
}
/**
+ * scsi_get_internal_cmd() - Allocate an internal SCSI command.
+ * @sdev: SCSI device from which to allocate the command
+ * @data_direction: Data direction for the allocated command
+ * @flags: request allocation flags, e.g. BLK_MQ_REQ_RESERVED or
+ * BLK_MQ_REQ_NOWAIT.
+ *
+ * Allocates a SCSI command for internal LLDD use.
+ */
+struct scsi_cmnd *scsi_get_internal_cmd(struct scsi_device *sdev,
+ enum dma_data_direction data_direction,
+ blk_mq_req_flags_t flags)
+{
+ enum req_op op = data_direction == DMA_TO_DEVICE ? REQ_OP_DRV_OUT :
+ REQ_OP_DRV_IN;
+ struct scsi_cmnd *scmd;
+ struct request *rq;
+
+ rq = scsi_alloc_request(sdev->request_queue, op, flags);
+ if (IS_ERR(rq))
+ return NULL;
+ scmd = blk_mq_rq_to_pdu(rq);
+ scmd->device = sdev;
+
+ return scmd;
+}
+EXPORT_SYMBOL_GPL(scsi_get_internal_cmd);
+
+/**
+ * scsi_put_internal_cmd() - Free an internal SCSI command.
+ * @scmd: SCSI command to be freed
+ */
+void scsi_put_internal_cmd(struct scsi_cmnd *scmd)
+{
+ blk_mq_free_request(blk_mq_rq_from_pdu(scmd));
+}
+EXPORT_SYMBOL_GPL(scsi_put_internal_cmd);
+
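+/*
+ * Usage sketch (hypothetical LLDD code; assumes a host template that sets
+ * nr_reserved_cmds and a pseudo SCSI device such as shost->pseudo_sdev):
+ *
+ *	struct scsi_cmnd *scmd;
+ *
+ *	scmd = scsi_get_internal_cmd(shost->pseudo_sdev, DMA_NONE,
+ *				     BLK_MQ_REQ_RESERVED);
+ *	if (!scmd)
+ *		return -ENOMEM;
+ *	scsi_cmd_to_rq(scmd)->timeout = 10 * HZ;
+ *	blk_execute_rq(scsi_cmd_to_rq(scmd), true);
+ *	scsi_put_internal_cmd(scmd);
+ */
+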
+/**
* scsi_device_from_queue - return sdev associated with a request_queue
* @q: The request queue to return the sdev from
*
diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c
index b02af340c2d3..3cd0d3074085 100644
--- a/drivers/scsi/scsi_logging.c
+++ b/drivers/scsi/scsi_logging.c
@@ -26,9 +26,9 @@ static void scsi_log_release_buffer(char *bufptr)
kfree(bufptr);
}
-static inline const char *scmd_name(const struct scsi_cmnd *scmd)
+static inline const char *scmd_name(struct scsi_cmnd *scmd)
{
- struct request *rq = scsi_cmd_to_rq((struct scsi_cmnd *)scmd);
+ const struct request *rq = scsi_cmd_to_rq(scmd);
if (!rq->q || !rq->q->disk)
return NULL;
@@ -80,8 +80,8 @@ void sdev_prefix_printk(const char *level, const struct scsi_device *sdev,
}
EXPORT_SYMBOL(sdev_prefix_printk);
-void scmd_printk(const char *level, const struct scsi_cmnd *scmd,
- const char *fmt, ...)
+void scmd_printk(const char *level, struct scsi_cmnd *scmd, const char *fmt,
+ ...)
{
va_list args;
char *logbuf;
@@ -94,7 +94,7 @@ void scmd_printk(const char *level, const struct scsi_cmnd *scmd,
if (!logbuf)
return;
off = sdev_format_header(logbuf, logbuf_len, scmd_name(scmd),
- scsi_cmd_to_rq((struct scsi_cmnd *)scmd)->tag);
+ scsi_cmd_to_rq(scmd)->tag);
if (off < logbuf_len) {
va_start(args, fmt);
off += vscnprintf(logbuf + off, logbuf_len - off, fmt, args);
@@ -371,16 +371,15 @@ void __scsi_print_sense(const struct scsi_device *sdev, const char *name,
EXPORT_SYMBOL(__scsi_print_sense);
/* Normalize and print sense buffer in SCSI command */
-void scsi_print_sense(const struct scsi_cmnd *cmd)
+void scsi_print_sense(struct scsi_cmnd *cmd)
{
scsi_log_print_sense(cmd->device, scmd_name(cmd),
- scsi_cmd_to_rq((struct scsi_cmnd *)cmd)->tag,
- cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
+ scsi_cmd_to_rq(cmd)->tag, cmd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE);
}
EXPORT_SYMBOL(scsi_print_sense);
-void scsi_print_result(const struct scsi_cmnd *cmd, const char *msg,
- int disposition)
+void scsi_print_result(struct scsi_cmnd *cmd, const char *msg, int disposition)
{
char *logbuf;
size_t off, logbuf_len;
@@ -393,7 +392,7 @@ void scsi_print_result(const struct scsi_cmnd *cmd, const char *msg,
return;
off = sdev_format_header(logbuf, logbuf_len, scmd_name(cmd),
- scsi_cmd_to_rq((struct scsi_cmnd *)cmd)->tag);
+ scsi_cmd_to_rq(cmd)->tag);
if (off >= logbuf_len)
goto out_printk;
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index d581613d87c7..2652fecbfe47 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -205,7 +205,6 @@ static int scsi_runtime_idle(struct device *dev)
/* Insert hooks here for targets, hosts, and transport classes */
if (scsi_is_sdev_device(dev)) {
- pm_runtime_mark_last_busy(dev);
pm_runtime_autosuspend(dev);
return -EBUSY;
}
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 5b2b19f5e8ec..d07ec15d6c00 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -135,6 +135,7 @@ extern int scsi_complete_async_scans(void);
extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int,
unsigned int, u64, enum scsi_scan_mode);
extern void scsi_forget_host(struct Scsi_Host *);
+struct scsi_device *scsi_get_pseudo_sdev(struct Scsi_Host *);
/* scsi_sysctl.c */
#ifdef CONFIG_SYSCTL
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 3c6e089e80c3..7acbfcfc2172 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -347,6 +347,11 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
kref_get(&sdev->host->tagset_refcnt);
sdev->request_queue = q;
+ scsi_sysfs_device_initialize(sdev);
+
+ if (scsi_device_is_pseudo_dev(sdev))
+ return sdev;
+
depth = sdev->host->cmd_per_lun ?: 1;
/*
@@ -363,8 +368,6 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
scsi_change_queue_depth(sdev, depth);
- scsi_sysfs_device_initialize(sdev);
-
if (shost->hostt->sdev_init) {
ret = shost->hostt->sdev_init(sdev);
if (ret) {
@@ -1068,6 +1071,11 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
transport_configure_device(&sdev->sdev_gendev);
+ sdev->sdev_bflags = *bflags;
+
+ if (scsi_device_is_pseudo_dev(sdev))
+ return SCSI_SCAN_LUN_PRESENT;
+
/*
* No need to freeze the queue as it isn't reachable to anyone else yet.
*/
@@ -1113,7 +1121,6 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
sdev->max_queue_depth = sdev->queue_depth;
WARN_ON_ONCE(sdev->max_queue_depth > sdev->budget_map.depth);
- sdev->sdev_bflags = *bflags;
/*
* Ok, the device is now all set up, we can
@@ -1212,6 +1219,12 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
if (!sdev)
goto out;
+ if (scsi_device_is_pseudo_dev(sdev)) {
+ if (bflagsp)
+ *bflagsp = BLIST_NOLUN;
+ return SCSI_SCAN_LUN_PRESENT;
+ }
+
result = kmalloc(result_len, GFP_KERNEL);
if (!result)
goto out_free_sdev;
@@ -2083,12 +2096,65 @@ void scsi_forget_host(struct Scsi_Host *shost)
restart:
spin_lock_irqsave(shost->host_lock, flags);
list_for_each_entry(sdev, &shost->__devices, siblings) {
- if (sdev->sdev_state == SDEV_DEL)
+ if (scsi_device_is_pseudo_dev(sdev) ||
+ sdev->sdev_state == SDEV_DEL)
continue;
spin_unlock_irqrestore(shost->host_lock, flags);
__scsi_remove_device(sdev);
goto restart;
}
spin_unlock_irqrestore(shost->host_lock, flags);
+
+ /*
+ * Remove the pseudo device last since it may be needed during removal
+ * of other SCSI devices.
+ */
+ if (shost->pseudo_sdev)
+ __scsi_remove_device(shost->pseudo_sdev);
}
+/**
+ * scsi_get_pseudo_sdev() - Attach a pseudo SCSI device to a SCSI host
+ * @shost: Host that needs a pseudo SCSI device
+ *
+ * Lock status: None assumed.
+ *
+ * Returns: The scsi_device or NULL
+ *
+ * Notes:
+ * Attach a single scsi_device to the Scsi_Host. The primary aim for this
+ * device is to serve as a container from which SCSI commands can be
+ * allocated. Each SCSI command will carry a command tag allocated by the
+ * block layer. These SCSI commands can be used by the LLDD to send
+ * internal or passthrough commands without having to manage tag allocation
+ * inside the LLDD.
+ */
+struct scsi_device *scsi_get_pseudo_sdev(struct Scsi_Host *shost)
+{
+ struct scsi_device *sdev = NULL;
+ struct scsi_target *starget;
+
+ guard(mutex)(&shost->scan_mutex);
+
+ if (!scsi_host_scan_allowed(shost))
+ goto out;
+
+ starget = scsi_alloc_target(&shost->shost_gendev, 0, shost->max_id);
+ if (!starget)
+ goto out;
+
+ sdev = scsi_alloc_sdev(starget, U64_MAX, NULL);
+ if (!sdev) {
+ scsi_target_reap(starget);
+ goto put_target;
+ }
+
+ sdev->borken = 0;
+
+put_target:
+ /* See also the get_device(dev) call in scsi_alloc_target(). */
+ put_device(&starget->dev);
+
+out:
+ return sdev;
+}
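+
+/*
+ * Example call site (hypothetical; an LLDD would typically do this while
+ * setting up its Scsi_Host):
+ *
+ *	shost->pseudo_sdev = scsi_get_pseudo_sdev(shost);
+ *	if (!shost->pseudo_sdev)
+ *		return -ENODEV;
+ */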
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 15ba493d2138..99eb0a30df61 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -605,68 +605,6 @@ sdev_show_##field (struct device *dev, struct device_attribute *attr, \
sdev_show_function(field, format_string) \
static DEVICE_ATTR(field, S_IRUGO, sdev_show_##field, NULL);
-
-/*
- * sdev_rw_attr: create a function and attribute variable for a
- * read/write field.
- */
-#define sdev_rw_attr(field, format_string) \
- sdev_show_function(field, format_string) \
- \
-static ssize_t \
-sdev_store_##field (struct device *dev, struct device_attribute *attr, \
- const char *buf, size_t count) \
-{ \
- struct scsi_device *sdev; \
- sdev = to_scsi_device(dev); \
- sscanf (buf, format_string, &sdev->field); \
- return count; \
-} \
-static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field);
-
-/* Currently we don't export bit fields, but we might in future,
- * so leave this code in */
-#if 0
-/*
- * sdev_rd_attr: create a function and attribute variable for a
- * read/write bit field.
- */
-#define sdev_rw_attr_bit(field) \
- sdev_show_function(field, "%d\n") \
- \
-static ssize_t \
-sdev_store_##field (struct device *dev, struct device_attribute *attr, \
- const char *buf, size_t count) \
-{ \
- int ret; \
- struct scsi_device *sdev; \
- ret = scsi_sdev_check_buf_bit(buf); \
- if (ret >= 0) { \
- sdev = to_scsi_device(dev); \
- sdev->field = ret; \
- ret = count; \
- } \
- return ret; \
-} \
-static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field);
-
-/*
- * scsi_sdev_check_buf_bit: return 0 if buf is "0", return 1 if buf is "1",
- * else return -EINVAL.
- */
-static int scsi_sdev_check_buf_bit(const char *buf)
-{
- if ((buf[1] == '\0') || ((buf[1] == '\n') && (buf[2] == '\0'))) {
- if (buf[0] == '1')
- return 1;
- else if (buf[0] == '0')
- return 0;
- else
- return -EINVAL;
- } else
- return -EINVAL;
-}
-#endif
/*
* Create the actual show/store functions and data structures.
*/
@@ -710,10 +648,14 @@ static ssize_t
sdev_store_timeout (struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct scsi_device *sdev;
- int timeout;
- sdev = to_scsi_device(dev);
- sscanf (buf, "%d\n", &timeout);
+ struct scsi_device *sdev = to_scsi_device(dev);
+ int ret, timeout;
+
+ ret = kstrtoint(buf, 0, &timeout);
+ if (ret)
+ return ret;
+ if (timeout <= 0)
+ return -EINVAL;
blk_queue_rq_timeout(sdev->request_queue, timeout * HZ);
return count;
}
@@ -1406,6 +1348,9 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
int error;
struct scsi_target *starget = sdev->sdev_target;
+ if (WARN_ON_ONCE(scsi_device_is_pseudo_dev(sdev)))
+ return -EINVAL;
+
error = scsi_target_add(starget);
if (error)
return error;
@@ -1513,7 +1458,7 @@ void __scsi_remove_device(struct scsi_device *sdev)
kref_put(&sdev->host->tagset_refcnt, scsi_mq_free_tags);
cancel_work_sync(&sdev->requeue_work);
- if (sdev->host->hostt->sdev_destroy)
+ if (!scsi_device_is_pseudo_dev(sdev) && sdev->host->hostt->sdev_destroy)
sdev->host->hostt->sdev_destroy(sdev);
transport_destroy_device(dev);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 3a821afee9bc..987befb02408 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -441,7 +441,8 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
fc_host->next_vport_number = 0;
fc_host->npiv_vports_inuse = 0;
- fc_host->work_q = alloc_workqueue("fc_wq_%d", 0, 0, shost->host_no);
+ fc_host->work_q = alloc_workqueue("fc_wq_%d", WQ_PERCPU, 0,
+ shost->host_no);
if (!fc_host->work_q)
return -ENOMEM;
@@ -3088,7 +3089,7 @@ fc_remote_port_create(struct Scsi_Host *shost, int channel,
spin_unlock_irqrestore(shost->host_lock, flags);
- rport->devloss_work_q = alloc_workqueue("fc_dl_%d_%d", 0, 0,
+ rport->devloss_work_q = alloc_workqueue("fc_dl_%d_%d", WQ_PERCPU, 0,
shost->host_no, rport->number);
if (!rport->devloss_work_q) {
printk(KERN_ERR "FC Remote Port alloc_workqueue failed\n");
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 743b4c792ceb..ed21c032bbc4 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -3961,7 +3961,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
list_del_init(&session->sess_list);
spin_unlock_irqrestore(&sesslock, flags);
- queue_work(system_unbound_wq, &session->destroy_work);
+ queue_work(system_dfl_wq, &session->destroy_work);
}
break;
case ISCSI_UEVENT_UNBIND_SESSION:
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 0252d3f6bed1..f2c0744b4480 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -318,6 +318,35 @@ static ssize_t manage_shutdown_store(struct device *dev,
}
static DEVICE_ATTR_RW(manage_shutdown);
+static ssize_t manage_restart_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct scsi_device *sdp = sdkp->device;
+
+ return sysfs_emit(buf, "%u\n", sdp->manage_restart);
+}
+
+static ssize_t manage_restart_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct scsi_device *sdp = sdkp->device;
+ bool v;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (kstrtobool(buf, &v))
+ return -EINVAL;
+
+ sdp->manage_restart = v;
+
+ return count;
+}
+static DEVICE_ATTR_RW(manage_restart);
+
static ssize_t
allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf)
{
@@ -654,6 +683,7 @@ static struct attribute *sd_disk_attrs[] = {
&dev_attr_manage_system_start_stop.attr,
&dev_attr_manage_runtime_start_stop.attr,
&dev_attr_manage_shutdown.attr,
+ &dev_attr_manage_restart.attr,
&dev_attr_protection_type.attr,
&dev_attr_protection_mode.attr,
&dev_attr_app_tag_own.attr,
@@ -4177,7 +4207,9 @@ static void sd_shutdown(struct device *dev)
(system_state == SYSTEM_POWER_OFF &&
sdkp->device->manage_shutdown) ||
(system_state == SYSTEM_RUNNING &&
- sdkp->device->manage_runtime_start_stop)) {
+ sdkp->device->manage_runtime_start_stop) ||
+ (system_state == SYSTEM_RESTART &&
+ sdkp->device->manage_restart)) {
sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
sd_start_stop_device(sdkp, 0);
}
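
Assuming the new attribute surfaces alongside the existing manage_* knobs under /sys/class/scsi_disk/ (the path below is illustrative), enabling it from userspace is a single write:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Illustrative H:C:T:L address; substitute the disk's actual one. */
	int fd = open("/sys/class/scsi_disk/0:0:0:0/manage_restart", O_WRONLY);

	if (fd < 0 || write(fd, "1", 1) != 1)
		perror("manage_restart");
	if (fd >= 0)
		close(fd);
	return 0;
}

With the bit set, sd_shutdown() now also stops the disk when system_state == SYSTEM_RESTART, mirroring the existing manage_shutdown handling for SYSTEM_POWER_OFF.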
diff --git a/drivers/scsi/sim710.c b/drivers/scsi/sim710.c
index e519df68d603..70c75ab1453a 100644
--- a/drivers/scsi/sim710.c
+++ b/drivers/scsi/sim710.c
@@ -133,6 +133,7 @@ static int sim710_probe_common(struct device *dev, unsigned long base_addr,
out_put_host:
scsi_host_put(host);
out_release:
+ ioport_unmap(hostdata->base);
release_region(base_addr, 64);
out_free:
kfree(hostdata);
@@ -148,6 +149,7 @@ static int sim710_device_remove(struct device *dev)
scsi_remove_host(host);
NCR_700_release(host);
+ ioport_unmap(hostdata->base);
kfree(hostdata);
free_irq(host->irq, host);
release_region(host->base, 64);
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 03c97e60d36f..fe549e2b7c94 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -34,11 +34,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "2.1.34-035"
+#define DRIVER_VERSION "2.1.36-026"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 1
-#define DRIVER_RELEASE 34
-#define DRIVER_REVISION 35
+#define DRIVER_RELEASE 36
+#define DRIVER_REVISION 26
#define DRIVER_NAME "Microchip SmartPQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -5555,14 +5555,25 @@ static void pqi_raid_io_complete(struct pqi_io_request *io_request,
pqi_scsi_done(scmd);
}
+/*
+ * Adjust the timeout value sent to the firmware for physical devices by
+ * subtracting 3 seconds from timeouts of 8 seconds or more.
+ *
+ * This provides the firmware with additional time to attempt early recovery
+ * before the OS-level timeout occurs.
+ */
+#define ADJUST_SECS_TIMEOUT_VALUE(tv) (((tv) >= 8) ? ((tv) - 3) : (tv))
+
static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
struct pqi_queue_group *queue_group, bool io_high_prio)
{
int rc;
+ u32 timeout;
size_t cdb_length;
struct pqi_io_request *io_request;
struct pqi_raid_path_request *request;
+ struct request *rq;
io_request = pqi_alloc_io_request(ctrl_info, scmd);
if (!io_request)
@@ -5634,6 +5645,12 @@ static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info,
return SCSI_MLQUEUE_HOST_BUSY;
}
+ if (device->is_physical_device) {
+ rq = scsi_cmd_to_rq(scmd);
+ timeout = rq->timeout / HZ;
+ put_unaligned_le32(ADJUST_SECS_TIMEOUT_VALUE(timeout), &request->timeout);
+ }
+
pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
return 0;
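
As a sanity check of ADJUST_SECS_TIMEOUT_VALUE(), a trivial userspace sketch of the mapping; values below 8 seconds pass through unchanged, so the firmware is never left with an unreasonably small window:

#include <stdio.h>

#define ADJUST_SECS_TIMEOUT_VALUE(tv) (((tv) >= 8) ? ((tv) - 3) : (tv))

int main(void)
{
	const unsigned int samples[] = { 5, 7, 8, 30, 60 };

	for (int i = 0; i < 5; i++)
		printf("%u -> %u\n", samples[i],
		       ADJUST_SECS_TIMEOUT_VALUE(samples[i]));
	/* Prints: 5->5, 7->7, 8->5, 30->27, 60->57 */
	return 0;
}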
@@ -6410,10 +6427,22 @@ static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev
static int pqi_device_reset_handler(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun, struct scsi_cmnd *scmd, u8 scsi_opcode)
{
+ unsigned long flags;
int rc;
mutex_lock(&ctrl_info->lun_reset_mutex);
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+ if (pqi_find_scsi_dev(ctrl_info, device->bus, device->target, device->lun) == NULL) {
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "skipping reset of scsi %d:%d:%d:%u, device has been removed\n",
+ ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ mutex_unlock(&ctrl_info->lun_reset_mutex);
+ return 0;
+ }
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
dev_err(&ctrl_info->pci_dev->dev,
"resetting scsi %d:%d:%d:%u SCSI cmd at %p due to cmd opcode 0x%02x\n",
ctrl_info->scsi_host->host_no, device->bus, device->target, lun, scmd, scsi_opcode);
@@ -6594,7 +6623,9 @@ static void pqi_sdev_destroy(struct scsi_device *sdev)
{
struct pqi_ctrl_info *ctrl_info;
struct pqi_scsi_dev *device;
+ struct pqi_tmf_work *tmf_work;
int mutex_acquired;
+ unsigned int lun;
unsigned long flags;
ctrl_info = shost_to_hba(sdev->host);
@@ -6621,8 +6652,13 @@ static void pqi_sdev_destroy(struct scsi_device *sdev)
mutex_unlock(&ctrl_info->scan_mutex);
+ for (lun = 0, tmf_work = device->tmf_work; lun < PQI_MAX_LUNS_PER_DEVICE; lun++, tmf_work++)
+ cancel_work_sync(&tmf_work->work_struct);
+
+ mutex_lock(&ctrl_info->lun_reset_mutex);
pqi_dev_info(ctrl_info, "removed", device);
pqi_free_device(device);
+ mutex_unlock(&ctrl_info->lun_reset_mutex);
}
static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
@@ -8936,7 +8972,8 @@ static int pqi_host_alloc_mem(struct pqi_ctrl_info *ctrl_info,
if (sg_count == 0 || sg_count > PQI_HOST_MAX_SG_DESCRIPTORS)
goto out;
- host_memory_descriptor->host_chunk_virt_address = kmalloc(sg_count * sizeof(void *), GFP_KERNEL);
+ host_memory_descriptor->host_chunk_virt_address =
+ kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL);
if (!host_memory_descriptor->host_chunk_virt_address)
goto out;
@@ -10110,6 +10147,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x207d, 0x4840)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADVANTECH, 0x8312)
},
{
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 74a6830b7ed8..168f25e4aaa3 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3526,8 +3526,64 @@ static int partition_tape(struct scsi_tape *STp, int size)
out:
return result;
}
-
+/*
+ * Handle any extra state needed for ioctls that are not st-specific.
+ * Called with the scsi_tape lock held; the lock is released before returning.
+ */
+static long st_common_ioctl(struct scsi_tape *STp, struct st_modedef *STm,
+ struct file *file, unsigned int cmd_in,
+ unsigned long arg)
+{
+ int i, retval = 0;
+
+ if (!STm->defined) {
+ retval = -ENXIO;
+ goto out;
+ }
+
+ switch (cmd_in) {
+ case SCSI_IOCTL_GET_IDLUN:
+ case SCSI_IOCTL_GET_BUS_NUMBER:
+ case SCSI_IOCTL_GET_PCI:
+ break;
+ case SG_IO:
+ case SCSI_IOCTL_SEND_COMMAND:
+ case CDROM_SEND_PACKET:
+ if (!capable(CAP_SYS_RAWIO)) {
+ retval = -EPERM;
+ goto out;
+ }
+ fallthrough;
+ default:
+ if ((i = flush_buffer(STp, 0)) < 0) {
+ retval = i;
+ goto out;
+ } else { /* flush_buffer succeeds */
+ if (STp->can_partitions) {
+ i = switch_partition(STp);
+ if (i < 0) {
+ retval = i;
+ goto out;
+ }
+ }
+ }
+ }
+ mutex_unlock(&STp->lock);
+
+ retval = scsi_ioctl(STp->device, file->f_mode & FMODE_WRITE,
+ cmd_in, (void __user *)arg);
+ if (!retval && cmd_in == SCSI_IOCTL_STOP_UNIT) {
+ /* unload */
+ STp->rew_at_close = 0;
+ STp->ready = ST_NO_TAPE;
+ }
+
+ return retval;
+out:
+ mutex_unlock(&STp->lock);
+ return retval;
+}
/* The ioctl command */
static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
@@ -3565,6 +3621,15 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
if (retval)
goto out;
+ switch (cmd_in) {
+ case MTIOCPOS:
+ case MTIOCGET:
+ case MTIOCTOP:
+ break;
+ default:
+ return st_common_ioctl(STp, STm, file, cmd_in, arg);
+ }
+
cmd_type = _IOC_TYPE(cmd_in);
cmd_nr = _IOC_NR(cmd_in);
@@ -3876,29 +3941,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
}
mt_pos.mt_blkno = blk;
retval = put_user_mtpos(p, &mt_pos);
- goto out;
- }
- mutex_unlock(&STp->lock);
-
- switch (cmd_in) {
- case SG_IO:
- case SCSI_IOCTL_SEND_COMMAND:
- case CDROM_SEND_PACKET:
- if (!capable(CAP_SYS_RAWIO))
- return -EPERM;
- break;
- default:
- break;
}
-
- retval = scsi_ioctl(STp->device, file->f_mode & FMODE_WRITE, cmd_in, p);
- if (!retval && cmd_in == SCSI_IOCTL_STOP_UNIT) {
- /* unload */
- STp->rew_at_close = 0;
- STp->ready = ST_NO_TAPE;
- }
- return retval;
-
out:
mutex_unlock(&STp->lock);
return retval;
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index e6357bc301cb..93c223e0a777 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -1844,6 +1844,7 @@ out_release_regions:
out_scsi_host_put:
scsi_host_put(host);
out_disable:
+ unregister_reboot_notifier(&stex_notifier);
pci_disable_device(pdev);
return err;
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 3b89b5a70331..b8457477cee9 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -730,7 +730,7 @@ static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data,
pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n",
agent->orb_pointer);
- queue_work(system_unbound_wq, &agent->work);
+ queue_work(system_dfl_wq, &agent->work);
return RCODE_COMPLETE;
@@ -764,7 +764,7 @@ static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data,
pr_debug("tgt_agent DOORBELL\n");
- queue_work(system_unbound_wq, &agent->work);
+ queue_work(system_dfl_wq, &agent->work);
return RCODE_COMPLETE;
@@ -990,7 +990,7 @@ static void tgt_agent_fetch_work(struct work_struct *work)
if (tgt_agent_check_active(agent) && !doorbell) {
INIT_WORK(&req->work, tgt_agent_process_work);
- queue_work(system_unbound_wq, &req->work);
+ queue_work(system_dfl_wq, &req->work);
} else {
/* don't process this request, just check next_ORB */
sbp_free_request(req);
@@ -1618,7 +1618,7 @@ static void sbp_mgt_agent_rw(struct fw_card *card,
agent->orb_offset = sbp2_pointer_to_addr(ptr);
agent->request = req;
- queue_work(system_unbound_wq, &agent->work);
+ queue_work(system_dfl_wq, &agent->work);
rcode = RCODE_COMPLETE;
} else if (tcode == TCODE_READ_BLOCK_REQUEST) {
addr_to_sbp2_pointer(agent->orb_offset, ptr);
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 9e51c535ba8c..f7868b41c5e6 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -578,6 +578,11 @@ DEF_CONFIGFS_ATTRIB_SHOW(unmap_zeroes_data);
DEF_CONFIGFS_ATTRIB_SHOW(max_write_same_len);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_rsoc);
DEF_CONFIGFS_ATTRIB_SHOW(submit_type);
+DEF_CONFIGFS_ATTRIB_SHOW(atomic_max_len);
+DEF_CONFIGFS_ATTRIB_SHOW(atomic_alignment);
+DEF_CONFIGFS_ATTRIB_SHOW(atomic_granularity);
+DEF_CONFIGFS_ATTRIB_SHOW(atomic_max_with_boundary);
+DEF_CONFIGFS_ATTRIB_SHOW(atomic_max_boundary);
#define DEF_CONFIGFS_ATTRIB_STORE_U32(_name) \
static ssize_t _name##_store(struct config_item *item, const char *page,\
@@ -1300,6 +1305,11 @@ CONFIGFS_ATTR(, max_write_same_len);
CONFIGFS_ATTR(, alua_support);
CONFIGFS_ATTR(, pgr_support);
CONFIGFS_ATTR(, submit_type);
+CONFIGFS_ATTR_RO(, atomic_max_len);
+CONFIGFS_ATTR_RO(, atomic_alignment);
+CONFIGFS_ATTR_RO(, atomic_granularity);
+CONFIGFS_ATTR_RO(, atomic_max_with_boundary);
+CONFIGFS_ATTR_RO(, atomic_max_boundary);
/*
* dev_attrib attributes for devices using the target core SBC/SPC
@@ -1343,6 +1353,11 @@ struct configfs_attribute *sbc_attrib_attrs[] = {
&attr_pgr_support,
&attr_emulate_rsoc,
&attr_submit_type,
+ &attr_atomic_alignment,
+ &attr_atomic_max_len,
+ &attr_atomic_granularity,
+ &attr_atomic_max_with_boundary,
+ &attr_atomic_max_boundary,
NULL,
};
EXPORT_SYMBOL(sbc_attrib_attrs);
@@ -2758,33 +2773,24 @@ static ssize_t target_lu_gp_lu_gp_id_store(struct config_item *item,
static ssize_t target_lu_gp_members_show(struct config_item *item, char *page)
{
struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);
- struct se_device *dev;
- struct se_hba *hba;
struct t10_alua_lu_gp_member *lu_gp_mem;
- ssize_t len = 0, cur_len;
- unsigned char buf[LU_GROUP_NAME_BUF] = { };
+ const char *const end = page + PAGE_SIZE;
+ char *cur = page;
spin_lock(&lu_gp->lu_gp_lock);
list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
- dev = lu_gp_mem->lu_gp_mem_dev;
- hba = dev->se_hba;
+ struct se_device *dev = lu_gp_mem->lu_gp_mem_dev;
+ struct se_hba *hba = dev->se_hba;
- cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n",
+ cur += scnprintf(cur, end - cur, "%s/%s\n",
config_item_name(&hba->hba_group.cg_item),
config_item_name(&dev->dev_group.cg_item));
- cur_len++; /* Extra byte for NULL terminator */
-
- if ((cur_len + len) > PAGE_SIZE || cur_len > LU_GROUP_NAME_BUF) {
- pr_warn("Ran out of lu_gp_show_attr"
- "_members buffer\n");
+ if (WARN_ON_ONCE(cur >= end))
break;
- }
- memcpy(page+len, buf, cur_len);
- len += cur_len;
}
spin_unlock(&lu_gp->lu_gp_lock);
- return len;
+ return cur - page;
}
CONFIGFS_ATTR(target_lu_gp_, lu_gp_id);
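
The rewrite above leans on scnprintf() returning the number of bytes actually stored rather than the would-be length, so `cur` can never advance past `end`. A minimal userspace sketch of the pattern, with scnprintf() emulated on top of vsnprintf():

#include <stdarg.h>
#include <stdio.h>

#define PAGE_SIZE 4096

/* Userspace stand-in for the kernel's scnprintf(): returns bytes written
 * (excluding the NUL), never the would-be length. */
static int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list ap;
	int ret;

	if (!size)
		return 0;
	va_start(ap, fmt);
	ret = vsnprintf(buf, size, fmt, ap);
	va_end(ap);
	return ret >= (int)size ? (int)size - 1 : ret;
}

int main(void)
{
	static char page[PAGE_SIZE];
	const char *const end = page + PAGE_SIZE;
	char *cur = page;
	const char *items[] = { "hba0/dev0", "hba0/dev1" };

	for (int i = 0; i < 2; i++)
		cur += scnprintf(cur, end - cur, "%s\n", items[i]);

	printf("%td bytes:\n%s", cur - page, page);
	return 0;
}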
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 7bb711b24c0d..8ccb8541db1c 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -814,6 +814,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
dev->dev_attrib.submit_type = TARGET_FABRIC_DEFAULT_SUBMIT;
+ /* Skip allocating lun_stats since we can't export them. */
xcopy_lun = &dev->xcopy_lun;
rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
init_completion(&xcopy_lun->lun_shutdown_comp);
@@ -840,12 +841,29 @@ free_device:
return NULL;
}
+void target_configure_write_atomic_from_bdev(struct se_dev_attrib *attrib,
+ struct block_device *bdev)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
+ int block_size = bdev_logical_block_size(bdev);
+
+ if (!bdev_can_atomic_write(bdev))
+ return;
+
+ attrib->atomic_max_len = queue_atomic_write_max_bytes(q) / block_size;
+ attrib->atomic_granularity = attrib->atomic_alignment =
+ queue_atomic_write_unit_min_bytes(q) / block_size;
+ attrib->atomic_max_with_boundary = 0;
+ attrib->atomic_max_boundary = 0;
+}
+EXPORT_SYMBOL_GPL(target_configure_write_atomic_from_bdev);
+
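Worked example of the unit conversion above, with hypothetical queue limits: a 512-byte logical block size, a 64 KiB atomic write maximum, and a 4 KiB minimum unit yield the attribute values in logical blocks:

#include <stdio.h>

int main(void)
{
	/* Hypothetical values; the real ones come from
	 * queue_atomic_write_max_bytes() and
	 * queue_atomic_write_unit_min_bytes(). */
	unsigned int block_size = 512;
	unsigned int max_bytes = 64 * 1024;
	unsigned int unit_min_bytes = 4 * 1024;

	printf("atomic_max_len     = %u blocks\n", max_bytes / block_size);
	printf("atomic_granularity = %u blocks\n", unit_min_bytes / block_size);
	printf("atomic_alignment   = %u blocks\n", unit_min_bytes / block_size);
	/* Prints 128, 8, 8. */
	return 0;
}
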
/*
* Check if the underlying struct block_device supports discard and if yes
* configure the UNMAP parameters.
*/
-bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
- struct block_device *bdev)
+bool target_configure_unmap_from_bdev(struct se_dev_attrib *attrib,
+ struct block_device *bdev)
{
int block_size = bdev_logical_block_size(bdev);
@@ -863,7 +881,7 @@ bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
bdev_discard_alignment(bdev) / block_size;
return true;
}
-EXPORT_SYMBOL(target_configure_unmap_from_queue);
+EXPORT_SYMBOL(target_configure_unmap_from_bdev);
/*
* Convert from blocksize advertised to the initiator to the 512 byte
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 7156a4dc1ca7..13159928e365 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -697,7 +697,7 @@ static void target_fabric_port_release(struct config_item *item)
struct se_lun *lun = container_of(to_config_group(item),
struct se_lun, lun_group);
- kfree_rcu(lun, rcu_head);
+ call_rcu(&lun->rcu_head, target_tpg_free_lun);
}
static struct configfs_item_operations target_fabric_port_item_ops = {
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 2d78ef74633c..b2610073e8cc 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -92,8 +92,8 @@ static bool fd_configure_unmap(struct se_device *dev)
struct inode *inode = file->f_mapping->host;
if (S_ISBLK(inode->i_mode))
- return target_configure_unmap_from_queue(&dev->dev_attrib,
- I_BDEV(inode));
+ return target_configure_unmap_from_bdev(&dev->dev_attrib,
+ I_BDEV(inode));
/* Limit UNMAP emulation to 8k Number of LBAs (NoLB) */
dev->dev_attrib.max_unmap_lba_count = 0x2000;
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 66c292b7d74b..8ec7b534ad76 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -84,8 +84,8 @@ static bool iblock_configure_unmap(struct se_device *dev)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
- return target_configure_unmap_from_queue(&dev->dev_attrib,
- ib_dev->ibd_bd);
+ return target_configure_unmap_from_bdev(&dev->dev_attrib,
+ ib_dev->ibd_bd);
}
static int iblock_configure_device(struct se_device *dev)
@@ -152,6 +152,8 @@ static int iblock_configure_device(struct se_device *dev)
if (bdev_nonrot(bd))
dev->dev_attrib.is_nonrot = 1;
+ target_configure_write_atomic_from_bdev(&dev->dev_attrib, bd);
+
bi = bdev_get_integrity(bd);
if (!bi)
return 0;
@@ -773,6 +775,9 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
else if (!bdev_write_cache(ib_dev->ibd_bd))
opf |= REQ_FUA;
}
+
+ if (cmd->se_cmd_flags & SCF_ATOMIC)
+ opf |= REQ_ATOMIC;
} else {
opf = REQ_OP_READ;
miter_dir = SG_MITER_FROM_SG;
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 20aab1f50565..763e6d26e187 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -125,6 +125,7 @@ void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *,
struct se_lun *);
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u64);
+void target_tpg_free_lun(struct rcu_head *head);
int core_tpg_add_lun(struct se_portal_group *, struct se_lun *,
bool, struct se_device *);
void core_tpg_remove_lun(struct se_portal_group *, struct se_lun *);
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index fe8beb7dbab1..abe91dc8722e 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -764,6 +764,49 @@ sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
return 0;
}
+static sense_reason_t
+sbc_check_atomic(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
+{
+ struct se_dev_attrib *attrib = &dev->dev_attrib;
+ u16 boundary, transfer_len;
+ u64 lba;
+
+ lba = transport_lba_64(cdb);
+ boundary = get_unaligned_be16(&cdb[10]);
+ transfer_len = get_unaligned_be16(&cdb[12]);
+
+ if (!attrib->atomic_max_len)
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+
+ if (boundary) {
+ if (transfer_len > attrib->atomic_max_with_boundary)
+ return TCM_INVALID_CDB_FIELD;
+
+ if (boundary > attrib->atomic_max_boundary)
+ return TCM_INVALID_CDB_FIELD;
+ } else {
+ if (transfer_len > attrib->atomic_max_len)
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ if (attrib->atomic_granularity) {
+ if (transfer_len % attrib->atomic_granularity)
+ return TCM_INVALID_CDB_FIELD;
+
+ if (boundary && boundary % attrib->atomic_granularity)
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ if (dev->dev_attrib.atomic_alignment) {
+ u64 _lba = lba;
+
+ if (do_div(_lba, dev->dev_attrib.atomic_alignment))
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ return 0;
+}
+
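The checks above read the WRITE ATOMIC (16) CDB fields at fixed offsets: a 64-bit LBA in bytes 2-9, ATOMIC BOUNDARY in bytes 10-11, and TRANSFER LENGTH in bytes 12-13, all big-endian. A hedged userspace sketch building such a CDB (0x9c is the WRITE ATOMIC (16) opcode per SBC; treat the value as an assumption here):

#include <stdint.h>
#include <stdio.h>

#define WRITE_ATOMIC_16 0x9c

static void put_be16(uint16_t v, uint8_t *p) { p[0] = v >> 8; p[1] = (uint8_t)v; }

static void put_be64(uint64_t v, uint8_t *p)
{
	for (int i = 0; i < 8; i++)
		p[i] = (uint8_t)(v >> (56 - 8 * i));
}

int main(void)
{
	uint8_t cdb[16] = { 0 };

	cdb[0] = WRITE_ATOMIC_16;
	put_be64(0x1000, &cdb[2]);	/* LBA */
	put_be16(0, &cdb[10]);		/* ATOMIC BOUNDARY: none */
	put_be16(8, &cdb[12]);		/* TRANSFER LENGTH: 8 blocks */

	for (int i = 0; i < 16; i++)
		printf("%02x ", cdb[i]);
	printf("\n");
	return 0;
}

With boundary == 0, only the atomic_max_len and granularity/alignment checks apply, matching the else branch in sbc_check_atomic().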
sense_reason_t
sbc_parse_cdb(struct se_cmd *cmd, struct exec_cmd_ops *ops)
{
@@ -861,6 +904,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct exec_cmd_ops *ops)
break;
case WRITE_16:
case WRITE_VERIFY_16:
+ case WRITE_ATOMIC_16:
sectors = transport_get_sectors_16(cdb);
cmd->t_task_lba = transport_lba_64(cdb);
@@ -872,6 +916,13 @@ sbc_parse_cdb(struct se_cmd *cmd, struct exec_cmd_ops *ops)
return ret;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+ if (cdb[0] == WRITE_ATOMIC_16) {
+ cmd->se_cmd_flags |= SCF_ATOMIC;
+
+ ret = sbc_check_atomic(dev, cmd, cdb);
+ if (ret)
+ return ret;
+ }
cmd->execute_cmd = sbc_execute_rw;
break;
case VARIABLE_LENGTH_CMD:
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index aad0096afa21..fe2b888bcb43 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -521,7 +521,6 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
have_tp = 1;
buf[0] = dev->transport->get_device_type(dev);
- buf[3] = have_tp ? 0x3c : 0x10;
/* Set WSNZ to 1 */
buf[4] = 0x01;
@@ -562,11 +561,10 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
else
put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);
- /*
- * Exit now if we don't support TP.
- */
+ put_unaligned_be16(12, &buf[2]);
+
if (!have_tp)
- goto max_write_same;
+ goto try_atomic;
/*
* Set MAXIMUM UNMAP LBA COUNT
@@ -595,9 +593,29 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
/*
* MAXIMUM WRITE SAME LENGTH
*/
-max_write_same:
put_unaligned_be64(dev->dev_attrib.max_write_same_len, &buf[36]);
+ put_unaligned_be16(40, &buf[2]);
+
+try_atomic:
+ /*
+ * ATOMIC
+ */
+ if (!dev->dev_attrib.atomic_max_len)
+ goto done;
+
+ if (dev->dev_attrib.atomic_max_len < io_max_blocks)
+ put_unaligned_be32(dev->dev_attrib.atomic_max_len, &buf[44]);
+ else
+ put_unaligned_be32(io_max_blocks, &buf[44]);
+
+ put_unaligned_be32(dev->dev_attrib.atomic_alignment, &buf[48]);
+ put_unaligned_be32(dev->dev_attrib.atomic_granularity, &buf[52]);
+ put_unaligned_be32(dev->dev_attrib.atomic_max_with_boundary, &buf[56]);
+ put_unaligned_be32(dev->dev_attrib.atomic_max_boundary, &buf[60]);
+
+ put_unaligned_be16(60, &buf[2]);
+done:
return 0;
}
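
For reference, the additions above populate the Block Limits (B0h) VPD page at its SBC-defined offsets (field names per SBC-4, big-endian 32-bit values):

	buf[44..47]  MAXIMUM ATOMIC TRANSFER LENGTH (clamped to io_max_blocks)
	buf[48..51]  ATOMIC ALIGNMENT
	buf[52..55]  ATOMIC TRANSFER LENGTH GRANULARITY
	buf[56..59]  MAXIMUM ATOMIC TRANSFER LENGTH WITH ATOMIC BOUNDARY
	buf[60..63]  MAXIMUM ATOMIC BOUNDARY SIZE

The PAGE LENGTH field at buf[2..3] is bumped in stages (12, 40, then 60) so the reported length always covers exactly the fields that were filled in.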
@@ -1452,6 +1470,24 @@ static const struct target_opcode_descriptor tcm_opcode_write_same32 = {
.update_usage_bits = set_dpofua_usage_bits32,
};
+static bool tcm_is_atomic_enabled(const struct target_opcode_descriptor *descr,
+ struct se_cmd *cmd)
+{
+ return cmd->se_dev->dev_attrib.atomic_max_len;
+}
+
+static struct target_opcode_descriptor tcm_opcode_write_atomic16 = {
+ .support = SCSI_SUPPORT_FULL,
+ .opcode = WRITE_ATOMIC_16,
+ .cdb_size = 16,
+ .usage_bits = {WRITE_ATOMIC_16, 0xf8, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
+ .enabled = tcm_is_atomic_enabled,
+ .update_usage_bits = set_dpofua_usage_bits,
+};
+
static bool tcm_is_caw_enabled(const struct target_opcode_descriptor *descr,
struct se_cmd *cmd)
{
@@ -2008,6 +2044,7 @@ static const struct target_opcode_descriptor *tcm_supported_opcodes[] = {
&tcm_opcode_write16,
&tcm_opcode_write_verify16,
&tcm_opcode_write_same32,
+ &tcm_opcode_write_atomic16,
&tcm_opcode_compare_write,
&tcm_opcode_read_capacity,
&tcm_opcode_read_capacity16,
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index 6bdf2d8bd694..083205052be2 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -276,56 +276,39 @@ static ssize_t target_stat_lu_state_bit_show(struct config_item *item,
return snprintf(page, PAGE_SIZE, "exposed\n");
}
-static ssize_t target_stat_lu_num_cmds_show(struct config_item *item,
- char *page)
-{
- struct se_device *dev = to_stat_lu_dev(item);
- struct se_dev_io_stats *stats;
- unsigned int cpu;
- u32 cmds = 0;
-
- for_each_possible_cpu(cpu) {
- stats = per_cpu_ptr(dev->stats, cpu);
- cmds += stats->total_cmds;
- }
-
- /* scsiLuNumCommands */
- return snprintf(page, PAGE_SIZE, "%u\n", cmds);
-}
-
-static ssize_t target_stat_lu_read_mbytes_show(struct config_item *item,
- char *page)
-{
- struct se_device *dev = to_stat_lu_dev(item);
- struct se_dev_io_stats *stats;
- unsigned int cpu;
- u32 bytes = 0;
-
- for_each_possible_cpu(cpu) {
- stats = per_cpu_ptr(dev->stats, cpu);
- bytes += stats->read_bytes;
- }
-
- /* scsiLuReadMegaBytes */
- return snprintf(page, PAGE_SIZE, "%u\n", bytes >> 20);
-}
-
-static ssize_t target_stat_lu_write_mbytes_show(struct config_item *item,
- char *page)
-{
- struct se_device *dev = to_stat_lu_dev(item);
- struct se_dev_io_stats *stats;
- unsigned int cpu;
- u32 bytes = 0;
-
- for_each_possible_cpu(cpu) {
- stats = per_cpu_ptr(dev->stats, cpu);
- bytes += stats->write_bytes;
- }
-
- /* scsiLuWrittenMegaBytes */
- return snprintf(page, PAGE_SIZE, "%u\n", bytes >> 20);
-}
+#define per_cpu_stat_snprintf(stats_struct, prefix, field, shift) \
+static ssize_t \
+per_cpu_stat_##prefix##_snprintf(struct stats_struct __percpu *per_cpu_stats, \
+ char *page) \
+{ \
+ struct stats_struct *stats; \
+ unsigned int cpu; \
+ u64 sum = 0; \
+ \
+ for_each_possible_cpu(cpu) { \
+ stats = per_cpu_ptr(per_cpu_stats, cpu); \
+ sum += stats->field; \
+ } \
+ \
+ return snprintf(page, PAGE_SIZE, "%llu\n", sum >> shift); \
+}
+
+#define lu_show_per_cpu_stat(prefix, field, shift) \
+per_cpu_stat_snprintf(se_dev_io_stats, prefix, field, shift); \
+static ssize_t \
+target_stat_##prefix##_show(struct config_item *item, char *page) \
+{ \
+ struct se_device *dev = to_stat_lu_dev(item); \
+ \
+ return per_cpu_stat_##prefix##_snprintf(dev->stats, page); \
+} \
+
+/* scsiLuNumCommands */
+lu_show_per_cpu_stat(lu_num_cmds, total_cmds, 0);
+/* scsiLuReadMegaBytes */
+lu_show_per_cpu_stat(lu_read_mbytes, read_bytes, 20);
+/* scsiLuWrittenMegaBytes */
+lu_show_per_cpu_stat(lu_write_mbytes, write_bytes, 20);
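
For readers tracing the macros: lu_show_per_cpu_stat(lu_num_cmds, total_cmds, 0) expands to roughly the following pair of functions (sketch of the preprocessor output):

static ssize_t
per_cpu_stat_lu_num_cmds_snprintf(struct se_dev_io_stats __percpu *per_cpu_stats,
				  char *page)
{
	struct se_dev_io_stats *stats;
	unsigned int cpu;
	u64 sum = 0;

	for_each_possible_cpu(cpu) {
		stats = per_cpu_ptr(per_cpu_stats, cpu);
		sum += stats->total_cmds;
	}

	return snprintf(page, PAGE_SIZE, "%llu\n", sum >> 0);
}

static ssize_t
target_stat_lu_num_cmds_show(struct config_item *item, char *page)
{
	struct se_device *dev = to_stat_lu_dev(item);

	return per_cpu_stat_lu_num_cmds_snprintf(dev->stats, page);
}

Note the widening from the old per-field u32 accumulators to a single u64 sum, which avoids wrap-around on long-running devices before the megabyte shift is applied.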
static ssize_t target_stat_lu_resets_show(struct config_item *item, char *page)
{
@@ -623,53 +606,30 @@ static ssize_t target_stat_tgt_port_port_index_show(struct config_item *item,
return ret;
}
-static ssize_t target_stat_tgt_port_in_cmds_show(struct config_item *item,
- char *page)
-{
- struct se_lun *lun = to_stat_tgt_port(item);
- struct se_device *dev;
- ssize_t ret = -ENODEV;
-
- rcu_read_lock();
- dev = rcu_dereference(lun->lun_se_dev);
- if (dev)
- ret = snprintf(page, PAGE_SIZE, "%lu\n",
- atomic_long_read(&lun->lun_stats.cmd_pdus));
- rcu_read_unlock();
- return ret;
-}
-
-static ssize_t target_stat_tgt_port_write_mbytes_show(struct config_item *item,
- char *page)
-{
- struct se_lun *lun = to_stat_tgt_port(item);
- struct se_device *dev;
- ssize_t ret = -ENODEV;
-
- rcu_read_lock();
- dev = rcu_dereference(lun->lun_se_dev);
- if (dev)
- ret = snprintf(page, PAGE_SIZE, "%u\n",
- (u32)(atomic_long_read(&lun->lun_stats.rx_data_octets) >> 20));
- rcu_read_unlock();
- return ret;
-}
-
-static ssize_t target_stat_tgt_port_read_mbytes_show(struct config_item *item,
- char *page)
-{
- struct se_lun *lun = to_stat_tgt_port(item);
- struct se_device *dev;
- ssize_t ret = -ENODEV;
-
- rcu_read_lock();
- dev = rcu_dereference(lun->lun_se_dev);
- if (dev)
- ret = snprintf(page, PAGE_SIZE, "%u\n",
- (u32)(atomic_long_read(&lun->lun_stats.tx_data_octets) >> 20));
- rcu_read_unlock();
- return ret;
-}
+#define tgt_port_show_per_cpu_stat(prefix, field, shift) \
+per_cpu_stat_snprintf(scsi_port_stats, prefix, field, shift); \
+static ssize_t \
+target_stat_##prefix##_show(struct config_item *item, char *page) \
+{ \
+ struct se_lun *lun = to_stat_tgt_port(item); \
+ struct se_device *dev; \
+ int ret; \
+ \
+ rcu_read_lock(); \
+ dev = rcu_dereference(lun->lun_se_dev); \
+ if (!dev) { \
+ rcu_read_unlock(); \
+ return -ENODEV; \
+ } \
+ \
+ ret = per_cpu_stat_##prefix##_snprintf(lun->lun_stats, page); \
+ rcu_read_unlock(); \
+ return ret; \
+}
+
+tgt_port_show_per_cpu_stat(tgt_port_in_cmds, cmd_pdus, 0);
+tgt_port_show_per_cpu_stat(tgt_port_write_mbytes, rx_data_octets, 20);
+tgt_port_show_per_cpu_stat(tgt_port_read_mbytes, tx_data_octets, 20);
static ssize_t target_stat_tgt_port_hs_in_cmds_show(struct config_item *item,
char *page)
@@ -1035,92 +995,34 @@ static ssize_t target_stat_auth_att_count_show(struct config_item *item,
return ret;
}
-static ssize_t target_stat_auth_num_cmds_show(struct config_item *item,
- char *page)
-{
- struct se_lun_acl *lacl = auth_to_lacl(item);
- struct se_node_acl *nacl = lacl->se_lun_nacl;
- struct se_dev_entry_io_stats *stats;
- struct se_dev_entry *deve;
- unsigned int cpu;
- ssize_t ret;
- u32 cmds = 0;
-
- rcu_read_lock();
- deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
- if (!deve) {
- rcu_read_unlock();
- return -ENODEV;
- }
-
- for_each_possible_cpu(cpu) {
- stats = per_cpu_ptr(deve->stats, cpu);
- cmds += stats->total_cmds;
- }
-
- /* scsiAuthIntrOutCommands */
- ret = snprintf(page, PAGE_SIZE, "%u\n", cmds);
- rcu_read_unlock();
- return ret;
-}
-
-static ssize_t target_stat_auth_read_mbytes_show(struct config_item *item,
- char *page)
-{
- struct se_lun_acl *lacl = auth_to_lacl(item);
- struct se_node_acl *nacl = lacl->se_lun_nacl;
- struct se_dev_entry_io_stats *stats;
- struct se_dev_entry *deve;
- unsigned int cpu;
- ssize_t ret;
- u32 bytes = 0;
-
- rcu_read_lock();
- deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
- if (!deve) {
- rcu_read_unlock();
- return -ENODEV;
- }
-
- for_each_possible_cpu(cpu) {
- stats = per_cpu_ptr(deve->stats, cpu);
- bytes += stats->read_bytes;
- }
-
- /* scsiAuthIntrReadMegaBytes */
- ret = snprintf(page, PAGE_SIZE, "%u\n", bytes >> 20);
- rcu_read_unlock();
- return ret;
-}
-
-static ssize_t target_stat_auth_write_mbytes_show(struct config_item *item,
- char *page)
-{
- struct se_lun_acl *lacl = auth_to_lacl(item);
- struct se_node_acl *nacl = lacl->se_lun_nacl;
- struct se_dev_entry_io_stats *stats;
- struct se_dev_entry *deve;
- unsigned int cpu;
- ssize_t ret;
- u32 bytes = 0;
-
- rcu_read_lock();
- deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
- if (!deve) {
- rcu_read_unlock();
- return -ENODEV;
- }
-
- for_each_possible_cpu(cpu) {
- stats = per_cpu_ptr(deve->stats, cpu);
- bytes += stats->write_bytes;
- }
-
- /* scsiAuthIntrWrittenMegaBytes */
- ret = snprintf(page, PAGE_SIZE, "%u\n", bytes >> 20);
- rcu_read_unlock();
- return ret;
-}
+#define auth_show_per_cpu_stat(prefix, field, shift) \
+per_cpu_stat_snprintf(se_dev_entry_io_stats, prefix, field, shift); \
+static ssize_t \
+target_stat_##prefix##_show(struct config_item *item, char *page) \
+{ \
+ struct se_lun_acl *lacl = auth_to_lacl(item); \
+ struct se_node_acl *nacl = lacl->se_lun_nacl; \
+ struct se_dev_entry *deve; \
+ int ret; \
+ \
+ rcu_read_lock(); \
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun); \
+ if (!deve) { \
+ rcu_read_unlock(); \
+ return -ENODEV; \
+ } \
+ \
+ ret = per_cpu_stat_##prefix##_snprintf(deve->stats, page); \
+ rcu_read_unlock(); \
+ return ret; \
+}
+
+/* scsiAuthIntrOutCommands */
+auth_show_per_cpu_stat(auth_num_cmds, total_cmds, 0);
+/* scsiAuthIntrReadMegaBytes */
+auth_show_per_cpu_stat(auth_read_mbytes, read_bytes, 20);
+/* scsiAuthIntrWrittenMegaBytes */
+auth_show_per_cpu_stat(auth_write_mbytes, write_bytes, 20);
static ssize_t target_stat_auth_hs_num_cmds_show(struct config_item *item,
char *page)
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index c0e429e5ef31..8b5ad50baa43 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -548,7 +548,7 @@ int core_tpg_register(
ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
true, g_lun0_dev);
if (ret < 0) {
- kfree(se_tpg->tpg_virt_lun0);
+ target_tpg_free_lun(&se_tpg->tpg_virt_lun0->rcu_head);
return ret;
}
}
@@ -595,7 +595,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
if (se_tpg->proto_id >= 0) {
core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0);
- kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head);
+ call_rcu(&se_tpg->tpg_virt_lun0->rcu_head, target_tpg_free_lun);
}
target_tpg_deregister_rtpi(se_tpg);
@@ -615,6 +615,13 @@ struct se_lun *core_tpg_alloc_lun(
pr_err("Unable to allocate se_lun memory\n");
return ERR_PTR(-ENOMEM);
}
+
+ lun->lun_stats = alloc_percpu(struct scsi_port_stats);
+ if (!lun->lun_stats) {
+ pr_err("Unable to allocate se_lun stats memory\n");
+ goto free_lun;
+ }
+
lun->unpacked_lun = unpacked_lun;
atomic_set(&lun->lun_acl_count, 0);
init_completion(&lun->lun_shutdown_comp);
@@ -628,6 +635,18 @@ struct se_lun *core_tpg_alloc_lun(
lun->lun_tpg = tpg;
return lun;
+
+free_lun:
+ kfree(lun);
+ return ERR_PTR(-ENOMEM);
+}
+
+void target_tpg_free_lun(struct rcu_head *head)
+{
+ struct se_lun *lun = container_of(head, struct se_lun, rcu_head);
+
+ free_percpu(lun->lun_stats);
+ kfree(lun);
}
int core_tpg_add_lun(
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 0a76bdfe5528..e8b7955d40f2 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -126,12 +126,12 @@ int init_se_kmem_caches(void)
}
target_completion_wq = alloc_workqueue("target_completion",
- WQ_MEM_RECLAIM, 0);
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!target_completion_wq)
goto out_free_lba_map_mem_cache;
target_submission_wq = alloc_workqueue("target_submission",
- WQ_MEM_RECLAIM, 0);
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!target_submission_wq)
goto out_free_completion_wq;
@@ -1571,7 +1571,12 @@ target_cmd_parse_cdb(struct se_cmd *cmd)
return ret;
cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
- atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus);
+ /*
+ * If this is the xcopy_lun, lun_stats will be NULL because those
+ * statistics cannot be exported.
+ */
+ if (cmd->se_lun->lun_stats)
+ this_cpu_inc(cmd->se_lun->lun_stats->cmd_pdus);
return 0;
}
EXPORT_SYMBOL(target_cmd_parse_cdb);
@@ -2597,8 +2602,9 @@ queue_rsp:
!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
goto queue_status;
- atomic_long_add(cmd->data_length,
- &cmd->se_lun->lun_stats.tx_data_octets);
+ if (cmd->se_lun->lun_stats)
+ this_cpu_add(cmd->se_lun->lun_stats->tx_data_octets,
+ cmd->data_length);
/*
* Perform READ_STRIP of PI using software emulation when
* backend had PI enabled, if the transport will not be
@@ -2621,14 +2627,16 @@ queue_rsp:
goto queue_full;
break;
case DMA_TO_DEVICE:
- atomic_long_add(cmd->data_length,
- &cmd->se_lun->lun_stats.rx_data_octets);
+ if (cmd->se_lun->lun_stats)
+ this_cpu_add(cmd->se_lun->lun_stats->rx_data_octets,
+ cmd->data_length);
/*
* Check if we need to send READ payload for BIDI-COMMAND
*/
if (cmd->se_cmd_flags & SCF_BIDI) {
- atomic_long_add(cmd->data_length,
- &cmd->se_lun->lun_stats.tx_data_octets);
+ if (cmd->se_lun->lun_stats)
+ this_cpu_add(cmd->se_lun->lun_stats->tx_data_octets,
+ cmd->data_length);
ret = cmd->se_tfo->queue_data_in(cmd);
if (ret)
goto queue_full;
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 877ce58c0a70..93534a6e14b7 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -462,7 +462,7 @@ static const struct target_core_fabric_ops xcopy_pt_tfo = {
int target_xcopy_setup_pt(void)
{
- xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0);
+ xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!xcopy_wq) {
pr_err("Unable to allocate xcopy_wq\n");
return -ENOMEM;
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 639fc358ed0f..f686d95d3273 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -250,7 +250,7 @@ static struct se_portal_group *ft_add_tpg(struct se_wwn *wwn, const char *name)
tpg->lport_wwn = ft_wwn;
INIT_LIST_HEAD(&tpg->lun_list);
- wq = alloc_workqueue("tcm_fc", 0, 1);
+ wq = alloc_workqueue("tcm_fc", WQ_PERCPU, 1);
if (!wq) {
kfree(tpg);
return NULL;
diff --git a/drivers/ufs/core/Makefile b/drivers/ufs/core/Makefile
index cf820fa09a04..51e1867e524e 100644
--- a/drivers/ufs/core/Makefile
+++ b/drivers/ufs/core/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
ufshcd-core-y += ufshcd.o ufs-sysfs.o ufs-mcq.o
+ufshcd-core-$(CONFIG_RPMB) += ufs-rpmb.o
ufshcd-core-$(CONFIG_DEBUG_FS) += ufs-debugfs.o
ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index c9bdd4140fd0..9ab91b4c05b0 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -134,17 +134,15 @@ unsigned int ufshcd_mcq_queue_cfg_addr(struct ufs_hba *hba)
EXPORT_SYMBOL_GPL(ufshcd_mcq_queue_cfg_addr);
/**
- * ufshcd_mcq_decide_queue_depth - decide the queue depth
+ * ufshcd_get_hba_mac - query the maximum number of active commands supported
+ * by the host controller
* @hba: per adapter instance
*
- * Return: queue-depth on success, non-zero on error
+ * Return: queue depth on success; negative upon error.
*
- * MAC - Max. Active Command of the Host Controller (HC)
- * HC wouldn't send more than this commands to the device.
- * Calculates and adjusts the queue depth based on the depth
- * supported by the HC and ufs device.
+ * MAC = Maximum number of Active Commands supported by the Host Controller.
*/
-int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
+int ufshcd_get_hba_mac(struct ufs_hba *hba)
{
int mac;
@@ -162,18 +160,7 @@ int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
mac = hba->vops->get_hba_mac(hba);
}
if (mac < 0)
- goto err;
-
- WARN_ON_ONCE(!hba->dev_info.bqueuedepth);
- /*
- * max. value of bqueuedepth = 256, mac is host dependent.
- * It is mandatory for UFS device to define bQueueDepth if
- * shared queuing architecture is enabled.
- */
- return min_t(int, mac, hba->dev_info.bqueuedepth);
-
-err:
- dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
+ dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
return mac;
}
@@ -307,9 +294,10 @@ static void ufshcd_mcq_process_cqe(struct ufs_hba *hba,
struct ufs_hw_queue *hwq)
{
struct cq_entry *cqe = ufshcd_mcq_cur_cqe(hwq);
- int tag = ufshcd_mcq_get_tag(hba, cqe);
if (cqe->command_desc_base_addr) {
+ int tag = ufshcd_mcq_get_tag(hba, cqe);
+
ufshcd_compl_one_cqe(hba, tag, cqe);
/* After processed the cqe, mark it empty (invalid) entry */
cqe->command_desc_base_addr = 0;
@@ -491,9 +479,6 @@ int ufshcd_mcq_init(struct ufs_hba *hba)
mutex_init(&hwq->sq_mutex);
}
- /* The very first HW queue serves device commands */
- hba->dev_cmd_queue = &hba->uhq[0];
-
host->host_tagset = 1;
return 0;
}
@@ -546,8 +531,9 @@ static int ufshcd_mcq_sq_start(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
*/
int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
{
- struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
- struct scsi_cmnd *cmd = lrbp->cmd;
+ struct scsi_cmnd *cmd = ufshcd_tag_to_cmd(hba, task_tag);
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+ struct request *rq = scsi_cmd_to_rq(cmd);
struct ufs_hw_queue *hwq;
void __iomem *reg, *opr_sqd_base;
u32 nexus, id, val;
@@ -556,24 +542,21 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
return -ETIMEDOUT;
- if (task_tag != hba->nutrs - UFSHCD_NUM_RESERVED) {
- if (!cmd)
- return -EINVAL;
- hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
- if (!hwq)
- return 0;
- } else {
- hwq = hba->dev_cmd_queue;
- }
+ if (!cmd)
+ return -EINVAL;
+
+ hwq = ufshcd_mcq_req_to_hwq(hba, rq);
+ if (!hwq)
+ return 0;
id = hwq->id;
- mutex_lock(&hwq->sq_mutex);
+ guard(mutex)(&hwq->sq_mutex);
/* stop the SQ fetching before working on it */
err = ufshcd_mcq_sq_stop(hba, hwq);
if (err)
- goto unlock;
+ return err;
/* SQCTI = EXT_IID, IID, LUN, Task Tag */
nexus = lrbp->lun << 8 | task_tag;
@@ -600,8 +583,6 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
if (ufshcd_mcq_sq_start(hba, hwq))
err = -ETIMEDOUT;
-unlock:
- mutex_unlock(&hwq->sq_mutex);
return err;
}
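
The guard(mutex)() conversion above relies on the kernel's scope-based cleanup helpers from <linux/cleanup.h>: the mutex is released automatically on every return path, which is what allows the unlock label to be dropped. A userspace sketch of the same idea built on the GCC/Clang cleanup attribute (build with -pthread):

#include <pthread.h>
#include <stdio.h>

static void unlock_cb(pthread_mutex_t **m) { pthread_mutex_unlock(*m); }

/* Userspace stand-in for guard(mutex)(): lock now, unlock at scope exit. */
#define guard_mutex(m) \
	pthread_mutex_t *__guard __attribute__((cleanup(unlock_cb))) = \
		(pthread_mutex_lock(m), (m))

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int work(int fail)
{
	guard_mutex(&lock);

	if (fail)
		return -1;	/* no explicit unlock needed */
	return 0;
}

int main(void)
{
	printf("%d\n", work(0));
	printf("%d\n", work(1));
	return 0;
}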
@@ -632,7 +613,8 @@ static void ufshcd_mcq_nullify_sqe(struct utp_transfer_req_desc *utrd)
static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba,
struct ufs_hw_queue *hwq, int task_tag)
{
- struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
+ struct scsi_cmnd *cmd = ufshcd_tag_to_cmd(hba, task_tag);
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
struct utp_transfer_req_desc *utrd;
__le64 cmd_desc_base_addr;
bool ret = false;
@@ -683,7 +665,7 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
struct Scsi_Host *host = cmd->device->host;
struct ufs_hba *hba = shost_priv(host);
int tag = scsi_cmd_to_rq(cmd)->tag;
- struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
struct ufs_hw_queue *hwq;
int err;
diff --git a/drivers/ufs/core/ufs-rpmb.c b/drivers/ufs/core/ufs-rpmb.c
new file mode 100644
index 000000000000..ffad049872b9
--- /dev/null
+++ b/drivers/ufs/core/ufs-rpmb.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * UFS OP-TEE based RPMB Driver
+ *
+ * Copyright (C) 2025 Micron Technology, Inc.
+ * Copyright (C) 2025 Qualcomm Technologies, Inc.
+ *
+ * Authors:
+ * Bean Huo <beanhuo@micron.com>
+ * Can Guo <can.guo@oss.qualcomm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/rpmb.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <ufs/ufshcd.h>
+#include <linux/unaligned.h>
+#include "ufshcd-priv.h"
+
+#define UFS_RPMB_SEC_PROTOCOL 0xEC /* JEDEC UFS application */
+#define UFS_RPMB_SEC_PROTOCOL_ID 0x01 /* JEDEC UFS RPMB protocol ID, CDB byte3 */
+
+static const struct bus_type ufs_rpmb_bus_type = {
+ .name = "ufs_rpmb",
+};
+
+/* UFS RPMB device structure */
+struct ufs_rpmb_dev {
+ u8 region_id;
+ struct device dev;
+ struct rpmb_dev *rdev;
+ struct ufs_hba *hba;
+ struct list_head node;
+};
+
+static int ufs_sec_submit(struct ufs_hba *hba, u16 spsp, void *buffer, size_t len, bool send)
+{
+ struct scsi_device *sdev = hba->ufs_rpmb_wlun;
+ u8 cdb[12] = { };
+
+ cdb[0] = send ? SECURITY_PROTOCOL_OUT : SECURITY_PROTOCOL_IN;
+ cdb[1] = UFS_RPMB_SEC_PROTOCOL;
+ put_unaligned_be16(spsp, &cdb[2]);
+ put_unaligned_be32(len, &cdb[6]);
+
+ return scsi_execute_cmd(sdev, cdb, send ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
+ buffer, len, /*timeout=*/30 * HZ, 0, NULL);
+}
+
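ufs_sec_submit() emits a 12-byte SECURITY PROTOCOL IN/OUT CDB; the opcodes (0xa2 for IN, 0xb5 for OUT) come from SPC rather than this file, so treat them as an assumption here. Sketch of the bytes produced for a 512-byte read on RPMB region 0:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t cdb[12] = { 0 };
	uint16_t spsp = (0 << 8) | 0x01;  /* region 0, JEDEC RPMB protocol ID */
	uint32_t len = 512;

	cdb[0] = 0xa2;			/* SECURITY PROTOCOL IN (assumed SPC opcode) */
	cdb[1] = 0xec;			/* JEDEC UFS application */
	cdb[2] = (uint8_t)(spsp >> 8);
	cdb[3] = (uint8_t)spsp;
	cdb[6] = (uint8_t)(len >> 24);
	cdb[7] = (uint8_t)(len >> 16);
	cdb[8] = (uint8_t)(len >> 8);
	cdb[9] = (uint8_t)len;

	for (int i = 0; i < 12; i++)
		printf("%02x ", cdb[i]);
	printf("\n");	/* a2 ec 00 01 00 00 00 00 02 00 00 00 */
	return 0;
}
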
+/* UFS RPMB route frames implementation */
+static int ufs_rpmb_route_frames(struct device *dev, u8 *req, unsigned int req_len, u8 *resp,
+ unsigned int resp_len)
+{
+ struct ufs_rpmb_dev *ufs_rpmb = dev_get_drvdata(dev);
+ struct rpmb_frame *frm_out = (struct rpmb_frame *)req;
+ bool need_result_read = true;
+ u16 req_type, protocol_id;
+ struct ufs_hba *hba;
+ int ret;
+
+ if (!ufs_rpmb) {
+ dev_err(dev, "Missing driver data\n");
+ return -ENODEV;
+ }
+
+ hba = ufs_rpmb->hba;
+
+ req_type = be16_to_cpu(frm_out->req_resp);
+
+ switch (req_type) {
+ case RPMB_PROGRAM_KEY:
+ if (req_len != sizeof(struct rpmb_frame) || resp_len != sizeof(struct rpmb_frame))
+ return -EINVAL;
+ break;
+ case RPMB_GET_WRITE_COUNTER:
+ if (req_len != sizeof(struct rpmb_frame) || resp_len != sizeof(struct rpmb_frame))
+ return -EINVAL;
+ need_result_read = false;
+ break;
+ case RPMB_WRITE_DATA:
+ if (req_len % sizeof(struct rpmb_frame) || resp_len != sizeof(struct rpmb_frame))
+ return -EINVAL;
+ break;
+ case RPMB_READ_DATA:
+ if (req_len != sizeof(struct rpmb_frame) || resp_len % sizeof(struct rpmb_frame))
+ return -EINVAL;
+ need_result_read = false;
+ break;
+ default:
+ dev_err(dev, "Unknown request type=0x%04x\n", req_type);
+ return -EINVAL;
+ }
+
+ protocol_id = ufs_rpmb->region_id << 8 | UFS_RPMB_SEC_PROTOCOL_ID;
+
+ ret = ufs_sec_submit(hba, protocol_id, req, req_len, true);
+ if (ret) {
+ dev_err(dev, "Command failed with ret=%d\n", ret);
+ return ret;
+ }
+
+ if (need_result_read) {
+ struct rpmb_frame *frm_resp = (struct rpmb_frame *)resp;
+
+ memset(frm_resp, 0, sizeof(*frm_resp));
+ frm_resp->req_resp = cpu_to_be16(RPMB_RESULT_READ);
+ ret = ufs_sec_submit(hba, protocol_id, resp, resp_len, true);
+ if (ret) {
+ dev_err(dev, "Result read request failed with ret=%d\n", ret);
+ return ret;
+ }
+ }
+
+ if (!ret) {
+ ret = ufs_sec_submit(hba, protocol_id, resp, resp_len, false);
+ if (ret)
+ dev_err(dev, "Response read failed with ret=%d\n", ret);
+ }
+
+ return ret;
+}
+
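The routing above follows the JEDEC RPMB exchange: write-class requests need an explicit result read before the response frame is fetched, while read-class requests return their result inline. For RPMB_WRITE_DATA the sequence is therefore:

	1. SECURITY PROTOCOL OUT  - the request frame(s) carrying the data
	2. SECURITY PROTOCOL OUT  - one frame with req_resp = RPMB_RESULT_READ
	3. SECURITY PROTOCOL IN   - one response frame carrying the result

For RPMB_GET_WRITE_COUNTER and RPMB_READ_DATA, need_result_read stays false and step 2 is skipped.
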
+static void ufs_rpmb_device_release(struct device *dev)
+{
+ struct ufs_rpmb_dev *ufs_rpmb = dev_get_drvdata(dev);
+
+ rpmb_dev_unregister(ufs_rpmb->rdev);
+}
+
+/* UFS RPMB device registration */
+int ufs_rpmb_probe(struct ufs_hba *hba)
+{
+ struct ufs_rpmb_dev *ufs_rpmb, *it, *tmp;
+ struct rpmb_dev *rdev;
+ char *cid = NULL;
+ int region;
+ u32 cap;
+ int ret;
+
+ if (!hba->ufs_rpmb_wlun || hba->dev_info.b_advanced_rpmb_en) {
+ dev_info(hba->dev, "Skip OP-TEE RPMB registration\n");
+ return -ENODEV;
+ }
+
+ /* Check if device_id is available */
+ if (!hba->dev_info.device_id) {
+ dev_err(hba->dev, "UFS Device ID not available\n");
+ return -EINVAL;
+ }
+
+ INIT_LIST_HEAD(&hba->rpmbs);
+
+ struct rpmb_descr descr = {
+ .type = RPMB_TYPE_UFS,
+ .route_frames = ufs_rpmb_route_frames,
+ .reliable_wr_count = hba->dev_info.rpmb_io_size,
+ };
+
+ for (region = 0; region < ARRAY_SIZE(hba->dev_info.rpmb_region_size); region++) {
+ cap = hba->dev_info.rpmb_region_size[region];
+ if (!cap)
+ continue;
+
+ ufs_rpmb = devm_kzalloc(hba->dev, sizeof(*ufs_rpmb), GFP_KERNEL);
+ if (!ufs_rpmb) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ ufs_rpmb->hba = hba;
+ ufs_rpmb->dev.parent = &hba->ufs_rpmb_wlun->sdev_gendev;
+ ufs_rpmb->dev.bus = &ufs_rpmb_bus_type;
+ ufs_rpmb->dev.release = ufs_rpmb_device_release;
+ dev_set_name(&ufs_rpmb->dev, "ufs_rpmb%d", region);
+
+ /* Set driver data BEFORE device_register */
+ dev_set_drvdata(&ufs_rpmb->dev, ufs_rpmb);
+
+ ret = device_register(&ufs_rpmb->dev);
+ if (ret) {
+ dev_err(hba->dev, "Failed to register UFS RPMB device %d\n", region);
+ put_device(&ufs_rpmb->dev);
+ goto err_out;
+ }
+
+ /* Create unique ID by appending region number to device_id */
+ cid = kasprintf(GFP_KERNEL, "%s-R%d", hba->dev_info.device_id, region);
+ if (!cid) {
+ device_unregister(&ufs_rpmb->dev);
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ descr.dev_id = cid;
+ descr.dev_id_len = strlen(cid);
+ descr.capacity = cap;
+
+ /* Register RPMB device */
+ rdev = rpmb_dev_register(&ufs_rpmb->dev, &descr);
+ if (IS_ERR(rdev)) {
+ dev_err(hba->dev, "Failed to register UFS RPMB device.\n");
+ device_unregister(&ufs_rpmb->dev);
+ ret = PTR_ERR(rdev);
+ goto err_out;
+ }
+
+ kfree(cid);
+ cid = NULL;
+
+ ufs_rpmb->rdev = rdev;
+ ufs_rpmb->region_id = region;
+
+ list_add_tail(&ufs_rpmb->node, &hba->rpmbs);
+
+ dev_info(hba->dev, "UFS RPMB region %d registered (capacity=%u)\n", region, cap);
+ }
+
+ return 0;
+err_out:
+ kfree(cid);
+ list_for_each_entry_safe(it, tmp, &hba->rpmbs, node) {
+ list_del(&it->node);
+ device_unregister(&it->dev);
+ }
+
+ return ret;
+}
+
+/* UFS RPMB remove handler */
+void ufs_rpmb_remove(struct ufs_hba *hba)
+{
+ struct ufs_rpmb_dev *ufs_rpmb, *tmp;
+
+ if (list_empty(&hba->rpmbs))
+ return;
+
+ /* Remove all registered RPMB devices */
+ list_for_each_entry_safe(ufs_rpmb, tmp, &hba->rpmbs, node) {
+ dev_info(hba->dev, "Removing UFS RPMB region %d\n", ufs_rpmb->region_id);
+ /* Remove from list first */
+ list_del(&ufs_rpmb->node);
+ /* Unregister device */
+ device_unregister(&ufs_rpmb->dev);
+ }
+
+ dev_info(hba->dev, "All UFS RPMB devices unregistered\n");
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("OP-TEE UFS RPMB driver");
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index 0086816b27cd..b33f8656edb5 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -235,7 +235,7 @@ static int ufshcd_ahit_to_us(u32 ahit)
}
/* Convert microseconds to Auto-Hibernate Idle Timer register value */
-static u32 ufshcd_us_to_ahit(unsigned int timer)
+u32 ufshcd_us_to_ahit(unsigned int timer)
{
unsigned int scale;
@@ -245,6 +245,7 @@ static u32 ufshcd_us_to_ahit(unsigned int timer)
return FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, timer) |
FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, scale);
}
+EXPORT_SYMBOL_GPL(ufshcd_us_to_ahit);
static int ufshcd_read_hci_reg(struct ufs_hba *hba, u32 *val, unsigned int reg)
{
diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c
index 252186124669..58b506eac6dc 100644
--- a/drivers/ufs/core/ufs_bsg.c
+++ b/drivers/ufs/core/ufs_bsg.c
@@ -105,7 +105,7 @@ static int ufs_bsg_exec_advanced_rpmb_req(struct ufs_hba *hba, struct bsg_job *j
if (dir != DMA_NONE) {
payload = &job->request_payload;
- if (!payload || !payload->payload_len || !payload->sg_cnt)
+ if (!payload->payload_len || !payload->sg_cnt)
return -EINVAL;
sg_cnt = dma_map_sg(hba->host->dma_dev, payload->sg_list, payload->sg_cnt, dir);
diff --git a/drivers/ufs/core/ufs_trace.h b/drivers/ufs/core/ufs_trace.h
index 584c2b5c6ad9..309ae51b4906 100644
--- a/drivers/ufs/core/ufs_trace.h
+++ b/drivers/ufs/core/ufs_trace.h
@@ -42,7 +42,6 @@
#define UFS_CMD_TRACE_STRINGS \
EM(UFS_CMD_SEND, "send_req") \
EM(UFS_CMD_COMP, "complete_rsp") \
- EM(UFS_DEV_COMP, "dev_complete") \
EM(UFS_QUERY_SEND, "query_send") \
EM(UFS_QUERY_COMP, "query_complete") \
EM(UFS_QUERY_ERR, "query_complete_err") \
diff --git a/drivers/ufs/core/ufs_trace_types.h b/drivers/ufs/core/ufs_trace_types.h
index f2d5ad1d92b9..bf821970f092 100644
--- a/drivers/ufs/core/ufs_trace_types.h
+++ b/drivers/ufs/core/ufs_trace_types.h
@@ -5,7 +5,6 @@
enum ufs_trace_str_t {
UFS_CMD_SEND,
UFS_CMD_COMP,
- UFS_DEV_COMP,
UFS_QUERY_SEND,
UFS_QUERY_COMP,
UFS_QUERY_ERR,
diff --git a/drivers/ufs/core/ufshcd-crypto.h b/drivers/ufs/core/ufshcd-crypto.h
index 89bb97c14c15..c148a5194378 100644
--- a/drivers/ufs/core/ufshcd-crypto.h
+++ b/drivers/ufs/core/ufshcd-crypto.h
@@ -38,10 +38,10 @@ ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp,
}
static inline int ufshcd_crypto_fill_prdt(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp)
+ struct scsi_cmnd *cmd)
{
- struct scsi_cmnd *cmd = lrbp->cmd;
const struct bio_crypt_ctx *crypt_ctx = scsi_cmd_to_rq(cmd)->crypt_ctx;
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
if (crypt_ctx && hba->vops && hba->vops->fill_crypto_prdt)
return hba->vops->fill_crypto_prdt(hba, crypt_ctx,
@@ -51,17 +51,19 @@ static inline int ufshcd_crypto_fill_prdt(struct ufs_hba *hba,
}
static inline void ufshcd_crypto_clear_prdt(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp)
+ struct scsi_cmnd *cmd)
{
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+
if (!(hba->quirks & UFSHCD_QUIRK_KEYS_IN_PRDT))
return;
- if (!(scsi_cmd_to_rq(lrbp->cmd)->crypt_ctx))
+ if (!(scsi_cmd_to_rq(cmd)->crypt_ctx))
return;
/* Zeroize the PRDT because it can contain cryptographic keys. */
memzero_explicit(lrbp->ucd_prdt_ptr,
- ufshcd_sg_entry_size(hba) * scsi_sg_count(lrbp->cmd));
+ ufshcd_sg_entry_size(hba) * scsi_sg_count(cmd));
}
bool ufshcd_crypto_enable(struct ufs_hba *hba);
@@ -82,13 +84,15 @@ ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp,
struct request_desc_header *h) { }
static inline int ufshcd_crypto_fill_prdt(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp)
+ struct scsi_cmnd *cmd)
{
return 0;
}
static inline void ufshcd_crypto_clear_prdt(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp) { }
+ struct scsi_cmnd *cmd)
+{
+}
static inline bool ufshcd_crypto_enable(struct ufs_hba *hba)
{
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index d0a2c963a27d..4259f499382f 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -6,6 +6,8 @@
#include <linux/pm_runtime.h>
#include <ufs/ufshcd.h>
+void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs);
+
static inline bool ufshcd_is_user_access_allowed(struct ufs_hba *hba)
{
return !hba->shutting_down;
@@ -65,7 +67,7 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
struct cq_entry *cqe);
int ufshcd_mcq_init(struct ufs_hba *hba);
void ufshcd_mcq_disable(struct ufs_hba *hba);
-int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba);
+int ufshcd_get_hba_mac(struct ufs_hba *hba);
int ufshcd_mcq_memory_alloc(struct ufs_hba *hba);
struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
struct request *req);
@@ -75,14 +77,19 @@ bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd);
int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag);
int ufshcd_mcq_abort(struct scsi_cmnd *cmd);
int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
-void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp);
+void ufshcd_release_scsi_cmd(struct ufs_hba *hba, struct scsi_cmnd *cmd);
-#define SD_ASCII_STD true
-#define SD_RAW false
-int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
- u8 **buf, bool ascii);
+/**
+ * enum ufs_descr_fmt - UFS string descriptor format
+ * @SD_RAW: Raw UTF-16 format
+ * @SD_ASCII_STD: Convert to null-terminated ASCII string
+ */
+enum ufs_descr_fmt {
+ SD_RAW = 0,
+ SD_ASCII_STD = 1,
+};
+int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, u8 **buf, enum ufs_descr_fmt fmt);
int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
int ufshcd_send_bsg_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
@@ -361,6 +368,26 @@ static inline bool ufs_is_valid_unit_desc_lun(struct ufs_dev_info *dev_info, u8
return lun == UFS_UPIU_RPMB_WLUN || (lun < dev_info->max_lu_supported);
}
+/*
+ * Convert a block layer tag into a SCSI command pointer. This function is
+ * called once per completed I/O and is also called from error handling paths.
+ */
+static inline struct scsi_cmnd *ufshcd_tag_to_cmd(struct ufs_hba *hba, u32 tag)
+{
+ /*
+ * Host-wide tags are enabled in MCQ mode only. See also the
+ * host->host_tagset assignment in ufs-mcq.c.
+ */
+ struct blk_mq_tags *tags = hba->host->tag_set.shared_tags ?:
+ hba->host->tag_set.tags[0];
+ struct request *rq = blk_mq_tag_to_rq(tags, tag);
+
+ if (WARN_ON_ONCE(!rq))
+ return NULL;
+
+ return blk_mq_rq_to_pdu(rq);
+}
+
static inline void ufshcd_inc_sq_tail(struct ufs_hw_queue *q)
__must_hold(&q->sq_lock)
{
@@ -411,4 +438,17 @@ static inline u32 ufshcd_mcq_get_sq_head_slot(struct ufs_hw_queue *q)
return val / sizeof(struct utp_transfer_req_desc);
}
+#if IS_ENABLED(CONFIG_RPMB)
+int ufs_rpmb_probe(struct ufs_hba *hba);
+void ufs_rpmb_remove(struct ufs_hba *hba);
+#else
+static inline int ufs_rpmb_probe(struct ufs_hba *hba)
+{
+ return 0;
+}
+static inline void ufs_rpmb_remove(struct ufs_hba *hba)
+{
+}
+#endif
+
#endif /* _UFSHCD_PRIV_H_ */
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index d6a060a72461..040a0ceb170a 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -28,6 +28,7 @@
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
#include "ufshcd-priv.h"
#include <ufs/ufs_quirks.h>
#include <ufs/unipro.h>
@@ -403,10 +404,11 @@ static void ufshcd_configure_wb(struct ufs_hba *hba)
ufshcd_wb_toggle_buf_flush(hba, true);
}
-static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
+static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrb,
enum ufs_trace_str_t str_t)
{
- struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
+ struct utp_upiu_req *rq = lrb->ucd_req_ptr;
struct utp_upiu_header *header;
if (!trace_ufshcd_upiu_enabled())
@@ -415,7 +417,7 @@ static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
if (str_t == UFS_CMD_SEND)
header = &rq->header;
else
- header = &hba->lrb[tag].ucd_rsp_ptr->header;
+ header = &lrb->ucd_rsp_ptr->header;
trace_ufshcd_upiu(hba, str_t, header, &rq->sc.cdb,
UFS_TSF_CDB);
@@ -472,7 +474,7 @@ static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
}
-static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
+static void ufshcd_add_command_trace(struct ufs_hba *hba, struct scsi_cmnd *cmd,
enum ufs_trace_str_t str_t)
{
u64 lba = 0;
@@ -480,16 +482,13 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
u32 doorbell = 0;
u32 intr;
u32 hwq_id = 0;
- struct ufshcd_lrb *lrbp = &hba->lrb[tag];
- struct scsi_cmnd *cmd = lrbp->cmd;
struct request *rq = scsi_cmd_to_rq(cmd);
+ unsigned int tag = rq->tag;
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
int transfer_len = -1;
- if (!cmd)
- return;
-
/* trace UPIU also */
- ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
+ ufshcd_add_cmd_upiu_trace(hba, lrbp, str_t);
if (!trace_ufshcd_command_enabled())
return;
@@ -503,7 +502,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
lba = scsi_get_lba(cmd);
if (opcode == WRITE_10)
- group_id = lrbp->cmd->cmnd[6];
+ group_id = cmd->cmnd[6];
} else if (opcode == UNMAP) {
/*
* The number of Bytes to be unmapped beginning with the lba.
@@ -596,14 +595,13 @@ static void ufshcd_print_evt_hist(struct ufs_hba *hba)
ufshcd_vops_dbg_register_dump(hba);
}
-static
-void ufshcd_print_tr(struct ufs_hba *hba, int tag, bool pr_prdt)
+static void ufshcd_print_tr(struct ufs_hba *hba, struct scsi_cmnd *cmd,
+ bool pr_prdt)
{
- const struct ufshcd_lrb *lrbp;
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+ const int tag = scsi_cmd_to_rq(cmd)->tag;
int prdt_length;
- lrbp = &hba->lrb[tag];
-
if (hba->monitor.enabled) {
dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n", tag,
div_u64(lrbp->issue_time_stamp_local_clock, 1000));
@@ -646,7 +644,8 @@ static bool ufshcd_print_tr_iter(struct request *req, void *priv)
struct Scsi_Host *shost = sdev->host;
struct ufs_hba *hba = shost_priv(shost);
- ufshcd_print_tr(hba, req->tag, *(bool *)priv);
+ if (!blk_mq_is_reserved_rq(req))
+ ufshcd_print_tr(hba, blk_mq_rq_to_pdu(req), *(bool *)priv);
return true;
}
@@ -856,7 +855,7 @@ static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp,
struct cq_entry *cqe)
{
if (cqe)
- return le32_to_cpu(cqe->status) & MASK_OCS;
+ return cqe->overall_status & MASK_OCS;
return lrbp->utr_descriptor_ptr->header.ocs & MASK_OCS;
}
@@ -1076,7 +1075,7 @@ void ufshcd_pm_qos_exit(struct ufs_hba *hba)
* @hba: per adapter instance
* @on: If True, vote for perf PM QoS mode otherwise power save mode
*/
-static void ufshcd_pm_qos_update(struct ufs_hba *hba, bool on)
+void ufshcd_pm_qos_update(struct ufs_hba *hba, bool on)
{
guard(mutex)(&hba->pm_qos_mutex);
@@ -1085,6 +1084,7 @@ static void ufshcd_pm_qos_update(struct ufs_hba *hba, bool on)
cpu_latency_qos_update_request(&hba->pm_qos_req, on ? 0 : PM_QOS_DEFAULT_VALUE);
}
+EXPORT_SYMBOL_GPL(ufshcd_pm_qos_update);
/**
* ufshcd_set_clk_freq - set UFS controller clock frequencies
@@ -1290,13 +1290,13 @@ static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
*/
static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
{
- const struct scsi_device *sdev;
+ struct scsi_device *sdev;
unsigned long flags;
u32 pending = 0;
spin_lock_irqsave(hba->host->host_lock, flags);
__shost_for_each_device(sdev, hba->host)
- pending += sbitmap_weight(&sdev->budget_map);
+ pending += scsi_device_busy(sdev);
spin_unlock_irqrestore(hba->host->host_lock, flags);
return pending;
@@ -2294,20 +2294,21 @@ static inline int ufshcd_monitor_opcode2dir(u8 opcode)
return -EINVAL;
}
+/* Must only be called for SCSI commands. */
static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp)
+ struct scsi_cmnd *cmd)
{
const struct ufs_hba_monitor *m = &hba->monitor;
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
- return (m->enabled && lrbp && lrbp->cmd &&
- (!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) &&
- ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp));
+ return m->enabled &&
+ (!m->chunk_size || m->chunk_size == cmd->sdb.length) &&
+ ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp);
}
-static void ufshcd_start_monitor(struct ufs_hba *hba,
- const struct ufshcd_lrb *lrbp)
+static void ufshcd_start_monitor(struct ufs_hba *hba, struct scsi_cmnd *cmd)
{
- int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
+ int dir = ufshcd_monitor_opcode2dir(cmd->cmnd[0]);
unsigned long flags;
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -2316,14 +2317,15 @@ static void ufshcd_start_monitor(struct ufs_hba *hba,
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
-static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb *lrbp)
+static void ufshcd_update_monitor(struct ufs_hba *hba, struct scsi_cmnd *cmd)
{
- int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
+ struct request *req = scsi_cmd_to_rq(cmd);
+ const struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+ int dir = ufshcd_monitor_opcode2dir(cmd->cmnd[0]);
unsigned long flags;
spin_lock_irqsave(hba->host->host_lock, flags);
if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
- const struct request *req = scsi_cmd_to_rq(lrbp->cmd);
struct ufs_hba_monitor *m = &hba->monitor;
ktime_t now, inc, lat;
@@ -2348,17 +2350,24 @@ static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb *
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
+/* Returns %true for SCSI commands and %false for device management commands. */
+static bool ufshcd_is_scsi_cmd(struct scsi_cmnd *cmd)
+{
+ return !blk_mq_is_reserved_rq(scsi_cmd_to_rq(cmd));
+}
+
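With device-management commands now carried by reserved block-layer requests, the SCSI-vs-internal distinction no longer depends on lrbp->cmd being NULL. A hedged usage sketch of the helper (hypothetical function name, mirroring the completion logic later in this patch):

static void example_complete(struct ufs_hba *hba, struct scsi_cmnd *cmd)
{
	if (ufshcd_is_scsi_cmd(cmd)) {
		/* Regular I/O: unmap DMA and release host resources. */
		ufshcd_release_scsi_cmd(hba, cmd);
	} else {
		/* Reserved request: a NOP OUT, query or RPMB UPIU. */
		cmd->result = 0;
	}
	scsi_done(cmd);
}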
/**
* ufshcd_send_command - Send SCSI or device management commands
* @hba: per adapter instance
- * @task_tag: Task tag of the command
+ * @cmd: SCSI command or device management command pointer
* @hwq: pointer to hardware queue instance
*/
-static inline
-void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
- struct ufs_hw_queue *hwq)
+static inline void ufshcd_send_command(struct ufs_hba *hba,
+ struct scsi_cmnd *cmd,
+ struct ufs_hw_queue *hwq)
{
- struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+ const int tag = scsi_cmd_to_rq(cmd)->tag;
unsigned long flags;
if (hba->monitor.enabled) {
@@ -2367,11 +2376,12 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
lrbp->compl_time_stamp = ktime_set(0, 0);
lrbp->compl_time_stamp_local_clock = 0;
}
- ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
- if (lrbp->cmd)
+ if (ufshcd_is_scsi_cmd(cmd)) {
+ ufshcd_add_command_trace(hba, cmd, UFS_CMD_SEND);
ufshcd_clk_scaling_start_busy(hba);
- if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
- ufshcd_start_monitor(hba, lrbp);
+ if (unlikely(ufshcd_should_inform_monitor(hba, cmd)))
+ ufshcd_start_monitor(hba, cmd);
+ }
if (hba->mcq_enabled) {
int utrd_size = sizeof(struct utp_transfer_req_desc);
@@ -2386,22 +2396,22 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
} else {
spin_lock_irqsave(&hba->outstanding_lock, flags);
if (hba->vops && hba->vops->setup_xfer_req)
- hba->vops->setup_xfer_req(hba, lrbp->task_tag,
- !!lrbp->cmd);
- __set_bit(lrbp->task_tag, &hba->outstanding_reqs);
- ufshcd_writel(hba, 1 << lrbp->task_tag,
- REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ hba->vops->setup_xfer_req(hba, tag,
+ ufshcd_is_scsi_cmd(cmd));
+ __set_bit(tag, &hba->outstanding_reqs);
+ ufshcd_writel(hba, 1 << tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
spin_unlock_irqrestore(&hba->outstanding_lock, flags);
}
}
/**
* ufshcd_copy_sense_data - Copy sense data in case of check condition
- * @lrbp: pointer to local reference block
+ * @cmd: SCSI command
*/
-static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
+static inline void ufshcd_copy_sense_data(struct scsi_cmnd *cmd)
{
- u8 *const sense_buffer = lrbp->cmd->sense_buffer;
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+ u8 *const sense_buffer = cmd->sense_buffer;
u16 resp_len;
int len;
@@ -2474,7 +2484,6 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS_SDB) + 1;
hba->nutmrs =
((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
- hba->reserved_slot = hba->nutrs - 1;
hba->nortt = FIELD_GET(MASK_NUMBER_OUTSTANDING_RTT, hba->capabilities) + 1;
@@ -2618,7 +2627,7 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
init_completion(&uic_cmd->done);
- uic_cmd->cmd_active = 1;
+ uic_cmd->cmd_active = true;
ufshcd_dispatch_uic_cmd(hba, uic_cmd);
return 0;
@@ -2706,13 +2715,13 @@ static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int
/**
* ufshcd_map_sg - Map scatter-gather list to prdt
* @hba: per adapter instance
- * @lrbp: pointer to local reference block
+ * @cmd: SCSI command
*
* Return: 0 in case of success, non-zero value in case of failure.
*/
-static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+static int ufshcd_map_sg(struct ufs_hba *hba, struct scsi_cmnd *cmd)
{
- struct scsi_cmnd *cmd = lrbp->cmd;
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
int sg_segments = scsi_dma_map(cmd);
if (sg_segments < 0)
@@ -2720,7 +2729,7 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
ufshcd_sgl_to_prdt(hba, lrbp, sg_segments, scsi_sglist(cmd));
- return ufshcd_crypto_fill_prdt(hba, lrbp);
+ return ufshcd_crypto_fill_prdt(hba, cmd);
}
/**
@@ -2779,13 +2788,14 @@ ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
/**
* ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
* for scsi commands
- * @lrbp: local reference block pointer
+ * @cmd: SCSI command
* @upiu_flags: flags
*/
-static
-void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
+static void ufshcd_prepare_utp_scsi_cmd_upiu(struct scsi_cmnd *cmd,
+ u8 upiu_flags)
{
- struct scsi_cmnd *cmd = lrbp->cmd;
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+ const int tag = scsi_cmd_to_rq(cmd)->tag;
struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
unsigned short cdb_len;
@@ -2793,11 +2803,11 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
.transaction_code = UPIU_TRANSACTION_COMMAND,
.flags = upiu_flags,
.lun = lrbp->lun,
- .task_tag = lrbp->task_tag,
+ .task_tag = tag,
.command_set_type = UPIU_COMMAND_SET_TYPE_SCSI,
};
- WARN_ON_ONCE(ucd_req_ptr->header.task_tag != lrbp->task_tag);
+ WARN_ON_ONCE(ucd_req_ptr->header.task_tag != tag);
ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
@@ -2810,13 +2820,15 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
/**
* ufshcd_prepare_utp_query_req_upiu() - fill the utp_transfer_req_desc for query request
* @hba: UFS hba
- * @lrbp: local reference block pointer
+ * @cmd: SCSI command pointer
* @upiu_flags: flags
*/
static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp, u8 upiu_flags)
+ struct scsi_cmnd *cmd, u8 upiu_flags)
{
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
+ const int tag = scsi_cmd_to_rq(cmd)->tag;
struct ufs_query *query = &hba->dev_cmd.query;
u16 len = be16_to_cpu(query->request.upiu_req.length);
@@ -2825,7 +2837,7 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
.transaction_code = UPIU_TRANSACTION_QUERY_REQ,
.flags = upiu_flags,
.lun = lrbp->lun,
- .task_tag = lrbp->task_tag,
+ .task_tag = tag,
.query_function = query->request.query_func,
/* Data segment length only need for WRITE_DESC */
.data_segment_length =
@@ -2844,15 +2856,17 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
memcpy(ucd_req_ptr + 1, query->descriptor, len);
}
-static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
+static inline void ufshcd_prepare_utp_nop_upiu(struct scsi_cmnd *cmd)
{
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
+ const int tag = scsi_cmd_to_rq(cmd)->tag;
memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
ucd_req_ptr->header = (struct utp_upiu_header){
.transaction_code = UPIU_TRANSACTION_NOP_OUT,
- .task_tag = lrbp->task_tag,
+ .task_tag = tag,
};
}
@@ -2860,22 +2874,23 @@ static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
* ufshcd_compose_devman_upiu - UFS Protocol Information Unit(UPIU)
* for Device Management Purposes
* @hba: per adapter instance
- * @lrbp: pointer to local reference block
+ * @cmd: SCSI command pointer
*
* Return: 0 upon success; < 0 upon failure.
*/
static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp)
+ struct scsi_cmnd *cmd)
{
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
u8 upiu_flags;
int ret = 0;
ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, DMA_NONE, 0);
if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
- ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
+ ufshcd_prepare_utp_query_req_upiu(hba, cmd, upiu_flags);
else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
- ufshcd_prepare_utp_nop_upiu(lrbp);
+ ufshcd_prepare_utp_nop_upiu(cmd);
else
ret = -EINVAL;
@@ -2888,38 +2903,69 @@ static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
* ufshcd_comp_scsi_upiu - UFS Protocol Information Unit(UPIU)
* for SCSI Purposes
* @hba: per adapter instance
- * @lrbp: pointer to local reference block
+ * @cmd: SCSI command
*/
-static void ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+static void ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct scsi_cmnd *cmd)
{
- struct request *rq = scsi_cmd_to_rq(lrbp->cmd);
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+ struct request *rq = scsi_cmd_to_rq(cmd);
unsigned int ioprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
u8 upiu_flags;
- ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, lrbp->cmd->sc_data_direction, 0);
+ ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags,
+ cmd->sc_data_direction, 0);
if (ioprio_class == IOPRIO_CLASS_RT)
upiu_flags |= UPIU_CMD_FLAGS_CP;
- ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
+ ufshcd_prepare_utp_scsi_cmd_upiu(cmd, upiu_flags);
}
-static void __ufshcd_setup_cmd(struct ufshcd_lrb *lrbp, struct scsi_cmnd *cmd, u8 lun, int tag)
+static void ufshcd_init_lrb(struct ufs_hba *hba, struct scsi_cmnd *cmd)
{
+ const int i = scsi_cmd_to_rq(cmd)->tag;
+ struct utp_transfer_cmd_desc *cmd_descp =
+ (void *)hba->ucdl_base_addr + i * ufshcd_get_ucd_size(hba);
+ struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
+ dma_addr_t cmd_desc_element_addr =
+ hba->ucdl_dma_addr + i * ufshcd_get_ucd_size(hba);
+ u16 response_offset = le16_to_cpu(utrdlp[i].response_upiu_offset);
+ u16 prdt_offset = le16_to_cpu(utrdlp[i].prd_table_offset);
+ struct ufshcd_lrb *lrb = scsi_cmd_priv(cmd);
+
+ lrb->utr_descriptor_ptr = utrdlp + i;
+ lrb->utrd_dma_addr =
+ hba->utrdl_dma_addr + i * sizeof(struct utp_transfer_req_desc);
+ lrb->ucd_req_ptr = (struct utp_upiu_req *)cmd_descp->command_upiu;
+ lrb->ucd_req_dma_addr = cmd_desc_element_addr;
+ lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp->response_upiu;
+ lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
+ lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp->prd_table;
+ lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
+}
+
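For reference, the pointer arithmetic above places each command's UTP command descriptor (UCD) at a fixed stride inside one DMA-coherent allocation. A worked example, assuming a hypothetical ufshcd_get_ucd_size() of 1024 bytes and tag i == 3:

/*
 * cmd_descp             = ucdl_base_addr + 3 * 1024   (CPU address)
 * cmd_desc_element_addr = ucdl_dma_addr  + 3 * 1024   (DMA address)
 * ucd_rsp_dma_addr      = element base + response_upiu_offset
 * ucd_prdt_dma_addr     = element base + prd_table_offset
 */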
+static void __ufshcd_setup_cmd(struct ufs_hba *hba, struct scsi_cmnd *cmd,
+ u8 lun, int tag)
+{
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+
+ ufshcd_init_lrb(hba, cmd);
+
memset(lrbp->ucd_req_ptr, 0, sizeof(*lrbp->ucd_req_ptr));
- lrbp->cmd = cmd;
- lrbp->task_tag = tag;
lrbp->lun = lun;
- ufshcd_prepare_lrbp_crypto(cmd ? scsi_cmd_to_rq(cmd) : NULL, lrbp);
+ ufshcd_prepare_lrbp_crypto(ufshcd_is_scsi_cmd(cmd) ?
+ scsi_cmd_to_rq(cmd) : NULL, lrbp);
}
-static void ufshcd_setup_scsi_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
- struct scsi_cmnd *cmd, u8 lun, int tag)
+static void ufshcd_setup_scsi_cmd(struct ufs_hba *hba, struct scsi_cmnd *cmd,
+ u8 lun, int tag)
{
- __ufshcd_setup_cmd(lrbp, cmd, lun, tag);
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+
+ __ufshcd_setup_cmd(hba, cmd, lun, tag);
lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
lrbp->req_abort_skip = false;
- ufshcd_comp_scsi_upiu(hba, lrbp);
+ ufshcd_comp_scsi_upiu(hba, cmd);
}
/**
@@ -2970,25 +3016,13 @@ static void ufshcd_map_queues(struct Scsi_Host *shost)
}
}
-static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
+/*
+ * The only purpose of this function is to make the SCSI core skip the memset()
+ * call for the private command data.
+ */
+static int ufshcd_init_cmd_priv(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
- struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr +
- i * ufshcd_get_ucd_size(hba);
- struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
- dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
- i * ufshcd_get_ucd_size(hba);
- u16 response_offset = le16_to_cpu(utrdlp[i].response_upiu_offset);
- u16 prdt_offset = le16_to_cpu(utrdlp[i].prd_table_offset);
-
- lrb->utr_descriptor_ptr = utrdlp + i;
- lrb->utrd_dma_addr = hba->utrdl_dma_addr +
- i * sizeof(struct utp_transfer_req_desc);
- lrb->ucd_req_ptr = (struct utp_upiu_req *)cmd_descp->command_upiu;
- lrb->ucd_req_dma_addr = cmd_desc_element_addr;
- lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp->response_upiu;
- lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
- lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp->prd_table;
- lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
+ return 0;
}
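The empty init_cmd_priv() above works together with the .cmd_size member added to the host template later in this patch: the SCSI core allocates sizeof(struct ufshcd_lrb) of per-command private data, and scsi_cmd_priv() recovers it. A minimal sketch of that pattern for a generic LLD (hypothetical my_lld names):

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

struct my_lld_priv {
	dma_addr_t desc_dma;	/* per-command descriptor bus address */
};

static int my_lld_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	struct my_lld_priv *priv = scsi_cmd_priv(cmd);
	const int tag = scsi_cmd_to_rq(cmd)->tag;

	/* Program the hardware using @tag and @priv->desc_dma here. */
	(void)priv;
	(void)tag;
	return 0;
}

static const struct scsi_host_template my_lld_template = {
	.cmd_size = sizeof(struct my_lld_priv),
	.queuecommand = my_lld_queuecommand,
};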
/**
@@ -3002,7 +3036,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
struct ufs_hba *hba = shost_priv(host);
int tag = scsi_cmd_to_rq(cmd)->tag;
- struct ufshcd_lrb *lrbp;
int err = 0;
struct ufs_hw_queue *hwq = NULL;
@@ -3053,11 +3086,10 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
ufshcd_hold(hba);
- lrbp = &hba->lrb[tag];
+ ufshcd_setup_scsi_cmd(hba, cmd,
+ ufshcd_scsi_to_upiu_lun(cmd->device->lun), tag);
- ufshcd_setup_scsi_cmd(hba, lrbp, cmd, ufshcd_scsi_to_upiu_lun(cmd->device->lun), tag);
-
- err = ufshcd_map_sg(hba, lrbp);
+ err = ufshcd_map_sg(hba, cmd);
if (err) {
ufshcd_release(hba);
goto out;
@@ -3066,7 +3098,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
if (hba->mcq_enabled)
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
- ufshcd_send_command(hba, tag, hwq);
+ ufshcd_send_command(hba, cmd, hwq);
out:
if (ufs_trigger_eh(hba)) {
@@ -3080,10 +3112,26 @@ out:
return err;
}
-static void ufshcd_setup_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
- enum dev_cmd_type cmd_type, u8 lun, int tag)
+static int ufshcd_queue_reserved_command(struct Scsi_Host *host,
+ struct scsi_cmnd *cmd)
+{
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+ struct request *rq = scsi_cmd_to_rq(cmd);
+ struct ufs_hba *hba = shost_priv(host);
+ struct ufs_hw_queue *hwq =
+ hba->mcq_enabled ? ufshcd_mcq_req_to_hwq(hba, rq) : NULL;
+
+ ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
+ ufshcd_send_command(hba, cmd, hwq);
+ return 0;
+}
+
+static void ufshcd_setup_dev_cmd(struct ufs_hba *hba, struct scsi_cmnd *cmd,
+ enum dev_cmd_type cmd_type, u8 lun, int tag)
{
- __ufshcd_setup_cmd(lrbp, NULL, lun, tag);
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+
+ __ufshcd_setup_cmd(hba, cmd, lun, tag);
lrbp->intr_cmd = true; /* No interrupt aggregation */
hba->dev_cmd.type = cmd_type;
}
@@ -3091,12 +3139,12 @@ static void ufshcd_setup_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
/*
* Return: 0 upon success; < 0 upon failure.
*/
-static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
+static int ufshcd_compose_dev_cmd(struct ufs_hba *hba, struct scsi_cmnd *cmd,
+ enum dev_cmd_type cmd_type, int tag)
{
- ufshcd_setup_dev_cmd(hba, lrbp, cmd_type, 0, tag);
+ ufshcd_setup_dev_cmd(hba, cmd, cmd_type, 0, tag);
- return ufshcd_compose_devman_upiu(hba, lrbp);
+ return ufshcd_compose_devman_upiu(hba, cmd);
}
/*
@@ -3207,87 +3255,6 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
return err;
}
-/*
- * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
- * < 0 if another error occurred.
- */
-static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp, int max_timeout)
-{
- unsigned long time_left = msecs_to_jiffies(max_timeout);
- unsigned long flags;
- bool pending;
- int err;
-
-retry:
- time_left = wait_for_completion_timeout(&hba->dev_cmd.complete,
- time_left);
-
- if (likely(time_left)) {
- err = ufshcd_get_tr_ocs(lrbp, NULL);
- if (!err)
- err = ufshcd_dev_cmd_completion(hba, lrbp);
- } else {
- err = -ETIMEDOUT;
- dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
- __func__, lrbp->task_tag);
-
- /* MCQ mode */
- if (hba->mcq_enabled) {
- /* successfully cleared the command, retry if needed */
- if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0)
- err = -EAGAIN;
- return err;
- }
-
- /* SDB mode */
- if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0) {
- /* successfully cleared the command, retry if needed */
- err = -EAGAIN;
- /*
- * Since clearing the command succeeded we also need to
- * clear the task tag bit from the outstanding_reqs
- * variable.
- */
- spin_lock_irqsave(&hba->outstanding_lock, flags);
- pending = test_bit(lrbp->task_tag,
- &hba->outstanding_reqs);
- if (pending)
- __clear_bit(lrbp->task_tag,
- &hba->outstanding_reqs);
- spin_unlock_irqrestore(&hba->outstanding_lock, flags);
-
- if (!pending) {
- /*
- * The completion handler ran while we tried to
- * clear the command.
- */
- time_left = 1;
- goto retry;
- }
- } else {
- dev_err(hba->dev, "%s: failed to clear tag %d\n",
- __func__, lrbp->task_tag);
-
- spin_lock_irqsave(&hba->outstanding_lock, flags);
- pending = test_bit(lrbp->task_tag,
- &hba->outstanding_reqs);
- spin_unlock_irqrestore(&hba->outstanding_lock, flags);
-
- if (!pending) {
- /*
- * The completion handler ran while we tried to
- * clear the command.
- */
- time_left = 1;
- goto retry;
- }
- }
- }
-
- return err;
-}
-
static void ufshcd_dev_man_lock(struct ufs_hba *hba)
{
ufshcd_hold(hba);
@@ -3302,23 +3269,40 @@ static void ufshcd_dev_man_unlock(struct ufs_hba *hba)
ufshcd_release(hba);
}
+static struct scsi_cmnd *ufshcd_get_dev_mgmt_cmd(struct ufs_hba *hba)
+{
+ /*
+ * The caller must hold this lock to guarantee that the NOWAIT
+ * allocation will succeed.
+ */
+ lockdep_assert_held(&hba->dev_cmd.lock);
+
+ return scsi_get_internal_cmd(
+ hba->host->pseudo_sdev, DMA_TO_DEVICE,
+ BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+}
+
+static void ufshcd_put_dev_mgmt_cmd(struct scsi_cmnd *cmd)
+{
+ scsi_put_internal_cmd(cmd);
+}
+
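A hedged sketch of the allocate/issue/release flow these helpers enable; ufshcd_dev_man_lock()/unlock() are the existing wrappers shown below:

static int example_dev_mgmt(struct ufs_hba *hba)
{
	struct scsi_cmnd *cmd;
	int err = 0;

	ufshcd_dev_man_lock(hba);
	cmd = ufshcd_get_dev_mgmt_cmd(hba);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}
	/* Fill the UPIU via scsi_cmd_priv(cmd) and issue it here. */
	ufshcd_put_dev_mgmt_cmd(cmd);
unlock:
	ufshcd_dev_man_unlock(hba);
	return err;
}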
/*
* Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
* < 0 if another error occurred.
*/
-static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
- const u32 tag, int timeout)
+static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct scsi_cmnd *cmd,
+ const u32 tag, int timeout)
{
- int err;
-
- ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
- ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
- err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
-
- ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
- (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+ struct request *rq = scsi_cmd_to_rq(cmd);
+ blk_status_t sts;
- return err;
+ rq->timeout = timeout;
+ sts = blk_execute_rq(rq, true);
+ if (sts != BLK_STS_OK)
+ return blk_status_to_errno(sts);
+ return lrbp->utr_descriptor_ptr->header.ocs;
}
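The bespoke wait/clear loop removed above is replaced by synchronous execution through the block layer, which handles timeouts via its regular timeout machinery. A minimal sketch of the pattern, assuming @cmd came from scsi_get_internal_cmd() and that the caller's timeout is expressed in milliseconds:

static int example_exec_internal(struct scsi_cmnd *cmd, unsigned int timeout_ms)
{
	struct request *rq = scsi_cmd_to_rq(cmd);
	blk_status_t sts;

	rq->timeout = msecs_to_jiffies(timeout_ms);
	sts = blk_execute_rq(rq, true);	/* true: insert at queue head */
	return blk_status_to_errno(sts);
}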
/**
@@ -3336,18 +3320,31 @@ static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type, int timeout)
{
- const u32 tag = hba->reserved_slot;
- struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+ struct scsi_cmnd *cmd = ufshcd_get_dev_mgmt_cmd(hba);
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+ u32 tag;
int err;
- /* Protects use of hba->reserved_slot. */
+ /* Protects use of hba->dev_cmd. */
lockdep_assert_held(&hba->dev_cmd.lock);
- err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
+ if (WARN_ON_ONCE(!cmd))
+ return -ENOMEM;
+
+ tag = scsi_cmd_to_rq(cmd)->tag;
+
+ err = ufshcd_compose_dev_cmd(hba, cmd, cmd_type, tag);
if (unlikely(err))
- return err;
+ goto out;
+
+ err = ufshcd_issue_dev_cmd(hba, cmd, tag, timeout);
+ if (err == 0)
+ err = ufshcd_dev_cmd_completion(hba, lrbp);
+
+out:
+ ufshcd_put_dev_mgmt_cmd(cmd);
- return ufshcd_issue_dev_cmd(hba, lrbp, tag, timeout);
+ return err;
}
/**
@@ -3773,16 +3770,14 @@ static inline char ufshcd_remove_non_printable(u8 ch)
* @desc_index: descriptor index
* @buf: pointer to buffer where descriptor would be read,
* the caller should free the memory.
- * @ascii: if true convert from unicode to ascii characters
- * null terminated string.
+ * @fmt: if %SD_ASCII_STD, convert from UTF-16 to ASCII
*
* Return:
* * string size on success.
* * -ENOMEM: on allocation failure
* * -EINVAL: on a wrong parameter
*/
-int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
- u8 **buf, bool ascii)
+int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, u8 **buf, enum ufs_descr_fmt fmt)
{
struct uc_string_id *uc_str;
u8 *str;
@@ -3811,7 +3806,7 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
goto out;
}
- if (ascii) {
+ if (fmt == SD_ASCII_STD) {
ssize_t ascii_len;
int i;
/* remove header and divide by 2 to move from UTF16 to UTF8 */
@@ -3837,7 +3832,7 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
str[ret++] = '\0';
} else {
- str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
+ str = kmemdup(uc_str->uc, uc_str->len, GFP_KERNEL);
if (!str) {
ret = -ENOMEM;
goto out;
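A hedged usage sketch for the new fmt parameter: reading a string descriptor as a NUL-terminated ASCII string, where the caller owns (and must free) the returned buffer:

static int example_read_model(struct ufs_hba *hba, u8 model_index)
{
	u8 *model = NULL;
	int len;

	len = ufshcd_read_string_desc(hba, model_index, &model, SD_ASCII_STD);
	if (len < 0)
		return len;
	dev_info(hba->dev, "model: %s\n", model);
	kfree(model);
	return 0;
}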
@@ -3977,14 +3972,6 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
}
skip_utmrdl:
- /* Allocate memory for local reference block */
- hba->lrb = devm_kcalloc(hba->dev,
- hba->nutrs, sizeof(struct ufshcd_lrb),
- GFP_KERNEL);
- if (!hba->lrb) {
- dev_err(hba->dev, "LRB Memory allocation failed\n");
- goto out;
- }
return 0;
out:
return -ENOMEM;
@@ -4046,8 +4033,6 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
utrdlp[i].response_upiu_length =
cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
}
-
- ufshcd_init_lrb(hba, &hba->lrb[i], i);
}
}
@@ -5253,10 +5238,15 @@ static void ufshcd_lu_init(struct ufs_hba *hba, struct scsi_device *sdev)
desc_buf[UNIT_DESC_PARAM_LU_WR_PROTECT] == UFS_LU_POWER_ON_WP)
hba->dev_info.is_lu_power_on_wp = true;
- /* In case of RPMB LU, check if advanced RPMB mode is enabled */
- if (desc_buf[UNIT_DESC_PARAM_UNIT_INDEX] == UFS_UPIU_RPMB_WLUN &&
- desc_buf[RPMB_UNIT_DESC_PARAM_REGION_EN] & BIT(4))
- hba->dev_info.b_advanced_rpmb_en = true;
+ /* For an RPMB LU, check whether advanced RPMB mode is enabled and read the region sizes */

+ if (desc_buf[UNIT_DESC_PARAM_UNIT_INDEX] == UFS_UPIU_RPMB_WLUN) {
+ if (desc_buf[RPMB_UNIT_DESC_PARAM_REGION_EN] & BIT(4))
+ hba->dev_info.b_advanced_rpmb_en = true;
+ hba->dev_info.rpmb_region_size[0] = desc_buf[RPMB_UNIT_DESC_PARAM_REGION0_SIZE];
+ hba->dev_info.rpmb_region_size[1] = desc_buf[RPMB_UNIT_DESC_PARAM_REGION1_SIZE];
+ hba->dev_info.rpmb_region_size[2] = desc_buf[RPMB_UNIT_DESC_PARAM_REGION2_SIZE];
+ hba->dev_info.rpmb_region_size[3] = desc_buf[RPMB_UNIT_DESC_PARAM_REGION3_SIZE];
+ }
kfree(desc_buf);
@@ -5396,19 +5386,18 @@ static void ufshcd_sdev_destroy(struct scsi_device *sdev)
/**
* ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
- * @lrbp: pointer to local reference block of completed command
+ * @cmd: SCSI command
* @scsi_status: SCSI command status
*
* Return: value base on SCSI command status.
*/
-static inline int
-ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
+static inline int ufshcd_scsi_cmd_status(struct scsi_cmnd *cmd, int scsi_status)
{
int result = 0;
switch (scsi_status) {
case SAM_STAT_CHECK_CONDITION:
- ufshcd_copy_sense_data(lrbp);
+ ufshcd_copy_sense_data(cmd);
fallthrough;
case SAM_STAT_GOOD:
result |= DID_OK << 16 | scsi_status;
@@ -5416,7 +5405,7 @@ ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
case SAM_STAT_TASK_SET_FULL:
case SAM_STAT_BUSY:
case SAM_STAT_TASK_ABORTED:
- ufshcd_copy_sense_data(lrbp);
+ ufshcd_copy_sense_data(cmd);
result |= scsi_status;
break;
default:
@@ -5430,15 +5419,17 @@ ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
/**
* ufshcd_transfer_rsp_status - Get overall status of the response
* @hba: per adapter instance
- * @lrbp: pointer to local reference block of completed command
+ * @cmd: SCSI command
* @cqe: pointer to the completion queue entry
*
* Return: result of the command to notify SCSI midlayer.
*/
-static inline int
-ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
- struct cq_entry *cqe)
+static inline int ufshcd_transfer_rsp_status(struct ufs_hba *hba,
+ struct scsi_cmnd *cmd,
+ struct cq_entry *cqe)
{
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+ const int tag = scsi_cmd_to_rq(cmd)->tag;
int result = 0;
int scsi_status;
enum utp_ocs ocs;
@@ -5452,7 +5443,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
* not set either flag.
*/
if (resid && !(upiu_flags & UPIU_RSP_FLAG_OVERFLOW))
- scsi_set_resid(lrbp->cmd, resid);
+ scsi_set_resid(cmd, resid);
/* overall command status of utrd */
ocs = ufshcd_get_tr_ocs(lrbp, cqe);
@@ -5473,7 +5464,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
* to notify the SCSI midlayer of the command status
*/
scsi_status = lrbp->ucd_rsp_ptr->header.status;
- result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
+ result = ufshcd_scsi_cmd_status(cmd, scsi_status);
/*
* Currently we are only supporting BKOPs exception
@@ -5510,10 +5501,8 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
case OCS_ABORTED:
case OCS_INVALID_COMMAND_STATUS:
result |= DID_REQUEUE << 16;
- dev_warn(hba->dev,
- "OCS %s from controller for tag %d\n",
- (ocs == OCS_ABORTED ? "aborted" : "invalid"),
- lrbp->task_tag);
+ dev_warn(hba->dev, "OCS %s from controller for tag %d\n",
+ ocs == OCS_ABORTED ? "aborted" : "invalid", tag);
break;
case OCS_INVALID_CMD_TABLE_ATTR:
case OCS_INVALID_PRDT_ATTR:
@@ -5526,17 +5515,19 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
case OCS_GENERAL_CRYPTO_ERROR:
default:
result |= DID_ERROR << 16;
- dev_err(hba->dev,
- "OCS error from controller = %x for tag %d\n",
- ocs, lrbp->task_tag);
+ dev_err(hba->dev, "OCS error from controller = %x for tag %d\n",
+ ocs, tag);
ufshcd_print_evt_hist(hba);
ufshcd_print_host_state(hba);
break;
} /* end of switch */
if ((host_byte(result) != DID_OK) &&
- (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
- ufshcd_print_tr(hba, lrbp->task_tag, true);
+ (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs) {
+ if (cqe)
+ ufshcd_hex_dump("UPIU CQE: ", cqe, sizeof(struct cq_entry));
+ ufshcd_print_tr(hba, cmd, true);
+ }
return result;
}
@@ -5575,7 +5566,7 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
guard(spinlock_irqsave)(hba->host->host_lock);
cmd = hba->active_uic_cmd;
if (!cmd)
- goto unlock;
+ return retval;
if (ufshcd_is_auto_hibern8_error(hba, intr_status))
hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
@@ -5584,13 +5575,13 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
cmd->argument2 |= ufshcd_get_uic_cmd_result(hba);
cmd->argument3 = ufshcd_get_dme_attr_val(hba);
if (!hba->uic_async_done)
- cmd->cmd_active = 0;
+ cmd->cmd_active = false;
complete(&cmd->done);
retval = IRQ_HANDLED;
}
if (intr_status & UFSHCD_UIC_PWR_MASK && hba->uic_async_done) {
- cmd->cmd_active = 0;
+ cmd->cmd_active = false;
complete(hba->uic_async_done);
retval = IRQ_HANDLED;
}
@@ -5598,18 +5589,14 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
if (retval == IRQ_HANDLED)
ufshcd_add_uic_command_trace(hba, cmd, UFS_CMD_COMP);
-unlock:
return retval;
}
/* Release the resources allocated for processing a SCSI command. */
-void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp)
+void ufshcd_release_scsi_cmd(struct ufs_hba *hba, struct scsi_cmnd *cmd)
{
- struct scsi_cmnd *cmd = lrbp->cmd;
-
scsi_dma_unmap(cmd);
- ufshcd_crypto_clear_prdt(hba, lrbp);
+ ufshcd_crypto_clear_prdt(hba, cmd);
ufshcd_release(hba);
ufshcd_clk_scaling_update_busy(hba);
}
@@ -5623,31 +5610,39 @@ void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
struct cq_entry *cqe)
{
- struct ufshcd_lrb *lrbp;
- struct scsi_cmnd *cmd;
+ struct scsi_cmnd *cmd = ufshcd_tag_to_cmd(hba, task_tag);
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
enum utp_ocs ocs;
- lrbp = &hba->lrb[task_tag];
+ if (WARN_ONCE(!cmd, "cqe->command_desc_base_addr = %#llx\n",
+ le64_to_cpu(cqe->command_desc_base_addr)))
+ return;
+
if (hba->monitor.enabled) {
lrbp->compl_time_stamp = ktime_get();
lrbp->compl_time_stamp_local_clock = local_clock();
}
- cmd = lrbp->cmd;
- if (cmd) {
- if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
- ufshcd_update_monitor(hba, lrbp);
- ufshcd_add_command_trace(hba, task_tag, UFS_CMD_COMP);
- cmd->result = ufshcd_transfer_rsp_status(hba, lrbp, cqe);
- ufshcd_release_scsi_cmd(hba, lrbp);
- /* Do not touch lrbp after scsi done */
- scsi_done(cmd);
+ if (ufshcd_is_scsi_cmd(cmd)) {
+ if (unlikely(ufshcd_should_inform_monitor(hba, cmd)))
+ ufshcd_update_monitor(hba, cmd);
+ ufshcd_add_command_trace(hba, cmd, UFS_CMD_COMP);
+ cmd->result = ufshcd_transfer_rsp_status(hba, cmd, cqe);
+ ufshcd_release_scsi_cmd(hba, cmd);
} else {
if (cqe) {
- ocs = le32_to_cpu(cqe->status) & MASK_OCS;
+ ocs = cqe->overall_status & MASK_OCS;
lrbp->utr_descriptor_ptr->header.ocs = ocs;
+ } else {
+ ocs = lrbp->utr_descriptor_ptr->header.ocs;
}
- complete(&hba->dev_cmd.complete);
+ ufshcd_add_query_upiu_trace(
+ hba,
+ ocs == OCS_SUCCESS ? UFS_QUERY_COMP : UFS_QUERY_ERR,
+ (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
+ cmd->result = 0;
}
+ /* Do not touch lrbp after scsi_done() has been called. */
+ scsi_done(cmd);
}
/**
@@ -5675,7 +5670,7 @@ static void ufshcd_clear_polled(struct ufs_hba *hba,
int tag;
for_each_set_bit(tag, completed_reqs, hba->nutrs) {
- struct scsi_cmnd *cmd = hba->lrb[tag].cmd;
+ struct scsi_cmnd *cmd = scsi_host_find_tag(hba->host, tag);
if (!cmd)
continue;
@@ -5720,6 +5715,47 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
return completed_reqs != 0;
}
+static bool ufshcd_mcq_force_compl_one(struct request *rq, void *priv)
+{
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+ struct scsi_device *sdev = rq->q->queuedata;
+ struct Scsi_Host *shost = sdev->host;
+ struct ufs_hba *hba = shost_priv(shost);
+ struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq);
+
+ if (blk_mq_is_reserved_rq(rq) || !hwq)
+ return true;
+
+ ufshcd_mcq_compl_all_cqes_lock(hba, hwq);
+
+ /*
+ * Explicitly complete commands whose CQEs are not present in the CQ.
+ */
+ scoped_guard(spinlock_irqsave, &hwq->cq_lock) {
+ if (!test_bit(SCMD_STATE_COMPLETE, &cmd->state)) {
+ set_host_byte(cmd, DID_REQUEUE);
+ ufshcd_release_scsi_cmd(hba, cmd);
+ scsi_done(cmd);
+ }
+ }
+
+ return true;
+}
+
+static bool ufshcd_mcq_compl_one(struct request *rq, void *priv)
+{
+ struct scsi_device *sdev = rq->q->queuedata;
+ struct Scsi_Host *shost = sdev->host;
+ struct ufs_hba *hba = shost_priv(shost);
+ struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq);
+
+ if (!blk_mq_is_reserved_rq(rq) && hwq)
+ ufshcd_mcq_poll_cqe_lock(hba, hwq);
+
+ return true;
+}
+
/**
* ufshcd_mcq_compl_pending_transfer - MCQ mode function. It is
* invoked from the error handler context or ufshcd_host_reset_and_restore()
@@ -5734,40 +5770,10 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
bool force_compl)
{
- struct ufs_hw_queue *hwq;
- struct ufshcd_lrb *lrbp;
- struct scsi_cmnd *cmd;
- unsigned long flags;
- int tag;
-
- for (tag = 0; tag < hba->nutrs; tag++) {
- lrbp = &hba->lrb[tag];
- cmd = lrbp->cmd;
- if (!ufshcd_cmd_inflight(cmd) ||
- test_bit(SCMD_STATE_COMPLETE, &cmd->state))
- continue;
-
- hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
- if (!hwq)
- continue;
-
- if (force_compl) {
- ufshcd_mcq_compl_all_cqes_lock(hba, hwq);
- /*
- * For those cmds of which the cqes are not present
- * in the cq, complete them explicitly.
- */
- spin_lock_irqsave(&hwq->cq_lock, flags);
- if (cmd && !test_bit(SCMD_STATE_COMPLETE, &cmd->state)) {
- set_host_byte(cmd, DID_REQUEUE);
- ufshcd_release_scsi_cmd(hba, lrbp);
- scsi_done(cmd);
- }
- spin_unlock_irqrestore(&hwq->cq_lock, flags);
- } else {
- ufshcd_mcq_poll_cqe_lock(hba, hwq);
- }
- }
+ blk_mq_tagset_busy_iter(&hba->host->tag_set,
+ force_compl ? ufshcd_mcq_force_compl_one :
+ ufshcd_mcq_compl_one,
+ NULL);
}
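blk_mq_tagset_busy_iter() walks every in-flight request in the tag set, which is what lets the driver drop the open-coded scan over hba->lrb. A minimal sketch of the iterator pattern (hypothetical callback and wrapper names):

static bool example_count_one(struct request *rq, void *priv)
{
	unsigned int *count = priv;

	if (!blk_mq_is_reserved_rq(rq))
		(*count)++;
	return true;	/* keep iterating */
}

static unsigned int example_count_inflight(struct Scsi_Host *shost)
{
	unsigned int count = 0;

	blk_mq_tagset_busy_iter(&shost->tag_set, example_count_one, &count);
	return count;
}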
/**
@@ -6612,9 +6618,12 @@ static bool ufshcd_abort_one(struct request *rq, void *priv)
struct Scsi_Host *shost = sdev->host;
struct ufs_hba *hba = shost_priv(shost);
+ if (blk_mq_is_reserved_rq(rq))
+ return true;
+
*ret = ufshcd_try_to_abort_task(hba, tag);
dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
- hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
+ ufshcd_is_scsi_cmd(cmd) ? cmd->cmnd[0] : -1,
*ret ? "failed" : "succeeded");
return *ret == 0;
@@ -7349,15 +7358,21 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type,
enum query_opcode desc_op)
{
- const u32 tag = hba->reserved_slot;
- struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+ struct scsi_cmnd *cmd = ufshcd_get_dev_mgmt_cmd(hba);
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+ u32 tag;
int err = 0;
u8 upiu_flags;
- /* Protects use of hba->reserved_slot. */
+ /* Protects use of hba->dev_cmd. */
lockdep_assert_held(&hba->dev_cmd.lock);
- ufshcd_setup_dev_cmd(hba, lrbp, cmd_type, 0, tag);
+ if (WARN_ON_ONCE(!cmd))
+ return -ENOMEM;
+
+ tag = scsi_cmd_to_rq(cmd)->tag;
+
+ ufshcd_setup_dev_cmd(hba, cmd, cmd_type, 0, tag);
ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, DMA_NONE, 0);
@@ -7377,12 +7392,9 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
- /*
- * ignore the returning value here - ufshcd_check_query_response is
- * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
- * read the response directly ignoring all errors.
- */
- ufshcd_issue_dev_cmd(hba, lrbp, tag, dev_cmd_timeout);
+ err = ufshcd_issue_dev_cmd(hba, cmd, tag, dev_cmd_timeout);
+ if (err)
+ goto put_dev_mgmt_cmd;
/* just copy the upiu response as it is */
memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
@@ -7403,6 +7415,9 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
}
}
+put_dev_mgmt_cmd:
+ ufshcd_put_dev_mgmt_cmd(cmd);
+
return err;
}
@@ -7496,8 +7511,9 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
struct ufs_ehs *rsp_ehs, int sg_cnt, struct scatterlist *sg_list,
enum dma_data_direction dir)
{
- const u32 tag = hba->reserved_slot;
- struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+ struct scsi_cmnd *cmd;
+ struct ufshcd_lrb *lrbp;
+ u32 tag;
int err = 0;
int result;
u8 upiu_flags;
@@ -7505,10 +7521,20 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
u16 ehs_len;
int ehs = (hba->capabilities & MASK_EHSLUTRD_SUPPORTED) ? 2 : 0;
- /* Protects use of hba->reserved_slot. */
ufshcd_dev_man_lock(hba);
- ufshcd_setup_dev_cmd(hba, lrbp, DEV_CMD_TYPE_RPMB, UFS_UPIU_RPMB_WLUN, tag);
+ cmd = ufshcd_get_dev_mgmt_cmd(hba);
+
+ if (WARN_ON_ONCE(!cmd)) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ lrbp = scsi_cmd_priv(cmd);
+ tag = scsi_cmd_to_rq(cmd)->tag;
+
+ ufshcd_setup_dev_cmd(hba, cmd, DEV_CMD_TYPE_RPMB, UFS_UPIU_RPMB_WLUN,
+ tag);
ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, DMA_NONE, ehs);
@@ -7525,8 +7551,11 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
- err = ufshcd_issue_dev_cmd(hba, lrbp, tag, ADVANCED_RPMB_REQ_TIMEOUT);
+ err = ufshcd_issue_dev_cmd(hba, cmd, tag, ADVANCED_RPMB_REQ_TIMEOUT);
+ if (err)
+ goto put_dev_mgmt_cmd;
+ err = ufshcd_dev_cmd_completion(hba, lrbp);
if (!err) {
/* Just copy the upiu response as it is */
memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
@@ -7550,11 +7579,45 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
}
}
+put_dev_mgmt_cmd:
+ ufshcd_put_dev_mgmt_cmd(cmd);
+
+unlock:
ufshcd_dev_man_unlock(hba);
return err ? : result;
}
+static bool ufshcd_clear_lu_cmds(struct request *req, void *priv)
+{
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
+ struct scsi_device *sdev = cmd->device;
+ struct Scsi_Host *shost = sdev->host;
+ struct ufs_hba *hba = shost_priv(shost);
+ const u64 lun = *(u64 *)priv;
+ const u32 tag = req->tag;
+
+ if (blk_mq_is_reserved_rq(req) || sdev->lun != lun)
+ return true;
+
+ if (ufshcd_clear_cmd(hba, tag) < 0) {
+ dev_err(hba->dev, "%s: failed to clear request %d\n", __func__,
+ tag);
+ return true;
+ }
+
+ if (hba->mcq_enabled) {
+ struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, req);
+
+ if (hwq)
+ ufshcd_mcq_poll_cqe_lock(hba, hwq);
+ return true;
+ }
+
+ ufshcd_compl_one_cqe(hba, tag, NULL);
+ return true;
+}
+
/**
* ufshcd_eh_device_reset_handler() - Reset a single logical unit.
* @cmd: SCSI command pointer
@@ -7563,12 +7626,8 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
*/
static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
- unsigned long flags, pending_reqs = 0, not_cleared = 0;
struct Scsi_Host *host;
struct ufs_hba *hba;
- struct ufs_hw_queue *hwq;
- struct ufshcd_lrb *lrbp;
- u32 pos, not_cleared_mask = 0;
int err;
u8 resp = 0xF, lun;
@@ -7577,50 +7636,16 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
- if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
- if (!err)
- err = resp;
- goto out;
- }
-
- if (hba->mcq_enabled) {
- for (pos = 0; pos < hba->nutrs; pos++) {
- lrbp = &hba->lrb[pos];
- if (ufshcd_cmd_inflight(lrbp->cmd) &&
- lrbp->lun == lun) {
- ufshcd_clear_cmd(hba, pos);
- hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
- ufshcd_mcq_poll_cqe_lock(hba, hwq);
- }
- }
- err = 0;
- goto out;
- }
-
- /* clear the commands that were pending for corresponding LUN */
- spin_lock_irqsave(&hba->outstanding_lock, flags);
- for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs)
- if (hba->lrb[pos].lun == lun)
- __set_bit(pos, &pending_reqs);
- hba->outstanding_reqs &= ~pending_reqs;
- spin_unlock_irqrestore(&hba->outstanding_lock, flags);
-
- for_each_set_bit(pos, &pending_reqs, hba->nutrs) {
- if (ufshcd_clear_cmd(hba, pos) < 0) {
- spin_lock_irqsave(&hba->outstanding_lock, flags);
- not_cleared = 1U << pos &
- ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- hba->outstanding_reqs |= not_cleared;
- not_cleared_mask |= not_cleared;
- spin_unlock_irqrestore(&hba->outstanding_lock, flags);
-
- dev_err(hba->dev, "%s: failed to clear request %d\n",
- __func__, pos);
- }
+ if (err) {
+ } else if (resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
+ err = resp;
+ } else {
+ /* Clear the commands that were pending for the corresponding LUN. */
+ blk_mq_tagset_busy_iter(&hba->host->tag_set,
+ ufshcd_clear_lu_cmds,
+ &cmd->device->lun);
}
- __ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared_mask);
-out:
hba->req_abort_count = 0;
ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err);
if (!err) {
@@ -7634,11 +7659,12 @@ out:
static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
{
- struct ufshcd_lrb *lrbp;
int tag;
for_each_set_bit(tag, &bitmap, hba->nutrs) {
- lrbp = &hba->lrb[tag];
+ struct scsi_cmnd *cmd = ufshcd_tag_to_cmd(hba, tag);
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+
lrbp->req_abort_skip = true;
}
}
@@ -7646,7 +7672,7 @@ static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
/**
* ufshcd_try_to_abort_task - abort a specific task
* @hba: Pointer to adapter instance
- * @tag: Task tag/index to be aborted
+ * @tag: Tag of the task to be aborted
*
* Abort the pending command in device by sending UFS_ABORT_TASK task management
* command, and in host controller by clearing the door-bell register. There can
@@ -7658,14 +7684,15 @@ static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
*/
int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
{
- struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+ struct scsi_cmnd *cmd = ufshcd_tag_to_cmd(hba, tag);
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
int err;
int poll_cnt;
u8 resp = 0xF;
for (poll_cnt = 100; poll_cnt; poll_cnt--) {
- err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
- UFS_QUERY_TASK, &resp);
+ err = ufshcd_issue_tm_cmd(hba, lrbp->lun, tag, UFS_QUERY_TASK,
+ &resp);
if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
/* cmd pending in the device */
dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
@@ -7680,7 +7707,7 @@ int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
hba->dev,
"%s: cmd with tag %d not pending in the device.\n",
__func__, tag);
- if (!ufshcd_cmd_inflight(lrbp->cmd)) {
+ if (!ufshcd_cmd_inflight(cmd)) {
dev_info(hba->dev,
"%s: cmd with tag=%d completed.\n",
__func__, tag);
@@ -7698,8 +7725,7 @@ int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
if (!poll_cnt)
return -EBUSY;
- err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
- UFS_ABORT_TASK, &resp);
+ err = ufshcd_issue_tm_cmd(hba, lrbp->lun, tag, UFS_ABORT_TASK, &resp);
if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
if (!err) {
err = resp; /* service response error */
@@ -7727,8 +7753,9 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
{
struct Scsi_Host *host = cmd->device->host;
struct ufs_hba *hba = shost_priv(host);
- int tag = scsi_cmd_to_rq(cmd)->tag;
- struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+ struct request *rq = scsi_cmd_to_rq(cmd);
+ int tag = rq->tag;
+ struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
unsigned long flags;
int err = FAILED;
bool outstanding;
@@ -7757,15 +7784,16 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
* to reduce repeated printouts. For other aborted requests only print
* basic details.
*/
- scsi_print_command(cmd);
+ if (ufshcd_is_scsi_cmd(cmd))
+ scsi_print_command(cmd);
if (!hba->req_abort_count) {
ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag);
ufshcd_print_evt_hist(hba);
ufshcd_print_host_state(hba);
ufshcd_print_pwr_info(hba);
- ufshcd_print_tr(hba, tag, true);
+ ufshcd_print_tr(hba, cmd, true);
} else {
- ufshcd_print_tr(hba, tag, false);
+ ufshcd_print_tr(hba, cmd, false);
}
hba->req_abort_count++;
@@ -7809,7 +7837,10 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
goto release;
}
- err = ufshcd_try_to_abort_task(hba, tag);
+ if (blk_mq_is_reserved_rq(rq))
+ err = ufshcd_clear_cmd(hba, tag);
+ else
+ err = ufshcd_try_to_abort_task(hba, tag);
if (err) {
dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
@@ -7826,7 +7857,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
spin_unlock_irqrestore(&hba->outstanding_lock, flags);
if (outstanding)
- ufshcd_release_scsi_cmd(hba, lrbp);
+ ufshcd_release_scsi_cmd(hba, cmd);
err = SUCCESS;
@@ -8192,8 +8223,11 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
if (IS_ERR(sdev_rpmb)) {
ret = PTR_ERR(sdev_rpmb);
+ hba->ufs_rpmb_wlun = NULL;
+ dev_err(hba->dev, "%s: RPMB WLUN not found\n", __func__);
goto remove_ufs_device_wlun;
}
+ hba->ufs_rpmb_wlun = sdev_rpmb;
ufshcd_blk_pm_runtime_init(sdev_rpmb);
scsi_device_put(sdev_rpmb);
@@ -8461,12 +8495,74 @@ static void ufs_init_rtc(struct ufs_hba *hba, u8 *desc_buf)
dev_info->rtc_update_period = 0;
}
+/**
+ * ufshcd_create_device_id - Generate unique device identifier string
+ * @hba: per-adapter instance
+ * @desc_buf: device descriptor buffer
+ *
+ * Creates a unique device ID string combining manufacturer ID, spec version,
+ * model name, serial number (as hex), device version, and manufacture date.
+ *
+ * Return: allocated device ID string on success, %NULL on failure.
+ */
+static char *ufshcd_create_device_id(struct ufs_hba *hba, u8 *desc_buf)
+{
+ struct ufs_dev_info *dev_info = &hba->dev_info;
+ u16 manufacture_date;
+ u16 device_version;
+ u8 *serial_number;
+ char *serial_hex;
+ char *device_id;
+ u8 serial_index;
+ int serial_len;
+ int ret;
+
+ serial_index = desc_buf[DEVICE_DESC_PARAM_SN];
+
+ ret = ufshcd_read_string_desc(hba, serial_index, &serial_number, SD_RAW);
+ if (ret < 0) {
+ dev_err(hba->dev, "Failed reading Serial Number. err = %d\n", ret);
+ return NULL;
+ }
+
+ device_version = get_unaligned_be16(&desc_buf[DEVICE_DESC_PARAM_DEV_VER]);
+ manufacture_date = get_unaligned_be16(&desc_buf[DEVICE_DESC_PARAM_MANF_DATE]);
+
+ serial_len = ret;
+ /* Allocate buffer for hex string: 2 chars per byte + null terminator */
+ serial_hex = kzalloc(serial_len * 2 + 1, GFP_KERNEL);
+ if (!serial_hex) {
+ kfree(serial_number);
+ return NULL;
+ }
+
+ bin2hex(serial_hex, serial_number, serial_len);
+
+ /*
+ * The device ID format is an ABI shared with the secure world; do not
+ * change it without firmware coordination.
+ */
+ device_id = kasprintf(GFP_KERNEL, "%04X-%04X-%s-%s-%04X-%04X",
+ dev_info->wmanufacturerid, dev_info->wspecversion,
+ dev_info->model, serial_hex, device_version,
+ manufacture_date);
+
+ kfree(serial_hex);
+ kfree(serial_number);
+
+ if (!device_id)
+ dev_warn(hba->dev, "Failed to allocate unique device ID\n");
+
+ return device_id;
+}
+
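For illustration, bin2hex() expands each serial-number byte to two lowercase hex characters and does not NUL-terminate, which is why the buffer above is kzalloc()ed one byte longer. A self-contained sketch with a made-up serial number:

#include <linux/hexdump.h>

static void example_serial_hex(void)
{
	static const u8 serial[4] = { 0xde, 0xad, 0xbe, 0xef };
	char hex[2 * sizeof(serial) + 1] = "";	/* zero-filled, so NUL-terminated */

	bin2hex(hex, serial, sizeof(serial));
	/* hex now holds "deadbeef"; the device ID embeds this string. */
}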
static int ufs_get_device_desc(struct ufs_hba *hba)
{
+ struct ufs_dev_info *dev_info = &hba->dev_info;
+ struct Scsi_Host *shost = hba->host;
int err;
u8 model_index;
u8 *desc_buf;
- struct ufs_dev_info *dev_info = &hba->dev_info;
desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
if (!desc_buf) {
@@ -8494,6 +8590,18 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
dev_info->bqueuedepth = desc_buf[DEVICE_DESC_PARAM_Q_DPTH];
+ /*
+ * According to the UFS standard, the UFS device queue depth
+ * (bQueueDepth) must be in the range 1..255 if the shared queueing
+ * architecture is supported. bQueueDepth is zero if the shared queueing
+ * architecture is not supported.
+ */
+ if (dev_info->bqueuedepth)
+ shost->cmd_per_lun = min(hba->nutrs, dev_info->bqueuedepth) -
+ UFSHCD_NUM_RESERVED;
+ else
+ shost->cmd_per_lun = shost->can_queue;
+
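A worked example of the queue-depth choice above, assuming UFSHCD_NUM_RESERVED == 1 (one tag reserved for device management):

/*
 * nutrs = 64, bQueueDepth = 32  ->  cmd_per_lun = min(64, 32) - 1 = 31
 * nutrs = 64, bQueueDepth =  0  ->  cmd_per_lun = can_queue = 63
 */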
dev_info->rtt_cap = desc_buf[DEVICE_DESC_PARAM_RTT_CAP];
dev_info->hid_sup = get_unaligned_be32(desc_buf +
@@ -8510,6 +8618,9 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
goto out;
}
+ /* Generate unique device ID */
+ dev_info->device_id = ufshcd_create_device_id(hba, desc_buf);
+
hba->luns_avail = desc_buf[DEVICE_DESC_PARAM_NUM_LU] +
desc_buf[DEVICE_DESC_PARAM_NUM_WLU];
@@ -8545,6 +8656,8 @@ static void ufs_put_device_desc(struct ufs_hba *hba)
kfree(dev_info->model);
dev_info->model = NULL;
+ kfree(dev_info->device_id);
+ dev_info->device_id = NULL;
}
/**
@@ -8688,6 +8801,8 @@ static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
hba->dev_info.max_lu_supported = 8;
+ hba->dev_info.rpmb_io_size = desc_buf[GEOMETRY_DESC_PARAM_RPMB_RW_SIZE];
+
out:
kfree(desc_buf);
return err;
@@ -8874,6 +8989,7 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
ufs_bsg_probe(hba);
scsi_scan_host(hba->host);
+ ufs_rpmb_probe(hba);
out:
return ret;
@@ -8891,8 +9007,6 @@ static void ufshcd_release_sdb_queue(struct ufs_hba *hba, int nutrs)
utrdl_size = sizeof(struct utp_transfer_req_desc) * nutrs;
dmam_free_coherent(hba->dev, utrdl_size, hba->utrdl_base_addr,
hba->utrdl_dma_addr);
-
- devm_kfree(hba->dev, hba->lrb);
}
static int ufshcd_alloc_mcq(struct ufs_hba *hba)
@@ -8900,7 +9014,7 @@ static int ufshcd_alloc_mcq(struct ufs_hba *hba)
int ret;
int old_nutrs = hba->nutrs;
- ret = ufshcd_mcq_decide_queue_depth(hba);
+ ret = ufshcd_get_hba_mac(hba);
if (ret < 0)
return ret;
@@ -8926,7 +9040,6 @@ static int ufshcd_alloc_mcq(struct ufs_hba *hba)
goto err;
hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
- hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED;
return 0;
err:
@@ -9164,7 +9277,11 @@ static const struct scsi_host_template ufshcd_driver_template = {
.name = UFSHCD,
.proc_name = UFSHCD,
.map_queues = ufshcd_map_queues,
+ .cmd_size = sizeof(struct ufshcd_lrb),
+ .init_cmd_priv = ufshcd_init_cmd_priv,
.queuecommand = ufshcd_queuecommand,
+ .queue_reserved_command = ufshcd_queue_reserved_command,
+ .nr_reserved_cmds = UFSHCD_NUM_RESERVED,
.mq_poll = ufshcd_poll,
.sdev_init = ufshcd_sdev_init,
.sdev_configure = ufshcd_sdev_configure,
@@ -9775,11 +9892,11 @@ static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
}
/*
- * Some UFS devices require delay after VCC power rail is turned-off.
+ * All UFS devices require a delay after the VCC power rail is turned off.
*/
- if (vcc_off && hba->vreg_info.vcc &&
- hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)
- usleep_range(5000, 5100);
+ if (vcc_off && hba->vreg_info.vcc && !hba->vreg_info.vcc->always_on)
+ usleep_range(hba->vcc_off_delay_us,
+ hba->vcc_off_delay_us + 100);
}
#ifdef CONFIG_PM
@@ -10428,6 +10545,7 @@ void ufshcd_remove(struct ufs_hba *hba)
ufshcd_rpm_get_sync(hba);
ufs_hwmon_remove(hba);
ufs_bsg_remove(hba);
+ ufs_rpmb_remove(hba);
ufs_sysfs_remove_nodes(hba->dev);
cancel_delayed_work_sync(&hba->ufs_rtc_update_work);
blk_mq_destroy_queue(hba->tmf_queue);
@@ -10587,6 +10705,9 @@ static int ufshcd_add_scsi_host(struct ufs_hba *hba)
{
int err;
+ WARN_ON_ONCE(!hba->host->can_queue);
+ WARN_ON_ONCE(!hba->host->cmd_per_lun);
+
if (is_mcq_supported(hba)) {
ufshcd_mcq_enable(hba);
err = ufshcd_alloc_mcq(hba);
@@ -10706,7 +10827,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
UFS_SLEEP_PWR_MODE,
UIC_LINK_HIBERN8_STATE);
- init_completion(&hba->dev_cmd.complete);
+ /*
+ * Most UFS devices require a 1 ms delay after VCC is powered off
+ * before they can be powered on again. Set the default to 2 ms;
+ * platform drivers can override this setting as needed.
+ */
+ hba->vcc_off_delay_us = 2000;
err = ufshcd_hba_init(hba);
if (err)
@@ -10740,7 +10866,11 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
ufshcd_host_memory_configure(hba);
host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
- host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED;
+ /*
+ * Set the queue depth for WLUNs. ufs_get_device_desc() will increase
+ * host->cmd_per_lun to a larger value.
+ */
+ host->cmd_per_lun = 1;
host->max_id = UFSHCD_MAX_ID;
host->max_lun = UFS_MAX_LUNS;
host->max_channel = UFSHCD_MAX_CHANNEL;
@@ -10832,6 +10962,10 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
}
+ err = ufshcd_add_scsi_host(hba);
+ if (err)
+ goto out_disable;
+
/* Hold auto suspend until async scan completes */
pm_runtime_get_sync(dev);
@@ -10882,10 +11016,6 @@ initialized:
if (err)
goto out_disable;
- err = ufshcd_add_scsi_host(hba);
- if (err)
- goto out_disable;
-
ufs_sysfs_add_nodes(hba->dev);
async_schedule(ufshcd_async_scan, hba);
diff --git a/drivers/ufs/host/Kconfig b/drivers/ufs/host/Kconfig
index 191fbd799ec5..7d5117b2dab4 100644
--- a/drivers/ufs/host/Kconfig
+++ b/drivers/ufs/host/Kconfig
@@ -154,3 +154,16 @@ config SCSI_UFS_ROCKCHIP
Select this if you have UFS controller on Rockchip chipset.
If unsure, say N.
+
+config SCSI_UFS_AMD_VERSAL2
+ tristate "AMD Versal Gen 2 UFS controller platform driver"
+ depends on SCSI_UFSHCD_PLATFORM && (ARCH_ZYNQMP || COMPILE_TEST)
+ help
+ This selects the AMD Versal Gen 2 specific additions on top of
+ the UFSHCD DWC and UFSHCD platform drivers. The UFS host on AMD
+ Versal Gen 2 needs vendor-specific configuration, such as PHY
+ setup and vendor-specific register accesses, before the hardware
+ can be used.
+
+ Select this if you have a UFS controller on an AMD Versal Gen 2 SoC.
+ If unsure, say N.
diff --git a/drivers/ufs/host/Makefile b/drivers/ufs/host/Makefile
index 2f97feb5db3f..65d8bb23ab7b 100644
--- a/drivers/ufs/host/Makefile
+++ b/drivers/ufs/host/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_SCSI_UFS_RENESAS) += ufs-renesas.o
obj-$(CONFIG_SCSI_UFS_ROCKCHIP) += ufs-rockchip.o
obj-$(CONFIG_SCSI_UFS_SPRD) += ufs-sprd.o
obj-$(CONFIG_SCSI_UFS_TI_J721E) += ti-j721e-ufs.o
+obj-$(CONFIG_SCSI_UFS_AMD_VERSAL2) += ufs-amd-versal2.o ufshcd-dwc.o
diff --git a/drivers/ufs/host/ti-j721e-ufs.c b/drivers/ufs/host/ti-j721e-ufs.c
index 21214e5d5896..43781593b5c1 100644
--- a/drivers/ufs/host/ti-j721e-ufs.c
+++ b/drivers/ufs/host/ti-j721e-ufs.c
@@ -15,18 +15,26 @@
#define TI_UFS_SS_RST_N_PCS BIT(0)
#define TI_UFS_SS_CLK_26MHZ BIT(4)
+struct ti_j721e_ufs {
+ void __iomem *regbase;
+ u32 reg;
+};
+
static int ti_j721e_ufs_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct ti_j721e_ufs *ufs;
unsigned long clk_rate;
- void __iomem *regbase;
struct clk *clk;
- u32 reg = 0;
int ret;
- regbase = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(regbase))
- return PTR_ERR(regbase);
+ ufs = devm_kzalloc(dev, sizeof(*ufs), GFP_KERNEL);
+ if (!ufs)
+ return -ENOMEM;
+
+ ufs->regbase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ufs->regbase))
+ return PTR_ERR(ufs->regbase);
pm_runtime_enable(dev);
ret = pm_runtime_resume_and_get(dev);
@@ -42,12 +50,14 @@ static int ti_j721e_ufs_probe(struct platform_device *pdev)
}
clk_rate = clk_get_rate(clk);
if (clk_rate == 26000000)
- reg |= TI_UFS_SS_CLK_26MHZ;
+ ufs->reg |= TI_UFS_SS_CLK_26MHZ;
devm_clk_put(dev, clk);
/* Take UFS slave device out of reset */
- reg |= TI_UFS_SS_RST_N_PCS;
- writel(reg, regbase + TI_UFS_SS_CTRL);
+ ufs->reg |= TI_UFS_SS_RST_N_PCS;
+ writel(ufs->reg, ufs->regbase + TI_UFS_SS_CTRL);
+
+ dev_set_drvdata(dev, ufs);
ret = of_platform_populate(pdev->dev.of_node, NULL, NULL,
dev);
@@ -72,6 +82,16 @@ static void ti_j721e_ufs_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
}
+static int ti_j721e_ufs_resume(struct device *dev)
+{
+ struct ti_j721e_ufs *ufs = dev_get_drvdata(dev);
+
+ writel(ufs->reg, ufs->regbase + TI_UFS_SS_CTRL);
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(ti_j721e_ufs_pm_ops, NULL, ti_j721e_ufs_resume);
+
static const struct of_device_id ti_j721e_ufs_of_match[] = {
{
.compatible = "ti,j721e-ufs",
@@ -87,6 +107,7 @@ static struct platform_driver ti_j721e_ufs_driver = {
.driver = {
.name = "ti-j721e-ufs",
.of_match_table = ti_j721e_ufs_of_match,
+ .pm = pm_sleep_ptr(&ti_j721e_ufs_pm_ops),
},
};
module_platform_driver(ti_j721e_ufs_driver);
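The hunk above caches the TI_UFS_SS_CTRL value and rewrites it from a resume-only PM callback. For reference, a minimal sketch of the same DEFINE_SIMPLE_DEV_PM_OPS()/pm_sleep_ptr() idiom, with hypothetical foo_* names: passing NULL as the suspend hook registers a resume-only handler, and pm_sleep_ptr() compiles the ops away when CONFIG_PM_SLEEP is not set.

#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_probe(struct platform_device *pdev)
{
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* registers lost across suspend would be re-programmed here */
	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, NULL, foo_resume);

static struct platform_driver foo_driver = {
	.probe = foo_probe,
	.driver = {
		.name = "foo",
		.pm = pm_sleep_ptr(&foo_pm_ops),
	},
};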
diff --git a/drivers/ufs/host/ufs-amd-versal2.c b/drivers/ufs/host/ufs-amd-versal2.c
new file mode 100644
index 000000000000..40543db621a1
--- /dev/null
+++ b/drivers/ufs/host/ufs-amd-versal2.c
@@ -0,0 +1,564 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ *
+ * Author: Sai Krishna Potthuri <sai.krishna.potthuri@amd.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/irqreturn.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <ufs/unipro.h>
+
+#include "ufshcd-dwc.h"
+#include "ufshcd-pltfrm.h"
+#include "ufshci-dwc.h"
+
+/* PHY modes */
+#define UFSHCD_DWC_PHY_MODE_ROM 0
+
+#define MPHY_FAST_RX_AFE_CAL BIT(2)
+#define MPHY_FW_CALIB_CFG_VAL BIT(8)
+
+#define MPHY_RX_OVRD_EN BIT(3)
+#define MPHY_RX_OVRD_VAL BIT(2)
+#define MPHY_RX_ACK_MASK BIT(0)
+
+#define TIMEOUT_MICROSEC 1000000
+
+struct ufs_versal2_host {
+ struct ufs_hba *hba;
+ struct reset_control *rstc;
+ struct reset_control *rstphy;
+ u32 phy_mode;
+ unsigned long host_clk;
+ u8 attcompval0;
+ u8 attcompval1;
+ u8 ctlecompval0;
+ u8 ctlecompval1;
+};
+
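+/*
+ * The 16-bit M-PHY registers are not memory-mapped; they are reached
+ * through the CBCREG indirect window of 8-bit DME attributes, as the
+ * two helpers below implement. Illustrative example (values
+ * hypothetical): writing 0x0104 to PHY register 0x401C decomposes into
+ *   CBCREGADDRLSB = 0x1C, CBCREGADDRMSB = 0x40,
+ *   CBCREGWRLSB = 0x04, CBCREGWRMSB = 0x01,
+ *   CBCREGRDWRSEL = 1 (write), then VS_MPHYCFGUPDT = 1 to commit.
+ */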
+static int ufs_versal2_phy_reg_write(struct ufs_hba *hba, u32 addr, u32 val)
+{
+ static struct ufshcd_dme_attr_val phy_write_attrs[] = {
+ { UIC_ARG_MIB(CBCREGADDRLSB), 0, DME_LOCAL },
+ { UIC_ARG_MIB(CBCREGADDRMSB), 0, DME_LOCAL },
+ { UIC_ARG_MIB(CBCREGWRLSB), 0, DME_LOCAL },
+ { UIC_ARG_MIB(CBCREGWRMSB), 0, DME_LOCAL },
+ { UIC_ARG_MIB(CBCREGRDWRSEL), 1, DME_LOCAL },
+ { UIC_ARG_MIB(VS_MPHYCFGUPDT), 1, DME_LOCAL }
+ };
+
+ phy_write_attrs[0].mib_val = (u8)addr;
+ phy_write_attrs[1].mib_val = (u8)(addr >> 8);
+ phy_write_attrs[2].mib_val = (u8)val;
+ phy_write_attrs[3].mib_val = (u8)(val >> 8);
+
+ return ufshcd_dwc_dme_set_attrs(hba, phy_write_attrs, ARRAY_SIZE(phy_write_attrs));
+}
+
+static int ufs_versal2_phy_reg_read(struct ufs_hba *hba, u32 addr, u32 *val)
+{
+ u32 mib_val;
+ int ret;
+ static struct ufshcd_dme_attr_val phy_read_attrs[] = {
+ { UIC_ARG_MIB(CBCREGADDRLSB), 0, DME_LOCAL },
+ { UIC_ARG_MIB(CBCREGADDRMSB), 0, DME_LOCAL },
+ { UIC_ARG_MIB(CBCREGRDWRSEL), 0, DME_LOCAL },
+ { UIC_ARG_MIB(VS_MPHYCFGUPDT), 1, DME_LOCAL }
+ };
+
+ phy_read_attrs[0].mib_val = (u8)addr;
+ phy_read_attrs[1].mib_val = (u8)(addr >> 8);
+
+ ret = ufshcd_dwc_dme_set_attrs(hba, phy_read_attrs, ARRAY_SIZE(phy_read_attrs));
+ if (ret)
+ return ret;
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(CBCREGRDLSB), &mib_val);
+ if (ret)
+ return ret;
+
+ *val = mib_val;
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(CBCREGRDMSB), &mib_val);
+ if (ret)
+ return ret;
+
+ *val |= (mib_val << 8);
+
+ return 0;
+}
+
+static int ufs_versal2_enable_phy(struct ufs_hba *hba)
+{
+ u32 offset, reg;
+ int ret;
+
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYDISABLE), 0);
+ if (ret)
+ return ret;
+
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 1);
+ if (ret)
+ return ret;
+
+ /* Check Tx/Rx FSM states */
+ for (offset = 0; offset < 2; offset++) {
+ u32 time_left, mibsel;
+
+ time_left = TIMEOUT_MICROSEC;
+ mibsel = UIC_ARG_MIB_SEL(MTX_FSM_STATE, UIC_ARG_MPHY_TX_GEN_SEL_INDEX(offset));
+ do {
+ ret = ufshcd_dme_get(hba, mibsel, &reg);
+ if (ret)
+ return ret;
+
+ if (reg == TX_STATE_HIBERN8 || reg == TX_STATE_SLEEP ||
+ reg == TX_STATE_LSBURST)
+ break;
+
+ time_left--;
+ usleep_range(1, 5);
+ } while (time_left);
+
+ if (!time_left) {
+ dev_err(hba->dev, "Invalid Tx FSM state.\n");
+ return -ETIMEDOUT;
+ }
+
+ time_left = TIMEOUT_MICROSEC;
+ mibsel = UIC_ARG_MIB_SEL(MRX_FSM_STATE, UIC_ARG_MPHY_RX_GEN_SEL_INDEX(offset));
+ do {
+ ret = ufshcd_dme_get(hba, mibsel, &reg);
+ if (ret)
+ return ret;
+
+ if (reg == RX_STATE_HIBERN8 || reg == RX_STATE_SLEEP ||
+ reg == RX_STATE_LSBURST)
+ break;
+
+ time_left--;
+ usleep_range(1, 5);
+ } while (time_left);
+
+ if (!time_left) {
+ dev_err(hba->dev, "Invalid Rx FSM state.\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+static int ufs_versal2_setup_phy(struct ufs_hba *hba)
+{
+ struct ufs_versal2_host *host = ufshcd_get_variant(hba);
+ int ret;
+ u32 reg;
+
+ /* Bypass RX-AFE offset calibrations (ATT/CTLE) */
+ ret = ufs_versal2_phy_reg_read(hba, FAST_FLAGS(0), &reg);
+ if (ret)
+ return ret;
+
+ reg |= MPHY_FAST_RX_AFE_CAL;
+ ret = ufs_versal2_phy_reg_write(hba, FAST_FLAGS(0), reg);
+ if (ret)
+ return ret;
+
+ ret = ufs_versal2_phy_reg_read(hba, FAST_FLAGS(1), &reg);
+ if (ret)
+ return ret;
+
+ reg |= MPHY_FAST_RX_AFE_CAL;
+ ret = ufs_versal2_phy_reg_write(hba, FAST_FLAGS(1), reg);
+ if (ret)
+ return ret;
+
+ /* Program ATT and CTLE compensation values */
+ if (host->attcompval0) {
+ ret = ufs_versal2_phy_reg_write(hba, RX_AFE_ATT_IDAC(0), host->attcompval0);
+ if (ret)
+ return ret;
+ }
+
+ if (host->attcompval1) {
+ ret = ufs_versal2_phy_reg_write(hba, RX_AFE_ATT_IDAC(1), host->attcompval1);
+ if (ret)
+ return ret;
+ }
+
+ if (host->ctlecompval0) {
+ ret = ufs_versal2_phy_reg_write(hba, RX_AFE_CTLE_IDAC(0), host->ctlecompval0);
+ if (ret)
+ return ret;
+ }
+
+ if (host->ctlecompval1) {
+ ret = ufs_versal2_phy_reg_write(hba, RX_AFE_CTLE_IDAC(1), host->ctlecompval1);
+ if (ret)
+ return ret;
+ }
+
+ ret = ufs_versal2_phy_reg_read(hba, FW_CALIB_CCFG(0), &reg);
+ if (ret)
+ return ret;
+
+ reg |= MPHY_FW_CALIB_CFG_VAL;
+ ret = ufs_versal2_phy_reg_write(hba, FW_CALIB_CCFG(0), reg);
+ if (ret)
+ return ret;
+
+ ret = ufs_versal2_phy_reg_read(hba, FW_CALIB_CCFG(1), &reg);
+ if (ret)
+ return ret;
+
+ reg |= MPHY_FW_CALIB_CFG_VAL;
+ return ufs_versal2_phy_reg_write(hba, FW_CALIB_CCFG(1), reg);
+}
+
+static int ufs_versal2_phy_init(struct ufs_hba *hba)
+{
+ struct ufs_versal2_host *host = ufshcd_get_variant(hba);
+ u32 time_left;
+ bool is_ready;
+ int ret;
+ static const struct ufshcd_dme_attr_val rmmi_attrs[] = {
+ { UIC_ARG_MIB(CBREFCLKCTRL2), CBREFREFCLK_GATE_OVR_EN, DME_LOCAL },
+ { UIC_ARG_MIB(CBCRCTRL), 1, DME_LOCAL },
+ { UIC_ARG_MIB(CBC10DIRECTCONF2), 1, DME_LOCAL },
+ { UIC_ARG_MIB(VS_MPHYCFGUPDT), 1, DME_LOCAL }
+ };
+
+ /* Wait for Tx/Rx config_rdy */
+ time_left = TIMEOUT_MICROSEC;
+ do {
+ time_left--;
+ ret = zynqmp_pm_is_mphy_tx_rx_config_ready(&is_ready);
+ if (ret)
+ return ret;
+
+ if (!is_ready)
+ break;
+
+ usleep_range(1, 5);
+ } while (time_left);
+
+ if (!time_left) {
+ dev_err(hba->dev, "Tx/Rx configuration signal busy.\n");
+ return -ETIMEDOUT;
+ }
+
+ ret = ufshcd_dwc_dme_set_attrs(hba, rmmi_attrs, ARRAY_SIZE(rmmi_attrs));
+ if (ret)
+ return ret;
+
+ ret = reset_control_deassert(host->rstphy);
+ if (ret) {
+ dev_err(hba->dev, "ufsphy reset deassert failed, err = %d\n", ret);
+ return ret;
+ }
+
+ /* Wait for SRAM init done */
+ time_left = TIMEOUT_MICROSEC;
+ do {
+ time_left--;
+ ret = zynqmp_pm_is_sram_init_done(&is_ready);
+ if (ret)
+ return ret;
+
+ if (is_ready)
+ break;
+
+ usleep_range(1, 5);
+ } while (time_left);
+
+ if (!time_left) {
+ dev_err(hba->dev, "SRAM initialization failed.\n");
+ return -ETIMEDOUT;
+ }
+
+ ret = ufs_versal2_setup_phy(hba);
+ if (ret)
+ return ret;
+
+ return ufs_versal2_enable_phy(hba);
+}
+
+static int ufs_versal2_init(struct ufs_hba *hba)
+{
+ struct ufs_versal2_host *host;
+ struct device *dev = hba->dev;
+ struct ufs_clk_info *clki;
+ int ret;
+ u32 cal;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ host->hba = hba;
+ ufshcd_set_variant(hba, host);
+
+ host->phy_mode = UFSHCD_DWC_PHY_MODE_ROM;
+
+ list_for_each_entry(clki, &hba->clk_list_head, list) {
+ if (!strcmp(clki->name, "core"))
+ host->host_clk = clk_get_rate(clki->clk);
+ }
+
+ host->rstc = devm_reset_control_get_exclusive(dev, "host");
+ if (IS_ERR(host->rstc)) {
+ dev_err(dev, "failed to get reset ctrl: host\n");
+ return PTR_ERR(host->rstc);
+ }
+
+ host->rstphy = devm_reset_control_get_exclusive(dev, "phy");
+ if (IS_ERR(host->rstphy)) {
+ dev_err(dev, "failed to get reset ctrl: phy\n");
+ return PTR_ERR(host->rstphy);
+ }
+
+ ret = reset_control_assert(host->rstc);
+ if (ret) {
+ dev_err(hba->dev, "host reset assert failed, err = %d\n", ret);
+ return ret;
+ }
+
+ ret = reset_control_assert(host->rstphy);
+ if (ret) {
+ dev_err(hba->dev, "phy reset assert failed, err = %d\n", ret);
+ return ret;
+ }
+
+ ret = zynqmp_pm_set_sram_bypass();
+ if (ret) {
+ dev_err(dev, "Bypass SRAM interface failed, err = %d\n", ret);
+ return ret;
+ }
+
+ ret = reset_control_deassert(host->rstc);
+ if (ret) {
+ dev_err(hba->dev, "host reset deassert failed, err = %d\n", ret);
+ return ret;
+ }
+
+ ret = zynqmp_pm_get_ufs_calibration_values(&cal);
+ if (ret) {
+ dev_err(dev, "failed to read calibration values\n");
+ return ret;
+ }
+
+ host->attcompval0 = (u8)cal;
+ host->attcompval1 = (u8)(cal >> 8);
+ host->ctlecompval0 = (u8)(cal >> 16);
+ host->ctlecompval1 = (u8)(cal >> 24);
+
+ hba->quirks |= UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING;
+
+ return 0;
+}
+
+static int ufs_versal2_hce_enable_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ int ret = 0;
+
+ if (status == PRE_CHANGE) {
+ ret = ufs_versal2_phy_init(hba);
+ if (ret)
+ dev_err(hba->dev, "Phy init failed (%d)\n", ret);
+ }
+
+ return ret;
+}
+
+static int ufs_versal2_link_startup_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ struct ufs_versal2_host *host = ufshcd_get_variant(hba);
+ int ret = 0;
+
+ switch (status) {
+ case PRE_CHANGE:
+ if (host->host_clk)
+ ufshcd_writel(hba, host->host_clk / 1000000, DWC_UFS_REG_HCLKDIV);
+
+ break;
+ case POST_CHANGE:
+ ret = ufshcd_dwc_link_startup_notify(hba, status);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int ufs_versal2_phy_ratesel(struct ufs_hba *hba, u32 activelanes, u32 rx_req)
+{
+ u32 time_left, reg, lane;
+ int ret;
+
+ for (lane = 0; lane < activelanes; lane++) {
+ time_left = TIMEOUT_MICROSEC;
+ ret = ufs_versal2_phy_reg_read(hba, RX_OVRD_IN_1(lane), &reg);
+ if (ret)
+ return ret;
+
+ reg |= MPHY_RX_OVRD_EN;
+ if (rx_req)
+ reg |= MPHY_RX_OVRD_VAL;
+ else
+ reg &= ~MPHY_RX_OVRD_VAL;
+
+ ret = ufs_versal2_phy_reg_write(hba, RX_OVRD_IN_1(lane), reg);
+ if (ret)
+ return ret;
+
+ do {
+ ret = ufs_versal2_phy_reg_read(hba, RX_PCS_OUT(lane), &reg);
+ if (ret)
+ return ret;
+
+ reg &= MPHY_RX_ACK_MASK;
+ if (reg == rx_req)
+ break;
+
+ time_left--;
+ usleep_range(1, 5);
+ } while (time_left);
+
+ if (!time_left) {
+ dev_err(hba->dev, "Invalid Rx Ack value.\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+static int ufs_versal2_pwr_change_notify(struct ufs_hba *hba, enum ufs_notify_change_status status,
+ const struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
+{
+ struct ufs_versal2_host *host = ufshcd_get_variant(hba);
+ u32 lane, reg, rate = 0;
+ int ret = 0;
+
+ if (status == PRE_CHANGE) {
+ memcpy(dev_req_params, dev_max_params, sizeof(struct ufs_pa_layer_attr));
+
+ /* If it is not a calibrated part, switch PWRMODE to SLOW_MODE */
+ if (!host->attcompval0 && !host->attcompval1 && !host->ctlecompval0 &&
+ !host->ctlecompval1) {
+ dev_req_params->pwr_rx = SLOW_MODE;
+ dev_req_params->pwr_tx = SLOW_MODE;
+ return 0;
+ }
+
+ if (dev_req_params->pwr_rx == SLOW_MODE || dev_req_params->pwr_rx == SLOWAUTO_MODE)
+ return 0;
+
+ if (dev_req_params->hs_rate == PA_HS_MODE_B)
+ rate = 1;
+
+ /* Select the rate */
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(CBRATESEL), rate);
+ if (ret)
+ return ret;
+
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 1);
+ if (ret)
+ return ret;
+
+ ret = ufs_versal2_phy_ratesel(hba, dev_req_params->lane_tx, 1);
+ if (ret)
+ return ret;
+
+ ret = ufs_versal2_phy_ratesel(hba, dev_req_params->lane_tx, 0);
+ if (ret)
+ return ret;
+
+ /* Remove rx_req override */
+ for (lane = 0; lane < dev_req_params->lane_tx; lane++) {
+ ret = ufs_versal2_phy_reg_read(hba, RX_OVRD_IN_1(lane), &reg);
+ if (ret)
+ return ret;
+
+ reg &= ~MPHY_RX_OVRD_EN;
+ ret = ufs_versal2_phy_reg_write(hba, RX_OVRD_IN_1(lane), reg);
+ if (ret)
+ return ret;
+ }
+
+ if (dev_req_params->lane_tx == UFS_LANE_2 && dev_req_params->lane_rx == UFS_LANE_2)
+ ret = ufshcd_dme_configure_adapt(hba, dev_req_params->gear_tx,
+ PA_INITIAL_ADAPT);
+ }
+
+ return ret;
+}
+
+static struct ufs_hba_variant_ops ufs_versal2_hba_vops = {
+ .name = "ufs-versal2-pltfm",
+ .init = ufs_versal2_init,
+ .link_startup_notify = ufs_versal2_link_startup_notify,
+ .hce_enable_notify = ufs_versal2_hce_enable_notify,
+ .pwr_change_notify = ufs_versal2_pwr_change_notify,
+};
+
+static const struct of_device_id ufs_versal2_pltfm_match[] = {
+ {
+ .compatible = "amd,versal2-ufs",
+ .data = &ufs_versal2_hba_vops,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ufs_versal2_pltfm_match);
+
+static int ufs_versal2_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ /* Perform generic probe */
+ ret = ufshcd_pltfrm_init(pdev, &ufs_versal2_hba_vops);
+ if (ret)
+ dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", ret);
+
+ return ret;
+}
+
+static void ufs_versal2_remove(struct platform_device *pdev)
+{
+ struct ufs_hba *hba = platform_get_drvdata(pdev);
+
+ pm_runtime_get_sync(&pdev->dev);
+ ufshcd_remove(hba);
+}
+
+static const struct dev_pm_ops ufs_versal2_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
+ SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
+};
+
+static struct platform_driver ufs_versal2_pltfm = {
+ .probe = ufs_versal2_probe,
+ .remove = ufs_versal2_remove,
+ .driver = {
+ .name = "ufshcd-versal2",
+ .pm = &ufs_versal2_pm_ops,
+ .of_match_table = ufs_versal2_pltfm_match,
+ },
+};
+
+module_platform_driver(ufs_versal2_pltfm);
+
+MODULE_AUTHOR("Sai Krishna Potthuri <sai.krishna.potthuri@amd.com>");
+MODULE_DESCRIPTION("AMD Versal Gen 2 UFS Host Controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index 758a393a9de1..ecbbf52bf734 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -41,8 +41,7 @@ static void _ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up);
static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
{ .wmanufacturerid = UFS_ANY_VENDOR,
.model = UFS_ANY_MODEL,
- .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM |
- UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
+ .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
.model = "H9HQ21AFAMZDAR",
.quirk = UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES },
@@ -280,12 +279,21 @@ static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80,
REG_UFS_XOUFS_CTRL);
+ if (host->legacy_ip_ver)
+ return 0;
+
/* DDR_EN setting */
if (host->ip_ver >= IP_VER_MT6989) {
ufshcd_rmwl(hba, UFS_MASK(0x7FFF, 8),
0x453000, REG_UFS_MMIO_OPT_CTRL_0);
}
+ if (host->ip_ver >= IP_VER_MT6991_A0) {
+ /* Enable multi-rtt */
+ ufshcd_rmwl(hba, MRTT_EN, MRTT_EN, REG_UFS_MMIO_OPT_CTRL_0);
+ /* Enable random performance improvement */
+ ufshcd_rmwl(hba, RDN_PFM_IMPV_DIS, 0, REG_UFS_MMIO_OPT_CTRL_0);
+ }
}
return 0;
@@ -405,7 +413,7 @@ static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- if (((host->ip_ver >> 16) & 0xFF) >= 0x36) {
+ if (!host->legacy_ip_ver && host->ip_ver >= IP_VER_MT6983) {
ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL);
ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0);
ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1);
@@ -422,6 +430,7 @@ static int ufs_mtk_wait_idle_state(struct ufs_hba *hba,
u64 timeout, time_checked;
u32 val, sm;
bool wait_idle;
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
/* cannot use plain ktime_get() in suspend */
timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL;
@@ -432,8 +441,13 @@ static int ufs_mtk_wait_idle_state(struct ufs_hba *hba,
do {
time_checked = ktime_get_mono_fast_ns();
- ufs_mtk_dbg_sel(hba);
- val = ufshcd_readl(hba, REG_UFS_PROBE);
+ if (host->legacy_ip_ver || host->ip_ver < IP_VER_MT6899) {
+ ufs_mtk_dbg_sel(hba);
+ val = ufshcd_readl(hba, REG_UFS_PROBE);
+ } else {
+ val = ufshcd_readl(hba, REG_UFS_UFS_MMIO_OTSD_CTRL);
+ val = val >> 16;
+ }
sm = val & 0x1f;
@@ -465,13 +479,20 @@ static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
{
ktime_t timeout, time_checked;
u32 val;
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
timeout = ktime_add_ms(ktime_get(), max_wait_ms);
do {
time_checked = ktime_get();
- ufs_mtk_dbg_sel(hba);
- val = ufshcd_readl(hba, REG_UFS_PROBE);
- val = val >> 28;
+
+ if (host->legacy_ip_ver || host->ip_ver < IP_VER_MT6899) {
+ ufs_mtk_dbg_sel(hba);
+ val = ufshcd_readl(hba, REG_UFS_PROBE);
+ val = val >> 28;
+ } else {
+ val = ufshcd_readl(hba, REG_UFS_UFS_MMIO_OTSD_CTRL);
+ val = val >> 24;
+ }
if (val == state)
return 0;
@@ -1109,18 +1130,6 @@ static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
}
}
-/* Convert microseconds to Auto-Hibernate Idle Timer register value */
-static u32 ufs_mtk_us_to_ahit(unsigned int timer)
-{
- unsigned int scale;
-
- for (scale = 0; timer > UFSHCI_AHIBERN8_TIMER_MASK; ++scale)
- timer /= UFSHCI_AHIBERN8_SCALE_FACTOR;
-
- return FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, timer) |
- FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, scale);
-}
-
static void ufs_mtk_fix_ahit(struct ufs_hba *hba)
{
unsigned int us;
@@ -1143,7 +1152,7 @@ static void ufs_mtk_fix_ahit(struct ufs_hba *hba)
break;
}
- hba->ahit = ufs_mtk_us_to_ahit(us);
+ hba->ahit = ufshcd_us_to_ahit(us);
}
ufs_mtk_setup_clk_gating(hba);
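The removed ufs_mtk_us_to_ahit() duplicated the core ufshcd_us_to_ahit() helper, which the hunk above now calls instead. A minimal user-space sketch of that encoding, assuming the UFSHCI layout of a 10-bit timer field, a scale field at bits 12:10, and a scale factor of 10:

#include <stdio.h>

#define AHIBERN8_TIMER_MASK	0x3ffu	/* timer count, bits 9:0 (assumed) */
#define AHIBERN8_SCALE_SHIFT	10	/* scale field, bits 12:10 (assumed) */
#define AHIBERN8_SCALE_FACTOR	10

static unsigned int us_to_ahit(unsigned int timer)
{
	unsigned int scale;

	/* Divide down until the count fits the 10-bit field; each step
	 * makes the time unit ten times coarser. */
	for (scale = 0; timer > AHIBERN8_TIMER_MASK; ++scale)
		timer /= AHIBERN8_SCALE_FACTOR;

	return timer | (scale << AHIBERN8_SCALE_SHIFT);
}

int main(void)
{
	/* 5000 us -> timer 500 in 10 us units, scale 1 -> 0x05f4 */
	printf("0x%04x\n", us_to_ahit(5000));
	return 0;
}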
@@ -1332,6 +1341,36 @@ static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
return true;
}
+static void ufs_mtk_adjust_sync_length(struct ufs_hba *hba)
+{
+ int i;
+ u32 value;
+ u32 cnt, att, min;
+ struct attr_min {
+ u32 attr;
+ u32 min_value;
+ } pa_min_sync_length[] = {
+ {PA_TXHSG1SYNCLENGTH, 0x48},
+ {PA_TXHSG2SYNCLENGTH, 0x48},
+ {PA_TXHSG3SYNCLENGTH, 0x48},
+ {PA_TXHSG4SYNCLENGTH, 0x48},
+ {PA_TXHSG5SYNCLENGTH, 0x48}
+ };
+
+ cnt = ARRAY_SIZE(pa_min_sync_length);
+ for (i = 0; i < cnt; i++) {
+ att = pa_min_sync_length[i].attr;
+ min = pa_min_sync_length[i].min_value;
+ ufshcd_dme_get(hba, UIC_ARG_MIB(att), &value);
+ if (value < min)
+ ufshcd_dme_set(hba, UIC_ARG_MIB(att), min);
+
+ ufshcd_dme_peer_get(hba, UIC_ARG_MIB(att), &value);
+ if (value < min)
+ ufshcd_dme_peer_set(hba, UIC_ARG_MIB(att), min);
+ }
+}
+
static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
const struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
@@ -1355,6 +1394,8 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
}
if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) {
+ ufs_mtk_adjust_sync_length(hba);
+
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1);
@@ -1619,14 +1660,26 @@ static int ufs_mtk_device_reset(struct ufs_hba *hba)
static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
{
int err;
+ u32 val;
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
err = ufshcd_hba_enable(hba);
if (err)
return err;
err = ufs_mtk_unipro_set_lpm(hba, false);
- if (err)
+ if (err) {
+ if (host->legacy_ip_ver || host->ip_ver < IP_VER_MT6899) {
+ ufs_mtk_dbg_sel(hba);
+ val = ufshcd_readl(hba, REG_UFS_PROBE);
+ } else {
+ val = ufshcd_readl(hba, REG_UFS_UFS_MMIO_OTSD_CTRL);
+ }
+ ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, val);
+ val = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+ ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, val);
return err;
+ }
err = ufshcd_uic_hibern8_exit(hba);
if (err)
@@ -1744,6 +1797,7 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
{
int err;
struct arm_smccc_res res;
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
if (status == PRE_CHANGE) {
if (ufshcd_is_auto_hibern8_supported(hba))
@@ -1773,6 +1827,15 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
ufs_mtk_sram_pwr_ctrl(false, res);
+ /* Release pm_qos/clk if in scale-up mode during suspend */
+ if (ufshcd_is_clkscaling_supported(hba) && host->clk_scale_up) {
+ ufshcd_pm_qos_update(hba, false);
+ _ufs_mtk_clk_scale(hba, false);
+ } else if (!ufshcd_is_clkscaling_supported(hba) &&
+ hba->pwr_info.gear_rx >= UFS_HS_G5) {
+ _ufs_mtk_clk_scale(hba, false);
+ }
+
return 0;
fail:
/*
@@ -1788,6 +1851,7 @@ static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
int err;
struct arm_smccc_res res;
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
ufs_mtk_dev_vreg_set_lpm(hba, false);
@@ -1798,6 +1862,15 @@ static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (err)
goto fail;
+ /* Request pm_qos/clk if in scale-up mode after resume */
+ if (ufshcd_is_clkscaling_supported(hba) && host->clk_scale_up) {
+ ufshcd_pm_qos_update(hba, true);
+ _ufs_mtk_clk_scale(hba, true);
+ } else if (!ufshcd_is_clkscaling_supported(hba) &&
+ hba->pwr_info.gear_rx >= UFS_HS_G5) {
+ _ufs_mtk_clk_scale(hba, true);
+ }
+
if (ufshcd_is_link_hibern8(hba)) {
err = ufs_mtk_link_set_hpm(hba);
if (err)
@@ -1889,15 +1962,13 @@ static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
{
ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
- if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
- (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
+ if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc) {
hba->vreg_info.vcc->always_on = true;
/*
* VCC will be kept always-on thus we don't
- * need any delay during regulator operations
+ * need any delay before putting the device's VCC into LPM mode.
*/
- hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
- UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
+ hba->dev_quirks &= ~UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM;
}
ufs_mtk_vreg_fix_vcc(hba);
@@ -2373,6 +2444,11 @@ static int ufs_mtk_system_suspend(struct device *dev)
struct arm_smccc_res res;
int ret;
+ if (hba->shutting_down) {
+ ret = -EBUSY;
+ goto out;
+ }
+
ret = ufshcd_system_suspend(dev);
if (ret)
goto out;
diff --git a/drivers/ufs/host/ufs-mediatek.h b/drivers/ufs/host/ufs-mediatek.h
index dfbf78bd8664..9747277f11e8 100644
--- a/drivers/ufs/host/ufs-mediatek.h
+++ b/drivers/ufs/host/ufs-mediatek.h
@@ -20,6 +20,9 @@
#define MCQ_MULTI_INTR_EN BIT(2)
#define MCQ_CMB_INTR_EN BIT(3)
#define MCQ_AH8 BIT(4)
+#define MON_EN BIT(5)
+#define MRTT_EN BIT(25)
+#define RDN_PFM_IMPV_DIS BIT(28)
#define MCQ_INTR_EN_MSK (MCQ_MULTI_INTR_EN | MCQ_CMB_INTR_EN)
@@ -28,6 +31,7 @@
*/
#define REG_UFS_XOUFS_CTRL 0x140
#define REG_UFS_REFCLK_CTRL 0x144
+#define REG_UFS_UFS_MMIO_OTSD_CTRL 0x14C
#define REG_UFS_MMIO_OPT_CTRL_0 0x160
#define REG_UFS_EXTREG 0x2100
#define REG_UFS_MPHYCTRL 0x2200
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index eba0e6617483..8d119b3223cb 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -1037,9 +1037,6 @@ static struct ufs_dev_quirk ufs_qcom_dev_fixups[] = {
{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
.model = UFS_ANY_MODEL,
.quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
- { .wmanufacturerid = UFS_VENDOR_TOSHIBA,
- .model = UFS_ANY_MODEL,
- .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM },
{ .wmanufacturerid = UFS_VENDOR_WDC,
.model = UFS_ANY_MODEL,
.quirk = UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE },
diff --git a/drivers/ufs/host/ufs-rockchip.c b/drivers/ufs/host/ufs-rockchip.c
index 8754085dd0cc..7fff34513a60 100644
--- a/drivers/ufs/host/ufs-rockchip.c
+++ b/drivers/ufs/host/ufs-rockchip.c
@@ -7,6 +7,7 @@
#include <linux/clk.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -20,9 +21,17 @@
#include "ufshcd-pltfrm.h"
#include "ufs-rockchip.h"
+static void ufs_rockchip_controller_reset(struct ufs_rockchip_host *host)
+{
+ reset_control_assert(host->rst);
+ udelay(1);
+ reset_control_deassert(host->rst);
+}
+
static int ufs_rockchip_hce_enable_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
{
+ struct ufs_rockchip_host *host = ufshcd_get_variant(hba);
int err = 0;
if (status == POST_CHANGE) {
@@ -37,6 +46,9 @@ static int ufs_rockchip_hce_enable_notify(struct ufs_hba *hba,
return ufshcd_vops_phy_initialization(hba);
}
+ /* PRE_CHANGE */
+ ufs_rockchip_controller_reset(host);
+
return 0;
}
@@ -156,9 +168,7 @@ static int ufs_rockchip_common_init(struct ufs_hba *hba)
return dev_err_probe(dev, PTR_ERR(host->rst),
"failed to get reset control\n");
- reset_control_assert(host->rst);
- udelay(1);
- reset_control_deassert(host->rst);
+ ufs_rockchip_controller_reset(host);
host->ref_out_clk = devm_clk_get_enabled(dev, "ref_out");
if (IS_ERR(host->ref_out_clk))
@@ -282,9 +292,7 @@ static int ufs_rockchip_runtime_resume(struct device *dev)
return err;
}
- reset_control_assert(host->rst);
- udelay(1);
- reset_control_deassert(host->rst);
+ ufs_rockchip_controller_reset(host);
return ufshcd_runtime_resume(dev);
}
diff --git a/drivers/ufs/host/ufshcd-dwc.h b/drivers/ufs/host/ufshcd-dwc.h
index ad91ea56662c..c618bb914904 100644
--- a/drivers/ufs/host/ufshcd-dwc.h
+++ b/drivers/ufs/host/ufshcd-dwc.h
@@ -12,6 +12,52 @@
#include <ufs/ufshcd.h>
+/* RMMI Attributes */
+#define CBREFCLKCTRL2 0x8132
+#define CBCRCTRL 0x811F
+#define CBC10DIRECTCONF2 0x810E
+#define CBRATESEL 0x8114
+#define CBCREGADDRLSB 0x8116
+#define CBCREGADDRMSB 0x8117
+#define CBCREGWRLSB 0x8118
+#define CBCREGWRMSB 0x8119
+#define CBCREGRDLSB 0x811A
+#define CBCREGRDMSB 0x811B
+#define CBCREGRDWRSEL 0x811C
+
+#define CBREFREFCLK_GATE_OVR_EN BIT(7)
+
+/* M-PHY Attributes */
+#define MTX_FSM_STATE 0x41
+#define MRX_FSM_STATE 0xC1
+
+/* M-PHY registers */
+#define RX_OVRD_IN_1(n) (0x3006 + ((n) * 0x100))
+#define RX_PCS_OUT(n) (0x300F + ((n) * 0x100))
+#define FAST_FLAGS(n) (0x401C + ((n) * 0x100))
+#define RX_AFE_ATT_IDAC(n) (0x4000 + ((n) * 0x100))
+#define RX_AFE_CTLE_IDAC(n) (0x4001 + ((n) * 0x100))
+#define FW_CALIB_CCFG(n) (0x404D + ((n) * 0x100))
+
+/* Tx/Rx FSM state */
+enum rx_fsm_state {
+ RX_STATE_DISABLED = 0,
+ RX_STATE_HIBERN8 = 1,
+ RX_STATE_SLEEP = 2,
+ RX_STATE_STALL = 3,
+ RX_STATE_LSBURST = 4,
+ RX_STATE_HSBURST = 5,
+};
+
+enum tx_fsm_state {
+ TX_STATE_DISABLED = 0,
+ TX_STATE_HIBERN8 = 1,
+ TX_STATE_SLEEP = 2,
+ TX_STATE_STALL = 3,
+ TX_STATE_LSBURST = 4,
+ TX_STATE_HSBURST = 5,
+};
+
struct ufshcd_dme_attr_val {
u32 attr_sel;
u32 mib_val;
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 5f15f855b4b9..4b263c328ed2 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -35,7 +35,7 @@
static struct vfsmount *debugfs_mount;
static int debugfs_mount_count;
static bool debugfs_registered;
-static unsigned int debugfs_allow __ro_after_init = DEFAULT_DEBUGFS_ALLOW_BITS;
+static bool debugfs_enabled __ro_after_init = IS_ENABLED(CONFIG_DEBUG_FS_ALLOW_ALL);
/*
* Don't allow access attributes to be changed whilst the kernel is locked down
@@ -287,9 +287,6 @@ static int debugfs_get_tree(struct fs_context *fc)
{
int err;
- if (!(debugfs_allow & DEBUGFS_ALLOW_API))
- return -EPERM;
-
err = get_tree_single(fc, debugfs_fill_super);
if (err)
return err;
@@ -368,7 +365,7 @@ static struct dentry *debugfs_start_creating(const char *name,
struct dentry *dentry;
int error;
- if (!(debugfs_allow & DEBUGFS_ALLOW_API))
+ if (!debugfs_enabled)
return ERR_PTR(-EPERM);
if (!debugfs_initialized())
@@ -883,21 +880,25 @@ static int __init debugfs_kernel(char *str)
{
if (str) {
if (!strcmp(str, "on"))
- debugfs_allow = DEBUGFS_ALLOW_API | DEBUGFS_ALLOW_MOUNT;
- else if (!strcmp(str, "no-mount"))
- debugfs_allow = DEBUGFS_ALLOW_API;
+ debugfs_enabled = true;
else if (!strcmp(str, "off"))
- debugfs_allow = 0;
+ debugfs_enabled = false;
+ else if (!strcmp(str, "no-mount")) {
+ pr_notice("debugfs=no-mount is a deprecated alias "
+ "for debugfs=off\n");
+ debugfs_enabled = false;
+ }
}
return 0;
}
early_param("debugfs", debugfs_kernel);
+
static int __init debugfs_init(void)
{
int retval;
- if (!(debugfs_allow & DEBUGFS_ALLOW_MOUNT))
+ if (!debugfs_enabled)
return -EPERM;
retval = sysfs_create_mount_point(kernel_kobj, "debug");
diff --git a/fs/debugfs/internal.h b/fs/debugfs/internal.h
index 427987f81571..c95699b27a56 100644
--- a/fs/debugfs/internal.h
+++ b/fs/debugfs/internal.h
@@ -55,17 +55,4 @@ enum {
HAS_IOCTL = 16
};
-#define DEBUGFS_ALLOW_API BIT(0)
-#define DEBUGFS_ALLOW_MOUNT BIT(1)
-
-#ifdef CONFIG_DEBUG_FS_ALLOW_ALL
-#define DEFAULT_DEBUGFS_ALLOW_BITS (DEBUGFS_ALLOW_MOUNT | DEBUGFS_ALLOW_API)
-#endif
-#ifdef CONFIG_DEBUG_FS_DISALLOW_MOUNT
-#define DEFAULT_DEBUGFS_ALLOW_BITS (DEBUGFS_ALLOW_API)
-#endif
-#ifdef CONFIG_DEBUG_FS_ALLOW_NONE
-#define DEFAULT_DEBUGFS_ALLOW_BITS (0)
-#endif
-
#endif /* _DEBUGFS_INTERNAL_H_ */
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index a670ba3e565e..5c0efd6b239f 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -675,11 +675,14 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
if (parent) {
ret = security_kernfs_init_security(parent, kn);
if (ret)
- goto err_out3;
+ goto err_out4;
}
return kn;
+ err_out4:
+ simple_xattrs_free(&kn->iattr->xattrs, NULL);
+ kmem_cache_free(kernfs_iattrs_cache, kn->iattr);
err_out3:
spin_lock(&root->kernfs_idr_lock);
idr_remove(&root->ino_idr, (u32)kernfs_ino(kn));
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
index 76eaf64b9d9e..3ac52e141766 100644
--- a/fs/kernfs/mount.c
+++ b/fs/kernfs/mount.c
@@ -298,6 +298,7 @@ static int kernfs_fill_super(struct super_block *sb, struct kernfs_fs_context *k
if (info->root->flags & KERNFS_ROOT_SUPPORT_EXPORTOP)
sb->s_export_op = &kernfs_export_ops;
sb->s_time_gran = 1;
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
/* sysfs dentries and inodes don't require IO to create */
sb->s_shrink->seeks = 0;
diff --git a/fs/nfs/localio.c b/fs/nfs/localio.c
index 49ed90c6b9f2..f33bfa7b58e6 100644
--- a/fs/nfs/localio.c
+++ b/fs/nfs/localio.c
@@ -615,8 +615,11 @@ static void nfs_local_read_aio_complete(struct kiocb *kiocb, long ret)
nfs_local_pgio_aio_complete(iocb); /* Calls nfs_local_read_aio_complete_work */
}
-static void do_nfs_local_call_read(struct nfs_local_kiocb *iocb, struct file *filp)
+static void nfs_local_call_read(struct work_struct *work)
{
+ struct nfs_local_kiocb *iocb =
+ container_of(work, struct nfs_local_kiocb, work);
+ struct file *filp = iocb->kiocb.ki_filp;
bool force_done = false;
ssize_t status;
int n_iters;
@@ -633,7 +636,9 @@ static void do_nfs_local_call_read(struct nfs_local_kiocb *iocb, struct file *fi
} else
iocb->kiocb.ki_flags &= ~IOCB_DIRECT;
- status = filp->f_op->read_iter(&iocb->kiocb, &iocb->iters[i]);
+ scoped_with_creds(filp->f_cred)
+ status = filp->f_op->read_iter(&iocb->kiocb, &iocb->iters[i]);
+
if (status != -EIOCBQUEUED) {
if (unlikely(status >= 0 && status < iocb->iters[i].count))
force_done = true; /* Partial read */
@@ -645,16 +650,6 @@ static void do_nfs_local_call_read(struct nfs_local_kiocb *iocb, struct file *fi
}
}
-static void nfs_local_call_read(struct work_struct *work)
-{
- struct nfs_local_kiocb *iocb =
- container_of(work, struct nfs_local_kiocb, work);
- struct file *filp = iocb->kiocb.ki_filp;
-
- scoped_with_creds(filp->f_cred)
- do_nfs_local_call_read(iocb, filp);
-}
-
static int
nfs_local_do_read(struct nfs_local_kiocb *iocb,
const struct rpc_call_ops *call_ops)
@@ -822,13 +817,18 @@ static void nfs_local_write_aio_complete(struct kiocb *kiocb, long ret)
nfs_local_pgio_aio_complete(iocb); /* Calls nfs_local_write_aio_complete_work */
}
-static ssize_t do_nfs_local_call_write(struct nfs_local_kiocb *iocb,
- struct file *filp)
+static void nfs_local_call_write(struct work_struct *work)
{
+ struct nfs_local_kiocb *iocb =
+ container_of(work, struct nfs_local_kiocb, work);
+ struct file *filp = iocb->kiocb.ki_filp;
+ unsigned long old_flags = current->flags;
bool force_done = false;
ssize_t status;
int n_iters;
+ current->flags |= PF_LOCAL_THROTTLE | PF_MEMALLOC_NOIO;
+
file_start_write(filp);
n_iters = atomic_read(&iocb->n_iters);
for (int i = 0; i < n_iters ; i++) {
@@ -842,7 +842,9 @@ static ssize_t do_nfs_local_call_write(struct nfs_local_kiocb *iocb,
} else
iocb->kiocb.ki_flags &= ~IOCB_DIRECT;
- status = filp->f_op->write_iter(&iocb->kiocb, &iocb->iters[i]);
+ scoped_with_creds(filp->f_cred)
+ status = filp->f_op->write_iter(&iocb->kiocb, &iocb->iters[i]);
+
if (status != -EIOCBQUEUED) {
if (unlikely(status >= 0 && status < iocb->iters[i].count))
force_done = true; /* Partial write */
@@ -854,22 +856,6 @@ static ssize_t do_nfs_local_call_write(struct nfs_local_kiocb *iocb,
}
file_end_write(filp);
- return status;
-}
-
-static void nfs_local_call_write(struct work_struct *work)
-{
- struct nfs_local_kiocb *iocb =
- container_of(work, struct nfs_local_kiocb, work);
- struct file *filp = iocb->kiocb.ki_filp;
- unsigned long old_flags = current->flags;
- ssize_t status;
-
- current->flags |= PF_LOCAL_THROTTLE | PF_MEMALLOC_NOIO;
-
- scoped_with_creds(filp->f_cred)
- status = do_nfs_local_call_write(iocb, filp);
-
current->flags = old_flags;
}
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index e142bac4f9f8..e1e639f515a0 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -36,6 +36,9 @@ static umode_t __first_visible(const struct attribute_group *grp, struct kobject
if (grp->attrs && grp->attrs[0] && grp->is_visible)
return grp->is_visible(kobj, grp->attrs[0], 0);
+ if (grp->attrs && grp->attrs[0] && grp->is_visible_const)
+ return grp->is_visible_const(kobj, grp->attrs[0], 0);
+
if (grp->bin_attrs && grp->bin_attrs[0] && grp->is_bin_visible)
return grp->is_bin_visible(kobj, grp->bin_attrs[0], 0);
@@ -61,8 +64,11 @@ static int create_files(struct kernfs_node *parent, struct kobject *kobj,
*/
if (update)
kernfs_remove_by_name(parent, (*attr)->name);
- if (grp->is_visible) {
- mode = grp->is_visible(kobj, *attr, i);
+ if (grp->is_visible || grp->is_visible_const) {
+ if (grp->is_visible)
+ mode = grp->is_visible(kobj, *attr, i);
+ else
+ mode = grp->is_visible_const(kobj, *attr, i);
mode &= ~SYSFS_GROUP_INVISIBLE;
if (!mode)
continue;
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index 0c2a8b846c20..ebd7f8935f96 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -80,6 +80,11 @@ extern struct cpu_topology cpu_topology[NR_CPUS];
#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
#define topology_cluster_cpumask(cpu) (&cpu_topology[cpu].cluster_sibling)
#define topology_llc_cpumask(cpu) (&cpu_topology[cpu].llc_sibling)
+
+#ifndef arch_cpu_is_threaded
+#define arch_cpu_is_threaded() (0)
+#endif
+
void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
const struct cpumask *cpu_coregroup_mask(int cpu);
diff --git a/include/linux/ata.h b/include/linux/ata.h
index c9013e472aa3..54b416e26995 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -29,6 +29,7 @@ enum {
ATA_MAX_SECTORS_128 = 128,
ATA_MAX_SECTORS = 256,
ATA_MAX_SECTORS_1024 = 1024,
+ ATA_MAX_SECTORS_8191 = 8191,
ATA_MAX_SECTORS_LBA48 = 65535,/* avoid count to be 0000h */
ATA_MAX_SECTORS_TAPE = 65535,
ATA_MAX_TRIM_RNUM = 64, /* 512-byte payload / (6-byte LBA + 2-byte range per entry) */
diff --git a/include/linux/device.h b/include/linux/device.h
index b031ff71a5bd..0be95294b6e6 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -281,25 +281,6 @@ int __must_check device_create_bin_file(struct device *dev,
void device_remove_bin_file(struct device *dev,
const struct bin_attribute *attr);
-/**
- * devm_alloc_percpu - Resource-managed alloc_percpu
- * @dev: Device to allocate per-cpu memory for
- * @type: Type to allocate per-cpu memory for
- *
- * Managed alloc_percpu. Per-cpu memory allocated with this function is
- * automatically freed on driver detach.
- *
- * RETURNS:
- * Pointer to allocated memory on success, NULL on failure.
- */
-#define devm_alloc_percpu(dev, type) \
- ((typeof(type) __percpu *)__devm_alloc_percpu((dev), sizeof(type), \
- __alignof__(type)))
-
-void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
- size_t align);
-void devm_free_percpu(struct device *dev, void __percpu *pdata);
-
struct device_dma_parameters {
/*
* a low level driver may set these to teach IOMMU code about
diff --git a/include/linux/device/devres.h b/include/linux/device/devres.h
index 8c5f57e0d613..9c1e3d643d69 100644
--- a/include/linux/device/devres.h
+++ b/include/linux/device/devres.h
@@ -9,6 +9,7 @@
#include <linux/stdarg.h>
#include <linux/types.h>
#include <asm/bug.h>
+#include <asm/percpu.h>
struct device;
struct device_node;
@@ -96,6 +97,22 @@ devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap);
char * __printf(3, 4) __malloc
devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...);
+/**
+ * devm_alloc_percpu - Resource-managed alloc_percpu
+ * @dev: Device to allocate per-cpu memory for
+ * @type: Type to allocate per-cpu memory for
+ *
+ * Managed alloc_percpu. Per-cpu memory allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * RETURNS:
+ * Pointer to allocated memory on success, NULL on failure.
+ */
+#define devm_alloc_percpu(dev, type) \
+ ((typeof(type) __percpu *)__devm_alloc_percpu((dev), sizeof(type), __alignof__(type)))
+
+void __percpu *__devm_alloc_percpu(struct device *dev, size_t size, size_t align);
+
unsigned long devm_get_free_pages(struct device *dev, gfp_t gfp_mask, unsigned int order);
void devm_free_pages(struct device *dev, unsigned long addr);
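A hedged usage sketch for the relocated devm_alloc_percpu() API documented above; the my_* names are hypothetical. The per-CPU allocation is released automatically on driver detach, so no explicit free path is needed:

#include <linux/device/devres.h>
#include <linux/percpu.h>
#include <linux/platform_device.h>

struct my_stats {
	u64 rx_packets;
	u64 tx_packets;
};

static int my_probe(struct platform_device *pdev)
{
	struct my_stats __percpu *stats;

	stats = devm_alloc_percpu(&pdev->dev, struct my_stats);
	if (!stats)
		return -ENOMEM;

	/* lock-free update on the local CPU's copy */
	this_cpu_inc(stats->rx_packets);
	return 0;
}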
diff --git a/include/linux/firmware/xlnx-zynqmp-ufs.h b/include/linux/firmware/xlnx-zynqmp-ufs.h
new file mode 100644
index 000000000000..d3538dd5822a
--- /dev/null
+++ b/include/linux/firmware/xlnx-zynqmp-ufs.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Firmware layer for UFS APIs.
+ *
+ * Copyright (c) 2025 Advanced Micro Devices, Inc.
+ */
+
+#ifndef __FIRMWARE_XLNX_ZYNQMP_UFS_H__
+#define __FIRMWARE_XLNX_ZYNQMP_UFS_H__
+
+#if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE)
+int zynqmp_pm_is_mphy_tx_rx_config_ready(bool *is_ready);
+int zynqmp_pm_is_sram_init_done(bool *is_done);
+int zynqmp_pm_set_sram_bypass(void);
+int zynqmp_pm_get_ufs_calibration_values(u32 *val);
+#else
+static inline int zynqmp_pm_is_mphy_tx_rx_config_ready(bool *is_ready)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_is_sram_init_done(bool *is_done)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_sram_bypass(void)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_get_ufs_calibration_values(u32 *val)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* __FIRMWARE_XLNX_ZYNQMP_UFS_H__ */
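The IS_REACHABLE() stubs above let callers build without the ZynqMP firmware driver. A hedged caller sketch (helper name hypothetical) that degrades gracefully on the -ENODEV the stubs return:

#include <linux/device.h>
#include <linux/firmware/xlnx-zynqmp-ufs.h>

static int read_ufs_calibration(struct device *dev, u32 *cal)
{
	int ret = zynqmp_pm_get_ufs_calibration_values(cal);

	if (ret == -ENODEV) {
		dev_warn(dev, "no PM firmware, skipping calibration\n");
		*cal = 0;	/* treat as an uncalibrated part */
		return 0;
	}
	return ret;
}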
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index be6817ac5120..15fdbd089bbf 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -16,6 +16,7 @@
#include <linux/types.h>
#include <linux/err.h>
+#include <linux/firmware/xlnx-zynqmp-ufs.h>
#define ZYNQMP_PM_VERSION_MAJOR 1
#define ZYNQMP_PM_VERSION_MINOR 0
@@ -236,6 +237,7 @@ enum pm_ioctl_id {
IOCTL_GET_FEATURE_CONFIG = 27,
/* IOCTL for Secure Read/Write Interface */
IOCTL_READ_REG = 28,
+ IOCTL_MASK_WRITE_REG = 29,
/* Dynamic SD/GEM configuration */
IOCTL_SET_SD_CONFIG = 30,
IOCTL_SET_GEM_CONFIG = 31,
@@ -614,6 +616,9 @@ int zynqmp_pm_feature(const u32 api_id);
int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id);
int zynqmp_pm_set_feature_config(enum pm_feature_config_id id, u32 value);
int zynqmp_pm_get_feature_config(enum pm_feature_config_id id, u32 *payload);
+int zynqmp_pm_sec_read_reg(u32 node_id, u32 offset, u32 *ret_value);
+int zynqmp_pm_sec_mask_write_reg(const u32 node_id, const u32 offset,
+ u32 mask, u32 value);
int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset);
int zynqmp_pm_force_pwrdwn(const u32 target,
const enum zynqmp_pm_request_ack ack);
@@ -913,6 +918,17 @@ static inline int zynqmp_pm_request_wake(const u32 node,
return -ENODEV;
}
+static inline int zynqmp_pm_sec_read_reg(u32 node_id, u32 offset, u32 *ret_value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_sec_mask_write_reg(const u32 node_id, const u32 offset,
+ u32 mask, u32 value)
+{
+ return -ENODEV;
+}
+
static inline int zynqmp_pm_get_rpu_mode(u32 node_id, enum rpu_oper_mode *rpu_mode)
{
return -ENODEV;
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 7a98de1cc995..39534fafa36a 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -75,6 +75,7 @@ enum ata_quirks {
__ATA_QUIRK_NO_DMA_LOG, /* Do not use DMA for log read */
__ATA_QUIRK_NOTRIM, /* Do not use TRIM */
__ATA_QUIRK_MAX_SEC_1024, /* Limit max sects to 1024 */
+ __ATA_QUIRK_MAX_SEC_8191, /* Limit max sects to 8191 */
__ATA_QUIRK_MAX_TRIM_128M, /* Limit max trim size to 128M */
__ATA_QUIRK_NO_NCQ_ON_ATI, /* Disable NCQ on ATI chipset */
__ATA_QUIRK_NO_LPM_ON_ATI, /* Disable LPM on ATI chipset */
@@ -85,6 +86,45 @@ enum ata_quirks {
__ATA_QUIRK_MAX,
};
+/*
+ * Quirk flags: may be set by libata or controller drivers on drives.
+ * Some quirks may be drive/controller pair dependent.
+ */
+enum {
+ ATA_QUIRK_DIAGNOSTIC = (1U << __ATA_QUIRK_DIAGNOSTIC),
+ ATA_QUIRK_NODMA = (1U << __ATA_QUIRK_NODMA),
+ ATA_QUIRK_NONCQ = (1U << __ATA_QUIRK_NONCQ),
+ ATA_QUIRK_MAX_SEC_128 = (1U << __ATA_QUIRK_MAX_SEC_128),
+ ATA_QUIRK_BROKEN_HPA = (1U << __ATA_QUIRK_BROKEN_HPA),
+ ATA_QUIRK_DISABLE = (1U << __ATA_QUIRK_DISABLE),
+ ATA_QUIRK_HPA_SIZE = (1U << __ATA_QUIRK_HPA_SIZE),
+ ATA_QUIRK_IVB = (1U << __ATA_QUIRK_IVB),
+ ATA_QUIRK_STUCK_ERR = (1U << __ATA_QUIRK_STUCK_ERR),
+ ATA_QUIRK_BRIDGE_OK = (1U << __ATA_QUIRK_BRIDGE_OK),
+ ATA_QUIRK_ATAPI_MOD16_DMA = (1U << __ATA_QUIRK_ATAPI_MOD16_DMA),
+ ATA_QUIRK_FIRMWARE_WARN = (1U << __ATA_QUIRK_FIRMWARE_WARN),
+ ATA_QUIRK_1_5_GBPS = (1U << __ATA_QUIRK_1_5_GBPS),
+ ATA_QUIRK_NOSETXFER = (1U << __ATA_QUIRK_NOSETXFER),
+ ATA_QUIRK_BROKEN_FPDMA_AA = (1U << __ATA_QUIRK_BROKEN_FPDMA_AA),
+ ATA_QUIRK_DUMP_ID = (1U << __ATA_QUIRK_DUMP_ID),
+ ATA_QUIRK_MAX_SEC_LBA48 = (1U << __ATA_QUIRK_MAX_SEC_LBA48),
+ ATA_QUIRK_ATAPI_DMADIR = (1U << __ATA_QUIRK_ATAPI_DMADIR),
+ ATA_QUIRK_NO_NCQ_TRIM = (1U << __ATA_QUIRK_NO_NCQ_TRIM),
+ ATA_QUIRK_NOLPM = (1U << __ATA_QUIRK_NOLPM),
+ ATA_QUIRK_WD_BROKEN_LPM = (1U << __ATA_QUIRK_WD_BROKEN_LPM),
+ ATA_QUIRK_ZERO_AFTER_TRIM = (1U << __ATA_QUIRK_ZERO_AFTER_TRIM),
+ ATA_QUIRK_NO_DMA_LOG = (1U << __ATA_QUIRK_NO_DMA_LOG),
+ ATA_QUIRK_NOTRIM = (1U << __ATA_QUIRK_NOTRIM),
+ ATA_QUIRK_MAX_SEC_1024 = (1U << __ATA_QUIRK_MAX_SEC_1024),
+ ATA_QUIRK_MAX_SEC_8191 = (1U << __ATA_QUIRK_MAX_SEC_8191),
+ ATA_QUIRK_MAX_TRIM_128M = (1U << __ATA_QUIRK_MAX_TRIM_128M),
+ ATA_QUIRK_NO_NCQ_ON_ATI = (1U << __ATA_QUIRK_NO_NCQ_ON_ATI),
+ ATA_QUIRK_NO_LPM_ON_ATI = (1U << __ATA_QUIRK_NO_LPM_ON_ATI),
+ ATA_QUIRK_NO_ID_DEV_LOG = (1U << __ATA_QUIRK_NO_ID_DEV_LOG),
+ ATA_QUIRK_NO_LOG_DIR = (1U << __ATA_QUIRK_NO_LOG_DIR),
+ ATA_QUIRK_NO_FUA = (1U << __ATA_QUIRK_NO_FUA),
+};
+
enum {
/* various global constants */
LIBATA_MAX_PRD = ATA_MAX_PRD / 2,
@@ -390,42 +430,6 @@ enum {
*/
ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 8,
- /*
- * Quirk flags: may be set by libata or controller drivers on drives.
- * Some quirks may be drive/controller pair dependent.
- */
- ATA_QUIRK_DIAGNOSTIC = (1U << __ATA_QUIRK_DIAGNOSTIC),
- ATA_QUIRK_NODMA = (1U << __ATA_QUIRK_NODMA),
- ATA_QUIRK_NONCQ = (1U << __ATA_QUIRK_NONCQ),
- ATA_QUIRK_MAX_SEC_128 = (1U << __ATA_QUIRK_MAX_SEC_128),
- ATA_QUIRK_BROKEN_HPA = (1U << __ATA_QUIRK_BROKEN_HPA),
- ATA_QUIRK_DISABLE = (1U << __ATA_QUIRK_DISABLE),
- ATA_QUIRK_HPA_SIZE = (1U << __ATA_QUIRK_HPA_SIZE),
- ATA_QUIRK_IVB = (1U << __ATA_QUIRK_IVB),
- ATA_QUIRK_STUCK_ERR = (1U << __ATA_QUIRK_STUCK_ERR),
- ATA_QUIRK_BRIDGE_OK = (1U << __ATA_QUIRK_BRIDGE_OK),
- ATA_QUIRK_ATAPI_MOD16_DMA = (1U << __ATA_QUIRK_ATAPI_MOD16_DMA),
- ATA_QUIRK_FIRMWARE_WARN = (1U << __ATA_QUIRK_FIRMWARE_WARN),
- ATA_QUIRK_1_5_GBPS = (1U << __ATA_QUIRK_1_5_GBPS),
- ATA_QUIRK_NOSETXFER = (1U << __ATA_QUIRK_NOSETXFER),
- ATA_QUIRK_BROKEN_FPDMA_AA = (1U << __ATA_QUIRK_BROKEN_FPDMA_AA),
- ATA_QUIRK_DUMP_ID = (1U << __ATA_QUIRK_DUMP_ID),
- ATA_QUIRK_MAX_SEC_LBA48 = (1U << __ATA_QUIRK_MAX_SEC_LBA48),
- ATA_QUIRK_ATAPI_DMADIR = (1U << __ATA_QUIRK_ATAPI_DMADIR),
- ATA_QUIRK_NO_NCQ_TRIM = (1U << __ATA_QUIRK_NO_NCQ_TRIM),
- ATA_QUIRK_NOLPM = (1U << __ATA_QUIRK_NOLPM),
- ATA_QUIRK_WD_BROKEN_LPM = (1U << __ATA_QUIRK_WD_BROKEN_LPM),
- ATA_QUIRK_ZERO_AFTER_TRIM = (1U << __ATA_QUIRK_ZERO_AFTER_TRIM),
- ATA_QUIRK_NO_DMA_LOG = (1U << __ATA_QUIRK_NO_DMA_LOG),
- ATA_QUIRK_NOTRIM = (1U << __ATA_QUIRK_NOTRIM),
- ATA_QUIRK_MAX_SEC_1024 = (1U << __ATA_QUIRK_MAX_SEC_1024),
- ATA_QUIRK_MAX_TRIM_128M = (1U << __ATA_QUIRK_MAX_TRIM_128M),
- ATA_QUIRK_NO_NCQ_ON_ATI = (1U << __ATA_QUIRK_NO_NCQ_ON_ATI),
- ATA_QUIRK_NO_LPM_ON_ATI = (1U << __ATA_QUIRK_NO_LPM_ON_ATI),
- ATA_QUIRK_NO_ID_DEV_LOG = (1U << __ATA_QUIRK_NO_ID_DEV_LOG),
- ATA_QUIRK_NO_LOG_DIR = (1U << __ATA_QUIRK_NO_LOG_DIR),
- ATA_QUIRK_NO_FUA = (1U << __ATA_QUIRK_NO_FUA),
-
/* User visible DMA mask for DMA control. DO NOT renumber. */
ATA_DMA_MASK_ATA = (1 << 0), /* DMA on ATA Disk */
ATA_DMA_MASK_ATAPI = (1 << 1), /* DMA on ATAPI */
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 6077972e8b45..24eb5a88a5c5 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -867,7 +867,7 @@ struct mhi_device_id {
kernel_ulong_t driver_data;
};
-#define AUXILIARY_NAME_SIZE 32
+#define AUXILIARY_NAME_SIZE 40
#define AUXILIARY_MODULE_PREFIX "auxiliary:"
struct auxiliary_device_id {
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 93c945331f39..813da101b5bf 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -80,7 +80,7 @@ static inline void __iomem *
devm_platform_get_and_ioremap_resource(struct platform_device *pdev,
unsigned int index, struct resource **res)
{
- return ERR_PTR(-EINVAL);
+ return IOMEM_ERR_PTR(-EINVAL);
}
@@ -88,14 +88,14 @@ static inline void __iomem *
devm_platform_ioremap_resource(struct platform_device *pdev,
unsigned int index)
{
- return ERR_PTR(-EINVAL);
+ return IOMEM_ERR_PTR(-EINVAL);
}
static inline void __iomem *
devm_platform_ioremap_resource_byname(struct platform_device *pdev,
const char *name)
{
- return ERR_PTR(-EINVAL);
+ return IOMEM_ERR_PTR(-EINVAL);
}
#endif
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 9a25a2911652..c33a96b7391a 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -58,6 +58,12 @@ do { \
#define sysfs_attr_init(attr) do {} while (0)
#endif
+#ifdef CONFIG_CFI
+#define __SYSFS_FUNCTION_ALTERNATIVE(MEMBERS...) struct { MEMBERS }
+#else
+#define __SYSFS_FUNCTION_ALTERNATIVE(MEMBERS...) union { MEMBERS }
+#endif
+
/**
* struct attribute_group - data structure used to declare an attribute group.
* @name: Optional: Attribute group name
@@ -98,14 +104,21 @@ do { \
*/
struct attribute_group {
const char *name;
- umode_t (*is_visible)(struct kobject *,
- struct attribute *, int);
+ __SYSFS_FUNCTION_ALTERNATIVE(
+ umode_t (*is_visible)(struct kobject *,
+ struct attribute *, int);
+ umode_t (*is_visible_const)(struct kobject *,
+ const struct attribute *, int);
+ );
umode_t (*is_bin_visible)(struct kobject *,
const struct bin_attribute *, int);
size_t (*bin_size)(struct kobject *,
const struct bin_attribute *,
int);
- struct attribute **attrs;
+ union {
+ struct attribute **attrs;
+ const struct attribute *const *attrs_const;
+ };
const struct bin_attribute *const *bin_attrs;
};
@@ -238,28 +251,20 @@ struct attribute_group {
.store = _store, \
}
-#define __ATTR_RO(_name) { \
- .attr = { .name = __stringify(_name), .mode = 0444 }, \
- .show = _name##_show, \
-}
-
#define __ATTR_RO_MODE(_name, _mode) { \
.attr = { .name = __stringify(_name), \
.mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \
.show = _name##_show, \
}
-#define __ATTR_RW_MODE(_name, _mode) { \
- .attr = { .name = __stringify(_name), \
- .mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \
- .show = _name##_show, \
- .store = _name##_store, \
-}
+#define __ATTR_RO(_name) \
+ __ATTR_RO_MODE(_name, 0444)
-#define __ATTR_WO(_name) { \
- .attr = { .name = __stringify(_name), .mode = 0200 }, \
- .store = _name##_store, \
-}
+#define __ATTR_RW_MODE(_name, _mode) \
+ __ATTR(_name, _mode, _name##_show, _name##_store)
+
+#define __ATTR_WO(_name) \
+ __ATTR(_name, 0200, NULL, _name##_store)
#define __ATTR_RW(_name) __ATTR(_name, 0644, _name##_show, _name##_store)
@@ -284,7 +289,12 @@ static const struct attribute_group *_name##_groups[] = { \
#define ATTRIBUTE_GROUPS(_name) \
static const struct attribute_group _name##_group = { \
- .attrs = _name##_attrs, \
+ .attrs = _Generic(_name##_attrs, \
+ struct attribute **: \
+ _name##_attrs, \
+ const struct attribute *const *: \
+ (void *)_name##_attrs \
+ ), \
}; \
__ATTRIBUTE_GROUPS(_name)
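A hedged sketch of what the new union members permit: a fully-const attribute array wired through .attrs_const with a const-taking visibility callback. The foo_* names and the kobject registration are illustrative, not a verbatim excerpt.

#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t foo_show(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	return sysfs_emit(buf, "42\n");
}

static const struct kobj_attribute foo_attr = __ATTR_RO(foo);

static const struct attribute *const foo_attrs[] = {
	&foo_attr.attr,
	NULL
};

static umode_t foo_is_visible(struct kobject *kobj,
			      const struct attribute *attr, int n)
{
	return attr->mode;	/* expose everything; filter per-attr here */
}

static const struct attribute_group foo_group = {
	.attrs_const = foo_attrs,
	.is_visible_const = foo_is_visible,
};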
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index 3d8f7d1ce2b8..202da079d500 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -529,41 +529,18 @@ static inline struct tpm2_auth *tpm2_chip_auth(struct tpm_chip *chip)
#endif
}
-void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf,
- u32 handle, u8 *name);
+int tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf,
+ u32 handle, u8 *name);
void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf,
u8 attributes, u8 *passphrase,
int passphraselen);
void tpm_buf_append_auth(struct tpm_chip *chip, struct tpm_buf *buf,
- u8 attributes, u8 *passphrase, int passphraselen);
-static inline void tpm_buf_append_hmac_session_opt(struct tpm_chip *chip,
- struct tpm_buf *buf,
- u8 attributes,
- u8 *passphrase,
- int passphraselen)
-{
- struct tpm_header *head;
- int offset;
-
- if (tpm2_chip_auth(chip)) {
- tpm_buf_append_hmac_session(chip, buf, attributes, passphrase, passphraselen);
- } else {
- offset = buf->handles * 4 + TPM_HEADER_SIZE;
- head = (struct tpm_header *)buf->data;
-
- /*
- * If the only sessions are optional, the command tag must change to
- * TPM2_ST_NO_SESSIONS.
- */
- if (tpm_buf_length(buf) == offset)
- head->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
- }
-}
+ u8 *passphrase, int passphraselen);
#ifdef CONFIG_TCG_TPM2_HMAC
int tpm2_start_auth_session(struct tpm_chip *chip);
-void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf);
+int tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf);
int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf,
int rc);
void tpm2_end_auth_session(struct tpm_chip *chip);
@@ -577,10 +554,13 @@ static inline int tpm2_start_auth_session(struct tpm_chip *chip)
static inline void tpm2_end_auth_session(struct tpm_chip *chip)
{
}
-static inline void tpm_buf_fill_hmac_session(struct tpm_chip *chip,
- struct tpm_buf *buf)
+
+static inline int tpm_buf_fill_hmac_session(struct tpm_chip *chip,
+ struct tpm_buf *buf)
{
+ return 0;
}
+
static inline int tpm_buf_check_hmac_response(struct tpm_chip *chip,
struct tpm_buf *buf,
int rc)
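With the signature changes above, session setup can now fail and must be checked. A hedged caller sketch for code inside the TPM core, where the internal tpm.h provides tpm_transmit_cmd(); this is not a verbatim excerpt:

static int example_cmd(struct tpm_chip *chip, struct tpm_buf *buf)
{
	int rc;

	rc = tpm_buf_fill_hmac_session(chip, buf);
	if (rc)
		return rc;

	rc = tpm_transmit_cmd(chip, buf, 0, "example command");
	/* verifies the response HMAC and unwinds the session state */
	return tpm_buf_check_hmac_response(chip, buf, rc);
}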
diff --git a/include/scsi/scsi_dbg.h b/include/scsi/scsi_dbg.h
index bd29cdb513a5..efcdc78530d5 100644
--- a/include/scsi/scsi_dbg.h
+++ b/include/scsi/scsi_dbg.h
@@ -11,11 +11,11 @@ extern size_t __scsi_format_command(char *, size_t,
const unsigned char *, size_t);
extern void scsi_print_sense_hdr(const struct scsi_device *, const char *,
const struct scsi_sense_hdr *);
-extern void scsi_print_sense(const struct scsi_cmnd *);
+extern void scsi_print_sense(struct scsi_cmnd *);
extern void __scsi_print_sense(const struct scsi_device *, const char *name,
const unsigned char *sense_buffer,
int sense_len);
-extern void scsi_print_result(const struct scsi_cmnd *, const char *, int);
+extern void scsi_print_result(struct scsi_cmnd *, const char *, int);
#ifdef CONFIG_SCSI_CONSTANTS
extern bool scsi_opcode_sa_name(int, int, const char **, const char **);
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 993008cdea65..d32f5841f4f8 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -179,6 +179,12 @@ struct scsi_device {
unsigned manage_shutdown:1;
/*
+ * If true, let the high-level device driver (sd) manage the device
+ * power state for system restart (reboot) operations.
+ */
+ unsigned manage_restart:1;
+
+ /*
* If set and if the device is runtime suspended, ask the high-level
* device driver (sd) to force a runtime resume of the device.
*/
@@ -313,8 +319,8 @@ sdev_prefix_printk(const char *, const struct scsi_device *, const char *,
#define sdev_printk(l, sdev, fmt, a...) \
sdev_prefix_printk(l, sdev, NULL, fmt, ##a)
-__printf(3, 4) void
-scmd_printk(const char *, const struct scsi_cmnd *, const char *, ...);
+__printf(3, 4) void scmd_printk(const char *, struct scsi_cmnd *, const char *,
+ ...);
#define scmd_dbg(scmd, fmt, a...) \
do { \
@@ -558,6 +564,10 @@ int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
const struct scsi_exec_args *args);
void scsi_failures_reset_retries(struct scsi_failures *failures);
+struct scsi_cmnd *scsi_get_internal_cmd(struct scsi_device *sdev,
+ enum dma_data_direction data_direction,
+ blk_mq_req_flags_t flags);
+void scsi_put_internal_cmd(struct scsi_cmnd *scmd);
extern void sdev_disable_disk_events(struct scsi_device *sdev);
extern void sdev_enable_disk_events(struct scsi_device *sdev);
extern int scsi_vpd_lun_id(struct scsi_device *, char *, size_t);
@@ -589,6 +599,22 @@ static inline unsigned int sdev_id(struct scsi_device *sdev)
#define scmd_id(scmd) sdev_id((scmd)->device)
#define scmd_channel(scmd) sdev_channel((scmd)->device)
+/**
+ * scsi_device_is_pseudo_dev() - Whether a device is a pseudo SCSI device.
+ * @sdev: SCSI device to examine
+ *
+ * A pseudo SCSI device can be used to allocate SCSI commands but does not show
+ * up in sysfs. Additionally, the logical unit information in *@sdev is made up.
+ *
+ * This function tests the LUN number instead of comparing @sdev with
+ * @sdev->host->pseudo_sdev because this function may be called before
+ * @sdev->host->pseudo_sdev has been initialized.
+ */
+static inline bool scsi_device_is_pseudo_dev(struct scsi_device *sdev)
+{
+ return sdev->lun == U64_MAX;
+}
+
/*
* checks for positions of the SCSI state machine
*/
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index f5a243261236..e87cf7eadd26 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -87,6 +87,12 @@ struct scsi_host_template {
int (* queuecommand)(struct Scsi_Host *, struct scsi_cmnd *);
/*
+ * Queue a reserved command (BLK_MQ_REQ_RESERVED). The .queuecommand()
+ * documentation also applies to the .queue_reserved_command() callback.
+ */
+ int (*queue_reserved_command)(struct Scsi_Host *, struct scsi_cmnd *);
+
+ /*
* The commit_rqs function is used to trigger a hardware
* doorbell after some requests have been queued with
* queuecommand, when an error is encountered before sending
@@ -375,11 +381,20 @@ struct scsi_host_template {
/*
* This determines if we will use a non-interrupt driven
* or an interrupt driven scheme. It is set to the maximum number
- * of simultaneous commands a single hw queue in HBA will accept.
+ * of simultaneous commands a single hw queue in the HBA will accept,
+ * excluding internal commands.
*/
int can_queue;
/*
+ * This determines how many commands the HBA will set aside
+ * for internal commands. This number will be added to
+ * @can_queue to calculate the maximum number of simultaneous
+ * commands sent to the host.
+ */
+ int nr_reserved_cmds;
+
+ /*
* In many instances, especially where disconnect / reconnect are
* supported, our host also has an ID on the SCSI bus. If this is
* the case, then it must be reserved. Please set this_id to -1 if
@@ -611,7 +626,17 @@ struct Scsi_Host {
unsigned short max_cmd_len;
int this_id;
+
+ /*
+ * Number of commands this host can handle at the same time.
+ * This excludes reserved commands as specified by nr_reserved_cmds.
+ */
int can_queue;
+ /*
+ * Number of reserved commands to allocate, if any.
+ */
+ unsigned int nr_reserved_cmds;
+
short cmd_per_lun;
short unsigned int sg_tablesize;
short unsigned int sg_prot_tablesize;
@@ -703,6 +728,12 @@ struct Scsi_Host {
struct device shost_gendev, shost_dev;
/*
+ * A SCSI device structure used for sending internal commands to the
+ * HBA. There is no corresponding logical unit inside the SCSI device.
+ */
+ struct scsi_device *pseudo_sdev;
+
+ /*
* Points to the transport data (if any) which is allocated
* separately
*/
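A hedged sketch of how an LLD might use the reserved-command plumbing declared above: set nr_reserved_cmds in the host template, then allocate internal commands against the pseudo device. The NULL-on-failure convention for scsi_get_internal_cmd() is an assumption here, as is the my_* naming.

#include <linux/blk-mq.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

static int my_send_internal(struct Scsi_Host *shost)
{
	struct scsi_cmnd *scmd;

	scmd = scsi_get_internal_cmd(shost->pseudo_sdev, DMA_NONE,
				     BLK_MQ_REQ_RESERVED);
	if (!scmd)	/* assumption: NULL on allocation failure */
		return -ENOMEM;

	/* The LLD recognises the command in .queue_reserved_command()
	 * via scsi_device_is_pseudo_dev(scmd->device). */

	scsi_put_internal_cmd(scmd);
	return 0;
}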
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 4063a701081b..e32de80854b6 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -121,8 +121,10 @@ sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
bool target_sense_desc_format(struct se_device *dev);
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb);
-bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
- struct block_device *bdev);
+bool target_configure_unmap_from_bdev(struct se_dev_attrib *attrib,
+ struct block_device *bdev);
+void target_configure_write_atomic_from_bdev(struct se_dev_attrib *attrib,
+ struct block_device *bdev);
static inline bool target_dev_configured(struct se_device *se_dev)
{
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index c4d9116904aa..7016d93fa383 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -158,6 +158,7 @@ enum se_cmd_flags_table {
SCF_TASK_ATTR_SET = (1 << 17),
SCF_TREAT_READ_AS_NORMAL = (1 << 18),
SCF_TASK_ORDERED_SYNC = (1 << 19),
+ SCF_ATOMIC = (1 << 20),
};
/*
@@ -671,9 +672,9 @@ struct se_lun_acl {
};
struct se_dev_entry_io_stats {
- u32 total_cmds;
- u32 read_bytes;
- u32 write_bytes;
+ u64 total_cmds;
+ u64 read_bytes;
+ u64 write_bytes;
};
struct se_dev_entry {
@@ -731,6 +732,11 @@ struct se_dev_attrib {
u32 unmap_granularity;
u32 unmap_granularity_alignment;
u32 max_write_same_len;
+ u32 atomic_max_len;
+ u32 atomic_alignment;
+ u32 atomic_granularity;
+ u32 atomic_max_with_boundary;
+ u32 atomic_max_boundary;
u8 submit_type;
struct se_device *da_dev;
struct config_group da_group;
@@ -744,9 +750,9 @@ struct se_port_stat_grps {
};
struct scsi_port_stats {
- atomic_long_t cmd_pdus;
- atomic_long_t tx_data_octets;
- atomic_long_t rx_data_octets;
+ u64 cmd_pdus;
+ u64 tx_data_octets;
+ u64 rx_data_octets;
};
struct se_lun {
@@ -773,7 +779,7 @@ struct se_lun {
spinlock_t lun_tg_pt_gp_lock;
struct se_portal_group *lun_tpg;
- struct scsi_port_stats lun_stats;
+ struct scsi_port_stats __percpu *lun_stats;
struct config_group lun_group;
struct se_port_stat_grps port_stat_grps;
struct completion lun_shutdown_comp;
@@ -806,9 +812,9 @@ struct se_device_queue {
};
struct se_dev_io_stats {
- u32 total_cmds;
- u32 read_bytes;
- u32 write_bytes;
+ u64 total_cmds;
+ u64 read_bytes;
+ u64 write_bytes;
};
struct se_device {
diff --git a/include/uapi/linux/media/amlogic/c3-isp-config.h b/include/uapi/linux/media/amlogic/c3-isp-config.h
index 0a3c1cc55ccb..92db5dcdda18 100644
--- a/include/uapi/linux/media/amlogic/c3-isp-config.h
+++ b/include/uapi/linux/media/amlogic/c3-isp-config.h
@@ -186,7 +186,7 @@ enum c3_isp_params_block_type {
#define C3_ISP_PARAMS_BLOCK_FL_ENABLE V4L2_ISP_PARAMS_FL_BLOCK_ENABLE
/**
- * struct c3_isp_params_block_header - C3 ISP parameter block header
+ * c3_isp_params_block_header - C3 ISP parameter block header
*
* This structure represents the common part of all the ISP configuration
* blocks and is identical to :c:type:`v4l2_isp_params_block_header`.
diff --git a/include/ufs/ufs.h b/include/ufs/ufs.h
index 245a6a829ce9..ab8f6c07b5a2 100644
--- a/include/ufs/ufs.h
+++ b/include/ufs/ufs.h
@@ -651,6 +651,11 @@ struct ufs_dev_info {
u8 rtt_cap; /* bDeviceRTTCap */
bool hid_sup;
+
+ /* Unique device ID string (manufacturer+model+serial+version+date) */
+ char *device_id;
+ u8 rpmb_io_size;
+ u8 rpmb_region_size[4];
};
#endif /* End of Header */
diff --git a/include/ufs/ufs_quirks.h b/include/ufs/ufs_quirks.h
index 83563247c36c..e9c59ec1ceae 100644
--- a/include/ufs/ufs_quirks.h
+++ b/include/ufs/ufs_quirks.h
@@ -101,13 +101,6 @@ struct ufs_dev_quirk {
#define UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES (1 << 10)
/*
- * Some UFS devices require delay after VCC power rail is turned-off.
- * Enable this quirk to introduce 5ms delays after VCC power-off during
- * suspend flow.
- */
-#define UFS_DEVICE_QUIRK_DELAY_AFTER_LPM (1 << 11)
-
-/*
* Some ufs devices may need more time to be in hibern8 before exiting.
* Enable this quirk to give it an additional 100us.
*/
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index 0f95576bf1f6..19154228780b 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -78,7 +78,7 @@ struct uic_command {
const u32 argument1;
u32 argument2;
u32 argument3;
- int cmd_active;
+ bool cmd_active;
struct completion done;
};
@@ -161,7 +161,6 @@ struct ufs_pm_lvl_states {
* @ucd_prdt_dma_addr: PRDT dma address for debug
* @ucd_rsp_dma_addr: UPIU response dma address for debug
* @ucd_req_dma_addr: UPIU request dma address for debug
- * @cmd: pointer to SCSI command
* @scsi_status: SCSI status of the command
* @command_type: SCSI, UFS, Query.
* @task_tag: Task tag of the command
@@ -186,11 +185,9 @@ struct ufshcd_lrb {
dma_addr_t ucd_rsp_dma_addr;
dma_addr_t ucd_prdt_dma_addr;
- struct scsi_cmnd *cmd;
int scsi_status;
int command_type;
- int task_tag;
u8 lun; /* UPIU LUN id field is only 8-bit wide */
bool intr_cmd;
bool req_abort_skip;
@@ -239,13 +236,11 @@ struct ufs_query {
 * struct ufs_dev_cmd - all fields associated with device management commands
* @type: device management command type - Query, NOP OUT
* @lock: lock to allow one command at a time
- * @complete: internal commands completion
* @query: Device management query information
*/
struct ufs_dev_cmd {
enum dev_cmd_type type;
struct mutex lock;
- struct completion complete;
struct ufs_query query;
};
@@ -833,6 +828,7 @@ enum ufshcd_mcq_opr {
* @host: Scsi_Host instance of the driver
* @dev: device handle
* @ufs_device_wlun: WLUN that controls the entire UFS device.
+ * @ufs_rpmb_wlun: RPMB WLUN SCSI device
* @hwmon_device: device instance registered with the hwmon core.
* @curr_dev_pwr_mode: active UFS device power mode.
* @uic_link_state: active state of the link to the UFS device.
@@ -840,7 +836,6 @@ enum ufshcd_mcq_opr {
* @spm_lvl: desired UFS power management level during system PM.
* @pm_op_in_progress: whether or not a PM operation is in progress.
* @ahit: value of Auto-Hibernate Idle Timer register.
- * @lrb: local reference block
* @outstanding_tasks: Bits representing outstanding task requests
* @outstanding_lock: Protects @outstanding_reqs.
* @outstanding_reqs: Bits representing outstanding transfer requests
@@ -849,7 +844,6 @@ enum ufshcd_mcq_opr {
* @nutrs: Transfer Request Queue depth supported by controller
* @nortt - Max outstanding RTTs supported by controller
* @nutmrs: Task Management Queue depth supported by controller
- * @reserved_slot: Used to submit device commands. Protected by @dev_cmd.lock.
* @ufs_version: UFS Version to which controller complies
* @vops: pointer to variant specific operations
* @vps: pointer to variant specific parameters
@@ -940,7 +934,6 @@ enum ufshcd_mcq_opr {
* @res: array of resource info of MCQ registers
* @mcq_base: Multi circular queue registers base address
* @uhq: array of supported hardware queues
- * @dev_cmd_queue: Queue for issuing device management commands
* @mcq_opr: MCQ operation and runtime registers
* @ufs_rtc_update_work: A work for UFS RTC periodic update
* @pm_qos_req: PM QoS request handle
@@ -948,8 +941,8 @@ enum ufshcd_mcq_opr {
* @pm_qos_mutex: synchronizes PM QoS request and status updates
* @critical_health_count: count of critical health exceptions
* @dev_lvl_exception_count: count of device level exceptions since last reset
- * @dev_lvl_exception_id: vendor specific information about the
- * device level exception event.
+ * @dev_lvl_exception_id: vendor specific information about the device level exception event.
+ * @rpmbs: list of OP-TEE RPMB devices (one per RPMB region)
*/
struct ufs_hba {
void __iomem *mmio_base;
@@ -967,6 +960,7 @@ struct ufs_hba {
struct Scsi_Host *host;
struct device *dev;
struct scsi_device *ufs_device_wlun;
+ struct scsi_device *ufs_rpmb_wlun;
#ifdef CONFIG_SCSI_UFS_HWMON
struct device *hwmon_device;
@@ -983,8 +977,6 @@ struct ufs_hba {
/* Auto-Hibernate Idle Timer register value */
u32 ahit;
- struct ufshcd_lrb *lrb;
-
unsigned long outstanding_tasks;
spinlock_t outstanding_lock;
unsigned long outstanding_reqs;
@@ -994,7 +986,6 @@ struct ufs_hba {
int nortt;
u32 mcq_capabilities;
int nutmrs;
- u32 reserved_slot;
u32 ufs_version;
const struct ufs_hba_variant_ops *vops;
struct ufs_hba_variant_params *vps;
@@ -1112,7 +1103,6 @@ struct ufs_hba {
bool mcq_esi_enabled;
void __iomem *mcq_base;
struct ufs_hw_queue *uhq;
- struct ufs_hw_queue *dev_cmd_queue;
struct ufshcd_mcq_opr_info_t mcq_opr[OPR_MAX];
struct delayed_work ufs_rtc_update_work;
@@ -1124,6 +1114,8 @@ struct ufs_hba {
int critical_health_count;
atomic_t dev_lvl_exception_count;
u64 dev_lvl_exception_id;
+ u32 vcc_off_delay_us;
+ struct list_head rpmbs;
};
/**
@@ -1302,7 +1294,6 @@ static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
void ufshcd_enable_irq(struct ufs_hba *hba);
void ufshcd_disable_irq(struct ufs_hba *hba);
-void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs);
int ufshcd_alloc_host(struct device *, struct ufs_hba **);
int ufshcd_hba_enable(struct ufs_hba *hba);
int ufshcd_init(struct ufs_hba *, void __iomem *, unsigned int);
@@ -1438,10 +1429,6 @@ static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba)
void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
const struct ufs_dev_quirk *fixups);
-#define SD_ASCII_STD true
-#define SD_RAW false
-int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
- u8 **buf, bool ascii);
void ufshcd_hold(struct ufs_hba *hba);
void ufshcd_release(struct ufs_hba *hba);
@@ -1494,5 +1481,7 @@ int ufshcd_write_ee_control(struct ufs_hba *hba);
int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
const u16 *other_mask, u16 set, u16 clr);
void ufshcd_force_error_recovery(struct ufs_hba *hba);
+void ufshcd_pm_qos_update(struct ufs_hba *hba, bool on);
+u32 ufshcd_us_to_ahit(unsigned int timer);
#endif /* End of Header */
diff --git a/include/ufs/ufshci.h b/include/ufs/ufshci.h
index e64b70132101..d36df24242a3 100644
--- a/include/ufs/ufshci.h
+++ b/include/ufs/ufshci.h
@@ -83,12 +83,14 @@ enum {
};
enum {
+ /* Submission Queue (SQ) Configuration Registers */
REG_SQATTR = 0x0,
REG_SQLBA = 0x4,
REG_SQUBA = 0x8,
REG_SQDAO = 0xC,
REG_SQISAO = 0x10,
+ /* Completion Queue (CQ) Configuration Registers */
REG_CQATTR = 0x20,
REG_CQLBA = 0x24,
REG_CQUBA = 0x28,
@@ -96,6 +98,7 @@ enum {
REG_CQISAO = 0x30,
};
+/* Operation and Runtime Registers - Submission Queues and Completion Queues */
enum {
REG_SQHP = 0x0,
REG_SQTP = 0x4,
@@ -569,10 +572,26 @@ struct cq_entry {
__le16 prd_table_offset;
/* DW 4 */
- __le32 status;
+ u8 overall_status;
+ u8 extended_error_code;
+ __le16 reserved_1;
- /* DW 5-7 */
- __le32 reserved[3];
+ /* DW 5 */
+ u8 task_tag;
+ u8 lun;
+#if defined(__BIG_ENDIAN)
+ u8 ext_iid:4;
+ u8 iid:4;
+#elif defined(__LITTLE_ENDIAN)
+ u8 iid:4;
+ u8 ext_iid:4;
+#else
+#error
+#endif
+ u8 reserved_2;
+
+ /* DW 6-7 */
+ __le32 reserved_3[2];
};
static_assert(sizeof(struct cq_entry) == 32);
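+
+/*
+ * Illustrative sketch (assuming EXT_IID carries the upper nibble of the
+ * initiator ID): the full 8-bit initiator ID of a completion entry can be
+ * reassembled as
+ *
+ *	u8 full_iid = (cqe->ext_iid << 4) | cqe->iid;
+ */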
diff --git a/include/ufs/unipro.h b/include/ufs/unipro.h
index 360e1245fb40..59de737490ca 100644
--- a/include/ufs/unipro.h
+++ b/include/ufs/unipro.h
@@ -111,6 +111,9 @@
#define PA_TXLINKSTARTUPHS 0x1544
#define PA_AVAILRXDATALANES 0x1540
#define PA_MINRXTRAILINGCLOCKS 0x1543
+#define PA_TXHSG1SYNCLENGTH 0x1552
+#define PA_TXHSG2SYNCLENGTH 0x1554
+#define PA_TXHSG3SYNCLENGTH 0x1556
#define PA_LOCAL_TX_LCC_ENABLE 0x155E
#define PA_ACTIVETXDATALANES 0x1560
#define PA_CONNECTEDTXDATALANES 0x1561
@@ -160,7 +163,9 @@
#define PA_PACPFRAMECOUNT 0x15C0
#define PA_PACPERRORCOUNT 0x15C1
#define PA_PHYTESTCONTROL 0x15C2
-#define PA_TXHSADAPTTYPE 0x15D4
+#define PA_TXHSG4SYNCLENGTH 0x15D0
+#define PA_TXHSADAPTTYPE 0x15D4
+#define PA_TXHSG5SYNCLENGTH 0x15D6
/* Adapt type for the PA_TXHSADAPTTYPE attribute */
#define PA_REFRESH_ADAPT 0x00
@@ -174,6 +179,7 @@
#define VS_POWERSTATE 0xD083
#define VS_MPHYCFGUPDT 0xD085
#define VS_DEBUGOMC 0xD09E
+#define VS_MPHYDISABLE 0xD0C1
#define PA_GRANULARITY_MIN_VAL 1
#define PA_GRANULARITY_MAX_VAL 6
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 742b23ef0d8b..c2654075377e 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -688,7 +688,7 @@ choice
help
This selects the default access restrictions for debugfs.
It can be overridden with kernel command line option
- debugfs=[on,no-mount,off]. The restrictions apply for API access
+ debugfs=[on,off]. The restrictions apply for API access
and filesystem registration.
config DEBUG_FS_ALLOW_ALL
@@ -697,13 +697,6 @@ config DEBUG_FS_ALLOW_ALL
No restrictions apply. Both API and filesystem registration
is on. This is the normal default operation.
-config DEBUG_FS_DISALLOW_MOUNT
- bool "Do not register debugfs as filesystem"
- help
- The API is open but filesystem is not loaded. Clients can still do
- their work and read with debug tools that do not need
- debugfs filesystem.
-
config DEBUG_FS_ALLOW_NONE
bool "No access"
help
diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h
index 139786c0337d..5ea7b49f7dd9 100644
--- a/rust/bindings/bindings_helper.h
+++ b/rust/bindings/bindings_helper.h
@@ -58,6 +58,7 @@
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
+#include <linux/i2c.h>
#include <linux/ioport.h>
#include <linux/jiffies.h>
#include <linux/jump_label.h>
@@ -86,6 +87,12 @@
#include <linux/xarray.h>
#include <trace/events/rust_sample.h>
+/*
+ * The driver-core Rust code needs to know about some C driver-core private
+ * structures.
+ */
+#include <../../drivers/base/base.h>
+
#if defined(CONFIG_DRM_PANIC_SCREEN_QR_CODE)
// Used by `#[export]` in `drivers/gpu/drm/drm_panic_qr.rs`.
#include <drm/drm_panic.h>
diff --git a/rust/helpers/pci.c b/rust/helpers/pci.c
index fb814572b236..bf8173979c5e 100644
--- a/rust/helpers/pci.c
+++ b/rust/helpers/pci.c
@@ -23,9 +23,21 @@ bool rust_helper_dev_is_pci(const struct device *dev)
}
#ifndef CONFIG_PCI_MSI
+int rust_helper_pci_alloc_irq_vectors(struct pci_dev *dev,
+ unsigned int min_vecs,
+ unsigned int max_vecs,
+ unsigned int flags)
+{
+ return pci_alloc_irq_vectors(dev, min_vecs, max_vecs, flags);
+}
+
+void rust_helper_pci_free_irq_vectors(struct pci_dev *dev)
+{
+ pci_free_irq_vectors(dev);
+}
+
int rust_helper_pci_irq_vector(struct pci_dev *pdev, unsigned int nvec)
{
return pci_irq_vector(pdev, nvec);
}
-
#endif
diff --git a/rust/helpers/time.c b/rust/helpers/time.c
index a318e9fa4408..67a36ccc3ec4 100644
--- a/rust/helpers/time.c
+++ b/rust/helpers/time.c
@@ -33,3 +33,8 @@ s64 rust_helper_ktime_to_ms(const ktime_t kt)
{
return ktime_to_ms(kt);
}
+
+void rust_helper_udelay(unsigned long usec)
+{
+ udelay(usec);
+}
diff --git a/rust/kernel/auxiliary.rs b/rust/kernel/auxiliary.rs
index 7a3b0b9c418e..56f3c180e8f6 100644
--- a/rust/kernel/auxiliary.rs
+++ b/rust/kernel/auxiliary.rs
@@ -7,6 +7,7 @@
use crate::{
bindings, container_of, device,
device_id::{RawDeviceId, RawDeviceIdIndex},
+ devres::Devres,
driver,
error::{from_result, to_result, Result},
prelude::*,
@@ -15,6 +16,7 @@ use crate::{
};
use core::{
marker::PhantomData,
+ mem::offset_of,
ptr::{addr_of_mut, NonNull},
};
@@ -68,9 +70,9 @@ impl<T: Driver + 'static> Adapter<T> {
let info = T::ID_TABLE.info(id.index());
from_result(|| {
- let data = T::probe(adev, info)?;
+ let data = T::probe(adev, info);
- adev.as_ref().set_drvdata(data);
+ adev.as_ref().set_drvdata(data)?;
Ok(0)
})
}
@@ -85,7 +87,7 @@ impl<T: Driver + 'static> Adapter<T> {
// SAFETY: `remove_callback` is only ever called after a successful call to
// `probe_callback`, hence it's guaranteed that `Device::set_drvdata()` has been called
// and stored a `Pin<KBox<T>>`.
- drop(unsafe { adev.as_ref().drvdata_obtain::<Pin<KBox<T>>>() });
+ drop(unsafe { adev.as_ref().drvdata_obtain::<T>() });
}
}
@@ -184,7 +186,7 @@ pub trait Driver {
/// Auxiliary driver probe.
///
    /// Called when an auxiliary device matches a corresponding driver.
- fn probe(dev: &Device<device::Core>, id_info: &Self::IdInfo) -> Result<Pin<KBox<Self>>>;
+ fn probe(dev: &Device<device::Core>, id_info: &Self::IdInfo) -> impl PinInit<Self, Error>;
}
/// The auxiliary device representation.
@@ -214,14 +216,25 @@ impl<Ctx: device::DeviceContext> Device<Ctx> {
// `struct auxiliary_device`.
unsafe { (*self.as_raw()).id }
}
+}
+
+impl Device<device::Bound> {
+ /// Returns a bound reference to the parent [`device::Device`].
+ pub fn parent(&self) -> &device::Device<device::Bound> {
+ let parent = (**self).parent();
- /// Returns a reference to the parent [`device::Device`], if any.
- pub fn parent(&self) -> Option<&device::Device> {
- self.as_ref().parent()
+ // SAFETY: A bound auxiliary device always has a bound parent device.
+ unsafe { parent.as_bound() }
}
}
impl Device {
+ /// Returns a reference to the parent [`device::Device`].
+ pub fn parent(&self) -> &device::Device {
+ // SAFETY: A `struct auxiliary_device` always has a parent.
+ unsafe { self.as_ref().parent().unwrap_unchecked() }
+ }
+
extern "C" fn release(dev: *mut bindings::device) {
// SAFETY: By the type invariant `self.0.as_raw` is a pointer to the `struct device`
// embedded in `struct auxiliary_device`.
@@ -233,6 +246,12 @@ impl Device {
}
}
+// SAFETY: `auxiliary::Device` is a transparent wrapper of `struct auxiliary_device`.
+// The offset is guaranteed to point to a valid device field inside `auxiliary::Device`.
+unsafe impl<Ctx: device::DeviceContext> device::AsBusDevice<Ctx> for Device<Ctx> {
+ const OFFSET: usize = offset_of!(bindings::auxiliary_device, dev);
+}
+
// SAFETY: `Device` is a transparent wrapper of a type that doesn't depend on `Device`'s generic
// argument.
kernel::impl_device_context_deref!(unsafe { Device });
@@ -278,8 +297,8 @@ unsafe impl Sync for Device {}
/// The registration of an auxiliary device.
///
-/// This type represents the registration of a [`struct auxiliary_device`]. When an instance of this
-/// type is dropped, its respective auxiliary device will be unregistered from the system.
+/// This type represents the registration of a [`struct auxiliary_device`]. When its parent device
+/// is unbound, the corresponding auxiliary device will be unregistered from the system.
///
/// # Invariants
///
@@ -289,44 +308,55 @@ pub struct Registration(NonNull<bindings::auxiliary_device>);
impl Registration {
/// Create and register a new auxiliary device.
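+    ///
+    /// A minimal usage sketch (names are illustrative) from a parent driver's
+    /// probe routine:
+    ///
+    /// ```ignore
+    /// let reg = kernel::auxiliary::Registration::new(
+    ///     parent,                    // a `&Device<Bound>` of the parent
+    ///     kernel::c_str!("my-func"), // auxiliary device name
+    ///     0,                         // id
+    ///     kernel::c_str!("my_mod"),  // module name
+    /// );
+    /// let devres = KBox::pin_init(reg, GFP_KERNEL)?;
+    /// ```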
- pub fn new(parent: &device::Device, name: &CStr, id: u32, modname: &CStr) -> Result<Self> {
- let boxed = KBox::new(Opaque::<bindings::auxiliary_device>::zeroed(), GFP_KERNEL)?;
- let adev = boxed.get();
-
- // SAFETY: It's safe to set the fields of `struct auxiliary_device` on initialization.
- unsafe {
- (*adev).dev.parent = parent.as_raw();
- (*adev).dev.release = Some(Device::release);
- (*adev).name = name.as_char_ptr();
- (*adev).id = id;
- }
-
- // SAFETY: `adev` is guaranteed to be a valid pointer to a `struct auxiliary_device`,
- // which has not been initialized yet.
- unsafe { bindings::auxiliary_device_init(adev) };
+ pub fn new<'a>(
+ parent: &'a device::Device<device::Bound>,
+ name: &'a CStr,
+ id: u32,
+ modname: &'a CStr,
+ ) -> impl PinInit<Devres<Self>, Error> + 'a {
+ pin_init::pin_init_scope(move || {
+ let boxed = KBox::new(Opaque::<bindings::auxiliary_device>::zeroed(), GFP_KERNEL)?;
+ let adev = boxed.get();
+
+ // SAFETY: It's safe to set the fields of `struct auxiliary_device` on initialization.
+ unsafe {
+ (*adev).dev.parent = parent.as_raw();
+ (*adev).dev.release = Some(Device::release);
+ (*adev).name = name.as_char_ptr();
+ (*adev).id = id;
+ }
- // Now that `adev` is initialized, leak the `Box`; the corresponding memory will be freed
- // by `Device::release` when the last reference to the `struct auxiliary_device` is dropped.
- let _ = KBox::into_raw(boxed);
-
- // SAFETY:
- // - `adev` is guaranteed to be a valid pointer to a `struct auxiliary_device`, which has
- // been initialialized,
- // - `modname.as_char_ptr()` is a NULL terminated string.
- let ret = unsafe { bindings::__auxiliary_device_add(adev, modname.as_char_ptr()) };
- if ret != 0 {
// SAFETY: `adev` is guaranteed to be a valid pointer to a `struct auxiliary_device`,
- // which has been initialialized.
- unsafe { bindings::auxiliary_device_uninit(adev) };
-
- return Err(Error::from_errno(ret));
- }
-
- // SAFETY: `adev` is guaranteed to be non-null, since the `KBox` was allocated successfully.
- //
- // INVARIANT: The device will remain registered until `auxiliary_device_delete()` is called,
- // which happens in `Self::drop()`.
- Ok(Self(unsafe { NonNull::new_unchecked(adev) }))
+ // which has not been initialized yet.
+ unsafe { bindings::auxiliary_device_init(adev) };
+
+ // Now that `adev` is initialized, leak the `Box`; the corresponding memory will be
+ // freed by `Device::release` when the last reference to the `struct auxiliary_device`
+ // is dropped.
+ let _ = KBox::into_raw(boxed);
+
+ // SAFETY:
+ // - `adev` is guaranteed to be a valid pointer to a `struct auxiliary_device`, which
+ // has been initialized,
+ // - `modname.as_char_ptr()` is a NULL terminated string.
+ let ret = unsafe { bindings::__auxiliary_device_add(adev, modname.as_char_ptr()) };
+ if ret != 0 {
+ // SAFETY: `adev` is guaranteed to be a valid pointer to a
+ // `struct auxiliary_device`, which has been initialized.
+ unsafe { bindings::auxiliary_device_uninit(adev) };
+
+ return Err(Error::from_errno(ret));
+ }
+
+ // INVARIANT: The device will remain registered until `auxiliary_device_delete()` is
+ // called, which happens in `Self::drop()`.
+ Ok(Devres::new(
+ parent,
+ // SAFETY: `adev` is guaranteed to be non-null, since the `KBox` was allocated
+ // successfully.
+ Self(unsafe { NonNull::new_unchecked(adev) }),
+ ))
+ })
}
}
diff --git a/rust/kernel/cpufreq.rs b/rust/kernel/cpufreq.rs
index 1a555fcb120a..f968fbd22890 100644
--- a/rust/kernel/cpufreq.rs
+++ b/rust/kernel/cpufreq.rs
@@ -893,9 +893,9 @@ pub trait Driver {
/// fn probe(
/// pdev: &platform::Device<Core>,
/// _id_info: Option<&Self::IdInfo>,
-/// ) -> Result<Pin<KBox<Self>>> {
+/// ) -> impl PinInit<Self, Error> {
/// cpufreq::Registration::<SampleDriver>::new_foreign_owned(pdev.as_ref())?;
-/// Ok(KBox::new(Self {}, GFP_KERNEL)?.into())
+/// Ok(Self {})
/// }
/// }
/// ```
diff --git a/rust/kernel/debugfs.rs b/rust/kernel/debugfs.rs
index 8c35d032acfe..facad81e8290 100644
--- a/rust/kernel/debugfs.rs
+++ b/rust/kernel/debugfs.rs
@@ -21,12 +21,15 @@ use core::mem::ManuallyDrop;
use core::ops::Deref;
mod traits;
-pub use traits::{Reader, Writer};
+pub use traits::{BinaryReader, BinaryReaderMut, BinaryWriter, Reader, Writer};
mod callback_adapters;
use callback_adapters::{FormatAdapter, NoWriter, WritableAdapter};
mod file_ops;
-use file_ops::{FileOps, ReadFile, ReadWriteFile, WriteFile};
+use file_ops::{
+ BinaryReadFile, BinaryReadWriteFile, BinaryWriteFile, FileOps, ReadFile, ReadWriteFile,
+ WriteFile,
+};
#[cfg(CONFIG_DEBUG_FS)]
mod entry;
#[cfg(CONFIG_DEBUG_FS)]
@@ -150,6 +153,32 @@ impl Dir {
self.create_file(name, data, file_ops)
}
+ /// Creates a read-only binary file in this directory.
+ ///
+ /// The file's contents are produced by invoking [`BinaryWriter::write_to_slice`] on the value
+ /// initialized by `data`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use kernel::c_str;
+ /// # use kernel::debugfs::Dir;
+ /// # use kernel::prelude::*;
+ /// # let dir = Dir::new(c_str!("my_debugfs_dir"));
+ /// let file = KBox::pin_init(dir.read_binary_file(c_str!("foo"), [0x1, 0x2]), GFP_KERNEL)?;
+ /// # Ok::<(), Error>(())
+ /// ```
+ pub fn read_binary_file<'a, T, E: 'a>(
+ &'a self,
+ name: &'a CStr,
+ data: impl PinInit<T, E> + 'a,
+ ) -> impl PinInit<File<T>, E> + 'a
+ where
+ T: BinaryWriter + Send + Sync + 'static,
+ {
+ self.create_file(name, data, &T::FILE_OPS)
+ }
+
/// Creates a read-only file in this directory, with contents from a callback.
///
/// `f` must be a function item or a non-capturing closure.
@@ -206,6 +235,22 @@ impl Dir {
self.create_file(name, data, file_ops)
}
+ /// Creates a read-write binary file in this directory.
+ ///
+ /// Reading the file uses the [`BinaryWriter`] implementation.
+ /// Writing to the file uses the [`BinaryReader`] implementation.
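+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch, assuming `[u8; 2]` implements `AsBytes` and
+    /// `FromBytes` so that the `Mutex` delegate impls apply:
+    ///
+    /// ```
+    /// # use kernel::c_str;
+    /// # use kernel::debugfs::Dir;
+    /// # use kernel::prelude::*;
+    /// # let dir = Dir::new(c_str!("my_debugfs_dir"));
+    /// let file = KBox::pin_init(
+    ///     dir.read_write_binary_file(c_str!("blob"), kernel::new_mutex!([0x1u8, 0x2])),
+    ///     GFP_KERNEL,
+    /// )?;
+    /// # Ok::<(), Error>(())
+    /// ```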
+ pub fn read_write_binary_file<'a, T, E: 'a>(
+ &'a self,
+ name: &'a CStr,
+ data: impl PinInit<T, E> + 'a,
+ ) -> impl PinInit<File<T>, E> + 'a
+ where
+ T: BinaryWriter + BinaryReader + Send + Sync + 'static,
+ {
+ let file_ops = &<T as BinaryReadWriteFile<_>>::FILE_OPS;
+ self.create_file(name, data, file_ops)
+ }
+
/// Creates a read-write file in this directory, with logic from callbacks.
///
/// Reading from the file is handled by `f`. Writing to the file is handled by `w`.
@@ -248,6 +293,23 @@ impl Dir {
self.create_file(name, data, &T::FILE_OPS)
}
+ /// Creates a write-only binary file in this directory.
+ ///
+ /// The file owns its backing data. Writing to the file uses the [`BinaryReader`]
+ /// implementation.
+ ///
+ /// The file is removed when the returned [`File`] is dropped.
+ pub fn write_binary_file<'a, T, E: 'a>(
+ &'a self,
+ name: &'a CStr,
+ data: impl PinInit<T, E> + 'a,
+ ) -> impl PinInit<File<T>, E> + 'a
+ where
+ T: BinaryReader + Send + Sync + 'static,
+ {
+ self.create_file(name, data, &T::FILE_OPS)
+ }
+
/// Creates a write-only file in this directory, with write logic from a callback.
///
/// `w` must be a function item or a non-capturing closure.
@@ -468,6 +530,20 @@ impl<'data, 'dir> ScopedDir<'data, 'dir> {
self.create_file(name, data, &T::FILE_OPS)
}
+ /// Creates a read-only binary file in this directory.
+ ///
+ /// The file's contents are produced by invoking [`BinaryWriter::write_to_slice`].
+ ///
+ /// This function does not produce an owning handle to the file. The created file is removed
+ /// when the [`Scope`] that this directory belongs to is dropped.
+ pub fn read_binary_file<T: BinaryWriter + Send + Sync + 'static>(
+ &self,
+ name: &CStr,
+ data: &'data T,
+ ) {
+ self.create_file(name, data, &T::FILE_OPS)
+ }
+
/// Creates a read-only file in this directory, with contents from a callback.
///
/// The file contents are generated by calling `f` with `data`.
@@ -505,6 +581,22 @@ impl<'data, 'dir> ScopedDir<'data, 'dir> {
self.create_file(name, data, vtable)
}
+ /// Creates a read-write binary file in this directory.
+ ///
+ /// Reading the file uses the [`BinaryWriter`] implementation on `data`. Writing to the file
+ /// uses the [`BinaryReader`] implementation on `data`.
+ ///
+ /// This function does not produce an owning handle to the file. The created file is removed
+ /// when the [`Scope`] that this directory belongs to is dropped.
+ pub fn read_write_binary_file<T: BinaryWriter + BinaryReader + Send + Sync + 'static>(
+ &self,
+ name: &CStr,
+ data: &'data T,
+ ) {
+ let vtable = &<T as BinaryReadWriteFile<_>>::FILE_OPS;
+ self.create_file(name, data, vtable)
+ }
+
/// Creates a read-write file in this directory, with logic from callbacks.
///
/// Reading from the file is handled by `f`. Writing to the file is handled by `w`.
@@ -544,6 +636,20 @@ impl<'data, 'dir> ScopedDir<'data, 'dir> {
self.create_file(name, data, vtable)
}
+ /// Creates a write-only binary file in this directory.
+ ///
+ /// Writing to the file uses the [`BinaryReader`] implementation on `data`.
+ ///
+ /// This function does not produce an owning handle to the file. The created file is removed
+ /// when the [`Scope`] that this directory belongs to is dropped.
+ pub fn write_binary_file<T: BinaryReader + Send + Sync + 'static>(
+ &self,
+ name: &CStr,
+ data: &'data T,
+ ) {
+ self.create_file(name, data, &T::FILE_OPS)
+ }
+
/// Creates a write-only file in this directory, with write logic from a callback.
///
/// Writing to the file is handled by `w`.
diff --git a/rust/kernel/debugfs/file_ops.rs b/rust/kernel/debugfs/file_ops.rs
index 9ad5e3fa6f69..8a0442d6dd7a 100644
--- a/rust/kernel/debugfs/file_ops.rs
+++ b/rust/kernel/debugfs/file_ops.rs
@@ -1,9 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2025 Google LLC.
-use super::{Reader, Writer};
+use super::{BinaryReader, BinaryWriter, Reader, Writer};
use crate::debugfs::callback_adapters::Adapter;
use crate::fmt;
+use crate::fs::file;
use crate::prelude::*;
use crate::seq_file::SeqFile;
use crate::seq_print;
@@ -245,3 +246,140 @@ impl<T: Reader + Sync> WriteFile<T> for T {
unsafe { FileOps::new(operations, 0o200) }
};
}
+
+extern "C" fn blob_read<T: BinaryWriter>(
+ file: *mut bindings::file,
+ buf: *mut c_char,
+ count: usize,
+ ppos: *mut bindings::loff_t,
+) -> isize {
+ // SAFETY:
+ // - `file` is a valid pointer to a `struct file`.
+ // - The type invariant of `FileOps` guarantees that `private_data` points to a valid `T`.
+ let this = unsafe { &*((*file).private_data.cast::<T>()) };
+
+ // SAFETY:
+ // - `ppos` is a valid `file::Offset` pointer.
+ // - We have exclusive access to `ppos`.
+ let pos: &mut file::Offset = unsafe { &mut *ppos };
+
+ let mut writer = UserSlice::new(UserPtr::from_ptr(buf.cast()), count).writer();
+
+ let ret = || -> Result<isize> {
+ let written = this.write_to_slice(&mut writer, pos)?;
+
+ Ok(written.try_into()?)
+ }();
+
+ match ret {
+ Ok(n) => n,
+ Err(e) => e.to_errno() as isize,
+ }
+}
+
+/// Representation of [`FileOps`] for read-only binary files.
+pub(crate) trait BinaryReadFile<T> {
+ const FILE_OPS: FileOps<T>;
+}
+
+impl<T: BinaryWriter + Sync> BinaryReadFile<T> for T {
+ const FILE_OPS: FileOps<T> = {
+ let operations = bindings::file_operations {
+ read: Some(blob_read::<T>),
+ llseek: Some(bindings::default_llseek),
+ open: Some(bindings::simple_open),
+ // SAFETY: `file_operations` supports zeroes in all fields.
+ ..unsafe { core::mem::zeroed() }
+ };
+
+ // SAFETY:
+ // - The private data of `struct inode` does always contain a pointer to a valid `T`.
+ // - `simple_open()` stores the `struct inode`'s private data in the private data of the
+ // corresponding `struct file`.
+ // - `blob_read()` re-creates a reference to `T` from the `struct file`'s private data.
+ // - `default_llseek()` does not access the `struct file`'s private data.
+ unsafe { FileOps::new(operations, 0o400) }
+ };
+}
+
+extern "C" fn blob_write<T: BinaryReader>(
+ file: *mut bindings::file,
+ buf: *const c_char,
+ count: usize,
+ ppos: *mut bindings::loff_t,
+) -> isize {
+ // SAFETY:
+ // - `file` is a valid pointer to a `struct file`.
+ // - The type invariant of `FileOps` guarantees that `private_data` points to a valid `T`.
+ let this = unsafe { &*((*file).private_data.cast::<T>()) };
+
+ // SAFETY:
+ // - `ppos` is a valid `file::Offset` pointer.
+ // - We have exclusive access to `ppos`.
+ let pos: &mut file::Offset = unsafe { &mut *ppos };
+
+ let mut reader = UserSlice::new(UserPtr::from_ptr(buf.cast_mut().cast()), count).reader();
+
+ let ret = || -> Result<isize> {
+ let read = this.read_from_slice(&mut reader, pos)?;
+
+ Ok(read.try_into()?)
+ }();
+
+ match ret {
+ Ok(n) => n,
+ Err(e) => e.to_errno() as isize,
+ }
+}
+
+/// Representation of [`FileOps`] for write-only binary files.
+pub(crate) trait BinaryWriteFile<T> {
+ const FILE_OPS: FileOps<T>;
+}
+
+impl<T: BinaryReader + Sync> BinaryWriteFile<T> for T {
+ const FILE_OPS: FileOps<T> = {
+ let operations = bindings::file_operations {
+ write: Some(blob_write::<T>),
+ llseek: Some(bindings::default_llseek),
+ open: Some(bindings::simple_open),
+ // SAFETY: `file_operations` supports zeroes in all fields.
+ ..unsafe { core::mem::zeroed() }
+ };
+
+ // SAFETY:
+        // - The private data of `struct inode` always contains a pointer to a valid `T`.
+ // - `simple_open()` stores the `struct inode`'s private data in the private data of the
+ // corresponding `struct file`.
+ // - `blob_write()` re-creates a reference to `T` from the `struct file`'s private data.
+ // - `default_llseek()` does not access the `struct file`'s private data.
+ unsafe { FileOps::new(operations, 0o200) }
+ };
+}
+
+/// Representation of [`FileOps`] for read/write binary files.
+pub(crate) trait BinaryReadWriteFile<T> {
+ const FILE_OPS: FileOps<T>;
+}
+
+impl<T: BinaryWriter + BinaryReader + Sync> BinaryReadWriteFile<T> for T {
+ const FILE_OPS: FileOps<T> = {
+ let operations = bindings::file_operations {
+ read: Some(blob_read::<T>),
+ write: Some(blob_write::<T>),
+ llseek: Some(bindings::default_llseek),
+ open: Some(bindings::simple_open),
+ // SAFETY: `file_operations` supports zeroes in all fields.
+ ..unsafe { core::mem::zeroed() }
+ };
+
+ // SAFETY:
+        // - The private data of `struct inode` always contains a pointer to a valid `T`.
+ // - `simple_open()` stores the `struct inode`'s private data in the private data of the
+ // corresponding `struct file`.
+ // - `blob_read()` re-creates a reference to `T` from the `struct file`'s private data.
+ // - `blob_write()` re-creates a reference to `T` from the `struct file`'s private data.
+ // - `default_llseek()` does not access the `struct file`'s private data.
+ unsafe { FileOps::new(operations, 0o600) }
+ };
+}
diff --git a/rust/kernel/debugfs/traits.rs b/rust/kernel/debugfs/traits.rs
index e8a8a98f18dc..3eee60463fd5 100644
--- a/rust/kernel/debugfs/traits.rs
+++ b/rust/kernel/debugfs/traits.rs
@@ -3,11 +3,16 @@
//! Traits for rendering or updating values exported to DebugFS.
+use crate::alloc::Allocator;
use crate::fmt;
+use crate::fs::file;
use crate::prelude::*;
use crate::sync::atomic::{Atomic, AtomicBasicOps, AtomicType, Relaxed};
+use crate::sync::Arc;
use crate::sync::Mutex;
-use crate::uaccess::UserSliceReader;
+use crate::transmute::{AsBytes, FromBytes};
+use crate::uaccess::{UserSliceReader, UserSliceWriter};
+use core::ops::{Deref, DerefMut};
use core::str::FromStr;
/// A trait for types that can be written into a string.
@@ -36,6 +41,110 @@ impl<T: fmt::Debug> Writer for T {
}
}
+/// Trait for types that can be written out as binary.
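+///
+/// A blanket implementation is provided for any `T: AsBytes` (see below), so a
+/// plain data struct needs no manual impl. A minimal sketch, where the safety
+/// comment is the implementer's obligation:
+///
+/// ```
+/// #[repr(C)]
+/// struct Stats {
+///     rx: u64,
+///     tx: u64,
+/// }
+///
+/// // SAFETY: `Stats` is `#[repr(C)]` with no padding and no invalid byte patterns.
+/// unsafe impl kernel::transmute::AsBytes for Stats {}
+/// ```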
+pub trait BinaryWriter {
+ /// Writes the binary form of `self` into `writer`.
+ ///
+ /// `offset` is the requested offset into the binary representation of `self`.
+ ///
+    /// On success, returns the number of bytes written into `writer`.
+ fn write_to_slice(
+ &self,
+ writer: &mut UserSliceWriter,
+ offset: &mut file::Offset,
+ ) -> Result<usize>;
+}
+
+// Base implementation for any `T: AsBytes`.
+impl<T: AsBytes> BinaryWriter for T {
+ fn write_to_slice(
+ &self,
+ writer: &mut UserSliceWriter,
+ offset: &mut file::Offset,
+ ) -> Result<usize> {
+ writer.write_slice_file(self.as_bytes(), offset)
+ }
+}
+
+// Delegate for `Mutex<T>`: Support a `T` with an outer mutex.
+impl<T: BinaryWriter> BinaryWriter for Mutex<T> {
+ fn write_to_slice(
+ &self,
+ writer: &mut UserSliceWriter,
+ offset: &mut file::Offset,
+ ) -> Result<usize> {
+ let guard = self.lock();
+
+ guard.write_to_slice(writer, offset)
+ }
+}
+
+// Delegate for `Box<T, A>`: Support a `Box<T, A>` with no lock or an inner lock.
+impl<T, A> BinaryWriter for Box<T, A>
+where
+ T: BinaryWriter,
+ A: Allocator,
+{
+ fn write_to_slice(
+ &self,
+ writer: &mut UserSliceWriter,
+ offset: &mut file::Offset,
+ ) -> Result<usize> {
+ self.deref().write_to_slice(writer, offset)
+ }
+}
+
+// Delegate for `Pin<Box<T, A>>`: Support a `Pin<Box<T, A>>` with no lock or an inner lock.
+impl<T, A> BinaryWriter for Pin<Box<T, A>>
+where
+ T: BinaryWriter,
+ A: Allocator,
+{
+ fn write_to_slice(
+ &self,
+ writer: &mut UserSliceWriter,
+ offset: &mut file::Offset,
+ ) -> Result<usize> {
+ self.deref().write_to_slice(writer, offset)
+ }
+}
+
+// Delegate for `Arc<T>`: Support an `Arc<T>` with no lock or an inner lock.
+impl<T> BinaryWriter for Arc<T>
+where
+ T: BinaryWriter,
+{
+ fn write_to_slice(
+ &self,
+ writer: &mut UserSliceWriter,
+ offset: &mut file::Offset,
+ ) -> Result<usize> {
+ self.deref().write_to_slice(writer, offset)
+ }
+}
+
+// Delegate for `Vec<T, A>`.
+impl<T, A> BinaryWriter for Vec<T, A>
+where
+ T: AsBytes,
+ A: Allocator,
+{
+ fn write_to_slice(
+ &self,
+ writer: &mut UserSliceWriter,
+ offset: &mut file::Offset,
+ ) -> Result<usize> {
+ let slice = self.as_slice();
+
+ // SAFETY: `T: AsBytes` allows us to treat `&[T]` as `&[u8]`.
+ let buffer = unsafe {
+ core::slice::from_raw_parts(slice.as_ptr().cast(), core::mem::size_of_val(slice))
+ };
+
+ writer.write_slice_file(buffer, offset)
+ }
+}
+
/// A trait for types that can be updated from a user slice.
///
/// This works similarly to `FromStr`, but operates on a `UserSliceReader` rather than a &str.
@@ -81,3 +190,130 @@ where
Ok(())
}
}
+
+/// Trait for types that can be constructed from a binary representation.
+///
+/// See also [`BinaryReader`] for interior mutability.
+pub trait BinaryReaderMut {
+ /// Reads the binary form of `self` from `reader`.
+ ///
+ /// Same as [`BinaryReader::read_from_slice`], but takes a mutable reference.
+ ///
+ /// `offset` is the requested offset into the binary representation of `self`.
+ ///
+ /// On success, returns the number of bytes read from `reader`.
+ fn read_from_slice_mut(
+ &mut self,
+ reader: &mut UserSliceReader,
+ offset: &mut file::Offset,
+ ) -> Result<usize>;
+}
+
+// Base implementation for any `T: AsBytes + FromBytes`.
+impl<T: AsBytes + FromBytes> BinaryReaderMut for T {
+ fn read_from_slice_mut(
+ &mut self,
+ reader: &mut UserSliceReader,
+ offset: &mut file::Offset,
+ ) -> Result<usize> {
+ reader.read_slice_file(self.as_bytes_mut(), offset)
+ }
+}
+
+// Delegate for `Box<T, A>`: Support a `Box<T, A>` with an outer lock.
+impl<T: ?Sized + BinaryReaderMut, A: Allocator> BinaryReaderMut for Box<T, A> {
+ fn read_from_slice_mut(
+ &mut self,
+ reader: &mut UserSliceReader,
+ offset: &mut file::Offset,
+ ) -> Result<usize> {
+ self.deref_mut().read_from_slice_mut(reader, offset)
+ }
+}
+
+// Delegate for `Vec<T, A>`: Support a `Vec<T, A>` with an outer lock.
+impl<T, A> BinaryReaderMut for Vec<T, A>
+where
+ T: AsBytes + FromBytes,
+ A: Allocator,
+{
+ fn read_from_slice_mut(
+ &mut self,
+ reader: &mut UserSliceReader,
+ offset: &mut file::Offset,
+ ) -> Result<usize> {
+ let slice = self.as_mut_slice();
+
+ // SAFETY: `T: AsBytes + FromBytes` allows us to treat `&mut [T]` as `&mut [u8]`.
+ let buffer = unsafe {
+ core::slice::from_raw_parts_mut(
+ slice.as_mut_ptr().cast(),
+ core::mem::size_of_val(slice),
+ )
+ };
+
+ reader.read_slice_file(buffer, offset)
+ }
+}
+
+/// Trait for types that can be constructed from a binary representation.
+///
+/// See also [`BinaryReaderMut`] for the mutable version.
+pub trait BinaryReader {
+ /// Reads the binary form of `self` from `reader`.
+ ///
+ /// `offset` is the requested offset into the binary representation of `self`.
+ ///
+ /// On success, returns the number of bytes read from `reader`.
+ fn read_from_slice(
+ &self,
+ reader: &mut UserSliceReader,
+ offset: &mut file::Offset,
+ ) -> Result<usize>;
+}
+
+// Delegate for `Mutex<T>`: Support a `T` with an outer `Mutex`.
+impl<T: BinaryReaderMut + Unpin> BinaryReader for Mutex<T> {
+ fn read_from_slice(
+ &self,
+ reader: &mut UserSliceReader,
+ offset: &mut file::Offset,
+ ) -> Result<usize> {
+ let mut this = self.lock();
+
+ this.read_from_slice_mut(reader, offset)
+ }
+}
+
+// Delegate for `Box<T, A>`: Support a `Box<T, A>` with an inner lock.
+impl<T: ?Sized + BinaryReader, A: Allocator> BinaryReader for Box<T, A> {
+ fn read_from_slice(
+ &self,
+ reader: &mut UserSliceReader,
+ offset: &mut file::Offset,
+ ) -> Result<usize> {
+ self.deref().read_from_slice(reader, offset)
+ }
+}
+
+// Delegate for `Pin<Box<T, A>>`: Support a `Pin<Box<T, A>>` with an inner lock.
+impl<T: ?Sized + BinaryReader, A: Allocator> BinaryReader for Pin<Box<T, A>> {
+ fn read_from_slice(
+ &self,
+ reader: &mut UserSliceReader,
+ offset: &mut file::Offset,
+ ) -> Result<usize> {
+ self.deref().read_from_slice(reader, offset)
+ }
+}
+
+// Delegate for `Arc<T>`: Support an `Arc<T>` with an inner lock.
+impl<T: ?Sized + BinaryReader> BinaryReader for Arc<T> {
+ fn read_from_slice(
+ &self,
+ reader: &mut UserSliceReader,
+ offset: &mut file::Offset,
+ ) -> Result<usize> {
+ self.deref().read_from_slice(reader, offset)
+ }
+}
diff --git a/rust/kernel/device.rs b/rust/kernel/device.rs
index 7116dd7539a6..c79be2e2bfe3 100644
--- a/rust/kernel/device.rs
+++ b/rust/kernel/device.rs
@@ -6,10 +6,11 @@
use crate::{
bindings, fmt,
+ prelude::*,
sync::aref::ARef,
types::{ForeignOwnable, Opaque},
};
-use core::{marker::PhantomData, ptr};
+use core::{any::TypeId, marker::PhantomData, ptr};
#[cfg(CONFIG_PRINTK)]
use crate::c_str;
@@ -17,6 +18,9 @@ use crate::str::CStrExt as _;
pub mod property;
+// Assert that we can `read()` / `write()` a `TypeId` instance from / into `struct driver_type`.
+static_assert!(core::mem::size_of::<bindings::driver_type>() >= core::mem::size_of::<TypeId>());
+
/// The core representation of a device in the kernel's driver model.
///
/// This structure represents the Rust abstraction for a C `struct device`. A [`Device`] can either
@@ -198,10 +202,31 @@ impl Device {
}
impl Device<CoreInternal> {
+ fn set_type_id<T: 'static>(&self) {
+ // SAFETY: By the type invariants, `self.as_raw()` is a valid pointer to a `struct device`.
+ let private = unsafe { (*self.as_raw()).p };
+
+ // SAFETY: For a bound device (implied by the `CoreInternal` device context), `private` is
+ // guaranteed to be a valid pointer to a `struct device_private`.
+ let driver_type = unsafe { &raw mut (*private).driver_type };
+
+ // SAFETY: `driver_type` is valid for (unaligned) writes of a `TypeId`.
+ unsafe {
+ driver_type
+ .cast::<TypeId>()
+ .write_unaligned(TypeId::of::<T>())
+ };
+ }
+
/// Store a pointer to the bound driver's private data.
- pub fn set_drvdata(&self, data: impl ForeignOwnable) {
+ pub fn set_drvdata<T: 'static>(&self, data: impl PinInit<T, Error>) -> Result {
+ let data = KBox::pin_init(data, GFP_KERNEL)?;
+
// SAFETY: By the type invariants, `self.as_raw()` is a valid pointer to a `struct device`.
- unsafe { bindings::dev_set_drvdata(self.as_raw(), data.into_foreign().cast()) }
+ unsafe { bindings::dev_set_drvdata(self.as_raw(), data.into_foreign().cast()) };
+ self.set_type_id::<T>();
+
+ Ok(())
}
/// Take ownership of the private data stored in this [`Device`].
@@ -211,16 +236,19 @@ impl Device<CoreInternal> {
/// - Must only be called once after a preceding call to [`Device::set_drvdata`].
/// - The type `T` must match the type of the `ForeignOwnable` previously stored by
/// [`Device::set_drvdata`].
- pub unsafe fn drvdata_obtain<T: ForeignOwnable>(&self) -> T {
+ pub unsafe fn drvdata_obtain<T: 'static>(&self) -> Pin<KBox<T>> {
// SAFETY: By the type invariants, `self.as_raw()` is a valid pointer to a `struct device`.
let ptr = unsafe { bindings::dev_get_drvdata(self.as_raw()) };
+ // SAFETY: By the type invariants, `self.as_raw()` is a valid pointer to a `struct device`.
+ unsafe { bindings::dev_set_drvdata(self.as_raw(), core::ptr::null_mut()) };
+
// SAFETY:
// - By the safety requirements of this function, `ptr` comes from a previous call to
// `into_foreign()`.
// - `dev_get_drvdata()` guarantees to return the same pointer given to `dev_set_drvdata()`
// in `into_foreign()`.
- unsafe { T::from_foreign(ptr.cast()) }
+ unsafe { Pin::<KBox<T>>::from_foreign(ptr.cast()) }
}
/// Borrow the driver's private data bound to this [`Device`].
@@ -231,7 +259,23 @@ impl Device<CoreInternal> {
/// [`Device::drvdata_obtain`].
/// - The type `T` must match the type of the `ForeignOwnable` previously stored by
/// [`Device::set_drvdata`].
- pub unsafe fn drvdata_borrow<T: ForeignOwnable>(&self) -> T::Borrowed<'_> {
+ pub unsafe fn drvdata_borrow<T: 'static>(&self) -> Pin<&T> {
+ // SAFETY: `drvdata_unchecked()` has the exact same safety requirements as the ones
+ // required by this method.
+ unsafe { self.drvdata_unchecked() }
+ }
+}
+
+impl Device<Bound> {
+ /// Borrow the driver's private data bound to this [`Device`].
+ ///
+ /// # Safety
+ ///
+ /// - Must only be called after a preceding call to [`Device::set_drvdata`] and before
+ /// [`Device::drvdata_obtain`].
+ /// - The type `T` must match the type of the `ForeignOwnable` previously stored by
+ /// [`Device::set_drvdata`].
+ unsafe fn drvdata_unchecked<T: 'static>(&self) -> Pin<&T> {
// SAFETY: By the type invariants, `self.as_raw()` is a valid pointer to a `struct device`.
let ptr = unsafe { bindings::dev_get_drvdata(self.as_raw()) };
@@ -240,7 +284,46 @@ impl Device<CoreInternal> {
// `into_foreign()`.
// - `dev_get_drvdata()` guarantees to return the same pointer given to `dev_set_drvdata()`
// in `into_foreign()`.
- unsafe { T::borrow(ptr.cast()) }
+ unsafe { Pin::<KBox<T>>::borrow(ptr.cast()) }
+ }
+
+ fn match_type_id<T: 'static>(&self) -> Result {
+ // SAFETY: By the type invariants, `self.as_raw()` is a valid pointer to a `struct device`.
+ let private = unsafe { (*self.as_raw()).p };
+
+ // SAFETY: For a bound device, `private` is guaranteed to be a valid pointer to a
+ // `struct device_private`.
+ let driver_type = unsafe { &raw mut (*private).driver_type };
+
+ // SAFETY:
+ // - `driver_type` is valid for (unaligned) reads of a `TypeId`.
+ // - A bound device guarantees that `driver_type` contains a valid `TypeId` value.
+ let type_id = unsafe { driver_type.cast::<TypeId>().read_unaligned() };
+
+ if type_id != TypeId::of::<T>() {
+ return Err(EINVAL);
+ }
+
+ Ok(())
+ }
+
+ /// Access a driver's private data.
+ ///
+ /// Returns a pinned reference to the driver's private data or [`EINVAL`] if it doesn't match
+ /// the asserted type `T`.
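+    ///
+    /// A minimal sketch (`MyData` is a hypothetical driver-private type):
+    ///
+    /// ```ignore
+    /// fn show(dev: &Device<Bound>) -> Result {
+    ///     let data: Pin<&MyData> = dev.drvdata::<MyData>()?;
+    ///     // `data` remains valid for as long as the `Bound` reference does.
+    ///     Ok(())
+    /// }
+    /// ```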
+ pub fn drvdata<T: 'static>(&self) -> Result<Pin<&T>> {
+ // SAFETY: By the type invariants, `self.as_raw()` is a valid pointer to a `struct device`.
+ if unsafe { bindings::dev_get_drvdata(self.as_raw()) }.is_null() {
+ return Err(ENOENT);
+ }
+
+ self.match_type_id::<T>()?;
+
+ // SAFETY:
+ // - The above check of `dev_get_drvdata()` guarantees that we are called after
+ // `set_drvdata()` and before `drvdata_obtain()`.
+ // - We've just checked that the type of the driver's private data is in fact `T`.
+ Ok(unsafe { self.drvdata_unchecked() })
}
}
@@ -512,6 +595,39 @@ impl DeviceContext for Core {}
impl DeviceContext for CoreInternal {}
impl DeviceContext for Normal {}
+/// Convert device references to bus device references.
+///
+/// Bus devices can implement this trait to allow abstractions to provide the bus device in
+/// class device callbacks.
+///
+/// This must not be used by drivers and is intended for bus and class device abstractions only.
+///
+/// # Safety
+///
+/// `AsBusDevice::OFFSET` must be the offset of the embedded base `struct device` field within a
+/// bus device structure.
+pub unsafe trait AsBusDevice<Ctx: DeviceContext>: AsRef<Device<Ctx>> {
+ /// The relative offset to the device field.
+ ///
+    /// Use the `offset_of!(bindings, field)` macro to avoid breakage.
+ const OFFSET: usize;
+
+ /// Convert a reference to [`Device`] into `Self`.
+ ///
+ /// # Safety
+ ///
+ /// `dev` must be contained in `Self`.
+ unsafe fn from_device(dev: &Device<Ctx>) -> &Self
+ where
+ Self: Sized,
+ {
+ let raw = dev.as_raw();
+ // SAFETY: `raw - Self::OFFSET` is guaranteed by the safety requirements
+ // to be a valid pointer to `Self`.
+ unsafe { &*raw.byte_sub(Self::OFFSET).cast::<Self>() }
+ }
+}
+
/// # Safety
///
/// The type given as `$device` must be a transparent wrapper of a type that doesn't depend on the
diff --git a/rust/kernel/devres.rs b/rust/kernel/devres.rs
index 2392c281459e..835d9c11948e 100644
--- a/rust/kernel/devres.rs
+++ b/rust/kernel/devres.rs
@@ -52,8 +52,20 @@ struct Inner<T: Send> {
/// # Examples
///
/// ```no_run
-/// # use kernel::{bindings, device::{Bound, Device}, devres::Devres, io::{Io, IoRaw}};
-/// # use core::ops::Deref;
+/// use kernel::{
+/// bindings,
+/// device::{
+/// Bound,
+/// Device,
+/// },
+/// devres::Devres,
+/// io::{
+/// Io,
+/// IoRaw,
+/// PhysAddr,
+/// },
+/// };
+/// use core::ops::Deref;
///
/// // See also [`pci::Bar`] for a real example.
/// struct IoMem<const SIZE: usize>(IoRaw<SIZE>);
@@ -66,7 +78,7 @@ struct Inner<T: Send> {
/// unsafe fn new(paddr: usize) -> Result<Self>{
/// // SAFETY: By the safety requirements of this function [`paddr`, `paddr` + `SIZE`) is
/// // valid for `ioremap`.
-/// let addr = unsafe { bindings::ioremap(paddr as bindings::phys_addr_t, SIZE) };
+/// let addr = unsafe { bindings::ioremap(paddr as PhysAddr, SIZE) };
/// if addr.is_null() {
/// return Err(ENOMEM);
/// }
diff --git a/rust/kernel/dma.rs b/rust/kernel/dma.rs
index 4e0af3e1a3b9..84d3c67269e8 100644
--- a/rust/kernel/dma.rs
+++ b/rust/kernel/dma.rs
@@ -12,6 +12,7 @@ use crate::{
sync::aref::ARef,
transmute::{AsBytes, FromBytes},
};
+use core::ptr::NonNull;
/// DMA address type.
///
@@ -358,7 +359,7 @@ pub struct CoherentAllocation<T: AsBytes + FromBytes> {
dev: ARef<device::Device>,
dma_handle: DmaAddress,
count: usize,
- cpu_addr: *mut T,
+ cpu_addr: NonNull<T>,
dma_attrs: Attrs,
}
@@ -392,7 +393,7 @@ impl<T: AsBytes + FromBytes> CoherentAllocation<T> {
.ok_or(EOVERFLOW)?;
let mut dma_handle = 0;
// SAFETY: Device pointer is guaranteed as valid by the type invariant on `Device`.
- let ret = unsafe {
+ let addr = unsafe {
bindings::dma_alloc_attrs(
dev.as_raw(),
size,
@@ -401,9 +402,7 @@ impl<T: AsBytes + FromBytes> CoherentAllocation<T> {
dma_attrs.as_raw(),
)
};
- if ret.is_null() {
- return Err(ENOMEM);
- }
+ let addr = NonNull::new(addr).ok_or(ENOMEM)?;
// INVARIANT:
// - We just successfully allocated a coherent region which is accessible for
// `count` elements, hence the cpu address is valid. We also hold a refcounted reference
@@ -414,7 +413,7 @@ impl<T: AsBytes + FromBytes> CoherentAllocation<T> {
dev: dev.into(),
dma_handle,
count,
- cpu_addr: ret.cast::<T>(),
+ cpu_addr: addr.cast(),
dma_attrs,
})
}
@@ -446,13 +445,13 @@ impl<T: AsBytes + FromBytes> CoherentAllocation<T> {
/// Returns the base address to the allocated region in the CPU's virtual address space.
pub fn start_ptr(&self) -> *const T {
- self.cpu_addr
+ self.cpu_addr.as_ptr()
}
/// Returns the base address to the allocated region in the CPU's virtual address space as
/// a mutable pointer.
pub fn start_ptr_mut(&mut self) -> *mut T {
- self.cpu_addr
+ self.cpu_addr.as_ptr()
}
/// Returns a DMA handle which may be given to the device as the DMA address base of
@@ -505,7 +504,7 @@ impl<T: AsBytes + FromBytes> CoherentAllocation<T> {
// data is also guaranteed by the safety requirements of the function.
// - `offset + count` can't overflow since it is smaller than `self.count` and we've checked
// that `self.count` won't overflow early in the constructor.
- Ok(unsafe { core::slice::from_raw_parts(self.cpu_addr.add(offset), count) })
+ Ok(unsafe { core::slice::from_raw_parts(self.start_ptr().add(offset), count) })
}
/// Performs the same functionality as [`CoherentAllocation::as_slice`], except that a mutable
@@ -525,7 +524,7 @@ impl<T: AsBytes + FromBytes> CoherentAllocation<T> {
// data is also guaranteed by the safety requirements of the function.
// - `offset + count` can't overflow since it is smaller than `self.count` and we've checked
// that `self.count` won't overflow early in the constructor.
- Ok(unsafe { core::slice::from_raw_parts_mut(self.cpu_addr.add(offset), count) })
+ Ok(unsafe { core::slice::from_raw_parts_mut(self.start_ptr_mut().add(offset), count) })
}
/// Writes data to the region starting from `offset`. `offset` is in units of `T`, not the
@@ -557,7 +556,11 @@ impl<T: AsBytes + FromBytes> CoherentAllocation<T> {
// - `offset + count` can't overflow since it is smaller than `self.count` and we've checked
// that `self.count` won't overflow early in the constructor.
unsafe {
- core::ptr::copy_nonoverlapping(src.as_ptr(), self.cpu_addr.add(offset), src.len())
+ core::ptr::copy_nonoverlapping(
+ src.as_ptr(),
+ self.start_ptr_mut().add(offset),
+ src.len(),
+ )
};
Ok(())
}
@@ -576,7 +579,7 @@ impl<T: AsBytes + FromBytes> CoherentAllocation<T> {
// and we've just checked that the range and index is within bounds.
// - `offset` can't overflow since it is smaller than `self.count` and we've checked
// that `self.count` won't overflow early in the constructor.
- Ok(unsafe { self.cpu_addr.add(offset) })
+ Ok(unsafe { self.cpu_addr.as_ptr().add(offset) })
}
/// Reads the value of `field` and ensures that its type is [`FromBytes`].
@@ -637,7 +640,7 @@ impl<T: AsBytes + FromBytes> Drop for CoherentAllocation<T> {
bindings::dma_free_attrs(
self.dev.as_raw(),
size,
- self.cpu_addr.cast(),
+ self.start_ptr_mut().cast(),
self.dma_handle,
self.dma_attrs.as_raw(),
)
diff --git a/rust/kernel/driver.rs b/rust/kernel/driver.rs
index 279e3af20682..9beae2e3d57e 100644
--- a/rust/kernel/driver.rs
+++ b/rust/kernel/driver.rs
@@ -24,7 +24,7 @@
//! const ACPI_ID_TABLE: Option<acpi::IdTable<Self::IdInfo>> = None;
//!
//! /// Driver probe.
-//! fn probe(dev: &Device<device::Core>, id_info: &Self::IdInfo) -> Result<Pin<KBox<Self>>>;
+//! fn probe(dev: &Device<device::Core>, id_info: &Self::IdInfo) -> impl PinInit<Self, Error>;
//!
//! /// Driver unbind (optional).
//! fn unbind(dev: &Device<device::Core>, this: Pin<&Self>) {
@@ -35,7 +35,7 @@
//!
//! For specific examples see [`auxiliary::Driver`], [`pci::Driver`] and [`platform::Driver`].
//!
-//! The `probe()` callback should return a `Result<Pin<KBox<Self>>>`, i.e. the driver's private
+//! The `probe()` callback should return a `impl PinInit<Self, Error>`, i.e. the driver's private
//! data. The bus abstraction should store the pointer in the corresponding bus device. The generic
//! [`Device`] infrastructure provides common helpers for this purpose on its
//! [`Device<CoreInternal>`] implementation.
diff --git a/rust/kernel/fs/file.rs b/rust/kernel/fs/file.rs
index cd6987850332..23ee689bd240 100644
--- a/rust/kernel/fs/file.rs
+++ b/rust/kernel/fs/file.rs
@@ -17,6 +17,11 @@ use crate::{
};
use core::ptr;
+/// Primitive type representing the offset within a [`File`].
+///
+/// Type alias for `bindings::loff_t`.
+pub type Offset = bindings::loff_t;
+
/// Flags associated with a [`File`].
pub mod flags {
/// File is opened in append mode.
diff --git a/rust/kernel/i2c.rs b/rust/kernel/i2c.rs
new file mode 100644
index 000000000000..491e6cc25cf4
--- /dev/null
+++ b/rust/kernel/i2c.rs
@@ -0,0 +1,586 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! I2C Driver subsystem
+
+// I2C Driver abstractions.
+use crate::{
+ acpi,
+ container_of,
+ device,
+ device_id::{
+ RawDeviceId,
+ RawDeviceIdIndex, //
+ },
+ devres::Devres,
+ driver,
+ error::*,
+ of,
+ prelude::*,
+ types::{
+ AlwaysRefCounted,
+ Opaque, //
+ }, //
+};
+
+use core::{
+ marker::PhantomData,
+ mem::offset_of,
+ ptr::{
+ from_ref,
+ NonNull, //
+ }, //
+};
+
+use kernel::types::ARef;
+
+/// An I2C device id.
+#[repr(transparent)]
+#[derive(Clone, Copy)]
+pub struct DeviceId(bindings::i2c_device_id);
+
+impl DeviceId {
+ const I2C_NAME_SIZE: usize = 20;
+
+ /// Create a new device id from an I2C 'id' string.
+ #[inline(always)]
+ pub const fn new(id: &'static CStr) -> Self {
+ let src = id.to_bytes_with_nul();
+ build_assert!(src.len() <= Self::I2C_NAME_SIZE, "ID exceeds 20 bytes");
+ let mut i2c: bindings::i2c_device_id = pin_init::zeroed();
+ let mut i = 0;
+ while i < src.len() {
+ i2c.name[i] = src[i];
+ i += 1;
+ }
+
+ Self(i2c)
+ }
+}
+
+// SAFETY: `DeviceId` is a `#[repr(transparent)]` wrapper of `i2c_device_id` and does not add
+// additional invariants, so it's safe to transmute to `RawType`.
+unsafe impl RawDeviceId for DeviceId {
+ type RawType = bindings::i2c_device_id;
+}
+
+// SAFETY: `DRIVER_DATA_OFFSET` is the offset to the `driver_data` field.
+unsafe impl RawDeviceIdIndex for DeviceId {
+ const DRIVER_DATA_OFFSET: usize = core::mem::offset_of!(bindings::i2c_device_id, driver_data);
+
+ fn index(&self) -> usize {
+ self.0.driver_data
+ }
+}
+
+/// IdTable type for I2C
+pub type IdTable<T> = &'static dyn kernel::device_id::IdTable<DeviceId, T>;
+
+/// Create an I2C `IdTable` with its alias for modpost.
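+///
+/// # Examples
+///
+/// A minimal sketch with a unit id-info type (the device name is illustrative):
+///
+/// ```ignore
+/// kernel::i2c_device_table!(
+///     I2C_TABLE,
+///     MODULE_I2C_TABLE,
+///     (),
+///     [(kernel::i2c::DeviceId::new(kernel::c_str!("my-sensor")), ())]
+/// );
+/// ```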
+#[macro_export]
+macro_rules! i2c_device_table {
+ ($table_name:ident, $module_table_name:ident, $id_info_type: ty, $table_data: expr) => {
+ const $table_name: $crate::device_id::IdArray<
+ $crate::i2c::DeviceId,
+ $id_info_type,
+ { $table_data.len() },
+ > = $crate::device_id::IdArray::new($table_data);
+
+ $crate::module_device_table!("i2c", $module_table_name, $table_name);
+ };
+}
+
+/// An adapter for the registration of I2C drivers.
+pub struct Adapter<T: Driver>(T);
+
+// SAFETY: A call to `unregister` for a given instance of `RegType` is guaranteed to be valid if
+// a preceding call to `register` has been successful.
+unsafe impl<T: Driver + 'static> driver::RegistrationOps for Adapter<T> {
+ type RegType = bindings::i2c_driver;
+
+ unsafe fn register(
+ idrv: &Opaque<Self::RegType>,
+ name: &'static CStr,
+ module: &'static ThisModule,
+ ) -> Result {
+ build_assert!(
+ T::ACPI_ID_TABLE.is_some() || T::OF_ID_TABLE.is_some() || T::I2C_ID_TABLE.is_some(),
+ "At least one of ACPI/OF/Legacy tables must be present when registering an i2c driver"
+ );
+
+ let i2c_table = match T::I2C_ID_TABLE {
+ Some(table) => table.as_ptr(),
+ None => core::ptr::null(),
+ };
+
+ let of_table = match T::OF_ID_TABLE {
+ Some(table) => table.as_ptr(),
+ None => core::ptr::null(),
+ };
+
+ let acpi_table = match T::ACPI_ID_TABLE {
+ Some(table) => table.as_ptr(),
+ None => core::ptr::null(),
+ };
+
+ // SAFETY: It's safe to set the fields of `struct i2c_driver` on initialization.
+ unsafe {
+ (*idrv.get()).driver.name = name.as_char_ptr();
+ (*idrv.get()).probe = Some(Self::probe_callback);
+ (*idrv.get()).remove = Some(Self::remove_callback);
+ (*idrv.get()).shutdown = Some(Self::shutdown_callback);
+ (*idrv.get()).id_table = i2c_table;
+ (*idrv.get()).driver.of_match_table = of_table;
+ (*idrv.get()).driver.acpi_match_table = acpi_table;
+ }
+
+ // SAFETY: `idrv` is guaranteed to be a valid `RegType`.
+ to_result(unsafe { bindings::i2c_register_driver(module.0, idrv.get()) })
+ }
+
+ unsafe fn unregister(idrv: &Opaque<Self::RegType>) {
+ // SAFETY: `idrv` is guaranteed to be a valid `RegType`.
+ unsafe { bindings::i2c_del_driver(idrv.get()) }
+ }
+}
+
+impl<T: Driver + 'static> Adapter<T> {
+ extern "C" fn probe_callback(idev: *mut bindings::i2c_client) -> kernel::ffi::c_int {
+ // SAFETY: The I2C bus only ever calls the probe callback with a valid pointer to a
+ // `struct i2c_client`.
+ //
+ // INVARIANT: `idev` is valid for the duration of `probe_callback()`.
+ let idev = unsafe { &*idev.cast::<I2cClient<device::CoreInternal>>() };
+
+ let info =
+ Self::i2c_id_info(idev).or_else(|| <Self as driver::Adapter>::id_info(idev.as_ref()));
+
+ from_result(|| {
+ let data = T::probe(idev, info);
+
+ idev.as_ref().set_drvdata(data)?;
+ Ok(0)
+ })
+ }
+
+ extern "C" fn remove_callback(idev: *mut bindings::i2c_client) {
+ // SAFETY: `idev` is a valid pointer to a `struct i2c_client`.
+ let idev = unsafe { &*idev.cast::<I2cClient<device::CoreInternal>>() };
+
+ // SAFETY: `remove_callback` is only ever called after a successful call to
+ // `probe_callback`, hence it's guaranteed that `Device::set_drvdata()` has been called
+ // and stored a `Pin<KBox<T>>`.
+ let data = unsafe { idev.as_ref().drvdata_obtain::<T>() };
+
+ T::unbind(idev, data.as_ref());
+ }
+
+ extern "C" fn shutdown_callback(idev: *mut bindings::i2c_client) {
+ // SAFETY: `shutdown_callback` is only ever called for a valid `idev`.
+ let idev = unsafe { &*idev.cast::<I2cClient<device::CoreInternal>>() };
+
+ // SAFETY: `shutdown_callback` is only ever called after a successful call to
+ // `probe_callback`, hence it's guaranteed that `Device::set_drvdata()` has been called
+ // and stored a `Pin<KBox<T>>`.
+ let data = unsafe { idev.as_ref().drvdata_obtain::<T>() };
+
+ T::shutdown(idev, data.as_ref());
+ }
+
+ /// The [`i2c::IdTable`] of the corresponding driver.
+ fn i2c_id_table() -> Option<IdTable<<Self as driver::Adapter>::IdInfo>> {
+ T::I2C_ID_TABLE
+ }
+
+ /// Returns the driver's private data from the matching entry in the [`i2c::IdTable`], if any.
+ ///
+ /// If this returns `None`, it means there is no match with an entry in the [`i2c::IdTable`].
+ fn i2c_id_info(dev: &I2cClient) -> Option<&'static <Self as driver::Adapter>::IdInfo> {
+ let table = Self::i2c_id_table()?;
+
+ // SAFETY:
+ // - `table` has static lifetime, hence it's valid for reads
+ // - `dev` is guaranteed to be valid while it's alive, and so is `dev.as_raw()`.
+ let raw_id = unsafe { bindings::i2c_match_id(table.as_ptr(), dev.as_raw()) };
+
+ if raw_id.is_null() {
+ return None;
+ }
+
+ // SAFETY: `DeviceId` is a `#[repr(transparent)]` wrapper of `struct i2c_device_id` and
+ // does not add additional invariants, so it's safe to transmute.
+ let id = unsafe { &*raw_id.cast::<DeviceId>() };
+
+ Some(table.info(<DeviceId as RawDeviceIdIndex>::index(id)))
+ }
+}
+
+impl<T: Driver + 'static> driver::Adapter for Adapter<T> {
+ type IdInfo = T::IdInfo;
+
+ fn of_id_table() -> Option<of::IdTable<Self::IdInfo>> {
+ T::OF_ID_TABLE
+ }
+
+ fn acpi_id_table() -> Option<acpi::IdTable<Self::IdInfo>> {
+ T::ACPI_ID_TABLE
+ }
+}
+
+/// Declares a kernel module that exposes a single i2c driver.
+///
+/// # Examples
+///
+/// ```ignore
+/// kernel::module_i2c_driver! {
+/// type: MyDriver,
+/// name: "Module name",
+/// authors: ["Author name"],
+/// description: "Description",
+/// license: "GPL v2",
+/// }
+/// ```
+#[macro_export]
+macro_rules! module_i2c_driver {
+ ($($f:tt)*) => {
+ $crate::module_driver!(<T>, $crate::i2c::Adapter<T>, { $($f)* });
+ };
+}
+
+/// The i2c driver trait.
+///
+/// Drivers must implement this trait in order to get an i2c driver registered.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::{acpi, bindings, c_str, device::Core, i2c, of};
+///
+/// struct MyDriver;
+///
+/// kernel::acpi_device_table!(
+/// ACPI_TABLE,
+/// MODULE_ACPI_TABLE,
+/// <MyDriver as i2c::Driver>::IdInfo,
+/// [
+/// (acpi::DeviceId::new(c_str!("LNUXBEEF")), ())
+/// ]
+/// );
+///
+/// kernel::i2c_device_table!(
+/// I2C_TABLE,
+/// MODULE_I2C_TABLE,
+/// <MyDriver as i2c::Driver>::IdInfo,
+/// [
+/// (i2c::DeviceId::new(c_str!("rust_driver_i2c")), ())
+/// ]
+/// );
+///
+/// kernel::of_device_table!(
+/// OF_TABLE,
+/// MODULE_OF_TABLE,
+/// <MyDriver as i2c::Driver>::IdInfo,
+/// [
+/// (of::DeviceId::new(c_str!("test,device")), ())
+/// ]
+/// );
+///
+/// impl i2c::Driver for MyDriver {
+/// type IdInfo = ();
+/// const I2C_ID_TABLE: Option<i2c::IdTable<Self::IdInfo>> = Some(&I2C_TABLE);
+/// const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>> = Some(&OF_TABLE);
+/// const ACPI_ID_TABLE: Option<acpi::IdTable<Self::IdInfo>> = Some(&ACPI_TABLE);
+///
+/// fn probe(
+/// _idev: &i2c::I2cClient<Core>,
+/// _id_info: Option<&Self::IdInfo>,
+/// ) -> impl PinInit<Self, Error> {
+/// Err(ENODEV)
+/// }
+///
+/// fn shutdown(_idev: &i2c::I2cClient<Core>, _this: Pin<&Self>) {
+/// }
+/// }
+/// ```
+pub trait Driver: Send {
+ /// The type holding information about each device id supported by the driver.
+ // TODO: Use `associated_type_defaults` once stabilized:
+ //
+ // ```
+ // type IdInfo: 'static = ();
+ // ```
+ type IdInfo: 'static;
+
+ /// The table of device ids supported by the driver.
+ const I2C_ID_TABLE: Option<IdTable<Self::IdInfo>> = None;
+
+ /// The table of OF device ids supported by the driver.
+ const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>> = None;
+
+ /// The table of ACPI device ids supported by the driver.
+ const ACPI_ID_TABLE: Option<acpi::IdTable<Self::IdInfo>> = None;
+
+ /// I2C driver probe.
+ ///
+ /// Called when a new i2c client is added or discovered.
+ /// Implementers should attempt to initialize the client here.
+ fn probe(
+ dev: &I2cClient<device::Core>,
+ id_info: Option<&Self::IdInfo>,
+ ) -> impl PinInit<Self, Error>;
+
+ /// I2C driver shutdown.
+ ///
+ /// Called by the kernel during system reboot or power-off to allow the [`Driver`] to bring the
+ /// [`I2cClient`] into a safe state. Implementing this callback is optional.
+ ///
+ /// Typical actions include stopping transfers, disabling interrupts, or resetting the hardware
+ /// to prevent undesired behavior during shutdown.
+ ///
+ /// This callback is distinct from final resource cleanup, as the driver instance remains valid
+ /// after it returns. Any deallocation or teardown of driver-owned resources should instead be
+ /// handled in `Self::drop`.
+ fn shutdown(dev: &I2cClient<device::Core>, this: Pin<&Self>) {
+ let _ = (dev, this);
+ }
+
+ /// I2C driver unbind.
+ ///
+ /// Called when the [`I2cClient`] is unbound from its bound [`Driver`]. Implementing this
+ /// callback is optional.
+ ///
+ /// This callback serves as a place for drivers to perform teardown operations that require a
+ /// `&Device<Core>` or `&Device<Bound>` reference. For instance, drivers may try to perform I/O
+ /// operations to gracefully tear down the device.
+ ///
+ /// Otherwise, release operations for driver resources should be performed in `Self::drop`.
+ fn unbind(dev: &I2cClient<device::Core>, this: Pin<&Self>) {
+ let _ = (dev, this);
+ }
+}
+
+/// The i2c adapter representation.
+///
+/// This structure represents the Rust abstraction for a C `struct i2c_adapter`. The
+/// implementation abstracts the usage of an existing C `struct i2c_adapter` that
+/// gets passed from the C side.
+///
+/// # Invariants
+///
+/// An [`I2cAdapter`] instance represents a valid `struct i2c_adapter` created by the C portion of
+/// the kernel.
+#[repr(transparent)]
+pub struct I2cAdapter<Ctx: device::DeviceContext = device::Normal>(
+ Opaque<bindings::i2c_adapter>,
+ PhantomData<Ctx>,
+);
+
+impl<Ctx: device::DeviceContext> I2cAdapter<Ctx> {
+ fn as_raw(&self) -> *mut bindings::i2c_adapter {
+ self.0.get()
+ }
+}
+
+impl I2cAdapter {
+ /// Returns the I2C Adapter index.
+ #[inline]
+ pub fn index(&self) -> i32 {
+ // SAFETY: `self.as_raw` is a valid pointer to a `struct i2c_adapter`.
+ unsafe { (*self.as_raw()).nr }
+ }
+
+ /// Gets a reference-counted [`I2cAdapter`] by index.
+ pub fn get(index: i32) -> Result<ARef<Self>> {
+ // SAFETY: It is safe to call `i2c_get_adapter()` with any index; the kernel
+ // guarantees that it returns either a valid pointer or NULL, and
+ // `NonNull::new` handles the NULL case.
+ let adapter = NonNull::new(unsafe { bindings::i2c_get_adapter(index) }).ok_or(ENODEV)?;
+
+ // SAFETY: `adapter` is non-null and points to a live `i2c_adapter`.
+ // `I2cAdapter` is #[repr(transparent)], so this cast is valid.
+ Ok(unsafe { (&*adapter.as_ptr().cast::<I2cAdapter<device::Normal>>()).into() })
+ }
+}
+
+// SAFETY: `I2cAdapter` is a transparent wrapper of a type that doesn't depend on
+// `I2cAdapter`'s generic argument.
+kernel::impl_device_context_deref!(unsafe { I2cAdapter });
+kernel::impl_device_context_into_aref!(I2cAdapter);
+
+// SAFETY: Instances of `I2cAdapter` are always reference-counted.
+unsafe impl crate::types::AlwaysRefCounted for I2cAdapter {
+ fn inc_ref(&self) {
+ // SAFETY: The existence of a shared reference guarantees that the refcount is non-zero.
+ unsafe { bindings::i2c_get_adapter(self.index()) };
+ }
+
+ unsafe fn dec_ref(obj: NonNull<Self>) {
+ // SAFETY: The safety requirements guarantee that the refcount is non-zero.
+ unsafe { bindings::i2c_put_adapter(obj.as_ref().as_raw()) }
+ }
+}
+
+/// The i2c board info representation.
+///
+/// This structure represents the Rust abstraction for a C `struct i2c_board_info` structure,
+/// which is used for manual I2C client creation.
+#[repr(transparent)]
+pub struct I2cBoardInfo(bindings::i2c_board_info);
+
+impl I2cBoardInfo {
+ const I2C_TYPE_SIZE: usize = 20;
+ /// Create a new [`I2cBoardInfo`] for a kernel driver.
+ #[inline(always)]
+ pub const fn new(type_: &'static CStr, addr: u16) -> Self {
+ let src = type_.to_bytes_with_nul();
+ build_assert!(src.len() <= Self::I2C_TYPE_SIZE, "Type exceeds 20 bytes");
+ let mut i2c_board_info: bindings::i2c_board_info = pin_init::zeroed();
+ let mut i: usize = 0;
+ while i < src.len() {
+ i2c_board_info.type_[i] = src[i];
+ i += 1;
+ }
+
+ i2c_board_info.addr = addr;
+ Self(i2c_board_info)
+ }
+
+ fn as_raw(&self) -> *const bindings::i2c_board_info {
+ from_ref(&self.0)
+ }
+}
+
+/// The i2c client representation.
+///
+/// This structure represents the Rust abstraction for a C `struct i2c_client`. The
+/// implementation abstracts the usage of an existing C `struct i2c_client` that
+/// gets passed from the C side.
+///
+/// # Invariants
+///
+/// An [`I2cClient`] instance represents a valid `struct i2c_client` created by the C portion of
+/// the kernel.
+#[repr(transparent)]
+pub struct I2cClient<Ctx: device::DeviceContext = device::Normal>(
+ Opaque<bindings::i2c_client>,
+ PhantomData<Ctx>,
+);
+
+impl<Ctx: device::DeviceContext> I2cClient<Ctx> {
+ fn as_raw(&self) -> *mut bindings::i2c_client {
+ self.0.get()
+ }
+}
+
+// SAFETY: `I2cClient` is a transparent wrapper of `struct i2c_client`.
+// The offset is guaranteed to point to a valid device field inside `I2cClient`.
+unsafe impl<Ctx: device::DeviceContext> device::AsBusDevice<Ctx> for I2cClient<Ctx> {
+ const OFFSET: usize = offset_of!(bindings::i2c_client, dev);
+}
+
+// SAFETY: `I2cClient` is a transparent wrapper of a type that doesn't depend on
+// `I2cClient`'s generic argument.
+kernel::impl_device_context_deref!(unsafe { I2cClient });
+kernel::impl_device_context_into_aref!(I2cClient);
+
+// SAFETY: Instances of `I2cClient` are always reference-counted.
+unsafe impl AlwaysRefCounted for I2cClient {
+ fn inc_ref(&self) {
+ // SAFETY: The existence of a shared reference guarantees that the refcount is non-zero.
+ unsafe { bindings::get_device(self.as_ref().as_raw()) };
+ }
+
+ unsafe fn dec_ref(obj: NonNull<Self>) {
+ // SAFETY: The safety requirements guarantee that the refcount is non-zero.
+ unsafe { bindings::put_device(&raw mut (*obj.as_ref().as_raw()).dev) }
+ }
+}
+
+impl<Ctx: device::DeviceContext> AsRef<device::Device<Ctx>> for I2cClient<Ctx> {
+ fn as_ref(&self) -> &device::Device<Ctx> {
+ let raw = self.as_raw();
+ // SAFETY: By the type invariant of `Self`, `self.as_raw()` is a pointer to a valid
+ // `struct i2c_client`.
+ let dev = unsafe { &raw mut (*raw).dev };
+
+ // SAFETY: `dev` points to a valid `struct device`.
+ unsafe { device::Device::from_raw(dev) }
+ }
+}
+
+impl<Ctx: device::DeviceContext> TryFrom<&device::Device<Ctx>> for &I2cClient<Ctx> {
+ type Error = kernel::error::Error;
+
+ fn try_from(dev: &device::Device<Ctx>) -> Result<Self, Self::Error> {
+ // SAFETY: By the type invariant of `Device`, `dev.as_raw()` is a valid pointer to a
+ // `struct device`.
+ if unsafe { bindings::i2c_verify_client(dev.as_raw()).is_null() } {
+ return Err(EINVAL);
+ }
+
+ // SAFETY: We've just verified that the type of `dev` equals
+ // `bindings::i2c_client_type`, hence `dev` must be embedded in a valid
+ // `struct i2c_client` as guaranteed by the corresponding C code.
+ let idev = unsafe { container_of!(dev.as_raw(), bindings::i2c_client, dev) };
+
+ // SAFETY: `idev` is a valid pointer to a `struct i2c_client`.
+ Ok(unsafe { &*idev.cast() })
+ }
+}
+
+// SAFETY: An `I2cClient` is always reference-counted and can be released from any thread.
+unsafe impl Send for I2cClient {}
+
+// SAFETY: `I2cClient` can be shared among threads because all methods of `I2cClient`
+// (i.e. `I2cClient<Normal>`) are thread safe.
+unsafe impl Sync for I2cClient {}
+
+/// The registration of an i2c client device.
+///
+/// This type represents the registration of a [`struct i2c_client`]. When an instance of this
+/// type is dropped, its respective i2c client device will be unregistered from the system.
+///
+/// # Invariants
+///
+/// `self.0` always holds a valid pointer to an initialized and registered
+/// [`struct i2c_client`].
+#[repr(transparent)]
+pub struct Registration(NonNull<bindings::i2c_client>);
+
+impl Registration {
+ /// A wrapper around the C `i2c_new_client_device()` function for manual I2C client creation.
+ pub fn new<'a>(
+ i2c_adapter: &I2cAdapter,
+ i2c_board_info: &I2cBoardInfo,
+ parent_dev: &'a device::Device<device::Bound>,
+ ) -> impl PinInit<Devres<Self>, Error> + 'a {
+ Devres::new(parent_dev, Self::try_new(i2c_adapter, i2c_board_info))
+ }
+
+ fn try_new(i2c_adapter: &I2cAdapter, i2c_board_info: &I2cBoardInfo) -> Result<Self> {
+ // SAFETY: The kernel guarantees that `i2c_new_client_device()` returns either a valid
+ // pointer or an error pointer. `from_err_ptr` converts error pointers into errors, and
+ // the subsequent `NonNull::new` check guards against a NULL result.
+ let raw_dev = from_err_ptr(unsafe {
+ bindings::i2c_new_client_device(i2c_adapter.as_raw(), i2c_board_info.as_raw())
+ })?;
+
+ let dev_ptr = NonNull::new(raw_dev).ok_or(ENODEV)?;
+
+ Ok(Self(dev_ptr))
+ }
+}
+
+impl Drop for Registration {
+ fn drop(&mut self) {
+ // SAFETY: `Drop` is only called for a valid `Registration`, which by invariant
+ // always contains a non-null pointer to an `i2c_client`.
+ unsafe { bindings::i2c_unregister_device(self.0.as_ptr()) }
+ }
+}
+
+// SAFETY: A `Registration` of a `struct i2c_client` can be released from any thread.
+unsafe impl Send for Registration {}
+
+// SAFETY: `Registration` offers no interior mutability (no mutation through `&self`,
+// and no mutable access is exposed).
+unsafe impl Sync for Registration {}
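The rustdoc example above covers driver registration, but not the manual client-creation path this file also adds. The following is a minimal sketch, assuming a hypothetical parent device, adapter index 0, and device type "my-eeprom" at address 0x50; it only combines APIs introduced in this patch (`I2cAdapter::get()`, `I2cBoardInfo::new()`, `Registration::new()`):

```
use kernel::{c_str, device, devres::Devres, i2c, prelude::*};

fn create_client(
    parent: &device::Device<device::Bound>,
) -> Result<impl PinInit<Devres<i2c::Registration>, Error> + '_> {
    // Hypothetical board info: device type "my-eeprom", 7-bit address 0x50.
    const BOARD_INFO: i2c::I2cBoardInfo = i2c::I2cBoardInfo::new(c_str!("my-eeprom"), 0x50);

    // Takes a reference on the adapter; fails with ENODEV if index 0 is unknown.
    let adapter = i2c::I2cAdapter::get(0)?;

    // The returned initializer ties the client to `parent` via devres, so the
    // client is unregistered automatically when `parent` is unbound.
    Ok(i2c::Registration::new(&adapter, &BOARD_INFO, parent))
}
```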
diff --git a/rust/kernel/io.rs b/rust/kernel/io.rs
index ee182b0b5452..98e8b84e68d1 100644
--- a/rust/kernel/io.rs
+++ b/rust/kernel/io.rs
@@ -4,8 +4,10 @@
//!
//! C header: [`include/asm-generic/io.h`](srctree/include/asm-generic/io.h)
-use crate::error::{code::EINVAL, Result};
-use crate::{bindings, build_assert, ffi::c_void};
+use crate::{
+ bindings,
+ prelude::*, //
+};
pub mod mem;
pub mod poll;
@@ -13,6 +15,18 @@ pub mod resource;
pub use resource::Resource;
+/// Physical address type.
+///
+/// This is a type alias to either `u32` or `u64` depending on the config option
+/// `CONFIG_PHYS_ADDR_T_64BIT`, and it can be a u64 even on 32-bit architectures.
+pub type PhysAddr = bindings::phys_addr_t;
+
+/// Resource Size type.
+///
+/// This is a type alias to either `u32` or `u64` depending on the config option
+/// `CONFIG_PHYS_ADDR_T_64BIT`, and it can be a u64 even on 32-bit architectures.
+pub type ResourceSize = bindings::resource_size_t;
+
/// Raw representation of an MMIO region.
///
/// By itself, the existence of an instance of this structure does not provide any guarantees that
@@ -62,8 +76,16 @@ impl<const SIZE: usize> IoRaw<SIZE> {
/// # Examples
///
/// ```no_run
-/// # use kernel::{bindings, ffi::c_void, io::{Io, IoRaw}};
-/// # use core::ops::Deref;
+/// use kernel::{
+/// bindings,
+/// ffi::c_void,
+/// io::{
+/// Io,
+/// IoRaw,
+/// PhysAddr,
+/// },
+/// };
+/// use core::ops::Deref;
///
/// // See also [`pci::Bar`] for a real example.
/// struct IoMem<const SIZE: usize>(IoRaw<SIZE>);
@@ -76,7 +98,7 @@ impl<const SIZE: usize> IoRaw<SIZE> {
/// unsafe fn new(paddr: usize) -> Result<Self>{
/// // SAFETY: By the safety requirements of this function [`paddr`, `paddr` + `SIZE`) is
/// // valid for `ioremap`.
-/// let addr = unsafe { bindings::ioremap(paddr as bindings::phys_addr_t, SIZE) };
+/// let addr = unsafe { bindings::ioremap(paddr as PhysAddr, SIZE) };
/// if addr.is_null() {
/// return Err(ENOMEM);
/// }
diff --git a/rust/kernel/io/mem.rs b/rust/kernel/io/mem.rs
index 6f99510bfc3a..b03b82cd531b 100644
--- a/rust/kernel/io/mem.rs
+++ b/rust/kernel/io/mem.rs
@@ -4,16 +4,24 @@
use core::ops::Deref;
-use crate::c_str;
-use crate::device::Bound;
-use crate::device::Device;
-use crate::devres::Devres;
-use crate::io;
-use crate::io::resource::Region;
-use crate::io::resource::Resource;
-use crate::io::Io;
-use crate::io::IoRaw;
-use crate::prelude::*;
+use crate::{
+ c_str,
+ device::{
+ Bound,
+ Device, //
+ },
+ devres::Devres,
+ io::{
+ self,
+ resource::{
+ Region,
+ Resource, //
+ },
+ Io,
+ IoRaw, //
+ },
+ prelude::*,
+};
/// An IO request for a specific device and resource.
pub struct IoRequest<'a> {
@@ -53,7 +61,7 @@ impl<'a> IoRequest<'a> {
/// fn probe(
/// pdev: &platform::Device<Core>,
/// info: Option<&Self::IdInfo>,
- /// ) -> Result<Pin<KBox<Self>>> {
+ /// ) -> impl PinInit<Self, Error> {
/// let offset = 0; // Some offset.
///
/// // If the size is known at compile time, use [`Self::iomap_sized`].
@@ -70,7 +78,7 @@ impl<'a> IoRequest<'a> {
///
/// io.write32_relaxed(data, offset);
///
- /// # Ok(KBox::new(SampleDriver, GFP_KERNEL)?.into())
+ /// # Ok(SampleDriver)
/// }
/// }
/// ```
@@ -111,7 +119,7 @@ impl<'a> IoRequest<'a> {
/// fn probe(
/// pdev: &platform::Device<Core>,
/// info: Option<&Self::IdInfo>,
- /// ) -> Result<Pin<KBox<Self>>> {
+ /// ) -> impl PinInit<Self, Error> {
/// let offset = 0; // Some offset.
///
/// // Unlike [`Self::iomap_sized`], here the size of the memory region
@@ -128,7 +136,7 @@ impl<'a> IoRequest<'a> {
///
/// io.try_write32_relaxed(data, offset)?;
///
- /// # Ok(KBox::new(SampleDriver, GFP_KERNEL)?.into())
+ /// # Ok(SampleDriver)
/// }
/// }
/// ```
diff --git a/rust/kernel/io/poll.rs b/rust/kernel/io/poll.rs
index 613eb25047ef..b1a2570364f4 100644
--- a/rust/kernel/io/poll.rs
+++ b/rust/kernel/io/poll.rs
@@ -5,10 +5,18 @@
//! C header: [`include/linux/iopoll.h`](srctree/include/linux/iopoll.h).
use crate::{
- error::{code::*, Result},
+ prelude::*,
processor::cpu_relax,
task::might_sleep,
- time::{delay::fsleep, Delta, Instant, Monotonic},
+ time::{
+ delay::{
+ fsleep,
+ udelay, //
+ },
+ Delta,
+ Instant,
+ Monotonic, //
+ },
};
/// Polls periodically until a condition is met, an error occurs,
@@ -42,8 +50,8 @@ use crate::{
///
/// const HW_READY: u16 = 0x01;
///
-/// fn wait_for_hardware<const SIZE: usize>(io: &Io<SIZE>) -> Result<()> {
-/// match read_poll_timeout(
+/// fn wait_for_hardware<const SIZE: usize>(io: &Io<SIZE>) -> Result {
+/// read_poll_timeout(
/// // The `op` closure reads the value of a specific status register.
/// || io.try_read16(0x1000),
/// // The `cond` closure takes a reference to the value returned by `op`
@@ -51,14 +59,8 @@ use crate::{
/// |val: &u16| *val == HW_READY,
/// Delta::from_millis(50),
/// Delta::from_secs(3),
-/// ) {
-/// Ok(_) => {
-/// // The hardware is ready. The returned value of the `op` closure
-/// // isn't used.
-/// Ok(())
-/// }
-/// Err(e) => Err(e),
-/// }
+/// )?;
+/// Ok(())
/// }
/// ```
#[track_caller]
@@ -102,3 +104,70 @@ where
cpu_relax();
}
}
+
+/// Polls periodically until a condition is met, an error occurs,
+/// or the attempt limit is reached.
+///
+/// The function repeatedly executes the given operation `op` closure and
+/// checks its result using the condition closure `cond`.
+///
+/// If `cond` returns `true`, the function returns successfully with the result of `op`.
+/// Otherwise, it performs a busy wait for a duration specified by `delay_delta`
+/// before executing `op` again.
+///
+/// This process continues until either `op` returns an error, `cond`
+/// returns `true`, or the attempt limit specified by `retry` is reached.
+///
+/// # Errors
+///
+/// If `op` returns an error, then that error is returned directly.
+///
+/// If the attempt limit specified by `retry` is reached, then
+/// `Err(ETIMEDOUT)` is returned.
+///
+/// # Examples
+///
+/// ```no_run
+/// use kernel::io::{poll::read_poll_timeout_atomic, Io};
+/// use kernel::time::Delta;
+///
+/// const HW_READY: u16 = 0x01;
+///
+/// fn wait_for_hardware<const SIZE: usize>(io: &Io<SIZE>) -> Result {
+/// read_poll_timeout_atomic(
+/// // The `op` closure reads the value of a specific status register.
+/// || io.try_read16(0x1000),
+/// // The `cond` closure takes a reference to the value returned by `op`
+/// // and checks whether the hardware is ready.
+/// |val: &u16| *val == HW_READY,
+/// Delta::from_micros(50),
+/// 1000,
+/// )?;
+/// Ok(())
+/// }
+/// ```
+pub fn read_poll_timeout_atomic<Op, Cond, T>(
+ mut op: Op,
+ mut cond: Cond,
+ delay_delta: Delta,
+ retry: usize,
+) -> Result<T>
+where
+ Op: FnMut() -> Result<T>,
+ Cond: FnMut(&T) -> bool,
+{
+ for _ in 0..retry {
+ let val = op()?;
+ if cond(&val) {
+ return Ok(val);
+ }
+
+ if !delay_delta.is_zero() {
+ udelay(delay_delta);
+ }
+
+ cpu_relax();
+ }
+
+ Err(ETIMEDOUT)
+}
diff --git a/rust/kernel/io/resource.rs b/rust/kernel/io/resource.rs
index bea3ee0ed87b..56cfde97ce87 100644
--- a/rust/kernel/io/resource.rs
+++ b/rust/kernel/io/resource.rs
@@ -5,18 +5,21 @@
//!
//! C header: [`include/linux/ioport.h`](srctree/include/linux/ioport.h)
-use core::ops::Deref;
-use core::ptr::NonNull;
-
-use crate::prelude::*;
-use crate::str::{CStr, CString};
-use crate::types::Opaque;
-
-/// Resource Size type.
-///
-/// This is a type alias to either `u32` or `u64` depending on the config option
-/// `CONFIG_PHYS_ADDR_T_64BIT`, and it can be a u64 even on 32-bit architectures.
-pub type ResourceSize = bindings::phys_addr_t;
+use core::{
+ ops::Deref,
+ ptr::NonNull, //
+};
+
+use crate::{
+ prelude::*,
+ str::CString,
+ types::Opaque, //
+};
+
+pub use super::{
+ PhysAddr,
+ ResourceSize, //
+};
/// A region allocated from a parent [`Resource`].
///
@@ -97,7 +100,7 @@ impl Resource {
/// the region, or a part of it, is already in use.
pub fn request_region(
&self,
- start: ResourceSize,
+ start: PhysAddr,
size: ResourceSize,
name: CString,
flags: Flags,
@@ -131,7 +134,7 @@ impl Resource {
}
/// Returns the start address of the resource.
- pub fn start(&self) -> ResourceSize {
+ pub fn start(&self) -> PhysAddr {
let inner = self.0.get();
// SAFETY: Safe as per the invariants of `Resource`.
unsafe { (*inner).start }
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
index a4eee75cf07a..12c4f0c1cdaf 100644
--- a/rust/kernel/lib.rs
+++ b/rust/kernel/lib.rs
@@ -97,6 +97,8 @@ pub mod faux;
pub mod firmware;
pub mod fmt;
pub mod fs;
+#[cfg(CONFIG_I2C = "y")]
+pub mod i2c;
pub mod id_pool;
pub mod init;
pub mod io;
diff --git a/rust/kernel/pci.rs b/rust/kernel/pci.rs
index 7fcc5f6022c1..82e128431f08 100644
--- a/rust/kernel/pci.rs
+++ b/rust/kernel/pci.rs
@@ -5,28 +5,47 @@
//! C header: [`include/linux/pci.h`](srctree/include/linux/pci.h)
use crate::{
- bindings, container_of, device,
- device_id::{RawDeviceId, RawDeviceIdIndex},
- devres::Devres,
+ bindings,
+ container_of,
+ device,
+ device_id::{
+ RawDeviceId,
+ RawDeviceIdIndex, //
+ },
driver,
- error::{from_result, to_result, Result},
- io::{Io, IoRaw},
- irq::{self, IrqRequest},
+ error::{
+ from_result,
+ to_result, //
+ },
+ prelude::*,
str::CStr,
- sync::aref::ARef,
types::Opaque,
- ThisModule,
+ ThisModule, //
};
use core::{
marker::PhantomData,
- ops::Deref,
- ptr::{addr_of_mut, NonNull},
+ mem::offset_of,
+ ptr::{
+ addr_of_mut,
+ NonNull, //
+ },
};
-use kernel::prelude::*;
mod id;
+mod io;
+mod irq;
-pub use self::id::{Class, ClassMask, Vendor};
+pub use self::id::{
+ Class,
+ ClassMask,
+ Vendor, //
+};
+pub use self::io::Bar;
+pub use self::irq::{
+ IrqType,
+ IrqTypes,
+ IrqVector, //
+};
/// An adapter for the registration of PCI drivers.
pub struct Adapter<T: Driver>(T);
@@ -78,9 +97,9 @@ impl<T: Driver + 'static> Adapter<T> {
let info = T::ID_TABLE.info(id.index());
from_result(|| {
- let data = T::probe(pdev, info)?;
+ let data = T::probe(pdev, info);
- pdev.as_ref().set_drvdata(data);
+ pdev.as_ref().set_drvdata(data)?;
Ok(0)
})
}
@@ -95,7 +114,7 @@ impl<T: Driver + 'static> Adapter<T> {
// SAFETY: `remove_callback` is only ever called after a successful call to
// `probe_callback`, hence it's guaranteed that `Device::set_drvdata()` has been called
// and stored a `Pin<KBox<T>>`.
- let data = unsafe { pdev.as_ref().drvdata_obtain::<Pin<KBox<T>>>() };
+ let data = unsafe { pdev.as_ref().drvdata_obtain::<T>() };
T::unbind(pdev, data.as_ref());
}
@@ -249,7 +268,7 @@ macro_rules! pci_device_table {
/// fn probe(
/// _pdev: &pci::Device<Core>,
/// _id_info: &Self::IdInfo,
-/// ) -> Result<Pin<KBox<Self>>> {
+/// ) -> impl PinInit<Self, Error> {
/// Err(ENODEV)
/// }
/// }
@@ -272,7 +291,7 @@ pub trait Driver: Send {
///
/// Called when a new pci device is added or discovered. Implementers should
/// attempt to initialize the device here.
- fn probe(dev: &Device<device::Core>, id_info: &Self::IdInfo) -> Result<Pin<KBox<Self>>>;
+ fn probe(dev: &Device<device::Core>, id_info: &Self::IdInfo) -> impl PinInit<Self, Error>;
/// PCI driver unbind.
///
@@ -305,112 +324,6 @@ pub struct Device<Ctx: device::DeviceContext = device::Normal>(
PhantomData<Ctx>,
);
-/// A PCI BAR to perform I/O-Operations on.
-///
-/// # Invariants
-///
-/// `Bar` always holds an `IoRaw` inststance that holds a valid pointer to the start of the I/O
-/// memory mapped PCI bar and its size.
-pub struct Bar<const SIZE: usize = 0> {
- pdev: ARef<Device>,
- io: IoRaw<SIZE>,
- num: i32,
-}
-
-impl<const SIZE: usize> Bar<SIZE> {
- fn new(pdev: &Device, num: u32, name: &CStr) -> Result<Self> {
- let len = pdev.resource_len(num)?;
- if len == 0 {
- return Err(ENOMEM);
- }
-
- // Convert to `i32`, since that's what all the C bindings use.
- let num = i32::try_from(num)?;
-
- // SAFETY:
- // `pdev` is valid by the invariants of `Device`.
- // `num` is checked for validity by a previous call to `Device::resource_len`.
- // `name` is always valid.
- let ret = unsafe { bindings::pci_request_region(pdev.as_raw(), num, name.as_char_ptr()) };
- if ret != 0 {
- return Err(EBUSY);
- }
-
- // SAFETY:
- // `pdev` is valid by the invariants of `Device`.
- // `num` is checked for validity by a previous call to `Device::resource_len`.
- // `name` is always valid.
- let ioptr: usize = unsafe { bindings::pci_iomap(pdev.as_raw(), num, 0) } as usize;
- if ioptr == 0 {
- // SAFETY:
- // `pdev` valid by the invariants of `Device`.
- // `num` is checked for validity by a previous call to `Device::resource_len`.
- unsafe { bindings::pci_release_region(pdev.as_raw(), num) };
- return Err(ENOMEM);
- }
-
- let io = match IoRaw::new(ioptr, len as usize) {
- Ok(io) => io,
- Err(err) => {
- // SAFETY:
- // `pdev` is valid by the invariants of `Device`.
- // `ioptr` is guaranteed to be the start of a valid I/O mapped memory region.
- // `num` is checked for validity by a previous call to `Device::resource_len`.
- unsafe { Self::do_release(pdev, ioptr, num) };
- return Err(err);
- }
- };
-
- Ok(Bar {
- pdev: pdev.into(),
- io,
- num,
- })
- }
-
- /// # Safety
- ///
- /// `ioptr` must be a valid pointer to the memory mapped PCI bar number `num`.
- unsafe fn do_release(pdev: &Device, ioptr: usize, num: i32) {
- // SAFETY:
- // `pdev` is valid by the invariants of `Device`.
- // `ioptr` is valid by the safety requirements.
- // `num` is valid by the safety requirements.
- unsafe {
- bindings::pci_iounmap(pdev.as_raw(), ioptr as *mut c_void);
- bindings::pci_release_region(pdev.as_raw(), num);
- }
- }
-
- fn release(&self) {
- // SAFETY: The safety requirements are guaranteed by the type invariant of `self.pdev`.
- unsafe { Self::do_release(&self.pdev, self.io.addr(), self.num) };
- }
-}
-
-impl Bar {
- #[inline]
- fn index_is_valid(index: u32) -> bool {
- // A `struct pci_dev` owns an array of resources with at most `PCI_NUM_RESOURCES` entries.
- index < bindings::PCI_NUM_RESOURCES
- }
-}
-
-impl<const SIZE: usize> Drop for Bar<SIZE> {
- fn drop(&mut self) {
- self.release();
- }
-}
-
-impl<const SIZE: usize> Deref for Bar<SIZE> {
- type Target = Io<SIZE>;
-
- fn deref(&self) -> &Self::Target {
- // SAFETY: By the type invariant of `Self`, the MMIO range in `self.io` is properly mapped.
- unsafe { Io::from_raw(&self.io) }
- }
-}
-
impl<Ctx: device::DeviceContext> Device<Ctx> {
#[inline]
fn as_raw(&self) -> *mut bindings::pci_dev {
@@ -484,7 +397,7 @@ impl Device {
unsafe { (*self.as_raw()).subsystem_device }
}
- /// Returns the start of the given PCI bar resource.
+ /// Returns the start of the given PCI BAR resource.
pub fn resource_start(&self, bar: u32) -> Result<bindings::resource_size_t> {
if !Bar::index_is_valid(bar) {
return Err(EINVAL);
@@ -496,7 +409,7 @@ impl Device {
Ok(unsafe { bindings::pci_resource_start(self.as_raw(), bar.try_into()?) })
}
- /// Returns the size of the given PCI bar resource.
+ /// Returns the size of the given PCI BAR resource.
pub fn resource_len(&self, bar: u32) -> Result<bindings::resource_size_t> {
if !Bar::index_is_valid(bar) {
return Err(EINVAL);
@@ -516,68 +429,6 @@ impl Device {
}
}
-impl Device<device::Bound> {
- /// Mapps an entire PCI-BAR after performing a region-request on it. I/O operation bound checks
- /// can be performed on compile time for offsets (plus the requested type size) < SIZE.
- pub fn iomap_region_sized<'a, const SIZE: usize>(
- &'a self,
- bar: u32,
- name: &'a CStr,
- ) -> impl PinInit<Devres<Bar<SIZE>>, Error> + 'a {
- Devres::new(self.as_ref(), Bar::<SIZE>::new(self, bar, name))
- }
-
- /// Mapps an entire PCI-BAR after performing a region-request on it.
- pub fn iomap_region<'a>(
- &'a self,
- bar: u32,
- name: &'a CStr,
- ) -> impl PinInit<Devres<Bar>, Error> + 'a {
- self.iomap_region_sized::<0>(bar, name)
- }
-
- /// Returns an [`IrqRequest`] for the IRQ vector at the given index, if any.
- pub fn irq_vector(&self, index: u32) -> Result<IrqRequest<'_>> {
- // SAFETY: `self.as_raw` returns a valid pointer to a `struct pci_dev`.
- let irq = unsafe { crate::bindings::pci_irq_vector(self.as_raw(), index) };
- if irq < 0 {
- return Err(crate::error::Error::from_errno(irq));
- }
- // SAFETY: `irq` is guaranteed to be a valid IRQ number for `&self`.
- Ok(unsafe { IrqRequest::new(self.as_ref(), irq as u32) })
- }
-
- /// Returns a [`kernel::irq::Registration`] for the IRQ vector at the given
- /// index.
- pub fn request_irq<'a, T: crate::irq::Handler + 'static>(
- &'a self,
- index: u32,
- flags: irq::Flags,
- name: &'static CStr,
- handler: impl PinInit<T, Error> + 'a,
- ) -> Result<impl PinInit<irq::Registration<T>, Error> + 'a> {
- let request = self.irq_vector(index)?;
-
- Ok(irq::Registration::<T>::new(request, flags, name, handler))
- }
-
- /// Returns a [`kernel::irq::ThreadedRegistration`] for the IRQ vector at
- /// the given index.
- pub fn request_threaded_irq<'a, T: crate::irq::ThreadedHandler + 'static>(
- &'a self,
- index: u32,
- flags: irq::Flags,
- name: &'static CStr,
- handler: impl PinInit<T, Error> + 'a,
- ) -> Result<impl PinInit<irq::ThreadedRegistration<T>, Error> + 'a> {
- let request = self.irq_vector(index)?;
-
- Ok(irq::ThreadedRegistration::<T>::new(
- request, flags, name, handler,
- ))
- }
-}
-
impl Device<device::Core> {
/// Enable memory resources for this device.
pub fn enable_device_mem(&self) -> Result {
@@ -593,6 +444,12 @@ impl Device<device::Core> {
}
}
+// SAFETY: `pci::Device` is a transparent wrapper of `struct pci_dev`.
+// The offset is guaranteed to point to a valid device field inside `pci::Device`.
+unsafe impl<Ctx: device::DeviceContext> device::AsBusDevice<Ctx> for Device<Ctx> {
+ const OFFSET: usize = offset_of!(bindings::pci_dev, dev);
+}
+
// SAFETY: `Device` is a transparent wrapper of a type that doesn't depend on `Device`'s generic
// argument.
kernel::impl_device_context_deref!(unsafe { Device });
diff --git a/rust/kernel/pci/id.rs b/rust/kernel/pci/id.rs
index 5f5d59ff49fc..c09125946d9e 100644
--- a/rust/kernel/pci/id.rs
+++ b/rust/kernel/pci/id.rs
@@ -4,7 +4,11 @@
//!
//! This module contains PCI class codes, Vendor IDs, and supporting types.
-use crate::{bindings, error::code::EINVAL, error::Error, fmt, prelude::*};
+use crate::{
+ bindings,
+ fmt,
+ prelude::*, //
+};
/// PCI device class codes.
///
diff --git a/rust/kernel/pci/io.rs b/rust/kernel/pci/io.rs
new file mode 100644
index 000000000000..0d55c3139b6f
--- /dev/null
+++ b/rust/kernel/pci/io.rs
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! PCI memory-mapped I/O infrastructure.
+
+use super::Device;
+use crate::{
+ bindings,
+ device,
+ devres::Devres,
+ io::{
+ Io,
+ IoRaw, //
+ },
+ prelude::*,
+ sync::aref::ARef, //
+};
+use core::ops::Deref;
+
+/// A PCI BAR to perform I/O operations on.
+///
+/// # Invariants
+///
+/// `Bar` always holds an `IoRaw` instance that holds a valid pointer to the start of the I/O
+/// memory mapped PCI BAR and its size.
+pub struct Bar<const SIZE: usize = 0> {
+ pdev: ARef<Device>,
+ io: IoRaw<SIZE>,
+ num: i32,
+}
+
+impl<const SIZE: usize> Bar<SIZE> {
+ pub(super) fn new(pdev: &Device, num: u32, name: &CStr) -> Result<Self> {
+ let len = pdev.resource_len(num)?;
+ if len == 0 {
+ return Err(ENOMEM);
+ }
+
+ // Convert to `i32`, since that's what all the C bindings use.
+ let num = i32::try_from(num)?;
+
+ // SAFETY:
+ // `pdev` is valid by the invariants of `Device`.
+ // `num` is checked for validity by a previous call to `Device::resource_len`.
+ // `name` is always valid.
+ let ret = unsafe { bindings::pci_request_region(pdev.as_raw(), num, name.as_char_ptr()) };
+ if ret != 0 {
+ return Err(EBUSY);
+ }
+
+ // SAFETY:
+ // `pdev` is valid by the invariants of `Device`.
+ // `num` is checked for validity by a previous call to `Device::resource_len`.
+ // `name` is always valid.
+ let ioptr: usize = unsafe { bindings::pci_iomap(pdev.as_raw(), num, 0) } as usize;
+ if ioptr == 0 {
+ // SAFETY:
+ // `pdev` valid by the invariants of `Device`.
+ // `num` is checked for validity by a previous call to `Device::resource_len`.
+ unsafe { bindings::pci_release_region(pdev.as_raw(), num) };
+ return Err(ENOMEM);
+ }
+
+ let io = match IoRaw::new(ioptr, len as usize) {
+ Ok(io) => io,
+ Err(err) => {
+ // SAFETY:
+ // `pdev` is valid by the invariants of `Device`.
+ // `ioptr` is guaranteed to be the start of a valid I/O mapped memory region.
+ // `num` is checked for validity by a previous call to `Device::resource_len`.
+ unsafe { Self::do_release(pdev, ioptr, num) };
+ return Err(err);
+ }
+ };
+
+ Ok(Bar {
+ pdev: pdev.into(),
+ io,
+ num,
+ })
+ }
+
+ /// # Safety
+ ///
+ /// `ioptr` must be a valid pointer to the memory mapped PCI BAR number `num`.
+ unsafe fn do_release(pdev: &Device, ioptr: usize, num: i32) {
+ // SAFETY:
+ // `pdev` is valid by the invariants of `Device`.
+ // `ioptr` is valid by the safety requirements.
+ // `num` is valid by the safety requirements.
+ unsafe {
+ bindings::pci_iounmap(pdev.as_raw(), ioptr as *mut c_void);
+ bindings::pci_release_region(pdev.as_raw(), num);
+ }
+ }
+
+ fn release(&self) {
+ // SAFETY: The safety requirements are guaranteed by the type invariant of `self.pdev`.
+ unsafe { Self::do_release(&self.pdev, self.io.addr(), self.num) };
+ }
+}
+
+impl Bar {
+ #[inline]
+ pub(super) fn index_is_valid(index: u32) -> bool {
+ // A `struct pci_dev` owns an array of resources with at most `PCI_NUM_RESOURCES` entries.
+ index < bindings::PCI_NUM_RESOURCES
+ }
+}
+
+impl<const SIZE: usize> Drop for Bar<SIZE> {
+ fn drop(&mut self) {
+ self.release();
+ }
+}
+
+impl<const SIZE: usize> Deref for Bar<SIZE> {
+ type Target = Io<SIZE>;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: By the type invariant of `Self`, the MMIO range in `self.io` is properly mapped.
+ unsafe { Io::from_raw(&self.io) }
+ }
+}
+
+impl Device<device::Bound> {
+ /// Maps an entire PCI BAR after performing a region-request on it. I/O operation bounds
+ /// checks can be performed at compile time for offsets (plus the requested type size) < `SIZE`.
+ pub fn iomap_region_sized<'a, const SIZE: usize>(
+ &'a self,
+ bar: u32,
+ name: &'a CStr,
+ ) -> impl PinInit<Devres<Bar<SIZE>>, Error> + 'a {
+ Devres::new(self.as_ref(), Bar::<SIZE>::new(self, bar, name))
+ }
+
+ /// Maps an entire PCI BAR after performing a region-request on it.
+ pub fn iomap_region<'a>(
+ &'a self,
+ bar: u32,
+ name: &'a CStr,
+ ) -> impl PinInit<Devres<Bar>, Error> + 'a {
+ self.iomap_region_sized::<0>(bar, name)
+ }
+}
diff --git a/rust/kernel/pci/irq.rs b/rust/kernel/pci/irq.rs
new file mode 100644
index 000000000000..d9230e105541
--- /dev/null
+++ b/rust/kernel/pci/irq.rs
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! PCI interrupt infrastructure.
+
+use super::Device;
+use crate::{
+ bindings,
+ device,
+ device::Bound,
+ devres,
+ error::to_result,
+ irq::{
+ self,
+ IrqRequest, //
+ },
+ prelude::*,
+ str::CStr,
+ sync::aref::ARef, //
+};
+use core::ops::RangeInclusive;
+
+/// IRQ type flags for PCI interrupt allocation.
+#[derive(Debug, Clone, Copy)]
+pub enum IrqType {
+ /// INTx interrupts.
+ Intx,
+ /// Message Signaled Interrupts (MSI).
+ Msi,
+ /// Extended Message Signaled Interrupts (MSI-X).
+ MsiX,
+}
+
+impl IrqType {
+ /// Convert to the corresponding kernel flags.
+ const fn as_raw(self) -> u32 {
+ match self {
+ IrqType::Intx => bindings::PCI_IRQ_INTX,
+ IrqType::Msi => bindings::PCI_IRQ_MSI,
+ IrqType::MsiX => bindings::PCI_IRQ_MSIX,
+ }
+ }
+}
+
+/// Set of IRQ types that can be used for PCI interrupt allocation.
+#[derive(Debug, Clone, Copy, Default)]
+pub struct IrqTypes(u32);
+
+impl IrqTypes {
+ /// Create a set containing all IRQ types (MSI-X, MSI, and INTx).
+ pub const fn all() -> Self {
+ Self(bindings::PCI_IRQ_ALL_TYPES)
+ }
+
+ /// Build a set of IRQ types.
+ ///
+ /// # Examples
+ ///
+ /// ```ignore
+ /// // Create a set with only MSI and MSI-X (no INTx interrupts).
+ /// let msi_only = IrqTypes::default()
+ /// .with(IrqType::Msi)
+ /// .with(IrqType::MsiX);
+ /// ```
+ pub const fn with(self, irq_type: IrqType) -> Self {
+ Self(self.0 | irq_type.as_raw())
+ }
+
+ /// Get the raw flags value.
+ const fn as_raw(self) -> u32 {
+ self.0
+ }
+}
+
+/// Represents an allocated IRQ vector for a specific PCI device.
+///
+/// This type ties an IRQ vector to the device it was allocated for,
+/// ensuring the vector is only used with the correct device.
+#[derive(Clone, Copy)]
+pub struct IrqVector<'a> {
+ dev: &'a Device<Bound>,
+ index: u32,
+}
+
+impl<'a> IrqVector<'a> {
+ /// Creates a new [`IrqVector`] for the given device and index.
+ ///
+ /// # Safety
+ ///
+ /// - `index` must be a valid IRQ vector index for `dev`.
+ /// - `dev` must point to a [`Device`] that has successfully allocated IRQ vectors.
+ unsafe fn new(dev: &'a Device<Bound>, index: u32) -> Self {
+ Self { dev, index }
+ }
+
+ /// Returns the raw vector index.
+ fn index(&self) -> u32 {
+ self.index
+ }
+}
+
+impl<'a> TryInto<IrqRequest<'a>> for IrqVector<'a> {
+ type Error = Error;
+
+ fn try_into(self) -> Result<IrqRequest<'a>> {
+ // SAFETY: `self.dev.as_raw()` returns a valid pointer to a `struct pci_dev`.
+ let irq = unsafe { bindings::pci_irq_vector(self.dev.as_raw(), self.index()) };
+ if irq < 0 {
+ return Err(crate::error::Error::from_errno(irq));
+ }
+ // SAFETY: `irq` is guaranteed to be a valid IRQ number for `self.dev`.
+ Ok(unsafe { IrqRequest::new(self.dev.as_ref(), irq as u32) })
+ }
+}
+
+/// Represents an IRQ vector allocation for a PCI device.
+///
+/// This type ensures that IRQ vectors are properly allocated and freed by
+/// tying the allocation to the lifetime of this registration object.
+///
+/// # Invariants
+///
+/// The [`Device`] has successfully allocated IRQ vectors.
+struct IrqVectorRegistration {
+ dev: ARef<Device>,
+}
+
+impl IrqVectorRegistration {
+ /// Allocate and register IRQ vectors for the given PCI device.
+ ///
+ /// Allocates IRQ vectors and registers them with devres for automatic cleanup.
+ /// Returns a range of valid IRQ vectors.
+ fn register<'a>(
+ dev: &'a Device<Bound>,
+ min_vecs: u32,
+ max_vecs: u32,
+ irq_types: IrqTypes,
+ ) -> Result<RangeInclusive<IrqVector<'a>>> {
+ // SAFETY:
+ // - `dev.as_raw()` is guaranteed to be a valid pointer to a `struct pci_dev`
+ // by the type invariant of `Device`.
+ // - `pci_alloc_irq_vectors` internally validates all other parameters
+ // and returns error codes.
+ let ret = unsafe {
+ bindings::pci_alloc_irq_vectors(dev.as_raw(), min_vecs, max_vecs, irq_types.as_raw())
+ };
+
+ to_result(ret)?;
+ let count = ret as u32;
+
+ // SAFETY:
+ // - `pci_alloc_irq_vectors` returns the number of allocated vectors on success.
+ // - Vectors are 0-based, so valid indices are [0, count-1].
+ // - `pci_alloc_irq_vectors` guarantees `count >= min_vecs > 0`, so both `0` and
+ // `count - 1` are valid IRQ vector indices for `dev`.
+ let range = unsafe { IrqVector::new(dev, 0)..=IrqVector::new(dev, count - 1) };
+
+ // INVARIANT: The IRQ vector allocation for `dev` above was successful.
+ let irq_vecs = Self { dev: dev.into() };
+ devres::register(dev.as_ref(), irq_vecs, GFP_KERNEL)?;
+
+ Ok(range)
+ }
+}
+
+impl Drop for IrqVectorRegistration {
+ fn drop(&mut self) {
+ // SAFETY:
+ // - By the type invariant, `self.dev.as_raw()` is a valid pointer to a `struct pci_dev`.
+ // - `self.dev` has successfully allocated IRQ vectors.
+ unsafe { bindings::pci_free_irq_vectors(self.dev.as_raw()) };
+ }
+}
+
+impl Device<device::Bound> {
+ /// Returns a [`kernel::irq::Registration`] for the given IRQ vector.
+ pub fn request_irq<'a, T: crate::irq::Handler + 'static>(
+ &'a self,
+ vector: IrqVector<'a>,
+ flags: irq::Flags,
+ name: &'static CStr,
+ handler: impl PinInit<T, Error> + 'a,
+ ) -> impl PinInit<irq::Registration<T>, Error> + 'a {
+ pin_init::pin_init_scope(move || {
+ let request = vector.try_into()?;
+
+ Ok(irq::Registration::<T>::new(request, flags, name, handler))
+ })
+ }
+
+ /// Returns a [`kernel::irq::ThreadedRegistration`] for the given IRQ vector.
+ pub fn request_threaded_irq<'a, T: crate::irq::ThreadedHandler + 'static>(
+ &'a self,
+ vector: IrqVector<'a>,
+ flags: irq::Flags,
+ name: &'static CStr,
+ handler: impl PinInit<T, Error> + 'a,
+ ) -> impl PinInit<irq::ThreadedRegistration<T>, Error> + 'a {
+ pin_init::pin_init_scope(move || {
+ let request = vector.try_into()?;
+
+ Ok(irq::ThreadedRegistration::<T>::new(
+ request, flags, name, handler,
+ ))
+ })
+ }
+
+ /// Allocate IRQ vectors for this PCI device with automatic cleanup.
+ ///
+ /// Allocates between `min_vecs` and `max_vecs` interrupt vectors for the device.
+ /// The allocation will use MSI-X, MSI, or INTx interrupts based on the `irq_types`
+ /// parameter and hardware capabilities. When multiple types are specified, the kernel
+ /// will try them in order of preference: MSI-X first, then MSI, then INTx interrupts.
+ ///
+ /// The allocated vectors are automatically freed when the device is unbound, using the
+ /// devres (device resource management) system.
+ ///
+ /// # Arguments
+ ///
+ /// * `min_vecs` - Minimum number of vectors required.
+ /// * `max_vecs` - Maximum number of vectors to allocate.
+ /// * `irq_types` - Types of interrupts that can be used.
+ ///
+ /// # Returns
+ ///
+ /// Returns a range of IRQ vectors that were successfully allocated, or an error if the
+ /// allocation fails or cannot meet the minimum requirement.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use kernel::{device::Bound, pci};
+ /// # fn no_run(dev: &pci::Device<Bound>) -> Result {
+ /// // Allocate using any available interrupt type in the order mentioned above.
+ /// let vectors = dev.alloc_irq_vectors(1, 32, pci::IrqTypes::all())?;
+ ///
+ /// // Allocate MSI or MSI-X only (no INTx interrupts).
+ /// let msi_only = pci::IrqTypes::default()
+ /// .with(pci::IrqType::Msi)
+ /// .with(pci::IrqType::MsiX);
+ /// let vectors = dev.alloc_irq_vectors(4, 16, msi_only)?;
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn alloc_irq_vectors(
+ &self,
+ min_vecs: u32,
+ max_vecs: u32,
+ irq_types: IrqTypes,
+ ) -> Result<RangeInclusive<IrqVector<'_>>> {
+ IrqVectorRegistration::register(self, min_vecs, max_vecs, irq_types)
+ }
+}
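A short sketch of how a driver might combine `alloc_irq_vectors()` with the range of `IrqVector`s it returns. The vector counts are illustrative, and the actual handler registration is elided because it depends on an `irq::Handler` implementation defined outside this patch:

```
use kernel::{device::Bound, pci, prelude::*};

fn setup_irqs(dev: &pci::Device<Bound>) -> Result {
    // Prefer message-signaled interrupts; INTx is deliberately left out.
    let types = pci::IrqTypes::default()
        .with(pci::IrqType::MsiX)
        .with(pci::IrqType::Msi);

    // Allocate between 1 and 8 vectors; they are freed automatically via
    // devres when the device is unbound.
    let vectors = dev.alloc_irq_vectors(1, 8, types)?;

    // `IrqVector` is `Copy`, so the endpoints of the returned range can be
    // passed to `Device::request_irq()` together with a handler, e.g.
    // `dev.request_irq(*vectors.start(), flags, name, handler)`.
    let _first: pci::IrqVector<'_> = *vectors.start();

    Ok(())
}
```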
diff --git a/rust/kernel/platform.rs b/rust/kernel/platform.rs
index 7205fe3416d3..ed889f079cab 100644
--- a/rust/kernel/platform.rs
+++ b/rust/kernel/platform.rs
@@ -19,6 +19,7 @@ use crate::{
use core::{
marker::PhantomData,
+ mem::offset_of,
ptr::{addr_of_mut, NonNull},
};
@@ -74,9 +75,9 @@ impl<T: Driver + 'static> Adapter<T> {
let info = <Self as driver::Adapter>::id_info(pdev.as_ref());
from_result(|| {
- let data = T::probe(pdev, info)?;
+ let data = T::probe(pdev, info);
- pdev.as_ref().set_drvdata(data);
+ pdev.as_ref().set_drvdata(data)?;
Ok(0)
})
}
@@ -91,7 +92,7 @@ impl<T: Driver + 'static> Adapter<T> {
// SAFETY: `remove_callback` is only ever called after a successful call to
// `probe_callback`, hence it's guaranteed that `Device::set_drvdata()` has been called
// and stored a `Pin<KBox<T>>`.
- let data = unsafe { pdev.as_ref().drvdata_obtain::<Pin<KBox<T>>>() };
+ let data = unsafe { pdev.as_ref().drvdata_obtain::<T>() };
T::unbind(pdev, data.as_ref());
}
@@ -166,7 +167,7 @@ macro_rules! module_platform_driver {
/// fn probe(
/// _pdev: &platform::Device<Core>,
/// _id_info: Option<&Self::IdInfo>,
-/// ) -> Result<Pin<KBox<Self>>> {
+/// ) -> impl PinInit<Self, Error> {
/// Err(ENODEV)
/// }
/// }
@@ -190,8 +191,10 @@ pub trait Driver: Send {
///
/// Called when a new platform device is added or discovered.
/// Implementers should attempt to initialize the device here.
- fn probe(dev: &Device<device::Core>, id_info: Option<&Self::IdInfo>)
- -> Result<Pin<KBox<Self>>>;
+ fn probe(
+ dev: &Device<device::Core>,
+ id_info: Option<&Self::IdInfo>,
+ ) -> impl PinInit<Self, Error>;
/// Platform driver unbind.
///
@@ -285,6 +288,12 @@ impl Device<Bound> {
}
}
+// SAFETY: `platform::Device` is a transparent wrapper of `struct platform_device`.
+// The offset is guaranteed to point to a valid device field inside `platform::Device`.
+unsafe impl<Ctx: device::DeviceContext> device::AsBusDevice<Ctx> for Device<Ctx> {
+ const OFFSET: usize = offset_of!(bindings::platform_device, dev);
+}
+
macro_rules! define_irq_accessor_by_index {
(
$(#[$meta:meta])* $fn_name:ident,
@@ -299,15 +308,17 @@ macro_rules! define_irq_accessor_by_index {
index: u32,
name: &'static CStr,
handler: impl PinInit<T, Error> + 'a,
- ) -> Result<impl PinInit<irq::$reg_type<T>, Error> + 'a> {
- let request = self.$request_fn(index)?;
-
- Ok(irq::$reg_type::<T>::new(
- request,
- flags,
- name,
- handler,
- ))
+ ) -> impl PinInit<irq::$reg_type<T>, Error> + 'a {
+ pin_init::pin_init_scope(move || {
+ let request = self.$request_fn(index)?;
+
+ Ok(irq::$reg_type::<T>::new(
+ request,
+ flags,
+ name,
+ handler,
+ ))
+ })
}
};
}
@@ -323,18 +334,20 @@ macro_rules! define_irq_accessor_by_name {
pub fn $fn_name<'a, T: irq::$handler_trait + 'static>(
&'a self,
flags: irq::Flags,
- irq_name: &CStr,
+ irq_name: &'a CStr,
name: &'static CStr,
handler: impl PinInit<T, Error> + 'a,
- ) -> Result<impl PinInit<irq::$reg_type<T>, Error> + 'a> {
- let request = self.$request_fn(irq_name)?;
-
- Ok(irq::$reg_type::<T>::new(
- request,
- flags,
- name,
- handler,
- ))
+ ) -> impl PinInit<irq::$reg_type<T>, Error> + 'a {
+ pin_init::pin_init_scope(move || {
+ let request = self.$request_fn(irq_name)?;
+
+ Ok(irq::$reg_type::<T>::new(
+ request,
+ flags,
+ name,
+ handler,
+ ))
+ })
}
};
}
diff --git a/rust/kernel/scatterlist.rs b/rust/kernel/scatterlist.rs
index 9709dff60b5a..196fdb9a75e7 100644
--- a/rust/kernel/scatterlist.rs
+++ b/rust/kernel/scatterlist.rs
@@ -35,7 +35,7 @@ use crate::{
device::{Bound, Device},
devres::Devres,
dma, error,
- io::resource::ResourceSize,
+ io::ResourceSize,
page,
prelude::*,
types::{ARef, Opaque},
diff --git a/rust/kernel/time/delay.rs b/rust/kernel/time/delay.rs
index eb8838da62bc..b5b1b42797a0 100644
--- a/rust/kernel/time/delay.rs
+++ b/rust/kernel/time/delay.rs
@@ -47,3 +47,40 @@ pub fn fsleep(delta: Delta) {
bindings::fsleep(delta.as_micros_ceil() as c_ulong)
}
}
+
+/// Inserts a delay based on microseconds with busy waiting.
+///
+/// Equivalent to the C side [`udelay()`], which delays in microseconds.
+///
+/// `delta` must be within `[0, MAX_UDELAY_MS]` in milliseconds;
+/// otherwise the behavior is erroneous, i.e. it is considered a bug to
+/// call this function with an out-of-range value.
+///
+/// The behavior above differs from the C side [`udelay()`] for which out-of-range
+/// values could lead to an overflow and unexpected behavior.
+///
+/// [`udelay()`]: https://docs.kernel.org/timers/delay_sleep_functions.html#c.udelay
+pub fn udelay(delta: Delta) {
+ const MAX_UDELAY_DELTA: Delta = Delta::from_millis(bindings::MAX_UDELAY_MS as i64);
+
+ debug_assert!(delta.as_nanos() >= 0);
+ debug_assert!(delta <= MAX_UDELAY_DELTA);
+
+ let delta = if (Delta::ZERO..=MAX_UDELAY_DELTA).contains(&delta) {
+ delta
+ } else {
+ MAX_UDELAY_DELTA
+ };
+
+ // SAFETY: It is always safe to call `udelay()` with any duration.
+ // Note that the kernel is compiled with `-fno-strict-overflow`
+ // so any out-of-range value could lead to unexpected behavior
+ // but won't lead to undefined behavior.
+ unsafe {
+ // Convert the duration to microseconds and round up to preserve
+ // the guarantee; `udelay()` inserts a delay for at least
+ // the provided duration, but it may delay for longer
+ // under some circumstances.
+ bindings::udelay(delta.as_micros_ceil() as c_ulong)
+ }
+}
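A minimal, hypothetical call site for the new helper, for illustration; longer waits should use `fsleep()` rather than busy waiting:

```
use kernel::time::{delay::udelay, Delta};

fn settle_delay() {
    // Busy-wait for at least 10 microseconds; `udelay()` may delay for longer.
    udelay(Delta::from_micros(10));
}
```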
diff --git a/rust/kernel/uaccess.rs b/rust/kernel/uaccess.rs
index a8fb4764185a..f989539a31b4 100644
--- a/rust/kernel/uaccess.rs
+++ b/rust/kernel/uaccess.rs
@@ -9,6 +9,7 @@ use crate::{
bindings,
error::Result,
ffi::{c_char, c_void},
+ fs::file,
prelude::*,
transmute::{AsBytes, FromBytes},
};
@@ -287,6 +288,48 @@ impl UserSliceReader {
self.read_raw(out)
}
+ /// Reads raw data from the user slice into a kernel buffer partially.
+ ///
+ /// This is the same as [`Self::read_slice`] but considers the given `offset` into `out` and
+ /// truncates the read to the boundaries of `self` and `out`.
+ ///
+ /// On success, returns the number of bytes read.
+ pub fn read_slice_partial(&mut self, out: &mut [u8], offset: usize) -> Result<usize> {
+ let end = offset.saturating_add(self.len()).min(out.len());
+
+ let Some(dst) = out.get_mut(offset..end) else {
+ return Ok(0);
+ };
+
+ self.read_slice(dst)?;
+ Ok(dst.len())
+ }
+
+ /// Reads raw data from the user slice into a kernel buffer partially.
+ ///
+ /// This is the same as [`Self::read_slice_partial`] but updates the given [`file::Offset`] by
+ /// the number of bytes read.
+ ///
+ /// This is equivalent to C's `simple_write_to_buffer()`.
+ ///
+ /// On success, returns the number of bytes read.
+ pub fn read_slice_file(&mut self, out: &mut [u8], offset: &mut file::Offset) -> Result<usize> {
+ if offset.is_negative() {
+ return Err(EINVAL);
+ }
+
+ let Ok(offset_index) = (*offset).try_into() else {
+ return Ok(0);
+ };
+
+ let read = self.read_slice_partial(out, offset_index)?;
+
+ // OVERFLOW: `offset + read <= out.len() <= isize::MAX <= Offset::MAX`
+ *offset += read as i64;
+
+ Ok(read)
+ }
+
/// Reads a value of the specified type.
///
/// Fails with [`EFAULT`] if the read happens on a bad address, or if the read goes out of
@@ -438,6 +481,48 @@ impl UserSliceWriter {
Ok(())
}
+ /// Writes raw data to this user pointer from a kernel buffer partially.
+ ///
+ /// This is the same as [`Self::write_slice`] but considers the given `offset` into `data` and
+ /// truncates the write to the boundaries of `self` and `data`.
+ ///
+ /// On success, returns the number of bytes written.
+ pub fn write_slice_partial(&mut self, data: &[u8], offset: usize) -> Result<usize> {
+ let end = offset.saturating_add(self.len()).min(data.len());
+
+ let Some(src) = data.get(offset..end) else {
+ return Ok(0);
+ };
+
+ self.write_slice(src)?;
+ Ok(src.len())
+ }
+
+ /// Writes raw data to this user pointer from a kernel buffer partially.
+ ///
+ /// This is the same as [`Self::write_slice_partial`] but updates the given [`file::Offset`] by
+ /// the number of bytes written.
+ ///
+ /// This is equivalent to C's `simple_read_from_buffer()`.
+ ///
+ /// On success, returns the number of bytes written.
+ pub fn write_slice_file(&mut self, data: &[u8], offset: &mut file::Offset) -> Result<usize> {
+ if offset.is_negative() {
+ return Err(EINVAL);
+ }
+
+ let Ok(offset_index) = (*offset).try_into() else {
+ return Ok(0);
+ };
+
+ let written = self.write_slice_partial(data, offset_index)?;
+
+ // OVERFLOW: `offset + written <= data.len() <= isize::MAX <= Offset::MAX`
+ *offset += written as i64;
+
+ Ok(written)
+ }
+
/// Writes the provided Rust value to this userspace pointer.
///
/// Fails with [`EFAULT`] if the write happens on a bad address, or if the write goes out of
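To illustrate the `simple_read_from_buffer()` analogy mentioned above, here is a hedged sketch of a read-style helper; the surrounding file-operations plumbing is hypothetical, and only `write_slice_file()` and `file::Offset` come from this patch:

```
use kernel::{fs::file, prelude::*, uaccess::UserSliceWriter};

/// Copies as much of `data` into the user buffer as fits, starting at `*pos`,
/// and advances `*pos` by the number of bytes written, mirroring the C
/// `simple_read_from_buffer()` helper.
fn read_from_buffer(
    writer: &mut UserSliceWriter,
    data: &[u8],
    pos: &mut file::Offset,
) -> Result<usize> {
    writer.write_slice_file(data, pos)
}
```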
diff --git a/rust/kernel/usb.rs b/rust/kernel/usb.rs
index 14ddb711bab3..d10b65e9fb6a 100644
--- a/rust/kernel/usb.rs
+++ b/rust/kernel/usb.rs
@@ -15,7 +15,14 @@ use crate::{
types::{AlwaysRefCounted, Opaque},
ThisModule,
};
-use core::{marker::PhantomData, mem::MaybeUninit, ptr::NonNull};
+use core::{
+ marker::PhantomData,
+ mem::{
+ offset_of,
+ MaybeUninit, //
+ },
+ ptr::NonNull,
+};
/// An adapter for the registration of USB drivers.
pub struct Adapter<T: Driver>(T);
@@ -67,10 +74,10 @@ impl<T: Driver + 'static> Adapter<T> {
let id = unsafe { &*id.cast::<DeviceId>() };
let info = T::ID_TABLE.info(id.index());
- let data = T::probe(intf, id, info)?;
+ let data = T::probe(intf, id, info);
let dev: &device::Device<device::CoreInternal> = intf.as_ref();
- dev.set_drvdata(data);
+ dev.set_drvdata(data)?;
Ok(0)
})
}
@@ -87,7 +94,7 @@ impl<T: Driver + 'static> Adapter<T> {
// SAFETY: `disconnect_callback` is only ever called after a successful call to
// `probe_callback`, hence it's guaranteed that `Device::set_drvdata()` has been called
// and stored a `Pin<KBox<T>>`.
- let data = unsafe { dev.drvdata_obtain::<Pin<KBox<T>>>() };
+ let data = unsafe { dev.drvdata_obtain::<T>() };
T::disconnect(intf, data.as_ref());
}
@@ -270,7 +277,7 @@ macro_rules! usb_device_table {
/// _interface: &usb::Interface<Core>,
/// _id: &usb::DeviceId,
/// _info: &Self::IdInfo,
-/// ) -> Result<Pin<KBox<Self>>> {
+/// ) -> impl PinInit<Self, Error> {
/// Err(ENODEV)
/// }
///
@@ -292,7 +299,7 @@ pub trait Driver {
interface: &Interface<device::Core>,
id: &DeviceId,
id_info: &Self::IdInfo,
- ) -> Result<Pin<KBox<Self>>>;
+ ) -> impl PinInit<Self, Error>;
/// USB driver disconnect.
///
@@ -324,6 +331,12 @@ impl<Ctx: device::DeviceContext> Interface<Ctx> {
}
}
+// SAFETY: `usb::Interface` is a transparent wrapper of `struct usb_interface`.
+// The offset is guaranteed to point to a valid device field inside `usb::Interface`.
+unsafe impl<Ctx: device::DeviceContext> device::AsBusDevice<Ctx> for Interface<Ctx> {
+ const OFFSET: usize = offset_of!(bindings::usb_interface, dev);
+}
+
// SAFETY: `Interface` is a transparent wrapper of a type that doesn't depend on
// `Interface`'s generic argument.
kernel::impl_device_context_deref!(unsafe { Interface });
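For intuition, `OFFSET` is the usual `container_of` ingredient: it lets generic `device` code step from the embedded `struct device` back to the enclosing bus device. A purely illustrative sketch of that arithmetic (not the in-tree `AsBusDevice` machinery, which lives outside this hunk):

```rust
use core::mem::offset_of;
use kernel::bindings;

/// Illustrative container_of-style recovery.
///
/// # Safety
///
/// `dev` must point to the `dev` field of a live `struct usb_interface`.
unsafe fn usb_interface_from_dev(dev: *mut bindings::device) -> *mut bindings::usb_interface {
    // SAFETY: per the caller contract, subtracting the field offset yields
    // a pointer to the containing `struct usb_interface`.
    unsafe {
        dev.cast::<u8>()
            .sub(offset_of!(bindings::usb_interface, dev))
            .cast()
    }
}
```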
diff --git a/rust/pin-init/src/lib.rs b/rust/pin-init/src/lib.rs
index dd553212836e..8dc9dd5ac6fd 100644
--- a/rust/pin-init/src/lib.rs
+++ b/rust/pin-init/src/lib.rs
@@ -1392,6 +1392,93 @@ where
unsafe { pin_init_from_closure(init) }
}
+/// Construct an initializer in a closure and run it.
+///
+/// Returns an initializer that first runs the closure and then the initializer returned by it.
+///
+/// See also [`init_scope`].
+///
+/// # Examples
+///
+/// ```
+/// # use pin_init::*;
+/// # #[pin_data]
+/// # struct Foo { a: u64, b: isize }
+/// # struct Bar { a: u32, b: isize }
+/// # fn lookup_bar() -> Result<Bar, Error> { todo!() }
+/// # struct Error;
+/// fn init_foo() -> impl PinInit<Foo, Error> {
+/// pin_init_scope(|| {
+/// let bar = lookup_bar()?;
+/// Ok(try_pin_init!(Foo { a: bar.a.into(), b: bar.b }? Error))
+/// })
+/// }
+/// ```
+///
+/// This initializer first executes `lookup_bar()` and matches on the result: if it returned an
+/// error, the initializer itself fails with that error; if it returned `Ok`, it runs the
+/// initializer returned by the [`try_pin_init!`] invocation.
+pub fn pin_init_scope<T, E, F, I>(make_init: F) -> impl PinInit<T, E>
+where
+ F: FnOnce() -> Result<I, E>,
+ I: PinInit<T, E>,
+{
+ // SAFETY:
+ // - If `make_init` returns `Err`, `Err` is returned and `slot` stays completely uninitialized.
+ // - If `make_init` returns `Ok`, the safety requirements are fulfilled by `init.__pinned_init`.
+ // - The safety requirements of `init.__pinned_init` are fulfilled, since it's being called
+ // from an initializer.
+ unsafe {
+ pin_init_from_closure(move |slot: *mut T| -> Result<(), E> {
+ let init = make_init()?;
+ init.__pinned_init(slot)
+ })
+ }
+}
+
+/// Construct an initializer in a closure and run it.
+///
+/// Returns an initializer that first runs the closure and then the initializer returned by it.
+///
+/// See also [`pin_init_scope`].
+///
+/// # Examples
+///
+/// ```
+/// # use pin_init::*;
+/// # struct Foo { a: u64, b: isize }
+/// # struct Bar { a: u32, b: isize }
+/// # fn lookup_bar() -> Result<Bar, Error> { todo!() }
+/// # struct Error;
+/// fn init_foo() -> impl Init<Foo, Error> {
+/// init_scope(|| {
+/// let bar = lookup_bar()?;
+/// Ok(try_init!(Foo { a: bar.a.into(), b: bar.b }? Error))
+/// })
+/// }
+/// ```
+///
+/// This initializer first executes `lookup_bar()` and matches on the result: if it returned an
+/// error, the initializer itself fails with that error; if it returned `Ok`, it runs the
+/// initializer returned by the [`try_init!`] invocation.
+pub fn init_scope<T, E, F, I>(make_init: F) -> impl Init<T, E>
+where
+ F: FnOnce() -> Result<I, E>,
+ I: Init<T, E>,
+{
+ // SAFETY:
+ // - If `make_init` returns `Err`, `Err` is returned and `slot` stays completely uninitialized.
+ // - If `make_init` returns `Ok`, the safety requirements are fulfilled by `init.__init`.
+ // - The safety requirements of `init.__init` are fulfilled, since it's being called from an
+ // initializer.
+ unsafe {
+ init_from_closure(move |slot: *mut T| -> Result<(), E> {
+ let init = make_init()?;
+ init.__init(slot)
+ })
+ }
+}
+
// SAFETY: the `__init` function always returns `Ok(())` and initializes every field of `slot`.
unsafe impl<T> Init<T> for T {
unsafe fn __init(self, slot: *mut T) -> Result<(), Infallible> {
diff --git a/samples/kobject/kset-example.c b/samples/kobject/kset-example.c
index 579ce150217c..d0103904e5dd 100644
--- a/samples/kobject/kset-example.c
+++ b/samples/kobject/kset-example.c
@@ -37,10 +37,11 @@ struct foo_obj {
/* a custom attribute that works just for a struct foo_obj. */
struct foo_attribute {
struct attribute attr;
- ssize_t (*show)(struct foo_obj *foo, struct foo_attribute *attr, char *buf);
- ssize_t (*store)(struct foo_obj *foo, struct foo_attribute *attr, const char *buf, size_t count);
+ ssize_t (*show)(struct foo_obj *foo, const struct foo_attribute *attr, char *buf);
+ ssize_t (*store)(struct foo_obj *foo, const struct foo_attribute *attr,
+ const char *buf, size_t count);
};
-#define to_foo_attr(x) container_of(x, struct foo_attribute, attr)
+#define to_foo_attr(x) container_of_const(x, struct foo_attribute, attr)
/*
* The default show function that must be passed to sysfs. This will be
@@ -53,7 +54,7 @@ static ssize_t foo_attr_show(struct kobject *kobj,
struct attribute *attr,
char *buf)
{
- struct foo_attribute *attribute;
+ const struct foo_attribute *attribute;
struct foo_obj *foo;
attribute = to_foo_attr(attr);
@@ -73,7 +74,7 @@ static ssize_t foo_attr_store(struct kobject *kobj,
struct attribute *attr,
const char *buf, size_t len)
{
- struct foo_attribute *attribute;
+ const struct foo_attribute *attribute;
struct foo_obj *foo;
attribute = to_foo_attr(attr);
@@ -109,13 +110,13 @@ static void foo_release(struct kobject *kobj)
/*
* The "foo" file where the .foo variable is read from and written to.
*/
-static ssize_t foo_show(struct foo_obj *foo_obj, struct foo_attribute *attr,
+static ssize_t foo_show(struct foo_obj *foo_obj, const struct foo_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%d\n", foo_obj->foo);
}
-static ssize_t foo_store(struct foo_obj *foo_obj, struct foo_attribute *attr,
+static ssize_t foo_store(struct foo_obj *foo_obj, const struct foo_attribute *attr,
const char *buf, size_t count)
{
int ret;
@@ -128,14 +129,14 @@ static ssize_t foo_store(struct foo_obj *foo_obj, struct foo_attribute *attr,
}
/* Sysfs attributes cannot be world-writable. */
-static struct foo_attribute foo_attribute =
+static const struct foo_attribute foo_attribute =
__ATTR(foo, 0664, foo_show, foo_store);
/*
* More complex function where we determine which variable is being accessed by
* looking at the attribute for the "baz" and "bar" files.
*/
-static ssize_t b_show(struct foo_obj *foo_obj, struct foo_attribute *attr,
+static ssize_t b_show(struct foo_obj *foo_obj, const struct foo_attribute *attr,
char *buf)
{
int var;
@@ -147,7 +148,7 @@ static ssize_t b_show(struct foo_obj *foo_obj, struct foo_attribute *attr,
return sysfs_emit(buf, "%d\n", var);
}
-static ssize_t b_store(struct foo_obj *foo_obj, struct foo_attribute *attr,
+static ssize_t b_store(struct foo_obj *foo_obj, const struct foo_attribute *attr,
const char *buf, size_t count)
{
int var, ret;
@@ -163,22 +164,37 @@ static ssize_t b_store(struct foo_obj *foo_obj, struct foo_attribute *attr,
return count;
}
-static struct foo_attribute baz_attribute =
+static const struct foo_attribute baz_attribute =
__ATTR(baz, 0664, b_show, b_store);
-static struct foo_attribute bar_attribute =
+static const struct foo_attribute bar_attribute =
__ATTR(bar, 0664, b_show, b_store);
/*
* Create a group of attributes so that we can create and destroy them all
* at once.
*/
-static struct attribute *foo_default_attrs[] = {
+static const struct attribute *const foo_default_attrs[] = {
&foo_attribute.attr,
&baz_attribute.attr,
&bar_attribute.attr,
NULL, /* need to NULL terminate the list of attributes */
};
-ATTRIBUTE_GROUPS(foo_default);
+
+static umode_t foo_default_attrs_is_visible(struct kobject *kobj,
+ const struct attribute *attr,
+ int n)
+{
+ /* Hide attributes with the same name as the kobject. */
+ if (strcmp(kobject_name(kobj), attr->name) == 0)
+ return 0;
+ return attr->mode;
+}
+
+static const struct attribute_group foo_default_group = {
+ .attrs_const = foo_default_attrs,
+ .is_visible_const = foo_default_attrs_is_visible,
+};
+__ATTRIBUTE_GROUPS(foo_default);
/*
* Our own ktype for our kobjects. Here we specify our sysfs ops, the
diff --git a/samples/rust/Kconfig b/samples/rust/Kconfig
index c376eb899b7a..cde1dc9451d3 100644
--- a/samples/rust/Kconfig
+++ b/samples/rust/Kconfig
@@ -84,6 +84,29 @@ config SAMPLE_RUST_DEBUGFS_SCOPED
If unsure, say N.
+config SAMPLE_RUST_DRIVER_I2C
+ tristate "I2C Driver"
+ depends on I2C=y
+ help
+ This option builds the Rust I2C driver sample.
+
+ To compile this as a module, choose M here:
+ the module will be called rust_driver_i2c.
+
+ If unsure, say N.
+
+config SAMPLE_RUST_I2C_CLIENT
+ tristate "I2C Client Registration"
+ depends on I2C=y
+ help
+ This option builds the Rust I2C client registration
+ sample.
+
+ To compile this as a module, choose M here:
+ the module will be called rust_i2c_client.
+
+ If unsure, say N.
+
config SAMPLE_RUST_DRIVER_PCI
tristate "PCI Driver"
depends on PCI
@@ -91,7 +114,7 @@ config SAMPLE_RUST_DRIVER_PCI
This option builds the Rust PCI driver sample.
To compile this as a module, choose M here:
- the module will be called driver_pci.
+ the module will be called rust_driver_pci.
If unsure, say N.
diff --git a/samples/rust/Makefile b/samples/rust/Makefile
index cf8422f8f219..f65885d1d62b 100644
--- a/samples/rust/Makefile
+++ b/samples/rust/Makefile
@@ -7,6 +7,8 @@ obj-$(CONFIG_SAMPLE_RUST_PRINT) += rust_print.o
obj-$(CONFIG_SAMPLE_RUST_DEBUGFS) += rust_debugfs.o
obj-$(CONFIG_SAMPLE_RUST_DEBUGFS_SCOPED) += rust_debugfs_scoped.o
obj-$(CONFIG_SAMPLE_RUST_DMA) += rust_dma.o
+obj-$(CONFIG_SAMPLE_RUST_DRIVER_I2C) += rust_driver_i2c.o
+obj-$(CONFIG_SAMPLE_RUST_I2C_CLIENT) += rust_i2c_client.o
obj-$(CONFIG_SAMPLE_RUST_DRIVER_PCI) += rust_driver_pci.o
obj-$(CONFIG_SAMPLE_RUST_DRIVER_PLATFORM) += rust_driver_platform.o
obj-$(CONFIG_SAMPLE_RUST_DRIVER_USB) += rust_driver_usb.o
diff --git a/samples/rust/rust_debugfs.rs b/samples/rust/rust_debugfs.rs
index 711faa07bece..025e8f9d12de 100644
--- a/samples/rust/rust_debugfs.rs
+++ b/samples/rust/rust_debugfs.rs
@@ -36,6 +36,7 @@ use kernel::c_str;
use kernel::debugfs::{Dir, File};
use kernel::new_mutex;
use kernel::prelude::*;
+use kernel::sizes::*;
use kernel::sync::atomic::{Atomic, Relaxed};
use kernel::sync::Mutex;
use kernel::{acpi, device::Core, of, platform, str::CString, types::ARef};
@@ -60,6 +61,10 @@ struct RustDebugFs {
counter: File<Atomic<usize>>,
#[pin]
inner: File<Mutex<Inner>>,
+ #[pin]
+ array_blob: File<Mutex<[u8; 4]>>,
+ #[pin]
+ vector_blob: File<Mutex<KVec<u8>>>,
}
#[derive(Debug)]
@@ -104,16 +109,17 @@ impl platform::Driver for RustDebugFs {
fn probe(
pdev: &platform::Device<Core>,
_info: Option<&Self::IdInfo>,
- ) -> Result<Pin<KBox<Self>>> {
- let result = KBox::try_pin_init(RustDebugFs::new(pdev), GFP_KERNEL)?;
- // We can still mutate fields through the files which are atomic or mutexed:
- result.counter.store(91, Relaxed);
- {
- let mut guard = result.inner.lock();
- guard.x = guard.y;
- guard.y = 42;
- }
- Ok(result)
+ ) -> impl PinInit<Self, Error> {
+ RustDebugFs::new(pdev).pin_chain(|this| {
+ this.counter.store(91, Relaxed);
+ {
+ let mut guard = this.inner.lock();
+ guard.x = guard.y;
+ guard.y = 42;
+ }
+
+ Ok(())
+ })
}
}
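The `pin_chain` call replaces the old pattern of boxing first and mutating afterwards: the closure runs once the value is fully initialized and pinned, whenever the returned initializer is eventually run. A minimal sketch under the same assumptions (a `#[pin_data]` struct, `Error` as the failure type):

```rust
use kernel::prelude::*;
use kernel::sync::atomic::{Atomic, Relaxed};

#[pin_data]
struct Counter {
    value: Atomic<usize>,
}

fn init_counter() -> impl PinInit<Counter, Error> {
    try_pin_init!(Counter { value: Atomic::new(0) }? Error).pin_chain(|this| {
        // `this` is `Pin<&mut Counter>`, already fully initialized.
        this.value.store(91, Relaxed);
        Ok(())
    })
}
```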
@@ -141,6 +147,14 @@ impl RustDebugFs {
),
counter <- Self::build_counter(&debugfs),
inner <- Self::build_inner(&debugfs),
+ array_blob <- debugfs.read_write_binary_file(
+ c_str!("array_blob"),
+ new_mutex!([0x62, 0x6c, 0x6f, 0x62]),
+ ),
+ vector_blob <- debugfs.read_write_binary_file(
+ c_str!("vector_blob"),
+ new_mutex!(kernel::kvec!(0x42; SZ_4K)?),
+ ),
_debugfs: debugfs,
pdev: pdev.into(),
}
diff --git a/samples/rust/rust_debugfs_scoped.rs b/samples/rust/rust_debugfs_scoped.rs
index 406e6588b17a..702a6546d3fb 100644
--- a/samples/rust/rust_debugfs_scoped.rs
+++ b/samples/rust/rust_debugfs_scoped.rs
@@ -8,6 +8,7 @@
use kernel::debugfs::{Dir, Scope};
use kernel::prelude::*;
+use kernel::sizes::*;
use kernel::sync::atomic::Atomic;
use kernel::sync::Mutex;
use kernel::{c_str, new_mutex, str::CString};
@@ -66,18 +67,22 @@ fn create_file_write(
GFP_KERNEL,
)?;
}
+ let blob = KBox::pin_init(new_mutex!([0x42; SZ_4K]), GFP_KERNEL)?;
let scope = KBox::pin_init(
- mod_data
- .device_dir
- .scope(DeviceData { name, nums }, &file_name, |dev_data, dir| {
+ mod_data.device_dir.scope(
+ DeviceData { name, nums, blob },
+ &file_name,
+ |dev_data, dir| {
for (idx, val) in dev_data.nums.iter().enumerate() {
let Ok(name) = CString::try_from_fmt(fmt!("{idx}")) else {
return;
};
dir.read_write_file(&name, val);
}
- }),
+ dir.read_write_binary_file(c_str!("blob"), &dev_data.blob);
+ },
+ ),
GFP_KERNEL,
)?;
(*mod_data.devices.lock()).push(scope, GFP_KERNEL)?;
@@ -110,6 +115,7 @@ impl ModuleData {
struct DeviceData {
name: CString,
nums: KVec<Atomic<usize>>,
+ blob: Pin<KBox<Mutex<[u8; SZ_4K]>>>,
}
fn init_control(base_dir: &Dir, dyn_dirs: Dir) -> impl PinInit<Scope<ModuleData>> + '_ {
diff --git a/samples/rust/rust_dma.rs b/samples/rust/rust_dma.rs
index 4d324f06cc2a..f53bce2a73e3 100644
--- a/samples/rust/rust_dma.rs
+++ b/samples/rust/rust_dma.rs
@@ -55,36 +55,33 @@ impl pci::Driver for DmaSampleDriver {
type IdInfo = ();
const ID_TABLE: pci::IdTable<Self::IdInfo> = &PCI_TABLE;
- fn probe(pdev: &pci::Device<Core>, _info: &Self::IdInfo) -> Result<Pin<KBox<Self>>> {
- dev_info!(pdev.as_ref(), "Probe DMA test driver.\n");
+ fn probe(pdev: &pci::Device<Core>, _info: &Self::IdInfo) -> impl PinInit<Self, Error> {
+ pin_init::pin_init_scope(move || {
+ dev_info!(pdev.as_ref(), "Probe DMA test driver.\n");
- let mask = DmaMask::new::<64>();
+ let mask = DmaMask::new::<64>();
- // SAFETY: There are no concurrent calls to DMA allocation and mapping primitives.
- unsafe { pdev.dma_set_mask_and_coherent(mask)? };
+ // SAFETY: There are no concurrent calls to DMA allocation and mapping primitives.
+ unsafe { pdev.dma_set_mask_and_coherent(mask)? };
- let ca: CoherentAllocation<MyStruct> =
- CoherentAllocation::alloc_coherent(pdev.as_ref(), TEST_VALUES.len(), GFP_KERNEL)?;
+ let ca: CoherentAllocation<MyStruct> =
+ CoherentAllocation::alloc_coherent(pdev.as_ref(), TEST_VALUES.len(), GFP_KERNEL)?;
- for (i, value) in TEST_VALUES.into_iter().enumerate() {
- kernel::dma_write!(ca[i] = MyStruct::new(value.0, value.1))?;
- }
+ for (i, value) in TEST_VALUES.into_iter().enumerate() {
+ kernel::dma_write!(ca[i] = MyStruct::new(value.0, value.1))?;
+ }
- let size = 4 * page::PAGE_SIZE;
- let pages = VVec::with_capacity(size, GFP_KERNEL)?;
+ let size = 4 * page::PAGE_SIZE;
+ let pages = VVec::with_capacity(size, GFP_KERNEL)?;
- let sgt = SGTable::new(pdev.as_ref(), pages, DataDirection::ToDevice, GFP_KERNEL);
+ let sgt = SGTable::new(pdev.as_ref(), pages, DataDirection::ToDevice, GFP_KERNEL);
- let drvdata = KBox::pin_init(
- try_pin_init!(Self {
+ Ok(try_pin_init!(Self {
pdev: pdev.into(),
ca,
sgt <- sgt,
- }),
- GFP_KERNEL,
- )?;
-
- Ok(drvdata)
+ }))
+ })
}
}
diff --git a/samples/rust/rust_driver_auxiliary.rs b/samples/rust/rust_driver_auxiliary.rs
index 55ece336ee45..5761ea314f44 100644
--- a/samples/rust/rust_driver_auxiliary.rs
+++ b/samples/rust/rust_driver_auxiliary.rs
@@ -5,9 +5,17 @@
//! To make this driver probe, QEMU must be run with `-device pci-testdev`.
use kernel::{
- auxiliary, c_str, device::Core, driver, error::Error, pci, prelude::*, InPlaceModule,
+ auxiliary, c_str,
+ device::{Bound, Core},
+ devres::Devres,
+ driver,
+ error::Error,
+ pci,
+ prelude::*,
+ InPlaceModule,
};
+use core::any::TypeId;
use pin_init::PinInit;
const MODULE_NAME: &CStr = <LocalModule as kernel::ModuleMetadata>::NAME;
@@ -27,7 +35,7 @@ impl auxiliary::Driver for AuxiliaryDriver {
const ID_TABLE: auxiliary::IdTable<Self::IdInfo> = &AUX_TABLE;
- fn probe(adev: &auxiliary::Device<Core>, _info: &Self::IdInfo) -> Result<Pin<KBox<Self>>> {
+ fn probe(adev: &auxiliary::Device<Core>, _info: &Self::IdInfo) -> impl PinInit<Self, Error> {
dev_info!(
adev.as_ref(),
"Probing auxiliary driver for auxiliary device with id={}\n",
@@ -36,14 +44,17 @@ impl auxiliary::Driver for AuxiliaryDriver {
ParentDriver::connect(adev)?;
- let this = KBox::new(Self, GFP_KERNEL)?;
-
- Ok(this.into())
+ Ok(Self)
}
}
+#[pin_data]
struct ParentDriver {
- _reg: [auxiliary::Registration; 2],
+ private: TypeId,
+ #[pin]
+ _reg0: Devres<auxiliary::Registration>,
+ #[pin]
+ _reg1: Devres<auxiliary::Registration>,
}
kernel::pci_device_table!(
@@ -58,35 +69,35 @@ impl pci::Driver for ParentDriver {
const ID_TABLE: pci::IdTable<Self::IdInfo> = &PCI_TABLE;
- fn probe(pdev: &pci::Device<Core>, _info: &Self::IdInfo) -> Result<Pin<KBox<Self>>> {
- let this = KBox::new(
- Self {
- _reg: [
- auxiliary::Registration::new(pdev.as_ref(), AUXILIARY_NAME, 0, MODULE_NAME)?,
- auxiliary::Registration::new(pdev.as_ref(), AUXILIARY_NAME, 1, MODULE_NAME)?,
- ],
- },
- GFP_KERNEL,
- )?;
-
- Ok(this.into())
+ fn probe(pdev: &pci::Device<Core>, _info: &Self::IdInfo) -> impl PinInit<Self, Error> {
+ try_pin_init!(Self {
+ private: TypeId::of::<Self>(),
+ _reg0 <- auxiliary::Registration::new(pdev.as_ref(), AUXILIARY_NAME, 0, MODULE_NAME),
+ _reg1 <- auxiliary::Registration::new(pdev.as_ref(), AUXILIARY_NAME, 1, MODULE_NAME),
+ })
}
}
impl ParentDriver {
- fn connect(adev: &auxiliary::Device) -> Result<()> {
- let parent = adev.parent().ok_or(EINVAL)?;
- let pdev: &pci::Device = parent.try_into()?;
+ fn connect(adev: &auxiliary::Device<Bound>) -> Result {
+ let dev = adev.parent();
+ let pdev: &pci::Device<Bound> = dev.try_into()?;
+ let drvdata = dev.drvdata::<Self>()?;
- let vendor = pdev.vendor_id();
dev_info!(
- adev.as_ref(),
+ dev,
"Connect auxiliary {} with parent: VendorID={}, DeviceID={:#x}\n",
adev.id(),
- vendor,
+ pdev.vendor_id(),
pdev.device_id()
);
+ dev_info!(
+ dev,
+ "We have access to the private data of {:?}.\n",
+ drvdata.private
+ );
+
Ok(())
}
}
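The `connect()` path is the interesting part: the auxiliary child reaches into its PCI parent's typed driver data. A condensed sketch of that lookup, reusing the `ParentDriver` above (the error-on-type-mismatch behaviour is an assumption, suggested by the `private: TypeId::of::<Self>()` field and the `?` on `drvdata`):

```rust
use kernel::{device::Bound, pci, prelude::*};

/// Hypothetical helper: read typed driver data back from a bound parent.
fn parent_private(pdev: &pci::Device<Bound>) -> Result {
    // Presumably fails if `ParentDriver` is not what the owning driver
    // stored via `set_drvdata()`.
    let data = pdev.as_ref().drvdata::<ParentDriver>()?;
    dev_info!(pdev.as_ref(), "parent private data: {:?}\n", data.private);
    Ok(())
}
```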
diff --git a/samples/rust/rust_driver_i2c.rs b/samples/rust/rust_driver_i2c.rs
new file mode 100644
index 000000000000..ecefeca3e22f
--- /dev/null
+++ b/samples/rust/rust_driver_i2c.rs
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust I2C driver sample.
+
+use kernel::{
+ acpi,
+ c_str,
+ device::Core,
+ i2c,
+ of,
+ prelude::*, //
+};
+
+struct SampleDriver;
+
+kernel::acpi_device_table! {
+ ACPI_TABLE,
+ MODULE_ACPI_TABLE,
+ <SampleDriver as i2c::Driver>::IdInfo,
+ [(acpi::DeviceId::new(c_str!("LNUXBEEF")), 0)]
+}
+
+kernel::i2c_device_table! {
+ I2C_TABLE,
+ MODULE_I2C_TABLE,
+ <SampleDriver as i2c::Driver>::IdInfo,
+ [(i2c::DeviceId::new(c_str!("rust_driver_i2c")), 0)]
+}
+
+kernel::of_device_table! {
+ OF_TABLE,
+ MODULE_OF_TABLE,
+ <SampleDriver as i2c::Driver>::IdInfo,
+ [(of::DeviceId::new(c_str!("test,rust_driver_i2c")), 0)]
+}
+
+impl i2c::Driver for SampleDriver {
+ type IdInfo = u32;
+
+ const ACPI_ID_TABLE: Option<acpi::IdTable<Self::IdInfo>> = Some(&ACPI_TABLE);
+ const I2C_ID_TABLE: Option<i2c::IdTable<Self::IdInfo>> = Some(&I2C_TABLE);
+ const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>> = Some(&OF_TABLE);
+
+ fn probe(
+ idev: &i2c::I2cClient<Core>,
+ info: Option<&Self::IdInfo>,
+ ) -> impl PinInit<Self, Error> {
+ let dev = idev.as_ref();
+
+ dev_info!(dev, "Probe Rust I2C driver sample.\n");
+
+ if let Some(info) = info {
+ dev_info!(dev, "Probed with info: '{}'.\n", info);
+ }
+
+ Ok(Self)
+ }
+
+ fn shutdown(idev: &i2c::I2cClient<Core>, _this: Pin<&Self>) {
+ dev_info!(idev.as_ref(), "Shutdown Rust I2C driver sample.\n");
+ }
+
+ fn unbind(idev: &i2c::I2cClient<Core>, _this: Pin<&Self>) {
+ dev_info!(idev.as_ref(), "Unbind Rust I2C driver sample.\n");
+ }
+}
+
+kernel::module_i2c_driver! {
+ type: SampleDriver,
+ name: "rust_driver_i2c",
+ authors: ["Igor Korotin"],
+ description: "Rust I2C driver",
+ license: "GPL v2",
+}
diff --git a/samples/rust/rust_driver_pci.rs b/samples/rust/rust_driver_pci.rs
index 55a683c39ed9..5823787bea8e 100644
--- a/samples/rust/rust_driver_pci.rs
+++ b/samples/rust/rust_driver_pci.rs
@@ -65,35 +65,34 @@ impl pci::Driver for SampleDriver {
const ID_TABLE: pci::IdTable<Self::IdInfo> = &PCI_TABLE;
- fn probe(pdev: &pci::Device<Core>, info: &Self::IdInfo) -> Result<Pin<KBox<Self>>> {
- let vendor = pdev.vendor_id();
- dev_dbg!(
- pdev.as_ref(),
- "Probe Rust PCI driver sample (PCI ID: {}, 0x{:x}).\n",
- vendor,
- pdev.device_id()
- );
-
- pdev.enable_device_mem()?;
- pdev.set_master();
-
- let drvdata = KBox::pin_init(
- try_pin_init!(Self {
+ fn probe(pdev: &pci::Device<Core>, info: &Self::IdInfo) -> impl PinInit<Self, Error> {
+ pin_init::pin_init_scope(move || {
+ let vendor = pdev.vendor_id();
+ dev_dbg!(
+ pdev.as_ref(),
+ "Probe Rust PCI driver sample (PCI ID: {}, 0x{:x}).\n",
+ vendor,
+ pdev.device_id()
+ );
+
+ pdev.enable_device_mem()?;
+ pdev.set_master();
+
+ Ok(try_pin_init!(Self {
bar <- pdev.iomap_region_sized::<{ Regs::END }>(0, c_str!("rust_driver_pci")),
- pdev: pdev.into(),
index: *info,
- }),
- GFP_KERNEL,
- )?;
-
- let bar = drvdata.bar.access(pdev.as_ref())?;
- dev_info!(
- pdev.as_ref(),
- "pci-testdev data-match count: {}\n",
- Self::testdev(info, bar)?
- );
-
- Ok(drvdata)
+ _: {
+ let bar = bar.access(pdev.as_ref())?;
+
+ dev_info!(
+ pdev.as_ref(),
+ "pci-testdev data-match count: {}\n",
+ Self::testdev(info, bar)?
+ );
+ },
+ pdev: pdev.into(),
+ }))
+ })
}
fn unbind(pdev: &pci::Device<Core>, this: Pin<&Self>) {
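Note the `_: { ... }` entry inside `try_pin_init!` above: judging from this usage, it runs an arbitrary fallible block at that point in initialization order (here, after `bar` is ready) without initializing a field of its own. A minimal sketch of the same shape (names hypothetical):

```rust
use kernel::prelude::*;

#[pin_data]
struct Sample {
    id: u32,
}

fn init_sample() -> impl PinInit<Sample, Error> {
    try_pin_init!(Sample {
        id: 42,
        // Interleaves side effects (logging, checks that may fail with
        // `?`) between field initializers.
        _: {
            pr_info!("id initialized\n");
        },
    }? Error)
}
```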
diff --git a/samples/rust/rust_driver_platform.rs b/samples/rust/rust_driver_platform.rs
index 8a82e251820f..6bf4f0c9633d 100644
--- a/samples/rust/rust_driver_platform.rs
+++ b/samples/rust/rust_driver_platform.rs
@@ -103,7 +103,7 @@ impl platform::Driver for SampleDriver {
fn probe(
pdev: &platform::Device<Core>,
info: Option<&Self::IdInfo>,
- ) -> Result<Pin<KBox<Self>>> {
+ ) -> impl PinInit<Self, Error> {
let dev = pdev.as_ref();
dev_dbg!(dev, "Probe Rust Platform driver sample.\n");
@@ -116,9 +116,7 @@ impl platform::Driver for SampleDriver {
Self::properties_parse(dev)?;
}
- let drvdata = KBox::new(Self { pdev: pdev.into() }, GFP_KERNEL)?;
-
- Ok(drvdata.into())
+ Ok(Self { pdev: pdev.into() })
}
}
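Returning a plain `Ok(Self { ... })` from a `-> impl PinInit<Self, Error>` probe works because a value is itself an initializer (the `unsafe impl<T> Init<T> for T` blanket impl in the pin-init hunk above), presumably combined with an impl in this series that lifts `Result<T, E>` into a fallible initializer. In sketch form:

```rust
use kernel::prelude::*;

struct Probe {
    ready: bool,
}

// Both arms are `Result<Probe, Error>`, which is assumed to implement
// `PinInit<Probe, Error>` via the value-as-initializer route.
fn probe_data(ok: bool) -> impl PinInit<Probe, Error> {
    if ok {
        Ok(Probe { ready: true })
    } else {
        Err(EINVAL)
    }
}
```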
diff --git a/samples/rust/rust_driver_usb.rs b/samples/rust/rust_driver_usb.rs
index 5c396f421de7..4eaad14867b2 100644
--- a/samples/rust/rust_driver_usb.rs
+++ b/samples/rust/rust_driver_usb.rs
@@ -24,12 +24,11 @@ impl usb::Driver for SampleDriver {
intf: &usb::Interface<Core>,
_id: &usb::DeviceId,
_info: &Self::IdInfo,
- ) -> Result<Pin<KBox<Self>>> {
+ ) -> impl PinInit<Self, Error> {
let dev: &device::Device<Core> = intf.as_ref();
dev_info!(dev, "Rust USB driver sample probed\n");
- let drvdata = KBox::new(Self { _intf: intf.into() }, GFP_KERNEL)?;
- Ok(drvdata.into())
+ Ok(Self { _intf: intf.into() })
}
fn disconnect(intf: &usb::Interface<Core>, _data: Pin<&Self>) {
diff --git a/samples/rust/rust_i2c_client.rs b/samples/rust/rust_i2c_client.rs
new file mode 100644
index 000000000000..f67938396dce
--- /dev/null
+++ b/samples/rust/rust_i2c_client.rs
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust I2C client registration sample.
+//!
+//! An I2C client in Rust cannot exist on its own. To register a new I2C client,
+//! it must be bound to a parent device. In this sample driver, a platform device
+//! is used as the parent.
+//!
+//! ACPI match table test
+//!
+//! This demonstrates how to test an ACPI-based Rust I2C client registration driver
+//! using QEMU with a custom SSDT.
+//!
+//! Steps:
+//!
+//! 1. **Create an SSDT source file** (`ssdt.dsl`) with the following content:
+//!
+//! ```asl
+//! DefinitionBlock ("", "SSDT", 2, "TEST", "VIRTACPI", 0x00000001)
+//! {
+//! Scope (\_SB)
+//! {
+//! Device (T432)
+//! {
+//! Name (_HID, "LNUXBEEF") // ACPI hardware ID to match
+//! Name (_UID, 1)
+//! Name (_STA, 0x0F) // Device present, enabled
+//! Name (_CRS, ResourceTemplate ()
+//! {
+//! Memory32Fixed (ReadWrite, 0xFED00000, 0x1000)
+//! })
+//! }
+//! }
+//! }
+//! ```
+//!
+//! 2. **Compile the table**:
+//!
+//! ```sh
+//! iasl -tc ssdt.dsl
+//! ```
+//!
+//! This generates `ssdt.aml`.
+//!
+//! 3. **Run QEMU** with the compiled AML file:
+//!
+//! ```sh
+//! qemu-system-x86_64 -m 512M \
+//! -enable-kvm \
+//! -kernel path/to/bzImage \
+//! -append "root=/dev/sda console=ttyS0" \
+//! -hda rootfs.img \
+//! -serial stdio \
+//! -acpitable file=ssdt.aml
+//! ```
+//!
+//! Requirements:
+//! - This module (and `rust_driver_i2c`, which binds the registered client) must be present either:
+//! - built directly into the kernel (`bzImage`), or
+//! - available as a `.ko` file and loadable from `rootfs.img`
+//!
+//! 4. **Verify it worked** by checking `dmesg`:
+//!
+//! ```
+//! rust_i2c_client LNUXBEEF:00: Probe Rust I2C Client registration sample.
+//! ```
+
+use kernel::{
+ acpi,
+ c_str,
+ device,
+ devres::Devres,
+ i2c,
+ of,
+ platform,
+ prelude::*,
+ sync::aref::ARef, //
+};
+
+#[pin_data]
+struct SampleDriver {
+ parent_dev: ARef<platform::Device>,
+ #[pin]
+ _reg: Devres<i2c::Registration>,
+}
+
+kernel::of_device_table!(
+ OF_TABLE,
+ MODULE_OF_TABLE,
+ <SampleDriver as platform::Driver>::IdInfo,
+ [(of::DeviceId::new(c_str!("test,rust-device")), ())]
+);
+
+kernel::acpi_device_table!(
+ ACPI_TABLE,
+ MODULE_ACPI_TABLE,
+ <SampleDriver as platform::Driver>::IdInfo,
+ [(acpi::DeviceId::new(c_str!("LNUXBEEF")), ())]
+);
+
+const SAMPLE_I2C_CLIENT_ADDR: u16 = 0x30;
+const SAMPLE_I2C_ADAPTER_INDEX: i32 = 0;
+const BOARD_INFO: i2c::I2cBoardInfo =
+ i2c::I2cBoardInfo::new(c_str!("rust_driver_i2c"), SAMPLE_I2C_CLIENT_ADDR);
+
+impl platform::Driver for SampleDriver {
+ type IdInfo = ();
+ const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>> = Some(&OF_TABLE);
+ const ACPI_ID_TABLE: Option<acpi::IdTable<Self::IdInfo>> = Some(&ACPI_TABLE);
+
+ fn probe(
+ pdev: &platform::Device<device::Core>,
+ _info: Option<&Self::IdInfo>,
+ ) -> impl PinInit<Self, Error> {
+ dev_info!(
+ pdev.as_ref(),
+ "Probe Rust I2C Client registration sample.\n"
+ );
+
+ kernel::try_pin_init!(Self {
+ parent_dev: pdev.into(),
+
+ _reg <- {
+ let adapter = i2c::I2cAdapter::get(SAMPLE_I2C_ADAPTER_INDEX)?;
+
+ i2c::Registration::new(&adapter, &BOARD_INFO, pdev.as_ref())
+ }
+ })
+ }
+
+ fn unbind(pdev: &platform::Device<device::Core>, _this: Pin<&Self>) {
+ dev_info!(
+ pdev.as_ref(),
+ "Unbind Rust I2C Client registration sample.\n"
+ );
+ }
+}
+
+kernel::module_platform_driver! {
+ type: SampleDriver,
+ name: "rust_device_i2c",
+ authors: ["Danilo Krummrich", "Igor Korotin"],
+ description: "Rust I2C client registration",
+ license: "GPL v2",
+}
diff --git a/security/keys/trusted-keys/trusted_tpm2.c b/security/keys/trusted-keys/trusted_tpm2.c
index 8bc6efa8accb..a7ea4a1c3bed 100644
--- a/security/keys/trusted-keys/trusted_tpm2.c
+++ b/security/keys/trusted-keys/trusted_tpm2.c
@@ -268,7 +268,10 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
goto out_put;
}
- tpm_buf_append_name(chip, &buf, options->keyhandle, NULL);
+ rc = tpm_buf_append_name(chip, &buf, options->keyhandle, NULL);
+ if (rc)
+ goto out;
+
tpm_buf_append_hmac_session(chip, &buf, TPM2_SA_DECRYPT,
options->keyauth, TPM_DIGEST_SIZE);
@@ -316,7 +319,10 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
goto out;
}
- tpm_buf_fill_hmac_session(chip, &buf);
+ rc = tpm_buf_fill_hmac_session(chip, &buf);
+ if (rc)
+ goto out;
+
rc = tpm_transmit_cmd(chip, &buf, 4, "sealing data");
rc = tpm_buf_check_hmac_response(chip, &buf, rc);
if (rc)
@@ -427,7 +433,10 @@ static int tpm2_load_cmd(struct tpm_chip *chip,
return rc;
}
- tpm_buf_append_name(chip, &buf, options->keyhandle, NULL);
+ rc = tpm_buf_append_name(chip, &buf, options->keyhandle, NULL);
+ if (rc)
+ goto out;
+
tpm_buf_append_hmac_session(chip, &buf, 0, options->keyauth,
TPM_DIGEST_SIZE);
@@ -439,7 +448,10 @@ static int tpm2_load_cmd(struct tpm_chip *chip,
goto out;
}
- tpm_buf_fill_hmac_session(chip, &buf);
+ rc = tpm_buf_fill_hmac_session(chip, &buf);
+ if (rc)
+ goto out;
+
rc = tpm_transmit_cmd(chip, &buf, 4, "loading blob");
rc = tpm_buf_check_hmac_response(chip, &buf, rc);
if (!rc)
@@ -469,8 +481,10 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
struct trusted_key_options *options,
u32 blob_handle)
{
+ struct tpm_header *head;
struct tpm_buf buf;
u16 data_len;
+ int offset;
u8 *data;
int rc;
@@ -484,7 +498,9 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
return rc;
}
- tpm_buf_append_name(chip, &buf, blob_handle, NULL);
+ rc = tpm_buf_append_name(chip, &buf, blob_handle, NULL);
+ if (rc)
+ goto out;
if (!options->policyhandle) {
tpm_buf_append_hmac_session(chip, &buf, TPM2_SA_ENCRYPT,
@@ -505,11 +521,20 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
tpm2_buf_append_auth(&buf, options->policyhandle,
NULL /* nonce */, 0, 0,
options->blobauth, options->blobauth_len);
- tpm_buf_append_hmac_session_opt(chip, &buf, TPM2_SA_ENCRYPT,
- NULL, 0);
+ if (tpm2_chip_auth(chip)) {
+ tpm_buf_append_hmac_session(chip, &buf, TPM2_SA_ENCRYPT, NULL, 0);
+ } else {
+ offset = buf.handles * 4 + TPM_HEADER_SIZE;
+ head = (struct tpm_header *)buf.data;
+ if (tpm_buf_length(&buf) == offset)
+ head->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
+ }
}
- tpm_buf_fill_hmac_session(chip, &buf);
+ rc = tpm_buf_fill_hmac_session(chip, &buf);
+ if (rc)
+ goto out;
+
rc = tpm_transmit_cmd(chip, &buf, 6, "unsealing");
rc = tpm_buf_check_hmac_response(chip, &buf, rc);