Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Makefile | 2
-rw-r--r--  drivers/acpi/apei/ghes.c | 36
-rw-r--r--  drivers/android/binder/process.rs | 64
-rw-r--r--  drivers/base/bus.c | 38
-rw-r--r--  drivers/clk/at91/clk-peripheral.c | 1
-rw-r--r--  drivers/clk/at91/pmc.h | 3
-rw-r--r--  drivers/clk/renesas/clk-div6.c | 6
-rw-r--r--  drivers/clk/renesas/rcar-gen3-cpg.c | 15
-rw-r--r--  drivers/clk/renesas/rcar-gen4-cpg.c | 9
-rw-r--r--  drivers/crypto/ccp/Kconfig | 1
-rw-r--r--  drivers/crypto/ccp/Makefile | 4
-rw-r--r--  drivers/crypto/ccp/sev-dev-tio.c | 864
-rw-r--r--  drivers/crypto/ccp/sev-dev-tio.h | 123
-rw-r--r--  drivers/crypto/ccp/sev-dev-tsm.c | 405
-rw-r--r--  drivers/crypto/ccp/sev-dev.c | 66
-rw-r--r--  drivers/crypto/ccp/sev-dev.h | 11
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c | 8
-rw-r--r--  drivers/edac/ie31200_edac.c | 4
-rw-r--r--  drivers/gpio/gpio-aspeed.c | 5
-rw-r--r--  drivers/iio/dac/ad3530r.c | 3
-rw-r--r--  drivers/iio/temperature/mlx90614.c | 5
-rw-r--r--  drivers/iommu/amd/amd_iommu_types.h | 1
-rw-r--r--  drivers/iommu/amd/init.c | 9
-rw-r--r--  drivers/nvdimm/Kconfig | 19
-rw-r--r--  drivers/nvdimm/Makefile | 1
-rw-r--r--  drivers/nvdimm/ramdax.c | 282
-rw-r--r--  drivers/nvdimm/security.c | 4
-rw-r--r--  drivers/nvme/common/auth.c | 4
-rw-r--r--  drivers/parisc/ccio-dma.c | 54
-rw-r--r--  drivers/parisc/gsc.c | 4
-rw-r--r--  drivers/parisc/iommu-helpers.h | 10
-rw-r--r--  drivers/parisc/sba_iommu.c | 54
-rw-r--r--  drivers/pci/Kconfig | 18
-rw-r--r--  drivers/pci/Makefile | 2
-rw-r--r--  drivers/pci/bus.c | 39
-rw-r--r--  drivers/pci/doe.c | 2
-rw-r--r--  drivers/pci/ide.c | 815
-rw-r--r--  drivers/pci/pci-sysfs.c | 4
-rw-r--r--  drivers/pci/pci.h | 21
-rw-r--r--  drivers/pci/pcie/aer.c | 2
-rw-r--r--  drivers/pci/probe.c | 31
-rw-r--r--  drivers/pci/remove.c | 7
-rw-r--r--  drivers/pci/search.c | 62
-rw-r--r--  drivers/pci/tsm.c | 900
-rw-r--r--  drivers/pinctrl/nuvoton/pinctrl-ma35.c | 4
-rw-r--r--  drivers/remoteproc/imx_dsp_rproc.c | 404
-rw-r--r--  drivers/remoteproc/imx_rproc.c | 238
-rw-r--r--  drivers/remoteproc/imx_rproc.h | 16
-rw-r--r--  drivers/remoteproc/mtk_scp.c | 65
-rw-r--r--  drivers/remoteproc/omap_remoteproc.c | 3
-rw-r--r--  drivers/remoteproc/qcom_q6v5_adsp.c | 29
-rw-r--r--  drivers/remoteproc/qcom_q6v5_mss.c | 60
-rw-r--r--  drivers/remoteproc/qcom_q6v5_pas.c | 80
-rw-r--r--  drivers/remoteproc/qcom_q6v5_wcss.c | 40
-rw-r--r--  drivers/remoteproc/qcom_wcnss.c | 27
-rw-r--r--  drivers/remoteproc/rcar_rproc.c | 38
-rw-r--r--  drivers/remoteproc/remoteproc_core.c | 31
-rw-r--r--  drivers/remoteproc/st_remoteproc.c | 44
-rw-r--r--  drivers/remoteproc/stm32_rproc.c | 46
-rw-r--r--  drivers/remoteproc/ti_k3_common.c | 28
-rw-r--r--  drivers/remoteproc/xlnx_r5_remoteproc.c | 53
-rw-r--r--  drivers/rpmsg/qcom_glink_native.c | 35
-rw-r--r--  drivers/soc/renesas/renesas-soc.c | 4
-rw-r--r--  drivers/soc/renesas/rz-sysc.c | 3
-rw-r--r--  drivers/video/fbdev/gbefb.c | 5
-rw-r--r--  drivers/video/fbdev/gxt4500.c | 2
-rw-r--r--  drivers/video/fbdev/i810/i810_main.c | 46
-rw-r--r--  drivers/video/fbdev/pxafb.c | 12
-rw-r--r--  drivers/video/fbdev/ssd1307fb.c | 4
-rw-r--r--  drivers/video/fbdev/tcx.c | 2
-rw-r--r--  drivers/video/fbdev/tridentfb.c | 4
-rw-r--r--  drivers/video/fbdev/vesafb.c | 29
-rw-r--r--  drivers/video/fbdev/vga16fb.c | 21
-rw-r--r--  drivers/virt/Kconfig | 4
-rw-r--r--  drivers/virt/coco/Kconfig | 5
-rw-r--r--  drivers/virt/coco/Makefile | 1
-rw-r--r--  drivers/virt/coco/tsm-core.c | 163
-rw-r--r--  drivers/watchdog/Kconfig | 12
-rw-r--r--  drivers/watchdog/Makefile | 1
-rw-r--r--  drivers/watchdog/aspeed_wdt.c | 30
-rw-r--r--  drivers/watchdog/diag288_wdt.c | 6
-rw-r--r--  drivers/watchdog/loongson1_wdt.c | 89
-rw-r--r--  drivers/watchdog/renesas_wwdt.c | 163
-rw-r--r--  drivers/watchdog/starfive-wdt.c | 4
-rw-r--r--  drivers/watchdog/via_wdt.c | 1
-rw-r--r--  drivers/watchdog/wdat_wdt.c | 64
-rw-r--r--  drivers/xen/grant-dma-ops.c | 20
-rw-r--r--  drivers/xen/grant-table.c | 2
-rw-r--r--  drivers/xen/swiotlb-xen.c | 63
-rw-r--r--  drivers/xen/xenbus/xenbus_xs.c | 16
90 files changed, 5044 insertions, 991 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index 20eb17596b89..b9f70e01f269 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -160,7 +160,7 @@ obj-$(CONFIG_RPMSG) += rpmsg/
obj-$(CONFIG_SOUNDWIRE) += soundwire/
# Virtualization drivers
-obj-$(CONFIG_VIRT_DRIVERS) += virt/
+obj-y += virt/
obj-$(CONFIG_HYPERV) += hv/
obj-$(CONFIG_PM_DEVFREQ) += devfreq/
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 626908491d8f..0dc767392a6c 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -44,6 +44,7 @@
#include <linux/uuid.h>
#include <linux/ras.h>
#include <linux/task_work.h>
+#include <linux/vmcore_info.h>
#include <acpi/actbl1.h>
#include <acpi/ghes.h>
@@ -864,6 +865,40 @@ int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd)
}
EXPORT_SYMBOL_NS_GPL(cxl_cper_kfifo_get, "CXL");
+static void ghes_log_hwerr(int sev, guid_t *sec_type)
+{
+ if (sev != CPER_SEV_RECOVERABLE)
+ return;
+
+ if (guid_equal(sec_type, &CPER_SEC_PROC_ARM) ||
+ guid_equal(sec_type, &CPER_SEC_PROC_GENERIC) ||
+ guid_equal(sec_type, &CPER_SEC_PROC_IA)) {
+ hwerr_log_error_type(HWERR_RECOV_CPU);
+ return;
+ }
+
+ if (guid_equal(sec_type, &CPER_SEC_CXL_PROT_ERR) ||
+ guid_equal(sec_type, &CPER_SEC_CXL_GEN_MEDIA_GUID) ||
+ guid_equal(sec_type, &CPER_SEC_CXL_DRAM_GUID) ||
+ guid_equal(sec_type, &CPER_SEC_CXL_MEM_MODULE_GUID)) {
+ hwerr_log_error_type(HWERR_RECOV_CXL);
+ return;
+ }
+
+ if (guid_equal(sec_type, &CPER_SEC_PCIE) ||
+ guid_equal(sec_type, &CPER_SEC_PCI_X_BUS)) {
+ hwerr_log_error_type(HWERR_RECOV_PCI);
+ return;
+ }
+
+ if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
+ hwerr_log_error_type(HWERR_RECOV_MEMORY);
+ return;
+ }
+
+ hwerr_log_error_type(HWERR_RECOV_OTHERS);
+}
+
static void ghes_do_proc(struct ghes *ghes,
const struct acpi_hest_generic_status *estatus)
{
@@ -885,6 +920,7 @@ static void ghes_do_proc(struct ghes *ghes,
if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
fru_text = gdata->fru_text;
+ ghes_log_hwerr(sev, sec_type);
if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
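For illustration, the GUID-to-category classification in ghes_log_hwerr() could
equally be table-driven. A hedged sketch, not part of this patch (the map and
function names are hypothetical; the CPER GUIDs, HWERR_RECOV_* values and
hwerr_log_error_type() are the ones used above, CXL entries omitted for brevity):

static const struct {
	guid_t guid;
	int type;
} ghes_hwerr_map[] = {
	{ CPER_SEC_PROC_ARM,     HWERR_RECOV_CPU },
	{ CPER_SEC_PROC_GENERIC, HWERR_RECOV_CPU },
	{ CPER_SEC_PROC_IA,      HWERR_RECOV_CPU },
	{ CPER_SEC_PCIE,         HWERR_RECOV_PCI },
	{ CPER_SEC_PCI_X_BUS,    HWERR_RECOV_PCI },
	{ CPER_SEC_PLATFORM_MEM, HWERR_RECOV_MEMORY },
};

static void ghes_log_hwerr_tabular(int sev, guid_t *sec_type)
{
	int i;

	if (sev != CPER_SEV_RECOVERABLE)
		return;

	for (i = 0; i < ARRAY_SIZE(ghes_hwerr_map); i++) {
		if (guid_equal(sec_type, &ghes_hwerr_map[i].guid)) {
			hwerr_log_error_type(ghes_hwerr_map[i].type);
			return;
		}
	}
	hwerr_log_error_type(HWERR_RECOV_OTHERS);
}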
diff --git a/drivers/android/binder/process.rs b/drivers/android/binder/process.rs
index e5237e9ec552..ac981614544e 100644
--- a/drivers/android/binder/process.rs
+++ b/drivers/android/binder/process.rs
@@ -19,6 +19,7 @@ use kernel::{
cred::Credential,
error::Error,
fs::file::{self, File},
+ id_pool::IdPool,
list::{List, ListArc, ListArcField, ListLinks},
mm,
prelude::*,
@@ -394,6 +395,8 @@ kernel::list::impl_list_item! {
struct ProcessNodeRefs {
/// Used to look up nodes using the 32-bit id that this process knows it by.
by_handle: RBTree<u32, ListArc<NodeRefInfo, { NodeRefInfo::LIST_PROC }>>,
+ /// Used to quickly find unused ids in `by_handle`.
+ handle_is_present: IdPool,
/// Used to look up nodes without knowing their local 32-bit id. The usize is the address of
/// the underlying `Node` struct as returned by `Node::global_id`.
by_node: RBTree<usize, u32>,
@@ -408,6 +411,7 @@ impl ProcessNodeRefs {
fn new() -> Self {
Self {
by_handle: RBTree::new(),
+ handle_is_present: IdPool::new(),
by_node: RBTree::new(),
freeze_listeners: RBTree::new(),
}
@@ -802,7 +806,7 @@ impl Process {
pub(crate) fn insert_or_update_handle(
self: ArcBorrow<'_, Process>,
node_ref: NodeRef,
- is_mananger: bool,
+ is_manager: bool,
) -> Result<u32> {
{
let mut refs = self.node_refs.lock();
@@ -821,7 +825,33 @@ impl Process {
let reserve2 = RBTreeNodeReservation::new(GFP_KERNEL)?;
let info = UniqueArc::new_uninit(GFP_KERNEL)?;
- let mut refs = self.node_refs.lock();
+ let mut refs_lock = self.node_refs.lock();
+ let mut refs = &mut *refs_lock;
+
+ let (unused_id, by_handle_slot) = loop {
+ // ID 0 may only be used by the manager.
+ let start = if is_manager { 0 } else { 1 };
+
+ if let Some(res) = refs.handle_is_present.find_unused_id(start) {
+ match refs.by_handle.entry(res.as_u32()) {
+ rbtree::Entry::Vacant(entry) => break (res, entry),
+ rbtree::Entry::Occupied(_) => {
+ pr_err!("Detected mismatch between handle_is_present and by_handle");
+ res.acquire();
+ kernel::warn_on!(true);
+ return Err(EINVAL);
+ }
+ }
+ }
+
+ let grow_request = refs.handle_is_present.grow_request().ok_or(ENOMEM)?;
+ drop(refs_lock);
+ let resizer = grow_request.realloc(GFP_KERNEL)?;
+ refs_lock = self.node_refs.lock();
+ refs = &mut *refs_lock;
+ refs.handle_is_present.grow(resizer);
+ };
+ let handle = unused_id.as_u32();
// Do a lookup again as node may have been inserted before the lock was reacquired.
if let Some(handle_ref) = refs.by_node.get(&node_ref.node.global_id()) {
@@ -831,20 +861,9 @@ impl Process {
return Ok(handle);
}
- // Find id.
- let mut target: u32 = if is_mananger { 0 } else { 1 };
- for handle in refs.by_handle.keys() {
- if *handle > target {
- break;
- }
- if *handle == target {
- target = target.checked_add(1).ok_or(ENOMEM)?;
- }
- }
-
let gid = node_ref.node.global_id();
let (info_proc, info_node) = {
- let info_init = NodeRefInfo::new(node_ref, target, self.into());
+ let info_init = NodeRefInfo::new(node_ref, handle, self.into());
match info.pin_init_with(info_init) {
Ok(info) => ListArc::pair_from_pin_unique(info),
// error is infallible
@@ -865,9 +884,10 @@ impl Process {
// `info_node` into the right node's `refs` list.
unsafe { info_proc.node_ref2().node.insert_node_info(info_node) };
- refs.by_node.insert(reserve1.into_node(gid, target));
- refs.by_handle.insert(reserve2.into_node(target, info_proc));
- Ok(target)
+ refs.by_node.insert(reserve1.into_node(gid, handle));
+ by_handle_slot.insert(info_proc, reserve2);
+ unused_id.acquire();
+ Ok(handle)
}
pub(crate) fn get_transaction_node(&self, handle: u32) -> BinderResult<NodeRef> {
@@ -932,6 +952,16 @@ impl Process {
let id = info.node_ref().node.global_id();
refs.by_handle.remove(&handle);
refs.by_node.remove(&id);
+ refs.handle_is_present.release_id(handle as usize);
+
+ if let Some(shrink) = refs.handle_is_present.shrink_request() {
+ drop(refs);
+ // This intentionally ignores allocation failures.
+ if let Ok(new_bitmap) = shrink.realloc(GFP_KERNEL) {
+ refs = self.node_refs.lock();
+ refs.handle_is_present.shrink(new_bitmap);
+ }
+ }
}
} else {
// All refs are cleared in process exit, so this warning is expected in that case.
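The allocation loop above is the classic grow-outside-the-lock pattern: search
under the lock, and when the pool is exhausted, drop the lock to allocate,
retake it, install the larger bitmap (unless another thread grew it first) and
retry. A rough C sketch of the same idea, illustrative only and not the actual
kernel::id_pool API:

#include <linux/bitmap.h>
#include <linux/mutex.h>

struct id_pool {
	unsigned long *bitmap;
	unsigned int nbits;
};

static int pool_get_id(struct mutex *lock, struct id_pool *pool)
{
	unsigned long *new_bitmap;
	unsigned int new_nbits;
	unsigned long id;

	mutex_lock(lock);
	for (;;) {
		id = find_first_zero_bit(pool->bitmap, pool->nbits);
		if (id < pool->nbits)
			break;

		/* Pool exhausted: allocate a bigger bitmap with the lock dropped. */
		new_nbits = pool->nbits * 2;
		mutex_unlock(lock);
		new_bitmap = bitmap_zalloc(new_nbits, GFP_KERNEL);
		if (!new_bitmap)
			return -ENOMEM;

		mutex_lock(lock);
		/* Another thread may have grown the pool in the meantime. */
		if (new_nbits > pool->nbits) {
			bitmap_copy(new_bitmap, pool->bitmap, pool->nbits);
			bitmap_free(pool->bitmap);
			pool->bitmap = new_bitmap;
			pool->nbits = new_nbits;
		} else {
			bitmap_free(new_bitmap);
		}
	}
	__set_bit(id, pool->bitmap);
	mutex_unlock(lock);

	return id;
}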
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 320e155c6be7..9eb7771706f0 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -334,6 +334,19 @@ static struct device *next_device(struct klist_iter *i)
return dev;
}
+static struct device *prev_device(struct klist_iter *i)
+{
+ struct klist_node *n = klist_prev(i);
+ struct device *dev = NULL;
+ struct device_private *dev_prv;
+
+ if (n) {
+ dev_prv = to_device_private_bus(n);
+ dev = dev_prv->device;
+ }
+ return dev;
+}
+
/**
* bus_for_each_dev - device iterator.
* @bus: bus type.
@@ -414,6 +427,31 @@ struct device *bus_find_device(const struct bus_type *bus,
}
EXPORT_SYMBOL_GPL(bus_find_device);
+struct device *bus_find_device_reverse(const struct bus_type *bus,
+ struct device *start, const void *data,
+ device_match_t match)
+{
+ struct subsys_private *sp = bus_to_subsys(bus);
+ struct klist_iter i;
+ struct device *dev;
+
+ if (!sp)
+ return NULL;
+
+ klist_iter_init_node(&sp->klist_devices, &i,
+ (start ? &start->p->knode_bus : NULL));
+ while ((dev = prev_device(&i))) {
+ if (match(dev, data)) {
+ get_device(dev);
+ break;
+ }
+ }
+ klist_iter_exit(&i);
+ subsys_put(sp);
+ return dev;
+}
+EXPORT_SYMBOL_GPL(bus_find_device_reverse);
+
static struct device_driver *next_driver(struct klist_iter *i)
{
struct klist_node *n = klist_next(i);
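A usage sketch for the new reverse iterator (find_last_named() is a
hypothetical caller; device_match_name() is the existing driver-core helper).
Passing start = NULL makes klist_prev() begin at the tail of the bus klist, so
the first match returned is the most recently added device:

static struct device *find_last_named(const struct bus_type *bus,
				      const char *name)
{
	/* Returns a reference-counted device (put_device() when done) or NULL. */
	return bus_find_device_reverse(bus, NULL, name, device_match_name);
}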
diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c
index e700f40fd87f..e7208c47268b 100644
--- a/drivers/clk/at91/clk-peripheral.c
+++ b/drivers/clk/at91/clk-peripheral.c
@@ -3,6 +3,7 @@
* Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
*/
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
index 5daa32c4cf25..543d7aee8d24 100644
--- a/drivers/clk/at91/pmc.h
+++ b/drivers/clk/at91/pmc.h
@@ -117,9 +117,6 @@ struct at91_clk_pms {
unsigned int parent;
};
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
-
#define ndck(a, s) (a[s - 1].id + 1)
#define nck(a) (a[ARRAY_SIZE(a) - 1].id + 1)
diff --git a/drivers/clk/renesas/clk-div6.c b/drivers/clk/renesas/clk-div6.c
index 3abd6e5400ad..f7b827b5e9b2 100644
--- a/drivers/clk/renesas/clk-div6.c
+++ b/drivers/clk/renesas/clk-div6.c
@@ -7,6 +7,7 @@
* Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk-provider.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -171,8 +172,7 @@ static u8 cpg_div6_clock_get_parent(struct clk_hw *hw)
if (clock->src_mask == 0)
return 0;
- hw_index = (readl(clock->reg) & clock->src_mask) >>
- __ffs(clock->src_mask);
+ hw_index = field_get(clock->src_mask, readl(clock->reg));
for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
if (clock->parents[i] == hw_index)
return i;
@@ -191,7 +191,7 @@ static int cpg_div6_clock_set_parent(struct clk_hw *hw, u8 index)
if (index >= clk_hw_get_num_parents(hw))
return -EINVAL;
- src = clock->parents[index] << __ffs(clock->src_mask);
+ src = field_prep(clock->src_mask, clock->parents[index]);
writel((readl(clock->reg) & ~clock->src_mask) | src, clock->reg);
return 0;
}
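field_get() and field_prep() from <linux/bitfield.h> are the runtime-mask
variants of FIELD_GET()/FIELD_PREP(), which require compile-time constant
masks; that is also why the local copies in at91/pmc.h above and in the QAT
debugfs code later in this diff can be dropped. A minimal worked example:

u32 mask = 0x70;                  /* bits [6:4], only known at runtime */
u32 reg  = 0xa5;                  /* 1010 0101b                        */
u32 val  = field_get(mask, reg);  /* (0xa5 & 0x70) >> 4 == 0x2         */
u32 wr   = field_prep(mask, 0x5); /* (0x5 << 4) & 0x70  == 0x50        */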
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
index 10ae20489df9..b954278ddd9d 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.c
+++ b/drivers/clk/renesas/rcar-gen3-cpg.c
@@ -54,10 +54,8 @@ static unsigned long cpg_pll_clk_recalc_rate(struct clk_hw *hw,
{
struct cpg_pll_clk *pll_clk = to_pll_clk(hw);
unsigned int mult;
- u32 val;
- val = readl(pll_clk->pllcr_reg) & CPG_PLLnCR_STC_MASK;
- mult = (val >> __ffs(CPG_PLLnCR_STC_MASK)) + 1;
+ mult = FIELD_GET(CPG_PLLnCR_STC_MASK, readl(pll_clk->pllcr_reg)) + 1;
return parent_rate * mult * pll_clk->fixed_mult;
}
@@ -94,7 +92,7 @@ static int cpg_pll_clk_set_rate(struct clk_hw *hw, unsigned long rate,
val = readl(pll_clk->pllcr_reg);
val &= ~CPG_PLLnCR_STC_MASK;
- val |= (mult - 1) << __ffs(CPG_PLLnCR_STC_MASK);
+ val |= FIELD_PREP(CPG_PLLnCR_STC_MASK, mult - 1);
writel(val, pll_clk->pllcr_reg);
for (i = 1000; i; i--) {
@@ -176,11 +174,7 @@ static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct cpg_z_clk *zclk = to_z_clk(hw);
- unsigned int mult;
- u32 val;
-
- val = readl(zclk->reg) & zclk->mask;
- mult = 32 - (val >> __ffs(zclk->mask));
+ unsigned int mult = 32 - field_get(zclk->mask, readl(zclk->reg));
return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult,
32 * zclk->fixed_div);
@@ -231,7 +225,8 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
return -EBUSY;
- cpg_reg_modify(zclk->reg, zclk->mask, (32 - mult) << __ffs(zclk->mask));
+ cpg_reg_modify(zclk->reg, zclk->mask,
+ field_prep(zclk->mask, 32 - mult));
/*
* Set KICK bit in FRQCRB to update hardware setting and wait for
diff --git a/drivers/clk/renesas/rcar-gen4-cpg.c b/drivers/clk/renesas/rcar-gen4-cpg.c
index fb9a876aaba5..db3a0b8ef2b9 100644
--- a/drivers/clk/renesas/rcar-gen4-cpg.c
+++ b/drivers/clk/renesas/rcar-gen4-cpg.c
@@ -279,11 +279,7 @@ static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct cpg_z_clk *zclk = to_z_clk(hw);
- unsigned int mult;
- u32 val;
-
- val = readl(zclk->reg) & zclk->mask;
- mult = 32 - (val >> __ffs(zclk->mask));
+ unsigned int mult = 32 - field_get(zclk->mask, readl(zclk->reg));
return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult,
32 * zclk->fixed_div);
@@ -334,7 +330,8 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
return -EBUSY;
- cpg_reg_modify(zclk->reg, zclk->mask, (32 - mult) << __ffs(zclk->mask));
+ cpg_reg_modify(zclk->reg, zclk->mask,
+ field_prep(zclk->mask, 32 - mult));
/*
* Set KICK bit in FRQCRB to update hardware setting and wait for
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index f394e45e11ab..f16a0f611317 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -39,6 +39,7 @@ config CRYPTO_DEV_SP_PSP
bool "Platform Security Processor (PSP) device"
default y
depends on CRYPTO_DEV_CCP_DD && X86_64 && AMD_IOMMU
+ select PCI_TSM if PCI
help
Provide support for the AMD Platform Security Processor (PSP).
The PSP is a dedicated processor that provides support for key
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index a9626b30044a..0424e08561ef 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -16,6 +16,10 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \
hsti.o \
sfs.o
+ifeq ($(CONFIG_PCI_TSM),y)
+ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += sev-dev-tsm.o sev-dev-tio.o
+endif
+
obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
ccp-crypto-objs := ccp-crypto-main.o \
ccp-crypto-aes.o \
diff --git a/drivers/crypto/ccp/sev-dev-tio.c b/drivers/crypto/ccp/sev-dev-tio.c
new file mode 100644
index 000000000000..9a98f98c20a7
--- /dev/null
+++ b/drivers/crypto/ccp/sev-dev-tio.c
@@ -0,0 +1,864 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+// Interface to PSP for CCP/SEV-TIO/SNP-VM
+
+#include <linux/pci.h>
+#include <linux/tsm.h>
+#include <linux/psp.h>
+#include <linux/vmalloc.h>
+#include <linux/bitfield.h>
+#include <linux/pci-doe.h>
+#include <asm/sev-common.h>
+#include <asm/sev.h>
+#include <asm/page.h>
+#include "sev-dev.h"
+#include "sev-dev-tio.h"
+
+#define to_tio_status(dev_data) \
+ (container_of((dev_data), struct tio_dsm, data)->sev->tio_status)
+
+#define SLA_PAGE_TYPE_DATA 0
+#define SLA_PAGE_TYPE_SCATTER 1
+#define SLA_PAGE_SIZE_4K 0
+#define SLA_PAGE_SIZE_2M 1
+#define SLA_SZ(s) ((s).page_size == SLA_PAGE_SIZE_2M ? SZ_2M : SZ_4K)
+#define SLA_SCATTER_LEN(s) (SLA_SZ(s) / sizeof(struct sla_addr_t))
+#define SLA_EOL ((struct sla_addr_t) { .pfn = ((1UL << 40) - 1) })
+#define SLA_NULL ((struct sla_addr_t) { 0 })
+#define IS_SLA_NULL(s) ((s).sla == SLA_NULL.sla)
+#define IS_SLA_EOL(s) ((s).sla == SLA_EOL.sla)
+
+static phys_addr_t sla_to_pa(struct sla_addr_t sla)
+{
+ u64 pfn = sla.pfn;
+ u64 pa = pfn << PAGE_SHIFT;
+
+ return pa;
+}
+
+static void *sla_to_va(struct sla_addr_t sla)
+{
+ void *va = __va(__sme_clr(sla_to_pa(sla)));
+
+ return va;
+}
+
+#define sla_to_pfn(sla) (__pa(sla_to_va(sla)) >> PAGE_SHIFT)
+#define sla_to_page(sla) virt_to_page(sla_to_va(sla))
+
+static struct sla_addr_t make_sla(struct page *pg, bool stp)
+{
+ u64 pa = __sme_set(page_to_phys(pg));
+ struct sla_addr_t ret = {
+ .pfn = pa >> PAGE_SHIFT,
+ .page_size = SLA_PAGE_SIZE_4K, /* SLA_PAGE_SIZE_2M is not supported yet */
+ .page_type = stp ? SLA_PAGE_TYPE_SCATTER : SLA_PAGE_TYPE_DATA
+ };
+
+ return ret;
+}
+
+/* the BUFFER Structure */
+#define SLA_BUFFER_FLAG_ENCRYPTION BIT(0)
+
+/*
+ * struct sla_buffer_hdr - Scatter list address buffer header
+ *
+ * @capacity_sz: Total capacity of the buffer in bytes
+ * @payload_sz: Size of buffer payload in bytes, must be multiple of 32B
+ * @flags: Buffer flags (SLA_BUFFER_FLAG_ENCRYPTION: buffer is encrypted)
+ * @iv: Initialization vector used for encryption
+ * @authtag: Authentication tag for encrypted buffer
+ */
+struct sla_buffer_hdr {
+ u32 capacity_sz;
+ u32 payload_sz; /* The size of BUFFER_PAYLOAD in bytes. Must be multiple of 32B */
+ u32 flags;
+ u8 reserved1[4];
+ u8 iv[16]; /* IV used for the encryption of this buffer */
+ u8 authtag[16]; /* Authentication tag for this buffer */
+ u8 reserved2[16];
+} __packed;
+
+enum spdm_data_type_t {
+ DOBJ_DATA_TYPE_SPDM = 0x1,
+ DOBJ_DATA_TYPE_SECURE_SPDM = 0x2,
+};
+
+struct spdm_dobj_hdr_req {
+ struct spdm_dobj_hdr hdr; /* hdr.id == SPDM_DOBJ_ID_REQ */
+ u8 data_type; /* spdm_data_type_t */
+ u8 reserved2[5];
+} __packed;
+
+struct spdm_dobj_hdr_resp {
+ struct spdm_dobj_hdr hdr; /* hdr.id == SPDM_DOBJ_ID_RESP */
+ u8 data_type; /* spdm_data_type_t */
+ u8 reserved2[5];
+} __packed;
+
+/* Defined in sev-dev-tio.h so sev-dev-tsm.c can read types of blobs */
+struct spdm_dobj_hdr_cert;
+struct spdm_dobj_hdr_meas;
+struct spdm_dobj_hdr_report;
+
+/* Used in all SPDM-aware TIO commands */
+struct spdm_ctrl {
+ struct sla_addr_t req;
+ struct sla_addr_t resp;
+ struct sla_addr_t scratch;
+ struct sla_addr_t output;
+} __packed;
+
+static size_t sla_dobj_id_to_size(u8 id)
+{
+ size_t n;
+
+ BUILD_BUG_ON(sizeof(struct spdm_dobj_hdr_resp) != 0x10);
+ switch (id) {
+ case SPDM_DOBJ_ID_REQ:
+ n = sizeof(struct spdm_dobj_hdr_req);
+ break;
+ case SPDM_DOBJ_ID_RESP:
+ n = sizeof(struct spdm_dobj_hdr_resp);
+ break;
+ default:
+ WARN_ON(1);
+ n = 0;
+ break;
+ }
+
+ return n;
+}
+
+#define SPDM_DOBJ_HDR_SIZE(hdr) sla_dobj_id_to_size((hdr)->id)
+#define SPDM_DOBJ_DATA(hdr) ((u8 *)(hdr) + SPDM_DOBJ_HDR_SIZE(hdr))
+#define SPDM_DOBJ_LEN(hdr) ((hdr)->length - SPDM_DOBJ_HDR_SIZE(hdr))
+
+#define sla_to_dobj_resp_hdr(buf) ((struct spdm_dobj_hdr_resp *) \
+ sla_to_dobj_hdr_check((buf), SPDM_DOBJ_ID_RESP))
+#define sla_to_dobj_req_hdr(buf) ((struct spdm_dobj_hdr_req *) \
+ sla_to_dobj_hdr_check((buf), SPDM_DOBJ_ID_REQ))
+
+static struct spdm_dobj_hdr *sla_to_dobj_hdr(struct sla_buffer_hdr *buf)
+{
+ if (!buf)
+ return NULL;
+
+ return (struct spdm_dobj_hdr *) &buf[1];
+}
+
+static struct spdm_dobj_hdr *sla_to_dobj_hdr_check(struct sla_buffer_hdr *buf, u32 check_dobjid)
+{
+ struct spdm_dobj_hdr *hdr = sla_to_dobj_hdr(buf);
+
+ if (WARN_ON_ONCE(!hdr))
+ return NULL;
+
+ if (hdr->id != check_dobjid) {
+ pr_err("! ERROR: expected %d, found %d\n", check_dobjid, hdr->id);
+ return NULL;
+ }
+
+ return hdr;
+}
+
+static void *sla_to_data(struct sla_buffer_hdr *buf, u32 dobjid)
+{
+ struct spdm_dobj_hdr *hdr = sla_to_dobj_hdr(buf);
+
+ if (WARN_ON_ONCE(dobjid != SPDM_DOBJ_ID_REQ && dobjid != SPDM_DOBJ_ID_RESP))
+ return NULL;
+
+ if (!hdr)
+ return NULL;
+
+ return (u8 *) hdr + sla_dobj_id_to_size(dobjid);
+}
+
+/*
+ * struct sev_data_tio_status - SEV_CMD_TIO_STATUS command
+ *
+ * @length: Length of this command buffer in bytes
+ * @status_paddr: System physical address of the TIO_STATUS structure
+ */
+struct sev_data_tio_status {
+ u32 length;
+ u8 reserved[4];
+ u64 status_paddr;
+} __packed;
+
+/* TIO_INIT */
+struct sev_data_tio_init {
+ u32 length;
+ u8 reserved[12];
+} __packed;
+
+/*
+ * struct sev_data_tio_dev_create - TIO_DEV_CREATE command
+ *
+ * @length: Length in bytes of this command buffer
+ * @dev_ctx_sla: Scatter list address pointing to a buffer to be used as a device context buffer
+ * @device_id: PCIe Routing Identifier of the device to connect to
+ * @root_port_id: PCIe Routing Identifier of the root port of the device
+ * @segment_id: PCIe Segment Identifier of the device to connect to
+ */
+struct sev_data_tio_dev_create {
+ u32 length;
+ u8 reserved1[4];
+ struct sla_addr_t dev_ctx_sla;
+ u16 device_id;
+ u16 root_port_id;
+ u8 segment_id;
+ u8 reserved2[11];
+} __packed;
+
+/*
+ * struct sev_data_tio_dev_connect - TIO_DEV_CONNECT command
+ *
+ * @length: Length in bytes of this command buffer
+ * @spdm_ctrl: SPDM control structure defined in Section 5.1
+ * @dev_ctx_sla: Scatter list address of the device context buffer
+ * @tc_mask: Bitmask of the traffic classes to initialize for SEV-TIO usage.
+ * Setting the kth bit of the TC_MASK to 1 indicates that the traffic
+ * class k will be initialized
+ * @cert_slot: Slot number of the certificate requested for constructing the SPDM session
+ * @ide_stream_id: IDE stream IDs to be associated with this device.
+ * Valid only if corresponding bit in TC_MASK is set
+ */
+struct sev_data_tio_dev_connect {
+ u32 length;
+ u8 reserved1[4];
+ struct spdm_ctrl spdm_ctrl;
+ u8 reserved2[8];
+ struct sla_addr_t dev_ctx_sla;
+ u8 tc_mask;
+ u8 cert_slot;
+ u8 reserved3[6];
+ u8 ide_stream_id[8];
+ u8 reserved4[8];
+} __packed;
+
+/*
+ * struct sev_data_tio_dev_disconnect - TIO_DEV_DISCONNECT command
+ *
+ * @length: Length in bytes of this command buffer
+ * @flags: Command flags (TIO_DEV_DISCONNECT_FLAG_FORCE: force disconnect)
+ * @spdm_ctrl: SPDM control structure defined in Section 5.1
+ * @dev_ctx_sla: Scatter list address of the device context buffer
+ */
+#define TIO_DEV_DISCONNECT_FLAG_FORCE BIT(0)
+
+struct sev_data_tio_dev_disconnect {
+ u32 length;
+ u32 flags;
+ struct spdm_ctrl spdm_ctrl;
+ struct sla_addr_t dev_ctx_sla;
+} __packed;
+
+/*
+ * struct sev_data_tio_dev_meas - TIO_DEV_MEASUREMENTS command
+ *
+ * @length: Length in bytes of this command buffer
+ * @flags: Command flags (TIO_DEV_MEAS_FLAG_RAW_BITSTREAM: request raw measurements)
+ * @spdm_ctrl: SPDM control structure defined in Section 5.1
+ * @dev_ctx_sla: Scatter list address of the device context buffer
+ * @meas_nonce: Nonce for measurement freshness verification
+ */
+#define TIO_DEV_MEAS_FLAG_RAW_BITSTREAM BIT(0)
+
+struct sev_data_tio_dev_meas {
+ u32 length;
+ u32 flags;
+ struct spdm_ctrl spdm_ctrl;
+ struct sla_addr_t dev_ctx_sla;
+ u8 meas_nonce[32];
+} __packed;
+
+/*
+ * struct sev_data_tio_dev_certs - TIO_DEV_CERTIFICATES command
+ *
+ * @length: Length in bytes of this command buffer
+ * @spdm_ctrl: SPDM control structure defined in Section 5.1
+ * @dev_ctx_sla: Scatter list address of the device context buffer
+ */
+struct sev_data_tio_dev_certs {
+ u32 length;
+ u8 reserved[4];
+ struct spdm_ctrl spdm_ctrl;
+ struct sla_addr_t dev_ctx_sla;
+} __packed;
+
+/*
+ * struct sev_data_tio_dev_reclaim - TIO_DEV_RECLAIM command
+ *
+ * @length: Length in bytes of this command buffer
+ * @dev_ctx_sla: Scatter list address of the device context buffer
+ *
+ * This command reclaims resources associated with a device context.
+ */
+struct sev_data_tio_dev_reclaim {
+ u32 length;
+ u8 reserved[4];
+ struct sla_addr_t dev_ctx_sla;
+} __packed;
+
+static struct sla_buffer_hdr *sla_buffer_map(struct sla_addr_t sla)
+{
+ struct sla_buffer_hdr *buf;
+
+ BUILD_BUG_ON(sizeof(struct sla_buffer_hdr) != 0x40);
+ if (IS_SLA_NULL(sla))
+ return NULL;
+
+ if (sla.page_type == SLA_PAGE_TYPE_SCATTER) {
+ struct sla_addr_t *scatter = sla_to_va(sla);
+ unsigned int i, npages = 0;
+
+ for (i = 0; i < SLA_SCATTER_LEN(sla); ++i) {
+ if (WARN_ON_ONCE(SLA_SZ(scatter[i]) > SZ_4K))
+ return NULL;
+
+ if (WARN_ON_ONCE(scatter[i].page_type == SLA_PAGE_TYPE_SCATTER))
+ return NULL;
+
+ if (IS_SLA_EOL(scatter[i])) {
+ npages = i;
+ break;
+ }
+ }
+ if (WARN_ON_ONCE(!npages))
+ return NULL;
+
+ struct page **pp = kmalloc_array(npages, sizeof(pp[0]), GFP_KERNEL);
+
+ if (!pp)
+ return NULL;
+
+ for (i = 0; i < npages; ++i)
+ pp[i] = sla_to_page(scatter[i]);
+
+ buf = vm_map_ram(pp, npages, 0);
+ kfree(pp);
+ } else {
+ struct page *pg = sla_to_page(sla);
+
+ buf = vm_map_ram(&pg, 1, 0);
+ }
+
+ return buf;
+}
+
+static void sla_buffer_unmap(struct sla_addr_t sla, struct sla_buffer_hdr *buf)
+{
+ if (!buf)
+ return;
+
+ if (sla.page_type == SLA_PAGE_TYPE_SCATTER) {
+ struct sla_addr_t *scatter = sla_to_va(sla);
+ unsigned int i, npages = 0;
+
+ for (i = 0; i < SLA_SCATTER_LEN(sla); ++i) {
+ if (IS_SLA_EOL(scatter[i])) {
+ npages = i;
+ break;
+ }
+ }
+ if (!npages)
+ return;
+
+ vm_unmap_ram(buf, npages);
+ } else {
+ vm_unmap_ram(buf, 1);
+ }
+}
+
+static void dobj_response_init(struct sla_buffer_hdr *buf)
+{
+ struct spdm_dobj_hdr *dobj = sla_to_dobj_hdr(buf);
+
+ dobj->id = SPDM_DOBJ_ID_RESP;
+ dobj->version.major = 0x1;
+ dobj->version.minor = 0;
+ dobj->length = 0;
+ buf->payload_sz = sla_dobj_id_to_size(dobj->id) + dobj->length;
+}
+
+static void sla_free(struct sla_addr_t sla, size_t len, bool firmware_state)
+{
+ unsigned int npages = PAGE_ALIGN(len) >> PAGE_SHIFT;
+ struct sla_addr_t *scatter = NULL;
+ int ret = 0, i;
+
+ if (IS_SLA_NULL(sla))
+ return;
+
+ if (firmware_state) {
+ if (sla.page_type == SLA_PAGE_TYPE_SCATTER) {
+ scatter = sla_to_va(sla);
+
+ for (i = 0; i < npages; ++i) {
+ if (IS_SLA_EOL(scatter[i]))
+ break;
+
+ ret = snp_reclaim_pages(sla_to_pa(scatter[i]), 1, false);
+ if (ret)
+ break;
+ }
+ } else {
+ ret = snp_reclaim_pages(sla_to_pa(sla), 1, false);
+ }
+ }
+
+ if (WARN_ON(ret))
+ return;
+
+ if (scatter) {
+ for (i = 0; i < npages; ++i) {
+ if (IS_SLA_EOL(scatter[i]))
+ break;
+ free_page((unsigned long)sla_to_va(scatter[i]));
+ }
+ }
+
+ free_page((unsigned long)sla_to_va(sla));
+}
+
+static struct sla_addr_t sla_alloc(size_t len, bool firmware_state)
+{
+ unsigned long i, npages = PAGE_ALIGN(len) >> PAGE_SHIFT;
+ struct sla_addr_t *scatter = NULL;
+ struct sla_addr_t ret = SLA_NULL;
+ struct sla_buffer_hdr *buf;
+ struct page *pg;
+
+ if (npages == 0)
+ return ret;
+
+ if (WARN_ON_ONCE(npages > ((PAGE_SIZE / sizeof(struct sla_addr_t)) - 1)))
+ return ret;
+
+ BUILD_BUG_ON(PAGE_SIZE < SZ_4K);
+
+ if (npages > 1) {
+ pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!pg)
+ return SLA_NULL;
+
+ ret = make_sla(pg, true);
+ scatter = page_to_virt(pg);
+ for (i = 0; i < npages; ++i) {
+ pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!pg)
+ goto no_reclaim_exit;
+
+ scatter[i] = make_sla(pg, false);
+ }
+ scatter[i] = SLA_EOL;
+ } else {
+ pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!pg)
+ return SLA_NULL;
+
+ ret = make_sla(pg, false);
+ }
+
+ buf = sla_buffer_map(ret);
+ if (!buf)
+ goto no_reclaim_exit;
+
+ buf->capacity_sz = (npages << PAGE_SHIFT);
+ sla_buffer_unmap(ret, buf);
+
+ if (firmware_state) {
+ if (scatter) {
+ for (i = 0; i < npages; ++i) {
+ if (rmp_make_private(sla_to_pfn(scatter[i]), 0,
+ PG_LEVEL_4K, 0, true))
+ goto free_exit;
+ }
+ } else {
+ if (rmp_make_private(sla_to_pfn(ret), 0, PG_LEVEL_4K, 0, true))
+ goto no_reclaim_exit;
+ }
+ }
+
+ return ret;
+
+no_reclaim_exit:
+ firmware_state = false;
+free_exit:
+ sla_free(ret, len, firmware_state);
+ return SLA_NULL;
+}
+
+/* Expands a buffer, only firmware owned buffers allowed for now */
+static int sla_expand(struct sla_addr_t *sla, size_t *len)
+{
+ struct sla_buffer_hdr *oldbuf = sla_buffer_map(*sla), *newbuf;
+ struct sla_addr_t oldsla = *sla, newsla;
+ size_t oldlen = *len, newlen;
+
+ if (!oldbuf)
+ return -EFAULT;
+
+ newlen = oldbuf->capacity_sz;
+ if (oldbuf->capacity_sz == oldlen) {
+ /* This buffer does not require expansion; the other one must be the buffer to grow */
+ sla_buffer_unmap(oldsla, oldbuf);
+ return 1;
+ }
+
+ pr_notice("Expanding BUFFER from %ld to %ld bytes\n", oldlen, newlen);
+
+ newsla = sla_alloc(newlen, true);
+ if (IS_SLA_NULL(newsla))
+ return -ENOMEM;
+
+ newbuf = sla_buffer_map(newsla);
+ if (!newbuf) {
+ sla_free(newsla, newlen, true);
+ return -EFAULT;
+ }
+
+ memcpy(newbuf, oldbuf, oldlen);
+
+ sla_buffer_unmap(newsla, newbuf);
+ sla_free(oldsla, oldlen, true);
+ *sla = newsla;
+ *len = newlen;
+
+ return 0;
+}
+
+static int sev_tio_do_cmd(int cmd, void *data, size_t data_len, int *psp_ret,
+ struct tsm_dsm_tio *dev_data)
+{
+ int rc;
+
+ *psp_ret = 0;
+ rc = sev_do_cmd(cmd, data, psp_ret);
+
+ if (WARN_ON(!rc && *psp_ret == SEV_RET_SPDM_REQUEST))
+ return -EIO;
+
+ if (rc == 0 && *psp_ret == SEV_RET_EXPAND_BUFFER_LENGTH_REQUEST) {
+ int rc1, rc2;
+
+ rc1 = sla_expand(&dev_data->output, &dev_data->output_len);
+ if (rc1 < 0)
+ return rc1;
+
+ rc2 = sla_expand(&dev_data->scratch, &dev_data->scratch_len);
+ if (rc2 < 0)
+ return rc2;
+
+ if (!rc1 && !rc2)
+ /* Neither buffer requires expansion, this is wrong */
+ return -EFAULT;
+
+ *psp_ret = 0;
+ rc = sev_do_cmd(cmd, data, psp_ret);
+ }
+
+ if ((rc == 0 || rc == -EIO) && *psp_ret == SEV_RET_SPDM_REQUEST) {
+ struct spdm_dobj_hdr_resp *resp_hdr;
+ struct spdm_dobj_hdr_req *req_hdr;
+ struct sev_tio_status *tio_status = to_tio_status(dev_data);
+ size_t resp_len = tio_status->spdm_req_size_max -
+ (sla_dobj_id_to_size(SPDM_DOBJ_ID_RESP) + sizeof(struct sla_buffer_hdr));
+
+ if (!dev_data->cmd) {
+ if (WARN_ON_ONCE(!data_len || (data_len != *(u32 *) data)))
+ return -EINVAL;
+ if (WARN_ON(data_len > sizeof(dev_data->cmd_data)))
+ return -EFAULT;
+ memcpy(dev_data->cmd_data, data, data_len);
+ memset(&dev_data->cmd_data[data_len], 0xFF,
+ sizeof(dev_data->cmd_data) - data_len);
+ dev_data->cmd = cmd;
+ }
+
+ req_hdr = sla_to_dobj_req_hdr(dev_data->reqbuf);
+ resp_hdr = sla_to_dobj_resp_hdr(dev_data->respbuf);
+ switch (req_hdr->data_type) {
+ case DOBJ_DATA_TYPE_SPDM:
+ rc = PCI_DOE_FEATURE_CMA;
+ break;
+ case DOBJ_DATA_TYPE_SECURE_SPDM:
+ rc = PCI_DOE_FEATURE_SSESSION;
+ break;
+ default:
+ return -EINVAL;
+ }
+ resp_hdr->data_type = req_hdr->data_type;
+ dev_data->spdm.req_len = req_hdr->hdr.length -
+ sla_dobj_id_to_size(SPDM_DOBJ_ID_REQ);
+ dev_data->spdm.rsp_len = resp_len;
+ } else if (dev_data && dev_data->cmd) {
+ /* For either error or success just stop the bouncing */
+ memset(dev_data->cmd_data, 0, sizeof(dev_data->cmd_data));
+ dev_data->cmd = 0;
+ }
+
+ return rc;
+}
+
+int sev_tio_continue(struct tsm_dsm_tio *dev_data)
+{
+ struct spdm_dobj_hdr_resp *resp_hdr;
+ int ret;
+
+ if (!dev_data || !dev_data->cmd)
+ return -EINVAL;
+
+ resp_hdr = sla_to_dobj_resp_hdr(dev_data->respbuf);
+ resp_hdr->hdr.length = ALIGN(sla_dobj_id_to_size(SPDM_DOBJ_ID_RESP) +
+ dev_data->spdm.rsp_len, 32);
+ dev_data->respbuf->payload_sz = resp_hdr->hdr.length;
+
+ ret = sev_tio_do_cmd(dev_data->cmd, dev_data->cmd_data, 0,
+ &dev_data->psp_ret, dev_data);
+ if (ret)
+ return ret;
+
+ if (dev_data->psp_ret != SEV_RET_SUCCESS)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void spdm_ctrl_init(struct spdm_ctrl *ctrl, struct tsm_dsm_tio *dev_data)
+{
+ ctrl->req = dev_data->req;
+ ctrl->resp = dev_data->resp;
+ ctrl->scratch = dev_data->scratch;
+ ctrl->output = dev_data->output;
+}
+
+static void spdm_ctrl_free(struct tsm_dsm_tio *dev_data)
+{
+ struct sev_tio_status *tio_status = to_tio_status(dev_data);
+ size_t len = tio_status->spdm_req_size_max -
+ (sla_dobj_id_to_size(SPDM_DOBJ_ID_RESP) +
+ sizeof(struct sla_buffer_hdr));
+ struct tsm_spdm *spdm = &dev_data->spdm;
+
+ sla_buffer_unmap(dev_data->resp, dev_data->respbuf);
+ sla_buffer_unmap(dev_data->req, dev_data->reqbuf);
+ spdm->rsp = NULL;
+ spdm->req = NULL;
+ sla_free(dev_data->req, len, true);
+ sla_free(dev_data->resp, len, false);
+ sla_free(dev_data->scratch, tio_status->spdm_scratch_size_max, true);
+
+ dev_data->req.sla = 0;
+ dev_data->resp.sla = 0;
+ dev_data->scratch.sla = 0;
+ dev_data->respbuf = NULL;
+ dev_data->reqbuf = NULL;
+ sla_free(dev_data->output, tio_status->spdm_out_size_max, true);
+}
+
+static int spdm_ctrl_alloc(struct tsm_dsm_tio *dev_data)
+{
+ struct sev_tio_status *tio_status = to_tio_status(dev_data);
+ struct tsm_spdm *spdm = &dev_data->spdm;
+ int ret;
+
+ dev_data->req = sla_alloc(tio_status->spdm_req_size_max, true);
+ dev_data->resp = sla_alloc(tio_status->spdm_req_size_max, false);
+ dev_data->scratch_len = tio_status->spdm_scratch_size_max;
+ dev_data->scratch = sla_alloc(dev_data->scratch_len, true);
+ dev_data->output_len = tio_status->spdm_out_size_max;
+ dev_data->output = sla_alloc(dev_data->output_len, true);
+
+ if (IS_SLA_NULL(dev_data->req) || IS_SLA_NULL(dev_data->resp) ||
+ IS_SLA_NULL(dev_data->scratch) || IS_SLA_NULL(dev_data->output)) {
+ ret = -ENOMEM;
+ goto free_spdm_exit;
+ }
+
+ dev_data->reqbuf = sla_buffer_map(dev_data->req);
+ dev_data->respbuf = sla_buffer_map(dev_data->resp);
+ if (!dev_data->reqbuf || !dev_data->respbuf) {
+ ret = -EFAULT;
+ goto free_spdm_exit;
+ }
+
+ spdm->req = sla_to_data(dev_data->reqbuf, SPDM_DOBJ_ID_REQ);
+ spdm->rsp = sla_to_data(dev_data->respbuf, SPDM_DOBJ_ID_RESP);
+ if (!spdm->req || !spdm->rsp) {
+ ret = -EFAULT;
+ goto free_spdm_exit;
+ }
+
+ dobj_response_init(dev_data->respbuf);
+
+ return 0;
+
+free_spdm_exit:
+ spdm_ctrl_free(dev_data);
+ return ret;
+}
+
+int sev_tio_init_locked(void *tio_status_page)
+{
+ struct sev_tio_status *tio_status = tio_status_page;
+ struct sev_data_tio_status data_status = {
+ .length = sizeof(data_status),
+ };
+ int ret, psp_ret;
+
+ data_status.status_paddr = __psp_pa(tio_status_page);
+ ret = __sev_do_cmd_locked(SEV_CMD_TIO_STATUS, &data_status, &psp_ret);
+ if (ret)
+ return ret;
+
+ if (tio_status->length < offsetofend(struct sev_tio_status, tdictx_size) ||
+ tio_status->reserved)
+ return -EFAULT;
+
+ if (!tio_status->tio_en && !tio_status->tio_init_done)
+ return -ENOENT;
+
+ if (tio_status->tio_init_done)
+ return -EBUSY;
+
+ struct sev_data_tio_init ti = { .length = sizeof(ti) };
+
+ ret = __sev_do_cmd_locked(SEV_CMD_TIO_INIT, &ti, &psp_ret);
+ if (ret)
+ return ret;
+
+ ret = __sev_do_cmd_locked(SEV_CMD_TIO_STATUS, &data_status, &psp_ret);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int sev_tio_dev_create(struct tsm_dsm_tio *dev_data, u16 device_id,
+ u16 root_port_id, u8 segment_id)
+{
+ struct sev_tio_status *tio_status = to_tio_status(dev_data);
+ struct sev_data_tio_dev_create create = {
+ .length = sizeof(create),
+ .device_id = device_id,
+ .root_port_id = root_port_id,
+ .segment_id = segment_id,
+ };
+ void *data_pg;
+ int ret;
+
+ dev_data->dev_ctx = sla_alloc(tio_status->devctx_size, true);
+ if (IS_SLA_NULL(dev_data->dev_ctx))
+ return -ENOMEM;
+
+ data_pg = snp_alloc_firmware_page(GFP_KERNEL_ACCOUNT);
+ if (!data_pg) {
+ ret = -ENOMEM;
+ goto free_ctx_exit;
+ }
+
+ create.dev_ctx_sla = dev_data->dev_ctx;
+ ret = sev_do_cmd(SEV_CMD_TIO_DEV_CREATE, &create, &dev_data->psp_ret);
+ if (ret)
+ goto free_data_pg_exit;
+
+ dev_data->data_pg = data_pg;
+
+ return 0;
+
+free_data_pg_exit:
+ snp_free_firmware_page(data_pg);
+free_ctx_exit:
+ sla_free(create.dev_ctx_sla, tio_status->devctx_size, true);
+ return ret;
+}
+
+int sev_tio_dev_reclaim(struct tsm_dsm_tio *dev_data)
+{
+ struct sev_tio_status *tio_status = to_tio_status(dev_data);
+ struct sev_data_tio_dev_reclaim r = {
+ .length = sizeof(r),
+ .dev_ctx_sla = dev_data->dev_ctx,
+ };
+ int ret;
+
+ if (dev_data->data_pg) {
+ snp_free_firmware_page(dev_data->data_pg);
+ dev_data->data_pg = NULL;
+ }
+
+ if (IS_SLA_NULL(dev_data->dev_ctx))
+ return 0;
+
+ ret = sev_do_cmd(SEV_CMD_TIO_DEV_RECLAIM, &r, &dev_data->psp_ret);
+
+ sla_free(dev_data->dev_ctx, tio_status->devctx_size, true);
+ dev_data->dev_ctx = SLA_NULL;
+
+ spdm_ctrl_free(dev_data);
+
+ return ret;
+}
+
+int sev_tio_dev_connect(struct tsm_dsm_tio *dev_data, u8 tc_mask, u8 ids[8], u8 cert_slot)
+{
+ struct sev_data_tio_dev_connect connect = {
+ .length = sizeof(connect),
+ .tc_mask = tc_mask,
+ .cert_slot = cert_slot,
+ .dev_ctx_sla = dev_data->dev_ctx,
+ .ide_stream_id = {
+ ids[0], ids[1], ids[2], ids[3],
+ ids[4], ids[5], ids[6], ids[7]
+ },
+ };
+ int ret;
+
+ if (WARN_ON(IS_SLA_NULL(dev_data->dev_ctx)))
+ return -EFAULT;
+ if (!(tc_mask & 1))
+ return -EINVAL;
+
+ ret = spdm_ctrl_alloc(dev_data);
+ if (ret)
+ return ret;
+
+ spdm_ctrl_init(&connect.spdm_ctrl, dev_data);
+
+ return sev_tio_do_cmd(SEV_CMD_TIO_DEV_CONNECT, &connect, sizeof(connect),
+ &dev_data->psp_ret, dev_data);
+}
+
+int sev_tio_dev_disconnect(struct tsm_dsm_tio *dev_data, bool force)
+{
+ struct sev_data_tio_dev_disconnect dc = {
+ .length = sizeof(dc),
+ .dev_ctx_sla = dev_data->dev_ctx,
+ .flags = force ? TIO_DEV_DISCONNECT_FLAG_FORCE : 0,
+ };
+
+ if (WARN_ON_ONCE(IS_SLA_NULL(dev_data->dev_ctx)))
+ return -EFAULT;
+
+ spdm_ctrl_init(&dc.spdm_ctrl, dev_data);
+
+ return sev_tio_do_cmd(SEV_CMD_TIO_DEV_DISCONNECT, &dc, sizeof(dc),
+ &dev_data->psp_ret, dev_data);
+}
+
+int sev_tio_cmd_buffer_len(int cmd)
+{
+ switch (cmd) {
+ case SEV_CMD_TIO_STATUS: return sizeof(struct sev_data_tio_status);
+ case SEV_CMD_TIO_INIT: return sizeof(struct sev_data_tio_init);
+ case SEV_CMD_TIO_DEV_CREATE: return sizeof(struct sev_data_tio_dev_create);
+ case SEV_CMD_TIO_DEV_RECLAIM: return sizeof(struct sev_data_tio_dev_reclaim);
+ case SEV_CMD_TIO_DEV_CONNECT: return sizeof(struct sev_data_tio_dev_connect);
+ case SEV_CMD_TIO_DEV_DISCONNECT: return sizeof(struct sev_data_tio_dev_disconnect);
+ default: return 0;
+ }
+}
diff --git a/drivers/crypto/ccp/sev-dev-tio.h b/drivers/crypto/ccp/sev-dev-tio.h
new file mode 100644
index 000000000000..67512b3dbc53
--- /dev/null
+++ b/drivers/crypto/ccp/sev-dev-tio.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __PSP_SEV_TIO_H__
+#define __PSP_SEV_TIO_H__
+
+#include <linux/pci-tsm.h>
+#include <linux/pci-ide.h>
+#include <linux/tsm.h>
+#include <uapi/linux/psp-sev.h>
+
+struct sla_addr_t {
+ union {
+ u64 sla;
+ struct {
+ u64 page_type :1,
+ page_size :1,
+ reserved1 :10,
+ pfn :40,
+ reserved2 :12;
+ };
+ };
+} __packed;
+
+#define SEV_TIO_MAX_COMMAND_LENGTH 128
+
+/* SPDM control structure for DOE */
+struct tsm_spdm {
+ unsigned long req_len;
+ void *req;
+ unsigned long rsp_len;
+ void *rsp;
+};
+
+/* Describes TIO device */
+struct tsm_dsm_tio {
+ u8 cert_slot;
+ struct sla_addr_t dev_ctx;
+ struct sla_addr_t req;
+ struct sla_addr_t resp;
+ struct sla_addr_t scratch;
+ struct sla_addr_t output;
+ size_t output_len;
+ size_t scratch_len;
+ struct tsm_spdm spdm;
+ struct sla_buffer_hdr *reqbuf; /* vmap'ed @req for DOE */
+ struct sla_buffer_hdr *respbuf; /* vmap'ed @resp for DOE */
+
+ int cmd;
+ int psp_ret;
+ u8 cmd_data[SEV_TIO_MAX_COMMAND_LENGTH];
+ void *data_pg; /* Data page for DEV_STATUS/TDI_STATUS/TDI_INFO/ASID_FENCE */
+
+#define TIO_IDE_MAX_TC 8
+ struct pci_ide *ide[TIO_IDE_MAX_TC];
+};
+
+/* Describes TSM structure for PF0 pointed by pci_dev->tsm */
+struct tio_dsm {
+ struct pci_tsm_pf0 tsm;
+ struct tsm_dsm_tio data;
+ struct sev_device *sev;
+};
+
+/* Data object IDs */
+#define SPDM_DOBJ_ID_NONE 0
+#define SPDM_DOBJ_ID_REQ 1
+#define SPDM_DOBJ_ID_RESP 2
+
+struct spdm_dobj_hdr {
+ u32 id; /* Data object type identifier */
+ u32 length; /* Length of the data object, INCLUDING THIS HEADER */
+ struct { /* Version of the data object structure */
+ u8 minor;
+ u8 major;
+ } version;
+} __packed;
+
+/**
+ * struct sev_tio_status - TIO_STATUS command's info_paddr buffer
+ *
+ * @length: Length of this structure in bytes
+ * @tio_en: Indicates that SNP_INIT_EX initialized the RMP for SEV-TIO
+ * @tio_init_done: Indicates TIO_INIT has been invoked
+ * @spdm_req_size_min: Minimum SPDM request buffer size in bytes
+ * @spdm_req_size_max: Maximum SPDM request buffer size in bytes
+ * @spdm_scratch_size_min: Minimum SPDM scratch buffer size in bytes
+ * @spdm_scratch_size_max: Maximum SPDM scratch buffer size in bytes
+ * @spdm_out_size_min: Minimum SPDM output buffer size in bytes
+ * @spdm_out_size_max: Maximum for the SPDM output buffer size in bytes
+ * @spdm_rsp_size_min: Minimum SPDM response buffer size in bytes
+ * @spdm_rsp_size_max: Maximum SPDM response buffer size in bytes
+ * @devctx_size: Size of a device context buffer in bytes
+ * @tdictx_size: Size of a TDI context buffer in bytes
+ * @tio_crypto_alg: TIO crypto algorithms supported
+ */
+struct sev_tio_status {
+ u32 length;
+ u32 tio_en :1,
+ tio_init_done :1,
+ reserved :30;
+ u32 spdm_req_size_min;
+ u32 spdm_req_size_max;
+ u32 spdm_scratch_size_min;
+ u32 spdm_scratch_size_max;
+ u32 spdm_out_size_min;
+ u32 spdm_out_size_max;
+ u32 spdm_rsp_size_min;
+ u32 spdm_rsp_size_max;
+ u32 devctx_size;
+ u32 tdictx_size;
+ u32 tio_crypto_alg;
+ u8 reserved2[12];
+} __packed;
+
+int sev_tio_init_locked(void *tio_status_page);
+int sev_tio_continue(struct tsm_dsm_tio *dev_data);
+
+int sev_tio_dev_create(struct tsm_dsm_tio *dev_data, u16 device_id, u16 root_port_id,
+ u8 segment_id);
+int sev_tio_dev_connect(struct tsm_dsm_tio *dev_data, u8 tc_mask, u8 ids[8], u8 cert_slot);
+int sev_tio_dev_disconnect(struct tsm_dsm_tio *dev_data, bool force);
+int sev_tio_dev_reclaim(struct tsm_dsm_tio *dev_data);
+
+#endif /* __PSP_SEV_TIO_H__ */
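A scatter SLA points at a page of sla_addr_t entries terminated by SLA_EOL
(the all-ones 40-bit PFN), so a single 4K scatter page can describe at most
511 data pages. A small counting sketch; sla_scatter_npages() is illustrative
and IS_SLA_EOL() is the macro defined in sev-dev-tio.c above:

static unsigned int sla_scatter_npages(const struct sla_addr_t *scatter)
{
	unsigned int i;

	/* 4096 / 8 = 512 entries per page, one is reserved for the EOL marker */
	for (i = 0; i < PAGE_SIZE / sizeof(struct sla_addr_t); i++)
		if (IS_SLA_EOL(scatter[i]))
			break;

	return i;
}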
diff --git a/drivers/crypto/ccp/sev-dev-tsm.c b/drivers/crypto/ccp/sev-dev-tsm.c
new file mode 100644
index 000000000000..ea29cd5d0ff9
--- /dev/null
+++ b/drivers/crypto/ccp/sev-dev-tsm.c
@@ -0,0 +1,405 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+// Interface to CCP/SEV-TIO for generic PCIe TDISP module
+
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/tsm.h>
+#include <linux/iommu.h>
+#include <linux/pci-doe.h>
+#include <linux/bitfield.h>
+#include <linux/module.h>
+
+#include <asm/sev-common.h>
+#include <asm/sev.h>
+
+#include "psp-dev.h"
+#include "sev-dev.h"
+#include "sev-dev-tio.h"
+
+MODULE_IMPORT_NS("PCI_IDE");
+
+#define TIO_DEFAULT_NR_IDE_STREAMS 1
+
+static uint nr_ide_streams = TIO_DEFAULT_NR_IDE_STREAMS;
+module_param_named(ide_nr, nr_ide_streams, uint, 0644);
+MODULE_PARM_DESC(ide_nr, "Set the maximum number of IDE streams per PHB");
+
+#define dev_to_sp(dev) ((struct sp_device *)dev_get_drvdata(dev))
+#define dev_to_psp(dev) ((struct psp_device *)(dev_to_sp(dev)->psp_data))
+#define dev_to_sev(dev) ((struct sev_device *)(dev_to_psp(dev)->sev_data))
+#define tsm_dev_to_sev(tsmdev) dev_to_sev((tsmdev)->dev.parent)
+
+#define pdev_to_tio_dsm(pdev) (container_of((pdev)->tsm, struct tio_dsm, tsm.base_tsm))
+
+static int sev_tio_spdm_cmd(struct tio_dsm *dsm, int ret)
+{
+ struct tsm_dsm_tio *dev_data = &dsm->data;
+ struct tsm_spdm *spdm = &dev_data->spdm;
+
+ /* Check the main command handler response before entering the loop */
+ if (ret == 0 && dev_data->psp_ret != SEV_RET_SUCCESS)
+ return -EINVAL;
+
+ if (ret <= 0)
+ return ret;
+
+ /* ret > 0 means "SPDM requested" */
+ while (ret == PCI_DOE_FEATURE_CMA || ret == PCI_DOE_FEATURE_SSESSION) {
+ ret = pci_doe(dsm->tsm.doe_mb, PCI_VENDOR_ID_PCI_SIG, ret,
+ spdm->req, spdm->req_len, spdm->rsp, spdm->rsp_len);
+ if (ret < 0)
+ break;
+
+ WARN_ON_ONCE(ret == 0); /* The response should never be empty */
+ spdm->rsp_len = ret;
+ ret = sev_tio_continue(dev_data);
+ }
+
+ return ret;
+}
+
+static int stream_enable(struct pci_ide *ide)
+{
+ struct pci_dev *rp = pcie_find_root_port(ide->pdev);
+ int ret;
+
+ ret = pci_ide_stream_enable(rp, ide);
+ if (ret)
+ return ret;
+
+ ret = pci_ide_stream_enable(ide->pdev, ide);
+ if (ret)
+ pci_ide_stream_disable(rp, ide);
+
+ return ret;
+}
+
+static int streams_enable(struct pci_ide **ide)
+{
+ int ret = 0;
+
+ for (int i = 0; i < TIO_IDE_MAX_TC; ++i) {
+ if (ide[i]) {
+ ret = stream_enable(ide[i]);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void stream_disable(struct pci_ide *ide)
+{
+ pci_ide_stream_disable(ide->pdev, ide);
+ pci_ide_stream_disable(pcie_find_root_port(ide->pdev), ide);
+}
+
+static void streams_disable(struct pci_ide **ide)
+{
+ for (int i = 0; i < TIO_IDE_MAX_TC; ++i)
+ if (ide[i])
+ stream_disable(ide[i]);
+}
+
+static void stream_setup(struct pci_ide *ide)
+{
+ struct pci_dev *rp = pcie_find_root_port(ide->pdev);
+
+ ide->partner[PCI_IDE_EP].rid_start = 0;
+ ide->partner[PCI_IDE_EP].rid_end = 0xffff;
+ ide->partner[PCI_IDE_RP].rid_start = 0;
+ ide->partner[PCI_IDE_RP].rid_end = 0xffff;
+
+ ide->pdev->ide_cfg = 0;
+ ide->pdev->ide_tee_limit = 1;
+ rp->ide_cfg = 1;
+ rp->ide_tee_limit = 0;
+
+ pci_warn(ide->pdev, "Forcing CFG/TEE for %s\n", pci_name(rp));
+ pci_ide_stream_setup(ide->pdev, ide);
+ pci_ide_stream_setup(rp, ide);
+}
+
+static u8 streams_setup(struct pci_ide **ide, u8 *ids)
+{
+ bool def = false;
+ u8 tc_mask = 0;
+ int i;
+
+ for (i = 0; i < TIO_IDE_MAX_TC; ++i) {
+ if (!ide[i]) {
+ ids[i] = 0xFF;
+ continue;
+ }
+
+ tc_mask |= BIT(i);
+ ids[i] = ide[i]->stream_id;
+
+ if (!def) {
+ struct pci_ide_partner *settings;
+
+ settings = pci_ide_to_settings(ide[i]->pdev, ide[i]);
+ settings->default_stream = 1;
+ def = true;
+ }
+
+ stream_setup(ide[i]);
+ }
+
+ return tc_mask;
+}
+
+static int streams_register(struct pci_ide **ide)
+{
+ int ret = 0, i;
+
+ for (i = 0; i < TIO_IDE_MAX_TC; ++i) {
+ if (ide[i]) {
+ ret = pci_ide_stream_register(ide[i]);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void streams_unregister(struct pci_ide **ide)
+{
+ for (int i = 0; i < TIO_IDE_MAX_TC; ++i)
+ if (ide[i])
+ pci_ide_stream_unregister(ide[i]);
+}
+
+static void stream_teardown(struct pci_ide *ide)
+{
+ pci_ide_stream_teardown(ide->pdev, ide);
+ pci_ide_stream_teardown(pcie_find_root_port(ide->pdev), ide);
+}
+
+static void streams_teardown(struct pci_ide **ide)
+{
+ for (int i = 0; i < TIO_IDE_MAX_TC; ++i) {
+ if (ide[i]) {
+ stream_teardown(ide[i]);
+ pci_ide_stream_free(ide[i]);
+ ide[i] = NULL;
+ }
+ }
+}
+
+static int stream_alloc(struct pci_dev *pdev, struct pci_ide **ide,
+ unsigned int tc)
+{
+ struct pci_dev *rp = pcie_find_root_port(pdev);
+ struct pci_ide *ide1;
+
+ if (ide[tc]) {
+ pci_err(pdev, "Stream for class=%d already registered", tc);
+ return -EBUSY;
+ }
+
+ /* FIXME: find a better way */
+ if (nr_ide_streams != TIO_DEFAULT_NR_IDE_STREAMS)
+ pci_notice(pdev, "Enable non-default %d streams", nr_ide_streams);
+ pci_ide_set_nr_streams(to_pci_host_bridge(rp->bus->bridge), nr_ide_streams);
+
+ ide1 = pci_ide_stream_alloc(pdev);
+ if (!ide1)
+ return -ENOMEM;
+
+ /* Blindly assign streamid=0 to TC=0, and so on */
+ ide1->stream_id = tc;
+
+ ide[tc] = ide1;
+
+ return 0;
+}
+
+static struct pci_tsm *tio_pf0_probe(struct pci_dev *pdev, struct sev_device *sev)
+{
+ struct tio_dsm *dsm __free(kfree) = kzalloc(sizeof(*dsm), GFP_KERNEL);
+ int rc;
+
+ if (!dsm)
+ return NULL;
+
+ rc = pci_tsm_pf0_constructor(pdev, &dsm->tsm, sev->tsmdev);
+ if (rc)
+ return NULL;
+
+ pci_dbg(pdev, "TSM enabled\n");
+ dsm->sev = sev;
+ return &no_free_ptr(dsm)->tsm.base_tsm;
+}
+
+static struct pci_tsm *dsm_probe(struct tsm_dev *tsmdev, struct pci_dev *pdev)
+{
+ struct sev_device *sev = tsm_dev_to_sev(tsmdev);
+
+ if (is_pci_tsm_pf0(pdev))
+ return tio_pf0_probe(pdev, sev);
+ return NULL;
+}
+
+static void dsm_remove(struct pci_tsm *tsm)
+{
+ struct pci_dev *pdev = tsm->pdev;
+
+ pci_dbg(pdev, "TSM disabled\n");
+
+ if (is_pci_tsm_pf0(pdev)) {
+ struct tio_dsm *dsm = container_of(tsm, struct tio_dsm, tsm.base_tsm);
+
+ pci_tsm_pf0_destructor(&dsm->tsm);
+ kfree(dsm);
+ }
+}
+
+static int dsm_create(struct tio_dsm *dsm)
+{
+ struct pci_dev *pdev = dsm->tsm.base_tsm.pdev;
+ u8 segment_id = pdev->bus ? pci_domain_nr(pdev->bus) : 0;
+ struct pci_dev *rootport = pcie_find_root_port(pdev);
+ u16 device_id = pci_dev_id(pdev);
+ u16 root_port_id;
+ u32 lnkcap = 0;
+
+ if (pci_read_config_dword(rootport, pci_pcie_cap(rootport) + PCI_EXP_LNKCAP,
+ &lnkcap))
+ return -ENODEV;
+
+ root_port_id = FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap);
+
+ return sev_tio_dev_create(&dsm->data, device_id, root_port_id, segment_id);
+}
+
+static int dsm_connect(struct pci_dev *pdev)
+{
+ struct tio_dsm *dsm = pdev_to_tio_dsm(pdev);
+ struct tsm_dsm_tio *dev_data = &dsm->data;
+ u8 ids[TIO_IDE_MAX_TC];
+ u8 tc_mask;
+ int ret;
+
+ if (pci_find_doe_mailbox(pdev, PCI_VENDOR_ID_PCI_SIG,
+ PCI_DOE_FEATURE_SSESSION) != dsm->tsm.doe_mb) {
+ pci_err(pdev, "CMA DOE MB must support SSESSION\n");
+ return -EFAULT;
+ }
+
+ ret = stream_alloc(pdev, dev_data->ide, 0);
+ if (ret)
+ return ret;
+
+ ret = dsm_create(dsm);
+ if (ret)
+ goto ide_free_exit;
+
+ tc_mask = streams_setup(dev_data->ide, ids);
+
+ ret = sev_tio_dev_connect(dev_data, tc_mask, ids, dev_data->cert_slot);
+ ret = sev_tio_spdm_cmd(dsm, ret);
+ if (ret)
+ goto free_exit;
+
+ streams_enable(dev_data->ide);
+
+ ret = streams_register(dev_data->ide);
+ if (ret)
+ goto free_exit;
+
+ return 0;
+
+free_exit:
+ sev_tio_dev_reclaim(dev_data);
+
+ streams_disable(dev_data->ide);
+ide_free_exit:
+
+ streams_teardown(dev_data->ide);
+
+ return ret;
+}
+
+static void dsm_disconnect(struct pci_dev *pdev)
+{
+ bool force = SYSTEM_HALT <= system_state && system_state <= SYSTEM_RESTART;
+ struct tio_dsm *dsm = pdev_to_tio_dsm(pdev);
+ struct tsm_dsm_tio *dev_data = &dsm->data;
+ int ret;
+
+ ret = sev_tio_dev_disconnect(dev_data, force);
+ ret = sev_tio_spdm_cmd(dsm, ret);
+ if (ret && !force) {
+ ret = sev_tio_dev_disconnect(dev_data, true);
+ sev_tio_spdm_cmd(dsm, ret);
+ }
+
+ sev_tio_dev_reclaim(dev_data);
+
+ streams_disable(dev_data->ide);
+ streams_unregister(dev_data->ide);
+ streams_teardown(dev_data->ide);
+}
+
+static struct pci_tsm_ops sev_tsm_ops = {
+ .probe = dsm_probe,
+ .remove = dsm_remove,
+ .connect = dsm_connect,
+ .disconnect = dsm_disconnect,
+};
+
+void sev_tsm_init_locked(struct sev_device *sev, void *tio_status_page)
+{
+ struct sev_tio_status *t = kzalloc(sizeof(*t), GFP_KERNEL);
+ struct tsm_dev *tsmdev;
+ int ret;
+
+ WARN_ON(sev->tio_status);
+
+ if (!t)
+ return;
+
+ ret = sev_tio_init_locked(tio_status_page);
+ if (ret) {
+ pr_warn("SEV-TIO STATUS failed with %d\n", ret);
+ goto error_exit;
+ }
+
+ tsmdev = tsm_register(sev->dev, &sev_tsm_ops);
+ if (IS_ERR(tsmdev)) {
+ ret = PTR_ERR(tsmdev);
+ goto error_exit;
+ }
+
+ memcpy(t, tio_status_page, sizeof(*t));
+
+ pr_notice("SEV-TIO status: EN=%d INIT_DONE=%d rq=%d..%d rs=%d..%d "
+ "scr=%d..%d out=%d..%d dev=%d tdi=%d algos=%x\n",
+ t->tio_en, t->tio_init_done,
+ t->spdm_req_size_min, t->spdm_req_size_max,
+ t->spdm_rsp_size_min, t->spdm_rsp_size_max,
+ t->spdm_scratch_size_min, t->spdm_scratch_size_max,
+ t->spdm_out_size_min, t->spdm_out_size_max,
+ t->devctx_size, t->tdictx_size,
+ t->tio_crypto_alg);
+
+ sev->tsmdev = tsmdev;
+ sev->tio_status = t;
+
+ return;
+
+error_exit:
+ pr_err("Failed to enable SEV-TIO: ret=%d en=%d initdone=%d SEV=%d\n",
+ ret, t->tio_en, t->tio_init_done, boot_cpu_has(X86_FEATURE_SEV));
+ kfree(t);
+}
+
+void sev_tsm_uninit(struct sev_device *sev)
+{
+ if (sev->tsmdev)
+ tsm_unregister(sev->tsmdev);
+
+ sev->tsmdev = NULL;
+}
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 52ba892444a8..956ea609d0cc 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -75,6 +75,14 @@ static bool psp_init_on_probe = true;
module_param(psp_init_on_probe, bool, 0444);
MODULE_PARM_DESC(psp_init_on_probe, " if true, the PSP will be initialized on module init. Else the PSP will be initialized on the first command requiring it");
+#if IS_ENABLED(CONFIG_PCI_TSM)
+static bool sev_tio_enabled = true;
+module_param_named(tio, sev_tio_enabled, bool, 0444);
+MODULE_PARM_DESC(tio, "Enables TIO in SNP_INIT_EX");
+#else
+static const bool sev_tio_enabled = false;
+#endif
+
MODULE_FIRMWARE("amd/amd_sev_fam17h_model0xh.sbin"); /* 1st gen EPYC */
MODULE_FIRMWARE("amd/amd_sev_fam17h_model3xh.sbin"); /* 2nd gen EPYC */
MODULE_FIRMWARE("amd/amd_sev_fam19h_model0xh.sbin"); /* 3rd gen EPYC */
@@ -251,7 +259,7 @@ static int sev_cmd_buffer_len(int cmd)
case SEV_CMD_SNP_COMMIT: return sizeof(struct sev_data_snp_commit);
case SEV_CMD_SNP_FEATURE_INFO: return sizeof(struct sev_data_snp_feature_info);
case SEV_CMD_SNP_VLEK_LOAD: return sizeof(struct sev_user_data_snp_vlek_load);
- default: return 0;
+ default: return sev_tio_cmd_buffer_len(cmd);
}
return 0;
@@ -380,13 +388,7 @@ static int sev_write_init_ex_file_if_required(int cmd_id)
return sev_write_init_ex_file();
}
-/*
- * snp_reclaim_pages() needs __sev_do_cmd_locked(), and __sev_do_cmd_locked()
- * needs snp_reclaim_pages(), so a forward declaration is needed.
- */
-static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret);
-
-static int snp_reclaim_pages(unsigned long paddr, unsigned int npages, bool locked)
+int snp_reclaim_pages(unsigned long paddr, unsigned int npages, bool locked)
{
int ret, err, i;
@@ -420,6 +422,7 @@ cleanup:
snp_leak_pages(__phys_to_pfn(paddr), npages - i);
return ret;
}
+EXPORT_SYMBOL_GPL(snp_reclaim_pages);
static int rmp_mark_pages_firmware(unsigned long paddr, unsigned int npages, bool locked)
{
@@ -850,7 +853,7 @@ static int snp_reclaim_cmd_buf(int cmd, void *cmd_buf)
return 0;
}
-static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
+int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
{
struct cmd_buf_desc desc_list[CMD_BUF_DESC_MAX] = {0};
struct psp_device *psp = psp_master;
@@ -1392,6 +1395,8 @@ static int __sev_snp_init_locked(int *error, unsigned int max_snp_asid)
*
*/
if (sev_version_greater_or_equal(SNP_MIN_API_MAJOR, 52)) {
+ bool tio_supp = !!(sev->snp_feat_info_0.ebx & SNP_SEV_TIO_SUPPORTED);
+
/*
* Firmware checks that the pages containing the ranges enumerated
* in the RANGES structure are either in the default page state or in the
@@ -1432,6 +1437,17 @@ static int __sev_snp_init_locked(int *error, unsigned int max_snp_asid)
data.init_rmp = 1;
data.list_paddr_en = 1;
data.list_paddr = __psp_pa(snp_range_list);
+
+ data.tio_en = tio_supp && sev_tio_enabled && amd_iommu_sev_tio_supported();
+
+ /*
+ * When psp_init_on_probe is disabled, userspace calling the SEV
+ * ioctl can inadvertently shut down SNP and SEV-TIO, causing
+ * unexpected state loss.
+ */
+ if (data.tio_en && !psp_init_on_probe)
+ dev_warn(sev->dev, "SEV-TIO as incompatible with psp_init_on_probe=0\n");
+
cmd = SEV_CMD_SNP_INIT_EX;
} else {
cmd = SEV_CMD_SNP_INIT;
@@ -1469,7 +1485,8 @@ static int __sev_snp_init_locked(int *error, unsigned int max_snp_asid)
snp_hv_fixed_pages_state_update(sev, HV_FIXED);
sev->snp_initialized = true;
- dev_dbg(sev->dev, "SEV-SNP firmware initialized\n");
+ dev_dbg(sev->dev, "SEV-SNP firmware initialized, SEV-TIO is %s\n",
+ data.tio_en ? "enabled" : "disabled");
dev_info(sev->dev, "SEV-SNP API:%d.%d build:%d\n", sev->api_major,
sev->api_minor, sev->build);
@@ -1477,6 +1494,23 @@ static int __sev_snp_init_locked(int *error, unsigned int max_snp_asid)
atomic_notifier_chain_register(&panic_notifier_list,
&snp_panic_notifier);
+ if (data.tio_en) {
+ /*
+		 * This executes with sev_cmd_mutex held, so a call down the
+		 * stack to snp_reclaim_pages(locked=false) (however unlikely)
+		 * would deadlock.
+		 * Instead of exporting __snp_alloc_firmware_pages(), allocate
+		 * a page for this one call here.
+ */
+ void *tio_status = page_address(__snp_alloc_firmware_pages(
+ GFP_KERNEL_ACCOUNT | __GFP_ZERO, 0, true));
+
+ if (tio_status) {
+ sev_tsm_init_locked(sev, tio_status);
+ __snp_free_firmware_pages(virt_to_page(tio_status), 0, true);
+ }
+ }
+
sev_es_tmr_size = SNP_TMR_SIZE;
return 0;
@@ -2756,8 +2790,20 @@ static void __sev_firmware_shutdown(struct sev_device *sev, bool panic)
static void sev_firmware_shutdown(struct sev_device *sev)
{
+ /*
+	 * Call without sev_cmd_mutex held: TSM will likely try to disconnect
+	 * IDE, which ends up calling sev_do_cmd() and taking sev_cmd_mutex.
+ */
+ if (sev->tio_status)
+ sev_tsm_uninit(sev);
+
mutex_lock(&sev_cmd_mutex);
+
__sev_firmware_shutdown(sev, false);
+
+ kfree(sev->tio_status);
+ sev->tio_status = NULL;
+
mutex_unlock(&sev_cmd_mutex);
}
diff --git a/drivers/crypto/ccp/sev-dev.h b/drivers/crypto/ccp/sev-dev.h
index ac03bd0848f7..b1cd556bbbf6 100644
--- a/drivers/crypto/ccp/sev-dev.h
+++ b/drivers/crypto/ccp/sev-dev.h
@@ -34,6 +34,8 @@ struct sev_misc_dev {
struct miscdevice misc;
};
+struct sev_tio_status;
+
struct sev_device {
struct device *dev;
struct psp_device *psp;
@@ -61,15 +63,24 @@ struct sev_device {
struct sev_user_data_snp_status snp_plat_status;
struct snp_feature_info snp_feat_info_0;
+
+ struct tsm_dev *tsmdev;
+ struct sev_tio_status *tio_status;
};
int sev_dev_init(struct psp_device *psp);
void sev_dev_destroy(struct psp_device *psp);
+int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret);
+
void sev_pci_init(void);
void sev_pci_exit(void);
struct page *snp_alloc_hv_fixed_pages(unsigned int num_2mb_pages);
void snp_free_hv_fixed_pages(struct page *page);
+void sev_tsm_init_locked(struct sev_device *sev, void *tio_status_page);
+void sev_tsm_uninit(struct sev_device *sev);
+int sev_tio_cmd_buffer_len(int cmd);
+
#endif /* __SEV_DEV_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c
index 69295a9ddf0a..4ccc94ed9493 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c
@@ -1,18 +1,12 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2025 Intel Corporation */
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/sprintf.h>
#include <linux/string_helpers.h>
#include "adf_pm_dbgfs_utils.h"
-/*
- * This is needed because a variable is used to index the mask at
- * pm_scnprint_table(), making it not compile time constant, so the compile
- * asserts from FIELD_GET() or u32_get_bits() won't be fulfilled.
- */
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-
#define PM_INFO_MAX_KEY_LEN 21
static int pm_scnprint_table(char *buff, const struct pm_status_row *table,
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index 8d4ddaa85ae8..eaab6af143e1 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -44,6 +44,7 @@
* but lo_hi_readq() ensures that we are safe across all e3-1200 processors.
*/
+#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
@@ -139,9 +140,6 @@
#define IE31200_CAPID0_DDPCD BIT(6)
#define IE31200_CAPID0_ECC BIT(1)
-/* Non-constant mask variant of FIELD_GET() */
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-
static int nr_channels;
static struct pci_dev *mci_pdev;
static int ie31200_registered = 1;
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 2e0ae953dd99..cbdf781994dc 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -5,6 +5,7 @@
* Joel Stanley <joel@jms.id.au>
*/
+#include <linux/bitfield.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/gpio/aspeed.h>
@@ -30,10 +31,6 @@
*/
#include <linux/gpio/consumer.h>
-/* Non-constant mask variant of FIELD_GET() and FIELD_PREP() */
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
-
#define GPIO_G7_IRQ_STS_BASE 0x100
#define GPIO_G7_IRQ_STS_OFFSET(x) (GPIO_G7_IRQ_STS_BASE + (x) * 0x4)
#define GPIO_G7_CTRL_REG_BASE 0x180
diff --git a/drivers/iio/dac/ad3530r.c b/drivers/iio/dac/ad3530r.c
index 6134613777b8..b97b46090d80 100644
--- a/drivers/iio/dac/ad3530r.c
+++ b/drivers/iio/dac/ad3530r.c
@@ -53,9 +53,6 @@
#define AD3530R_MAX_CHANNELS 8
#define AD3531R_MAX_CHANNELS 4
-/* Non-constant mask variant of FIELD_PREP() */
-#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
-
enum ad3530r_mode {
AD3530R_NORMAL_OP,
AD3530R_POWERDOWN_1K,
diff --git a/drivers/iio/temperature/mlx90614.c b/drivers/iio/temperature/mlx90614.c
index 8a44a00bfd5e..1ad21b73e1b4 100644
--- a/drivers/iio/temperature/mlx90614.c
+++ b/drivers/iio/temperature/mlx90614.c
@@ -22,6 +22,7 @@
* the "wakeup" GPIO is not given, power management will be disabled.
*/
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
@@ -68,10 +69,6 @@
#define MLX90614_CONST_SCALE 20 /* Scale in milliKelvin (0.02 * 1000) */
#define MLX90614_CONST_FIR 0x7 /* Fixed value for FIR part of low pass filter */
-/* Non-constant mask variant of FIELD_GET() and FIELD_PREP() */
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
-
struct mlx_chip_info {
/* EEPROM offsets with 16-bit data, MSB first */
/* emissivity correction coefficient */
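Across the qat, ie31200_edac, gpio-aspeed, ad3530r, and mlx90614 hunks the pattern is the same: each driver drops its private field_get()/field_prep() macro and leans on <linux/bitfield.h>, which is assumed here to now provide the lowercase, non-constant-mask variants. A minimal sketch of the case they exist for (hypothetical function, runtime-selected mask):

#include <linux/bitfield.h>

/*
 * The mask is not a compile-time constant, so FIELD_GET()'s build-time
 * assertions cannot apply; field_get() is the runtime variant.
 */
static u32 decode_field(u32 reg, u32 mask)
{
	return field_get(mask, reg);
}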
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 78b1c44bd6b5..320733e7d8b4 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -107,6 +107,7 @@
/* Extended Feature 2 Bits */
+#define FEATURE_SEVSNPIO_SUP BIT_ULL(1)
#define FEATURE_SNPAVICSUP GENMASK_ULL(7, 5)
#define FEATURE_SNPAVICSUP_GAM(x) \
(FIELD_GET(FEATURE_SNPAVICSUP, x) == 0x1)
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index fdd6328bca89..4b2953418977 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -2261,6 +2261,9 @@ static void print_iommu_info(void)
if (check_feature(FEATURE_SNP))
pr_cont(" SNP");
+ if (check_feature2(FEATURE_SEVSNPIO_SUP))
+ pr_cont(" SEV-TIO");
+
pr_cont("\n");
}
@@ -4028,4 +4031,10 @@ int amd_iommu_snp_disable(void)
return 0;
}
EXPORT_SYMBOL_GPL(amd_iommu_snp_disable);
+
+bool amd_iommu_sev_tio_supported(void)
+{
+ return check_feature2(FEATURE_SEVSNPIO_SUP);
+}
+EXPORT_SYMBOL_GPL(amd_iommu_sev_tio_supported);
#endif
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index fde3e17c836c..44ab929a1ad5 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -97,6 +97,25 @@ config OF_PMEM
Select Y if unsure.
+config RAMDAX
+ tristate "Support persistent memory interfaces on RAM carveouts"
+ depends on X86_PMEM_LEGACY || OF || COMPILE_TEST
+ default LIBNVDIMM
+ help
+ Allows creation of DAX devices on RAM carveouts.
+
+	  Memory ranges that are manually specified with the
+	  'memmap=nn[KMG]!ss[KMG]' kernel command line option or defined by
+	  dummy pmem-region device tree nodes are managed by this driver as
+	  DIMM devices with support for dynamic layout of namespaces.
+	  The driver steals 128K at the end of the memmap range for namespace
+	  management, which allows supporting up to 509 namespaces (see
+	  'ndctl create-namespace --help').
+	  The driver should be force-bound to e820_pmem or pmem-region
+	  platform devices using the 'driver_override' device attribute.
+
+ Select N if unsure.
+
config NVDIMM_KEYS
def_bool y
depends on ENCRYPTED_KEYS
diff --git a/drivers/nvdimm/Makefile b/drivers/nvdimm/Makefile
index ba0296dca9db..8c268814936c 100644
--- a/drivers/nvdimm/Makefile
+++ b/drivers/nvdimm/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_ND_BTT) += nd_btt.o
obj-$(CONFIG_X86_PMEM_LEGACY) += nd_e820.o
obj-$(CONFIG_OF_PMEM) += of_pmem.o
obj-$(CONFIG_VIRTIO_PMEM) += virtio_pmem.o nd_virtio.o
+obj-$(CONFIG_RAMDAX) += ramdax.o
nd_pmem-y := pmem.o
diff --git a/drivers/nvdimm/ramdax.c b/drivers/nvdimm/ramdax.c
new file mode 100644
index 000000000000..954cb7919807
--- /dev/null
+++ b/drivers/nvdimm/ramdax.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025, Mike Rapoport, Microsoft
+ *
+ * Based on e820 pmem driver:
+ * Copyright (c) 2015, Christoph Hellwig.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+#include <linux/platform_device.h>
+#include <linux/memory_hotplug.h>
+#include <linux/libnvdimm.h>
+#include <linux/module.h>
+#include <linux/numa.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of.h>
+
+#include <uapi/linux/ndctl.h>
+
+#define LABEL_AREA_SIZE SZ_128K
+
+struct ramdax_dimm {
+ struct nvdimm *nvdimm;
+ void *label_area;
+};
+
+static void ramdax_remove(struct platform_device *pdev)
+{
+ struct nvdimm_bus *nvdimm_bus = platform_get_drvdata(pdev);
+
+ nvdimm_bus_unregister(nvdimm_bus);
+}
+
+static int ramdax_register_region(struct resource *res,
+ struct nvdimm *nvdimm,
+ struct nvdimm_bus *nvdimm_bus)
+{
+ struct nd_mapping_desc mapping;
+ struct nd_region_desc ndr_desc;
+ struct nd_interleave_set *nd_set;
+ int nid = phys_to_target_node(res->start);
+
+ nd_set = kzalloc(sizeof(*nd_set), GFP_KERNEL);
+ if (!nd_set)
+ return -ENOMEM;
+
+ nd_set->cookie1 = 0xcafebeefcafebeef;
+ nd_set->cookie2 = nd_set->cookie1;
+ nd_set->altcookie = nd_set->cookie1;
+
+ memset(&mapping, 0, sizeof(mapping));
+ mapping.nvdimm = nvdimm;
+ mapping.start = 0;
+ mapping.size = resource_size(res) - LABEL_AREA_SIZE;
+
+ memset(&ndr_desc, 0, sizeof(ndr_desc));
+ ndr_desc.res = res;
+ ndr_desc.numa_node = numa_map_to_online_node(nid);
+ ndr_desc.target_node = nid;
+ ndr_desc.num_mappings = 1;
+ ndr_desc.mapping = &mapping;
+ ndr_desc.nd_set = nd_set;
+
+ if (!nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc))
+ goto err_free_nd_set;
+
+ return 0;
+
+err_free_nd_set:
+ kfree(nd_set);
+ return -ENXIO;
+}
+
+static int ramdax_register_dimm(struct resource *res, void *data)
+{
+ resource_size_t start = res->start;
+ resource_size_t size = resource_size(res);
+ unsigned long flags = 0, cmd_mask = 0;
+ struct nvdimm_bus *nvdimm_bus = data;
+ struct ramdax_dimm *dimm;
+ int err;
+
+ dimm = kzalloc(sizeof(*dimm), GFP_KERNEL);
+ if (!dimm)
+ return -ENOMEM;
+
+ dimm->label_area = memremap(start + size - LABEL_AREA_SIZE,
+ LABEL_AREA_SIZE, MEMREMAP_WB);
+ if (!dimm->label_area) {
+ err = -ENOMEM;
+ goto err_free_dimm;
+ }
+
+ set_bit(NDD_LABELING, &flags);
+ set_bit(NDD_REGISTER_SYNC, &flags);
+ set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
+ set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
+ set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
+ dimm->nvdimm = nvdimm_create(nvdimm_bus, dimm,
+ /* dimm_attribute_groups */ NULL,
+ flags, cmd_mask, 0, NULL);
+ if (!dimm->nvdimm) {
+ err = -ENOMEM;
+ goto err_unmap_label;
+ }
+
+ err = ramdax_register_region(res, dimm->nvdimm, nvdimm_bus);
+ if (err)
+ goto err_remove_nvdimm;
+
+ return 0;
+
+err_remove_nvdimm:
+ nvdimm_delete(dimm->nvdimm);
+err_unmap_label:
+ memunmap(dimm->label_area);
+err_free_dimm:
+ kfree(dimm);
+ return err;
+}
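To make the label carveout concrete, a worked example with invented numbers:

/*
 * Hypothetical example: for a 2G resource spanning
 * [0x100000000, 0x17fffffff], the label area is memremap()ed at
 * 0x180000000 - SZ_128K = 0x17ffe0000, and ramdax_register_region()
 * exposes resource_size(res) - LABEL_AREA_SIZE = 2G - 128K as
 * namespace capacity.
 */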
+
+static int ramdax_get_config_size(struct nvdimm *nvdimm, int buf_len,
+ struct nd_cmd_get_config_size *cmd)
+{
+ if (sizeof(*cmd) > buf_len)
+ return -EINVAL;
+
+ *cmd = (struct nd_cmd_get_config_size){
+ .status = 0,
+ .config_size = LABEL_AREA_SIZE,
+ .max_xfer = 8,
+ };
+
+ return 0;
+}
+
+static int ramdax_get_config_data(struct nvdimm *nvdimm, int buf_len,
+ struct nd_cmd_get_config_data_hdr *cmd)
+{
+ struct ramdax_dimm *dimm = nvdimm_provider_data(nvdimm);
+
+ if (sizeof(*cmd) > buf_len)
+ return -EINVAL;
+ if (struct_size(cmd, out_buf, cmd->in_length) > buf_len)
+ return -EINVAL;
+ if (size_add(cmd->in_offset, cmd->in_length) > LABEL_AREA_SIZE)
+ return -EINVAL;
+
+ memcpy(cmd->out_buf, dimm->label_area + cmd->in_offset, cmd->in_length);
+
+ return 0;
+}
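The guards in the get/set paths reject, in order: a buffer too small for the fixed header, a header-plus-payload size that overflows the buffer (struct_size() saturates rather than wraps), and an offset-plus-length that runs past the label area (size_add() likewise saturates). A worked rejection with invented numbers:

/*
 * Hypothetical example: in_offset = 0x1f000, in_length = 0x2000.
 * size_add(0x1f000, 0x2000) = 0x21000 > LABEL_AREA_SIZE (0x20000),
 * so the request fails with -EINVAL instead of reading or writing
 * past the end of the label area.
 */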
+
+static int ramdax_set_config_data(struct nvdimm *nvdimm, int buf_len,
+ struct nd_cmd_set_config_hdr *cmd)
+{
+ struct ramdax_dimm *dimm = nvdimm_provider_data(nvdimm);
+
+ if (sizeof(*cmd) > buf_len)
+ return -EINVAL;
+ if (struct_size(cmd, in_buf, cmd->in_length) > buf_len)
+ return -EINVAL;
+ if (size_add(cmd->in_offset, cmd->in_length) > LABEL_AREA_SIZE)
+ return -EINVAL;
+
+ memcpy(dimm->label_area + cmd->in_offset, cmd->in_buf, cmd->in_length);
+
+ return 0;
+}
+
+static int ramdax_nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd,
+ void *buf, unsigned int buf_len)
+{
+ unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);
+
+ if (!test_bit(cmd, &cmd_mask))
+ return -ENOTTY;
+
+ switch (cmd) {
+ case ND_CMD_GET_CONFIG_SIZE:
+ return ramdax_get_config_size(nvdimm, buf_len, buf);
+ case ND_CMD_GET_CONFIG_DATA:
+ return ramdax_get_config_data(nvdimm, buf_len, buf);
+ case ND_CMD_SET_CONFIG_DATA:
+ return ramdax_set_config_data(nvdimm, buf_len, buf);
+ default:
+ return -ENOTTY;
+ }
+}
+
+static int ramdax_ctl(struct nvdimm_bus_descriptor *nd_desc,
+ struct nvdimm *nvdimm, unsigned int cmd, void *buf,
+ unsigned int buf_len, int *cmd_rc)
+{
+ /*
+ * No firmware response to translate, let the transport error
+ * code take precedence.
+ */
+ *cmd_rc = 0;
+
+ if (!nvdimm)
+ return -ENOTTY;
+ return ramdax_nvdimm_ctl(nvdimm, cmd, buf, buf_len);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id ramdax_of_matches[] = {
+ { .compatible = "pmem-region", },
+ { },
+};
+#endif
+
+static int ramdax_probe_of(struct platform_device *pdev,
+ struct nvdimm_bus *bus, struct device_node *np)
+{
+ int err;
+
+ if (!of_match_node(ramdax_of_matches, np))
+ return -ENODEV;
+
+ for (int i = 0; i < pdev->num_resources; i++) {
+ err = ramdax_register_dimm(&pdev->resource[i], bus);
+ if (err)
+ goto err_unregister;
+ }
+
+ return 0;
+
+err_unregister:
+ /*
+	 * FIXME: should we unregister the DIMMs that were registered
+	 * successfully?
+ */
+ return err;
+}
+
+static int ramdax_probe(struct platform_device *pdev)
+{
+ static struct nvdimm_bus_descriptor nd_desc;
+ struct device *dev = &pdev->dev;
+ struct nvdimm_bus *nvdimm_bus;
+ struct device_node *np;
+ int rc = -ENXIO;
+
+ nd_desc.provider_name = "ramdax";
+ nd_desc.module = THIS_MODULE;
+ nd_desc.ndctl = ramdax_ctl;
+ nvdimm_bus = nvdimm_bus_register(dev, &nd_desc);
+ if (!nvdimm_bus)
+ goto err;
+
+ np = dev_of_node(&pdev->dev);
+ if (np)
+ rc = ramdax_probe_of(pdev, nvdimm_bus, np);
+ else
+ rc = walk_iomem_res_desc(IORES_DESC_PERSISTENT_MEMORY_LEGACY,
+ IORESOURCE_MEM, 0, -1, nvdimm_bus,
+ ramdax_register_dimm);
+ if (rc)
+ goto err;
+
+ platform_set_drvdata(pdev, nvdimm_bus);
+
+ return 0;
+err:
+ nvdimm_bus_unregister(nvdimm_bus);
+ return rc;
+}
+
+static struct platform_driver ramdax_driver = {
+ .probe = ramdax_probe,
+ .remove = ramdax_remove,
+ .driver = {
+ .name = "ramdax",
+ },
+};
+
+module_platform_driver(ramdax_driver);
+
+MODULE_DESCRIPTION("NVDIMM support for e820 type-12 memory and OF pmem-region");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Microsoft Corporation");
diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c
index 4adce8c38870..e41f6951ca0f 100644
--- a/drivers/nvdimm/security.c
+++ b/drivers/nvdimm/security.c
@@ -424,7 +424,7 @@ static int security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
* query.
*/
get_device(dev);
- queue_delayed_work(system_wq, &nvdimm->dwork, 0);
+ queue_delayed_work(system_percpu_wq, &nvdimm->dwork, 0);
}
return rc;
@@ -457,7 +457,7 @@ static void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm)
/* setup delayed work again */
tmo += 10;
- queue_delayed_work(system_wq, &nvdimm->dwork, tmo * HZ);
+ queue_delayed_work(system_percpu_wq, &nvdimm->dwork, tmo * HZ);
nvdimm->sec.overwrite_tmo = min(15U * 60U, tmo);
return;
}
diff --git a/drivers/nvme/common/auth.c b/drivers/nvme/common/auth.c
index 1f51fbebd9fa..e07e7d4bf8b6 100644
--- a/drivers/nvme/common/auth.c
+++ b/drivers/nvme/common/auth.c
@@ -178,7 +178,7 @@ struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
if (!key)
return ERR_PTR(-ENOMEM);
- key_len = base64_decode(secret, allocated_len, key->key);
+ key_len = base64_decode(secret, allocated_len, key->key, true, BASE64_STD);
if (key_len < 0) {
pr_debug("base64 key decoding error %d\n",
key_len);
@@ -663,7 +663,7 @@ int nvme_auth_generate_digest(u8 hmac_id, u8 *psk, size_t psk_len,
if (ret)
goto out_free_digest;
- ret = base64_encode(digest, digest_len, enc);
+ ret = base64_encode(digest, digest_len, enc, true, BASE64_STD);
if (ret < hmac_len) {
ret = -ENOKEY;
goto out_free_digest;
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index feef537257d0..4e7071714356 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -517,10 +517,10 @@ static u32 hint_lookup[] = {
* ccio_io_pdir_entry - Initialize an I/O Pdir.
* @pdir_ptr: A pointer into I/O Pdir.
* @sid: The Space Identifier.
- * @vba: The virtual address.
+ * @pba: The physical address.
* @hints: The DMA Hint.
*
- * Given a virtual address (vba, arg2) and space id, (sid, arg1),
+ * Given a physical address (pba, arg2) and space id (sid, arg1),
* load the I/O PDIR entry pointed to by pdir_ptr (arg0). Each IO Pdir
* entry consists of 8 bytes as shown below (MSB == bit 0):
*
@@ -543,7 +543,7 @@ static u32 hint_lookup[] = {
* index are bits 12:19 of the value returned by LCI.
*/
static void
-ccio_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
+ccio_io_pdir_entry(__le64 *pdir_ptr, space_t sid, phys_addr_t pba,
unsigned long hints)
{
register unsigned long pa;
@@ -557,7 +557,7 @@ ccio_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
** "hints" parm includes the VALID bit!
** "dep" clobbers the physical address offset bits as well.
*/
- pa = lpa(vba);
+ pa = pba;
asm volatile("depw %1,31,12,%0" : "+r" (pa) : "r" (hints));
((u32 *)pdir_ptr)[1] = (u32) pa;
@@ -582,7 +582,7 @@ ccio_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
** Grab virtual index [0:11]
** Deposit virt_idx bits into I/O PDIR word
*/
- asm volatile ("lci %%r0(%1), %0" : "=r" (ci) : "r" (vba));
+ asm volatile ("lci %%r0(%1), %0" : "=r" (ci) : "r" (phys_to_virt(pba)));
asm volatile ("extru %1,19,12,%0" : "+r" (ci) : "r" (ci));
asm volatile ("depw %1,15,12,%0" : "+r" (pa) : "r" (ci));
@@ -704,14 +704,14 @@ ccio_dma_supported(struct device *dev, u64 mask)
/**
* ccio_map_single - Map an address range into the IOMMU.
* @dev: The PCI device.
- * @addr: The start address of the DMA region.
+ * @addr: The physical address of the DMA region.
* @size: The length of the DMA region.
* @direction: The direction of the DMA transaction (to/from device).
*
* This function implements the pci_map_single function.
*/
static dma_addr_t
-ccio_map_single(struct device *dev, void *addr, size_t size,
+ccio_map_single(struct device *dev, phys_addr_t addr, size_t size,
enum dma_data_direction direction)
{
int idx;
@@ -730,7 +730,7 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
BUG_ON(size <= 0);
/* save offset bits */
- offset = ((unsigned long) addr) & ~IOVP_MASK;
+ offset = offset_in_page(addr);
/* round up to nearest IOVP_SIZE */
size = ALIGN(size + offset, IOVP_SIZE);
@@ -746,15 +746,15 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
pdir_start = &(ioc->pdir_base[idx]);
- DBG_RUN("%s() %px -> %#lx size: %zu\n",
- __func__, addr, (long)(iovp | offset), size);
+ DBG_RUN("%s() %pa -> %#lx size: %zu\n",
+ __func__, &addr, (long)(iovp | offset), size);
/* If not cacheline aligned, force SAFE_DMA on the whole mess */
- if((size % L1_CACHE_BYTES) || ((unsigned long)addr % L1_CACHE_BYTES))
+ if ((size % L1_CACHE_BYTES) || (addr % L1_CACHE_BYTES))
hint |= HINT_SAFE_DMA;
while(size > 0) {
- ccio_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long)addr, hint);
+ ccio_io_pdir_entry(pdir_start, KERNEL_SPACE, addr, hint);
DBG_RUN(" pdir %p %08x%08x\n",
pdir_start,
@@ -773,17 +773,18 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
static dma_addr_t
-ccio_map_page(struct device *dev, struct page *page, unsigned long offset,
- size_t size, enum dma_data_direction direction,
- unsigned long attrs)
+ccio_map_phys(struct device *dev, phys_addr_t phys, size_t size,
+ enum dma_data_direction direction, unsigned long attrs)
{
- return ccio_map_single(dev, page_address(page) + offset, size,
- direction);
+ if (unlikely(attrs & DMA_ATTR_MMIO))
+ return DMA_MAPPING_ERROR;
+
+ return ccio_map_single(dev, phys, size, direction);
}
/**
- * ccio_unmap_page - Unmap an address range from the IOMMU.
+ * ccio_unmap_phys - Unmap an address range from the IOMMU.
* @dev: The PCI device.
* @iova: The start address of the DMA region.
* @size: The length of the DMA region.
@@ -791,7 +792,7 @@ ccio_map_page(struct device *dev, struct page *page, unsigned long offset,
* @attrs: attributes
*/
static void
-ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
+ccio_unmap_phys(struct device *dev, dma_addr_t iova, size_t size,
enum dma_data_direction direction, unsigned long attrs)
{
struct ioc *ioc;
@@ -853,7 +854,8 @@ ccio_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag,
if (ret) {
memset(ret, 0, size);
- *dma_handle = ccio_map_single(dev, ret, size, DMA_BIDIRECTIONAL);
+ *dma_handle = ccio_map_single(dev, virt_to_phys(ret), size,
+ DMA_BIDIRECTIONAL);
}
return ret;
@@ -873,7 +875,7 @@ static void
ccio_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_handle, unsigned long attrs)
{
- ccio_unmap_page(dev, dma_handle, size, 0, 0);
+ ccio_unmap_phys(dev, dma_handle, size, 0, 0);
free_pages((unsigned long)cpu_addr, get_order(size));
}
@@ -920,7 +922,7 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
/* Fast path single entry scatterlists. */
if (nents == 1) {
sg_dma_address(sglist) = ccio_map_single(dev,
- sg_virt(sglist), sglist->length,
+ sg_phys(sglist), sglist->length,
direction);
sg_dma_len(sglist) = sglist->length;
return 1;
@@ -1004,7 +1006,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
#ifdef CCIO_COLLECT_STATS
ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
#endif
- ccio_unmap_page(dev, sg_dma_address(sglist),
+ ccio_unmap_phys(dev, sg_dma_address(sglist),
sg_dma_len(sglist), direction, 0);
++sglist;
nents--;
@@ -1017,8 +1019,8 @@ static const struct dma_map_ops ccio_ops = {
.dma_supported = ccio_dma_supported,
.alloc = ccio_alloc,
.free = ccio_free,
- .map_page = ccio_map_page,
- .unmap_page = ccio_unmap_page,
+ .map_phys = ccio_map_phys,
+ .unmap_phys = ccio_unmap_phys,
.map_sg = ccio_map_sg,
.unmap_sg = ccio_unmap_sg,
.get_sgtable = dma_common_get_sgtable,
@@ -1072,7 +1074,7 @@ static int ccio_proc_info(struct seq_file *m, void *p)
ioc->msingle_calls, ioc->msingle_pages,
(int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));
- /* KLUGE - unmap_sg calls unmap_page for each mapped page */
+ /* KLUGE - unmap_sg calls unmap_phys for each mapped page */
min = ioc->usingle_calls - ioc->usg_calls;
max = ioc->usingle_pages - ioc->usg_pages;
seq_printf(m, "pci_unmap_single: %8ld calls %8ld pages (avg %d/1000)\n",
diff --git a/drivers/parisc/gsc.c b/drivers/parisc/gsc.c
index a0daaa548bc3..8ba778170447 100644
--- a/drivers/parisc/gsc.c
+++ b/drivers/parisc/gsc.c
@@ -154,7 +154,9 @@ static int gsc_set_affinity_irq(struct irq_data *d, const struct cpumask *dest,
gsc_dev->eim = ((u32) gsc_dev->gsc_irq.txn_addr) | gsc_dev->gsc_irq.txn_data;
/* switch IRQ's for devices below LASI/WAX to other CPU */
- gsc_writel(gsc_dev->eim, gsc_dev->hpa + OFFSET_IAR);
+ /* ASP chip (svers 0x70) does not support reprogramming */
+ if (gsc_dev->gsc->id.sversion != 0x70)
+ gsc_writel(gsc_dev->eim, gsc_dev->hpa + OFFSET_IAR);
irq_data_update_effective_affinity(d, &tmask);
diff --git a/drivers/parisc/iommu-helpers.h b/drivers/parisc/iommu-helpers.h
index c43f1a212a5c..0691884f5095 100644
--- a/drivers/parisc/iommu-helpers.h
+++ b/drivers/parisc/iommu-helpers.h
@@ -14,7 +14,7 @@
static inline unsigned int
iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
unsigned long hint,
- void (*iommu_io_pdir_entry)(__le64 *, space_t, unsigned long,
+ void (*iommu_io_pdir_entry)(__le64 *, space_t, phys_addr_t,
unsigned long))
{
struct scatterlist *dma_sg = startsg; /* pointer to current DMA */
@@ -28,7 +28,7 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
dma_sg--;
while (nents-- > 0) {
- unsigned long vaddr;
+ phys_addr_t paddr;
long size;
DBG_RUN_SG(" %d : %08lx %p/%05x\n", nents,
@@ -67,7 +67,7 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
BUG_ON(pdirp == NULL);
- vaddr = (unsigned long)sg_virt(startsg);
+ paddr = sg_phys(startsg);
sg_dma_len(dma_sg) += startsg->length;
size = startsg->length + dma_offset;
dma_offset = 0;
@@ -76,8 +76,8 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
#endif
do {
iommu_io_pdir_entry(pdirp, KERNEL_SPACE,
- vaddr, hint);
- vaddr += IOVP_SIZE;
+ paddr, hint);
+ paddr += IOVP_SIZE;
size -= IOVP_SIZE;
pdirp++;
} while(unlikely(size > 0));
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index fc3863c09f83..a6eb6bffa5ea 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -532,7 +532,7 @@ typedef unsigned long space_t;
* sba_io_pdir_entry - fill in one IO PDIR entry
* @pdir_ptr: pointer to IO PDIR entry
* @sid: process Space ID - currently only support KERNEL_SPACE
- * @vba: Virtual CPU address of buffer to map
+ * @pba: Physical address of buffer to map
* @hint: DMA hint set to use for this mapping
*
* SBA Mapping Routine
@@ -569,20 +569,17 @@ typedef unsigned long space_t;
*/
static void
-sba_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
+sba_io_pdir_entry(__le64 *pdir_ptr, space_t sid, phys_addr_t pba,
unsigned long hint)
{
- u64 pa; /* physical address */
register unsigned ci; /* coherent index */
- pa = lpa(vba);
- pa &= IOVP_MASK;
+ asm("lci 0(%1), %0" : "=r" (ci) : "r" (phys_to_virt(pba)));
+ pba &= IOVP_MASK;
+ pba |= (ci >> PAGE_SHIFT) & 0xff; /* move CI (8 bits) into lowest byte */
- asm("lci 0(%1), %0" : "=r" (ci) : "r" (vba));
- pa |= (ci >> PAGE_SHIFT) & 0xff; /* move CI (8 bits) into lowest byte */
-
- pa |= SBA_PDIR_VALID_BIT; /* set "valid" bit */
- *pdir_ptr = cpu_to_le64(pa); /* swap and store into I/O Pdir */
+ pba |= SBA_PDIR_VALID_BIT; /* set "valid" bit */
+ *pdir_ptr = cpu_to_le64(pba); /* swap and store into I/O Pdir */
/*
* If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit set
@@ -707,7 +704,7 @@ static int sba_dma_supported( struct device *dev, u64 mask)
* See Documentation/core-api/dma-api-howto.rst
*/
static dma_addr_t
-sba_map_single(struct device *dev, void *addr, size_t size,
+sba_map_single(struct device *dev, phys_addr_t addr, size_t size,
enum dma_data_direction direction)
{
struct ioc *ioc;
@@ -722,7 +719,7 @@ sba_map_single(struct device *dev, void *addr, size_t size,
return DMA_MAPPING_ERROR;
/* save offset bits */
- offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;
+ offset = offset_in_page(addr);
/* round up to nearest IOVP_SIZE */
size = (size + offset + ~IOVP_MASK) & IOVP_MASK;
@@ -739,13 +736,13 @@ sba_map_single(struct device *dev, void *addr, size_t size,
pide = sba_alloc_range(ioc, dev, size);
iovp = (dma_addr_t) pide << IOVP_SHIFT;
- DBG_RUN("%s() 0x%p -> 0x%lx\n",
- __func__, addr, (long) iovp | offset);
+	DBG_RUN("%s() %pa -> 0x%lx\n",
+ __func__, &addr, (long) iovp | offset);
pdir_start = &(ioc->pdir_base[pide]);
while (size > 0) {
- sba_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long) addr, 0);
+ sba_io_pdir_entry(pdir_start, KERNEL_SPACE, addr, 0);
DBG_RUN(" pdir 0x%p %02x%02x%02x%02x%02x%02x%02x%02x\n",
pdir_start,
@@ -778,17 +775,18 @@ sba_map_single(struct device *dev, void *addr, size_t size,
static dma_addr_t
-sba_map_page(struct device *dev, struct page *page, unsigned long offset,
- size_t size, enum dma_data_direction direction,
- unsigned long attrs)
+sba_map_phys(struct device *dev, phys_addr_t phys, size_t size,
+ enum dma_data_direction direction, unsigned long attrs)
{
- return sba_map_single(dev, page_address(page) + offset, size,
- direction);
+ if (unlikely(attrs & DMA_ATTR_MMIO))
+ return DMA_MAPPING_ERROR;
+
+ return sba_map_single(dev, phys, size, direction);
}
/**
- * sba_unmap_page - unmap one IOVA and free resources
+ * sba_unmap_phys - unmap one IOVA and free resources
* @dev: instance of PCI owned by the driver that's asking.
* @iova: IOVA of driver buffer previously mapped.
* @size: number of bytes mapped in driver buffer.
@@ -798,7 +796,7 @@ sba_map_page(struct device *dev, struct page *page, unsigned long offset,
* See Documentation/core-api/dma-api-howto.rst
*/
static void
-sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
+sba_unmap_phys(struct device *dev, dma_addr_t iova, size_t size,
enum dma_data_direction direction, unsigned long attrs)
{
struct ioc *ioc;
@@ -893,7 +891,7 @@ static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle
if (ret) {
memset(ret, 0, size);
- *dma_handle = sba_map_single(hwdev, ret, size, 0);
+ *dma_handle = sba_map_single(hwdev, virt_to_phys(ret), size, 0);
}
return ret;
@@ -914,7 +912,7 @@ static void
sba_free(struct device *hwdev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
{
- sba_unmap_page(hwdev, dma_handle, size, 0, 0);
+ sba_unmap_phys(hwdev, dma_handle, size, 0, 0);
free_pages((unsigned long) vaddr, get_order(size));
}
@@ -962,7 +960,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
/* Fast path single entry scatterlists. */
if (nents == 1) {
- sg_dma_address(sglist) = sba_map_single(dev, sg_virt(sglist),
+ sg_dma_address(sglist) = sba_map_single(dev, sg_phys(sglist),
sglist->length, direction);
sg_dma_len(sglist) = sglist->length;
return 1;
@@ -1061,7 +1059,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
while (nents && sg_dma_len(sglist)) {
- sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist),
+ sba_unmap_phys(dev, sg_dma_address(sglist), sg_dma_len(sglist),
direction, 0);
#ifdef SBA_COLLECT_STATS
ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
@@ -1085,8 +1083,8 @@ static const struct dma_map_ops sba_ops = {
.dma_supported = sba_dma_supported,
.alloc = sba_alloc,
.free = sba_free,
- .map_page = sba_map_page,
- .unmap_page = sba_unmap_page,
+ .map_phys = sba_map_phys,
+ .unmap_phys = sba_unmap_phys,
.map_sg = sba_map_sg,
.unmap_sg = sba_unmap_sg,
.get_sgtable = dma_common_get_sgtable,
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index f94f5d384362..00b0210e1f1d 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -122,6 +122,24 @@ config XEN_PCIDEV_FRONTEND
config PCI_ATS
bool
+config PCI_IDE
+ bool
+
+config PCI_TSM
+ bool "PCI TSM: Device security protocol support"
+ select PCI_IDE
+ select PCI_DOE
+ select TSM
+ help
+ The TEE (Trusted Execution Environment) Device Interface
+ Security Protocol (TDISP) defines a "TSM" as a platform agent
+ that manages device authentication, link encryption, link
+ integrity protection, and assignment of PCI device functions
+ (virtual or physical) to confidential computing VMs that can
+ access (DMA) guest private memory.
+
+ Enable a platform TSM driver to use this capability.
+
config PCI_DOE
bool "Enable PCI Data Object Exchange (DOE) support"
help
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index f3c81c892786..e10cfe5a280b 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -34,6 +34,8 @@ obj-$(CONFIG_PCI_P2PDMA) += p2pdma.o
obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
obj-$(CONFIG_VGA_ARB) += vgaarb.o
obj-$(CONFIG_PCI_DOE) += doe.o
+obj-$(CONFIG_PCI_IDE) += ide.o
+obj-$(CONFIG_PCI_TSM) += tsm.o
obj-$(CONFIG_PCI_DYNAMIC_OF_NODES) += of_property.o
obj-$(CONFIG_PCI_NPEM) += npem.o
obj-$(CONFIG_PCIE_TPH) += tph.o
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 9daf13ed3714..4383a36fd6ca 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -8,6 +8,7 @@
*/
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/cleanup.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/ioport.h>
@@ -435,6 +436,27 @@ static int __pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void
return ret;
}
+static int __pci_walk_bus_reverse(struct pci_bus *top,
+ int (*cb)(struct pci_dev *, void *),
+ void *userdata)
+{
+ struct pci_dev *dev;
+ int ret = 0;
+
+ list_for_each_entry_reverse(dev, &top->devices, bus_list) {
+ if (dev->subordinate) {
+ ret = __pci_walk_bus_reverse(dev->subordinate, cb,
+ userdata);
+ if (ret)
+ break;
+ }
+ ret = cb(dev, userdata);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
/**
* pci_walk_bus - walk devices on/under bus, calling callback.
* @top: bus whose devices should be walked
@@ -456,6 +478,23 @@ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void
}
EXPORT_SYMBOL_GPL(pci_walk_bus);
+/**
+ * pci_walk_bus_reverse - walk devices on/under bus in reverse, calling callback.
+ * @top: bus whose devices should be walked
+ * @cb: callback to be called for each device found
+ * @userdata: arbitrary pointer to be passed to callback
+ *
+ * Same semantics as pci_walk_bus(), but walks the bus in reverse order.
+ */
+void pci_walk_bus_reverse(struct pci_bus *top,
+ int (*cb)(struct pci_dev *, void *), void *userdata)
+{
+ down_read(&pci_bus_sem);
+ __pci_walk_bus_reverse(top, cb, userdata);
+ up_read(&pci_bus_sem);
+}
+EXPORT_SYMBOL_GPL(pci_walk_bus_reverse);
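As an illustration of the ordering, a minimal sketch (hypothetical callback, not part of this patch) in which every child device is visited before its upstream bridge:

static int note_dev(struct pci_dev *pdev, void *unused)
{
	pci_info(pdev, "visited before its upstream bridge\n");
	return 0; /* returning non-zero stops the walk */
}

/* caller */
pci_walk_bus_reverse(bus, note_dev, NULL);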
+
void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata)
{
lockdep_assert_held(&pci_bus_sem);
diff --git a/drivers/pci/doe.c b/drivers/pci/doe.c
index aae9a8a00406..62be9c8dbc52 100644
--- a/drivers/pci/doe.c
+++ b/drivers/pci/doe.c
@@ -24,8 +24,6 @@
#include "pci.h"
-#define PCI_DOE_FEATURE_DISCOVERY 0
-
/* Timeout of 1 second from 6.30.2 Operation, PCI Spec r6.0 */
#define PCI_DOE_TIMEOUT HZ
#define PCI_DOE_POLL_INTERVAL (PCI_DOE_TIMEOUT / 128)
diff --git a/drivers/pci/ide.c b/drivers/pci/ide.c
new file mode 100644
index 000000000000..f0ef474e1a0d
--- /dev/null
+++ b/drivers/pci/ide.c
@@ -0,0 +1,815 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2024-2025 Intel Corporation. All rights reserved. */
+
+/* PCIe r7.0 section 6.33 Integrity & Data Encryption (IDE) */
+
+#define dev_fmt(fmt) "PCI/IDE: " fmt
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/pci.h>
+#include <linux/pci-ide.h>
+#include <linux/pci_regs.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/tsm.h>
+
+#include "pci.h"
+
+static int __sel_ide_offset(u16 ide_cap, u8 nr_link_ide, u8 stream_index,
+ u8 nr_ide_mem)
+{
+ u32 offset = ide_cap + PCI_IDE_LINK_STREAM_0 +
+ nr_link_ide * PCI_IDE_LINK_BLOCK_SIZE;
+
+ /*
+ * Assume a constant number of address association resources per stream
+ * index
+ */
+ return offset + stream_index * PCI_IDE_SEL_BLOCK_SIZE(nr_ide_mem);
+}
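A worked example of the layout arithmetic (offsets invented for illustration, block sizes per the PCIe r7.0 IDE register layout):

/*
 * With ide_cap = 0x400, nr_link_ide = 1 and nr_ide_mem = 1, the
 * register block for stream_index 2 starts at:
 *
 *   0x400 + PCI_IDE_LINK_STREAM_0          capability header
 *         + 1 * PCI_IDE_LINK_BLOCK_SIZE    skip the one Link IDE block
 *         + 2 * PCI_IDE_SEL_BLOCK_SIZE(1)  skip two Selective blocks,
 *                                          each with one address
 *                                          association register set
 */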
+
+static int sel_ide_offset(struct pci_dev *pdev,
+ struct pci_ide_partner *settings)
+{
+ return __sel_ide_offset(pdev->ide_cap, pdev->nr_link_ide,
+ settings->stream_index, pdev->nr_ide_mem);
+}
+
+static bool reserve_stream_index(struct pci_dev *pdev, u8 idx)
+{
+ int ret;
+
+ ret = ida_alloc_range(&pdev->ide_stream_ida, idx, idx, GFP_KERNEL);
+ return ret >= 0;
+}
+
+static bool reserve_stream_id(struct pci_host_bridge *hb, u8 id)
+{
+ int ret;
+
+ ret = ida_alloc_range(&hb->ide_stream_ids_ida, id, id, GFP_KERNEL);
+ return ret >= 0;
+}
+
+static bool claim_stream(struct pci_host_bridge *hb, u8 stream_id,
+ struct pci_dev *pdev, u8 stream_idx)
+{
+ dev_info(&hb->dev, "Stream ID %d active at init\n", stream_id);
+ if (!reserve_stream_id(hb, stream_id)) {
+ dev_info(&hb->dev, "Failed to claim %s Stream ID %d\n",
+ stream_id == PCI_IDE_RESERVED_STREAM_ID ? "reserved" :
+ "active",
+ stream_id);
+ return false;
+ }
+
+ /* No stream index to reserve in the Link IDE case */
+ if (!pdev)
+ return true;
+
+ if (!reserve_stream_index(pdev, stream_idx)) {
+ pci_info(pdev, "Failed to claim active Selective Stream %d\n",
+ stream_idx);
+ return false;
+ }
+
+ return true;
+}
+
+void pci_ide_init(struct pci_dev *pdev)
+{
+ struct pci_host_bridge *hb = pci_find_host_bridge(pdev->bus);
+ u16 nr_link_ide, nr_ide_mem, nr_streams;
+ u16 ide_cap;
+ u32 val;
+
+ /*
+ * Unconditionally init so that ida idle state is consistent with
+ * pdev->ide_cap.
+ */
+ ida_init(&pdev->ide_stream_ida);
+
+ if (!pci_is_pcie(pdev))
+ return;
+
+ ide_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_IDE);
+ if (!ide_cap)
+ return;
+
+ pci_read_config_dword(pdev, ide_cap + PCI_IDE_CAP, &val);
+ if ((val & PCI_IDE_CAP_SELECTIVE) == 0)
+ return;
+
+ /*
+	 * Require the Endpoint IDE capability to be paired with a Root Port
+	 * IDE capability.
+ */
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ENDPOINT) {
+ struct pci_dev *rp = pcie_find_root_port(pdev);
+
+ if (!rp->ide_cap)
+ return;
+ }
+
+ pdev->ide_cfg = FIELD_GET(PCI_IDE_CAP_SEL_CFG, val);
+ pdev->ide_tee_limit = FIELD_GET(PCI_IDE_CAP_TEE_LIMITED, val);
+
+ if (val & PCI_IDE_CAP_LINK)
+ nr_link_ide = 1 + FIELD_GET(PCI_IDE_CAP_LINK_TC_NUM, val);
+ else
+ nr_link_ide = 0;
+
+ nr_ide_mem = 0;
+ nr_streams = 1 + FIELD_GET(PCI_IDE_CAP_SEL_NUM, val);
+ for (u16 i = 0; i < nr_streams; i++) {
+ int pos = __sel_ide_offset(ide_cap, nr_link_ide, i, nr_ide_mem);
+ int nr_assoc;
+ u32 val;
+ u8 id;
+
+ pci_read_config_dword(pdev, pos + PCI_IDE_SEL_CAP, &val);
+
+ /*
+ * Let's not entertain streams that do not have a constant
+ * number of address association blocks
+ */
+ nr_assoc = FIELD_GET(PCI_IDE_SEL_CAP_ASSOC_NUM, val);
+ if (i && (nr_assoc != nr_ide_mem)) {
+			pci_info(pdev, "Unsupported Selective Stream %d capability, skipping the rest\n", i);
+ nr_streams = i;
+ break;
+ }
+
+ nr_ide_mem = nr_assoc;
+
+ /*
+ * Claim Stream IDs and Selective Stream blocks that are already
+ * active on the device
+ */
+ pci_read_config_dword(pdev, pos + PCI_IDE_SEL_CTL, &val);
+ id = FIELD_GET(PCI_IDE_SEL_CTL_ID, val);
+ if ((val & PCI_IDE_SEL_CTL_EN) &&
+ !claim_stream(hb, id, pdev, i))
+ return;
+ }
+
+	/* Claim Link Stream IDs that are already active on the device */
+ for (u16 i = 0; i < nr_link_ide; ++i) {
+ int pos = ide_cap + PCI_IDE_LINK_STREAM_0 + i * PCI_IDE_LINK_BLOCK_SIZE;
+ u8 id;
+
+ pci_read_config_dword(pdev, pos + PCI_IDE_LINK_CTL_0, &val);
+ id = FIELD_GET(PCI_IDE_LINK_CTL_ID, val);
+ if ((val & PCI_IDE_LINK_CTL_EN) &&
+ !claim_stream(hb, id, NULL, -1))
+ return;
+ }
+
+ for (u16 i = 0; i < nr_streams; i++) {
+ int pos = __sel_ide_offset(ide_cap, nr_link_ide, i, nr_ide_mem);
+
+		pci_read_config_dword(pdev, pos + PCI_IDE_SEL_CTL, &val);
+ if (val & PCI_IDE_SEL_CTL_EN)
+ continue;
+ val &= ~PCI_IDE_SEL_CTL_ID;
+ val |= FIELD_PREP(PCI_IDE_SEL_CTL_ID, PCI_IDE_RESERVED_STREAM_ID);
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_CTL, val);
+ }
+
+ for (u16 i = 0; i < nr_link_ide; ++i) {
+ int pos = ide_cap + PCI_IDE_LINK_STREAM_0 +
+ i * PCI_IDE_LINK_BLOCK_SIZE;
+
+ pci_read_config_dword(pdev, pos, &val);
+ if (val & PCI_IDE_LINK_CTL_EN)
+ continue;
+ val &= ~PCI_IDE_LINK_CTL_ID;
+ val |= FIELD_PREP(PCI_IDE_LINK_CTL_ID, PCI_IDE_RESERVED_STREAM_ID);
+ pci_write_config_dword(pdev, pos, val);
+ }
+
+ pdev->ide_cap = ide_cap;
+ pdev->nr_link_ide = nr_link_ide;
+ pdev->nr_sel_ide = nr_streams;
+ pdev->nr_ide_mem = nr_ide_mem;
+}
+
+struct stream_index {
+ struct ida *ida;
+ u8 stream_index;
+};
+
+static void free_stream_index(struct stream_index *stream)
+{
+ ida_free(stream->ida, stream->stream_index);
+}
+
+DEFINE_FREE(free_stream, struct stream_index *, if (_T) free_stream_index(_T))
+static struct stream_index *alloc_stream_index(struct ida *ida, u16 max,
+ struct stream_index *stream)
+{
+ int id;
+
+ if (!max)
+ return NULL;
+
+ id = ida_alloc_max(ida, max - 1, GFP_KERNEL);
+ if (id < 0)
+ return NULL;
+
+ *stream = (struct stream_index) {
+ .ida = ida,
+ .stream_index = id,
+ };
+ return stream;
+}
+
+/**
+ * pci_ide_stream_alloc() - Reserve stream indices and probe for settings
+ * @pdev: IDE capable PCIe Endpoint Physical Function
+ *
+ * Retrieve the Requester ID range of @pdev for programming its Root
+ * Port IDE RID Association registers, and conversely retrieve the
+ * Requester ID of the Root Port for programming @pdev's IDE RID
+ * Association registers.
+ *
+ * Allocate a Selective IDE Stream Register Block instance per port.
+ *
+ * Allocate a platform stream resource from the associated host bridge.
+ * Retrieve stream association parameters for Requester ID range and
+ * address range restrictions for the stream.
+ */
+struct pci_ide *pci_ide_stream_alloc(struct pci_dev *pdev)
+{
+ /* EP, RP, + HB Stream allocation */
+ struct stream_index __stream[PCI_IDE_HB + 1];
+ struct pci_bus_region pref_assoc = { 0, -1 };
+ struct pci_bus_region mem_assoc = { 0, -1 };
+ struct resource *mem, *pref;
+ struct pci_host_bridge *hb;
+ struct pci_dev *rp, *br;
+ int num_vf, rid_end;
+
+ if (!pci_is_pcie(pdev))
+ return NULL;
+
+ if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ENDPOINT)
+ return NULL;
+
+ if (!pdev->ide_cap)
+ return NULL;
+
+ struct pci_ide *ide __free(kfree) = kzalloc(sizeof(*ide), GFP_KERNEL);
+ if (!ide)
+ return NULL;
+
+ hb = pci_find_host_bridge(pdev->bus);
+ struct stream_index *hb_stream __free(free_stream) = alloc_stream_index(
+ &hb->ide_stream_ida, hb->nr_ide_streams, &__stream[PCI_IDE_HB]);
+ if (!hb_stream)
+ return NULL;
+
+ rp = pcie_find_root_port(pdev);
+ struct stream_index *rp_stream __free(free_stream) = alloc_stream_index(
+ &rp->ide_stream_ida, rp->nr_sel_ide, &__stream[PCI_IDE_RP]);
+ if (!rp_stream)
+ return NULL;
+
+ struct stream_index *ep_stream __free(free_stream) = alloc_stream_index(
+ &pdev->ide_stream_ida, pdev->nr_sel_ide, &__stream[PCI_IDE_EP]);
+ if (!ep_stream)
+ return NULL;
+
+ /* for SR-IOV case, cover all VFs */
+ num_vf = pci_num_vf(pdev);
+ if (num_vf)
+ rid_end = PCI_DEVID(pci_iov_virtfn_bus(pdev, num_vf),
+ pci_iov_virtfn_devfn(pdev, num_vf));
+ else
+ rid_end = pci_dev_id(pdev);
+
+ br = pci_upstream_bridge(pdev);
+ if (!br)
+ return NULL;
+
+ /*
+ * Check if the device consumes memory and/or prefetch-memory. Setup
+ * downstream address association ranges for each.
+ */
+ mem = pci_resource_n(br, PCI_BRIDGE_MEM_WINDOW);
+ pref = pci_resource_n(br, PCI_BRIDGE_PREF_MEM_WINDOW);
+ if (resource_assigned(mem))
+ pcibios_resource_to_bus(br->bus, &mem_assoc, mem);
+ if (resource_assigned(pref))
+ pcibios_resource_to_bus(br->bus, &pref_assoc, pref);
+
+ *ide = (struct pci_ide) {
+ .pdev = pdev,
+ .partner = {
+ [PCI_IDE_EP] = {
+ .rid_start = pci_dev_id(rp),
+ .rid_end = pci_dev_id(rp),
+ .stream_index = no_free_ptr(ep_stream)->stream_index,
+ /* Disable upstream address association */
+ .mem_assoc = { 0, -1 },
+ .pref_assoc = { 0, -1 },
+ },
+ [PCI_IDE_RP] = {
+ .rid_start = pci_dev_id(pdev),
+ .rid_end = rid_end,
+ .stream_index = no_free_ptr(rp_stream)->stream_index,
+ .mem_assoc = mem_assoc,
+ .pref_assoc = pref_assoc,
+ },
+ },
+ .host_bridge_stream = no_free_ptr(hb_stream)->stream_index,
+ .stream_id = -1,
+ };
+
+ return_ptr(ide);
+}
+EXPORT_SYMBOL_GPL(pci_ide_stream_alloc);
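A minimal consumer sketch of the full flow (hypothetical function; assumes a Stream ID negotiated out of band and key programming performed by a TSM between setup and enable):

static int example_establish_ide(struct pci_dev *pdev, u8 stream_id)
{
	struct pci_dev *rp = pcie_find_root_port(pdev);
	struct pci_ide *ide;
	int rc;

	ide = pci_ide_stream_alloc(pdev);
	if (!ide)
		return -ENOMEM;

	ide->stream_id = stream_id;
	rc = pci_ide_stream_register(ide);
	if (rc)
		goto err;

	pci_ide_stream_setup(pdev, ide);
	pci_ide_stream_setup(rp, ide);

	/* ... IDE_KM key programming would happen here ... */

	rc = pci_ide_stream_enable(pdev, ide);
	if (!rc)
		rc = pci_ide_stream_enable(rp, ide);
	if (rc)
		goto err;

	return 0;
err:
	/* unwinds whichever of the above steps completed */
	pci_ide_stream_release(ide);
	return rc;
}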
+
+/**
+ * pci_ide_stream_free() - unwind pci_ide_stream_alloc()
+ * @ide: idle IDE settings descriptor
+ *
+ * Free all of the stream index (register block) allocations acquired by
+ * pci_ide_stream_alloc(). The stream represented by @ide is assumed to
+ * be unregistered and not instantiated in any device.
+ */
+void pci_ide_stream_free(struct pci_ide *ide)
+{
+ struct pci_dev *pdev = ide->pdev;
+ struct pci_dev *rp = pcie_find_root_port(pdev);
+ struct pci_host_bridge *hb = pci_find_host_bridge(pdev->bus);
+
+ ida_free(&pdev->ide_stream_ida, ide->partner[PCI_IDE_EP].stream_index);
+ ida_free(&rp->ide_stream_ida, ide->partner[PCI_IDE_RP].stream_index);
+ ida_free(&hb->ide_stream_ida, ide->host_bridge_stream);
+ kfree(ide);
+}
+EXPORT_SYMBOL_GPL(pci_ide_stream_free);
+
+/**
+ * pci_ide_stream_release() - unwind and release an @ide context
+ * @ide: partially or fully registered IDE settings descriptor
+ *
+ * In support of automatic cleanup of IDE setup routines perform IDE
+ * teardown in expected reverse order of setup and with respect to which
+ * aspects of IDE setup have successfully completed.
+ *
+ * Be careful that setup order mirrors this shutdown order. Otherwise,
+ * open code releasing the IDE context.
+ */
+void pci_ide_stream_release(struct pci_ide *ide)
+{
+ struct pci_dev *pdev = ide->pdev;
+ struct pci_dev *rp = pcie_find_root_port(pdev);
+
+ if (ide->partner[PCI_IDE_RP].enable)
+ pci_ide_stream_disable(rp, ide);
+
+ if (ide->partner[PCI_IDE_EP].enable)
+ pci_ide_stream_disable(pdev, ide);
+
+ if (ide->tsm_dev)
+ tsm_ide_stream_unregister(ide);
+
+ if (ide->partner[PCI_IDE_RP].setup)
+ pci_ide_stream_teardown(rp, ide);
+
+ if (ide->partner[PCI_IDE_EP].setup)
+ pci_ide_stream_teardown(pdev, ide);
+
+ if (ide->name)
+ pci_ide_stream_unregister(ide);
+
+ pci_ide_stream_free(ide);
+}
+EXPORT_SYMBOL_GPL(pci_ide_stream_release);
+
+struct pci_ide_stream_id {
+ struct pci_host_bridge *hb;
+ u8 stream_id;
+};
+
+static struct pci_ide_stream_id *
+request_stream_id(struct pci_host_bridge *hb, u8 stream_id,
+ struct pci_ide_stream_id *sid)
+{
+ if (!reserve_stream_id(hb, stream_id))
+ return NULL;
+
+ *sid = (struct pci_ide_stream_id) {
+ .hb = hb,
+ .stream_id = stream_id,
+ };
+
+ return sid;
+}
+DEFINE_FREE(free_stream_id, struct pci_ide_stream_id *,
+ if (_T) ida_free(&_T->hb->ide_stream_ids_ida, _T->stream_id))
+
+/**
+ * pci_ide_stream_register() - Prepare to activate an IDE Stream
+ * @ide: IDE settings descriptor
+ *
+ * After a Stream ID has been acquired for @ide, record the presence of
+ * the stream in sysfs. The expectation is that @ide is immutable while
+ * registered.
+ */
+int pci_ide_stream_register(struct pci_ide *ide)
+{
+ struct pci_dev *pdev = ide->pdev;
+ struct pci_host_bridge *hb = pci_find_host_bridge(pdev->bus);
+ struct pci_ide_stream_id __sid;
+ u8 ep_stream, rp_stream;
+ int rc;
+
+ if (ide->stream_id < 0 || ide->stream_id > U8_MAX) {
+ pci_err(pdev, "Setup fail: Invalid Stream ID: %d\n", ide->stream_id);
+ return -ENXIO;
+ }
+
+ struct pci_ide_stream_id *sid __free(free_stream_id) =
+ request_stream_id(hb, ide->stream_id, &__sid);
+ if (!sid) {
+ pci_err(pdev, "Setup fail: Stream ID %d in use\n", ide->stream_id);
+ return -EBUSY;
+ }
+
+ ep_stream = ide->partner[PCI_IDE_EP].stream_index;
+ rp_stream = ide->partner[PCI_IDE_RP].stream_index;
+ const char *name __free(kfree) = kasprintf(GFP_KERNEL, "stream%d.%d.%d",
+ ide->host_bridge_stream,
+ rp_stream, ep_stream);
+ if (!name)
+ return -ENOMEM;
+
+ rc = sysfs_create_link(&hb->dev.kobj, &pdev->dev.kobj, name);
+ if (rc)
+ return rc;
+
+ ide->name = no_free_ptr(name);
+
+ /* Stream ID reservation recorded in @ide is now successfully registered */
+ retain_and_null_ptr(sid);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_ide_stream_register);
+
+/**
+ * pci_ide_stream_unregister() - unwind pci_ide_stream_register()
+ * @ide: idle IDE settings descriptor
+ *
+ * In preparation for freeing @ide, remove sysfs enumeration for the
+ * stream.
+ */
+void pci_ide_stream_unregister(struct pci_ide *ide)
+{
+ struct pci_dev *pdev = ide->pdev;
+ struct pci_host_bridge *hb = pci_find_host_bridge(pdev->bus);
+
+ sysfs_remove_link(&hb->dev.kobj, ide->name);
+ kfree(ide->name);
+ ida_free(&hb->ide_stream_ids_ida, ide->stream_id);
+ ide->name = NULL;
+}
+EXPORT_SYMBOL_GPL(pci_ide_stream_unregister);
+
+static int pci_ide_domain(struct pci_dev *pdev)
+{
+ if (pdev->fm_enabled)
+ return pci_domain_nr(pdev->bus);
+ return 0;
+}
+
+struct pci_ide_partner *pci_ide_to_settings(struct pci_dev *pdev, struct pci_ide *ide)
+{
+ if (!pci_is_pcie(pdev)) {
+ pci_warn_once(pdev, "not a PCIe device\n");
+ return NULL;
+ }
+
+ switch (pci_pcie_type(pdev)) {
+ case PCI_EXP_TYPE_ENDPOINT:
+ if (pdev != ide->pdev) {
+ pci_warn_once(pdev, "setup expected Endpoint: %s\n", pci_name(ide->pdev));
+ return NULL;
+ }
+ return &ide->partner[PCI_IDE_EP];
+ case PCI_EXP_TYPE_ROOT_PORT: {
+ struct pci_dev *rp = pcie_find_root_port(ide->pdev);
+
+ if (pdev != rp) {
+ pci_warn_once(pdev, "setup expected Root Port: %s\n",
+ pci_name(rp));
+ return NULL;
+ }
+ return &ide->partner[PCI_IDE_RP];
+ }
+ default:
+ pci_warn_once(pdev, "invalid device type\n");
+ return NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(pci_ide_to_settings);
+
+static void set_ide_sel_ctl(struct pci_dev *pdev, struct pci_ide *ide,
+ struct pci_ide_partner *settings, int pos,
+ bool enable)
+{
+ u32 val = FIELD_PREP(PCI_IDE_SEL_CTL_ID, ide->stream_id) |
+ FIELD_PREP(PCI_IDE_SEL_CTL_DEFAULT, settings->default_stream) |
+ FIELD_PREP(PCI_IDE_SEL_CTL_CFG_EN, pdev->ide_cfg) |
+ FIELD_PREP(PCI_IDE_SEL_CTL_TEE_LIMITED, pdev->ide_tee_limit) |
+ FIELD_PREP(PCI_IDE_SEL_CTL_EN, enable);
+
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_CTL, val);
+}
+
+#define SEL_ADDR1_LOWER GENMASK(31, 20)
+#define SEL_ADDR_UPPER GENMASK_ULL(63, 32)
+#define PREP_PCI_IDE_SEL_ADDR1(base, limit) \
+ (FIELD_PREP(PCI_IDE_SEL_ADDR_1_VALID, 1) | \
+ FIELD_PREP(PCI_IDE_SEL_ADDR_1_BASE_LOW, \
+ FIELD_GET(SEL_ADDR1_LOWER, (base))) | \
+ FIELD_PREP(PCI_IDE_SEL_ADDR_1_LIMIT_LOW, \
+ FIELD_GET(SEL_ADDR1_LOWER, (limit))))
+
+static void mem_assoc_to_regs(struct pci_bus_region *region,
+ struct pci_ide_regs *regs, int idx)
+{
+ /* convert to u64 range for bitfield size checks */
+ struct range r = { region->start, region->end };
+
+ regs->addr[idx].assoc1 = PREP_PCI_IDE_SEL_ADDR1(r.start, r.end);
+ regs->addr[idx].assoc2 = FIELD_GET(SEL_ADDR_UPPER, r.end);
+ regs->addr[idx].assoc3 = FIELD_GET(SEL_ADDR_UPPER, r.start);
+}
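For example (invented range), a downstream association for a bridge window of [0x00000000c0000000, 0x00000000c00fffff] packs as:

/*
 * Hypothetical values:
 *   assoc1: valid = 1, base[31:20] = 0xc00, limit[31:20] = 0xc00
 *   assoc2: limit[63:32] = 0x0
 *   assoc3: base[63:32]  = 0x0
 * The low 20 bits are implied: the base rounds down to 1M alignment
 * and the limit fills with ones, mirroring bridge window semantics.
 */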
+
+/**
+ * pci_ide_stream_to_regs() - convert IDE settings to association register values
+ * @pdev: PCIe device object for either a Root Port or Endpoint Partner Port
+ * @ide: registered IDE settings descriptor
+ * @regs: output register values
+ */
+static void pci_ide_stream_to_regs(struct pci_dev *pdev, struct pci_ide *ide,
+ struct pci_ide_regs *regs)
+{
+ struct pci_ide_partner *settings = pci_ide_to_settings(pdev, ide);
+ int assoc_idx = 0;
+
+ memset(regs, 0, sizeof(*regs));
+
+ if (!settings)
+ return;
+
+ regs->rid1 = FIELD_PREP(PCI_IDE_SEL_RID_1_LIMIT, settings->rid_end);
+
+ regs->rid2 = FIELD_PREP(PCI_IDE_SEL_RID_2_VALID, 1) |
+ FIELD_PREP(PCI_IDE_SEL_RID_2_BASE, settings->rid_start) |
+ FIELD_PREP(PCI_IDE_SEL_RID_2_SEG, pci_ide_domain(pdev));
+
+ if (pdev->nr_ide_mem && pci_bus_region_size(&settings->mem_assoc)) {
+ mem_assoc_to_regs(&settings->mem_assoc, regs, assoc_idx);
+ assoc_idx++;
+ }
+
+ if (pdev->nr_ide_mem > assoc_idx &&
+ pci_bus_region_size(&settings->pref_assoc)) {
+ mem_assoc_to_regs(&settings->pref_assoc, regs, assoc_idx);
+ assoc_idx++;
+ }
+
+ regs->nr_addr = assoc_idx;
+}
+
+/**
+ * pci_ide_stream_setup() - program settings to Selective IDE Stream registers
+ * @pdev: PCIe device object for either a Root Port or Endpoint Partner Port
+ * @ide: registered IDE settings descriptor
+ *
+ * When @pdev is a PCI_EXP_TYPE_ENDPOINT then the PCI_IDE_EP partner
+ * settings are written to @pdev's Selective IDE Stream register block,
+ * and when @pdev is a PCI_EXP_TYPE_ROOT_PORT, the PCI_IDE_RP settings
+ * are selected.
+ */
+void pci_ide_stream_setup(struct pci_dev *pdev, struct pci_ide *ide)
+{
+ struct pci_ide_partner *settings = pci_ide_to_settings(pdev, ide);
+ struct pci_ide_regs regs;
+ int pos;
+
+ if (!settings)
+ return;
+
+ pci_ide_stream_to_regs(pdev, ide, &regs);
+
+ pos = sel_ide_offset(pdev, settings);
+
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_RID_1, regs.rid1);
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_RID_2, regs.rid2);
+
+ for (int i = 0; i < regs.nr_addr; i++) {
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_1(i),
+ regs.addr[i].assoc1);
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_2(i),
+ regs.addr[i].assoc2);
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_3(i),
+ regs.addr[i].assoc3);
+ }
+
+ /* clear extra unused address association blocks */
+ for (int i = regs.nr_addr; i < pdev->nr_ide_mem; i++) {
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_1(i), 0);
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_2(i), 0);
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_3(i), 0);
+ }
+
+ /*
+	 * Set up the control register early for devices that expect the
+	 * Stream ID to be set during key programming.
+ */
+ set_ide_sel_ctl(pdev, ide, settings, pos, false);
+ settings->setup = 1;
+}
+EXPORT_SYMBOL_GPL(pci_ide_stream_setup);
+
+/**
+ * pci_ide_stream_teardown() - disable the stream and clear all settings
+ * @pdev: PCIe device object for either a Root Port or Endpoint Partner Port
+ * @ide: registered IDE settings descriptor
+ *
+ * For stream destruction, zero all registers that may have been written
+ * by pci_ide_stream_setup(). Consider pci_ide_stream_disable() to leave
+ * settings in place while temporarily disabling the stream.
+ */
+void pci_ide_stream_teardown(struct pci_dev *pdev, struct pci_ide *ide)
+{
+ struct pci_ide_partner *settings = pci_ide_to_settings(pdev, ide);
+ int pos, i;
+
+ if (!settings)
+ return;
+
+ pos = sel_ide_offset(pdev, settings);
+
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_CTL, 0);
+
+ for (i = 0; i < pdev->nr_ide_mem; i++) {
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_1(i), 0);
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_2(i), 0);
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_3(i), 0);
+ }
+
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_RID_2, 0);
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_RID_1, 0);
+ settings->setup = 0;
+}
+EXPORT_SYMBOL_GPL(pci_ide_stream_teardown);
+
+/**
+ * pci_ide_stream_enable() - enable a Selective IDE Stream
+ * @pdev: PCIe device object for either a Root Port or Endpoint Partner Port
+ * @ide: registered and setup IDE settings descriptor
+ *
+ * Activate the stream by writing to the Selective IDE Stream Control
+ * Register.
+ *
+ * Return: 0 if the stream successfully entered the "secure" state, -EINVAL
+ * if @ide is invalid, or -ENXIO if the stream fails to enter the secure state.
+ *
+ * Note that the state may go "insecure" at any point after returning 0, but
+ * those events are equivalent to a "link down" event and handled via
+ * asynchronous error reporting.
+ *
+ * Caller is responsible for clearing the enable bit in the -ENXIO case.
+ */
+int pci_ide_stream_enable(struct pci_dev *pdev, struct pci_ide *ide)
+{
+ struct pci_ide_partner *settings = pci_ide_to_settings(pdev, ide);
+ int pos;
+ u32 val;
+
+ if (!settings)
+ return -EINVAL;
+
+ pos = sel_ide_offset(pdev, settings);
+
+ set_ide_sel_ctl(pdev, ide, settings, pos, true);
+ settings->enable = 1;
+
+ pci_read_config_dword(pdev, pos + PCI_IDE_SEL_STS, &val);
+ if (FIELD_GET(PCI_IDE_SEL_STS_STATE, val) !=
+ PCI_IDE_SEL_STS_STATE_SECURE)
+ return -ENXIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_ide_stream_enable);
+
+/**
+ * pci_ide_stream_disable() - disable a Selective IDE Stream
+ * @pdev: PCIe device object for either a Root Port or Endpoint Partner Port
+ * @ide: registered and setup IDE settings descriptor
+ *
+ * Clear the Selective IDE Stream Control Register, but leave all other
+ * registers untouched.
+ */
+void pci_ide_stream_disable(struct pci_dev *pdev, struct pci_ide *ide)
+{
+ struct pci_ide_partner *settings = pci_ide_to_settings(pdev, ide);
+ int pos;
+
+ if (!settings)
+ return;
+
+ pos = sel_ide_offset(pdev, settings);
+
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_CTL, 0);
+ settings->enable = 0;
+}
+EXPORT_SYMBOL_GPL(pci_ide_stream_disable);
+
+void pci_ide_init_host_bridge(struct pci_host_bridge *hb)
+{
+ hb->nr_ide_streams = 256;
+ ida_init(&hb->ide_stream_ida);
+ ida_init(&hb->ide_stream_ids_ida);
+ reserve_stream_id(hb, PCI_IDE_RESERVED_STREAM_ID);
+}
+
+static ssize_t available_secure_streams_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_host_bridge *hb = to_pci_host_bridge(dev);
+ int nr = READ_ONCE(hb->nr_ide_streams);
+ int avail = nr;
+
+ if (!nr)
+ return -ENXIO;
+
+ /*
+ * Yes, this is inefficient and racy, but it is only for occasional
+ * platform resource surveys. Worst case is bounded to 256 streams.
+ */
+ for (int i = 0; i < nr; i++)
+ if (ida_exists(&hb->ide_stream_ida, i))
+ avail--;
+ return sysfs_emit(buf, "%d\n", avail);
+}
+static DEVICE_ATTR_RO(available_secure_streams);
+
+static struct attribute *pci_ide_attrs[] = {
+ &dev_attr_available_secure_streams.attr,
+ NULL
+};
+
+static umode_t pci_ide_attr_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct pci_host_bridge *hb = to_pci_host_bridge(dev);
+
+ if (a == &dev_attr_available_secure_streams.attr)
+ if (!hb->nr_ide_streams)
+ return 0;
+
+ return a->mode;
+}
+
+const struct attribute_group pci_ide_attr_group = {
+ .attrs = pci_ide_attrs,
+ .is_visible = pci_ide_attr_visible,
+};
+
+/**
+ * pci_ide_set_nr_streams() - sets size of the pool of IDE Stream resources
+ * @hb: host bridge boundary for the stream pool
+ * @nr: number of streams
+ *
+ * Platform PCI init and/or expert test module use only. Limit IDE
+ * Stream establishment by setting the number of stream resources
+ * available at the host bridge. Platform init code must set this before
+ * the first pci_ide_stream_alloc() call if the platform supports fewer
+ * than the default of 256 streams per host bridge.
+ *
+ * The "PCI_IDE" symbol namespace is required because this is typically
+ * a detail that is settled in early PCI init. I.e. this export is not
+ * for endpoint drivers.
+ */
+void pci_ide_set_nr_streams(struct pci_host_bridge *hb, u16 nr)
+{
+ hb->nr_ide_streams = min(nr, 256);
+ WARN_ON_ONCE(!ida_is_empty(&hb->ide_stream_ida));
+ sysfs_update_group(&hb->dev.kobj, &pci_ide_attr_group);
+}
+EXPORT_SYMBOL_NS_GPL(pci_ide_set_nr_streams, "PCI_IDE");
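+
+/*
+ * Illustrative sketch (not part of this patch): a platform init module
+ * constraining the pool must import the "PCI_IDE" namespace; the limit
+ * of 24 streams is a hypothetical platform property.
+ *
+ *	MODULE_IMPORT_NS("PCI_IDE");
+ *	...
+ *	pci_ide_set_nr_streams(hb, 24);
+ */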
+
+void pci_ide_destroy(struct pci_dev *pdev)
+{
+ ida_destroy(&pdev->ide_stream_ida);
+}
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 80a7c4fe6b03..c2df915ad2d2 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -1856,5 +1856,9 @@ const struct attribute_group *pci_dev_attr_groups[] = {
#ifdef CONFIG_PCI_DOE
&pci_doe_sysfs_group,
#endif
+#ifdef CONFIG_PCI_TSM
+ &pci_tsm_auth_attr_group,
+ &pci_tsm_attr_group,
+#endif
NULL,
};
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index a33bc4e0bf34..0e67014aa001 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -615,6 +615,27 @@ static inline void pci_doe_sysfs_init(struct pci_dev *pdev) { }
static inline void pci_doe_sysfs_teardown(struct pci_dev *pdev) { }
#endif
+#ifdef CONFIG_PCI_IDE
+void pci_ide_init(struct pci_dev *dev);
+void pci_ide_init_host_bridge(struct pci_host_bridge *hb);
+void pci_ide_destroy(struct pci_dev *dev);
+extern const struct attribute_group pci_ide_attr_group;
+#else
+static inline void pci_ide_init(struct pci_dev *dev) { }
+static inline void pci_ide_init_host_bridge(struct pci_host_bridge *hb) { }
+static inline void pci_ide_destroy(struct pci_dev *dev) { }
+#endif
+
+#ifdef CONFIG_PCI_TSM
+void pci_tsm_init(struct pci_dev *pdev);
+void pci_tsm_destroy(struct pci_dev *pdev);
+extern const struct attribute_group pci_tsm_attr_group;
+extern const struct attribute_group pci_tsm_auth_attr_group;
+#else
+static inline void pci_tsm_init(struct pci_dev *pdev) { }
+static inline void pci_tsm_destroy(struct pci_dev *pdev) { }
+#endif
+
/**
* pci_dev_set_io_state - Set the new error state if possible.
*
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index 0b5ed4722ac3..e0bcaa896803 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -30,6 +30,7 @@
#include <linux/kfifo.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>
+#include <linux/vmcore_info.h>
#include <acpi/apei.h>
#include <acpi/ghes.h>
#include <ras/ras_event.h>
@@ -765,6 +766,7 @@ static void pci_dev_aer_stats_incr(struct pci_dev *pdev,
break;
case AER_NONFATAL:
aer_info->dev_total_nonfatal_errs++;
+ hwerr_log_error_type(HWERR_RECOV_PCI);
counter = &aer_info->dev_nonfatal_errs[0];
max = AER_MAX_TYPEOF_UNCOR_ERRS;
break;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 124d2d309c58..41183aed8f5d 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -658,6 +658,18 @@ static void pci_release_host_bridge_dev(struct device *dev)
kfree(bridge);
}
+static const struct attribute_group *pci_host_bridge_groups[] = {
+#ifdef CONFIG_PCI_IDE
+ &pci_ide_attr_group,
+#endif
+ NULL
+};
+
+static const struct device_type pci_host_bridge_type = {
+ .groups = pci_host_bridge_groups,
+ .release = pci_release_host_bridge_dev,
+};
+
static void pci_init_host_bridge(struct pci_host_bridge *bridge)
{
INIT_LIST_HEAD(&bridge->windows);
@@ -677,6 +689,8 @@ static void pci_init_host_bridge(struct pci_host_bridge *bridge)
bridge->native_dpc = 1;
bridge->domain_nr = PCI_DOMAIN_NR_NOT_SET;
bridge->native_cxl_error = 1;
+ bridge->dev.type = &pci_host_bridge_type;
+ pci_ide_init_host_bridge(bridge);
device_initialize(&bridge->dev);
}
@@ -690,7 +704,6 @@ struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
return NULL;
pci_init_host_bridge(bridge);
- bridge->dev.release = pci_release_host_bridge_dev;
return bridge;
}
@@ -2296,6 +2309,17 @@ int pci_configure_extended_tags(struct pci_dev *dev, void *ign)
return 0;
}
+static void pci_dev3_init(struct pci_dev *pdev)
+{
+ u16 cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DEV3);
+ u32 val = 0;
+
+ if (!cap)
+ return;
+ pci_read_config_dword(pdev, cap + PCI_DEV3_STA, &val);
+ pdev->fm_enabled = !!(val & PCI_DEV3_STA_SEGMENT);
+}
+
/**
* pcie_relaxed_ordering_enabled - Probe for PCIe relaxed ordering enable
* @dev: PCI device to query
@@ -2680,6 +2704,8 @@ static void pci_init_capabilities(struct pci_dev *dev)
pci_doe_init(dev); /* Data Object Exchange */
pci_tph_init(dev); /* TLP Processing Hints */
pci_rebar_init(dev); /* Resizable BAR */
+ pci_dev3_init(dev); /* Device 3 capabilities */
+ pci_ide_init(dev); /* Link Integrity and Data Encryption */
pcie_report_downtraining(dev);
pci_init_reset_methods(dev);
@@ -2773,6 +2799,9 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
ret = device_add(&dev->dev);
WARN_ON(ret < 0);
+ /* Establish pdev->tsm for newly added (e.g. new SR-IOV VFs) */
+ pci_tsm_init(dev);
+
pci_npem_create(dev);
pci_doe_sysfs_init(dev);
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index ce5c25adef55..417a9ea59117 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -57,6 +57,12 @@ static void pci_destroy_dev(struct pci_dev *dev)
pci_doe_sysfs_teardown(dev);
pci_npem_remove(dev);
+ /*
+ * While device is in D0 drop the device from TSM link operations
+ * including unbind and disconnect (IDE + SPDM teardown).
+ */
+ pci_tsm_destroy(dev);
+
device_del(&dev->dev);
down_write(&pci_bus_sem);
@@ -64,6 +70,7 @@ static void pci_destroy_dev(struct pci_dev *dev)
up_write(&pci_bus_sem);
pci_doe_destroy(dev);
+ pci_ide_destroy(dev);
pcie_aspm_exit_link_state(dev);
pci_bridge_d3_update(dev);
pci_pwrctrl_unregister(&dev->dev);
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 53840634fbfc..e6e84dc62e82 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -282,6 +282,45 @@ static struct pci_dev *pci_get_dev_by_id(const struct pci_device_id *id,
return pdev;
}
+static struct pci_dev *pci_get_dev_by_id_reverse(const struct pci_device_id *id,
+ struct pci_dev *from)
+{
+ struct device *dev;
+ struct device *dev_start = NULL;
+ struct pci_dev *pdev = NULL;
+
+ if (from)
+ dev_start = &from->dev;
+ dev = bus_find_device_reverse(&pci_bus_type, dev_start, (void *)id,
+ match_pci_dev_by_id);
+ if (dev)
+ pdev = to_pci_dev(dev);
+ pci_dev_put(from);
+ return pdev;
+}
+
+enum pci_search_direction {
+ PCI_SEARCH_FORWARD,
+ PCI_SEARCH_REVERSE,
+};
+
+static struct pci_dev *__pci_get_subsys(unsigned int vendor, unsigned int device,
+ unsigned int ss_vendor, unsigned int ss_device,
+ struct pci_dev *from, enum pci_search_direction dir)
+{
+ struct pci_device_id id = {
+ .vendor = vendor,
+ .device = device,
+ .subvendor = ss_vendor,
+ .subdevice = ss_device,
+ };
+
+ if (dir == PCI_SEARCH_FORWARD)
+ return pci_get_dev_by_id(&id, from);
+ else
+ return pci_get_dev_by_id_reverse(&id, from);
+}
+
/**
* pci_get_subsys - begin or continue searching for a PCI device by vendor/subvendor/device/subdevice id
* @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
@@ -302,14 +341,8 @@ struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
unsigned int ss_vendor, unsigned int ss_device,
struct pci_dev *from)
{
- struct pci_device_id id = {
- .vendor = vendor,
- .device = device,
- .subvendor = ss_vendor,
- .subdevice = ss_device,
- };
-
- return pci_get_dev_by_id(&id, from);
+ return __pci_get_subsys(vendor, device, ss_vendor, ss_device, from,
+ PCI_SEARCH_FORWARD);
}
EXPORT_SYMBOL(pci_get_subsys);
@@ -334,6 +367,19 @@ struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
}
EXPORT_SYMBOL(pci_get_device);
+/*
+ * Same semantics as pci_get_device(), except walks the PCI device list
+ * in reverse discovery order.
+ */
+struct pci_dev *pci_get_device_reverse(unsigned int vendor,
+ unsigned int device,
+ struct pci_dev *from)
+{
+ return __pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from,
+ PCI_SEARCH_REVERSE);
+}
+EXPORT_SYMBOL(pci_get_device_reverse);
+
/**
* pci_get_class - begin or continue searching for a PCI device by class
* @class: search for a PCI device with this class designation
diff --git a/drivers/pci/tsm.c b/drivers/pci/tsm.c
new file mode 100644
index 000000000000..5fdcd7f2e820
--- /dev/null
+++ b/drivers/pci/tsm.c
@@ -0,0 +1,900 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Interface with platform TEE Security Manager (TSM) objects as defined by
+ * PCIe r7.0 section 11 TEE Device Interface Security Protocol (TDISP)
+ *
+ * Copyright(c) 2024-2025 Intel Corporation. All rights reserved.
+ */
+
+#define dev_fmt(fmt) "PCI/TSM: " fmt
+
+#include <linux/bitfield.h>
+#include <linux/pci.h>
+#include <linux/pci-doe.h>
+#include <linux/pci-tsm.h>
+#include <linux/sysfs.h>
+#include <linux/tsm.h>
+#include <linux/xarray.h>
+#include "pci.h"
+
+/*
+ * Provide a read/write lock against the init / exit of pdev tsm
+ * capabilities and arrival/departure of a TSM instance
+ */
+static DECLARE_RWSEM(pci_tsm_rwsem);
+
+/*
+ * Count of TSMs registered that support physical link operations vs device
+ * security state management.
+ */
+static int pci_tsm_link_count;
+static int pci_tsm_devsec_count;
+
+static const struct pci_tsm_ops *to_pci_tsm_ops(struct pci_tsm *tsm)
+{
+ return tsm->tsm_dev->pci_ops;
+}
+
+static inline bool is_dsm(struct pci_dev *pdev)
+{
+ return pdev->tsm && pdev->tsm->dsm_dev == pdev;
+}
+
+static inline bool has_tee(struct pci_dev *pdev)
+{
+ return pdev->devcap & PCI_EXP_DEVCAP_TEE;
+}
+
+/* 'struct pci_tsm_pf0' wraps 'struct pci_tsm' when ->dsm_dev == ->pdev (self) */
+static struct pci_tsm_pf0 *to_pci_tsm_pf0(struct pci_tsm *tsm)
+{
+ /*
+ * All "link" TSM contexts reference the device that hosts the DSM
+ * interface for a set of devices. Walk to the DSM device and cast its
+ * ->tsm context to a 'struct pci_tsm_pf0 *'.
+ */
+ struct pci_dev *pf0 = tsm->dsm_dev;
+
+ if (!is_pci_tsm_pf0(pf0) || !is_dsm(pf0)) {
+ pci_WARN_ONCE(tsm->pdev, 1, "invalid context object\n");
+ return NULL;
+ }
+
+ return container_of(pf0->tsm, struct pci_tsm_pf0, base_tsm);
+}
+
+static void tsm_remove(struct pci_tsm *tsm)
+{
+ struct pci_dev *pdev;
+
+ if (!tsm)
+ return;
+
+ pdev = tsm->pdev;
+ to_pci_tsm_ops(tsm)->remove(tsm);
+ pdev->tsm = NULL;
+}
+DEFINE_FREE(tsm_remove, struct pci_tsm *, if (_T) tsm_remove(_T))
+
+static void pci_tsm_walk_fns(struct pci_dev *pdev,
+ int (*cb)(struct pci_dev *pdev, void *data),
+ void *data)
+{
+ /* Walk subordinate physical functions */
+ for (int i = 0; i < 8; i++) {
+ struct pci_dev *pf __free(pci_dev_put) = pci_get_slot(
+ pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), i));
+
+ if (!pf)
+ continue;
+
+ /* on entry function 0 has already run @cb */
+ if (i > 0)
+ cb(pf, data);
+
+ /* walk virtual functions of each pf */
+ for (int j = 0; j < pci_num_vf(pf); j++) {
+ struct pci_dev *vf __free(pci_dev_put) =
+ pci_get_domain_bus_and_slot(
+ pci_domain_nr(pf->bus),
+ pci_iov_virtfn_bus(pf, j),
+ pci_iov_virtfn_devfn(pf, j));
+
+ if (!vf)
+ continue;
+
+ cb(vf, data);
+ }
+ }
+
+ /*
+ * Walk downstream devices, assumes that an upstream DSM is
+ * limited to downstream physical functions
+ */
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_UPSTREAM && is_dsm(pdev))
+ pci_walk_bus(pdev->subordinate, cb, data);
+}
+
+static void pci_tsm_walk_fns_reverse(struct pci_dev *pdev,
+ int (*cb)(struct pci_dev *pdev,
+ void *data),
+ void *data)
+{
+ /* Reverse walk downstream devices */
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_UPSTREAM && is_dsm(pdev))
+ pci_walk_bus_reverse(pdev->subordinate, cb, data);
+
+ /* Reverse walk subordinate physical functions */
+ for (int i = 7; i >= 0; i--) {
+ struct pci_dev *pf __free(pci_dev_put) = pci_get_slot(
+ pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), i));
+
+ if (!pf)
+ continue;
+
+ /* reverse walk virtual functions */
+ for (int j = pci_num_vf(pf) - 1; j >= 0; j--) {
+ struct pci_dev *vf __free(pci_dev_put) =
+ pci_get_domain_bus_and_slot(
+ pci_domain_nr(pf->bus),
+ pci_iov_virtfn_bus(pf, j),
+ pci_iov_virtfn_devfn(pf, j));
+
+ if (!vf)
+ continue;
+ cb(vf, data);
+ }
+
+ /* on exit, caller will run @cb on function 0 */
+ if (i > 0)
+ cb(pf, data);
+ }
+}
+
+static void link_sysfs_disable(struct pci_dev *pdev)
+{
+ sysfs_update_group(&pdev->dev.kobj, &pci_tsm_auth_attr_group);
+ sysfs_update_group(&pdev->dev.kobj, &pci_tsm_attr_group);
+}
+
+static void link_sysfs_enable(struct pci_dev *pdev)
+{
+ bool tee = has_tee(pdev);
+
+ pci_dbg(pdev, "%s Security Manager detected (%s%s%s)\n",
+ pdev->tsm ? "Device" : "Platform TEE",
+ pdev->ide_cap ? "IDE" : "", pdev->ide_cap && tee ? " " : "",
+ tee ? "TEE" : "");
+
+ sysfs_update_group(&pdev->dev.kobj, &pci_tsm_auth_attr_group);
+ sysfs_update_group(&pdev->dev.kobj, &pci_tsm_attr_group);
+}
+
+static int probe_fn(struct pci_dev *pdev, void *dsm)
+{
+ struct pci_dev *dsm_dev = dsm;
+ const struct pci_tsm_ops *ops = to_pci_tsm_ops(dsm_dev->tsm);
+
+ pdev->tsm = ops->probe(dsm_dev->tsm->tsm_dev, pdev);
+ pci_dbg(pdev, "setup TSM context: DSM: %s status: %s\n",
+ pci_name(dsm_dev), pdev->tsm ? "success" : "failed");
+ if (pdev->tsm)
+ link_sysfs_enable(pdev);
+ return 0;
+}
+
+static int pci_tsm_connect(struct pci_dev *pdev, struct tsm_dev *tsm_dev)
+{
+ int rc;
+ struct pci_tsm_pf0 *tsm_pf0;
+ const struct pci_tsm_ops *ops = tsm_dev->pci_ops;
+ struct pci_tsm *pci_tsm __free(tsm_remove) = ops->probe(tsm_dev, pdev);
+
+ /* connect() mutually exclusive with subfunction pci_tsm_init() */
+ lockdep_assert_held_write(&pci_tsm_rwsem);
+
+ if (!pci_tsm)
+ return -ENXIO;
+
+ pdev->tsm = pci_tsm;
+ tsm_pf0 = to_pci_tsm_pf0(pdev->tsm);
+
+ /* mutex_intr assumes connect() is always sysfs/user driven */
+ ACQUIRE(mutex_intr, lock)(&tsm_pf0->lock);
+ if ((rc = ACQUIRE_ERR(mutex_intr, &lock)))
+ return rc;
+
+ rc = ops->connect(pdev);
+ if (rc)
+ return rc;
+
+ pdev->tsm = no_free_ptr(pci_tsm);
+
+ /*
+ * Now that the DSM is established, probe() all the potential
+ * dependent functions. Failure to probe a function is not fatal
+ * to connect(); it just disables subsequent security operations
+ * for that function.
+ *
+ * Note this is done unconditionally, without regard to finding
+ * PCI_EXP_DEVCAP_TEE on the dependent function, for robustness. The DSM
+ * is the ultimate arbiter of security state relative to a given
+ * interface id, and if it says it can manage TDISP state of a function,
+ * let it.
+ */
+ if (has_tee(pdev))
+ pci_tsm_walk_fns(pdev, probe_fn, pdev);
+ return 0;
+}
+
+static ssize_t connect_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct tsm_dev *tsm_dev;
+ int rc;
+
+ ACQUIRE(rwsem_read_intr, lock)(&pci_tsm_rwsem);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &lock)))
+ return rc;
+
+ if (!pdev->tsm)
+ return sysfs_emit(buf, "\n");
+
+ tsm_dev = pdev->tsm->tsm_dev;
+ return sysfs_emit(buf, "%s\n", dev_name(&tsm_dev->dev));
+}
+
+/* Is @tsm_dev managing physical link / session properties... */
+static bool is_link_tsm(struct tsm_dev *tsm_dev)
+{
+ return tsm_dev && tsm_dev->pci_ops && tsm_dev->pci_ops->link_ops.probe;
+}
+
+/* ...or is @tsm_dev managing device security state? */
+static bool is_devsec_tsm(struct tsm_dev *tsm_dev)
+{
+ return tsm_dev && tsm_dev->pci_ops && tsm_dev->pci_ops->devsec_ops.lock;
+}
+
+static ssize_t connect_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int rc, id;
+
+ rc = sscanf(buf, "tsm%d\n", &id);
+ if (rc != 1)
+ return -EINVAL;
+
+ ACQUIRE(rwsem_write_kill, lock)(&pci_tsm_rwsem);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &lock)))
+ return rc;
+
+ if (pdev->tsm)
+ return -EBUSY;
+
+ struct tsm_dev *tsm_dev __free(put_tsm_dev) = find_tsm_dev(id);
+ if (!is_link_tsm(tsm_dev))
+ return -ENXIO;
+
+ rc = pci_tsm_connect(pdev, tsm_dev);
+ if (rc)
+ return rc;
+ return len;
+}
+static DEVICE_ATTR_RW(connect);
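+
+/*
+ * Illustrative example (not part of this patch): connecting a device to a
+ * registered TSM from userspace, where "tsm0" is a hypothetical instance:
+ *
+ *	# echo tsm0 > /sys/bus/pci/devices/0000:01:00.0/tsm/connect
+ *	# cat /sys/bus/pci/devices/0000:01:00.0/tsm/connect
+ *	tsm0
+ */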
+
+static int remove_fn(struct pci_dev *pdev, void *data)
+{
+ tsm_remove(pdev->tsm);
+ link_sysfs_disable(pdev);
+ return 0;
+}
+
+/*
+ * Note, this helper returns an int and takes a @data argument only for
+ * compatibility with the pci_walk_bus() callback prototype. pci_tsm_unbind()
+ * always succeeds.
+ */
+static int __pci_tsm_unbind(struct pci_dev *pdev, void *data)
+{
+ struct pci_tdi *tdi;
+ struct pci_tsm_pf0 *tsm_pf0;
+
+ lockdep_assert_held(&pci_tsm_rwsem);
+
+ if (!pdev->tsm)
+ return 0;
+
+ tsm_pf0 = to_pci_tsm_pf0(pdev->tsm);
+ guard(mutex)(&tsm_pf0->lock);
+
+ tdi = pdev->tsm->tdi;
+ if (!tdi)
+ return 0;
+
+ to_pci_tsm_ops(pdev->tsm)->unbind(tdi);
+ pdev->tsm->tdi = NULL;
+
+ return 0;
+}
+
+void pci_tsm_unbind(struct pci_dev *pdev)
+{
+ guard(rwsem_read)(&pci_tsm_rwsem);
+ __pci_tsm_unbind(pdev, NULL);
+}
+EXPORT_SYMBOL_GPL(pci_tsm_unbind);
+
+/**
+ * pci_tsm_bind() - Bind @pdev as a TDI for @kvm
+ * @pdev: PCI device function to bind
+ * @kvm: Private memory attach context
+ * @tdi_id: Identifier (virtual BDF) for the TDI as referenced by the TSM and DSM
+ *
+ * Returns 0 on success, or a negative error code on failure.
+ *
+ * Context: Caller is responsible for constraining the bind lifetime to the
+ * registered state of the device. For example, pci_tsm_bind() /
+ * pci_tsm_unbind() limited to the VFIO driver bound state of the device.
+ */
+int pci_tsm_bind(struct pci_dev *pdev, struct kvm *kvm, u32 tdi_id)
+{
+ struct pci_tsm_pf0 *tsm_pf0;
+ struct pci_tdi *tdi;
+
+ if (!kvm)
+ return -EINVAL;
+
+ guard(rwsem_read)(&pci_tsm_rwsem);
+
+ if (!pdev->tsm)
+ return -EINVAL;
+
+ if (!is_link_tsm(pdev->tsm->tsm_dev))
+ return -ENXIO;
+
+ tsm_pf0 = to_pci_tsm_pf0(pdev->tsm);
+ guard(mutex)(&tsm_pf0->lock);
+
+ /* Resolve races to bind a TDI */
+ if (pdev->tsm->tdi) {
+ if (pdev->tsm->tdi->kvm != kvm)
+ return -EBUSY;
+ return 0;
+ }
+
+ tdi = to_pci_tsm_ops(pdev->tsm)->bind(pdev, kvm, tdi_id);
+ if (IS_ERR(tdi))
+ return PTR_ERR(tdi);
+
+ pdev->tsm->tdi = tdi;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_tsm_bind);
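+
+/*
+ * Illustrative sketch (not part of this patch): a VFIO variant driver is
+ * expected to constrain bind to its bound state (ioctl plumbing assumed):
+ *
+ *	// in the device-open / attach path
+ *	rc = pci_tsm_bind(pdev, kvm, tdi_id);
+ *	if (rc)
+ *		return rc;
+ *	...
+ *	// before the driver releases the device
+ *	pci_tsm_unbind(pdev);
+ */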
+
+/**
+ * pci_tsm_guest_req() - helper to marshal guest requests to the TSM driver
+ * @pdev: @pdev representing a bound tdi
+ * @scope: caller asserts this passthrough request is limited to TDISP operations
+ * @req_in: Input payload forwarded from the guest
+ * @in_len: Length of @req_in
+ * @req_out: Output payload buffer response to the guest
+ * @out_len: Length of @req_out on input, bytes filled in @req_out on output
+ * @tsm_code: Optional TSM arch specific result code for the guest TSM
+ *
+ * This is a common entry point for requests triggered by userspace KVM-exit
+ * service handlers responding to TDI information or state change requests. The
+ * scope parameter limits requests to TDISP state management, or limited debug.
+ * This path is only suitable for commands and results for which the host
+ * kernel has no use; the host is only facilitating guest-to-TSM communication.
+ *
+ * Returns 0 on success, -error on failure, or a positive "residue" on success
+ * when @req_out is filled with less than @out_len bytes, or when @req_out is
+ * NULL and a residue number of bytes was not consumed from @req_in. On success
+ * or failure @tsm_code may be populated with a TSM implementation-specific
+ * result code for the guest to consume.
+ *
+ * Context: Caller is responsible for calling this within the pci_tsm_bind()
+ * state of the TDI.
+ */
+ssize_t pci_tsm_guest_req(struct pci_dev *pdev, enum pci_tsm_req_scope scope,
+ sockptr_t req_in, size_t in_len, sockptr_t req_out,
+ size_t out_len, u64 *tsm_code)
+{
+ struct pci_tsm_pf0 *tsm_pf0;
+ struct pci_tdi *tdi;
+ int rc;
+
+ /* Forbid requests that are not directly related to TDISP operations */
+ if (scope > PCI_TSM_REQ_STATE_CHANGE)
+ return -EINVAL;
+
+ ACQUIRE(rwsem_read_intr, lock)(&pci_tsm_rwsem);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &lock)))
+ return rc;
+
+ if (!pdev->tsm)
+ return -ENXIO;
+
+ if (!is_link_tsm(pdev->tsm->tsm_dev))
+ return -ENXIO;
+
+ tsm_pf0 = to_pci_tsm_pf0(pdev->tsm);
+ ACQUIRE(mutex_intr, ops_lock)(&tsm_pf0->lock);
+ if ((rc = ACQUIRE_ERR(mutex_intr, &ops_lock)))
+ return rc;
+
+ tdi = pdev->tsm->tdi;
+ if (!tdi)
+ return -ENXIO;
+ return to_pci_tsm_ops(pdev->tsm)->guest_req(tdi, scope, req_in, in_len,
+ req_out, out_len, tsm_code);
+}
+EXPORT_SYMBOL_GPL(pci_tsm_guest_req);
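+
+/*
+ * Illustrative sketch (not part of this patch): interpreting the return
+ * value in a KVM-exit service handler (variable names hypothetical):
+ *
+ *	ssize_t residue = pci_tsm_guest_req(pdev, PCI_TSM_REQ_STATE_CHANGE,
+ *					    in, in_len, out, out_len, &code);
+ *	if (residue < 0)
+ *		return residue;		// forward the failure (and @code)
+ *	filled = out_len - residue;	// bytes valid in @req_out
+ */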
+
+static void pci_tsm_unbind_all(struct pci_dev *pdev)
+{
+ pci_tsm_walk_fns_reverse(pdev, __pci_tsm_unbind, NULL);
+ __pci_tsm_unbind(pdev, NULL);
+}
+
+static void __pci_tsm_disconnect(struct pci_dev *pdev)
+{
+ struct pci_tsm_pf0 *tsm_pf0 = to_pci_tsm_pf0(pdev->tsm);
+ const struct pci_tsm_ops *ops = to_pci_tsm_ops(pdev->tsm);
+
+ /* disconnect() mutually exclusive with subfunction pci_tsm_init() */
+ lockdep_assert_held_write(&pci_tsm_rwsem);
+
+ pci_tsm_unbind_all(pdev);
+
+ /*
+ * disconnect() is uninterruptible as it may be called for device
+ * teardown
+ */
+ guard(mutex)(&tsm_pf0->lock);
+ pci_tsm_walk_fns_reverse(pdev, remove_fn, NULL);
+ ops->disconnect(pdev);
+}
+
+static void pci_tsm_disconnect(struct pci_dev *pdev)
+{
+ __pci_tsm_disconnect(pdev);
+ tsm_remove(pdev->tsm);
+}
+
+static ssize_t disconnect_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t len)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct tsm_dev *tsm_dev;
+ int rc;
+
+ ACQUIRE(rwsem_write_kill, lock)(&pci_tsm_rwsem);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &lock)))
+ return rc;
+
+ if (!pdev->tsm)
+ return -ENXIO;
+
+ tsm_dev = pdev->tsm->tsm_dev;
+ if (!sysfs_streq(buf, dev_name(&tsm_dev->dev)))
+ return -EINVAL;
+
+ pci_tsm_disconnect(pdev);
+ return len;
+}
+static DEVICE_ATTR_WO(disconnect);
+
+static ssize_t bound_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_tsm_pf0 *tsm_pf0;
+ struct pci_tsm *tsm;
+ int rc;
+
+ ACQUIRE(rwsem_read_intr, lock)(&pci_tsm_rwsem);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &lock)))
+ return rc;
+
+ tsm = pdev->tsm;
+ if (!tsm)
+ return sysfs_emit(buf, "\n");
+ tsm_pf0 = to_pci_tsm_pf0(tsm);
+
+ ACQUIRE(mutex_intr, ops_lock)(&tsm_pf0->lock);
+ if ((rc = ACQUIRE_ERR(mutex_intr, &ops_lock)))
+ return rc;
+
+ if (!tsm->tdi)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%s\n", dev_name(&tsm->tsm_dev->dev));
+}
+static DEVICE_ATTR_RO(bound);
+
+static ssize_t dsm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_tsm *tsm;
+ int rc;
+
+ ACQUIRE(rwsem_read_intr, lock)(&pci_tsm_rwsem);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &lock)))
+ return rc;
+
+ tsm = pdev->tsm;
+ if (!tsm)
+ return sysfs_emit(buf, "\n");
+
+ return sysfs_emit(buf, "%s\n", pci_name(tsm->dsm_dev));
+}
+static DEVICE_ATTR_RO(dsm);
+
+/* The 'authenticated' attribute is exclusive to the presence of a 'link' TSM */
+static bool pci_tsm_link_group_visible(struct kobject *kobj)
+{
+ struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
+
+ if (!pci_tsm_link_count)
+ return false;
+
+ if (!pci_is_pcie(pdev))
+ return false;
+
+ if (is_pci_tsm_pf0(pdev))
+ return true;
+
+ /*
+ * Show 'authenticated' and other attributes for the managed
+ * sub-functions of a DSM.
+ */
+ if (pdev->tsm)
+ return true;
+
+ return false;
+}
+DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(pci_tsm_link);
+
+/*
+ * 'link' and 'devsec' TSMs share the same 'tsm/' sysfs group, so the TSM type
+ * specific attributes need individual visibility checks.
+ */
+static umode_t pci_tsm_attr_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ if (pci_tsm_link_group_visible(kobj)) {
+ struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
+
+ if (attr == &dev_attr_bound.attr) {
+ if (is_pci_tsm_pf0(pdev) && has_tee(pdev))
+ return attr->mode;
+ if (pdev->tsm && has_tee(pdev->tsm->dsm_dev))
+ return attr->mode;
+ }
+
+ if (attr == &dev_attr_dsm.attr) {
+ if (is_pci_tsm_pf0(pdev))
+ return attr->mode;
+ if (pdev->tsm && has_tee(pdev->tsm->dsm_dev))
+ return attr->mode;
+ }
+
+ if (attr == &dev_attr_connect.attr ||
+ attr == &dev_attr_disconnect.attr) {
+ if (is_pci_tsm_pf0(pdev))
+ return attr->mode;
+ }
+ }
+
+ return 0;
+}
+
+static bool pci_tsm_group_visible(struct kobject *kobj)
+{
+ return pci_tsm_link_group_visible(kobj);
+}
+DEFINE_SYSFS_GROUP_VISIBLE(pci_tsm);
+
+static struct attribute *pci_tsm_attrs[] = {
+ &dev_attr_connect.attr,
+ &dev_attr_disconnect.attr,
+ &dev_attr_bound.attr,
+ &dev_attr_dsm.attr,
+ NULL
+};
+
+const struct attribute_group pci_tsm_attr_group = {
+ .name = "tsm",
+ .attrs = pci_tsm_attrs,
+ .is_visible = SYSFS_GROUP_VISIBLE(pci_tsm),
+};
+
+static ssize_t authenticated_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ /*
+ * When the SPDM session is established via the TSM, the 'authenticated'
+ * state of the device is identical to the connect state.
+ */
+ return connect_show(dev, attr, buf);
+}
+static DEVICE_ATTR_RO(authenticated);
+
+static struct attribute *pci_tsm_auth_attrs[] = {
+ &dev_attr_authenticated.attr,
+ NULL
+};
+
+const struct attribute_group pci_tsm_auth_attr_group = {
+ .attrs = pci_tsm_auth_attrs,
+ .is_visible = SYSFS_GROUP_VISIBLE(pci_tsm_link),
+};
+
+/*
+ * Retrieve the physical function 0 device, regardless of whether it has TEE capability
+ */
+static struct pci_dev *pf0_dev_get(struct pci_dev *pdev)
+{
+ struct pci_dev *pf_dev = pci_physfn(pdev);
+
+ if (PCI_FUNC(pf_dev->devfn) == 0)
+ return pci_dev_get(pf_dev);
+
+ return pci_get_slot(pf_dev->bus,
+ pf_dev->devfn - PCI_FUNC(pf_dev->devfn));
+}
+
+/*
+ * Find the PCI Device instance that serves as the Device Security Manager (DSM)
+ * for @pdev. Note that no additional reference is held for the resulting device
+ * because that resulting object always has a registered lifetime
+ * greater-than-or-equal to that of the @pdev argument. This is by virtue of
+ * @pdev being a descendant of, or identical to, the returned DSM device.
+ */
+static struct pci_dev *find_dsm_dev(struct pci_dev *pdev)
+{
+ struct device *grandparent;
+ struct pci_dev *uport;
+
+ if (is_pci_tsm_pf0(pdev))
+ return pdev;
+
+ struct pci_dev *pf0 __free(pci_dev_put) = pf0_dev_get(pdev);
+ if (!pf0)
+ return NULL;
+
+ if (is_dsm(pf0))
+ return pf0;
+
+ /*
+ * For cases where a switch may be hosting TDISP services on behalf of
+ * downstream devices, check the first upstream port relative to this
+ * endpoint.
+ */
+ if (!pdev->dev.parent)
+ return NULL;
+ grandparent = pdev->dev.parent->parent;
+ if (!grandparent)
+ return NULL;
+ if (!dev_is_pci(grandparent))
+ return NULL;
+ uport = to_pci_dev(grandparent);
+ if (!pci_is_pcie(uport) ||
+ pci_pcie_type(uport) != PCI_EXP_TYPE_UPSTREAM)
+ return NULL;
+
+ if (is_dsm(uport))
+ return uport;
+ return NULL;
+}
+
+/**
+ * pci_tsm_tdi_constructor() - base 'struct pci_tdi' initialization for link TSMs
+ * @pdev: PCI device function representing the TDI
+ * @tdi: context to initialize
+ * @kvm: Private memory attach context
+ * @tdi_id: Identifier (virtual BDF) for the TDI as referenced by the TSM and DSM
+ */
+void pci_tsm_tdi_constructor(struct pci_dev *pdev, struct pci_tdi *tdi,
+ struct kvm *kvm, u32 tdi_id)
+{
+ tdi->pdev = pdev;
+ tdi->kvm = kvm;
+ tdi->tdi_id = tdi_id;
+}
+EXPORT_SYMBOL_GPL(pci_tsm_tdi_constructor);
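+
+/*
+ * Illustrative sketch (not part of this patch): a link TSM driver's
+ * ->bind() embedding 'struct pci_tdi' in a driver-private context
+ * ('struct my_tdi' is hypothetical):
+ *
+ *	struct my_tdi *mtdi = kzalloc(sizeof(*mtdi), GFP_KERNEL);
+ *
+ *	if (!mtdi)
+ *		return ERR_PTR(-ENOMEM);
+ *	pci_tsm_tdi_constructor(pdev, &mtdi->tdi, kvm, tdi_id);
+ *	...
+ *	return &mtdi->tdi;
+ */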
+
+/**
+ * pci_tsm_link_constructor() - base 'struct pci_tsm' initialization for link TSMs
+ * @pdev: The PCI device
+ * @tsm: context to initialize
+ * @tsm_dev: Platform TEE Security Manager, initiator of security operations
+ */
+int pci_tsm_link_constructor(struct pci_dev *pdev, struct pci_tsm *tsm,
+ struct tsm_dev *tsm_dev)
+{
+ if (!is_link_tsm(tsm_dev))
+ return -EINVAL;
+
+ tsm->dsm_dev = find_dsm_dev(pdev);
+ if (!tsm->dsm_dev) {
+ pci_warn(pdev, "failed to find Device Security Manager\n");
+ return -ENXIO;
+ }
+ tsm->pdev = pdev;
+ tsm->tsm_dev = tsm_dev;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_tsm_link_constructor);
+
+/**
+ * pci_tsm_pf0_constructor() - common 'struct pci_tsm_pf0' (DSM) initialization
+ * @pdev: Physical Function 0 PCI device (as indicated by is_pci_tsm_pf0())
+ * @tsm: context to initialize
+ * @tsm_dev: Platform TEE Security Manager, initiator of security operations
+ */
+int pci_tsm_pf0_constructor(struct pci_dev *pdev, struct pci_tsm_pf0 *tsm,
+ struct tsm_dev *tsm_dev)
+{
+ mutex_init(&tsm->lock);
+ tsm->doe_mb = pci_find_doe_mailbox(pdev, PCI_VENDOR_ID_PCI_SIG,
+ PCI_DOE_FEATURE_CMA);
+ if (!tsm->doe_mb) {
+ pci_warn(pdev, "TSM init failure, no CMA mailbox\n");
+ return -ENODEV;
+ }
+
+ return pci_tsm_link_constructor(pdev, &tsm->base_tsm, tsm_dev);
+}
+EXPORT_SYMBOL_GPL(pci_tsm_pf0_constructor);
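+
+/*
+ * Illustrative sketch (not part of this patch): a link TSM driver's
+ * ->probe() picking the constructor by function type ('ctx' is a
+ * hypothetical driver-private object):
+ *
+ *	if (is_pci_tsm_pf0(pdev))
+ *		rc = pci_tsm_pf0_constructor(pdev, &ctx->pf0, tsm_dev);
+ *	else
+ *		rc = pci_tsm_link_constructor(pdev, &ctx->tsm, tsm_dev);
+ */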
+
+void pci_tsm_pf0_destructor(struct pci_tsm_pf0 *pf0_tsm)
+{
+ mutex_destroy(&pf0_tsm->lock);
+}
+EXPORT_SYMBOL_GPL(pci_tsm_pf0_destructor);
+
+int pci_tsm_register(struct tsm_dev *tsm_dev)
+{
+ struct pci_dev *pdev = NULL;
+
+ if (!tsm_dev)
+ return -EINVAL;
+
+ /* The TSM device must implement exactly one of link_ops or devsec_ops */
+ if (!is_link_tsm(tsm_dev) && !is_devsec_tsm(tsm_dev))
+ return -EINVAL;
+
+ if (is_link_tsm(tsm_dev) && is_devsec_tsm(tsm_dev))
+ return -EINVAL;
+
+ guard(rwsem_write)(&pci_tsm_rwsem);
+
+ /* On first enable, update sysfs groups */
+ if (is_link_tsm(tsm_dev) && pci_tsm_link_count++ == 0) {
+ for_each_pci_dev(pdev)
+ if (is_pci_tsm_pf0(pdev))
+ link_sysfs_enable(pdev);
+ } else if (is_devsec_tsm(tsm_dev)) {
+ pci_tsm_devsec_count++;
+ }
+
+ return 0;
+}
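+
+/*
+ * Illustrative sketch (not part of this patch): a 'link' TSM driver
+ * registers with link_ops.probe populated (per is_link_tsm()); other
+ * ops-structure details are assumed from the rest of this series.
+ *
+ *	static const struct pci_tsm_ops my_pci_ops = {
+ *		.link_ops = {
+ *			.probe = my_probe,
+ *			...
+ *		},
+ *	};
+ *
+ *	tsm_dev->pci_ops = &my_pci_ops;
+ *	rc = pci_tsm_register(tsm_dev);
+ */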
+
+static void pci_tsm_fn_exit(struct pci_dev *pdev)
+{
+ __pci_tsm_unbind(pdev, NULL);
+ tsm_remove(pdev->tsm);
+}
+
+/**
+ * __pci_tsm_destroy() - destroy the TSM context for @pdev
+ * @pdev: device to cleanup
+ * @tsm_dev: the TSM device being removed, or NULL if @pdev is being removed.
+ *
+ * At device removal or TSM unregistration all established context
+ * with the TSM is torn down. Additionally, if there are no more TSMs
+ * registered, the PCI tsm/ sysfs attributes are hidden.
+ */
+static void __pci_tsm_destroy(struct pci_dev *pdev, struct tsm_dev *tsm_dev)
+{
+ struct pci_tsm *tsm = pdev->tsm;
+
+ lockdep_assert_held_write(&pci_tsm_rwsem);
+
+ /*
+ * First, handle the TSM removal case to shut down @pdev sysfs. This is
+ * skipped if the device itself is being removed since sysfs goes away
+ * naturally at that point.
+ */
+ if (is_link_tsm(tsm_dev) && is_pci_tsm_pf0(pdev) && !pci_tsm_link_count)
+ link_sysfs_disable(pdev);
+
+ /* Nothing else to do if this device never attached to the departing TSM */
+ if (!tsm)
+ return;
+
+ /* Now lookup the tsm_dev to destroy TSM context */
+ if (!tsm_dev)
+ tsm_dev = tsm->tsm_dev;
+ else if (tsm_dev != tsm->tsm_dev)
+ return;
+
+ if (is_link_tsm(tsm_dev) && is_pci_tsm_pf0(pdev))
+ pci_tsm_disconnect(pdev);
+ else
+ pci_tsm_fn_exit(pdev);
+}
+
+void pci_tsm_destroy(struct pci_dev *pdev)
+{
+ guard(rwsem_write)(&pci_tsm_rwsem);
+ __pci_tsm_destroy(pdev, NULL);
+}
+
+void pci_tsm_init(struct pci_dev *pdev)
+{
+ guard(rwsem_read)(&pci_tsm_rwsem);
+
+ /*
+ * Subfunctions are either probed synchronously with connect(), or later
+ * when the SR-IOV configuration is changed, or, unlikely, when
+ * connect() raced initial bus scanning.
+ */
+ if (pdev->tsm)
+ return;
+
+ if (pci_tsm_link_count) {
+ struct pci_dev *dsm = find_dsm_dev(pdev);
+
+ if (!dsm)
+ return;
+
+ /*
+ * The only path to init a Device Security Manager capable
+ * device is via connect().
+ */
+ if (!dsm->tsm)
+ return;
+
+ probe_fn(pdev, dsm);
+ }
+}
+
+void pci_tsm_unregister(struct tsm_dev *tsm_dev)
+{
+ struct pci_dev *pdev = NULL;
+
+ guard(rwsem_write)(&pci_tsm_rwsem);
+ if (is_link_tsm(tsm_dev))
+ pci_tsm_link_count--;
+ if (is_devsec_tsm(tsm_dev))
+ pci_tsm_devsec_count--;
+ for_each_pci_dev_reverse(pdev)
+ __pci_tsm_destroy(pdev, tsm_dev);
+}
+
+int pci_tsm_doe_transfer(struct pci_dev *pdev, u8 type, const void *req,
+ size_t req_sz, void *resp, size_t resp_sz)
+{
+ struct pci_tsm_pf0 *tsm;
+
+ if (!pdev->tsm || !is_pci_tsm_pf0(pdev))
+ return -ENXIO;
+
+ tsm = to_pci_tsm_pf0(pdev->tsm);
+ if (!tsm->doe_mb)
+ return -ENXIO;
+
+ return pci_doe(tsm->doe_mb, PCI_VENDOR_ID_PCI_SIG, type, req, req_sz,
+ resp, resp_sz);
+}
+EXPORT_SYMBOL_GPL(pci_tsm_doe_transfer);
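+
+/*
+ * Illustrative sketch (not part of this patch): a TSM driver relaying a
+ * CMA/SPDM message over the DSM's DOE mailbox (buffers assumed):
+ *
+ *	rc = pci_tsm_doe_transfer(pdev, PCI_DOE_FEATURE_CMA, req, req_sz,
+ *				  resp, resp_sz);
+ *	if (rc < 0)
+ *		return rc;
+ */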
diff --git a/drivers/pinctrl/nuvoton/pinctrl-ma35.c b/drivers/pinctrl/nuvoton/pinctrl-ma35.c
index cdad01d68a37..8d71dc53cc1d 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-ma35.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-ma35.c
@@ -81,10 +81,6 @@
#define MVOLT_1800 0
#define MVOLT_3300 1
-/* Non-constant mask variant of FIELD_GET() and FIELD_PREP() */
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
-
static const char * const gpio_group_name[] = {
"gpioa", "gpiob", "gpioc", "gpiod", "gpioe", "gpiof", "gpiog",
"gpioh", "gpioi", "gpioj", "gpiok", "gpiol", "gpiom", "gpion",
diff --git a/drivers/remoteproc/imx_dsp_rproc.c b/drivers/remoteproc/imx_dsp_rproc.c
index 6e78a01755c7..5130a35214c9 100644
--- a/drivers/remoteproc/imx_dsp_rproc.c
+++ b/drivers/remoteproc/imx_dsp_rproc.c
@@ -261,56 +261,6 @@ static int imx8ulp_dsp_reset(struct imx_dsp_rproc *priv)
return 0;
}
-/* Specific configuration for i.MX8MP */
-static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8mp = {
- .att = imx_dsp_rproc_att_imx8mp,
- .att_size = ARRAY_SIZE(imx_dsp_rproc_att_imx8mp),
- .method = IMX_RPROC_RESET_CONTROLLER,
-};
-
-static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8mp = {
- .dcfg = &dsp_rproc_cfg_imx8mp,
- .reset = imx8mp_dsp_reset,
-};
-
-/* Specific configuration for i.MX8ULP */
-static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8ulp = {
- .src_reg = IMX8ULP_SIM_LPAV_REG_SYSCTRL0,
- .src_mask = IMX8ULP_SYSCTRL0_DSP_STALL,
- .src_start = 0,
- .src_stop = IMX8ULP_SYSCTRL0_DSP_STALL,
- .att = imx_dsp_rproc_att_imx8ulp,
- .att_size = ARRAY_SIZE(imx_dsp_rproc_att_imx8ulp),
- .method = IMX_RPROC_MMIO,
-};
-
-static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8ulp = {
- .dcfg = &dsp_rproc_cfg_imx8ulp,
- .reset = imx8ulp_dsp_reset,
-};
-
-/* Specific configuration for i.MX8QXP */
-static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8qxp = {
- .att = imx_dsp_rproc_att_imx8qxp,
- .att_size = ARRAY_SIZE(imx_dsp_rproc_att_imx8qxp),
- .method = IMX_RPROC_SCU_API,
-};
-
-static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8qxp = {
- .dcfg = &dsp_rproc_cfg_imx8qxp,
-};
-
-/* Specific configuration for i.MX8QM */
-static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8qm = {
- .att = imx_dsp_rproc_att_imx8qm,
- .att_size = ARRAY_SIZE(imx_dsp_rproc_att_imx8qm),
- .method = IMX_RPROC_SCU_API,
-};
-
-static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8qm = {
- .dcfg = &dsp_rproc_cfg_imx8qm,
-};
-
static int imx_dsp_rproc_ready(struct rproc *rproc)
{
struct imx_dsp_rproc *priv = rproc->priv;
@@ -388,6 +338,28 @@ static int imx_dsp_rproc_handle_rsc(struct rproc *rproc, u32 rsc_type,
return RSC_HANDLED;
}
+static int imx_dsp_rproc_mmio_start(struct rproc *rproc)
+{
+ struct imx_dsp_rproc *priv = rproc->priv;
+ const struct imx_rproc_dcfg *dcfg = priv->dsp_dcfg->dcfg;
+
+ return regmap_update_bits(priv->regmap, dcfg->src_reg, dcfg->src_mask, dcfg->src_start);
+}
+
+static int imx_dsp_rproc_reset_ctrl_start(struct rproc *rproc)
+{
+ struct imx_dsp_rproc *priv = rproc->priv;
+
+ return reset_control_deassert(priv->run_stall);
+}
+
+static int imx_dsp_rproc_scu_api_start(struct rproc *rproc)
+{
+ struct imx_dsp_rproc *priv = rproc->priv;
+
+ return imx_sc_pm_cpu_start(priv->ipc_handle, IMX_SC_R_DSP, true, rproc->bootaddr);
+}
+
/*
* Start function for rproc_ops
*
@@ -404,32 +376,41 @@ static int imx_dsp_rproc_start(struct rproc *rproc)
struct device *dev = rproc->dev.parent;
int ret;
- switch (dcfg->method) {
- case IMX_RPROC_MMIO:
- ret = regmap_update_bits(priv->regmap,
- dcfg->src_reg,
- dcfg->src_mask,
- dcfg->src_start);
- break;
- case IMX_RPROC_SCU_API:
- ret = imx_sc_pm_cpu_start(priv->ipc_handle,
- IMX_SC_R_DSP,
- true,
- rproc->bootaddr);
- break;
- case IMX_RPROC_RESET_CONTROLLER:
- ret = reset_control_deassert(priv->run_stall);
- break;
- default:
+ if (!dcfg->ops || !dcfg->ops->start)
return -EOPNOTSUPP;
- }
- if (ret)
+ ret = dcfg->ops->start(rproc);
+ if (ret) {
dev_err(dev, "Failed to enable remote core!\n");
- else if (priv->flags & WAIT_FW_READY)
+ return ret;
+ }
+
+ if (priv->flags & WAIT_FW_READY)
return imx_dsp_rproc_ready(rproc);
- return ret;
+ return 0;
+}
+
+static int imx_dsp_rproc_mmio_stop(struct rproc *rproc)
+{
+ struct imx_dsp_rproc *priv = rproc->priv;
+ const struct imx_rproc_dcfg *dcfg = priv->dsp_dcfg->dcfg;
+
+ return regmap_update_bits(priv->regmap, dcfg->src_reg, dcfg->src_mask, dcfg->src_stop);
+}
+
+static int imx_dsp_rproc_reset_ctrl_stop(struct rproc *rproc)
+{
+ struct imx_dsp_rproc *priv = rproc->priv;
+
+ return reset_control_assert(priv->run_stall);
+}
+
+static int imx_dsp_rproc_scu_api_stop(struct rproc *rproc)
+{
+ struct imx_dsp_rproc *priv = rproc->priv;
+
+ return imx_sc_pm_cpu_start(priv->ipc_handle, IMX_SC_R_DSP, false, rproc->bootaddr);
}
/*
@@ -449,30 +430,18 @@ static int imx_dsp_rproc_stop(struct rproc *rproc)
return 0;
}
- switch (dcfg->method) {
- case IMX_RPROC_MMIO:
- ret = regmap_update_bits(priv->regmap, dcfg->src_reg, dcfg->src_mask,
- dcfg->src_stop);
- break;
- case IMX_RPROC_SCU_API:
- ret = imx_sc_pm_cpu_start(priv->ipc_handle,
- IMX_SC_R_DSP,
- false,
- rproc->bootaddr);
- break;
- case IMX_RPROC_RESET_CONTROLLER:
- ret = reset_control_assert(priv->run_stall);
- break;
- default:
+ if (!dcfg->ops || !dcfg->ops->stop)
return -EOPNOTSUPP;
- }
- if (ret)
+ ret = dcfg->ops->stop(rproc);
+ if (ret) {
dev_err(dev, "Failed to stop remote core\n");
- else
- priv->flags &= ~REMOTE_IS_READY;
+ return ret;
+ }
- return ret;
+ priv->flags &= ~REMOTE_IS_READY;
+
+ return 0;
}
/**
@@ -689,11 +658,9 @@ static int imx_dsp_rproc_add_carveout(struct imx_dsp_rproc *priv)
struct rproc *rproc = priv->rproc;
struct device *dev = rproc->dev.parent;
struct device_node *np = dev->of_node;
- struct of_phandle_iterator it;
struct rproc_mem_entry *mem;
- struct reserved_mem *rmem;
void __iomem *cpu_addr;
- int a;
+ int a, i = 0;
u64 da;
/* Remap required addresses */
@@ -724,49 +691,40 @@ static int imx_dsp_rproc_add_carveout(struct imx_dsp_rproc *priv)
rproc_add_carveout(rproc, mem);
}
- of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
- while (of_phandle_iterator_next(&it) == 0) {
+ while (1) {
+ int err;
+ struct resource res;
+
+ err = of_reserved_mem_region_to_resource(np, i++, &res);
+ if (err)
+ return 0;
+
/*
* Ignore the first memory region, which will be used as the vdev buffer.
* No need to do extra handling, rproc_add_virtio_dev will handle it.
*/
- if (!strcmp(it.node->name, "vdev0buffer"))
+ if (strstarts(res.name, "vdev0buffer"))
continue;
- rmem = of_reserved_mem_lookup(it.node);
- if (!rmem) {
- of_node_put(it.node);
- dev_err(dev, "unable to acquire memory-region\n");
+ if (imx_dsp_rproc_sys_to_da(priv, res.start, resource_size(&res), &da))
return -EINVAL;
- }
- if (imx_dsp_rproc_sys_to_da(priv, rmem->base, rmem->size, &da)) {
- of_node_put(it.node);
- return -EINVAL;
- }
-
- cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
- if (!cpu_addr) {
- of_node_put(it.node);
- dev_err(dev, "failed to map memory %p\n", &rmem->base);
- return -ENOMEM;
+ cpu_addr = devm_ioremap_resource_wc(dev, &res);
+ if (IS_ERR(cpu_addr)) {
+ dev_err(dev, "failed to map memory %pR\n", &res);
+ return PTR_ERR(cpu_addr);
}
/* Register memory region */
- mem = rproc_mem_entry_init(dev, (void __force *)cpu_addr, (dma_addr_t)rmem->base,
- rmem->size, da, NULL, NULL, it.node->name);
-
- if (mem) {
- rproc_coredump_add_segment(rproc, da, rmem->size);
- } else {
- of_node_put(it.node);
+ mem = rproc_mem_entry_init(dev, (void __force *)cpu_addr, (dma_addr_t)res.start,
+ resource_size(&res), da, NULL, NULL,
+ "%.*s", strchrnul(res.name, '@') - res.name, res.name);
+ if (!mem)
return -ENOMEM;
- }
+ rproc_coredump_add_segment(rproc, da, resource_size(&res));
rproc_add_carveout(rproc, mem);
}
-
- return 0;
}
/* Prepare function for rproc_ops */
@@ -784,7 +742,7 @@ static int imx_dsp_rproc_prepare(struct rproc *rproc)
pm_runtime_get_sync(dev);
- return 0;
+ return 0;
}
/* Unprepare function for rproc_ops */
@@ -792,7 +750,7 @@ static int imx_dsp_rproc_unprepare(struct rproc *rproc)
{
pm_runtime_put_sync(rproc->dev.parent);
- return 0;
+ return 0;
}
/* Kick function for rproc_ops */
@@ -1062,14 +1020,50 @@ static const struct rproc_ops imx_dsp_rproc_ops = {
static int imx_dsp_attach_pm_domains(struct imx_dsp_rproc *priv)
{
struct device *dev = priv->rproc->dev.parent;
- int ret;
/* A single PM domain is already attached. */
if (dev->pm_domain)
return 0;
- ret = dev_pm_domain_attach_list(dev, NULL, &priv->pd_list);
- return ret < 0 ? ret : 0;
+ return devm_pm_domain_attach_list(dev, NULL, &priv->pd_list);
+}
+
+static int imx_dsp_rproc_mmio_detect_mode(struct rproc *rproc)
+{
+ struct imx_dsp_rproc *priv = rproc->priv;
+ struct device *dev = rproc->dev.parent;
+ struct regmap *regmap;
+
+ regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "fsl,dsp-ctrl");
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "failed to find syscon\n");
+ return PTR_ERR(regmap);
+ }
+
+ priv->regmap = regmap;
+
+ return 0;
+}
+
+static int imx_dsp_rproc_reset_ctrl_detect_mode(struct rproc *rproc)
+{
+ struct imx_dsp_rproc *priv = rproc->priv;
+ struct device *dev = rproc->dev.parent;
+
+ priv->run_stall = devm_reset_control_get_exclusive(dev, "runstall");
+ if (IS_ERR(priv->run_stall)) {
+ dev_err(dev, "Failed to get DSP runstall reset control\n");
+ return PTR_ERR(priv->run_stall);
+ }
+
+ return 0;
+}
+
+static int imx_dsp_rproc_scu_api_detect_mode(struct rproc *rproc)
+{
+ struct imx_dsp_rproc *priv = rproc->priv;
+
+ return imx_scu_get_handle(&priv->ipc_handle);
}
/**
@@ -1087,38 +1081,12 @@ static int imx_dsp_attach_pm_domains(struct imx_dsp_rproc *priv)
static int imx_dsp_rproc_detect_mode(struct imx_dsp_rproc *priv)
{
const struct imx_dsp_rproc_dcfg *dsp_dcfg = priv->dsp_dcfg;
- struct device *dev = priv->rproc->dev.parent;
- struct regmap *regmap;
- int ret = 0;
-
- switch (dsp_dcfg->dcfg->method) {
- case IMX_RPROC_SCU_API:
- ret = imx_scu_get_handle(&priv->ipc_handle);
- if (ret)
- return ret;
- break;
- case IMX_RPROC_MMIO:
- regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "fsl,dsp-ctrl");
- if (IS_ERR(regmap)) {
- dev_err(dev, "failed to find syscon\n");
- return PTR_ERR(regmap);
- }
+ const struct imx_rproc_dcfg *dcfg = dsp_dcfg->dcfg;
- priv->regmap = regmap;
- break;
- case IMX_RPROC_RESET_CONTROLLER:
- priv->run_stall = devm_reset_control_get_exclusive(dev, "runstall");
- if (IS_ERR(priv->run_stall)) {
- dev_err(dev, "Failed to get DSP runstall reset control\n");
- return PTR_ERR(priv->run_stall);
- }
- break;
- default:
- ret = -EOPNOTSUPP;
- break;
- }
+ if (dcfg->ops && dcfg->ops->detect_mode)
+ return dcfg->ops->detect_mode(priv->rproc);
- return ret;
+ return -EOPNOTSUPP;
}
static const char *imx_dsp_clks_names[DSP_RPROC_CLK_MAX] = {
@@ -1152,11 +1120,8 @@ static int imx_dsp_rproc_probe(struct platform_device *pdev)
return -ENODEV;
ret = rproc_of_parse_firmware(dev, 0, &fw_name);
- if (ret) {
- dev_err(dev, "failed to parse firmware-name property, ret = %d\n",
- ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to parse firmware-name property\n");
rproc = devm_rproc_alloc(dev, "imx-dsp-rproc", &imx_dsp_rproc_ops,
fw_name, sizeof(*priv));
@@ -1179,52 +1144,28 @@ static int imx_dsp_rproc_probe(struct platform_device *pdev)
INIT_WORK(&priv->rproc_work, imx_dsp_rproc_vq_work);
ret = imx_dsp_rproc_detect_mode(priv);
- if (ret) {
- dev_err(dev, "failed on imx_dsp_rproc_detect_mode\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed on imx_dsp_rproc_detect_mode\n");
/* There are multiple power domains required by DSP on some platform */
ret = imx_dsp_attach_pm_domains(priv);
- if (ret) {
- dev_err(dev, "failed on imx_dsp_attach_pm_domains\n");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed on imx_dsp_attach_pm_domains\n");
+
/* Get clocks */
ret = imx_dsp_rproc_clk_get(priv);
- if (ret) {
- dev_err(dev, "failed on imx_dsp_rproc_clk_get\n");
- goto err_detach_domains;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed on imx_dsp_rproc_clk_get\n");
init_completion(&priv->pm_comp);
rproc->auto_boot = false;
- ret = rproc_add(rproc);
- if (ret) {
- dev_err(dev, "rproc_add failed\n");
- goto err_detach_domains;
- }
+ ret = devm_rproc_add(dev, rproc);
+ if (ret)
+ return dev_err_probe(dev, ret, "rproc_add failed\n");
rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_XTENSA);
- pm_runtime_enable(dev);
-
- return 0;
-
-err_detach_domains:
- dev_pm_domain_detach_list(priv->pd_list);
-
- return ret;
-}
-
-static void imx_dsp_rproc_remove(struct platform_device *pdev)
-{
- struct rproc *rproc = platform_get_drvdata(pdev);
- struct imx_dsp_rproc *priv = rproc->priv;
-
- pm_runtime_disable(&pdev->dev);
- rproc_del(rproc);
- dev_pm_domain_detach_list(priv->pd_list);
+ return devm_pm_runtime_enable(dev);
}
/* pm runtime functions */
@@ -1364,6 +1305,74 @@ static const struct dev_pm_ops imx_dsp_rproc_pm_ops = {
RUNTIME_PM_OPS(imx_dsp_runtime_suspend, imx_dsp_runtime_resume, NULL)
};
+static const struct imx_rproc_plat_ops imx_dsp_rproc_ops_mmio = {
+ .start = imx_dsp_rproc_mmio_start,
+ .stop = imx_dsp_rproc_mmio_stop,
+ .detect_mode = imx_dsp_rproc_mmio_detect_mode,
+};
+
+static const struct imx_rproc_plat_ops imx_dsp_rproc_ops_reset_ctrl = {
+ .start = imx_dsp_rproc_reset_ctrl_start,
+ .stop = imx_dsp_rproc_reset_ctrl_stop,
+ .detect_mode = imx_dsp_rproc_reset_ctrl_detect_mode,
+};
+
+static const struct imx_rproc_plat_ops imx_dsp_rproc_ops_scu_api = {
+ .start = imx_dsp_rproc_scu_api_start,
+ .stop = imx_dsp_rproc_scu_api_stop,
+ .detect_mode = imx_dsp_rproc_scu_api_detect_mode,
+};
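+
+/*
+ * Illustrative sketch (not part of this patch): support for a future SoC
+ * now reduces to a new ops table plus a dcfg referencing it (names
+ * hypothetical):
+ *
+ *	static const struct imx_rproc_plat_ops imx_dsp_rproc_ops_new = {
+ *		.start = imx_dsp_rproc_new_start,
+ *		.stop = imx_dsp_rproc_new_stop,
+ *		.detect_mode = imx_dsp_rproc_new_detect_mode,
+ *	};
+ */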
+
+/* Specific configuration for i.MX8MP */
+static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8mp = {
+ .att = imx_dsp_rproc_att_imx8mp,
+ .att_size = ARRAY_SIZE(imx_dsp_rproc_att_imx8mp),
+ .ops = &imx_dsp_rproc_ops_reset_ctrl,
+};
+
+static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8mp = {
+ .dcfg = &dsp_rproc_cfg_imx8mp,
+ .reset = imx8mp_dsp_reset,
+};
+
+/* Specific configuration for i.MX8ULP */
+static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8ulp = {
+ .src_reg = IMX8ULP_SIM_LPAV_REG_SYSCTRL0,
+ .src_mask = IMX8ULP_SYSCTRL0_DSP_STALL,
+ .src_start = 0,
+ .src_stop = IMX8ULP_SYSCTRL0_DSP_STALL,
+ .att = imx_dsp_rproc_att_imx8ulp,
+ .att_size = ARRAY_SIZE(imx_dsp_rproc_att_imx8ulp),
+ .ops = &imx_dsp_rproc_ops_mmio,
+};
+
+static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8ulp = {
+ .dcfg = &dsp_rproc_cfg_imx8ulp,
+ .reset = imx8ulp_dsp_reset,
+};
+
+/* Specific configuration for i.MX8QXP */
+static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8qxp = {
+ .att = imx_dsp_rproc_att_imx8qxp,
+ .att_size = ARRAY_SIZE(imx_dsp_rproc_att_imx8qxp),
+ .ops = &imx_dsp_rproc_ops_scu_api,
+};
+
+static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8qxp = {
+ .dcfg = &dsp_rproc_cfg_imx8qxp,
+};
+
+/* Specific configuration for i.MX8QM */
+static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8qm = {
+ .att = imx_dsp_rproc_att_imx8qm,
+ .att_size = ARRAY_SIZE(imx_dsp_rproc_att_imx8qm),
+ .ops = &imx_dsp_rproc_ops_scu_api,
+};
+
+static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8qm = {
+ .dcfg = &dsp_rproc_cfg_imx8qm,
+};
+
static const struct of_device_id imx_dsp_rproc_of_match[] = {
{ .compatible = "fsl,imx8qxp-hifi4", .data = &imx_dsp_rproc_cfg_imx8qxp },
{ .compatible = "fsl,imx8qm-hifi4", .data = &imx_dsp_rproc_cfg_imx8qm },
@@ -1375,7 +1384,6 @@ MODULE_DEVICE_TABLE(of, imx_dsp_rproc_of_match);
static struct platform_driver imx_dsp_rproc_driver = {
.probe = imx_dsp_rproc_probe,
- .remove = imx_dsp_rproc_remove,
.driver = {
.name = "imx-dsp-rproc",
.of_match_table = imx_dsp_rproc_of_match,
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
index bb25221a4a89..3be8790c14a2 100644
--- a/drivers/remoteproc/imx_rproc.c
+++ b/drivers/remoteproc/imx_rproc.c
@@ -93,7 +93,7 @@ struct imx_rproc_mem {
#define ATT_CORE(I) BIT((I))
static int imx_rproc_xtr_mbox_init(struct rproc *rproc, bool tx_block);
-static void imx_rproc_free_mbox(struct rproc *rproc);
+static void imx_rproc_free_mbox(void *data);
struct imx_rproc {
struct device *dev;
@@ -490,50 +490,44 @@ static int imx_rproc_prepare(struct rproc *rproc)
{
struct imx_rproc *priv = rproc->priv;
struct device_node *np = priv->dev->of_node;
- struct of_phandle_iterator it;
struct rproc_mem_entry *mem;
- struct reserved_mem *rmem;
+ int i = 0;
u32 da;
/* Register associated reserved memory regions */
- of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
- while (of_phandle_iterator_next(&it) == 0) {
+ while (1) {
+ int err;
+ struct resource res;
+
+ err = of_reserved_mem_region_to_resource(np, i++, &res);
+ if (err)
+ return 0;
+
/*
* Ignore the first memory region, which will be used as the vdev buffer.
* No need to do extra handling, rproc_add_virtio_dev will handle it.
*/
- if (!strcmp(it.node->name, "vdev0buffer"))
+ if (strstarts(res.name, "vdev0buffer"))
continue;
- if (!strcmp(it.node->name, "rsc-table"))
+ if (strstarts(res.name, "rsc-table"))
continue;
- rmem = of_reserved_mem_lookup(it.node);
- if (!rmem) {
- of_node_put(it.node);
- dev_err(priv->dev, "unable to acquire memory-region\n");
- return -EINVAL;
- }
-
/* No need to translate pa to da, i.MX uses the same map */
- da = rmem->base;
+ da = res.start;
/* Register memory region */
- mem = rproc_mem_entry_init(priv->dev, NULL, (dma_addr_t)rmem->base, rmem->size, da,
+ mem = rproc_mem_entry_init(priv->dev, NULL, (dma_addr_t)res.start,
+ resource_size(&res), da,
imx_rproc_mem_alloc, imx_rproc_mem_release,
- it.node->name);
-
- if (mem) {
- rproc_coredump_add_segment(rproc, da, rmem->size);
- } else {
- of_node_put(it.node);
+ "%.*s", strchrnul(res.name, '@') - res.name,
+ res.name);
+ if (!mem)
return -ENOMEM;
- }
+ rproc_coredump_add_segment(rproc, da, resource_size(&res));
rproc_add_carveout(rproc, mem);
}
-
- return 0;
}
static int imx_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
@@ -575,13 +569,9 @@ static int imx_rproc_attach(struct rproc *rproc)
return imx_rproc_xtr_mbox_init(rproc, true);
}
-static int imx_rproc_detach(struct rproc *rproc)
+static int imx_rproc_scu_api_detach(struct rproc *rproc)
{
struct imx_rproc *priv = rproc->priv;
- const struct imx_rproc_dcfg *dcfg = priv->dcfg;
-
- if (dcfg->method != IMX_RPROC_SCU_API)
- return -EOPNOTSUPP;
if (imx_sc_rm_is_resource_owned(priv->ipc_handle, priv->rsrc_id))
return -EOPNOTSUPP;
@@ -591,6 +581,17 @@ static int imx_rproc_detach(struct rproc *rproc)
return 0;
}
+static int imx_rproc_detach(struct rproc *rproc)
+{
+ struct imx_rproc *priv = rproc->priv;
+ const struct imx_rproc_dcfg *dcfg = priv->dcfg;
+
+ if (!dcfg->ops || !dcfg->ops->detach)
+ return -EOPNOTSUPP;
+
+ return dcfg->ops->detach(rproc);
+}
+
static struct resource_table *imx_rproc_get_loaded_rsc_table(struct rproc *rproc, size_t *table_sz)
{
struct imx_rproc *priv = rproc->priv;
@@ -664,47 +665,37 @@ static int imx_rproc_addr_init(struct imx_rproc *priv,
}
/* memory-region is optional property */
- nph = of_count_phandle_with_args(np, "memory-region", NULL);
+ nph = of_reserved_mem_region_count(np);
if (nph <= 0)
return 0;
/* remap optional addresses */
for (a = 0; a < nph; a++) {
- struct device_node *node;
struct resource res;
- node = of_parse_phandle(np, "memory-region", a);
- if (!node)
- continue;
- /* Not map vdevbuffer, vdevring region */
- if (!strncmp(node->name, "vdev", strlen("vdev"))) {
- of_node_put(node);
- continue;
- }
- err = of_address_to_resource(node, 0, &res);
+ err = of_reserved_mem_region_to_resource(np, a, &res);
if (err) {
dev_err(dev, "unable to resolve memory region\n");
- of_node_put(node);
return err;
}
- if (b >= IMX_RPROC_MEM_MAX) {
- of_node_put(node);
+ /* Do not map the vdevbuffer and vdevring regions */
+ if (strstarts(res.name, "vdev"))
+ continue;
+
+ if (b >= IMX_RPROC_MEM_MAX)
break;
- }
- /* Not use resource version, because we might share region */
- priv->mem[b].cpu_addr = devm_ioremap_wc(&pdev->dev, res.start, resource_size(&res));
+ priv->mem[b].cpu_addr = devm_ioremap_resource_wc(&pdev->dev, &res);
- if (!priv->mem[b].cpu_addr) {
+ if (IS_ERR(priv->mem[b].cpu_addr)) {
 dev_err(dev, "failed to remap %pr\n", &res);
- of_node_put(node);
- return -ENOMEM;
+ return PTR_ERR(priv->mem[b].cpu_addr);
}
priv->mem[b].sys_addr = res.start;
priv->mem[b].size = resource_size(&res);
- if (!strcmp(node->name, "rsc-table"))
+ if (!strcmp(res.name, "rsc-table"))
priv->rsc_table = priv->mem[b].cpu_addr;
- of_node_put(node);
b++;
}
@@ -780,8 +771,9 @@ static int imx_rproc_xtr_mbox_init(struct rproc *rproc, bool tx_block)
return 0;
}
-static void imx_rproc_free_mbox(struct rproc *rproc)
+static void imx_rproc_free_mbox(void *data)
{
+ struct rproc *rproc = data;
struct imx_rproc *priv = rproc->priv;
if (priv->tx_ch) {
@@ -795,13 +787,9 @@ static void imx_rproc_free_mbox(struct rproc *rproc)
}
}
-static void imx_rproc_put_scu(struct rproc *rproc)
+static void imx_rproc_put_scu(void *data)
{
- struct imx_rproc *priv = rproc->priv;
- const struct imx_rproc_dcfg *dcfg = priv->dcfg;
-
- if (dcfg->method != IMX_RPROC_SCU_API)
- return;
+ struct imx_rproc *priv = data;
if (imx_sc_rm_is_resource_owned(priv->ipc_handle, priv->rsrc_id)) {
dev_pm_domain_detach_list(priv->pd_list);
@@ -943,6 +931,10 @@ static int imx_rproc_scu_api_detect_mode(struct rproc *rproc)
else
priv->core_index = 0;
+ ret = devm_add_action_or_reset(dev, imx_rproc_put_scu, priv);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add action for put scu\n");
+
/*
* If Mcore resource is not owned by Acore partition, It is kicked by ROM,
* and Linux could only do IPC with Mcore and nothing else.
@@ -1001,35 +993,6 @@ static int imx_rproc_detect_mode(struct imx_rproc *priv)
return dcfg->ops->detect_mode(priv->rproc);
}
-static int imx_rproc_clk_enable(struct imx_rproc *priv)
-{
- const struct imx_rproc_dcfg *dcfg = priv->dcfg;
- struct device *dev = priv->dev;
- int ret;
-
- /* Remote core is not under control of Linux or it is managed by SCU API */
- if (dcfg->method == IMX_RPROC_NONE || dcfg->method == IMX_RPROC_SCU_API)
- return 0;
-
- priv->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(priv->clk)) {
- dev_err(dev, "Failed to get clock\n");
- return PTR_ERR(priv->clk);
- }
-
- /*
- * clk for M4 block including memory. Should be
- * enabled before .start for FW transfer.
- */
- ret = clk_prepare_enable(priv->clk);
- if (ret) {
- dev_err(dev, "Failed to enable clock\n");
- return ret;
- }
-
- return 0;
-}
-
static int imx_rproc_sys_off_handler(struct sys_off_data *data)
{
struct rproc *rproc = data->cb_data;
@@ -1046,6 +1009,13 @@ static int imx_rproc_sys_off_handler(struct sys_off_data *data)
return NOTIFY_DONE;
}
+static void imx_rproc_destroy_workqueue(void *data)
+{
+ struct workqueue_struct *workqueue = data;
+
+ destroy_workqueue(workqueue);
+}
+
static int imx_rproc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1077,25 +1047,38 @@ static int imx_rproc_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ ret = devm_add_action_or_reset(dev, imx_rproc_destroy_workqueue, priv->workqueue);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add devm destroy workqueue action\n");
+
INIT_WORK(&priv->rproc_work, imx_rproc_vq_work);
ret = imx_rproc_xtr_mbox_init(rproc, true);
if (ret)
- goto err_put_wkq;
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, imx_rproc_free_mbox, rproc);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to add devm free mbox action\n");
ret = imx_rproc_addr_init(priv, pdev);
- if (ret) {
- dev_err(dev, "failed on imx_rproc_addr_init\n");
- goto err_put_mbox;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed on imx_rproc_addr_init\n");
ret = imx_rproc_detect_mode(priv);
if (ret)
- goto err_put_mbox;
+ return dev_err_probe(dev, ret, "failed on detect mode\n");
- ret = imx_rproc_clk_enable(priv);
- if (ret)
- goto err_put_scu;
+ /*
+ * Handle clocks when remote core is under control of Linux AND the
+ * clocks are not managed by system firmware.
+ */
+ if (dcfg->flags & IMX_RPROC_NEED_CLKS) {
+ priv->clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(dev, PTR_ERR(priv->clk), "Failed to enable clock\n");
+ }
if (rproc->state != RPROC_DETACHED)
rproc->auto_boot = of_property_read_bool(np, "fsl,auto-boot");
@@ -1110,45 +1093,32 @@ static int imx_rproc_probe(struct platform_device *pdev)
ret = devm_register_sys_off_handler(dev, SYS_OFF_MODE_POWER_OFF_PREPARE,
SYS_OFF_PRIO_DEFAULT,
imx_rproc_sys_off_handler, rproc);
- if (ret) {
- dev_err(dev, "register power off handler failure\n");
- goto err_put_clk;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "register power off handler failure\n");
ret = devm_register_sys_off_handler(dev, SYS_OFF_MODE_RESTART_PREPARE,
SYS_OFF_PRIO_DEFAULT,
imx_rproc_sys_off_handler, rproc);
- if (ret) {
- dev_err(dev, "register restart handler failure\n");
- goto err_put_clk;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "register restart handler failure\n");
}
- if (dcfg->method == IMX_RPROC_SCU_API) {
- pm_runtime_enable(dev);
- ret = pm_runtime_resume_and_get(dev);
- if (ret) {
- dev_err(dev, "pm_runtime get failed: %d\n", ret);
- goto err_put_clk;
- }
- }
+ pm_runtime_enable(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "pm_runtime get failed\n");
- ret = rproc_add(rproc);
+ ret = devm_rproc_add(dev, rproc);
if (ret) {
dev_err(dev, "rproc_add failed\n");
- goto err_put_clk;
+ goto err_put_pm;
}
return 0;
-err_put_clk:
- clk_disable_unprepare(priv->clk);
-err_put_scu:
- imx_rproc_put_scu(rproc);
-err_put_mbox:
- imx_rproc_free_mbox(rproc);
-err_put_wkq:
- destroy_workqueue(priv->workqueue);
+err_put_pm:
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
return ret;
}
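A minimal standalone sketch of the devm_add_action_or_reset() idiom this probe rework adopts (driver and function names here are illustrative, not from the patch). Devm actions run in reverse registration order on probe failure or unbind, which is what allows the explicit error-unwind labels to be deleted:

    #include <linux/device.h>
    #include <linux/platform_device.h>
    #include <linux/workqueue.h>

    static void demo_destroy_wq(void *data)
    {
        destroy_workqueue(data);
    }

    static int demo_probe(struct platform_device *pdev)
    {
        struct workqueue_struct *wq;
        int ret;

        wq = alloc_workqueue("demo", 0, 0);
        if (!wq)
            return -ENOMEM;

        /* On registration failure this runs demo_destroy_wq(wq) itself. */
        ret = devm_add_action_or_reset(&pdev->dev, demo_destroy_wq, wq);
        if (ret)
            return ret;

        /* Any later 'return err;' is now safe: devm unwinds in reverse order. */
        return 0;
    }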
@@ -1158,15 +1128,8 @@ static void imx_rproc_remove(struct platform_device *pdev)
struct rproc *rproc = platform_get_drvdata(pdev);
struct imx_rproc *priv = rproc->priv;
- if (priv->dcfg->method == IMX_RPROC_SCU_API) {
- pm_runtime_disable(priv->dev);
- pm_runtime_put(priv->dev);
- }
- clk_disable_unprepare(priv->clk);
- rproc_del(rproc);
- imx_rproc_put_scu(rproc);
- imx_rproc_free_mbox(rproc);
- destroy_workqueue(priv->workqueue);
+ pm_runtime_disable(priv->dev);
+ pm_runtime_put_noidle(priv->dev);
}
static const struct imx_rproc_plat_ops imx_rproc_ops_arm_smc = {
@@ -1184,6 +1147,7 @@ static const struct imx_rproc_plat_ops imx_rproc_ops_mmio = {
static const struct imx_rproc_plat_ops imx_rproc_ops_scu_api = {
.start = imx_rproc_scu_api_start,
.stop = imx_rproc_scu_api_stop,
+ .detach = imx_rproc_scu_api_detach,
.detect_mode = imx_rproc_scu_api_detect_mode,
};
@@ -1196,15 +1160,15 @@ static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mn_mmio = {
.gpr_wait = IMX8M_GPR22_CM7_CPUWAIT,
.att = imx_rproc_att_imx8mn,
.att_size = ARRAY_SIZE(imx_rproc_att_imx8mn),
- .method = IMX_RPROC_MMIO,
.ops = &imx_rproc_ops_mmio,
+ .flags = IMX_RPROC_NEED_CLKS,
};
static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mn = {
.att = imx_rproc_att_imx8mn,
.att_size = ARRAY_SIZE(imx_rproc_att_imx8mn),
- .method = IMX_RPROC_SMC,
.ops = &imx_rproc_ops_arm_smc,
+ .flags = IMX_RPROC_NEED_CLKS,
};
static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mq = {
@@ -1214,34 +1178,30 @@ static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mq = {
.src_stop = IMX7D_M4_STOP,
.att = imx_rproc_att_imx8mq,
.att_size = ARRAY_SIZE(imx_rproc_att_imx8mq),
- .method = IMX_RPROC_MMIO,
.ops = &imx_rproc_ops_mmio,
+ .flags = IMX_RPROC_NEED_CLKS,
};
static const struct imx_rproc_dcfg imx_rproc_cfg_imx8qm = {
.att = imx_rproc_att_imx8qm,
.att_size = ARRAY_SIZE(imx_rproc_att_imx8qm),
- .method = IMX_RPROC_SCU_API,
.ops = &imx_rproc_ops_scu_api,
};
static const struct imx_rproc_dcfg imx_rproc_cfg_imx8qxp = {
.att = imx_rproc_att_imx8qxp,
.att_size = ARRAY_SIZE(imx_rproc_att_imx8qxp),
- .method = IMX_RPROC_SCU_API,
.ops = &imx_rproc_ops_scu_api,
};
static const struct imx_rproc_dcfg imx_rproc_cfg_imx8ulp = {
.att = imx_rproc_att_imx8ulp,
.att_size = ARRAY_SIZE(imx_rproc_att_imx8ulp),
- .method = IMX_RPROC_NONE,
};
static const struct imx_rproc_dcfg imx_rproc_cfg_imx7ulp = {
.att = imx_rproc_att_imx7ulp,
.att_size = ARRAY_SIZE(imx_rproc_att_imx7ulp),
- .method = IMX_RPROC_NONE,
.flags = IMX_RPROC_NEED_SYSTEM_OFF,
};
@@ -1252,8 +1212,8 @@ static const struct imx_rproc_dcfg imx_rproc_cfg_imx7d = {
.src_stop = IMX7D_M4_STOP,
.att = imx_rproc_att_imx7d,
.att_size = ARRAY_SIZE(imx_rproc_att_imx7d),
- .method = IMX_RPROC_MMIO,
.ops = &imx_rproc_ops_mmio,
+ .flags = IMX_RPROC_NEED_CLKS,
};
static const struct imx_rproc_dcfg imx_rproc_cfg_imx6sx = {
@@ -1263,15 +1223,15 @@ static const struct imx_rproc_dcfg imx_rproc_cfg_imx6sx = {
.src_stop = IMX6SX_M4_STOP,
.att = imx_rproc_att_imx6sx,
.att_size = ARRAY_SIZE(imx_rproc_att_imx6sx),
- .method = IMX_RPROC_MMIO,
.ops = &imx_rproc_ops_mmio,
+ .flags = IMX_RPROC_NEED_CLKS,
};
static const struct imx_rproc_dcfg imx_rproc_cfg_imx93 = {
.att = imx_rproc_att_imx93,
.att_size = ARRAY_SIZE(imx_rproc_att_imx93),
- .method = IMX_RPROC_SMC,
.ops = &imx_rproc_ops_arm_smc,
+ .flags = IMX_RPROC_NEED_CLKS,
};
static const struct of_device_id imx_rproc_of_match[] = {
diff --git a/drivers/remoteproc/imx_rproc.h b/drivers/remoteproc/imx_rproc.h
index 3a9adaaf048b..1b2d9f4d6d19 100644
--- a/drivers/remoteproc/imx_rproc.h
+++ b/drivers/remoteproc/imx_rproc.h
@@ -15,25 +15,14 @@ struct imx_rproc_att {
int flags;
};
-/* Remote core start/stop method */
-enum imx_rproc_method {
- IMX_RPROC_NONE,
- /* Through syscon regmap */
- IMX_RPROC_MMIO,
- /* Through ARM SMCCC */
- IMX_RPROC_SMC,
- /* Through System Control Unit API */
- IMX_RPROC_SCU_API,
- /* Through Reset Controller API */
- IMX_RPROC_RESET_CONTROLLER,
-};
-
/* dcfg flags */
#define IMX_RPROC_NEED_SYSTEM_OFF BIT(0)
+#define IMX_RPROC_NEED_CLKS BIT(1)
struct imx_rproc_plat_ops {
int (*start)(struct rproc *rproc);
int (*stop)(struct rproc *rproc);
+ int (*detach)(struct rproc *rproc);
int (*detect_mode)(struct rproc *rproc);
};
@@ -46,7 +35,6 @@ struct imx_rproc_dcfg {
u32 gpr_wait;
const struct imx_rproc_att *att;
size_t att_size;
- enum imx_rproc_method method;
u32 flags;
const struct imx_rproc_plat_ops *ops;
};
diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
index 8206a1766481..db8fd045468d 100644
--- a/drivers/remoteproc/mtk_scp.c
+++ b/drivers/remoteproc/mtk_scp.c
@@ -16,6 +16,7 @@
#include <linux/remoteproc.h>
#include <linux/remoteproc/mtk_scp.h>
#include <linux/rpmsg/mtk_rpmsg.h>
+#include <linux/string.h>
#include "mtk_common.h"
#include "remoteproc_internal.h"
@@ -1093,22 +1094,74 @@ static void scp_remove_rpmsg_subdev(struct mtk_scp *scp)
}
}
+/**
+ * scp_get_default_fw_path() - Get the default SCP firmware path
+ * @dev: SCP device
+ * @core_id: SCP core number, negative for single-core SCP hardware
+ *
+ * This function generates a path in the following format:
+ * mediatek/(soc_model)/scp_cX.img for multi-core, or
+ * mediatek/(soc_model)/scp.img for single-core SCP hardware.
+ *
+ * Return: A devm-allocated string containing the full path to an SCP
+ * firmware image, or an error pointer.
+ */
+static const char *scp_get_default_fw_path(struct device *dev, int core_id)
+{
+ struct device_node *np = core_id < 0 ? dev->of_node : dev->parent->of_node;
+ const char *compatible, *soc;
+ char scp_fw_file[7];
+ int ret;
+
+ /* Use only the first compatible string */
+ ret = of_property_read_string_index(np, "compatible", 0, &compatible);
+ if (ret)
+ return ERR_PTR(ret);
+
+	/* If the compatible string's length is implausible, bail out early */
+ if (strlen(compatible) < strlen("mediatek,mtXXXX-scp"))
+ return ERR_PTR(-EINVAL);
+
+ /* If the compatible string starts with "mediatek,mt" assume that it's ok */
+ if (!str_has_prefix(compatible, "mediatek,mt"))
+ return ERR_PTR(-EINVAL);
+
+ if (core_id >= 0)
+ ret = snprintf(scp_fw_file, sizeof(scp_fw_file), "scp_c%d", core_id);
+ else
+ ret = snprintf(scp_fw_file, sizeof(scp_fw_file), "scp");
+ if (ret >= sizeof(scp_fw_file))
+ return ERR_PTR(-ENAMETOOLONG);
+
+	/* Not using strchr() here, as strlen() of a string constant is computed at compile time */
+ soc = &compatible[strlen("mediatek,")];
+
+ return devm_kasprintf(dev, GFP_KERNEL, "mediatek/%.*s/%s.img",
+ (int)strlen("mtXXXX"), soc, scp_fw_file);
+}
+
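Worked example (SoC model assumed for illustration, not from the patch): with a first compatible of "mediatek,mt8195-scp", the helper above yields:

    const char *path;

    path = scp_get_default_fw_path(dev, -1); /* "mediatek/mt8195/scp.img"    */
    path = scp_get_default_fw_path(dev, 1);  /* "mediatek/mt8195/scp_c1.img" */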
static struct mtk_scp *scp_rproc_init(struct platform_device *pdev,
struct mtk_scp_of_cluster *scp_cluster,
- const struct mtk_scp_of_data *of_data)
+ const struct mtk_scp_of_data *of_data,
+ int core_id)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct mtk_scp *scp;
struct rproc *rproc;
struct resource *res;
- const char *fw_name = "scp.img";
+ const char *fw_name;
int ret, i;
const struct mtk_scp_sizes_data *scp_sizes;
ret = rproc_of_parse_firmware(dev, 0, &fw_name);
- if (ret < 0 && ret != -EINVAL)
- return ERR_PTR(ret);
+ if (ret) {
+ fw_name = scp_get_default_fw_path(dev, core_id);
+ if (IS_ERR(fw_name)) {
+ dev_err(dev, "Cannot get firmware path: %ld\n", PTR_ERR(fw_name));
+ return ERR_CAST(fw_name);
+ }
+ }
rproc = devm_rproc_alloc(dev, np->name, &scp_ops, fw_name, sizeof(*scp));
if (!rproc) {
@@ -1212,7 +1265,7 @@ static int scp_add_single_core(struct platform_device *pdev,
struct mtk_scp *scp;
int ret;
- scp = scp_rproc_init(pdev, scp_cluster, of_device_get_match_data(dev));
+ scp = scp_rproc_init(pdev, scp_cluster, of_device_get_match_data(dev), -1);
if (IS_ERR(scp))
return PTR_ERR(scp);
@@ -1259,7 +1312,7 @@ static int scp_add_multi_core(struct platform_device *pdev,
goto init_fail;
}
- scp = scp_rproc_init(cpdev, scp_cluster, cluster_of_data[core_id]);
+ scp = scp_rproc_init(cpdev, scp_cluster, cluster_of_data[core_id], core_id);
put_device(&cpdev->dev);
if (IS_ERR(scp)) {
ret = PTR_ERR(scp);
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
index 9c9e9c3cf378..cb01354248af 100644
--- a/drivers/remoteproc/omap_remoteproc.c
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -555,7 +555,6 @@ static void omap_rproc_kick(struct rproc *rproc, int vqid)
dev_err(dev, "failed to send mailbox message, status = %d\n",
ret);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
}
@@ -656,7 +655,6 @@ static int omap_rproc_start(struct rproc *rproc)
pm_runtime_use_autosuspend(dev);
pm_runtime_get_noresume(dev);
pm_runtime_enable(dev);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
@@ -714,7 +712,6 @@ enable_device:
reset_control_deassert(oproc->reset);
out:
/* schedule the next auto-suspend */
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
}
diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c
index e98b7e03162c..b5c8d6d38c9c 100644
--- a/drivers/remoteproc/qcom_q6v5_adsp.c
+++ b/drivers/remoteproc/qcom_q6v5_adsp.c
@@ -625,27 +625,22 @@ static int adsp_init_mmio(struct qcom_adsp *adsp,
static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
{
- struct reserved_mem *rmem = NULL;
- struct device_node *node;
-
- node = of_parse_phandle(adsp->dev->of_node, "memory-region", 0);
- if (node)
- rmem = of_reserved_mem_lookup(node);
- of_node_put(node);
+ int ret;
+ struct resource res;
- if (!rmem) {
+ ret = of_reserved_mem_region_to_resource(adsp->dev->of_node, 0, &res);
+ if (ret) {
dev_err(adsp->dev, "unable to resolve memory-region\n");
- return -EINVAL;
+ return ret;
}
- adsp->mem_phys = adsp->mem_reloc = rmem->base;
- adsp->mem_size = rmem->size;
- adsp->mem_region = devm_ioremap_wc(adsp->dev,
- adsp->mem_phys, adsp->mem_size);
- if (!adsp->mem_region) {
- dev_err(adsp->dev, "unable to map memory region: %pa+%zx\n",
- &rmem->base, adsp->mem_size);
- return -EBUSY;
+ adsp->mem_phys = adsp->mem_reloc = res.start;
+ adsp->mem_size = resource_size(&res);
+ adsp->mem_region = devm_ioremap_resource_wc(adsp->dev, &res);
+ if (IS_ERR(adsp->mem_region)) {
+ dev_err(adsp->dev, "unable to map memory region: %pR\n", &res);
+		return PTR_ERR(adsp->mem_region);
	}
return 0;
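The same two-step conversion repeats in the mss, pas, wcss and wcnss hunks below; a condensed sketch of the pattern (function name illustrative). Unlike devm_ioremap_wc(), devm_ioremap_resource_wc() also requests the region and signals failure with an ERR_PTR rather than NULL:

    #include <linux/io.h>
    #include <linux/of_reserved_mem.h>

    static int demo_map_region(struct device *dev, void __iomem **va,
                               phys_addr_t *phys, size_t *size)
    {
        struct resource res;
        int ret;

        /* Resolve "memory-region" phandle 0 straight into a resource. */
        ret = of_reserved_mem_region_to_resource(dev->of_node, 0, &res);
        if (ret)
            return ret;

        *phys = res.start;
        *size = resource_size(&res);

        *va = devm_ioremap_resource_wc(dev, &res);
        if (IS_ERR(*va))
            return PTR_ERR(*va);

        return 0;
    }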
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index 3087d895b87f..91940977ca89 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -1970,8 +1970,8 @@ static int q6v5_init_reset(struct q6v5 *qproc)
static int q6v5_alloc_memory_region(struct q6v5 *qproc)
{
struct device_node *child;
- struct reserved_mem *rmem;
- struct device_node *node;
+ struct resource res;
+ int ret;
/*
* In the absence of mba/mpss sub-child, extract the mba and mpss
@@ -1979,71 +1979,49 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
*/
child = of_get_child_by_name(qproc->dev->of_node, "mba");
if (!child) {
- node = of_parse_phandle(qproc->dev->of_node,
- "memory-region", 0);
+ ret = of_reserved_mem_region_to_resource(qproc->dev->of_node, 0, &res);
} else {
- node = of_parse_phandle(child, "memory-region", 0);
+ ret = of_reserved_mem_region_to_resource(child, 0, &res);
of_node_put(child);
}
- if (!node) {
- dev_err(qproc->dev, "no mba memory-region specified\n");
- return -EINVAL;
- }
-
- rmem = of_reserved_mem_lookup(node);
- of_node_put(node);
- if (!rmem) {
+ if (ret) {
dev_err(qproc->dev, "unable to resolve mba region\n");
- return -EINVAL;
+ return ret;
}
- qproc->mba_phys = rmem->base;
- qproc->mba_size = rmem->size;
+ qproc->mba_phys = res.start;
+ qproc->mba_size = resource_size(&res);
if (!child) {
- node = of_parse_phandle(qproc->dev->of_node,
- "memory-region", 1);
+ ret = of_reserved_mem_region_to_resource(qproc->dev->of_node, 1, &res);
} else {
child = of_get_child_by_name(qproc->dev->of_node, "mpss");
- node = of_parse_phandle(child, "memory-region", 0);
+ ret = of_reserved_mem_region_to_resource(child, 0, &res);
of_node_put(child);
}
- if (!node) {
- dev_err(qproc->dev, "no mpss memory-region specified\n");
- return -EINVAL;
- }
-
- rmem = of_reserved_mem_lookup(node);
- of_node_put(node);
- if (!rmem) {
+ if (ret) {
dev_err(qproc->dev, "unable to resolve mpss region\n");
- return -EINVAL;
+ return ret;
}
- qproc->mpss_phys = qproc->mpss_reloc = rmem->base;
- qproc->mpss_size = rmem->size;
+ qproc->mpss_phys = qproc->mpss_reloc = res.start;
+ qproc->mpss_size = resource_size(&res);
if (!child) {
- node = of_parse_phandle(qproc->dev->of_node, "memory-region", 2);
+ ret = of_reserved_mem_region_to_resource(qproc->dev->of_node, 2, &res);
} else {
child = of_get_child_by_name(qproc->dev->of_node, "metadata");
- node = of_parse_phandle(child, "memory-region", 0);
+ ret = of_reserved_mem_region_to_resource(child, 0, &res);
of_node_put(child);
}
- if (!node)
+ if (ret)
return 0;
- rmem = of_reserved_mem_lookup(node);
- if (!rmem) {
- dev_err(qproc->dev, "unable to resolve metadata region\n");
- return -EINVAL;
- }
-
- qproc->mdata_phys = rmem->base;
- qproc->mdata_size = rmem->size;
+ qproc->mdata_phys = res.start;
+ qproc->mdata_size = resource_size(&res);
return 0;
}
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index 158bcd6cc85c..52680ac99589 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -547,54 +547,38 @@ static void qcom_pas_pds_detach(struct qcom_pas *pas, struct device **pds, size_
static int qcom_pas_alloc_memory_region(struct qcom_pas *pas)
{
- struct reserved_mem *rmem;
- struct device_node *node;
-
- node = of_parse_phandle(pas->dev->of_node, "memory-region", 0);
- if (!node) {
- dev_err(pas->dev, "no memory-region specified\n");
- return -EINVAL;
- }
+ struct resource res;
+ int ret;
- rmem = of_reserved_mem_lookup(node);
- of_node_put(node);
- if (!rmem) {
+ ret = of_reserved_mem_region_to_resource(pas->dev->of_node, 0, &res);
+ if (ret) {
dev_err(pas->dev, "unable to resolve memory-region\n");
- return -EINVAL;
+ return ret;
}
- pas->mem_phys = pas->mem_reloc = rmem->base;
- pas->mem_size = rmem->size;
- pas->mem_region = devm_ioremap_wc(pas->dev, pas->mem_phys, pas->mem_size);
- if (!pas->mem_region) {
- dev_err(pas->dev, "unable to map memory region: %pa+%zx\n",
- &rmem->base, pas->mem_size);
- return -EBUSY;
+ pas->mem_phys = pas->mem_reloc = res.start;
+ pas->mem_size = resource_size(&res);
+ pas->mem_region = devm_ioremap_resource_wc(pas->dev, &res);
+ if (IS_ERR(pas->mem_region)) {
+ dev_err(pas->dev, "unable to map memory region: %pR\n", &res);
+ return PTR_ERR(pas->mem_region);
}
if (!pas->dtb_pas_id)
return 0;
- node = of_parse_phandle(pas->dev->of_node, "memory-region", 1);
- if (!node) {
- dev_err(pas->dev, "no dtb memory-region specified\n");
- return -EINVAL;
- }
-
- rmem = of_reserved_mem_lookup(node);
- of_node_put(node);
- if (!rmem) {
+ ret = of_reserved_mem_region_to_resource(pas->dev->of_node, 1, &res);
+ if (ret) {
dev_err(pas->dev, "unable to resolve dtb memory-region\n");
- return -EINVAL;
+ return ret;
}
- pas->dtb_mem_phys = pas->dtb_mem_reloc = rmem->base;
- pas->dtb_mem_size = rmem->size;
- pas->dtb_mem_region = devm_ioremap_wc(pas->dev, pas->dtb_mem_phys, pas->dtb_mem_size);
- if (!pas->dtb_mem_region) {
- dev_err(pas->dev, "unable to map dtb memory region: %pa+%zx\n",
- &rmem->base, pas->dtb_mem_size);
- return -EBUSY;
+ pas->dtb_mem_phys = pas->dtb_mem_reloc = res.start;
+ pas->dtb_mem_size = resource_size(&res);
+ pas->dtb_mem_region = devm_ioremap_resource_wc(pas->dev, &res);
+ if (IS_ERR(pas->dtb_mem_region)) {
+ dev_err(pas->dev, "unable to map dtb memory region: %pR\n", &res);
+ return PTR_ERR(pas->dtb_mem_region);
}
return 0;
@@ -603,7 +587,6 @@ static int qcom_pas_alloc_memory_region(struct qcom_pas *pas)
static int qcom_pas_assign_memory_region(struct qcom_pas *pas)
{
struct qcom_scm_vmperm perm[MAX_ASSIGN_COUNT];
- struct device_node *node;
unsigned int perm_size;
int offset;
int ret;
@@ -612,17 +595,15 @@ static int qcom_pas_assign_memory_region(struct qcom_pas *pas)
return 0;
for (offset = 0; offset < pas->region_assign_count; ++offset) {
- struct reserved_mem *rmem = NULL;
-
- node = of_parse_phandle(pas->dev->of_node, "memory-region",
- pas->region_assign_idx + offset);
- if (node)
- rmem = of_reserved_mem_lookup(node);
- of_node_put(node);
- if (!rmem) {
+ struct resource res;
+
+ ret = of_reserved_mem_region_to_resource(pas->dev->of_node,
+ pas->region_assign_idx + offset,
+ &res);
+ if (ret) {
dev_err(pas->dev, "unable to resolve shareable memory-region index %d\n",
offset);
- return -EINVAL;
+ return ret;
}
if (pas->region_assign_shared) {
@@ -637,8 +618,8 @@ static int qcom_pas_assign_memory_region(struct qcom_pas *pas)
perm_size = 1;
}
- pas->region_assign_phys[offset] = rmem->base;
- pas->region_assign_size[offset] = rmem->size;
+ pas->region_assign_phys[offset] = res.start;
+ pas->region_assign_size[offset] = resource_size(&res);
pas->region_assign_owners[offset] = BIT(QCOM_SCM_VMID_HLOS);
ret = qcom_scm_assign_mem(pas->region_assign_phys[offset],
@@ -1461,7 +1442,7 @@ static const struct of_device_id qcom_pas_of_match[] = {
{ .compatible = "qcom,milos-wpss-pas", .data = &sc7280_wpss_resource},
{ .compatible = "qcom,msm8226-adsp-pil", .data = &msm8996_adsp_resource},
{ .compatible = "qcom,msm8953-adsp-pil", .data = &msm8996_adsp_resource},
- { .compatible = "qcom,msm8974-adsp-pil", .data = &adsp_resource_init},
+ { .compatible = "qcom,msm8974-adsp-pil", .data = &msm8996_adsp_resource},
{ .compatible = "qcom,msm8996-adsp-pil", .data = &msm8996_adsp_resource},
{ .compatible = "qcom,msm8996-slpi-pil", .data = &msm8996_slpi_resource_init},
{ .compatible = "qcom,msm8998-adsp-pas", .data = &msm8996_adsp_resource},
@@ -1488,6 +1469,7 @@ static const struct of_device_id qcom_pas_of_match[] = {
{ .compatible = "qcom,sc8280xp-nsp0-pas", .data = &sc8280xp_nsp0_resource},
{ .compatible = "qcom,sc8280xp-nsp1-pas", .data = &sc8280xp_nsp1_resource},
{ .compatible = "qcom,sdm660-adsp-pas", .data = &adsp_resource_init},
+ { .compatible = "qcom,sdm660-cdsp-pas", .data = &cdsp_resource_init},
{ .compatible = "qcom,sdm845-adsp-pas", .data = &sdm845_adsp_resource_init},
{ .compatible = "qcom,sdm845-cdsp-pas", .data = &sdm845_cdsp_resource_init},
{ .compatible = "qcom,sdm845-slpi-pas", .data = &sdm845_slpi_resource_init},
diff --git a/drivers/remoteproc/qcom_q6v5_wcss.c b/drivers/remoteproc/qcom_q6v5_wcss.c
index 07c88623f597..c27200159a88 100644
--- a/drivers/remoteproc/qcom_q6v5_wcss.c
+++ b/drivers/remoteproc/qcom_q6v5_wcss.c
@@ -85,7 +85,7 @@
#define TCSR_WCSS_CLK_MASK 0x1F
#define TCSR_WCSS_CLK_ENABLE 0x14
-#define MAX_HALT_REG 3
+#define MAX_HALT_REG 4
enum {
WCSS_IPQ8074,
WCSS_QCS404,
@@ -811,7 +811,8 @@ static int q6v5_wcss_init_reset(struct q6v5_wcss *wcss,
}
}
- wcss->wcss_q6_bcr_reset = devm_reset_control_get_exclusive(dev, "wcss_q6_bcr_reset");
+ wcss->wcss_q6_bcr_reset = devm_reset_control_get_optional_exclusive(dev,
+ "wcss_q6_bcr_reset");
if (IS_ERR(wcss->wcss_q6_bcr_reset)) {
dev_err(wcss->dev, "unable to acquire wcss_q6_bcr_reset\n");
return PTR_ERR(wcss->wcss_q6_bcr_reset);
@@ -864,37 +865,32 @@ static int q6v5_wcss_init_mmio(struct q6v5_wcss *wcss,
return -EINVAL;
}
- wcss->halt_q6 = halt_reg[0];
- wcss->halt_wcss = halt_reg[1];
- wcss->halt_nc = halt_reg[2];
+ wcss->halt_q6 = halt_reg[1];
+ wcss->halt_wcss = halt_reg[2];
+ wcss->halt_nc = halt_reg[3];
return 0;
}
static int q6v5_alloc_memory_region(struct q6v5_wcss *wcss)
{
- struct reserved_mem *rmem = NULL;
- struct device_node *node;
struct device *dev = wcss->dev;
+ struct resource res;
+ int ret;
- node = of_parse_phandle(dev->of_node, "memory-region", 0);
- if (node)
- rmem = of_reserved_mem_lookup(node);
- of_node_put(node);
-
- if (!rmem) {
+ ret = of_reserved_mem_region_to_resource(dev->of_node, 0, &res);
+ if (ret) {
dev_err(dev, "unable to acquire memory-region\n");
- return -EINVAL;
+ return ret;
}
- wcss->mem_phys = rmem->base;
- wcss->mem_reloc = rmem->base;
- wcss->mem_size = rmem->size;
- wcss->mem_region = devm_ioremap_wc(dev, wcss->mem_phys, wcss->mem_size);
- if (!wcss->mem_region) {
- dev_err(dev, "unable to map memory region: %pa+%pa\n",
- &rmem->base, &rmem->size);
- return -EBUSY;
+ wcss->mem_phys = res.start;
+ wcss->mem_reloc = res.start;
+ wcss->mem_size = resource_size(&res);
+ wcss->mem_region = devm_ioremap_resource_wc(dev, &res);
+ if (IS_ERR(wcss->mem_region)) {
+ dev_err(dev, "unable to map memory region: %pR\n", &res);
+ return PTR_ERR(wcss->mem_region);
}
return 0;
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
index 2c7e519a2254..ee18bf2e8054 100644
--- a/drivers/remoteproc/qcom_wcnss.c
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -526,26 +526,21 @@ static int wcnss_request_irq(struct qcom_wcnss *wcnss,
static int wcnss_alloc_memory_region(struct qcom_wcnss *wcnss)
{
- struct reserved_mem *rmem = NULL;
- struct device_node *node;
-
- node = of_parse_phandle(wcnss->dev->of_node, "memory-region", 0);
- if (node)
- rmem = of_reserved_mem_lookup(node);
- of_node_put(node);
+ struct resource res;
+ int ret;
- if (!rmem) {
+ ret = of_reserved_mem_region_to_resource(wcnss->dev->of_node, 0, &res);
+ if (ret) {
dev_err(wcnss->dev, "unable to resolve memory-region\n");
- return -EINVAL;
+ return ret;
}
- wcnss->mem_phys = wcnss->mem_reloc = rmem->base;
- wcnss->mem_size = rmem->size;
- wcnss->mem_region = devm_ioremap_wc(wcnss->dev, wcnss->mem_phys, wcnss->mem_size);
- if (!wcnss->mem_region) {
- dev_err(wcnss->dev, "unable to map memory region: %pa+%zx\n",
- &rmem->base, wcnss->mem_size);
- return -EBUSY;
+ wcnss->mem_phys = wcnss->mem_reloc = res.start;
+ wcnss->mem_size = resource_size(&res);
+ wcnss->mem_region = devm_ioremap_resource_wc(wcnss->dev, &res);
+ if (IS_ERR(wcnss->mem_region)) {
+ dev_err(wcnss->dev, "unable to map memory region: %pR\n", &res);
+ return PTR_ERR(wcnss->mem_region);
}
return 0;
diff --git a/drivers/remoteproc/rcar_rproc.c b/drivers/remoteproc/rcar_rproc.c
index 921d853594f4..3c25625f966d 100644
--- a/drivers/remoteproc/rcar_rproc.c
+++ b/drivers/remoteproc/rcar_rproc.c
@@ -52,46 +52,36 @@ static int rcar_rproc_prepare(struct rproc *rproc)
{
struct device *dev = rproc->dev.parent;
struct device_node *np = dev->of_node;
- struct of_phandle_iterator it;
struct rproc_mem_entry *mem;
- struct reserved_mem *rmem;
+ int i = 0;
u32 da;
/* Register associated reserved memory regions */
- of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
- while (of_phandle_iterator_next(&it) == 0) {
-
- rmem = of_reserved_mem_lookup(it.node);
- if (!rmem) {
- of_node_put(it.node);
- dev_err(&rproc->dev,
- "unable to acquire memory-region\n");
- return -EINVAL;
- }
+ while (1) {
+ struct resource res;
+ int ret;
+
+ ret = of_reserved_mem_region_to_resource(np, i++, &res);
+ if (ret)
+ return 0;
- if (rmem->base > U32_MAX) {
- of_node_put(it.node);
+ if (res.start > U32_MAX)
return -EINVAL;
- }
		/* No need to translate pa to da, R-Car uses the same map */
- da = rmem->base;
+ da = res.start;
mem = rproc_mem_entry_init(dev, NULL,
- rmem->base,
- rmem->size, da,
+ res.start,
+ resource_size(&res), da,
rcar_rproc_mem_alloc,
rcar_rproc_mem_release,
- it.node->name);
+ res.name);
- if (!mem) {
- of_node_put(it.node);
+ if (!mem)
return -ENOMEM;
- }
rproc_add_carveout(rproc, mem);
}
-
- return 0;
}
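rcar above, and the stm32 and xlnx conversions below, all use the same open-coded walk that treats a failed lookup as end of list; distilled into a sketch (np assumed to be the parent device node):

    static int demo_walk_regions(struct device_node *np)
    {
        for (int i = 0; ; i++) {
            struct resource res;

            /* Any error (typically index out of range) ends the walk. */
            if (of_reserved_mem_region_to_resource(np, i, &res))
                return 0;

            /* ... register a carveout for 'res' here ... */
        }
    }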
static int rcar_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 825672100528..aada2780b343 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -16,29 +16,25 @@
#define pr_fmt(fmt) "%s: " fmt, __func__
+#include <asm/byteorder.h>
#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/device.h>
-#include <linux/panic_notifier.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
#include <linux/dma-mapping.h>
+#include <linux/elf.h>
#include <linux/firmware.h>
-#include <linux/string.h>
-#include <linux/debugfs.h>
-#include <linux/rculist.h>
-#include <linux/remoteproc.h>
-#include <linux/iommu.h>
#include <linux/idr.h>
-#include <linux/elf.h>
-#include <linux/crc32.h>
+#include <linux/iommu.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of_platform.h>
-#include <linux/of_reserved_mem.h>
-#include <linux/virtio_ids.h>
-#include <linux/virtio_ring.h>
-#include <asm/byteorder.h>
+#include <linux/panic_notifier.h>
#include <linux/platform_device.h>
+#include <linux/rculist.h>
+#include <linux/remoteproc.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/virtio_ring.h>
#include "remoteproc_internal.h"
@@ -159,7 +155,6 @@ phys_addr_t rproc_va_to_pa(void *cpu_addr)
WARN_ON(!virt_addr_valid(cpu_addr));
return virt_to_phys(cpu_addr);
}
-EXPORT_SYMBOL(rproc_va_to_pa);
/**
* rproc_da_to_va() - lookup the kernel virtual address for a remoteproc address
@@ -1989,7 +1984,7 @@ EXPORT_SYMBOL(rproc_boot);
int rproc_shutdown(struct rproc *rproc)
{
struct device *dev = &rproc->dev;
- int ret = 0;
+ int ret;
ret = mutex_lock_interruptible(&rproc->lock);
if (ret) {
diff --git a/drivers/remoteproc/st_remoteproc.c b/drivers/remoteproc/st_remoteproc.c
index e6566a9839dc..a07edf7217d2 100644
--- a/drivers/remoteproc/st_remoteproc.c
+++ b/drivers/remoteproc/st_remoteproc.c
@@ -120,43 +120,41 @@ static int st_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
struct device *dev = rproc->dev.parent;
struct device_node *np = dev->of_node;
struct rproc_mem_entry *mem;
- struct reserved_mem *rmem;
- struct of_phandle_iterator it;
- int index = 0;
-
- of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
- while (of_phandle_iterator_next(&it) == 0) {
- rmem = of_reserved_mem_lookup(it.node);
- if (!rmem) {
- of_node_put(it.node);
- dev_err(dev, "unable to acquire memory-region\n");
- return -EINVAL;
- }
+ int entries;
+
+ entries = of_reserved_mem_region_count(np);
+
+ for (int index = 0; index < entries; index++) {
+ struct resource res;
+ int ret;
+
+ ret = of_reserved_mem_region_to_resource(np, index, &res);
+ if (ret)
+ return ret;
/* No need to map vdev buffer */
- if (strcmp(it.node->name, "vdev0buffer")) {
+ if (!strstarts(res.name, "vdev0buffer")) {
/* Register memory region */
mem = rproc_mem_entry_init(dev, NULL,
- (dma_addr_t)rmem->base,
- rmem->size, rmem->base,
+ (dma_addr_t)res.start,
+ resource_size(&res), res.start,
st_rproc_mem_alloc,
st_rproc_mem_release,
- it.node->name);
+						   "%.*s",
+						   (int)(strchrnul(res.name, '@') - res.name),
+						   res.name);
} else {
/* Register reserved memory for vdev buffer allocation */
mem = rproc_of_resm_mem_entry_init(dev, index,
- rmem->size,
- rmem->base,
- it.node->name);
+ resource_size(&res),
+ res.start,
+ "vdev0buffer");
}
- if (!mem) {
- of_node_put(it.node);
+ if (!mem)
return -ENOMEM;
- }
rproc_add_carveout(rproc, mem);
- index++;
}
return rproc_elf_load_rsc_table(rproc, fw);
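The "%.*s" construction above prints the reserved-memory node name with its DT unit address stripped; a standalone sketch with an assumed node name (the (int) cast keeps the precision argument at the type vsnprintf expects):

    #include <linux/printk.h>
    #include <linux/string.h>

    static void demo_print_region_name(const char *name)
    {
        /*
         * e.g. name = "vdev0vring0@10040000"; strchrnul() points at '@',
         * or at the trailing NUL if no unit address is present.
         */
        pr_info("%.*s\n", (int)(strchrnul(name, '@') - name), name);
        /* prints "vdev0vring0" */
    }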
diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
index 431648607d53..c28679d3b43c 100644
--- a/drivers/remoteproc/stm32_rproc.c
+++ b/drivers/remoteproc/stm32_rproc.c
@@ -213,60 +213,52 @@ static int stm32_rproc_prepare(struct rproc *rproc)
{
struct device *dev = rproc->dev.parent;
struct device_node *np = dev->of_node;
- struct of_phandle_iterator it;
struct rproc_mem_entry *mem;
- struct reserved_mem *rmem;
u64 da;
- int index = 0;
+ int index = 0, mr = 0;
/* Register associated reserved memory regions */
- of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
- while (of_phandle_iterator_next(&it) == 0) {
- rmem = of_reserved_mem_lookup(it.node);
- if (!rmem) {
- of_node_put(it.node);
- dev_err(dev, "unable to acquire memory-region\n");
- return -EINVAL;
- }
+ while (1) {
+ struct resource res;
+ int ret;
+
+ ret = of_reserved_mem_region_to_resource(np, mr++, &res);
+ if (ret)
+ return 0;
- if (stm32_rproc_pa_to_da(rproc, rmem->base, &da) < 0) {
- of_node_put(it.node);
- dev_err(dev, "memory region not valid %pa\n",
- &rmem->base);
+ if (stm32_rproc_pa_to_da(rproc, res.start, &da) < 0) {
+ dev_err(dev, "memory region not valid %pR\n", &res);
return -EINVAL;
}
/* No need to map vdev buffer */
- if (strcmp(it.node->name, "vdev0buffer")) {
+ if (!strstarts(res.name, "vdev0buffer")) {
/* Register memory region */
mem = rproc_mem_entry_init(dev, NULL,
- (dma_addr_t)rmem->base,
- rmem->size, da,
+ (dma_addr_t)res.start,
+ resource_size(&res), da,
stm32_rproc_mem_alloc,
stm32_rproc_mem_release,
- it.node->name);
-
+					   "%.*s",
+					   (int)(strchrnul(res.name, '@') - res.name),
+					   res.name);
if (mem)
rproc_coredump_add_segment(rproc, da,
- rmem->size);
+ resource_size(&res));
} else {
/* Register reserved memory for vdev buffer alloc */
mem = rproc_of_resm_mem_entry_init(dev, index,
- rmem->size,
- rmem->base,
- it.node->name);
+ resource_size(&res),
+ res.start,
+ "vdev0buffer");
}
-		if (!mem) {
-			of_node_put(it.node);
+		if (!mem)
			return -ENOMEM;
-		}
rproc_add_carveout(rproc, mem);
index++;
}
-
- return 0;
}
static int stm32_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
diff --git a/drivers/remoteproc/ti_k3_common.c b/drivers/remoteproc/ti_k3_common.c
index 56b71652e449..32aa954dc5be 100644
--- a/drivers/remoteproc/ti_k3_common.c
+++ b/drivers/remoteproc/ti_k3_common.c
@@ -470,13 +470,10 @@ int k3_reserved_mem_init(struct k3_rproc *kproc)
{
struct device *dev = kproc->dev;
struct device_node *np = dev->of_node;
- struct device_node *rmem_np;
- struct reserved_mem *rmem;
int num_rmems;
int ret, i;
- num_rmems = of_property_count_elems_of_size(np, "memory-region",
- sizeof(phandle));
+ num_rmems = of_reserved_mem_region_count(np);
if (num_rmems < 0) {
dev_err(dev, "device does not reserved memory regions (%d)\n",
num_rmems);
@@ -505,23 +502,20 @@ int k3_reserved_mem_init(struct k3_rproc *kproc)
/* use remaining reserved memory regions for static carveouts */
for (i = 0; i < num_rmems; i++) {
- rmem_np = of_parse_phandle(np, "memory-region", i + 1);
- if (!rmem_np)
- return -EINVAL;
+ struct resource res;
- rmem = of_reserved_mem_lookup(rmem_np);
- of_node_put(rmem_np);
- if (!rmem)
- return -EINVAL;
+ ret = of_reserved_mem_region_to_resource(np, i + 1, &res);
+ if (ret)
+ return ret;
- kproc->rmem[i].bus_addr = rmem->base;
+ kproc->rmem[i].bus_addr = res.start;
/* 64-bit address regions currently not supported */
- kproc->rmem[i].dev_addr = (u32)rmem->base;
- kproc->rmem[i].size = rmem->size;
- kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
+ kproc->rmem[i].dev_addr = (u32)res.start;
+ kproc->rmem[i].size = resource_size(&res);
+ kproc->rmem[i].cpu_addr = devm_ioremap_resource_wc(dev, &res);
-		if (!kproc->rmem[i].cpu_addr) {
-			dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
-				i + 1, &rmem->base, &rmem->size);
-			return -ENOMEM;
+		if (IS_ERR(kproc->rmem[i].cpu_addr)) {
+			dev_err(dev, "failed to map reserved memory#%d at %pR\n",
+				i + 1, &res);
+			return PTR_ERR(kproc->rmem[i].cpu_addr);
}
diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c b/drivers/remoteproc/xlnx_r5_remoteproc.c
index 0b7b173d0d26..a7b75235f53e 100644
--- a/drivers/remoteproc/xlnx_r5_remoteproc.c
+++ b/drivers/remoteproc/xlnx_r5_remoteproc.c
@@ -492,53 +492,46 @@ static int add_mem_regions_carveout(struct rproc *rproc)
{
struct rproc_mem_entry *rproc_mem;
struct zynqmp_r5_core *r5_core;
- struct of_phandle_iterator it;
- struct reserved_mem *rmem;
int i = 0;
r5_core = rproc->priv;
/* Register associated reserved memory regions */
- of_phandle_iterator_init(&it, r5_core->np, "memory-region", NULL, 0);
+ while (1) {
+ int err;
+ struct resource res;
- while (of_phandle_iterator_next(&it) == 0) {
- rmem = of_reserved_mem_lookup(it.node);
- if (!rmem) {
- of_node_put(it.node);
- dev_err(&rproc->dev, "unable to acquire memory-region\n");
- return -EINVAL;
- }
+ err = of_reserved_mem_region_to_resource(r5_core->np, i, &res);
+ if (err)
+ return 0;
- if (!strcmp(it.node->name, "vdev0buffer")) {
+ if (strstarts(res.name, "vdev0buffer")) {
/* Init reserved memory for vdev buffer */
rproc_mem = rproc_of_resm_mem_entry_init(&rproc->dev, i,
- rmem->size,
- rmem->base,
- it.node->name);
+ resource_size(&res),
+ res.start,
+ "vdev0buffer");
} else {
/* Register associated reserved memory regions */
rproc_mem = rproc_mem_entry_init(&rproc->dev, NULL,
- (dma_addr_t)rmem->base,
- rmem->size, rmem->base,
+ (dma_addr_t)res.start,
+ resource_size(&res), res.start,
zynqmp_r5_mem_region_map,
zynqmp_r5_mem_region_unmap,
- it.node->name);
+						 "%.*s",
+						 (int)(strchrnul(res.name, '@') - res.name),
+						 res.name);
}
- if (!rproc_mem) {
- of_node_put(it.node);
+ if (!rproc_mem)
return -ENOMEM;
- }
rproc_add_carveout(rproc, rproc_mem);
- rproc_coredump_add_segment(rproc, rmem->base, rmem->size);
+ rproc_coredump_add_segment(rproc, res.start, resource_size(&res));
- dev_dbg(&rproc->dev, "reserved mem carveout %s addr=%llx, size=0x%llx",
- it.node->name, rmem->base, rmem->size);
+ dev_dbg(&rproc->dev, "reserved mem carveout %pR\n", &res);
i++;
}
-
- return 0;
}
static int add_sram_carveouts(struct rproc *rproc)
@@ -808,7 +801,6 @@ static int zynqmp_r5_get_rsc_table_va(struct zynqmp_r5_core *r5_core)
struct device *dev = r5_core->dev;
struct rsc_tbl_data *rsc_data_va;
struct resource res_mem;
- struct device_node *np;
int ret;
/*
@@ -818,14 +810,7 @@ static int zynqmp_r5_get_rsc_table_va(struct zynqmp_r5_core *r5_core)
* contains that data structure which holds resource table address, size
* and some magic number to validate correct resource table entry.
*/
- np = of_parse_phandle(r5_core->np, "memory-region", 0);
- if (!np) {
- dev_err(dev, "failed to get memory region dev node\n");
- return -EINVAL;
- }
-
- ret = of_address_to_resource(np, 0, &res_mem);
- of_node_put(np);
+ ret = of_reserved_mem_region_to_resource(r5_core->np, 0, &res_mem);
if (ret) {
dev_err(dev, "failed to get memory-region resource addr\n");
return -EINVAL;
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 820a6ca5b1d7..5ea096acc858 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -1395,6 +1395,19 @@ static int qcom_glink_announce_create(struct rpmsg_device *rpdev)
return 0;
}
+static void qcom_glink_remove_rpmsg_device(struct qcom_glink *glink, struct glink_channel *channel)
+{
+ struct rpmsg_channel_info chinfo;
+
+ if (channel->rpdev) {
+ strscpy_pad(chinfo.name, channel->name, sizeof(chinfo.name));
+ chinfo.src = RPMSG_ADDR_ANY;
+ chinfo.dst = RPMSG_ADDR_ANY;
+ rpmsg_unregister_device(glink->dev, &chinfo);
+ }
+ channel->rpdev = NULL;
+}
+
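The helper relies on RPMSG_ADDR_ANY acting as a wildcard when the rpmsg core looks up the child device to unregister; a standalone sketch with an assumed channel name:

    #include <linux/rpmsg.h>
    #include <linux/string.h>

    static void demo_remove_channel(struct device *parent)
    {
        struct rpmsg_channel_info chinfo = {
            .src = RPMSG_ADDR_ANY, /* match any local address  */
            .dst = RPMSG_ADDR_ANY, /* match any remote address */
        };

        strscpy_pad(chinfo.name, "demo-channel", sizeof(chinfo.name));

        /* Unregisters the matching child rpmsg device, if one exists. */
        rpmsg_unregister_device(parent, &chinfo);
    }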
static void qcom_glink_destroy_ept(struct rpmsg_endpoint *ept)
{
struct glink_channel *channel = to_glink_channel(ept);
@@ -1406,7 +1419,7 @@ static void qcom_glink_destroy_ept(struct rpmsg_endpoint *ept)
spin_unlock_irqrestore(&channel->recv_lock, flags);
/* Decouple the potential rpdev from the channel */
- channel->rpdev = NULL;
+ qcom_glink_remove_rpmsg_device(glink, channel);
qcom_glink_send_close_req(glink, channel);
}
@@ -1697,7 +1710,6 @@ free_channel:
static void qcom_glink_rx_close(struct qcom_glink *glink, unsigned int rcid)
{
- struct rpmsg_channel_info chinfo;
struct glink_channel *channel;
unsigned long flags;
@@ -1713,14 +1725,7 @@ static void qcom_glink_rx_close(struct qcom_glink *glink, unsigned int rcid)
/* cancel pending rx_done work */
cancel_work_sync(&channel->intent_work);
- if (channel->rpdev) {
- strscpy_pad(chinfo.name, channel->name, sizeof(chinfo.name));
- chinfo.src = RPMSG_ADDR_ANY;
- chinfo.dst = RPMSG_ADDR_ANY;
-
- rpmsg_unregister_device(glink->dev, &chinfo);
- }
- channel->rpdev = NULL;
+ qcom_glink_remove_rpmsg_device(glink, channel);
qcom_glink_send_close_ack(glink, channel);
@@ -1734,7 +1739,6 @@ static void qcom_glink_rx_close(struct qcom_glink *glink, unsigned int rcid)
static void qcom_glink_rx_close_ack(struct qcom_glink *glink, unsigned int lcid)
{
- struct rpmsg_channel_info chinfo;
struct glink_channel *channel;
unsigned long flags;
@@ -1756,14 +1760,7 @@ static void qcom_glink_rx_close_ack(struct qcom_glink *glink, unsigned int lcid)
spin_unlock_irqrestore(&glink->idr_lock, flags);
/* Decouple the potential rpdev from the channel */
- if (channel->rpdev) {
- strscpy(chinfo.name, channel->name, sizeof(chinfo.name));
- chinfo.src = RPMSG_ADDR_ANY;
- chinfo.dst = RPMSG_ADDR_ANY;
-
- rpmsg_unregister_device(glink->dev, &chinfo);
- }
- channel->rpdev = NULL;
+ qcom_glink_remove_rpmsg_device(glink, channel);
kref_put(&channel->refcount, qcom_glink_channel_release);
}
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index 1eb52356b996..ee4f17bb4db4 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -5,6 +5,7 @@
* Copyright (C) 2014-2016 Glider bvba
*/
+#include <linux/bitfield.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -524,8 +525,7 @@ static int __init renesas_soc_init(void)
eshi, eslo);
}
- if (soc->id &&
- ((product & id->mask) >> __ffs(id->mask)) != soc->id) {
+ if (soc->id && field_get(id->mask, product) != soc->id) {
pr_warn("SoC mismatch (product = 0x%x)\n", product);
ret = -ENODEV;
goto free_soc_dev_attr;
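field_get() from <linux/bitfield.h> is the runtime-mask counterpart of FIELD_GET(); a small sketch of the equivalence this change relies on (mask and value chosen purely for illustration):

    #include <linux/bitfield.h>
    #include <linux/bits.h>

    static u32 demo_extract(u32 product)
    {
        u32 mask = GENMASK(15, 8);

        /* Same as (product & mask) >> __ffs(mask); 0x00875000 -> 0x50. */
        return field_get(mask, product);
    }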
diff --git a/drivers/soc/renesas/rz-sysc.c b/drivers/soc/renesas/rz-sysc.c
index 19c1e666279b..ae727d9c8cc5 100644
--- a/drivers/soc/renesas/rz-sysc.c
+++ b/drivers/soc/renesas/rz-sysc.c
@@ -5,6 +5,7 @@
* Copyright (C) 2024 Renesas Electronics Corp.
*/
+#include <linux/bitfield.h>
#include <linux/cleanup.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
@@ -16,8 +17,6 @@
#include "rz-sysc.h"
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-
/**
* struct rz_sysc - RZ SYSC private data structure
* @base: SYSC base address
diff --git a/drivers/video/fbdev/gbefb.c b/drivers/video/fbdev/gbefb.c
index 4c36a3e409be..cb6ff15a21db 100644
--- a/drivers/video/fbdev/gbefb.c
+++ b/drivers/video/fbdev/gbefb.c
@@ -12,6 +12,7 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/fb.h>
@@ -65,7 +66,7 @@ struct gbefb_par {
static unsigned int gbe_mem_size = CONFIG_FB_GBE_MEM * 1024*1024;
static void *gbe_mem;
static dma_addr_t gbe_dma_addr;
-static unsigned long gbe_mem_phys;
+static phys_addr_t gbe_mem_phys;
static struct {
uint16_t *cpu;
@@ -1183,7 +1184,7 @@ static int gbefb_probe(struct platform_device *p_dev)
goto out_release_mem_region;
}
- gbe_mem_phys = (unsigned long) gbe_dma_addr;
+ gbe_mem_phys = dma_to_phys(&p_dev->dev, gbe_dma_addr);
}
par = info->par;
diff --git a/drivers/video/fbdev/gxt4500.c b/drivers/video/fbdev/gxt4500.c
index 15a82c6b609e..1ee0a1efa18e 100644
--- a/drivers/video/fbdev/gxt4500.c
+++ b/drivers/video/fbdev/gxt4500.c
@@ -704,7 +704,7 @@ static int gxt4500_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
info->var = var;
if (gxt4500_set_par(info)) {
- printk(KERN_ERR "gxt4500: cannot set video mode\n");
+ dev_err(&pdev->dev, "cannot set video mode\n");
goto err_free_cmap;
}
diff --git a/drivers/video/fbdev/i810/i810_main.c b/drivers/video/fbdev/i810/i810_main.c
index d73a795fe1be..10b914a24114 100644
--- a/drivers/video/fbdev/i810/i810_main.c
+++ b/drivers/video/fbdev/i810/i810_main.c
@@ -1012,7 +1012,7 @@ static int i810_check_params(struct fb_var_screeninfo *var,
var->bits_per_pixel);
vidmem = line_length * info->var.yres;
if (vxres < var->xres) {
- printk("i810fb: required video memory, "
+ dev_info(&par->dev->dev, "required video memory, "
"%d bytes, for %dx%d-%d (virtual) "
"is out of range\n",
vidmem, vxres, vyres,
@@ -1067,9 +1067,9 @@ static int i810_check_params(struct fb_var_screeninfo *var,
|(info->monspecs.hfmax-HFMAX)
|(info->monspecs.vfmin-VFMIN)
|(info->monspecs.vfmax-VFMAX);
- printk("i810fb: invalid video mode%s\n",
- default_sync ? "" : ". Specifying "
- "vsyncN/hsyncN parameters may help");
+ dev_err(&par->dev->dev, "invalid video mode%s\n",
+ default_sync ? "" : ". Specifying "
+ "vsyncN/hsyncN parameters may help");
retval = -EINVAL;
}
}
@@ -1674,19 +1674,19 @@ static int i810_alloc_agp_mem(struct fb_info *info)
size = par->fb.size + par->iring.size;
if (!(bridge = agp_backend_acquire(par->dev))) {
- printk("i810fb_alloc_fbmem: cannot acquire agpgart\n");
+ dev_warn(&par->dev->dev, "cannot acquire agpgart\n");
return -ENODEV;
}
if (!(par->i810_gtt.i810_fb_memory =
agp_allocate_memory(bridge, size >> 12, AGP_NORMAL_MEMORY))) {
- printk("i810fb_alloc_fbmem: can't allocate framebuffer "
+ dev_warn(&par->dev->dev, "can't allocate framebuffer "
"memory\n");
agp_backend_release(bridge);
return -ENOMEM;
}
if (agp_bind_memory(par->i810_gtt.i810_fb_memory,
par->fb.offset)) {
- printk("i810fb_alloc_fbmem: can't bind framebuffer memory\n");
+ dev_warn(&par->dev->dev, "can't bind framebuffer memory\n");
agp_backend_release(bridge);
return -EBUSY;
}
@@ -1694,14 +1694,14 @@ static int i810_alloc_agp_mem(struct fb_info *info)
if (!(par->i810_gtt.i810_cursor_memory =
agp_allocate_memory(bridge, par->cursor_heap.size >> 12,
AGP_PHYSICAL_MEMORY))) {
- printk("i810fb_alloc_cursormem: can't allocate "
+ dev_warn(&par->dev->dev, "can't allocate "
"cursor memory\n");
agp_backend_release(bridge);
return -ENOMEM;
}
if (agp_bind_memory(par->i810_gtt.i810_cursor_memory,
par->cursor_heap.offset)) {
- printk("i810fb_alloc_cursormem: cannot bind cursor memory\n");
+ dev_warn(&par->dev->dev, "cannot bind cursor memory\n");
agp_backend_release(bridge);
return -EBUSY;
}
@@ -1844,7 +1844,7 @@ static int i810_allocate_pci_resource(struct i810fb_par *par,
int err;
if ((err = pci_enable_device(par->dev))) {
- printk("i810fb_init: cannot enable device\n");
+ dev_err(&par->dev->dev, "cannot enable device\n");
return err;
}
par->res_flags |= PCI_DEVICE_ENABLED;
@@ -1859,14 +1859,14 @@ static int i810_allocate_pci_resource(struct i810fb_par *par,
par->mmio_start_phys = pci_resource_start(par->dev, 0);
}
if (!par->aperture.size) {
- printk("i810fb_init: device is disabled\n");
+ dev_warn(&par->dev->dev, "device is disabled\n");
return -ENOMEM;
}
if (!request_mem_region(par->aperture.physical,
par->aperture.size,
i810_pci_list[entry->driver_data])) {
- printk("i810fb_init: cannot request framebuffer region\n");
+ dev_warn(&par->dev->dev, "cannot request framebuffer region\n");
return -ENODEV;
}
par->res_flags |= FRAMEBUFFER_REQ;
@@ -1874,14 +1874,14 @@ static int i810_allocate_pci_resource(struct i810fb_par *par,
par->aperture.virtual = ioremap_wc(par->aperture.physical,
par->aperture.size);
if (!par->aperture.virtual) {
- printk("i810fb_init: cannot remap framebuffer region\n");
+ dev_warn(&par->dev->dev, "cannot remap framebuffer region\n");
return -ENODEV;
}
if (!request_mem_region(par->mmio_start_phys,
MMIO_SIZE,
i810_pci_list[entry->driver_data])) {
- printk("i810fb_init: cannot request mmio region\n");
+ dev_warn(&par->dev->dev, "cannot request mmio region\n");
return -ENODEV;
}
par->res_flags |= MMIO_REQ;
@@ -1889,7 +1889,7 @@ static int i810_allocate_pci_resource(struct i810fb_par *par,
par->mmio_start_virtual = ioremap(par->mmio_start_phys,
MMIO_SIZE);
if (!par->mmio_start_virtual) {
- printk("i810fb_init: cannot remap mmio region\n");
+ dev_warn(&par->dev->dev, "cannot remap mmio region\n");
return -ENODEV;
}
@@ -1921,12 +1921,12 @@ static void i810fb_find_init_mode(struct fb_info *info)
}
if (!err)
- printk("i810fb_init_pci: DDC probe successful\n");
+ dev_info(&par->dev->dev, "DDC probe successful\n");
fb_edid_to_monspecs(par->edid, specs);
if (specs->modedb == NULL)
- printk("i810fb_init_pci: Unable to get Mode Database\n");
+ dev_info(&par->dev->dev, "Unable to get Mode Database\n");
fb_videomode_to_modelist(specs->modedb, specs->modedb_len,
&info->modelist);
@@ -2072,7 +2072,7 @@ static int i810fb_init_pci(struct pci_dev *dev,
if (err < 0) {
i810fb_release_resource(info, par);
- printk("i810fb_init: cannot register framebuffer device\n");
+ dev_warn(&par->dev->dev, "cannot register framebuffer device\n");
return err;
}
@@ -2084,10 +2084,10 @@ static int i810fb_init_pci(struct pci_dev *dev,
vfreq = hfreq/(info->var.yres + info->var.upper_margin +
info->var.vsync_len + info->var.lower_margin);
- printk("I810FB: fb%d : %s v%d.%d.%d%s\n"
- "I810FB: Video RAM : %dK\n"
- "I810FB: Monitor : H: %d-%d KHz V: %d-%d Hz\n"
- "I810FB: Mode : %dx%d-%dbpp@%dHz\n",
+ dev_info(&par->dev->dev, "fb%d : %s v%d.%d.%d%s\n"
+ "Video RAM : %dK\n"
+ "Monitor : H: %d-%d KHz V: %d-%d Hz\n"
+ "Mode : %dx%d-%dbpp@%dHz\n",
info->node,
i810_pci_list[entry->driver_data],
VERSION_MAJOR, VERSION_MINOR, VERSION_TEENIE, BRANCH_VERSION,
@@ -2137,7 +2137,7 @@ static void i810fb_remove_pci(struct pci_dev *dev)
unregister_framebuffer(info);
i810fb_release_resource(info, par);
- printk("cleanup_module: unloaded i810 framebuffer device\n");
+ dev_info(&par->dev->dev, "unloaded i810 framebuffer device\n");
}
#ifndef MODULE
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index b96a8a96bce8..e418eee825fb 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -419,12 +419,12 @@ static int pxafb_adjust_timing(struct pxafb_info *fbi,
var->yres = max_t(int, var->yres, MIN_YRES);
if (!(fbi->lccr0 & LCCR0_LCDT)) {
- clamp_val(var->hsync_len, 1, 64);
- clamp_val(var->vsync_len, 1, 64);
- clamp_val(var->left_margin, 1, 255);
- clamp_val(var->right_margin, 1, 255);
- clamp_val(var->upper_margin, 1, 255);
- clamp_val(var->lower_margin, 1, 255);
+ var->hsync_len = clamp(var->hsync_len, 1, 64);
+ var->vsync_len = clamp(var->vsync_len, 1, 64);
+ var->left_margin = clamp(var->left_margin, 1, 255);
+ var->right_margin = clamp(var->right_margin, 1, 255);
+ var->upper_margin = clamp(var->upper_margin, 1, 255);
+ var->lower_margin = clamp(var->lower_margin, 1, 255);
}
/* make sure each line is aligned on word boundary */
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index aa6cc0a8151a..83dd31fa1fab 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -680,7 +680,7 @@ static int ssd1307fb_probe(struct i2c_client *client)
if (!ssd1307fb_defio) {
dev_err(dev, "Couldn't allocate deferred io.\n");
ret = -ENOMEM;
- goto fb_alloc_error;
+ goto fb_defio_error;
}
ssd1307fb_defio->delay = HZ / refreshrate;
@@ -757,6 +757,8 @@ regulator_enable_error:
regulator_disable(par->vbat_reg);
reset_oled_error:
fb_deferred_io_cleanup(info);
+fb_defio_error:
+ __free_pages(vmem, get_order(vmem_size));
fb_alloc_error:
framebuffer_release(info);
return ret;
diff --git a/drivers/video/fbdev/tcx.c b/drivers/video/fbdev/tcx.c
index f9a0085ad72b..ca9e84e8d860 100644
--- a/drivers/video/fbdev/tcx.c
+++ b/drivers/video/fbdev/tcx.c
@@ -428,7 +428,7 @@ static int tcx_probe(struct platform_device *op)
j = i;
break;
}
- par->mmap_map[i].poff = op->resource[j].start;
+ par->mmap_map[i].poff = op->resource[j].start - info->fix.smem_start;
}
info->fbops = &tcx_ops;
diff --git a/drivers/video/fbdev/tridentfb.c b/drivers/video/fbdev/tridentfb.c
index 516cf2a18757..17b7253b8fbe 100644
--- a/drivers/video/fbdev/tridentfb.c
+++ b/drivers/video/fbdev/tridentfb.c
@@ -1631,7 +1631,7 @@ static int trident_pci_probe(struct pci_dev *dev,
}
if (noaccel) {
- printk(KERN_DEBUG "disabling acceleration\n");
+ dev_dbg(&dev->dev, "disabling acceleration\n");
info->flags |= FBINFO_HWACCEL_DISABLED;
info->pixmap.scan_align = 1;
}
@@ -1693,7 +1693,7 @@ static int trident_pci_probe(struct pci_dev *dev,
info->var.activate |= FB_ACTIVATE_NOW;
info->device = &dev->dev;
if (register_framebuffer(info) < 0) {
- printk(KERN_ERR "tridentfb: could not register framebuffer\n");
+ dev_err(&dev->dev, "could not register framebuffer\n");
fb_dealloc_cmap(&info->cmap);
err = -EINVAL;
goto out_unmap2;
diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
index a81df8865143..f135033c22fb 100644
--- a/drivers/video/fbdev/vesafb.c
+++ b/drivers/video/fbdev/vesafb.c
@@ -314,8 +314,8 @@ static int vesafb_probe(struct platform_device *dev)
#endif
if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
- printk(KERN_WARNING
- "vesafb: cannot reserve video memory at 0x%lx\n",
+ dev_warn(&dev->dev,
+ "cannot reserve video memory at 0x%lx\n",
vesafb_fix.smem_start);
/* We cannot make this fatal. Sometimes this comes from magic
spaces our resource handlers simply don't know about */
@@ -333,12 +333,12 @@ static int vesafb_probe(struct platform_device *dev)
par->base = si->lfb_base;
par->size = size_total;
- printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
+ dev_info(&dev->dev, "mode is %dx%dx%d, linelength=%d, pages=%d\n",
vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel,
vesafb_fix.line_length, si->pages);
if (si->vesapm_seg) {
- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
+ dev_info(&dev->dev, "protected mode interface info at %04x:%04x\n",
si->vesapm_seg, si->vesapm_off);
}
@@ -352,9 +352,10 @@ static int vesafb_probe(struct platform_device *dev)
pmi_base = (unsigned short *)phys_to_virt(pmi_phys);
pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
- printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
+ dev_info(&dev->dev, "pmi: set display start = %p, set palette = %p\n",
+ pmi_start, pmi_pal);
if (pmi_base[3]) {
- printk(KERN_INFO "vesafb: pmi: ports = ");
+ dev_info(&dev->dev, "pmi: ports = ");
			for (i = pmi_base[3]/2; pmi_base[i] != 0xffff; i++)
-				printk("%x ", pmi_base[i]);
-			printk("\n");
+				pr_cont("%x ", pmi_base[i]);
+			pr_cont("\n");
@@ -365,14 +366,14 @@ static int vesafb_probe(struct platform_device *dev)
* Rules are: we have to set up a descriptor for the requested
* memory area and pass it in the ES register to the BIOS function.
*/
- printk(KERN_INFO "vesafb: can't handle memory requests, pmi disabled\n");
+ dev_info(&dev->dev, "can't handle memory requests, pmi disabled\n");
ypan = pmi_setpal = 0;
}
}
}
if (vesafb_defined.bits_per_pixel == 8 && !pmi_setpal && !vga_compat) {
- printk(KERN_WARNING "vesafb: hardware palette is unchangeable,\n"
+ dev_warn(&dev->dev, "hardware palette is unchangeable,\n"
" colors may be incorrect\n");
vesafb_fix.visual = FB_VISUAL_STATIC_PSEUDOCOLOR;
}
@@ -380,10 +381,10 @@ static int vesafb_probe(struct platform_device *dev)
vesafb_defined.xres_virtual = vesafb_defined.xres;
vesafb_defined.yres_virtual = vesafb_fix.smem_len / vesafb_fix.line_length;
if (ypan && vesafb_defined.yres_virtual > vesafb_defined.yres) {
- printk(KERN_INFO "vesafb: scrolling: %s using protected mode interface, yres_virtual=%d\n",
+ dev_info(&dev->dev, "scrolling: %s using protected mode interface, yres_virtual=%d\n",
(ypan > 1) ? "ywrap" : "ypan",vesafb_defined.yres_virtual);
} else {
- printk(KERN_INFO "vesafb: scrolling: redraw\n");
+ dev_info(&dev->dev, "scrolling: redraw\n");
vesafb_defined.yres_virtual = vesafb_defined.yres;
ypan = 0;
}
@@ -410,7 +411,7 @@ static int vesafb_probe(struct platform_device *dev)
vesafb_defined.bits_per_pixel;
}
- printk(KERN_INFO "vesafb: %s: "
+ dev_info(&dev->dev, "%s: "
"size=%d:%d:%d:%d, shift=%d:%d:%d:%d\n",
(vesafb_defined.bits_per_pixel > 8) ?
"Truecolor" : (vga_compat || pmi_setpal) ?
@@ -453,14 +454,14 @@ static int vesafb_probe(struct platform_device *dev)
}
if (!info->screen_base) {
- printk(KERN_ERR
- "vesafb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n",
+ dev_err(&dev->dev,
+ "abort, cannot ioremap video memory 0x%x @ 0x%lx\n",
vesafb_fix.smem_len, vesafb_fix.smem_start);
err = -EIO;
goto err_release_region;
}
- printk(KERN_INFO "vesafb: framebuffer at 0x%lx, mapped to 0x%p, "
+ dev_info(&dev->dev, "framebuffer at 0x%lx, mapped to 0x%p, "
"using %dk, total %dk\n",
vesafb_fix.smem_start, info->screen_base,
size_remap/1024, size_total/1024);
diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c
index eedab14c7d51..6b81337a4909 100644
--- a/drivers/video/fbdev/vga16fb.c
+++ b/drivers/video/fbdev/vga16fb.c
@@ -1319,7 +1319,12 @@ static int vga16fb_probe(struct platform_device *dev)
if (ret)
return ret;
- printk(KERN_DEBUG "vga16fb: initializing\n");
+ dev_dbg(&dev->dev, "initializing\n");
+ if (!request_mem_region(vga16fb_fix.smem_start, vga16fb_fix.smem_len,
+				"vga16fb")) {
+ dev_err(&dev->dev, "cannot reserve video memory at 0x%lx\n",
+ vga16fb_fix.smem_start);
+ }
info = framebuffer_alloc(sizeof(struct vga16fb_par), &dev->dev);
if (!info) {
@@ -1331,12 +1336,12 @@ static int vga16fb_probe(struct platform_device *dev)
info->screen_base = (void __iomem *)VGA_MAP_MEM(VGA_FB_PHYS_BASE, 0);
if (!info->screen_base) {
- printk(KERN_ERR "vga16fb: unable to map device\n");
+ dev_err(&dev->dev, "unable to map device\n");
ret = -ENOMEM;
goto err_ioremap;
}
- printk(KERN_INFO "vga16fb: mapped to 0x%p\n", info->screen_base);
+ dev_info(&dev->dev, "mapped to 0x%p\n", info->screen_base);
par = info->par;
par->isVGA = screen_info_video_type(si) == VIDEO_TYPE_VGAC;
@@ -1364,13 +1369,13 @@ static int vga16fb_probe(struct platform_device *dev)
i = (info->var.bits_per_pixel == 8) ? 256 : 16;
ret = fb_alloc_cmap(&info->cmap, i, 0);
if (ret) {
- printk(KERN_ERR "vga16fb: unable to allocate colormap\n");
+ dev_err(&dev->dev, "unable to allocate colormap\n");
ret = -ENOMEM;
goto err_alloc_cmap;
}
if (vga16fb_check_var(&info->var, info)) {
- printk(KERN_ERR "vga16fb: unable to validate variable\n");
+ dev_err(&dev->dev, "unable to validate variable\n");
ret = -EINVAL;
goto err_check_var;
}
@@ -1381,7 +1386,7 @@ static int vga16fb_probe(struct platform_device *dev)
if (ret)
goto err_check_var;
if (register_framebuffer(info) < 0) {
- printk(KERN_ERR "vga16fb: unable to register framebuffer\n");
+ dev_err(&dev->dev, "unable to register framebuffer\n");
ret = -EINVAL;
goto err_check_var;
}
@@ -1398,6 +1403,8 @@ static int vga16fb_probe(struct platform_device *dev)
err_ioremap:
framebuffer_release(info);
err_fb_alloc:
+ release_mem_region(vga16fb_fix.smem_start,
+ vga16fb_fix.smem_len);
return ret;
}
@@ -1407,6 +1414,8 @@ static void vga16fb_remove(struct platform_device *dev)
if (info)
unregister_framebuffer(info);
+ release_mem_region(vga16fb_fix.smem_start,
+ vga16fb_fix.smem_len);
}
static const struct platform_device_id vga16fb_driver_id_table[] = {
diff --git a/drivers/virt/Kconfig b/drivers/virt/Kconfig
index d8c848cf09a6..52eb7e4ba71f 100644
--- a/drivers/virt/Kconfig
+++ b/drivers/virt/Kconfig
@@ -47,6 +47,6 @@ source "drivers/virt/nitro_enclaves/Kconfig"
source "drivers/virt/acrn/Kconfig"
-source "drivers/virt/coco/Kconfig"
-
endif
+
+source "drivers/virt/coco/Kconfig"
diff --git a/drivers/virt/coco/Kconfig b/drivers/virt/coco/Kconfig
index 819a97e8ba99..df1cfaf26c65 100644
--- a/drivers/virt/coco/Kconfig
+++ b/drivers/virt/coco/Kconfig
@@ -3,6 +3,7 @@
# Confidential computing related collateral
#
+if VIRT_DRIVERS
source "drivers/virt/coco/efi_secret/Kconfig"
source "drivers/virt/coco/pkvm-guest/Kconfig"
@@ -14,3 +15,7 @@ source "drivers/virt/coco/tdx-guest/Kconfig"
source "drivers/virt/coco/arm-cca-guest/Kconfig"
source "drivers/virt/coco/guest/Kconfig"
+endif
+
+config TSM
+ bool
diff --git a/drivers/virt/coco/Makefile b/drivers/virt/coco/Makefile
index f918bbb61737..cb52021912b3 100644
--- a/drivers/virt/coco/Makefile
+++ b/drivers/virt/coco/Makefile
@@ -7,4 +7,5 @@ obj-$(CONFIG_ARM_PKVM_GUEST) += pkvm-guest/
obj-$(CONFIG_SEV_GUEST) += sev-guest/
obj-$(CONFIG_INTEL_TDX_GUEST) += tdx-guest/
obj-$(CONFIG_ARM_CCA_GUEST) += arm-cca-guest/
+obj-$(CONFIG_TSM) += tsm-core.o
obj-$(CONFIG_TSM_GUEST) += guest/
diff --git a/drivers/virt/coco/tsm-core.c b/drivers/virt/coco/tsm-core.c
new file mode 100644
index 000000000000..f027876a2f19
--- /dev/null
+++ b/drivers/virt/coco/tsm-core.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2024-2025 Intel Corporation. All rights reserved. */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/tsm.h>
+#include <linux/pci.h>
+#include <linux/rwsem.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/cleanup.h>
+#include <linux/pci-tsm.h>
+#include <linux/pci-ide.h>
+
+static struct class *tsm_class;
+static DECLARE_RWSEM(tsm_rwsem);
+static DEFINE_IDA(tsm_ida);
+
+static int match_id(struct device *dev, const void *data)
+{
+ struct tsm_dev *tsm_dev = container_of(dev, struct tsm_dev, dev);
+ int id = *(const int *)data;
+
+ return tsm_dev->id == id;
+}
+
+struct tsm_dev *find_tsm_dev(int id)
+{
+ struct device *dev = class_find_device(tsm_class, NULL, &id, match_id);
+
+ if (!dev)
+ return NULL;
+ return container_of(dev, struct tsm_dev, dev);
+}
+
+static struct tsm_dev *alloc_tsm_dev(struct device *parent)
+{
+ struct device *dev;
+ int id;
+
+ struct tsm_dev *tsm_dev __free(kfree) =
+ kzalloc(sizeof(*tsm_dev), GFP_KERNEL);
+ if (!tsm_dev)
+ return ERR_PTR(-ENOMEM);
+
+ id = ida_alloc(&tsm_ida, GFP_KERNEL);
+ if (id < 0)
+ return ERR_PTR(id);
+
+ tsm_dev->id = id;
+ dev = &tsm_dev->dev;
+ dev->parent = parent;
+ dev->class = tsm_class;
+ device_initialize(dev);
+
+ return no_free_ptr(tsm_dev);
+}
+
+static struct tsm_dev *tsm_register_pci_or_reset(struct tsm_dev *tsm_dev,
+ struct pci_tsm_ops *pci_ops)
+{
+ int rc;
+
+ if (!pci_ops)
+ return tsm_dev;
+
+ tsm_dev->pci_ops = pci_ops;
+ rc = pci_tsm_register(tsm_dev);
+ if (rc) {
+ dev_err(tsm_dev->dev.parent,
+ "PCI/TSM registration failure: %d\n", rc);
+ device_unregister(&tsm_dev->dev);
+ return ERR_PTR(rc);
+ }
+
+ /* Notify TSM userspace that PCI/TSM operations are now possible */
+ kobject_uevent(&tsm_dev->dev.kobj, KOBJ_CHANGE);
+ return tsm_dev;
+}
+
+struct tsm_dev *tsm_register(struct device *parent, struct pci_tsm_ops *pci_ops)
+{
+ struct tsm_dev *tsm_dev __free(put_tsm_dev) = alloc_tsm_dev(parent);
+ struct device *dev;
+ int rc;
+
+ if (IS_ERR(tsm_dev))
+ return tsm_dev;
+
+ dev = &tsm_dev->dev;
+ rc = dev_set_name(dev, "tsm%d", tsm_dev->id);
+ if (rc)
+ return ERR_PTR(rc);
+
+ rc = device_add(dev);
+ if (rc)
+ return ERR_PTR(rc);
+
+ return tsm_register_pci_or_reset(no_free_ptr(tsm_dev), pci_ops);
+}
+EXPORT_SYMBOL_GPL(tsm_register);
+
+void tsm_unregister(struct tsm_dev *tsm_dev)
+{
+ if (tsm_dev->pci_ops)
+ pci_tsm_unregister(tsm_dev);
+ device_unregister(&tsm_dev->dev);
+}
+EXPORT_SYMBOL_GPL(tsm_unregister);
+
+/* Must be invoked between tsm_register() and tsm_unregister() */
+int tsm_ide_stream_register(struct pci_ide *ide)
+{
+ struct pci_dev *pdev = ide->pdev;
+ struct pci_tsm *tsm = pdev->tsm;
+ struct tsm_dev *tsm_dev = tsm->tsm_dev;
+ int rc;
+
+ rc = sysfs_create_link(&tsm_dev->dev.kobj, &pdev->dev.kobj, ide->name);
+ if (rc)
+ return rc;
+
+ ide->tsm_dev = tsm_dev;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tsm_ide_stream_register);
+
+void tsm_ide_stream_unregister(struct pci_ide *ide)
+{
+ struct tsm_dev *tsm_dev = ide->tsm_dev;
+
+ ide->tsm_dev = NULL;
+ sysfs_remove_link(&tsm_dev->dev.kobj, ide->name);
+}
+EXPORT_SYMBOL_GPL(tsm_ide_stream_unregister);
+
+static void tsm_release(struct device *dev)
+{
+ struct tsm_dev *tsm_dev = container_of(dev, typeof(*tsm_dev), dev);
+
+ ida_free(&tsm_ida, tsm_dev->id);
+ kfree(tsm_dev);
+}
+
+static int __init tsm_init(void)
+{
+ tsm_class = class_create("tsm");
+ if (IS_ERR(tsm_class))
+ return PTR_ERR(tsm_class);
+
+ tsm_class->dev_release = tsm_release;
+ return 0;
+}
+module_init(tsm_init)
+
+static void __exit tsm_exit(void)
+{
+ class_destroy(tsm_class);
+}
+module_exit(tsm_exit)
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TEE Security Manager Class Device");
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 05008d937e40..d3b9df7d466b 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -969,6 +969,14 @@ config RENESAS_WDT
This driver adds watchdog support for the integrated watchdogs in the
Renesas R-Car and other SH-Mobile SoCs (usually named RWDT or SWDT).
+config RENESAS_WWDT
+ tristate "Renesas Window WWDT Watchdog"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ select WATCHDOG_CORE
+ help
+ This driver adds watchdog support for the window watchdog timer
+ (WWDT) found in some Renesas R-Car Gen3 and later SoCs.
+
config RENESAS_RZAWDT
tristate "Renesas RZ/A WDT Watchdog"
depends on ARCH_RENESAS || COMPILE_TEST
@@ -1976,10 +1984,10 @@ config LANTIQ_WDT
config LOONGSON1_WDT
tristate "Loongson1 SoC hardware watchdog"
- depends on MACH_LOONGSON32 || COMPILE_TEST
+ depends on MACH_LOONGSON32 || MACH_LOONGSON64 || COMPILE_TEST
select WATCHDOG_CORE
help
- Hardware driver for the Loongson1 SoC Watchdog Timer.
+ Hardware driver for the watchdog timer found on Loongson family SoCs.
config RALINK_WDT
tristate "Ralink SoC watchdog"
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index b680e4d3c1bc..ba52099b1253 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -85,6 +85,7 @@ obj-$(CONFIG_DIGICOLOR_WATCHDOG) += digicolor_wdt.o
obj-$(CONFIG_LPC18XX_WATCHDOG) += lpc18xx_wdt.o
obj-$(CONFIG_BCM7038_WDT) += bcm7038_wdt.o
obj-$(CONFIG_RENESAS_WDT) += renesas_wdt.o
+obj-$(CONFIG_RENESAS_WWDT) += renesas_wwdt.o
obj-$(CONFIG_RENESAS_RZAWDT) += rza_wdt.o
obj-$(CONFIG_RENESAS_RZN1WDT) += rzn1_wdt.o
obj-$(CONFIG_RENESAS_RZG2LWDT) += rzg2l_wdt.o
diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c
index 837e15701c0e..c9e79851504c 100644
--- a/drivers/watchdog/aspeed_wdt.c
+++ b/drivers/watchdog/aspeed_wdt.c
@@ -35,6 +35,7 @@ struct aspeed_wdt_config {
u32 irq_shift;
u32 irq_mask;
struct aspeed_wdt_scu scu;
+ u32 num_reset_masks;
};
struct aspeed_wdt {
@@ -66,6 +67,7 @@ static const struct aspeed_wdt_config ast2500_config = {
.wdt_reset_mask = 0x1,
.wdt_reset_mask_shift = 2,
},
+ .num_reset_masks = 1,
};
static const struct aspeed_wdt_config ast2600_config = {
@@ -78,12 +80,27 @@ static const struct aspeed_wdt_config ast2600_config = {
.wdt_reset_mask = 0xf,
.wdt_reset_mask_shift = 16,
},
+ .num_reset_masks = 2,
+};
+
+static const struct aspeed_wdt_config ast2700_config = {
+ .ext_pulse_width_mask = 0xfffff,
+ .irq_shift = 0,
+ .irq_mask = GENMASK(31, 10),
+ .scu = {
+ .compatible = "aspeed,ast2700-scu0",
+ .reset_status_reg = 0x70,
+ .wdt_reset_mask = 0xf,
+ .wdt_reset_mask_shift = 0,
+ },
+ .num_reset_masks = 5,
};
static const struct of_device_id aspeed_wdt_of_table[] = {
{ .compatible = "aspeed,ast2400-wdt", .data = &ast2400_config },
{ .compatible = "aspeed,ast2500-wdt", .data = &ast2500_config },
{ .compatible = "aspeed,ast2600-wdt", .data = &ast2600_config },
+ { .compatible = "aspeed,ast2700-wdt", .data = &ast2700_config },
{ },
};
MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table);
@@ -479,11 +496,11 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
set_bit(WDOG_HW_RUNNING, &wdt->wdd.status);
}
- if ((of_device_is_compatible(np, "aspeed,ast2500-wdt")) ||
- (of_device_is_compatible(np, "aspeed,ast2600-wdt"))) {
- u32 reset_mask[2];
- size_t nrstmask = of_device_is_compatible(np, "aspeed,ast2600-wdt") ? 2 : 1;
+ if (!of_device_is_compatible(np, "aspeed,ast2400-wdt")) {
+ u32 reset_mask[5];
+ size_t nrstmask = wdt->cfg->num_reset_masks;
u32 reg = readl(wdt->base + WDT_RESET_WIDTH);
+ int i;
reg &= wdt->cfg->ext_pulse_width_mask;
if (of_property_read_bool(np, "aspeed,ext-active-high"))
@@ -503,9 +520,8 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
ret = of_property_read_u32_array(np, "aspeed,reset-mask", reset_mask, nrstmask);
if (!ret) {
- writel(reset_mask[0], wdt->base + WDT_RESET_MASK1);
- if (nrstmask > 1)
- writel(reset_mask[1], wdt->base + WDT_RESET_MASK2);
+ for (i = 0; i < nrstmask; i++)
+ writel(reset_mask[i], wdt->base + WDT_RESET_MASK1 + i * 4);
}
}
diff --git a/drivers/watchdog/diag288_wdt.c b/drivers/watchdog/diag288_wdt.c
index 9daed2758ae5..ea2b0171e70b 100644
--- a/drivers/watchdog/diag288_wdt.c
+++ b/drivers/watchdog/diag288_wdt.c
@@ -6,10 +6,10 @@
* to CP.
*
* The command can be altered using the module parameter "cmd". This is
- * not recommended because it's only supported on z/VM but not whith LPAR.
+ * not recommended because it's only supported on z/VM but not with LPAR.
*
- * On LPAR, the watchdog will always trigger a system restart. the module
- * paramter cmd is meaningless here.
+ * On LPAR, the watchdog will always trigger a system restart. The module
+ * parameter cmd is meaningless here.
*
*
* Copyright IBM Corp. 2004, 2013
diff --git a/drivers/watchdog/loongson1_wdt.c b/drivers/watchdog/loongson1_wdt.c
index 0587ff44d3a1..2417519717cc 100644
--- a/drivers/watchdog/loongson1_wdt.c
+++ b/drivers/watchdog/loongson1_wdt.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2016 Yang Ling <gnaygnil@gmail.com>
+ * Copyright (C) 2025 Binbin Zhou <zhoubinbin@loongson.cn>
*/
#include <linux/clk.h>
@@ -10,31 +11,52 @@
#include <linux/platform_device.h>
#include <linux/watchdog.h>
-/* Loongson 1 Watchdog Register Definitions */
+/* Loongson Watchdog Register Definitions */
#define WDT_EN 0x0
-#define WDT_TIMER 0x4
-#define WDT_SET 0x8
#define DEFAULT_HEARTBEAT 30
static bool nowayout = WATCHDOG_NOWAYOUT;
-module_param(nowayout, bool, 0444);
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
static unsigned int heartbeat;
-module_param(heartbeat, uint, 0444);
+module_param(heartbeat, uint, 0);
+MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. (default="
+ __MODULE_STRING(DEFAULT_HEARTBEAT) ")");
+
+struct ls1x_wdt_pdata {
+ u32 timer_offset;
+ u32 set_offset;
+ u32 wdt_en_bit;
+};
+
+static const struct ls1x_wdt_pdata ls1b_wdt_pdata = {
+ .timer_offset = 0x4,
+ .set_offset = 0x8,
+ .wdt_en_bit = BIT(0),
+};
+
+static const struct ls1x_wdt_pdata ls2k0300_wdt_pdata = {
+ .timer_offset = 0x8,
+ .set_offset = 0x4,
+ .wdt_en_bit = BIT(1),
+};
struct ls1x_wdt_drvdata {
void __iomem *base;
struct clk *clk;
unsigned long clk_rate;
struct watchdog_device wdt;
+ const struct ls1x_wdt_pdata *pdata;
};
static int ls1x_wdt_ping(struct watchdog_device *wdt_dev)
{
struct ls1x_wdt_drvdata *drvdata = watchdog_get_drvdata(wdt_dev);
- writel(0x1, drvdata->base + WDT_SET);
+ writel(0x1, drvdata->base + drvdata->pdata->set_offset);
return 0;
}
@@ -49,7 +71,7 @@ static int ls1x_wdt_set_timeout(struct watchdog_device *wdt_dev,
wdt_dev->timeout = timeout;
counts = drvdata->clk_rate * min(timeout, max_hw_heartbeat);
- writel(counts, drvdata->base + WDT_TIMER);
+ writel(counts, drvdata->base + drvdata->pdata->timer_offset);
return 0;
}
@@ -58,7 +80,7 @@ static int ls1x_wdt_start(struct watchdog_device *wdt_dev)
{
struct ls1x_wdt_drvdata *drvdata = watchdog_get_drvdata(wdt_dev);
- writel(0x1, drvdata->base + WDT_EN);
+ writel(drvdata->pdata->wdt_en_bit, drvdata->base + WDT_EN);
return 0;
}
@@ -66,8 +88,10 @@ static int ls1x_wdt_start(struct watchdog_device *wdt_dev)
static int ls1x_wdt_stop(struct watchdog_device *wdt_dev)
{
struct ls1x_wdt_drvdata *drvdata = watchdog_get_drvdata(wdt_dev);
+ u32 val = readl(drvdata->base + WDT_EN);
- writel(0x0, drvdata->base + WDT_EN);
+ val &= ~(drvdata->pdata->wdt_en_bit);
+ writel(val, drvdata->base + WDT_EN);
return 0;
}
@@ -77,9 +101,9 @@ static int ls1x_wdt_restart(struct watchdog_device *wdt_dev,
{
struct ls1x_wdt_drvdata *drvdata = watchdog_get_drvdata(wdt_dev);
- writel(0x1, drvdata->base + WDT_EN);
- writel(0x1, drvdata->base + WDT_TIMER);
- writel(0x1, drvdata->base + WDT_SET);
+ writel(drvdata->pdata->wdt_en_bit, drvdata->base + WDT_EN);
+ writel(0x1, drvdata->base + drvdata->pdata->timer_offset);
+ writel(0x1, drvdata->base + drvdata->pdata->set_offset);
return 0;
}
@@ -104,11 +128,13 @@ static int ls1x_wdt_probe(struct platform_device *pdev)
struct ls1x_wdt_drvdata *drvdata;
struct watchdog_device *ls1x_wdt;
unsigned long clk_rate;
- int err;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
+ platform_set_drvdata(pdev, drvdata);
+
+ drvdata->pdata = of_device_get_match_data(dev);
drvdata->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(drvdata->base))
@@ -135,36 +161,51 @@ static int ls1x_wdt_probe(struct platform_device *pdev)
watchdog_set_nowayout(ls1x_wdt, nowayout);
watchdog_set_drvdata(ls1x_wdt, drvdata);
- err = devm_watchdog_register_device(dev, &drvdata->wdt);
- if (err)
- return err;
+ return devm_watchdog_register_device(dev, &drvdata->wdt);
+}
- platform_set_drvdata(pdev, drvdata);
+static int ls1x_wdt_resume(struct device *dev)
+{
+ struct ls1x_wdt_drvdata *data = dev_get_drvdata(dev);
+
+ if (watchdog_active(&data->wdt))
+ ls1x_wdt_start(&data->wdt);
+
+ return 0;
+}
+
+static int ls1x_wdt_suspend(struct device *dev)
+{
+ struct ls1x_wdt_drvdata *data = dev_get_drvdata(dev);
- dev_info(dev, "Loongson1 Watchdog driver registered\n");
+ if (watchdog_active(&data->wdt))
+ ls1x_wdt_stop(&data->wdt);
return 0;
}
-#ifdef CONFIG_OF
+static DEFINE_SIMPLE_DEV_PM_OPS(ls1x_wdt_pm_ops, ls1x_wdt_suspend, ls1x_wdt_resume);
+
static const struct of_device_id ls1x_wdt_dt_ids[] = {
- { .compatible = "loongson,ls1b-wdt", },
- { .compatible = "loongson,ls1c-wdt", },
+ { .compatible = "loongson,ls1b-wdt", .data = &ls1b_wdt_pdata },
+ { .compatible = "loongson,ls1c-wdt", .data = &ls1b_wdt_pdata },
+ { .compatible = "loongson,ls2k0300-wdt", .data = &ls2k0300_wdt_pdata },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ls1x_wdt_dt_ids);
-#endif
static struct platform_driver ls1x_wdt_driver = {
.probe = ls1x_wdt_probe,
.driver = {
.name = "ls1x-wdt",
- .of_match_table = of_match_ptr(ls1x_wdt_dt_ids),
+ .of_match_table = ls1x_wdt_dt_ids,
+ .pm = pm_ptr(&ls1x_wdt_pm_ops),
},
};
module_platform_driver(ls1x_wdt_driver);
MODULE_AUTHOR("Yang Ling <gnaygnil@gmail.com>");
-MODULE_DESCRIPTION("Loongson1 Watchdog Driver");
+MODULE_AUTHOR("Binbin Zhou <zhoubinbin@loongson.cn>");
+MODULE_DESCRIPTION("Loongson Watchdog Driver");
MODULE_LICENSE("GPL");
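With the register layout now carried in match data, supporting a further chip is confined to a new pdata blob plus a compatible entry. A hedged sketch with an invented "ls9999" variant; the offsets are placeholders, not taken from any datasheet:

/* Hypothetical additional variant: all ops already dereference
 * drvdata->pdata, so nothing else in the driver needs to change. */
static const struct ls1x_wdt_pdata ls9999_wdt_pdata = {
	.timer_offset = 0x10,	/* placeholder register offsets */
	.set_offset = 0x14,
	.wdt_en_bit = BIT(0),
};

/* ...and one more row in the match table: */
static const struct of_device_id ls1x_wdt_dt_ids[] = {
	{ .compatible = "loongson,ls1b-wdt", .data = &ls1b_wdt_pdata },
	{ .compatible = "loongson,ls1c-wdt", .data = &ls1b_wdt_pdata },
	{ .compatible = "loongson,ls2k0300-wdt", .data = &ls2k0300_wdt_pdata },
	{ .compatible = "loongson,ls9999-wdt", .data = &ls9999_wdt_pdata },
	{ /* sentinel */ }
};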
diff --git a/drivers/watchdog/renesas_wwdt.c b/drivers/watchdog/renesas_wwdt.c
new file mode 100644
index 000000000000..b250913c349a
--- /dev/null
+++ b/drivers/watchdog/renesas_wwdt.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the Renesas Window Watchdog Timer (WWDT)
+ *
+ * The WWDT can only be set up once after boot. Because we cannot know if this
+ * already happened in early boot stages, it is mandated that the firmware
+ * configures the watchdog. Linux then adapts to the given setup. Note that,
+ * in the default configuration, this watchdog reports an overflow to the
+ * Error Control Module, which then decides further actions. Alternatively,
+ * the WWDT can be configured to generate an interrupt instead.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+
+#define WDTA0WDTE 0x00
+#define WDTA0RUN BIT(7)
+#define WDTA0_KEY 0x2c
+
+#define WDTA0MD 0x0c
+#define WDTA0OVF(x) FIELD_GET(GENMASK(6, 4), x)
+#define WDTA0WIE BIT(3)
+#define WDTA0ERM BIT(2)
+#define WDTA0WS(x) FIELD_GET(GENMASK(1, 0), x)
+
+struct wwdt_priv {
+ void __iomem *base;
+ struct watchdog_device wdev;
+};
+
+static int wwdt_start(struct watchdog_device *wdev)
+{
+ struct wwdt_priv *priv = container_of(wdev, struct wwdt_priv, wdev);
+
+ writeb(WDTA0RUN | WDTA0_KEY, priv->base + WDTA0WDTE);
+ return 0;
+}
+
+static const struct watchdog_info wwdt_ident = {
+ .options = WDIOF_KEEPALIVEPING | WDIOF_ALARMONLY,
+ .identity = "Renesas Window Watchdog",
+};
+
+static const struct watchdog_ops wwdt_ops = {
+ .owner = THIS_MODULE,
+ .start = wwdt_start,
+};
+
+static irqreturn_t wwdt_error_irq(int irq, void *dev_id)
+{
+ struct device *dev = dev_id;
+
+ dev_warn(dev, "Watchdog timed out\n");
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wwdt_pretimeout_irq(int irq, void *dev_id)
+{
+ struct watchdog_device *wdev = dev_id;
+
+ watchdog_notify_pretimeout(wdev);
+ return IRQ_HANDLED;
+}
+
+static int wwdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct wwdt_priv *priv;
+ struct watchdog_device *wdev;
+ struct clk *clk;
+ unsigned long rate;
+ unsigned int interval, window_size;
+ int ret;
+ u8 val;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ clk = devm_clk_get(dev, "cnt");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ rate = clk_get_rate(clk);
+ if (!rate)
+ return -EINVAL;
+
+ wdev = &priv->wdev;
+
+ val = readb(priv->base + WDTA0WDTE);
+ if (val & WDTA0RUN)
+ set_bit(WDOG_HW_RUNNING, &wdev->status);
+
+ val = readb(priv->base + WDTA0MD);
+ interval = 1 << (9 + WDTA0OVF(val));
+ /* size of the closed(!) window per mille */
+ window_size = 250 * (3 - WDTA0WS(val));
+
+ wdev->info = &wwdt_ident;
+ wdev->ops = &wwdt_ops;
+ wdev->parent = dev;
+ wdev->min_hw_heartbeat_ms = window_size * interval / rate;
+ wdev->max_hw_heartbeat_ms = 1000 * interval / rate;
+ wdev->timeout = DIV_ROUND_UP(wdev->max_hw_heartbeat_ms, 1000);
+ watchdog_set_nowayout(wdev, true);
+
+ if (!(val & WDTA0ERM)) {
+ ret = platform_get_irq_byname(pdev, "error");
+ if (ret < 0)
+ return ret;
+
+ ret = devm_request_threaded_irq(dev, ret, NULL, wwdt_error_irq,
+ IRQF_ONESHOT, NULL, dev);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (val & WDTA0WIE) {
+ ret = platform_get_irq_byname(pdev, "pretimeout");
+ if (ret < 0)
+ return ret;
+
+ ret = devm_request_threaded_irq(dev, ret, NULL, wwdt_pretimeout_irq,
+ IRQF_ONESHOT, NULL, wdev);
+ if (ret < 0)
+ return ret;
+ }
+
+ return devm_watchdog_register_device(dev, wdev);
+}
+
+static const struct of_device_id renesas_wwdt_ids[] = {
+ { .compatible = "renesas,rcar-gen3-wwdt", },
+ { .compatible = "renesas,rcar-gen4-wwdt", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, renesas_wwdt_ids);
+
+static struct platform_driver renesas_wwdt_driver = {
+ .driver = {
+ .name = "renesas_wwdt",
+ .of_match_table = renesas_wwdt_ids,
+ },
+ .probe = wwdt_probe,
+};
+module_platform_driver(renesas_wwdt_driver);
+
+MODULE_DESCRIPTION("Renesas Window Watchdog (WWDT) Driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Wolfram Sang <wsa+renesas@sang-engineering.com>");
diff --git a/drivers/watchdog/starfive-wdt.c b/drivers/watchdog/starfive-wdt.c
index 355918d62f63..ed71d3960a0f 100644
--- a/drivers/watchdog/starfive-wdt.c
+++ b/drivers/watchdog/starfive-wdt.c
@@ -500,12 +500,14 @@ static int starfive_wdt_probe(struct platform_device *pdev)
if (pm_runtime_enabled(&pdev->dev)) {
ret = pm_runtime_put_sync(&pdev->dev);
if (ret)
- goto err_exit;
+ goto err_unregister_wdt;
}
}
return 0;
+err_unregister_wdt:
+ watchdog_unregister_device(&wdt->wdd);
err_exit:
starfive_wdt_disable_clock(wdt);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/watchdog/via_wdt.c b/drivers/watchdog/via_wdt.c
index d647923d68fe..f55576392651 100644
--- a/drivers/watchdog/via_wdt.c
+++ b/drivers/watchdog/via_wdt.c
@@ -165,6 +165,7 @@ static int wdt_probe(struct pci_dev *pdev,
dev_err(&pdev->dev, "cannot enable PCI device\n");
return -ENODEV;
}
+ wdt_res.name = "via_wdt";
/*
* Allocate a MMIO region which contains watchdog control register
diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
index 650fdc7996e1..dd3c2d69c9df 100644
--- a/drivers/watchdog/wdat_wdt.c
+++ b/drivers/watchdog/wdat_wdt.c
@@ -326,19 +326,27 @@ static int wdat_wdt_probe(struct platform_device *pdev)
return -ENODEV;
wdat = devm_kzalloc(dev, sizeof(*wdat), GFP_KERNEL);
- if (!wdat)
- return -ENOMEM;
+ if (!wdat) {
+ ret = -ENOMEM;
+ goto out_put_table;
+ }
regs = devm_kcalloc(dev, pdev->num_resources, sizeof(*regs),
GFP_KERNEL);
- if (!regs)
- return -ENOMEM;
+ if (!regs) {
+ ret = -ENOMEM;
+ goto out_put_table;
+ }
/* WDAT specification wants to have >= 1ms period */
- if (tbl->timer_period < 1)
- return -EINVAL;
- if (tbl->min_count > tbl->max_count)
- return -EINVAL;
+ if (tbl->timer_period < 1) {
+ ret = -EINVAL;
+ goto out_put_table;
+ }
+ if (tbl->min_count > tbl->max_count) {
+ ret = -EINVAL;
+ goto out_put_table;
+ }
wdat->period = tbl->timer_period;
wdat->wdd.min_timeout = DIV_ROUND_UP(wdat->period * tbl->min_count, 1000);
@@ -355,15 +363,20 @@ static int wdat_wdt_probe(struct platform_device *pdev)
res = &pdev->resource[i];
if (resource_type(res) == IORESOURCE_MEM) {
reg = devm_ioremap_resource(dev, res);
- if (IS_ERR(reg))
- return PTR_ERR(reg);
+ if (IS_ERR(reg)) {
+ ret = PTR_ERR(reg);
+ goto out_put_table;
+ }
} else if (resource_type(res) == IORESOURCE_IO) {
reg = devm_ioport_map(dev, res->start, 1);
- if (!reg)
- return -ENOMEM;
+ if (!reg) {
+ ret = -ENOMEM;
+ goto out_put_table;
+ }
} else {
dev_err(dev, "Unsupported resource\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_put_table;
}
regs[i] = reg;
@@ -385,8 +398,10 @@ static int wdat_wdt_probe(struct platform_device *pdev)
}
instr = devm_kzalloc(dev, sizeof(*instr), GFP_KERNEL);
- if (!instr)
- return -ENOMEM;
+ if (!instr) {
+ ret = -ENOMEM;
+ goto out_put_table;
+ }
INIT_LIST_HEAD(&instr->node);
instr->entry = entries[i];
@@ -417,7 +432,8 @@ static int wdat_wdt_probe(struct platform_device *pdev)
if (!instr->reg) {
dev_err(dev, "I/O resource not found\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_put_table;
}
instructions = wdat->instructions[action];
@@ -425,8 +441,10 @@ static int wdat_wdt_probe(struct platform_device *pdev)
instructions = devm_kzalloc(dev,
sizeof(*instructions),
GFP_KERNEL);
- if (!instructions)
- return -ENOMEM;
+ if (!instructions) {
+ ret = -ENOMEM;
+ goto out_put_table;
+ }
INIT_LIST_HEAD(instructions);
wdat->instructions[action] = instructions;
@@ -443,7 +461,7 @@ static int wdat_wdt_probe(struct platform_device *pdev)
ret = wdat_wdt_enable_reboot(wdat);
if (ret)
- return ret;
+ goto out_put_table;
platform_set_drvdata(pdev, wdat);
@@ -460,12 +478,16 @@ static int wdat_wdt_probe(struct platform_device *pdev)
ret = wdat_wdt_set_timeout(&wdat->wdd, timeout);
if (ret)
- return ret;
+ goto out_put_table;
watchdog_set_nowayout(&wdat->wdd, nowayout);
watchdog_stop_on_reboot(&wdat->wdd);
watchdog_stop_on_unregister(&wdat->wdd);
- return devm_watchdog_register_device(dev, &wdat->wdd);
+ ret = devm_watchdog_register_device(dev, &wdat->wdd);
+
+out_put_table:
+ acpi_put_table((struct acpi_table_header *)tbl);
+ return ret;
}
static int wdat_wdt_suspend_noirq(struct device *dev)
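All of the hunks above converge on one rule: every exit taken after acpi_get_table() succeeds must drop the reference with acpi_put_table(). A minimal sketch of that pairing, with the parsing elided and the function name invented:

#include <linux/acpi.h>

static int wdat_table_ref_example(void)
{
	struct acpi_table_header *tbl;
	int ret = 0;

	if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_WDAT, 0, &tbl)))
		return -ENODEV;

	/* ... validate and parse tbl; on failure set ret and fall through ... */

	acpi_put_table(tbl);	/* the single exit drops the reference */
	return ret;
}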
diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c
index 29257d2639db..14077d23f2a1 100644
--- a/drivers/xen/grant-dma-ops.c
+++ b/drivers/xen/grant-dma-ops.c
@@ -163,18 +163,22 @@ static void xen_grant_dma_free_pages(struct device *dev, size_t size,
xen_grant_dma_free(dev, size, page_to_virt(vaddr), dma_handle, 0);
}
-static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
+static dma_addr_t xen_grant_dma_map_phys(struct device *dev, phys_addr_t phys,
+ size_t size,
enum dma_data_direction dir,
unsigned long attrs)
{
struct xen_grant_dma_data *data;
+ unsigned long offset = offset_in_page(phys);
unsigned long dma_offset = xen_offset_in_page(offset),
pfn_offset = XEN_PFN_DOWN(offset);
unsigned int i, n_pages = XEN_PFN_UP(dma_offset + size);
grant_ref_t grant;
dma_addr_t dma_handle;
+ if (unlikely(attrs & DMA_ATTR_MMIO))
+ return DMA_MAPPING_ERROR;
+
if (WARN_ON(dir == DMA_NONE))
return DMA_MAPPING_ERROR;
@@ -190,7 +194,7 @@ static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
for (i = 0; i < n_pages; i++) {
gnttab_grant_foreign_access_ref(grant + i, data->backend_domid,
- pfn_to_gfn(page_to_xen_pfn(page) + i + pfn_offset),
+ pfn_to_gfn(page_to_xen_pfn(phys_to_page(phys)) + i + pfn_offset),
dir == DMA_TO_DEVICE);
}
@@ -199,7 +203,7 @@ static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
return dma_handle;
}
-static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+static void xen_grant_dma_unmap_phys(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
@@ -242,7 +246,7 @@ static void xen_grant_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
return;
for_each_sg(sg, s, nents, i)
- xen_grant_dma_unmap_page(dev, s->dma_address, sg_dma_len(s), dir,
+ xen_grant_dma_unmap_phys(dev, s->dma_address, sg_dma_len(s), dir,
attrs);
}
@@ -257,7 +261,7 @@ static int xen_grant_dma_map_sg(struct device *dev, struct scatterlist *sg,
return -EINVAL;
for_each_sg(sg, s, nents, i) {
- s->dma_address = xen_grant_dma_map_page(dev, sg_page(s), s->offset,
+ s->dma_address = xen_grant_dma_map_phys(dev, sg_phys(s),
s->length, dir, attrs);
if (s->dma_address == DMA_MAPPING_ERROR)
goto out;
@@ -286,8 +290,8 @@ static const struct dma_map_ops xen_grant_dma_ops = {
.free_pages = xen_grant_dma_free_pages,
.mmap = dma_common_mmap,
.get_sgtable = dma_common_get_sgtable,
- .map_page = xen_grant_dma_map_page,
- .unmap_page = xen_grant_dma_unmap_page,
+ .map_phys = xen_grant_dma_map_phys,
+ .unmap_phys = xen_grant_dma_unmap_phys,
.map_sg = xen_grant_dma_map_sg,
.unmap_sg = xen_grant_dma_unmap_sg,
.dma_supported = xen_grant_dma_supported,
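The conversion leans on the fact that a single phys_addr_t carries everything the old (page, offset) parameter pair did; a tiny round-trip sketch, assuming the usual phys_to_page()/page_to_phys() helpers for the platform:

#include <linux/mm.h>
#include <linux/io.h>

/* phys <-> (page, offset) round trip as used by the ->map_phys path */
static bool phys_encodes_page_and_offset(phys_addr_t phys)
{
	struct page *page = phys_to_page(phys);
	unsigned long offset = offset_in_page(phys);

	return page_to_phys(page) + offset == phys;	/* always true */
}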
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 478d2ad725ac..3e76e33f6e08 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1204,7 +1204,7 @@ void gnttab_foreach_grant_in_range(struct page *page,
unsigned int glen;
unsigned long xen_pfn;
- len = min_t(unsigned int, PAGE_SIZE - offset, len);
+ len = min(PAGE_SIZE - offset, len);
goffset = xen_offset_in_page(offset);
xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(offset);
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index dd7747a2de87..ccf25027bec1 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -200,17 +200,32 @@ xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr,
* physical address to use is returned.
*
* Once the device is given the dma address, the device owns this memory until
- * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
+ * either xen_swiotlb_unmap_phys or xen_swiotlb_dma_sync_single is performed.
*/
-static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
+static dma_addr_t xen_swiotlb_map_phys(struct device *dev, phys_addr_t phys,
+ size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
- phys_addr_t map, phys = page_to_phys(page) + offset;
- dma_addr_t dev_addr = xen_phys_to_dma(dev, phys);
+ dma_addr_t dev_addr;
+ phys_addr_t map;
BUG_ON(dir == DMA_NONE);
+
+ if (attrs & DMA_ATTR_MMIO) {
+ if (unlikely(!dma_capable(dev, phys, size, false))) {
+ dev_err_once(dev,
+ "DMA addr %pa+%zu overflow (mask %llx, bus limit %llx).\n",
+ &phys, size, *dev->dma_mask, dev->bus_dma_limit);
+ WARN_ON_ONCE(1);
+ return DMA_MAPPING_ERROR;
+ }
+ return phys;
+ }
+
+ dev_addr = xen_phys_to_dma(dev, phys);
+
/*
* If the address happens to be in the device's DMA window,
* we can safely return the device addr and not worry about bounce
@@ -257,13 +272,13 @@ done:
/*
* Unmap a single streaming mode DMA translation. The dma_addr and size must
- * match what was provided for in a previous xen_swiotlb_map_page call. All
+ * match what was provided for in a previous xen_swiotlb_map_phys call. All
* other usages are undefined.
*
* After this call, reads by the cpu to the buffer are guaranteed to see
* whatever the device wrote there.
*/
-static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
+static void xen_swiotlb_unmap_phys(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
phys_addr_t paddr = xen_dma_to_phys(hwdev, dev_addr);
@@ -325,7 +340,7 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
/*
* Unmap a set of streaming mode DMA translations. Again, cpu read rules
- * concerning calls here are the same as for swiotlb_unmap_page() above.
+ * concerning calls here are the same as for swiotlb_unmap_phys() above.
*/
static void
xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
@@ -337,7 +352,7 @@ xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
BUG_ON(dir == DMA_NONE);
for_each_sg(sgl, sg, nelems, i)
- xen_swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg),
+ xen_swiotlb_unmap_phys(hwdev, sg->dma_address, sg_dma_len(sg),
dir, attrs);
}
@@ -352,8 +367,8 @@ xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
BUG_ON(dir == DMA_NONE);
for_each_sg(sgl, sg, nelems, i) {
- sg->dma_address = xen_swiotlb_map_page(dev, sg_page(sg),
- sg->offset, sg->length, dir, attrs);
+ sg->dma_address = xen_swiotlb_map_phys(dev, sg_phys(sg),
+ sg->length, dir, attrs);
if (sg->dma_address == DMA_MAPPING_ERROR)
goto out_unmap;
sg_dma_len(sg) = sg->length;
@@ -392,25 +407,6 @@ xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
}
}
-static dma_addr_t xen_swiotlb_direct_map_resource(struct device *dev,
- phys_addr_t paddr,
- size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- dma_addr_t dma_addr = paddr;
-
- if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
- dev_err_once(dev,
- "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
- &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
- WARN_ON_ONCE(1);
- return DMA_MAPPING_ERROR;
- }
-
- return dma_addr;
-}
-
/*
* Return whether the given device DMA address mask can be supported
* properly. For example, if your device can only drive the low 24-bits
@@ -437,13 +433,12 @@ const struct dma_map_ops xen_swiotlb_dma_ops = {
.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
.map_sg = xen_swiotlb_map_sg,
.unmap_sg = xen_swiotlb_unmap_sg,
- .map_page = xen_swiotlb_map_page,
- .unmap_page = xen_swiotlb_unmap_page,
+ .map_phys = xen_swiotlb_map_phys,
+ .unmap_phys = xen_swiotlb_unmap_phys,
.dma_supported = xen_swiotlb_dma_supported,
.mmap = dma_common_mmap,
.get_sgtable = dma_common_get_sgtable,
.alloc_pages_op = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
.max_mapping_size = swiotlb_max_mapping_size,
- .map_resource = xen_swiotlb_direct_map_resource,
};
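The ownership rule restated in the comments above is what drivers see through the generic streaming DMA API, which is how these ->map_phys/->unmap_phys ops are ultimately reached. A hedged usage sketch from the driver side (buffer, length, and direction are arbitrary):

#include <linux/dma-mapping.h>

static int receive_one_buffer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* the device owns buf here; the CPU must not touch it */
	/* ... start the transfer and wait for completion ... */

	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
	/* CPU reads are now guaranteed to see what the device wrote */
	return 0;
}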
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index f794014814be..15f18374020e 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -407,7 +407,7 @@ static char *join(const char *dir, const char *name)
buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s", dir);
else
buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", dir, name);
- return (!buffer) ? ERR_PTR(-ENOMEM) : buffer;
+ return buffer ?: ERR_PTR(-ENOMEM);
}
static char **split_strings(char *strings, unsigned int len, unsigned int *num)
@@ -546,18 +546,12 @@ int xenbus_transaction_start(struct xenbus_transaction *t)
EXPORT_SYMBOL_GPL(xenbus_transaction_start);
/* End a transaction.
- * If abandon is true, transaction is discarded instead of committed.
+ * If abort is true, the transaction is discarded instead of committed.
*/
-int xenbus_transaction_end(struct xenbus_transaction t, int abort)
+int xenbus_transaction_end(struct xenbus_transaction t, bool abort)
{
- char abortstr[2];
-
- if (abort)
- strcpy(abortstr, "F");
- else
- strcpy(abortstr, "T");
-
- return xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL));
+ return xs_error(xs_single(t, XS_TRANSACTION_END, abort ? "F" : "T",
+ NULL));
}
EXPORT_SYMBOL_GPL(xenbus_transaction_end);
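With the abort flag now a bool, the (unchanged) calling convention looks like this. A minimal retry-loop sketch; the xenstore path and value are made up for illustration:

#include <xen/xenbus.h>

static int write_state_example(void)
{
	struct xenbus_transaction t;
	int err;

again:
	err = xenbus_transaction_start(&t);
	if (err)
		return err;

	err = xenbus_printf(t, "device/example", "state", "%d", 1);
	if (err) {
		xenbus_transaction_end(t, true);	/* abort */
		return err;
	}

	err = xenbus_transaction_end(t, false);		/* commit */
	if (err == -EAGAIN)
		goto again;

	return err;
}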