Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/riscv/rimt.c             | 122
-rw-r--r--  drivers/base/arch_topology.c          |   2
-rw-r--r--  drivers/base/core.c                   |   2
-rw-r--r--  drivers/base/devcoredump.c            | 136
-rw-r--r--  drivers/firewire/core-transaction.c   |   2
-rw-r--r--  drivers/firewire/init_ohci1394_dma.c  |  10
6 files changed, 157 insertions(+), 117 deletions(-)
diff --git a/drivers/acpi/riscv/rimt.c b/drivers/acpi/riscv/rimt.c
index 683fcfe35c31..7f423405e5ef 100644
--- a/drivers/acpi/riscv/rimt.c
+++ b/drivers/acpi/riscv/rimt.c
@@ -61,30 +61,6 @@ static int rimt_set_fwnode(struct acpi_rimt_node *rimt_node,
return 0;
}
-/**
- * rimt_get_fwnode() - Retrieve fwnode associated with an RIMT node
- *
- * @node: RIMT table node to be looked-up
- *
- * Returns: fwnode_handle pointer on success, NULL on failure
- */
-static struct fwnode_handle *rimt_get_fwnode(struct acpi_rimt_node *node)
-{
- struct fwnode_handle *fwnode = NULL;
- struct rimt_fwnode *curr;
-
- spin_lock(&rimt_fwnode_lock);
- list_for_each_entry(curr, &rimt_fwnode_list, list) {
- if (curr->rimt_node == node) {
- fwnode = curr->fwnode;
- break;
- }
- }
- spin_unlock(&rimt_fwnode_lock);
-
- return fwnode;
-}
-
static acpi_status rimt_match_node_callback(struct acpi_rimt_node *node,
void *context)
{
@@ -202,6 +178,67 @@ static struct acpi_rimt_node *rimt_scan_node(enum acpi_rimt_node_type type,
return NULL;
}
+/*
+ * RISC-V supports IOMMU as a PCI device or a platform device.
+ * When it is a platform device, there should be a namespace device as
+ * well along with RIMT. To create the link between RIMT information and
+ * the platform device, the IOMMU driver should register itself with the
+ * RIMT module. This is true for PCI based IOMMU as well.
+ */
+int rimt_iommu_register(struct device *dev)
+{
+ struct fwnode_handle *rimt_fwnode;
+ struct acpi_rimt_node *node;
+
+ node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_IOMMU, dev);
+ if (!node) {
+ pr_err("Could not find IOMMU node in RIMT\n");
+ return -ENODEV;
+ }
+
+ if (dev_is_pci(dev)) {
+ rimt_fwnode = acpi_alloc_fwnode_static();
+ if (!rimt_fwnode)
+ return -ENOMEM;
+
+ rimt_fwnode->dev = dev;
+ if (!dev->fwnode)
+ dev->fwnode = rimt_fwnode;
+
+ rimt_set_fwnode(node, rimt_fwnode);
+ } else {
+ rimt_set_fwnode(node, dev->fwnode);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_IOMMU_API
+
+/**
+ * rimt_get_fwnode() - Retrieve fwnode associated with an RIMT node
+ *
+ * @node: RIMT table node to be looked-up
+ *
+ * Returns: fwnode_handle pointer on success, NULL on failure
+ */
+static struct fwnode_handle *rimt_get_fwnode(struct acpi_rimt_node *node)
+{
+ struct fwnode_handle *fwnode = NULL;
+ struct rimt_fwnode *curr;
+
+ spin_lock(&rimt_fwnode_lock);
+ list_for_each_entry(curr, &rimt_fwnode_list, list) {
+ if (curr->rimt_node == node) {
+ fwnode = curr->fwnode;
+ break;
+ }
+ }
+ spin_unlock(&rimt_fwnode_lock);
+
+ return fwnode;
+}
+
static bool rimt_pcie_rc_supports_ats(struct acpi_rimt_node *node)
{
struct acpi_rimt_pcie_rc *pci_rc;
@@ -290,43 +327,6 @@ static struct acpi_rimt_node *rimt_node_get_id(struct acpi_rimt_node *node,
return NULL;
}
-/*
- * RISC-V supports IOMMU as a PCI device or a platform device.
- * When it is a platform device, there should be a namespace device as
- * well along with RIMT. To create the link between RIMT information and
- * the platform device, the IOMMU driver should register itself with the
- * RIMT module. This is true for PCI based IOMMU as well.
- */
-int rimt_iommu_register(struct device *dev)
-{
- struct fwnode_handle *rimt_fwnode;
- struct acpi_rimt_node *node;
-
- node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_IOMMU, dev);
- if (!node) {
- pr_err("Could not find IOMMU node in RIMT\n");
- return -ENODEV;
- }
-
- if (dev_is_pci(dev)) {
- rimt_fwnode = acpi_alloc_fwnode_static();
- if (!rimt_fwnode)
- return -ENOMEM;
-
- rimt_fwnode->dev = dev;
- if (!dev->fwnode)
- dev->fwnode = rimt_fwnode;
-
- rimt_set_fwnode(node, rimt_fwnode);
- } else {
- rimt_set_fwnode(node, dev->fwnode);
- }
-
- return 0;
-}
-
-#ifdef CONFIG_IOMMU_API
-
static struct acpi_rimt_node *rimt_node_map_id(struct acpi_rimt_node *node,
u32 id_in, u32 *id_out,
u8 type_mask)
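
As the moved comment block above explains, an IOMMU driver (platform or PCI based) is expected to register itself with the RIMT code so the table node can be linked to the device's fwnode. A minimal sketch of how a platform IOMMU driver might do this from its probe path follows; the probe function and surrounding driver are hypothetical, only rimt_iommu_register() comes from the code above.

    /* Hypothetical probe excerpt: link the IOMMU device to its RIMT node. */
    static int example_riscv_iommu_probe(struct platform_device *pdev)
    {
        int ret;

        /* Fails with -ENODEV if no RIMT IOMMU node matches this device. */
        ret = rimt_iommu_register(&pdev->dev);
        if (ret)
            return ret;

        /* ... continue with the usual IOMMU setup ... */
        return 0;
    }
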
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 1037169abb45..e1eff05bea4a 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -292,7 +292,7 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
* frequency (by keeping the initial capacity_freq_ref value).
*/
cpu_clk = of_clk_get(cpu_node, 0);
- if (!PTR_ERR_OR_ZERO(cpu_clk)) {
+ if (!IS_ERR_OR_NULL(cpu_clk)) {
per_cpu(capacity_freq_ref, cpu) =
clk_get_rate(cpu_clk) / HZ_PER_KHZ;
clk_put(cpu_clk);
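
The one-line arch_topology.c change swaps one pointer-checking helper for another; the difference only matters for NULL. PTR_ERR_OR_ZERO() from <linux/err.h> maps both a valid pointer and NULL to 0, while IS_ERR_OR_NULL() also rejects NULL, so the new check additionally skips the clock-rate path for a NULL clock. A semantics-only sketch (clk_usable_old()/clk_usable_new() are illustrative helpers, not kernel APIs):

    static bool clk_usable_old(struct clk *clk)
    {
        return !PTR_ERR_OR_ZERO(clk);  /* true for a valid pointer, but also for NULL */
    }

    static bool clk_usable_new(struct clk *clk)
    {
        return !IS_ERR_OR_NULL(clk);   /* true only for a non-NULL, non-error pointer */
    }
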
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 3c533dab8fa5..f69dc9c85954 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1784,7 +1784,7 @@ static int fw_devlink_dev_sync_state(struct device *dev, void *data)
return 0;
if (fw_devlink_sync_state == FW_DEVLINK_SYNC_STATE_STRICT) {
- dev_warn(sup, "sync_state() pending due to %s\n",
+ dev_info(sup, "sync_state() pending due to %s\n",
dev_name(link->consumer));
return 0;
}
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index 37faf6156d7c..55bdc7f5e59d 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -23,50 +23,46 @@ struct devcd_entry {
void *data;
size_t datalen;
/*
- * Here, mutex is required to serialize the calls to del_wk work between
- * user/kernel space which happens when devcd is added with device_add()
- * and that sends uevent to user space. User space reads the uevents,
- * and calls to devcd_data_write() which try to modify the work which is
- * not even initialized/queued from devcoredump.
+ * There are 2 races for which the mutex is required.
*
+ * The first race is between device creation and userspace writing to
+ * schedule immediate destruction.
*
+ * This race is handled by arming the timer before device creation, but
+ * when device creation fails the timer still exists.
*
- * cpu0(X) cpu1(Y)
+ * To solve this, hold the mutex during device_add(), and set
+ * init_completed on success before releasing the mutex.
*
- * dev_coredump() uevent sent to user space
- * device_add() ======================> user space process Y reads the
- * uevents writes to devcd fd
- * which results into writes to
+ * That way the timer will never fire until device_add() is called;
+ * it will do nothing if init_completed is not set. The timer is also
+ * cancelled in that case.
*
- * devcd_data_write()
- * mod_delayed_work()
- * try_to_grab_pending()
- * timer_delete()
- * debug_assert_init()
- * INIT_DELAYED_WORK()
- * schedule_delayed_work()
- *
- *
- * Also, mutex alone would not be enough to avoid scheduling of
- * del_wk work after it get flush from a call to devcd_free()
- * mentioned as below.
- *
- * disabled_store()
- * devcd_free()
- * mutex_lock() devcd_data_write()
- * flush_delayed_work()
- * mutex_unlock()
- * mutex_lock()
- * mod_delayed_work()
- * mutex_unlock()
- * So, delete_work flag is required.
+ * The second race involves multiple parallel invocations of devcd_free();
+ * add a deleted flag so only one can call the destructor.
*/
struct mutex mutex;
- bool delete_work;
+ bool init_completed, deleted;
struct module *owner;
ssize_t (*read)(char *buffer, loff_t offset, size_t count,
void *data, size_t datalen);
void (*free)(void *data);
+ /*
+ * If nothing interferes and device_add() returns success,
+ * del_wk will destroy the device after the timer fires.
+ *
+ * Multiple userspace processes can interfere with the timer:
+ * - Writing to the coredump will reschedule the timer to run immediately,
+ * if still armed.
+ *
+ * This is handled by using "if (cancel_delayed_work()) {
+ * schedule_delayed_work() }", to prevent re-arming after having
+ * been previously fired.
+ * - Writing to /sys/class/devcoredump/disabled will destroy the
+ * coredump synchronously.
+ * This is handled by using disable_delayed_work_sync(), and then
+ * checking if deleted flag is set with &devcd->mutex held.
+ */
struct delayed_work del_wk;
struct device *failing_dev;
};
@@ -95,14 +91,27 @@ static void devcd_dev_release(struct device *dev)
kfree(devcd);
}
+static void __devcd_del(struct devcd_entry *devcd)
+{
+ devcd->deleted = true;
+ device_del(&devcd->devcd_dev);
+ put_device(&devcd->devcd_dev);
+}
+
static void devcd_del(struct work_struct *wk)
{
struct devcd_entry *devcd;
+ bool init_completed;
devcd = container_of(wk, struct devcd_entry, del_wk.work);
- device_del(&devcd->devcd_dev);
- put_device(&devcd->devcd_dev);
+ /* devcd->mutex serializes against dev_coredumpm_timeout */
+ mutex_lock(&devcd->mutex);
+ init_completed = devcd->init_completed;
+ mutex_unlock(&devcd->mutex);
+
+ if (init_completed)
+ __devcd_del(devcd);
}
static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj,
@@ -122,12 +131,12 @@ static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct devcd_entry *devcd = dev_to_devcd(dev);
- mutex_lock(&devcd->mutex);
- if (!devcd->delete_work) {
- devcd->delete_work = true;
- mod_delayed_work(system_wq, &devcd->del_wk, 0);
- }
- mutex_unlock(&devcd->mutex);
+ /*
+ * Although it's tempting to use mod_delayed_work() here,
+ * that will cause a reschedule if the timer already fired.
+ */
+ if (cancel_delayed_work(&devcd->del_wk))
+ schedule_delayed_work(&devcd->del_wk, 0);
return count;
}
@@ -151,11 +160,21 @@ static int devcd_free(struct device *dev, void *data)
{
struct devcd_entry *devcd = dev_to_devcd(dev);
+ /*
+ * To prevent a race with devcd_data_write(), disable work and
+ * complete manually instead.
+ *
+ * We cannot rely on the return value of
+ * disable_delayed_work_sync() here, because it might be in the
+ * middle of a cancel_delayed_work + schedule_delayed_work pair.
+ *
+ * devcd->mutex here guards against multiple parallel invocations
+ * of devcd_free().
+ */
+ disable_delayed_work_sync(&devcd->del_wk);
mutex_lock(&devcd->mutex);
- if (!devcd->delete_work)
- devcd->delete_work = true;
-
- flush_delayed_work(&devcd->del_wk);
+ if (!devcd->deleted)
+ __devcd_del(devcd);
mutex_unlock(&devcd->mutex);
return 0;
}
@@ -179,12 +198,10 @@ static ssize_t disabled_show(const struct class *class, const struct class_attri
* put_device() <- last reference
* error = fn(dev, data) devcd_dev_release()
* devcd_free(dev, data) kfree(devcd)
- * mutex_lock(&devcd->mutex);
*
*
* In the above diagram, it looks like disabled_store() would be racing with parallelly
- * running devcd_del() and result in memory abort while acquiring devcd->mutex which
- * is called after kfree of devcd memory after dropping its last reference with
+ * running devcd_del() and result in memory abort after dropping its last reference with
* put_device(). However, this will not happens as fn(dev, data) runs
* with its own reference to device via klist_node so it is not its last reference.
* so, above situation would not occur.
@@ -374,7 +391,7 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
devcd->read = read;
devcd->free = free;
devcd->failing_dev = get_device(dev);
- devcd->delete_work = false;
+ devcd->deleted = false;
mutex_init(&devcd->mutex);
device_initialize(&devcd->devcd_dev);
@@ -383,8 +400,14 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
atomic_inc_return(&devcd_count));
devcd->devcd_dev.class = &devcd_class;
- mutex_lock(&devcd->mutex);
dev_set_uevent_suppress(&devcd->devcd_dev, true);
+
+ /* devcd->mutex prevents devcd_del() completing until init finishes */
+ mutex_lock(&devcd->mutex);
+ devcd->init_completed = false;
+ INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
+ schedule_delayed_work(&devcd->del_wk, timeout);
+
if (device_add(&devcd->devcd_dev))
goto put_device;
@@ -401,13 +424,20 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
dev_set_uevent_suppress(&devcd->devcd_dev, false);
kobject_uevent(&devcd->devcd_dev.kobj, KOBJ_ADD);
- INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
- schedule_delayed_work(&devcd->del_wk, timeout);
+
+ /*
+ * Safe to run devcd_del() now that we are done with devcd_dev.
+ * Alternatively we could have taken a ref on devcd_dev before
+ * dropping the lock.
+ */
+ devcd->init_completed = true;
mutex_unlock(&devcd->mutex);
return;
put_device:
- put_device(&devcd->devcd_dev);
mutex_unlock(&devcd->mutex);
+ cancel_delayed_work_sync(&devcd->del_wk);
+ put_device(&devcd->devcd_dev);
+
put_module:
module_put(owner);
free:
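
The devcoredump rework leans on a small workqueue idiom worth spelling out: mod_delayed_work() (re)queues the work even if it has already run, whereas cancel_delayed_work() returns true only while the work is still pending, so pairing it with schedule_delayed_work() cannot resurrect a work item that has already fired or been disabled. A stripped-down sketch of the idiom (example_dwork and example_expedite() are hypothetical, not the devcoredump code itself):

    static struct delayed_work example_dwork;  /* armed elsewhere with schedule_delayed_work() */

    /* Expedite the work, but only if it has not run (or been disabled) yet. */
    static void example_expedite(void)
    {
        /*
         * cancel_delayed_work() returns true only if the work was still
         * pending; only then is it re-queued to run immediately.
         * mod_delayed_work() would instead re-arm the work even after it
         * had already fired or been disabled.
         */
        if (cancel_delayed_work(&example_dwork))
            schedule_delayed_work(&example_dwork, 0);
    }
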
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index dd3656a0c1ff..c65f491c54d0 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -269,7 +269,7 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
}
static int allocate_tlabel(struct fw_card *card)
-__must_hold(&card->transactions_lock)
+__must_hold(&card->transactions.lock)
{
int tlabel;
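
__must_hold() is a Sparse context annotation: it documents, and lets sparse check, that the named lock is already held when the function is entered and is still held when it returns. The hunk above only points the annotation at the renamed lock member. An illustrative use on a hypothetical structure:

    struct foo_counter {
        spinlock_t lock;
        unsigned int count;
    };

    /* The caller must already hold foo->lock around this helper. */
    static unsigned int foo_counter_bump(struct foo_counter *foo)
        __must_hold(&foo->lock)
    {
        return ++foo->count;
    }
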
diff --git a/drivers/firewire/init_ohci1394_dma.c b/drivers/firewire/init_ohci1394_dma.c
index 48b879e9e831..121f0c2f6401 100644
--- a/drivers/firewire/init_ohci1394_dma.c
+++ b/drivers/firewire/init_ohci1394_dma.c
@@ -167,6 +167,7 @@ static inline void __init init_ohci1394_initialize(struct ohci *ohci)
/**
* init_ohci1394_wait_for_busresets - wait until bus resets are completed
+ * @ohci: Pointer to the OHCI-1394 controller structure
*
* OHCI1394 initialization itself and any device going on- or offline
* and any cable issue cause a IEEE1394 bus reset. The OHCI1394 spec
@@ -189,6 +190,8 @@ static inline void __init init_ohci1394_wait_for_busresets(struct ohci *ohci)
/**
* init_ohci1394_enable_physical_dma - Enable physical DMA for remote debugging
+ * @ohci: Pointer to the OHCI-1394 controller structure
+ *
* This enables remote DMA access over IEEE1394 from every host for the low
* 4GB of address space. DMA accesses above 4GB are not available currently.
*/
@@ -201,6 +204,8 @@ static inline void __init init_ohci1394_enable_physical_dma(struct ohci *ohci)
/**
* init_ohci1394_reset_and_init_dma - init controller and enable DMA
+ * @ohci: Pointer to the OHCI-1394 controller structure
+ *
* This initializes the given controller and enables physical DMA engine in it.
*/
static inline void __init init_ohci1394_reset_and_init_dma(struct ohci *ohci)
@@ -230,6 +235,10 @@ static inline void __init init_ohci1394_reset_and_init_dma(struct ohci *ohci)
/**
* init_ohci1394_controller - Map the registers of the controller and init DMA
+ * @num: PCI bus number
+ * @slot: PCI device number
+ * @func: PCI function number
+ *
* This maps the registers of the specified controller and initializes it
*/
static inline void __init init_ohci1394_controller(int num, int slot, int func)
@@ -284,6 +293,7 @@ void __init init_ohci1394_dma_on_all_controllers(void)
/**
* setup_ohci1394_dma - enables early OHCI1394 DMA initialization
+ * @opt: Kernel command line parameter string
*/
static int __init setup_ohci1394_dma(char *opt)
{
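
The init_ohci1394_dma.c hunks only add the @parameter lines that kernel-doc expects. For reference, a complete kernel-doc comment pairs a one-line summary with one @name line per parameter, followed by an optional longer description (the function below is illustrative, not from the patch):

    /**
     * example_enable_physical_dma - enable physical DMA on a controller
     * @ohci: Pointer to the OHCI-1394 controller structure
     * @enable: true to enable remote DMA access, false to disable it
     *
     * An optional longer description follows after a blank line.
     */
    static void example_enable_physical_dma(struct ohci *ohci, bool enable)
    {
    }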