Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/arch_topology.c          |   2
-rw-r--r--  drivers/base/core.c                   |   2
-rw-r--r--  drivers/base/devcoredump.c            | 136
-rw-r--r--  drivers/base/devtmpfs.c               |   6
-rw-r--r--  drivers/base/firmware_loader/main.c   |  59
-rw-r--r--  drivers/base/memory.c                 |  23
-rw-r--r--  drivers/base/platform.c               |  71
-rw-r--r--  drivers/base/power/generic_ops.c      |  85
-rw-r--r--  drivers/base/power/main.c             |  40
-rw-r--r--  drivers/base/power/runtime.c          |  23
-rw-r--r--  drivers/base/power/trace.c            |   4
-rw-r--r--  drivers/base/power/wakeup.c           |  24
-rw-r--r--  drivers/base/regmap/regmap-slimbus.c  |   6
13 files changed, 256 insertions, 225 deletions
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 1037169abb45..e1eff05bea4a 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -292,7 +292,7 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
 	 * frequency (by keeping the initial capacity_freq_ref value).
 	 */
 	cpu_clk = of_clk_get(cpu_node, 0);
-	if (!PTR_ERR_OR_ZERO(cpu_clk)) {
+	if (!IS_ERR_OR_NULL(cpu_clk)) {
 		per_cpu(capacity_freq_ref, cpu) =
 			clk_get_rate(cpu_clk) / HZ_PER_KHZ;
 		clk_put(cpu_clk);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 3c533dab8fa5..f69dc9c85954 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1784,7 +1784,7 @@ static int fw_devlink_dev_sync_state(struct device *dev, void *data)
 		return 0;

 	if (fw_devlink_sync_state == FW_DEVLINK_SYNC_STATE_STRICT) {
-		dev_warn(sup, "sync_state() pending due to %s\n",
+		dev_info(sup, "sync_state() pending due to %s\n",
 			 dev_name(link->consumer));
 		return 0;
 	}
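The arch_topology.c hunk swaps !PTR_ERR_OR_ZERO(cpu_clk) for !IS_ERR_OR_NULL(cpu_clk). The two tests disagree exactly when the pointer is NULL: PTR_ERR_OR_ZERO() maps NULL to 0 ("success"), while IS_ERR_OR_NULL() treats it as a failure before the clock is used. A minimal userspace sketch of that difference; the macros are simplified re-creations of the <linux/err.h> helpers, not the real definitions:

    #include <stdio.h>

    /* Simplified userspace re-creations of the kernel's <linux/err.h>
     * helpers; the real ones reserve the top MAX_ERRNO addresses for
     * encoded error values. */
    #define MAX_ERRNO 4095
    #define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)
    #define IS_ERR(p) IS_ERR_VALUE((unsigned long)(p))
    #define IS_ERR_OR_NULL(p) ((p) == NULL || IS_ERR(p))
    #define PTR_ERR_OR_ZERO(p) (IS_ERR(p) ? (long)(p) : 0)
    #define ERR_PTR(e) ((void *)(long)(e))

    static void classify(const char *name, void *p)
    {
        printf("%-10s old check passes: %d   new check passes: %d\n",
               name, !PTR_ERR_OR_ZERO(p), !IS_ERR_OR_NULL(p));
    }

    int main(void)
    {
        int dummy;

        classify("valid ptr", &dummy);    /* both checks pass */
        classify("ERR_PTR", ERR_PTR(-2)); /* both checks fail */
        classify("NULL", NULL);           /* old passes, new fails */
        return 0;
    }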
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index 37faf6156d7c..55bdc7f5e59d 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -23,50 +23,46 @@ struct devcd_entry {
 	void *data;
 	size_t datalen;
 	/*
-	 * Here, mutex is required to serialize the calls to del_wk work between
-	 * user/kernel space which happens when devcd is added with device_add()
-	 * and that sends uevent to user space. User space reads the uevents,
-	 * and calls to devcd_data_write() which try to modify the work which is
-	 * not even initialized/queued from devcoredump.
+	 * There are 2 races for which mutex is required.
 	 *
+	 * The first race is between device creation and userspace writing to
+	 * schedule immediate destruction.
 	 *
-	 *        cpu0(X)                               cpu1(Y)
+	 * This race is handled by arming the timer before device creation, but
+	 * when device creation fails the timer still exists.
 	 *
-	 * dev_coredump() uevent sent to user space
-	 * device_add()  ======================> user space process Y reads the
-	 *                                       uevents writes to devcd fd
-	 *                                       which results into writes to
+	 * To solve this, hold the mutex during device_add(), and set
+	 * init_completed on success before releasing the mutex.
 	 *
-	 *                                       devcd_data_write()
-	 *                                         mod_delayed_work()
-	 *                                           try_to_grab_pending()
-	 *                                             timer_delete()
-	 *                                               debug_assert_init()
-	 *   INIT_DELAYED_WORK()
-	 *   schedule_delayed_work()
-	 *
-	 * Also, mutex alone would not be enough to avoid scheduling of
-	 * del_wk work after it get flush from a call to devcd_free()
-	 * mentioned as below.
-	 *
-	 *	disabled_store()
-	 *        devcd_free()
-	 *          mutex_lock()             devcd_data_write()
-	 *          flush_delayed_work()
-	 *          mutex_unlock()
-	 *                                   mutex_lock()
-	 *                                   mod_delayed_work()
-	 *                                   mutex_unlock()
-	 * So, delete_work flag is required.
+	 * That way the timer will never fire until device_add() is called, and
+	 * it will do nothing if init_completed is not set. The timer is also
+	 * cancelled in that case.
+	 *
+	 * The second race involves multiple parallel invocations of
+	 * devcd_free(); a deleted flag ensures that only one of them calls the
+	 * destructor.
 	 */
 	struct mutex mutex;
-	bool delete_work;
+	bool init_completed, deleted;
 	struct module *owner;
 	ssize_t (*read)(char *buffer, loff_t offset, size_t count,
 			void *data, size_t datalen);
 	void (*free)(void *data);
+	/*
+	 * If nothing interferes and device_add() returns success, del_wk will
+	 * destroy the device after the timer fires.
+	 *
+	 * Multiple userspace processes can interfere with the timer:
+	 * - Writing to the coredump will reschedule the timer to run
+	 *   immediately, if still armed.
+	 *
+	 *   This is handled by using "if (cancel_delayed_work()) {
+	 *   schedule_delayed_work() }", to prevent re-arming after having
+	 *   been previously fired.
+	 * - Writing to /sys/class/devcoredump/disabled will destroy the
+	 *   coredump synchronously.
+	 *   This is handled by using disable_delayed_work_sync(), and then
+	 *   checking whether the deleted flag is set with &devcd->mutex held.
+	 */
 	struct delayed_work del_wk;
 	struct device *failing_dev;
 };
@@ -95,14 +91,27 @@ static void devcd_dev_release(struct device *dev)
 	kfree(devcd);
 }

+static void __devcd_del(struct devcd_entry *devcd)
+{
+	devcd->deleted = true;
+	device_del(&devcd->devcd_dev);
+	put_device(&devcd->devcd_dev);
+}
+
 static void devcd_del(struct work_struct *wk)
 {
 	struct devcd_entry *devcd;
+	bool init_completed;

 	devcd = container_of(wk, struct devcd_entry, del_wk.work);

-	device_del(&devcd->devcd_dev);
-	put_device(&devcd->devcd_dev);
+	/* devcd->mutex serializes against dev_coredumpm_timeout */
+	mutex_lock(&devcd->mutex);
+	init_completed = devcd->init_completed;
+	mutex_unlock(&devcd->mutex);
+
+	if (init_completed)
+		__devcd_del(devcd);
 }

 static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj,
@@ -122,12 +131,12 @@ static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
 	struct device *dev = kobj_to_dev(kobj);
 	struct devcd_entry *devcd = dev_to_devcd(dev);

-	mutex_lock(&devcd->mutex);
-	if (!devcd->delete_work) {
-		devcd->delete_work = true;
-		mod_delayed_work(system_wq, &devcd->del_wk, 0);
-	}
-	mutex_unlock(&devcd->mutex);
+	/*
+	 * Although it's tempting to use mod_delayed_work() here,
+	 * that will cause a reschedule if the timer already fired.
+	 */
+	if (cancel_delayed_work(&devcd->del_wk))
+		schedule_delayed_work(&devcd->del_wk, 0);

 	return count;
 }
@@ -151,11 +160,21 @@ static int devcd_free(struct device *dev, void *data)
 {
 	struct devcd_entry *devcd = dev_to_devcd(dev);

+	/*
+	 * To prevent a race with devcd_data_write(), disable work and
+	 * complete manually instead.
+	 *
+	 * We cannot rely on the return value of
+	 * disable_delayed_work_sync() here, because it might be in the
+	 * middle of a cancel_delayed_work + schedule_delayed_work pair.
+	 *
+	 * devcd->mutex here guards against multiple parallel invocations
+	 * of devcd_free().
+	 */
+	disable_delayed_work_sync(&devcd->del_wk);
 	mutex_lock(&devcd->mutex);
-	if (!devcd->delete_work)
-		devcd->delete_work = true;
-
-	flush_delayed_work(&devcd->del_wk);
+	if (!devcd->deleted)
+		__devcd_del(devcd);
 	mutex_unlock(&devcd->mutex);
 	return 0;
 }
@@ -179,12 +198,10 @@ static ssize_t disabled_show(const struct class *class, const struct class_attri
 *                                                          put_device() <- last reference
 *             error = fn(dev, data)                        devcd_dev_release()
 *                devcd_free(dev, data)                       kfree(devcd)
-*                  mutex_lock(&devcd->mutex);
 *
 *
 * In the above diagram, it looks like disabled_store() would be racing with parallelly
-* running devcd_del() and result in memory abort while acquiring devcd->mutex which
-* is called after kfree of devcd memory after dropping its last reference with
+* running devcd_del() and result in memory abort after dropping its last reference with
 * put_device(). However, this will not happens as fn(dev, data) runs
 * with its own reference to device via klist_node so it is not its last reference.
 * so, above situation would not occur.
@@ -374,7 +391,7 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
 	devcd->read = read;
 	devcd->free = free;
 	devcd->failing_dev = get_device(dev);
-	devcd->delete_work = false;
+	devcd->deleted = false;

 	mutex_init(&devcd->mutex);
 	device_initialize(&devcd->devcd_dev);
@@ -383,8 +400,14 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
 		     atomic_inc_return(&devcd_count));
 	devcd->devcd_dev.class = &devcd_class;

-	mutex_lock(&devcd->mutex);
 	dev_set_uevent_suppress(&devcd->devcd_dev, true);
+
+	/* devcd->mutex prevents devcd_del() completing until init finishes */
+	mutex_lock(&devcd->mutex);
+	devcd->init_completed = false;
+	INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
+	schedule_delayed_work(&devcd->del_wk, timeout);
+
 	if (device_add(&devcd->devcd_dev))
 		goto put_device;
@@ -401,13 +424,20 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
 	dev_set_uevent_suppress(&devcd->devcd_dev, false);
 	kobject_uevent(&devcd->devcd_dev.kobj, KOBJ_ADD);
-	INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
-	schedule_delayed_work(&devcd->del_wk, timeout);
+
+	/*
+	 * Safe to run devcd_del() now that we are done with devcd_dev.
+	 * Alternatively we could have taken a ref on devcd_dev before
+	 * dropping the lock.
+	 */
+	devcd->init_completed = true;
 	mutex_unlock(&devcd->mutex);
 	return;

  put_device:
-	put_device(&devcd->devcd_dev);
 	mutex_unlock(&devcd->mutex);
+	cancel_delayed_work_sync(&devcd->del_wk);
+	put_device(&devcd->devcd_dev);
+
  put_module:
 	module_put(owner);
  free:
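The devcd_data_write() change replaces mod_delayed_work() with a cancel-then-schedule pair, so a write from userspace can only advance a timer that is still armed and can never re-arm one that has already fired. A toy single-threaded model of that distinction; the three helpers are simplified stand-ins for the workqueue API, not its implementation:

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_dwork {
        bool pending;  /* timer armed and not yet fired */
        int runs;      /* how many times the work ran */
    };

    static void toy_schedule(struct toy_dwork *w) { w->pending = true; }

    /* Returns true only if the work was still pending, like cancel_delayed_work(). */
    static bool toy_cancel(struct toy_dwork *w)
    {
        bool was_pending = w->pending;

        w->pending = false;
        return was_pending;
    }

    /* The timer fires: the work runs once and disarms itself. */
    static void toy_fire(struct toy_dwork *w)
    {
        if (w->pending) {
            w->pending = false;
            w->runs++;
        }
    }

    int main(void)
    {
        struct toy_dwork w = { 0 };

        /* mod_delayed_work()-style write path: unconditionally re-arms. */
        toy_schedule(&w);
        toy_fire(&w);      /* work has already run once */
        toy_schedule(&w);
        toy_fire(&w);      /* runs again: double destruction */
        printf("mod-style runs: %d\n", w.runs);

        /* cancel+schedule style: a fired timer stays fired. */
        w.runs = 0;
        toy_schedule(&w);
        toy_fire(&w);
        if (toy_cancel(&w))    /* false: already fired, do not re-arm */
            toy_schedule(&w);
        toy_fire(&w);
        printf("cancel+schedule runs: %d\n", w.runs);
        return 0;
    }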
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 9d4e46ad8352..2f576ecf1832 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -180,7 +180,7 @@ static int dev_mkdir(const char *name, umode_t mode)
 	if (IS_ERR(dentry))
 		return PTR_ERR(dentry);

-	dentry = vfs_mkdir(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode);
+	dentry = vfs_mkdir(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode, NULL);
 	if (!IS_ERR(dentry))
 		/* mark as kernel-created inode */
 		d_inode(dentry)->i_private = &thread;
@@ -231,7 +231,7 @@ static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
 		return PTR_ERR(dentry);

 	err = vfs_mknod(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode,
-			dev->devt);
+			dev->devt, NULL);
 	if (!err) {
 		struct iattr newattrs;
@@ -261,7 +261,7 @@ static int dev_rmdir(const char *name)
 		return PTR_ERR(dentry);
 	if (d_inode(dentry)->i_private == &thread)
 		err = vfs_rmdir(&nop_mnt_idmap, d_inode(parent.dentry),
-				dentry);
+				dentry, NULL);
 	else
 		err = -EPERM;
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 6942c62fa59d..bee3050a20d9 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -829,8 +829,6 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
 		  size_t offset, u32 opt_flags)
 {
 	struct firmware *fw = NULL;
-	struct cred *kern_cred = NULL;
-	const struct cred *old_cred;
 	bool nondirect = false;
 	int ret;
@@ -871,45 +869,38 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
 	 * called by a driver when serving an unrelated request from userland, we use
 	 * the kernel credentials to read the file.
 	 */
-	kern_cred = prepare_kernel_cred(&init_task);
-	if (!kern_cred) {
-		ret = -ENOMEM;
-		goto out;
-	}
-	old_cred = override_creds(kern_cred);
+	scoped_with_kernel_creds() {
+		ret = fw_get_filesystem_firmware(device, fw->priv, "", NULL);

-	ret = fw_get_filesystem_firmware(device, fw->priv, "", NULL);
-
-	/* Only full reads can support decompression, platform, and sysfs. */
-	if (!(opt_flags & FW_OPT_PARTIAL))
-		nondirect = true;
+		/* Only full reads can support decompression, platform, and sysfs. */
+		if (!(opt_flags & FW_OPT_PARTIAL))
+			nondirect = true;

 #ifdef CONFIG_FW_LOADER_COMPRESS_ZSTD
-	if (ret == -ENOENT && nondirect)
-		ret = fw_get_filesystem_firmware(device, fw->priv, ".zst",
-						 fw_decompress_zstd);
+		if (ret == -ENOENT && nondirect)
+			ret = fw_get_filesystem_firmware(device, fw->priv, ".zst",
+							 fw_decompress_zstd);
 #endif
 #ifdef CONFIG_FW_LOADER_COMPRESS_XZ
-	if (ret == -ENOENT && nondirect)
-		ret = fw_get_filesystem_firmware(device, fw->priv, ".xz",
-						 fw_decompress_xz);
+		if (ret == -ENOENT && nondirect)
+			ret = fw_get_filesystem_firmware(device, fw->priv, ".xz",
+							 fw_decompress_xz);
 #endif
-	if (ret == -ENOENT && nondirect)
-		ret = firmware_fallback_platform(fw->priv);
+		if (ret == -ENOENT && nondirect)
+			ret = firmware_fallback_platform(fw->priv);

-	if (ret) {
-		if (!(opt_flags & FW_OPT_NO_WARN))
-			dev_warn(device,
-				 "Direct firmware load for %s failed with error %d\n",
-				 name, ret);
-		if (nondirect)
-			ret = firmware_fallback_sysfs(fw, name, device,
-						      opt_flags, ret);
-	} else
-		ret = assign_fw(fw, device);
-
-	revert_creds(old_cred);
-	put_cred(kern_cred);
+		if (ret) {
+			if (!(opt_flags & FW_OPT_NO_WARN))
+				dev_warn(device,
+					 "Direct firmware load for %s failed with error %d\n",
+					 name, ret);
+			if (nondirect)
+				ret = firmware_fallback_sysfs(fw, name, device,
+							      opt_flags, ret);
+		} else {
+			ret = assign_fw(fw, device);
+		}
+	}

  out:
 	if (ret < 0) {
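The firmware loader hunk replaces the manual prepare_kernel_cred()/override_creds()/revert_creds() bookkeeping with a scoped_with_kernel_creds() block, so the credential override is undone on every path out of the scope. A userspace sketch of the same scope-guard idea using the GCC/Clang cleanup attribute; the names and the integer "credential" are hypothetical, and the kernel builds its guards from its own cleanup.h machinery instead:

    #include <stdio.h>

    static int current_cred = 1000;   /* stand-in for the task's credentials */

    /* Runs automatically when the guard variable goes out of scope. */
    static void cred_guard_exit(int *saved)
    {
        current_cred = *saved;
    }

    int main(void)
    {
        printf("before: %d\n", current_cred);
        {
            /* Save the old value; the cleanup handler restores it no
             * matter how this block is exited. */
            int saved __attribute__((cleanup(cred_guard_exit))) = current_cred;

            current_cred = 0;   /* act with elevated credentials */
            printf("inside scope: %d\n", current_cred);
        }   /* cleanup fires here, restoring the old value */
        printf("after: %d\n", current_cred);
        return 0;
    }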
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 6d84a02cfa5d..fc43f2703ae0 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -226,7 +226,6 @@ static int memory_block_online(struct memory_block *mem)
 	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
 	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
 	unsigned long nr_vmemmap_pages = 0;
-	struct memory_notify arg;
 	struct zone *zone;
 	int ret;
@@ -246,19 +245,9 @@ static int memory_block_online(struct memory_block *mem)
 	if (mem->altmap)
 		nr_vmemmap_pages = mem->altmap->free;

-	arg.altmap_start_pfn = start_pfn;
-	arg.altmap_nr_pages = nr_vmemmap_pages;
-	arg.start_pfn = start_pfn + nr_vmemmap_pages;
-	arg.nr_pages = nr_pages - nr_vmemmap_pages;
 	mem_hotplug_begin();
-	ret = memory_notify(MEM_PREPARE_ONLINE, &arg);
-	ret = notifier_to_errno(ret);
-	if (ret)
-		goto out_notifier;
-
 	if (nr_vmemmap_pages) {
-		ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages,
-						zone, mem->altmap->inaccessible);
+		ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone);
 		if (ret)
 			goto out;
 	}
@@ -280,11 +269,7 @@ static int memory_block_online(struct memory_block *mem)
 					  nr_vmemmap_pages);

 	mem->zone = zone;
-	mem_hotplug_done();
-	return ret;
 out:
-	memory_notify(MEM_FINISH_OFFLINE, &arg);
-out_notifier:
 	mem_hotplug_done();
 	return ret;
 }
@@ -297,7 +282,6 @@ static int memory_block_offline(struct memory_block *mem)
 	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
 	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
 	unsigned long nr_vmemmap_pages = 0;
-	struct memory_notify arg;
 	int ret;

 	if (!mem->zone)
@@ -329,11 +313,6 @@ static int memory_block_offline(struct memory_block *mem)
 		mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);

 	mem->zone = NULL;
-	arg.altmap_start_pfn = start_pfn;
-	arg.altmap_nr_pages = nr_vmemmap_pages;
-	arg.start_pfn = start_pfn + nr_vmemmap_pages;
-	arg.nr_pages = nr_pages - nr_vmemmap_pages;
-	memory_notify(MEM_FINISH_OFFLINE, &arg);
 out:
 	mem_hotplug_done();
 	return ret;
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 09450349cf32..b45d41b018ca 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -150,25 +150,37 @@ devm_platform_ioremap_resource_byname(struct platform_device *pdev,
 EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource_byname);
 #endif /* CONFIG_HAS_IOMEM */

+static const struct cpumask *get_irq_affinity(struct platform_device *dev,
+					      unsigned int num)
+{
+	const struct cpumask *mask = NULL;
+#ifndef CONFIG_SPARC
+	struct fwnode_handle *fwnode = dev_fwnode(&dev->dev);
+
+	if (is_of_node(fwnode))
+		mask = of_irq_get_affinity(to_of_node(fwnode), num);
+	else if (is_acpi_device_node(fwnode))
+		mask = acpi_irq_get_affinity(ACPI_HANDLE_FWNODE(fwnode), num);
+#endif
+
+	return mask ?: cpu_possible_mask;
+}
+
 /**
- * platform_get_irq_optional - get an optional IRQ for a device
- * @dev: platform device
- * @num: IRQ number index
+ * platform_get_irq_affinity - get an optional IRQ and its affinity for a device
+ * @dev: platform device
+ * @num: interrupt number index
+ * @affinity: optional cpumask pointer to get the affinity of a per-cpu interrupt
 *
- * Gets an IRQ for a platform device. Device drivers should check the return
- * value for errors so as to not pass a negative integer value to the
- * request_irq() APIs. This is the same as platform_get_irq(), except that it
- * does not print an error message if an IRQ can not be obtained.
- *
- * For example::
- *
- *		int irq = platform_get_irq_optional(pdev, 0);
- *		if (irq < 0)
- *			return irq;
+ * Gets an interrupt for a platform device. Device drivers should check the
+ * return value for errors so as to not pass a negative integer value to
+ * the request_irq() APIs. Optional affinity information is provided in the
+ * affinity pointer if available, and NULL otherwise.
 *
- * Return: non-zero IRQ number on success, negative error number on failure.
+ * Return: non-zero interrupt number on success, negative error number on failure.
 */
-int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
+int platform_get_irq_affinity(struct platform_device *dev, unsigned int num,
+			      const struct cpumask **affinity)
 {
 	int ret;
 #ifdef CONFIG_SPARC
@@ -236,8 +248,37 @@ out_not_found:
 out:
 	if (WARN(!ret, "0 is an invalid IRQ number\n"))
 		return -EINVAL;
+
+	if (ret > 0 && affinity)
+		*affinity = get_irq_affinity(dev, num);
+
 	return ret;
 }
+EXPORT_SYMBOL_GPL(platform_get_irq_affinity);
+
+/**
+ * platform_get_irq_optional - get an optional interrupt for a device
+ * @dev: platform device
+ * @num: interrupt number index
+ *
+ * Gets an interrupt for a platform device. Device drivers should check the
+ * return value for errors so as to not pass a negative integer value to
+ * the request_irq() APIs. This is the same as platform_get_irq(), except
+ * that it does not print an error message if an interrupt cannot be
+ * obtained.
+ *
+ * For example::
+ *
+ *		int irq = platform_get_irq_optional(pdev, 0);
+ *		if (irq < 0)
+ *			return irq;
+ *
+ * Return: non-zero interrupt number on success, negative error number on failure.
+ */
+int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
+{
+	return platform_get_irq_affinity(dev, num, NULL);
+}
 EXPORT_SYMBOL_GPL(platform_get_irq_optional);

 /**
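get_irq_affinity() above ends with the GNU ?: operator, mask ?: cpu_possible_mask, so callers of platform_get_irq_affinity() always receive a usable mask even when neither OF nor ACPI provides one. A small userspace illustration of that fallback shape; the names are hypothetical, and ?: with an omitted middle operand is a GCC/Clang extension:

    #include <stdio.h>

    static const char *lookup_from_firmware(int irq)
    {
        /* Pretend only interrupt 0 carries explicit affinity information. */
        return irq == 0 ? "cpu0-cpu3" : NULL;
    }

    static const char *get_affinity(int irq)
    {
        /* GNU "a ?: b": evaluates a once, falls back to b when a is NULL. */
        return lookup_from_firmware(irq) ?: "all-possible-cpus";
    }

    int main(void)
    {
        printf("irq0 -> %s\n", get_affinity(0));  /* explicit mask */
        printf("irq1 -> %s\n", get_affinity(1));  /* fallback */
        return 0;
    }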
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 6502720bb564..af99bbcf281c 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -8,6 +8,13 @@
 #include <linux/pm_runtime.h>
 #include <linux/export.h>

+#define CALL_PM_OP(dev, op)						\
+({									\
+	struct device *_dev = (dev);					\
+	const struct dev_pm_ops *pm = _dev->driver ? _dev->driver->pm : NULL; \
+	pm && pm->op ? pm->op(_dev) : 0;				\
+})
+
 #ifdef CONFIG_PM
 /**
  * pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems.
@@ -19,12 +26,7 @@
  */
 int pm_generic_runtime_suspend(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-	int ret;
-
-	ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : 0;
-
-	return ret;
+	return CALL_PM_OP(dev, runtime_suspend);
 }
 EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend);

@@ -38,12 +40,7 @@ EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend);
  */
 int pm_generic_runtime_resume(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-	int ret;
-
-	ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : 0;
-
-	return ret;
+	return CALL_PM_OP(dev, runtime_resume);
 }
 EXPORT_SYMBOL_GPL(pm_generic_runtime_resume);
 #endif /* CONFIG_PM */
@@ -72,9 +69,7 @@ int pm_generic_prepare(struct device *dev)
  */
 int pm_generic_suspend_noirq(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->suspend_noirq ? pm->suspend_noirq(dev) : 0;
+	return CALL_PM_OP(dev, suspend_noirq);
 }
 EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);

@@ -84,9 +79,7 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
  */
 int pm_generic_suspend_late(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->suspend_late ? pm->suspend_late(dev) : 0;
+	return CALL_PM_OP(dev, suspend_late);
 }
 EXPORT_SYMBOL_GPL(pm_generic_suspend_late);

@@ -96,9 +89,7 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend_late);
  */
 int pm_generic_suspend(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->suspend ? pm->suspend(dev) : 0;
+	return CALL_PM_OP(dev, suspend);
 }
 EXPORT_SYMBOL_GPL(pm_generic_suspend);

@@ -108,9 +99,7 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend);
  */
 int pm_generic_freeze_noirq(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->freeze_noirq ? pm->freeze_noirq(dev) : 0;
+	return CALL_PM_OP(dev, freeze_noirq);
 }
 EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);

@@ -120,9 +109,7 @@ EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
  */
 int pm_generic_freeze(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->freeze ? pm->freeze(dev) : 0;
+	return CALL_PM_OP(dev, freeze);
 }
 EXPORT_SYMBOL_GPL(pm_generic_freeze);

@@ -132,9 +119,7 @@ EXPORT_SYMBOL_GPL(pm_generic_freeze);
  */
 int pm_generic_poweroff_noirq(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->poweroff_noirq ? pm->poweroff_noirq(dev) : 0;
+	return CALL_PM_OP(dev, poweroff_noirq);
 }
 EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);

@@ -144,9 +129,7 @@ EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);
  */
 int pm_generic_poweroff_late(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->poweroff_late ? pm->poweroff_late(dev) : 0;
+	return CALL_PM_OP(dev, poweroff_late);
 }
 EXPORT_SYMBOL_GPL(pm_generic_poweroff_late);

@@ -156,9 +139,7 @@ EXPORT_SYMBOL_GPL(pm_generic_poweroff_late);
  */
 int pm_generic_poweroff(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->poweroff ? pm->poweroff(dev) : 0;
+	return CALL_PM_OP(dev, poweroff);
 }
 EXPORT_SYMBOL_GPL(pm_generic_poweroff);

@@ -168,9 +149,7 @@ EXPORT_SYMBOL_GPL(pm_generic_poweroff);
  */
 int pm_generic_thaw_noirq(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->thaw_noirq ? pm->thaw_noirq(dev) : 0;
+	return CALL_PM_OP(dev, thaw_noirq);
 }
 EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);

@@ -180,9 +159,7 @@ EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
  */
 int pm_generic_thaw(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->thaw ? pm->thaw(dev) : 0;
+	return CALL_PM_OP(dev, thaw);
 }
 EXPORT_SYMBOL_GPL(pm_generic_thaw);

@@ -192,9 +169,7 @@ EXPORT_SYMBOL_GPL(pm_generic_thaw);
  */
 int pm_generic_resume_noirq(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->resume_noirq ? pm->resume_noirq(dev) : 0;
+	return CALL_PM_OP(dev, resume_noirq);
 }
 EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);

@@ -204,9 +179,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
  */
 int pm_generic_resume_early(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->resume_early ? pm->resume_early(dev) : 0;
+	return CALL_PM_OP(dev, resume_early);
 }
 EXPORT_SYMBOL_GPL(pm_generic_resume_early);

@@ -216,9 +189,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume_early);
  */
 int pm_generic_resume(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->resume ? pm->resume(dev) : 0;
+	return CALL_PM_OP(dev, resume);
 }
 EXPORT_SYMBOL_GPL(pm_generic_resume);

@@ -228,9 +199,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume);
  */
 int pm_generic_restore_noirq(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->restore_noirq ? pm->restore_noirq(dev) : 0;
+	return CALL_PM_OP(dev, restore_noirq);
 }
 EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);

@@ -240,9 +209,7 @@ EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
  */
 int pm_generic_restore_early(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->restore_early ? pm->restore_early(dev) : 0;
+	return CALL_PM_OP(dev, restore_early);
 }
 EXPORT_SYMBOL_GPL(pm_generic_restore_early);

@@ -252,9 +219,7 @@ EXPORT_SYMBOL_GPL(pm_generic_restore_early);
  */
 int pm_generic_restore(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->restore ? pm->restore(dev) : 0;
+	return CALL_PM_OP(dev, restore);
 }
 EXPORT_SYMBOL_GPL(pm_generic_restore);
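CALL_PM_OP() collapses eighteen near-identical function bodies into one GNU statement-expression macro: it evaluates its dev argument exactly once, resolves the optional ops table, and yields 0 when either the driver or the requested callback is absent. A userspace sketch of the same shape, with toy types rather than the kernel's dev_pm_ops:

    #include <stdio.h>

    struct ops {
        int (*suspend)(int id);
        int (*resume)(int id);
    };

    struct dev {
        int id;
        const struct ops *ops;  /* may be NULL, and each slot may be NULL */
    };

    /*
     * GNU statement expression: evaluates to the last expression.
     * (dev) is evaluated exactly once, and a missing ops table or a
     * missing slot yields 0 ("nothing to do").
     */
    #define CALL_OP(dev, op)                                \
    ({                                                      \
        struct dev *_d = (dev);                             \
        const struct ops *_ops = _d->ops;                   \
        _ops && _ops->op ? _ops->op(_d->id) : 0;            \
    })

    static int my_suspend(int id) { printf("suspend %d\n", id); return 0; }

    int main(void)
    {
        const struct ops partial = { .suspend = my_suspend };  /* no .resume */
        struct dev a = { 1, &partial };
        struct dev b = { 2, NULL };                            /* no ops at all */

        printf("a suspend -> %d\n", CALL_OP(&a, suspend));
        printf("a resume  -> %d\n", CALL_OP(&a, resume));   /* slot NULL: 0 */
        printf("b suspend -> %d\n", CALL_OP(&b, suspend));  /* table NULL: 0 */
        return 0;
    }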
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index e83503bdc1fd..a0225a83f50c 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -34,6 +34,7 @@
 #include <linux/cpufreq.h>
 #include <linux/devfreq.h>
 #include <linux/timer.h>
+#include <linux/nmi.h>

 #include "../base.h"
 #include "power.h"
@@ -95,6 +96,8 @@ static const char *pm_verb(int event)
 		return "restore";
 	case PM_EVENT_RECOVER:
 		return "recover";
+	case PM_EVENT_POWEROFF:
+		return "poweroff";
 	default:
 		return "(unknown PM event)";
 	}
@@ -367,6 +370,7 @@ static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
 	case PM_EVENT_FREEZE:
 	case PM_EVENT_QUIESCE:
 		return ops->freeze;
+	case PM_EVENT_POWEROFF:
 	case PM_EVENT_HIBERNATE:
 		return ops->poweroff;
 	case PM_EVENT_THAW:
@@ -401,6 +405,7 @@ static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
 	case PM_EVENT_FREEZE:
 	case PM_EVENT_QUIESCE:
 		return ops->freeze_late;
+	case PM_EVENT_POWEROFF:
 	case PM_EVENT_HIBERNATE:
 		return ops->poweroff_late;
 	case PM_EVENT_THAW:
@@ -435,6 +440,7 @@ static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t stat
 	case PM_EVENT_FREEZE:
 	case PM_EVENT_QUIESCE:
 		return ops->freeze_noirq;
+	case PM_EVENT_POWEROFF:
 	case PM_EVENT_HIBERNATE:
 		return ops->poweroff_noirq;
 	case PM_EVENT_THAW:
@@ -515,6 +521,11 @@ struct dpm_watchdog {
 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
 	struct dpm_watchdog wd

+static bool __read_mostly dpm_watchdog_all_cpu_backtrace;
+module_param(dpm_watchdog_all_cpu_backtrace, bool, 0644);
+MODULE_PARM_DESC(dpm_watchdog_all_cpu_backtrace,
+		 "Backtrace all CPUs on DPM watchdog timeout");
+
 /**
  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
  * @t: The timer that PM watchdog depends on.
@@ -530,8 +541,12 @@ static void dpm_watchdog_handler(struct timer_list *t)
 	unsigned int time_left;

 	if (wd->fatal) {
+		unsigned int this_cpu = smp_processor_id();
+
 		dev_emerg(wd->dev, "**** DPM device timeout ****\n");
 		show_stack(wd->tsk, NULL, KERN_EMERG);
+		if (dpm_watchdog_all_cpu_backtrace)
+			trigger_allbutcpu_cpu_backtrace(this_cpu);
 		panic("%s %s: unrecoverable failure\n",
 			dev_driver_string(wd->dev), dev_name(wd->dev));
 	}
@@ -888,12 +903,15 @@ static void device_resume_early(struct device *dev, pm_message_t state, bool asy
 	TRACE_DEVICE(dev);
 	TRACE_RESUME(0);

-	if (dev->power.syscore || dev->power.direct_complete)
+	if (dev->power.direct_complete)
 		goto Out;

 	if (!dev->power.is_late_suspended)
 		goto Out;

+	if (dev->power.syscore)
+		goto Skip;
+
 	if (!dpm_wait_for_superior(dev, async))
 		goto Out;
@@ -926,11 +944,11 @@ Run:

 Skip:
 	dev->power.is_late_suspended = false;
+	pm_runtime_enable(dev);

 Out:
 	TRACE_RESUME(error);

-	pm_runtime_enable(dev);
 	complete_all(&dev->power.completion);

 	if (error) {
@@ -1615,12 +1633,6 @@ static void device_suspend_late(struct device *dev, pm_message_t state, bool asy
 	TRACE_DEVICE(dev);
 	TRACE_SUSPEND(0);

-	/*
-	 * Disable runtime PM for the device without checking if there is a
-	 * pending resume request for it.
-	 */
-	__pm_runtime_disable(dev, false);
-
 	dpm_wait_for_subordinate(dev, async);

 	if (READ_ONCE(async_error))
@@ -1631,9 +1643,18 @@ static void device_suspend_late(struct device *dev, pm_message_t state, bool asy
 		goto Complete;
 	}

-	if (dev->power.syscore || dev->power.direct_complete)
+	if (dev->power.direct_complete)
 		goto Complete;

+	/*
+	 * Disable runtime PM for the device without checking if there is a
+	 * pending resume request for it.
+	 */
+	__pm_runtime_disable(dev, false);
+
+	if (dev->power.syscore)
+		goto Skip;
+
 	if (dev->pm_domain) {
 		info = "late power domain ";
 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -1664,6 +1685,7 @@ Run:
 		WRITE_ONCE(async_error, error);
 		dpm_save_failed_dev(dev_name(dev));
 		pm_dev_err(dev, state, async ? " async late" : " late", error);
+		pm_runtime_enable(dev);
 		goto Complete;
 	}
 	dpm_propagate_wakeup_to_parent(dev);
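The device_suspend_late() changes move __pm_runtime_disable() after the direct_complete check and add a pm_runtime_enable() on the error path, mirroring the one moved into device_resume_early()'s Skip path, so the runtime-PM disable depth stays balanced on every exit route. A toy model of why that pairing matters; the counter is a simplification, not the kernel's rpm bookkeeping:

    #include <stdio.h>

    static int disable_depth;   /* runtime PM is blocked while > 0 */

    static void rpm_disable(void) { disable_depth++; }
    static void rpm_enable(void)  { if (disable_depth > 0) disable_depth--; }

    /* Returns 0 on success, -1 if the (pretend) late-suspend callback fails. */
    static int suspend_late(int fail)
    {
        rpm_disable();
        if (fail) {
            rpm_enable();   /* error path must undo the disable */
            return -1;
        }
        return 0;
    }

    static void resume_early(void)
    {
        rpm_enable();       /* pairs with suspend_late()'s disable */
    }

    int main(void)
    {
        suspend_late(0);
        resume_early();
        suspend_late(1);    /* failure: no resume_early() will follow */
        printf("disable depth after one cycle plus one failure: %d\n",
               disable_depth);  /* 0: still balanced */
        return 0;
    }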
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 1b11a3cd4acc..62707738caa4 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -90,7 +90,7 @@ static void update_pm_runtime_accounting(struct device *dev)
 	/*
 	 * Because ktime_get_mono_fast_ns() is not monotonic during
 	 * timekeeping updates, ensure that 'now' is after the last saved
-	 * timesptamp.
+	 * timestamp.
 	 */
 	if (now < last)
 		return;
@@ -217,7 +217,7 @@ static int dev_memalloc_noio(struct device *dev, void *data)
 	 *     resume/suspend callback of any one of its ancestors(or the
 	 *     block device itself), the deadlock may be triggered inside the
 	 *     memory allocation since it might not complete until the block
-	 *     device becomes active and the involed page I/O finishes. The
+	 *     device becomes active and the involved page I/O finishes. The
 	 *     situation is pointed out first by Alan Stern. Network device
 	 *     are involved in iSCSI kind of situation.
 	 *
@@ -1210,7 +1210,7 @@ EXPORT_SYMBOL_GPL(__pm_runtime_resume);
 *
 * Otherwise, if its runtime PM status is %RPM_ACTIVE and (1) @ign_usage_count
 * is set, or (2) @dev is not ignoring children and its active child count is
- * nonero, or (3) the runtime PM usage counter of @dev is not zero, increment
+ * nonzero, or (3) the runtime PM usage counter of @dev is not zero, increment
 * the usage counter of @dev and return 1.
 *
 * Otherwise, return 0 without changing the usage counter.
@@ -1664,9 +1664,12 @@ EXPORT_SYMBOL_GPL(devm_pm_runtime_get_noresume);
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
- * Increase the device's usage count and clear its power.runtime_auto flag,
- * so that it cannot be suspended at run time until pm_runtime_allow() is called
- * for it.
+ * Resume @dev if already suspended and block runtime suspend of @dev in such
+ * a way that it can be unblocked via the /sys/devices/.../power/control
+ * interface, or otherwise by calling pm_runtime_allow().
+ *
+ * Calling this function many times in a row has the same effect as calling it
+ * once.
 */
 void pm_runtime_forbid(struct device *dev)
 {
@@ -1687,7 +1690,13 @@ EXPORT_SYMBOL_GPL(pm_runtime_forbid);
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
- * Decrease the device's usage count and set its power.runtime_auto flag.
+ * Unblock runtime suspend of @dev after it has been blocked by
+ * pm_runtime_forbid() (for instance, if it has been blocked via the
+ * /sys/devices/.../power/control interface), check if @dev can be
+ * suspended and suspend it in that case.
+ *
+ * Calling this function many times in a row has the same effect as calling it
+ * once.
 */
 void pm_runtime_allow(struct device *dev)
 {
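The reworked pm_runtime_forbid()/pm_runtime_allow() kernel-doc stresses that repeated calls are idempotent. That property falls out of gating the usage-count change on a flag, which the following toy model makes explicit (simplified; the real functions also resume the device or check whether it can suspend):

    #include <stdbool.h>
    #include <stdio.h>

    static bool runtime_auto = true;   /* runtime suspend allowed? */
    static int usage_count;

    static void forbid(void)
    {
        if (!runtime_auto)
            return;        /* already forbidden: no double-increment */
        runtime_auto = false;
        usage_count++;
    }

    static void allow(void)
    {
        if (runtime_auto)
            return;        /* already allowed: no double-decrement */
        runtime_auto = true;
        usage_count--;
    }

    int main(void)
    {
        forbid();
        forbid();   /* second call is a no-op */
        allow();
        allow();    /* so the count cannot go negative */
        printf("usage_count = %d\n", usage_count);  /* 0 */
        return 0;
    }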
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index cd6e559648b2..d8da7195bb00 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -238,10 +238,8 @@ int show_trace_dev_match(char *buf, size_t size)
 		unsigned int hash = hash_string(DEVSEED, dev_name(dev),
 						DEVHASH);
 		if (hash == value) {
-			int len = snprintf(buf, size, "%s\n",
+			int len = scnprintf(buf, size, "%s\n",
 					    dev_driver_string(dev));
-			if (len > size)
-				len = size;
 			buf += len;
 			ret += len;
 			size -= len;
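The trace.c hunk swaps snprintf() for scnprintf(): snprintf() returns the length the output would have had, so the removed manual clamp was still fragile once truncation started, whereas scnprintf() returns the number of characters actually stored, keeping the buf/size arithmetic safe. A userspace sketch of that contract; this scnprintf() is a simplified re-implementation, not the kernel's:

    #include <stdarg.h>
    #include <stdio.h>

    /* Simplified scnprintf(): like snprintf(), but returns how many
     * characters were actually stored, never more than size - 1. */
    static int scnprintf(char *buf, size_t size, const char *fmt, ...)
    {
        va_list args;
        int len;

        if (size == 0)
            return 0;
        va_start(args, fmt);
        len = vsnprintf(buf, size, fmt, args);
        va_end(args);
        return len < (int)size ? len : (int)(size - 1);
    }

    int main(void)
    {
        char buf[16];
        char *p = buf;
        size_t left = sizeof(buf);
        const char *names[] = { "first-driver", "second-driver" };

        /* Safe append loop: 'len' can never exceed the remaining space,
         * so p never walks past the buffer and left never wraps around. */
        for (int i = 0; i < 2; i++) {
            int len = scnprintf(p, left, "%s\n", names[i]);

            p += len;
            left -= len;
        }
        printf("buffer holds: %s\n", buf);
        printf("space left: %zu\n", left);
        return 0;
    }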
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index d1283ff1080b..1e1a0e7eeac5 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -189,17 +189,16 @@ static void wakeup_source_remove(struct wakeup_source *ws)
 	if (WARN_ON(!ws))
 		return;

+	/*
+	 * After shutting down the timer, wakeup_source_activate() will warn if
+	 * the given wakeup source is passed to it.
+	 */
+	timer_shutdown_sync(&ws->timer);
+
 	raw_spin_lock_irqsave(&events_lock, flags);
 	list_del_rcu(&ws->entry);
 	raw_spin_unlock_irqrestore(&events_lock, flags);
 	synchronize_srcu(&wakeup_srcu);
-
-	timer_delete_sync(&ws->timer);
-	/*
-	 * Clear timer.function to make wakeup_source_not_registered() treat
-	 * this wakeup source as not registered.
-	 */
-	ws->timer.function = NULL;
 }

 /**
@@ -506,14 +505,14 @@ int device_set_wakeup_enable(struct device *dev, bool enable)
 EXPORT_SYMBOL_GPL(device_set_wakeup_enable);

 /**
- * wakeup_source_not_registered - validate the given wakeup source.
+ * wakeup_source_not_usable - validate the given wakeup source.
  * @ws: Wakeup source to be validated.
  */
-static bool wakeup_source_not_registered(struct wakeup_source *ws)
+static bool wakeup_source_not_usable(struct wakeup_source *ws)
 {
 	/*
-	 * Use timer struct to check if the given source is initialized
-	 * by wakeup_source_add.
+	 * Use the timer struct to check if the given wakeup source has been
+	 * initialized by wakeup_source_add() and it is not going away.
 	 */
 	return ws->timer.function != pm_wakeup_timer_fn;
 }
@@ -558,8 +557,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
 {
 	unsigned int cec;

-	if (WARN_ONCE(wakeup_source_not_registered(ws),
-		      "unregistered wakeup source\n"))
+	if (WARN_ONCE(wakeup_source_not_usable(ws), "unusable wakeup source\n"))
 		return;

 	ws->active = true;
diff --git a/drivers/base/regmap/regmap-slimbus.c b/drivers/base/regmap/regmap-slimbus.c
index 54eb7d227cf4..e523fae73004 100644
--- a/drivers/base/regmap/regmap-slimbus.c
+++ b/drivers/base/regmap/regmap-slimbus.c
@@ -48,8 +48,7 @@ struct regmap *__regmap_init_slimbus(struct slim_device *slimbus,
 	if (IS_ERR(bus))
 		return ERR_CAST(bus);

-	return __regmap_init(&slimbus->dev, bus, &slimbus->dev, config,
-			     lock_key, lock_name);
+	return __regmap_init(&slimbus->dev, bus, slimbus, config, lock_key, lock_name);
 }
 EXPORT_SYMBOL_GPL(__regmap_init_slimbus);

@@ -63,8 +62,7 @@ struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus,
 	if (IS_ERR(bus))
 		return ERR_CAST(bus);

-	return __devm_regmap_init(&slimbus->dev, bus, &slimbus, config,
-				  lock_key, lock_name);
+	return __devm_regmap_init(&slimbus->dev, bus, slimbus, config, lock_key, lock_name);
 }
 EXPORT_SYMBOL_GPL(__devm_regmap_init_slimbus);
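The regmap-slimbus fix passes the slim_device pointer itself as the bus context instead of &slimbus->dev and, in the devm variant, &slimbus, the address of the local parameter variable. Context pointers are round-tripped through void *, so the compiler cannot catch such a mismatch. A toy C program showing that bug class (hypothetical names; the buggy call is undefined behaviour and typically prints garbage):

    #include <stdio.h>

    struct widget {
        int value;
    };

    /* Callback receives its context back as void *, like regmap bus ops. */
    static void show(void *ctx)
    {
        struct widget *w = ctx;

        printf("value = %d\n", w->value);
    }

    static void register_cb_buggy(struct widget *w)
    {
        /* BUG: passes the address of the local parameter variable, a
         * struct widget **, which would also dangle if the callback
         * outlived this stack frame. */
        show(&w);
    }

    static void register_cb_fixed(struct widget *w)
    {
        show(w);    /* the object itself */
    }

    int main(void)
    {
        struct widget w = { .value = 42 };

        register_cb_fixed(&w);  /* prints 42 */
        register_cb_buggy(&w);  /* reinterprets widget ** as widget *: garbage */
        return 0;
    }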
