| author | Thomas Zimmermann <tzimmermann@suse.de> | 2022-06-20 18:21:25 +0200 |
|---|---|---|
| committer | Thomas Zimmermann <tzimmermann@suse.de> | 2022-06-20 18:21:25 +0200 |
| commit | 2b1333b80885b896807ffb6ccf4bc21d29aa65e0 (patch) | |
| tree | 51cc2d13d65603383db82c87f01a0aa93bd26010 /drivers/base | |
| parent | cad564ca557f8d3bb3b1fa965d9a2b3f6490ec69 (diff) | |
| parent | 0f95ee9a0c579ebed0309657f6918673927189f2 (diff) | |
Merge drm/drm-next into drm-misc-next
Backmerging to get new regmap APIs of v5.19-rc1.
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Diffstat (limited to 'drivers/base')
37 files changed, 1954 insertions, 859 deletions
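The headline drivers/base addition in this backmerge is the user-initiated firmware upload interface (CONFIG_FW_UPLOAD, firmware_loader/sysfs_upload.c, firmware_upload_register()/firmware_upload_unregister()). The sketch below is only an illustration of how a driver might wire it up: the callback signatures are inferred from the firmware_upload_register() kerneldoc and the ops invocations in fw_upload_main() in the diff that follows, while the demo_* names, the platform_device wiring, and the exact struct fw_upload_ops layout are assumptions for the example, not part of this commit.

```c
// Hypothetical consumer of the new firmware upload API added below.
// Callback signatures follow the calls in fw_upload_main(); everything
// named demo_* is made up for illustration.
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/platform_device.h>

struct demo_fw_ctx {
	struct fw_upload *fwl;
};

static enum fw_upload_err demo_prepare(struct fw_upload *fwl,
				       const u8 *data, u32 size)
{
	/* Validate the image and get the device ready for programming. */
	if (!size)
		return FW_UPLOAD_ERR_INVALID_SIZE;
	return FW_UPLOAD_ERR_NONE;
}

static enum fw_upload_err demo_write(struct fw_upload *fwl, const u8 *data,
				     u32 offset, u32 size, u32 *written)
{
	/* Push the next chunk to the device and report how much was consumed. */
	*written = size;
	return FW_UPLOAD_ERR_NONE;
}

static enum fw_upload_err demo_poll_complete(struct fw_upload *fwl)
{
	/* Wait for the device to finish programming the received image. */
	return FW_UPLOAD_ERR_NONE;
}

static void demo_cancel(struct fw_upload *fwl)
{
	/* Ask the device to abort an in-progress update. */
}

static const struct fw_upload_ops demo_fw_ops = {
	.prepare	= demo_prepare,
	.write		= demo_write,
	.poll_complete	= demo_poll_complete,
	.cancel		= demo_cancel,
	/* .cleanup is optional; fw_upload_main() only calls it if set. */
};

static int demo_probe(struct platform_device *pdev)
{
	struct demo_fw_ctx *ctx;

	ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	/* Exposes /sys/class/firmware/demo-fw/{loading,data,status,error,...} */
	ctx->fwl = firmware_upload_register(THIS_MODULE, &pdev->dev, "demo-fw",
					    &demo_fw_ops, ctx);
	if (IS_ERR(ctx->fwl))
		return PTR_ERR(ctx->fwl);

	platform_set_drvdata(pdev, ctx);
	return 0;
}

static int demo_remove(struct platform_device *pdev)
{
	struct demo_fw_ctx *ctx = platform_get_drvdata(pdev);

	firmware_upload_unregister(ctx->fwl);
	return 0;
}

static struct platform_driver demo_fw_driver = {
	.probe	= demo_probe,
	.remove	= demo_remove,
	.driver	= {
		.name = "demo-fw-upload",
	},
};
module_platform_driver(demo_fw_driver);

MODULE_LICENSE("GPL");
```

With a registration like this in place, userspace would drive an update through the loading/data files under /sys/class/firmware/<name>/, with status, error, remaining_size and cancel exposed as implemented in sysfs_upload.c in the diff below.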
diff --git a/drivers/base/Makefile b/drivers/base/Makefile index 02f7f1358e86..83217d243c25 100644 --- a/drivers/base/Makefile +++ b/drivers/base/Makefile @@ -25,6 +25,7 @@ obj-$(CONFIG_DEV_COREDUMP) += devcoredump.o obj-$(CONFIG_GENERIC_MSI_IRQ_DOMAIN) += platform-msi.o obj-$(CONFIG_GENERIC_ARCH_TOPOLOGY) += arch_topology.o obj-$(CONFIG_GENERIC_ARCH_NUMA) += arch_numa.o +obj-$(CONFIG_ACPI) += physical_location.o obj-y += test/ diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index f73b836047cf..579c851a2bd7 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -19,6 +19,9 @@ #include <linux/rcupdate.h> #include <linux/sched.h> +#define CREATE_TRACE_POINTS +#include <trace/events/thermal_pressure.h> + static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data); static struct cpumask scale_freq_counters_mask; static bool scale_freq_invariant; @@ -195,6 +198,8 @@ void topology_update_thermal_pressure(const struct cpumask *cpus, th_pressure = max_capacity - capacity; + trace_thermal_pressure_update(cpu, th_pressure); + for_each_cpu(cpu, cpus) WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure); } diff --git a/drivers/base/base.h b/drivers/base/base.h index 2882af26392a..ab71403d102f 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h @@ -159,6 +159,7 @@ extern char *make_class_name(const char *name, struct kobject *kobj); extern int devres_release_all(struct device *dev); extern void device_block_probing(void); extern void device_unblock_probing(void); +extern void deferred_probe_extend_timeout(void); /* /sys/devices directory */ extern struct kset *devices_kset; diff --git a/drivers/base/bus.c b/drivers/base/bus.c index 97936ec49bde..7ca47e5b3c1f 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -617,7 +617,7 @@ int bus_add_driver(struct device_driver *drv) if (drv->bus->p->drivers_autoprobe) { error = driver_attach(drv); if (error) - goto out_unregister; + goto out_del_list; } module_add_driver(drv->owner, drv); @@ -644,6 +644,8 @@ int bus_add_driver(struct device_driver *drv) return 0; +out_del_list: + klist_del(&priv->knode_bus); out_unregister: kobject_put(&priv->kobj); /* drv->p is freed in driver_release() */ diff --git a/drivers/base/core.c b/drivers/base/core.c index 3d6430eb0c6a..7cd789c4985d 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -32,6 +32,7 @@ #include <linux/dma-map-ops.h> /* for dma_default_coherent */ #include "base.h" +#include "physical_location.h" #include "power/power.h" #ifdef CONFIG_SYSFS_DEPRECATED @@ -2649,8 +2650,17 @@ static int device_add_attrs(struct device *dev) goto err_remove_dev_waiting_for_supplier; } + if (dev_add_physical_location(dev)) { + error = device_add_group(dev, + &dev_attr_physical_location_group); + if (error) + goto err_remove_dev_removable; + } + return 0; + err_remove_dev_removable: + device_remove_file(dev, &dev_attr_removable); err_remove_dev_waiting_for_supplier: device_remove_file(dev, &dev_attr_waiting_for_supplier); err_remove_dev_online: @@ -2672,6 +2682,11 @@ static void device_remove_attrs(struct device *dev) struct class *class = dev->class; const struct device_type *type = dev->type; + if (dev->physical_location) { + device_remove_group(dev, &dev_attr_physical_location_group); + kfree(dev->physical_location); + } + device_remove_file(dev, &dev_attr_removable); device_remove_file(dev, &dev_attr_waiting_for_supplier); device_remove_file(dev, &dev_attr_online); @@ -2864,9 +2879,6 @@ void device_initialize(struct device *dev) 
kobject_init(&dev->kobj, &device_ktype); INIT_LIST_HEAD(&dev->dma_pools); mutex_init(&dev->mutex); -#ifdef CONFIG_PROVE_LOCKING - mutex_init(&dev->lockdep_mutex); -#endif lockdep_set_novalidate_class(&dev->mutex); spin_lock_init(&dev->devres_lock); INIT_LIST_HEAD(&dev->devres_head); diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 3fc3b5940bb3..11b0fb6414d3 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -60,6 +60,7 @@ static bool initcalls_done; /* Save the async probe drivers' name from kernel cmdline */ #define ASYNC_DRV_NAMES_MAX_LEN 256 static char async_probe_drv_names[ASYNC_DRV_NAMES_MAX_LEN]; +static bool async_probe_default; /* * In some cases, like suspend to RAM or hibernation, It might be reasonable @@ -257,7 +258,6 @@ DEFINE_SHOW_ATTRIBUTE(deferred_devs); int driver_deferred_probe_timeout; EXPORT_SYMBOL_GPL(driver_deferred_probe_timeout); -static DECLARE_WAIT_QUEUE_HEAD(probe_timeout_waitqueue); static int __init deferred_probe_timeout_setup(char *str) { @@ -274,10 +274,10 @@ __setup("deferred_probe_timeout=", deferred_probe_timeout_setup); * @dev: device to check * * Return: - * -ENODEV if initcalls have completed and modules are disabled. - * -ETIMEDOUT if the deferred probe timeout was set and has expired - * and modules are enabled. - * -EPROBE_DEFER in other cases. + * * -ENODEV if initcalls have completed and modules are disabled. + * * -ETIMEDOUT if the deferred probe timeout was set and has expired + * and modules are enabled. + * * -EPROBE_DEFER in other cases. * * Drivers or subsystems can opt-in to calling this function instead of directly * returning -EPROBE_DEFER. @@ -312,10 +312,23 @@ static void deferred_probe_timeout_work_func(struct work_struct *work) list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe) dev_info(p->device, "deferred probe pending\n"); mutex_unlock(&deferred_probe_mutex); - wake_up_all(&probe_timeout_waitqueue); } static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func); +void deferred_probe_extend_timeout(void) +{ + /* + * If the work hasn't been queued yet or if the work expired, don't + * start a new one. + */ + if (cancel_delayed_work(&deferred_probe_timeout_work)) { + schedule_delayed_work(&deferred_probe_timeout_work, + driver_deferred_probe_timeout * HZ); + pr_debug("Extended deferred probe timeout by %d secs\n", + driver_deferred_probe_timeout); + } +} + /** * deferred_probe_initcall() - Enable probing of deferred devices * @@ -671,6 +684,8 @@ sysfs_failed: if (dev->bus) blocking_notifier_call_chain(&dev->bus->p->bus_notifier, BUS_NOTIFY_DRIVER_NOT_BOUND, dev); + if (dev->bus && dev->bus->dma_cleanup) + dev->bus->dma_cleanup(dev); pinctrl_bind_failed: device_links_no_driver(dev); device_unbind_cleanup(dev); @@ -716,9 +731,6 @@ int driver_probe_done(void) */ void wait_for_device_probe(void) { - /* wait for probe timeout */ - wait_event(probe_timeout_waitqueue, !driver_deferred_probe_timeout); - /* wait for the deferred probe workqueue to finish */ flush_work(&deferred_probe_work); @@ -797,7 +809,11 @@ static int driver_probe_device(struct device_driver *drv, struct device *dev) static inline bool cmdline_requested_async_probing(const char *drv_name) { - return parse_option_str(async_probe_drv_names, drv_name); + bool async_drv; + + async_drv = parse_option_str(async_probe_drv_names, drv_name); + + return (async_probe_default != async_drv); } /* The option format is "driver_async_probe=drv_name1,drv_name2,..." 
*/ @@ -807,6 +823,8 @@ static int __init save_async_options(char *buf) pr_warn("Too long list of driver names for 'driver_async_probe'!\n"); strlcpy(async_probe_drv_names, buf, ASYNC_DRV_NAMES_MAX_LEN); + async_probe_default = parse_option_str(async_probe_drv_names, "*"); + return 1; } __setup("driver_async_probe=", save_async_options); @@ -941,6 +959,7 @@ out_unlock: static int __device_attach(struct device *dev, bool allow_async) { int ret = 0; + bool async = false; device_lock(dev); if (dev->p->dead) { @@ -979,7 +998,7 @@ static int __device_attach(struct device *dev, bool allow_async) */ dev_dbg(dev, "scheduling asynchronous probe\n"); get_device(dev); - async_schedule_dev(__device_attach_async_helper, dev); + async = true; } else { pm_request_idle(dev); } @@ -989,6 +1008,8 @@ static int __device_attach(struct device *dev, bool allow_async) } out_unlock: device_unlock(dev); + if (async) + async_schedule_dev(__device_attach_async_helper, dev); return ret; } @@ -1082,6 +1103,7 @@ static void __driver_attach_async_helper(void *_dev, async_cookie_t cookie) __device_driver_lock(dev, dev->parent); drv = dev->p->async_driver; + dev->p->async_driver = NULL; ret = driver_probe_device(drv, dev); __device_driver_unlock(dev, dev->parent); @@ -1128,7 +1150,7 @@ static int __driver_attach(struct device *dev, void *data) */ dev_dbg(dev, "probing driver %s asynchronously\n", drv->name); device_lock(dev); - if (!dev->driver) { + if (!dev->driver && !dev->p->async_driver) { get_device(dev); dev->p->async_driver = drv; async_schedule_dev(__driver_attach_async_helper, dev); @@ -1199,6 +1221,9 @@ static void __device_release_driver(struct device *dev, struct device *parent) device_remove(dev); + if (dev->bus && dev->bus->dma_cleanup) + dev->bus->dma_cleanup(dev); + device_links_driver_cleanup(dev); device_unbind_cleanup(dev); diff --git a/drivers/base/driver.c b/drivers/base/driver.c index 8c0d33e182fd..15a75afe6b84 100644 --- a/drivers/base/driver.c +++ b/drivers/base/driver.c @@ -31,6 +31,75 @@ static struct device *next_device(struct klist_iter *i) } /** + * driver_set_override() - Helper to set or clear driver override. + * @dev: Device to change + * @override: Address of string to change (e.g. &device->driver_override); + * The contents will be freed and hold newly allocated override. + * @s: NUL-terminated string, new driver name to force a match, pass empty + * string to clear it ("" or "\n", where the latter is only for sysfs + * interface). + * @len: length of @s + * + * Helper to set or clear driver override in a device, intended for the cases + * when the driver_override field is allocated by driver/bus code. + * + * Returns: 0 on success or a negative error code on failure. + */ +int driver_set_override(struct device *dev, const char **override, + const char *s, size_t len) +{ + const char *new, *old; + char *cp; + + if (!override || !s) + return -EINVAL; + + /* + * The stored value will be used in sysfs show callback (sysfs_emit()), + * which has a length limit of PAGE_SIZE and adds a trailing newline. + * Thus we can store one character less to avoid truncation during sysfs + * show. 
+ */ + if (len >= (PAGE_SIZE - 1)) + return -EINVAL; + + if (!len) { + /* Empty string passed - clear override */ + device_lock(dev); + old = *override; + *override = NULL; + device_unlock(dev); + kfree(old); + + return 0; + } + + cp = strnchr(s, len, '\n'); + if (cp) + len = cp - s; + + new = kstrndup(s, len, GFP_KERNEL); + if (!new) + return -ENOMEM; + + device_lock(dev); + old = *override; + if (cp != s) { + *override = new; + } else { + /* "\n" passed - clear override */ + kfree(new); + *override = NULL; + } + device_unlock(dev); + + kfree(old); + + return 0; +} +EXPORT_SYMBOL_GPL(driver_set_override); + +/** * driver_for_each_device - Iterator for devices bound to a driver. * @drv: Driver we're iterating. * @start: Device to begin with @@ -177,6 +246,7 @@ int driver_register(struct device_driver *drv) return ret; } kobject_uevent(&drv->p->kobj, KOBJ_ADD); + deferred_probe_extend_timeout(); return ret; } diff --git a/drivers/base/firmware_loader/Kconfig b/drivers/base/firmware_loader/Kconfig index 38f3b66bf52b..5166b323a0f8 100644 --- a/drivers/base/firmware_loader/Kconfig +++ b/drivers/base/firmware_loader/Kconfig @@ -29,6 +29,9 @@ if FW_LOADER config FW_LOADER_PAGED_BUF bool +config FW_LOADER_SYSFS + bool + config EXTRA_FIRMWARE string "Build named firmware blobs into the kernel binary" help @@ -72,6 +75,7 @@ config EXTRA_FIRMWARE_DIR config FW_LOADER_USER_HELPER bool "Enable the firmware sysfs fallback mechanism" + select FW_LOADER_SYSFS select FW_LOADER_PAGED_BUF help This option enables a sysfs loading facility to enable firmware @@ -159,21 +163,34 @@ config FW_LOADER_USER_HELPER_FALLBACK config FW_LOADER_COMPRESS bool "Enable compressed firmware support" - select FW_LOADER_PAGED_BUF - select XZ_DEC help This option enables the support for loading compressed firmware files. The caller of firmware API receives the decompressed file content. The compressed file is loaded as a fallback, only after loading the raw file failed at first. - Currently only XZ-compressed files are supported, and they have to - be compressed with either none or crc32 integrity check type (pass - "-C crc32" option to xz command). - Compressed firmware support does not apply to firmware images that are built into the kernel image (CONFIG_EXTRA_FIRMWARE). +if FW_LOADER_COMPRESS +config FW_LOADER_COMPRESS_XZ + bool "Enable XZ-compressed firmware support" + select FW_LOADER_PAGED_BUF + select XZ_DEC + default y + help + This option adds the support for XZ-compressed files. + The files have to be compressed with either none or crc32 + integrity check type (pass "-C crc32" option to xz command). + +config FW_LOADER_COMPRESS_ZSTD + bool "Enable ZSTD-compressed firmware support" + select ZSTD_DECOMPRESS + help + This option adds the support for ZSTD-compressed files. + +endif # FW_LOADER_COMPRESS + config FW_CACHE bool "Enable firmware caching during suspend" depends on PM_SLEEP @@ -186,5 +203,19 @@ config FW_CACHE If unsure, say Y. +config FW_UPLOAD + bool "Enable users to initiate firmware updates using sysfs" + select FW_LOADER_SYSFS + select FW_LOADER_PAGED_BUF + help + Enabling this option will allow device drivers to expose a persistent + sysfs interface that allows firmware updates to be initiated from + userspace. For example, FPGA based PCIe cards load firmware and FPGA + images from local FLASH when the card boots. The images in FLASH may + be updated with new images provided by the user. Enable this device + to support cards that rely on user-initiated updates for firmware files. + + If unsure, say N. 
+ endif # FW_LOADER endmenu diff --git a/drivers/base/firmware_loader/Makefile b/drivers/base/firmware_loader/Makefile index e87843408fe6..60d19f9e0ddc 100644 --- a/drivers/base/firmware_loader/Makefile +++ b/drivers/base/firmware_loader/Makefile @@ -6,5 +6,7 @@ obj-$(CONFIG_FW_LOADER) += firmware_class.o firmware_class-objs := main.o firmware_class-$(CONFIG_FW_LOADER_USER_HELPER) += fallback.o firmware_class-$(CONFIG_EFI_EMBEDDED_FIRMWARE) += fallback_platform.o +firmware_class-$(CONFIG_FW_LOADER_SYSFS) += sysfs.o +firmware_class-$(CONFIG_FW_UPLOAD) += sysfs_upload.o obj-y += builtin/ diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c index 4afb0e9312c0..bf68e3947814 100644 --- a/drivers/base/firmware_loader/fallback.c +++ b/drivers/base/firmware_loader/fallback.c @@ -3,12 +3,9 @@ #include <linux/types.h> #include <linux/kconfig.h> #include <linux/list.h> -#include <linux/slab.h> #include <linux/security.h> -#include <linux/highmem.h> #include <linux/umh.h> #include <linux/sysctl.h> -#include <linux/vmalloc.h> #include <linux/module.h> #include "fallback.h" @@ -18,22 +15,6 @@ * firmware fallback mechanism */ -MODULE_IMPORT_NS(FIRMWARE_LOADER_PRIVATE); - -extern struct firmware_fallback_config fw_fallback_config; - -/* These getters are vetted to use int properly */ -static inline int __firmware_loading_timeout(void) -{ - return fw_fallback_config.loading_timeout; -} - -/* These setters are vetted to use int properly */ -static void __fw_fallback_set_timeout(int timeout) -{ - fw_fallback_config.loading_timeout = timeout; -} - /* * use small loading timeout for caching devices' firmware because all these * firmware images have been loaded successfully at lease once, also system is @@ -58,52 +39,11 @@ static long firmware_loading_timeout(void) __firmware_loading_timeout() * HZ : MAX_JIFFY_OFFSET; } -static inline bool fw_sysfs_done(struct fw_priv *fw_priv) -{ - return __fw_state_check(fw_priv, FW_STATUS_DONE); -} - -static inline bool fw_sysfs_loading(struct fw_priv *fw_priv) -{ - return __fw_state_check(fw_priv, FW_STATUS_LOADING); -} - static inline int fw_sysfs_wait_timeout(struct fw_priv *fw_priv, long timeout) { return __fw_state_wait_common(fw_priv, timeout); } -struct fw_sysfs { - bool nowait; - struct device dev; - struct fw_priv *fw_priv; - struct firmware *fw; -}; - -static struct fw_sysfs *to_fw_sysfs(struct device *dev) -{ - return container_of(dev, struct fw_sysfs, dev); -} - -static void __fw_load_abort(struct fw_priv *fw_priv) -{ - /* - * There is a small window in which user can write to 'loading' - * between loading done/aborted and disappearance of 'loading' - */ - if (fw_state_is_aborted(fw_priv) || fw_sysfs_done(fw_priv)) - return; - - fw_state_aborted(fw_priv); -} - -static void fw_load_abort(struct fw_sysfs *fw_sysfs) -{ - struct fw_priv *fw_priv = fw_sysfs->fw_priv; - - __fw_load_abort(fw_priv); -} - static LIST_HEAD(pending_fw_head); void kill_pending_fw_fallback_reqs(bool only_kill_custom) @@ -120,376 +60,6 @@ void kill_pending_fw_fallback_reqs(bool only_kill_custom) mutex_unlock(&fw_lock); } -static ssize_t timeout_show(struct class *class, struct class_attribute *attr, - char *buf) -{ - return sysfs_emit(buf, "%d\n", __firmware_loading_timeout()); -} - -/** - * timeout_store() - set number of seconds to wait for firmware - * @class: device class pointer - * @attr: device attribute pointer - * @buf: buffer to scan for timeout value - * @count: number of bytes in @buf - * - * Sets the number of seconds to wait for the 
firmware. Once - * this expires an error will be returned to the driver and no - * firmware will be provided. - * - * Note: zero means 'wait forever'. - **/ -static ssize_t timeout_store(struct class *class, struct class_attribute *attr, - const char *buf, size_t count) -{ - int tmp_loading_timeout = simple_strtol(buf, NULL, 10); - - if (tmp_loading_timeout < 0) - tmp_loading_timeout = 0; - - __fw_fallback_set_timeout(tmp_loading_timeout); - - return count; -} -static CLASS_ATTR_RW(timeout); - -static struct attribute *firmware_class_attrs[] = { - &class_attr_timeout.attr, - NULL, -}; -ATTRIBUTE_GROUPS(firmware_class); - -static void fw_dev_release(struct device *dev) -{ - struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev); - - kfree(fw_sysfs); -} - -static int do_firmware_uevent(struct fw_sysfs *fw_sysfs, struct kobj_uevent_env *env) -{ - if (add_uevent_var(env, "FIRMWARE=%s", fw_sysfs->fw_priv->fw_name)) - return -ENOMEM; - if (add_uevent_var(env, "TIMEOUT=%i", __firmware_loading_timeout())) - return -ENOMEM; - if (add_uevent_var(env, "ASYNC=%d", fw_sysfs->nowait)) - return -ENOMEM; - - return 0; -} - -static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env) -{ - struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev); - int err = 0; - - mutex_lock(&fw_lock); - if (fw_sysfs->fw_priv) - err = do_firmware_uevent(fw_sysfs, env); - mutex_unlock(&fw_lock); - return err; -} - -static struct class firmware_class = { - .name = "firmware", - .class_groups = firmware_class_groups, - .dev_uevent = firmware_uevent, - .dev_release = fw_dev_release, -}; - -int register_sysfs_loader(void) -{ - int ret = class_register(&firmware_class); - - if (ret != 0) - return ret; - return register_firmware_config_sysctl(); -} - -void unregister_sysfs_loader(void) -{ - unregister_firmware_config_sysctl(); - class_unregister(&firmware_class); -} - -static ssize_t firmware_loading_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev); - int loading = 0; - - mutex_lock(&fw_lock); - if (fw_sysfs->fw_priv) - loading = fw_sysfs_loading(fw_sysfs->fw_priv); - mutex_unlock(&fw_lock); - - return sysfs_emit(buf, "%d\n", loading); -} - -/** - * firmware_loading_store() - set value in the 'loading' control file - * @dev: device pointer - * @attr: device attribute pointer - * @buf: buffer to scan for loading control value - * @count: number of bytes in @buf - * - * The relevant values are: - * - * 1: Start a load, discarding any previous partial load. - * 0: Conclude the load and hand the data to the driver code. - * -1: Conclude the load with an error and discard any written data. - **/ -static ssize_t firmware_loading_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev); - struct fw_priv *fw_priv; - ssize_t written = count; - int loading = simple_strtol(buf, NULL, 10); - - mutex_lock(&fw_lock); - fw_priv = fw_sysfs->fw_priv; - if (fw_state_is_aborted(fw_priv)) - goto out; - - switch (loading) { - case 1: - /* discarding any previous partial load */ - if (!fw_sysfs_done(fw_priv)) { - fw_free_paged_buf(fw_priv); - fw_state_start(fw_priv); - } - break; - case 0: - if (fw_sysfs_loading(fw_priv)) { - int rc; - - /* - * Several loading requests may be pending on - * one same firmware buf, so let all requests - * see the mapped 'buf->data' once the loading - * is completed. 
- * */ - rc = fw_map_paged_buf(fw_priv); - if (rc) - dev_err(dev, "%s: map pages failed\n", - __func__); - else - rc = security_kernel_post_load_data(fw_priv->data, - fw_priv->size, - LOADING_FIRMWARE, "blob"); - - /* - * Same logic as fw_load_abort, only the DONE bit - * is ignored and we set ABORT only on failure. - */ - if (rc) { - fw_state_aborted(fw_priv); - written = rc; - } else { - fw_state_done(fw_priv); - } - break; - } - fallthrough; - default: - dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading); - fallthrough; - case -1: - fw_load_abort(fw_sysfs); - break; - } -out: - mutex_unlock(&fw_lock); - return written; -} - -static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store); - -static void firmware_rw_data(struct fw_priv *fw_priv, char *buffer, - loff_t offset, size_t count, bool read) -{ - if (read) - memcpy(buffer, fw_priv->data + offset, count); - else - memcpy(fw_priv->data + offset, buffer, count); -} - -static void firmware_rw(struct fw_priv *fw_priv, char *buffer, - loff_t offset, size_t count, bool read) -{ - while (count) { - void *page_data; - int page_nr = offset >> PAGE_SHIFT; - int page_ofs = offset & (PAGE_SIZE-1); - int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count); - - page_data = kmap(fw_priv->pages[page_nr]); - - if (read) - memcpy(buffer, page_data + page_ofs, page_cnt); - else - memcpy(page_data + page_ofs, buffer, page_cnt); - - kunmap(fw_priv->pages[page_nr]); - buffer += page_cnt; - offset += page_cnt; - count -= page_cnt; - } -} - -static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, - char *buffer, loff_t offset, size_t count) -{ - struct device *dev = kobj_to_dev(kobj); - struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev); - struct fw_priv *fw_priv; - ssize_t ret_count; - - mutex_lock(&fw_lock); - fw_priv = fw_sysfs->fw_priv; - if (!fw_priv || fw_sysfs_done(fw_priv)) { - ret_count = -ENODEV; - goto out; - } - if (offset > fw_priv->size) { - ret_count = 0; - goto out; - } - if (count > fw_priv->size - offset) - count = fw_priv->size - offset; - - ret_count = count; - - if (fw_priv->data) - firmware_rw_data(fw_priv, buffer, offset, count, true); - else - firmware_rw(fw_priv, buffer, offset, count, true); - -out: - mutex_unlock(&fw_lock); - return ret_count; -} - -static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size) -{ - int err; - - err = fw_grow_paged_buf(fw_sysfs->fw_priv, - PAGE_ALIGN(min_size) >> PAGE_SHIFT); - if (err) - fw_load_abort(fw_sysfs); - return err; -} - -/** - * firmware_data_write() - write method for firmware - * @filp: open sysfs file - * @kobj: kobject for the device - * @bin_attr: bin_attr structure - * @buffer: buffer being written - * @offset: buffer offset for write in total data store area - * @count: buffer size - * - * Data written to the 'data' attribute will be later handed to - * the driver as a firmware image. 
- **/ -static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, - char *buffer, loff_t offset, size_t count) -{ - struct device *dev = kobj_to_dev(kobj); - struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev); - struct fw_priv *fw_priv; - ssize_t retval; - - if (!capable(CAP_SYS_RAWIO)) - return -EPERM; - - mutex_lock(&fw_lock); - fw_priv = fw_sysfs->fw_priv; - if (!fw_priv || fw_sysfs_done(fw_priv)) { - retval = -ENODEV; - goto out; - } - - if (fw_priv->data) { - if (offset + count > fw_priv->allocated_size) { - retval = -ENOMEM; - goto out; - } - firmware_rw_data(fw_priv, buffer, offset, count, false); - retval = count; - } else { - retval = fw_realloc_pages(fw_sysfs, offset + count); - if (retval) - goto out; - - retval = count; - firmware_rw(fw_priv, buffer, offset, count, false); - } - - fw_priv->size = max_t(size_t, offset + count, fw_priv->size); -out: - mutex_unlock(&fw_lock); - return retval; -} - -static struct bin_attribute firmware_attr_data = { - .attr = { .name = "data", .mode = 0644 }, - .size = 0, - .read = firmware_data_read, - .write = firmware_data_write, -}; - -static struct attribute *fw_dev_attrs[] = { - &dev_attr_loading.attr, - NULL -}; - -static struct bin_attribute *fw_dev_bin_attrs[] = { - &firmware_attr_data, - NULL -}; - -static const struct attribute_group fw_dev_attr_group = { - .attrs = fw_dev_attrs, - .bin_attrs = fw_dev_bin_attrs, -}; - -static const struct attribute_group *fw_dev_attr_groups[] = { - &fw_dev_attr_group, - NULL -}; - -static struct fw_sysfs * -fw_create_instance(struct firmware *firmware, const char *fw_name, - struct device *device, u32 opt_flags) -{ - struct fw_sysfs *fw_sysfs; - struct device *f_dev; - - fw_sysfs = kzalloc(sizeof(*fw_sysfs), GFP_KERNEL); - if (!fw_sysfs) { - fw_sysfs = ERR_PTR(-ENOMEM); - goto exit; - } - - fw_sysfs->nowait = !!(opt_flags & FW_OPT_NOWAIT); - fw_sysfs->fw = firmware; - f_dev = &fw_sysfs->dev; - - device_initialize(f_dev); - dev_set_name(f_dev, "%s", fw_name); - f_dev->parent = device; - f_dev->class = &firmware_class; - f_dev->groups = fw_dev_attr_groups; -exit: - return fw_sysfs; -} - /** * fw_load_sysfs_fallback() - load a firmware via the sysfs fallback mechanism * @fw_sysfs: firmware sysfs information for the firmware to load diff --git a/drivers/base/firmware_loader/fallback.h b/drivers/base/firmware_loader/fallback.h index 9f3055d3b4ca..144148595660 100644 --- a/drivers/base/firmware_loader/fallback.h +++ b/drivers/base/firmware_loader/fallback.h @@ -6,29 +6,7 @@ #include <linux/device.h> #include "firmware.h" - -/** - * struct firmware_fallback_config - firmware fallback configuration settings - * - * Helps describe and fine tune the fallback mechanism. - * - * @force_sysfs_fallback: force the sysfs fallback mechanism to be used - * as if one had enabled CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y. - * Useful to help debug a CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y - * functionality on a kernel where that config entry has been disabled. - * @ignore_sysfs_fallback: force to disable the sysfs fallback mechanism. - * This emulates the behaviour as if we had set the kernel - * config CONFIG_FW_LOADER_USER_HELPER=n. - * @old_timeout: for internal use - * @loading_timeout: the timeout to wait for the fallback mechanism before - * giving up, in seconds. 
- */ -struct firmware_fallback_config { - unsigned int force_sysfs_fallback; - unsigned int ignore_sysfs_fallback; - int old_timeout; - int loading_timeout; -}; +#include "sysfs.h" #ifdef CONFIG_FW_LOADER_USER_HELPER int firmware_fallback_sysfs(struct firmware *fw, const char *name, @@ -40,19 +18,6 @@ void kill_pending_fw_fallback_reqs(bool only_kill_custom); void fw_fallback_set_cache_timeout(void); void fw_fallback_set_default_timeout(void); -int register_sysfs_loader(void); -void unregister_sysfs_loader(void); -#ifdef CONFIG_SYSCTL -extern int register_firmware_config_sysctl(void); -extern void unregister_firmware_config_sysctl(void); -#else -static inline int register_firmware_config_sysctl(void) -{ - return 0; -} -static inline void unregister_firmware_config_sysctl(void) { } -#endif /* CONFIG_SYSCTL */ - #else /* CONFIG_FW_LOADER_USER_HELPER */ static inline int firmware_fallback_sysfs(struct firmware *fw, const char *name, struct device *device, @@ -66,15 +31,6 @@ static inline int firmware_fallback_sysfs(struct firmware *fw, const char *name, static inline void kill_pending_fw_fallback_reqs(bool only_kill_custom) { } static inline void fw_fallback_set_cache_timeout(void) { } static inline void fw_fallback_set_default_timeout(void) { } - -static inline int register_sysfs_loader(void) -{ - return 0; -} - -static inline void unregister_sysfs_loader(void) -{ -} #endif /* CONFIG_FW_LOADER_USER_HELPER */ #ifdef CONFIG_EFI_EMBEDDED_FIRMWARE diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h index 2889f446ad41..fe77e91c38a2 100644 --- a/drivers/base/firmware_loader/firmware.h +++ b/drivers/base/firmware_loader/firmware.h @@ -87,6 +87,7 @@ struct fw_priv { }; extern struct mutex fw_lock; +extern struct firmware_cache fw_cache; static inline bool __fw_state_check(struct fw_priv *fw_priv, enum fw_status status) @@ -149,7 +150,22 @@ static inline void fw_state_done(struct fw_priv *fw_priv) __fw_state_set(fw_priv, FW_STATUS_DONE); } +static inline bool fw_state_is_done(struct fw_priv *fw_priv) +{ + return __fw_state_check(fw_priv, FW_STATUS_DONE); +} + +static inline bool fw_state_is_loading(struct fw_priv *fw_priv) +{ + return __fw_state_check(fw_priv, FW_STATUS_LOADING); +} + +int alloc_lookup_fw_priv(const char *fw_name, struct firmware_cache *fwc, + struct fw_priv **fw_priv, void *dbuf, size_t size, + size_t offset, u32 opt_flags); int assign_fw(struct firmware *fw, struct device *device); +void free_fw_priv(struct fw_priv *fw_priv); +void fw_state_init(struct fw_priv *fw_priv); #ifdef CONFIG_FW_LOADER bool firmware_is_builtin(const struct firmware *fw); diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c index 94d1789a233e..ac3f34e80194 100644 --- a/drivers/base/firmware_loader/main.c +++ b/drivers/base/firmware_loader/main.c @@ -35,6 +35,7 @@ #include <linux/syscore_ops.h> #include <linux/reboot.h> #include <linux/security.h> +#include <linux/zstd.h> #include <linux/xz.h> #include <generated/utsrelease.h> @@ -91,9 +92,9 @@ static inline struct fw_priv *to_fw_priv(struct kref *ref) * guarding for corner cases a global lock should be OK */ DEFINE_MUTEX(fw_lock); -static struct firmware_cache fw_cache; +struct firmware_cache fw_cache; -static void fw_state_init(struct fw_priv *fw_priv) +void fw_state_init(struct fw_priv *fw_priv) { struct fw_state *fw_st = &fw_priv->fw_st; @@ -163,13 +164,9 @@ static struct fw_priv *__lookup_fw_priv(const char *fw_name) } /* Returns 1 for batching firmware requests with the same 
name */ -static int alloc_lookup_fw_priv(const char *fw_name, - struct firmware_cache *fwc, - struct fw_priv **fw_priv, - void *dbuf, - size_t size, - size_t offset, - u32 opt_flags) +int alloc_lookup_fw_priv(const char *fw_name, struct firmware_cache *fwc, + struct fw_priv **fw_priv, void *dbuf, size_t size, + size_t offset, u32 opt_flags) { struct fw_priv *tmp; @@ -224,7 +221,7 @@ static void __free_fw_priv(struct kref *ref) kfree(fw_priv); } -static void free_fw_priv(struct fw_priv *fw_priv) +void free_fw_priv(struct fw_priv *fw_priv) { struct firmware_cache *fwc = fw_priv->fwc; spin_lock(&fwc->lock); @@ -253,6 +250,8 @@ void fw_free_paged_buf(struct fw_priv *fw_priv) fw_priv->pages = NULL; fw_priv->page_array_size = 0; fw_priv->nr_pages = 0; + fw_priv->data = NULL; + fw_priv->size = 0; } int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed) @@ -305,9 +304,73 @@ int fw_map_paged_buf(struct fw_priv *fw_priv) #endif /* + * ZSTD-compressed firmware support + */ +#ifdef CONFIG_FW_LOADER_COMPRESS_ZSTD +static int fw_decompress_zstd(struct device *dev, struct fw_priv *fw_priv, + size_t in_size, const void *in_buffer) +{ + size_t len, out_size, workspace_size; + void *workspace, *out_buf; + zstd_dctx *ctx; + int err; + + if (fw_priv->allocated_size) { + out_size = fw_priv->allocated_size; + out_buf = fw_priv->data; + } else { + zstd_frame_header params; + + if (zstd_get_frame_header(¶ms, in_buffer, in_size) || + params.frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN) { + dev_dbg(dev, "%s: invalid zstd header\n", __func__); + return -EINVAL; + } + out_size = params.frameContentSize; + out_buf = vzalloc(out_size); + if (!out_buf) + return -ENOMEM; + } + + workspace_size = zstd_dctx_workspace_bound(); + workspace = kvzalloc(workspace_size, GFP_KERNEL); + if (!workspace) { + err = -ENOMEM; + goto error; + } + + ctx = zstd_init_dctx(workspace, workspace_size); + if (!ctx) { + dev_dbg(dev, "%s: failed to initialize context\n", __func__); + err = -EINVAL; + goto error; + } + + len = zstd_decompress_dctx(ctx, out_buf, out_size, in_buffer, in_size); + if (zstd_is_error(len)) { + dev_dbg(dev, "%s: failed to decompress: %d\n", __func__, + zstd_get_error_code(len)); + err = -EINVAL; + goto error; + } + + if (!fw_priv->allocated_size) + fw_priv->data = out_buf; + fw_priv->size = len; + err = 0; + + error: + kvfree(workspace); + if (err && !fw_priv->allocated_size) + vfree(out_buf); + return err; +} +#endif /* CONFIG_FW_LOADER_COMPRESS_ZSTD */ + +/* * XZ-compressed firmware support */ -#ifdef CONFIG_FW_LOADER_COMPRESS +#ifdef CONFIG_FW_LOADER_COMPRESS_XZ /* show an error and return the standard error code */ static int fw_decompress_xz_error(struct device *dev, enum xz_ret xz_ret) { @@ -401,7 +464,7 @@ static int fw_decompress_xz(struct device *dev, struct fw_priv *fw_priv, else return fw_decompress_xz_pages(dev, fw_priv, in_size, in_buffer); } -#endif /* CONFIG_FW_LOADER_COMPRESS */ +#endif /* CONFIG_FW_LOADER_COMPRESS_XZ */ /* direct firmware loading support */ static char fw_path_para[256]; @@ -735,6 +798,8 @@ _request_firmware(const struct firmware **firmware_p, const char *name, size_t offset, u32 opt_flags) { struct firmware *fw = NULL; + struct cred *kern_cred = NULL; + const struct cred *old_cred; bool nondirect = false; int ret; @@ -751,13 +816,30 @@ _request_firmware(const struct firmware **firmware_p, const char *name, if (ret <= 0) /* error or already assigned */ goto out; + /* + * We are about to try to access the firmware file. 
Because we may have been + * called by a driver when serving an unrelated request from userland, we use + * the kernel credentials to read the file. + */ + kern_cred = prepare_kernel_cred(NULL); + if (!kern_cred) { + ret = -ENOMEM; + goto out; + } + old_cred = override_creds(kern_cred); + ret = fw_get_filesystem_firmware(device, fw->priv, "", NULL); /* Only full reads can support decompression, platform, and sysfs. */ if (!(opt_flags & FW_OPT_PARTIAL)) nondirect = true; -#ifdef CONFIG_FW_LOADER_COMPRESS +#ifdef CONFIG_FW_LOADER_COMPRESS_ZSTD + if (ret == -ENOENT && nondirect) + ret = fw_get_filesystem_firmware(device, fw->priv, ".zst", + fw_decompress_zstd); +#endif +#ifdef CONFIG_FW_LOADER_COMPRESS_XZ if (ret == -ENOENT && nondirect) ret = fw_get_filesystem_firmware(device, fw->priv, ".xz", fw_decompress_xz); @@ -776,6 +858,9 @@ _request_firmware(const struct firmware **firmware_p, const char *name, } else ret = assign_fw(fw, device); + revert_creds(old_cred); + put_cred(kern_cred); + out: if (ret < 0) { fw_abort_batch_reqs(fw); diff --git a/drivers/base/firmware_loader/sysfs.c b/drivers/base/firmware_loader/sysfs.c new file mode 100644 index 000000000000..5b0b85b70b6f --- /dev/null +++ b/drivers/base/firmware_loader/sysfs.c @@ -0,0 +1,422 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/highmem.h> +#include <linux/module.h> +#include <linux/security.h> +#include <linux/slab.h> +#include <linux/types.h> + +#include "sysfs.h" + +/* + * sysfs support for firmware loader + */ + +void __fw_load_abort(struct fw_priv *fw_priv) +{ + /* + * There is a small window in which user can write to 'loading' + * between loading done/aborted and disappearance of 'loading' + */ + if (fw_state_is_aborted(fw_priv) || fw_state_is_done(fw_priv)) + return; + + fw_state_aborted(fw_priv); +} + +#ifdef CONFIG_FW_LOADER_USER_HELPER +static ssize_t timeout_show(struct class *class, struct class_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%d\n", __firmware_loading_timeout()); +} + +/** + * timeout_store() - set number of seconds to wait for firmware + * @class: device class pointer + * @attr: device attribute pointer + * @buf: buffer to scan for timeout value + * @count: number of bytes in @buf + * + * Sets the number of seconds to wait for the firmware. Once + * this expires an error will be returned to the driver and no + * firmware will be provided. + * + * Note: zero means 'wait forever'. 
+ **/ +static ssize_t timeout_store(struct class *class, struct class_attribute *attr, + const char *buf, size_t count) +{ + int tmp_loading_timeout = simple_strtol(buf, NULL, 10); + + if (tmp_loading_timeout < 0) + tmp_loading_timeout = 0; + + __fw_fallback_set_timeout(tmp_loading_timeout); + + return count; +} +static CLASS_ATTR_RW(timeout); + +static struct attribute *firmware_class_attrs[] = { + &class_attr_timeout.attr, + NULL, +}; +ATTRIBUTE_GROUPS(firmware_class); + +static int do_firmware_uevent(struct fw_sysfs *fw_sysfs, struct kobj_uevent_env *env) +{ + if (add_uevent_var(env, "FIRMWARE=%s", fw_sysfs->fw_priv->fw_name)) + return -ENOMEM; + if (add_uevent_var(env, "TIMEOUT=%i", __firmware_loading_timeout())) + return -ENOMEM; + if (add_uevent_var(env, "ASYNC=%d", fw_sysfs->nowait)) + return -ENOMEM; + + return 0; +} + +static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev); + int err = 0; + + mutex_lock(&fw_lock); + if (fw_sysfs->fw_priv) + err = do_firmware_uevent(fw_sysfs, env); + mutex_unlock(&fw_lock); + return err; +} +#endif /* CONFIG_FW_LOADER_USER_HELPER */ + +static void fw_dev_release(struct device *dev) +{ + struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev); + + if (fw_sysfs->fw_upload_priv) { + free_fw_priv(fw_sysfs->fw_priv); + kfree(fw_sysfs->fw_upload_priv); + } + kfree(fw_sysfs); +} + +static struct class firmware_class = { + .name = "firmware", +#ifdef CONFIG_FW_LOADER_USER_HELPER + .class_groups = firmware_class_groups, + .dev_uevent = firmware_uevent, +#endif + .dev_release = fw_dev_release, +}; + +int register_sysfs_loader(void) +{ + int ret = class_register(&firmware_class); + + if (ret != 0) + return ret; + return register_firmware_config_sysctl(); +} + +void unregister_sysfs_loader(void) +{ + unregister_firmware_config_sysctl(); + class_unregister(&firmware_class); +} + +static ssize_t firmware_loading_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev); + int loading = 0; + + mutex_lock(&fw_lock); + if (fw_sysfs->fw_priv) + loading = fw_state_is_loading(fw_sysfs->fw_priv); + mutex_unlock(&fw_lock); + + return sysfs_emit(buf, "%d\n", loading); +} + +/** + * firmware_loading_store() - set value in the 'loading' control file + * @dev: device pointer + * @attr: device attribute pointer + * @buf: buffer to scan for loading control value + * @count: number of bytes in @buf + * + * The relevant values are: + * + * 1: Start a load, discarding any previous partial load. + * 0: Conclude the load and hand the data to the driver code. + * -1: Conclude the load with an error and discard any written data. + **/ +static ssize_t firmware_loading_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev); + struct fw_priv *fw_priv; + ssize_t written = count; + int loading = simple_strtol(buf, NULL, 10); + + mutex_lock(&fw_lock); + fw_priv = fw_sysfs->fw_priv; + if (fw_state_is_aborted(fw_priv) || fw_state_is_done(fw_priv)) + goto out; + + switch (loading) { + case 1: + /* discarding any previous partial load */ + fw_free_paged_buf(fw_priv); + fw_state_start(fw_priv); + break; + case 0: + if (fw_state_is_loading(fw_priv)) { + int rc; + + /* + * Several loading requests may be pending on + * one same firmware buf, so let all requests + * see the mapped 'buf->data' once the loading + * is completed. 
+ */ + rc = fw_map_paged_buf(fw_priv); + if (rc) + dev_err(dev, "%s: map pages failed\n", + __func__); + else + rc = security_kernel_post_load_data(fw_priv->data, + fw_priv->size, + LOADING_FIRMWARE, + "blob"); + + /* + * Same logic as fw_load_abort, only the DONE bit + * is ignored and we set ABORT only on failure. + */ + if (rc) { + fw_state_aborted(fw_priv); + written = rc; + } else { + fw_state_done(fw_priv); + + /* + * If this is a user-initiated firmware upload + * then start the upload in a worker thread now. + */ + rc = fw_upload_start(fw_sysfs); + if (rc) + written = rc; + } + break; + } + fallthrough; + default: + dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading); + fallthrough; + case -1: + fw_load_abort(fw_sysfs); + if (fw_sysfs->fw_upload_priv) + fw_state_init(fw_sysfs->fw_priv); + + break; + } +out: + mutex_unlock(&fw_lock); + return written; +} + +DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store); + +static void firmware_rw_data(struct fw_priv *fw_priv, char *buffer, + loff_t offset, size_t count, bool read) +{ + if (read) + memcpy(buffer, fw_priv->data + offset, count); + else + memcpy(fw_priv->data + offset, buffer, count); +} + +static void firmware_rw(struct fw_priv *fw_priv, char *buffer, + loff_t offset, size_t count, bool read) +{ + while (count) { + void *page_data; + int page_nr = offset >> PAGE_SHIFT; + int page_ofs = offset & (PAGE_SIZE - 1); + int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count); + + page_data = kmap(fw_priv->pages[page_nr]); + + if (read) + memcpy(buffer, page_data + page_ofs, page_cnt); + else + memcpy(page_data + page_ofs, buffer, page_cnt); + + kunmap(fw_priv->pages[page_nr]); + buffer += page_cnt; + offset += page_cnt; + count -= page_cnt; + } +} + +static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buffer, loff_t offset, size_t count) +{ + struct device *dev = kobj_to_dev(kobj); + struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev); + struct fw_priv *fw_priv; + ssize_t ret_count; + + mutex_lock(&fw_lock); + fw_priv = fw_sysfs->fw_priv; + if (!fw_priv || fw_state_is_done(fw_priv)) { + ret_count = -ENODEV; + goto out; + } + if (offset > fw_priv->size) { + ret_count = 0; + goto out; + } + if (count > fw_priv->size - offset) + count = fw_priv->size - offset; + + ret_count = count; + + if (fw_priv->data) + firmware_rw_data(fw_priv, buffer, offset, count, true); + else + firmware_rw(fw_priv, buffer, offset, count, true); + +out: + mutex_unlock(&fw_lock); + return ret_count; +} + +static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size) +{ + int err; + + err = fw_grow_paged_buf(fw_sysfs->fw_priv, + PAGE_ALIGN(min_size) >> PAGE_SHIFT); + if (err) + fw_load_abort(fw_sysfs); + return err; +} + +/** + * firmware_data_write() - write method for firmware + * @filp: open sysfs file + * @kobj: kobject for the device + * @bin_attr: bin_attr structure + * @buffer: buffer being written + * @offset: buffer offset for write in total data store area + * @count: buffer size + * + * Data written to the 'data' attribute will be later handed to + * the driver as a firmware image. 
+ **/ +static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buffer, loff_t offset, size_t count) +{ + struct device *dev = kobj_to_dev(kobj); + struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev); + struct fw_priv *fw_priv; + ssize_t retval; + + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + + mutex_lock(&fw_lock); + fw_priv = fw_sysfs->fw_priv; + if (!fw_priv || fw_state_is_done(fw_priv)) { + retval = -ENODEV; + goto out; + } + + if (fw_priv->data) { + if (offset + count > fw_priv->allocated_size) { + retval = -ENOMEM; + goto out; + } + firmware_rw_data(fw_priv, buffer, offset, count, false); + retval = count; + } else { + retval = fw_realloc_pages(fw_sysfs, offset + count); + if (retval) + goto out; + + retval = count; + firmware_rw(fw_priv, buffer, offset, count, false); + } + + fw_priv->size = max_t(size_t, offset + count, fw_priv->size); +out: + mutex_unlock(&fw_lock); + return retval; +} + +static struct bin_attribute firmware_attr_data = { + .attr = { .name = "data", .mode = 0644 }, + .size = 0, + .read = firmware_data_read, + .write = firmware_data_write, +}; + +static struct attribute *fw_dev_attrs[] = { + &dev_attr_loading.attr, +#ifdef CONFIG_FW_UPLOAD + &dev_attr_cancel.attr, + &dev_attr_status.attr, + &dev_attr_error.attr, + &dev_attr_remaining_size.attr, +#endif + NULL +}; + +static struct bin_attribute *fw_dev_bin_attrs[] = { + &firmware_attr_data, + NULL +}; + +static const struct attribute_group fw_dev_attr_group = { + .attrs = fw_dev_attrs, + .bin_attrs = fw_dev_bin_attrs, +#ifdef CONFIG_FW_UPLOAD + .is_visible = fw_upload_is_visible, +#endif +}; + +static const struct attribute_group *fw_dev_attr_groups[] = { + &fw_dev_attr_group, + NULL +}; + +struct fw_sysfs * +fw_create_instance(struct firmware *firmware, const char *fw_name, + struct device *device, u32 opt_flags) +{ + struct fw_sysfs *fw_sysfs; + struct device *f_dev; + + fw_sysfs = kzalloc(sizeof(*fw_sysfs), GFP_KERNEL); + if (!fw_sysfs) { + fw_sysfs = ERR_PTR(-ENOMEM); + goto exit; + } + + fw_sysfs->nowait = !!(opt_flags & FW_OPT_NOWAIT); + fw_sysfs->fw = firmware; + f_dev = &fw_sysfs->dev; + + device_initialize(f_dev); + dev_set_name(f_dev, "%s", fw_name); + f_dev->parent = device; + f_dev->class = &firmware_class; + f_dev->groups = fw_dev_attr_groups; +exit: + return fw_sysfs; +} diff --git a/drivers/base/firmware_loader/sysfs.h b/drivers/base/firmware_loader/sysfs.h new file mode 100644 index 000000000000..5d8ff1675c79 --- /dev/null +++ b/drivers/base/firmware_loader/sysfs.h @@ -0,0 +1,117 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __FIRMWARE_SYSFS_H +#define __FIRMWARE_SYSFS_H + +#include <linux/device.h> + +#include "firmware.h" + +MODULE_IMPORT_NS(FIRMWARE_LOADER_PRIVATE); + +extern struct firmware_fallback_config fw_fallback_config; +extern struct device_attribute dev_attr_loading; + +#ifdef CONFIG_FW_LOADER_USER_HELPER +/** + * struct firmware_fallback_config - firmware fallback configuration settings + * + * Helps describe and fine tune the fallback mechanism. + * + * @force_sysfs_fallback: force the sysfs fallback mechanism to be used + * as if one had enabled CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y. + * Useful to help debug a CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y + * functionality on a kernel where that config entry has been disabled. + * @ignore_sysfs_fallback: force to disable the sysfs fallback mechanism. + * This emulates the behaviour as if we had set the kernel + * config CONFIG_FW_LOADER_USER_HELPER=n. 
+ * @old_timeout: for internal use + * @loading_timeout: the timeout to wait for the fallback mechanism before + * giving up, in seconds. + */ +struct firmware_fallback_config { + unsigned int force_sysfs_fallback; + unsigned int ignore_sysfs_fallback; + int old_timeout; + int loading_timeout; +}; + +/* These getters are vetted to use int properly */ +static inline int __firmware_loading_timeout(void) +{ + return fw_fallback_config.loading_timeout; +} + +/* These setters are vetted to use int properly */ +static inline void __fw_fallback_set_timeout(int timeout) +{ + fw_fallback_config.loading_timeout = timeout; +} +#endif + +#ifdef CONFIG_FW_LOADER_SYSFS +int register_sysfs_loader(void); +void unregister_sysfs_loader(void); +#if defined(CONFIG_FW_LOADER_USER_HELPER) && defined(CONFIG_SYSCTL) +int register_firmware_config_sysctl(void); +void unregister_firmware_config_sysctl(void); +#else +static inline int register_firmware_config_sysctl(void) +{ + return 0; +} + +static inline void unregister_firmware_config_sysctl(void) { } +#endif /* CONFIG_FW_LOADER_USER_HELPER && CONFIG_SYSCTL */ +#else /* CONFIG_FW_LOADER_SYSFS */ +static inline int register_sysfs_loader(void) +{ + return 0; +} + +static inline void unregister_sysfs_loader(void) +{ +} +#endif /* CONFIG_FW_LOADER_SYSFS */ + +struct fw_sysfs { + bool nowait; + struct device dev; + struct fw_priv *fw_priv; + struct firmware *fw; + void *fw_upload_priv; +}; + +static inline struct fw_sysfs *to_fw_sysfs(struct device *dev) +{ + return container_of(dev, struct fw_sysfs, dev); +} + +void __fw_load_abort(struct fw_priv *fw_priv); + +static inline void fw_load_abort(struct fw_sysfs *fw_sysfs) +{ + struct fw_priv *fw_priv = fw_sysfs->fw_priv; + + __fw_load_abort(fw_priv); +} + +struct fw_sysfs * +fw_create_instance(struct firmware *firmware, const char *fw_name, + struct device *device, u32 opt_flags); + +#ifdef CONFIG_FW_UPLOAD +extern struct device_attribute dev_attr_status; +extern struct device_attribute dev_attr_error; +extern struct device_attribute dev_attr_cancel; +extern struct device_attribute dev_attr_remaining_size; + +int fw_upload_start(struct fw_sysfs *fw_sysfs); +umode_t fw_upload_is_visible(struct kobject *kobj, struct attribute *attr, int n); +#else +static inline int fw_upload_start(struct fw_sysfs *fw_sysfs) +{ + return 0; +} +#endif + +#endif /* __FIRMWARE_SYSFS_H */ diff --git a/drivers/base/firmware_loader/sysfs_upload.c b/drivers/base/firmware_loader/sysfs_upload.c new file mode 100644 index 000000000000..87044d52322a --- /dev/null +++ b/drivers/base/firmware_loader/sysfs_upload.c @@ -0,0 +1,397 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/firmware.h> +#include <linux/module.h> +#include <linux/slab.h> + +#include "sysfs_upload.h" + +/* + * Support for user-space to initiate a firmware upload to a device. 
+ */ + +static const char * const fw_upload_prog_str[] = { + [FW_UPLOAD_PROG_IDLE] = "idle", + [FW_UPLOAD_PROG_RECEIVING] = "receiving", + [FW_UPLOAD_PROG_PREPARING] = "preparing", + [FW_UPLOAD_PROG_TRANSFERRING] = "transferring", + [FW_UPLOAD_PROG_PROGRAMMING] = "programming" +}; + +static const char * const fw_upload_err_str[] = { + [FW_UPLOAD_ERR_NONE] = "none", + [FW_UPLOAD_ERR_HW_ERROR] = "hw-error", + [FW_UPLOAD_ERR_TIMEOUT] = "timeout", + [FW_UPLOAD_ERR_CANCELED] = "user-abort", + [FW_UPLOAD_ERR_BUSY] = "device-busy", + [FW_UPLOAD_ERR_INVALID_SIZE] = "invalid-file-size", + [FW_UPLOAD_ERR_RW_ERROR] = "read-write-error", + [FW_UPLOAD_ERR_WEAROUT] = "flash-wearout", +}; + +static const char *fw_upload_progress(struct device *dev, + enum fw_upload_prog prog) +{ + const char *status = "unknown-status"; + + if (prog < FW_UPLOAD_PROG_MAX) + status = fw_upload_prog_str[prog]; + else + dev_err(dev, "Invalid status during secure update: %d\n", prog); + + return status; +} + +static const char *fw_upload_error(struct device *dev, + enum fw_upload_err err_code) +{ + const char *error = "unknown-error"; + + if (err_code < FW_UPLOAD_ERR_MAX) + error = fw_upload_err_str[err_code]; + else + dev_err(dev, "Invalid error code during secure update: %d\n", + err_code); + + return error; +} + +static ssize_t +status_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct fw_upload_priv *fwlp = to_fw_sysfs(dev)->fw_upload_priv; + + return sysfs_emit(buf, "%s\n", fw_upload_progress(dev, fwlp->progress)); +} +DEVICE_ATTR_RO(status); + +static ssize_t +error_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct fw_upload_priv *fwlp = to_fw_sysfs(dev)->fw_upload_priv; + int ret; + + mutex_lock(&fwlp->lock); + + if (fwlp->progress != FW_UPLOAD_PROG_IDLE) + ret = -EBUSY; + else if (!fwlp->err_code) + ret = 0; + else + ret = sysfs_emit(buf, "%s:%s\n", + fw_upload_progress(dev, fwlp->err_progress), + fw_upload_error(dev, fwlp->err_code)); + + mutex_unlock(&fwlp->lock); + + return ret; +} +DEVICE_ATTR_RO(error); + +static ssize_t cancel_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fw_upload_priv *fwlp = to_fw_sysfs(dev)->fw_upload_priv; + int ret = count; + bool cancel; + + if (kstrtobool(buf, &cancel) || !cancel) + return -EINVAL; + + mutex_lock(&fwlp->lock); + if (fwlp->progress == FW_UPLOAD_PROG_IDLE) + ret = -ENODEV; + + fwlp->ops->cancel(fwlp->fw_upload); + mutex_unlock(&fwlp->lock); + + return ret; +} +DEVICE_ATTR_WO(cancel); + +static ssize_t remaining_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fw_upload_priv *fwlp = to_fw_sysfs(dev)->fw_upload_priv; + + return sysfs_emit(buf, "%u\n", fwlp->remaining_size); +} +DEVICE_ATTR_RO(remaining_size); + +umode_t +fw_upload_is_visible(struct kobject *kobj, struct attribute *attr, int n) +{ + static struct fw_sysfs *fw_sysfs; + + fw_sysfs = to_fw_sysfs(kobj_to_dev(kobj)); + + if (fw_sysfs->fw_upload_priv || attr == &dev_attr_loading.attr) + return attr->mode; + + return 0; +} + +static void fw_upload_update_progress(struct fw_upload_priv *fwlp, + enum fw_upload_prog new_progress) +{ + mutex_lock(&fwlp->lock); + fwlp->progress = new_progress; + mutex_unlock(&fwlp->lock); +} + +static void fw_upload_set_error(struct fw_upload_priv *fwlp, + enum fw_upload_err err_code) +{ + mutex_lock(&fwlp->lock); + fwlp->err_progress = fwlp->progress; + fwlp->err_code = err_code; + mutex_unlock(&fwlp->lock); +} + +static void 
fw_upload_prog_complete(struct fw_upload_priv *fwlp) +{ + mutex_lock(&fwlp->lock); + fwlp->progress = FW_UPLOAD_PROG_IDLE; + mutex_unlock(&fwlp->lock); +} + +static void fw_upload_main(struct work_struct *work) +{ + struct fw_upload_priv *fwlp; + struct fw_sysfs *fw_sysfs; + u32 written = 0, offset = 0; + enum fw_upload_err ret; + struct device *fw_dev; + struct fw_upload *fwl; + + fwlp = container_of(work, struct fw_upload_priv, work); + fwl = fwlp->fw_upload; + fw_sysfs = (struct fw_sysfs *)fwl->priv; + fw_dev = &fw_sysfs->dev; + + fw_upload_update_progress(fwlp, FW_UPLOAD_PROG_PREPARING); + ret = fwlp->ops->prepare(fwl, fwlp->data, fwlp->remaining_size); + if (ret != FW_UPLOAD_ERR_NONE) { + fw_upload_set_error(fwlp, ret); + goto putdev_exit; + } + + fw_upload_update_progress(fwlp, FW_UPLOAD_PROG_TRANSFERRING); + while (fwlp->remaining_size) { + ret = fwlp->ops->write(fwl, fwlp->data, offset, + fwlp->remaining_size, &written); + if (ret != FW_UPLOAD_ERR_NONE || !written) { + if (ret == FW_UPLOAD_ERR_NONE) { + dev_warn(fw_dev, "write-op wrote zero data\n"); + ret = FW_UPLOAD_ERR_RW_ERROR; + } + fw_upload_set_error(fwlp, ret); + goto done; + } + + fwlp->remaining_size -= written; + offset += written; + } + + fw_upload_update_progress(fwlp, FW_UPLOAD_PROG_PROGRAMMING); + ret = fwlp->ops->poll_complete(fwl); + if (ret != FW_UPLOAD_ERR_NONE) + fw_upload_set_error(fwlp, ret); + +done: + if (fwlp->ops->cleanup) + fwlp->ops->cleanup(fwl); + +putdev_exit: + put_device(fw_dev->parent); + + /* + * Note: fwlp->remaining_size is left unmodified here to provide + * additional information on errors. It will be reinitialized when + * the next firmeware upload begins. + */ + mutex_lock(&fw_lock); + fw_free_paged_buf(fw_sysfs->fw_priv); + fw_state_init(fw_sysfs->fw_priv); + mutex_unlock(&fw_lock); + fwlp->data = NULL; + fw_upload_prog_complete(fwlp); +} + +/* + * Start a worker thread to upload data to the parent driver. + * Must be called with fw_lock held. + */ +int fw_upload_start(struct fw_sysfs *fw_sysfs) +{ + struct fw_priv *fw_priv = fw_sysfs->fw_priv; + struct device *fw_dev = &fw_sysfs->dev; + struct fw_upload_priv *fwlp; + + if (!fw_sysfs->fw_upload_priv) + return 0; + + if (!fw_priv->size) { + fw_free_paged_buf(fw_priv); + fw_state_init(fw_sysfs->fw_priv); + return 0; + } + + fwlp = fw_sysfs->fw_upload_priv; + mutex_lock(&fwlp->lock); + + /* Do not interfere with an on-going fw_upload */ + if (fwlp->progress != FW_UPLOAD_PROG_IDLE) { + mutex_unlock(&fwlp->lock); + return -EBUSY; + } + + get_device(fw_dev->parent); /* released in fw_upload_main */ + + fwlp->progress = FW_UPLOAD_PROG_RECEIVING; + fwlp->err_code = 0; + fwlp->remaining_size = fw_priv->size; + fwlp->data = fw_priv->data; + + pr_debug("%s: fw-%s fw_priv=%p data=%p size=%u\n", + __func__, fw_priv->fw_name, + fw_priv, fw_priv->data, + (unsigned int)fw_priv->size); + + queue_work(system_long_wq, &fwlp->work); + mutex_unlock(&fwlp->lock); + + return 0; +} + +/** + * firmware_upload_register() - register for the firmware upload sysfs API + * @module: kernel module of this device + * @parent: parent device instantiating firmware upload + * @name: firmware name to be associated with this device + * @ops: pointer to structure of firmware upload ops + * @dd_handle: pointer to parent driver private data + * + * @name must be unique among all users of firmware upload. The firmware + * sysfs files for this device will be found at /sys/class/firmware/@name. 
+ * + * Return: struct fw_upload pointer or ERR_PTR() + * + **/ +struct fw_upload * +firmware_upload_register(struct module *module, struct device *parent, + const char *name, const struct fw_upload_ops *ops, + void *dd_handle) +{ + u32 opt_flags = FW_OPT_NOCACHE; + struct fw_upload *fw_upload; + struct fw_upload_priv *fw_upload_priv; + struct fw_sysfs *fw_sysfs; + struct fw_priv *fw_priv; + struct device *fw_dev; + int ret; + + if (!name || name[0] == '\0') + return ERR_PTR(-EINVAL); + + if (!ops || !ops->cancel || !ops->prepare || + !ops->write || !ops->poll_complete) { + dev_err(parent, "Attempt to register without all required ops\n"); + return ERR_PTR(-EINVAL); + } + + if (!try_module_get(module)) + return ERR_PTR(-EFAULT); + + fw_upload = kzalloc(sizeof(*fw_upload), GFP_KERNEL); + if (!fw_upload) { + ret = -ENOMEM; + goto exit_module_put; + } + + fw_upload_priv = kzalloc(sizeof(*fw_upload_priv), GFP_KERNEL); + if (!fw_upload_priv) { + ret = -ENOMEM; + goto free_fw_upload; + } + + fw_upload_priv->fw_upload = fw_upload; + fw_upload_priv->ops = ops; + mutex_init(&fw_upload_priv->lock); + fw_upload_priv->module = module; + fw_upload_priv->name = name; + fw_upload_priv->err_code = 0; + fw_upload_priv->progress = FW_UPLOAD_PROG_IDLE; + INIT_WORK(&fw_upload_priv->work, fw_upload_main); + fw_upload->dd_handle = dd_handle; + + fw_sysfs = fw_create_instance(NULL, name, parent, opt_flags); + if (IS_ERR(fw_sysfs)) { + ret = PTR_ERR(fw_sysfs); + goto free_fw_upload_priv; + } + fw_upload->priv = fw_sysfs; + fw_sysfs->fw_upload_priv = fw_upload_priv; + fw_dev = &fw_sysfs->dev; + + ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, NULL, 0, 0, + FW_OPT_NOCACHE); + if (ret != 0) { + if (ret > 0) + ret = -EINVAL; + goto free_fw_sysfs; + } + fw_priv->is_paged_buf = true; + fw_sysfs->fw_priv = fw_priv; + + ret = device_add(fw_dev); + if (ret) { + dev_err(fw_dev, "%s: device_register failed\n", __func__); + put_device(fw_dev); + goto exit_module_put; + } + + return fw_upload; + +free_fw_sysfs: + kfree(fw_sysfs); + +free_fw_upload_priv: + kfree(fw_upload_priv); + +free_fw_upload: + kfree(fw_upload); + +exit_module_put: + module_put(module); + + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(firmware_upload_register); + +/** + * firmware_upload_unregister() - Unregister firmware upload interface + * @fw_upload: pointer to struct fw_upload + **/ +void firmware_upload_unregister(struct fw_upload *fw_upload) +{ + struct fw_sysfs *fw_sysfs = fw_upload->priv; + struct fw_upload_priv *fw_upload_priv = fw_sysfs->fw_upload_priv; + + mutex_lock(&fw_upload_priv->lock); + if (fw_upload_priv->progress == FW_UPLOAD_PROG_IDLE) { + mutex_unlock(&fw_upload_priv->lock); + goto unregister; + } + + fw_upload_priv->ops->cancel(fw_upload); + mutex_unlock(&fw_upload_priv->lock); + + /* Ensure lower-level device-driver is finished */ + flush_work(&fw_upload_priv->work); + +unregister: + device_unregister(&fw_sysfs->dev); + module_put(fw_upload_priv->module); +} +EXPORT_SYMBOL_GPL(firmware_upload_unregister); diff --git a/drivers/base/firmware_loader/sysfs_upload.h b/drivers/base/firmware_loader/sysfs_upload.h new file mode 100644 index 000000000000..31931ff7808a --- /dev/null +++ b/drivers/base/firmware_loader/sysfs_upload.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __SYSFS_UPLOAD_H +#define __SYSFS_UPLOAD_H + +#include <linux/device.h> + +#include "sysfs.h" + +/** + * enum fw_upload_prog - firmware upload progress codes + * @FW_UPLOAD_PROG_IDLE: there is no firmware upload in progress + * 
@FW_UPLOAD_PROG_RECEIVING: worker thread is receiving firmware data + * @FW_UPLOAD_PROG_PREPARING: target device is preparing for firmware upload + * @FW_UPLOAD_PROG_TRANSFERRING: data is being copied to the device + * @FW_UPLOAD_PROG_PROGRAMMING: device is performing the firmware update + * @FW_UPLOAD_PROG_MAX: Maximum progress code marker + */ +enum fw_upload_prog { + FW_UPLOAD_PROG_IDLE, + FW_UPLOAD_PROG_RECEIVING, + FW_UPLOAD_PROG_PREPARING, + FW_UPLOAD_PROG_TRANSFERRING, + FW_UPLOAD_PROG_PROGRAMMING, + FW_UPLOAD_PROG_MAX +}; + +struct fw_upload_priv { + struct fw_upload *fw_upload; + struct module *module; + const char *name; + const struct fw_upload_ops *ops; + struct mutex lock; /* protect data structure contents */ + struct work_struct work; + const u8 *data; /* pointer to update data */ + u32 remaining_size; /* size remaining to transfer */ + enum fw_upload_prog progress; + enum fw_upload_prog err_progress; /* progress at time of failure */ + enum fw_upload_err err_code; /* security manager error code */ +}; + +#endif /* __SYSFS_UPLOAD_H */ diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 7222ff9b5e05..084d67fd55cc 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -636,10 +636,9 @@ static int __add_memory_block(struct memory_block *memory) } ret = xa_err(xa_store(&memory_blocks, memory->dev.id, memory, GFP_KERNEL)); - if (ret) { - put_device(&memory->dev); + if (ret) device_unregister(&memory->dev); - } + return ret; } diff --git a/drivers/base/node.c b/drivers/base/node.c index ec8bb24a5a22..0ac6376ef7a1 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -682,6 +682,7 @@ static int register_node(struct node *node, int num) */ void unregister_node(struct node *node) { + compaction_unregister_node(node); hugetlb_unregister_node(node); /* no-op, if memoryless node */ node_remove_accesses(node); node_remove_caches(node); diff --git a/drivers/base/physical_location.c b/drivers/base/physical_location.c new file mode 100644 index 000000000000..87af641cfe1a --- /dev/null +++ b/drivers/base/physical_location.c @@ -0,0 +1,143 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Device physical location support + * + * Author: Won Chung <wonchung@google.com> + */ + +#include <linux/acpi.h> +#include <linux/sysfs.h> + +#include "physical_location.h" + +bool dev_add_physical_location(struct device *dev) +{ + struct acpi_pld_info *pld; + acpi_status status; + + if (!has_acpi_companion(dev)) + return false; + + status = acpi_get_physical_device_location(ACPI_HANDLE(dev), &pld); + if (ACPI_FAILURE(status)) + return false; + + dev->physical_location = + kzalloc(sizeof(*dev->physical_location), GFP_KERNEL); + if (!dev->physical_location) + return false; + dev->physical_location->panel = pld->panel; + dev->physical_location->vertical_position = pld->vertical_position; + dev->physical_location->horizontal_position = pld->horizontal_position; + dev->physical_location->dock = pld->dock; + dev->physical_location->lid = pld->lid; + + ACPI_FREE(pld); + return true; +} + +static ssize_t panel_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + const char *panel; + + switch (dev->physical_location->panel) { + case DEVICE_PANEL_TOP: + panel = "top"; + break; + case DEVICE_PANEL_BOTTOM: + panel = "bottom"; + break; + case DEVICE_PANEL_LEFT: + panel = "left"; + break; + case DEVICE_PANEL_RIGHT: + panel = "right"; + break; + case DEVICE_PANEL_FRONT: + panel = "front"; + break; + case DEVICE_PANEL_BACK: + panel = "back"; + break; + default: + panel = 
"unknown"; + } + return sysfs_emit(buf, "%s\n", panel); +} +static DEVICE_ATTR_RO(panel); + +static ssize_t vertical_position_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + const char *vertical_position; + + switch (dev->physical_location->vertical_position) { + case DEVICE_VERT_POS_UPPER: + vertical_position = "upper"; + break; + case DEVICE_VERT_POS_CENTER: + vertical_position = "center"; + break; + case DEVICE_VERT_POS_LOWER: + vertical_position = "lower"; + break; + default: + vertical_position = "unknown"; + } + return sysfs_emit(buf, "%s\n", vertical_position); +} +static DEVICE_ATTR_RO(vertical_position); + +static ssize_t horizontal_position_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + const char *horizontal_position; + + switch (dev->physical_location->horizontal_position) { + case DEVICE_HORI_POS_LEFT: + horizontal_position = "left"; + break; + case DEVICE_HORI_POS_CENTER: + horizontal_position = "center"; + break; + case DEVICE_HORI_POS_RIGHT: + horizontal_position = "right"; + break; + default: + horizontal_position = "unknown"; + } + return sysfs_emit(buf, "%s\n", horizontal_position); +} +static DEVICE_ATTR_RO(horizontal_position); + +static ssize_t dock_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%s\n", + dev->physical_location->dock ? "yes" : "no"); +} +static DEVICE_ATTR_RO(dock); + +static ssize_t lid_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%s\n", + dev->physical_location->lid ? "yes" : "no"); +} +static DEVICE_ATTR_RO(lid); + +static struct attribute *dev_attr_physical_location[] = { + &dev_attr_panel.attr, + &dev_attr_vertical_position.attr, + &dev_attr_horizontal_position.attr, + &dev_attr_dock.attr, + &dev_attr_lid.attr, + NULL, +}; + +const struct attribute_group dev_attr_physical_location_group = { + .name = "physical_location", + .attrs = dev_attr_physical_location, +}; + diff --git a/drivers/base/physical_location.h b/drivers/base/physical_location.h new file mode 100644 index 000000000000..82cde9f1b161 --- /dev/null +++ b/drivers/base/physical_location.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Device physical location support + * + * Author: Won Chung <wonchung@google.com> + */ + +#include <linux/device.h> + +#ifdef CONFIG_ACPI +extern bool dev_add_physical_location(struct device *dev); +extern const struct attribute_group dev_attr_physical_location_group; +#else +static inline bool dev_add_physical_location(struct device *dev) { return false; }; +static const struct attribute_group dev_attr_physical_location_group = {}; +#endif diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 8cc272fd5c99..51bb2289865c 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -30,6 +30,8 @@ #include <linux/property.h> #include <linux/kmemleak.h> #include <linux/types.h> +#include <linux/iommu.h> +#include <linux/dma-map-ops.h> #include "base.h" #include "power/power.h" @@ -231,7 +233,8 @@ int platform_get_irq_optional(struct platform_device *dev, unsigned int num) out_not_found: ret = -ENXIO; out: - WARN(ret == 0, "0 is an invalid IRQ number\n"); + if (WARN(!ret, "0 is an invalid IRQ number\n")) + return -EINVAL; return ret; } EXPORT_SYMBOL_GPL(platform_get_irq_optional); @@ -446,7 +449,8 @@ static int __platform_get_irq_byname(struct platform_device *dev, r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name); if (r) { - WARN(r->start == 0, "0 is an 
invalid IRQ number\n"); + if (WARN(!r->start, "0 is an invalid IRQ number\n")) + return -EINVAL; return r->start; } @@ -1275,31 +1279,11 @@ static ssize_t driver_override_store(struct device *dev, const char *buf, size_t count) { struct platform_device *pdev = to_platform_device(dev); - char *driver_override, *old, *cp; - - /* We need to keep extra room for a newline */ - if (count >= (PAGE_SIZE - 1)) - return -EINVAL; - - driver_override = kstrndup(buf, count, GFP_KERNEL); - if (!driver_override) - return -ENOMEM; - - cp = strchr(driver_override, '\n'); - if (cp) - *cp = '\0'; - - device_lock(dev); - old = pdev->driver_override; - if (strlen(driver_override)) { - pdev->driver_override = driver_override; - } else { - kfree(driver_override); - pdev->driver_override = NULL; - } - device_unlock(dev); + int ret; - kfree(old); + ret = driver_set_override(dev, &pdev->driver_override, buf, count); + if (ret) + return ret; return count; } @@ -1454,9 +1438,9 @@ static void platform_shutdown(struct device *_dev) drv->shutdown(dev); } - -int platform_dma_configure(struct device *dev) +static int platform_dma_configure(struct device *dev) { + struct platform_driver *drv = to_platform_driver(dev->driver); enum dev_dma_attr attr; int ret = 0; @@ -1467,9 +1451,23 @@ int platform_dma_configure(struct device *dev) ret = acpi_dma_configure(dev, attr); } + if (!ret && !drv->driver_managed_dma) { + ret = iommu_device_use_default_domain(dev); + if (ret) + arch_teardown_dma_ops(dev); + } + return ret; } +static void platform_dma_cleanup(struct device *dev) +{ + struct platform_driver *drv = to_platform_driver(dev->driver); + + if (!drv->driver_managed_dma) + iommu_device_unuse_default_domain(dev); +} + static const struct dev_pm_ops platform_dev_pm_ops = { SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend, pm_generic_runtime_resume, NULL) USE_PLATFORM_PM_SLEEP_OPS @@ -1484,6 +1482,7 @@ struct bus_type platform_bus_type = { .remove = platform_remove, .shutdown = platform_shutdown, .dma_configure = platform_dma_configure, + .dma_cleanup = platform_dma_cleanup, .pm = &platform_dev_pm_ops, }; EXPORT_SYMBOL_GPL(platform_bus_type); diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c index bbddb267c2e6..72115917e0bd 100644 --- a/drivers/base/power/common.c +++ b/drivers/base/power/common.c @@ -172,10 +172,10 @@ EXPORT_SYMBOL_GPL(dev_pm_domain_attach_by_name); * @dev: Device to detach. * @power_off: Used to indicate whether we should power off the device. * - * This functions will reverse the actions from dev_pm_domain_attach() and - * dev_pm_domain_attach_by_id(), thus it detaches @dev from its PM domain. - * Typically it should be invoked during the remove phase, either from - * subsystem level code or from drivers. + * This functions will reverse the actions from dev_pm_domain_attach(), + * dev_pm_domain_attach_by_id() and dev_pm_domain_attach_by_name(), thus it + * detaches @dev from its PM domain. Typically it should be invoked during the + * remove phase, either from subsystem level code or from drivers. * * Callers must ensure proper synchronization of this function with power * management callbacks. 
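A sketch of the attach/detach pairing that the dev_pm_domain_detach() comment above describes, assuming a hypothetical consumer driver with foo_* names and a "perf" entry in its power-domain-names property:

#include <linux/err.h>
#include <linux/pm_domain.h>

static struct device *foo_pd;

static int foo_probe(struct device *dev)
{
	/* Attach to one of several PM domains listed for this device. */
	foo_pd = dev_pm_domain_attach_by_name(dev, "perf");
	if (IS_ERR_OR_NULL(foo_pd))
		return foo_pd ? PTR_ERR(foo_pd) : -ENODEV;
	return 0;
}

static void foo_remove(struct device *dev)
{
	/* Reverse the attach from probe; power the domain off if unused. */
	dev_pm_domain_detach(foo_pd, true);
}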
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 1ee878d126fd..739e52cd4aba 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -131,7 +131,7 @@ static const struct genpd_lock_ops genpd_spin_ops = { #define genpd_is_cpu_domain(genpd) (genpd->flags & GENPD_FLAG_CPU_DOMAIN) #define genpd_is_rpm_always_on(genpd) (genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON) -static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev, +static inline bool irq_safe_dev_in_sleep_domain(struct device *dev, const struct generic_pm_domain *genpd) { bool ret; @@ -139,11 +139,14 @@ static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev, ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd); /* - * Warn once if an IRQ safe device is attached to a no sleep domain, as - * to indicate a suboptimal configuration for PM. For an always on - * domain this isn't case, thus don't warn. + * Warn once if an IRQ safe device is attached to a domain, which + * callbacks are allowed to sleep. This indicates a suboptimal + * configuration for PM, but it doesn't matter for an always on domain. */ - if (ret && !genpd_is_always_on(genpd)) + if (genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) + return ret; + + if (ret) dev_warn_once(dev, "PM domain %s will not be powered off\n", genpd->name); @@ -225,24 +228,23 @@ static void genpd_debug_remove(struct generic_pm_domain *genpd) static void genpd_update_accounting(struct generic_pm_domain *genpd) { - ktime_t delta, now; + u64 delta, now; - now = ktime_get(); - delta = ktime_sub(now, genpd->accounting_time); + now = ktime_get_mono_fast_ns(); + if (now <= genpd->accounting_time) + return; + + delta = now - genpd->accounting_time; /* * If genpd->status is active, it means we are just * out of off and so update the idle time and vice * versa. 
*/ - if (genpd->status == GENPD_STATE_ON) { - int state_idx = genpd->state_idx; - - genpd->states[state_idx].idle_time = - ktime_add(genpd->states[state_idx].idle_time, delta); - } else { - genpd->on_time = ktime_add(genpd->on_time, delta); - } + if (genpd->status == GENPD_STATE_ON) + genpd->states[genpd->state_idx].idle_time += delta; + else + genpd->on_time += delta; genpd->accounting_time = now; } @@ -476,15 +478,16 @@ EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state); */ void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next) { - struct generic_pm_domain_data *gpd_data; struct generic_pm_domain *genpd; + struct gpd_timing_data *td; genpd = dev_to_genpd_safe(dev); if (!genpd) return; - gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); - gpd_data->next_wakeup = next; + td = to_gpd_data(dev->power.subsys_data->domain_data)->td; + if (td) + td->next_wakeup = next; } EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup); @@ -506,6 +509,7 @@ static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed) if (!genpd->power_on) goto out; + timed = timed && genpd->gd && !genpd->states[state_idx].fwnode; if (!timed) { ret = genpd->power_on(genpd); if (ret) @@ -524,7 +528,7 @@ static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed) goto out; genpd->states[state_idx].power_on_latency_ns = elapsed_ns; - genpd->max_off_time_changed = true; + genpd->gd->max_off_time_changed = true; pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n", genpd->name, "on", elapsed_ns); @@ -555,6 +559,7 @@ static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed) if (!genpd->power_off) goto out; + timed = timed && genpd->gd && !genpd->states[state_idx].fwnode; if (!timed) { ret = genpd->power_off(genpd); if (ret) @@ -573,7 +578,7 @@ static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed) goto out; genpd->states[state_idx].power_off_latency_ns = elapsed_ns; - genpd->max_off_time_changed = true; + genpd->gd->max_off_time_changed = true; pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n", genpd->name, "off", elapsed_ns); @@ -649,18 +654,12 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on, } list_for_each_entry(pdd, &genpd->dev_list, list_node) { - enum pm_qos_flags_status stat; - - stat = dev_pm_qos_flags(pdd->dev, PM_QOS_FLAG_NO_POWER_OFF); - if (stat > PM_QOS_FLAGS_NONE) - return -EBUSY; - /* * Do not allow PM domain to be powered off, when an IRQ safe * device is part of a non-IRQ safe domain. */ if (!pm_runtime_suspended(pdd->dev) || - irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd)) + irq_safe_dev_in_sleep_domain(pdd->dev, genpd)) not_suspended++; } @@ -775,25 +774,27 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb, dev = gpd_data->base.dev; for (;;) { - struct generic_pm_domain *genpd; + struct generic_pm_domain *genpd = ERR_PTR(-ENODATA); struct pm_domain_data *pdd; + struct gpd_timing_data *td; spin_lock_irq(&dev->power.lock); pdd = dev->power.subsys_data ? 
dev->power.subsys_data->domain_data : NULL; if (pdd) { - to_gpd_data(pdd)->td.constraint_changed = true; - genpd = dev_to_genpd(dev); - } else { - genpd = ERR_PTR(-ENODATA); + td = to_gpd_data(pdd)->td; + if (td) { + td->constraint_changed = true; + genpd = dev_to_genpd(dev); + } } spin_unlock_irq(&dev->power.lock); if (!IS_ERR(genpd)) { genpd_lock(genpd); - genpd->max_off_time_changed = true; + genpd->gd->max_off_time_changed = true; genpd_unlock(genpd); } @@ -879,9 +880,9 @@ static int genpd_runtime_suspend(struct device *dev) struct generic_pm_domain *genpd; bool (*suspend_ok)(struct device *__dev); struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev); - struct gpd_timing_data *td = &gpd_data->td; + struct gpd_timing_data *td = gpd_data->td; bool runtime_pm = pm_runtime_enabled(dev); - ktime_t time_start; + ktime_t time_start = 0; s64 elapsed_ns; int ret; @@ -902,8 +903,7 @@ static int genpd_runtime_suspend(struct device *dev) return -EBUSY; /* Measure suspend latency. */ - time_start = 0; - if (runtime_pm) + if (td && runtime_pm) time_start = ktime_get(); ret = __genpd_runtime_suspend(dev); @@ -917,13 +917,13 @@ static int genpd_runtime_suspend(struct device *dev) } /* Update suspend latency value if the measured time exceeds it. */ - if (runtime_pm) { + if (td && runtime_pm) { elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); if (elapsed_ns > td->suspend_latency_ns) { td->suspend_latency_ns = elapsed_ns; dev_dbg(dev, "suspend latency exceeded, %lld ns\n", elapsed_ns); - genpd->max_off_time_changed = true; + genpd->gd->max_off_time_changed = true; td->constraint_changed = true; } } @@ -932,7 +932,7 @@ static int genpd_runtime_suspend(struct device *dev) * If power.irq_safe is set, this routine may be run with * IRQs disabled, so suspend only if the PM domain also is irq_safe. */ - if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) + if (irq_safe_dev_in_sleep_domain(dev, genpd)) return 0; genpd_lock(genpd); @@ -955,12 +955,11 @@ static int genpd_runtime_resume(struct device *dev) { struct generic_pm_domain *genpd; struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev); - struct gpd_timing_data *td = &gpd_data->td; - bool runtime_pm = pm_runtime_enabled(dev); - ktime_t time_start; + struct gpd_timing_data *td = gpd_data->td; + bool timed = td && pm_runtime_enabled(dev); + ktime_t time_start = 0; s64 elapsed_ns; int ret; - bool timed = true; dev_dbg(dev, "%s()\n", __func__); @@ -972,10 +971,8 @@ static int genpd_runtime_resume(struct device *dev) * As we don't power off a non IRQ safe domain, which holds * an IRQ safe device, we don't need to restore power to it. */ - if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) { - timed = false; + if (irq_safe_dev_in_sleep_domain(dev, genpd)) goto out; - } genpd_lock(genpd); ret = genpd_power_on(genpd, 0); @@ -988,8 +985,7 @@ static int genpd_runtime_resume(struct device *dev) out: /* Measure resume latency. */ - time_start = 0; - if (timed && runtime_pm) + if (timed) time_start = ktime_get(); ret = genpd_start_dev(genpd, dev); @@ -1001,13 +997,13 @@ static int genpd_runtime_resume(struct device *dev) goto err_stop; /* Update resume latency value if the measured time exceeds it. 
*/ - if (timed && runtime_pm) { + if (timed) { elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); if (elapsed_ns > td->resume_latency_ns) { td->resume_latency_ns = elapsed_ns; dev_dbg(dev, "resume latency exceeded, %lld ns\n", elapsed_ns); - genpd->max_off_time_changed = true; + genpd->gd->max_off_time_changed = true; td->constraint_changed = true; } } @@ -1500,9 +1496,11 @@ EXPORT_SYMBOL_GPL(dev_pm_genpd_resume); #endif /* CONFIG_PM_SLEEP */ -static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev) +static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev, + bool has_governor) { struct generic_pm_domain_data *gpd_data; + struct gpd_timing_data *td; int ret; ret = dev_pm_get_subsys_data(dev); @@ -1516,26 +1514,38 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev) } gpd_data->base.dev = dev; - gpd_data->td.constraint_changed = true; - gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS; gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier; - gpd_data->next_wakeup = KTIME_MAX; - spin_lock_irq(&dev->power.lock); + /* Allocate data used by a governor. */ + if (has_governor) { + td = kzalloc(sizeof(*td), GFP_KERNEL); + if (!td) { + ret = -ENOMEM; + goto err_free; + } - if (dev->power.subsys_data->domain_data) { - ret = -EINVAL; - goto err_free; + td->constraint_changed = true; + td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS; + td->next_wakeup = KTIME_MAX; + gpd_data->td = td; } - dev->power.subsys_data->domain_data = &gpd_data->base; + spin_lock_irq(&dev->power.lock); + + if (dev->power.subsys_data->domain_data) + ret = -EINVAL; + else + dev->power.subsys_data->domain_data = &gpd_data->base; spin_unlock_irq(&dev->power.lock); + if (ret) + goto err_free; + return gpd_data; err_free: - spin_unlock_irq(&dev->power.lock); + kfree(gpd_data->td); kfree(gpd_data); err_put: dev_pm_put_subsys_data(dev); @@ -1551,6 +1561,7 @@ static void genpd_free_dev_data(struct device *dev, spin_unlock_irq(&dev->power.lock); + kfree(gpd_data->td); kfree(gpd_data); dev_pm_put_subsys_data(dev); } @@ -1607,6 +1618,7 @@ static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev) static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, struct device *base_dev) { + struct genpd_governor_data *gd = genpd->gd; struct generic_pm_domain_data *gpd_data; int ret; @@ -1615,7 +1627,7 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)) return -EINVAL; - gpd_data = genpd_alloc_dev_data(dev); + gpd_data = genpd_alloc_dev_data(dev, gd); if (IS_ERR(gpd_data)) return PTR_ERR(gpd_data); @@ -1631,7 +1643,8 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, dev_pm_domain_set(dev, &genpd->domain); genpd->device_count++; - genpd->max_off_time_changed = true; + if (gd) + gd->max_off_time_changed = true; list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); @@ -1685,7 +1698,8 @@ static int genpd_remove_device(struct generic_pm_domain *genpd, } genpd->device_count--; - genpd->max_off_time_changed = true; + if (genpd->gd) + genpd->gd->max_off_time_changed = true; genpd_clear_cpumask(genpd, gpd_data->cpu); dev_pm_domain_set(dev, NULL); @@ -1958,6 +1972,53 @@ static int genpd_set_default_power_state(struct generic_pm_domain *genpd) return 0; } +static int genpd_alloc_data(struct generic_pm_domain *genpd) +{ + struct genpd_governor_data *gd = NULL; + int 
ret; + + if (genpd_is_cpu_domain(genpd) && + !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL)) + return -ENOMEM; + + if (genpd->gov) { + gd = kzalloc(sizeof(*gd), GFP_KERNEL); + if (!gd) { + ret = -ENOMEM; + goto free; + } + + gd->max_off_time_ns = -1; + gd->max_off_time_changed = true; + gd->next_wakeup = KTIME_MAX; + } + + /* Use only one "off" state if there were no states declared */ + if (genpd->state_count == 0) { + ret = genpd_set_default_power_state(genpd); + if (ret) + goto free; + } + + genpd->gd = gd; + return 0; + +free: + if (genpd_is_cpu_domain(genpd)) + free_cpumask_var(genpd->cpus); + kfree(gd); + return ret; +} + +static void genpd_free_data(struct generic_pm_domain *genpd) +{ + if (genpd_is_cpu_domain(genpd)) + free_cpumask_var(genpd->cpus); + if (genpd->free_states) + genpd->free_states(genpd->states, genpd->state_count); + kfree(genpd->gd); +} + static void genpd_lock_init(struct generic_pm_domain *genpd) { if (genpd->flags & GENPD_FLAG_IRQ_SAFE) { @@ -1995,11 +2056,9 @@ int pm_genpd_init(struct generic_pm_domain *genpd, atomic_set(&genpd->sd_count, 0); genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON; genpd->device_count = 0; - genpd->max_off_time_ns = -1; - genpd->max_off_time_changed = true; genpd->provider = NULL; genpd->has_provider = false; - genpd->accounting_time = ktime_get(); + genpd->accounting_time = ktime_get_mono_fast_ns(); genpd->domain.ops.runtime_suspend = genpd_runtime_suspend; genpd->domain.ops.runtime_resume = genpd_runtime_resume; genpd->domain.ops.prepare = genpd_prepare; @@ -2017,26 +2076,22 @@ int pm_genpd_init(struct generic_pm_domain *genpd, genpd->dev_ops.start = pm_clk_resume; } + /* The always-on governor works better with the corresponding flag. */ + if (gov == &pm_domain_always_on_gov) + genpd->flags |= GENPD_FLAG_RPM_ALWAYS_ON; + /* Always-on domains must be powered on at initialization. */ if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) && !genpd_status_on(genpd)) return -EINVAL; - if (genpd_is_cpu_domain(genpd) && - !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL)) - return -ENOMEM; - - /* Use only one "off" state if there were no states declared */ - if (genpd->state_count == 0) { - ret = genpd_set_default_power_state(genpd); - if (ret) { - if (genpd_is_cpu_domain(genpd)) - free_cpumask_var(genpd->cpus); - return ret; - } - } else if (!gov && genpd->state_count > 1) { + /* Multiple states but no governor doesn't make sense. 
*/ + if (!gov && genpd->state_count > 1) pr_warn("%s: no governor for states\n", genpd->name); - } + + ret = genpd_alloc_data(genpd); + if (ret) + return ret; device_initialize(&genpd->dev); dev_set_name(&genpd->dev, "%s", genpd->name); @@ -2081,10 +2136,7 @@ static int genpd_remove(struct generic_pm_domain *genpd) genpd_unlock(genpd); genpd_debug_remove(genpd); cancel_work_sync(&genpd->power_off_work); - if (genpd_is_cpu_domain(genpd)) - free_cpumask_var(genpd->cpus); - if (genpd->free_states) - genpd->free_states(genpd->states, genpd->state_count); + genpd_free_data(genpd); pr_debug("%s: removed %s\n", __func__, genpd->name); @@ -3163,6 +3215,7 @@ static int sub_domains_show(struct seq_file *s, void *data) static int idle_states_show(struct seq_file *s, void *data) { struct generic_pm_domain *genpd = s->private; + u64 now, delta, idle_time = 0; unsigned int i; int ret = 0; @@ -3173,17 +3226,19 @@ static int idle_states_show(struct seq_file *s, void *data) seq_puts(s, "State Time Spent(ms) Usage Rejected\n"); for (i = 0; i < genpd->state_count; i++) { - ktime_t delta = 0; - s64 msecs; + idle_time += genpd->states[i].idle_time; - if ((genpd->status == GENPD_STATE_OFF) && - (genpd->state_idx == i)) - delta = ktime_sub(ktime_get(), genpd->accounting_time); + if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) { + now = ktime_get_mono_fast_ns(); + if (now > genpd->accounting_time) { + delta = now - genpd->accounting_time; + idle_time += delta; + } + } - msecs = ktime_to_ms( - ktime_add(genpd->states[i].idle_time, delta)); - seq_printf(s, "S%-13i %-14lld %-14llu %llu\n", i, msecs, - genpd->states[i].usage, genpd->states[i].rejected); + do_div(idle_time, NSEC_PER_MSEC); + seq_printf(s, "S%-13i %-14llu %-14llu %llu\n", i, idle_time, + genpd->states[i].usage, genpd->states[i].rejected); } genpd_unlock(genpd); @@ -3193,18 +3248,22 @@ static int idle_states_show(struct seq_file *s, void *data) static int active_time_show(struct seq_file *s, void *data) { struct generic_pm_domain *genpd = s->private; - ktime_t delta = 0; + u64 now, on_time, delta = 0; int ret = 0; ret = genpd_lock_interruptible(genpd); if (ret) return -ERESTARTSYS; - if (genpd->status == GENPD_STATE_ON) - delta = ktime_sub(ktime_get(), genpd->accounting_time); + if (genpd->status == GENPD_STATE_ON) { + now = ktime_get_mono_fast_ns(); + if (now > genpd->accounting_time) + delta = now - genpd->accounting_time; + } - seq_printf(s, "%lld ms\n", ktime_to_ms( - ktime_add(genpd->on_time, delta))); + on_time = genpd->on_time + delta; + do_div(on_time, NSEC_PER_MSEC); + seq_printf(s, "%llu ms\n", on_time); genpd_unlock(genpd); return ret; @@ -3213,7 +3272,7 @@ static int active_time_show(struct seq_file *s, void *data) static int total_idle_time_show(struct seq_file *s, void *data) { struct generic_pm_domain *genpd = s->private; - ktime_t delta = 0, total = 0; + u64 now, delta, total = 0; unsigned int i; int ret = 0; @@ -3222,16 +3281,19 @@ static int total_idle_time_show(struct seq_file *s, void *data) return -ERESTARTSYS; for (i = 0; i < genpd->state_count; i++) { + total += genpd->states[i].idle_time; - if ((genpd->status == GENPD_STATE_OFF) && - (genpd->state_idx == i)) - delta = ktime_sub(ktime_get(), genpd->accounting_time); - - total = ktime_add(total, genpd->states[i].idle_time); + if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) { + now = ktime_get_mono_fast_ns(); + if (now > genpd->accounting_time) { + delta = now - genpd->accounting_time; + total += delta; + } + } } - total = ktime_add(total, delta); - 
seq_printf(s, "%lld ms\n", ktime_to_ms(total)); + do_div(total, NSEC_PER_MSEC); + seq_printf(s, "%llu ms\n", total); genpd_unlock(genpd); return ret; diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c index cd08c5885190..282a3a135827 100644 --- a/drivers/base/power/domain_governor.c +++ b/drivers/base/power/domain_governor.c @@ -18,6 +18,8 @@ static int dev_update_qos_constraint(struct device *dev, void *data) s64 constraint_ns; if (dev->power.subsys_data && dev->power.subsys_data->domain_data) { + struct gpd_timing_data *td = dev_gpd_data(dev)->td; + /* * Only take suspend-time QoS constraints of devices into * account, because constraints updated after the device has @@ -25,7 +27,8 @@ static int dev_update_qos_constraint(struct device *dev, void *data) * anyway. In order for them to take effect, the device has to * be resumed and suspended again. */ - constraint_ns = dev_gpd_data(dev)->td.effective_constraint_ns; + constraint_ns = td ? td->effective_constraint_ns : + PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS; } else { /* * The child is not in a domain and there's no info on its @@ -49,7 +52,7 @@ static int dev_update_qos_constraint(struct device *dev, void *data) */ static bool default_suspend_ok(struct device *dev) { - struct gpd_timing_data *td = &dev_gpd_data(dev)->td; + struct gpd_timing_data *td = dev_gpd_data(dev)->td; unsigned long flags; s64 constraint_ns; @@ -136,26 +139,28 @@ static void update_domain_next_wakeup(struct generic_pm_domain *genpd, ktime_t n * is able to enter its optimal idle state. */ list_for_each_entry(pdd, &genpd->dev_list, list_node) { - next_wakeup = to_gpd_data(pdd)->next_wakeup; + next_wakeup = to_gpd_data(pdd)->td->next_wakeup; if (next_wakeup != KTIME_MAX && !ktime_before(next_wakeup, now)) if (ktime_before(next_wakeup, domain_wakeup)) domain_wakeup = next_wakeup; } list_for_each_entry(link, &genpd->parent_links, parent_node) { - next_wakeup = link->child->next_wakeup; + struct genpd_governor_data *cgd = link->child->gd; + + next_wakeup = cgd ? cgd->next_wakeup : KTIME_MAX; if (next_wakeup != KTIME_MAX && !ktime_before(next_wakeup, now)) if (ktime_before(next_wakeup, domain_wakeup)) domain_wakeup = next_wakeup; } - genpd->next_wakeup = domain_wakeup; + genpd->gd->next_wakeup = domain_wakeup; } static bool next_wakeup_allows_state(struct generic_pm_domain *genpd, unsigned int state, ktime_t now) { - ktime_t domain_wakeup = genpd->next_wakeup; + ktime_t domain_wakeup = genpd->gd->next_wakeup; s64 idle_time_ns, min_sleep_ns; min_sleep_ns = genpd->states[state].power_off_latency_ns + @@ -185,8 +190,9 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd, * All subdomains have been powered off already at this point. */ list_for_each_entry(link, &genpd->parent_links, parent_node) { - struct generic_pm_domain *sd = link->child; - s64 sd_max_off_ns = sd->max_off_time_ns; + struct genpd_governor_data *cgd = link->child->gd; + + s64 sd_max_off_ns = cgd ? cgd->max_off_time_ns : -1; if (sd_max_off_ns < 0) continue; @@ -215,7 +221,7 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd, * domain to turn off and on (that's how much time it will * have to wait worst case). 
*/ - td = &to_gpd_data(pdd)->td; + td = to_gpd_data(pdd)->td; constraint_ns = td->effective_constraint_ns; /* * Zero means "no suspend at all" and this runs only when all @@ -244,7 +250,7 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd, * time and the time needed to turn the domain on is the maximum * theoretical time this domain can spend in the "off" state. */ - genpd->max_off_time_ns = min_off_time_ns - + genpd->gd->max_off_time_ns = min_off_time_ns - genpd->states[state].power_on_latency_ns; return true; } @@ -259,6 +265,7 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd, static bool _default_power_down_ok(struct dev_pm_domain *pd, ktime_t now) { struct generic_pm_domain *genpd = pd_to_genpd(pd); + struct genpd_governor_data *gd = genpd->gd; int state_idx = genpd->state_count - 1; struct gpd_link *link; @@ -269,11 +276,11 @@ static bool _default_power_down_ok(struct dev_pm_domain *pd, ktime_t now) * cannot be met. */ update_domain_next_wakeup(genpd, now); - if ((genpd->flags & GENPD_FLAG_MIN_RESIDENCY) && (genpd->next_wakeup != KTIME_MAX)) { + if ((genpd->flags & GENPD_FLAG_MIN_RESIDENCY) && (gd->next_wakeup != KTIME_MAX)) { /* Let's find out the deepest domain idle state, the devices prefer */ while (state_idx >= 0) { if (next_wakeup_allows_state(genpd, state_idx, now)) { - genpd->max_off_time_changed = true; + gd->max_off_time_changed = true; break; } state_idx--; @@ -281,14 +288,14 @@ static bool _default_power_down_ok(struct dev_pm_domain *pd, ktime_t now) if (state_idx < 0) { state_idx = 0; - genpd->cached_power_down_ok = false; + gd->cached_power_down_ok = false; goto done; } } - if (!genpd->max_off_time_changed) { - genpd->state_idx = genpd->cached_power_down_state_idx; - return genpd->cached_power_down_ok; + if (!gd->max_off_time_changed) { + genpd->state_idx = gd->cached_power_down_state_idx; + return gd->cached_power_down_ok; } /* @@ -297,12 +304,16 @@ static bool _default_power_down_ok(struct dev_pm_domain *pd, ktime_t now) * going to be called for any parent until this instance * returns. 
*/ - list_for_each_entry(link, &genpd->child_links, child_node) - link->parent->max_off_time_changed = true; + list_for_each_entry(link, &genpd->child_links, child_node) { + struct genpd_governor_data *pgd = link->parent->gd; + + if (pgd) + pgd->max_off_time_changed = true; + } - genpd->max_off_time_ns = -1; - genpd->max_off_time_changed = false; - genpd->cached_power_down_ok = true; + gd->max_off_time_ns = -1; + gd->max_off_time_changed = false; + gd->cached_power_down_ok = true; /* * Find a state to power down to, starting from the state @@ -310,7 +321,7 @@ static bool _default_power_down_ok(struct dev_pm_domain *pd, ktime_t now) */ while (!__default_power_down_ok(pd, state_idx)) { if (state_idx == 0) { - genpd->cached_power_down_ok = false; + gd->cached_power_down_ok = false; break; } state_idx--; @@ -318,8 +329,8 @@ static bool _default_power_down_ok(struct dev_pm_domain *pd, ktime_t now) done: genpd->state_idx = state_idx; - genpd->cached_power_down_state_idx = genpd->state_idx; - return genpd->cached_power_down_ok; + gd->cached_power_down_state_idx = genpd->state_idx; + return gd->cached_power_down_ok; } static bool default_power_down_ok(struct dev_pm_domain *pd) @@ -327,11 +338,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd) return _default_power_down_ok(pd, ktime_get()); } -static bool always_on_power_down_ok(struct dev_pm_domain *domain) -{ - return false; -} - #ifdef CONFIG_CPU_IDLE static bool cpu_power_down_ok(struct dev_pm_domain *pd) { @@ -401,6 +407,5 @@ struct dev_power_governor simple_qos_governor = { * pm_genpd_gov_always_on - A governor implementing an always-on policy */ struct dev_power_governor pm_domain_always_on_gov = { - .power_down_ok = always_on_power_down_ok, .suspend_ok = default_suspend_ok, }; diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index d4059e6ffeae..676dc72d912d 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -263,7 +263,7 @@ static int rpm_check_suspend_allowed(struct device *dev) retval = -EINVAL; else if (dev->power.disable_depth > 0) retval = -EACCES; - else if (atomic_read(&dev->power.usage_count) > 0) + else if (atomic_read(&dev->power.usage_count)) retval = -EAGAIN; else if (!dev->power.ignore_children && atomic_read(&dev->power.child_count)) @@ -1039,13 +1039,33 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay) } EXPORT_SYMBOL_GPL(pm_schedule_suspend); +static int rpm_drop_usage_count(struct device *dev) +{ + int ret; + + ret = atomic_sub_return(1, &dev->power.usage_count); + if (ret >= 0) + return ret; + + /* + * Because rpm_resume() does not check the usage counter, it will resume + * the device even if the usage counter is 0 or negative, so it is + * sufficient to increment the usage counter here to reverse the change + * made above. + */ + atomic_inc(&dev->power.usage_count); + dev_warn(dev, "Runtime PM usage count underflow!\n"); + return -EINVAL; +} + /** * __pm_runtime_idle - Entry point for runtime idle operations. * @dev: Device to send idle notification for. * @rpmflags: Flag bits. * * If the RPM_GET_PUT flag is set, decrement the device's usage count and - * return immediately if it is larger than zero. Then carry out an idle + * return immediately if it is larger than zero (if it becomes negative, log a + * warning, increment it, and return an error). Then carry out an idle * notification, either synchronous or asynchronous. 
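The underflow warning added in rpm_drop_usage_count() above only fires when puts outnumber gets. A correctly balanced pattern, sketched for a hypothetical foo driver, looks like this:

#include <linux/pm_runtime.h>

static int foo_do_io(struct device *dev)
{
	int ret;

	/* Bump usage_count and resume the device before touching it. */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	/* ... talk to the hardware ... */

	/* Drop exactly one reference; the device may autosuspend later. */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}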
* * This routine may be called in atomic context if the RPM_ASYNC flag is set, @@ -1057,7 +1077,10 @@ int __pm_runtime_idle(struct device *dev, int rpmflags) int retval; if (rpmflags & RPM_GET_PUT) { - if (!atomic_dec_and_test(&dev->power.usage_count)) { + retval = rpm_drop_usage_count(dev); + if (retval < 0) { + return retval; + } else if (retval > 0) { trace_rpm_usage_rcuidle(dev, rpmflags); return 0; } @@ -1079,7 +1102,8 @@ EXPORT_SYMBOL_GPL(__pm_runtime_idle); * @rpmflags: Flag bits. * * If the RPM_GET_PUT flag is set, decrement the device's usage count and - * return immediately if it is larger than zero. Then carry out a suspend, + * return immediately if it is larger than zero (if it becomes negative, log a + * warning, increment it, and return an error). Then carry out a suspend, * either synchronous or asynchronous. * * This routine may be called in atomic context if the RPM_ASYNC flag is set, @@ -1091,7 +1115,10 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags) int retval; if (rpmflags & RPM_GET_PUT) { - if (!atomic_dec_and_test(&dev->power.usage_count)) { + retval = rpm_drop_usage_count(dev); + if (retval < 0) { + return retval; + } else if (retval > 0) { trace_rpm_usage_rcuidle(dev, rpmflags); return 0; } @@ -1210,12 +1237,13 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status) { struct device *parent = dev->parent; bool notify_parent = false; + unsigned long flags; int error = 0; if (status != RPM_ACTIVE && status != RPM_SUSPENDED) return -EINVAL; - spin_lock_irq(&dev->power.lock); + spin_lock_irqsave(&dev->power.lock, flags); /* * Prevent PM-runtime from being enabled for the device or return an @@ -1226,7 +1254,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status) else error = -EAGAIN; - spin_unlock_irq(&dev->power.lock); + spin_unlock_irqrestore(&dev->power.lock, flags); if (error) return error; @@ -1247,7 +1275,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status) device_links_read_unlock(idx); } - spin_lock_irq(&dev->power.lock); + spin_lock_irqsave(&dev->power.lock, flags); if (dev->power.runtime_status == status || !parent) goto out_set; @@ -1288,7 +1316,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status) dev->power.runtime_error = 0; out: - spin_unlock_irq(&dev->power.lock); + spin_unlock_irqrestore(&dev->power.lock, flags); if (notify_parent) pm_request_idle(parent); @@ -1527,14 +1555,17 @@ EXPORT_SYMBOL_GPL(pm_runtime_forbid); */ void pm_runtime_allow(struct device *dev) { + int ret; + spin_lock_irq(&dev->power.lock); if (dev->power.runtime_auto) goto out; dev->power.runtime_auto = true; - if (atomic_dec_and_test(&dev->power.usage_count)) + ret = rpm_drop_usage_count(dev); + if (ret == 0) rpm_idle(dev, RPM_AUTO | RPM_ASYNC); - else + else if (ret > 0) trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC); out: diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index a57d469676ca..11a4ffe91367 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -930,6 +930,7 @@ bool pm_wakeup_pending(void) return ret || atomic_read(&pm_abort_suspend) > 0; } +EXPORT_SYMBOL_GPL(pm_wakeup_pending); void pm_system_wakeup(void) { diff --git a/drivers/base/property.c b/drivers/base/property.c index c0e94cce9c29..ed6f449f8e5c 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -47,12 +47,14 @@ bool fwnode_property_present(const struct fwnode_handle *fwnode, { bool ret; + if (IS_ERR_OR_NULL(fwnode)) + return false; + ret = 
fwnode_call_bool_op(fwnode, property_present, propname); - if (ret == false && !IS_ERR_OR_NULL(fwnode) && - !IS_ERR_OR_NULL(fwnode->secondary)) - ret = fwnode_call_bool_op(fwnode->secondary, property_present, - propname); - return ret; + if (ret) + return ret; + + return fwnode_call_bool_op(fwnode->secondary, property_present, propname); } EXPORT_SYMBOL_GPL(fwnode_property_present); @@ -66,6 +68,9 @@ EXPORT_SYMBOL_GPL(fwnode_property_present); * Function reads an array of u8 properties with @propname from the device * firmware description and stores them to @val if found. * + * It's recommended to call device_property_count_u8() instead of calling + * this function with @val equals %NULL and @nval equals 0. + * * Return: number of values if @val was %NULL, * %0 if the property was found (success), * %-EINVAL if given arguments are not valid, @@ -91,6 +96,9 @@ EXPORT_SYMBOL_GPL(device_property_read_u8_array); * Function reads an array of u16 properties with @propname from the device * firmware description and stores them to @val if found. * + * It's recommended to call device_property_count_u16() instead of calling + * this function with @val equals %NULL and @nval equals 0. + * * Return: number of values if @val was %NULL, * %0 if the property was found (success), * %-EINVAL if given arguments are not valid, @@ -116,6 +124,9 @@ EXPORT_SYMBOL_GPL(device_property_read_u16_array); * Function reads an array of u32 properties with @propname from the device * firmware description and stores them to @val if found. * + * It's recommended to call device_property_count_u32() instead of calling + * this function with @val equals %NULL and @nval equals 0. + * * Return: number of values if @val was %NULL, * %0 if the property was found (success), * %-EINVAL if given arguments are not valid, @@ -141,6 +152,9 @@ EXPORT_SYMBOL_GPL(device_property_read_u32_array); * Function reads an array of u64 properties with @propname from the device * firmware description and stores them to @val if found. * + * It's recommended to call device_property_count_u64() instead of calling + * this function with @val equals %NULL and @nval equals 0. + * * Return: number of values if @val was %NULL, * %0 if the property was found (success), * %-EINVAL if given arguments are not valid, @@ -166,6 +180,9 @@ EXPORT_SYMBOL_GPL(device_property_read_u64_array); * Function reads an array of string properties with @propname from the device * firmware description and stores them to @val if found. * + * It's recommended to call device_property_string_array_count() instead of calling + * this function with @val equals %NULL and @nval equals 0. 
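The counting helpers recommended in these notes pair naturally with the array readers. A sketch of the count-then-read pattern, assuming a made-up "vendor,rates" property and foo_* names:

#include <linux/property.h>
#include <linux/slab.h>

static int foo_read_rates(struct device *dev)
{
	u32 *rates;
	int n, ret;

	/* Ask how many u32 values the property carries. */
	n = device_property_count_u32(dev, "vendor,rates");
	if (n <= 0)
		return n ? n : -ENODATA;

	rates = kcalloc(n, sizeof(*rates), GFP_KERNEL);
	if (!rates)
		return -ENOMEM;

	ret = device_property_read_u32_array(dev, "vendor,rates", rates, n);
	/* ... use rates[0..n-1] on success ... */
	kfree(rates);
	return ret;
}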
+ * * Return: number of values read on success if @val is non-NULL, * number of values available on success if @val is NULL, * %-EINVAL if given arguments are not valid, @@ -232,15 +249,16 @@ static int fwnode_property_read_int_array(const struct fwnode_handle *fwnode, { int ret; + if (IS_ERR_OR_NULL(fwnode)) + return -EINVAL; + ret = fwnode_call_int_op(fwnode, property_read_int_array, propname, elem_size, val, nval); - if (ret == -EINVAL && !IS_ERR_OR_NULL(fwnode) && - !IS_ERR_OR_NULL(fwnode->secondary)) - ret = fwnode_call_int_op( - fwnode->secondary, property_read_int_array, propname, - elem_size, val, nval); + if (ret != -EINVAL) + return ret; - return ret; + return fwnode_call_int_op(fwnode->secondary, property_read_int_array, propname, + elem_size, val, nval); } /** @@ -253,6 +271,9 @@ static int fwnode_property_read_int_array(const struct fwnode_handle *fwnode, * Read an array of u8 properties with @propname from @fwnode and stores them to * @val if found. * + * It's recommended to call fwnode_property_count_u8() instead of calling + * this function with @val equals %NULL and @nval equals 0. + * * Return: number of values if @val was %NULL, * %0 if the property was found (success), * %-EINVAL if given arguments are not valid, @@ -279,6 +300,9 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_u8_array); * Read an array of u16 properties with @propname from @fwnode and store them to * @val if found. * + * It's recommended to call fwnode_property_count_u16() instead of calling + * this function with @val equals %NULL and @nval equals 0. + * * Return: number of values if @val was %NULL, * %0 if the property was found (success), * %-EINVAL if given arguments are not valid, @@ -305,6 +329,9 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_u16_array); * Read an array of u32 properties with @propname from @fwnode store them to * @val if found. * + * It's recommended to call fwnode_property_count_u32() instead of calling + * this function with @val equals %NULL and @nval equals 0. + * * Return: number of values if @val was %NULL, * %0 if the property was found (success), * %-EINVAL if given arguments are not valid, @@ -331,6 +358,9 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_u32_array); * Read an array of u64 properties with @propname from @fwnode and store them to * @val if found. * + * It's recommended to call fwnode_property_count_u64() instead of calling + * this function with @val equals %NULL and @nval equals 0. + * * Return: number of values if @val was %NULL, * %0 if the property was found (success), * %-EINVAL if given arguments are not valid, @@ -357,6 +387,9 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_u64_array); * Read an string list property @propname from the given firmware node and store * them to @val if found. * + * It's recommended to call fwnode_property_string_array_count() instead of calling + * this function with @val equals %NULL and @nval equals 0. 
+ * * Return: number of values read on success if @val is non-NULL, * number of values available on success if @val is NULL, * %-EINVAL if given arguments are not valid, @@ -371,14 +404,16 @@ int fwnode_property_read_string_array(const struct fwnode_handle *fwnode, { int ret; + if (IS_ERR_OR_NULL(fwnode)) + return -EINVAL; + ret = fwnode_call_int_op(fwnode, property_read_string_array, propname, val, nval); - if (ret == -EINVAL && !IS_ERR_OR_NULL(fwnode) && - !IS_ERR_OR_NULL(fwnode->secondary)) - ret = fwnode_call_int_op(fwnode->secondary, - property_read_string_array, propname, - val, nval); - return ret; + if (ret != -EINVAL) + return ret; + + return fwnode_call_int_op(fwnode->secondary, property_read_string_array, propname, + val, nval); } EXPORT_SYMBOL_GPL(fwnode_property_read_string_array); @@ -480,15 +515,19 @@ int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode, { int ret; + if (IS_ERR_OR_NULL(fwnode)) + return -ENOENT; + ret = fwnode_call_int_op(fwnode, get_reference_args, prop, nargs_prop, nargs, index, args); + if (ret == 0) + return ret; - if (ret < 0 && !IS_ERR_OR_NULL(fwnode) && - !IS_ERR_OR_NULL(fwnode->secondary)) - ret = fwnode_call_int_op(fwnode->secondary, get_reference_args, - prop, nargs_prop, nargs, index, args); + if (IS_ERR_OR_NULL(fwnode->secondary)) + return ret; - return ret; + return fwnode_call_int_op(fwnode->secondary, get_reference_args, prop, nargs_prop, + nargs, index, args); } EXPORT_SYMBOL_GPL(fwnode_property_get_reference_args); @@ -587,17 +626,17 @@ EXPORT_SYMBOL_GPL(fwnode_get_next_parent); */ struct device *fwnode_get_next_parent_dev(struct fwnode_handle *fwnode) { + struct fwnode_handle *parent; struct device *dev; - fwnode_handle_get(fwnode); - do { - fwnode = fwnode_get_next_parent(fwnode); - if (!fwnode) - return NULL; - dev = get_dev_from_fwnode(fwnode); - } while (!dev); - fwnode_handle_put(fwnode); - return dev; + fwnode_for_each_parent_node(fwnode, parent) { + dev = get_dev_from_fwnode(parent); + if (dev) { + fwnode_handle_put(parent); + return dev; + } + } + return NULL; } /** @@ -608,13 +647,11 @@ struct device *fwnode_get_next_parent_dev(struct fwnode_handle *fwnode) */ unsigned int fwnode_count_parents(const struct fwnode_handle *fwnode) { - struct fwnode_handle *__fwnode; - unsigned int count; - - __fwnode = fwnode_get_parent(fwnode); + struct fwnode_handle *parent; + unsigned int count = 0; - for (count = 0; __fwnode; count++) - __fwnode = fwnode_get_next_parent(__fwnode); + fwnode_for_each_parent_node(fwnode, parent) + count++; return count; } @@ -635,40 +672,43 @@ EXPORT_SYMBOL_GPL(fwnode_count_parents); struct fwnode_handle *fwnode_get_nth_parent(struct fwnode_handle *fwnode, unsigned int depth) { - unsigned int i; - - fwnode_handle_get(fwnode); + struct fwnode_handle *parent; - for (i = 0; i < depth && fwnode; i++) - fwnode = fwnode_get_next_parent(fwnode); + if (depth == 0) + return fwnode_handle_get(fwnode); - return fwnode; + fwnode_for_each_parent_node(fwnode, parent) { + if (--depth == 0) + return parent; + } + return NULL; } EXPORT_SYMBOL_GPL(fwnode_get_nth_parent); /** - * fwnode_is_ancestor_of - Test if @test_ancestor is ancestor of @test_child - * @test_ancestor: Firmware which is tested for being an ancestor - * @test_child: Firmware which is tested for being the child + * fwnode_is_ancestor_of - Test if @ancestor is ancestor of @child + * @ancestor: Firmware which is tested for being an ancestor + * @child: Firmware which is tested for being the child * * A node is considered an ancestor of itself too. 
  *
- * Returns true if @test_ancestor is an ancestor of @test_child.
- * Otherwise, returns false.
+ * Returns true if @ancestor is an ancestor of @child. Otherwise, returns false.
  */
-bool fwnode_is_ancestor_of(struct fwnode_handle *test_ancestor,
-                           struct fwnode_handle *test_child)
+bool fwnode_is_ancestor_of(struct fwnode_handle *ancestor, struct fwnode_handle *child)
 {
-        if (!test_ancestor)
+        struct fwnode_handle *parent;
+
+        if (IS_ERR_OR_NULL(ancestor))
                 return false;
 
-        fwnode_handle_get(test_child);
-        while (test_child) {
-                if (test_child == test_ancestor) {
-                        fwnode_handle_put(test_child);
+        if (child == ancestor)
+                return true;
+
+        fwnode_for_each_parent_node(child, parent) {
+                if (parent == ancestor) {
+                        fwnode_handle_put(parent);
                         return true;
                 }
-                test_child = fwnode_get_next_parent(test_child);
         }
 
         return false;
 }
@@ -698,7 +738,7 @@ fwnode_get_next_available_child_node(const struct fwnode_handle *fwnode,
 {
         struct fwnode_handle *next_child = child;
 
-        if (!fwnode)
+        if (IS_ERR_OR_NULL(fwnode))
                 return NULL;
 
         do {
@@ -722,16 +762,16 @@ struct fwnode_handle *device_get_next_child_node(struct device *dev,
         const struct fwnode_handle *fwnode = dev_fwnode(dev);
         struct fwnode_handle *next;
 
+        if (IS_ERR_OR_NULL(fwnode))
+                return NULL;
+
         /* Try to find a child in primary fwnode */
         next = fwnode_get_next_child_node(fwnode, child);
         if (next)
                 return next;
 
         /* When no more children in primary, continue with secondary */
-        if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary))
-                next = fwnode_get_next_child_node(fwnode->secondary, child);
-
-        return next;
+        return fwnode_get_next_child_node(fwnode->secondary, child);
 }
 EXPORT_SYMBOL_GPL(device_get_next_child_node);
@@ -798,6 +838,9 @@ EXPORT_SYMBOL_GPL(fwnode_handle_put);
  */
 bool fwnode_device_is_available(const struct fwnode_handle *fwnode)
 {
+        if (IS_ERR_OR_NULL(fwnode))
+                return false;
+
         if (!fwnode_has_op(fwnode, device_is_available))
                 return true;
 
@@ -823,33 +866,16 @@ EXPORT_SYMBOL_GPL(device_get_child_node_count);
 
 bool device_dma_supported(struct device *dev)
 {
-        const struct fwnode_handle *fwnode = dev_fwnode(dev);
-
-        /* For DT, this is always supported.
-         * For ACPI, this depends on CCA, which
-         * is determined by the acpi_dma_supported().
-         */
-        if (is_of_node(fwnode))
-                return true;
-
-        return acpi_dma_supported(to_acpi_device_node(fwnode));
+        return fwnode_call_bool_op(dev_fwnode(dev), device_dma_supported);
 }
 EXPORT_SYMBOL_GPL(device_dma_supported);
 
 enum dev_dma_attr device_get_dma_attr(struct device *dev)
 {
-        const struct fwnode_handle *fwnode = dev_fwnode(dev);
-        enum dev_dma_attr attr = DEV_DMA_NOT_SUPPORTED;
-
-        if (is_of_node(fwnode)) {
-                if (of_dma_is_coherent(to_of_node(fwnode)))
-                        attr = DEV_DMA_COHERENT;
-                else
-                        attr = DEV_DMA_NON_COHERENT;
-        } else
-                attr = acpi_get_dma_attr(to_acpi_device_node(fwnode));
+        if (!fwnode_has_op(dev_fwnode(dev), device_get_dma_attr))
+                return DEV_DMA_NOT_SUPPORTED;
 
-        return attr;
+        return fwnode_call_int_op(dev_fwnode(dev), device_get_dma_attr);
 }
 EXPORT_SYMBOL_GPL(device_get_dma_attr);
 
@@ -904,10 +930,7 @@ EXPORT_SYMBOL_GPL(device_get_phy_mode);
  */
 void __iomem *fwnode_iomap(struct fwnode_handle *fwnode, int index)
 {
-        if (IS_ENABLED(CONFIG_OF_ADDRESS) && is_of_node(fwnode))
-                return of_iomap(to_of_node(fwnode), index);
-
-        return NULL;
+        return fwnode_call_ptr_op(fwnode, iomap, index);
 }
 EXPORT_SYMBOL(fwnode_iomap);
 
@@ -921,17 +944,7 @@ EXPORT_SYMBOL(fwnode_iomap);
  */
 int fwnode_irq_get(const struct fwnode_handle *fwnode, unsigned int index)
 {
-        struct resource res;
-        int ret;
-
-        if (is_of_node(fwnode))
-                return of_irq_get(to_of_node(fwnode), index);
-
-        ret = acpi_irq_get(ACPI_HANDLE_FWNODE(fwnode), index, &res);
-        if (ret)
-                return ret;
-
-        return res.start;
+        return fwnode_call_int_op(fwnode, irq_get, index);
 }
 EXPORT_SYMBOL(fwnode_irq_get);
 
@@ -988,14 +1001,14 @@ fwnode_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
                 parent = fwnode_graph_get_port_parent(prev);
         else
                 parent = fwnode;
+        if (IS_ERR_OR_NULL(parent))
+                return NULL;
 
         ep = fwnode_call_ptr_op(parent, graph_get_next_endpoint, prev);
+        if (ep)
+                return ep;
 
-        if (IS_ERR_OR_NULL(ep) &&
-            !IS_ERR_OR_NULL(parent) && !IS_ERR_OR_NULL(parent->secondary))
-                ep = fwnode_graph_get_next_endpoint(parent->secondary, NULL);
-
-        return ep;
+        return fwnode_graph_get_next_endpoint(parent->secondary, NULL);
 }
 EXPORT_SYMBOL_GPL(fwnode_graph_get_next_endpoint);
 
@@ -1193,15 +1206,23 @@ const void *device_get_match_data(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(device_get_match_data);
 
-static void *
-fwnode_graph_devcon_match(struct fwnode_handle *fwnode, const char *con_id,
-                          void *data, devcon_match_fn_t match)
+static unsigned int fwnode_graph_devcon_matches(struct fwnode_handle *fwnode,
+                                                const char *con_id, void *data,
+                                                devcon_match_fn_t match,
+                                                void **matches,
+                                                unsigned int matches_len)
 {
         struct fwnode_handle *node;
         struct fwnode_handle *ep;
+        unsigned int count = 0;
         void *ret;
 
         fwnode_graph_for_each_endpoint(fwnode, ep) {
+                if (matches && count >= matches_len) {
+                        fwnode_handle_put(ep);
+                        break;
+                }
+
                 node = fwnode_graph_get_remote_port_parent(ep);
                 if (!fwnode_device_is_available(node)) {
                         fwnode_handle_put(node);
@@ -1211,33 +1232,43 @@ fwnode_graph_devcon_match(struct fwnode_handle *fwnode, const char *con_id,
                 ret = match(node, con_id, data);
                 fwnode_handle_put(node);
                 if (ret) {
-                        fwnode_handle_put(ep);
-                        return ret;
+                        if (matches)
+                                matches[count] = ret;
+                        count++;
                 }
         }
-        return NULL;
+        return count;
 }
 
-static void *
-fwnode_devcon_match(struct fwnode_handle *fwnode, const char *con_id,
-                    void *data, devcon_match_fn_t match)
+static unsigned int fwnode_devcon_matches(struct fwnode_handle *fwnode,
+                                          const char *con_id, void *data,
+                                          devcon_match_fn_t match,
+                                          void **matches,
+                                          unsigned int matches_len)
 {
         struct fwnode_handle *node;
+        unsigned int count = 0;
+        unsigned int i;
         void *ret;
-        int i;
 
         for (i = 0; ; i++) {
+                if (matches && count >= matches_len)
+                        break;
+
                 node = fwnode_find_reference(fwnode, con_id, i);
                 if (IS_ERR(node))
                         break;
 
                 ret = match(node, NULL, data);
                 fwnode_handle_put(node);
-                if (ret)
-                        return ret;
+                if (ret) {
+                        if (matches)
+                                matches[count] = ret;
+                        count++;
+                }
         }
 
-        return NULL;
+        return count;
 }
 
 /**
@@ -1255,15 +1286,61 @@ void *fwnode_connection_find_match(struct fwnode_handle *fwnode,
                                    const char *con_id, void *data,
                                    devcon_match_fn_t match)
 {
+        unsigned int count;
         void *ret;
 
         if (!fwnode || !match)
                 return NULL;
 
-        ret = fwnode_graph_devcon_match(fwnode, con_id, data, match);
-        if (ret)
+        count = fwnode_graph_devcon_matches(fwnode, con_id, data, match, &ret, 1);
+        if (count)
                 return ret;
 
-        return fwnode_devcon_match(fwnode, con_id, data, match);
+        count = fwnode_devcon_matches(fwnode, con_id, data, match, &ret, 1);
+        return count ? ret : NULL;
 }
 EXPORT_SYMBOL_GPL(fwnode_connection_find_match);
+
+/**
+ * fwnode_connection_find_matches - Find connections from a device node
+ * @fwnode: Device node with the connection
+ * @con_id: Identifier for the connection
+ * @data: Data for the match function
+ * @match: Function to check and convert the connection description
+ * @matches: (Optional) array of pointers to fill with matches
+ * @matches_len: Length of @matches
+ *
+ * Find up to @matches_len connections with unique identifier @con_id between
+ * @fwnode and other device nodes. @match will be used to convert the
+ * connection description to data the caller is expecting to be returned
+ * through the @matches array.
+ * If @matches is NULL @matches_len is ignored and the total number of resolved
+ * matches is returned.
+ *
+ * Return: Number of matches resolved, or negative errno.
+ */
+int fwnode_connection_find_matches(struct fwnode_handle *fwnode,
+                                   const char *con_id, void *data,
+                                   devcon_match_fn_t match,
+                                   void **matches, unsigned int matches_len)
+{
+        unsigned int count_graph;
+        unsigned int count_ref;
+
+        if (!fwnode || !match)
+                return -EINVAL;
+
+        count_graph = fwnode_graph_devcon_matches(fwnode, con_id, data, match,
+                                                  matches, matches_len);
+
+        if (matches) {
+                matches += count_graph;
+                matches_len -= count_graph;
+        }
+
+        count_ref = fwnode_devcon_matches(fwnode, con_id, data, match,
+                                          matches, matches_len);
+
+        return count_graph + count_ref;
+}
+EXPORT_SYMBOL_GPL(fwnode_connection_find_matches);
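For context, here is a minimal sketch of how a consumer might call the new fwnode_connection_find_matches() helper added above. The match callback, the "example-con" identifier and the surrounding driver code are illustrative assumptions, not taken from this merge:

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical consumer of fwnode_connection_find_matches(); not from this commit. */
#include <linux/kernel.h>
#include <linux/property.h>

/* Match callback: take a reference on the matched remote node and return it. */
static void *example_match(struct fwnode_handle *fwnode, const char *con_id,
                           void *data)
{
        return fwnode_handle_get(fwnode);
}

static int example_list_connections(struct device *dev)
{
        void *matches[4];
        int count, i;

        /* Resolve at most ARRAY_SIZE(matches) "example-con" connections. */
        count = fwnode_connection_find_matches(dev_fwnode(dev), "example-con",
                                               NULL, example_match,
                                               matches, ARRAY_SIZE(matches));
        if (count < 0)
                return count;

        /* Use and then drop the references taken by example_match(). */
        for (i = 0; i < count; i++)
                fwnode_handle_put(matches[i]);

        /* Passing matches == NULL would only count the connections. */
        return count;
}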
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index b4df36c7b17d..da8996e7a1f1 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -110,6 +110,10 @@ struct regmap {
         int (*reg_write)(void *context, unsigned int reg, unsigned int val);
         int (*reg_update_bits)(void *context, unsigned int reg,
                                unsigned int mask, unsigned int val);
+        /* Bulk read/write */
+        int (*read)(void *context, const void *reg_buf, size_t reg_size,
+                    void *val_buf, size_t val_size);
+        int (*write)(void *context, const void *data, size_t count);
 
         bool defer_caching;
 
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index f2469d3435ca..d0f5bc827978 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -183,8 +183,8 @@ int regcache_init(struct regmap *map, const struct regmap_config *config)
                 return 0;
         }
 
-        if (!map->max_register)
-                map->max_register = map->num_reg_defaults_raw;
+        if (!map->max_register && map->num_reg_defaults_raw)
+                map->max_register = (map->num_reg_defaults_raw - 1) * map->reg_stride;
 
         if (map->cache_ops->init) {
                 dev_dbg(map->dev, "Initializing %s cache\n",
diff --git a/drivers/base/regmap/regmap-i3c.c b/drivers/base/regmap/regmap-i3c.c
index 1578fb506683..0328b0b34284 100644
--- a/drivers/base/regmap/regmap-i3c.c
+++ b/drivers/base/regmap/regmap-i3c.c
@@ -40,7 +40,7 @@ static int regmap_i3c_read(void *context,
         return i3c_device_do_priv_xfers(i3c, xfers, 2);
 }
 
-static struct regmap_bus regmap_i3c = {
+static const struct regmap_bus regmap_i3c = {
         .write = regmap_i3c_write,
         .read = regmap_i3c_read,
 };
diff --git a/drivers/base/regmap/regmap-sccb.c b/drivers/base/regmap/regmap-sccb.c
index 597042e2d009..986af26d88c2 100644
--- a/drivers/base/regmap/regmap-sccb.c
+++ b/drivers/base/regmap/regmap-sccb.c
@@ -80,7 +80,7 @@ static int regmap_sccb_write(void *context, unsigned int reg, unsigned int val)
         return i2c_smbus_write_byte_data(i2c, reg, val);
 }
 
-static struct regmap_bus regmap_sccb_bus = {
+static const struct regmap_bus regmap_sccb_bus = {
         .reg_write = regmap_sccb_write,
         .reg_read = regmap_sccb_read,
 };
diff --git a/drivers/base/regmap/regmap-sdw-mbq.c b/drivers/base/regmap/regmap-sdw-mbq.c
index fe3ac26b66ad..388c3a087bd9 100644
--- a/drivers/base/regmap/regmap-sdw-mbq.c
+++ b/drivers/base/regmap/regmap-sdw-mbq.c
@@ -42,7 +42,7 @@ static int regmap_sdw_mbq_read(void *context, unsigned int reg, unsigned int *va
         return 0;
 }
 
-static struct regmap_bus regmap_sdw_mbq = {
+static const struct regmap_bus regmap_sdw_mbq = {
         .reg_read = regmap_sdw_mbq_read,
         .reg_write = regmap_sdw_mbq_write,
         .reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
diff --git a/drivers/base/regmap/regmap-sdw.c b/drivers/base/regmap/regmap-sdw.c
index 966de8a136d9..81b0327f719d 100644
--- a/drivers/base/regmap/regmap-sdw.c
+++ b/drivers/base/regmap/regmap-sdw.c
@@ -30,7 +30,7 @@ static int regmap_sdw_read(void *context, unsigned int reg, unsigned int *val)
         return 0;
 }
 
-static struct regmap_bus regmap_sdw = {
+static const struct regmap_bus regmap_sdw = {
         .reg_read = regmap_sdw_read,
         .reg_write = regmap_sdw_write,
         .reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
diff --git a/drivers/base/regmap/regmap-slimbus.c b/drivers/base/regmap/regmap-slimbus.c
index 0968059f1ef5..8075db788b39 100644
--- a/drivers/base/regmap/regmap-slimbus.c
+++ b/drivers/base/regmap/regmap-slimbus.c
@@ -22,7 +22,7 @@ static int regmap_slimbus_read(void *context, const void *reg, size_t reg_size,
         return slim_read(sdev, *(u16 *)reg, val_size, val);
 }
 
-static struct regmap_bus regmap_slimbus_bus = {
+static const struct regmap_bus regmap_slimbus_bus = {
         .write = regmap_slimbus_write,
         .read = regmap_slimbus_read,
         .reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
diff --git a/drivers/base/regmap/regmap-w1.c b/drivers/base/regmap/regmap-w1.c
index 1fbaaad71ca5..3a8b402db852 100644
--- a/drivers/base/regmap/regmap-w1.c
+++ b/drivers/base/regmap/regmap-w1.c
@@ -172,17 +172,17 @@ static int w1_reg_a16_v16_write(void *context, unsigned int reg,
  * Various types of supported bus addressing
  */
 
-static struct regmap_bus regmap_w1_bus_a8_v8 = {
+static const struct regmap_bus regmap_w1_bus_a8_v8 = {
         .reg_read = w1_reg_a8_v8_read,
         .reg_write = w1_reg_a8_v8_write,
 };
 
-static struct regmap_bus regmap_w1_bus_a8_v16 = {
+static const struct regmap_bus regmap_w1_bus_a8_v16 = {
         .reg_read = w1_reg_a8_v16_read,
         .reg_write = w1_reg_a8_v16_write,
 };
 
-static struct regmap_bus regmap_w1_bus_a16_v16 = {
+static const struct regmap_bus regmap_w1_bus_a16_v16 = {
         .reg_read = w1_reg_a16_v16_read,
         .reg_write = w1_reg_a16_v16_write,
 };
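To make the regcache_init() change above concrete: max_register holds the highest valid register address, not the number of registers, so it has to be scaled by the register stride. A small worked example with invented numbers:

/*
 * Hypothetical map: 16 raw register defaults at addresses 0x00, 0x04, ...
 * with reg_stride = 4. The highest register address is
 *
 *     (num_reg_defaults_raw - 1) * reg_stride = (16 - 1) * 4 = 60 (0x3c)
 *
 * whereas the previous code would have set max_register = 16, which is
 * neither aligned to the stride nor the top of the register map.
 */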
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 5e12f7cb5147..2221d9863831 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -838,12 +838,15 @@ struct regmap *__regmap_init(struct device *dev,
                 map->reg_stride_order = ilog2(map->reg_stride);
         else
                 map->reg_stride_order = -1;
-        map->use_single_read = config->use_single_read || !bus || !bus->read;
-        map->use_single_write = config->use_single_write || !bus || !bus->write;
-        map->can_multi_write = config->can_multi_write && bus && bus->write;
+        map->use_single_read = config->use_single_read || !(config->read || (bus && bus->read));
+        map->use_single_write = config->use_single_write || !(config->write || (bus && bus->write));
+        map->can_multi_write = config->can_multi_write && (config->write || (bus && bus->write));
         if (bus) {
                 map->max_raw_read = bus->max_raw_read;
                 map->max_raw_write = bus->max_raw_write;
+        } else if (config->max_raw_read && config->max_raw_write) {
+                map->max_raw_read = config->max_raw_read;
+                map->max_raw_write = config->max_raw_write;
         }
         map->dev = dev;
         map->bus = bus;
@@ -877,7 +880,16 @@ struct regmap *__regmap_init(struct device *dev,
                 map->read_flag_mask = bus->read_flag_mask;
         }
 
-        if (!bus) {
+        if (config && config->read && config->write) {
+                map->reg_read = _regmap_bus_read;
+
+                /* Bulk read/write */
+                map->read = config->read;
+                map->write = config->write;
+
+                reg_endian = REGMAP_ENDIAN_NATIVE;
+                val_endian = REGMAP_ENDIAN_NATIVE;
+        } else if (!bus) {
                 map->reg_read = config->reg_read;
                 map->reg_write = config->reg_write;
                 map->reg_update_bits = config->reg_update_bits;
@@ -894,10 +906,13 @@ struct regmap *__regmap_init(struct device *dev,
         } else {
                 map->reg_read = _regmap_bus_read;
                 map->reg_update_bits = bus->reg_update_bits;
-        }
+                /* Bulk read/write */
+                map->read = bus->read;
+                map->write = bus->write;
 
-        reg_endian = regmap_get_reg_endian(bus, config);
-        val_endian = regmap_get_val_endian(dev, bus, config);
+                reg_endian = regmap_get_reg_endian(bus, config);
+                val_endian = regmap_get_val_endian(dev, bus, config);
+        }
 
         switch (config->reg_bits + map->reg_shift) {
         case 2:
@@ -1671,8 +1686,6 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
         size_t len;
         int i;
 
-        WARN_ON(!map->bus);
-
         /* Check for unwritable or noinc registers in range
          * before we start
          */
@@ -1754,7 +1767,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
                 val = work_val;
         }
 
-        if (map->async && map->bus->async_write) {
+        if (map->async && map->bus && map->bus->async_write) {
                 struct regmap_async *async;
 
                 trace_regmap_async_write_start(map, reg, val_len);
@@ -1822,11 +1835,11 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
                  * write.
                  */
                 if (val == work_val)
-                        ret = map->bus->write(map->bus_context, map->work_buf,
-                                              map->format.reg_bytes +
-                                              map->format.pad_bytes +
-                                              val_len);
-                else if (map->bus->gather_write)
+                        ret = map->write(map->bus_context, map->work_buf,
+                                         map->format.reg_bytes +
+                                         map->format.pad_bytes +
+                                         val_len);
+                else if (map->bus && map->bus->gather_write)
                         ret = map->bus->gather_write(map->bus_context, map->work_buf,
                                                      map->format.reg_bytes +
                                                      map->format.pad_bytes,
@@ -1844,7 +1857,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
                 memcpy(buf, map->work_buf, map->format.reg_bytes);
                 memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
                        val, val_len);
-                ret = map->bus->write(map->bus_context, buf, len);
+                ret = map->write(map->bus_context, buf, len);
 
                 kfree(buf);
         } else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
@@ -1901,7 +1914,7 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
         struct regmap_range_node *range;
         struct regmap *map = context;
 
-        WARN_ON(!map->bus || !map->format.format_write);
+        WARN_ON(!map->format.format_write);
 
         range = _regmap_range_lookup(map, reg);
         if (range) {
@@ -1916,8 +1929,7 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
 
         trace_regmap_hw_write_start(map, reg, 1);
 
-        ret = map->bus->write(map->bus_context, map->work_buf,
-                              map->format.buf_size);
+        ret = map->write(map->bus_context, map->work_buf, map->format.buf_size);
 
         trace_regmap_hw_write_done(map, reg, 1);
 
@@ -1937,7 +1949,7 @@ static int _regmap_bus_raw_write(void *context, unsigned int reg,
 {
         struct regmap *map = context;
 
-        WARN_ON(!map->bus || !map->format.format_val);
+        WARN_ON(!map->format.format_val);
 
         map->format.format_val(map->work_buf + map->format.reg_bytes
                                + map->format.pad_bytes, val, 0);
@@ -1951,7 +1963,7 @@ static int _regmap_bus_raw_write(void *context, unsigned int reg,
 
 static inline void *_regmap_map_get_context(struct regmap *map)
 {
-        return (map->bus) ? map : map->bus_context;
+        return (map->bus || (!map->bus && map->read)) ? map : map->bus_context;
 }
 
 int _regmap_write(struct regmap *map, unsigned int reg,
@@ -2363,7 +2375,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
         u8 = buf;
         *u8 |= map->write_flag_mask;
 
-        ret = map->bus->write(map->bus_context, buf, len);
+        ret = map->write(map->bus_context, buf, len);
 
         kfree(buf);
 
@@ -2669,9 +2681,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
         struct regmap_range_node *range;
         int ret;
 
-        WARN_ON(!map->bus);
-
-        if (!map->bus || !map->bus->read)
+        if (!map->read)
                 return -EINVAL;
 
         range = _regmap_range_lookup(map, reg);
@@ -2689,9 +2699,9 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
                                       map->read_flag_mask);
         trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);
 
-        ret = map->bus->read(map->bus_context, map->work_buf,
-                             map->format.reg_bytes + map->format.pad_bytes,
-                             val, val_len);
+        ret = map->read(map->bus_context, map->work_buf,
+                        map->format.reg_bytes + map->format.pad_bytes,
+                        val, val_len);
 
         trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);
 
@@ -2802,8 +2812,6 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
         unsigned int v;
         int ret, i;
 
-        if (!map->bus)
-                return -EINVAL;
         if (val_len % map->format.val_bytes)
                 return -EINVAL;
         if (!IS_ALIGNED(reg, map->reg_stride))
@@ -2818,7 +2826,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
                 size_t chunk_count, chunk_bytes;
                 size_t chunk_regs = val_count;
 
-                if (!map->bus->read) {
+                if (!map->read) {
                         ret = -ENOTSUPP;
                         goto out;
                 }
@@ -2878,7 +2886,7 @@ EXPORT_SYMBOL_GPL(regmap_raw_read);
  * @val: Pointer to data buffer
  * @val_len: Length of output buffer in bytes.
  *
- * The regmap API usually assumes that bulk bus read operations will read a
+ * The regmap API usually assumes that bulk read operations will read a
  * range of registers. Some devices have certain registers for which a read
  * operation read will read from an internal FIFO.
  *
@@ -2896,10 +2904,6 @@ int regmap_noinc_read(struct regmap *map, unsigned int reg,
         size_t read_len;
         int ret;
 
-        if (!map->bus)
-                return -EINVAL;
-        if (!map->bus->read)
-                return -ENOTSUPP;
         if (val_len % map->format.val_bytes)
                 return -EINVAL;
         if (!IS_ALIGNED(reg, map->reg_stride))
@@ -3013,7 +3017,7 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
         if (val_count == 0)
                 return -EINVAL;
 
-        if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
+        if (map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
                 ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
                 if (ret != 0)
                         return ret;
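Finally, a rough sketch of a client exercising the new regmap_config bulk callbacks wired up by the __regmap_init() changes above. The device, transport and probe details are invented for illustration; a real driver would perform its own bus transactions inside the callbacks:

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical client using the regmap_config .read/.write bulk callbacks. */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/regmap.h>
#include <linux/string.h>

struct example_chip {
        struct device *dev;
        /* a transport handle (mailbox, firmware channel, ...) would live here */
};

/* Bulk write: @data holds the formatted register + value bytes, @count long. */
static int example_regmap_write(void *context, const void *data, size_t count)
{
        struct example_chip *chip = context;

        dev_dbg(chip->dev, "write %zu bytes\n", count);
        return 0;               /* would forward the buffer to the transport */
}

/* Bulk read: send @reg_buf (@reg_size bytes), then read back @val_size bytes. */
static int example_regmap_read(void *context, const void *reg_buf,
                               size_t reg_size, void *val_buf, size_t val_size)
{
        struct example_chip *chip = context;

        dev_dbg(chip->dev, "read %zu bytes\n", val_size);
        memset(val_buf, 0, val_size);   /* would come from the transport */
        return 0;
}

static const struct regmap_config example_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
        .read = example_regmap_read,
        .write = example_regmap_write,
        .max_raw_read = 256,
        .max_raw_write = 256,
};

static int example_probe(struct example_chip *chip)
{
        struct regmap *map;

        /* No regmap_bus here: the config callbacks above do the raw I/O. */
        map = devm_regmap_init(chip->dev, NULL, chip, &example_regmap_config);
        return PTR_ERR_OR_ZERO(map);
}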
