Diffstat (limited to 'drivers/base')
37 files changed, 2445 insertions, 1289 deletions
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index 3e63a900b330..059700ea3521 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -191,83 +191,6 @@ config DMA_FENCE_TRACE lockup related problems for dma-buffers shared across multiple devices. -config DMA_CMA - bool "DMA Contiguous Memory Allocator" - depends on HAVE_DMA_CONTIGUOUS && CMA - help - This enables the Contiguous Memory Allocator which allows drivers - to allocate big physically-contiguous blocks of memory for use with - hardware components that do not support I/O map nor scatter-gather. - - You can disable CMA by specifying "cma=0" on the kernel's command - line. - - For more information see <include/linux/dma-contiguous.h>. - If unsure, say "n". - -if DMA_CMA -comment "Default contiguous memory area size:" - -config CMA_SIZE_MBYTES - int "Size in Mega Bytes" - depends on !CMA_SIZE_SEL_PERCENTAGE - default 0 if X86 - default 16 - help - Defines the size (in MiB) of the default memory area for Contiguous - Memory Allocator. If the size of 0 is selected, CMA is disabled by - default, but it can be enabled by passing cma=size[MG] to the kernel. - - -config CMA_SIZE_PERCENTAGE - int "Percentage of total memory" - depends on !CMA_SIZE_SEL_MBYTES - default 0 if X86 - default 10 - help - Defines the size of the default memory area for Contiguous Memory - Allocator as a percentage of the total memory in the system. - If 0 percent is selected, CMA is disabled by default, but it can be - enabled by passing cma=size[MG] to the kernel. - -choice - prompt "Selected region size" - default CMA_SIZE_SEL_MBYTES - -config CMA_SIZE_SEL_MBYTES - bool "Use mega bytes value only" - -config CMA_SIZE_SEL_PERCENTAGE - bool "Use percentage value only" - -config CMA_SIZE_SEL_MIN - bool "Use lower value (minimum)" - -config CMA_SIZE_SEL_MAX - bool "Use higher value (maximum)" - -endchoice - -config CMA_ALIGNMENT - int "Maximum PAGE_SIZE order of alignment for contiguous buffers" - range 4 12 - default 8 - help - DMA mapping framework by default aligns all buffers to the smallest - PAGE_SIZE order which is greater than or equal to the requested buffer - size. This works well for buffers up to a few hundreds kilobytes, but - for larger buffers it just a memory waste. With this parameter you can - specify the maximum PAGE_SIZE order for contiguous buffers. Larger - buffers will be aligned only to this specified order. The order is - expressed as a power of two multiplied by the PAGE_SIZE. - - For example, if your system defaults to 4KiB pages, the order value - of 8 means that the buffers will be aligned up to 1MiB only. - - If unsure, leave the default value "8". 
- -endif - config GENERIC_ARCH_TOPOLOGY bool help diff --git a/drivers/base/Makefile b/drivers/base/Makefile index 704f44295810..157452080f3d 100644 --- a/drivers/base/Makefile +++ b/drivers/base/Makefile @@ -6,7 +6,7 @@ obj-y := component.o core.o bus.o dd.o syscore.o \ cpu.o firmware.o init.o map.o devres.o \ attribute_container.o transport_class.o \ topology.o container.o property.o cacheinfo.o \ - devcon.o + devcon.o swnode.o obj-$(CONFIG_DEVTMPFS) += devtmpfs.o obj-y += power/ obj-$(CONFIG_ISA_BUS_API) += isa.o diff --git a/drivers/base/base.h b/drivers/base/base.h index 7a419a7a6235..b405436ee28e 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h @@ -60,12 +60,17 @@ struct driver_private { * @knode_parent - node in sibling list * @knode_driver - node in driver list * @knode_bus - node in bus list + * @knode_class - node in class list * @deferred_probe - entry in deferred_probe_list which is used to retry the * binding of drivers which were unable to get all the resources needed by * the device; typically because it depends on another driver getting * probed first. + * @async_driver - pointer to device driver awaiting probe via async_probe * @device - pointer back to the struct device that this structure is * associated with. + * @dead - This device is currently either in the process of or has been + * removed from the system. Any asynchronous events scheduled for this + * device should exit without taking any action. * * Nothing outside of the driver core should ever touch these fields. */ @@ -74,8 +79,11 @@ struct device_private { struct klist_node knode_parent; struct klist_node knode_driver; struct klist_node knode_bus; + struct klist_node knode_class; struct list_head deferred_probe; + struct device_driver *async_driver; struct device *device; + u8 dead:1; }; #define to_device_private_parent(obj) \ container_of(obj, struct device_private, knode_parent) @@ -83,6 +91,8 @@ struct device_private { container_of(obj, struct device_private, knode_driver) #define to_device_private_bus(obj) \ container_of(obj, struct device_private, knode_bus) +#define to_device_private_class(obj) \ + container_of(obj, struct device_private, knode_class) /* initialisation functions */ extern int devices_init(void); @@ -124,6 +134,8 @@ extern int driver_add_groups(struct device_driver *drv, const struct attribute_group **groups); extern void driver_remove_groups(struct device_driver *drv, const struct attribute_group **groups); +int device_driver_attach(struct device_driver *drv, struct device *dev); +void device_driver_detach(struct device *dev); extern char *make_class_name(const char *name, struct kobject *kobj); diff --git a/drivers/base/bus.c b/drivers/base/bus.c index 8bfd27ec73d6..0a58e969f8b7 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -31,6 +31,9 @@ static struct kset *system_kset; #define to_drv_attr(_attr) container_of(_attr, struct driver_attribute, attr) +#define DRIVER_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \ + struct driver_attribute driver_attr_##_name = \ + __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) static int __must_check bus_rescan_devices_helper(struct device *dev, void *data); @@ -184,18 +187,14 @@ static ssize_t unbind_store(struct device_driver *drv, const char *buf, dev = bus_find_device_by_name(bus, NULL, buf); if (dev && dev->driver == drv) { - if (dev->parent && dev->bus->need_parent_lock) - device_lock(dev->parent); - device_release_driver(dev); - if (dev->parent && dev->bus->need_parent_lock) - device_unlock(dev->parent); + 
device_driver_detach(dev); err = count; } put_device(dev); bus_put(bus); return err; } -static DRIVER_ATTR_WO(unbind); +static DRIVER_ATTR_IGNORE_LOCKDEP(unbind, S_IWUSR, NULL, unbind_store); /* * Manually attach a device to a driver. @@ -211,13 +210,7 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf, dev = bus_find_device_by_name(bus, NULL, buf); if (dev && dev->driver == NULL && driver_match_device(drv, dev)) { - if (dev->parent && bus->need_parent_lock) - device_lock(dev->parent); - device_lock(dev); - err = driver_probe_device(drv, dev); - device_unlock(dev); - if (dev->parent && bus->need_parent_lock) - device_unlock(dev->parent); + err = device_driver_attach(drv, dev); if (err > 0) { /* success */ @@ -231,14 +224,14 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf, bus_put(bus); return err; } -static DRIVER_ATTR_WO(bind); +static DRIVER_ATTR_IGNORE_LOCKDEP(bind, S_IWUSR, NULL, bind_store); -static ssize_t show_drivers_autoprobe(struct bus_type *bus, char *buf) +static ssize_t drivers_autoprobe_show(struct bus_type *bus, char *buf) { return sprintf(buf, "%d\n", bus->p->drivers_autoprobe); } -static ssize_t store_drivers_autoprobe(struct bus_type *bus, +static ssize_t drivers_autoprobe_store(struct bus_type *bus, const char *buf, size_t count) { if (buf[0] == '0') @@ -248,7 +241,7 @@ static ssize_t store_drivers_autoprobe(struct bus_type *bus, return count; } -static ssize_t store_drivers_probe(struct bus_type *bus, +static ssize_t drivers_probe_store(struct bus_type *bus, const char *buf, size_t count) { struct device *dev; @@ -583,9 +576,8 @@ static void remove_bind_files(struct device_driver *drv) driver_remove_file(drv, &driver_attr_unbind); } -static BUS_ATTR(drivers_probe, S_IWUSR, NULL, store_drivers_probe); -static BUS_ATTR(drivers_autoprobe, S_IWUSR | S_IRUGO, - show_drivers_autoprobe, store_drivers_autoprobe); +static BUS_ATTR_WO(drivers_probe); +static BUS_ATTR_RW(drivers_autoprobe); static int add_probe_files(struct bus_type *bus) { @@ -611,21 +603,12 @@ static void remove_probe_files(struct bus_type *bus) static ssize_t uevent_store(struct device_driver *drv, const char *buf, size_t count) { - kobject_synth_uevent(&drv->p->kobj, buf, count); - return count; -} -static DRIVER_ATTR_WO(uevent); - -static void driver_attach_async(void *_drv, async_cookie_t cookie) -{ - struct device_driver *drv = _drv; - int ret; - - ret = driver_attach(drv); + int rc; - pr_debug("bus: '%s': driver %s async attach completed: %d\n", - drv->bus->name, drv->name, ret); + rc = kobject_synth_uevent(&drv->p->kobj, buf, count); + return rc ? rc : count; } +static DRIVER_ATTR_WO(uevent); /** * bus_add_driver - Add a driver to the bus. 
@@ -659,15 +642,9 @@ int bus_add_driver(struct device_driver *drv) klist_add_tail(&priv->knode_bus, &bus->p->klist_drivers); if (drv->bus->p->drivers_autoprobe) { - if (driver_allows_async_probing(drv)) { - pr_debug("bus: '%s': probing driver %s asynchronously\n", - drv->bus->name, drv->name); - async_schedule(driver_attach_async, drv); - } else { - error = driver_attach(drv); - if (error) - goto out_unregister; - } + error = driver_attach(drv); + if (error) + goto out_unregister; } module_add_driver(drv->owner, drv); @@ -769,13 +746,8 @@ EXPORT_SYMBOL_GPL(bus_rescan_devices); */ int device_reprobe(struct device *dev) { - if (dev->driver) { - if (dev->parent && dev->bus->need_parent_lock) - device_lock(dev->parent); - device_release_driver(dev); - if (dev->parent && dev->bus->need_parent_lock) - device_unlock(dev->parent); - } + if (dev->driver) + device_driver_detach(dev); return bus_rescan_devices_helper(dev, NULL); } EXPORT_SYMBOL_GPL(device_reprobe); @@ -828,10 +800,19 @@ static void klist_devices_put(struct klist_node *n) static ssize_t bus_uevent_store(struct bus_type *bus, const char *buf, size_t count) { - kobject_synth_uevent(&bus->p->subsys.kobj, buf, count); - return count; + int rc; + + rc = kobject_synth_uevent(&bus->p->subsys.kobj, buf, count); + return rc ? rc : count; } -static BUS_ATTR(uevent, S_IWUSR, NULL, bus_uevent_store); +/* + * "open code" the old BUS_ATTR() macro here. We want to use BUS_ATTR_WO() + * here, but cannot use it as earlier in the file we have + * DRIVER_ATTR_WO(uevent), which would cause a clash with the store + * function name. + */ +static struct bus_attribute bus_attr_uevent = __ATTR(uevent, S_IWUSR, NULL, + bus_uevent_store); /** * bus_register - register a driver-core subsystem diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index cf78fa6d470d..a7359535caf5 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -79,8 +79,7 @@ static void cache_size(struct cacheinfo *this_leaf, struct device_node *np) ct_idx = get_cacheinfo_idx(this_leaf->type); propname = cache_type_info[ct_idx].size_prop; - if (of_property_read_u32(np, propname, &this_leaf->size)) - this_leaf->size = 0; + of_property_read_u32(np, propname, &this_leaf->size); } /* not cache_line_size() because that's a macro in include/linux/cache.h */ @@ -114,8 +113,7 @@ static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np) ct_idx = get_cacheinfo_idx(this_leaf->type); propname = cache_type_info[ct_idx].nr_sets_prop; - if (of_property_read_u32(np, propname, &this_leaf->number_of_sets)) - this_leaf->number_of_sets = 0; + of_property_read_u32(np, propname, &this_leaf->number_of_sets); } static void cache_associativity(struct cacheinfo *this_leaf) diff --git a/drivers/base/class.c b/drivers/base/class.c index 54def4e02f00..d8a6a5864c2e 100644 --- a/drivers/base/class.c +++ b/drivers/base/class.c @@ -117,16 +117,22 @@ static void class_put(struct class *cls) kset_put(&cls->p->subsys); } +static struct device *klist_class_to_dev(struct klist_node *n) +{ + struct device_private *p = to_device_private_class(n); + return p->device; +} + static void klist_class_dev_get(struct klist_node *n) { - struct device *dev = container_of(n, struct device, knode_class); + struct device *dev = klist_class_to_dev(n); get_device(dev); } static void klist_class_dev_put(struct klist_node *n) { - struct device *dev = container_of(n, struct device, knode_class); + struct device *dev = klist_class_to_dev(n); put_device(dev); } @@ -277,7 +283,7 @@ void 
class_dev_iter_init(struct class_dev_iter *iter, struct class *class, struct klist_node *start_knode = NULL; if (start) - start_knode = &start->knode_class; + start_knode = &start->p->knode_class; klist_iter_init_node(&class->p->klist_devices, &iter->ki, start_knode); iter->type = type; } @@ -304,7 +310,7 @@ struct device *class_dev_iter_next(struct class_dev_iter *iter) knode = klist_next(&iter->ki); if (!knode) return NULL; - dev = container_of(knode, struct device, knode_class); + dev = klist_class_to_dev(knode); if (!iter->type || iter->type == dev->type) return dev; } diff --git a/drivers/base/component.c b/drivers/base/component.c index e8d676fad0c9..532a3a5d8f63 100644 --- a/drivers/base/component.c +++ b/drivers/base/component.c @@ -16,11 +16,38 @@ #include <linux/slab.h> #include <linux/debugfs.h> +/** + * DOC: overview + * + * The component helper allows drivers to collect a pile of sub-devices, + * including their bound drivers, into an aggregate driver. Various subsystems + * already provide functions to get hold of such components, e.g. + * of_clk_get_by_name(). The component helper can be used when such a + * subsystem-specific way to find a device is not available: The component + * helper fills the niche of aggregate drivers for specific hardware, where + * further standardization into a subsystem would not be practical. The common + * example is when a logical device (e.g. a DRM display driver) is spread around + * the SoC on various components (scanout engines, blending blocks, transcoders + * for various outputs and so on). + * + * The component helper also doesn't solve runtime dependencies, e.g. for system + * suspend and resume operations. See also :ref:`device links<device_link>`. + * + * Components are registered using component_add() and unregistered with + * component_del(), usually from the driver's probe and disconnect functions. + * + * Aggregate drivers first assemble a component match list of what they need + * using component_match_add(). This is then registered as an aggregate driver + * using component_master_add_with_match(), and unregistered using + * component_master_del(). 
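+ *
+ * A minimal sketch of the aggregate side (editor's illustration; compare_of(),
+ * my_probe(), my_master_ops and some_child_node are hypothetical names, not
+ * part of this patch)::
+ *
+ *	static int compare_of(struct device *dev, void *data)
+ *	{
+ *		return dev->of_node == data;
+ *	}
+ *
+ *	static int my_probe(struct platform_device *pdev)
+ *	{
+ *		struct component_match *match = NULL;
+ *
+ *		component_match_add(&pdev->dev, &match, compare_of,
+ *				    some_child_node);
+ *		return component_master_add_with_match(&pdev->dev,
+ *						       &my_master_ops, match);
+ *	}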
+ */ + struct component; struct component_match_array { void *data; int (*compare)(struct device *, void *); + int (*compare_typed)(struct device *, int, void *); void (*release)(struct device *, void *); struct component *component; bool duplicate; @@ -48,6 +75,7 @@ struct component { bool bound; const struct component_ops *ops; + int subcomponent; struct device *dev; }; @@ -85,17 +113,7 @@ static int component_devices_show(struct seq_file *s, void *data) return 0; } -static int component_devices_open(struct inode *inode, struct file *file) -{ - return single_open(file, component_devices_show, inode->i_private); -} - -static const struct file_operations component_devices_fops = { - .open = component_devices_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(component_devices); static int __init component_debug_init(void) { @@ -142,7 +160,7 @@ static struct master *__master_find(struct device *dev, } static struct component *find_component(struct master *master, - int (*compare)(struct device *, void *), void *compare_data) + struct component_match_array *mc) { struct component *c; @@ -150,7 +168,11 @@ static struct component *find_component(struct master *master, if (c->master && c->master != master) continue; - if (compare(c->dev, compare_data)) + if (mc->compare && mc->compare(c->dev, mc->data)) + return c; + + if (mc->compare_typed && + mc->compare_typed(c->dev, c->subcomponent, mc->data)) return c; } @@ -176,7 +198,7 @@ static int find_components(struct master *master) if (match->compare[i].component) continue; - c = find_component(master, mc->compare, mc->data); + c = find_component(master, mc); if (!c) { ret = -ENXIO; break; @@ -311,15 +333,12 @@ static int component_match_realloc(struct device *dev, return 0; } -/* - * Add a component to be matched, with a release function. - * - * The match array is first created or extended if necessary. - */ -void component_match_add_release(struct device *master, +static void __component_match_add(struct device *master, struct component_match **matchptr, void (*release)(struct device *, void *), - int (*compare)(struct device *, void *), void *compare_data) + int (*compare)(struct device *, void *), + int (*compare_typed)(struct device *, int, void *), + void *compare_data) { struct component_match *match = *matchptr; @@ -351,13 +370,69 @@ void component_match_add_release(struct device *master, } match->compare[match->num].compare = compare; + match->compare[match->num].compare_typed = compare_typed; match->compare[match->num].release = release; match->compare[match->num].data = compare_data; match->compare[match->num].component = NULL; match->num++; } + +/** + * component_match_add_release - add a component match entry with release callback + * @master: device with the aggregate driver + * @matchptr: pointer to the list of component matches + * @release: release function for @compare_data + * @compare: compare function to match against all components + * @compare_data: opaque pointer passed to the @compare function + * + * Adds a new component match to the list stored in @matchptr, which the @master + * aggregate driver needs to function. The list of component matches pointed to + * by @matchptr must be initialized to NULL before adding the first match. This + * only matches against components added with component_add(). 
+ * + * The allocated match list in @matchptr is automatically released using devm + * actions, whereupon @release will be called to free any references held by + * @compare_data, e.g. when @compare_data is a &device_node that must be + * released with of_node_put(). + * + * See also component_match_add() and component_match_add_typed(). + */ +void component_match_add_release(struct device *master, + struct component_match **matchptr, + void (*release)(struct device *, void *), + int (*compare)(struct device *, void *), void *compare_data) +{ + __component_match_add(master, matchptr, release, compare, NULL, + compare_data); +} EXPORT_SYMBOL(component_match_add_release); +/** + * component_match_add_typed - add a component match entry for a typed component + * @master: device with the aggregate driver + * @matchptr: pointer to the list of component matches + * @compare_typed: compare function to match against all typed components + * @compare_data: opaque pointer passed to the @compare function + * + * Adds a new component match to the list stored in @matchptr, which the @master + * aggregate driver needs to function. The list of component matches pointed to + * by @matchptr must be initialized to NULL before adding the first match. This + * only matches against components added with component_add_typed(). + * + * The allocated match list in @matchptr is automatically released using devm + * actions. + * + * See also component_match_add() and component_match_add_release(). + */ +void component_match_add_typed(struct device *master, + struct component_match **matchptr, + int (*compare_typed)(struct device *, int, void *), void *compare_data) +{ + __component_match_add(master, matchptr, NULL, NULL, compare_typed, + compare_data); +} +EXPORT_SYMBOL(component_match_add_typed); + static void free_master(struct master *master) { struct component_match *match = master->match; @@ -377,6 +452,18 @@ static void free_master(struct master *master) kfree(master); } +/** + * component_master_add_with_match - register an aggregate driver + * @dev: device with the aggregate driver + * @ops: callbacks for the aggregate driver + * @match: component match list for the aggregate driver + * + * Registers a new aggregate driver consisting of the components added to @match + * by calling one of the component_match_add() functions. Once all components in + * @match are available, it will be assembled by calling + * &component_master_ops.bind from @ops. Must be unregistered by calling + * component_master_del(). + */ int component_master_add_with_match(struct device *dev, const struct component_master_ops *ops, struct component_match *match) @@ -413,6 +500,15 @@ int component_master_add_with_match(struct device *dev, } EXPORT_SYMBOL_GPL(component_master_add_with_match); +/** + * component_master_del - unregister an aggregate driver + * @dev: device with the aggregate driver + * @ops: callbacks for the aggregate driver + * + * Unregisters an aggregate driver registered with + * component_master_add_with_match(). If necessary the aggregate driver is first + * disassembled by calling &component_master_ops.unbind from @ops. 
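+ *
+ * Typically paired with component_master_add_with_match() in the aggregate
+ * driver's remove path (editor's sketch; my_remove() and my_master_ops are
+ * hypothetical names, not part of this patch)::
+ *
+ *	static int my_remove(struct platform_device *pdev)
+ *	{
+ *		component_master_del(&pdev->dev, &my_master_ops);
+ *		return 0;
+ *	}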
+ */ void component_master_del(struct device *dev, const struct component_master_ops *ops) { @@ -440,6 +536,15 @@ static void component_unbind(struct component *component, devres_release_group(component->dev, component); } +/** + * component_unbind_all - unbind all components of an aggregate driver + * @master_dev: device with the aggregate driver + * @data: opaque pointer, passed to all components + * + * Unbinds all components of the aggregate @master_dev by passing @data to their + * &component_ops.unbind functions. Should be called from + * &component_master_ops.unbind. + */ void component_unbind_all(struct device *master_dev, void *data) { struct master *master; @@ -513,6 +618,15 @@ static int component_bind(struct component *component, struct master *master, return ret; } +/** + * component_bind_all - bind all components of an aggregate driver + * @master_dev: device with the aggregate driver + * @data: opaque pointer, passed to all components + * + * Binds all components of the aggregate @master_dev by passing @data to their + * &component_ops.bind functions. Should be called from + * &component_master_ops.bind. + */ int component_bind_all(struct device *master_dev, void *data) { struct master *master; @@ -547,7 +661,8 @@ int component_bind_all(struct device *master_dev, void *data) } EXPORT_SYMBOL_GPL(component_bind_all); -int component_add(struct device *dev, const struct component_ops *ops) +static int __component_add(struct device *dev, const struct component_ops *ops, + int subcomponent) { struct component *component; int ret; @@ -558,6 +673,7 @@ int component_add(struct device *dev, const struct component_ops *ops) component->ops = ops; component->dev = dev; + component->subcomponent = subcomponent; dev_dbg(dev, "adding component (ops %ps)\n", ops); @@ -576,8 +692,66 @@ int component_add(struct device *dev, const struct component_ops *ops) return ret < 0 ? ret : 0; } + +/** + * component_add_typed - register a component + * @dev: component device + * @ops: component callbacks + * @subcomponent: nonzero identifier for subcomponents + * + * Register a new component for @dev. Functions in @ops will be called when the + * aggregate driver is ready to bind the overall driver by calling + * component_bind_all(). See also &struct component_ops. + * + * @subcomponent must be nonzero and is used to differentiate between multiple + * components registered on the same device @dev. These components are matched + * using component_match_add_typed(). + * + * The component needs to be unregistered at driver unload/disconnect by + * calling component_del(). + * + * See also component_add(). + */ +int component_add_typed(struct device *dev, const struct component_ops *ops, + int subcomponent) +{ + if (WARN_ON(subcomponent == 0)) + return -EINVAL; + + return __component_add(dev, ops, subcomponent); +} +EXPORT_SYMBOL_GPL(component_add_typed); + +/** + * component_add - register a component + * @dev: component device + * @ops: component callbacks + * + * Register a new component for @dev. Functions in @ops will be called when the + * aggregate driver is ready to bind the overall driver by calling + * component_bind_all(). See also &struct component_ops. + * + * The component needs to be unregistered at driver unload/disconnect by + * calling component_del(). + * + * See also component_add_typed() for a variant that allows multiple different + * components on the same device. 
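+ *
+ * A minimal component-side sketch (editor's illustration; my_bind, my_unbind
+ * and my_component_probe() are hypothetical names, not part of this patch)::
+ *
+ *	static const struct component_ops my_component_ops = {
+ *		.bind = my_bind,
+ *		.unbind = my_unbind,
+ *	};
+ *
+ *	static int my_component_probe(struct platform_device *pdev)
+ *	{
+ *		return component_add(&pdev->dev, &my_component_ops);
+ *	}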
+ */ +int component_add(struct device *dev, const struct component_ops *ops) +{ + return __component_add(dev, ops, 0); +} EXPORT_SYMBOL_GPL(component_add); +/** + * component_del - unregister a component + * @dev: component device + * @ops: component callbacks + * + * Unregister a component added with component_add(). If the component is bound + * into an aggregate driver, this will force the entire aggregate driver, including + * all its components, to be unbound. + */ void component_del(struct device *dev, const struct component_ops *ops) { struct component *c, *component = NULL; diff --git a/drivers/base/core.c b/drivers/base/core.c index 04bbcd779e11..4aeaa0c92bda 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -8,6 +8,7 @@ * Copyright (c) 2006 Novell, Inc. */ +#include <linux/acpi.h> #include <linux/device.h> #include <linux/err.h> #include <linux/fwnode.h> @@ -178,10 +179,31 @@ void device_pm_move_to_tail(struct device *dev) * of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be * ignored. * - * If the DL_FLAG_AUTOREMOVE_CONSUMER is set, the link will be removed - * automatically when the consumer device driver unbinds from it. - * The combination of both DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_STATELESS - * set is invalid and will cause NULL to be returned. + * If DL_FLAG_STATELESS is set in @flags, the link is not going to be managed by + * the driver core and, in particular, the caller of this function is expected + * to drop the reference to the link acquired by it directly. + * + * If that flag is not set, however, the caller of this function is handing the + * management of the link over to the driver core entirely and its return value + * can only be used to check whether or not the link is present. In that case, + * the DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_AUTOREMOVE_SUPPLIER device link + * flags can be used to indicate to the driver core when the link can be safely + * deleted. Namely, setting one of them in @flags indicates to the driver core + * that the link is not going to be used (by the given caller of this function) + * after unbinding the consumer or supplier driver, respectively, from its + * device, so the link can be deleted at that point. If none of them is set, + * the link will be maintained until one of the devices pointed to by it (either + * the consumer or the supplier) is unregistered. + * + * Also, if DL_FLAG_STATELESS, DL_FLAG_AUTOREMOVE_CONSUMER and + * DL_FLAG_AUTOREMOVE_SUPPLIER are not set in @flags (that is, a persistent + * managed device link is being added), the DL_FLAG_AUTOPROBE_CONSUMER flag can + * be used to request the driver core to automatically probe for a consumer + * driver after successfully binding a driver to the supplier device. + * + * The combination of DL_FLAG_STATELESS and either DL_FLAG_AUTOREMOVE_CONSUMER + * or DL_FLAG_AUTOREMOVE_SUPPLIER set in @flags at the same time is invalid and + * will cause NULL to be returned upfront. 
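+ *
+ * For example, a consumer driver might add a managed link to its supplier
+ * during probe and let the driver core drop it when the consumer unbinds
+ * (editor's sketch; consumer_dev and supplier_dev are hypothetical pointers,
+ * not part of this patch)::
+ *
+ *	link = device_link_add(consumer_dev, supplier_dev,
+ *			       DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER);
+ *	if (!link)
+ *		return -ENODEV;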
* * A side effect of the link creation is re-ordering of dpm_list and the * devices_kset list by moving the consumer device and all devices depending @@ -198,10 +220,22 @@ struct device_link *device_link_add(struct device *consumer, struct device_link *link; if (!consumer || !supplier || - ((flags & DL_FLAG_STATELESS) && - (flags & DL_FLAG_AUTOREMOVE_CONSUMER))) + (flags & DL_FLAG_STATELESS && + flags & (DL_FLAG_AUTOREMOVE_CONSUMER | + DL_FLAG_AUTOREMOVE_SUPPLIER | + DL_FLAG_AUTOPROBE_CONSUMER)) || + (flags & DL_FLAG_AUTOPROBE_CONSUMER && + flags & (DL_FLAG_AUTOREMOVE_CONSUMER | + DL_FLAG_AUTOREMOVE_SUPPLIER))) return NULL; + if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) { + if (pm_runtime_get_sync(supplier) < 0) { + pm_runtime_put_noidle(supplier); + return NULL; + } + } + device_links_write_lock(); device_pm_lock(); @@ -216,35 +250,71 @@ struct device_link *device_link_add(struct device *consumer, goto out; } - list_for_each_entry(link, &supplier->links.consumers, s_node) - if (link->consumer == consumer) { + /* + * DL_FLAG_AUTOREMOVE_SUPPLIER indicates that the link will be needed + * longer than for DL_FLAG_AUTOREMOVE_CONSUMER and setting them both + * together doesn't make sense, so prefer DL_FLAG_AUTOREMOVE_SUPPLIER. + */ + if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) + flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER; + + list_for_each_entry(link, &supplier->links.consumers, s_node) { + if (link->consumer != consumer) + continue; + + /* + * Don't return a stateless link if the caller wants a stateful + * one and vice versa. + */ + if (WARN_ON((flags & DL_FLAG_STATELESS) != (link->flags & DL_FLAG_STATELESS))) { + link = NULL; + goto out; + } + + if (flags & DL_FLAG_PM_RUNTIME) { + if (!(link->flags & DL_FLAG_PM_RUNTIME)) { + pm_runtime_new_link(consumer); + link->flags |= DL_FLAG_PM_RUNTIME; + } + if (flags & DL_FLAG_RPM_ACTIVE) + refcount_inc(&link->rpm_active); + } + + if (flags & DL_FLAG_STATELESS) { kref_get(&link->kref); goto out; } + /* + * If the life time of the link following from the new flags is + * longer than indicated by the flags of the existing link, + * update the existing link to stay around longer. + */ + if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) { + if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) { + link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER; + link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER; + } + } else if (!(flags & DL_FLAG_AUTOREMOVE_CONSUMER)) { + link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER | + DL_FLAG_AUTOREMOVE_SUPPLIER); + } + goto out; + } + link = kzalloc(sizeof(*link), GFP_KERNEL); if (!link) goto out; + refcount_set(&link->rpm_active, 1); + if (flags & DL_FLAG_PM_RUNTIME) { - if (flags & DL_FLAG_RPM_ACTIVE) { - if (pm_runtime_get_sync(supplier) < 0) { - pm_runtime_put_noidle(supplier); - kfree(link); - link = NULL; - goto out; - } - link->rpm_active = true; - } + if (flags & DL_FLAG_RPM_ACTIVE) + refcount_inc(&link->rpm_active); + pm_runtime_new_link(consumer); - /* - * If the link is being added by the consumer driver at probe - * time, balance the decrementation of the supplier's runtime PM - * usage counter after consumer probe in driver_probe_device(). 
- */ - if (consumer->links.status == DL_DEV_PROBING) - pm_runtime_get_noresume(supplier); } + get_device(supplier); link->supplier = supplier; INIT_LIST_HEAD(&link->s_node); @@ -259,17 +329,26 @@ struct device_link *device_link_add(struct device *consumer, link->status = DL_STATE_NONE; } else { switch (supplier->links.status) { - case DL_DEV_DRIVER_BOUND: + case DL_DEV_PROBING: switch (consumer->links.status) { case DL_DEV_PROBING: /* - * Some callers expect the link creation during - * consumer driver probe to resume the supplier - * even without DL_FLAG_RPM_ACTIVE. + * A consumer driver can create a link to a + * supplier that has not completed its probing + * yet as long as it knows that the supplier is + * already functional (for example, it has just + * acquired some resources from the supplier). */ - if (flags & DL_FLAG_PM_RUNTIME) - pm_runtime_resume(supplier); - + link->status = DL_STATE_CONSUMER_PROBE; + break; + default: + link->status = DL_STATE_DORMANT; + break; + } + break; + case DL_DEV_DRIVER_BOUND: + switch (consumer->links.status) { + case DL_DEV_PROBING: link->status = DL_STATE_CONSUMER_PROBE; break; case DL_DEV_DRIVER_BOUND: @@ -290,6 +369,14 @@ struct device_link *device_link_add(struct device *consumer, } /* + * Some callers expect the link creation during consumer driver probe to + * resume the supplier even without DL_FLAG_RPM_ACTIVE. + */ + if (link->status == DL_STATE_CONSUMER_PROBE && + flags & DL_FLAG_PM_RUNTIME) + pm_runtime_resume(supplier); + + /* * Move the consumer and all of the devices depending on it to the end * of dpm_list and the devices_kset list. * @@ -301,17 +388,24 @@ struct device_link *device_link_add(struct device *consumer, list_add_tail_rcu(&link->s_node, &supplier->links.consumers); list_add_tail_rcu(&link->c_node, &consumer->links.suppliers); - dev_info(consumer, "Linked as a consumer to %s\n", dev_name(supplier)); + dev_dbg(consumer, "Linked as a consumer to %s\n", dev_name(supplier)); out: device_pm_unlock(); device_links_write_unlock(); + + if ((flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) && !link) + pm_runtime_put(supplier); + return link; } EXPORT_SYMBOL_GPL(device_link_add); static void device_link_free(struct device_link *link) { + while (refcount_dec_not_one(&link->rpm_active)) + pm_runtime_put(link->supplier); + put_device(link->consumer); put_device(link->supplier); kfree(link); @@ -327,8 +421,8 @@ static void __device_link_del(struct kref *kref) { struct device_link *link = container_of(kref, struct device_link, kref); - dev_info(link->consumer, "Dropping the link to %s\n", - dev_name(link->supplier)); + dev_dbg(link->consumer, "Dropping the link to %s\n", + dev_name(link->supplier)); if (link->flags & DL_FLAG_PM_RUNTIME) pm_runtime_drop_link(link->consumer); @@ -354,8 +448,16 @@ static void __device_link_del(struct kref *kref) } #endif /* !CONFIG_SRCU */ +static void device_link_put_kref(struct device_link *link) +{ + if (link->flags & DL_FLAG_STATELESS) + kref_put(&link->kref, __device_link_del); + else + WARN(1, "Unable to drop a managed device link reference\n"); +} + /** - * device_link_del - Delete a link between two devices. + * device_link_del - Delete a stateless link between two devices. * @link: Device link to delete. 
* * The caller must ensure proper synchronization of this function with runtime @@ -367,14 +469,14 @@ void device_link_del(struct device_link *link) { device_links_write_lock(); device_pm_lock(); - kref_put(&link->kref, __device_link_del); + device_link_put_kref(link); device_pm_unlock(); device_links_write_unlock(); } EXPORT_SYMBOL_GPL(device_link_del); /** - * device_link_remove - remove a link between two devices. + * device_link_remove - Delete a stateless link between two devices. * @consumer: Consumer end of the link. * @supplier: Supplier end of the link. * @@ -393,7 +495,7 @@ void device_link_remove(void *consumer, struct device *supplier) list_for_each_entry(link, &supplier->links.consumers, s_node) { if (link->consumer == consumer) { - kref_put(&link->kref, __device_link_del); + device_link_put_kref(link); break; } } @@ -473,8 +575,21 @@ void device_links_driver_bound(struct device *dev) if (link->flags & DL_FLAG_STATELESS) continue; + /* + * Links created during consumer probe may be in the "consumer + * probe" state to start with if the supplier is still probing + * when they are created and they may become "active" if the + * consumer probe returns first. Skip them here. + */ + if (link->status == DL_STATE_CONSUMER_PROBE || + link->status == DL_STATE_ACTIVE) + continue; + WARN_ON(link->status != DL_STATE_DORMANT); WRITE_ONCE(link->status, DL_STATE_AVAILABLE); + + if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER) + driver_deferred_probe_add(link->consumer); } list_for_each_entry(link, &dev->links.suppliers, c_node) { @@ -511,18 +626,49 @@ static void __device_links_no_driver(struct device *dev) continue; if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) - kref_put(&link->kref, __device_link_del); - else if (link->status != DL_STATE_SUPPLIER_UNBIND) + __device_link_del(&link->kref); + else if (link->status == DL_STATE_CONSUMER_PROBE || + link->status == DL_STATE_ACTIVE) WRITE_ONCE(link->status, DL_STATE_AVAILABLE); } dev->links.status = DL_DEV_NO_DRIVER; } +/** + * device_links_no_driver - Update links after failing driver probe. + * @dev: Device whose driver has just failed to probe. + * + * Clean up leftover links to consumers for @dev and invoke + * %__device_links_no_driver() to update links to suppliers for it as + * appropriate. + * + * Links with the DL_FLAG_STATELESS flag set are ignored. + */ void device_links_no_driver(struct device *dev) { + struct device_link *link; + device_links_write_lock(); + + list_for_each_entry(link, &dev->links.consumers, s_node) { + if (link->flags & DL_FLAG_STATELESS) + continue; + + /* + * The probe has failed, so if the status of the link is + * "consumer probe" or "active", it must have been added by + * a probing consumer while this device was still probing. + * Change its state to "dormant", as it represents a valid + * relationship, but it is not functionally meaningful. 
+ */ + if (link->status == DL_STATE_CONSUMER_PROBE || + link->status == DL_STATE_ACTIVE) + WRITE_ONCE(link->status, DL_STATE_DORMANT); + } + __device_links_no_driver(dev); + device_links_write_unlock(); } @@ -538,11 +684,11 @@ void device_links_no_driver(struct device *dev) */ void device_links_driver_cleanup(struct device *dev) { - struct device_link *link; + struct device_link *link, *ln; device_links_write_lock(); - list_for_each_entry(link, &dev->links.consumers, s_node) { + list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) { if (link->flags & DL_FLAG_STATELESS) continue; @@ -556,7 +702,7 @@ void device_links_driver_cleanup(struct device *dev) */ if (link->status == DL_STATE_SUPPLIER_UNBIND && link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER) - kref_put(&link->kref, __device_link_del); + __device_link_del(&link->kref); WRITE_ONCE(link->status, DL_STATE_DORMANT); } @@ -728,6 +874,26 @@ static inline int device_is_not_partition(struct device *dev) } #endif +static int +device_platform_notify(struct device *dev, enum kobject_action action) +{ + int ret; + + ret = acpi_platform_notify(dev, action); + if (ret) + return ret; + + ret = software_node_notify(dev, action); + if (ret) + return ret; + + if (platform_notify && action == KOBJ_ADD) + platform_notify(dev); + else if (platform_notify_remove && action == KOBJ_REMOVE) + platform_notify_remove(dev); + return 0; +} + /** * dev_driver_string - Return a device's driver name, if at all possible * @dev: struct device to get the name of @@ -794,10 +960,12 @@ ssize_t device_store_ulong(struct device *dev, const char *buf, size_t size) { struct dev_ext_attribute *ea = to_ext_attr(attr); - char *end; - unsigned long new = simple_strtoul(buf, &end, 0); - if (end == buf) - return -EINVAL; + int ret; + unsigned long new; + + ret = kstrtoul(buf, 0, &new); + if (ret) + return ret; *(unsigned long *)(ea->var) = new; /* Always return full write size even if we didn't consume all */ return size; @@ -818,9 +986,14 @@ ssize_t device_store_int(struct device *dev, const char *buf, size_t size) { struct dev_ext_attribute *ea = to_ext_attr(attr); - char *end; - long new = simple_strtol(buf, &end, 0); - if (end == buf || new > INT_MAX || new < INT_MIN) + int ret; + long new; + + ret = kstrtol(buf, 0, &new); + if (ret) + return ret; + + if (new > INT_MAX || new < INT_MIN) return -EINVAL; *(int *)(ea->var) = new; /* Always return full write size even if we didn't consume all */ @@ -890,8 +1063,7 @@ static void device_release(struct kobject *kobj) else if (dev->class && dev->class->dev_release) dev->class->dev_release(dev); else - WARN(1, KERN_ERR "Device '%s' does not have a release() " - "function, it is broken and must be fixed.\n", + WARN(1, KERN_ERR "Device '%s' does not have a release() function, it is broken and must be fixed. 
See Documentation/kobject.txt.\n", dev_name(dev)); kfree(p); } @@ -1067,8 +1239,14 @@ out: static ssize_t uevent_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - if (kobject_synth_uevent(&dev->kobj, buf, count)) + int rc; + + rc = kobject_synth_uevent(&dev->kobj, buf, count); + + if (rc) { dev_err(dev, "uevent: failed to send synthetic uevent\n"); + return rc; + } return count; } @@ -1883,8 +2061,9 @@ int device_add(struct device *dev) } /* notify platform of device entry */ - if (platform_notify) - platform_notify(dev); + error = device_platform_notify(dev, KOBJ_ADD); + if (error) + goto platform_error; error = device_create_file(dev, &dev_attr_uevent); if (error) @@ -1932,7 +2111,7 @@ int device_add(struct device *dev) if (dev->class) { mutex_lock(&dev->class->p->mutex); /* tie the class to the device */ - klist_add_tail(&dev->knode_class, + klist_add_tail(&dev->p->knode_class, &dev->class->p->klist_devices); /* notify any interfaces that the device is here */ @@ -1960,6 +2139,8 @@ done: SymlinkError: device_remove_file(dev, &dev_attr_uevent); attrError: + device_platform_notify(dev, KOBJ_REMOVE); +platform_error: kobject_uevent(&dev->kobj, KOBJ_REMOVE); glue_dir = get_glue_dir(dev); kobject_del(&dev->kobj); @@ -2044,6 +2225,17 @@ void device_del(struct device *dev) struct kobject *glue_dir = NULL; struct class_interface *class_intf; + /* + * Hold the device lock and set the "dead" flag to guarantee that + * the update behavior is consistent with the other bitfields near + * it and that we cannot have an asynchronous probe routine trying + * to run while we are tearing out the bus/class/sysfs from + * underneath the device. + */ + device_lock(dev); + dev->p->dead = true; + device_unlock(dev); + /* Notify clients of device removal. This call must come * before dpm_sysfs_remove(). */ @@ -2069,7 +2261,7 @@ void device_del(struct device *dev) if (class_intf->remove_dev) class_intf->remove_dev(dev, class_intf); /* remove the device from the class list */ - klist_del(&dev->knode_class); + klist_del(&dev->p->knode_class); mutex_unlock(&dev->class->p->mutex); } device_remove_file(dev, &dev_attr_uevent); @@ -2077,14 +2269,10 @@ void device_del(struct device *dev) bus_remove_device(dev); device_pm_remove(dev); driver_deferred_probe_del(dev); + device_platform_notify(dev, KOBJ_REMOVE); device_remove_properties(dev); device_links_purge(dev); - /* Notify the platform of the removal, in case they - * need to do anything... 
- */ - if (platform_notify_remove) - platform_notify_remove(dev); if (dev->bus) blocking_notifier_call_chain(&dev->bus->p->bus_notifier, BUS_NOTIFY_REMOVED_DEVICE, dev); diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index eb9443d5bae1..668139cfa664 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -409,6 +409,7 @@ static void device_create_release(struct device *dev) kfree(dev); } +__printf(4, 0) static struct device * __cpu_device_create(struct device *parent, void *drvdata, const struct attribute_group **groups, @@ -427,6 +428,7 @@ __cpu_device_create(struct device *parent, void *drvdata, dev->parent = parent; dev->groups = groups; dev->release = device_create_release; + device_set_pm_not_required(dev); dev_set_drvdata(dev, drvdata); retval = kobject_set_name_vargs(&dev->kobj, fmt, args); diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 169412ee4ae8..a823f469e53f 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -57,6 +57,10 @@ static atomic_t deferred_trigger_count = ATOMIC_INIT(0); static struct dentry *deferred_devices; static bool initcalls_done; +/* Save the async probe drivers' name from kernel cmdline */ +#define ASYNC_DRV_NAMES_MAX_LEN 256 +static char async_probe_drv_names[ASYNC_DRV_NAMES_MAX_LEN]; + /* * In some cases, like suspend to RAM or hibernation, It might be reasonable * to prohibit probing of devices as it could be unsafe. @@ -116,7 +120,7 @@ static void deferred_probe_work_func(struct work_struct *work) } static DECLARE_WORK(deferred_probe_work, deferred_probe_work_func); -static void driver_deferred_probe_add(struct device *dev) +void driver_deferred_probe_add(struct device *dev) { mutex_lock(&deferred_probe_mutex); if (list_empty(&dev->p->deferred_probe)) { @@ -179,7 +183,7 @@ static void driver_deferred_probe_trigger(void) } /** - * device_block_probing() - Block/defere device's probes + * device_block_probing() - Block/defer device's probes * * It will disable probing of devices and defer their probes instead. */ @@ -223,7 +227,10 @@ DEFINE_SHOW_ATTRIBUTE(deferred_devs); static int deferred_probe_timeout = -1; static int __init deferred_probe_timeout_setup(char *str) { - deferred_probe_timeout = simple_strtol(str, NULL, 10); + int timeout; + + if (!kstrtoint(str, 10, &timeout)) + deferred_probe_timeout = timeout; return 1; } __setup("deferred_probe_timeout=", deferred_probe_timeout_setup); @@ -453,7 +460,7 @@ static int really_probe(struct device *dev, struct device_driver *drv) if (defer_all_probes) { /* * Value of defer_all_probes can be set only by - * device_defer_all_probes_enable() which, in turn, will call + * device_block_probing() which, in turn, will call * wait_for_device_probe() right after that to avoid any races. */ dev_dbg(dev, "Driver %s force probe deferral\n", drv->name); @@ -671,6 +678,23 @@ int driver_probe_device(struct device_driver *drv, struct device *dev) return ret; } +static inline bool cmdline_requested_async_probing(const char *drv_name) +{ + return parse_option_str(async_probe_drv_names, drv_name); +} + +/* The option format is "driver_async_probe=drv_name1,drv_name2,..." 
*/ +static int __init save_async_options(char *buf) +{ + if (strlen(buf) >= ASYNC_DRV_NAMES_MAX_LEN) + printk(KERN_WARNING + "Too long list of driver names for 'driver_async_probe'!\n"); + + strlcpy(async_probe_drv_names, buf, ASYNC_DRV_NAMES_MAX_LEN); + return 0; +} +__setup("driver_async_probe=", save_async_options); + bool driver_allows_async_probing(struct device_driver *drv) { switch (drv->probe_type) { @@ -681,6 +705,9 @@ bool driver_allows_async_probing(struct device_driver *drv) return false; default: + if (cmdline_requested_async_probing(drv->name)) + return true; + if (module_requested_async_probing(drv->owner)) return true; @@ -728,15 +755,6 @@ static int __device_attach_driver(struct device_driver *drv, void *_data) bool async_allowed; int ret; - /* - * Check if device has already been claimed. This may - * happen with driver loading, device discovery/registration, - * and deferred probe processing happens all at once with - * multiple threads. - */ - if (dev->driver) - return -EBUSY; - ret = driver_match_device(drv, dev); if (ret == 0) { /* no match */ @@ -771,6 +789,15 @@ static void __device_attach_async_helper(void *_dev, async_cookie_t cookie) device_lock(dev); + /* + * Check if device has already been removed or claimed. This may + * happen with driver loading, device discovery/registration, + * and deferred probe processing happens all at once with + * multiple threads. + */ + if (dev->p->dead || dev->driver) + goto out_unlock; + if (dev->parent) pm_runtime_get_sync(dev->parent); @@ -781,7 +808,7 @@ static void __device_attach_async_helper(void *_dev, async_cookie_t cookie) if (dev->parent) pm_runtime_put(dev->parent); - +out_unlock: device_unlock(dev); put_device(dev); @@ -826,7 +853,7 @@ static int __device_attach(struct device *dev, bool allow_async) */ dev_dbg(dev, "scheduling asynchronous probe\n"); get_device(dev); - async_schedule(__device_attach_async_helper, dev); + async_schedule_dev(__device_attach_async_helper, dev); } else { pm_request_idle(dev); } @@ -864,6 +891,88 @@ void device_initial_probe(struct device *dev) __device_attach(dev, true); } +/* + * __device_driver_lock - acquire locks needed to manipulate dev->drv + * @dev: Device we will update driver info for + * @parent: Parent device. Needed if the bus requires parent lock + * + * This function will take the required locks for manipulating dev->drv. + * Normally this will just be the @dev lock, but when called for a USB + * interface, @parent lock will be held as well. + */ +static void __device_driver_lock(struct device *dev, struct device *parent) +{ + if (parent && dev->bus->need_parent_lock) + device_lock(parent); + device_lock(dev); +} + +/* + * __device_driver_unlock - release locks needed to manipulate dev->drv + * @dev: Device we will update driver info for + * @parent: Parent device. Needed if the bus requires parent lock + * + * This function will release the required locks for manipulating dev->drv. + * Normally this will just be the @dev lock, but when called for a + * USB interface, @parent lock will be released as well. + */ +static void __device_driver_unlock(struct device *dev, struct device *parent) +{ + device_unlock(dev); + if (parent && dev->bus->need_parent_lock) + device_unlock(parent); +} + +/** + * device_driver_attach - attach a specific driver to a specific device + * @drv: Driver to attach + * @dev: Device to attach it to + * + * Manually attach driver to a device. Will acquire both @dev lock and + * @dev->parent lock if needed. 
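+ *
+ * Roughly equivalent to the previously open-coded sequence (editor's sketch,
+ * assuming a bus with need_parent_lock set; compare the function body
+ * below)::
+ *
+ *	device_lock(dev->parent);
+ *	device_lock(dev);
+ *	if (!dev->p->dead && !dev->driver)
+ *		ret = driver_probe_device(drv, dev);
+ *	device_unlock(dev);
+ *	device_unlock(dev->parent);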
+ */ +int device_driver_attach(struct device_driver *drv, struct device *dev) +{ + int ret = 0; + + __device_driver_lock(dev, dev->parent); + + /* + * If device has been removed or someone has already successfully + * bound a driver before us just skip the driver probe call. + */ + if (!dev->p->dead && !dev->driver) + ret = driver_probe_device(drv, dev); + + __device_driver_unlock(dev, dev->parent); + + return ret; +} + +static void __driver_attach_async_helper(void *_dev, async_cookie_t cookie) +{ + struct device *dev = _dev; + struct device_driver *drv; + int ret = 0; + + __device_driver_lock(dev, dev->parent); + + drv = dev->p->async_driver; + + /* + * If device has been removed or someone has already successfully + * bound a driver before us just skip the driver probe call. + */ + if (!dev->p->dead && !dev->driver) + ret = driver_probe_device(drv, dev); + + __device_driver_unlock(dev, dev->parent); + + dev_dbg(dev, "driver %s async attach completed: %d\n", drv->name, ret); + + put_device(dev); +} + static int __driver_attach(struct device *dev, void *data) { struct device_driver *drv = data; @@ -891,14 +1000,26 @@ static int __driver_attach(struct device *dev, void *data) return ret; } /* ret > 0 means positive match */ - if (dev->parent && dev->bus->need_parent_lock) - device_lock(dev->parent); - device_lock(dev); - if (!dev->driver) - driver_probe_device(drv, dev); - device_unlock(dev); - if (dev->parent && dev->bus->need_parent_lock) - device_unlock(dev->parent); + if (driver_allows_async_probing(drv)) { + /* + * Instead of probing the device synchronously we will + * probe it asynchronously to allow for more parallelism. + * + * We only take the device lock here in order to guarantee + * that the dev->driver and async_driver fields are protected + */ + dev_dbg(dev, "probing driver %s asynchronously\n", drv->name); + device_lock(dev); + if (!dev->driver) { + get_device(dev); + dev->p->async_driver = drv; + async_schedule_dev(__driver_attach_async_helper, dev); + } + device_unlock(dev); + return 0; + } + + device_driver_attach(drv, dev); return 0; } @@ -928,19 +1049,12 @@ static void __device_release_driver(struct device *dev, struct device *parent) drv = dev->driver; if (drv) { - if (driver_allows_async_probing(drv)) - async_synchronize_full(); - while (device_links_busy(dev)) { - device_unlock(dev); - if (parent) - device_unlock(parent); + __device_driver_unlock(dev, parent); device_links_unbind_consumers(dev); - if (parent) - device_lock(parent); - device_lock(dev); + __device_driver_lock(dev, parent); /* * A concurrent invocation of the same function might * have released the driver successfully while this one @@ -968,9 +1082,9 @@ static void __device_release_driver(struct device *dev, struct device *parent) drv->remove(dev); device_links_driver_cleanup(dev); - arch_teardown_dma_ops(dev); devres_release_all(dev); + arch_teardown_dma_ops(dev); dev->driver = NULL; dev_set_drvdata(dev, NULL); if (dev->pm_domain && dev->pm_domain->dismiss) @@ -993,16 +1107,12 @@ void device_release_driver_internal(struct device *dev, struct device_driver *drv, struct device *parent) { - if (parent && dev->bus->need_parent_lock) - device_lock(parent); + __device_driver_lock(dev, parent); - device_lock(dev); if (!drv || drv == dev->driver) __device_release_driver(dev, parent); - device_unlock(dev); - if (parent && dev->bus->need_parent_lock) - device_unlock(parent); + __device_driver_unlock(dev, parent); } /** @@ -1028,6 +1138,18 @@ void device_release_driver(struct device *dev) 
EXPORT_SYMBOL_GPL(device_release_driver); /** + * device_driver_detach - detach driver from a specific device + * @dev: device to detach driver from + * + * Detach driver from device. Will acquire both @dev lock and @dev->parent + * lock if needed. + */ +void device_driver_detach(struct device *dev) +{ + device_release_driver_internal(dev, NULL, dev->parent); +} + +/** * driver_detach - detach driver from all devices it controls. * @drv: driver. */ @@ -1036,6 +1158,9 @@ void driver_detach(struct device_driver *drv) struct device_private *dev_prv; struct device *dev; + if (driver_allows_async_probing(drv)) + async_synchronize_full(); + for (;;) { spin_lock(&drv->p->klist_devices.k_lock); if (list_empty(&drv->p->klist_devices.k_list)) { diff --git a/drivers/base/devcon.c b/drivers/base/devcon.c index d427e806cd73..04db9ae235e4 100644 --- a/drivers/base/devcon.c +++ b/drivers/base/devcon.c @@ -7,10 +7,37 @@ */ #include <linux/device.h> +#include <linux/property.h> static DEFINE_MUTEX(devcon_lock); static LIST_HEAD(devcon_list); +typedef void *(*devcon_match_fn_t)(struct device_connection *con, int ep, + void *data); + +static void * +fwnode_graph_devcon_match(struct fwnode_handle *fwnode, const char *con_id, + void *data, devcon_match_fn_t match) +{ + struct device_connection con = { .id = con_id }; + struct fwnode_handle *ep; + void *ret; + + fwnode_graph_for_each_endpoint(fwnode, ep) { + con.fwnode = fwnode_graph_get_remote_port_parent(ep); + if (!fwnode_device_is_available(con.fwnode)) + continue; + + ret = match(&con, -1, data); + fwnode_handle_put(con.fwnode); + if (ret) { + fwnode_handle_put(ep); + return ret; + } + } + return NULL; +} + /** * device_connection_find_match - Find physical connection to a device * @dev: Device with the connection @@ -23,10 +50,9 @@ static LIST_HEAD(devcon_list); * caller is expecting to be returned. */ void *device_connection_find_match(struct device *dev, const char *con_id, - void *data, - void *(*match)(struct device_connection *con, - int ep, void *data)) + void *data, devcon_match_fn_t match) { + struct fwnode_handle *fwnode = dev_fwnode(dev); const char *devname = dev_name(dev); struct device_connection *con; void *ret = NULL; @@ -35,6 +61,12 @@ void *device_connection_find_match(struct device *dev, const char *con_id, if (!match) return NULL; + if (fwnode) { + ret = fwnode_graph_devcon_match(fwnode, con_id, data, match); + if (ret) + return ret; + } + mutex_lock(&devcon_lock); list_for_each_entry(con, &devcon_list, list) { @@ -75,12 +107,36 @@ static struct bus_type *generic_match_buses[] = { NULL, }; +static int device_fwnode_match(struct device *dev, void *fwnode) +{ + return dev_fwnode(dev) == fwnode; +} + +static void *device_connection_fwnode_match(struct device_connection *con) +{ + struct bus_type *bus; + struct device *dev; + + for (bus = generic_match_buses[0]; bus; bus++) { + dev = bus_find_device(bus, NULL, (void *)con->fwnode, + device_fwnode_match); + if (dev && !strncmp(dev_name(dev), con->id, strlen(con->id))) + return dev; + + put_device(dev); + } + return NULL; +} + /* This tries to find the device from the most common bus types by name. 
*/ static void *generic_match(struct device_connection *con, int ep, void *data) { struct bus_type *bus; struct device *dev; + if (con->fwnode) + return device_connection_fwnode_match(con); + for (bus = generic_match_buses[0]; bus; bus++) { dev = bus_find_device_by_name(bus, NULL, con->endpoint[ep]); if (dev) diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c index b93fc862d365..0dbc43068eeb 100644 --- a/drivers/base/devtmpfs.c +++ b/drivers/base/devtmpfs.c @@ -25,6 +25,7 @@ #include <linux/sched.h> #include <linux/slab.h> #include <linux/kthread.h> +#include <uapi/linux/mount.h> #include "base.h" static struct task_struct *thread; diff --git a/drivers/base/firmware_loader/Makefile b/drivers/base/firmware_loader/Makefile index a97eeb0be1d8..0b2dfa6259c9 100644 --- a/drivers/base/firmware_loader/Makefile +++ b/drivers/base/firmware_loader/Makefile @@ -1,7 +1,9 @@ # SPDX-License-Identifier: GPL-2.0 # Makefile for the Linux firmware loader -obj-y := fallback_table.o +obj-$(CONFIG_FW_LOADER_USER_HELPER) += fallback_table.o obj-$(CONFIG_FW_LOADER) += firmware_class.o firmware_class-objs := main.o firmware_class-$(CONFIG_FW_LOADER_USER_HELPER) += fallback.o + +obj-y += builtin/ diff --git a/drivers/base/firmware_loader/builtin/.gitignore b/drivers/base/firmware_loader/builtin/.gitignore new file mode 100644 index 000000000000..9c8bdb9fdcc3 --- /dev/null +++ b/drivers/base/firmware_loader/builtin/.gitignore @@ -0,0 +1 @@ +*.gen.S diff --git a/drivers/base/firmware_loader/builtin/Makefile b/drivers/base/firmware_loader/builtin/Makefile new file mode 100644 index 000000000000..37e5ae387400 --- /dev/null +++ b/drivers/base/firmware_loader/builtin/Makefile @@ -0,0 +1,40 @@ +# SPDX-License-Identifier: GPL-2.0 + +# Create $(fwdir) from $(CONFIG_EXTRA_FIRMWARE_DIR) -- if it doesn't have a +# leading /, it's relative to $(srctree). +fwdir := $(subst $(quote),,$(CONFIG_EXTRA_FIRMWARE_DIR)) +fwdir := $(addprefix $(srctree)/,$(filter-out /%,$(fwdir)))$(filter /%,$(fwdir)) + +obj-y := $(addsuffix .gen.o, $(subst $(quote),,$(CONFIG_EXTRA_FIRMWARE))) + +FWNAME = $(patsubst $(obj)/%.gen.S,%,$@) +FWSTR = $(subst /,_,$(subst .,_,$(subst -,_,$(FWNAME)))) +ASM_WORD = $(if $(CONFIG_64BIT),.quad,.long) +ASM_ALIGN = $(if $(CONFIG_64BIT),3,2) +PROGBITS = $(if $(CONFIG_ARM),%,@)progbits + +filechk_fwbin = \ + echo "/* Generated by $(src)/Makefile */" ;\ + echo " .section .rodata" ;\ + echo " .p2align $(ASM_ALIGN)" ;\ + echo "_fw_$(FWSTR)_bin:" ;\ + echo " .incbin \"$(fwdir)/$(FWNAME)\"" ;\ + echo "_fw_end:" ;\ + echo " .section .rodata.str,\"aMS\",$(PROGBITS),1" ;\ + echo " .p2align $(ASM_ALIGN)" ;\ + echo "_fw_$(FWSTR)_name:" ;\ + echo " .string \"$(FWNAME)\"" ;\ + echo " .section .builtin_fw,\"a\",$(PROGBITS)" ;\ + echo " .p2align $(ASM_ALIGN)" ;\ + echo " $(ASM_WORD) _fw_$(FWSTR)_name" ;\ + echo " $(ASM_WORD) _fw_$(FWSTR)_bin" ;\ + echo " $(ASM_WORD) _fw_end - _fw_$(FWSTR)_bin" + +$(obj)/%.gen.S: FORCE + $(call filechk,fwbin) + +# The .o files depend on the binaries directly; the .S files don't. 
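+#
+# For example (editor's illustration; "vendor/fw.bin" is a hypothetical
+# firmware name, assuming CONFIG_64BIT and a non-ARM target), the generated
+# vendor/fw.bin.gen.S produced by filechk_fwbin above would read roughly:
+#
+#	.section .rodata
+#	.p2align 3
+#   _fw_vendor_fw_bin:
+#	.incbin "<fwdir>/vendor/fw.bin"
+#   _fw_end:
+#	.section .rodata.str,"aMS",@progbits,1
+#	.p2align 3
+#   _fw_vendor_fw_bin_name:
+#	.string "vendor/fw.bin"
+#	.section .builtin_fw,"a",@progbits
+#	.p2align 3
+#	.quad _fw_vendor_fw_bin_name
+#	.quad _fw_vendor_fw_bin
+#	.quad _fw_end - _fw_vendor_fw_bin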
+$(addprefix $(obj)/, $(obj-y)): $(obj)/%.gen.o: $(fwdir)/% + +targets := $(patsubst $(obj)/%,%, \ + $(shell find $(obj) -name \*.gen.S 2>/dev/null)) diff --git a/drivers/base/firmware_loader/fallback_table.c b/drivers/base/firmware_loader/fallback_table.c index 7428659d8df9..776dd69cf5be 100644 --- a/drivers/base/firmware_loader/fallback_table.c +++ b/drivers/base/firmware_loader/fallback_table.c @@ -16,9 +16,6 @@ * firmware fallback configuration table */ -/* Module or buit-in */ -#ifdef CONFIG_FW_LOADER_USER_HELPER - static unsigned int zero; static unsigned int one = 1; @@ -51,5 +48,3 @@ struct ctl_table firmware_config_table[] = { { } }; EXPORT_SYMBOL_GPL(firmware_config_table); - -#endif diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c index 8e9213b36e31..7eaaf5ee5ba6 100644 --- a/drivers/base/firmware_loader/main.c +++ b/drivers/base/firmware_loader/main.c @@ -328,12 +328,12 @@ fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv) rc = kernel_read_file_from_path(path, &fw_priv->data, &size, msize, id); if (rc) { - if (rc == -ENOENT) - dev_dbg(device, "loading %s failed with error %d\n", - path, rc); - else + if (rc != -ENOENT) dev_warn(device, "loading %s failed with error %d\n", path, rc); + else + dev_dbg(device, "loading %s failed for no such file or directory.\n", + path); continue; } dev_dbg(device, "direct-loading %s\n", fw_priv->fw_name); diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 0e5985682642..e49028a60429 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -88,6 +88,7 @@ unsigned long __weak memory_block_size_bytes(void) { return MIN_MEMORY_BLOCK_SIZE; } +EXPORT_SYMBOL_GPL(memory_block_size_bytes); static unsigned long get_memory_block_size(void) { @@ -109,8 +110,8 @@ static unsigned long get_memory_block_size(void) * uses. */ -static ssize_t show_mem_start_phys_index(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t phys_index_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct memory_block *mem = to_memory_block(dev); unsigned long phys_index; @@ -122,8 +123,8 @@ static ssize_t show_mem_start_phys_index(struct device *dev, /* * Show whether the section of memory is likely to be hot-removable */ -static ssize_t show_mem_removable(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t removable_show(struct device *dev, struct device_attribute *attr, + char *buf) { unsigned long i, pfn; int ret = 1; @@ -146,8 +147,8 @@ out: /* * online, offline, going offline, etc. 
*/ -static ssize_t show_mem_state(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t state_show(struct device *dev, struct device_attribute *attr, + char *buf) { struct memory_block *mem = to_memory_block(dev); ssize_t len = 0; @@ -207,15 +208,15 @@ static bool pages_correctly_probed(unsigned long start_pfn) return false; if (!present_section_nr(section_nr)) { - pr_warn("section %ld pfn[%lx, %lx) not present", + pr_warn("section %ld pfn[%lx, %lx) not present\n", section_nr, pfn, pfn + PAGES_PER_SECTION); return false; } else if (!valid_section_nr(section_nr)) { - pr_warn("section %ld pfn[%lx, %lx) no valid memmap", + pr_warn("section %ld pfn[%lx, %lx) no valid memmap\n", section_nr, pfn, pfn + PAGES_PER_SECTION); return false; } else if (online_section_nr(section_nr)) { - pr_warn("section %ld pfn[%lx, %lx) is already online", + pr_warn("section %ld pfn[%lx, %lx) is already online\n", section_nr, pfn, pfn + PAGES_PER_SECTION); return false; } @@ -286,7 +287,7 @@ static int memory_subsys_online(struct device *dev) return 0; /* - * If we are called from store_mem_state(), online_type will be + * If we are called from state_store(), online_type will be * set >= 0 Otherwise we were called from the device online * attribute and need to set the online_type. */ @@ -315,9 +316,8 @@ static int memory_subsys_offline(struct device *dev) return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE); } -static ssize_t -store_mem_state(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) +static ssize_t state_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { struct memory_block *mem = to_memory_block(dev); int ret, online_type; @@ -374,7 +374,7 @@ err: * s.t. if I offline all of these sections I can then * remove the physical device? */ -static ssize_t show_phys_device(struct device *dev, +static ssize_t phys_device_show(struct device *dev, struct device_attribute *attr, char *buf) { struct memory_block *mem = to_memory_block(dev); @@ -395,7 +395,7 @@ static void print_allowed_zone(char *buf, int nid, unsigned long start_pfn, } } -static ssize_t show_valid_zones(struct device *dev, +static ssize_t valid_zones_show(struct device *dev, struct device_attribute *attr, char *buf) { struct memory_block *mem = to_memory_block(dev); @@ -435,33 +435,31 @@ out: return strlen(buf); } -static DEVICE_ATTR(valid_zones, 0444, show_valid_zones, NULL); +static DEVICE_ATTR_RO(valid_zones); #endif -static DEVICE_ATTR(phys_index, 0444, show_mem_start_phys_index, NULL); -static DEVICE_ATTR(state, 0644, show_mem_state, store_mem_state); -static DEVICE_ATTR(phys_device, 0444, show_phys_device, NULL); -static DEVICE_ATTR(removable, 0444, show_mem_removable, NULL); +static DEVICE_ATTR_RO(phys_index); +static DEVICE_ATTR_RW(state); +static DEVICE_ATTR_RO(phys_device); +static DEVICE_ATTR_RO(removable); /* * Block size attribute stuff */ -static ssize_t -print_block_size(struct device *dev, struct device_attribute *attr, - char *buf) +static ssize_t block_size_bytes_show(struct device *dev, + struct device_attribute *attr, char *buf) { return sprintf(buf, "%lx\n", get_memory_block_size()); } -static DEVICE_ATTR(block_size_bytes, 0444, print_block_size, NULL); +static DEVICE_ATTR_RO(block_size_bytes); /* * Memory auto online policy. 
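The renames in this file are what make the DEVICE_ATTR_RO()/DEVICE_ATTR_RW() conversions possible: DEVICE_ATTR_RO(x) declares dev_attr_x bound to x_show(), and DEVICE_ATTR_RW(x) additionally binds x_store(), so the callbacks must follow the <name>_show/<name>_store convention. A minimal sketch with an invented attribute:

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%d\n", 42);	/* placeholder value */
}

static ssize_t foo_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	/* parse and apply the new value here */
	return count;
}
static DEVICE_ATTR_RW(foo);	/* emits dev_attr_foo, mode 0644 */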
*/ -static ssize_t -show_auto_online_blocks(struct device *dev, struct device_attribute *attr, - char *buf) +static ssize_t auto_online_blocks_show(struct device *dev, + struct device_attribute *attr, char *buf) { if (memhp_auto_online) return sprintf(buf, "online\n"); @@ -469,9 +467,9 @@ show_auto_online_blocks(struct device *dev, struct device_attribute *attr, return sprintf(buf, "offline\n"); } -static ssize_t -store_auto_online_blocks(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t auto_online_blocks_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { if (sysfs_streq(buf, "online")) memhp_auto_online = true; @@ -483,8 +481,7 @@ store_auto_online_blocks(struct device *dev, struct device_attribute *attr, return count; } -static DEVICE_ATTR(auto_online_blocks, 0644, show_auto_online_blocks, - store_auto_online_blocks); +static DEVICE_ATTR_RW(auto_online_blocks); /* * Some architectures will have custom drivers to do this, and @@ -493,9 +490,8 @@ static DEVICE_ATTR(auto_online_blocks, 0644, show_auto_online_blocks, * and will require this interface. */ #ifdef CONFIG_ARCH_MEMORY_PROBE -static ssize_t -memory_probe_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t probe_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { u64 phys_addr; int nid, ret; @@ -510,7 +506,7 @@ memory_probe_store(struct device *dev, struct device_attribute *attr, ret = lock_device_hotplug_sysfs(); if (ret) - goto out; + return ret; nid = memory_add_physaddr_to_nid(phys_addr); ret = __add_memory(nid, phys_addr, @@ -525,7 +521,7 @@ out: return ret; } -static DEVICE_ATTR(probe, S_IWUSR, NULL, memory_probe_store); +static DEVICE_ATTR_WO(probe); #endif #ifdef CONFIG_MEMORY_FAILURE @@ -534,10 +530,9 @@ static DEVICE_ATTR(probe, S_IWUSR, NULL, memory_probe_store); */ /* Soft offline a page */ -static ssize_t -store_soft_offline_page(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t soft_offline_page_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { int ret; u64 pfn; @@ -553,10 +548,9 @@ store_soft_offline_page(struct device *dev, } /* Forcibly offline a page, including killing processes. */ -static ssize_t -store_hard_offline_page(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t hard_offline_page_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { int ret; u64 pfn; @@ -569,8 +563,8 @@ store_hard_offline_page(struct device *dev, return ret ? ret : count; } -static DEVICE_ATTR(soft_offline_page, S_IWUSR, NULL, store_soft_offline_page); -static DEVICE_ATTR(hard_offline_page, S_IWUSR, NULL, store_hard_offline_page); +static DEVICE_ATTR_WO(soft_offline_page); +static DEVICE_ATTR_WO(hard_offline_page); #endif /* @@ -688,7 +682,7 @@ static int add_memory_block(int base_section_nr) int i, ret, section_count = 0, section_nr; for (i = base_section_nr; - (i < base_section_nr + sections_per_block) && i < NR_MEM_SECTIONS; + i < base_section_nr + sections_per_block; i++) { if (!present_section_nr(i)) continue; @@ -739,7 +733,7 @@ unregister_memory(struct memory_block *memory) { BUG_ON(memory->dev.bus != &memory_subsys); - /* drop the ref. we got in remove_memory_block() */ + /* drop the ref. 
we got in remove_memory_section() */ put_device(&memory->dev); device_unregister(&memory->dev); } diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c index f39a920496fb..8da314b81eab 100644 --- a/drivers/base/platform-msi.c +++ b/drivers/base/platform-msi.c @@ -368,14 +368,16 @@ void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nvec) { struct platform_msi_priv_data *data = domain->host_data; - struct msi_desc *desc; - for_each_msi_entry(desc, data->dev) { + struct msi_desc *desc, *tmp; + for_each_msi_entry_safe(desc, tmp, data->dev) { if (WARN_ON(!desc->irq || desc->nvec_used != 1)) return; if (!(desc->irq >= virq && desc->irq < (virq + nvec))) continue; irq_domain_free_irqs_common(domain, desc->irq, 1); + list_del(&desc->list); + free_msi_entry(desc); } } diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 41b91af95afb..dab0a5abc391 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -26,6 +26,7 @@ #include <linux/clk/clk-conf.h> #include <linux/limits.h> #include <linux/property.h> +#include <linux/kmemleak.h> #include "base.h" #include "power/power.h" @@ -79,6 +80,26 @@ struct resource *platform_get_resource(struct platform_device *dev, EXPORT_SYMBOL_GPL(platform_get_resource); /** + * devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform + * device + * + * @pdev: platform device to use both for memory resource lookup as well as + * resource management + * @index: resource index + */ +#ifdef CONFIG_HAS_IOMEM +void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev, + unsigned int index) +{ + struct resource *res; + + res = platform_get_resource(pdev, IORESOURCE_MEM, index); + return devm_ioremap_resource(&pdev->dev, res); +} +EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource); +#endif /* CONFIG_HAS_IOMEM */ + +/** * platform_get_irq - get an IRQ for a device * @dev: platform device * @num: IRQ number index @@ -126,7 +147,20 @@ int platform_get_irq(struct platform_device *dev, unsigned int num) irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS); } - return r ? r->start : -ENXIO; + if (r) + return r->start; + + /* + * For the index 0 interrupt, allow falling back to GpioInt + * resources. While a device could have both Interrupt and GpioInt + * resources, making this fallback ambiguous, in many common cases + * the device will only expose one IRQ, and this fallback + * allows a common code path across either kind of resource.
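The new devm_platform_ioremap_resource() collapses the ubiquitous two-call lookup-and-map pattern in platform driver probe paths into one. A probe sketch using it (driver and register are invented):

#include <linux/platform_device.h>
#include <linux/io.h>

static int demo_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/* One call instead of platform_get_resource() followed by
	 * devm_ioremap_resource(); unmapping is device-managed.
	 */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	writel(0x1, base);	/* poke an invented enable register */
	return 0;
}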
+ */ + if (num == 0 && has_acpi_companion(&dev->dev)) + return acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num); + + return -ENXIO; #endif } EXPORT_SYMBOL_GPL(platform_get_irq); @@ -234,7 +268,7 @@ struct platform_object { */ void platform_device_put(struct platform_device *pdev) { - if (pdev) + if (!IS_ERR_OR_NULL(pdev)) put_device(&pdev->dev); } EXPORT_SYMBOL_GPL(platform_device_put); @@ -447,8 +481,7 @@ void platform_device_del(struct platform_device *pdev) { int i; - if (pdev) { - device_remove_properties(&pdev->dev); + if (!IS_ERR_OR_NULL(pdev)) { device_del(&pdev->dev); if (pdev->id_auto) { @@ -508,10 +541,12 @@ struct platform_device *platform_device_register_full( pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id); if (!pdev) - goto err_alloc; + return ERR_PTR(-ENOMEM); pdev->dev.parent = pdevinfo->parent; pdev->dev.fwnode = pdevinfo->fwnode; + pdev->dev.of_node = of_node_get(to_of_node(pdev->dev.fwnode)); + pdev->dev.of_node_reused = pdevinfo->of_node_reused; if (pdevinfo->dma_mask) { /* @@ -525,6 +560,8 @@ struct platform_device *platform_device_register_full( if (!pdev->dev.dma_mask) goto err; + kmemleak_ignore(pdev->dev.dma_mask); + *pdev->dev.dma_mask = pdevinfo->dma_mask; pdev->dev.coherent_dma_mask = pdevinfo->dma_mask; } @@ -551,8 +588,6 @@ struct platform_device *platform_device_register_full( err: ACPI_COMPANION_SET(&pdev->dev, NULL); kfree(pdev->dev.dma_mask); - -err_alloc: platform_device_put(pdev); return ERR_PTR(ret); } @@ -1138,8 +1173,7 @@ int platform_dma_configure(struct device *dev) ret = of_dma_configure(dev, dev->of_node, true); } else if (has_acpi_companion(dev)) { attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode)); - if (attr != DEV_DMA_NOT_SUPPORTED) - ret = acpi_dma_configure(dev, attr); + ret = acpi_dma_configure(dev, attr); } return ret; @@ -1179,37 +1213,6 @@ int __init platform_bus_init(void) return error; } -#ifndef ARCH_HAS_DMA_GET_REQUIRED_MASK -static u64 dma_default_get_required_mask(struct device *dev) -{ - u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT); - u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT)); - u64 mask; - - if (!high_totalram) { - /* convert to mask just covering totalram */ - low_totalram = (1 << (fls(low_totalram) - 1)); - low_totalram += low_totalram - 1; - mask = low_totalram; - } else { - high_totalram = (1 << (fls(high_totalram) - 1)); - high_totalram += high_totalram - 1; - mask = (((u64)high_totalram) << 32) + 0xffffffff; - } - return mask; -} - -u64 dma_get_required_mask(struct device *dev) -{ - const struct dma_map_ops *ops = get_dma_ops(dev); - - if (ops->get_required_mask) - return ops->get_required_mask(dev); - return dma_default_get_required_mask(dev); -} -EXPORT_SYMBOL_GPL(dma_get_required_mask); -#endif - static __initdata LIST_HEAD(early_platform_driver_list); static __initdata LIST_HEAD(early_platform_device_list); diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index 5a42ae4078c2..365ad751ce0f 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c @@ -65,10 +65,15 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce) if (IS_ERR(ce->clk)) { ce->status = PCE_STATUS_ERROR; } else { - clk_prepare(ce->clk); - ce->status = PCE_STATUS_ACQUIRED; - dev_dbg(dev, "Clock %pC con_id %s managed by runtime PM.\n", - ce->clk, ce->con_id); + if (clk_prepare(ce->clk)) { + ce->status = PCE_STATUS_ERROR; + dev_err(dev, "clk_prepare() failed\n"); + } else { + ce->status = PCE_STATUS_ACQUIRED; + dev_dbg(dev, + "Clock %pC con_id 
%s managed by runtime PM.\n", + ce->clk, ce->con_id); + } } } diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c index b413951c6abc..22aedb28aad7 100644 --- a/drivers/base/power/common.c +++ b/drivers/base/power/common.c @@ -160,7 +160,7 @@ EXPORT_SYMBOL_GPL(dev_pm_domain_attach_by_id); * For a detailed function description, see dev_pm_domain_attach_by_id(). */ struct device *dev_pm_domain_attach_by_name(struct device *dev, - char *name) + const char *name) { if (dev->pm_domain) return ERR_PTR(-EEXIST); diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 7f38a92b444a..96a6dc9d305c 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -6,6 +6,8 @@ * This file is released under the GPLv2. */ +#define pr_fmt(fmt) "PM: " fmt + #include <linux/delay.h> #include <linux/kernel.h> #include <linux/io.h> @@ -239,6 +241,127 @@ static void genpd_update_accounting(struct generic_pm_domain *genpd) static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {} #endif +static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd, + unsigned int state) +{ + struct generic_pm_domain_data *pd_data; + struct pm_domain_data *pdd; + struct gpd_link *link; + + /* New requested state is same as Max requested state */ + if (state == genpd->performance_state) + return state; + + /* New requested state is higher than Max requested state */ + if (state > genpd->performance_state) + return state; + + /* Traverse all devices within the domain */ + list_for_each_entry(pdd, &genpd->dev_list, list_node) { + pd_data = to_gpd_data(pdd); + + if (pd_data->performance_state > state) + state = pd_data->performance_state; + } + + /* + * Traverse all sub-domains within the domain. This can be + * done without any additional locking as the link->performance_state + * field is protected by the master genpd->lock, which is already taken. + * + * Also note that link->performance_state (subdomain's performance state + * requirement to master domain) is different from + * link->slave->performance_state (current performance state requirement + * of the devices/sub-domains of the subdomain) and so can have a + * different value. + * + * Note that we also take vote from powered-off sub-domains into account + * as the same is done for devices right now. 
+ */ + list_for_each_entry(link, &genpd->master_links, master_node) { + if (link->performance_state > state) + state = link->performance_state; + } + + return state; +} + +static int _genpd_set_performance_state(struct generic_pm_domain *genpd, + unsigned int state, int depth) +{ + struct generic_pm_domain *master; + struct gpd_link *link; + int master_state, ret; + + if (state == genpd->performance_state) + return 0; + + /* Propagate to masters of genpd */ + list_for_each_entry(link, &genpd->slave_links, slave_node) { + master = link->master; + + if (!master->set_performance_state) + continue; + + /* Find master's performance state */ + ret = dev_pm_opp_xlate_performance_state(genpd->opp_table, + master->opp_table, + state); + if (unlikely(ret < 0)) + goto err; + + master_state = ret; + + genpd_lock_nested(master, depth + 1); + + link->prev_performance_state = link->performance_state; + link->performance_state = master_state; + master_state = _genpd_reeval_performance_state(master, + master_state); + ret = _genpd_set_performance_state(master, master_state, depth + 1); + if (ret) + link->performance_state = link->prev_performance_state; + + genpd_unlock(master); + + if (ret) + goto err; + } + + ret = genpd->set_performance_state(genpd, state); + if (ret) + goto err; + + genpd->performance_state = state; + return 0; + +err: + /* Encountered an error, let's roll back */ + list_for_each_entry_continue_reverse(link, &genpd->slave_links, + slave_node) { + master = link->master; + + if (!master->set_performance_state) + continue; + + genpd_lock_nested(master, depth + 1); + + master_state = link->prev_performance_state; + link->performance_state = master_state; + + master_state = _genpd_reeval_performance_state(master, + master_state); + if (_genpd_set_performance_state(master, master_state, depth + 1)) { + pr_err("%s: Failed to roll back to %d performance state\n", + master->name, master_state); + } + + genpd_unlock(master); + } + + return ret; +} + /** * dev_pm_genpd_set_performance_state - Set performance state of device's power * domain. @@ -257,10 +380,9 @@ static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {} int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state) { struct generic_pm_domain *genpd; - struct generic_pm_domain_data *gpd_data, *pd_data; - struct pm_domain_data *pdd; + struct generic_pm_domain_data *gpd_data; unsigned int prev; - int ret = 0; + int ret; genpd = dev_to_genpd(dev); if (IS_ERR(genpd)) @@ -281,47 +403,11 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state) prev = gpd_data->performance_state; gpd_data->performance_state = state; - /* New requested state is same as Max requested state */ - if (state == genpd->performance_state) - goto unlock; - - /* New requested state is higher than Max requested state */ - if (state > genpd->performance_state) - goto update_state; - - /* Traverse all devices within the domain */ - list_for_each_entry(pdd, &genpd->dev_list, list_node) { - pd_data = to_gpd_data(pdd); - - if (pd_data->performance_state > state) - state = pd_data->performance_state; - } - - if (state == genpd->performance_state) - goto unlock; - - /* - * We aren't propagating performance state changes of a subdomain to its - * masters as we don't have hardware that needs it. Over that, the - * performance states of subdomain and its masters may not have - * one-to-one mapping and would require additional information. We can - * get back to this once we have hardware that needs it.
For that - * reason, we don't have to consider performance state of the subdomains - * of genpd here. - */ - -update_state: - if (genpd_status_on(genpd)) { - ret = genpd->set_performance_state(genpd, state); - if (ret) { - gpd_data->performance_state = prev; - goto unlock; - } - } - - genpd->performance_state = state; + state = _genpd_reeval_performance_state(genpd, state); + ret = _genpd_set_performance_state(genpd, state, 0); + if (ret) + gpd_data->performance_state = prev; -unlock: genpd_unlock(genpd); return ret; @@ -347,15 +433,6 @@ static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed) return ret; elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); - - if (unlikely(genpd->set_performance_state)) { - ret = genpd->set_performance_state(genpd, genpd->performance_state); - if (ret) { - pr_warn("%s: Failed to set performance state %d (%d)\n", - genpd->name, genpd->performance_state, ret); - } - } - if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns) return ret; @@ -382,19 +459,19 @@ static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed) time_start = ktime_get(); ret = genpd->power_off(genpd); - if (ret == -EBUSY) + if (ret) return ret; elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns) - return ret; + return 0; genpd->states[state_idx].power_off_latency_ns = elapsed_ns; genpd->max_off_time_changed = true; pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n", genpd->name, "off", elapsed_ns); - return ret; + return 0; } /** @@ -1392,12 +1469,12 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, if (IS_ERR(gpd_data)) return PTR_ERR(gpd_data); - genpd_lock(genpd); - ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0; if (ret) goto out; + genpd_lock(genpd); + dev_pm_domain_set(dev, &genpd->domain); genpd->device_count++; @@ -1405,9 +1482,8 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); - out: genpd_unlock(genpd); - + out: if (ret) genpd_free_dev_data(dev, gpd_data); else @@ -1456,15 +1532,15 @@ static int genpd_remove_device(struct generic_pm_domain *genpd, genpd->device_count--; genpd->max_off_time_changed = true; - if (genpd->detach_dev) - genpd->detach_dev(genpd, dev); - dev_pm_domain_set(dev, NULL); list_del_init(&pdd->list_node); genpd_unlock(genpd); + if (genpd->detach_dev) + genpd->detach_dev(genpd, dev); + genpd_free_dev_data(dev, gpd_data); return 0; @@ -1582,8 +1658,8 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING); if (!list_empty(&subdomain->master_links) || subdomain->device_count) { - pr_warn("%s: unable to remove subdomain %s\n", genpd->name, - subdomain->name); + pr_warn("%s: unable to remove subdomain %s\n", + genpd->name, subdomain->name); ret = -EBUSY; goto out; } @@ -1691,8 +1767,8 @@ int pm_genpd_init(struct generic_pm_domain *genpd, ret = genpd_set_default_power_state(genpd); if (ret) return ret; - } else if (!gov) { - pr_warn("%s : no governor for states\n", genpd->name); + } else if (!gov && genpd->state_count > 1) { + pr_warn("%s: no governor for states\n", genpd->name); } device_initialize(&genpd->dev); @@ -1907,12 +1983,21 @@ int of_genpd_add_provider_simple(struct device_node *np, ret); goto unlock; } + + /* + * Save table for faster processing while setting performance + * state. 
+ */ + genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev); + WARN_ON(!genpd->opp_table); } ret = genpd_add_provider(np, genpd_xlate_simple, genpd); if (ret) { - if (genpd->set_performance_state) + if (genpd->set_performance_state) { + dev_pm_opp_put_opp_table(genpd->opp_table); dev_pm_opp_of_remove_table(&genpd->dev); + } goto unlock; } @@ -1965,6 +2050,13 @@ int of_genpd_add_provider_onecell(struct device_node *np, i, ret); goto error; } + + /* + * Save table for faster processing while setting + * performance state. + */ + genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i); + WARN_ON(!genpd->opp_table); } genpd->provider = &np->fwnode; @@ -1989,8 +2081,10 @@ error: genpd->provider = NULL; genpd->has_provider = false; - if (genpd->set_performance_state) + if (genpd->set_performance_state) { + dev_pm_opp_put_opp_table(genpd->opp_table); dev_pm_opp_of_remove_table(&genpd->dev); + } } mutex_unlock(&gpd_list_lock); @@ -2024,6 +2118,7 @@ void of_genpd_del_provider(struct device_node *np) if (!gpd->set_performance_state) continue; + dev_pm_opp_put_opp_table(gpd->opp_table); dev_pm_opp_of_remove_table(&gpd->dev); } } @@ -2338,7 +2433,7 @@ EXPORT_SYMBOL_GPL(genpd_dev_pm_attach); struct device *genpd_dev_pm_attach_by_id(struct device *dev, unsigned int index) { - struct device *genpd_dev; + struct device *virt_dev; int num_domains; int ret; @@ -2352,31 +2447,31 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev, return NULL; /* Allocate and register device on the genpd bus. */ - genpd_dev = kzalloc(sizeof(*genpd_dev), GFP_KERNEL); - if (!genpd_dev) + virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL); + if (!virt_dev) return ERR_PTR(-ENOMEM); - dev_set_name(genpd_dev, "genpd:%u:%s", index, dev_name(dev)); - genpd_dev->bus = &genpd_bus_type; - genpd_dev->release = genpd_release_dev; + dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev)); + virt_dev->bus = &genpd_bus_type; + virt_dev->release = genpd_release_dev; - ret = device_register(genpd_dev); + ret = device_register(virt_dev); if (ret) { - kfree(genpd_dev); + kfree(virt_dev); return ERR_PTR(ret); } /* Try to attach the device to the PM domain at the specified index. */ - ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, index, false); + ret = __genpd_dev_pm_attach(virt_dev, dev->of_node, index, false); if (ret < 1) { - device_unregister(genpd_dev); + device_unregister(virt_dev); return ret ? ERR_PTR(ret) : NULL; } - pm_runtime_enable(genpd_dev); - genpd_queue_power_off_work(dev_to_genpd(genpd_dev)); + pm_runtime_enable(virt_dev); + genpd_queue_power_off_work(dev_to_genpd(virt_dev)); - return genpd_dev; + return virt_dev; } EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id); @@ -2389,7 +2484,7 @@ EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id); * power-domain-names DT property. For further description see * genpd_dev_pm_attach_by_id(). 
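From a consumer driver's perspective, the performance-state rework above changes nothing at the call site; only the framework now aggregates votes and propagates them to master domains. A usage sketch (the state value 3 is an arbitrary example):

#include <linux/pm_domain.h>

static int demo_boost(struct device *dev, bool on)
{
	/* Vote for performance state 3 of the device's power domain;
	 * 0 drops the vote. Aggregation across the domain's devices
	 * and propagation to masters happen inside genpd.
	 */
	return dev_pm_genpd_set_performance_state(dev, on ? 3 : 0);
}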
*/ -struct device *genpd_dev_pm_attach_by_name(struct device *dev, char *name) +struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name) { int index; @@ -2420,7 +2515,7 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state, &entry_latency); if (err) { pr_debug(" * %pOF missing entry-latency-us property\n", - state_node); + state_node); return -EINVAL; } @@ -2428,7 +2523,7 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state, &exit_latency); if (err) { pr_debug(" * %pOF missing exit-latency-us property\n", - state_node); + state_node); return -EINVAL; } @@ -2521,52 +2616,36 @@ int of_genpd_parse_idle_states(struct device_node *dn, EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states); /** - * of_genpd_opp_to_performance_state- Gets performance state of device's - * power domain corresponding to a DT node's "required-opps" property. + * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node. * - * @dev: Device for which the performance-state needs to be found. - * @np: DT node where the "required-opps" property is present. This can be - * the device node itself (if it doesn't have an OPP table) or a node - * within the OPP table of a device (if device has an OPP table). + * @genpd_dev: Genpd's device for which the performance-state needs to be found. + * @opp: struct dev_pm_opp of the OPP for which we need to find performance + * state. * - * Returns performance state corresponding to the "required-opps" property of - * a DT node. This calls platform specific genpd->opp_to_performance_state() - * callback to translate power domain OPP to performance state. + * Returns performance state encoded in the OPP of the genpd. This calls + * platform specific genpd->opp_to_performance_state() callback to translate + * power domain OPP to performance state. * * Returns performance state on success and 0 on failure. 
*/ -unsigned int of_genpd_opp_to_performance_state(struct device *dev, - struct device_node *np) +unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev, + struct dev_pm_opp *opp) { - struct generic_pm_domain *genpd; - struct dev_pm_opp *opp; - int state = 0; + struct generic_pm_domain *genpd = NULL; + int state; - genpd = dev_to_genpd(dev); - if (IS_ERR(genpd)) - return 0; + genpd = container_of(genpd_dev, struct generic_pm_domain, dev); - if (unlikely(!genpd->set_performance_state)) + if (unlikely(!genpd->opp_to_performance_state)) return 0; genpd_lock(genpd); - - opp = of_dev_pm_opp_find_required_opp(&genpd->dev, np); - if (IS_ERR(opp)) { - dev_err(dev, "Failed to find required OPP: %ld\n", - PTR_ERR(opp)); - goto unlock; - } - state = genpd->opp_to_performance_state(genpd, opp); - dev_pm_opp_put(opp); - -unlock: genpd_unlock(genpd); return state; } -EXPORT_SYMBOL_GPL(of_genpd_opp_to_performance_state); +EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state); static int __init genpd_bus_init(void) { @@ -2671,7 +2750,7 @@ exit: return 0; } -static int genpd_summary_show(struct seq_file *s, void *data) +static int summary_show(struct seq_file *s, void *data) { struct generic_pm_domain *genpd; int ret = 0; @@ -2694,7 +2773,7 @@ static int genpd_summary_show(struct seq_file *s, void *data) return ret; } -static int genpd_status_show(struct seq_file *s, void *data) +static int status_show(struct seq_file *s, void *data) { static const char * const status_lookup[] = { [GPD_STATE_ACTIVE] = "on", @@ -2721,7 +2800,7 @@ exit: return ret; } -static int genpd_sub_domains_show(struct seq_file *s, void *data) +static int sub_domains_show(struct seq_file *s, void *data) { struct generic_pm_domain *genpd = s->private; struct gpd_link *link; @@ -2738,7 +2817,7 @@ static int genpd_sub_domains_show(struct seq_file *s, void *data) return ret; } -static int genpd_idle_states_show(struct seq_file *s, void *data) +static int idle_states_show(struct seq_file *s, void *data) { struct generic_pm_domain *genpd = s->private; unsigned int i; @@ -2767,7 +2846,7 @@ static int genpd_idle_states_show(struct seq_file *s, void *data) return ret; } -static int genpd_active_time_show(struct seq_file *s, void *data) +static int active_time_show(struct seq_file *s, void *data) { struct generic_pm_domain *genpd = s->private; ktime_t delta = 0; @@ -2787,7 +2866,7 @@ static int genpd_active_time_show(struct seq_file *s, void *data) return ret; } -static int genpd_total_idle_time_show(struct seq_file *s, void *data) +static int total_idle_time_show(struct seq_file *s, void *data) { struct generic_pm_domain *genpd = s->private; ktime_t delta = 0, total = 0; @@ -2815,7 +2894,7 @@ static int genpd_total_idle_time_show(struct seq_file *s, void *data) } -static int genpd_devices_show(struct seq_file *s, void *data) +static int devices_show(struct seq_file *s, void *data) { struct generic_pm_domain *genpd = s->private; struct pm_domain_data *pm_data; @@ -2841,7 +2920,7 @@ static int genpd_devices_show(struct seq_file *s, void *data) return ret; } -static int genpd_perf_state_show(struct seq_file *s, void *data) +static int perf_state_show(struct seq_file *s, void *data) { struct generic_pm_domain *genpd = s->private; @@ -2854,37 +2933,14 @@ static int genpd_perf_state_show(struct seq_file *s, void *data) return 0; } -#define define_genpd_open_function(name) \ -static int genpd_##name##_open(struct inode *inode, struct file *file) \ -{ \ - return single_open(file, genpd_##name##_show, inode->i_private); \ -} - 
-define_genpd_open_function(summary); -define_genpd_open_function(status); -define_genpd_open_function(sub_domains); -define_genpd_open_function(idle_states); -define_genpd_open_function(active_time); -define_genpd_open_function(total_idle_time); -define_genpd_open_function(devices); -define_genpd_open_function(perf_state); - -#define define_genpd_debugfs_fops(name) \ -static const struct file_operations genpd_##name##_fops = { \ - .open = genpd_##name##_open, \ - .read = seq_read, \ - .llseek = seq_lseek, \ - .release = single_release, \ -} - -define_genpd_debugfs_fops(summary); -define_genpd_debugfs_fops(status); -define_genpd_debugfs_fops(sub_domains); -define_genpd_debugfs_fops(idle_states); -define_genpd_debugfs_fops(active_time); -define_genpd_debugfs_fops(total_idle_time); -define_genpd_debugfs_fops(devices); -define_genpd_debugfs_fops(perf_state); +DEFINE_SHOW_ATTRIBUTE(summary); +DEFINE_SHOW_ATTRIBUTE(status); +DEFINE_SHOW_ATTRIBUTE(sub_domains); +DEFINE_SHOW_ATTRIBUTE(idle_states); +DEFINE_SHOW_ATTRIBUTE(active_time); +DEFINE_SHOW_ATTRIBUTE(total_idle_time); +DEFINE_SHOW_ATTRIBUTE(devices); +DEFINE_SHOW_ATTRIBUTE(perf_state); static int __init genpd_debug_init(void) { @@ -2893,34 +2949,27 @@ static int __init genpd_debug_init(void) genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL); - if (!genpd_debugfs_dir) - return -ENOMEM; - - d = debugfs_create_file("pm_genpd_summary", S_IRUGO, - genpd_debugfs_dir, NULL, &genpd_summary_fops); - if (!d) - return -ENOMEM; + debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir, + NULL, &summary_fops); list_for_each_entry(genpd, &gpd_list, gpd_list_node) { d = debugfs_create_dir(genpd->name, genpd_debugfs_dir); - if (!d) - return -ENOMEM; debugfs_create_file("current_state", 0444, - d, genpd, &genpd_status_fops); + d, genpd, &status_fops); debugfs_create_file("sub_domains", 0444, - d, genpd, &genpd_sub_domains_fops); + d, genpd, &sub_domains_fops); debugfs_create_file("idle_states", 0444, - d, genpd, &genpd_idle_states_fops); + d, genpd, &idle_states_fops); debugfs_create_file("active_time", 0444, - d, genpd, &genpd_active_time_fops); + d, genpd, &active_time_fops); debugfs_create_file("total_idle_time", 0444, - d, genpd, &genpd_total_idle_time_fops); + d, genpd, &total_idle_time_fops); debugfs_create_file("devices", 0444, - d, genpd, &genpd_devices_fops); + d, genpd, &devices_fops); if (genpd->set_performance_state) debugfs_create_file("perf_state", 0444, - d, genpd, &genpd_perf_state_fops); + d, genpd, &perf_state_fops); } return 0; diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c index 99896fbf18e4..4d07e38a8247 100644 --- a/drivers/base/power/domain_governor.c +++ b/drivers/base/power/domain_governor.c @@ -128,7 +128,6 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd, off_on_time_ns = genpd->states[state].power_off_latency_ns + genpd->states[state].power_on_latency_ns; - min_off_time_ns = -1; /* * Check if subdomains can be off for enough time. diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index a690fd400260..f80d298de3fa 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -17,6 +17,8 @@ * subsystem list maintains. 
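DEFINE_SHOW_ATTRIBUTE() from linux/seq_file.h generates exactly the open wrapper and file_operations that the removed macros open-coded, plus an .owner field. Roughly, DEFINE_SHOW_ATTRIBUTE(summary) expands to:

static int summary_open(struct inode *inode, struct file *file)
{
	return single_open(file, summary_show, inode->i_private);
}

static const struct file_operations summary_fops = {
	.owner		= THIS_MODULE,
	.open		= summary_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};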
*/ +#define pr_fmt(fmt) "PM: " fmt + #include <linux/device.h> #include <linux/export.h> #include <linux/mutex.h> @@ -32,6 +34,7 @@ #include <trace/events/power.h> #include <linux/cpufreq.h> #include <linux/cpuidle.h> +#include <linux/devfreq.h> #include <linux/timer.h> #include "../base.h" @@ -123,7 +126,11 @@ void device_pm_unlock(void) */ void device_pm_add(struct device *dev) { - pr_debug("PM: Adding info for %s:%s\n", + /* Skip PM setup/initialization. */ + if (device_pm_not_required(dev)) + return; + + pr_debug("Adding info for %s:%s\n", dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); device_pm_check_callbacks(dev); mutex_lock(&dpm_list_mtx); @@ -141,7 +148,10 @@ void device_pm_add(struct device *dev) */ void device_pm_remove(struct device *dev) { - pr_debug("PM: Removing info for %s:%s\n", + if (device_pm_not_required(dev)) + return; + + pr_debug("Removing info for %s:%s\n", dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); complete_all(&dev->power.completion); mutex_lock(&dpm_list_mtx); @@ -160,7 +170,7 @@ void device_pm_remove(struct device *dev) */ void device_pm_move_before(struct device *deva, struct device *devb) { - pr_debug("PM: Moving %s:%s before %s:%s\n", + pr_debug("Moving %s:%s before %s:%s\n", deva->bus ? deva->bus->name : "No Bus", dev_name(deva), devb->bus ? devb->bus->name : "No Bus", dev_name(devb)); /* Delete deva from dpm_list and reinsert before devb. */ @@ -174,7 +184,7 @@ void device_pm_move_before(struct device *deva, struct device *devb) */ void device_pm_move_after(struct device *deva, struct device *devb) { - pr_debug("PM: Moving %s:%s after %s:%s\n", + pr_debug("Moving %s:%s after %s:%s\n", deva->bus ? deva->bus->name : "No Bus", dev_name(deva), devb->bus ? devb->bus->name : "No Bus", dev_name(devb)); /* Delete deva from dpm_list and reinsert after devb. */ @@ -187,7 +197,7 @@ void device_pm_move_after(struct device *deva, struct device *devb) */ void device_pm_move_last(struct device *dev) { - pr_debug("PM: Moving %s:%s to end of list\n", + pr_debug("Moving %s:%s to end of list\n", dev->bus ? 
dev->bus->name : "No Bus", dev_name(dev)); list_move_tail(&dev->power.entry, &dpm_list); } @@ -410,8 +420,8 @@ static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info) static void pm_dev_err(struct device *dev, pm_message_t state, const char *info, int error) { - printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n", - dev_name(dev), pm_verb(state.event), info, error); + pr_err("Device %s failed to %s%s: error %d\n", + dev_name(dev), pm_verb(state.event), info, error); } static void dpm_show_time(ktime_t starttime, pm_message_t state, int error, @@ -726,7 +736,7 @@ void dpm_noirq_resume_devices(pm_message_t state) reinit_completion(&dev->power.completion); if (is_async(dev)) { get_device(dev); - async_schedule(async_resume_noirq, dev); + async_schedule_dev(async_resume_noirq, dev); } } @@ -883,7 +893,7 @@ void dpm_resume_early(pm_message_t state) reinit_completion(&dev->power.completion); if (is_async(dev)) { get_device(dev); - async_schedule(async_resume_early, dev); + async_schedule_dev(async_resume_early, dev); } } @@ -1047,7 +1057,7 @@ void dpm_resume(pm_message_t state) reinit_completion(&dev->power.completion); if (is_async(dev)) { get_device(dev); - async_schedule(async_resume, dev); + async_schedule_dev(async_resume, dev); } } @@ -1078,6 +1088,7 @@ void dpm_resume(pm_message_t state) dpm_show_time(starttime, state, 0, NULL); cpufreq_resume(); + devfreq_resume(); trace_suspend_resume(TPS("dpm_resume"), state.event, false); } @@ -1366,7 +1377,7 @@ static int device_suspend_noirq(struct device *dev) if (is_async(dev)) { get_device(dev); - async_schedule(async_suspend_noirq, dev); + async_schedule_dev(async_suspend_noirq, dev); return 0; } return __device_suspend_noirq(dev, pm_transition, false); @@ -1569,7 +1580,7 @@ static int device_suspend_late(struct device *dev) if (is_async(dev)) { get_device(dev); - async_schedule(async_suspend_late, dev); + async_schedule_dev(async_suspend_late, dev); return 0; } @@ -1739,8 +1750,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) if (dev->power.direct_complete) { if (pm_runtime_status_suspended(dev)) { pm_runtime_disable(dev); - if (pm_runtime_status_suspended(dev)) + if (pm_runtime_status_suspended(dev)) { + pm_dev_dbg(dev, state, "direct-complete "); goto Complete; + } pm_runtime_enable(dev); } @@ -1833,7 +1846,7 @@ static int device_suspend(struct device *dev) if (is_async(dev)) { get_device(dev); - async_schedule(async_suspend, dev); + async_schedule_dev(async_suspend, dev); return 0; } @@ -1852,6 +1865,7 @@ int dpm_suspend(pm_message_t state) trace_suspend_resume(TPS("dpm_suspend"), state.event, true); might_sleep(); + devfreq_suspend(); cpufreq_suspend(); mutex_lock(&dpm_list_mtx); @@ -2010,8 +2024,7 @@ int dpm_prepare(pm_message_t state) error = 0; continue; } - printk(KERN_INFO "PM: Device %s not prepared " - "for power transition: code %d\n", + pr_info("Device %s not prepared for power transition: code %d\n", dev_name(dev), error); put_device(dev); break; @@ -2050,7 +2063,7 @@ EXPORT_SYMBOL_GPL(dpm_suspend_start); void __suspend_report_result(const char *function, void *fn, int ret) { if (ret) - printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret); + pr_err("%s(): %pF returns %d\n", function, fn, ret); } EXPORT_SYMBOL_GPL(__suspend_report_result); diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index c511def48b48..ec33fbdb919b 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h @@ -21,6 +21,7 @@ static inline void 
pm_runtime_early_init(struct device *dev) extern void pm_runtime_init(struct device *dev); extern void pm_runtime_reinit(struct device *dev); extern void pm_runtime_remove(struct device *dev); +extern u64 pm_runtime_active_time(struct device *dev); #define WAKE_IRQ_DEDICATED_ALLOCATED BIT(0) #define WAKE_IRQ_DEDICATED_MANAGED BIT(1) diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index 3382542b39b7..f80e402ef778 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -22,7 +22,7 @@ * per-device constraint data struct. * * Note about the per-device constraint data struct allocation: - * . The per-device constraints data struct ptr is tored into the device + * . The per-device constraints data struct ptr is stored into the device * dev_pm_info. * . To minimize the data usage by the per-device constraints, the data struct * is only allocated at the first call to dev_pm_qos_add_request. diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index beb85c31f3fa..977db40378b0 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -8,6 +8,8 @@ */ #include <linux/sched/mm.h> +#include <linux/ktime.h> +#include <linux/hrtimer.h> #include <linux/export.h> #include <linux/pm_runtime.h> #include <linux/pm_wakeirq.h> @@ -62,22 +64,32 @@ static int rpm_suspend(struct device *dev, int rpmflags); * runtime_status field is updated, to account the time in the old state * correctly. */ -void update_pm_runtime_accounting(struct device *dev) +static void update_pm_runtime_accounting(struct device *dev) { - unsigned long now = jiffies; - unsigned long delta; + u64 now, last, delta; - delta = now - dev->power.accounting_timestamp; + if (dev->power.disable_depth > 0) + return; + + last = dev->power.accounting_timestamp; + now = ktime_get_mono_fast_ns(); dev->power.accounting_timestamp = now; - if (dev->power.disable_depth > 0) + /* + * Because ktime_get_mono_fast_ns() is not monotonic during + * timekeeping updates, ensure that 'now' is after the last saved + * timestamp. + */ + if (now < last) return; + delta = now - last; + if (dev->power.runtime_status == RPM_SUSPENDED) - dev->power.suspended_jiffies += delta; + dev->power.suspended_time += delta; else - dev->power.active_jiffies += delta; + dev->power.active_time += delta; } static void __update_runtime_status(struct device *dev, enum rpm_status status) @@ -86,6 +98,32 @@ static void __update_runtime_status(struct device *dev, enum rpm_status status) dev->power.runtime_status = status; } +static u64 rpm_get_accounted_time(struct device *dev, bool suspended) +{ + u64 time; + unsigned long flags; + + spin_lock_irqsave(&dev->power.lock, flags); + + update_pm_runtime_accounting(dev); + time = suspended ? dev->power.suspended_time : dev->power.active_time; + + spin_unlock_irqrestore(&dev->power.lock, flags); + + return time; +} + +u64 pm_runtime_active_time(struct device *dev) +{ + return rpm_get_accounted_time(dev, false); +} + +u64 pm_runtime_suspended_time(struct device *dev) +{ + return rpm_get_accounted_time(dev, true); +} +EXPORT_SYMBOL_GPL(pm_runtime_suspended_time); + /** * pm_runtime_deactivate_timer - Deactivate given device's suspend timer. * @dev: Device to handle.
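The accounted times are now kept in nanoseconds, so readers convert on output. A sketch of a consumer turning the new accessor's result into milliseconds, mirroring what the sysfs handlers further down in this patch do (the function name is invented, and it assumes the accessor is visible to the caller):

#include <linux/ktime.h>
#include <linux/pm_runtime.h>
#include <asm/div64.h>

static u64 demo_suspended_ms(struct device *dev)
{
	u64 ns = pm_runtime_suspended_time(dev);

	/* do_div() instead of '/' because a plain 64-bit division
	 * is not available on every 32-bit architecture.
	 */
	do_div(ns, NSEC_PER_MSEC);
	return ns;
}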
@@ -93,7 +131,7 @@ static void __update_runtime_status(struct device *dev, enum rpm_status status) static void pm_runtime_deactivate_timer(struct device *dev) { if (dev->power.timer_expires > 0) { - del_timer(&dev->power.suspend_timer); + hrtimer_try_to_cancel(&dev->power.suspend_timer); dev->power.timer_expires = 0; } } @@ -119,43 +157,29 @@ static void pm_runtime_cancel_pending(struct device *dev) * Compute the autosuspend-delay expiration time based on the device's * power.last_busy time. If the delay has already expired or is disabled * (negative) or the power.use_autosuspend flag isn't set, return 0. - * Otherwise return the expiration time in jiffies (adjusted to be nonzero). + * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero). * * This function may be called either with or without dev->power.lock held. * Either way it can be racy, since power.last_busy may be updated at any time. */ -unsigned long pm_runtime_autosuspend_expiration(struct device *dev) +u64 pm_runtime_autosuspend_expiration(struct device *dev) { int autosuspend_delay; - long elapsed; - unsigned long last_busy; - unsigned long expires = 0; + u64 expires; if (!dev->power.use_autosuspend) - goto out; + return 0; autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay); if (autosuspend_delay < 0) - goto out; - - last_busy = READ_ONCE(dev->power.last_busy); - elapsed = jiffies - last_busy; - if (elapsed < 0) - goto out; /* jiffies has wrapped around. */ + return 0; - /* - * If the autosuspend_delay is >= 1 second, align the timer by rounding - * up to the nearest second. - */ - expires = last_busy + msecs_to_jiffies(autosuspend_delay); - if (autosuspend_delay >= 1000) - expires = round_jiffies(expires); - expires += !expires; - if (elapsed >= expires - last_busy) - expires = 0; /* Already expired. */ + expires = READ_ONCE(dev->power.last_busy); + expires += (u64)autosuspend_delay * NSEC_PER_MSEC; + if (expires > ktime_get_mono_fast_ns()) + return expires; /* Expires in the future */ - out: - return expires; + return 0; } EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration); @@ -268,11 +292,8 @@ static int rpm_get_suppliers(struct device *dev) list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) { int retval; - if (!(link->flags & DL_FLAG_PM_RUNTIME)) - continue; - - if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND || - link->rpm_active) + if (!(link->flags & DL_FLAG_PM_RUNTIME) || + READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND) continue; retval = pm_runtime_get_sync(link->supplier); @@ -281,7 +302,7 @@ static int rpm_get_suppliers(struct device *dev) pm_runtime_put_noidle(link->supplier); return retval; } - link->rpm_active = true; + refcount_inc(&link->rpm_active); } return 0; } @@ -290,12 +311,13 @@ static void rpm_put_suppliers(struct device *dev) { struct device_link *link; - list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) - if (link->rpm_active && - READ_ONCE(link->status) != DL_STATE_SUPPLIER_UNBIND) { + list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) { + if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND) + continue; + + while (refcount_dec_not_one(&link->rpm_active)) pm_runtime_put(link->supplier); - link->rpm_active = false; - } + } } /** @@ -515,7 +537,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) /* If the autosuspend_delay time hasn't expired yet, reschedule. 
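The deadline is now simply power.last_busy plus the delay, both in nanoseconds, with the old jiffies rounding dropped. The driver-facing autosuspend API that feeds last_busy is unchanged; for context, the usual pattern looks like this (sketch, invented functions):

#include <linux/pm_runtime.h>

static void demo_setup(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 50);	/* ms */
	pm_runtime_use_autosuspend(dev);
}

static void demo_io_done(struct device *dev)
{
	/* Stamps power.last_busy, from which
	 * pm_runtime_autosuspend_expiration() derives the deadline.
	 */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}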
*/ if ((rpmflags & RPM_AUTO) && dev->power.runtime_status != RPM_SUSPENDING) { - unsigned long expires = pm_runtime_autosuspend_expiration(dev); + u64 expires = pm_runtime_autosuspend_expiration(dev); if (expires != 0) { /* Pending requests need to be canceled. */ @@ -528,10 +550,20 @@ static int rpm_suspend(struct device *dev, int rpmflags) * expire; pm_suspend_timer_fn() will take care of the * rest. */ - if (!(dev->power.timer_expires && time_before_eq( - dev->power.timer_expires, expires))) { + if (!(dev->power.timer_expires && + dev->power.timer_expires <= expires)) { + /* + * We add a slack of 25% to gather wakeups + * without sacrificing the granularity. + */ + u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) * + (NSEC_PER_MSEC >> 2); + dev->power.timer_expires = expires; - mod_timer(&dev->power.suspend_timer, expires); + hrtimer_start_range_ns(&dev->power.suspend_timer, + ns_to_ktime(expires), + slack, + HRTIMER_MODE_ABS); } dev->power.timer_autosuspends = 1; goto out; @@ -895,23 +927,28 @@ static void pm_runtime_work(struct work_struct *work) * * Check if the time is right and queue a suspend request. */ -static void pm_suspend_timer_fn(struct timer_list *t) +static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer) { - struct device *dev = from_timer(dev, t, power.suspend_timer); + struct device *dev = container_of(timer, struct device, power.suspend_timer); unsigned long flags; - unsigned long expires; + u64 expires; spin_lock_irqsave(&dev->power.lock, flags); expires = dev->power.timer_expires; - /* If 'expire' is after 'jiffies' we've been called too early. */ - if (expires > 0 && !time_after(expires, jiffies)) { + /* + * If 'expires' is after the current time, we've been called + * too early. + */ + if (expires > 0 && expires < ktime_get_mono_fast_ns()) { dev->power.timer_expires = 0; rpm_suspend(dev, dev->power.timer_autosuspends ? (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC); } spin_unlock_irqrestore(&dev->power.lock, flags); + + return HRTIMER_NORESTART; } /** @@ -922,6 +959,7 @@ static void pm_suspend_timer_fn(struct timer_list *t) int pm_schedule_suspend(struct device *dev, unsigned int delay) { unsigned long flags; + u64 expires; int retval; spin_lock_irqsave(&dev->power.lock, flags); @@ -938,10 +976,10 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay) /* Other scheduled or pending requests need to be canceled. */ pm_runtime_cancel_pending(dev); - dev->power.timer_expires = jiffies + msecs_to_jiffies(delay); - dev->power.timer_expires += !dev->power.timer_expires; + expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC; + dev->power.timer_expires = expires; dev->power.timer_autosuspends = 0; - mod_timer(&dev->power.suspend_timer, dev->power.timer_expires); + hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS); out: spin_unlock_irqrestore(&dev->power.lock, flags); @@ -1084,24 +1122,57 @@ EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use); * and the device parent's counter of unsuspended children is modified to * reflect the new status. If the new status is RPM_SUSPENDED, an idle * notification request for the parent is submitted. + * + * If @dev has any suppliers (as reflected by device links to them), and @status + * is RPM_ACTIVE, they will be activated upfront and if the activation of one + * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead + * of the @status value) and the suppliers will be deactivated on exit.
The + * error returned by the failing supplier activation will be returned in that + * case. */ int __pm_runtime_set_status(struct device *dev, unsigned int status) { struct device *parent = dev->parent; - unsigned long flags; bool notify_parent = false; int error = 0; if (status != RPM_ACTIVE && status != RPM_SUSPENDED) return -EINVAL; - spin_lock_irqsave(&dev->power.lock, flags); + spin_lock_irq(&dev->power.lock); - if (!dev->power.runtime_error && !dev->power.disable_depth) { + /* + * Prevent PM-runtime from being enabled for the device or return an + * error if it is enabled already and working. + */ + if (dev->power.runtime_error || dev->power.disable_depth) + dev->power.disable_depth++; + else error = -EAGAIN; - goto out; + + spin_unlock_irq(&dev->power.lock); + + if (error) + return error; + + /* + * If the new status is RPM_ACTIVE, the suppliers can be activated + * upfront regardless of the current status, because next time + * rpm_put_suppliers() runs, the rpm_active refcounts of the links + * involved will be dropped down to one anyway. + */ + if (status == RPM_ACTIVE) { + int idx = device_links_read_lock(); + + error = rpm_get_suppliers(dev); + if (error) + status = RPM_SUSPENDED; + + device_links_read_unlock(idx); } + spin_lock_irq(&dev->power.lock); + if (dev->power.runtime_status == status || !parent) goto out_set; @@ -1129,19 +1200,33 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status) spin_unlock(&parent->power.lock); - if (error) + if (error) { + status = RPM_SUSPENDED; goto out; + } } out_set: __update_runtime_status(dev, status); - dev->power.runtime_error = 0; + if (!error) + dev->power.runtime_error = 0; + out: - spin_unlock_irqrestore(&dev->power.lock, flags); + spin_unlock_irq(&dev->power.lock); if (notify_parent) pm_request_idle(parent); + if (status == RPM_SUSPENDED) { + int idx = device_links_read_lock(); + + rpm_put_suppliers(dev); + + device_links_read_unlock(idx); + } + + pm_runtime_enable(dev); + return error; } EXPORT_SYMBOL_GPL(__pm_runtime_set_status); @@ -1269,6 +1354,9 @@ void __pm_runtime_disable(struct device *dev, bool check_resume) pm_runtime_put_noidle(dev); } + /* Update time accounting before disabling PM-runtime. 
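Callers are unaffected by the internal disable/enable dance in __pm_runtime_set_status(): the usual probe-time sequence for a device that firmware left powered on still applies, and with this change it also activates DL_FLAG_PM_RUNTIME suppliers up front. A sketch:

#include <linux/pm_runtime.h>

static void demo_pm_init(struct device *dev)
{
	/* pm_runtime_set_active() wraps
	 * __pm_runtime_set_status(dev, RPM_ACTIVE); call it before
	 * enabling runtime PM so the core starts from the real state.
	 */
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
}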
*/ + update_pm_runtime_accounting(dev); + if (!dev->power.disable_depth++) __pm_runtime_barrier(dev); @@ -1287,10 +1375,15 @@ void pm_runtime_enable(struct device *dev) spin_lock_irqsave(&dev->power.lock, flags); - if (dev->power.disable_depth > 0) + if (dev->power.disable_depth > 0) { dev->power.disable_depth--; - else + + /* About to enable runtime pm, set accounting_timestamp to now */ + if (!dev->power.disable_depth) + dev->power.accounting_timestamp = ktime_get_mono_fast_ns(); + } else { dev_warn(dev, "Unbalanced %s!\n", __func__); + } WARN(!dev->power.disable_depth && dev->power.runtime_status == RPM_SUSPENDED && @@ -1487,11 +1580,11 @@ void pm_runtime_init(struct device *dev) dev->power.request_pending = false; dev->power.request = RPM_REQ_NONE; dev->power.deferred_resume = false; - dev->power.accounting_timestamp = jiffies; INIT_WORK(&dev->power.work, pm_runtime_work); dev->power.timer_expires = 0; - timer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, 0); + hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + dev->power.suspend_timer.function = pm_suspend_timer_fn; init_waitqueue_head(&dev->power.wait_queue); } @@ -1531,7 +1624,7 @@ void pm_runtime_remove(struct device *dev) * * Check links from this device to any consumers and if any of them have active * runtime PM references to the device, drop the usage counter of the device - * (once per link). + * (as many times as needed). * * Links with the DL_FLAG_STATELESS flag set are ignored. * @@ -1553,10 +1646,8 @@ void pm_runtime_clean_up_links(struct device *dev) if (link->flags & DL_FLAG_STATELESS) continue; - if (link->rpm_active) { + while (refcount_dec_not_one(&link->rpm_active)) pm_runtime_put_noidle(dev); - link->rpm_active = false; - } } device_links_read_unlock(idx); @@ -1574,8 +1665,11 @@ void pm_runtime_get_suppliers(struct device *dev) idx = device_links_read_lock(); list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) - if (link->flags & DL_FLAG_PM_RUNTIME) + if (link->flags & DL_FLAG_PM_RUNTIME) { + link->supplier_preactivated = true; + refcount_inc(&link->rpm_active); pm_runtime_get_sync(link->supplier); + } device_links_read_unlock(idx); } @@ -1592,8 +1686,11 @@ void pm_runtime_put_suppliers(struct device *dev) idx = device_links_read_lock(); list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) - if (link->flags & DL_FLAG_PM_RUNTIME) - pm_runtime_put(link->supplier); + if (link->supplier_preactivated) { + link->supplier_preactivated = false; + if (refcount_dec_not_one(&link->rpm_active)) + pm_runtime_put(link->supplier); + } device_links_read_unlock(idx); } @@ -1607,8 +1704,6 @@ void pm_runtime_new_link(struct device *dev) void pm_runtime_drop_link(struct device *dev) { - rpm_put_suppliers(dev); - spin_lock_irq(&dev->power.lock); WARN_ON(dev->power.links_count == 0); dev->power.links_count--; diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index d713738ce796..1226e441ddfe 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -125,10 +125,9 @@ static ssize_t runtime_active_time_show(struct device *dev, struct device_attribute *attr, char *buf) { int ret; - spin_lock_irq(&dev->power.lock); - update_pm_runtime_accounting(dev); - ret = sprintf(buf, "%i\n", jiffies_to_msecs(dev->power.active_jiffies)); - spin_unlock_irq(&dev->power.lock); + u64 tmp = pm_runtime_active_time(dev); + do_div(tmp, NSEC_PER_MSEC); + ret = sprintf(buf, "%llu\n", tmp); return ret; } @@ -138,11 +137,9 @@ static ssize_t runtime_suspended_time_show(struct 
device *dev, struct device_attribute *attr, char *buf) { int ret; - spin_lock_irq(&dev->power.lock); - update_pm_runtime_accounting(dev); - ret = sprintf(buf, "%i\n", - jiffies_to_msecs(dev->power.suspended_jiffies)); - spin_unlock_irq(&dev->power.lock); + u64 tmp = pm_runtime_suspended_time(dev); + do_div(tmp, NSEC_PER_MSEC); + ret = sprintf(buf, "%llu\n", tmp); return ret; } @@ -648,6 +645,10 @@ int dpm_sysfs_add(struct device *dev) { int rc; + /* No need to create PM sysfs if explicitly disabled. */ + if (device_pm_not_required(dev)) + return 0; + rc = sysfs_create_group(&dev->kobj, &pm_attr_group); if (rc) return rc; @@ -727,6 +728,8 @@ void rpm_sysfs_remove(struct device *dev) void dpm_sysfs_remove(struct device *dev) { + if (device_pm_not_required(dev)) + return; sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group); dev_pm_qos_constraints_destroy(dev); rpm_sysfs_remove(dev); diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c index 1cda505d6a85..2bd9d2c744ca 100644 --- a/drivers/base/power/trace.c +++ b/drivers/base/power/trace.c @@ -7,6 +7,8 @@ * devices may be working. */ +#define pr_fmt(fmt) "PM: " fmt + #include <linux/pm-trace.h> #include <linux/export.h> #include <linux/rtc.h> @@ -118,9 +120,7 @@ static unsigned int read_magic_time(void) unsigned int val; mc146818_get_time(&time); - pr_info("RTC time: %2d:%02d:%02d, date: %02d/%02d/%02d\n", - time.tm_hour, time.tm_min, time.tm_sec, - time.tm_mon + 1, time.tm_mday, time.tm_year % 100); + pr_info("RTC time: %ptRt, date: %ptRd\n", &time, &time); val = time.tm_year; /* 100 years */ if (val > 100) val -= 100; diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 5fa1898755a3..bb1ae175fae1 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -6,6 +6,8 @@ * This file is released under the GPLv2. */ +#define pr_fmt(fmt) "PM: " fmt + #include <linux/device.h> #include <linux/slab.h> #include <linux/sched/signal.h> @@ -106,23 +108,6 @@ struct wakeup_source *wakeup_source_create(const char *name) } EXPORT_SYMBOL_GPL(wakeup_source_create); -/** - * wakeup_source_drop - Prepare a struct wakeup_source object for destruction. - * @ws: Wakeup source to prepare for destruction. - * - * Callers must ensure that __pm_stay_awake() or __pm_wakeup_event() will never - * be run in parallel with this function for the same wakeup source object. - */ -void wakeup_source_drop(struct wakeup_source *ws) -{ - if (!ws) - return; - - del_timer_sync(&ws->timer); - __pm_relax(ws); -} -EXPORT_SYMBOL_GPL(wakeup_source_drop); - /* * Record wakeup_source statistics being deleted into a dummy wakeup_source. */ @@ -162,7 +147,7 @@ void wakeup_source_destroy(struct wakeup_source *ws) if (!ws) return; - wakeup_source_drop(ws); + __pm_relax(ws); wakeup_source_record(ws); kfree_const(ws->name); kfree(ws); @@ -205,6 +190,13 @@ void wakeup_source_remove(struct wakeup_source *ws) list_del_rcu(&ws->entry); raw_spin_unlock_irqrestore(&events_lock, flags); synchronize_srcu(&wakeup_srcu); + + del_timer_sync(&ws->timer); + /* + * Clear timer.function to make wakeup_source_not_registered() treat + * this wakeup source as not registered. + */ + ws->timer.function = NULL; } EXPORT_SYMBOL_GPL(wakeup_source_remove); @@ -783,7 +775,7 @@ void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard) EXPORT_SYMBOL_GPL(pm_wakeup_ws_event); /** - * pm_wakeup_event - Notify the PM core of a wakeup event. + * pm_wakeup_dev_event - Notify the PM core of a wakeup event. 
* @dev: Device the wakeup event is related to. * @msec: Anticipated event processing time (in milliseconds). * @hard: If set, abort suspends in progress and wake up from suspend-to-idle. @@ -853,7 +845,7 @@ bool pm_wakeup_pending(void) raw_spin_unlock_irqrestore(&events_lock, flags); if (ret) { - pr_debug("PM: Wakeup pending, aborting suspend\n"); + pr_debug("Wakeup pending, aborting suspend\n"); pm_print_active_wakeup_sources(); } diff --git a/drivers/base/property.c b/drivers/base/property.c index 240ab5230ff6..8b91ab380d14 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -18,236 +18,6 @@ #include <linux/etherdevice.h> #include <linux/phy.h> -struct property_set { - struct device *dev; - struct fwnode_handle fwnode; - const struct property_entry *properties; -}; - -static const struct fwnode_operations pset_fwnode_ops; - -static inline bool is_pset_node(const struct fwnode_handle *fwnode) -{ - return !IS_ERR_OR_NULL(fwnode) && fwnode->ops == &pset_fwnode_ops; -} - -#define to_pset_node(__fwnode) \ - ({ \ - typeof(__fwnode) __to_pset_node_fwnode = __fwnode; \ - \ - is_pset_node(__to_pset_node_fwnode) ? \ - container_of(__to_pset_node_fwnode, \ - struct property_set, fwnode) : \ - NULL; \ - }) - -static const struct property_entry * -pset_prop_get(const struct property_set *pset, const char *name) -{ - const struct property_entry *prop; - - if (!pset || !pset->properties) - return NULL; - - for (prop = pset->properties; prop->name; prop++) - if (!strcmp(name, prop->name)) - return prop; - - return NULL; -} - -static const void *property_get_pointer(const struct property_entry *prop) -{ - switch (prop->type) { - case DEV_PROP_U8: - if (prop->is_array) - return prop->pointer.u8_data; - return &prop->value.u8_data; - case DEV_PROP_U16: - if (prop->is_array) - return prop->pointer.u16_data; - return &prop->value.u16_data; - case DEV_PROP_U32: - if (prop->is_array) - return prop->pointer.u32_data; - return &prop->value.u32_data; - case DEV_PROP_U64: - if (prop->is_array) - return prop->pointer.u64_data; - return &prop->value.u64_data; - case DEV_PROP_STRING: - if (prop->is_array) - return prop->pointer.str; - return &prop->value.str; - default: - return NULL; - } -} - -static void property_set_pointer(struct property_entry *prop, const void *pointer) -{ - switch (prop->type) { - case DEV_PROP_U8: - if (prop->is_array) - prop->pointer.u8_data = pointer; - else - prop->value.u8_data = *((u8 *)pointer); - break; - case DEV_PROP_U16: - if (prop->is_array) - prop->pointer.u16_data = pointer; - else - prop->value.u16_data = *((u16 *)pointer); - break; - case DEV_PROP_U32: - if (prop->is_array) - prop->pointer.u32_data = pointer; - else - prop->value.u32_data = *((u32 *)pointer); - break; - case DEV_PROP_U64: - if (prop->is_array) - prop->pointer.u64_data = pointer; - else - prop->value.u64_data = *((u64 *)pointer); - break; - case DEV_PROP_STRING: - if (prop->is_array) - prop->pointer.str = pointer; - else - prop->value.str = pointer; - break; - default: - break; - } -} - -static const void *pset_prop_find(const struct property_set *pset, - const char *propname, size_t length) -{ - const struct property_entry *prop; - const void *pointer; - - prop = pset_prop_get(pset, propname); - if (!prop) - return ERR_PTR(-EINVAL); - pointer = property_get_pointer(prop); - if (!pointer) - return ERR_PTR(-ENODATA); - if (length > prop->length) - return ERR_PTR(-EOVERFLOW); - return pointer; -} - -static int pset_prop_read_u8_array(const struct property_set *pset, - const char *propname, - u8 
*values, size_t nval) -{ - const void *pointer; - size_t length = nval * sizeof(*values); - - pointer = pset_prop_find(pset, propname, length); - if (IS_ERR(pointer)) - return PTR_ERR(pointer); - - memcpy(values, pointer, length); - return 0; -} - -static int pset_prop_read_u16_array(const struct property_set *pset, - const char *propname, - u16 *values, size_t nval) -{ - const void *pointer; - size_t length = nval * sizeof(*values); - - pointer = pset_prop_find(pset, propname, length); - if (IS_ERR(pointer)) - return PTR_ERR(pointer); - - memcpy(values, pointer, length); - return 0; -} - -static int pset_prop_read_u32_array(const struct property_set *pset, - const char *propname, - u32 *values, size_t nval) -{ - const void *pointer; - size_t length = nval * sizeof(*values); - - pointer = pset_prop_find(pset, propname, length); - if (IS_ERR(pointer)) - return PTR_ERR(pointer); - - memcpy(values, pointer, length); - return 0; -} - -static int pset_prop_read_u64_array(const struct property_set *pset, - const char *propname, - u64 *values, size_t nval) -{ - const void *pointer; - size_t length = nval * sizeof(*values); - - pointer = pset_prop_find(pset, propname, length); - if (IS_ERR(pointer)) - return PTR_ERR(pointer); - - memcpy(values, pointer, length); - return 0; -} - -static int pset_prop_count_elems_of_size(const struct property_set *pset, - const char *propname, size_t length) -{ - const struct property_entry *prop; - - prop = pset_prop_get(pset, propname); - if (!prop) - return -EINVAL; - - return prop->length / length; -} - -static int pset_prop_read_string_array(const struct property_set *pset, - const char *propname, - const char **strings, size_t nval) -{ - const struct property_entry *prop; - const void *pointer; - size_t array_len, length; - - /* Find out the array length. */ - prop = pset_prop_get(pset, propname); - if (!prop) - return -EINVAL; - - if (!prop->is_array) - /* The array length for a non-array string property is 1. */ - array_len = 1; - else - /* Find the length of an array. */ - array_len = pset_prop_count_elems_of_size(pset, propname, - sizeof(const char *)); - - /* Return how many there are if strings is NULL. */ - if (!strings) - return array_len; - - array_len = min(nval, array_len); - length = array_len * sizeof(*strings); - - pointer = pset_prop_find(pset, propname, length); - if (IS_ERR(pointer)) - return PTR_ERR(pointer); - - memcpy(strings, pointer, length); - - return array_len; -} - struct fwnode_handle *dev_fwnode(struct device *dev) { return IS_ENABLED(CONFIG_OF) && dev->of_node ? 
@@ -255,51 +25,6 @@ struct fwnode_handle *dev_fwnode(struct device *dev) } EXPORT_SYMBOL_GPL(dev_fwnode); -static bool pset_fwnode_property_present(const struct fwnode_handle *fwnode, - const char *propname) -{ - return !!pset_prop_get(to_pset_node(fwnode), propname); -} - -static int pset_fwnode_read_int_array(const struct fwnode_handle *fwnode, - const char *propname, - unsigned int elem_size, void *val, - size_t nval) -{ - const struct property_set *node = to_pset_node(fwnode); - - if (!val) - return pset_prop_count_elems_of_size(node, propname, elem_size); - - switch (elem_size) { - case sizeof(u8): - return pset_prop_read_u8_array(node, propname, val, nval); - case sizeof(u16): - return pset_prop_read_u16_array(node, propname, val, nval); - case sizeof(u32): - return pset_prop_read_u32_array(node, propname, val, nval); - case sizeof(u64): - return pset_prop_read_u64_array(node, propname, val, nval); - } - - return -ENXIO; -} - -static int -pset_fwnode_property_read_string_array(const struct fwnode_handle *fwnode, - const char *propname, - const char **val, size_t nval) -{ - return pset_prop_read_string_array(to_pset_node(fwnode), propname, - val, nval); -} - -static const struct fwnode_operations pset_fwnode_ops = { - .property_present = pset_fwnode_property_present, - .property_read_int_array = pset_fwnode_read_int_array, - .property_read_string_array = pset_fwnode_property_read_string_array, -}; - /** * device_property_present - check if a property of a device is present * @dev: Device whose property is being checked @@ -759,223 +484,25 @@ int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode, } EXPORT_SYMBOL_GPL(fwnode_property_get_reference_args); -static void property_entry_free_data(const struct property_entry *p) -{ - const void *pointer = property_get_pointer(p); - size_t i, nval; - - if (p->is_array) { - if (p->type == DEV_PROP_STRING && p->pointer.str) { - nval = p->length / sizeof(const char *); - for (i = 0; i < nval; i++) - kfree(p->pointer.str[i]); - } - kfree(pointer); - } else if (p->type == DEV_PROP_STRING) { - kfree(p->value.str); - } - kfree(p->name); -} - -static int property_copy_string_array(struct property_entry *dst, - const struct property_entry *src) -{ - const char **d; - size_t nval = src->length / sizeof(*d); - int i; - - d = kcalloc(nval, sizeof(*d), GFP_KERNEL); - if (!d) - return -ENOMEM; - - for (i = 0; i < nval; i++) { - d[i] = kstrdup(src->pointer.str[i], GFP_KERNEL); - if (!d[i] && src->pointer.str[i]) { - while (--i >= 0) - kfree(d[i]); - kfree(d); - return -ENOMEM; - } - } - - dst->pointer.str = d; - return 0; -} - -static int property_entry_copy_data(struct property_entry *dst, - const struct property_entry *src) -{ - const void *pointer = property_get_pointer(src); - const void *new; - int error; - - if (src->is_array) { - if (!src->length) - return -ENODATA; - - if (src->type == DEV_PROP_STRING) { - error = property_copy_string_array(dst, src); - if (error) - return error; - new = dst->pointer.str; - } else { - new = kmemdup(pointer, src->length, GFP_KERNEL); - if (!new) - return -ENOMEM; - } - } else if (src->type == DEV_PROP_STRING) { - new = kstrdup(src->value.str, GFP_KERNEL); - if (!new && src->value.str) - return -ENOMEM; - } else { - new = pointer; - } - - dst->length = src->length; - dst->is_array = src->is_array; - dst->type = src->type; - - property_set_pointer(dst, new); - - dst->name = kstrdup(src->name, GFP_KERNEL); - if (!dst->name) - goto out_free_data; - - return 0; - -out_free_data: - 
property_entry_free_data(dst); - return -ENOMEM; -} - -/** - * property_entries_dup - duplicate array of properties - * @properties: array of properties to copy - * - * This function creates a deep copy of the given NULL-terminated array - * of property entries. - */ -struct property_entry * -property_entries_dup(const struct property_entry *properties) -{ - struct property_entry *p; - int i, n = 0; - - while (properties[n].name) - n++; - - p = kcalloc(n + 1, sizeof(*p), GFP_KERNEL); - if (!p) - return ERR_PTR(-ENOMEM); - - for (i = 0; i < n; i++) { - int ret = property_entry_copy_data(&p[i], &properties[i]); - if (ret) { - while (--i >= 0) - property_entry_free_data(&p[i]); - kfree(p); - return ERR_PTR(ret); - } - } - - return p; -} -EXPORT_SYMBOL_GPL(property_entries_dup); - -/** - * property_entries_free - free previously allocated array of properties - * @properties: array of properties to destroy - * - * This function frees given NULL-terminated array of property entries, - * along with their data. - */ -void property_entries_free(const struct property_entry *properties) -{ - const struct property_entry *p; - - for (p = properties; p->name; p++) - property_entry_free_data(p); - - kfree(properties); -} -EXPORT_SYMBOL_GPL(property_entries_free); - -/** - * pset_free_set - releases memory allocated for copied property set - * @pset: Property set to release - * - * Function takes previously copied property set and releases all the - * memory allocated to it. - */ -static void pset_free_set(struct property_set *pset) -{ - if (!pset) - return; - - property_entries_free(pset->properties); - kfree(pset); -} - -/** - * pset_copy_set - copies property set - * @pset: Property set to copy - * - * This function takes a deep copy of the given property set and returns - * pointer to the copy. Call device_free_property_set() to free resources - * allocated in this function. - * - * Return: Pointer to the new property set or error pointer. - */ -static struct property_set *pset_copy_set(const struct property_set *pset) -{ - struct property_entry *properties; - struct property_set *p; - - p = kzalloc(sizeof(*p), GFP_KERNEL); - if (!p) - return ERR_PTR(-ENOMEM); - - properties = property_entries_dup(pset->properties); - if (IS_ERR(properties)) { - kfree(p); - return ERR_CAST(properties); - } - - p->properties = properties; - return p; -} - /** * device_remove_properties - Remove properties from a device object. * @dev: Device whose properties to remove. * * The function removes properties previously associated to the device - * secondary firmware node with device_add_properties(). Memory allocated - * to the properties will also be released. + * firmware node with device_add_properties(). Memory allocated to the + * properties will also be released. */ void device_remove_properties(struct device *dev) { - struct fwnode_handle *fwnode; - struct property_set *pset; + struct fwnode_handle *fwnode = dev_fwnode(dev); - fwnode = dev_fwnode(dev); if (!fwnode) return; - /* - * Pick either primary or secondary node depending which one holds - * the pset. If there is no real firmware node (ACPI/DT) primary - * will hold the pset. 
- */ - pset = to_pset_node(fwnode); - if (pset) { - set_primary_fwnode(dev, NULL); - } else { - pset = to_pset_node(fwnode->secondary); - if (pset && dev == pset->dev) - set_secondary_fwnode(dev, NULL); + + if (is_software_node(fwnode->secondary)) { + fwnode_remove_software_node(fwnode->secondary); + set_secondary_fwnode(dev, NULL); } - if (pset && dev == pset->dev) - pset_free_set(pset); } EXPORT_SYMBOL_GPL(device_remove_properties); @@ -985,26 +512,22 @@ EXPORT_SYMBOL_GPL(device_remove_properties); * @properties: Collection of properties to add. * * Associate a collection of device properties represented by @properties with - * @dev as its secondary firmware node. The function takes a copy of - * @properties. + * @dev. The function takes a copy of @properties. + * + * WARNING: The callers should not use this function if it is known that there + * is no real firmware node associated with @dev! In that case the callers + * should create a software node and assign it to @dev directly. */ int device_add_properties(struct device *dev, const struct property_entry *properties) { - struct property_set *p, pset; - - if (!properties) - return -EINVAL; - - pset.properties = properties; + struct fwnode_handle *fwnode; - p = pset_copy_set(&pset); - if (IS_ERR(p)) - return PTR_ERR(p); + fwnode = fwnode_create_software_node(properties, NULL); + if (IS_ERR(fwnode)) + return PTR_ERR(fwnode); - p->fwnode.ops = &pset_fwnode_ops; - set_secondary_fwnode(dev, &p->fwnode); - p->dev = dev; + set_secondary_fwnode(dev, fwnode); return 0; } EXPORT_SYMBOL_GPL(device_add_properties); @@ -1341,7 +864,7 @@ int fwnode_irq_get(struct fwnode_handle *fwnode, unsigned int index) EXPORT_SYMBOL(fwnode_irq_get); /** - * device_graph_get_next_endpoint - Get next endpoint firmware node + * fwnode_graph_get_next_endpoint - Get next endpoint firmware node * @fwnode: Pointer to the parent firmware node * @prev: Previous endpoint node or %NULL to get the first * diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index b1e9aae9a5d0..9cbb4b0cd01b 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c @@ -33,7 +33,7 @@ struct regcache_rbtree_node { unsigned int blklen; /* the actual rbtree node holding this block */ struct rb_node node; -} __attribute__ ((packed)); +}; struct regcache_rbtree_ctx { struct rb_root root; @@ -177,17 +177,7 @@ static int rbtree_show(struct seq_file *s, void *ignored) return 0; } -static int rbtree_open(struct inode *inode, struct file *file) -{ - return single_open(file, rbtree_show, inode->i_private); -} - -static const struct file_operations rbtree_fops = { - .open = rbtree_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(rbtree); static void rbtree_debugfs_init(struct regmap *map) { diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c index 87b562e49a43..19eb454f26c3 100644 --- a/drivers/base/regmap/regmap-debugfs.c +++ b/drivers/base/regmap/regmap-debugfs.c @@ -435,17 +435,7 @@ static int regmap_access_show(struct seq_file *s, void *ignored) return 0; } -static int access_open(struct inode *inode, struct file *file) -{ - return single_open(file, regmap_access_show, inode->i_private); -} - -static const struct file_operations regmap_access_fops = { - .open = access_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(regmap_access); static ssize_t regmap_cache_only_write_file(struct 
file *file, const char __user *user_buf, diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index 429ca8ed7e51..5059748afd4c 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -35,6 +35,7 @@ struct regmap_irq_chip_data { int wake_count; void *status_reg_buf; + unsigned int *main_status_buf; unsigned int *status_buf; unsigned int *mask_buf; unsigned int *mask_buf_def; @@ -44,6 +45,8 @@ struct regmap_irq_chip_data { unsigned int irq_reg_stride; unsigned int type_reg_stride; + + bool clear_status:1; }; static inline const @@ -77,6 +80,7 @@ static void regmap_irq_sync_unlock(struct irq_data *data) int i, ret; u32 reg; u32 unmask_offset; + u32 val; if (d->chip->runtime_pm) { ret = pm_runtime_get_sync(map->dev); @@ -85,12 +89,29 @@ static void regmap_irq_sync_unlock(struct irq_data *data) ret); } + if (d->clear_status) { + for (i = 0; i < d->chip->num_regs; i++) { + reg = d->chip->status_base + + (i * map->reg_stride * d->irq_reg_stride); + + ret = regmap_read(map, reg, &val); + if (ret) + dev_err(d->map->dev, + "Failed to clear the interrupt status bits\n"); + } + + d->clear_status = false; + } + /* * If there's been a change in the mask write it back to the * hardware. We rely on the use of the regmap core cache to * suppress pointless writes. */ for (i = 0; i < d->chip->num_regs; i++) { + if (!d->chip->mask_base) + continue; + reg = d->chip->mask_base + (i * map->reg_stride * d->irq_reg_stride); if (d->chip->mask_invert) { @@ -157,20 +178,23 @@ static void regmap_irq_sync_unlock(struct irq_data *data) } } - for (i = 0; i < d->chip->num_type_reg; i++) { - if (!d->type_buf_def[i]) - continue; - reg = d->chip->type_base + - (i * map->reg_stride * d->type_reg_stride); - if (d->chip->type_invert) - ret = regmap_irq_update_bits(d, reg, - d->type_buf_def[i], ~d->type_buf[i]); - else - ret = regmap_irq_update_bits(d, reg, - d->type_buf_def[i], d->type_buf[i]); - if (ret != 0) - dev_err(d->map->dev, "Failed to sync type in %x\n", - reg); + /* Don't update the type bits if we're using mask bits for irq type. */ + if (!d->chip->type_in_mask) { + for (i = 0; i < d->chip->num_type_reg; i++) { + if (!d->type_buf_def[i]) + continue; + reg = d->chip->type_base + + (i * map->reg_stride * d->type_reg_stride); + if (d->chip->type_invert) + ret = regmap_irq_update_bits(d, reg, + d->type_buf_def[i], ~d->type_buf[i]); + else + ret = regmap_irq_update_bits(d, reg, + d->type_buf_def[i], d->type_buf[i]); + if (ret != 0) + dev_err(d->map->dev, "Failed to sync type in %x\n", + reg); + } } if (d->chip->runtime_pm) @@ -194,8 +218,30 @@ static void regmap_irq_enable(struct irq_data *data) struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); struct regmap *map = d->map; const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq); + unsigned int mask, type; + + type = irq_data->type.type_falling_val | irq_data->type.type_rising_val; + + /* + * The type_in_mask flag means that the underlying hardware uses + * separate mask bits for rising and falling edge interrupts, but + * we want to make them into a single virtual interrupt with + * configurable edge. + * + * If the interrupt we're enabling defines the falling or rising + * masks then instead of using the regular mask bits for this + * interrupt, use the value previously written to the type buffer + * at the corresponding offset in regmap_irq_set_type(). 
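+ *
+ * For example (hypothetical chip, shown for illustration only, with
+ * type_in_mask set in its struct regmap_irq_chip):
+ *
+ *	static const struct regmap_irq my_irq = {
+ *		.reg_offset = 0,
+ *		.mask = BIT(0) | BIT(1),
+ *		.type = {
+ *			.types_supported = IRQ_TYPE_EDGE_RISING |
+ *					   IRQ_TYPE_EDGE_FALLING,
+ *			.type_rising_val = BIT(0),
+ *			.type_falling_val = BIT(1),
+ *		},
+ *	};
+ *
+ * Here the BIT(0)/BIT(1) value written to the type buffer by
+ * regmap_irq_set_type() stands in for .mask when the line is unmasked.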
+ */ + if (d->chip->type_in_mask && type) + mask = d->type_buf[irq_data->reg_offset / map->reg_stride]; + else + mask = irq_data->mask; + + if (d->chip->clear_on_unmask) + d->clear_status = true; - d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~irq_data->mask; + d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~mask; } static void regmap_irq_disable(struct irq_data *data) @@ -212,27 +258,42 @@ static int regmap_irq_set_type(struct irq_data *data, unsigned int type) struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); struct regmap *map = d->map; const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq); - int reg = irq_data->type_reg_offset / map->reg_stride; + int reg; + const struct regmap_irq_type *t = &irq_data->type; - if (!(irq_data->type_rising_mask | irq_data->type_falling_mask)) + if ((t->types_supported & type) != type) return 0; - d->type_buf[reg] &= ~(irq_data->type_falling_mask | - irq_data->type_rising_mask); + reg = t->type_reg_offset / map->reg_stride; + + if (t->type_reg_mask) + d->type_buf[reg] &= ~t->type_reg_mask; + else + d->type_buf[reg] &= ~(t->type_falling_val | + t->type_rising_val | + t->type_level_low_val | + t->type_level_high_val); switch (type) { case IRQ_TYPE_EDGE_FALLING: - d->type_buf[reg] |= irq_data->type_falling_mask; + d->type_buf[reg] |= t->type_falling_val; break; case IRQ_TYPE_EDGE_RISING: - d->type_buf[reg] |= irq_data->type_rising_mask; + d->type_buf[reg] |= t->type_rising_val; break; case IRQ_TYPE_EDGE_BOTH: - d->type_buf[reg] |= (irq_data->type_falling_mask | - irq_data->type_rising_mask); + d->type_buf[reg] |= (t->type_falling_val | + t->type_rising_val); break; + case IRQ_TYPE_LEVEL_HIGH: + d->type_buf[reg] |= t->type_level_high_val; + break; + + case IRQ_TYPE_LEVEL_LOW: + d->type_buf[reg] |= t->type_level_low_val; + break; default: return -EINVAL; } @@ -269,6 +330,33 @@ static const struct irq_chip regmap_irq_chip = { .irq_set_wake = regmap_irq_set_wake, }; +static inline int read_sub_irq_data(struct regmap_irq_chip_data *data, + unsigned int b) +{ + const struct regmap_irq_chip *chip = data->chip; + struct regmap *map = data->map; + struct regmap_irq_sub_irq_map *subreg; + int i, ret = 0; + + if (!chip->sub_reg_offsets) { + /* Assume linear mapping */ + ret = regmap_read(map, chip->status_base + + (b * map->reg_stride * data->irq_reg_stride), + &data->status_buf[b]); + } else { + subreg = &chip->sub_reg_offsets[b]; + for (i = 0; i < subreg->num_regs; i++) { + unsigned int offset = subreg->offset[i]; + + ret = regmap_read(map, chip->status_base + offset, + &data->status_buf[offset]); + if (ret) + break; + } + } + return ret; +} + static irqreturn_t regmap_irq_thread(int irq, void *d) { struct regmap_irq_chip_data *data = d; @@ -292,11 +380,65 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) } /* - * Read in the statuses, using a single bulk read if possible - * in order to reduce the I/O overheads. + * Read only registers with active IRQs if the chip has 'main status + * register'. Else read in the statuses, using a single bulk read if + * possible in order to reduce the I/O overheads. */ - if (!map->use_single_read && map->reg_stride == 1 && - data->irq_reg_stride == 1) { + + if (chip->num_main_regs) { + unsigned int max_main_bits; + unsigned long size; + + size = chip->num_regs * sizeof(unsigned int); + + max_main_bits = (chip->num_main_status_bits) ? 
+ chip->num_main_status_bits : chip->num_regs; + /* Clear the status buf as we don't read all status regs */ + memset(data->status_buf, 0, size); + + /* We could support bulk read for main status registers + * but I don't expect to see devices with really many main + * status registers so let's only support single reads for the + * sake of simplicity, and add bulk reads only if needed. + */ + for (i = 0; i < chip->num_main_regs; i++) { + ret = regmap_read(map, chip->main_status + + (i * map->reg_stride + * data->irq_reg_stride), + &data->main_status_buf[i]); + if (ret) { + dev_err(map->dev, + "Failed to read IRQ status %d\n", + ret); + goto exit; + } + } + + /* Read sub registers with active IRQs */ + for (i = 0; i < chip->num_main_regs; i++) { + unsigned int b; + const unsigned long mreg = data->main_status_buf[i]; + + for_each_set_bit(b, &mreg, map->format.val_bytes * 8) { + if (i * map->format.val_bytes * 8 + b > + max_main_bits) + break; + ret = read_sub_irq_data(data, b); + + if (ret != 0) { + dev_err(map->dev, + "Failed to read IRQ status %d\n", + ret); + if (chip->runtime_pm) + pm_runtime_put(map->dev); + goto exit; + } + } + + } + } else if (!map->use_single_read && map->reg_stride == 1 && + data->irq_reg_stride == 1) { + u8 *buf8 = data->status_reg_buf; u16 *buf16 = data->status_reg_buf; u32 *buf32 = data->status_reg_buf; @@ -430,12 +572,16 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, struct regmap_irq_chip_data *d; int i; int ret = -ENOMEM; + int num_type_reg; u32 reg; u32 unmask_offset; if (chip->num_regs <= 0) return -EINVAL; + if (chip->clear_on_unmask && (chip->ack_base || chip->use_ack)) + return -EINVAL; + for (i = 0; i < chip->num_irqs; i++) { if (chip->irqs[i].reg_offset % map->reg_stride) return -EINVAL; @@ -457,6 +603,15 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, if (!d) return -ENOMEM; + if (chip->num_main_regs) { + d->main_status_buf = kcalloc(chip->num_main_regs, + sizeof(unsigned int), + GFP_KERNEL); + + if (!d->main_status_buf) + goto err_alloc; + } + d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int), GFP_KERNEL); if (!d->status_buf) @@ -479,13 +634,14 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, goto err_alloc; } - if (chip->num_type_reg) { - d->type_buf_def = kcalloc(chip->num_type_reg, - sizeof(unsigned int), GFP_KERNEL); + num_type_reg = chip->type_in_mask ?
chip->num_regs : chip->num_type_reg; + if (num_type_reg) { + d->type_buf_def = kcalloc(num_type_reg, + sizeof(unsigned int), GFP_KERNEL); if (!d->type_buf_def) goto err_alloc; - d->type_buf = kcalloc(chip->num_type_reg, sizeof(unsigned int), + d->type_buf = kcalloc(num_type_reg, sizeof(unsigned int), GFP_KERNEL); if (!d->type_buf) goto err_alloc; @@ -526,6 +682,9 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, /* Mask all the interrupts by default */ for (i = 0; i < chip->num_regs; i++) { d->mask_buf[i] = d->mask_buf_def[i]; + if (!chip->mask_base) + continue; + reg = chip->mask_base + (i * map->reg_stride * d->irq_reg_stride); if (chip->mask_invert) @@ -600,27 +759,21 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, } } - if (chip->num_type_reg) { - for (i = 0; i < chip->num_irqs; i++) { - reg = chip->irqs[i].type_reg_offset / map->reg_stride; - d->type_buf_def[reg] |= chip->irqs[i].type_rising_mask | - chip->irqs[i].type_falling_mask; - } + if (chip->num_type_reg && !chip->type_in_mask) { for (i = 0; i < chip->num_type_reg; ++i) { if (!d->type_buf_def[i]) continue; reg = chip->type_base + (i * map->reg_stride * d->type_reg_stride); - if (chip->type_invert) - ret = regmap_irq_update_bits(d, reg, - d->type_buf_def[i], 0xFF); - else - ret = regmap_irq_update_bits(d, reg, - d->type_buf_def[i], 0x0); - if (ret != 0) { - dev_err(map->dev, - "Failed to set type in 0x%x: %x\n", + + ret = regmap_read(map, reg, &d->type_buf_def[i]); + + if (d->chip->type_invert) + d->type_buf_def[i] = ~d->type_buf_def[i]; + + if (ret) { + dev_err(map->dev, "Failed to get type defaults at 0x%x: %d\n", reg, ret); goto err_alloc; } diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c new file mode 100644 index 000000000000..7fc5a18e02ad --- /dev/null +++ b/drivers/base/swnode.c @@ -0,0 +1,699 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Software nodes for the firmware node framework. + * + * Copyright (C) 2018, Intel Corporation + * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com> + */ + +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/property.h> +#include <linux/slab.h> + +struct software_node { + int id; + struct kobject kobj; + struct fwnode_handle fwnode; + + /* hierarchy */ + struct ida child_ids; + struct list_head entry; + struct list_head children; + struct software_node *parent; + + /* properties */ + const struct property_entry *properties; +}; + +static DEFINE_IDA(swnode_root_ids); +static struct kset *swnode_kset; + +#define kobj_to_swnode(_kobj_) container_of(_kobj_, struct software_node, kobj) + +static const struct fwnode_operations software_node_ops; + +bool is_software_node(const struct fwnode_handle *fwnode) +{ + return !IS_ERR_OR_NULL(fwnode) && fwnode->ops == &software_node_ops; +} + +#define to_software_node(__fwnode) \ + ({ \ + typeof(__fwnode) __to_software_node_fwnode = __fwnode; \ + \ + is_software_node(__to_software_node_fwnode) ? 
\ + container_of(__to_software_node_fwnode, \ + struct software_node, fwnode) : \ + NULL; \ + }) + +/* -------------------------------------------------------------------------- */ +/* property_entry processing */ + +static const struct property_entry * +property_entry_get(const struct property_entry *prop, const char *name) +{ + if (!prop) + return NULL; + + for (; prop->name; prop++) + if (!strcmp(name, prop->name)) + return prop; + + return NULL; +} + +static void +property_set_pointer(struct property_entry *prop, const void *pointer) +{ + switch (prop->type) { + case DEV_PROP_U8: + if (prop->is_array) + prop->pointer.u8_data = pointer; + else + prop->value.u8_data = *((u8 *)pointer); + break; + case DEV_PROP_U16: + if (prop->is_array) + prop->pointer.u16_data = pointer; + else + prop->value.u16_data = *((u16 *)pointer); + break; + case DEV_PROP_U32: + if (prop->is_array) + prop->pointer.u32_data = pointer; + else + prop->value.u32_data = *((u32 *)pointer); + break; + case DEV_PROP_U64: + if (prop->is_array) + prop->pointer.u64_data = pointer; + else + prop->value.u64_data = *((u64 *)pointer); + break; + case DEV_PROP_STRING: + if (prop->is_array) + prop->pointer.str = pointer; + else + prop->value.str = pointer; + break; + default: + break; + } +} + +static const void *property_get_pointer(const struct property_entry *prop) +{ + switch (prop->type) { + case DEV_PROP_U8: + if (prop->is_array) + return prop->pointer.u8_data; + return &prop->value.u8_data; + case DEV_PROP_U16: + if (prop->is_array) + return prop->pointer.u16_data; + return &prop->value.u16_data; + case DEV_PROP_U32: + if (prop->is_array) + return prop->pointer.u32_data; + return &prop->value.u32_data; + case DEV_PROP_U64: + if (prop->is_array) + return prop->pointer.u64_data; + return &prop->value.u64_data; + case DEV_PROP_STRING: + if (prop->is_array) + return prop->pointer.str; + return &prop->value.str; + default: + return NULL; + } +} + +static const void *property_entry_find(const struct property_entry *props, + const char *propname, size_t length) +{ + const struct property_entry *prop; + const void *pointer; + + prop = property_entry_get(props, propname); + if (!prop) + return ERR_PTR(-EINVAL); + pointer = property_get_pointer(prop); + if (!pointer) + return ERR_PTR(-ENODATA); + if (length > prop->length) + return ERR_PTR(-EOVERFLOW); + return pointer; +} + +static int property_entry_read_u8_array(const struct property_entry *props, + const char *propname, + u8 *values, size_t nval) +{ + const void *pointer; + size_t length = nval * sizeof(*values); + + pointer = property_entry_find(props, propname, length); + if (IS_ERR(pointer)) + return PTR_ERR(pointer); + + memcpy(values, pointer, length); + return 0; +} + +static int property_entry_read_u16_array(const struct property_entry *props, + const char *propname, + u16 *values, size_t nval) +{ + const void *pointer; + size_t length = nval * sizeof(*values); + + pointer = property_entry_find(props, propname, length); + if (IS_ERR(pointer)) + return PTR_ERR(pointer); + + memcpy(values, pointer, length); + return 0; +} + +static int property_entry_read_u32_array(const struct property_entry *props, + const char *propname, + u32 *values, size_t nval) +{ + const void *pointer; + size_t length = nval * sizeof(*values); + + pointer = property_entry_find(props, propname, length); + if (IS_ERR(pointer)) + return PTR_ERR(pointer); + + memcpy(values, pointer, length); + return 0; +} + +static int property_entry_read_u64_array(const struct property_entry *props, + const char 
*propname, + u64 *values, size_t nval) +{ + const void *pointer; + size_t length = nval * sizeof(*values); + + pointer = property_entry_find(props, propname, length); + if (IS_ERR(pointer)) + return PTR_ERR(pointer); + + memcpy(values, pointer, length); + return 0; +} + +static int +property_entry_count_elems_of_size(const struct property_entry *props, + const char *propname, size_t length) +{ + const struct property_entry *prop; + + prop = property_entry_get(props, propname); + if (!prop) + return -EINVAL; + + return prop->length / length; +} + +static int property_entry_read_int_array(const struct property_entry *props, + const char *name, + unsigned int elem_size, void *val, + size_t nval) +{ + if (!val) + return property_entry_count_elems_of_size(props, name, + elem_size); + switch (elem_size) { + case sizeof(u8): + return property_entry_read_u8_array(props, name, val, nval); + case sizeof(u16): + return property_entry_read_u16_array(props, name, val, nval); + case sizeof(u32): + return property_entry_read_u32_array(props, name, val, nval); + case sizeof(u64): + return property_entry_read_u64_array(props, name, val, nval); + } + + return -ENXIO; +} + +static int property_entry_read_string_array(const struct property_entry *props, + const char *propname, + const char **strings, size_t nval) +{ + const struct property_entry *prop; + const void *pointer; + size_t array_len, length; + + /* Find out the array length. */ + prop = property_entry_get(props, propname); + if (!prop) + return -EINVAL; + + if (prop->is_array) + /* Find the length of an array. */ + array_len = property_entry_count_elems_of_size(props, propname, + sizeof(const char *)); + else + /* The array length for a non-array string property is 1. */ + array_len = 1; + + /* Return how many there are if strings is NULL. 
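+ * (A NULL buffer thus doubles as a size query: callers first ask for
+ * the count, allocate, then call again with a real buffer.)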
*/ + if (!strings) + return array_len; + + array_len = min(nval, array_len); + length = array_len * sizeof(*strings); + + pointer = property_entry_find(props, propname, length); + if (IS_ERR(pointer)) + return PTR_ERR(pointer); + + memcpy(strings, pointer, length); + + return array_len; +} + +static void property_entry_free_data(const struct property_entry *p) +{ + const void *pointer = property_get_pointer(p); + size_t i, nval; + + if (p->is_array) { + if (p->type == DEV_PROP_STRING && p->pointer.str) { + nval = p->length / sizeof(const char *); + for (i = 0; i < nval; i++) + kfree(p->pointer.str[i]); + } + kfree(pointer); + } else if (p->type == DEV_PROP_STRING) { + kfree(p->value.str); + } + kfree(p->name); +} + +static int property_copy_string_array(struct property_entry *dst, + const struct property_entry *src) +{ + const char **d; + size_t nval = src->length / sizeof(*d); + int i; + + d = kcalloc(nval, sizeof(*d), GFP_KERNEL); + if (!d) + return -ENOMEM; + + for (i = 0; i < nval; i++) { + d[i] = kstrdup(src->pointer.str[i], GFP_KERNEL); + if (!d[i] && src->pointer.str[i]) { + while (--i >= 0) + kfree(d[i]); + kfree(d); + return -ENOMEM; + } + } + + dst->pointer.str = d; + return 0; +} + +static int property_entry_copy_data(struct property_entry *dst, + const struct property_entry *src) +{ + const void *pointer = property_get_pointer(src); + const void *new; + int error; + + if (src->is_array) { + if (!src->length) + return -ENODATA; + + if (src->type == DEV_PROP_STRING) { + error = property_copy_string_array(dst, src); + if (error) + return error; + new = dst->pointer.str; + } else { + new = kmemdup(pointer, src->length, GFP_KERNEL); + if (!new) + return -ENOMEM; + } + } else if (src->type == DEV_PROP_STRING) { + new = kstrdup(src->value.str, GFP_KERNEL); + if (!new && src->value.str) + return -ENOMEM; + } else { + new = pointer; + } + + dst->length = src->length; + dst->is_array = src->is_array; + dst->type = src->type; + + property_set_pointer(dst, new); + + dst->name = kstrdup(src->name, GFP_KERNEL); + if (!dst->name) + goto out_free_data; + + return 0; + +out_free_data: + property_entry_free_data(dst); + return -ENOMEM; +} + +/** + * property_entries_dup - duplicate array of properties + * @properties: array of properties to copy + * + * This function creates a deep copy of the given NULL-terminated array + * of property entries. + */ +struct property_entry * +property_entries_dup(const struct property_entry *properties) +{ + struct property_entry *p; + int i, n = 0; + int ret; + + while (properties[n].name) + n++; + + p = kcalloc(n + 1, sizeof(*p), GFP_KERNEL); + if (!p) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < n; i++) { + ret = property_entry_copy_data(&p[i], &properties[i]); + if (ret) { + while (--i >= 0) + property_entry_free_data(&p[i]); + kfree(p); + return ERR_PTR(ret); + } + } + + return p; +} +EXPORT_SYMBOL_GPL(property_entries_dup); + +/** + * property_entries_free - free previously allocated array of properties + * @properties: array of properties to destroy + * + * This function frees given NULL-terminated array of property entries, + * along with their data. 
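+ *
+ * A typical pairing with property_entries_dup() looks roughly like
+ * this (illustrative sketch, not taken from a caller in this patch):
+ *
+ *	copy = property_entries_dup(template);
+ *	if (IS_ERR(copy))
+ *		return PTR_ERR(copy);
+ *	...
+ *	property_entries_free(copy);
+ *
+ * Passing a NULL pointer is a no-op.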
+ */ +void property_entries_free(const struct property_entry *properties) +{ + const struct property_entry *p; + + if (!properties) + return; + + for (p = properties; p->name; p++) + property_entry_free_data(p); + + kfree(properties); +} +EXPORT_SYMBOL_GPL(property_entries_free); + +/* -------------------------------------------------------------------------- */ +/* fwnode operations */ + +static struct fwnode_handle *software_node_get(struct fwnode_handle *fwnode) +{ + struct software_node *swnode = to_software_node(fwnode); + + kobject_get(&swnode->kobj); + + return &swnode->fwnode; +} + +static void software_node_put(struct fwnode_handle *fwnode) +{ + struct software_node *swnode = to_software_node(fwnode); + + kobject_put(&swnode->kobj); +} + +static bool software_node_property_present(const struct fwnode_handle *fwnode, + const char *propname) +{ + return !!property_entry_get(to_software_node(fwnode)->properties, + propname); +} + +static int software_node_read_int_array(const struct fwnode_handle *fwnode, + const char *propname, + unsigned int elem_size, void *val, + size_t nval) +{ + struct software_node *swnode = to_software_node(fwnode); + + return property_entry_read_int_array(swnode->properties, propname, + elem_size, val, nval); +} + +static int software_node_read_string_array(const struct fwnode_handle *fwnode, + const char *propname, + const char **val, size_t nval) +{ + struct software_node *swnode = to_software_node(fwnode); + + return property_entry_read_string_array(swnode->properties, propname, + val, nval); +} + +static struct fwnode_handle * +software_node_get_parent(const struct fwnode_handle *fwnode) +{ + struct software_node *swnode = to_software_node(fwnode); + + return swnode ? (swnode->parent ? &swnode->parent->fwnode : NULL) : + NULL; +} + +static struct fwnode_handle * +software_node_get_next_child(const struct fwnode_handle *fwnode, + struct fwnode_handle *child) +{ + struct software_node *p = to_software_node(fwnode); + struct software_node *c = to_software_node(child); + + if (!p || list_empty(&p->children) || + (c && list_is_last(&c->entry, &p->children))) + return NULL; + + if (c) + c = list_next_entry(c, entry); + else + c = list_first_entry(&p->children, struct software_node, entry); + return &c->fwnode; +} + +static struct fwnode_handle * +software_node_get_named_child_node(const struct fwnode_handle *fwnode, + const char *childname) +{ + struct software_node *swnode = to_software_node(fwnode); + const struct property_entry *prop; + struct software_node *child; + + if (!swnode || list_empty(&swnode->children)) + return NULL; + + list_for_each_entry(child, &swnode->children, entry) { + prop = property_entry_get(child->properties, "name"); + if (!prop) + continue; + if (!strcmp(childname, prop->value.str)) { + kobject_get(&child->kobj); + return &child->fwnode; + } + } + return NULL; +} + +static const struct fwnode_operations software_node_ops = { + .get = software_node_get, + .put = software_node_put, + .property_present = software_node_property_present, + .property_read_int_array = software_node_read_int_array, + .property_read_string_array = software_node_read_string_array, + .get_parent = software_node_get_parent, + .get_next_child_node = software_node_get_next_child, + .get_named_child_node = software_node_get_named_child_node, +}; + +/* -------------------------------------------------------------------------- */ + +static int +software_node_register_properties(struct software_node *swnode, + const struct property_entry *properties) +{ + struct 
property_entry *props; + + props = property_entries_dup(properties); + if (IS_ERR(props)) + return PTR_ERR(props); + + swnode->properties = props; + + return 0; +} + +static void software_node_release(struct kobject *kobj) +{ + struct software_node *swnode = kobj_to_swnode(kobj); + + if (swnode->parent) { + ida_simple_remove(&swnode->parent->child_ids, swnode->id); + list_del(&swnode->entry); + } else { + ida_simple_remove(&swnode_root_ids, swnode->id); + } + + ida_destroy(&swnode->child_ids); + property_entries_free(swnode->properties); + kfree(swnode); +} + +static struct kobj_type software_node_type = { + .release = software_node_release, + .sysfs_ops = &kobj_sysfs_ops, +}; + +struct fwnode_handle * +fwnode_create_software_node(const struct property_entry *properties, + const struct fwnode_handle *parent) +{ + struct software_node *p = NULL; + struct software_node *swnode; + int ret; + + if (parent) { + if (IS_ERR(parent)) + return ERR_CAST(parent); + if (!is_software_node(parent)) + return ERR_PTR(-EINVAL); + p = to_software_node(parent); + } + + swnode = kzalloc(sizeof(*swnode), GFP_KERNEL); + if (!swnode) + return ERR_PTR(-ENOMEM); + + ret = ida_simple_get(p ? &p->child_ids : &swnode_root_ids, 0, 0, + GFP_KERNEL); + if (ret < 0) { + kfree(swnode); + return ERR_PTR(ret); + } + + swnode->id = ret; + swnode->kobj.kset = swnode_kset; + swnode->fwnode.ops = &software_node_ops; + + ida_init(&swnode->child_ids); + INIT_LIST_HEAD(&swnode->entry); + INIT_LIST_HEAD(&swnode->children); + swnode->parent = p; + + if (p) + list_add_tail(&swnode->entry, &p->children); + + ret = kobject_init_and_add(&swnode->kobj, &software_node_type, + p ? &p->kobj : NULL, "node%d", swnode->id); + if (ret) { + kobject_put(&swnode->kobj); + return ERR_PTR(ret); + } + + ret = software_node_register_properties(swnode, properties); + if (ret) { + kobject_put(&swnode->kobj); + return ERR_PTR(ret); + } + + kobject_uevent(&swnode->kobj, KOBJ_ADD); + return &swnode->fwnode; +} +EXPORT_SYMBOL_GPL(fwnode_create_software_node); + +void fwnode_remove_software_node(struct fwnode_handle *fwnode) +{ + struct software_node *swnode = to_software_node(fwnode); + + if (!swnode) + return; + + kobject_put(&swnode->kobj); +} +EXPORT_SYMBOL_GPL(fwnode_remove_software_node); + +int software_node_notify(struct device *dev, unsigned long action) +{ + struct fwnode_handle *fwnode = dev_fwnode(dev); + struct software_node *swnode; + int ret; + + if (!fwnode) + return 0; + + if (!is_software_node(fwnode)) + fwnode = fwnode->secondary; + if (!is_software_node(fwnode)) + return 0; + + swnode = to_software_node(fwnode); + + switch (action) { + case KOBJ_ADD: + ret = sysfs_create_link(&dev->kobj, &swnode->kobj, + "software_node"); + if (ret) + break; + + ret = sysfs_create_link(&swnode->kobj, &dev->kobj, + dev_name(dev)); + if (ret) { + sysfs_remove_link(&dev->kobj, "software_node"); + break; + } + kobject_get(&swnode->kobj); + break; + case KOBJ_REMOVE: + sysfs_remove_link(&swnode->kobj, dev_name(dev)); + sysfs_remove_link(&dev->kobj, "software_node"); + kobject_put(&swnode->kobj); + break; + default: + break; + } + + return 0; +} + +static int __init software_node_init(void) +{ + swnode_kset = kset_create_and_add("software_nodes", NULL, kernel_kobj); + if (!swnode_kset) + return -ENOMEM; + return 0; +} +postcore_initcall(software_node_init); + +static void __exit software_node_exit(void) +{ + ida_destroy(&swnode_root_ids); + kset_unregister(swnode_kset); +} +__exitcall(software_node_exit); diff --git a/drivers/base/test/test_async_driver_probe.c 
b/drivers/base/test/test_async_driver_probe.c index e7f145d662f0..f4b1d8e54daf 100644 --- a/drivers/base/test/test_async_driver_probe.c +++ b/drivers/base/test/test_async_driver_probe.c @@ -11,16 +11,47 @@ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/time.h> +#include <linux/numa.h> +#include <linux/nodemask.h> +#include <linux/topology.h> #define TEST_PROBE_DELAY (5 * 1000) /* 5 sec */ #define TEST_PROBE_THRESHOLD (TEST_PROBE_DELAY / 2) +static atomic_t warnings, errors, timeout, async_completed; + static int test_probe(struct platform_device *pdev) { - dev_info(&pdev->dev, "sleeping for %d msecs in probe\n", - TEST_PROBE_DELAY); - msleep(TEST_PROBE_DELAY); - dev_info(&pdev->dev, "done sleeping\n"); + struct device *dev = &pdev->dev; + + /* + * Determine if we have hit the "timeout" limit for the test; if we + * have, then report it as an error, otherwise we will sleep for the + * required amount of time and then report completion. + */ + if (atomic_read(&timeout)) { + dev_err(dev, "async probe took too long\n"); + atomic_inc(&errors); + } else { + dev_dbg(&pdev->dev, "sleeping for %d msecs in probe\n", + TEST_PROBE_DELAY); + msleep(TEST_PROBE_DELAY); + dev_dbg(&pdev->dev, "done sleeping\n"); + } + + /* + * Report NUMA mismatch if device node is set and we are not + * performing an async init on that node. + */ + if (dev->driver->probe_type == PROBE_PREFER_ASYNCHRONOUS) { + if (dev_to_node(dev) != numa_node_id()) { + dev_warn(dev, "NUMA node mismatch %d != %d\n", + dev_to_node(dev), numa_node_id()); + atomic_inc(&warnings); + } + + atomic_inc(&async_completed); + } return 0; } @@ -41,31 +72,64 @@ static struct platform_driver sync_driver = { .probe = test_probe, }; -static struct platform_device *async_dev_1, *async_dev_2; -static struct platform_device *sync_dev_1; +static struct platform_device *async_dev[NR_CPUS * 2]; +static struct platform_device *sync_dev[2]; + +static struct platform_device * +test_platform_device_register_node(char *name, int id, int nid) +{ + struct platform_device *pdev; + int ret; + + pdev = platform_device_alloc(name, id); + if (!pdev) + return NULL; + + if (nid != NUMA_NO_NODE) + set_dev_node(&pdev->dev, nid); + + ret = platform_device_add(pdev); + if (ret) { + platform_device_put(pdev); + return ERR_PTR(ret); + } + + return pdev; + +} static int __init test_async_probe_init(void) { - ktime_t calltime, delta; + struct platform_device **pdev = NULL; + int async_id = 0, sync_id = 0; unsigned long long duration; - int error; + ktime_t calltime, delta; + int err, nid, cpu; + + pr_info("registering first set of asynchronous devices...\n"); - pr_info("registering first asynchronous device...\n"); + for_each_online_cpu(cpu) { + nid = cpu_to_node(cpu); + pdev = &async_dev[async_id]; + *pdev = test_platform_device_register_node("test_async_driver", + async_id, + nid); + if (IS_ERR(*pdev)) { + err = PTR_ERR(*pdev); + *pdev = NULL; + pr_err("failed to create async_dev: %d\n", err); + goto err_unregister_async_devs; + } - async_dev_1 = platform_device_register_simple("test_async_driver", 1, - NULL, 0); - if (IS_ERR(async_dev_1)) { - error = PTR_ERR(async_dev_1); - pr_err("failed to create async_dev_1: %d\n", error); - return error; + async_id++; } pr_info("registering asynchronous driver...\n"); calltime = ktime_get(); - error = platform_driver_register(&async_driver); - if (error) { - pr_err("Failed to register async_driver: %d\n", error); - goto err_unregister_async_dev_1; + err = platform_driver_register(&async_driver); + if (err) {
pr_err("Failed to register async_driver: %d\n", err); + goto err_unregister_async_devs; } delta = ktime_sub(ktime_get(), calltime); @@ -73,86 +137,163 @@ static int __init test_async_probe_init(void) pr_info("registration took %lld msecs\n", duration); if (duration > TEST_PROBE_THRESHOLD) { pr_err("test failed: probe took too long\n"); - error = -ETIMEDOUT; + err = -ETIMEDOUT; goto err_unregister_async_driver; } - pr_info("registering second asynchronous device...\n"); + pr_info("registering second set of asynchronous devices...\n"); calltime = ktime_get(); - async_dev_2 = platform_device_register_simple("test_async_driver", 2, - NULL, 0); - if (IS_ERR(async_dev_2)) { - error = PTR_ERR(async_dev_2); - pr_err("failed to create async_dev_2: %d\n", error); - goto err_unregister_async_driver; + for_each_online_cpu(cpu) { + nid = cpu_to_node(cpu); + pdev = &async_dev[async_id]; + + *pdev = test_platform_device_register_node("test_async_driver", + async_id, + nid); + if (IS_ERR(*pdev)) { + err = PTR_ERR(*pdev); + *pdev = NULL; + pr_err("failed to create async_dev: %d\n", err); + goto err_unregister_async_driver; + } + + async_id++; } delta = ktime_sub(ktime_get(), calltime); duration = (unsigned long long) ktime_to_ms(delta); - pr_info("registration took %lld msecs\n", duration); + dev_info(&(*pdev)->dev, + "registration took %lld msecs\n", duration); if (duration > TEST_PROBE_THRESHOLD) { - pr_err("test failed: probe took too long\n"); - error = -ETIMEDOUT; - goto err_unregister_async_dev_2; + dev_err(&(*pdev)->dev, + "test failed: probe took too long\n"); + err = -ETIMEDOUT; + goto err_unregister_async_driver; } - pr_info("registering synchronous driver...\n"); - error = platform_driver_register(&sync_driver); - if (error) { - pr_err("Failed to register async_driver: %d\n", error); - goto err_unregister_async_dev_2; + pr_info("registering first synchronous device...\n"); + nid = cpu_to_node(cpu); + pdev = &sync_dev[sync_id]; + + *pdev = test_platform_device_register_node("test_sync_driver", + sync_id, + NUMA_NO_NODE); + if (IS_ERR(*pdev)) { + err = PTR_ERR(*pdev); + *pdev = NULL; + pr_err("failed to create sync_dev: %d\n", err); + goto err_unregister_async_driver; } - pr_info("registering synchronous device...\n"); + sync_id++; + + pr_info("registering synchronous driver...\n"); calltime = ktime_get(); - sync_dev_1 = platform_device_register_simple("test_sync_driver", 1, - NULL, 0); - if (IS_ERR(sync_dev_1)) { - error = PTR_ERR(sync_dev_1); - pr_err("failed to create sync_dev_1: %d\n", error); - goto err_unregister_sync_driver; + err = platform_driver_register(&sync_driver); + if (err) { + pr_err("Failed to register sync_driver: %d\n", err); + goto err_unregister_sync_devs; } delta = ktime_sub(ktime_get(), calltime); duration = (unsigned long long) ktime_to_ms(delta); pr_info("registration took %lld msecs\n", duration); if (duration < TEST_PROBE_THRESHOLD) { - pr_err("test failed: probe was too quick\n"); - error = -ETIMEDOUT; - goto err_unregister_sync_dev_1; + dev_err(&(*pdev)->dev, + "test failed: probe was too quick\n"); + err = -ETIMEDOUT; + goto err_unregister_sync_driver; } - pr_info("completed successfully"); + pr_info("registering second synchronous device...\n"); + pdev = &sync_dev[sync_id]; + calltime = ktime_get(); - return 0; + *pdev = test_platform_device_register_node("test_sync_driver", + sync_id, + NUMA_NO_NODE); + if (IS_ERR(*pdev)) { + err = PTR_ERR(*pdev); + *pdev = NULL; + pr_err("failed to create sync_dev: %d\n", err); + goto err_unregister_sync_driver; + }
-err_unregister_sync_dev_1: - platform_device_unregister(sync_dev_1); + sync_id++; -err_unregister_sync_driver: - platform_driver_unregister(&sync_driver); + delta = ktime_sub(ktime_get(), calltime); + duration = (unsigned long long) ktime_to_ms(delta); + dev_info(&(*pdev)->dev, + "registration took %lld msecs\n", duration); + if (duration < TEST_PROBE_THRESHOLD) { + dev_err(&(*pdev)->dev, + "test failed: probe was too quick\n"); + err = -ETIMEDOUT; + goto err_unregister_sync_driver; + } -err_unregister_async_dev_2: - platform_device_unregister(async_dev_2); + /* + * The async events should have completed while we were taking care + * of the synchronous events. We will now terminate any outstanding + * asynchronous probe calls remaining by forcing timeout and remove + * the driver before we return, which should force the flush of the + * pending asynchronous probe calls. + * + * Otherwise, if they completed without errors or warnings then + * report successful completion. + */ + if (atomic_read(&async_completed) != async_id) { + pr_err("async events still pending, forcing timeout\n"); + atomic_inc(&timeout); + err = -ETIMEDOUT; + } else if (!atomic_read(&errors) && !atomic_read(&warnings)) { + pr_info("completed successfully\n"); + return 0; + } +err_unregister_sync_driver: + platform_driver_unregister(&sync_driver); +err_unregister_sync_devs: + while (sync_id--) + platform_device_unregister(sync_dev[sync_id]); err_unregister_async_driver: platform_driver_unregister(&async_driver); +err_unregister_async_devs: + while (async_id--) + platform_device_unregister(async_dev[async_id]); + + /* + * If err is already set then count that as an additional error for + * the test. Otherwise we will report an invalid argument error and + * not count it, as we should only have reached this point as a + * result of errors or warnings being reported by the probe routine. + */ + if (err) + atomic_inc(&errors); + else + err = -EINVAL; -err_unregister_async_dev_1: - platform_device_unregister(async_dev_1); + pr_err("Test failed with %d errors and %d warnings\n", + atomic_read(&errors), atomic_read(&warnings)); - return error; + return err; } module_init(test_async_probe_init); static void __exit test_async_probe_exit(void) { + int id = 2; + platform_driver_unregister(&async_driver); platform_driver_unregister(&sync_driver); - platform_device_unregister(async_dev_1); - platform_device_unregister(async_dev_2); - platform_device_unregister(sync_dev_1); + + while (id--) + platform_device_unregister(sync_dev[id]); + + id = NR_CPUS * 2; + while (id--) + platform_device_unregister(async_dev[id]);  } module_exit(test_async_probe_exit);
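Illustrative usage of the reworked property API (an editor's sketch, not part of the diff; PROPERTY_ENTRY_U32()/PROPERTY_ENTRY_STRING() are the existing helpers from <linux/property.h>, and the property names here are made up): the calling convention of device_add_properties() is unchanged, but the entries now back a software node set as the device's secondary firmware node instead of a property_set.

	static const struct property_entry board_props[] = {
		PROPERTY_ENTRY_U32("example,sample-rate", 48000),
		PROPERTY_ENTRY_STRING("label", "codec"),
		{ }	/* terminating entry */
	};

	err = device_add_properties(dev, board_props);
	if (err)
		return err;
	/* ... */
	device_remove_properties(dev);	/* drops the backing software node */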

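Similarly, a hedged sketch of the __pm_runtime_set_status() rework earlier in this diff (hypothetical consumer driver; pm_runtime_set_active() is the stock wrapper for __pm_runtime_set_status(dev, RPM_ACTIVE)): the usual probe-time pattern is unchanged, while the core now activates the DL_FLAG_PM_RUNTIME suppliers upfront before the status is set.

	/* hardware was already powered up by the driver */
	error = pm_runtime_set_active(dev);	/* activates suppliers first */
	if (error)
		return error;
	pm_runtime_enable(dev);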