From c3719bd9eeb2edf84bd263d662e36ca0ba262a23 Mon Sep 17 00:00:00 2001
From: Pierre Gondois
Date: Wed, 4 Jan 2023 19:30:24 +0100
Subject: cacheinfo: Use RISC-V's init_cache_level() as generic OF implementation

RISC-V's implementation of init_of_cache_level() is following the
Devicetree Specification v0.3 regarding caches, cf.:
- s3.7.3 'Internal (L1) Cache Properties'
- s3.8 'Multi-level and Shared Cache Nodes'

Allow reusing the implementation by moving it.
Also make 'levels', 'leaves' and 'level' unsigned int.

Signed-off-by: Pierre Gondois
Reviewed-by: Conor Dooley
Acked-by: Palmer Dabbelt
Link: https://lore.kernel.org/r/20230104183033.755668-2-pierre.gondois@arm.com
Signed-off-by: Sudeep Holla
---
 include/linux/cacheinfo.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'include/linux/cacheinfo.h')

diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
index 00b7a6ae8617..ff0328f3fbb0 100644
--- a/include/linux/cacheinfo.h
+++ b/include/linux/cacheinfo.h
@@ -80,6 +80,7 @@ struct cpu_cacheinfo {
 
 struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu);
 int init_cache_level(unsigned int cpu);
+int init_of_cache_level(unsigned int cpu);
 int populate_cache_leaves(unsigned int cpu);
 int cache_setup_acpi(unsigned int cpu);
 bool last_level_cache_is_valid(unsigned int cpu);
--
cgit v1.2.3
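As background for the move, here is a rough sketch (illustrative only, not the
code being moved; the function name and the exact counting policy are made up
for the example) of the kind of Devicetree walk a generic init_of_cache_level()
performs, following the spec sections cited above:

#include <linux/errno.h>
#include <linux/of.h>

/* Illustrative only: count cache levels by following "next-level-cache". */
static int example_of_count_cache_levels(unsigned int cpu)
{
	struct device_node *np = of_get_cpu_node(cpu, NULL);
	unsigned int levels = 0;
	u32 level;

	if (!np)
		return -ENOENT;

	while (np) {
		struct device_node *prev = np;

		if (!of_property_read_u32(np, "cache-level", &level)) {
			/* s3.8: higher-level cache nodes carry "cache-level" */
			if (level > levels)
				levels = level;
		} else {
			/* s3.7.3: the CPU node describes its internal (L1) cache */
			levels++;
		}

		np = of_find_next_cache_node(np);
		of_node_put(prev);
	}

	return levels;
}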
From bd500361a937c03a3da57178287ce543c8f3681b Mon Sep 17 00:00:00 2001
From: Pierre Gondois
Date: Wed, 4 Jan 2023 19:30:28 +0100
Subject: ACPI: PPTT: Update acpi_find_last_cache_level() to acpi_get_cache_info()

acpi_find_last_cache_level() finds the last level of cache for a given
CPU. The function is only called on arm64 ACPI-based platforms to check
for cache information that would be missing in the CLIDR_EL1 register.

To allow populating (struct cpu_cacheinfo).num_leaves by only parsing a
PPTT, update acpi_find_last_cache_level() to also get the
'split_levels', i.e. the number of cache levels that are split into
data/instruction caches.

It is assumed that there will not be data/instruction caches above a
unified cache. If a split level consists of one data cache and no
instruction cache (or vice versa), then the missing cache will still be
populated by default with minimal cache information and a maximal
cpumask (all non-existing caches have the same fw_token).

Suggested-by: Jeremy Linton
Signed-off-by: Pierre Gondois
Reviewed-by: Jeremy Linton
Acked-by: Rafael J. Wysocki
Acked-by: Palmer Dabbelt
Link: https://lore.kernel.org/r/20230104183033.755668-6-pierre.gondois@arm.com
Signed-off-by: Sudeep Holla
---
 arch/arm64/kernel/cacheinfo.c | 11 ++++---
 drivers/acpi/pptt.c           | 76 ++++++++++++++++++++++++++++---------------
 include/linux/cacheinfo.h     |  9 +++--
 3 files changed, 63 insertions(+), 33 deletions(-)

(limited to 'include/linux/cacheinfo.h')

diff --git a/arch/arm64/kernel/cacheinfo.c b/arch/arm64/kernel/cacheinfo.c
index 97c42be71338..36c3b07cdf2d 100644
--- a/arch/arm64/kernel/cacheinfo.c
+++ b/arch/arm64/kernel/cacheinfo.c
@@ -46,7 +46,7 @@ static void ci_leaf_init(struct cacheinfo *this_leaf,
 int init_cache_level(unsigned int cpu)
 {
 	unsigned int ctype, level, leaves;
-	int fw_level;
+	int fw_level, ret;
 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
 
 	for (level = 1, leaves = 0; level <= MAX_CACHE_LEVEL; level++) {
@@ -59,10 +59,13 @@ int init_cache_level(unsigned int cpu)
 		leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
 	}
 
-	if (acpi_disabled)
+	if (acpi_disabled) {
 		fw_level = of_find_last_cache_level(cpu);
-	else
-		fw_level = acpi_find_last_cache_level(cpu);
+	} else {
+		ret = acpi_get_cache_info(cpu, &fw_level, NULL);
+		if (ret < 0)
+			return ret;
+	}
 
 	if (fw_level < 0)
 		return fw_level;
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index 97c1d33822d1..10975bb603fb 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -81,6 +81,7 @@ static inline bool acpi_pptt_match_type(int table_type, int type)
  * acpi_pptt_walk_cache() - Attempt to find the requested acpi_pptt_cache
  * @table_hdr: Pointer to the head of the PPTT table
  * @local_level: passed res reflects this cache level
+ * @split_levels: Number of split cache levels (data/instruction).
  * @res: cache resource in the PPTT we want to walk
  * @found: returns a pointer to the requested level if found
  * @level: the requested cache level
@@ -100,6 +101,7 @@ static inline bool acpi_pptt_match_type(int table_type, int type)
  */
 static unsigned int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr,
 					 unsigned int local_level,
+					 unsigned int *split_levels,
 					 struct acpi_subtable_header *res,
 					 struct acpi_pptt_cache **found,
 					 unsigned int level, int type)
@@ -113,8 +115,17 @@ static unsigned int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr,
 	while (cache) {
 		local_level++;
 
+		if (!(cache->flags & ACPI_PPTT_CACHE_TYPE_VALID)) {
+			cache = fetch_pptt_cache(table_hdr, cache->next_level_of_cache);
+			continue;
+		}
+
+		if (split_levels &&
+		    (acpi_pptt_match_type(cache->attributes, ACPI_PPTT_CACHE_TYPE_DATA) ||
+		     acpi_pptt_match_type(cache->attributes, ACPI_PPTT_CACHE_TYPE_INSTR)))
+			*split_levels = local_level;
+
 		if (local_level == level &&
-		    cache->flags & ACPI_PPTT_CACHE_TYPE_VALID &&
 		    acpi_pptt_match_type(cache->attributes, type)) {
 			if (*found != NULL && cache != *found)
 				pr_warn("Found duplicate cache level/type unable to determine uniqueness\n");
@@ -135,8 +146,8 @@ static unsigned int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr,
 static struct acpi_pptt_cache *
 acpi_find_cache_level(struct acpi_table_header *table_hdr,
 		      struct acpi_pptt_processor *cpu_node,
-		      unsigned int *starting_level, unsigned int level,
-		      int type)
+		      unsigned int *starting_level, unsigned int *split_levels,
+		      unsigned int level, int type)
 {
 	struct acpi_subtable_header *res;
 	unsigned int number_of_levels = *starting_level;
@@ -149,7 +160,8 @@ acpi_find_cache_level(struct acpi_table_header *table_hdr,
 		resource++;
 
 		local_level = acpi_pptt_walk_cache(table_hdr, *starting_level,
-						   res, &ret, level, type);
+						   split_levels, res, &ret,
+						   level, type);
 		/*
 		 * we are looking for the max depth. Since its potentially
 		 * possible for a given node to have resources with differing
@@ -165,29 +177,29 @@ acpi_find_cache_level(struct acpi_table_header *table_hdr,
 }
 
 /**
- * acpi_count_levels() - Given a PPTT table, and a CPU node, count the caches
+ * acpi_count_levels() - Given a PPTT table, and a CPU node, count the cache
+ *			 levels and split cache levels (data/instruction).
  * @table_hdr: Pointer to the head of the PPTT table
  * @cpu_node: processor node we wish to count caches for
+ * @levels: Number of levels if success.
+ * @split_levels: Number of split cache levels (data/instruction) if
+ *		  success. Can by NULL.
  *
 * Given a processor node containing a processing unit, walk into it and count
 * how many levels exist solely for it, and then walk up each level until we hit
 * the root node (ignore the package level because it may be possible to have
- * caches that exist across packages). Count the number of cache levels that
- * exist at each level on the way up.
- *
- * Return: Total number of levels found.
+ * caches that exist across packages). Count the number of cache levels and
+ * split cache levels (data/instruction) that exist at each level on the way
+ * up.
 */
-static int acpi_count_levels(struct acpi_table_header *table_hdr,
-			     struct acpi_pptt_processor *cpu_node)
+static void acpi_count_levels(struct acpi_table_header *table_hdr,
+			      struct acpi_pptt_processor *cpu_node,
+			      unsigned int *levels, unsigned int *split_levels)
 {
-	int total_levels = 0;
-
 	do {
-		acpi_find_cache_level(table_hdr, cpu_node, &total_levels, 0, 0);
+		acpi_find_cache_level(table_hdr, cpu_node, levels, split_levels, 0, 0);
 		cpu_node = fetch_pptt_node(table_hdr, cpu_node->parent);
 	} while (cpu_node);
-
-	return total_levels;
 }
 
 /**
@@ -321,7 +333,7 @@ static struct acpi_pptt_cache *acpi_find_cache_node(struct acpi_table_header *ta
 
 	while (cpu_node && !found) {
 		found = acpi_find_cache_level(table_hdr, cpu_node,
-					      &total_levels, level, acpi_type);
+					      &total_levels, NULL, level, acpi_type);
 		*node = cpu_node;
 		cpu_node = fetch_pptt_node(table_hdr, cpu_node->parent);
 	}
@@ -589,36 +601,48 @@ static int check_acpi_cpu_flag(unsigned int cpu, int rev, u32 flag)
 }
 
 /**
- * acpi_find_last_cache_level() - Determines the number of cache levels for a PE
+ * acpi_get_cache_info() - Determine the number of cache levels and
+ *			   split cache levels (data/instruction) and for a PE.
 * @cpu: Kernel logical CPU number
+ * @levels: Number of levels if success.
+ * @split_levels: Number of levels being split (i.e. data/instruction)
+ *		  if success. Can by NULL.
 *
 * Given a logical CPU number, returns the number of levels of cache represented
 * in the PPTT. Errors caused by lack of a PPTT table, or otherwise, return 0
 * indicating we didn't find any cache levels.
 *
- * Return: Cache levels visible to this core.
+ * Return: -ENOENT if no PPTT table or no PPTT processor struct found.
+ *	   0 on success.
 */
-int acpi_find_last_cache_level(unsigned int cpu)
+int acpi_get_cache_info(unsigned int cpu, unsigned int *levels,
+			unsigned int *split_levels)
 {
 	struct acpi_pptt_processor *cpu_node;
 	struct acpi_table_header *table;
-	int number_of_levels = 0;
 	u32 acpi_cpu_id;
 
+	*levels = 0;
+	if (split_levels)
+		*split_levels = 0;
+
 	table = acpi_get_pptt();
 	if (!table)
 		return -ENOENT;
 
-	pr_debug("Cache Setup find last level CPU=%d\n", cpu);
+	pr_debug("Cache Setup: find cache levels for CPU=%d\n", cpu);
 
 	acpi_cpu_id = get_acpi_id_for_cpu(cpu);
 	cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
-	if (cpu_node)
-		number_of_levels = acpi_count_levels(table, cpu_node);
+	if (!cpu_node)
+		return -ENOENT;
 
-	pr_debug("Cache Setup find last level level=%d\n", number_of_levels);
+	acpi_count_levels(table, cpu_node, levels, split_levels);
 
-	return number_of_levels;
+	pr_debug("Cache Setup: last_level=%d split_levels=%d\n",
+		 *levels, split_levels ? *split_levels : -1);
+
+	return 0;
 }
 
 /**
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
index ff0328f3fbb0..00d8e7f9d1c6 100644
--- a/include/linux/cacheinfo.h
+++ b/include/linux/cacheinfo.h
@@ -88,19 +88,22 @@ bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y);
 int detect_cache_attributes(unsigned int cpu);
 #ifndef CONFIG_ACPI_PPTT
 /*
- * acpi_find_last_cache_level is only called on ACPI enabled
+ * acpi_get_cache_info() is only called on ACPI enabled
  * platforms using the PPTT for topology. This means that if
  * the platform supports other firmware configuration methods
  * we need to stub out the call when ACPI is disabled.
  * ACPI enabled platforms not using PPTT won't be making calls
  * to this function so we need not worry about them.
  */
-static inline int acpi_find_last_cache_level(unsigned int cpu)
+static inline
+int acpi_get_cache_info(unsigned int cpu,
+			unsigned int *levels, unsigned int *split_levels)
 {
 	return 0;
 }
 #else
-int acpi_find_last_cache_level(unsigned int cpu);
+int acpi_get_cache_info(unsigned int cpu,
+			unsigned int *levels, unsigned int *split_levels);
 #endif
 
 const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf);
--
cgit v1.2.3
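A minimal sketch of how a caller is expected to consume the new interface (the
helper name is invented for the example; the arithmetic simply restates the
pairing assumption above, and a later patch in this series does the same in
fetch_cache_info()):

#include <linux/cacheinfo.h>

/* Illustrative only: derive the number of cache leaves from a PPTT. */
static int example_count_pptt_leaves(unsigned int cpu, unsigned int *num_leaves)
{
	unsigned int levels, split_levels;
	int ret;

	ret = acpi_get_cache_info(cpu, &levels, &split_levels);
	if (ret < 0)
		return ret;

	/*
	 * Each split level contributes a data leaf and an instruction leaf;
	 * every other level contributes a single unified leaf.
	 */
	*num_leaves = levels + split_levels;

	return 0;
}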
From 5944ce092b97caed5d86d961e963b883b5c44ee2 Mon Sep 17 00:00:00 2001
From: Pierre Gondois
Date: Wed, 4 Jan 2023 19:30:29 +0100
Subject: arch_topology: Build cacheinfo from primary CPU

commit 3fcbf1c77d08 ("arch_topology: Fix cache attributes detection
in the CPU hotplug path") adds a call to detect_cache_attributes()
to populate the cacheinfo before updating the siblings mask.
detect_cache_attributes() allocates memory and can take the PPTT
mutex (on ACPI platforms). On PREEMPT_RT kernels, on secondary CPUs,
this triggers a:
  'BUG: sleeping function called from invalid context' [1]
as the code is executed with preemption and interrupts disabled.

The primary CPU was previously storing the cache information using
the now removed (struct cpu_topology).llc_id:
commit 5b8dc787ce4a ("arch_topology: Drop LLC identifier stash from
the CPU topology")

allocate_cache_info() tries to build the cacheinfo from the primary
CPU prior to secondary CPUs booting, if the DT/ACPI description
contains cache information. If allocate_cache_info() fails, then fall
back to the current state for the cacheinfo allocation. [1] will be
triggered in such a case.

When unplugging a CPU, the cacheinfo memory cannot be freed. If it
was, then the memory would be allocated early by the re-plugged CPU
and would trigger [1].

Note that populate_cache_leaves() might be called multiple times
due to populate_leaves being moved up. This is required since
detect_cache_attributes() might be called with per_cpu_cacheinfo(cpu)
being allocated but not populated.

[1]:
| BUG: sleeping function called from invalid context at kernel/locking/spinlock_rt.c:46
| in_atomic(): 1, irqs_disabled(): 128, non_block: 0, pid: 0, name: swapper/111
| preempt_count: 1, expected: 0
| RCU nest depth: 1, expected: 1
| 3 locks held by swapper/111/0:
|  #0: (&pcp->lock){+.+.}-{3:3}, at: get_page_from_freelist+0x218/0x12c8
|  #1: (rcu_read_lock){....}-{1:3}, at: rt_spin_trylock+0x48/0xf0
|  #2: (&zone->lock){+.+.}-{3:3}, at: rmqueue_bulk+0x64/0xa80
| irq event stamp: 0
| hardirqs last enabled at (0): 0x0
| hardirqs last disabled at (0): copy_process+0x5dc/0x1ab8
| softirqs last enabled at (0): copy_process+0x5dc/0x1ab8
| softirqs last disabled at (0): 0x0
| Preemption disabled at:
| migrate_enable+0x30/0x130
| CPU: 111 PID: 0 Comm: swapper/111 Tainted: G W 6.0.0-rc4-rt6-[...]
| Call trace:
|  __kmalloc+0xbc/0x1e8
|  detect_cache_attributes+0x2d4/0x5f0
|  update_siblings_masks+0x30/0x368
|  store_cpu_topology+0x78/0xb8
|  secondary_start_kernel+0xd0/0x198
|  __secondary_switched+0xb0/0xb4

Signed-off-by: Pierre Gondois
Reviewed-by: Sudeep Holla
Acked-by: Palmer Dabbelt
Link: https://lore.kernel.org/r/20230104183033.755668-7-pierre.gondois@arm.com
Signed-off-by: Sudeep Holla
---
 arch/riscv/kernel/cacheinfo.c |  5 ---
 drivers/base/arch_topology.c  | 12 ++++++--
 drivers/base/cacheinfo.c      | 71 ++++++++++++++++++++++++++++-----------
 include/linux/cacheinfo.h     |  1 +
 4 files changed, 65 insertions(+), 24 deletions(-)

(limited to 'include/linux/cacheinfo.h')

diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c
index 440a3df5944c..3a13113f1b29 100644
--- a/arch/riscv/kernel/cacheinfo.c
+++ b/arch/riscv/kernel/cacheinfo.c
@@ -113,11 +113,6 @@ static void fill_cacheinfo(struct cacheinfo **this_leaf,
 	}
 }
 
-int init_cache_level(unsigned int cpu)
-{
-	return init_of_cache_level(cpu);
-}
-
 int populate_cache_leaves(unsigned int cpu)
 {
 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index e7d6e6657ffa..b1c1dd38ab01 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -736,7 +736,7 @@ void update_siblings_masks(unsigned int cpuid)
 
 	ret = detect_cache_attributes(cpuid);
 	if (ret && ret != -ENOENT)
-		pr_info("Early cacheinfo failed, ret = %d\n", ret);
+		pr_info("Early cacheinfo allocation failed, ret = %d\n", ret);
 
 	/* update core and thread sibling masks */
 	for_each_online_cpu(cpu) {
@@ -825,7 +825,7 @@ __weak int __init parse_acpi_topology(void)
 #if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
 void __init init_cpu_topology(void)
 {
-	int ret;
+	int cpu, ret;
 
 	reset_cpu_topology();
 	ret = parse_acpi_topology();
@@ -840,6 +840,14 @@ void __init init_cpu_topology(void)
 		reset_cpu_topology();
 		return;
 	}
+
+	for_each_possible_cpu(cpu) {
+		ret = fetch_cache_info(cpu);
+		if (ret) {
+			pr_err("Early cacheinfo failed, ret = %d\n", ret);
+			break;
+		}
+	}
 }
 
 void store_cpu_topology(unsigned int cpuid)
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 9ad4403f2dab..c9bcecf76c7f 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -389,10 +389,6 @@ static void free_cache_attributes(unsigned int cpu)
 		return;
 
 	cache_shared_cpu_map_remove(cpu);
-
-	kfree(per_cpu_cacheinfo(cpu));
-	per_cpu_cacheinfo(cpu) = NULL;
-	cache_leaves(cpu) = 0;
 }
 
 int __weak init_cache_level(unsigned int cpu)
@@ -405,29 +401,71 @@ int __weak populate_cache_leaves(unsigned int cpu)
 	return -ENOENT;
 }
 
+static inline
+int allocate_cache_info(int cpu)
+{
+	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
+					 sizeof(struct cacheinfo), GFP_ATOMIC);
+	if (!per_cpu_cacheinfo(cpu)) {
+		cache_leaves(cpu) = 0;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+int fetch_cache_info(unsigned int cpu)
+{
+	struct cpu_cacheinfo *this_cpu_ci;
+	unsigned int levels, split_levels;
+	int ret;
+
+	if (acpi_disabled) {
+		ret = init_of_cache_level(cpu);
+		if (ret < 0)
+			return ret;
+	} else {
+		ret = acpi_get_cache_info(cpu, &levels, &split_levels);
+		if (ret < 0)
+			return ret;
+
+		this_cpu_ci = get_cpu_cacheinfo(cpu);
+		this_cpu_ci->num_levels = levels;
+		/*
+		 * This assumes that:
+		 * - there cannot be any split caches (data/instruction)
+		 *   above a unified cache
+		 * - data/instruction caches come by pair
+		 */
+		this_cpu_ci->num_leaves = levels + split_levels;
+	}
+	if (!cache_leaves(cpu))
+		return -ENOENT;
+
+	return allocate_cache_info(cpu);
+}
+
 int detect_cache_attributes(unsigned int cpu)
 {
 	int ret;
 
-	/* Since early detection of the cacheinfo is allowed via this
-	 * function and this also gets called as CPU hotplug callbacks via
-	 * cacheinfo_cpu_online, the initialisation can be skipped and only
-	 * CPU maps can be updated as the CPU online status would be update
-	 * if called via cacheinfo_cpu_online path.
+	/* Since early initialization/allocation of the cacheinfo is allowed
+	 * via fetch_cache_info() and this also gets called as CPU hotplug
+	 * callbacks via cacheinfo_cpu_online, the init/alloc can be skipped
+	 * as it will happen only once (the cacheinfo memory is never freed).
+	 * Just populate the cacheinfo.
 	 */
 	if (per_cpu_cacheinfo(cpu))
-		goto update_cpu_map;
+		goto populate_leaves;
 
 	if (init_cache_level(cpu) || !cache_leaves(cpu))
 		return -ENOENT;
 
-	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
-					 sizeof(struct cacheinfo), GFP_ATOMIC);
-	if (per_cpu_cacheinfo(cpu) == NULL) {
-		cache_leaves(cpu) = 0;
-		return -ENOMEM;
-	}
+	ret = allocate_cache_info(cpu);
+	if (ret)
+		return ret;
 
+populate_leaves:
 	/*
 	 * populate_cache_leaves() may completely setup the cache leaves and
 	 * shared_cpu_map or it may leave it partially setup.
@@ -436,7 +474,6 @@ int detect_cache_attributes(unsigned int cpu)
 	if (ret)
 		goto free_ci;
 
-update_cpu_map:
 	/*
 	 * For systems using DT for cache hierarchy, fw_token
 	 * and shared_cpu_map will be set up here only if they are
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
index 00d8e7f9d1c6..dfef57077cd0 100644
--- a/include/linux/cacheinfo.h
+++ b/include/linux/cacheinfo.h
@@ -85,6 +85,7 @@ int populate_cache_leaves(unsigned int cpu);
 int cache_setup_acpi(unsigned int cpu);
 bool last_level_cache_is_valid(unsigned int cpu);
 bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y);
+int fetch_cache_info(unsigned int cpu);
 int detect_cache_attributes(unsigned int cpu);
 #ifndef CONFIG_ACPI_PPTT
 /*
--
cgit v1.2.3
From d931b83e62b1dd352fc326c0b1cf3be3ef19e113 Mon Sep 17 00:00:00 2001
From: Pierre Gondois
Date: Tue, 24 Jan 2023 16:40:47 +0100
Subject: cacheinfo: Make default acpi_get_cache_info() return an error

commit bd500361a937 ("ACPI: PPTT: Update acpi_find_last_cache_level()
to acpi_get_cache_info()") updates the prototype of
acpi_get_cache_info(). The cache 'levels' is updated through a pointer
and not through the return value of the function.

If CONFIG_ACPI_PPTT is not defined, acpi_get_cache_info() doesn't
update its *levels and *split_levels parameters and returns 0. This
can lead to faulty behaviour.

Make acpi_get_cache_info() return an error code if CONFIG_ACPI_PPTT
is not defined.
Also, in init_cache_level(), if no PPTT is present or CONFIG_ACPI_PPTT
is not defined, continue instead of aborting when acpi_get_cache_info()
returns an error code. This makes it possible to try fetching the cache
information from clidr_el1.

Signed-off-by: Pierre Gondois
Link: https://lore.kernel.org/r/20230124154053.355376-3-pierre.gondois@arm.com
Signed-off-by: Greg Kroah-Hartman
---
 arch/arm64/kernel/cacheinfo.c | 2 +-
 include/linux/cacheinfo.h     | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'include/linux/cacheinfo.h')

diff --git a/arch/arm64/kernel/cacheinfo.c b/arch/arm64/kernel/cacheinfo.c
index 36c3b07cdf2d..91677f4d3395 100644
--- a/arch/arm64/kernel/cacheinfo.c
+++ b/arch/arm64/kernel/cacheinfo.c
@@ -64,7 +64,7 @@ int init_cache_level(unsigned int cpu)
 	} else {
 		ret = acpi_get_cache_info(cpu, &fw_level, NULL);
 		if (ret < 0)
-			return ret;
+			fw_level = 0;
 	}
 
 	if (fw_level < 0)
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
index dfef57077cd0..908e19d17f49 100644
--- a/include/linux/cacheinfo.h
+++ b/include/linux/cacheinfo.h
@@ -100,7 +100,7 @@ static inline
 int acpi_get_cache_info(unsigned int cpu,
 			unsigned int *levels, unsigned int *split_levels)
 {
-	return 0;
+	return -ENOENT;
 }
 #else
 int acpi_get_cache_info(unsigned int cpu,
--
cgit v1.2.3
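A short illustrative sketch of why the stub's return value matters (the caller
name is invented): with the old 'return 0', a kernel built without
CONFIG_ACPI_PPTT would report success while leaving the output parameters
untouched, whereas -ENOENT lets the caller detect the missing PPTT support and
fall back, as the arm64 hunk above does by forcing fw_level to 0:

#include <linux/cacheinfo.h>

/* Illustrative only: never trust 'levels' unless the call succeeded. */
static unsigned int example_cache_levels_or_zero(unsigned int cpu)
{
	unsigned int levels, split_levels;

	if (acpi_get_cache_info(cpu, &levels, &split_levels))
		return 0;	/* no PPTT (or no CONFIG_ACPI_PPTT): fall back */

	return levels;
}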