From 9f605f260594f99b950062fd62244251e85dbd2b Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Thu, 25 Feb 2021 17:16:57 -0800
Subject: mm: move pfn_to_online_page() out of line

Patch series "mm: Fix pfn_to_online_page() with respect to ZONE_DEVICE", v4.

A pfn-walker that uses pfn_to_online_page() may inadvertently translate a
pfn as online and in the page allocator, when it is offline and managed by
a ZONE_DEVICE mapping (details in Patch 3: "mm: Teach pfn_to_online_page()
about ZONE_DEVICE section collisions").

The two proposals under consideration are to teach pfn_to_online_page() to
be precise in the presence of mixed-zone sections, or to teach the
memory-add code to drop the System RAM associated with ZONE_DEVICE
collisions. In order to not regress memory capacity by a few 10s to 100s
of MiB, the approach taken in this set is to add precision to
pfn_to_online_page().

In the course of validating pfn_to_online_page() a couple of other fixes
fell out:

1/ soft_offline_page() fails to drop the reference taken in the
   madvise(..., MADV_SOFT_OFFLINE) case.

2/ memory_failure() uses get_dev_pagemap() to lookup ZONE_DEVICE pages,
   however that mapping may contain data pages and metadata raw pfns.
   Introduce pgmap_pfn_valid() to delineate the two types and fail the
   handling of raw metadata pfns.

This patch (of 4):

pfn_to_online_page() is already too large to be a macro or an inline
function. In anticipation of further logic changes / growth, move it out
of line.

No functional change, just code movement.

Link: https://lkml.kernel.org/r/161058499000.1840162.702316708443239771.stgit@dwillia2-desk3.amr.corp.intel.com
Link: https://lkml.kernel.org/r/161058499608.1840162.10165648147615238793.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams
Reported-by: Michal Hocko
Acked-by: Michal Hocko
Reviewed-by: David Hildenbrand
Reviewed-by: Oscar Salvador
Cc: Naoya Horiguchi
Cc: Qian Cai
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/memory_hotplug.h | 17 +----------------
 1 file changed, 1 insertion(+), 16 deletions(-)

(limited to 'include/linux/memory_hotplug.h')

diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 15acce5ab106..3d99de0db2dd 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -16,22 +16,7 @@ struct resource;
 struct vmem_altmap;
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-/*
- * Return page for the valid pfn only if the page is online. All pfn
- * walkers which rely on the fully initialized page->flags and others
- * should use this rather than pfn_valid && pfn_to_page
- */
-#define pfn_to_online_page(pfn)                                    \
-({                                                                 \
-	struct page *___page = NULL;                               \
-	unsigned long ___pfn = pfn;                                \
-	unsigned long ___nr = pfn_to_section_nr(___pfn);           \
-                                                                   \
-	if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr) && \
-	    pfn_valid_within(___pfn))                              \
-		___page = pfn_to_page(___pfn);                     \
-	___page;                                                   \
-})
+struct page *pfn_to_online_page(unsigned long pfn);
 
 /*
  * Types for free bootmem stored in page->lru.next. These have to be in
--
cgit v1.2.3


From 1adf8b468ff6bc64ba01ce3848da4bcf409215b4 Mon Sep 17 00:00:00 2001
From: Anshuman Khandual
Date: Thu, 25 Feb 2021 17:17:13 -0800
Subject: mm/memory_hotplug: rename all existing 'memhp' into 'mhp'

This renames all 'memhp' instances to 'mhp' except for memhp_default_state,
which is kept as-is because it is a kernel command line option. This is
just a cleanup and should not cause a functional change. Let's make the
prefix consistent rather than mixing the two, in preparation for more
users of the 'mhp' terminology.
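For reference, the kernel command line usage that the lone exception
preserves looks like the following; this is an illustrative sketch added
for context (the accepted values correspond to the online_type_to_str[]
table in drivers/base/memory.c):

    memhp_default_state=online

Any of offline, online, online_kernel or online_movable may be given, and
mhp_online_type_from_str() translates the string into the matching MMOP_*
value.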
Link: https://lkml.kernel.org/r/1611554093-27316-1-git-send-email-anshuman.khandual@arm.com
Signed-off-by: Anshuman Khandual
Suggested-by: David Hildenbrand
Reviewed-by: David Hildenbrand
Cc: Greg Kroah-Hartman
Cc: "Rafael J. Wysocki"
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 drivers/base/memory.c          | 10 +++++-----
 include/linux/memory_hotplug.h |  4 ++--
 mm/memory_hotplug.c            | 12 ++++++------
 3 files changed, 13 insertions(+), 13 deletions(-)

(limited to 'include/linux/memory_hotplug.h')

diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index eef4ffb6122c..901e379676be 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -35,7 +35,7 @@ static const char *const online_type_to_str[] = {
 	[MMOP_ONLINE_MOVABLE] = "online_movable",
 };
 
-int memhp_online_type_from_str(const char *str)
+int mhp_online_type_from_str(const char *str)
 {
 	int i;
 
@@ -253,7 +253,7 @@ static int memory_subsys_offline(struct device *dev)
 static ssize_t state_store(struct device *dev, struct device_attribute *attr,
 			   const char *buf, size_t count)
 {
-	const int online_type = memhp_online_type_from_str(buf);
+	const int online_type = mhp_online_type_from_str(buf);
 	struct memory_block *mem = to_memory_block(dev);
 	int ret;
 
@@ -387,19 +387,19 @@ static ssize_t auto_online_blocks_show(struct device *dev,
 				       struct device_attribute *attr, char *buf)
 {
 	return sysfs_emit(buf, "%s\n",
-			  online_type_to_str[memhp_default_online_type]);
+			  online_type_to_str[mhp_default_online_type]);
 }
 
 static ssize_t auto_online_blocks_store(struct device *dev,
 					struct device_attribute *attr,
 					const char *buf, size_t count)
 {
-	const int online_type = memhp_online_type_from_str(buf);
+	const int online_type = mhp_online_type_from_str(buf);
 
 	if (online_type < 0)
 		return -EINVAL;
 
-	memhp_default_online_type = online_type;
+	mhp_default_online_type = online_type;
 	return count;
 }
 
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 3d99de0db2dd..ca5e8d137726 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -116,10 +116,10 @@ extern int arch_add_memory(int nid, u64 start, u64 size,
 			   struct mhp_params *params);
 extern u64 max_mem_size;
 
-extern int memhp_online_type_from_str(const char *str);
+extern int mhp_online_type_from_str(const char *str);
 
 /* Default online_type (MMOP_*) when new memory blocks are added. */
-extern int memhp_default_online_type;
+extern int mhp_default_online_type;
 
 /* If movable_node boot option specified */
 extern bool movable_node_enabled;
 static inline bool movable_node_is_enabled(void)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 3af4d3851d1a..ac1c686a5989 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -67,17 +67,17 @@ void put_online_mems(void)
 bool movable_node_enabled = false;
 
 #ifndef CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE
-int memhp_default_online_type = MMOP_OFFLINE;
+int mhp_default_online_type = MMOP_OFFLINE;
 #else
-int memhp_default_online_type = MMOP_ONLINE;
+int mhp_default_online_type = MMOP_ONLINE;
 #endif
 
 static int __init setup_memhp_default_state(char *str)
 {
-	const int online_type = memhp_online_type_from_str(str);
+	const int online_type = mhp_online_type_from_str(str);
 
 	if (online_type >= 0)
-		memhp_default_online_type = online_type;
+		mhp_default_online_type = online_type;
 
 	return 1;
 }
@@ -1076,7 +1076,7 @@ static int check_hotplug_memory_range(u64 start, u64 size)
 
 static int online_memory_block(struct memory_block *mem, void *arg)
 {
-	mem->online_type = memhp_default_online_type;
+	mem->online_type = mhp_default_online_type;
 	return device_online(&mem->dev);
 }
 
@@ -1157,7 +1157,7 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
 		merge_system_ram_resource(res);
 
 	/* online pages if requested */
-	if (memhp_default_online_type != MMOP_OFFLINE)
+	if (mhp_default_online_type != MMOP_OFFLINE)
 		walk_memory_blocks(start, size, NULL, online_memory_block);
 
 	return ret;
--
cgit v1.2.3


From 26011267e1a7ddaab50b5f81b402ca3e7fc2887c Mon Sep 17 00:00:00 2001
From: David Hildenbrand
Date: Thu, 25 Feb 2021 17:17:17 -0800
Subject: mm/memory_hotplug: MEMHP_MERGE_RESOURCE -> MHP_MERGE_RESOURCE

Let's make "MEMHP_MERGE_RESOURCE" consistent with "MHP_NONE", "mhp_t" and
"mhp_flags". As discussed recently [1], "mhp" is our internal acronym for
memory hotplug now.

[1] https://lore.kernel.org/linux-mm/c37de2d0-28a1-4f7d-f944-cfd7d81c334d@redhat.com/

Link: https://lkml.kernel.org/r/20210126115829.10909-1-david@redhat.com
Signed-off-by: David Hildenbrand
Reviewed-by: Miaohe Lin
Acked-by: Michael S. Tsirkin
Reviewed-by: Oscar Salvador
Acked-by: Wei Liu
Reviewed-by: Pankaj Gupta
Cc: "K. Y. Srinivasan"
Srinivasan" Cc: Haiyang Zhang Cc: Stephen Hemminger Cc: Jason Wang Cc: Boris Ostrovsky Cc: Juergen Gross Cc: Stefano Stabellini Cc: Michal Hocko Cc: Anshuman Khandual Cc: Wei Yang Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/hv/hv_balloon.c | 2 +- drivers/virtio/virtio_mem.c | 2 +- drivers/xen/balloon.c | 2 +- include/linux/memory_hotplug.h | 2 +- mm/memory_hotplug.c | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include/linux/memory_hotplug.h') diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c index 8c471823a5af..2f776d78e3c1 100644 --- a/drivers/hv/hv_balloon.c +++ b/drivers/hv/hv_balloon.c @@ -726,7 +726,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size, nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn)); ret = add_memory(nid, PFN_PHYS((start_pfn)), - (HA_CHUNK << PAGE_SHIFT), MEMHP_MERGE_RESOURCE); + (HA_CHUNK << PAGE_SHIFT), MHP_MERGE_RESOURCE); if (ret) { pr_err("hot_add memory failed error is %d\n", ret); diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c index 9fc9ec4a25f5..d44e43869f17 100644 --- a/drivers/virtio/virtio_mem.c +++ b/drivers/virtio/virtio_mem.c @@ -623,7 +623,7 @@ static int virtio_mem_add_memory(struct virtio_mem *vm, uint64_t addr, /* Memory might get onlined immediately. */ atomic64_add(size, &vm->offline_size); rc = add_memory_driver_managed(vm->nid, addr, size, vm->resource_name, - MEMHP_MERGE_RESOURCE); + MHP_MERGE_RESOURCE); if (rc) { atomic64_sub(size, &vm->offline_size); dev_warn(&vm->vdev->dev, "adding memory failed: %d\n", rc); diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index b57b2067ecbf..671c71245a7b 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -331,7 +331,7 @@ static enum bp_state reserve_additional_memory(void) mutex_unlock(&balloon_mutex); /* add_memory_resource() requires the device_hotplug lock */ lock_device_hotplug(); - rc = add_memory_resource(nid, resource, MEMHP_MERGE_RESOURCE); + rc = add_memory_resource(nid, resource, MHP_MERGE_RESOURCE); unlock_device_hotplug(); mutex_lock(&balloon_mutex); diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index ca5e8d137726..08eeef679ab7 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -53,7 +53,7 @@ typedef int __bitwise mhp_t; * with this flag set, the resource pointer must no longer be used as it * might be stale, or the resource might have changed. */ -#define MEMHP_MERGE_RESOURCE ((__force mhp_t)BIT(0)) +#define MHP_MERGE_RESOURCE ((__force mhp_t)BIT(0)) /* * Extended parameters for memory hotplug: diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index ac1c686a5989..6a02c3f42717 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1153,7 +1153,7 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags) * In case we're allowed to merge the resource, flag it and trigger * merging now that adding succeeded. */ - if (mhp_flags & MEMHP_MERGE_RESOURCE) + if (mhp_flags & MHP_MERGE_RESOURCE) merge_system_ram_resource(res); /* online pages if requested */ -- cgit v1.2.3 From bca3feaa0764ab5a4cbe6817871601f1d00c059d Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 25 Feb 2021 17:17:33 -0800 Subject: mm/memory_hotplug: prevalidate the address range being added with platform Patch series "mm/memory_hotplug: Pre-validate the address range with platform", v5. 

This series adds a mechanism allowing platforms to weigh in and
prevalidate an incoming address range before proceeding further with
memory hotplug. This helps prevent potential platform errors for the
given address range deeper down the hotplug call chain, errors which
would inevitably fail the hotplug itself. This mechanism was suggested by
David Hildenbrand during another discussion with respect to a memory
hotplug fix on the arm64 platform.

https://lore.kernel.org/linux-arm-kernel/1600332402-30123-1-git-send-email-anshuman.khandual@arm.com/

This mechanism focuses on the addressability aspect and not on the [sub]
section alignment aspect. Hence check_hotplug_memory_range() and
check_pfn_span() have been left unchanged.

This patch (of 4):

This introduces mhp_range_allowed() which can be called in various memory
hotplug paths to prevalidate, with the platform, the address range being
added. mhp_range_allowed() then calls mhp_get_pluggable_range(), which
provides the applicable address range depending on whether a linear
mapping is required or not. For ranges that require a linear mapping, it
calls a new arch callback, arch_get_mappable_range(), which the platform
can override. The new callback thus gives the platform an opportunity to
configure acceptable memory hotplug address ranges in case there are
constraints.

This mechanism will help prevent platform-specific errors deep down
during hotplug calls. It drops the now-redundant
check_hotplug_memory_addressable() check in __add_pages() and instead
adds a VM_BUG_ON() check to ensure that the range has been validated with
mhp_range_allowed() earlier in the call chain. Besides,
mhp_get_pluggable_range() can also be used by potential memory hotplug
callers to query the allowed physical range that would go through on a
given platform.

This does not really add any new range check in generic memory hotplug;
it instead compensates for the lost checks in arch_add_memory() where
applicable, and for check_hotplug_memory_addressable(), with the unified
mhp_range_allowed().

[akpm@linux-foundation.org: make pagemap_range() return -EINVAL when mhp_range_allowed() fails]

Link: https://lkml.kernel.org/r/1612149902-7867-1-git-send-email-anshuman.khandual@arm.com
Link: https://lkml.kernel.org/r/1612149902-7867-2-git-send-email-anshuman.khandual@arm.com
Signed-off-by: Anshuman Khandual
Suggested-by: David Hildenbrand
Reviewed-by: David Hildenbrand
Reviewed-by: Oscar Salvador
Cc: Heiko Carstens
Cc: Catalin Marinas
Cc: Vasily Gorbik	# s390
Cc: Will Deacon
Cc: Ard Biesheuvel
Cc: Mark Rutland
Cc: Jason Wang
Cc: Jonathan Cameron
Cc: "Michael S. Tsirkin"
Cc: Michal Hocko
Cc: Pankaj Gupta
Cc: Pankaj Gupta
Cc: teawater
Cc: Wei Yang
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/memory_hotplug.h | 10 ++++++
 mm/memory_hotplug.c            | 78 ++++++++++++++++++++++++++++++++----------
 mm/memremap.c                  |  8 ++++-
 3 files changed, 76 insertions(+), 20 deletions(-)

(limited to 'include/linux/memory_hotplug.h')

diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 08eeef679ab7..7288aa5ef73b 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -66,6 +66,9 @@ struct mhp_params {
 	pgprot_t pgprot;
 };
 
+bool mhp_range_allowed(u64 start, u64 size, bool need_mapping);
+struct range mhp_get_pluggable_range(bool need_mapping);
+
 /*
  * Zone resizing functions
  *
@@ -266,6 +269,13 @@ static inline bool movable_node_is_enabled(void)
 }
 #endif /* ! CONFIG_MEMORY_HOTPLUG */
 
+/*
+ * Keep this declaration outside CONFIG_MEMORY_HOTPLUG as some
+ * platforms might override and use arch_get_mappable_range()
+ * for internal non memory hotplug purposes.
+ */
+struct range arch_get_mappable_range(void);
+
 #if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
 /*
  * pgdat resizing functions
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index a969463bdda4..5ba51a8bdaeb 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -107,6 +107,9 @@ static struct resource *register_memory_resource(u64 start, u64 size,
 	if (strcmp(resource_name, "System RAM"))
 		flags |= IORESOURCE_SYSRAM_DRIVER_MANAGED;
 
+	if (!mhp_range_allowed(start, size, true))
+		return ERR_PTR(-E2BIG);
+
 	/*
 	 * Make sure value parsed from 'mem=' only restricts memory adding
 	 * while booting, so that memory hotplug won't be impacted. Please
@@ -284,22 +287,6 @@ static int check_pfn_span(unsigned long pfn, unsigned long nr_pages,
 	return 0;
 }
 
-static int check_hotplug_memory_addressable(unsigned long pfn,
-					    unsigned long nr_pages)
-{
-	const u64 max_addr = PFN_PHYS(pfn + nr_pages) - 1;
-
-	if (max_addr >> MAX_PHYSMEM_BITS) {
-		const u64 max_allowed = (1ull << (MAX_PHYSMEM_BITS + 1)) - 1;
-		WARN(1,
-		     "Hotplugged memory exceeds maximum addressable address, range=%#llx-%#llx, maximum=%#llx\n",
-		     (u64)PFN_PHYS(pfn), max_addr, max_allowed);
-		return -E2BIG;
-	}
-
-	return 0;
-}
-
 /*
  * Return page for the valid pfn only if the page is online. All pfn
  * walkers which rely on the fully initialized page->flags and others
@@ -365,9 +352,7 @@ int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
 	if (WARN_ON_ONCE(!params->pgprot.pgprot))
 		return -EINVAL;
 
-	err = check_hotplug_memory_addressable(pfn, nr_pages);
-	if (err)
-		return err;
+	VM_BUG_ON(!mhp_range_allowed(PFN_PHYS(pfn), nr_pages * PAGE_SIZE, false));
 
 	if (altmap) {
 		/*
@@ -1248,6 +1233,61 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(add_memory_driver_managed);
 
+/*
+ * Platforms should define arch_get_mappable_range() that provides
+ * maximum possible addressable physical memory range for which the
+ * linear mapping could be created. The platform returned address
+ * range must adhere to these following semantics.
+ *
+ * - range.start <= range.end
+ * - Range includes both end points [range.start..range.end]
+ *
+ * There is also a fallback definition provided here, allowing the
+ * entire possible physical address range in case any platform does
+ * not define arch_get_mappable_range().
+ */
+struct range __weak arch_get_mappable_range(void)
+{
+	struct range mhp_range = {
+		.start = 0UL,
+		.end = -1ULL,
+	};
+	return mhp_range;
+}
+
+struct range mhp_get_pluggable_range(bool need_mapping)
+{
+	const u64 max_phys = (1ULL << MAX_PHYSMEM_BITS) - 1;
+	struct range mhp_range;
+
+	if (need_mapping) {
+		mhp_range = arch_get_mappable_range();
+		if (mhp_range.start > max_phys) {
+			mhp_range.start = 0;
+			mhp_range.end = 0;
+		}
+		mhp_range.end = min_t(u64, mhp_range.end, max_phys);
+	} else {
+		mhp_range.start = 0;
+		mhp_range.end = max_phys;
+	}
+	return mhp_range;
+}
+EXPORT_SYMBOL_GPL(mhp_get_pluggable_range);
+
+bool mhp_range_allowed(u64 start, u64 size, bool need_mapping)
+{
+	struct range mhp_range = mhp_get_pluggable_range(need_mapping);
+	u64 end = start + size;
+
+	if (start < end && start >= mhp_range.start && (end - 1) <= mhp_range.end)
+		return true;
+
+	pr_warn("Hotplug memory [%#llx-%#llx] exceeds maximum addressable range [%#llx-%#llx]\n",
+		start, end, mhp_range.start, mhp_range.end);
+	return false;
+}
+
 #ifdef CONFIG_MEMORY_HOTREMOVE
 /*
  * Confirm all pages in a range [start, end) belong to the same zone (skipping
diff --git a/mm/memremap.c b/mm/memremap.c
index 2455bac89506..7aa7d6e80ee5 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -200,6 +200,7 @@ static void dev_pagemap_percpu_release(struct percpu_ref *ref)
 static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
 		int range_id, int nid)
 {
+	const bool is_private = pgmap->type == MEMORY_DEVICE_PRIVATE;
 	struct range *range = &pgmap->ranges[range_id];
 	struct dev_pagemap *conflict_pgmap;
 	int error, is_ram;
@@ -245,6 +246,11 @@ static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
 	if (error)
 		goto err_pfn_remap;
 
+	if (!mhp_range_allowed(range->start, range_len(range), !is_private)) {
+		error = -EINVAL;
+		goto err_pfn_remap;
+	}
+
 	mem_hotplug_begin();
 
 	/*
@@ -258,7 +264,7 @@ static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
 	 * the CPU, we do want the linear mapping and thus use
 	 * arch_add_memory().
 	 */
-	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
+	if (is_private) {
 		error = add_pages(nid, PHYS_PFN(range->start),
 				  PHYS_PFN(range_len(range)), params);
 	} else {
--
cgit v1.2.3
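
Editor's illustration (not part of the series): a minimal sketch of how a
platform might override the __weak arch_get_mappable_range() fallback
above to restrict hotplug to its linearly mappable window.
MY_PLAT_MAX_LINEAR_PHYS is a hypothetical constant standing in for
whatever limit the architecture derives from its page table layout:

    /* arch/<plat>/mm/init.c -- hypothetical override of the __weak default */
    #include <linux/memory_hotplug.h>
    #include <linux/range.h>

    /* Hypothetical cap on physical addresses the linear map can cover. */
    #define MY_PLAT_MAX_LINEAR_PHYS	((1ULL << 46) - 1)

    struct range arch_get_mappable_range(void)
    {
    	/*
    	 * Only the platform constraint is expressed here;
    	 * mhp_get_pluggable_range() additionally clamps the end of the
    	 * returned range to MAX_PHYSMEM_BITS.
    	 */
    	struct range mhp_range = {
    		.start = 0ULL,
    		.end   = MY_PLAT_MAX_LINEAR_PHYS,
    	};

    	return mhp_range;
    }

A hotplug-capable driver can likewise call mhp_get_pluggable_range(true)
up front and trim or reject a region that falls outside the returned
range, rather than discovering the problem later via a failed
add_memory().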