Diffstat (limited to 'mm/memblock.c')
-rw-r--r--  mm/memblock.c  83
1 file changed, 13 insertions(+), 70 deletions(-)
diff --git a/mm/memblock.c b/mm/memblock.c
index 154f1d73b61f..120a501a887a 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -780,9 +780,9 @@ bool __init_memblock memblock_validate_numa_coverage(unsigned long threshold_byt
}
if ((nr_pages << PAGE_SHIFT) > threshold_bytes) {
- mem_size_mb = memblock_phys_mem_size() >> 20;
+ mem_size_mb = memblock_phys_mem_size() / SZ_1M;
pr_err("NUMA: no nodes coverage for %luMB of %luMB RAM\n",
- (nr_pages << PAGE_SHIFT) >> 20, mem_size_mb);
+ (nr_pages << PAGE_SHIFT) / SZ_1M, mem_size_mb);
return false;
}
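
Both spellings of the conversion are equivalent for unsigned operands: SZ_1M is 0x00100000 (include/linux/sizes.h), and division by a power of two compiles down to the same right shift. A minimal userspace sketch of the equivalence (the SZ_1M definition is copied in so the snippet stands alone):

#include <stdio.h>

#define SZ_1M 0x00100000	/* as defined in include/linux/sizes.h */

int main(void)
{
	unsigned long long bytes = 3221225472ULL;	/* 3 GiB */

	/* Identical results: the divisor is a power of two, so the
	 * compiler lowers the division to a right shift by 20. */
	printf("shift:  %lluMB\n", bytes >> 20);
	printf("divide: %lluMB\n", bytes / SZ_1M);
	return 0;
}

The division form simply reads better in the pr_err() arithmetic than a bare magic shift count.
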
@@ -1091,13 +1091,20 @@ int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
/**
* memblock_reserved_mark_noinit - Mark a reserved memory region with flag
- * MEMBLOCK_RSRV_NOINIT which results in the struct pages not being initialized
- * for this region.
+ * MEMBLOCK_RSRV_NOINIT
+ *
* @base: the base phys addr of the region
* @size: the size of the region
*
- * struct pages will not be initialized for reserved memory regions marked with
- * %MEMBLOCK_RSRV_NOINIT.
+ * The struct pages for the reserved regions marked %MEMBLOCK_RSRV_NOINIT will
+ * not be fully initialized to allow the caller to optimize their initialization.
+ *
+ * When %CONFIG_DEFERRED_STRUCT_PAGE_INIT is enabled, setting this flag
+ * completely bypasses the initialization of struct pages for such regions.
+ *
+ * When %CONFIG_DEFERRED_STRUCT_PAGE_INIT is disabled, struct pages in this
+ * region will be initialized with default values but won't be marked as
+ * reserved.
*
* Return: 0 on success, -errno on failure.
*/
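
Taken together, the reworded comment describes a two-step pattern for callers. A hypothetical sketch (example_reserve_noinit() is illustrative and not part of this patch; memblock_reserve() and memblock_reserved_mark_noinit() are the real APIs):

/*
 * Hypothetical early-boot helper: reserve a region whose struct pages
 * the caller will initialize itself, so generic init can be skipped.
 */
static int __init example_reserve_noinit(phys_addr_t base, phys_addr_t size)
{
	int ret;

	ret = memblock_reserve(base, size);
	if (ret)
		return ret;

	/*
	 * With CONFIG_DEFERRED_STRUCT_PAGE_INIT=y the struct pages for this
	 * range are skipped entirely; otherwise they get default values but
	 * are not marked reserved, per the updated comment above.
	 */
	return memblock_reserved_mark_noinit(base, size);
}
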
@@ -1438,70 +1445,6 @@ int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
return 0;
}
-#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
-/**
- * __next_mem_pfn_range_in_zone - iterator for for_each_*_range_in_zone()
- *
- * @idx: pointer to u64 loop variable
- * @zone: zone in which all of the memory blocks reside
- * @out_spfn: ptr to ulong for start pfn of the range, can be %NULL
- * @out_epfn: ptr to ulong for end pfn of the range, can be %NULL
- *
- * This function is meant to be a zone/pfn specific wrapper for the
- * for_each_mem_range type iterators. Specifically they are used in the
- * deferred memory init routines and as such we were duplicating much of
- * this logic throughout the code. So instead of having it in multiple
- * locations it seemed like it would make more sense to centralize this to
- * one new iterator that does everything they need.
- */
-void __init_memblock
-__next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
- unsigned long *out_spfn, unsigned long *out_epfn)
-{
- int zone_nid = zone_to_nid(zone);
- phys_addr_t spa, epa;
-
- __next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
- &memblock.memory, &memblock.reserved,
- &spa, &epa, NULL);
-
- while (*idx != U64_MAX) {
- unsigned long epfn = PFN_DOWN(epa);
- unsigned long spfn = PFN_UP(spa);
-
- /*
- * Verify the end is at least past the start of the zone and
- * that we have at least one PFN to initialize.
- */
- if (zone->zone_start_pfn < epfn && spfn < epfn) {
- /* if we went too far just stop searching */
- if (zone_end_pfn(zone) <= spfn) {
- *idx = U64_MAX;
- break;
- }
-
- if (out_spfn)
- *out_spfn = max(zone->zone_start_pfn, spfn);
- if (out_epfn)
- *out_epfn = min(zone_end_pfn(zone), epfn);
-
- return;
- }
-
- __next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
- &memblock.memory, &memblock.reserved,
- &spa, &epa, NULL);
- }
-
- /* signal end of iteration */
- if (out_spfn)
- *out_spfn = ULONG_MAX;
- if (out_epfn)
- *out_epfn = 0;
-}
-
-#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
-
/**
* memblock_alloc_range_nid - allocate boot memory block
* @size: size of memory block to be allocated in bytes
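
For context on the removal above: __next_mem_pfn_range_in_zone() was the backing function for the for_each_free_mem_pfn_range_in_zone*() wrapper macros used by the deferred struct-page init code. A simplified sketch of the consumption pattern it supported (count_unreserved_pfns_in_zone() is an illustrative name, not an existing function):

/*
 * Count PFNs of unreserved memblock memory that fall inside @zone,
 * driving the (now removed) iterator the way its wrapper macros did.
 */
static unsigned long __init count_unreserved_pfns_in_zone(struct zone *zone)
{
	unsigned long spfn, epfn, nr_pages = 0;
	u64 i = 0;

	__next_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn);
	while (i != U64_MAX) {
		nr_pages += epfn - spfn;	/* already clamped to the zone */
		__next_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn);
	}
	return nr_pages;
}

The helper's value was the clamping shown in the deleted body: callers only ever saw ranges already trimmed to [zone->zone_start_pfn, zone_end_pfn(zone)).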