Diffstat (limited to 'mm/util.c')
-rw-r--r-- | mm/util.c | 110
1 file changed, 86 insertions, 24 deletions
diff --git a/mm/util.c b/mm/util.c
index f814e6a59ab1..6c1d64ed0221 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -315,7 +315,7 @@ void *memdup_user_nul(const void __user *src, size_t len)
 EXPORT_SYMBOL(memdup_user_nul);
 
 /* Check if the vma is being used as a stack by this task */
-int vma_is_stack_for_current(struct vm_area_struct *vma)
+int vma_is_stack_for_current(const struct vm_area_struct *vma)
 {
 	struct task_struct * __maybe_unused t = current;
 
@@ -410,7 +410,7 @@ unsigned long arch_mmap_rnd(void)
 	return rnd << PAGE_SHIFT;
 }
 
-static int mmap_is_legacy(struct rlimit *rlim_stack)
+static int mmap_is_legacy(const struct rlimit *rlim_stack)
 {
 	if (current->personality & ADDR_COMPAT_LAYOUT)
 		return 1;
@@ -431,7 +431,7 @@ static int mmap_is_legacy(struct rlimit *rlim_stack)
 #define MIN_GAP		(SZ_128M)
 #define MAX_GAP		(STACK_TOP / 6 * 5)
 
-static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
+static unsigned long mmap_base(const unsigned long rnd, const struct rlimit *rlim_stack)
 {
 #ifdef CONFIG_STACK_GROWSUP
 	/*
@@ -462,7 +462,7 @@ static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
 #endif
 }
 
-void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
+void arch_pick_mmap_layout(struct mm_struct *mm, const struct rlimit *rlim_stack)
 {
 	unsigned long random_factor = 0UL;
 
@@ -471,17 +471,17 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
 
 	if (mmap_is_legacy(rlim_stack)) {
 		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
-		clear_bit(MMF_TOPDOWN, &mm->flags);
+		mm_flags_clear(MMF_TOPDOWN, mm);
 	} else {
 		mm->mmap_base = mmap_base(random_factor, rlim_stack);
-		set_bit(MMF_TOPDOWN, &mm->flags);
+		mm_flags_set(MMF_TOPDOWN, mm);
 	}
 }
 #elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
-void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
+void arch_pick_mmap_layout(struct mm_struct *mm, const struct rlimit *rlim_stack)
 {
 	mm->mmap_base = TASK_UNMAPPED_BASE;
-	clear_bit(MMF_TOPDOWN, &mm->flags);
+	mm_flags_clear(MMF_TOPDOWN, mm);
 }
 #endif
 #ifdef CONFIG_MMU
@@ -504,7 +504,7 @@ EXPORT_SYMBOL_IF_KUNIT(arch_pick_mmap_layout);
  *   * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
  */
 int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
-			struct task_struct *task, bool bypass_rlim)
+			const struct task_struct *task, bool bypass_rlim)
 {
 	unsigned long locked_vm, limit;
 	int ret = 0;
@@ -688,7 +688,7 @@ struct anon_vma *folio_anon_vma(const struct folio *folio)
  * You can call this for folios which aren't in the swap cache or page
  * cache and it will return NULL.
  */
-struct address_space *folio_mapping(struct folio *folio)
+struct address_space *folio_mapping(const struct folio *folio)
 {
 	struct address_space *mapping;
 
@@ -926,7 +926,7 @@ EXPORT_SYMBOL_GPL(vm_memory_committed);
  * Note this is a helper function intended to be used by LSMs which
  * wish to use this logic.
  */
-int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
+int __vm_enough_memory(const struct mm_struct *mm, long pages, int cap_sys_admin)
 {
 	long allowed;
 	unsigned long bytes_failed;
@@ -1134,16 +1134,50 @@ EXPORT_SYMBOL(flush_dcache_folio);
 #endif
 
 /**
+ * __compat_vma_mmap_prepare() - See description for compat_vma_mmap_prepare()
+ * for details. This is the same operation, only with a specific file operations
+ * struct which may or may not be the same as vma->vm_file->f_op.
+ * @f_op: The file operations whose .mmap_prepare() hook is specified.
+ * @file: The file which backs or will back the mapping.
+ * @vma: The VMA to apply the .mmap_prepare() hook to.
+ * Returns: 0 on success or error.
+ */
+int __compat_vma_mmap_prepare(const struct file_operations *f_op,
+		struct file *file, struct vm_area_struct *vma)
+{
+	struct vm_area_desc desc = {
+		.mm = vma->vm_mm,
+		.file = file,
+		.start = vma->vm_start,
+		.end = vma->vm_end,
+
+		.pgoff = vma->vm_pgoff,
+		.vm_file = vma->vm_file,
+		.vm_flags = vma->vm_flags,
+		.page_prot = vma->vm_page_prot,
+	};
+	int err;
+
+	err = f_op->mmap_prepare(&desc);
+	if (err)
+		return err;
+	set_vma_from_desc(vma, &desc);
+
+	return 0;
+}
+EXPORT_SYMBOL(__compat_vma_mmap_prepare);
+
+/**
  * compat_vma_mmap_prepare() - Apply the file's .mmap_prepare() hook to an
- * existing VMA
- * @file: The file which possesss an f_op->mmap_prepare() hook
+ * existing VMA.
+ * @file: The file which possesses an f_op->mmap_prepare() hook.
  * @vma: The VMA to apply the .mmap_prepare() hook to.
  *
  * Ordinarily, .mmap_prepare() is invoked directly upon mmap(). However, certain
- * 'wrapper' file systems invoke a nested mmap hook of an underlying file.
+ * stacked filesystems invoke a nested mmap hook of an underlying file.
  *
  * Until all filesystems are converted to use .mmap_prepare(), we must be
- * conservative and continue to invoke these 'wrapper' filesystems using the
+ * conservative and continue to invoke these stacked filesystems using the
  * deprecated .mmap() hook.
 *
 * However we have a problem if the underlying file system possesses an
@@ -1161,15 +1195,7 @@ EXPORT_SYMBOL(flush_dcache_folio);
  */
 int compat_vma_mmap_prepare(struct file *file, struct vm_area_struct *vma)
 {
-	struct vm_area_desc desc;
-	int err;
-
-	err = file->f_op->mmap_prepare(vma_to_desc(vma, &desc));
-	if (err)
-		return err;
-	set_vma_from_desc(vma, &desc);
-
-	return 0;
+	return __compat_vma_mmap_prepare(file->f_op, file, vma);
 }
 EXPORT_SYMBOL(compat_vma_mmap_prepare);
 
@@ -1281,3 +1307,39 @@ unsigned int folio_pte_batch(struct folio *folio, pte_t *ptep, pte_t pte,
 	return folio_pte_batch_flags(folio, NULL, ptep, &pte, max_nr, 0);
 }
 #endif /* CONFIG_MMU */
+
+#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
+/**
+ * page_range_contiguous - test whether the page range is contiguous
+ * @page: the start of the page range.
+ * @nr_pages: the number of pages in the range.
+ *
+ * Test whether the page range is contiguous, such that they can be iterated
+ * naively, corresponding to iterating a contiguous PFN range.
+ *
+ * This function should primarily only be used for debug checks, or when
+ * working with page ranges that are not naturally contiguous (e.g., pages
+ * within a folio are).
+ *
+ * Returns true if contiguous, otherwise false.
+ */
+bool page_range_contiguous(const struct page *page, unsigned long nr_pages)
+{
+	const unsigned long start_pfn = page_to_pfn(page);
+	const unsigned long end_pfn = start_pfn + nr_pages;
+	unsigned long pfn;
+
+	/*
+	 * The memmap is allocated per memory section, so no need to check
+	 * within the first section. However, we need to check each other
+	 * spanned memory section once, making sure the first page in a
+	 * section could similarly be reached by just iterating pages.
+	 */
+	for (pfn = ALIGN(start_pfn, PAGES_PER_SECTION);
+	     pfn < end_pfn; pfn += PAGES_PER_SECTION)
+		if (unlikely(page + (pfn - start_pfn) != pfn_to_page(pfn)))
+			return false;
+	return true;
+}
+EXPORT_SYMBOL(page_range_contiguous);
+#endif
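For context on the hook being bridged above: a driver or filesystem that has been converted to .mmap_prepare() operates on a vm_area_desc rather than on a live VMA. The sketch below is not part of the patch; the "mydrv" names are hypothetical, and it only touches desc fields that the patch itself populates (vm_flags, page_prot, and so on).

/*
 * Sketch only (not part of this patch): a converted driver's
 * .mmap_prepare() hook. All "mydrv" names are hypothetical; the hook
 * mutates the vm_area_desc instead of a live VMA.
 */
static int mydrv_mmap_prepare(struct vm_area_desc *desc)
{
	/* Hypothetical policy: refuse shared writable mappings. */
	if ((desc->vm_flags & VM_SHARED) && (desc->vm_flags & VM_WRITE))
		return -EPERM;

	/* Pin the mapping's size: no expansion via mremap(). */
	desc->vm_flags |= VM_DONTEXPAND;
	return 0;
}

static const struct file_operations mydrv_fops = {
	.mmap_prepare	= mydrv_mmap_prepare,
};

Because the hook only ever sees the descriptor, the VMA itself is mutated in exactly one place, set_vma_from_desc(), and only after the hook has succeeded.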
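The compat helpers exist so that a stacked filesystem still using the deprecated .mmap() hook can invoke an underlying file whose file_operations have already been converted. Below is a minimal sketch of such a caller, assuming a hypothetical "wrapfs" with a wrapfs_real_file() helper; call_mmap() is the existing kernel wrapper for the legacy hook.

/*
 * Sketch only (not part of this patch): a stacked filesystem's legacy
 * .mmap() hook bridging to an underlying file. "wrapfs" and
 * wrapfs_real_file() are hypothetical names for illustration.
 */
static int wrapfs_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct file *realfile = wrapfs_real_file(file);

	/*
	 * If the underlying filesystem was converted to .mmap_prepare(),
	 * bridge to it: compat_vma_mmap_prepare() snapshots the VMA into
	 * a vm_area_desc, runs the hook, and writes the result back.
	 */
	if (realfile->f_op->mmap_prepare)
		return compat_vma_mmap_prepare(realfile, vma);

	/* Otherwise invoke the deprecated .mmap() hook directly. */
	return call_mmap(realfile, vma);
}

Note how the split introduced by the patch helps here: the desc carries both the file being mapped (@file, the underlying file) and the file the VMA actually refers to (vm_file), which may differ in the stacked case.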
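page_range_contiguous() matters because, with CONFIG_SPARSEMEM and without CONFIG_SPARSEMEM_VMEMMAP, the memmap is allocated per memory section: 'page + 1' is only guaranteed to reach the next struct page within a single section, so the loop above checks each section boundary the range spans. A usage sketch, not from the patch; zero_page_range() is a hypothetical caller and clear_highpage() is the stock kernel helper.

/*
 * Sketch only (not part of this patch): guarding naive 'page + i'
 * iteration over a PFN range. zero_page_range() is hypothetical.
 */
static void zero_page_range(struct page *page, unsigned long nr_pages)
{
	unsigned long i;

	if (page_range_contiguous(page, nr_pages)) {
		/* One contiguous memmap span: plain pointer arithmetic. */
		for (i = 0; i < nr_pages; i++)
			clear_highpage(page + i);
		return;
	}

	/* Range crosses a discontiguous section: translate per PFN. */
	for (i = 0; i < nr_pages; i++)
		clear_highpage(pfn_to_page(page_to_pfn(page) + i));
}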