Diffstat (limited to 'tools/testing/vma/vma_internal.h')
-rw-r--r-- | tools/testing/vma/vma_internal.h | 344
1 file changed, 117 insertions, 227 deletions
diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h
index 3639aa8dd2b0..dc976a285ad2 100644
--- a/tools/testing/vma/vma_internal.h
+++ b/tools/testing/vma/vma_internal.h
@@ -21,11 +21,13 @@
 
 #include <stdlib.h>
 
+#include <linux/atomic.h>
 #include <linux/list.h>
 #include <linux/maple_tree.h>
 #include <linux/mm.h>
 #include <linux/rbtree.h>
 #include <linux/refcount.h>
+#include <linux/slab.h>
 
 extern unsigned long stack_guard_gap;
 #ifdef CONFIG_MMU
@@ -249,6 +251,14 @@ struct mutex {};
 #define DEFINE_MUTEX(mutexname) \
 	struct mutex mutexname = {}
 
+#define DECLARE_BITMAP(name, bits) \
+	unsigned long name[BITS_TO_LONGS(bits)]
+
+#define NUM_MM_FLAG_BITS (64)
+typedef struct {
+	__private DECLARE_BITMAP(__mm_flags, NUM_MM_FLAG_BITS);
+} mm_flags_t;
+
 struct mm_struct {
 	struct maple_tree mm_mt;
 	int map_count;			/* number of VMAs */
@@ -260,7 +270,7 @@ struct mm_struct {
 
 	unsigned long def_flags;
 
-	unsigned long flags; /* Must use atomic bitops to access */
+	mm_flags_t flags; /* Must use mm_flags_* helpers to access */
 };
 
 struct vm_area_struct;
@@ -274,13 +284,14 @@ struct vm_area_struct;
  */
 struct vm_area_desc {
 	/* Immutable state. */
-	struct mm_struct *mm;
+	const struct mm_struct *const mm;
+	struct file *const file; /* May vary from vm_file in stacked callers. */
 	unsigned long start;
 	unsigned long end;
 
 	/* Mutable fields. Populated with initial state. */
 	pgoff_t pgoff;
-	struct file *file;
+	struct file *vm_file;
 	vm_flags_t vm_flags;
 	pgprot_t page_prot;
 
@@ -467,13 +478,21 @@ struct vm_operations_struct {
 	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
 					unsigned long addr, pgoff_t *ilx);
 #endif
+#ifdef CONFIG_FIND_NORMAL_PAGE
 	/*
-	 * Called by vm_normal_page() for special PTEs to find the
-	 * page for @addr. This is useful if the default behavior
-	 * (using pte_page()) would not find the correct page.
+	 * Called by vm_normal_page() for special PTEs in @vma at @addr. This
+	 * allows for returning a "normal" page from vm_normal_page() even
+	 * though the PTE indicates that the "struct page" either does not exist
+	 * or should not be touched: "special".
+	 *
+	 * Do not add new users: this really only works when a "normal" page
+	 * was mapped, but then the PTE got changed to something weird (+
+	 * marked special) that would not make pte_pfn() identify the originally
+	 * inserted page.
 	 */
-	struct page *(*find_special_page)(struct vm_area_struct *vma,
-					  unsigned long addr);
+	struct page *(*find_normal_page)(struct vm_area_struct *vma,
+					 unsigned long addr);
+#endif /* CONFIG_FIND_NORMAL_PAGE */
 };
 
 struct vm_unmapped_area_info {
@@ -509,65 +528,6 @@
 		.len_in = len_,					\
 	}
 
-struct kmem_cache_args {
-	/**
-	 * @align: The required alignment for the objects.
-	 *
-	 * %0 means no specific alignment is requested.
-	 */
-	unsigned int align;
-	/**
-	 * @useroffset: Usercopy region offset.
-	 *
-	 * %0 is a valid offset, when @usersize is non-%0
-	 */
-	unsigned int useroffset;
-	/**
-	 * @usersize: Usercopy region size.
-	 *
-	 * %0 means no usercopy region is specified.
-	 */
-	unsigned int usersize;
-	/**
-	 * @freeptr_offset: Custom offset for the free pointer
-	 *	in &SLAB_TYPESAFE_BY_RCU caches
-	 *
-	 * By default &SLAB_TYPESAFE_BY_RCU caches place the free pointer
-	 * outside of the object. This might cause the object to grow in size.
-	 * Cache creators that have a reason to avoid this can specify a custom
-	 * free pointer offset in their struct where the free pointer will be
-	 * placed.
-	 *
-	 * Note that placing the free pointer inside the object requires the
-	 * caller to ensure that no fields are invalidated that are required to
-	 * guard against object recycling (See &SLAB_TYPESAFE_BY_RCU for
-	 * details).
-	 *
-	 * Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset
-	 * is specified, %use_freeptr_offset must be set %true.
-	 *
-	 * Note that @ctor currently isn't supported with custom free pointers
-	 * as a @ctor requires an external free pointer.
-	 */
-	unsigned int freeptr_offset;
-	/**
-	 * @use_freeptr_offset: Whether a @freeptr_offset is used.
-	 */
-	bool use_freeptr_offset;
-	/**
-	 * @ctor: A constructor for the objects.
-	 *
-	 * The constructor is invoked for each object in a newly allocated slab
-	 * page. It is the cache user's responsibility to free object in the
-	 * same state as after calling the constructor, or deal appropriately
-	 * with any differences between a freshly constructed and a reallocated
-	 * object.
-	 *
-	 * %NULL means no constructor.
-	 */
-	void (*ctor)(void *);
-};
-
 static inline void vma_iter_invalidate(struct vma_iterator *vmi)
 {
 	mas_pause(&vmi->mas);
@@ -652,40 +612,6 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
 	vma->vm_lock_seq = UINT_MAX;
 }
 
-struct kmem_cache {
-	const char *name;
-	size_t object_size;
-	struct kmem_cache_args *args;
-};
-
-static inline struct kmem_cache *__kmem_cache_create(const char *name,
-						     size_t object_size,
-						     struct kmem_cache_args *args)
-{
-	struct kmem_cache *ret = malloc(sizeof(struct kmem_cache));
-
-	ret->name = name;
-	ret->object_size = object_size;
-	ret->args = args;
-
-	return ret;
-}
-
-#define kmem_cache_create(__name, __object_size, __args, ...)	\
-	__kmem_cache_create((__name), (__object_size), (__args))
-
-static inline void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
-{
-	(void)gfpflags;
-
-	return calloc(s->object_size, 1);
-}
-
-static inline void kmem_cache_free(struct kmem_cache *s, void *x)
-{
-	free(x);
-}
-
 /*
  * These are defined in vma.h, but sadly vm_stat_account() is referenced by
  * kernel/fork.c, so we have to these broadly available there, and temporarily
@@ -842,11 +768,11 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
 	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 }
 
-static inline void fput(struct file *)
+static inline void fput(struct file *file)
 {
 }
 
-static inline void mpol_put(struct mempolicy *)
+static inline void mpol_put(struct mempolicy *pol)
 {
 }
 
@@ -854,15 +780,15 @@ static inline void lru_add_drain(void)
 {
 }
 
-static inline void tlb_gather_mmu(struct mmu_gather *, struct mm_struct *)
+static inline void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm)
 {
 }
 
-static inline void update_hiwater_rss(struct mm_struct *)
+static inline void update_hiwater_rss(struct mm_struct *mm)
 {
 }
 
-static inline void update_hiwater_vm(struct mm_struct *)
+static inline void update_hiwater_vm(struct mm_struct *mm)
 {
 }
 
@@ -871,36 +797,23 @@ static inline void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
 		unsigned long end_addr, unsigned long tree_end,
 		bool mm_wr_locked)
 {
-	(void)tlb;
-	(void)mas;
-	(void)vma;
-	(void)start_addr;
-	(void)end_addr;
-	(void)tree_end;
-	(void)mm_wr_locked;
 }
 
 static inline void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
 		   struct vm_area_struct *vma, unsigned long floor,
 		   unsigned long ceiling, bool mm_wr_locked)
 {
-	(void)tlb;
-	(void)mas;
-	(void)vma;
-	(void)floor;
-	(void)ceiling;
-	(void)mm_wr_locked;
 }
 
-static inline void mapping_unmap_writable(struct address_space *)
+static inline void mapping_unmap_writable(struct address_space *mapping)
 {
 }
 
-static inline void flush_dcache_mmap_lock(struct address_space *)
+static inline void flush_dcache_mmap_lock(struct address_space *mapping)
 {
 }
 
-static inline void tlb_finish_mmu(struct mmu_gather *)
+static inline void tlb_finish_mmu(struct mmu_gather *tlb)
 {
 }
 
@@ -909,7 +822,7 @@ static inline struct file *get_file(struct file *f)
 	return f;
 }
 
-static inline int vma_dup_policy(struct vm_area_struct *, struct vm_area_struct *)
+static inline int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
 {
 	return 0;
 }
 
@@ -936,10 +849,6 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
 					 unsigned long end,
 					 struct vm_area_struct *next)
 {
-	(void)vma;
-	(void)start;
-	(void)end;
-	(void)next;
 }
 
 static inline void hugetlb_split(struct vm_area_struct *, unsigned long) {}
@@ -959,51 +868,48 @@ static inline void vm_acct_memory(long pages)
 {
 }
 
-static inline void vma_interval_tree_insert(struct vm_area_struct *,
-					    struct rb_root_cached *)
+static inline void vma_interval_tree_insert(struct vm_area_struct *vma,
+					    struct rb_root_cached *rb)
 {
 }
 
-static inline void vma_interval_tree_remove(struct vm_area_struct *,
-					    struct rb_root_cached *)
+static inline void vma_interval_tree_remove(struct vm_area_struct *vma,
+					    struct rb_root_cached *rb)
 {
 }
 
-static inline void flush_dcache_mmap_unlock(struct address_space *)
+static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
 {
 }
 
-static inline void anon_vma_interval_tree_insert(struct anon_vma_chain*,
-						 struct rb_root_cached *)
+static inline void anon_vma_interval_tree_insert(struct anon_vma_chain *avc,
+						 struct rb_root_cached *rb)
 {
 }
 
-static inline void anon_vma_interval_tree_remove(struct anon_vma_chain*,
-						 struct rb_root_cached *)
+static inline void anon_vma_interval_tree_remove(struct anon_vma_chain *avc,
+						 struct rb_root_cached *rb)
 {
 }
 
-static inline void uprobe_mmap(struct vm_area_struct *)
+static inline void uprobe_mmap(struct vm_area_struct *vma)
 {
 }
 
 static inline void uprobe_munmap(struct vm_area_struct *vma,
 				 unsigned long start, unsigned long end)
 {
-	(void)vma;
-	(void)start;
-	(void)end;
 }
 
-static inline void i_mmap_lock_write(struct address_space *)
+static inline void i_mmap_lock_write(struct address_space *mapping)
 {
 }
 
-static inline void anon_vma_lock_write(struct anon_vma *)
+static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
 {
 }
 
-static inline void vma_assert_write_locked(struct vm_area_struct *)
+static inline void vma_assert_write_locked(struct vm_area_struct *vma)
 {
 }
 
@@ -1013,16 +919,16 @@ static inline void unlink_anon_vmas(struct vm_area_struct *vma)
 	vma->anon_vma->was_unlinked = true;
 }
 
-static inline void anon_vma_unlock_write(struct anon_vma *)
+static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
 {
 }
 
-static inline void i_mmap_unlock_write(struct address_space *)
+static inline void i_mmap_unlock_write(struct address_space *mapping)
 {
 }
 
-static inline void anon_vma_merge(struct vm_area_struct *,
-				  struct vm_area_struct *)
+static inline void anon_vma_merge(struct vm_area_struct *vma,
+				  struct vm_area_struct *next)
 {
 }
 
@@ -1031,27 +937,22 @@ static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
 					 unsigned long end,
 					 struct list_head *unmaps)
 {
-	(void)vma;
-	(void)start;
-	(void)end;
-	(void)unmaps;
-
 	return 0;
 }
 
-static inline void mmap_write_downgrade(struct mm_struct *)
+static inline void mmap_write_downgrade(struct mm_struct *mm)
 {
 }
 
-static inline void mmap_read_unlock(struct mm_struct *)
+static inline void mmap_read_unlock(struct mm_struct *mm)
 {
 }
 
-static inline void mmap_write_unlock(struct mm_struct *)
+static inline void mmap_write_unlock(struct mm_struct *mm)
 {
 }
 
-static inline int mmap_write_lock_killable(struct mm_struct *)
+static inline int mmap_write_lock_killable(struct mm_struct *mm)
 {
 	return 0;
 }
 
@@ -1060,10 +961,6 @@ static inline bool can_modify_mm(struct mm_struct *mm,
 				 unsigned long start,
 				 unsigned long end)
 {
-	(void)mm;
-	(void)start;
-	(void)end;
-
 	return true;
 }
 
@@ -1071,16 +968,13 @@ static inline void arch_unmap(struct mm_struct *mm,
 			      unsigned long start,
 			      unsigned long end)
 {
-	(void)mm;
-	(void)start;
-	(void)end;
 }
 
-static inline void mmap_assert_locked(struct mm_struct *)
+static inline void mmap_assert_locked(struct mm_struct *mm)
 {
 }
 
-static inline bool mpol_equal(struct mempolicy *, struct mempolicy *)
+static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
 {
 	return true;
 }
@@ -1088,63 +982,62 @@ static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
 
 static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
 					vm_flags_t vm_flags)
 {
-	(void)vma;
-	(void)vm_flags;
 }
 
-static inline bool mapping_can_writeback(struct address_space *)
+static inline bool mapping_can_writeback(struct address_space *mapping)
 {
 	return true;
 }
 
-static inline bool is_vm_hugetlb_page(struct vm_area_struct *)
+static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
 {
 	return false;
 }
 
-static inline bool vma_soft_dirty_enabled(struct vm_area_struct *)
+static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
 {
 	return false;
 }
 
-static inline bool userfaultfd_wp(struct vm_area_struct *)
+static inline bool userfaultfd_wp(struct vm_area_struct *vma)
 {
 	return false;
 }
 
-static inline void mmap_assert_write_locked(struct mm_struct *)
+static inline void mmap_assert_write_locked(struct mm_struct *mm)
 {
 }
 
-static inline void mutex_lock(struct mutex *)
+static inline void mutex_lock(struct mutex *lock)
 {
 }
 
-static inline void mutex_unlock(struct mutex *)
+static inline void mutex_unlock(struct mutex *lock)
 {
 }
 
-static inline bool mutex_is_locked(struct mutex *)
+static inline bool mutex_is_locked(struct mutex *lock)
 {
 	return true;
 }
 
-static inline bool signal_pending(void *)
+static inline bool signal_pending(void *p)
 {
 	return false;
 }
 
-static inline bool is_file_hugepages(struct file *)
+static inline bool is_file_hugepages(struct file *file)
 {
 	return false;
 }
 
-static inline int security_vm_enough_memory_mm(struct mm_struct *, long)
+static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
 {
 	return 0;
 }
 
-static inline bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long)
+static inline bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags,
+				 unsigned long npages)
 {
 	return true;
 }
@@ -1169,7 +1062,7 @@ static inline void vm_flags_clear(struct vm_area_struct *vma,
 	vma->__vm_flags &= ~flags;
 }
 
-static inline int shmem_zero_setup(struct vm_area_struct *)
+static inline int shmem_zero_setup(struct vm_area_struct *vma)
 {
 	return 0;
 }
@@ -1179,20 +1072,20 @@ static inline void vma_set_anonymous(struct vm_area_struct *vma)
 	vma->vm_ops = NULL;
 }
 
-static inline void ksm_add_vma(struct vm_area_struct *)
+static inline void ksm_add_vma(struct vm_area_struct *vma)
 {
 }
 
-static inline void perf_event_mmap(struct vm_area_struct *)
+static inline void perf_event_mmap(struct vm_area_struct *vma)
 {
 }
 
-static inline bool vma_is_dax(struct vm_area_struct *)
+static inline bool vma_is_dax(struct vm_area_struct *vma)
 {
 	return false;
 }
 
-static inline struct vm_area_struct *get_gate_vma(struct mm_struct *)
+static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
 {
 	return NULL;
 }
 
@@ -1217,16 +1110,16 @@ static inline void vma_set_page_prot(struct vm_area_struct *vma)
 	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
 }
 
-static inline bool arch_validate_flags(vm_flags_t)
+static inline bool arch_validate_flags(vm_flags_t flags)
 {
 	return true;
 }
 
-static inline void vma_close(struct vm_area_struct *)
+static inline void vma_close(struct vm_area_struct *vma)
 {
 }
 
-static inline int mmap_file(struct file *, struct vm_area_struct *)
+static inline int mmap_file(struct file *file, struct vm_area_struct *vma)
 {
 	return 0;
 }
@@ -1282,8 +1175,8 @@ static inline bool capable(int cap)
 	return true;
 }
 
-static inline bool mlock_future_ok(struct mm_struct *mm, vm_flags_t vm_flags,
-				   unsigned long bytes)
+static inline bool mlock_future_ok(const struct mm_struct *mm,
+				   vm_flags_t vm_flags, unsigned long bytes)
 {
 	unsigned long locked_pages, limit_pages;
 
@@ -1325,6 +1218,13 @@ static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
 {
 }
 
+#define ACCESS_PRIVATE(p, member) ((p)->member)
+
+static inline bool mm_flags_test(int flag, const struct mm_struct *mm)
+{
+	return test_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
+}
+
 /*
  * Denies creating a writable executable mapping or gaining executable permissions.
  *
@@ -1355,7 +1255,7 @@
 static inline bool map_deny_write_exec(unsigned long old, unsigned long new)
 {
 	/* If MDWE is disabled, we have nothing to deny. */
-	if (!test_bit(MMF_HAS_MDWE, &current->mm->flags))
+	if (!mm_flags_test(MMF_HAS_MDWE, current->mm))
 		return false;
 
 	/* If the new VMA is not executable, we have nothing to deny. */
@@ -1375,21 +1275,12 @@ static inline bool map_deny_write_exec(unsigned long old, unsigned long new)
 
 static inline int mapping_map_writable(struct address_space *mapping)
 {
-	int c = atomic_read(&mapping->i_mmap_writable);
-
-	/* Derived from the raw_atomic_inc_unless_negative() implementation. */
-	do {
-		if (c < 0)
-			return -EPERM;
-	} while (!__sync_bool_compare_and_swap(&mapping->i_mmap_writable, c, c+1));
-
-	return 0;
+	return atomic_inc_unless_negative(&mapping->i_mmap_writable) ?
+		0 : -EPERM;
 }
 
 static inline unsigned long move_page_tables(struct pagetable_move_control *pmc)
 {
-	(void)pmc;
-
 	return 0;
 }
 
@@ -1397,67 +1288,61 @@ static inline void free_pgd_range(struct mmu_gather *tlb,
 			unsigned long addr, unsigned long end,
 			unsigned long floor, unsigned long ceiling)
 {
-	(void)tlb;
-	(void)addr;
-	(void)end;
-	(void)floor;
-	(void)ceiling;
 }
 
 static inline int ksm_execve(struct mm_struct *mm)
 {
-	(void)mm;
-
 	return 0;
 }
 
 static inline void ksm_exit(struct mm_struct *mm)
 {
-	(void)mm;
 }
 
 static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt)
 {
-	(void)vma;
-	(void)reset_refcnt;
+	if (reset_refcnt)
+		refcount_set(&vma->vm_refcnt, 0);
 }
 
 static inline void vma_numab_state_init(struct vm_area_struct *vma)
 {
-	(void)vma;
 }
 
 static inline void vma_numab_state_free(struct vm_area_struct *vma)
 {
-	(void)vma;
 }
 
 static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
 				     struct vm_area_struct *new_vma)
 {
-	(void)orig_vma;
-	(void)new_vma;
 }
 
 static inline void free_anon_vma_name(struct vm_area_struct *vma)
 {
-	(void)vma;
 }
 
 /* Declared in vma.h. */
 static inline void set_vma_from_desc(struct vm_area_struct *vma,
 		struct vm_area_desc *desc);
 
-static inline struct vm_area_desc *vma_to_desc(struct vm_area_struct *vma,
-		struct vm_area_desc *desc);
-
-static int compat_vma_mmap_prepare(struct file *file,
-		struct vm_area_struct *vma)
+static inline int __compat_vma_mmap_prepare(const struct file_operations *f_op,
+		struct file *file, struct vm_area_struct *vma)
 {
-	struct vm_area_desc desc;
+	struct vm_area_desc desc = {
+		.mm = vma->vm_mm,
+		.file = vma->vm_file,
+		.start = vma->vm_start,
+		.end = vma->vm_end,
+
+		.pgoff = vma->vm_pgoff,
+		.vm_file = vma->vm_file,
+		.vm_flags = vma->vm_flags,
+		.page_prot = vma->vm_page_prot,
+	};
 	int err;
 
-	err = file->f_op->mmap_prepare(vma_to_desc(vma, &desc));
+	err = f_op->mmap_prepare(&desc);
 	if (err)
 		return err;
 	set_vma_from_desc(vma, &desc);
@@ -1465,6 +1350,12 @@ static int compat_vma_mmap_prepare(struct file *file,
 	return 0;
 }
 
+static inline int compat_vma_mmap_prepare(struct file *file,
+		struct vm_area_struct *vma)
+{
+	return __compat_vma_mmap_prepare(file->f_op, file, vma);
+}
+
 /* Did the driver provide valid mmap hook configuration? */
 static inline bool can_mmap_file(struct file *file)
 {
@@ -1495,7 +1386,6 @@ static inline int vfs_mmap_prepare(struct file *file, struct vm_area_desc *desc)
 
 static inline void fixup_hugetlb_reservations(struct vm_area_struct *vma)
 {
-	(void)vma;
 }
 
 static inline void vma_set_file(struct vm_area_struct *vma, struct file *file)
@@ -1506,13 +1396,13 @@ static inline void vma_set_file(struct vm_area_struct *vma, struct file *file)
 	fput(file);
 }
 
-static inline bool shmem_file(struct file *)
+static inline bool shmem_file(struct file *file)
 {
 	return false;
 }
 
-static inline vm_flags_t ksm_vma_flags(const struct mm_struct *, const struct file *,
-		vm_flags_t vm_flags)
+static inline vm_flags_t ksm_vma_flags(const struct mm_struct *mm,
+		const struct file *file, vm_flags_t vm_flags)
 {
 	return vm_flags;
 }
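
The mm_flags_t hunks above wrap the old bare `unsigned long flags` in an opaque bitmap type (NUM_MM_FLAG_BITS is 64, so BITS_TO_LONGS(64) yields a single unsigned long on 64-bit), forcing all access through helpers such as mm_flags_test(). A minimal sketch of how the accessors compose; mm_flags_set() here is an assumed counterpart mirroring the mm_flags_test() helper added by the patch, not something this diff defines:

    /* Sketch only: mm_flags_set() is hypothetical in this stub header,
     * mirroring the mm_flags_test() helper added above.
     */
    static inline void mm_flags_set(int flag, struct mm_struct *mm)
    {
            set_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
    }

    static bool mdwe_example(struct mm_struct *mm)
    {
            mm_flags_set(MMF_HAS_MDWE, mm);         /* enable MDWE for this mm */
            return mm_flags_test(MMF_HAS_MDWE, mm); /* observable only via helpers */
    }

The __private annotation on __mm_flags is what makes the detour through ACCESS_PRIVATE() necessary: direct member access is meant to fail review/static checking, so every caller goes through the typed helpers.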
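The mapping_map_writable() hunk drops a hand-rolled loop built on the GCC __sync_bool_compare_and_swap() builtin in favor of the kernel's atomic_inc_unless_negative(), which this stub header can now pull in via <linux/atomic.h>. The two are semantically equivalent; a sketch of the open-coded form, roughly what the removed loop (and raw_atomic_inc_unless_negative() internally) does:

    /* Sketch of the semantics: increment v only while it is non-negative.
     * A negative i_mmap_writable count means writable mappings are denied.
     */
    static inline bool inc_unless_negative_sketch(atomic_t *v)
    {
            int c = atomic_read(v);

            do {
                    if (c < 0)
                            return false;   /* sealed: refuse to increment */
            } while (!atomic_try_cmpxchg(v, &c, c + 1));

            return true;
    }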
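The rewritten __compat_vma_mmap_prepare() replaces the old vma_to_desc() helper with an explicit snapshot of the VMA into a stack-allocated vm_area_desc, invokes the driver's ->mmap_prepare() hook on it, and copies any mutated fields back via set_vma_from_desc(). A sketch of the hook side; my_mmap_prepare() is a hypothetical driver callback, not part of this patch:

    /* Hypothetical driver hook: only the mutable half of the descriptor
     * may be changed; desc->mm and desc->file are const by construction.
     */
    static int my_mmap_prepare(struct vm_area_desc *desc)
    {
            /* Reject mappings that do not start at the file's first page. */
            if (desc->pgoff != 0)
                    return -EINVAL;

            desc->vm_flags |= VM_DONTEXPAND;        /* mutable field */
            return 0;
    }

With f_op->mmap_prepare pointing at such a hook, the new compat_vma_mmap_prepare(file, vma) wrapper above performs the snapshot, the call, and the write-back in one step.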