// SPDX-License-Identifier: GPL-2.0-or-later

#include "shared.h"

bool fail_prealloc;

unsigned long mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
unsigned long stack_guard_gap = 256UL << PAGE_SHIFT;

const struct vm_operations_struct vma_dummy_vm_ops;
struct anon_vma dummy_anon_vma;
struct task_struct __current;
/* Allocate a VMA covering [start, end) with the given flags, left detached. */
struct vm_area_struct *alloc_vma(struct mm_struct *mm,
				 unsigned long start, unsigned long end,
				 pgoff_t pgoff, vm_flags_t vm_flags)
{
	struct vm_area_struct *vma = vm_area_alloc(mm);

	if (vma == NULL)
		return NULL;

	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_pgoff = pgoff;
	vm_flags_reset(vma, vm_flags);
	vma_assert_detached(vma);

	return vma;
}
/* Mark a VMA as detached, then free it. */
void detach_free_vma(struct vm_area_struct *vma)
{
	vma_mark_detached(vma);
	vm_area_free(vma);
}
/* Allocate a VMA and attach it to the mm's VMA tree. */
struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm,
					  unsigned long start, unsigned long end,
					  pgoff_t pgoff, vm_flags_t vm_flags)
{
	struct vm_area_struct *vma = alloc_vma(mm, start, end, pgoff, vm_flags);

	if (vma == NULL)
		return NULL;

	if (attach_vma(mm, vma)) {
		detach_free_vma(vma);
		return NULL;
	}

	/*
	 * Reset this counter which we use to track whether writes have
	 * begun. Linking to the tree will have caused this to be incremented,
	 * which means we will get a false positive otherwise.
	 */
	vma->vm_lock_seq = UINT_MAX;

	return vma;
}
/* Reset the shared dummy anon_vma's tracking flags between tests. */
void reset_dummy_anon_vma(void)
{
	dummy_anon_vma.was_cloned = false;
	dummy_anon_vma.was_unlinked = false;
}
/*
 * Detach and free every VMA in the mm, destroy its maple tree and reset
 * per-test state. Returns the number of VMAs freed so tests can assert on it.
 */
int cleanup_mm(struct mm_struct *mm, struct vma_iterator *vmi)
{
	struct vm_area_struct *vma;
	int count = 0;

	fail_prealloc = false;
	reset_dummy_anon_vma();

	vma_iter_set(vmi, 0);
	for_each_vma(*vmi, vma) {
		detach_free_vma(vma);
		count++;
	}

	mtree_destroy(&mm->mm_mt);
	mm->map_count = 0;
	return count;
}
/* Has a write operation begun on this VMA since the last check? */
bool vma_write_started(struct vm_area_struct *vma)
{
	int seq = vma->vm_lock_seq;

	/* We reset after each check; UINT_MAX reads back as -1 via signed seq. */
	vma->vm_lock_seq = UINT_MAX;

	/* The vma_start_write() stub simply increments this value. */
	return seq > -1;
}
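
/*
 * Illustrative sketch only (an assumption, compiled out): the expected
 * interaction between the vma_start_write() stub and vma_write_started().
 */
#if 0
	vma->vm_lock_seq = UINT_MAX;	/* nothing written yet */
	vma_start_write(vma);		/* stub bumps vm_lock_seq */
	/* vma_write_started(vma) now returns true and resets the counter; */
	/* a second call would return false. */
#endif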
/* Attach the given anon_vma to the VMA via the supplied chain entry. */
void __vma_set_dummy_anon_vma(struct vm_area_struct *vma,
			      struct anon_vma_chain *avc,
			      struct anon_vma *anon_vma)
{
	vma->anon_vma = anon_vma;
	INIT_LIST_HEAD(&vma->anon_vma_chain);
	list_add(&avc->same_vma, &vma->anon_vma_chain);
	avc->anon_vma = vma->anon_vma;
}

/* Attach the shared dummy anon_vma to the VMA. */
void vma_set_dummy_anon_vma(struct vm_area_struct *vma,
			    struct anon_vma_chain *avc)
{
	__vma_set_dummy_anon_vma(vma, avc, &dummy_anon_vma);
}
/* Stub: tests run against a single dummy task. */
struct task_struct *get_current(void)
{
	return &__current;
}

/* Stub: report every resource limit as unlimited. */
unsigned long rlimit(unsigned int limit)
{
	return (unsigned long)-1;
}
/* Update a VMA's range fields without touching any tree. */
void vma_set_range(struct vm_area_struct *vma,
		   unsigned long start, unsigned long end,
		   pgoff_t pgoff)
{
	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_pgoff = pgoff;
}
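
/*
 * Illustrative sketch only (not part of the upstream file): how these
 * helpers are typically composed in a test. The address range, flags and
 * expected count below are assumptions for demonstration; the sketch is
 * compiled out via #if 0.
 */
#if 0
static bool example_alloc_cleanup(struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;

	/* Allocate a two-page VMA and link it into mm's maple tree. */
	vma = alloc_and_link_vma(mm, 0x1000, 0x3000, 1, VM_READ | VM_WRITE);
	if (vma == NULL)
		return false;

	/* alloc_and_link_vma() reset vm_lock_seq, so no write is pending. */
	if (vma_write_started(vma))
		return false;

	/* Free all VMAs; exactly one should have been linked. */
	return cleanup_mm(mm, &vmi) == 1;
}
#endif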