summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorHugh Dickins <hugh@veritas.com>2002-07-29 01:27:36 -0700
committerLinus Torvalds <torvalds@home.transmeta.com>2002-07-29 01:27:36 -0700
commita5271c10edcc1f090fba992760afdbbf67654bd7 (patch)
treedeb061e19c5d22b03a7359d5fb634c57acc9b751
parentd9acf5fe72b5d512be31685803b6f362a2a6bb23 (diff)
[PATCH] remove acct arg from do_munmap
An acct flag was added to do_munmap, true everywhere but in mremap's move_vma: instead of updating the arch and driver sources, revert that change and temporarily mask VM_ACCOUNT around that one do_munmap. Also, noticed that do_mremap fails needlessly if both shrinking _and_ moving a mapping: update old_len to pass vm area boundaries test.
-rw-r--r--include/linux/mm.h2
-rw-r--r--ipc/shm.c2
-rw-r--r--mm/mmap.c17
-rw-r--r--mm/mremap.c31
4 files changed, 34 insertions, 18 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index cb48ce0f9d38..747312505986 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -430,7 +430,7 @@ out:
return ret;
}
-extern int do_munmap(struct mm_struct *, unsigned long, size_t, int);
+extern int do_munmap(struct mm_struct *, unsigned long, size_t);
extern unsigned long do_brk(unsigned long, unsigned long);
diff --git a/ipc/shm.c b/ipc/shm.c
index 3d08a58a7389..8618c5fb1977 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -671,7 +671,7 @@ asmlinkage long sys_shmdt (char *shmaddr)
shmdnext = shmd->vm_next;
if (shmd->vm_ops == &shm_vm_ops
&& shmd->vm_start - (shmd->vm_pgoff << PAGE_SHIFT) == (ulong) shmaddr) {
- do_munmap(mm, shmd->vm_start, shmd->vm_end - shmd->vm_start, 1);
+ do_munmap(mm, shmd->vm_start, shmd->vm_end - shmd->vm_start);
retval = 0;
}
}
diff --git a/mm/mmap.c b/mm/mmap.c
index df759ce84545..10718050bc34 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -197,7 +197,7 @@ asmlinkage unsigned long sys_brk(unsigned long brk)
/* Always allow shrinking brk. */
if (brk <= mm->brk) {
- if (!do_munmap(mm, newbrk, oldbrk-newbrk, 1))
+ if (!do_munmap(mm, newbrk, oldbrk-newbrk))
goto set_brk;
goto out;
}
@@ -517,7 +517,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
munmap_back:
vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
if (vma && vma->vm_start < addr + len) {
- if (do_munmap(mm, addr, len, 1))
+ if (do_munmap(mm, addr, len))
return -ENOMEM;
goto munmap_back;
}
@@ -945,8 +945,7 @@ static void unmap_region(struct mm_struct *mm,
struct vm_area_struct *mpnt,
struct vm_area_struct *prev,
unsigned long start,
- unsigned long end,
- int acct)
+ unsigned long end)
{
mmu_gather_t *tlb;
@@ -960,7 +959,7 @@ static void unmap_region(struct mm_struct *mm,
unmap_page_range(tlb, mpnt, from, to);
- if (acct && (mpnt->vm_flags & VM_ACCOUNT)) {
+ if (mpnt->vm_flags & VM_ACCOUNT) {
len = to - from;
vm_unacct_memory(len >> PAGE_SHIFT);
}
@@ -1041,7 +1040,7 @@ static int splitvma(struct mm_struct *mm, struct vm_area_struct *mpnt, unsigned
* work. This now handles partial unmappings.
* Jeremy Fitzhardine <jeremy@sw.oz.au>
*/
-int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, int acct)
+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
{
unsigned long end;
struct vm_area_struct *mpnt, *prev, *last;
@@ -1085,7 +1084,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, int acct)
*/
spin_lock(&mm->page_table_lock);
mpnt = touched_by_munmap(mm, mpnt, prev, end);
- unmap_region(mm, mpnt, prev, start, end, acct);
+ unmap_region(mm, mpnt, prev, start, end);
spin_unlock(&mm->page_table_lock);
/* Fix up all other VM information */
@@ -1100,7 +1099,7 @@ asmlinkage long sys_munmap(unsigned long addr, size_t len)
struct mm_struct *mm = current->mm;
down_write(&mm->mmap_sem);
- ret = do_munmap(mm, addr, len, 1);
+ ret = do_munmap(mm, addr, len);
up_write(&mm->mmap_sem);
return ret;
}
@@ -1137,7 +1136,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
munmap_back:
vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
if (vma && vma->vm_start < addr + len) {
- if (do_munmap(mm, addr, len, 1))
+ if (do_munmap(mm, addr, len))
return -ENOMEM;
goto munmap_back;
}
diff --git a/mm/mremap.c b/mm/mremap.c
index 80a8b7dc3e23..b7cdce69ad01 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -150,6 +150,7 @@ static inline unsigned long move_vma(struct vm_area_struct * vma,
struct mm_struct * mm = vma->vm_mm;
struct vm_area_struct * new_vma, * next, * prev;
int allocated_vma;
+ int split = 0;
new_vma = NULL;
next = find_vma_prev(mm, new_addr, &prev);
@@ -210,11 +211,26 @@ static inline unsigned long move_vma(struct vm_area_struct * vma,
new_vma->vm_ops->open(new_vma);
insert_vm_struct(current->mm, new_vma);
}
- /*
- * The old VMA has been accounted for,
- * don't double account
- */
- do_munmap(current->mm, addr, old_len, 0);
+
+ /* Conceal VM_ACCOUNT so old reservation is not undone */
+ if (vma->vm_flags & VM_ACCOUNT) {
+ vma->vm_flags &= ~VM_ACCOUNT;
+ if (addr > vma->vm_start) {
+ if (addr + old_len < vma->vm_end)
+ split = 1;
+ } else if (addr + old_len == vma->vm_end)
+ vma = NULL; /* it will be removed */
+ } else
+ vma = NULL; /* nothing more to do */
+
+ do_munmap(current->mm, addr, old_len);
+
+ /* Restore VM_ACCOUNT if one or two pieces of vma left */
+ if (vma) {
+ vma->vm_flags |= VM_ACCOUNT;
+ if (split)
+ vma->vm_next->vm_flags |= VM_ACCOUNT;
+ }
current->mm->total_vm += new_len >> PAGE_SHIFT;
if (new_vma->vm_flags & VM_LOCKED) {
current->mm->locked_vm += new_len >> PAGE_SHIFT;
@@ -272,7 +288,7 @@ unsigned long do_mremap(unsigned long addr,
if ((addr <= new_addr) && (addr+old_len) > new_addr)
goto out;
- do_munmap(current->mm, new_addr, new_len, 1);
+ do_munmap(current->mm, new_addr, new_len);
}
/*
@@ -282,9 +298,10 @@ unsigned long do_mremap(unsigned long addr,
*/
ret = addr;
if (old_len >= new_len) {
- do_munmap(current->mm, addr+new_len, old_len - new_len, 1);
+ do_munmap(current->mm, addr+new_len, old_len - new_len);
if (!(flags & MREMAP_FIXED) || (new_addr == addr))
goto out;
+ old_len = new_len;
}
/*