| author | Kanoj Sarcar <kanoj@vger.kernel.org> | 2002-04-23 08:19:51 -0400 |
|---|---|---|
| committer | David S. Miller <davem@nuts.ninka.net> | 2002-04-23 08:19:51 -0400 |
| commit | 30746bbd9545ec11346d515e419878ea265bb4d7 | |
| tree | a612c66c2a131f3961562c453773f0db08b65b3c /mm | |
| parent | 45fbecb1147007167ea78399f165ceae775d86e7 | |
cachetlb.txt, locking, fork.c, mremap.c, mprotect.c, memory.c:
Make sure that flush_tlb_range() is called with the page_table_lock (PTL) held.
Also, make sure no new threads can start up in user mode
while a tlb_gather_mmu is in progress.
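For context, the race being closed: once the PTEs for a range have been rewritten, other CPUs may still hold stale translations in their TLBs until flush_tlb_range() runs. Flushing only after dropping the page_table_lock leaves a window in which another thread can take the lock and operate on the new page tables while stale TLB entries are still live. Below is a minimal sketch of the ordering the patch enforces; it is illustrative only, not the kernel source, and update_ptes_locked() is a hypothetical stand-in for the page-table walks done in zeromap_page_range(), remap_page_range() and change_protection():

```c
/*
 * Illustrative sketch of the lock/flush ordering this patch enforces.
 * update_ptes_locked() is a hypothetical helper standing in for the
 * real page-table walk; it is NOT a kernel function.
 */
static void modify_range(struct vm_area_struct *vma,
			 unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	spin_lock(&mm->page_table_lock);
	update_ptes_locked(mm, start, end);	/* rewrite PTEs for [start, end) */

	/*
	 * Flush stale translations *before* dropping the lock: after the
	 * unlock, another thread could acquire the lock and act on the new
	 * page tables while old TLB entries are still live on other CPUs.
	 */
	flush_tlb_range(vma, start, end);
	spin_unlock(&mm->page_table_lock);
}
```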
Diffstat (limited to 'mm')

| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | mm/memory.c | 4 |
| -rw-r--r-- | mm/mprotect.c | 2 |
| -rw-r--r-- | mm/mremap.c | 9 |

3 files changed, 8 insertions, 7 deletions
```diff
diff --git a/mm/memory.c b/mm/memory.c
index d72dc2efe5c7..0bad3cf9636c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -839,8 +839,8 @@ int zeromap_page_range(struct vm_area_struct *vma, unsigned long address, unsign
 		address = (address + PGDIR_SIZE) & PGDIR_MASK;
 		dir++;
 	} while (address && (address < end));
-	spin_unlock(&mm->page_table_lock);
 	flush_tlb_range(vma, beg, end);
+	spin_unlock(&mm->page_table_lock);
 	return error;
 }
 
@@ -922,8 +922,8 @@ int remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned lo
 		from = (from + PGDIR_SIZE) & PGDIR_MASK;
 		dir++;
 	} while (from && (from < end));
-	spin_unlock(&mm->page_table_lock);
 	flush_tlb_range(vma, beg, end);
+	spin_unlock(&mm->page_table_lock);
 	return error;
 }
 
diff --git a/mm/mprotect.c b/mm/mprotect.c
index b4d3bf78ef15..b0e9ae01b09f 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -92,8 +92,8 @@ static void change_protection(struct vm_area_struct *vma, unsigned long start, u
 		start = (start + PGDIR_SIZE) & PGDIR_MASK;
 		dir++;
 	} while (start && (start < end));
-	spin_unlock(&current->mm->page_table_lock);
 	flush_tlb_range(vma, beg, end);
+	spin_unlock(&current->mm->page_table_lock);
 	return;
 }
 
diff --git a/mm/mremap.c b/mm/mremap.c
index 84c1dc018c31..d58627b1b3b4 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -81,8 +81,9 @@ static inline int copy_one_pte(struct mm_struct *mm, pte_t * src, pte_t * dst)
 	return error;
 }
 
-static int move_one_page(struct mm_struct *mm, unsigned long old_addr, unsigned long new_addr)
+static int move_one_page(struct vm_area_struct *vma, unsigned long old_addr, unsigned long new_addr)
 {
+	struct mm_struct *mm = vma->vm_mm;
 	int error = 0;
 	pte_t *src, *dst;
 
@@ -94,6 +95,7 @@ static int move_one_page(struct mm_struct *mm, unsigned long old_addr, unsigned
 		pte_unmap_nested(src);
 		pte_unmap(dst);
 	}
+	flush_tlb_page(vma, old_addr);
 	spin_unlock(&mm->page_table_lock);
 	return error;
 }
@@ -113,10 +115,9 @@ static int move_page_tables(struct vm_area_struct *vma,
 	 */
 	while (offset) {
 		offset -= PAGE_SIZE;
-		if (move_one_page(mm, old_addr + offset, new_addr + offset))
+		if (move_one_page(vma, old_addr + offset, new_addr + offset))
 			goto oops_we_failed;
 	}
-	flush_tlb_range(vma, old_addr, old_addr + len);
 	return 0;
 
 	/*
@@ -129,7 +130,7 @@ static int move_page_tables(struct vm_area_struct *vma,
 oops_we_failed:
 	flush_cache_range(vma, new_addr, new_addr + len);
 	while ((offset += PAGE_SIZE) < len)
-		move_one_page(mm, new_addr + offset, old_addr + offset);
+		move_one_page(vma, new_addr + offset, old_addr + offset);
 	zap_page_range(vma, new_addr, len);
 	return -1;
 }
```
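Note the different shape of the fix in mm/mremap.c: rather than flushing the whole range once after the move loop (which previously happened after the lock had already been dropped), move_one_page() now takes the vma so it can flush each moved page with flush_tlb_page() while the page_table_lock is still held, and the trailing flush_tlb_range() call is removed.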
