summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDavid S. Miller <davem@nuts.davemloft.net>2005-02-23 01:46:43 -0800
committerDavid S. Miller <davem@nuts.davemloft.net>2005-02-23 01:46:43 -0800
commit9845bf7f382e79cee9078cb67c4cb607b4264117 (patch)
treef35f4d1581f8b58f7a80af2677c3c9ef21a25026
parentae3d0a847f4b38812241e4a5dc3371965c752a8c (diff)
[SPARC64]: Pass mm/addr directly to tlb_batch_add()
No longer need to store this information in the pte table page struct.

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--arch/sparc64/mm/generic.c25
-rw-r--r--arch/sparc64/mm/hugetlbpage.c10
-rw-r--r--arch/sparc64/mm/init.c3
-rw-r--r--arch/sparc64/mm/tlb.c17
-rw-r--r--include/asm-sparc64/pgalloc.h20
-rw-r--r--include/asm-sparc64/pgtable.h11
6 files changed, 40 insertions, 46 deletions
diff --git a/arch/sparc64/mm/generic.c b/arch/sparc64/mm/generic.c
index 4f61c0a5ada1..e379f744be0a 100644
--- a/arch/sparc64/mm/generic.c
+++ b/arch/sparc64/mm/generic.c
@@ -25,8 +25,11 @@
* side-effect bit will be turned off. This is used as a
* performance improvement on FFB/AFB. -DaveM
*/
-static inline void io_remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
- unsigned long offset, pgprot_t prot, int space)
+static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte,
+ unsigned long address,
+ unsigned long size,
+ unsigned long offset, pgprot_t prot,
+ int space)
{
unsigned long end;
@@ -67,14 +70,14 @@ static inline void io_remap_pte_range(pte_t * pte, unsigned long address, unsign
pte_val(entry) &= ~(_PAGE_E);
do {
BUG_ON(!pte_none(*pte));
- set_pte(pte, entry);
+ set_pte_at(mm, address, pte, entry);
address += PAGE_SIZE;
pte++;
} while (address < curend);
} while (address < end);
}
-static inline int io_remap_pmd_range(pmd_t * pmd, unsigned long address, unsigned long size,
+static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
unsigned long offset, pgprot_t prot, int space)
{
unsigned long end;
@@ -85,10 +88,10 @@ static inline int io_remap_pmd_range(pmd_t * pmd, unsigned long address, unsigne
end = PGDIR_SIZE;
offset -= address;
do {
- pte_t * pte = pte_alloc_map(current->mm, pmd, address);
+ pte_t * pte = pte_alloc_map(mm, pmd, address);
if (!pte)
return -ENOMEM;
- io_remap_pte_range(pte, address, end - address, address + offset, prot, space);
+ io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
pte_unmap(pte);
address = (address + PMD_SIZE) & PMD_MASK;
pmd++;
@@ -96,7 +99,7 @@ static inline int io_remap_pmd_range(pmd_t * pmd, unsigned long address, unsigne
return 0;
}
-static inline int io_remap_pud_range(pud_t * pud, unsigned long address, unsigned long size,
+static inline int io_remap_pud_range(struct mm_struct *mm, pud_t * pud, unsigned long address, unsigned long size,
unsigned long offset, pgprot_t prot, int space)
{
unsigned long end;
@@ -107,10 +110,10 @@ static inline int io_remap_pud_range(pud_t * pud, unsigned long address, unsigne
end = PUD_SIZE;
offset -= address;
do {
- pmd_t *pmd = pmd_alloc(current->mm, pud, address);
+ pmd_t *pmd = pmd_alloc(mm, pud, address);
if (!pud)
return -ENOMEM;
- io_remap_pmd_range(pmd, address, end - address, address + offset, prot, space);
+ io_remap_pmd_range(mm, pmd, address, end - address, address + offset, prot, space);
address = (address + PUD_SIZE) & PUD_MASK;
pud++;
} while (address < end);
@@ -132,11 +135,11 @@ int io_remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned
spin_lock(&mm->page_table_lock);
while (from < end) {
- pud_t *pud = pud_alloc(current->mm, dir, from);
+ pud_t *pud = pud_alloc(mm, dir, from);
error = -ENOMEM;
if (!pud)
break;
- error = io_remap_pud_range(pud, from, end - from, offset + from, prot, space);
+ error = io_remap_pud_range(mm, pud, from, end - from, offset + from, prot, space);
if (error)
break;
from = (from + PGDIR_SIZE) & PGDIR_MASK;
diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c
index c29692817022..49b814f9c371 100644
--- a/arch/sparc64/mm/hugetlbpage.c
+++ b/arch/sparc64/mm/hugetlbpage.c
@@ -62,6 +62,7 @@ static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
#define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0)
static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr,
struct page *page, pte_t * page_table, int write_access)
{
unsigned long i;
@@ -78,8 +79,9 @@ static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
mk_pte_huge(entry);
for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
- set_pte(page_table, entry);
+ set_pte_at(mm, addr, page_table, entry);
page_table++;
+ addr += PAGE_SIZE;
pte_val(entry) += PAGE_SIZE;
}
@@ -116,12 +118,12 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
ptepage = pte_page(entry);
get_page(ptepage);
for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
- set_pte(dst_pte, entry);
+ set_pte_at(dst, addr, dst_pte, entry);
pte_val(entry) += PAGE_SIZE;
dst_pte++;
+ addr += PAGE_SIZE;
}
dst->rss += (HPAGE_SIZE / PAGE_SIZE);
- addr += HPAGE_SIZE;
}
return 0;
@@ -261,7 +263,7 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
goto out;
}
}
- set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
+ set_huge_pte(mm, vma, addr, page, pte, vma->vm_flags & VM_WRITE);
}
out:
spin_unlock(&mm->page_table_lock);
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 5dc8b353b72a..41c8bc1ef83f 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -431,7 +431,8 @@ static void inherit_prom_mappings(void)
if (tlb_type == spitfire)
val &= ~0x0003fe0000000000UL;
- set_pte (ptep, __pte(val | _PAGE_MODIFIED));
+ set_pte_at(&init_mm, vaddr,
+ ptep, __pte(val | _PAGE_MODIFIED));
trans[i].data += BASE_PAGE_SIZE;
}
}
diff --git a/arch/sparc64/mm/tlb.c b/arch/sparc64/mm/tlb.c
index f900be7beadd..cd4dbf545332 100644
--- a/arch/sparc64/mm/tlb.c
+++ b/arch/sparc64/mm/tlb.c
@@ -41,15 +41,11 @@ void flush_tlb_pending(void)
}
}
-void tlb_batch_add(pte_t *ptep, pte_t orig)
+void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
+ pte_t *ptep, pte_t orig)
{
- struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
- struct page *ptepage;
- struct mm_struct *mm;
- unsigned long vaddr, nr;
-
- ptepage = virt_to_page(ptep);
- mm = (struct mm_struct *) ptepage->mapping;
+ struct mmu_gather *mp;
+ unsigned long nr;
/* It is more efficient to let flush_tlb_kernel_range()
* handle these cases.
@@ -57,8 +53,9 @@ void tlb_batch_add(pte_t *ptep, pte_t orig)
if (mm == &init_mm)
return;
- vaddr = ptepage->index +
- (((unsigned long)ptep & ~PAGE_MASK) * PTRS_PER_PTE);
+ mp = &__get_cpu_var(mmu_gathers);
+
+ vaddr &= PAGE_MASK;
if (pte_exec(orig))
vaddr |= 0x1UL;
diff --git a/include/asm-sparc64/pgalloc.h b/include/asm-sparc64/pgalloc.h
index e1b053882ae9..7ba8df8c4921 100644
--- a/include/asm-sparc64/pgalloc.h
+++ b/include/asm-sparc64/pgalloc.h
@@ -191,25 +191,17 @@ extern pte_t *__pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
- pte_t *pte = __pte_alloc_one_kernel(mm, address);
- if (pte) {
- struct page *page = virt_to_page(pte);
- page->mapping = (void *) mm;
- page->index = address & PMD_MASK;
- }
- return pte;
+ return __pte_alloc_one_kernel(mm, address);
}
static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
pte_t *pte = __pte_alloc_one_kernel(mm, addr);
- if (pte) {
- struct page *page = virt_to_page(pte);
- page->mapping = (void *) mm;
- page->index = addr & PMD_MASK;
- return page;
- }
+
+ if (pte)
+ return virt_to_page(pte);
+
return NULL;
}
@@ -246,13 +238,11 @@ static __inline__ void free_pte_slow(pte_t *pte)
static inline void pte_free_kernel(pte_t *pte)
{
- virt_to_page(pte)->mapping = NULL;
free_pte_fast(pte);
}
static inline void pte_free(struct page *ptepage)
{
- ptepage->mapping = NULL;
free_pte_fast(page_address(ptepage));
}
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index bfd5c93845a2..5373e6cf13d3 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -333,17 +333,18 @@ static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
#define pte_unmap_nested(pte) do { } while (0)
/* Actual page table PTE updates. */
-extern void tlb_batch_add(pte_t *ptep, pte_t orig);
+extern void tlb_batch_add(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t orig);
-static inline void set_pte(pte_t *ptep, pte_t pte)
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
{
pte_t orig = *ptep;
*ptep = pte;
- if (pte_present(orig))
- tlb_batch_add(ptep, orig);
+ if (pte_val(orig) & _PAGE_VALID)
+ tlb_batch_add(mm, addr, ptep, orig);
}
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
#define pte_clear(mm,addr,ptep) \
set_pte_at((mm), (addr), (ptep), __pte(0UL))