author    Andi Kleen <ak@suse.de>    2002-06-17 19:43:55 -0700
committer Linus Torvalds <torvalds@home.transmeta.com>    2002-06-17 19:43:55 -0700
commit    c8712aebdb2455a095d596c1a5a6af358b3efde3 (patch)
tree      9587e727a015c93f88dfed57ecc573ca8a259c54 /arch
parent    d9083ea2b8d34d827f77e3d8ceeb04c160a938ed (diff)
[PATCH] change_page_attr and AGP update
Add change_page_attr to change page attributes for the kernel linear map.
Fix AGP driver to use change_page_attr for the AGP buffer.
Clean up AGP driver a bit (only tested on i386/VIA+AMD).
Change ioremap_nocache to use change_page_attr to avoid mappings with conflicting caching attributes.
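As a minimal sketch of how a driver might consume the interface this patch introduces: change_page_attr(), global_flush_tlb(), PAGE_KERNEL and PAGE_KERNEL_NOCACHE are the symbols the patch actually provides; the function names and single-page buffer below are hypothetical.

	#include <linux/mm.h>
	#include <linux/errno.h>
	#include <asm/pgtable.h>

	static struct page *my_page;	/* hypothetical driver buffer */

	static int my_buffer_setup(void)
	{
		my_page = alloc_page(GFP_KERNEL);
		if (!my_page)
			return -ENOMEM;
		/* Make the kernel linear mapping of this page uncacheable. */
		if (change_page_attr(my_page, 1, PAGE_KERNEL_NOCACHE) < 0) {
			__free_page(my_page);
			return -ENOMEM;
		}
		global_flush_tlb();	/* the API requires this after any change */
		return 0;
	}

	static void my_buffer_teardown(void)
	{
		/* Revert to write-back before the page returns to the allocator. */
		change_page_attr(my_page, 1, PAGE_KERNEL);
		global_flush_tlb();
		__free_page(my_page);
	}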
Diffstat (limited to 'arch')
-rw-r--r--  arch/i386/mm/Makefile   |   3
-rw-r--r--  arch/i386/mm/ioremap.c  |  69
-rw-r--r--  arch/i386/mm/pageattr.c | 197
3 files changed, 265 insertions(+), 4 deletions(-)
diff --git a/arch/i386/mm/Makefile b/arch/i386/mm/Makefile
index 73e25bd3022a..67df8b6f6594 100644
--- a/arch/i386/mm/Makefile
+++ b/arch/i386/mm/Makefile
@@ -9,6 +9,7 @@
O_TARGET := mm.o
-obj-y := init.o fault.o ioremap.o extable.o
+obj-y := init.o fault.o ioremap.o extable.o pageattr.o
+export-objs := pageattr.o
include $(TOPDIR)/Rules.make
diff --git a/arch/i386/mm/ioremap.c b/arch/i386/mm/ioremap.c
index f81fae4ff7a9..4ba5641b271f 100644
--- a/arch/i386/mm/ioremap.c
+++ b/arch/i386/mm/ioremap.c
@@ -10,12 +10,13 @@
#include <linux/vmalloc.h>
#include <linux/init.h>
+#include <linux/slab.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
-
+#include <asm/pgtable.h>
static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
unsigned long phys_addr, unsigned long flags)
@@ -155,6 +156,7 @@ void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flag
area = get_vm_area(size, VM_IOREMAP);
if (!area)
return NULL;
+ area->phys_addr = phys_addr;
addr = area->addr;
if (remap_area_pages(VMALLOC_VMADDR(addr), phys_addr, size, flags)) {
vfree(addr);
@@ -163,10 +165,71 @@ void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flag
return (void *) (offset + (char *)addr);
}
+
+/**
+ * ioremap_nocache - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * ioremap_nocache performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ *
+ * This version of ioremap ensures that the memory is marked uncachable
+ * on the CPU as well as honouring existing caching rules from things like
+ * the PCI bus. Note that there are other caches and buffers on many
+ * busses. In particular driver authors should read up on PCI writes.
+ *
+ * It's useful if some control registers are in such an area and
+ * write combining or read caching is not desirable.
+ *
+ * Must be freed with iounmap.
+ */
+
+void *ioremap_nocache (unsigned long phys_addr, unsigned long size)
+{
+ void *p = __ioremap(phys_addr, size, _PAGE_PCD);
+ if (!p)
+ return p;
+
+ if (phys_addr + size < virt_to_phys(high_memory)) {
+ struct page *ppage = virt_to_page(__va(phys_addr));
+ unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ BUG_ON(phys_addr + size > virt_to_phys(high_memory));
+ BUG_ON(phys_addr + size < phys_addr);
+
+ if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
+ iounmap(p);
+ p = NULL;
+ }
+ }
+
+ return p;
+}
+
void iounmap(void *addr)
{
- if (addr > high_memory)
- return vfree((void *) (PAGE_MASK & (unsigned long) addr));
+ struct vm_struct *p;
+ if (addr < high_memory)
+ return;
+ p = remove_kernel_area(addr);
+ if (!p) {
+ printk("__iounmap: bad address %p\n", addr);
+ return;
+ }
+
+ BUG_ON(p->phys_addr == 0); /* not allocated with ioremap */
+
+ vmfree_area_pages(VMALLOC_VMADDR(p->addr), p->size);
+ if (p->flags && p->phys_addr < virt_to_phys(high_memory)) {
+ change_page_attr(virt_to_page(__va(p->phys_addr)),
+ p->size >> PAGE_SHIFT,
+ PAGE_KERNEL);
+ }
+ kfree(p);
}
void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
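A usage sketch for the ioremap_nocache()/iounmap() pair added above (not part of the patch; the physical base, length and register offset are made up for illustration):

	#include <linux/errno.h>
	#include <asm/io.h>

	#define MY_MMIO_PHYS	0xfebf0000UL	/* hypothetical device BAR */
	#define MY_MMIO_SIZE	0x1000UL

	static void *my_regs;

	static int my_mmio_init(void)
	{
		my_regs = ioremap_nocache(MY_MMIO_PHYS, MY_MMIO_SIZE);
		if (!my_regs)
			return -ENOMEM;
		writel(1, my_regs + 4);	/* hypothetical control register */
		return 0;
	}

	static void my_mmio_exit(void)
	{
		iounmap(my_regs);	/* must be freed with iounmap, per the kernel-doc */
	}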
diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c
new file mode 100644
index 000000000000..c5e2374b6bc7
--- /dev/null
+++ b/arch/i386/mm/pageattr.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2002 Andi Kleen, SuSE Labs.
+ * Thanks to Ben LaHaise for precious feedback.
+ */
+
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <asm/uaccess.h>
+#include <asm/processor.h>
+
+static inline pte_t *lookup_address(unsigned long address)
+{
+ pgd_t *pgd = pgd_offset_k(address);
+ pmd_t *pmd = pmd_offset(pgd, address);
+ if (pmd_large(*pmd))
+ return (pte_t *)pmd;
+ return pte_offset_kernel(pmd, address);
+}
+
+static struct page *split_large_page(unsigned long address, pgprot_t prot)
+{
+ int i;
+ unsigned long addr;
+ struct page *base = alloc_pages(GFP_KERNEL, 0);
+ pte_t *pbase;
+ if (!base)
+ return NULL;
+ address = __pa(address);
+ addr = address & LARGE_PAGE_MASK;
+ pbase = (pte_t *)page_address(base);
+ for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
+ pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
+ addr == address ? prot : PAGE_KERNEL);
+ }
+ return base;
+}
+
+static void flush_kernel_map(void *dummy)
+{
+ /* Could use CLFLUSH here if the CPU supports it (Hammer,P4) */
+ if (boot_cpu_data.x86_model >= 4)
+ asm volatile("wbinvd":::"memory");
+ /* Flush all to work around errata in early Athlons regarding
+ * large page flushing.
+ */
+ __flush_tlb_all();
+}
+
+static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
+{
+ set_pte_atomic(kpte, pte); /* change init_mm */
+#ifndef CONFIG_X86_PAE
+ {
+ struct list_head *l;
+ spin_lock(&mmlist_lock);
+ list_for_each(l, &init_mm.mmlist) {
+ struct mm_struct *mm = list_entry(l, struct mm_struct, mmlist);
+ pmd_t *pmd = pmd_offset(pgd_offset(mm, address), address);
+ set_pte_atomic((pte_t *)pmd, pte);
+ }
+ spin_unlock(&mmlist_lock);
+ }
+#endif
+}
+
+/*
+ * No more special protections in this 2/4MB area - revert to a
+ * large page again.
+ */
+static inline void revert_page(struct page *kpte_page, unsigned long address)
+{
+ pte_t *linear = (pte_t *)
+ pmd_offset(pgd_offset(&init_mm, address), address);
+ set_pmd_pte(linear, address,
+ pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT,
+ PAGE_KERNEL_LARGE));
+}
+
+static int
+__change_page_attr(struct page *page, pgprot_t prot, struct page **oldpage)
+{
+ pte_t *kpte;
+ unsigned long address;
+ struct page *kpte_page;
+
+#ifdef CONFIG_HIGHMEM
+ if (page >= highmem_start_page)
+ BUG();
+#endif
+ address = (unsigned long)page_address(page);
+
+ kpte = lookup_address(address);
+ kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
+ if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
+ if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
+ pte_t old = *kpte;
+ pte_t standard = mk_pte(page, PAGE_KERNEL);
+
+ set_pte_atomic(kpte, mk_pte(page, prot));
+ if (pte_same(old,standard))
+ atomic_inc(&kpte_page->count);
+ } else {
+ struct page *split = split_large_page(address, prot);
+ if (!split)
+ return -ENOMEM;
+ set_pmd_pte(kpte,address,mk_pte(split, PAGE_KERNEL));
+ }
+ } else if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
+ set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
+ atomic_dec(&kpte_page->count);
+ }
+
+ if (cpu_has_pse && (atomic_read(&kpte_page->count) == 1)) {
+ *oldpage = kpte_page;
+ revert_page(kpte_page, address);
+ }
+ return 0;
+}
+
+static inline void flush_map(void)
+{
+#ifdef CONFIG_SMP
+ smp_call_function(flush_kernel_map, NULL, 1, 1);
+#endif
+ flush_kernel_map(NULL);
+}
+
+struct deferred_page {
+ struct deferred_page *next;
+ struct page *fpage;
+};
+static struct deferred_page *df_list; /* protected by init_mm.mmap_sem */
+
+/*
+ * Change the page attributes of a page in the linear mapping.
+ *
+ * This should be used when a page is mapped with a different caching policy
+ * than write-back somewhere - some CPUs do not like it when mappings with
+ * different caching policies exist. This changes the page attributes of the
+ * kernel linear mapping too.
+ *
+ * The caller needs to ensure that there are no conflicting mappings elsewhere.
+ * This function only deals with the kernel linear map.
+ *
+ * Caller must call global_flush_tlb() after this.
+ */
+int change_page_attr(struct page *page, int numpages, pgprot_t prot)
+{
+ int err = 0;
+ struct page *fpage;
+ int i;
+
+ down_write(&init_mm.mmap_sem);
+ for (i = 0; i < numpages; i++, page++) {
+ fpage = NULL;
+ err = __change_page_attr(page, prot, &fpage);
+ if (err)
+ break;
+ if (fpage) {
+ struct deferred_page *df;
+ df = kmalloc(sizeof(struct deferred_page), GFP_KERNEL);
+ if (!df) {
+ flush_map();
+ __free_page(fpage);
+ } else {
+ df->next = df_list;
+ df->fpage = fpage;
+ df_list = df;
+ }
+ }
+ }
+ up_write(&init_mm.mmap_sem);
+ return err;
+}
+
+void global_flush_tlb(void)
+{
+ struct deferred_page *df, *next_df;
+
+ down_read(&init_mm.mmap_sem);
+ df = xchg(&df_list, NULL);
+ up_read(&init_mm.mmap_sem);
+ flush_map();
+ for (; df; df = next_df) {
+ next_df = df->next;
+ if (df->fpage)
+ __free_page(df->fpage);
+ kfree(df);
+ }
+}
+
+EXPORT_SYMBOL(change_page_attr);
+EXPORT_SYMBOL(global_flush_tlb);
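A final hedged sketch of the batching pattern the deferred-page list enables: change_page_attr() only queues reverted page-table pages on df_list, so several changes can be made and paid for with a single global_flush_tlb(). The loop and names below are illustrative, not from this commit.

	#include <linux/mm.h>
	#include <asm/pgtable.h>

	static int my_make_uncached(struct page **pages, int n)
	{
		int i, err;

		for (i = 0; i < n; i++) {
			err = change_page_attr(pages[i], 1, PAGE_KERNEL_NOCACHE);
			if (err)
				return err;	/* simplified: a real driver would revert */
		}
		/* One wbinvd + TLB flush covers everything queued above. */
		global_flush_tlb();
		return 0;
	}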