author	Andrew Morton <akpm@digeo.com>	2002-09-09 21:09:43 -0700
committer	Linus Torvalds <torvalds@penguin.transmeta.com>	2002-09-09 21:09:43 -0700
commit	81e0a1a69b727c2fa09f5b22270d6694511fbc6b (patch)
tree	7123bdaa43652152639b1ea08c1f57c2e1d99e8e /include/linux
parent	9dc8af8046afd8c05588b0ec338e6f3358ce40c9 (diff)
[PATCH] resurrect CONFIG_HIGHPTE
Bill Irwin's patch to fix up pte's in highmem.

With CONFIG_HIGHPTE, the direct pte pointer in struct page becomes the 64-bit physical address of the single pte which is mapping this page. If the page is not PageDirect then page->pte.chain points at a list of pte_chains, each of which now contains an array of 64-bit physical addresses of the pte's which are mapping the page.

The functions rmap_ptep_map() and rmap_ptep_unmap() are used for mapping and unmapping the page which backs the target pte.

The patch touches all architectures (adding do-nothing compatibility macros and inlines). It generally mangles lots of header files and may break non-ia32 compiles. I've had it in testing since 2.5.31.
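As a rough sketch of the data structures the message describes (illustrative only, not the patch's code: the layout, the NRPTE value, and the struct names here are simplified stand-ins), a direct mapping stores one 64-bit physical pte address, while a chain holds an array of them:

#include <stdint.h>

/*
 * Illustrative sketch -- with something like CONFIG_HIGHPTE a pte page
 * may live in highmem, so rmap can no longer keep a kernel-virtual
 * pte_t pointer; it records the pte's 64-bit physical address instead.
 */
typedef uint64_t pte_addr_t;		/* physical address of one pte */

#define NRPTE 7				/* ptes per chain block (illustrative) */

struct pte_chain {
	struct pte_chain *next;		/* next block in the reverse-map list */
	pte_addr_t ptes[NRPTE];		/* physical addresses of mapping ptes */
};

/* The relevant corner of struct page, simplified: */
struct page_stub {
	union {
		pte_addr_t direct;	/* PageDirect: the single mapping pte */
		struct pte_chain *chain; /* otherwise: list of pte_chain blocks */
	} pte;
};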
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/highmem.h	1
-rw-r--r--	include/linux/mm.h	14
2 files changed, 13 insertions(+), 2 deletions(-)
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 370177037315..042e6ddfcf8a 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -26,6 +26,7 @@ static inline void *kmap(struct page *page) { return page_address(page); }
 #define kmap_atomic(page, idx)		page_address(page)
 #define kunmap_atomic(addr, idx)	do { } while (0)
+#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)
 #endif /* CONFIG_HIGHMEM */
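In the CONFIG_HIGHMEM=n stub above, kmap_atomic_to_page() simply falls back to virt_to_page() on the mapped pointer. A hedged usage sketch (a fragment, not from the patch; the helper name is hypothetical and KM_PTE0 is assumed to be the kmap slot reserved for pagetable access in this era):

/* Sketch: atomically map a (possibly highmem) pagetable page, then
 * recover the struct page behind the mapping.  Without CONFIG_HIGHMEM
 * the map is a no-op and kmap_atomic_to_page() is just virt_to_page(). */
static struct page *pte_backing_page(struct page *pte_page)
{
	pte_t *pte = kmap_atomic(pte_page, KM_PTE0);
	struct page *backing = kmap_atomic_to_page(pte);

	kunmap_atomic(pte, KM_PTE0);
	return backing;		/* same page as pte_page */
}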
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 5e01743a0bc6..4dfac9d2cb5c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -159,9 +159,9 @@ struct page {
 	struct list_head lru;		/* Pageout list, eg. active_list;
					   protected by zone->lru_lock !! */
 	union {
-		struct pte_chain * chain;	/* Reverse pte mapping pointer.
+		struct pte_chain *chain;	/* Reverse pte mapping pointer.
 						 * protected by PG_chainlock */
-		pte_t * direct;
+		pte_addr_t direct;
 	} pte;
 	unsigned long private;		/* mapping-private opaque data */
@@ -322,6 +322,16 @@ static inline void set_page_zone(struct page *page, unsigned long zone_num)
 #endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
 
 /*
+ * Return true if this page is mapped into pagetables.  Subtle: test pte.direct
+ * rather than pte.chain.  Because sometimes pte.direct is 64-bit, and .chain
+ * is only 32-bit.
+ */
+static inline int page_mapped(struct page *page)
+{
+	return page->pte.direct != 0;
+}
+
+/*
 * Error return values for the *_nopage functions
 */
 #define NOPAGE_SIGBUS	(NULL)
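The subtlety the new page_mapped() comment points at -- testing pte.direct rather than pte.chain -- comes from the union: on a 32-bit kernel with a 64-bit pte_addr_t, a direct physical address can have all-zero low 32 bits, so reading the narrower chain member could falsely report the page as unmapped, while the full-width direct test cannot. A minimal userspace sketch of that aliasing (the types are stand-ins, not kernel code):

#include <stdint.h>
#include <stdio.h>

struct pte_chain;			/* opaque stand-in */

union pte_slot {
	uint64_t direct;		/* pte_addr_t: 64-bit with HIGHPTE */
	struct pte_chain *chain;	/* a 32-bit pointer on i386 */
};

int main(void)
{
	union pte_slot pte = { .direct = 0 };

	printf("unmapped: %d\n", pte.direct == 0);	/* 1 */

	/* A pte located above 4GiB: its low 32 bits are zero, so a
	 * 32-bit read through .chain would see NULL, but the
	 * full-width .direct test still reports the page as mapped. */
	pte.direct = 0x100000000ULL;
	printf("mapped:   %d\n", pte.direct != 0);	/* 1 */
	return 0;
}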