author     Linus Torvalds <torvalds@home.transmeta.com>   2002-02-18 19:53:39 -0800
committer  Linus Torvalds <torvalds@home.transmeta.com>   2002-02-18 19:53:39 -0800
commit     0c7f4371bd4cd724222fc2d6262cfdef13b84ae2 (patch)
tree       03955ed6d0573a88b820ab92581145abb74155fb /include
parent     991f6b0ad40a81c3ea6e012c195e15eb5a47867c (diff)
parent     e5191c50d12621e04f8bc247dcb6a0f4ad76ae26 (diff)
Merge home.transmeta.com:/home/torvalds/v2.5/small-page
into home.transmeta.com:/home/torvalds/v2.5/linux
Diffstat (limited to 'include')
-rw-r--r--   include/asm-alpha/pgtable.h      2
-rw-r--r--   include/asm-arm/pgtable.h        1
-rw-r--r--   include/asm-cris/pgtable.h       1
-rw-r--r--   include/asm-i386/pgtable.h       6
-rw-r--r--   include/asm-ia64/pgtable.h       5
-rw-r--r--   include/asm-mips/pgtable.h       5
-rw-r--r--   include/asm-mips64/pgtable.h     5
-rw-r--r--   include/asm-parisc/pgtable.h     1
-rw-r--r--   include/asm-ppc/pgtable.h        4
-rw-r--r--   include/asm-s390/pgtable.h       4
-rw-r--r--   include/asm-s390x/pgtable.h      4
-rw-r--r--   include/asm-sh/pgtable.h         5
-rw-r--r--   include/asm-sparc/pgtable.h      3
-rw-r--r--   include/asm-sparc64/page.h       3
-rw-r--r--   include/asm-sparc64/pgtable.h    3
-rw-r--r--   include/asm-x86_64/pgtable.h     1
-rw-r--r--   include/linux/mm.h              77
-rw-r--r--   include/linux/mmzone.h          34
-rw-r--r--   include/linux/pagemap.h          2
19 files changed, 115 insertions, 51 deletions
diff --git a/include/asm-alpha/pgtable.h b/include/asm-alpha/pgtable.h
index c3915880beed..70b2b9ce76fe 100644
--- a/include/asm-alpha/pgtable.h
+++ b/include/asm-alpha/pgtable.h
@@ -268,8 +268,6 @@ extern inline int pgd_bad(pgd_t pgd) { return (pgd_val(pgd) & ~_PFN_MASK) != _P
extern inline int pgd_present(pgd_t pgd) { return pgd_val(pgd) & _PAGE_VALID; }
extern inline void pgd_clear(pgd_t * pgdp) { pgd_val(*pgdp) = 0; }
-#define page_address(page) ((page)->virtual)
-
/*
* The following only work if pte_present() is true.
* Undefined behaviour if not..
diff --git a/include/asm-arm/pgtable.h b/include/asm-arm/pgtable.h
index e7ed8b336ca0..1a19ac6a2f7f 100644
--- a/include/asm-arm/pgtable.h
+++ b/include/asm-arm/pgtable.h
@@ -99,7 +99,6 @@ extern struct page *empty_zero_page;
/*
* Permanent address of a page. We never have highmem, so this is trivial.
*/
-#define page_address(page) ((page)->virtual)
#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT))
/*
diff --git a/include/asm-cris/pgtable.h b/include/asm-cris/pgtable.h
index 87c504e0ff46..51eb17d87f9d 100644
--- a/include/asm-cris/pgtable.h
+++ b/include/asm-cris/pgtable.h
@@ -439,7 +439,6 @@ static inline unsigned long __pte_page(pte_t pte)
/* permanent address of a page */
-#define page_address(page) ((page)->virtual)
#define __page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
#define pte_page(pte) (mem_map+pte_pagenr(pte))
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 137dafc66e1c..1bf46187be83 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -264,11 +264,7 @@ extern unsigned long pg0[1024];
#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
-/*
- * Permanent address of a page. Obviously must never be
- * called on a highmem page.
- */
-#define page_address(page) ((page)->virtual)
+
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
/*
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index edc2104509d5..c92025c36304 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -165,11 +165,6 @@
* addresses:
*/
-/*
- * Given a pointer to an mem_map[] entry, return the kernel virtual
- * address corresponding to that page.
- */
-#define page_address(page) ((page)->virtual)
/* Quick test to see if ADDR is a (potentially) valid physical address. */
static inline long
diff --git a/include/asm-mips/pgtable.h b/include/asm-mips/pgtable.h
index e24c6bde77f5..b70d53edab46 100644
--- a/include/asm-mips/pgtable.h
+++ b/include/asm-mips/pgtable.h
@@ -331,11 +331,6 @@ extern inline int pgd_bad(pgd_t pgd) { return 0; }
extern inline int pgd_present(pgd_t pgd) { return 1; }
extern inline void pgd_clear(pgd_t *pgdp) { }
-/*
- * Permanent address of a page. On MIPS we never have highmem, so this
- * is simple.
- */
-#define page_address(page) ((page)->virtual)
#ifdef CONFIG_CPU_VR41XX
#define pte_page(x) (mem_map+(unsigned long)((pte_val(x) >> (PAGE_SHIFT + 2))))
#else
diff --git a/include/asm-mips64/pgtable.h b/include/asm-mips64/pgtable.h
index 9b041abb4083..d5f16df89c8a 100644
--- a/include/asm-mips64/pgtable.h
+++ b/include/asm-mips64/pgtable.h
@@ -370,11 +370,6 @@ extern inline void pgd_clear(pgd_t *pgdp)
pgd_val(*pgdp) = ((unsigned long) invalid_pmd_table);
}
-/*
- * Permanent address of a page. On MIPS64 we never have highmem, so this
- * is simple.
- */
-#define page_address(page) ((page)->virtual)
#ifndef CONFIG_DISCONTIGMEM
#define pte_page(x) (mem_map+(unsigned long)((pte_val(x) >> PAGE_SHIFT)))
#else
diff --git a/include/asm-parisc/pgtable.h b/include/asm-parisc/pgtable.h
index 1dfe59ef444e..df853449c900 100644
--- a/include/asm-parisc/pgtable.h
+++ b/include/asm-parisc/pgtable.h
@@ -275,7 +275,6 @@ extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
* Permanent address of a page. Obviously must never be
* called on a highmem page.
*/
-#define page_address(page) ({ if (!(page)->virtual) BUG(); (page)->virtual; })
#define __page_address(page) ({ if (PageHighMem(page)) BUG(); PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT); })
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
#define pte_page(x) (mem_map+pte_pagenr(x))
diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h
index e92aaf0f0661..3c2498a7a798 100644
--- a/include/asm-ppc/pgtable.h
+++ b/include/asm-ppc/pgtable.h
@@ -389,10 +389,6 @@ extern unsigned long empty_zero_page[1024];
#define pmd_present(pmd) ((pmd_val(pmd) & PAGE_MASK) != 0)
#define pmd_clear(pmdp) do { pmd_val(*(pmdp)) = 0; } while (0)
-/*
- * Permanent address of a page.
- */
-#define page_address(page) ((page)->virtual)
#define pte_page(x) (mem_map+(unsigned long)((pte_val(x)-PPC_MEMSTART) >> PAGE_SHIFT))
#ifndef __ASSEMBLY__
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 5e22205addb8..c0591a4b535a 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -239,10 +239,6 @@ extern inline void set_pte(pte_t *pteptr, pte_t pteval)
*pteptr = pteval;
}
-/*
- * Permanent address of a page.
- */
-#define page_address(page) ((page)->virtual)
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
/*
diff --git a/include/asm-s390x/pgtable.h b/include/asm-s390x/pgtable.h
index 2dcb8b438f83..7d296df217b4 100644
--- a/include/asm-s390x/pgtable.h
+++ b/include/asm-s390x/pgtable.h
@@ -234,10 +234,6 @@ extern inline void set_pte(pte_t *pteptr, pte_t pteval)
*pteptr = pteval;
}
-/*
- * Permanent address of a page.
- */
-#define page_address(page) ((page)->virtual)
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
/*
diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h
index ce68ec2d2bd1..a7e2b8dfecaf 100644
--- a/include/asm-sh/pgtable.h
+++ b/include/asm-sh/pgtable.h
@@ -208,11 +208,6 @@ extern unsigned long empty_zero_page[1024];
#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
-/*
- * Permanent address of a page. Obviously must never be
- * called on a highmem page.
- */
-#define page_address(page) ((page)->virtual) /* P1 address of the page */
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
#define pte_page(x) phys_to_page(pte_val(x)&PTE_PHYS_MASK)
diff --git a/include/asm-sparc/pgtable.h b/include/asm-sparc/pgtable.h
index b73aaa20847e..518f15379394 100644
--- a/include/asm-sparc/pgtable.h
+++ b/include/asm-sparc/pgtable.h
@@ -293,9 +293,6 @@ BTFIXUPDEF_CALL_CONST(pte_t, pte_mkyoung, pte_t)
#define page_pte_prot(page, prot) mk_pte(page, prot)
#define page_pte(page) page_pte_prot(page, __pgprot(0))
-/* Permanent address of a page. */
-#define page_address(page) ((page)->virtual)
-
BTFIXUPDEF_CALL(struct page *, pte_page, pte_t)
#define pte_page(pte) BTFIXUP_CALL(pte_page)(pte)
diff --git a/include/asm-sparc64/page.h b/include/asm-sparc64/page.h
index 1eb1ae8cf560..15ad83cb4097 100644
--- a/include/asm-sparc64/page.h
+++ b/include/asm-sparc64/page.h
@@ -30,6 +30,9 @@ extern void do_BUG(const char *file, int line);
#define PAGE_BUG(page) BUG()
+/* Sparc64 is slow at multiplication, we prefer to use some extra space. */
+#define WANT_PAGE_VIRTUAL 1
+
extern void _clear_page(void *page);
#define clear_page(X) _clear_page((void *)(X))
extern void clear_user_page(void *page, unsigned long vaddr);
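The asm-sparc64/page.h hunk above opts into the stored-pointer form of page_address() because the computed form subtracts two struct page pointers, and pointer subtraction divides by sizeof(struct page), which the compiler typically lowers to a multiply. The snippet below is a minimal userspace sketch of that trade-off, not kernel code; the constants (PAGE_SHIFT, PAGE_OFFSET) and helper names are invented for illustration.

/*
 * Illustrative userspace sketch (not kernel code) of the two page_address()
 * strategies: storing the kernel virtual address in struct page versus
 * computing it from the page's position in mem_map.  All constants here
 * are made up for the example.
 */
#include <stdio.h>

#define PAGE_SHIFT   12UL
#define PAGE_OFFSET  0xC0000000UL          /* illustrative direct-map base */

struct page {
        unsigned long flags;
        void *virtual;                     /* only exists with WANT_PAGE_VIRTUAL */
};

static struct page mem_map[16];
static unsigned long zone_start_paddr;     /* 0 for this toy zone */

/* Stored form: a single load, no arithmetic on struct sizes. */
static void *page_address_stored(const struct page *page)
{
        return page->virtual;
}

/*
 * Computed form, modelled on the new generic page_address(): the pointer
 * subtraction divides by sizeof(struct page), which compilers usually
 * lower to a multiply -- the cost the sparc64 comment wants to avoid.
 */
static void *page_address_computed(const struct page *page)
{
        unsigned long idx = (unsigned long)(page - mem_map);
        return (void *)(PAGE_OFFSET + zone_start_paddr + (idx << PAGE_SHIFT));
}

int main(void)
{
        mem_map[3].virtual = page_address_computed(&mem_map[3]);
        printf("computed %p, stored %p\n",
               page_address_computed(&mem_map[3]),
               page_address_stored(&mem_map[3]));
        return 0;
}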
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index ebf8ac1060bc..587b289e5c2e 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -243,8 +243,7 @@ extern inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
#define pte_mkold(pte) (__pte(((pte_val(pte)<<1UL)>>1UL) & ~_PAGE_ACCESSED))
/* Permanent address of a page. */
-#define __page_address(page) ((page)->virtual)
-#define page_address(page) ({ __page_address(page); })
+#define __page_address(page) page_address(page)
#define pte_page(x) (mem_map+(((pte_val(x)&_PAGE_PADDR)-phys_base)>>PAGE_SHIFT))
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index c2061e0f2e68..aa884aa3c8c1 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -289,7 +289,6 @@ extern void __handle_bad_pmd_kernel(pmd_t * pmd);
* Permanent address of a page. Obviously must never be
* called on a highmem page.
*/
-#define page_address(page) ((page)->virtual)
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) /* FIXME: is this
right? */
#define pte_page(x) (mem_map+((unsigned long)((pte_val(x) >> PAGE_SHIFT))))
diff --git a/include/linux/mm.h b/include/linux/mm.h
index efd8fed2d361..580b32a99f93 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -157,12 +157,23 @@ typedef struct page {
updated asynchronously */
struct list_head lru; /* Pageout list, eg. active_list;
protected by pagemap_lru_lock !! */
- wait_queue_head_t wait; /* Page locked? Stand in line... */
struct page **pprev_hash; /* Complement to *next_hash. */
struct buffer_head * buffers; /* Buffer maps us to a disk block. */
+
+ /*
+ * On machines where all RAM is mapped into kernel address space,
+ * we can simply calculate the virtual address. On machines with
+ * highmem some memory is mapped into kernel virtual memory
+ * dynamically, so we need a place to store that address.
+ * Note that this field could be 16 bits on x86 ... ;)
+ *
+ * Architectures with slow multiplication can define
+ * WANT_PAGE_VIRTUAL in asm/page.h
+ */
+#if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)
void *virtual; /* Kernel virtual address (NULL if
not kmapped, ie. highmem) */
- struct zone_struct *zone; /* Memory zone we are in. */
+#endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
} mem_map_t;
/*
@@ -183,6 +194,11 @@ typedef struct page {
#define page_count(p) atomic_read(&(p)->count)
#define set_page_count(p,v) atomic_set(&(p)->count, v)
+static inline void init_page_count(struct page *page)
+{
+ page->count.counter = 0;
+}
+
/*
* Various page->flags bits:
*
@@ -237,7 +253,7 @@ typedef struct page {
* - private pages which have been modified may need to be swapped out
* to swap space and (later) to be read back into memory.
* During disk I/O, PG_locked is used. This bit is set before I/O
- * and reset when I/O completes. page->wait is a wait queue of all
+ * and reset when I/O completes. page_waitqueue(page) is a wait queue of all
* tasks waiting for the I/O on this page to complete.
* PG_uptodate tells whether the page's contents is valid.
* When a read completes, the page becomes uptodate, unless a disk I/O
@@ -299,6 +315,61 @@ typedef struct page {
#define SetPageChecked(page) set_bit(PG_checked, &(page)->flags)
#define PageLaunder(page) test_bit(PG_launder, &(page)->flags)
#define SetPageLaunder(page) set_bit(PG_launder, &(page)->flags)
+#define __SetPageReserved(page) __set_bit(PG_reserved, &(page)->flags)
+
+/*
+ * The zone field is never updated after free_area_init_core()
+ * sets it, so none of the operations on it need to be atomic.
+ */
+#define NODE_SHIFT 4
+#define ZONE_SHIFT (BITS_PER_LONG - 8)
+
+struct zone_struct;
+extern struct zone_struct *zone_table[];
+
+static inline zone_t *page_zone(struct page *page)
+{
+ return zone_table[page->flags >> ZONE_SHIFT];
+}
+
+static inline void set_page_zone(struct page *page, unsigned long zone_num)
+{
+ page->flags &= ~(~0UL << ZONE_SHIFT);
+ page->flags |= zone_num << ZONE_SHIFT;
+}
+
+/*
+ * In order to avoid #ifdefs within C code itself, we define
+ * set_page_address to a noop for non-highmem machines, where
+ * the field isn't useful.
+ * The same is true for page_address() in arch-dependent code.
+ */
+#if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)
+
+#define set_page_address(page, address) \
+ do { \
+ (page)->virtual = (address); \
+ } while(0)
+
+#else /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
+#define set_page_address(page, address) do { } while(0)
+#endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
+
+/*
+ * Permanent address of a page. Obviously must never be
+ * called on a highmem page.
+ */
+#if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)
+
+#define page_address(page) ((page)->virtual)
+
+#else /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
+
+#define page_address(page) \
+ __va( (((page) - page_zone(page)->zone_mem_map) << PAGE_SHIFT) \
+ + page_zone(page)->zone_start_paddr)
+
+#endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
extern void FASTCALL(set_page_dirty(struct page *));
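The mm.h hunks above drop the per-page zone pointer in favour of a zone index packed into the top bits of page->flags and looked up through zone_table[], and make page_address() either read page->virtual or derive the address from that zone. The following is a minimal userspace model of the flags packing; the simplified struct zone and the table size of 256 are assumptions for the sake of the example.

/*
 * Minimal userspace model (an illustration, not the kernel code) of the
 * zone-index-in-flags scheme added to include/linux/mm.h: the zone of a
 * page is encoded in the top bits of page->flags, so struct page no
 * longer needs a zone pointer.
 */
#include <assert.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)
#define ZONE_SHIFT (BITS_PER_LONG - 8)     /* top 8 bits hold the zone index */

struct zone { const char *name; };
struct page { unsigned long flags; };

/* Global table mapping the encoded index back to a zone, as in the patch. */
static struct zone *zone_table[256];

static struct zone *page_zone(struct page *page)
{
        return zone_table[page->flags >> ZONE_SHIFT];
}

static void set_page_zone(struct page *page, unsigned long zone_num)
{
        page->flags &= ~(~0UL << ZONE_SHIFT);  /* keep the low flag bits */
        page->flags |= zone_num << ZONE_SHIFT; /* stash the zone index on top */
}

int main(void)
{
        static struct zone normal = { "Normal" };
        struct page page = { .flags = 0x5 };   /* some unrelated flag bits set */

        zone_table[1] = &normal;
        set_page_zone(&page, 1);

        assert(page_zone(&page) == &normal);
        assert((page.flags & 0xff) == 0x5);    /* low flag bits untouched */
        return 0;
}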
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index ea14bd835c68..ff810df6c8ee 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -7,6 +7,7 @@
#include <linux/config.h>
#include <linux/spinlock.h>
#include <linux/list.h>
+#include <linux/wait.h>
/*
* Free memory management - zoned buddy allocator.
@@ -48,6 +49,35 @@ typedef struct zone_struct {
free_area_t free_area[MAX_ORDER];
/*
+ * wait_table -- the array holding the hash table
+ * wait_table_size -- the size of the hash table array
+ * wait_table_shift -- wait_table_size
+ * == BITS_PER_LONG (1 << wait_table_bits)
+ *
+ * The purpose of all these is to keep track of the people
+ * waiting for a page to become available and make them
+ * runnable again when possible. The trouble is that this
+ * consumes a lot of space, especially when so few things
+ * wait on pages at a given time. So instead of using
+ * per-page waitqueues, we use a waitqueue hash table.
+ *
+ * The bucket discipline is to sleep on the same queue when
+ * colliding and wake all in that wait queue when removing.
+ * When something wakes, it must check to be sure its page is
+ * truly available, a la thundering herd. The cost of a
+ * collision is great, but given the expected load of the
+ * table, they should be so rare as to be outweighed by the
+ * benefits from the saved space.
+ *
+ * __wait_on_page() and unlock_page() in mm/filemap.c, are the
+ * primary users of these fields, and in mm/page_alloc.c
+ * free_area_init_core() performs the initialization of them.
+ */
+ wait_queue_head_t * wait_table;
+ unsigned long wait_table_size;
+ unsigned long wait_table_shift;
+
+ /*
* Discontig memory support fields.
*/
struct pglist_data *zone_pgdat;
@@ -132,11 +162,15 @@ extern pg_data_t contig_page_data;
#define NODE_DATA(nid) (&contig_page_data)
#define NODE_MEM_MAP(nid) mem_map
+#define MAX_NR_NODES 1
#else /* !CONFIG_DISCONTIGMEM */
#include <asm/mmzone.h>
+/* page->zone is currently 8 bits ... */
+#define MAX_NR_NODES (255 / MAX_NR_ZONES)
+
#endif /* !CONFIG_DISCONTIGMEM */
#define MAP_ALIGN(x) ((((x) % sizeof(mem_map_t)) == 0) ? (x) : ((x) + \
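The new wait_table fields implement the hashed-waitqueue scheme described in the comment above: many pages share one bucket, waiters sleep on the shared bucket, and a wake-up wakes the whole bucket, so every waiter must re-check its own page. This diff does not include the hash function itself (page_waitqueue() lives in mm/filemap.c), so the sketch below uses an invented hash purely to illustrate bucket selection.

/*
 * Sketch of the waitqueue-hash idea described in the mmzone.h comment.
 * The hash function here is illustrative only; the real one is
 * page_waitqueue() in mm/filemap.c, which this diff does not show.
 */
#include <stdio.h>

struct wait_queue_head { int dummy; };     /* stand-in for wait_queue_head_t */

struct zone {
        struct wait_queue_head *wait_table;
        unsigned long wait_table_size;     /* number of buckets, a power of two */
        unsigned long wait_table_shift;
};

/*
 * Map a page to a bucket.  Many pages share one bucket; colliding waiters
 * all sleep on it, and unlocking wakes the whole bucket, so each waiter
 * must re-check that its own page really became available.
 */
static struct wait_queue_head *
page_waitqueue_sketch(struct zone *zone, const void *page)
{
        unsigned long hash = (unsigned long)page >> 4;   /* drop low bits */
        hash ^= hash >> zone->wait_table_shift;          /* mix in upper bits */
        return &zone->wait_table[hash & (zone->wait_table_size - 1)];
}

int main(void)
{
        static struct wait_queue_head buckets[64];
        struct zone zone = { buckets, 64, 6 };
        int a, b;

        printf("page A -> bucket %td\n", page_waitqueue_sketch(&zone, &a) - buckets);
        printf("page B -> bucket %td\n", page_waitqueue_sketch(&zone, &b) - buckets);
        return 0;
}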
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index d18fc2b008fd..242a576ea934 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -97,6 +97,8 @@ static inline void wait_on_page(struct page * page)
___wait_on_page(page);
}
+extern void wake_up_page(struct page *);
+
extern struct page * grab_cache_page (struct address_space *, unsigned long);
extern struct page * grab_cache_page_nowait (struct address_space *, unsigned long);
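pagemap.h now exports wake_up_page(), the wake half of the hashed-waitqueue protocol. As a rough userspace analogue (assumptions throughout: pthread primitives stand in for kernel waitqueues, an atomic int stands in for PG_locked, and the *_sketch helper names are invented), the sketch below shows the discipline the mmzone.h comment describes: clear the lock bit, broadcast to the shared bucket, and have every waiter re-check its own page after waking.

/*
 * Userspace analogue (an assumption-laden sketch, not kernel code) of the
 * wait-on-page / wake_up_page() protocol: waiters sleep on a bucket shared
 * by many pages, so after every wakeup they re-check their own page.
 */
#include <pthread.h>
#include <stdatomic.h>

struct bucket {                            /* stand-in for one wait_table entry */
        pthread_mutex_t lock;
        pthread_cond_t  cond;
};

struct page { atomic_int locked; struct bucket *bucket; };

static void wait_on_page_sketch(struct page *page)
{
        struct bucket *b = page->bucket;

        pthread_mutex_lock(&b->lock);
        while (atomic_load(&page->locked))     /* re-check: the bucket is shared */
                pthread_cond_wait(&b->cond, &b->lock);
        pthread_mutex_unlock(&b->lock);
}

static void unlock_page_sketch(struct page *page)
{
        struct bucket *b = page->bucket;

        atomic_store(&page->locked, 0);        /* clear the "PG_locked" bit */
        pthread_mutex_lock(&b->lock);
        pthread_cond_broadcast(&b->cond);      /* wake the whole bucket */
        pthread_mutex_unlock(&b->lock);
}

int main(void)
{
        static struct bucket b = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER };
        struct page page = { 0, &b };

        unlock_page_sketch(&page);   /* no-op wake: nothing is waiting */
        wait_on_page_sketch(&page);  /* returns immediately: page is unlocked */
        return 0;
}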