summaryrefslogtreecommitdiff
path: root/include
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@home.transmeta.com>2002-06-17 20:48:29 -0700
committerLinus Torvalds <torvalds@home.transmeta.com>2002-06-17 20:48:29 -0700
commit1f60ade2a44d22a67c75a165b70d66f9d4e0b76e (patch)
tree7a8bda4c45fb3e5d255a023b030137e3b6be87ee /include
parent8509486ae776be099cbedb6c37c37741ddc20ad8 (diff)
parent3986594c6167a269053d3d88f17e53e0ca4023f8 (diff)
Merge master.kernel.org:/home/mingo/bk-sched
into home.transmeta.com:/home/torvalds/v2.5/linux
Diffstat (limited to 'include')
-rw-r--r--include/asm-alpha/agp.h11
-rw-r--r--include/asm-i386/agp.h23
-rw-r--r--include/asm-i386/cacheflush.h3
-rw-r--r--include/asm-i386/io.h26
-rw-r--r--include/asm-i386/kmap_types.h9
-rw-r--r--include/asm-i386/page.h3
-rw-r--r--include/asm-i386/pgtable-2level.h1
-rw-r--r--include/asm-i386/pgtable-3level.h2
-rw-r--r--include/asm-i386/pgtable.h3
-rw-r--r--include/asm-ia64/agp.h11
-rw-r--r--include/asm-ppc/kmap_types.h3
-rw-r--r--include/asm-sparc/kmap_types.h3
-rw-r--r--include/asm-sparc64/agp.h11
-rw-r--r--include/asm-x86_64/agp.h23
-rw-r--r--include/asm-x86_64/cacheflush.h3
-rw-r--r--include/asm-x86_64/i387.h11
-rw-r--r--include/asm-x86_64/ia32.h2
-rw-r--r--include/asm-x86_64/ipc.h30
-rw-r--r--include/asm-x86_64/kmap_types.h3
-rw-r--r--include/asm-x86_64/mmu_context.h12
-rw-r--r--include/asm-x86_64/msr.h21
-rw-r--r--include/asm-x86_64/mtrr.h42
-rw-r--r--include/asm-x86_64/pda.h2
-rw-r--r--include/asm-x86_64/processor.h11
-rw-r--r--include/asm-x86_64/spinlock.h6
-rw-r--r--include/asm-x86_64/string.h13
-rw-r--r--include/asm-x86_64/suspend.h6
-rw-r--r--include/asm-x86_64/system.h7
-rw-r--r--include/asm-x86_64/timex.h2
-rw-r--r--include/asm-x86_64/tlbflush.h9
-rw-r--r--include/linux/bio.h50
-rw-r--r--include/linux/blkdev.h7
-rw-r--r--include/linux/buffer_head.h24
-rw-r--r--include/linux/highmem.h44
-rw-r--r--include/linux/ide.h1
-rw-r--r--include/linux/jbd.h1
-rw-r--r--include/linux/loop.h8
-rw-r--r--include/linux/poll.h49
-rw-r--r--include/linux/raid/raid5.h1
-rw-r--r--include/linux/reiserfs_fs.h2
-rw-r--r--include/linux/sched.h2
-rw-r--r--include/linux/swap.h32
-rw-r--r--include/linux/sysctl.h19
-rw-r--r--include/linux/timer.h2
-rw-r--r--include/linux/tqueue.h3
-rw-r--r--include/linux/vmalloc.h3
-rw-r--r--include/linux/writeback.h6
47 files changed, 305 insertions, 261 deletions
diff --git a/include/asm-alpha/agp.h b/include/asm-alpha/agp.h
new file mode 100644
index 000000000000..ba05bdf9a211
--- /dev/null
+++ b/include/asm-alpha/agp.h
@@ -0,0 +1,11 @@
+#ifndef AGP_H
+#define AGP_H 1
+
+/* dummy for now */
+
+#define map_page_into_agp(page)
+#define unmap_page_from_agp(page)
+#define flush_agp_mappings()
+#define flush_agp_cache() mb()
+
+#endif
diff --git a/include/asm-i386/agp.h b/include/asm-i386/agp.h
new file mode 100644
index 000000000000..9ae97c09fb49
--- /dev/null
+++ b/include/asm-i386/agp.h
@@ -0,0 +1,23 @@
+#ifndef AGP_H
+#define AGP_H 1
+
+#include <asm/pgtable.h>
+
+/*
+ * Functions to keep the agpgart mappings coherent with the MMU.
+ * The GART gives the CPU a physical alias of pages in memory. The alias region is
+ * mapped uncacheable. Make sure there are no conflicting mappings
+ * with different cachability attributes for the same page. This avoids
+ * data corruption on some CPUs.
+ */
+
+#define map_page_into_agp(page) change_page_attr(page, 1, PAGE_KERNEL_NOCACHE)
+#define unmap_page_from_agp(page) change_page_attr(page, 1, PAGE_KERNEL)
+#define flush_agp_mappings() global_flush_tlb()
+
+/* Could use CLFLUSH here if the cpu supports it. But then it would
+ need to be called for each cacheline of the whole page so it may not be
+ worth it. Would need a page for it. */
+#define flush_agp_cache() asm volatile("wbinvd":::"memory")
+
+#endif
diff --git a/include/asm-i386/cacheflush.h b/include/asm-i386/cacheflush.h
index 58d027dfc5ff..319e65a7047f 100644
--- a/include/asm-i386/cacheflush.h
+++ b/include/asm-i386/cacheflush.h
@@ -15,4 +15,7 @@
#define flush_icache_page(vma,pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
+void global_flush_tlb(void);
+int change_page_attr(struct page *page, int numpages, pgprot_t prot);
+
#endif /* _I386_CACHEFLUSH_H */
diff --git a/include/asm-i386/io.h b/include/asm-i386/io.h
index 44996d06ecc3..9922dd823c9c 100644
--- a/include/asm-i386/io.h
+++ b/include/asm-i386/io.h
@@ -121,31 +121,7 @@ static inline void * ioremap (unsigned long offset, unsigned long size)
return __ioremap(offset, size, 0);
}
-/**
- * ioremap_nocache - map bus memory into CPU space
- * @offset: bus address of the memory
- * @size: size of the resource to map
- *
- * ioremap_nocache performs a platform specific sequence of operations to
- * make bus memory CPU accessible via the readb/readw/readl/writeb/
- * writew/writel functions and the other mmio helpers. The returned
- * address is not guaranteed to be usable directly as a virtual
- * address.
- *
- * This version of ioremap ensures that the memory is marked uncachable
- * on the CPU as well as honouring existing caching rules from things like
- * the PCI bus. Note that there are other caches and buffers on many
- * busses. In paticular driver authors should read up on PCI writes
- *
- * It's useful if some control registers are in such an area and
- * write combining or read caching is not desirable:
- */
-
-static inline void * ioremap_nocache (unsigned long offset, unsigned long size)
-{
- return __ioremap(offset, size, _PAGE_PCD);
-}
-
+extern void * ioremap_nocache (unsigned long offset, unsigned long size);
extern void iounmap(void *addr);
/*
diff --git a/include/asm-i386/kmap_types.h b/include/asm-i386/kmap_types.h
index 9a12267d3a4f..0ae7bb3c2b8d 100644
--- a/include/asm-i386/kmap_types.h
+++ b/include/asm-i386/kmap_types.h
@@ -15,10 +15,11 @@ D(1) KM_SKB_SUNRPC_DATA,
D(2) KM_SKB_DATA_SOFTIRQ,
D(3) KM_USER0,
D(4) KM_USER1,
-D(5) KM_BIO_IRQ,
-D(6) KM_PTE0,
-D(7) KM_PTE1,
-D(8) KM_TYPE_NR
+D(5) KM_BIO_SRC_IRQ,
+D(6) KM_BIO_DST_IRQ,
+D(7) KM_PTE0,
+D(8) KM_PTE1,
+D(9) KM_TYPE_NR
};
#undef D
diff --git a/include/asm-i386/page.h b/include/asm-i386/page.h
index 4737ef69ae18..d8e1f404c08b 100644
--- a/include/asm-i386/page.h
+++ b/include/asm-i386/page.h
@@ -6,6 +6,9 @@
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
+#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
+#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
+
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
diff --git a/include/asm-i386/pgtable-2level.h b/include/asm-i386/pgtable-2level.h
index e22db0cc6824..9f8bdc13adac 100644
--- a/include/asm-i386/pgtable-2level.h
+++ b/include/asm-i386/pgtable-2level.h
@@ -40,6 +40,7 @@ static inline int pgd_present(pgd_t pgd) { return 1; }
* hook is made available.
*/
#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
+#define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
/*
* (pmds are folded into pgds so this doesnt get actually called,
* but the define is needed for a generic inline function.)
diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h
index bb2eaea63fde..beb0c1bc3d30 100644
--- a/include/asm-i386/pgtable-3level.h
+++ b/include/asm-i386/pgtable-3level.h
@@ -49,6 +49,8 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
smp_wmb();
ptep->pte_low = pte.pte_low;
}
+#define set_pte_atomic(pteptr,pteval) \
+ set_64bit((unsigned long long *)(pteptr),pte_val(pteval))
#define set_pmd(pmdptr,pmdval) \
set_64bit((unsigned long long *)(pmdptr),pmd_val(pmdval))
#define set_pgd(pgdptr,pgdval) \
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index f48db2beeeba..71b75fa234af 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -237,6 +237,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define pmd_page(pmd) \
(mem_map + (pmd_val(pmd) >> PAGE_SHIFT))
+#define pmd_large(pmd) \
+ ((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))
+
/* to find an entry in a page-table-directory. */
#define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
diff --git a/include/asm-ia64/agp.h b/include/asm-ia64/agp.h
new file mode 100644
index 000000000000..ba05bdf9a211
--- /dev/null
+++ b/include/asm-ia64/agp.h
@@ -0,0 +1,11 @@
+#ifndef AGP_H
+#define AGP_H 1
+
+/* dummy for now */
+
+#define map_page_into_agp(page)
+#define unmap_page_from_agp(page)
+#define flush_agp_mappings()
+#define flush_agp_cache() mb()
+
+#endif
diff --git a/include/asm-ppc/kmap_types.h b/include/asm-ppc/kmap_types.h
index 99fec407abf5..bce7fd8c1ff2 100644
--- a/include/asm-ppc/kmap_types.h
+++ b/include/asm-ppc/kmap_types.h
@@ -11,7 +11,8 @@ enum km_type {
KM_SKB_DATA_SOFTIRQ,
KM_USER0,
KM_USER1,
- KM_BIO_IRQ,
+ KM_BIO_SRC_IRQ,
+ KM_BIO_DST_IRQ,
KM_PTE0,
KM_PTE1,
KM_TYPE_NR
diff --git a/include/asm-sparc/kmap_types.h b/include/asm-sparc/kmap_types.h
index 7e9a5661c698..bab20a2a676b 100644
--- a/include/asm-sparc/kmap_types.h
+++ b/include/asm-sparc/kmap_types.h
@@ -7,7 +7,8 @@ enum km_type {
KM_SKB_DATA_SOFTIRQ,
KM_USER0,
KM_USER1,
- KM_BIO_IRQ,
+ KM_BIO_SRC_IRQ,
+ KM_BIO_DST_IRQ,
KM_TYPE_NR
};
diff --git a/include/asm-sparc64/agp.h b/include/asm-sparc64/agp.h
new file mode 100644
index 000000000000..ba05bdf9a211
--- /dev/null
+++ b/include/asm-sparc64/agp.h
@@ -0,0 +1,11 @@
+#ifndef AGP_H
+#define AGP_H 1
+
+/* dummy for now */
+
+#define map_page_into_agp(page)
+#define unmap_page_from_agp(page)
+#define flush_agp_mappings()
+#define flush_agp_cache() mb()
+
+#endif
diff --git a/include/asm-x86_64/agp.h b/include/asm-x86_64/agp.h
new file mode 100644
index 000000000000..8c2fabe80419
--- /dev/null
+++ b/include/asm-x86_64/agp.h
@@ -0,0 +1,23 @@
+#ifndef AGP_H
+#define AGP_H 1
+
+#include <asm/cacheflush.h>
+
+/*
+ * Functions to keep the agpgart mappings coherent.
+ * The GART gives the CPU a physical alias of memory. The alias is
+ * mapped uncacheable. Make sure there are no conflicting mappings
+ * with different cachability attributes for the same page.
+ */
+
+#define map_page_into_agp(page) \
+ change_page_attr(page, __pgprot(__PAGE_KERNEL | _PAGE_PCD))
+#define unmap_page_from_agp(page) change_page_attr(page, PAGE_KERNEL)
+#define flush_agp_mappings() global_flush_tlb()
+
+/* Could use CLFLUSH here if the cpu supports it. But then it would
+ need to be called for each cacheline of the whole page so it may not be
+ worth it. Would need a page for it. */
+#define flush_agp_cache() asm volatile("wbinvd":::"memory")
+
+#endif
diff --git a/include/asm-x86_64/cacheflush.h b/include/asm-x86_64/cacheflush.h
index 58d027dfc5ff..319e65a7047f 100644
--- a/include/asm-x86_64/cacheflush.h
+++ b/include/asm-x86_64/cacheflush.h
@@ -15,4 +15,7 @@
#define flush_icache_page(vma,pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
+void global_flush_tlb(void);
+int change_page_attr(struct page *page, int numpages, pgprot_t prot);
+
#endif /* _I386_CACHEFLUSH_H */
diff --git a/include/asm-x86_64/i387.h b/include/asm-x86_64/i387.h
index edb75edb063e..2a0292c00b54 100644
--- a/include/asm-x86_64/i387.h
+++ b/include/asm-x86_64/i387.h
@@ -16,11 +16,22 @@
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
+#include <asm/thread_info.h>
extern void fpu_init(void);
extern void init_fpu(void);
int save_i387(struct _fpstate *buf);
+static inline int need_signal_i387(struct task_struct *me)
+{
+ if (!me->used_math)
+ return 0;
+ me->used_math = 0;
+ if (!test_thread_flag(TIF_USEDFPU))
+ return 0;
+ return 1;
+}
+
/*
* FPU lazy state save handling...
*/
diff --git a/include/asm-x86_64/ia32.h b/include/asm-x86_64/ia32.h
index e57c2e593007..7830bf40cfd4 100644
--- a/include/asm-x86_64/ia32.h
+++ b/include/asm-x86_64/ia32.h
@@ -18,7 +18,9 @@ typedef int __kernel_clock_t32;
typedef int __kernel_pid_t32;
typedef unsigned short __kernel_ipc_pid_t32;
typedef unsigned short __kernel_uid_t32;
+typedef unsigned __kernel_uid32_t32;
typedef unsigned short __kernel_gid_t32;
+typedef unsigned __kernel_gid32_t32;
typedef unsigned short __kernel_dev_t32;
typedef unsigned int __kernel_ino_t32;
typedef unsigned short __kernel_mode_t32;
diff --git a/include/asm-x86_64/ipc.h b/include/asm-x86_64/ipc.h
index 49ea4fdc19b4..2ca5773be061 100644
--- a/include/asm-x86_64/ipc.h
+++ b/include/asm-x86_64/ipc.h
@@ -1,34 +1,6 @@
#ifndef __i386_IPC_H__
#define __i386_IPC_H__
-/*
- * These are used to wrap system calls on x86.
- *
- * See arch/i386/kernel/sys_i386.c for ugly details..
- *
- * (on x86-64 only used for 32bit emulation)
- */
-
-struct ipc_kludge {
- struct msgbuf *msgp;
- long msgtyp;
-};
-
-#define SEMOP 1
-#define SEMGET 2
-#define SEMCTL 3
-#define MSGSND 11
-#define MSGRCV 12
-#define MSGGET 13
-#define MSGCTL 14
-#define SHMAT 21
-#define SHMDT 22
-#define SHMGET 23
-#define SHMCTL 24
-
-/* Used by the DIPC package, try and avoid reusing it */
-#define DIPC 25
-
-#define IPCCALL(version,op) ((version)<<16 | (op))
+/* dummy */
#endif
diff --git a/include/asm-x86_64/kmap_types.h b/include/asm-x86_64/kmap_types.h
index 7e9a5661c698..bab20a2a676b 100644
--- a/include/asm-x86_64/kmap_types.h
+++ b/include/asm-x86_64/kmap_types.h
@@ -7,7 +7,8 @@ enum km_type {
KM_SKB_DATA_SOFTIRQ,
KM_USER0,
KM_USER1,
- KM_BIO_IRQ,
+ KM_BIO_SRC_IRQ,
+ KM_BIO_DST_IRQ,
KM_TYPE_NR
};
diff --git a/include/asm-x86_64/mmu_context.h b/include/asm-x86_64/mmu_context.h
index e9f6d661cf4c..e21f0e6721f8 100644
--- a/include/asm-x86_64/mmu_context.h
+++ b/include/asm-x86_64/mmu_context.h
@@ -19,8 +19,8 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{
- if(cpu_tlbstate[cpu].state == TLBSTATE_OK)
- cpu_tlbstate[cpu].state = TLBSTATE_LAZY;
+ if (read_pda(mmu_state) == TLBSTATE_OK)
+ write_pda(mmu_state, TLBSTATE_LAZY);
}
#else
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
@@ -35,8 +35,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
/* stop flush ipis for the previous mm */
clear_bit(cpu, &prev->cpu_vm_mask);
#ifdef CONFIG_SMP
- cpu_tlbstate[cpu].state = TLBSTATE_OK;
- cpu_tlbstate[cpu].active_mm = next;
+ write_pda(mmu_state, TLBSTATE_OK);
+ write_pda(active_mm, next);
#endif
set_bit(cpu, &next->cpu_vm_mask);
/* Re-load page tables */
@@ -48,8 +48,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
}
#ifdef CONFIG_SMP
else {
- cpu_tlbstate[cpu].state = TLBSTATE_OK;
- if(cpu_tlbstate[cpu].active_mm != next)
+ write_pda(mmu_state, TLBSTATE_OK);
+ if (read_pda(active_mm) != next)
out_of_line_bug();
if(!test_and_set_bit(cpu, &next->cpu_vm_mask)) {
/* We were in lazy tlb mode and leave_mm disabled
diff --git a/include/asm-x86_64/msr.h b/include/asm-x86_64/msr.h
index 7e522c2f4846..4085cc8c5dbe 100644
--- a/include/asm-x86_64/msr.h
+++ b/include/asm-x86_64/msr.h
@@ -95,6 +95,7 @@
#define MSR_IA32_PERFCTR0 0xc1
#define MSR_IA32_PERFCTR1 0xc2
+#define MSR_MTRRcap 0x0fe
#define MSR_IA32_BBL_CR_CTL 0x119
#define MSR_IA32_MCG_CAP 0x179
@@ -110,6 +111,19 @@
#define MSR_IA32_LASTINTFROMIP 0x1dd
#define MSR_IA32_LASTINTTOIP 0x1de
+#define MSR_MTRRfix64K_00000 0x250
+#define MSR_MTRRfix16K_80000 0x258
+#define MSR_MTRRfix16K_A0000 0x259
+#define MSR_MTRRfix4K_C0000 0x268
+#define MSR_MTRRfix4K_C8000 0x269
+#define MSR_MTRRfix4K_D0000 0x26a
+#define MSR_MTRRfix4K_D8000 0x26b
+#define MSR_MTRRfix4K_E0000 0x26c
+#define MSR_MTRRfix4K_E8000 0x26d
+#define MSR_MTRRfix4K_F0000 0x26e
+#define MSR_MTRRfix4K_F8000 0x26f
+#define MSR_MTRRdefType 0x2ff
+
#define MSR_IA32_MC0_CTL 0x400
#define MSR_IA32_MC0_STATUS 0x401
#define MSR_IA32_MC0_ADDR 0x402
@@ -171,11 +185,4 @@
#define MSR_IA32_APICBASE_ENABLE (1<<11)
#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
-
-#define MSR_IA32_THERM_CONTROL 0x19a
-#define MSR_IA32_THERM_INTERRUPT 0x19b
-#define MSR_IA32_THERM_STATUS 0x19c
-#define MSR_IA32_MISC_ENABLE 0x1a0
-
-
#endif
diff --git a/include/asm-x86_64/mtrr.h b/include/asm-x86_64/mtrr.h
index ff3ea870d0d6..6505d7bd6ece 100644
--- a/include/asm-x86_64/mtrr.h
+++ b/include/asm-x86_64/mtrr.h
@@ -30,16 +30,16 @@
struct mtrr_sentry
{
- unsigned long base; /* Base address */
- unsigned long size; /* Size of region */
+ __u64 base; /* Base address */
+ __u32 size; /* Size of region */
unsigned int type; /* Type of region */
};
struct mtrr_gentry
{
+ __u64 base; /* Base address */
+ __u32 size; /* Size of region */
unsigned int regnum; /* Register number */
- unsigned long base; /* Base address */
- unsigned long size; /* Size of region */
unsigned int type; /* Type of region */
};
@@ -81,46 +81,38 @@ static char *mtrr_strings[MTRR_NUM_TYPES] =
#ifdef __KERNEL__
/* The following functions are for use by other drivers */
-# ifdef CONFIG_MTRR
-extern int mtrr_add (unsigned long base, unsigned long size,
- unsigned int type, char increment);
-extern int mtrr_add_page (unsigned long base, unsigned long size,
- unsigned int type, char increment);
-extern int mtrr_del (int reg, unsigned long base, unsigned long size);
-extern int mtrr_del_page (int reg, unsigned long base, unsigned long size);
-extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
-# else
-static __inline__ int mtrr_add (unsigned long base, unsigned long size,
+#ifdef CONFIG_MTRR
+extern int mtrr_add (__u64 base, __u32 size, unsigned int type, char increment);
+extern int mtrr_add_page (__u64 base, __u32 size, unsigned int type, char increment);
+extern int mtrr_del (int reg, __u64 base, __u32 size);
+extern int mtrr_del_page (int reg, __u64 base, __u32 size);
+#else
+static __inline__ int mtrr_add (__u64 base, __u32 size,
unsigned int type, char increment)
{
return -ENODEV;
}
-static __inline__ int mtrr_add_page (unsigned long base, unsigned long size,
+static __inline__ int mtrr_add_page (__u64 base, __u32 size,
unsigned int type, char increment)
{
return -ENODEV;
}
-static __inline__ int mtrr_del (int reg, unsigned long base,
- unsigned long size)
+static __inline__ int mtrr_del (int reg, __u64 base, __u32 size)
{
return -ENODEV;
}
-static __inline__ int mtrr_del_page (int reg, unsigned long base,
- unsigned long size)
+static __inline__ int mtrr_del_page (int reg, __u64 base, __u32 size)
{
return -ENODEV;
}
-
-static __inline__ void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) {;}
-
-# endif
+#endif
/* The following functions are for initialisation: don't use them! */
extern int mtrr_init (void);
-# if defined(CONFIG_SMP) && defined(CONFIG_MTRR)
+#if defined(CONFIG_SMP) && defined(CONFIG_MTRR)
extern void mtrr_init_boot_cpu (void);
extern void mtrr_init_secondary_cpu (void);
-# endif
+#endif
#endif
diff --git a/include/asm-x86_64/pda.h b/include/asm-x86_64/pda.h
index 7ff508346013..eb38cf70fb90 100644
--- a/include/asm-x86_64/pda.h
+++ b/include/asm-x86_64/pda.h
@@ -22,6 +22,8 @@ struct x8664_pda {
unsigned int __local_bh_count;
unsigned int __nmi_count; /* arch dependent */
struct task_struct * __ksoftirqd_task; /* waitqueue is too large */
+ struct mm_struct *active_mm;
+ int mmu_state;
} ____cacheline_aligned;
#define PDA_STACKOFFSET (5*8)
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index 4cda0f055a5f..03875338aedf 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -45,21 +45,12 @@ struct cpuinfo_x86 {
__u8 x86_vendor; /* CPU vendor */
__u8 x86_model;
__u8 x86_mask;
- /* We know that wp_works_ok = 1, hlt_works_ok = 1, hard_math = 1,
- etc... */
- char wp_works_ok; /* It doesn't on 386's */
- char hlt_works_ok; /* Problems on some 486Dx4's and old 386's */
- char hard_math;
- char rfu;
int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */
__u32 x86_capability[NCAPINTS];
char x86_vendor_id[16];
char x86_model_id[64];
int x86_cache_size; /* in KB - valid for CPUS which support this
call */
- int fdiv_bug;
- int f00f_bug;
- int coma_bug;
unsigned long loops_per_jiffy;
} ____cacheline_aligned;
@@ -323,7 +314,7 @@ struct thread_struct {
/* IO permissions. the bitmap could be moved into the GDT, that would make
switch faster for a limited number of ioperm using tasks. -AK */
int ioperm;
- u32 io_bitmap[IO_BITMAP_SIZE+1];
+ u32 *io_bitmap_ptr;
};
#define INIT_THREAD { \
diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h
index 6f1d71c65a68..a276217b88a3 100644
--- a/include/asm-x86_64/spinlock.h
+++ b/include/asm-x86_64/spinlock.h
@@ -15,7 +15,7 @@ extern int printk(const char * fmt, ...)
typedef struct {
volatile unsigned int lock;
-#ifdef CONFIG_DEBUG_SPINLOCK
+#if SPINLOCK_DEBUG
unsigned magic;
#endif
} spinlock_t;
@@ -39,7 +39,7 @@ typedef struct {
* We make no fairness assumptions. They have a cost.
*/
-#define spin_is_locked(x) (*(volatile char *)(&(x)->lock) <= 0)
+#define spin_is_locked(x) (*(volatile signed char *)(&(x)->lock) <= 0)
#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x))
#define spin_lock_string \
@@ -62,7 +62,7 @@ typedef struct {
static inline int _raw_spin_trylock(spinlock_t *lock)
{
- char oldval;
+ signed char oldval;
__asm__ __volatile__(
"xchgb %b0,%1"
:"=q" (oldval), "=m" (lock->lock)
diff --git a/include/asm-x86_64/string.h b/include/asm-x86_64/string.h
index ec456eadb674..27876b9da06a 100644
--- a/include/asm-x86_64/string.h
+++ b/include/asm-x86_64/string.h
@@ -40,18 +40,9 @@ extern void *__memcpy(void *to, const void *from, size_t len);
__ret = __builtin_memcpy((dst),(src),__len); \
__ret; })
-#if 0
+
#define __HAVE_ARCH_MEMSET
-extern void *__memset(void *mem, int val, size_t len);
-#define memset(dst,val,len) \
- ({ size_t __len = (len); \
- void *__ret; \
- if (__builtin_constant_p(len) && __len >= 64) \
- __ret = __memset((dst),(val),__len); \
- else \
- __ret = __builtin_memset((dst),(val),__len); \
- __ret; })
-#endif
+#define memset __builtin_memset
#define __HAVE_ARCH_MEMMOVE
void * memmove(void * dest,const void *src,size_t count);
diff --git a/include/asm-x86_64/suspend.h b/include/asm-x86_64/suspend.h
new file mode 100644
index 000000000000..9f065f8fe33d
--- /dev/null
+++ b/include/asm-x86_64/suspend.h
@@ -0,0 +1,6 @@
+#ifndef SUSPEND_H
+#define SUSPEND_H 1
+
+/* dummy for now */
+
+#endif
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index 1df84d087823..9d6c6f1f48d5 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -13,7 +13,10 @@
#define LOCK_PREFIX ""
#endif
-#define prepare_to_switch() do {} while(0)
+#define prepare_arch_schedule(prev) do { } while(0)
+#define finish_arch_schedule(prev) do { } while(0)
+#define prepare_arch_switch(rq) do { } while(0)
+#define finish_arch_switch(rq) spin_unlock_irq(&(rq)->lock)
#define __STR(x) #x
#define STR(x) __STR(x)
@@ -41,7 +44,7 @@
__POP(rax) __POP(r15) __POP(r14) __POP(r13) __POP(r12) __POP(r11) __POP(r10) \
__POP(r9) __POP(r8)
-#define switch_to(prev,next) \
+#define switch_to(prev,next,last) \
asm volatile(SAVE_CONTEXT \
"movq %%rsp,%[prevrsp]\n\t" \
"movq %[nextrsp],%%rsp\n\t" \
diff --git a/include/asm-x86_64/timex.h b/include/asm-x86_64/timex.h
index b87680d9e51a..98bddc2d805a 100644
--- a/include/asm-x86_64/timex.h
+++ b/include/asm-x86_64/timex.h
@@ -48,6 +48,4 @@ static inline cycles_t get_cycles (void)
extern unsigned int cpu_khz;
-#define ARCH_HAS_JIFFIES_64
-
#endif
diff --git a/include/asm-x86_64/tlbflush.h b/include/asm-x86_64/tlbflush.h
index 3f086b2d03b3..2e811ac262af 100644
--- a/include/asm-x86_64/tlbflush.h
+++ b/include/asm-x86_64/tlbflush.h
@@ -106,15 +106,6 @@ static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long st
#define TLBSTATE_OK 1
#define TLBSTATE_LAZY 2
-struct tlb_state
-{
- struct mm_struct *active_mm;
- int state;
- char __cacheline_padding[24];
-};
-extern struct tlb_state cpu_tlbstate[NR_CPUS];
-
-
#endif
#define flush_tlb_kernel_range(start, end) flush_tlb_all()
diff --git a/include/linux/bio.h b/include/linux/bio.h
index b244108a27a8..ffc38fca9c1e 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -21,6 +21,8 @@
#define __LINUX_BIO_H
#include <linux/kdev_t.h>
+#include <linux/highmem.h>
+
/* Platforms may set this to teach the BIO layer about IOMMU hardware. */
#include <asm/io.h>
#ifndef BIO_VMERGE_BOUNDARY
@@ -47,9 +49,6 @@ struct bio_vec {
unsigned int bv_offset;
};
-/*
- * weee, c forward decl...
- */
struct bio;
typedef void (bio_end_io_t) (struct bio *);
typedef void (bio_destructor_t) (struct bio *);
@@ -206,4 +205,49 @@ extern inline void bio_init(struct bio *);
extern int bio_ioctl(kdev_t, unsigned int, unsigned long);
+#ifdef CONFIG_HIGHMEM
+/*
+ * remember to add offset! and never ever reenable interrupts between a
+ * bio_kmap_irq and bio_kunmap_irq!!
+ *
+ * This function MUST be inlined - it plays with the CPU interrupt flags.
+ * Hence the `extern inline'.
+ */
+extern inline char *bio_kmap_irq(struct bio *bio, unsigned long *flags)
+{
+ unsigned long addr;
+
+ __save_flags(*flags);
+
+ /*
+ * could be low
+ */
+ if (!PageHighMem(bio_page(bio)))
+ return bio_data(bio);
+
+ /*
+ * it's a highmem page
+ */
+ __cli();
+ addr = (unsigned long) kmap_atomic(bio_page(bio), KM_BIO_SRC_IRQ);
+
+ if (addr & ~PAGE_MASK)
+ BUG();
+
+ return (char *) addr + bio_offset(bio);
+}
+
+extern inline void bio_kunmap_irq(char *buffer, unsigned long *flags)
+{
+ unsigned long ptr = (unsigned long) buffer & PAGE_MASK;
+
+ kunmap_atomic((void *) ptr, KM_BIO_SRC_IRQ);
+ __restore_flags(*flags);
+}
+
+#else
+#define bio_kmap_irq(bio, flags) (bio_data(bio))
+#define bio_kunmap_irq(buf, flags) do { *(flags) = 0; } while (0)
+#endif
+
#endif /* __LINUX_BIO_H */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index ef86a3ed6e64..c0c099834df2 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -246,12 +246,7 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
#define BLK_BOUNCE_ISA (ISA_DMA_THRESHOLD)
extern int init_emergency_isa_pool(void);
-extern void create_bounce(unsigned long pfn, int gfp, struct bio **bio_orig);
-
-extern inline void blk_queue_bounce(request_queue_t *q, struct bio **bio)
-{
- create_bounce(q->bounce_pfn, q->bounce_gfp, bio);
-}
+void blk_queue_bounce(request_queue_t *q, struct bio **bio);
#define rq_for_each_bio(bio, rq) \
if ((rq->bio)) \
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 90767fc78617..4fc6bab55825 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -108,12 +108,7 @@ BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Boundary, boundary)
-/*
- * FIXME: this is used only by bh_kmap, which is used only by RAID5.
- * Move all that stuff into raid5.c
- */
#define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK)
-
#define touch_buffer(bh) mark_page_accessed(bh->b_page)
/* If we *know* page->private refers to buffer_heads */
@@ -124,16 +119,6 @@ BUFFER_FNS(Boundary, boundary)
((struct buffer_head *)(page)->private); \
})
#define page_has_buffers(page) PagePrivate(page)
-#define set_page_buffers(page, buffers) \
- do { \
- SetPagePrivate(page); \
- page->private = (unsigned long)buffers; \
- } while (0)
-#define clear_page_buffers(page) \
- do { \
- ClearPagePrivate(page); \
- page->private = 0; \
- } while (0)
#define invalidate_buffers(dev) __invalidate_buffers((dev), 0)
#define destroy_buffers(dev) __invalidate_buffers((dev), 1)
@@ -175,15 +160,14 @@ int fsync_dev(kdev_t);
int fsync_bdev(struct block_device *);
int fsync_super(struct super_block *);
int fsync_no_super(struct block_device *);
-struct buffer_head *__get_hash_table(struct block_device *, sector_t, int);
+struct buffer_head *__find_get_block(struct block_device *, sector_t, int);
struct buffer_head * __getblk(struct block_device *, sector_t, int);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
struct buffer_head * __bread(struct block_device *, int, int);
void wakeup_bdflush(void);
-struct buffer_head *alloc_buffer_head(int async);
+struct buffer_head *alloc_buffer_head(void);
void free_buffer_head(struct buffer_head * bh);
-int brw_page(int, struct page *, struct block_device *, sector_t [], int);
void FASTCALL(unlock_buffer(struct buffer_head *bh));
/*
@@ -270,9 +254,9 @@ static inline struct buffer_head * sb_getblk(struct super_block *sb, int block)
}
static inline struct buffer_head *
-sb_get_hash_table(struct super_block *sb, int block)
+sb_find_get_block(struct super_block *sb, int block)
{
- return __get_hash_table(sb->s_bdev, block, sb->s_blocksize);
+ return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}
static inline void
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index da66723d62c5..68c841afc622 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -2,7 +2,6 @@
#define _LINUX_HIGHMEM_H
#include <linux/config.h>
-#include <linux/bio.h>
#include <linux/fs.h>
#include <asm/cacheflush.h>
@@ -15,45 +14,8 @@ extern struct page *highmem_start_page;
/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
-extern void create_bounce(unsigned long pfn, int gfp, struct bio **bio_orig);
extern void check_highmem_ptes(void);
-/*
- * remember to add offset! and never ever reenable interrupts between a
- * bio_kmap_irq and bio_kunmap_irq!!
- */
-static inline char *bio_kmap_irq(struct bio *bio, unsigned long *flags)
-{
- unsigned long addr;
-
- __save_flags(*flags);
-
- /*
- * could be low
- */
- if (!PageHighMem(bio_page(bio)))
- return bio_data(bio);
-
- /*
- * it's a highmem page
- */
- __cli();
- addr = (unsigned long) kmap_atomic(bio_page(bio), KM_BIO_IRQ);
-
- if (addr & ~PAGE_MASK)
- BUG();
-
- return (char *) addr + bio_offset(bio);
-}
-
-static inline void bio_kunmap_irq(char *buffer, unsigned long *flags)
-{
- unsigned long ptr = (unsigned long) buffer & PAGE_MASK;
-
- kunmap_atomic((void *) ptr, KM_BIO_IRQ);
- __restore_flags(*flags);
-}
-
#else /* CONFIG_HIGHMEM */
static inline unsigned int nr_free_highpages(void) { return 0; }
@@ -65,12 +27,6 @@ static inline void *kmap(struct page *page) { return page_address(page); }
#define kmap_atomic(page,idx) kmap(page)
#define kunmap_atomic(page,idx) kunmap(page)
-#define bh_kmap(bh) ((bh)->b_data)
-#define bh_kunmap(bh) do { } while (0)
-
-#define bio_kmap_irq(bio, flags) (bio_data(bio))
-#define bio_kunmap_irq(buf, flags) do { *(flags) = 0; } while (0)
-
#endif /* CONFIG_HIGHMEM */
/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
diff --git a/include/linux/ide.h b/include/linux/ide.h
index e07d0f19fcd1..03c21c567ce4 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -15,6 +15,7 @@
#include <linux/devfs_fs_kernel.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
+#include <linux/bio.h>
#include <asm/byteorder.h>
#include <asm/hdreg.h>
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 835d38c9dbfc..683c1247fd70 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -238,6 +238,7 @@ enum jbd_state_bits {
BUFFER_FNS(JBD, jbd)
BUFFER_FNS(JBDDirty, jbddirty)
TAS_BUFFER_FNS(JBDDirty, jbddirty)
+BUFFER_FNS(Freed, freed)
static inline struct buffer_head *jh2bh(struct journal_head *jh)
{
diff --git a/include/linux/loop.h b/include/linux/loop.h
index d4dc0665a92d..4dfa8b14a586 100644
--- a/include/linux/loop.h
+++ b/include/linux/loop.h
@@ -62,14 +62,6 @@ typedef int (* transfer_proc_t)(struct loop_device *, int cmd,
char *raw_buf, char *loop_buf, int size,
int real_block);
-static inline int lo_do_transfer(struct loop_device *lo, int cmd, char *rbuf,
- char *lbuf, int size, int rblock)
-{
- if (!lo->transfer)
- return 0;
-
- return lo->transfer(lo, cmd, rbuf, lbuf, size, rblock);
-}
#endif /* __KERNEL__ */
/*
diff --git a/include/linux/poll.h b/include/linux/poll.h
index 796aac51388a..86b1ee2d3eb3 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -10,13 +10,32 @@
#include <linux/mm.h>
#include <asm/uaccess.h>
-struct poll_table_page;
+#define POLL_INLINE_BYTES 256
+#define FAST_SELECT_MAX 128
+#define FAST_POLL_MAX 128
+#define POLL_INLINE_ENTRIES (1+(POLL_INLINE_BYTES / sizeof(struct poll_table_entry)))
+
+struct poll_table_entry {
+ struct file * filp;
+ wait_queue_t wait;
+ wait_queue_head_t * wait_address;
+};
+
+struct poll_table_page {
+ struct poll_table_page * next;
+ struct poll_table_entry * entry;
+ struct poll_table_entry entries[0];
+};
typedef struct poll_table_struct {
int error;
struct poll_table_page * table;
+ struct poll_table_page inline_page;
+ struct poll_table_entry inline_table[POLL_INLINE_ENTRIES];
} poll_table;
+#define POLL_INLINE_TABLE_LEN (sizeof(poll_table) - offsetof(poll_table, inline_page))
+
extern void __pollwait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p);
static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
@@ -30,6 +49,7 @@ static inline void poll_initwait(poll_table* pt)
pt->error = 0;
pt->table = NULL;
}
+
extern void poll_freewait(poll_table* pt);
@@ -49,27 +69,6 @@ typedef struct {
#define FDS_LONGS(nr) (((nr)+FDS_BITPERLONG-1)/FDS_BITPERLONG)
#define FDS_BYTES(nr) (FDS_LONGS(nr)*sizeof(long))
-/*
- * We do a VERIFY_WRITE here even though we are only reading this time:
- * we'll write to it eventually..
- *
- * Use "unsigned long" accesses to let user-mode fd_set's be long-aligned.
- */
-static inline
-int get_fd_set(unsigned long nr, void *ufdset, unsigned long *fdset)
-{
- nr = FDS_BYTES(nr);
- if (ufdset) {
- int error;
- error = verify_area(VERIFY_WRITE, ufdset, nr);
- if (!error && __copy_from_user(fdset, ufdset, nr))
- error = -EFAULT;
- return error;
- }
- memset(fdset, 0, nr);
- return 0;
-}
-
static inline
void set_fd_set(unsigned long nr, void *ufdset, unsigned long *fdset)
{
@@ -77,12 +76,6 @@ void set_fd_set(unsigned long nr, void *ufdset, unsigned long *fdset)
__copy_to_user(ufdset, fdset, FDS_BYTES(nr));
}
-static inline
-void zero_fd_set(unsigned long nr, unsigned long *fdset)
-{
- memset(fdset, 0, FDS_BYTES(nr));
-}
-
extern int do_select(int n, fd_set_bits *fds, long *timeout);
#endif /* KERNEL */
diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h
index 5c25120581a7..67f7bf471798 100644
--- a/include/linux/raid/raid5.h
+++ b/include/linux/raid/raid5.h
@@ -3,6 +3,7 @@
#include <linux/raid/md.h>
#include <linux/raid/xor.h>
+#include <linux/bio.h>
/*
*
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index 4a3d16d7b8dc..29f6063b3546 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -1651,7 +1651,7 @@ extern wait_queue_head_t reiserfs_commit_thread_wait ;
#define JOURNAL_BUFFER(j,n) ((j)->j_ap_blocks[((j)->j_start + (n)) % JOURNAL_BLOCK_COUNT])
// We need these to make journal.c code more readable
-#define journal_get_hash_table(s, block) __get_hash_table(SB_JOURNAL(s)->j_dev_bd, block, s->s_blocksize)
+#define journal_find_get_block(s, block) __find_get_block(SB_JOURNAL(s)->j_dev_bd, block, s->s_blocksize)
#define journal_getblk(s, block) __getblk(SB_JOURNAL(s)->j_dev_bd, block, s->s_blocksize)
#define journal_bread(s, block) __bread(SB_JOURNAL(s)->j_dev_bd, block, s->s_blocksize)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3b43d3bb1123..9e7d80851c32 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -7,7 +7,6 @@ extern unsigned long event;
#include <linux/config.h>
#include <linux/capability.h>
-#include <linux/tqueue.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -160,7 +159,6 @@ extern unsigned long cache_decay_ticks;
extern signed long FASTCALL(schedule_timeout(signed long timeout));
asmlinkage void schedule(void);
-extern int schedule_task(struct tq_struct *task);
extern void flush_scheduled_tasks(void);
extern int start_context_thread(void);
extern int current_is_keventd(void);
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d0160265e3c5..0b448a811a39 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -5,6 +5,7 @@
#include <linux/kdev_t.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
+#include <linux/list.h>
#include <asm/page.h>
#define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */
@@ -62,6 +63,21 @@ typedef struct {
#ifdef __KERNEL__
/*
+ * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
+ * disk blocks. A list of swap extents maps the entire swapfile. (Where the
+ * term `swapfile' refers to either a blockdevice or an IS_REG file. Apart
+ * from setup, they're handled identically.)
+ *
+ * We always assume that blocks are of size PAGE_SIZE.
+ */
+struct swap_extent {
+ struct list_head list;
+ pgoff_t start_page;
+ pgoff_t nr_pages;
+ sector_t start_block;
+};
+
+/*
* Max bad pages in the new format..
*/
#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
@@ -83,11 +99,17 @@ enum {
/*
* The in-memory structure used to track swap areas.
+ * extent_list.prev points at the lowest-index extent. That list is
+ * sorted.
*/
struct swap_info_struct {
unsigned int flags;
spinlock_t sdev_lock;
struct file *swap_file;
+ struct block_device *bdev;
+ struct list_head extent_list;
+ int nr_extents;
+ struct swap_extent *curr_swap_extent;
unsigned old_block_size;
unsigned short * swap_map;
unsigned int lowest_bit;
@@ -134,8 +156,9 @@ extern wait_queue_head_t kswapd_wait;
extern int FASTCALL(try_to_free_pages(zone_t *, unsigned int, unsigned int));
/* linux/mm/page_io.c */
-extern void rw_swap_page(int, struct page *);
-extern void rw_swap_page_nolock(int, swp_entry_t, char *);
+int swap_readpage(struct file *file, struct page *page);
+int swap_writepage(struct page *page);
+int rw_swap_page_sync(int rw, swp_entry_t entry, struct page *page);
/* linux/mm/page_alloc.c */
@@ -163,12 +186,13 @@ extern unsigned int nr_swapfiles;
extern struct swap_info_struct swap_info[];
extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
-extern void get_swaphandle_info(swp_entry_t, unsigned long *, struct inode **);
extern int swap_duplicate(swp_entry_t);
-extern int swap_count(struct page *);
extern int valid_swaphandles(swp_entry_t, unsigned long *);
extern void swap_free(swp_entry_t);
extern void free_swap_and_cache(swp_entry_t);
+sector_t map_swap_page(struct swap_info_struct *p, pgoff_t offset);
+struct swap_info_struct *get_swap_info_struct(unsigned type);
+
struct swap_list_t {
int head; /* head of priority-ordered swapfile list */
int next; /* swapfile to be used next */
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index a5a6684f9a50..488bc05dbcc1 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -130,16 +130,21 @@ enum
/* CTL_VM names: */
enum
{
- VM_SWAPCTL=1, /* struct: Set vm swapping control */
- VM_SWAPOUT=2, /* int: Linear or sqrt() swapout for hogs */
- VM_FREEPG=3, /* struct: Set free page thresholds */
+ VM_UNUSED1=1, /* was: struct: Set vm swapping control */
+ VM_UNUSED2=2, /* was: int: Linear or sqrt() swapout for hogs */
+ VM_UNUSED3=3, /* was: struct: Set free page thresholds */
VM_BDFLUSH_UNUSED=4, /* Spare */
VM_OVERCOMMIT_MEMORY=5, /* Turn off the virtual memory safety limit */
- VM_BUFFERMEM=6, /* struct: Set buffer memory thresholds */
- VM_PAGECACHE=7, /* struct: Set cache memory thresholds */
+ VM_UNUSED4=6, /* was: struct: Set buffer memory thresholds */
+ VM_UNUSED5=7, /* was: struct: Set cache memory thresholds */
VM_PAGERDAEMON=8, /* struct: Control kswapd behaviour */
- VM_PGT_CACHE=9, /* struct: Set page table cache parameters */
- VM_PAGE_CLUSTER=10 /* int: set number of pages to swap together */
+ VM_UNUSED6=9, /* was: struct: Set page table cache parameters */
+ VM_PAGE_CLUSTER=10, /* int: set number of pages to swap together */
+ VM_DIRTY_BACKGROUND=11, /* dirty_background_ratio */
+ VM_DIRTY_ASYNC=12, /* dirty_async_ratio */
+ VM_DIRTY_SYNC=13, /* dirty_sync_ratio */
+ VM_DIRTY_WB_CS=14, /* dirty_writeback_centisecs */
+ VM_DIRTY_EXPIRE_CS=15, /* dirty_expire_centisecs */
};
diff --git a/include/linux/timer.h b/include/linux/timer.h
index d6f0ce5f8740..6e1e61a4c07b 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -25,10 +25,8 @@ extern int del_timer(struct timer_list * timer);
#ifdef CONFIG_SMP
extern int del_timer_sync(struct timer_list * timer);
-extern void sync_timers(void);
#else
#define del_timer_sync(t) del_timer(t)
-#define sync_timers() do { } while (0)
#endif
/*
diff --git a/include/linux/tqueue.h b/include/linux/tqueue.h
index 3d3047027229..d4729c518f22 100644
--- a/include/linux/tqueue.h
+++ b/include/linux/tqueue.h
@@ -110,6 +110,9 @@ static inline int queue_task(struct tq_struct *bh_pointer, task_queue *bh_list)
return ret;
}
+/* Schedule a tq to run in process context */
+extern int schedule_task(struct tq_struct *task);
+
/*
* Call all "bottom halfs" on a given list.
*/
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 4051c031a976..9cc67b500368 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -13,6 +13,7 @@ struct vm_struct {
unsigned long flags;
void * addr;
unsigned long size;
+ unsigned long phys_addr;
struct vm_struct * next;
};
@@ -23,6 +24,8 @@ extern long vread(char *buf, char *addr, unsigned long count);
extern void vmfree_area_pages(unsigned long address, unsigned long size);
extern int vmalloc_area_pages(unsigned long address, unsigned long size,
int gfp_mask, pgprot_t prot);
+extern struct vm_struct *remove_kernel_area(void *addr);
+
/*
* Various ways to allocate pages.
*/
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index cf706c783eda..a06b0f116ebd 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -45,6 +45,12 @@ static inline void wait_on_inode(struct inode *inode)
/*
* mm/page-writeback.c
*/
+extern int dirty_background_ratio;
+extern int dirty_async_ratio;
+extern int dirty_sync_ratio;
+extern int dirty_writeback_centisecs;
+extern int dirty_expire_centisecs;
+
void balance_dirty_pages(struct address_space *mapping);
void balance_dirty_pages_ratelimited(struct address_space *mapping);
int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0);