Diffstat (limited to 'include')
-rw-r--r--   include/asm-generic/tlb.h     13
-rw-r--r--   include/asm-i386/page.h        8
-rw-r--r--   include/asm-ppc/atomic.h       2
-rw-r--r--   include/asm-ppc/highmem.h     11
-rw-r--r--   include/asm-ppc/ide.h         22
-rw-r--r--   include/asm-ppc/spinlock.h     2
-rw-r--r--   include/asm-ppc/unistd.h       3
-rw-r--r--   include/linux/blkdev.h         2
-rw-r--r--   include/linux/elevator.h       5
-rw-r--r--   include/linux/ext3_fs.h       25
-rw-r--r--   include/linux/ide.h           26
-rw-r--r--   include/linux/kernel.h         2
-rw-r--r--   include/linux/mm.h            20
-rw-r--r--   include/linux/mmzone.h         6
-rw-r--r--   include/linux/page-flags.h     1
-rw-r--r--   include/linux/rbtree.h        45
-rw-r--r--   include/linux/sched.h         44
-rw-r--r--   include/linux/sysctl.h         3
18 files changed, 141 insertions, 99 deletions
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index f3564a558be4..43814335a1a5 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -21,7 +21,7 @@
* and page free order so much..
*/
#ifdef CONFIG_SMP
- #define FREE_PTE_NR 507
+ #define FREE_PTE_NR 506
#define tlb_fast_mode(tlb) ((tlb)->nr == ~0U)
#else
#define FREE_PTE_NR 1
@@ -40,8 +40,6 @@ typedef struct free_pte_ctx {
unsigned int fullmm; /* non-zero means full mm flush */
unsigned long freed;
struct page * pages[FREE_PTE_NR];
- unsigned long flushes;/* stats: count avoided flushes */
- unsigned long avoided_flushes;
} mmu_gather_t;
/* Users of the generic TLB shootdown code must declare this storage space. */
@@ -67,17 +65,10 @@ static inline mmu_gather_t *tlb_gather_mmu(struct mm_struct *mm, unsigned int fu
static inline void tlb_flush_mmu(mmu_gather_t *tlb, unsigned long start, unsigned long end)
{
- unsigned long nr;
-
- if (!tlb->need_flush) {
- tlb->avoided_flushes++;
+ if (!tlb->need_flush)
return;
- }
tlb->need_flush = 0;
- tlb->flushes++;
-
tlb_flush(tlb);
- nr = tlb->nr;
if (!tlb_fast_mode(tlb)) {
free_pages_and_swap_cache(tlb->pages, tlb->nr);
tlb->nr = 0;
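
A minimal sketch of how a caller is expected to drive the generic shootdown
code now that the statistics fields are gone (the function and its page-table
walk are placeholders, not the real mm/memory.c unmap path):

#include <asm/tlb.h>

static void example_unmap_range(struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	mmu_gather_t *tlb;

	tlb = tlb_gather_mmu(mm, 0);		/* 0: not a full-mm teardown */
	/*
	 * ... walk the page tables from start to end and hand each
	 * present page to tlb_remove_page(tlb, page) ...
	 */
	tlb_finish_mmu(tlb, start, end);	/* flush TLB, free gathered pages */
}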
diff --git a/include/asm-i386/page.h b/include/asm-i386/page.h
index a6fe2daa59a1..5a09fd4b72f1 100644
--- a/include/asm-i386/page.h
+++ b/include/asm-i386/page.h
@@ -44,14 +44,22 @@ typedef struct { unsigned long pte_low, pte_high; } pte_t;
typedef struct { unsigned long long pmd; } pmd_t;
typedef struct { unsigned long long pgd; } pgd_t;
#define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
+#define HPAGE_SHIFT 21
#else
typedef struct { unsigned long pte_low; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t;
#define pte_val(x) ((x).pte_low)
+#define HPAGE_SHIFT 22
#endif
#define PTE_MASK PAGE_MASK
+#ifdef CONFIG_HUGETLB_PAGE
+#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+#endif
+
typedef struct { unsigned long pgprot; } pgprot_t;
#define pmd_val(x) ((x).pmd)
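
The new constants describe one huge page: HPAGE_SHIFT is 21 (2 MiB) with PAE's
64-bit page-table entries and 22 (4 MiB) otherwise. A small sketch, assuming
CONFIG_HUGETLB_PAGE, of how the derived macros are meant to be used (the helper
names are made up):

#include <asm/page.h>

/* Round an address down to the start of its huge page. */
static unsigned long example_hpage_start(unsigned long addr)
{
	return addr & HPAGE_MASK;
}

/* Number of base pages covered by one huge page, e.g. 1024 for 4 MiB / 4 KiB. */
static unsigned long example_pages_per_hpage(void)
{
	return 1UL << HUGETLB_PAGE_ORDER;
}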
diff --git a/include/asm-ppc/atomic.h b/include/asm-ppc/atomic.h
index 63e194a43944..def62e39ca4a 100644
--- a/include/asm-ppc/atomic.h
+++ b/include/asm-ppc/atomic.h
@@ -67,6 +67,8 @@ static __inline__ int atomic_add_return(int a, atomic_t *v)
return t;
}
+#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
+
static __inline__ void atomic_sub(int a, atomic_t *v)
{
int t;
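
atomic_add_negative() adds to the counter and reports whether the result
dropped below zero, matching the primitive other architectures already provide.
A small usage sketch (the counter and its meaning are illustrative):

#include <asm/atomic.h>

static atomic_t example_budget = ATOMIC_INIT(0);

/* Returns non-zero if this charge drove the budget negative. */
static int example_charge(int amount)
{
	return atomic_add_negative(amount, &example_budget);
}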
diff --git a/include/asm-ppc/highmem.h b/include/asm-ppc/highmem.h
index 472482ca3f36..98da2dbd2df9 100644
--- a/include/asm-ppc/highmem.h
+++ b/include/asm-ppc/highmem.h
@@ -128,6 +128,17 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
dec_preempt_count();
}
+static inline struct page *kmap_atomic_to_page(void *ptr)
+{
+ unsigned long idx, vaddr = (unsigned long) ptr;
+
+ if (vaddr < KMAP_FIX_BEGIN)
+ return virt_to_page(ptr);
+
+ idx = (vaddr - KMAP_FIX_BEGIN) >> PAGE_SHIFT;
+ return pte_page(kmap_pte[idx]);
+}
+
#endif /* __KERNEL__ */
#endif /* _ASM_HIGHMEM_H */
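
kmap_atomic_to_page() maps a kernel virtual address back to its struct page,
falling back to virt_to_page() for addresses below the fixmap window. A sketch
of the round trip (the KM_USER0 slot and the write are illustrative):

#include <linux/highmem.h>

static void example_touch_page(struct page *page)
{
	char *vaddr = kmap_atomic(page, KM_USER0);

	vaddr[0] = 0;				/* touch the temporary mapping */
	if (kmap_atomic_to_page(vaddr) != page)	/* round-trip sanity check */
		BUG();
	kunmap_atomic(vaddr, KM_USER0);
}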
diff --git a/include/asm-ppc/ide.h b/include/asm-ppc/ide.h
index 8a95ad1d5f46..558135208705 100644
--- a/include/asm-ppc/ide.h
+++ b/include/asm-ppc/ide.h
@@ -103,30 +103,10 @@ static __inline__ void ide_init_default_hwifs(void)
#endif
}
-#if !defined(ide_request_irq)
-#define ide_request_irq(irq,hand,flg,dev,id) request_irq((irq),(hand),(flg),(dev),(id))
-#endif
-
-#if !defined(ide_free_irq)
-#define ide_free_irq(irq,dev_id) free_irq((irq), (dev_id))
-#endif
-#define ide_check_region(from,extent) check_region((from), (extent))
-#define ide_request_region(from,extent,name) request_region((from), (extent), (name))
-#define ide_release_region(from,extent) release_region((from), (extent))
-
-extern void ide_fix_driveid(struct hd_driveid *id);
-
-/*
- * The following are not needed for the non-m68k ports
- * unless direct IDE on 8xx
- */
#if (defined CONFIG_APUS || defined CONFIG_BLK_DEV_MPC8xx_IDE )
+#define IDE_ARCH_ACK_INTR 1
#define ide_ack_intr(hwif) (hwif->hw.ack_intr ? hwif->hw.ack_intr(hwif) : 1)
-#else
-#define ide_ack_intr(hwif) (1)
#endif
-#define ide_release_lock(lock) do {} while (0)
-#define ide_get_lock(lock, hdlr, data) do {} while (0)
#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/spinlock.h b/include/asm-ppc/spinlock.h
index b7dafa9a36a5..f852ed60a379 100644
--- a/include/asm-ppc/spinlock.h
+++ b/include/asm-ppc/spinlock.h
@@ -100,6 +100,8 @@ typedef struct {
#define RW_LOCK_UNLOCKED (rwlock_t) { 0 RWLOCK_DEBUG_INIT }
#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0)
+#define rwlock_is_locked(x) ((x)->lock != 0)
+
#ifndef CONFIG_DEBUG_SPINLOCK
static __inline__ void _raw_read_lock(rwlock_t *rw)
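
rwlock_is_locked() only inspects the lock word, so it is suited to debugging
assertions rather than lock-free decision making. A sketch with a hypothetical
lock:

#include <linux/spinlock.h>
#include <asm/page.h>

static rwlock_t example_lock = RW_LOCK_UNLOCKED;

/* Callers of this helper are expected to already hold example_lock. */
static void example_assert_locked(void)
{
	if (!rwlock_is_locked(&example_lock))
		BUG();
}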
diff --git a/include/asm-ppc/unistd.h b/include/asm-ppc/unistd.h
index 4f7e364fc0d6..510bfd1772c2 100644
--- a/include/asm-ppc/unistd.h
+++ b/include/asm-ppc/unistd.h
@@ -239,6 +239,9 @@
#define __NR_io_getevents 229
#define __NR_io_submit 230
#define __NR_io_cancel 231
+#define __NR_alloc_hugepages 232
+#define __NR_free_hugepages 233
+#define __NR_exit_group 234
#define __NR(n) #n
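
These wire the i386 hugepage system calls and exit_group up on ppc. Until libc
grows wrappers, userspace can reach them through syscall(2); a hedged sketch
for exit_group (alloc_hugepages and free_hugepages would be invoked the same
way):

#include <unistd.h>
#include <sys/syscall.h>

/* Terminate all threads in the calling thread group. */
static void example_exit_group(int status)
{
	syscall(__NR_exit_group, status);
	/* not reached */
}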
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 7ef082f9462f..70781c985caa 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -327,7 +327,7 @@ extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bd
extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern void generic_unplug_device(void *);
-
+extern long nr_blockdev_pages(void);
/*
* tag stuff
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 5730a5bd5a78..e98168f92e67 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -4,8 +4,6 @@
typedef int (elevator_merge_fn) (request_queue_t *, struct request **,
struct bio *);
-typedef void (elevator_merge_cleanup_fn) (request_queue_t *, struct request *, int);
-
typedef void (elevator_merge_req_fn) (request_queue_t *, struct request *, struct request *);
typedef struct request *(elevator_next_req_fn) (request_queue_t *);
@@ -21,7 +19,6 @@ typedef void (elevator_exit_fn) (request_queue_t *, elevator_t *);
struct elevator_s
{
elevator_merge_fn *elevator_merge_fn;
- elevator_merge_cleanup_fn *elevator_merge_cleanup_fn;
elevator_merge_req_fn *elevator_merge_req_fn;
elevator_next_req_fn *elevator_next_req_fn;
@@ -42,7 +39,6 @@ struct elevator_s
*/
extern void __elv_add_request(request_queue_t *, struct request *,
struct list_head *);
-extern void elv_merge_cleanup(request_queue_t *, struct request *, int);
extern int elv_merge(request_queue_t *, struct request **, struct bio *);
extern void elv_merge_requests(request_queue_t *, struct request *,
struct request *);
@@ -61,6 +57,7 @@ extern elevator_t elevator_noop;
*/
extern elevator_t elevator_linus;
#define elv_linus_sequence(rq) ((long)(rq)->elevator_private)
+#define ELV_LINUS_SEEK_COST 16
/*
* use the /proc/iosched interface, all the below is history ->
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index e05c9d984080..d4550e28f37e 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -97,9 +97,9 @@
# define EXT3_BLOCK_SIZE_BITS(s) ((s)->s_log_block_size + 10)
#endif
#ifdef __KERNEL__
-#define EXT3_ADDR_PER_BLOCK_BITS(s) ((s)->u.ext3_sb.s_addr_per_block_bits)
-#define EXT3_INODE_SIZE(s) ((s)->u.ext3_sb.s_inode_size)
-#define EXT3_FIRST_INO(s) ((s)->u.ext3_sb.s_first_ino)
+#define EXT3_ADDR_PER_BLOCK_BITS(s) (EXT3_SB(s)->s_addr_per_block_bits)
+#define EXT3_INODE_SIZE(s) (EXT3_SB(s)->s_inode_size)
+#define EXT3_FIRST_INO(s) (EXT3_SB(s)->s_first_ino)
#else
#define EXT3_INODE_SIZE(s) (((s)->s_rev_level == EXT3_GOOD_OLD_REV) ? \
EXT3_GOOD_OLD_INODE_SIZE : \
@@ -116,8 +116,8 @@
#define EXT3_MAX_FRAG_SIZE 4096
#define EXT3_MIN_FRAG_LOG_SIZE 10
#ifdef __KERNEL__
-# define EXT3_FRAG_SIZE(s) ((s)->u.ext3_sb.s_frag_size)
-# define EXT3_FRAGS_PER_BLOCK(s) ((s)->u.ext3_sb.s_frags_per_block)
+# define EXT3_FRAG_SIZE(s) (EXT3_SB(s)->s_frag_size)
+# define EXT3_FRAGS_PER_BLOCK(s) (EXT3_SB(s)->s_frags_per_block)
#else
# define EXT3_FRAG_SIZE(s) (EXT3_MIN_FRAG_SIZE << (s)->s_log_frag_size)
# define EXT3_FRAGS_PER_BLOCK(s) (EXT3_BLOCK_SIZE(s) / EXT3_FRAG_SIZE(s))
@@ -164,10 +164,10 @@ struct ext3_group_desc
* Macro-instructions used to manage group descriptors
*/
#ifdef __KERNEL__
-# define EXT3_BLOCKS_PER_GROUP(s) ((s)->u.ext3_sb.s_blocks_per_group)
-# define EXT3_DESC_PER_BLOCK(s) ((s)->u.ext3_sb.s_desc_per_block)
-# define EXT3_INODES_PER_GROUP(s) ((s)->u.ext3_sb.s_inodes_per_group)
-# define EXT3_DESC_PER_BLOCK_BITS(s) ((s)->u.ext3_sb.s_desc_per_block_bits)
+# define EXT3_BLOCKS_PER_GROUP(s) (EXT3_SB(s)->s_blocks_per_group)
+# define EXT3_DESC_PER_BLOCK(s) (EXT3_SB(s)->s_desc_per_block)
+# define EXT3_INODES_PER_GROUP(s) (EXT3_SB(s)->s_inodes_per_group)
+# define EXT3_DESC_PER_BLOCK_BITS(s) (EXT3_SB(s)->s_desc_per_block_bits)
#else
# define EXT3_BLOCKS_PER_GROUP(s) ((s)->s_blocks_per_group)
# define EXT3_DESC_PER_BLOCK(s) (EXT3_BLOCK_SIZE(s) / sizeof (struct ext3_group_desc))
@@ -346,7 +346,7 @@ struct ext3_inode {
#ifndef _LINUX_EXT2_FS_H
#define clear_opt(o, opt) o &= ~EXT3_MOUNT_##opt
#define set_opt(o, opt) o |= EXT3_MOUNT_##opt
-#define test_opt(sb, opt) ((sb)->u.ext3_sb.s_mount_opt & \
+#define test_opt(sb, opt) (EXT3_SB(sb)->s_mount_opt & \
EXT3_MOUNT_##opt)
#else
#define EXT2_MOUNT_NOLOAD EXT3_MOUNT_NOLOAD
@@ -444,7 +444,10 @@ struct ext3_super_block {
};
#ifdef __KERNEL__
-#define EXT3_SB(sb) (&((sb)->u.ext3_sb))
+static inline struct ext3_sb_info * EXT3_SB(struct super_block *sb)
+{
+ return sb->u.generic_sbp;
+}
static inline struct ext3_inode_info *EXT3_I(struct inode *inode)
{
return container_of(inode, struct ext3_inode_info, vfs_inode);
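
EXT3_SB() now returns the private pointer hung off the superblock instead of
the embedded union member, so the sb_info has to be allocated at mount time.
Roughly the setup a fill_super routine needs (a sketch, not the actual
ext3_fill_super() changes):

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ext3_fs.h>
#include <linux/ext3_fs_sb.h>

static int example_attach_sb_info(struct super_block *sb)
{
	struct ext3_sb_info *sbi;

	sbi = kmalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;
	memset(sbi, 0, sizeof(*sbi));
	sb->u.generic_sbp = sbi;	/* this is what EXT3_SB(sb) now reads */
	return 0;
}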
diff --git a/include/linux/ide.h b/include/linux/ide.h
index c9064eff5996..6ce0a520cc70 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -17,6 +17,7 @@
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/bio.h>
+#include <linux/pci.h>
#include <asm/byteorder.h>
#include <asm/system.h>
#include <asm/hdreg.h>
@@ -904,9 +905,7 @@ extern inline void ide_unmap_buffer(struct request *rq, char *buffer, unsigned l
((1<<ide_pci)|(1<<ide_cmd646)|(1<<ide_ali14xx))
#define IDE_CHIPSET_IS_PCI(c) ((IDE_CHIPSET_PCI_MASK >> (c)) & 1)
-#ifdef CONFIG_BLK_DEV_IDEPCI
struct ide_pci_device_s;
-#endif /* CONFIG_BLK_DEV_IDEPCI */
typedef struct hwif_s {
struct hwif_s *next; /* for linked-list in ide_hwgroup_t */
@@ -937,10 +936,8 @@ typedef struct hwif_s {
hwif_chipset_t chipset; /* sub-module for tuning.. */
-#ifdef CONFIG_BLK_DEV_IDEPCI
struct pci_dev *pci_dev; /* for pci chipsets */
struct ide_pci_device_s *cds; /* chipset device struct */
-#endif /* CONFIG_BLK_DEV_IDEPCI */
#if 0
ide_hwif_ops_t *hwifops;
@@ -1108,12 +1105,10 @@ typedef struct hwgroup_s {
/* ptr to current hwif in linked-list */
ide_hwif_t *hwif;
-#ifdef CONFIG_BLK_DEV_IDEPCI
/* for pci chipsets */
struct pci_dev *pci_dev;
/* chipset device struct */
struct ide_pci_device_s *cds;
-#endif /* CONFIG_BLK_DEV_IDEPCI */
/* current request */
struct request *rq;
@@ -1637,23 +1632,16 @@ extern void ide_intr(int irq, void *dev_id, struct pt_regs *regs);
extern void do_ide_request(request_queue_t *);
extern void ide_init_subdrivers(void);
-#ifndef _IDE_C
extern struct block_device_operations ide_fops[];
extern ide_proc_entry_t generic_subdriver_entries[];
-#endif
extern int ata_attach(ide_drive_t *);
-#ifdef _IDE_C
-#ifdef CONFIG_BLK_DEV_IDE
extern int ideprobe_init(void);
-#ifdef CONFIG_BLK_DEV_IDEPCI
extern void ide_scan_pcibus(int scan_direction) __init;
-#endif /* CONFIG_BLK_DEV_IDEPCI */
-
-#endif /* CONFIG_BLK_DEV_IDE */
-#endif /* _IDE_C */
+extern int ide_pci_register_driver(struct pci_driver *driver);
+extern void ide_pci_unregister_driver(struct pci_driver *driver);
extern void default_hwif_iops(ide_hwif_t *);
extern void default_hwif_mmiops(ide_hwif_t *);
@@ -1665,8 +1653,6 @@ int ide_register_subdriver (ide_drive_t *drive, ide_driver_t *driver, int versio
int ide_unregister_subdriver (ide_drive_t *drive);
int ide_replace_subdriver(ide_drive_t *drive, const char *driver);
-#ifdef CONFIG_BLK_DEV_IDEPCI
-
#ifdef CONFIG_PROC_FS
typedef struct ide_pci_host_proc_s {
char *name;
@@ -1716,14 +1702,9 @@ typedef struct ide_pci_device_s {
struct ide_pci_device_s *next;
} ide_pci_device_t;
-#ifdef LINUX_PCI_H
extern void ide_setup_pci_device(struct pci_dev *, ide_pci_device_t *);
extern void ide_setup_pci_devices(struct pci_dev *, struct pci_dev *, ide_pci_device_t *);
-#endif /* LINUX_PCI_H */
-
-#endif /* CONFIG_BLK_DEV_IDEPCI */
-#ifdef CONFIG_BLK_DEV_IDEDMA
#define BAD_DMA_DRIVE 0
#define GOOD_DMA_DRIVE 1
extern int ide_build_dmatable(ide_drive_t *, struct request *);
@@ -1750,7 +1731,6 @@ extern int __ide_dma_verbose(ide_drive_t *);
extern int __ide_dma_retune(ide_drive_t *);
extern int __ide_dma_lostirq(ide_drive_t *);
extern int __ide_dma_timeout(ide_drive_t *);
-#endif /* CONFIG_BLK_DEV_IDEDMA */
extern void hwif_unregister(ide_hwif_t *);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 95965970e49b..5efa540d55f8 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -96,6 +96,8 @@ extern const char *print_tainted(void);
#define TAINT_FORCED_MODULE (1<<1)
#define TAINT_UNSAFE_SMP (1<<2)
+extern void dump_stack(void);
+
#if DEBUG
#define pr_debug(fmt,arg...) \
printk(KERN_DEBUG fmt,##arg)
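
dump_stack() gives generic code a portable way to print the current call
chain. Typical debugging usage, sketched:

#include <linux/kernel.h>

static void example_report(const char *what)
{
	printk(KERN_WARNING "unexpected condition: %s\n", what);
	dump_stack();		/* show how we got here */
}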
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 4dfac9d2cb5c..e33d3f2bd080 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -19,9 +19,6 @@ extern unsigned long max_mapnr;
extern unsigned long num_physpages;
extern void * high_memory;
extern int page_cluster;
-/* The inactive_clean lists are per zone. */
-extern struct list_head active_list;
-extern struct list_head inactive_list;
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -54,7 +51,7 @@ struct vm_area_struct {
pgprot_t vm_page_prot; /* Access permissions of this VMA. */
unsigned long vm_flags; /* Flags, listed below. */
- rb_node_t vm_rb;
+ struct rb_node vm_rb;
/*
* For areas with an address space and backing store,
@@ -104,6 +101,7 @@ struct vm_area_struct {
#define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */
#define VM_RESERVED 0x00080000 /* Don't unmap it from swap_out */
#define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */
+#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
#define VM_STACK_FLAGS (0x00000100 | VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT)
@@ -377,6 +375,20 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long
int __set_page_dirty_buffers(struct page *page);
int __set_page_dirty_nobuffers(struct page *page);
+#ifdef CONFIG_HUGETLB_PAGE
+#define is_vm_hugetlb_page(vma) (vma->vm_flags & VM_HUGETLB)
+extern int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
+extern int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int);
+extern int free_hugepages(struct vm_area_struct *);
+
+#else
+#define is_vm_hugetlb_page(vma) (0)
+#define follow_hugetlb_page(mm, vma, pages, vmas, start, len, i) (0)
+#define copy_hugetlb_page_range(dst, src, vma) (0)
+#define free_hugepages(mpnt) do { } while(0)
+#endif
+
+
/*
* If the mapping doesn't provide a set_page_dirty a_op, then
* just fall through and assume that it wants buffer_heads.
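
With these helpers, generic VM code can branch to the hugetlb path when
CONFIG_HUGETLB_PAGE is set and have the checks compile away otherwise. A
simplified sketch of the kind of dispatch this enables in a fork-style copy
path (not the full mm/memory.c logic):

#include <linux/mm.h>

static int example_copy_vma(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	if (is_vm_hugetlb_page(vma))
		return copy_hugetlb_page_range(dst, src, vma);

	/* ... ordinary pte-by-pte copy for regular mappings ... */
	return 0;
}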
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e5b6bc1111b8..8ebf441bdb47 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -16,7 +16,7 @@
*/
#ifndef CONFIG_FORCE_MAX_ZONEORDER
-#define MAX_ORDER 10
+#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
@@ -151,8 +151,8 @@ struct zonelist {
* On NUMA machines, each NUMA node would have a pg_data_t to describe
* its memory layout.
*
- * XXX: we need to move the global memory statistics (active_list, ...)
- * into the pg_data_t to properly support NUMA.
+ * Memory statistics and page replacement data structures are maintained on a
+ * per-zone basis.
*/
struct bootmem_data;
typedef struct pglist_data {
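
Raising MAX_ORDER to 11 lets the buddy allocator satisfy requests of up to
2^(MAX_ORDER - 1) = 1024 contiguous base pages, i.e. 4 MiB with 4 KiB pages,
which is exactly one non-PAE i386 huge page. An illustrative allocation at
that largest order (the caller would free it with __free_pages() at the same
order):

#include <linux/mm.h>

static struct page *example_grab_max_order_block(void)
{
	/* Order-10 request: 1024 pages, the largest MAX_ORDER = 11 permits. */
	return alloc_pages(GFP_HIGHUSER, MAX_ORDER - 1);
}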
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 42d1853294ac..6475e261f609 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -78,6 +78,7 @@ extern struct page_state {
unsigned long nr_pagecache;
unsigned long nr_page_table_pages;
unsigned long nr_reverse_maps;
+ unsigned long nr_mapped;
} ____cacheline_aligned_in_smp page_states[NR_CPUS];
extern void get_page_state(struct page_state *ret);
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index 96f20e145be5..7bfb4bf7ad79 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -34,7 +34,7 @@
static inline struct page * rb_search_page_cache(struct inode * inode,
unsigned long offset)
{
- rb_node_t * n = inode->i_rb_page_cache.rb_node;
+ struct rb_node * n = inode->i_rb_page_cache.rb_node;
struct page * page;
while (n)
@@ -53,10 +53,10 @@ static inline struct page * rb_search_page_cache(struct inode * inode,
static inline struct page * __rb_insert_page_cache(struct inode * inode,
unsigned long offset,
- rb_node_t * node)
+ struct rb_node * node)
{
- rb_node_t ** p = &inode->i_rb_page_cache.rb_node;
- rb_node_t * parent = NULL;
+ struct rb_node ** p = &inode->i_rb_page_cache.rb_node;
+ struct rb_node * parent = NULL;
struct page * page;
while (*p)
@@ -79,7 +79,7 @@ static inline struct page * __rb_insert_page_cache(struct inode * inode,
static inline struct page * rb_insert_page_cache(struct inode * inode,
unsigned long offset,
- rb_node_t * node)
+ struct rb_node * node)
{
struct page * ret;
if ((ret = __rb_insert_page_cache(inode, offset, node)))
@@ -97,31 +97,38 @@ static inline struct page * rb_insert_page_cache(struct inode * inode,
#include <linux/kernel.h>
#include <linux/stddef.h>
-typedef struct rb_node_s
+struct rb_node
{
- struct rb_node_s * rb_parent;
+ struct rb_node *rb_parent;
int rb_color;
#define RB_RED 0
#define RB_BLACK 1
- struct rb_node_s * rb_right;
- struct rb_node_s * rb_left;
-}
-rb_node_t;
+ struct rb_node *rb_right;
+ struct rb_node *rb_left;
+};
-typedef struct rb_root_s
+struct rb_root
{
- struct rb_node_s * rb_node;
-}
-rb_root_t;
+ struct rb_node *rb_node;
+};
-#define RB_ROOT (rb_root_t) { NULL, }
+#define RB_ROOT (struct rb_root) { NULL, }
#define rb_entry(ptr, type, member) \
((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member)))
-extern void rb_insert_color(rb_node_t *, rb_root_t *);
-extern void rb_erase(rb_node_t *, rb_root_t *);
+extern void rb_insert_color(struct rb_node *, struct rb_root *);
+extern void rb_erase(struct rb_node *, struct rb_root *);
+
+/* Find logical next and previous nodes in a tree */
+extern struct rb_node *rb_next(struct rb_node *);
+extern struct rb_node *rb_prev(struct rb_node *);
+
+/* Fast replacement of a single node without remove/rebalance/add/rebalance */
+extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
+ struct rb_root *root);
-static inline void rb_link_node(rb_node_t * node, rb_node_t * parent, rb_node_t ** rb_link)
+static inline void rb_link_node(struct rb_node * node, struct rb_node * parent,
+ struct rb_node ** rb_link)
{
node->rb_parent = parent;
node->rb_color = RB_RED;
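
Besides the typedef-to-struct rename, the interface grows in-order traversal
(rb_next()/rb_prev()) and in-place replacement of a node (rb_replace_node()).
A sketch of walking forward from a known node; struct example_item and its
fields are made up:

#include <linux/rbtree.h>
#include <linux/kernel.h>

struct example_item {
	struct rb_node	node;
	unsigned long	key;
};

/* Visit 'start' and every node after it in sorted order. */
static void example_walk_from(struct rb_node *start)
{
	struct rb_node *n;

	for (n = start; n; n = rb_next(n)) {
		struct example_item *item = rb_entry(n, struct example_item, node);

		printk("key=%lu\n", item->key);
	}
}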
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5a7e7c21009f..f0cf18022ae2 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -51,7 +51,11 @@ struct exec_domain;
#define CLONE_CLEARTID 0x00200000 /* clear the userspace TID */
#define CLONE_DETACHED 0x00400000 /* parent wants no child-exit signal */
-#define CLONE_SIGNAL (CLONE_SIGHAND | CLONE_THREAD)
+/*
+ * List of flags we want to share for kernel threads,
+ * if only because they are not used by them anyway.
+ */
+#define CLONE_KERNEL (CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
/*
* These are the constant used to fake the fixed-point load-average
@@ -173,7 +177,7 @@ struct namespace;
struct mm_struct {
struct vm_area_struct * mmap; /* list of VMAs */
- rb_root_t mm_rb;
+ struct rb_root mm_rb;
struct vm_area_struct * mmap_cache; /* last find_vma result */
pgd_t * pgd;
atomic_t mm_users; /* How many users with user space? */
@@ -222,6 +226,8 @@ struct signal_struct {
/* thread group exit support */
int group_exit;
int group_exit_code;
+
+ struct task_struct *group_exit_task;
};
/*
@@ -418,6 +424,7 @@ do { if (atomic_dec_and_test(&(tsk)->usage)) __put_task_struct(tsk); } while(0)
#define PF_IOTHREAD 0x00020000 /* this thread is needed for doing I/O to swap */
#define PF_FROZEN 0x00040000 /* frozen for system suspend */
#define PF_SYNC 0x00080000 /* performing fsync(), etc */
+#define PF_FSTRANS 0x00100000 /* inside a filesystem transaction */
/*
* Ptrace flags
@@ -552,6 +559,7 @@ extern int dequeue_signal(struct sigpending *pending, sigset_t *mask, siginfo_t
extern void block_all_signals(int (*notifier)(void *priv), void *priv,
sigset_t *mask);
extern void unblock_all_signals(void);
+extern void release_task(struct task_struct * p);
extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp);
@@ -683,7 +691,11 @@ extern void FASTCALL(add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));
extern void FASTCALL(add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait));
extern void FASTCALL(remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));
+#ifdef CONFIG_SMP
extern void wait_task_inactive(task_t * p);
+#else
+#define wait_task_inactive(p) do { } while (0)
+#endif
extern void kick_if_running(task_t * p);
#define __wait_event(wq, condition) \
@@ -949,6 +961,34 @@ static inline void cond_resched(void)
__cond_resched();
}
+#ifdef CONFIG_PREEMPT
+
+/*
+ * cond_resched_lock() - if a reschedule is pending, drop the given lock,
+ * call schedule, and on return reacquire the lock.
+ *
+ * Note: this does not assume the given lock is the _only_ lock held.
+ * The kernel preemption counter gives us "free" checking that we are
+ * atomic -- let's use it.
+ */
+static inline void cond_resched_lock(spinlock_t * lock)
+{
+ if (need_resched() && preempt_count() == 1) {
+ _raw_spin_unlock(lock);
+ preempt_enable_no_resched();
+ __cond_resched();
+ spin_lock(lock);
+ }
+}
+
+#else
+
+static inline void cond_resched_lock(spinlock_t * lock)
+{
+}
+
+#endif
+
/* Reevaluate whether the task has signals pending delivery.
This is required every time the blocked sigset_t changes.
All callers should have t->sigmask_lock. */
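
cond_resched_lock() is intended for long scans performed under a spinlock:
when a reschedule is pending and that lock is the only thing keeping us
atomic, it drops the lock, schedules, and takes the lock again. A usage sketch
with a hypothetical lock and workload (note the lock may be released and
reacquired on each iteration):

#include <linux/sched.h>
#include <linux/spinlock.h>

static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;

static void example_long_scan(int nr_items)
{
	int i;

	spin_lock(&example_lock);
	for (i = 0; i < nr_items; i++) {
		/* ... examine item i while holding example_lock ... */
		cond_resched_lock(&example_lock);	/* may drop and retake it */
	}
	spin_unlock(&example_lock);
}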
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 4856854660cc..3127165e7c13 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -128,6 +128,7 @@ enum
KERN_TAINTED=53, /* int: various kernel tainted flags */
KERN_CADPID=54, /* int: PID of the process to notify on CAD */
KERN_PIDMAX=55, /* int: PID # limit */
+ KERN_HUGETLB_PAGE_NUM=56, /* int: Number of available Huge Pages */
};
@@ -151,6 +152,7 @@ enum
VM_DIRTY_EXPIRE_CS=15, /* dirty_expire_centisecs */
VM_NR_PDFLUSH_THREADS=16, /* nr_pdflush_threads */
VM_OVERCOMMIT_RATIO=17, /* percent of RAM to allow overcommit in */
+ VM_PAGEBUF=18 /* struct: Control pagebuf parameters */
};
@@ -554,6 +556,7 @@ enum
FS_DIR_NOTIFY=14, /* int: directory notification enabled */
FS_LEASE_TIME=15, /* int: maximum time to wait for a lease break */
FS_DQSTATS=16, /* disc quota usage statistics */
+ FS_XFS=17, /* struct: control xfs parameters */
};
/* /proc/sys/fs/quota/ */