author    David S. Miller <davem@kernel.bkbits.net>  2003-02-08 10:48:18 -0800
committer David S. Miller <davem@kernel.bkbits.net>  2003-02-08 10:48:18 -0800
commit    fd7c713fd43e878d85a431bdf25ac573efc17b6d (patch)
tree      d775073f8d544b0d3e41fa5751b70503cbc0b2e0 /include/linux
parent    d5a9256003294d65d6cd9d162cf29fb852f6569a (diff)
parent    28a59cb02a6b97cb27e914687c0346c608abd110 (diff)
Merge davem@nuts.ninka.net:/home/davem/src/BK/net-2.5
into kernel.bkbits.net:/home/davem/net-2.5
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/agp_backend.h      2
-rw-r--r--  include/linux/apm_bios.h         2
-rw-r--r--  include/linux/blkdev.h          10
-rw-r--r--  include/linux/ext3_jbd.h         6
-rw-r--r--  include/linux/fs.h               2
-rw-r--r--  include/linux/hugetlb.h         26
-rw-r--r--  include/linux/init_task.h        9
-rw-r--r--  include/linux/isdnif.h           2
-rw-r--r--  include/linux/jbd.h            183
-rw-r--r--  include/linux/mm.h              35
-rw-r--r--  include/linux/page-flags.h       7
-rw-r--r--  include/linux/ptrace.h           6
-rw-r--r--  include/linux/sched.h           37
-rw-r--r--  include/linux/sdla_x25.h         2
-rw-r--r--  include/linux/slab.h             3
-rw-r--r--  include/linux/spinlock.h       122
-rw-r--r--  include/linux/types.h            6
17 files changed, 377 insertions, 83 deletions
diff --git a/include/linux/agp_backend.h b/include/linux/agp_backend.h
index e8fec2776624..36568e4a3d14 100644
--- a/include/linux/agp_backend.h
+++ b/include/linux/agp_backend.h
@@ -160,7 +160,7 @@ extern agp_memory *agp_allocate_memory(size_t, u32);
* an u32 argument of the type of memory to be allocated.
* Every agp bridge device will allow you to allocate
* AGP_NORMAL_MEMORY which maps to physical ram. Any other
- * type is device dependant.
+ * type is device dependent.
*
* It returns NULL whenever memory is unavailable.
*
diff --git a/include/linux/apm_bios.h b/include/linux/apm_bios.h
index ceffd587b7a8..b3b981af768d 100644
--- a/include/linux/apm_bios.h
+++ b/include/linux/apm_bios.h
@@ -45,7 +45,7 @@ struct apm_bios_info {
#define APM_BIOS_DISENGAGED 0x0010
/*
- * Data for APM that is persistant across module unload/load
+ * Data for APM that is persistent across module unload/load
*/
struct apm_info {
struct apm_bios_info bios;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c599ea36233b..82766b7e60b0 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -4,6 +4,8 @@
#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
@@ -188,6 +190,14 @@ struct request_queue
unplug_fn *unplug_fn;
merge_bvec_fn *merge_bvec_fn;
+ /*
+ * Auto-unplugging state
+ */
+ struct timer_list unplug_timer;
+ int unplug_thresh; /* After this many requests */
+ unsigned long unplug_delay; /* After this many jiffies */
+ struct work_struct unplug_work;
+
struct backing_dev_info backing_dev_info;
/*
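
[Editor's note: the new fields give the request queue a self-unplugging mechanism: a timer armed while the queue is plugged, a request-count threshold, and a work_struct that performs the actual unplug in process context. A minimal sketch of how such a timer and work item might be wired together follows; the example_* names are illustrative, not the real block-layer code, and the 2.5-era three-argument INIT_WORK() is assumed.]

	/* Hypothetical sketch of auto-unplug wiring (2.5-era timer/workqueue API). */
	static void example_unplug_work_fn(void *data)
	{
		struct request_queue *q = data;
		/* ...would invoke the queue's unplug_fn here, in process context... */
	}

	static void example_unplug_timeout(unsigned long data)
	{
		struct request_queue *q = (struct request_queue *)data;
		schedule_work(&q->unplug_work);	/* defer: can't unplug from timer context */
	}

	static void example_setup_unplug(struct request_queue *q)
	{
		INIT_WORK(&q->unplug_work, example_unplug_work_fn, q);
		init_timer(&q->unplug_timer);
		q->unplug_timer.function = example_unplug_timeout;
		q->unplug_timer.data = (unsigned long)q;
		/* later, when a request is queued while plugged: */
		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
	}
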
diff --git a/include/linux/ext3_jbd.h b/include/linux/ext3_jbd.h
index 13508f6053b9..7ac910d15863 100644
--- a/include/linux/ext3_jbd.h
+++ b/include/linux/ext3_jbd.h
@@ -28,7 +28,7 @@
* indirection blocks, the group and superblock summaries, and the data
* block to complete the transaction. */
-#define EXT3_SINGLEDATA_TRANS_BLOCKS 8
+#define EXT3_SINGLEDATA_TRANS_BLOCKS 8U
/* Extended attributes may touch two data buffers, two bitmap buffers,
* and two group and summaries. */
@@ -58,7 +58,7 @@ extern int ext3_writepage_trans_blocks(struct inode *inode);
* start off at the maximum transaction size and grow the transaction
* optimistically as we go. */
-#define EXT3_MAX_TRANS_DATA 64
+#define EXT3_MAX_TRANS_DATA 64U
/* We break up a large truncate or write transaction once the handle's
* buffer credits gets this low, we need either to extend the
@@ -67,7 +67,7 @@ extern int ext3_writepage_trans_blocks(struct inode *inode);
* one block, plus two quota updates. Quota allocations are not
* needed. */
-#define EXT3_RESERVE_TRANS_BLOCKS 12
+#define EXT3_RESERVE_TRANS_BLOCKS 12U
#define EXT3_INDEX_EXTRA_TRANS_BLOCKS 8
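
[Editor's note: the U suffix makes these credit constants unsigned int, most likely so they can be compared against other unsigned quantities with the kernel's strictly type-checked min()/max() macros. A hedged illustration; the function and variable names are made up.]

	/* Illustrative only: why the U suffix matters with the type-checked min(). */
	static unsigned int example_trans_data(unsigned int wanted)
	{
		/* Both operands are unsigned int, so min()'s strict type check is
		 * satisfied; with a plain int 64 it would complain at build time. */
		return min(wanted, EXT3_MAX_TRANS_DATA);
	}
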
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 9a17c9819ae9..037c1fe2ad6c 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -332,8 +332,6 @@ struct char_device {
struct list_head hash;
atomic_t count;
dev_t dev;
- atomic_t openers;
- struct semaphore sem;
};
struct block_device {
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 325d91ba012a..370411eaaba2 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -20,16 +20,32 @@ int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
void huge_page_release(struct page *);
int hugetlb_report_meminfo(char *);
int is_hugepage_mem_enough(size_t);
+struct page *follow_huge_addr(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long address, int write);
+struct vm_area_struct *hugepage_vma(struct mm_struct *mm,
+ unsigned long address);
+struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+ pmd_t *pmd, int write);
+int pmd_huge(pmd_t pmd);
extern int htlbpage_max;
+static inline void
+mark_mm_hugetlb(struct mm_struct *mm, struct vm_area_struct *vma)
+{
+ if (is_vm_hugetlb_page(vma))
+ mm->used_hugetlb = 1;
+}
+
#else /* !CONFIG_HUGETLB_PAGE */
+
static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
{
return 0;
}
-#define follow_hugetlb_page(m,v,p,vs,a,b,i) ({ BUG(); 0; })
+#define follow_hugetlb_page(m,v,p,vs,a,b,i) ({ BUG(); 0; })
+#define follow_huge_addr(mm, vma, addr, write) 0
#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
#define hugetlb_prefault(mapping, vma) ({ BUG(); 0; })
#define zap_hugepage_range(vma, start, len) BUG()
@@ -37,6 +53,14 @@ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
#define huge_page_release(page) BUG()
#define is_hugepage_mem_enough(size) 0
#define hugetlb_report_meminfo(buf) 0
+#define hugepage_vma(mm, addr) 0
+#define mark_mm_hugetlb(mm, vma) do { } while (0)
+#define follow_huge_pmd(mm, addr, pmd, write) 0
+#define pmd_huge(x) 0
+
+#ifndef HPAGE_MASK
+#define HPAGE_MASK 0 /* Keep the compiler happy */
+#endif
#endif /* !CONFIG_HUGETLB_PAGE */
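
[Editor's note: the new hooks let generic page-table walkers recognise huge mappings: pmd_huge() tests whether a pmd entry maps a huge page, follow_huge_addr()/follow_huge_pmd() return the backing struct page, and mark_mm_hugetlb() flags an mm as having used hugetlb. A rough sketch of how a walker might branch on them; example_lookup() is hypothetical, not the real follow_page().]

	/* Hypothetical walker fragment showing how the new hooks fit together. */
	static struct page *example_lookup(struct mm_struct *mm,
					   struct vm_area_struct *vma,
					   unsigned long addr, int write)
	{
		pgd_t *pgd;
		pmd_t *pmd;

		if (is_vm_hugetlb_page(vma))
			return follow_huge_addr(mm, vma, addr, write);

		pgd = pgd_offset(mm, addr);
		pmd = pmd_offset(pgd, addr);
		if (pmd_huge(*pmd))
			return follow_huge_pmd(mm, addr, pmd, write);

		/* ...normal pte lookup would continue here... */
		return NULL;
	}
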
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 77bc3a1340ac..11483636b4d6 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -45,9 +45,13 @@
#define INIT_SIGNALS(sig) { \
.count = ATOMIC_INIT(1), \
+ .shared_pending = { NULL, &sig.shared_pending.head, {{0}}}, \
+}
+
+#define INIT_SIGHAND(sighand) { \
+ .count = ATOMIC_INIT(1), \
.action = { {{0,}}, }, \
.siglock = SPIN_LOCK_UNLOCKED, \
- .shared_pending = { NULL, &sig.shared_pending.head, {{0}}}, \
}
/*
@@ -90,7 +94,8 @@
.thread = INIT_THREAD, \
.fs = &init_fs, \
.files = &init_files, \
- .sig = &init_signals, \
+ .signal = &init_signals, \
+ .sighand = &init_sighand, \
.pending = { NULL, &tsk.pending.head, {{0}}}, \
.blocked = {{0}}, \
.alloc_lock = SPIN_LOCK_UNLOCKED, \
diff --git a/include/linux/isdnif.h b/include/linux/isdnif.h
index fed344ec7a41..06265081fa48 100644
--- a/include/linux/isdnif.h
+++ b/include/linux/isdnif.h
@@ -62,7 +62,7 @@
/* */
/* The proceed command holds a incoming call in a state to leave processes */
/* enough time to check whether ist should be accepted. */
-/* The PROT_IO Command extends the interface to make protocol dependant */
+/* The PROT_IO Command extends the interface to make protocol dependent */
/* features available (call diversion, call waiting...). */
/* */
/* The PROT_IO Command is executed with the desired driver id and the arg */
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 47a20ce63fa8..2236641f5593 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -63,7 +63,38 @@ extern void * __jbd_kmalloc (const char *where, size_t size, int flags, int retr
#define JFS_MIN_JOURNAL_BLOCKS 1024
#ifdef __KERNEL__
+
+/**
+ * typedef handle_t - The handle_t type represents a single atomic update being performed by some process.
+ *
+ * All filesystem modifications made by the process go
+ * through this handle. Recursive operations (such as quota operations)
+ * are gathered into a single update.
+ *
+ * The buffer credits field is used to account for journaled buffers
+ * being modified by the running process. To ensure that there is
+ * enough log space for all outstanding operations, we need to limit the
+ * number of outstanding buffers possible at any time. When the
+ * operation completes, any buffer credits not used are credited back to
+ * the transaction, so that at all times we know how many buffers the
+ * outstanding updates on a transaction might possibly touch.
+ *
+ * This is an opaque datatype.
+ **/
typedef struct handle_s handle_t; /* Atomic operation type */
+
+
+/**
+ * typedef journal_t - The journal_t maintains all of the journaling state information for a single filesystem.
+ *
+ * journal_t is linked to from the fs superblock structure.
+ *
+ * We use the journal_t to keep track of all outstanding transaction
+ * activity on the filesystem, and to manage the state of the log
+ * writing process.
+ *
+ * This is an opaque datatype.
+ **/
typedef struct journal_s journal_t; /* Journal control structure */
#endif
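
[Editor's note: as the new kernel-doc describes, a handle_t brackets one atomic filesystem update and carries the buffer credits it may dirty. A hedged sketch of the usual handle lifecycle in a jbd client, with error paths trimmed; example_update() is illustrative.]

	/* Sketch of the usual handle lifecycle for a jbd client. */
	static int example_update(journal_t *journal, struct buffer_head *bh)
	{
		handle_t *handle = journal_start(journal, 1);	/* reserve one buffer credit */
		int err;

		if (IS_ERR(handle))
			return PTR_ERR(handle);
		err = journal_get_write_access(handle, bh);	/* declare intent to modify bh */
		if (!err) {
			/* ...modify the buffer contents here... */
			err = journal_dirty_metadata(handle, bh);  /* attach bh to the transaction */
		}
		journal_stop(handle);				/* return unused credits */
		return err;
	}
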
@@ -252,6 +283,20 @@ static inline struct journal_head *bh2jh(struct buffer_head *bh)
}
#define HAVE_JOURNAL_CALLBACK_STATUS
+/**
+ * struct journal_callback - Base structure for callback information.
+ * @jcb_list: list information for other callbacks attached to the same handle.
+ * @jcb_func: Function to call with this callback structure.
+ *
+ * This struct is a 'seed' structure for a using with your own callback
+ * structs. If you are using callbacks you must allocate one of these
+ * or another struct of your own definition which has this struct
+ * as it's first element and pass it to journal_callback_set().
+ *
+ * This is used internally by jbd to maintain callback information.
+ *
+ * See journal_callback_set for more information.
+ **/
struct journal_callback {
struct list_head jcb_list;
void (*jcb_func)(struct journal_callback *jcb, int error);
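
[Editor's note: as the comment says, callers embed struct journal_callback as the first member of their own structure and register it with journal_callback_set(); jbd then invokes jcb_func when the handle's transaction completes. A hedged sketch of that embedding pattern; the outer struct, cookie field, and the exact journal_callback_set() signature are assumptions.]

	/* Hypothetical caller-side callback, embedding struct journal_callback first. */
	struct example_done {
		struct journal_callback jcb;	/* must be the first member */
		int cookie;
	};

	static void example_commit_done(struct journal_callback *jcb, int error)
	{
		struct example_done *d = (struct example_done *)jcb;
		/* a non-zero error would indicate an aborted journal */
		printk(KERN_DEBUG "commit done, cookie=%d err=%d\n", d->cookie, error);
	}

	static void example_register(handle_t *handle, struct example_done *d)
	{
		d->cookie = 42;
		journal_callback_set(handle, example_commit_done, &d->jcb);
	}
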
@@ -260,18 +305,21 @@ struct journal_callback {
struct jbd_revoke_table_s;
-/* The handle_t type represents a single atomic update being performed
- * by some process. All filesystem modifications made by the process go
- * through this handle. Recursive operations (such as quota operations)
- * are gathered into a single update.
- *
- * The buffer credits field is used to account for journaled buffers
- * being modified by the running process. To ensure that there is
- * enough log space for all outstanding operations, we need to limit the
- * number of outstanding buffers possible at any time. When the
- * operation completes, any buffer credits not used are credited back to
- * the transaction, so that at all times we know how many buffers the
- * outstanding updates on a transaction might possibly touch. */
+/**
+ * struct handle_s - The handle_s type is the concrete type associated with handle_t.
+ * @h_transaction: Which compound transaction is this update a part of?
+ * @h_buffer_credits: Number of remaining buffers we are allowed to dirty.
+ * @h_ref: Reference count on this handle
+ * @h_jcb: List of application registered callbacks for this handle.
+ * @h_err: Field for caller's use to track errors through large fs operations
+ * @h_sync: flag for sync-on-close
+ * @h_jdata: flag to force data journaling
+ * @h_aborted: flag indicating fatal error on handle
+ **/
+
+/* Docbook can't yet cope with the bit fields, but will leave the documentation
+ * in so it can be fixed later.
+ */
struct handle_s
{
@@ -284,8 +332,8 @@ struct handle_s
/* Reference count on this handle */
int h_ref;
- /* Field for caller's use to track errors through large fs
- operations */
+ /* Field for caller's use to track errors through large fs */
+ /* operations */
int h_err;
/* List of application registered callbacks for this handle.
@@ -412,21 +460,58 @@ struct transaction_s
struct list_head t_jcb;
};
-
-/* The journal_t maintains all of the journaling state information for a
- * single filesystem. It is linked to from the fs superblock structure.
- *
- * We use the journal_t to keep track of all outstanding transaction
- * activity on the filesystem, and to manage the state of the log
- * writing process. */
+/**
+ * struct journal_s - The journal_s type is the concrete type associated with journal_t.
+ * @j_flags: General journaling state flags
+ * @j_errno: Is there an outstanding uncleared error on the journal (from a prior abort)?
+ * @j_sb_buffer: First part of superblock buffer
+ * @j_superblock: Second part of superblock buffer
+ * @j_format_version: Version of the superblock format
+ * @j_barrier_count: Number of processes waiting to create a barrier lock
+ * @j_barrier: The barrier lock itself
+ * @j_running_transaction: The current running transaction..
+ * @j_committing_transaction: the transaction we are pushing to disk
+ * @j_checkpoint_transactions: a linked circular list of all transactions waiting for checkpointing
+ * @j_wait_transaction_locked: Wait queue for waiting for a locked transaction to start committing, or for a barrier lock to be released
+ * @j_wait_logspace: Wait queue for waiting for checkpointing to complete
+ * @j_wait_done_commit: Wait queue for waiting for commit to complete
+ * @j_wait_checkpoint: Wait queue to trigger checkpointing
+ * @j_wait_commit: Wait queue to trigger commit
+ * @j_wait_updates: Wait queue to wait for updates to complete
+ * @j_checkpoint_sem: Semaphore for locking against concurrent checkpoints
+ * @j_sem: The main journal lock, used by lock_journal()
+ * @j_head: Journal head - identifies the first unused block in the journal
+ * @j_tail: Journal tail - identifies the oldest still-used block in the journal.
+ * @j_free: Journal free - how many free blocks are there in the journal?
+ * @j_first: The block number of the first usable block
+ * @j_last: The block number one beyond the last usable block
+ * @j_dev: Device where we store the journal
+ * @j_blocksize: blocksize for the location where we store the journal.
+ * @j_blk_offset: starting block offset for into the device where we store the journal
+ * @j_fs_dev: Device which holds the client fs. For internal journal this will be equal to j_dev
+ * @j_maxlen: Total maximum capacity of the journal region on disk.
+ * @j_inode: Optional inode where we store the journal. If present, all journal block numbers are mapped into this inode via bmap().
+ * @j_tail_sequence: Sequence number of the oldest transaction in the log
+ * @j_transaction_sequence: Sequence number of the next transaction to grant
+ * @j_commit_sequence: Sequence number of the most recently committed transaction
+ * @j_commit_request: Sequence number of the most recent transaction wanting commit
+ * @j_uuid: Uuid of client object.
+ * @j_task: Pointer to the current commit thread for this journal
+ * @j_max_transaction_buffers: Maximum number of metadata buffers to allow in a single compound commit transaction
+ * @j_commit_interval: What is the maximum transaction lifetime before we begin a commit?
+ * @j_commit_timer: The timer used to wakeup the commit thread
+ * @j_commit_timer_active: Timer flag
+ * @j_all_journals: Link all journals together - system-wide
+ * @j_revoke: The revoke table - maintains the list of revoked blocks in the current transaction.
+ **/
struct journal_s
{
/* General journaling state flags */
unsigned long j_flags;
- /* Is there an outstanding uncleared error on the journal (from
- * a prior abort)? */
+ /* Is there an outstanding uncleared error on the journal (from */
+ /* a prior abort)? */
int j_errno;
/* The superblock buffer */
@@ -448,13 +533,13 @@ struct journal_s
/* ... the transaction we are pushing to disk ... */
transaction_t * j_committing_transaction;
- /* ... and a linked circular list of all transactions waiting
- * for checkpointing. */
+ /* ... and a linked circular list of all transactions waiting */
+ /* for checkpointing. */
/* Protected by journal_datalist_lock */
transaction_t * j_checkpoint_transactions;
- /* Wait queue for waiting for a locked transaction to start
- committing, or for a barrier lock to be released */
+ /* Wait queue for waiting for a locked transaction to start */
+ /* committing, or for a barrier lock to be released */
wait_queue_head_t j_wait_transaction_locked;
/* Wait queue for waiting for checkpointing to complete */
@@ -481,33 +566,33 @@ struct journal_s
/* Journal head: identifies the first unused block in the journal. */
unsigned long j_head;
- /* Journal tail: identifies the oldest still-used block in the
- * journal. */
+ /* Journal tail: identifies the oldest still-used block in the */
+ /* journal. */
unsigned long j_tail;
/* Journal free: how many free blocks are there in the journal? */
unsigned long j_free;
- /* Journal start and end: the block numbers of the first usable
- * block and one beyond the last usable block in the journal. */
+ /* Journal start and end: the block numbers of the first usable */
+ /* block and one beyond the last usable block in the journal. */
unsigned long j_first, j_last;
- /* Device, blocksize and starting block offset for the location
- * where we store the journal. */
+ /* Device, blocksize and starting block offset for the location */
+ /* where we store the journal. */
struct block_device * j_dev;
int j_blocksize;
unsigned int j_blk_offset;
- /* Device which holds the client fs. For internal journal this
- * will be equal to j_dev. */
+ /* Device which holds the client fs. For internal journal this */
+ /* will be equal to j_dev. */
struct block_device * j_fs_dev;
/* Total maximum capacity of the journal region on disk. */
unsigned int j_maxlen;
- /* Optional inode where we store the journal. If present, all
- * journal block numbers are mapped into this inode via
- * bmap(). */
+ /* Optional inode where we store the journal. If present, all */
+ /* journal block numbers are mapped into this inode via */
+ /* bmap(). */
struct inode * j_inode;
/* Sequence number of the oldest transaction in the log */
@@ -519,23 +604,23 @@ struct journal_s
/* Sequence number of the most recent transaction wanting commit */
tid_t j_commit_request;
- /* Journal uuid: identifies the object (filesystem, LVM volume
- * etc) backed by this journal. This will eventually be
- * replaced by an array of uuids, allowing us to index multiple
- * devices within a single journal and to perform atomic updates
- * across them. */
+ /* Journal uuid: identifies the object (filesystem, LVM volume */
+ /* etc) backed by this journal. This will eventually be */
+ /* replaced by an array of uuids, allowing us to index multiple */
+ /* devices within a single journal and to perform atomic updates */
+ /* across them. */
__u8 j_uuid[16];
/* Pointer to the current commit thread for this journal */
struct task_struct * j_task;
- /* Maximum number of metadata buffers to allow in a single
- * compound commit transaction */
+ /* Maximum number of metadata buffers to allow in a single */
+ /* compound commit transaction */
int j_max_transaction_buffers;
- /* What is the maximum transaction lifetime before we begin a
- * commit? */
+ /* What is the maximum transaction lifetime before we begin a */
+ /* commit? */
unsigned long j_commit_interval;
/* The timer used to wakeup the commit thread: */
@@ -545,8 +630,8 @@ struct journal_s
/* Link all journals together - system-wide */
struct list_head j_all_journals;
- /* The revoke table: maintains the list of revoked blocks in the
- current transaction. */
+ /* The revoke table: maintains the list of revoked blocks in the */
+ /* current transaction. */
struct jbd_revoke_table_s *j_revoke;
};
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d2b99c852301..c68771c27d88 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -208,24 +208,55 @@ struct page {
* Also, many kernel routines increase the page count before a critical
* routine so they can be sure the page doesn't go away from under them.
*/
-#define get_page(p) atomic_inc(&(p)->count)
-#define __put_page(p) atomic_dec(&(p)->count)
#define put_page_testzero(p) \
({ \
BUG_ON(page_count(page) == 0); \
atomic_dec_and_test(&(p)->count); \
})
+
#define page_count(p) atomic_read(&(p)->count)
#define set_page_count(p,v) atomic_set(&(p)->count, v)
+#define __put_page(p) atomic_dec(&(p)->count)
extern void FASTCALL(__page_cache_release(struct page *));
+#ifdef CONFIG_HUGETLB_PAGE
+
+static inline void get_page(struct page *page)
+{
+ if (PageCompound(page))
+ page = (struct page *)page->lru.next;
+ atomic_inc(&page->count);
+}
+
static inline void put_page(struct page *page)
{
+ if (PageCompound(page)) {
+ page = (struct page *)page->lru.next;
+ if (page->lru.prev) { /* destructor? */
+ (*(void (*)(struct page *))page->lru.prev)(page);
+ return;
+ }
+ }
if (!PageReserved(page) && put_page_testzero(page))
__page_cache_release(page);
}
+#else /* CONFIG_HUGETLB_PAGE */
+
+static inline void get_page(struct page *page)
+{
+ atomic_inc(&page->count);
+}
+
+static inline void put_page(struct page *page)
+{
+ if (!PageReserved(page) && put_page_testzero(page))
+ __page_cache_release(page);
+}
+
+#endif /* CONFIG_HUGETLB_PAGE */
+
/*
* Multiple processes may "see" the same page. E.g. for untouched
* mappings of /dev/null, all processes see the same page full of
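
[Editor's note: the CONFIG_HUGETLB_PAGE variants make get_page()/put_page() compound-aware: every page of a compound allocation has lru.next pointing at the head page, so the refcount is always taken on the head, and the head's lru.prev may hold a destructor that is run instead of __page_cache_release(). A hedged sketch of how a constructor might set those fields up; example_* names are illustrative.]

	/* Hypothetical setup matching the fields get_page()/put_page() consult. */
	static void example_free_huge(struct page *head)
	{
		/* ...would return the pages to the huge page pool... */
	}

	static void example_make_compound(struct page *head, int order)
	{
		int i;

		head->lru.prev = (void *)example_free_huge;	/* destructor hook read by put_page() */
		for (i = 0; i < (1 << order); i++) {
			struct page *p = head + i;
			SetPageCompound(p);
			p->lru.next = (void *)head;	/* all pages point back at the head */
		}
	}
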
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 0327a8421c9d..5c3bded564d8 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -72,7 +72,8 @@
#define PG_direct 16 /* ->pte_chain points directly at pte */
#define PG_mappedtodisk 17 /* Has blocks allocated on-disk */
-#define PG_reclaim 18 /* To be recalimed asap */
+#define PG_reclaim 18 /* To be reclaimed asap */
+#define PG_compound 19 /* Part of a compound page */
/*
* Global page accounting. One instance per CPU. Only unsigned longs are
@@ -251,6 +252,10 @@ extern void get_full_page_state(struct page_state *ret);
#define ClearPageReclaim(page) clear_bit(PG_reclaim, &(page)->flags)
#define TestClearPageReclaim(page) test_and_clear_bit(PG_reclaim, &(page)->flags)
+#define PageCompound(page) test_bit(PG_compound, &(page)->flags)
+#define SetPageCompound(page) set_bit(PG_compound, &(page)->flags)
+#define ClearPageCompound(page) clear_bit(PG_compound, &(page)->flags)
+
/*
* The PageSwapCache predicate doesn't use a PG_flag at this time,
* but it may again do so one day.
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index c6de3a4ea70a..706b420fb5c9 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -26,6 +26,8 @@
/* 0x4200-0x4300 are reserved for architecture-independent additions. */
#define PTRACE_SETOPTIONS 0x4200
#define PTRACE_GETEVENTMSG 0x4201
+#define PTRACE_GETSIGINFO 0x4202
+#define PTRACE_SETSIGINFO 0x4203
/* options set using PTRACE_SETOPTIONS */
#define PTRACE_O_TRACESYSGOOD 0x00000001
@@ -33,12 +35,16 @@
#define PTRACE_O_TRACEVFORK 0x00000004
#define PTRACE_O_TRACECLONE 0x00000008
#define PTRACE_O_TRACEEXEC 0x00000010
+#define PTRACE_O_TRACEVFORKDONE 0x00000020
+#define PTRACE_O_TRACEEXIT 0x00000040
/* Wait extended result codes for the above trace options. */
#define PTRACE_EVENT_FORK 1
#define PTRACE_EVENT_VFORK 2
#define PTRACE_EVENT_CLONE 3
#define PTRACE_EVENT_EXEC 4
+#define PTRACE_EVENT_VFORK_DONE 5
+#define PTRACE_EVENT_EXIT 6
#include <asm/ptrace.h>
#include <linux/sched.h>
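
[Editor's note: the new options and event codes extend PTRACE_SETOPTIONS so a tracer can stop the child at vfork-completion and at exit, while PTRACE_GETSIGINFO/PTRACE_SETSIGINFO let it read or rewrite the siginfo of the signal being delivered. A hedged user-space sketch of waiting for the exit event, assuming the PTRACE_* constants are visible to user space and omitting error handling.]

	/* Hypothetical tracer fragment: stop the child when it is about to exit. */
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/wait.h>
	#include <signal.h>

	static void example_trace_exit(pid_t child)
	{
		int status;

		ptrace(PTRACE_SETOPTIONS, child, 0, PTRACE_O_TRACEEXIT);
		ptrace(PTRACE_CONT, child, 0, 0);
		waitpid(child, &status, 0);
		if (WIFSTOPPED(status) &&
		    (status >> 8) == (SIGTRAP | (PTRACE_EVENT_EXIT << 8))) {
			unsigned long code;
			ptrace(PTRACE_GETEVENTMSG, child, 0, &code);	/* child's exit code */
			/* PTRACE_GETSIGINFO could likewise fetch a siginfo_t here */
		}
	}
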
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3a1367bacd1c..975dd5dca713 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -201,7 +201,9 @@ struct mm_struct {
unsigned long swap_address;
unsigned dumpable:1;
-
+#ifdef CONFIG_HUGETLB_PAGE
+ int used_hugetlb;
+#endif
/* Architecture-specific MM context */
mm_context_t context;
@@ -218,10 +220,21 @@ struct mm_struct {
extern int mmlist_nr;
-struct signal_struct {
+struct sighand_struct {
atomic_t count;
struct k_sigaction action[_NSIG];
spinlock_t siglock;
+};
+
+/*
+ * NOTE! "signal_struct" does not have it's own
+ * locking, because a shared signal_struct always
+ * implies a shared sighand_struct, so locking
+ * sighand_struct is always a proper superset of
+ * the locking of signal_struct.
+ */
+struct signal_struct {
+ atomic_t count;
/* current thread group signal load-balancing target: */
task_t *curr_target;
@@ -233,6 +246,9 @@ struct signal_struct {
int group_exit;
int group_exit_code;
struct task_struct *group_exit_task;
+
+ /* thread group stop support, overloads group_exit_code too */
+ int group_stop_count;
};
/*
@@ -373,7 +389,8 @@ struct task_struct {
/* namespace */
struct namespace *namespace;
/* signal handlers */
- struct signal_struct *sig;
+ struct signal_struct *signal;
+ struct sighand_struct *sighand;
sigset_t blocked, real_blocked;
struct sigpending pending;
@@ -400,6 +417,7 @@ struct task_struct {
struct backing_dev_info *backing_dev_info;
unsigned long ptrace_message;
+ siginfo_t *last_siginfo; /* For ptrace use. */
};
extern void __put_task_struct(struct task_struct *tsk);
@@ -440,6 +458,8 @@ do { if (atomic_dec_and_test(&(tsk)->usage)) __put_task_struct(tsk); } while(0)
#define PT_TRACE_VFORK 0x00000020
#define PT_TRACE_CLONE 0x00000040
#define PT_TRACE_EXEC 0x00000080
+#define PT_TRACE_VFORK_DONE 0x00000100
+#define PT_TRACE_EXIT 0x00000200
#if CONFIG_SMP
extern void set_cpus_allowed(task_t *p, unsigned long new_mask);
@@ -506,7 +526,6 @@ extern int in_egroup_p(gid_t);
extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *);
-extern void sig_exit(int, int, struct siginfo *);
extern int dequeue_signal(sigset_t *mask, siginfo_t *info);
extern void block_all_signals(int (*notifier)(void *priv), void *priv,
sigset_t *mask);
@@ -523,7 +542,7 @@ extern void do_notify_parent(struct task_struct *, int);
extern void force_sig(int, struct task_struct *);
extern void force_sig_specific(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
-extern int __broadcast_thread_group(struct task_struct *p, int sig);
+extern void zap_other_threads(struct task_struct *p);
extern int kill_pg(pid_t, int, int);
extern int kill_sl(pid_t, int, int);
extern int kill_proc(pid_t, int, int);
@@ -585,9 +604,13 @@ extern void exit_thread(void);
extern void exit_mm(struct task_struct *);
extern void exit_files(struct task_struct *);
+extern void exit_signal(struct task_struct *);
+extern void __exit_signal(struct task_struct *);
extern void exit_sighand(struct task_struct *);
extern void __exit_sighand(struct task_struct *);
+extern NORET_TYPE void do_group_exit(int);
+
extern void reparent_to_init(void);
extern void daemonize(void);
extern task_t *child_reaper;
@@ -755,11 +778,13 @@ static inline void cond_resched_lock(spinlock_t * lock)
/* Reevaluate whether the task has signals pending delivery.
This is required every time the blocked sigset_t changes.
- callers must hold sig->siglock. */
+ callers must hold sighand->siglock. */
extern FASTCALL(void recalc_sigpending_tsk(struct task_struct *t));
extern void recalc_sigpending(void);
+extern void signal_wake_up(struct task_struct *t, int resume_stopped);
+
/*
* Wrappers for p->thread_info->cpu access. No-op on UP.
*/
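
[Editor's note: the split separates the shared handler table (sighand_struct, with the k_sigaction array and siglock) from the per-thread-group signal state (signal_struct); per the NOTE above, taking the sighand lock covers both. Code that previously took tsk->sig->siglock now reaches it through tsk->sighand, as in this hedged fragment.]

	/* Sketch: locking pattern after the signal/sighand split. */
	static void example_poke_signals(struct task_struct *tsk)
	{
		unsigned long flags;

		spin_lock_irqsave(&tsk->sighand->siglock, flags);
		/* safe to examine tsk->signal and tsk->pending here */
		recalc_sigpending_tsk(tsk);
		spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
	}
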
diff --git a/include/linux/sdla_x25.h b/include/linux/sdla_x25.h
index 9827e74faaf2..c110c1a835f7 100644
--- a/include/linux/sdla_x25.h
+++ b/include/linux/sdla_x25.h
@@ -157,7 +157,7 @@ typedef struct X25Cmd
#define X25RES_PROTO_VIOLATION 0x41 /* protocol violation occured */
#define X25RES_PKT_TIMEOUT 0x42 /* X.25 packet time out */
#define X25RES_PKT_RETRY_LIMIT 0x43 /* X.25 packet retry limit exceeded */
-/*----- Command-dependant results -----*/
+/*----- Command-dependent results -----*/
#define X25RES_LINK_DISC 0x00 /* HDLC_LINK_STATUS */
#define X25RES_LINK_IN_ABM 0x01 /* HDLC_LINK_STATUS */
#define X25RES_NO_DATA 0x01 /* HDLC_READ/READ_TRACE_DATA*/
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 220a672af798..c136265fd3cd 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -71,7 +71,8 @@ extern kmem_cache_t *files_cachep;
extern kmem_cache_t *filp_cachep;
extern kmem_cache_t *dquot_cachep;
extern kmem_cache_t *fs_cachep;
-extern kmem_cache_t *sigact_cachep;
+extern kmem_cache_t *signal_cachep;
+extern kmem_cache_t *sighand_cachep;
extern kmem_cache_t *bio_cachep;
#endif /* __KERNEL__ */
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 403033961628..a289a20a2484 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -37,30 +37,120 @@
#ifdef CONFIG_SMP
#include <asm/spinlock.h>
-/*
- * !CONFIG_SMP and spin_lock_init not previously defined
- * (e.g. by including include/asm/spinlock.h)
- */
-#elif !defined(spin_lock_init)
+#else
-#ifndef CONFIG_PREEMPT
+#if !defined(CONFIG_PREEMPT) && !defined(CONFIG_DEBUG_SPINLOCK)
# define atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic)
# define ATOMIC_DEC_AND_LOCK
#endif
+#ifdef CONFIG_DEBUG_SPINLOCK
+
+#define SPINLOCK_MAGIC 0x1D244B3C
+typedef struct {
+ unsigned long magic;
+ volatile unsigned long lock;
+ volatile unsigned int babble;
+ const char *module;
+ char *owner;
+ int oline;
+} spinlock_t;
+#define SPIN_LOCK_UNLOCKED (spinlock_t) { SPINLOCK_MAGIC, 0, 10, __FILE__ , NULL, 0}
+
+#define spin_lock_init(x) \
+ do { \
+ (x)->magic = SPINLOCK_MAGIC; \
+ (x)->lock = 0; \
+ (x)->babble = 5; \
+ (x)->module = __FILE__; \
+ (x)->owner = NULL; \
+ (x)->oline = 0; \
+ } while (0)
+
+#define CHECK_LOCK(x) \
+ do { \
+ if ((x)->magic != SPINLOCK_MAGIC) { \
+ printk(KERN_ERR "%s:%d: spin_is_locked on uninitialized spinlock %p.\n", \
+ __FILE__, __LINE__, (x)); \
+ } \
+ } while(0)
+
+#define _raw_spin_lock(x) \
+ do { \
+ CHECK_LOCK(x); \
+ if ((x)->lock&&(x)->babble) { \
+ printk("%s:%d: spin_lock(%s:%p) already locked by %s/%d\n", \
+ __FILE__,__LINE__, (x)->module, \
+ (x), (x)->owner, (x)->oline); \
+ (x)->babble--; \
+ } \
+ (x)->lock = 1; \
+ (x)->owner = __FILE__; \
+ (x)->oline = __LINE__; \
+ } while (0)
+
+/* without debugging, spin_is_locked on UP always says
+ * FALSE. --> printk if already locked. */
+#define spin_is_locked(x) \
+ ({ \
+ CHECK_LOCK(x); \
+ if ((x)->lock&&(x)->babble) { \
+ printk("%s:%d: spin_is_locked(%s:%p) already locked by %s/%d\n", \
+ __FILE__,__LINE__, (x)->module, \
+ (x), (x)->owner, (x)->oline); \
+ (x)->babble--; \
+ } \
+ 0; \
+ })
+
+/* without debugging, spin_trylock on UP always says
+ * TRUE. --> printk if already locked. */
+#define _raw_spin_trylock(x) \
+ ({ \
+ CHECK_LOCK(x); \
+ if ((x)->lock&&(x)->babble) { \
+ printk("%s:%d: spin_trylock(%s:%p) already locked by %s/%d\n", \
+ __FILE__,__LINE__, (x)->module, \
+ (x), (x)->owner, (x)->oline); \
+ (x)->babble--; \
+ } \
+ (x)->lock = 1; \
+ (x)->owner = __FILE__; \
+ (x)->oline = __LINE__; \
+ 1; \
+ })
+
+#define spin_unlock_wait(x) \
+ do { \
+ CHECK_LOCK(x); \
+ if ((x)->lock&&(x)->babble) { \
+ printk("%s:%d: spin_unlock_wait(%s:%p) owned by %s/%d\n", \
+ __FILE__,__LINE__, (x)->module, (x), \
+ (x)->owner, (x)->oline); \
+ (x)->babble--; \
+ }\
+ } while (0)
+
+#define _raw_spin_unlock(x) \
+ do { \
+ CHECK_LOCK(x); \
+ if (!(x)->lock&&(x)->babble) { \
+ printk("%s:%d: spin_unlock(%s:%p) not locked\n", \
+ __FILE__,__LINE__, (x)->module, (x));\
+ (x)->babble--; \
+ } \
+ (x)->lock = 0; \
+ } while (0)
+#else
/*
* gcc versions before ~2.95 have a nasty bug with empty initializers.
*/
#if (__GNUC__ > 2)
typedef struct { } spinlock_t;
- typedef struct { } rwlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { }
- #define RW_LOCK_UNLOCKED (rwlock_t) { }
#else
typedef struct { int gcc_is_buggy; } spinlock_t;
- typedef struct { int gcc_is_buggy; } rwlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
- #define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
#endif
/*
@@ -72,6 +162,18 @@
#define _raw_spin_trylock(lock) ((void)(lock), 1)
#define spin_unlock_wait(lock) do { (void)(lock); } while(0)
#define _raw_spin_unlock(lock) do { (void)(lock); } while(0)
+#endif /* CONFIG_DEBUG_SPINLOCK */
+
+/* RW spinlocks: No debug version */
+
+#if (__GNUC__ > 2)
+ typedef struct { } rwlock_t;
+ #define RW_LOCK_UNLOCKED (rwlock_t) { }
+#else
+ typedef struct { int gcc_is_buggy; } rwlock_t;
+ #define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
+#endif
+
#define rwlock_init(lock) do { (void)(lock); } while(0)
#define _raw_read_lock(lock) do { (void)(lock); } while(0)
#define _raw_read_unlock(lock) do { (void)(lock); } while(0)
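
[Editor's note: with CONFIG_DEBUG_SPINLOCK on UP, the spinlock becomes a real structure whose magic, owner, and babble fields let the macros above report bad usage (double lock, unlock of an unlocked lock, use of an uninitialised lock) instead of compiling away to nothing. A small illustration of what the checks catch; the driver code is hypothetical.]

	/* Roughly what the UP debug spinlock reports. */
	static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;

	static void example_bad_locking(void)
	{
		spin_lock(&example_lock);
		spin_lock(&example_lock);	/* printk: "...already locked by..." */
		spin_unlock(&example_lock);
		spin_unlock(&example_lock);	/* printk: "...not locked" */
	}
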
diff --git a/include/linux/types.h b/include/linux/types.h
index 94ceb057eb64..f1c0ce5eb845 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -4,10 +4,12 @@
#ifdef __KERNEL__
#include <linux/config.h>
+#define BITS_TO_LONGS(bits) \
+ (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
#define DECLARE_BITMAP(name,bits) \
- unsigned long name[((bits)+BITS_PER_LONG-1)/BITS_PER_LONG]
+ unsigned long name[BITS_TO_LONGS(bits)]
#define CLEAR_BITMAP(name,bits) \
- memset(name, 0, ((bits)+BITS_PER_LONG-1)/8)
+ memset(name, 0, BITS_TO_LONGS(bits)*sizeof(unsigned long))
#endif
#include <linux/posix_types.h>
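
[Editor's note: besides introducing BITS_TO_LONGS(), this change appears to fix CLEAR_BITMAP(): the old memset length rounded the bit count up to bytes rather than to whole longs, so it could run past the array DECLARE_BITMAP() actually allocates. A worked example, assuming a 32-bit kernel (BITS_PER_LONG == 32).]

	/* DECLARE_BITMAP(example_map, 50) is 2 longs = 8 bytes on 32-bit. */
	DECLARE_BITMAP(example_map, 50);

	static void example_clear(void)
	{
		/* old: memset(example_map, 0, (50 + 31) / 8)  -> 10 bytes, past the array
		 * new: memset(example_map, 0, BITS_TO_LONGS(50) * sizeof(unsigned long))
		 *      -> exactly the 8 bytes that were declared                         */
		CLEAR_BITMAP(example_map, 50);
	}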