From 1669ce53e2ff7b49a60d0230866d3faee5f45573 Mon Sep 17 00:00:00 2001 From: Daniel Jacobowitz Date: Sat, 18 Jan 2003 10:40:18 -0500 Subject: Add PTRACE_GETSIGINFO and PTRACE_SETSIGINFO These new ptrace commands allow a debugger to control signals more precisely; for instance, store a signal and deliver it later, as if it had come from the original outside process or in response to the same faulting memory access. --- include/linux/ptrace.h | 2 ++ include/linux/sched.h | 1 + 2 files changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index c6de3a4ea70a..b56bbe7ca800 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -26,6 +26,8 @@ /* 0x4200-0x4300 are reserved for architecture-independent additions. */ #define PTRACE_SETOPTIONS 0x4200 #define PTRACE_GETEVENTMSG 0x4201 +#define PTRACE_GETSIGINFO 0x4202 +#define PTRACE_SETSIGINFO 0x4203 /* options set using PTRACE_SETOPTIONS */ #define PTRACE_O_TRACESYSGOOD 0x00000001 diff --git a/include/linux/sched.h b/include/linux/sched.h index 15a951d2d27e..a325e5a8c645 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -400,6 +400,7 @@ struct task_struct { struct backing_dev_info *backing_dev_info; unsigned long ptrace_message; + siginfo_t *last_siginfo; /* For ptrace use. */ }; extern void __put_task_struct(struct task_struct *tsk); -- cgit v1.2.3 From 00c8e791cba1bb88db8a8fd73106c28fdbab5716 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 5 Feb 2003 16:56:31 -0800 Subject: [PATCH] self-unplugging request queues The patch teaches a queue to unplug itself: a) if is has four requests OR b) if it has had plugged requests for 3 milliseconds. These numbers may need to be tuned, although doing so doesn't seem to make much difference. 10 msecs works OK, so HZ=100 machines will be fine. Instrumentation shows that about 5-10% of requests were started due to the three millisecond timeout (during a kernel compile). 
That's somewhat significant. It means that the kernel is leaving stuff in the queue, plugged, for too long. This testing was with a uniprocessor preemptible kernel, which is particularly vulnerable to unplug latency (submit some IO, get preempted before the unplug). This patch permits the removal of a lot of rather lame unplugging in page reclaim and in the writeback code, which kicks the queues (globally!) every four megabytes to get writeback underway. This patch doesn't use blk_run_queues(). It is able to kick just the particular queue. The patch is not expected to make much difference really, except for AIO. AIO needs a blk_run_queues() in its io_submit() call. For each request. This means that AIO has to disable plugging altogether, unless something like this patch does it for it. It means that AIO will unplug *all* queues in the machine for every io_submit(). Even against a socket! This patch was tested by disabling blk_run_queues() completely. The system ran OK. The 3 milliseconds may be too long. It's OK for the heavy writeback code, but AIO may want less. Or maybe AIO really wants zero (ie: disable plugging). If that is so, we need new code paths by which AIO can communicate the "immediate unplug" information - a global unplug is not good. To minimise unplug latency due to user CPU load, this patch gives keventd `nice -10'. This is of course completely arbitrary. Really, I think keventd should be SCHED_RR/MAX_RT_PRIO-1, as it has been in -aa kernels for ages. 
--- drivers/block/ll_rw_blk.c | 40 ++++++++++++++++++++++++++++++++++++++++ include/linux/blkdev.h | 10 ++++++++++ kernel/workqueue.c | 1 + 3 files changed, 51 insertions(+) (limited to 'include/linux') diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c index f8a4e7a81f4b..e13d0bbca144 100644 --- a/drivers/block/ll_rw_blk.c +++ b/drivers/block/ll_rw_blk.c @@ -27,6 +27,8 @@ #include #include +static void blk_unplug_work(void *data); + /* * For the allocated request tables */ @@ -237,6 +239,14 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn) blk_queue_hardsect_size(q, 512); blk_queue_dma_alignment(q, 511); + q->unplug_thresh = 4; /* hmm */ + q->unplug_delay = (3 * HZ) / 1000; /* 3 milliseconds */ + if (q->unplug_delay == 0) + q->unplug_delay = 1; + + init_timer(&q->unplug_timer); + INIT_WORK(&q->unplug_work, blk_unplug_work, q); + /* * by default assume old behaviour and bounce for any highmem page */ @@ -960,6 +970,7 @@ void blk_plug_device(request_queue_t *q) if (!blk_queue_plugged(q)) { spin_lock(&blk_plug_lock); list_add_tail(&q->plug_list, &blk_plug_list); + mod_timer(&q->unplug_timer, jiffies + q->unplug_delay); spin_unlock(&blk_plug_lock); } } @@ -974,6 +985,7 @@ int blk_remove_plug(request_queue_t *q) if (blk_queue_plugged(q)) { spin_lock(&blk_plug_lock); list_del_init(&q->plug_list); + del_timer(&q->unplug_timer); spin_unlock(&blk_plug_lock); return 1; } @@ -992,6 +1004,8 @@ static inline void __generic_unplug_device(request_queue_t *q) if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags)) return; + del_timer(&q->unplug_timer); + /* * was plugged, fire request_fn if queue has stuff to do */ @@ -1020,6 +1034,18 @@ void generic_unplug_device(void *data) spin_unlock_irq(q->queue_lock); } +static void blk_unplug_work(void *data) +{ + generic_unplug_device(data); +} + +static void blk_unplug_timeout(unsigned long data) +{ + request_queue_t *q = (request_queue_t *)data; + + schedule_work(&q->unplug_work); +} + /** * 
blk_start_queue - restart a previously stopped queue * @q: The &request_queue_t in question @@ -1164,6 +1190,9 @@ void blk_cleanup_queue(request_queue_t * q) count -= __blk_cleanup_queue(&q->rq[READ]); count -= __blk_cleanup_queue(&q->rq[WRITE]); + del_timer_sync(&q->unplug_timer); + flush_scheduled_work(); + if (count) printk("blk_cleanup_queue: leaked requests (%d)\n", count); @@ -1269,6 +1298,9 @@ int blk_init_queue(request_queue_t *q, request_fn_proc *rfn, spinlock_t *lock) blk_queue_make_request(q, __make_request); blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE); + q->unplug_timer.function = blk_unplug_timeout; + q->unplug_timer.data = (unsigned long)q; + blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS); blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS); @@ -1811,7 +1843,15 @@ get_rq: out: if (freereq) __blk_put_request(q, freereq); + + if (blk_queue_plugged(q)) { + int nr_queued = (queue_nr_requests - q->rq[0].count) + + (queue_nr_requests - q->rq[1].count); + if (nr_queued == q->unplug_thresh) + __generic_unplug_device(q); + } spin_unlock_irq(q->queue_lock); + return 0; end_io: diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index c599ea36233b..82766b7e60b0 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -4,6 +4,8 @@ #include #include #include +#include +#include #include #include #include @@ -188,6 +190,14 @@ struct request_queue unplug_fn *unplug_fn; merge_bvec_fn *merge_bvec_fn; + /* + * Auto-unplugging state + */ + struct timer_list unplug_timer; + int unplug_thresh; /* After this many requests */ + unsigned long unplug_delay; /* After this many jiffies */ + struct work_struct unplug_work; + struct backing_dev_info backing_dev_info; /* diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 156583c7dbf7..fb10d360c436 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -177,6 +177,7 @@ static int worker_thread(void *__startup) current->flags |= PF_IOTHREAD; cwq->thread = current; + set_user_nice(current, -10); 
set_cpus_allowed(current, 1UL << cpu); spin_lock_irq(¤t->sig->siglock); -- cgit v1.2.3 From b573296a46a508552d0909e7b2183c631f43fb51 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 5 Feb 2003 16:56:51 -0800 Subject: [PATCH] JBD Documentation Patch from Roger Gammans Adds lots of API documentation to the JBD layer. --- Documentation/DocBook/journal-api.tmpl | 39 ++++-- fs/jbd/journal.c | 215 +++++++++++++++++++++++---------- fs/jbd/recovery.c | 29 +++-- fs/jbd/transaction.c | 183 +++++++++++++++++++--------- include/linux/jbd.h | 183 ++++++++++++++++++++-------- 5 files changed, 458 insertions(+), 191 deletions(-) (limited to 'include/linux') diff --git a/Documentation/DocBook/journal-api.tmpl b/Documentation/DocBook/journal-api.tmpl index e57c8593eabd..ece95d2eb843 100644 --- a/Documentation/DocBook/journal-api.tmpl +++ b/Documentation/DocBook/journal-api.tmpl @@ -141,17 +141,14 @@ you are have done so you need to call journal_dirty_{meta,}data(). Or if you've asked for access to a buffer you now know is now longer required to be pushed back on the device you can call journal_forget() in much the same way as you might have used bforget() in the past. - - - A journal_flush() may be called at any time to commit and checkpoint all your transactions. - + Then at umount time , in your put_super() (2.4) or write_super() (2.5) you can then call journal_destroy() to clean up your in-core journal object. @@ -168,8 +165,8 @@ on another journal. Since transactions can't be nested/batched across differing journals, and another filesystem other than yours (say ext3) may be modified in a later syscall. - + The second case to bear in mind is that journal_start() can block if there isn't enough space in the journal for your transaction (based on the passed nblocks param) - when it blocks it merely(!) needs to @@ -180,10 +177,14 @@ were semaphores and include them in your semaphore ordering rules to prevent deadlocks. 
Note that journal_extend() has similar blocking behaviour to journal_start() so you can deadlock here just as easily as on journal_start(). - -Try to reserve the right number of blocks the first time. ;-). + +Try to reserve the right number of blocks the first time. ;-). This will +be the maximum number of blocks you are going to touch in this transaction. +I advise having a look at at least ext3_jbd.h to see the basis on which +ext3 uses to make these decisions. + Another wriggle to watch out for is your on-disk block allocation strategy. why? Because, if you undo a delete, you need to ensure you haven't reused any @@ -211,6 +212,30 @@ The opportunities for abuse and DOS attacks with this should be obvious, if you allow unprivileged userspace to trigger codepaths containing these calls. + + +A new feature of jbd since 2.5.25 is commit callbacks with the new +journal_callback_set() function you can now ask the journalling layer +to call you back when the transaction is finally commited to disk, so that +you can do some of your own management. The key to this is the journal_callback +struct, this maintains the internal callback information but you can +extend it like this:- + + + struct myfs_callback_s { + //Data structure element required by jbd.. + struct journal_callback for_jbd; + // Stuff for myfs allocated together. + myfs_inode* i_commited; + + } + + + +this would be useful if you needed to know when data was commited to a +particular inode. + + diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c index d2f5935ef972..3117885c3f6e 100644 --- a/fs/jbd/journal.c +++ b/fs/jbd/journal.c @@ -732,14 +732,21 @@ fail: * need to set up all of the mapping information to tell the journaling * system where the journal blocks are. * - * journal_init_dev creates a journal which maps a fixed contiguous - * range of blocks on an arbitrary block device. - * - * journal_init_inode creates a journal which maps an on-disk inode as - * the journal. 
The inode must exist already, must support bmap() and - * must have all data blocks preallocated. */ +/** + * journal_t * journal_init_dev() - creates an initialises a journal structure + * @bdev: Block device on which to create the journal + * @fs_dev: Device which hold journalled filesystem for this journal. + * @start: Block nr Start of journal. + * @len: Lenght of the journal in blocks. + * @blocksize: blocksize of journalling device + * @returns: a newly created journal_t * + * + * journal_init_dev creates a journal which maps a fixed contiguous + * range of blocks on an arbitrary block device. + * + */ journal_t * journal_init_dev(struct block_device *bdev, struct block_device *fs_dev, int start, int len, int blocksize) @@ -763,7 +770,15 @@ journal_t * journal_init_dev(struct block_device *bdev, return journal; } - + +/** + * journal_t * journal_init_inode () - creates a journal which maps to a inode. + * @inode: An inode to create the journal in + * + * journal_init_inode creates a journal which maps an on-disk inode as + * the journal. The inode must exist already, must support bmap() and + * must have all data blocks preallocated. + */ journal_t * journal_init_inode (struct inode *inode) { struct buffer_head *bh; @@ -852,12 +867,15 @@ static int journal_reset (journal_t *journal) return 0; } -/* +/** + * int journal_create() - Initialise the new journal file + * @journal: Journal to create. This structure must have been initialised + * * Given a journal_t structure which tells us which disk blocks we can * use, create a new journal superblock and initialise all of the - * journal fields from scratch. */ - -int journal_create (journal_t *journal) + * journal fields from scratch. + **/ +int journal_create(journal_t *journal) { unsigned long blocknr; struct buffer_head *bh; @@ -920,11 +938,14 @@ int journal_create (journal_t *journal) return journal_reset(journal); } -/* +/** + * void journal_update_superblock() - Update journal sb on disk. 
+ * @journal: The journal to update. + * @wait: Set to '0' if you don't want to wait for IO completion. + * * Update a journal's dynamic superblock fields and write it to disk, * optionally waiting for the IO to complete. -*/ - + */ void journal_update_superblock(journal_t *journal, int wait) { journal_superblock_t *sb = journal->j_superblock; @@ -1040,12 +1061,14 @@ static int load_superblock(journal_t *journal) } -/* +/** + * int journal_load() - Read journal from disk. + * @journal: Journal to act on. + * * Given a journal_t structure which tells us which disk blocks contain * a journal, read the journal from disk to initialise the in-memory * structures. */ - int journal_load(journal_t *journal) { int err; @@ -1090,11 +1113,13 @@ recovery_error: return -EIO; } -/* +/** + * void journal_destroy() - Release a journal_t structure. + * @journal: Journal to act on. +* * Release a journal_t structure once it is no longer in use by the * journaled object. */ - void journal_destroy (journal_t *journal) { /* Wait for the commit thread to wake up and die. */ @@ -1131,8 +1156,12 @@ void journal_destroy (journal_t *journal) } -/* Published API: Check whether the journal uses all of a given set of - * features. Return true (non-zero) if it does. */ +/** + *int journal_check_used_features () - Check if features specified are used. + * + * Check whether the journal uses all of a given set of + * features. Return true (non-zero) if it does. + **/ int journal_check_used_features (journal_t *journal, unsigned long compat, unsigned long ro, unsigned long incompat) @@ -1154,7 +1183,10 @@ int journal_check_used_features (journal_t *journal, unsigned long compat, return 0; } -/* Published API: Check whether the journaling code supports the use of +/** + * int journal_check_available_features() - Check feature set in journalling layer + * + * Check whether the journaling code supports the use of * all of a given set of features on this journal. Return true * (non-zero) if it can. 
*/ @@ -1183,8 +1215,13 @@ int journal_check_available_features (journal_t *journal, unsigned long compat, return 0; } -/* Published API: Mark a given journal feature as present on the - * superblock. Returns true if the requested features could be set. */ +/** + * int journal_set_features () - Mark a given journal feature in the superblock + * + * Mark a given journal feature as present on the + * superblock. Returns true if the requested features could be set. + * + */ int journal_set_features (journal_t *journal, unsigned long compat, unsigned long ro, unsigned long incompat) @@ -1210,12 +1247,12 @@ int journal_set_features (journal_t *journal, unsigned long compat, } -/* - * Published API: +/** + * int journal_update_format () - Update on-disk journal structure. + * * Given an initialised but unloaded journal struct, poke about in the * on-disk structure to update it to the most recent supported version. */ - int journal_update_format (journal_t *journal) { journal_superblock_t *sb; @@ -1265,7 +1302,10 @@ static int journal_convert_superblock_v1(journal_t *journal, } -/* +/** + * int journal_flush () - Flush journal + * @journal: Journal to act on. + * * Flush all data for a given journal to disk and empty the journal. * Filesystems can use this when remounting readonly to ensure that * recovery does not need to happen on remount. @@ -1319,12 +1359,16 @@ int journal_flush (journal_t *journal) return err; } -/* +/** + * int journal_wipe() - Wipe journal contents + * @journal: Journal to act on. + * @write: flag (see below) + * * Wipe out all of the contents of a journal, safely. This will produce * a warning if the journal contains any valid recovery information. * Must be called between journal_init_*() and journal_load(). * - * If (write) is non-zero, then we wipe out the journal on disk; otherwise + * If 'write' is non-zero, then we wipe out the journal on disk; otherwise * we merely suppress recovery. 
*/ @@ -1373,43 +1417,11 @@ const char * journal_dev_name(journal_t *journal) } /* - * journal_abort: perform a complete, immediate shutdown of the ENTIRE - * journal (not of a single transaction). This operation cannot be - * undone without closing and reopening the journal. - * - * The journal_abort function is intended to support higher level error - * recovery mechanisms such as the ext2/ext3 remount-readonly error - * mode. - * - * Journal abort has very specific semantics. Any existing dirty, - * unjournaled buffers in the main filesystem will still be written to - * disk by bdflush, but the journaling mechanism will be suspended - * immediately and no further transaction commits will be honoured. - * - * Any dirty, journaled buffers will be written back to disk without - * hitting the journal. Atomicity cannot be guaranteed on an aborted - * filesystem, but we _do_ attempt to leave as much data as possible - * behind for fsck to use for cleanup. - * - * Any attempt to get a new transaction handle on a journal which is in - * ABORT state will just result in an -EROFS error return. A - * journal_stop on an existing handle will return -EIO if we have - * entered abort state during the update. + * Journal abort has very specific semantics, which we describe + * for journal abort. * - * Recursive transactions are not disturbed by journal abort until the - * final journal_stop, which will receive the -EIO error. - * - * Finally, the journal_abort call allows the caller to supply an errno - * which will be recored (if possible) in the journal superblock. This - * allows a client to record failure conditions in the middle of a - * transaction without having to complete the transaction to record the - * failure to disk. ext3_error, for example, now uses this - * functionality. 
- * - * Errors which originate from within the journaling layer will NOT - * supply an errno; a null errno implies that absolutely no further - * writes are done to the journal (unless there are any already in - * progress). + * Two internal function, which provide abort to te jbd layer + * itself are here. */ /* Quick version for internal journal use (doesn't lock the journal). @@ -1447,7 +1459,52 @@ void __journal_abort_soft (journal_t *journal, int errno) journal_update_superblock(journal, 1); } -/* Full version for external use */ +/** + * void journal_abort () - Shutdown the journal immediately. + * @journal: the journal to shutdown. + * @errno: an error number to record in the journal indicating + * the reason for the shutdown. + * + * Perform a complete, immediate shutdown of the ENTIRE + * journal (not of a single transaction). This operation cannot be + * undone without closing and reopening the journal. + * + * The journal_abort function is intended to support higher level error + * recovery mechanisms such as the ext2/ext3 remount-readonly error + * mode. + * + * Journal abort has very specific semantics. Any existing dirty, + * unjournaled buffers in the main filesystem will still be written to + * disk by bdflush, but the journaling mechanism will be suspended + * immediately and no further transaction commits will be honoured. + * + * Any dirty, journaled buffers will be written back to disk without + * hitting the journal. Atomicity cannot be guaranteed on an aborted + * filesystem, but we _do_ attempt to leave as much data as possible + * behind for fsck to use for cleanup. + * + * Any attempt to get a new transaction handle on a journal which is in + * ABORT state will just result in an -EROFS error return. A + * journal_stop on an existing handle will return -EIO if we have + * entered abort state during the update. 
+ * + * Recursive transactions are not disturbed by journal abort until the + * final journal_stop, which will receive the -EIO error. + * + * Finally, the journal_abort call allows the caller to supply an errno + * which will be recorded (if possible) in the journal superblock. This + * allows a client to record failure conditions in the middle of a + * transaction without having to complete the transaction to record the + * failure to disk. ext3_error, for example, now uses this + * functionality. + * + * Errors which originate from within the journaling layer will NOT + * supply an errno; a null errno implies that absolutely no further + * writes are done to the journal (unless there are any already in + * progress). + * + */ + void journal_abort (journal_t *journal, int errno) { lock_journal(journal); @@ -1455,6 +1512,17 @@ void journal_abort (journal_t *journal, int errno) unlock_journal(journal); } +/** + * int journal_errno () - returns the journal's error state. + * @journal: journal to examine. + * + * This is the errno numbet set with journal_abort(), the last + * time the journal was mounted - if the journal was stopped + * without calling abort this will be 0. + * + * If the journal has been aborted on this mount time -EROFS will + * be returned. + */ int journal_errno (journal_t *journal) { int err; @@ -1468,6 +1536,14 @@ int journal_errno (journal_t *journal) return err; } + + +/** + * int journal_clear_err () - clears the journal's error state + * + * An error must be cleared or Acked to take a FS out of readonly + * mode. + */ int journal_clear_err (journal_t *journal) { int err = 0; @@ -1481,6 +1557,13 @@ int journal_clear_err (journal_t *journal) return err; } + +/** + * void journal_ack_err() - Ack journal err. + * + * An error must be cleared or Acked to take a FS out of readonly + * mode. 
+ */ void journal_ack_err (journal_t *journal) { lock_journal(journal); diff --git a/fs/jbd/recovery.c b/fs/jbd/recovery.c index e6a96d3c30ce..f82d7f3cc507 100644 --- a/fs/jbd/recovery.c +++ b/fs/jbd/recovery.c @@ -206,20 +206,22 @@ do { \ var -= ((journal)->j_last - (journal)->j_first); \ } while (0) -/* - * journal_recover - * +/** + * int journal_recover(journal_t *journal) - recovers a on-disk journal + * @journal: the journal to recover + * * The primary function for recovering the log contents when mounting a * journaled device. - * + */ +int journal_recover(journal_t *journal) +{ +/* * Recovery is done in three passes. In the first pass, we look for the * end of the log. In the second, we assemble the list of revoke * blocks. In the third and final pass, we replay any un-revoked blocks * in the log. */ -int journal_recover(journal_t *journal) -{ int err; journal_superblock_t * sb; @@ -263,20 +265,23 @@ int journal_recover(journal_t *journal) return err; } -/* - * journal_skip_recovery - * +/** + * int journal_skip_recovery() - Start journal and wipe exiting records + * @journal: journal to startup + * * Locate any valid recovery information from the journal and set up the * journal structures in memory to ignore it (presumably because the * caller has evidence that it is out of date). - * + * This function does'nt appear to be exorted.. + */ +int journal_skip_recovery(journal_t *journal) +{ +/* * We perform one pass over the journal to allow us to tell the user how * much recovery information is being erased, and to let us initialise * the journal transaction sequence numbers to the next unused ID. */ -int journal_skip_recovery(journal_t *journal) -{ int err; journal_superblock_t * sb; diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c index 597562cf47fe..14ca5228e9d6 100644 --- a/fs/jbd/transaction.c +++ b/fs/jbd/transaction.c @@ -222,19 +222,20 @@ static handle_t *new_handle(int nblocks) return handle; } -/* - * Obtain a new handle. 
+/** + * handle_t *journal_start() - Obtain a new handle. + * @journal: Journal to start transaction on. + * @nblocks: number of block buffer we might modify * * We make sure that the transaction can guarantee at least nblocks of * modified buffers in the log. We block until the log can guarantee * that much space. * - * This function is visible to journal users (like ext2fs), so is not + * This function is visible to journal users (like ext3fs), so is not * called with the journal already locked. * * Return a pointer to a newly allocated handle, or NULL on failure */ - handle_t *journal_start(journal_t *journal, int nblocks) { handle_t *handle = journal_current_handle(); @@ -324,7 +325,11 @@ fail_unlock: return ret; } -/* +/** + * handle_t *journal_try_start() - Don't block, but try and get a handle + * @journal: Journal to start transaction on. + * @nblocks: number of block buffer we might modify + * * Try to start a handle, but non-blockingly. If we weren't able * to, return an ERR_PTR value. */ @@ -368,16 +373,18 @@ handle_t *journal_try_start(journal_t *journal, int nblocks) return handle; } -/* - * journal_extend: extend buffer credits. - * +/** + * int journal_extend() - extend buffer credits. + * @handle: handle to 'extend' + * @nblocks: nr blocks to try to extend by. + * * Some transactions, such as large extends and truncates, can be done * atomically all at once or in several stages. The operation requests * a credit for a number of buffer modications in advance, but can * extend its credit if it needs more. * * journal_extend tries to give the running handle more buffer credits. - * It does not guarantee that allocation: this is a best-effort only. + * It does not guarantee that allocation - this is a best-effort only. * The calling process MUST be able to deal cleanly with a failure to * extend here. 
* @@ -386,7 +393,6 @@ handle_t *journal_try_start(journal_t *journal, int nblocks) * return code < 0 implies an error * return code > 0 implies normal transaction-full status. */ - int journal_extend (handle_t *handle, int nblocks) { transaction_t *transaction = handle->h_transaction; @@ -435,8 +441,12 @@ error_out: } -/* - * journal_restart: restart a handle for a multi-transaction filesystem +/** + * int journal_restart() - restart a handle . + * @handle: handle to restart + * @nblocks: nr credits requested + * + * Restart a handle for a multi-transaction filesystem * operation. * * If the journal_extend() call above fails to grant new buffer credits @@ -478,8 +488,9 @@ int journal_restart(handle_t *handle, int nblocks) } -/* - * Barrier operation: establish a transaction barrier. +/** + * void journal_lock_updates () - establish a transaction barrier. + * @journal: Journal to establish a barrier on. * * This locks out any further updates from being started, and blocks * until all existing updates have completed, returning only once the @@ -487,7 +498,6 @@ int journal_restart(handle_t *handle, int nblocks) * * The journal lock should not be held on entry. */ - void journal_lock_updates (journal_t *journal) { lock_journal(journal); @@ -515,12 +525,14 @@ void journal_lock_updates (journal_t *journal) down(&journal->j_barrier); } -/* +/** + * void journal_unlock_updates (journal_t* journal) - release barrier + * @journal: Journal to release the barrier on. + * * Release a transaction barrier obtained with journal_lock_updates(). * * Should be called without the journal lock held. */ - void journal_unlock_updates (journal_t *journal) { lock_journal(journal); @@ -566,9 +578,6 @@ static void jbd_unexpected_dirty_buffer(struct journal_head *jh) } /* - * journal_get_write_access: notify intent to modify a buffer for metadata - * (not data) update. - * * If the buffer is already part of the current transaction, then there * is nothing we need to do. 
If it is already part of a prior * transaction which we are still committing to disk, then we need to @@ -577,7 +586,6 @@ static void jbd_unexpected_dirty_buffer(struct journal_head *jh) * the handle's metadata buffer credits (unless the buffer is already * part of the transaction, that is). * - * Returns an error code or 0 on success. */ static int @@ -786,6 +794,17 @@ out_unlocked: return error; } +/** + * int journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update. + * @handle: transaction to add buffer modifications to + * @bh: bh to be used for metadata writes + * + * Returns an error code or 0 on success. + * + * In full data journalling mode the buffer may be of type BJ_AsyncData, + * because we're write()ing a buffer which is also part of a shared mapping. + */ + int journal_get_write_access (handle_t *handle, struct buffer_head *bh) { transaction_t *transaction = handle->h_transaction; @@ -816,6 +835,13 @@ int journal_get_write_access (handle_t *handle, struct buffer_head *bh) * There is no lock ranking violation: it was a newly created, * unlocked buffer beforehand. */ +/** + * int journal_get_create_access () - notify intent to use newly created bh + * @handle: transaction to new buffer to + * @bh: new buffer. + * + * Call this if you create a new bh. + */ int journal_get_create_access (handle_t *handle, struct buffer_head *bh) { transaction_t *transaction = handle->h_transaction; @@ -875,13 +901,14 @@ out: -/* - * journal_get_undo_access: Notify intent to modify metadata with non- - * rewindable consequences - * +/** + * int journal_get_undo_access() - Notify intent to modify metadata with non-rewindable consequences + * @handle: transaction + * @bh: buffer to undo + * * Sometimes there is a need to distinguish between metadata which has * been committed to disk and that which has not. 
The ext3fs code uses - * this for freeing and allocating space: we have to make sure that we + * this for freeing and allocating space, we have to make sure that we * do not reuse freed space until the deallocation has been committed, * since if we overwrote that space we would make the delete * un-rewindable in case of a crash. @@ -893,13 +920,12 @@ out: * as we know that the buffer has definitely been committed to disk. * * We never need to know which transaction the committed data is part - * of: buffers touched here are guaranteed to be dirtied later and so + * of, buffers touched here are guaranteed to be dirtied later and so * will be committed to a new transaction in due course, at which point * we can discard the old committed data pointer. * * Returns error number or 0 on success. */ - int journal_get_undo_access (handle_t *handle, struct buffer_head *bh) { journal_t *journal = handle->h_transaction->t_journal; @@ -942,21 +968,23 @@ out: return err; } -/* - * journal_dirty_data: mark a buffer as containing dirty data which - * needs to be flushed before we can commit the current transaction. - * +/** + * int journal_dirty_data() - mark a buffer as containing dirty data which needs to be flushed before we can commit the current transaction. + * @handle: transaction + * @bh: bufferhead to mark + * * The buffer is placed on the transaction's data list and is marked as * belonging to the transaction. * * Returns error number or 0 on success. - * + */ +int journal_dirty_data (handle_t *handle, struct buffer_head *bh) +{ +/* * journal_dirty_data() can be called via page_launder->ext3_writepage * by kswapd. So it cannot block. Happily, there's nothing here * which needs lock_journal if `async' is set. 
*/ -int journal_dirty_data (handle_t *handle, struct buffer_head *bh) -{ journal_t *journal = handle->h_transaction->t_journal; int need_brelse = 0; struct journal_head *jh; @@ -1097,24 +1125,28 @@ no_journal: return 0; } -/* - * journal_dirty_metadata: mark a buffer as containing dirty metadata - * which needs to be journaled as part of the current transaction. +/** + * int journal_dirty_metadata() - mark a buffer as containing dirty metadata + * @handle: transaction to add buffer to. + * @bh: buffer to mark + * + * mark dirty metadata which needs to be journaled as part of the current transaction. * * The buffer is placed on the transaction's metadata list and is marked * as belonging to the transaction. * + * Returns error number or 0 on success. + */ +int journal_dirty_metadata (handle_t *handle, struct buffer_head *bh) +{ +/* * Special care needs to be taken if the buffer already belongs to the * current committing transaction (in which case we should have frozen * data present for that commit). In that case, we don't relink the * buffer: that only gets done when the old transaction finally * completes its commit. * - * Returns error number or 0 on success. */ - -int journal_dirty_metadata (handle_t *handle, struct buffer_head *bh) -{ transaction_t *transaction = handle->h_transaction; journal_t *journal = transaction->t_journal; struct journal_head *jh = bh2jh(bh); @@ -1199,9 +1231,12 @@ void journal_release_buffer (handle_t *handle, struct buffer_head *bh) } #endif -/* - * journal_forget: bforget() for potentially-journaled buffers. We can - * only do the bforget if there are no commits pending against the +/** + * void journal_forget() - bforget() for potentially-journaled buffers. + * @handle: transaction handle + * @bh: bh to 'forget' + * + * We can only do the bforget if there are no commits pending against the * buffer. If the buffer is dirty in the current running transaction we * can safely unlink it. 
* @@ -1213,7 +1248,6 @@ void journal_release_buffer (handle_t *handle, struct buffer_head *bh) * Allow this call even if the handle has aborted --- it may be part of * the caller's cleanup after an abort. */ - void journal_forget (handle_t *handle, struct buffer_head *bh) { transaction_t *transaction = handle->h_transaction; @@ -1352,8 +1386,14 @@ out: } #endif -/* - * Register a callback function for this handle. The function will be +/** + * void journal_callback_set() - Register a callback function for this handle. + * @handle: handle to attach the callback to. + * @func: function to callback. + * @jcb: structure with additional information required by func(), and + * some space for jbd internal information. + * + * The function will be * called when the transaction that this handle is part of has been * committed to disk with the original callback data struct and the * error status of the journal as parameters. There is no guarantee of @@ -1374,7 +1414,11 @@ void journal_callback_set(handle_t *handle, jcb->jcb_func = func; } -/* + +/** + * int journal_stop() - complete a transaction + * @handle: transaction to complete. + * + * All done for a particular handle. * * There is not much action needed here. We just return any remaining @@ -1387,7 +1431,6 @@ void journal_callback_set(handle_t *handle, * return -EIO if a journal_abort has been executed since the * transaction began. */ - int journal_stop(handle_t *handle) { transaction_t *transaction = handle->h_transaction; @@ -1473,8 +1516,10 @@ int journal_stop(handle_t *handle) return err; } -/* - * For synchronous operations: force any uncommitted trasnactions +/** int journal_force_commit() - force any uncommitted transactions + * @journal: journal to force + * + * For synchronous operations: force any uncommitted transactions * to disk. May seem kludgy, but it reuses all the handle batching * code in a very simple manner. 
*/ @@ -1667,6 +1712,26 @@ out: return 0; } + +/** + * int journal_try_to_free_buffers() - try to free page buffers. + * @journal: journal for operation + * @page: to try and free + * @gfp_mask: 'IO' mode for try_to_free_buffers() + * + * + * For all the buffers on this page, + * if they are fully written out ordered data, move them onto BUF_CLEAN + * so try_to_free_buffers() can reap them. + * + * This function returns non-zero if we wish try_to_free_buffers() + * to be called. We do this if the page is releasable by try_to_free_buffers(). + * We also do it if the page has locked or dirty buffers and the caller wants + * us to perform sync or async writeout. + */ +int journal_try_to_free_buffers(journal_t *journal, + struct page *page, int unused_gfp_mask) +{ /* * journal_try_to_free_buffers(). Try to remove all this page's buffers * from the journal. @@ -1689,9 +1754,6 @@ out: * cannot happen because we never reallocate freed data as metadata * while the data is part of a transaction. Yes? */ -int journal_try_to_free_buffers(journal_t *journal, - struct page *page, int unused_gfp_mask) -{ struct buffer_head *head; struct buffer_head *bh; int ret = 0; @@ -1886,8 +1948,15 @@ zap_buffer: return may_free; } -/* - * Return non-zero if the page's buffers were successfully reaped +/** + * int journal_invalidatepage() + * @journal: journal to use for flush... + * @page: page to flush + * @offset: length of page to invalidate. + * + * Reap page buffers containing data after offset in page. + * + * Return non-zero if the page's buffers were successfully reaped. 
*/ int journal_invalidatepage(journal_t *journal, struct page *page, diff --git a/include/linux/jbd.h b/include/linux/jbd.h index 47a20ce63fa8..2236641f5593 100644 --- a/include/linux/jbd.h +++ b/include/linux/jbd.h @@ -63,7 +63,38 @@ extern void * __jbd_kmalloc (const char *where, size_t size, int flags, int retr #define JFS_MIN_JOURNAL_BLOCKS 1024 #ifdef __KERNEL__ + +/** + * typedef handle_t - The handle_t type represents a single atomic update being performed by some process. + * + * All filesystem modifications made by the process go + * through this handle. Recursive operations (such as quota operations) + * are gathered into a single update. + * + * The buffer credits field is used to account for journaled buffers + * being modified by the running process. To ensure that there is + * enough log space for all outstanding operations, we need to limit the + * number of outstanding buffers possible at any time. When the + * operation completes, any buffer credits not used are credited back to + * the transaction, so that at all times we know how many buffers the + * outstanding updates on a transaction might possibly touch. + * + * This is an opaque datatype. + **/ typedef struct handle_s handle_t; /* Atomic operation type */ + + +/** + * typedef journal_t - The journal_t maintains all of the journaling state information for a single filesystem. + * + * journal_t is linked to from the fs superblock structure. + * + * We use the journal_t to keep track of all outstanding transaction + * activity on the filesystem, and to manage the state of the log + * writing process. + * + * This is an opaque datatype. + **/ typedef struct journal_s journal_t; /* Journal control structure */ #endif @@ -252,6 +283,20 @@ static inline struct journal_head *bh2jh(struct buffer_head *bh) } #define HAVE_JOURNAL_CALLBACK_STATUS +/** + * struct journal_callback - Base structure for callback information. + * @jcb_list: list information for other callbacks attached to the same handle. 
+ * @jcb_func: Function to call with this callback structure. + * + * This struct is a 'seed' structure for using with your own callback + * structs. If you are using callbacks you must allocate one of these + * or another struct of your own definition which has this struct + * as its first element and pass it to journal_callback_set(). + * + * This is used internally by jbd to maintain callback information. + * + * See journal_callback_set for more information. + **/ struct journal_callback { struct list_head jcb_list; void (*jcb_func)(struct journal_callback *jcb, int error); @@ -260,18 +305,21 @@ struct journal_callback { struct jbd_revoke_table_s; -/* The handle_t type represents a single atomic update being performed - * by some process. All filesystem modifications made by the process go - * through this handle. Recursive operations (such as quota operations) - * are gathered into a single update. - * - * The buffer credits field is used to account for journaled buffers - * being modified by the running process. To ensure that there is - * enough log space for all outstanding operations, we need to limit the - * number of outstanding buffers possible at any time. When the - * operation completes, any buffer credits not used are credited back to - * the transaction, so that at all times we know how many buffers the - * outstanding updates on a transaction might possibly touch. */ +/** + * struct handle_s - The handle_s type is the concrete type associated with handle_t. + * @h_transaction: Which compound transaction is this update a part of? + * @h_buffer_credits: Number of remaining buffers we are allowed to dirty. + * @h_ref: Reference count on this handle + * @h_jcb: List of application registered callbacks for this handle. 
+ * @h_err: Field for caller's use to track errors through large fs operations + * @h_sync: flag for sync-on-close + * @h_jdata: flag to force data journaling + * @h_aborted: flag indicating fatal error on handle + **/ + +/* Docbook can't yet cope with the bit fields, but will leave the documentation + * in so it can be fixed later. + */ struct handle_s { @@ -284,8 +332,8 @@ struct handle_s /* Reference count on this handle */ int h_ref; - /* Field for caller's use to track errors through large fs - operations */ + /* Field for caller's use to track errors through large fs */ + /* operations */ int h_err; /* List of application registered callbacks for this handle. @@ -412,21 +460,58 @@ struct transaction_s struct list_head t_jcb; }; - -/* The journal_t maintains all of the journaling state information for a - * single filesystem. It is linked to from the fs superblock structure. - * - * We use the journal_t to keep track of all outstanding transaction - * activity on the filesystem, and to manage the state of the log - * writing process. */ +/** + * struct journal_s - The journal_s type is the concrete type associated with journal_t. + * @j_flags: General journaling state flags + * @j_errno: Is there an outstanding uncleared error on the journal (from a prior abort)? + * @j_sb_buffer: First part of superblock buffer + * @j_superblock: Second part of superblock buffer + * @j_format_version: Version of the superblock format + * @j_barrier_count: Number of processes waiting to create a barrier lock + * @j_barrier: The barrier lock itself + * @j_running_transaction: The current running transaction.. 
+ * @j_committing_transaction: the transaction we are pushing to disk + * @j_checkpoint_transactions: a linked circular list of all transactions waiting for checkpointing + * @j_wait_transaction_locked: Wait queue for waiting for a locked transaction to start committing, or for a barrier lock to be released + * @j_wait_logspace: Wait queue for waiting for checkpointing to complete + * @j_wait_done_commit: Wait queue for waiting for commit to complete + * @j_wait_checkpoint: Wait queue to trigger checkpointing + * @j_wait_commit: Wait queue to trigger commit + * @j_wait_updates: Wait queue to wait for updates to complete + * @j_checkpoint_sem: Semaphore for locking against concurrent checkpoints + * @j_sem: The main journal lock, used by lock_journal() + * @j_head: Journal head - identifies the first unused block in the journal + * @j_tail: Journal tail - identifies the oldest still-used block in the journal. + * @j_free: Journal free - how many free blocks are there in the journal? + * @j_first: The block number of the first usable block + * @j_last: The block number one beyond the last usable block + * @j_dev: Device where we store the journal + * @j_blocksize: blocksize for the location where we store the journal. + * @j_blk_offset: starting block offset for into the device where we store the journal + * @j_fs_dev: Device which holds the client fs. For internal journal this will be equal to j_dev + * @j_maxlen: Total maximum capacity of the journal region on disk. + * @j_inode: Optional inode where we store the journal. If present, all journal block numbers are mapped into this inode via bmap(). + * @j_tail_sequence: Sequence number of the oldest transaction in the log + * @j_transaction_sequence: Sequence number of the next transaction to grant + * @j_commit_sequence: Sequence number of the most recently committed transaction + * @j_commit_request: Sequence number of the most recent transaction wanting commit + * @j_uuid: Uuid of client object. 
+ * @j_task: Pointer to the current commit thread for this journal + * @j_max_transaction_buffers: Maximum number of metadata buffers to allow in a single compound commit transaction + * @j_commit_interval: What is the maximum transaction lifetime before we begin a commit? + * @j_commit_timer: The timer used to wakeup the commit thread + * @j_commit_timer_active: Timer flag + * @j_all_journals: Link all journals together - system-wide + * @j_revoke: The revoke table - maintains the list of revoked blocks in the current transaction. + **/ struct journal_s { /* General journaling state flags */ unsigned long j_flags; - /* Is there an outstanding uncleared error on the journal (from - * a prior abort)? */ + /* Is there an outstanding uncleared error on the journal (from */ + /* a prior abort)? */ int j_errno; /* The superblock buffer */ @@ -448,13 +533,13 @@ struct journal_s /* ... the transaction we are pushing to disk ... */ transaction_t * j_committing_transaction; - /* ... and a linked circular list of all transactions waiting - * for checkpointing. */ + /* ... and a linked circular list of all transactions waiting */ + /* for checkpointing. */ /* Protected by journal_datalist_lock */ transaction_t * j_checkpoint_transactions; - /* Wait queue for waiting for a locked transaction to start - committing, or for a barrier lock to be released */ + /* Wait queue for waiting for a locked transaction to start */ + /* committing, or for a barrier lock to be released */ wait_queue_head_t j_wait_transaction_locked; /* Wait queue for waiting for checkpointing to complete */ @@ -481,33 +566,33 @@ struct journal_s /* Journal head: identifies the first unused block in the journal. */ unsigned long j_head; - /* Journal tail: identifies the oldest still-used block in the - * journal. */ + /* Journal tail: identifies the oldest still-used block in the */ + /* journal. */ unsigned long j_tail; /* Journal free: how many free blocks are there in the journal? 
*/ unsigned long j_free; - /* Journal start and end: the block numbers of the first usable - * block and one beyond the last usable block in the journal. */ + /* Journal start and end: the block numbers of the first usable */ + /* block and one beyond the last usable block in the journal. */ unsigned long j_first, j_last; - /* Device, blocksize and starting block offset for the location - * where we store the journal. */ + /* Device, blocksize and starting block offset for the location */ + /* where we store the journal. */ struct block_device * j_dev; int j_blocksize; unsigned int j_blk_offset; - /* Device which holds the client fs. For internal journal this - * will be equal to j_dev. */ + /* Device which holds the client fs. For internal journal this */ + /* will be equal to j_dev. */ struct block_device * j_fs_dev; /* Total maximum capacity of the journal region on disk. */ unsigned int j_maxlen; - /* Optional inode where we store the journal. If present, all - * journal block numbers are mapped into this inode via - * bmap(). */ + /* Optional inode where we store the journal. If present, all */ + /* journal block numbers are mapped into this inode via */ + /* bmap(). */ struct inode * j_inode; /* Sequence number of the oldest transaction in the log */ @@ -519,23 +604,23 @@ struct journal_s /* Sequence number of the most recent transaction wanting commit */ tid_t j_commit_request; - /* Journal uuid: identifies the object (filesystem, LVM volume - * etc) backed by this journal. This will eventually be - * replaced by an array of uuids, allowing us to index multiple - * devices within a single journal and to perform atomic updates - * across them. */ + /* Journal uuid: identifies the object (filesystem, LVM volume */ + /* etc) backed by this journal. This will eventually be */ + /* replaced by an array of uuids, allowing us to index multiple */ + /* devices within a single journal and to perform atomic updates */ + /* across them. 
*/ __u8 j_uuid[16]; /* Pointer to the current commit thread for this journal */ struct task_struct * j_task; - /* Maximum number of metadata buffers to allow in a single - * compound commit transaction */ + /* Maximum number of metadata buffers to allow in a single */ + /* compound commit transaction */ int j_max_transaction_buffers; - /* What is the maximum transaction lifetime before we begin a - * commit? */ + /* What is the maximum transaction lifetime before we begin a */ + /* commit? */ unsigned long j_commit_interval; /* The timer used to wakeup the commit thread: */ @@ -545,8 +630,8 @@ struct journal_s /* Link all journals together - system-wide */ struct list_head j_all_journals; - /* The revoke table: maintains the list of revoked blocks in the - current transaction. */ + /* The revoke table: maintains the list of revoked blocks in the */ + /* current transaction. */ struct jbd_revoke_table_s *j_revoke; }; -- cgit v1.2.3 From f93fcfa9e8a17bb8ef6a631ace1a14b02091e08f Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 5 Feb 2003 16:57:17 -0800 Subject: [PATCH] Fix futexes in huge pages Using a futex in a large page causes a kernel lockup in __pin_page() - because __pin_page's page revalidation uses follow_page(), and follow_page() doesn't work for hugepages. The patch fixes up follow_page() to return the appropriate 4k page for hugepages. This incurs a vma lookup for each follow_page(), which is considerable overhead in some situations. We only _need_ to do this if the architecture cannot determin a page's hugeness from the contents of the PMD. So this patch is a "reference" implementation for, say, PPC BAT-based hugepages. 
--- arch/i386/mm/hugetlbpage.c | 29 +++++++++++++++++++++++++++++ include/linux/hugetlb.h | 18 ++++++++++++++++-- include/linux/sched.h | 4 +++- mm/memory.c | 5 +++++ mm/mmap.c | 2 +- 5 files changed, 54 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/arch/i386/mm/hugetlbpage.c b/arch/i386/mm/hugetlbpage.c index 2dc1534f420d..9d7de7b0fb00 100644 --- a/arch/i386/mm/hugetlbpage.c +++ b/arch/i386/mm/hugetlbpage.c @@ -150,6 +150,35 @@ back1: return i; } +struct page * +follow_huge_addr(struct mm_struct *mm, + struct vm_area_struct *vma, unsigned long address, int write) +{ + unsigned long start = address; + int length = 1; + int nr; + struct page *page; + + nr = follow_hugetlb_page(mm, vma, &page, NULL, &start, &length, 0); + if (nr == 1) + return page; + return NULL; +} + +/* + * If virtual address `addr' lies within a huge page, return its controlling + * VMA, else NULL. + */ +struct vm_area_struct *hugepage_vma(struct mm_struct *mm, unsigned long addr) +{ + if (mm->used_hugetlb) { + struct vm_area_struct *vma = find_vma(mm, addr); + if (vma && is_vm_hugetlb_page(vma)) + return vma; + } + return NULL; +} + void free_huge_page(struct page *page) { BUG_ON(page_count(page)); diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 325d91ba012a..6b2f606c08f7 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -20,16 +20,28 @@ int hugetlb_prefault(struct address_space *, struct vm_area_struct *); void huge_page_release(struct page *); int hugetlb_report_meminfo(char *); int is_hugepage_mem_enough(size_t); - +struct page *follow_huge_addr(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, int write); +struct vm_area_struct *hugepage_vma(struct mm_struct *mm, + unsigned long address); extern int htlbpage_max; +static inline void +mark_mm_hugetlb(struct mm_struct *mm, struct vm_area_struct *vma) +{ + if (is_vm_hugetlb_page(vma)) + mm->used_hugetlb = 1; +} + #else /* !CONFIG_HUGETLB_PAGE */ + static 
inline int is_vm_hugetlb_page(struct vm_area_struct *vma) { return 0; } -#define follow_hugetlb_page(m,v,p,vs,a,b,i) ({ BUG(); 0; }) +#define follow_hugetlb_page(m,v,p,vs,a,b,i) ({ BUG(); 0; }) +#define follow_huge_addr(mm, vma, addr, write) 0 #define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; }) #define hugetlb_prefault(mapping, vma) ({ BUG(); 0; }) #define zap_hugepage_range(vma, start, len) BUG() @@ -37,6 +49,8 @@ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) #define huge_page_release(page) BUG() #define is_hugepage_mem_enough(size) 0 #define hugetlb_report_meminfo(buf) 0 +#define hugepage_vma(mm, addr) 0 +#define mark_mm_hugetlb(mm, vma) do { } while (0) #endif /* !CONFIG_HUGETLB_PAGE */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 3a1367bacd1c..648d4d3ace3c 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -201,7 +201,9 @@ struct mm_struct { unsigned long swap_address; unsigned dumpable:1; - +#ifdef CONFIG_HUGETLB_PAGE + int used_hugetlb; +#endif /* Architecture-specific MM context */ mm_context_t context; diff --git a/mm/memory.c b/mm/memory.c index 63b9032c0620..f058e4cfc639 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -607,6 +607,11 @@ follow_page(struct mm_struct *mm, unsigned long address, int write) pmd_t *pmd; pte_t *ptep, pte; unsigned long pfn; + struct vm_area_struct *vma; + + vma = hugepage_vma(mm, address); + if (vma) + return follow_huge_addr(mm, vma, address, write); pgd = pgd_offset(mm, address); if (pgd_none(*pgd) || pgd_bad(*pgd)) diff --git a/mm/mmap.c b/mm/mmap.c index 61d0dc32646a..d3b14b17da38 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -362,6 +362,7 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma, if (mapping) up(&mapping->i_shared_sem); + mark_mm_hugetlb(mm, vma); mm->map_count++; validate_mm(mm); } @@ -1423,7 +1424,6 @@ void exit_mmap(struct mm_struct *mm) kmem_cache_free(vm_area_cachep, vma); vma = next; } - } /* Insert vm structure into process 
list sorted by address -- cgit v1.2.3 From 1f1921fc15dc2408ab3900d036cffcf0d732801f Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 5 Feb 2003 16:57:23 -0800 Subject: [PATCH] Optimise follow_page() for page-table-based hugepages ia32 and others can determine a page's hugeness by inspecting the pmd's value directly. No need to perform a VMA lookup against the user's virtual address. This patch ifdef's away the VMA-based implementation of hugepage-aware-follow_page for ia32 and replaces it with a pmd-based implementation. The intent is that architectures will implement one or the other. So the architecture either: 1: Implements hugepage_vma()/follow_huge_addr(), and stubs out pmd_huge()/follow_huge_pmd() or 2: Implements pmd_huge()/follow_huge_pmd(), and stubs out hugepage_vma()/follow_huge_addr() --- arch/i386/mm/hugetlbpage.c | 45 +++++++++++++++++++++++++++++++++++++++++++++ include/linux/hugetlb.h | 6 ++++++ mm/memory.c | 6 +++++- 3 files changed, 56 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/arch/i386/mm/hugetlbpage.c b/arch/i386/mm/hugetlbpage.c index 9d7de7b0fb00..106dcdd8dcf2 100644 --- a/arch/i386/mm/hugetlbpage.c +++ b/arch/i386/mm/hugetlbpage.c @@ -150,6 +150,7 @@ back1: return i; } +#if 0 /* This is just for testing */ struct page * follow_huge_addr(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, int write) @@ -179,6 +180,50 @@ struct vm_area_struct *hugepage_vma(struct mm_struct *mm, unsigned long addr) return NULL; } +int pmd_huge(pmd_t pmd) +{ + return 0; +} + +struct page * +follow_huge_pmd(struct mm_struct *mm, unsigned long address, + pmd_t *pmd, int write) +{ + return NULL; +} + +#else + +struct page * +follow_huge_addr(struct mm_struct *mm, + struct vm_area_struct *vma, unsigned long address, int write) +{ + return NULL; +} + +struct vm_area_struct *hugepage_vma(struct mm_struct *mm, unsigned long addr) +{ + return NULL; +} + +int pmd_huge(pmd_t pmd) +{ + return !!(pmd_val(pmd) & 
_PAGE_PSE); +} + +struct page * +follow_huge_pmd(struct mm_struct *mm, unsigned long address, + pmd_t *pmd, int write) +{ + struct page *page; + + page = pte_page(*(pte_t *)pmd); + if (page) + page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT); + return page; +} +#endif + void free_huge_page(struct page *page) { BUG_ON(page_count(page)); diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 6b2f606c08f7..b51d51d05190 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -24,6 +24,10 @@ struct page *follow_huge_addr(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, int write); struct vm_area_struct *hugepage_vma(struct mm_struct *mm, unsigned long address); +struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, + pmd_t *pmd, int write); +int pmd_huge(pmd_t pmd); + extern int htlbpage_max; static inline void @@ -51,6 +55,8 @@ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) #define hugetlb_report_meminfo(buf) 0 #define hugepage_vma(mm, addr) 0 #define mark_mm_hugetlb(mm, vma) do { } while (0) +#define follow_huge_pmd(mm, addr, pmd, write) 0 +#define pmd_huge(x) 0 #endif /* !CONFIG_HUGETLB_PAGE */ diff --git a/mm/memory.c b/mm/memory.c index f058e4cfc639..e390e0bf4aea 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -618,7 +618,11 @@ follow_page(struct mm_struct *mm, unsigned long address, int write) goto out; pmd = pmd_offset(pgd, address); - if (pmd_none(*pmd) || pmd_bad(*pmd)) + if (pmd_none(*pmd)) + goto out; + if (pmd_huge(*pmd)) + return follow_huge_pmd(mm, address, pmd, write); + if (pmd_bad(*pmd)) goto out; ptep = pte_offset_map(pmd, address); -- cgit v1.2.3 From eefb08ee7da81e1548ffd5b664682dc5b229ddc2 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 5 Feb 2003 16:57:54 -0800 Subject: [PATCH] Infrastructure for correct hugepage refcounting We currently have a problem when things like ptrace, futexes and direct-io try to pin user pages. 
If the user's address is in a huge page we're elevating the refcount of a constituent 4k page, not the head page of the high-order allocation unit. To solve this, a generic way of handling higher-order pages has been implemented: - A higher-order page is called a "compound page". Chose this because "huge page", "large page", "super page", etc all seem to mean different things to different people. - The first (controlling) 4k page of a compound page is referred to as the "head" page. - The remaining pages are tail pages. All pages have PG_compound set. All pages have their lru.next pointing at the head page (even the head page has this). The head page's lru.prev, if non-zero, holds the address of the compound page's put_page() function. The order of the allocation is stored in the first tail page's lru.prev. This is only for debug at present. This usage means that zero-order pages may not be compound. The above relationships are established for _all_ higher-order pages in the page allocator. Which has some cost, but not much - another atomic op during fork(), mainly. This functionality is only enabled if CONFIG_HUGETLB_PAGE, although it could be turned on permanently. There's a little extra cost in get_page/put_page. These changes do not preclude adding compound pages to the LRU in the future - we can add a new page flag to the head page and then move all the additional data to the first tail page's lru.next, lru.prev, list.next, list.prev, index, private, etc. 
--- include/linux/mm.h | 35 ++++++++++++++++++++++-- include/linux/page-flags.h | 7 ++++- mm/page_alloc.c | 66 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 105 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index d2b99c852301..c68771c27d88 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -208,24 +208,55 @@ struct page { * Also, many kernel routines increase the page count before a critical * routine so they can be sure the page doesn't go away from under them. */ -#define get_page(p) atomic_inc(&(p)->count) -#define __put_page(p) atomic_dec(&(p)->count) #define put_page_testzero(p) \ ({ \ BUG_ON(page_count(page) == 0); \ atomic_dec_and_test(&(p)->count); \ }) + #define page_count(p) atomic_read(&(p)->count) #define set_page_count(p,v) atomic_set(&(p)->count, v) +#define __put_page(p) atomic_dec(&(p)->count) extern void FASTCALL(__page_cache_release(struct page *)); +#ifdef CONFIG_HUGETLB_PAGE + +static inline void get_page(struct page *page) +{ + if (PageCompound(page)) + page = (struct page *)page->lru.next; + atomic_inc(&page->count); +} + static inline void put_page(struct page *page) { + if (PageCompound(page)) { + page = (struct page *)page->lru.next; + if (page->lru.prev) { /* destructor? */ + (*(void (*)(struct page *))page->lru.prev)(page); + return; + } + } if (!PageReserved(page) && put_page_testzero(page)) __page_cache_release(page); } +#else /* CONFIG_HUGETLB_PAGE */ + +static inline void get_page(struct page *page) +{ + atomic_inc(&page->count); +} + +static inline void put_page(struct page *page) +{ + if (!PageReserved(page) && put_page_testzero(page)) + __page_cache_release(page); +} + +#endif /* CONFIG_HUGETLB_PAGE */ + /* * Multiple processes may "see" the same page. E.g. 
for untouched * mappings of /dev/null, all processes see the same page full of diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 0327a8421c9d..5c3bded564d8 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -72,7 +72,8 @@ #define PG_direct 16 /* ->pte_chain points directly at pte */ #define PG_mappedtodisk 17 /* Has blocks allocated on-disk */ -#define PG_reclaim 18 /* To be recalimed asap */ +#define PG_reclaim 18 /* To be reclaimed asap */ +#define PG_compound 19 /* Part of a compound page */ /* * Global page accounting. One instance per CPU. Only unsigned longs are @@ -251,6 +252,10 @@ extern void get_full_page_state(struct page_state *ret); #define ClearPageReclaim(page) clear_bit(PG_reclaim, &(page)->flags) #define TestClearPageReclaim(page) test_and_clear_bit(PG_reclaim, &(page)->flags) +#define PageCompound(page) test_bit(PG_compound, &(page)->flags) +#define SetPageCompound(page) set_bit(PG_compound, &(page)->flags) +#define ClearPageCompound(page) clear_bit(PG_compound, &(page)->flags) + /* * The PageSwapCache predicate doesn't use a PG_flag at this time, * but it may again do so one day. diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ef7d20c61e46..15df1737f072 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -85,6 +85,62 @@ static void bad_page(const char *function, struct page *page) page->mapping = NULL; } +#ifndef CONFIG_HUGETLB_PAGE +#define prep_compound_page(page, order) do { } while (0) +#define destroy_compound_page(page, order) do { } while (0) +#else +/* + * Higher-order pages are called "compound pages". They are structured thusly: + * + * The first PAGE_SIZE page is called the "head page". + * + * The remaining PAGE_SIZE pages are called "tail pages". + * + * All pages have PG_compound set. All pages have their lru.next pointing at + * the head page (even the head page has this). 
+ * + * The head page's lru.prev, if non-zero, holds the address of the compound + * page's put_page() function. + * + * The order of the allocation is stored in the first tail page's lru.prev. + * This is only for debug at present. This usage means that zero-order pages + * may not be compound. + */ +static void prep_compound_page(struct page *page, int order) +{ + int i; + int nr_pages = 1 << order; + + page->lru.prev = NULL; + page[1].lru.prev = (void *)order; + for (i = 0; i < nr_pages; i++) { + struct page *p = page + i; + + SetPageCompound(p); + p->lru.next = (void *)page; + } +} + +static void destroy_compound_page(struct page *page, int order) +{ + int i; + int nr_pages = 1 << order; + + if (page[1].lru.prev != (void *)order) + bad_page(__FUNCTION__, page); + + for (i = 0; i < nr_pages; i++) { + struct page *p = page + i; + + if (!PageCompound(p)) + bad_page(__FUNCTION__, page); + if (p->lru.next != (void *)page) + bad_page(__FUNCTION__, page); + ClearPageCompound(p); + } +} +#endif /* CONFIG_HUGETLB_PAGE */ + /* * Freeing function for a buddy system allocator. * @@ -114,6 +170,8 @@ static inline void __free_pages_bulk (struct page *page, struct page *base, { unsigned long page_idx, index; + if (order) + destroy_compound_page(page, order); page_idx = page - base; if (page_idx & ~mask) BUG(); @@ -409,6 +467,12 @@ void free_cold_page(struct page *page) free_hot_cold_page(page, 1); } +/* + * Really, prep_compound_page() should be called from __rmqueue_bulk(). But + * we cheat by calling it from here, in the order > 0 path. Saves a branch + * or two. 
+ */ + static struct page *buffered_rmqueue(struct zone *zone, int order, int cold) { unsigned long flags; @@ -435,6 +499,8 @@ static struct page *buffered_rmqueue(struct zone *zone, int order, int cold) spin_lock_irqsave(&zone->lock, flags); page = __rmqueue(zone, order); spin_unlock_irqrestore(&zone->lock, flags); + if (order && page) + prep_compound_page(page, order); } if (page != NULL) { -- cgit v1.2.3 From df79ea4004dd472d22b1ae21bb51f6b4ec3a312e Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 5 Feb 2003 16:58:57 -0800 Subject: [PATCH] hugetlb mremap fix If you attempt to perform a relocating 4k-aligned mremap and the new address for the map lands on top of a hugepage VMA, do_mremap() will attempt to perform a 4k-aligned unmap inside the hugetlb VMA. The hugetlb layer goes BUG. Fix that by trapping the poorly-aligned unmap attempt in do_munmap(). do_remap() will then fall through without having done anything to the place where it tests for a hugetlb VMA. It would be neater to perform these checks on entry to do_mremap(), but that would incur another VMA lookup. Also, if you attempt to perform a 4k-aligned and/or sized munmap() inside a hugepage VMA the same BUG happens. This patch fixes that too. This all means that an mremap attempt against a hugetlb area will fail, but only after having unmapped the source pages. That's a bit messy, but supporting hugetlb mremap doesn't seem worth it, and completely disallowing it will add overhead to normal mremaps. 
--- include/linux/hugetlb.h | 4 ++++ mm/mmap.c | 5 +++++ 2 files changed, 9 insertions(+) (limited to 'include/linux') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index b51d51d05190..370411eaaba2 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -58,6 +58,10 @@ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) #define follow_huge_pmd(mm, addr, pmd, write) 0 #define pmd_huge(x) 0 +#ifndef HPAGE_MASK +#define HPAGE_MASK 0 /* Keep the compiler happy */ +#endif + #endif /* !CONFIG_HUGETLB_PAGE */ #ifdef CONFIG_HUGETLBFS diff --git a/mm/mmap.c b/mm/mmap.c index d3b14b17da38..af3d4a272ad7 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1223,6 +1223,11 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) return 0; /* we have start < mpnt->vm_end */ + if (is_vm_hugetlb_page(mpnt)) { + if ((start & ~HPAGE_MASK) || (len & ~HPAGE_MASK)) + return -EINVAL; + } + /* if it doesn't overlap, we have nothing.. */ end = start + len; if (mpnt->vm_start >= end) -- cgit v1.2.3 From ecd2d2201d475af0f3aa822ba991463da33bf54f Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 5 Feb 2003 16:59:11 -0800 Subject: [PATCH] spinlock debugging on uniprocessors Patch from Manfred Spraul This enables spinlock debuggng on uniprocessor builds, under CONFIG_DEBUG_SPINLOCK. The reason I want this is that one day we'll need to pull out the debugging support from the timer code which detects uninitialised timers. And once that has gone, uniprocessor developers and testers have no way of detecting uninitialised timers - there will be mysterious deadlocks on SMP machines. And there will surely be more uninitialised timers The patch also removes the last pieces of the support for including directly. 
Doesn't work since (IIRC) 2.3.x --- include/linux/spinlock.h | 122 +++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 112 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 403033961628..a289a20a2484 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -37,30 +37,120 @@ #ifdef CONFIG_SMP #include -/* - * !CONFIG_SMP and spin_lock_init not previously defined - * (e.g. by including include/asm/spinlock.h) - */ -#elif !defined(spin_lock_init) +#else -#ifndef CONFIG_PREEMPT +#if !defined(CONFIG_PREEMPT) && !defined(CONFIG_DEBUG_SPINLOCK) # define atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic) # define ATOMIC_DEC_AND_LOCK #endif +#ifdef CONFIG_DEBUG_SPINLOCK + +#define SPINLOCK_MAGIC 0x1D244B3C +typedef struct { + unsigned long magic; + volatile unsigned long lock; + volatile unsigned int babble; + const char *module; + char *owner; + int oline; +} spinlock_t; +#define SPIN_LOCK_UNLOCKED (spinlock_t) { SPINLOCK_MAGIC, 0, 10, __FILE__ , NULL, 0} + +#define spin_lock_init(x) \ + do { \ + (x)->magic = SPINLOCK_MAGIC; \ + (x)->lock = 0; \ + (x)->babble = 5; \ + (x)->module = __FILE__; \ + (x)->owner = NULL; \ + (x)->oline = 0; \ + } while (0) + +#define CHECK_LOCK(x) \ + do { \ + if ((x)->magic != SPINLOCK_MAGIC) { \ + printk(KERN_ERR "%s:%d: spin_is_locked on uninitialized spinlock %p.\n", \ + __FILE__, __LINE__, (x)); \ + } \ + } while(0) + +#define _raw_spin_lock(x) \ + do { \ + CHECK_LOCK(x); \ + if ((x)->lock&&(x)->babble) { \ + printk("%s:%d: spin_lock(%s:%p) already locked by %s/%d\n", \ + __FILE__,__LINE__, (x)->module, \ + (x), (x)->owner, (x)->oline); \ + (x)->babble--; \ + } \ + (x)->lock = 1; \ + (x)->owner = __FILE__; \ + (x)->oline = __LINE__; \ + } while (0) + +/* without debugging, spin_is_locked on UP always says + * FALSE. --> printk if already locked. 
*/ +#define spin_is_locked(x) \ + ({ \ + CHECK_LOCK(x); \ + if ((x)->lock&&(x)->babble) { \ + printk("%s:%d: spin_is_locked(%s:%p) already locked by %s/%d\n", \ + __FILE__,__LINE__, (x)->module, \ + (x), (x)->owner, (x)->oline); \ + (x)->babble--; \ + } \ + 0; \ + }) + +/* without debugging, spin_trylock on UP always says + * TRUE. --> printk if already locked. */ +#define _raw_spin_trylock(x) \ + ({ \ + CHECK_LOCK(x); \ + if ((x)->lock&&(x)->babble) { \ + printk("%s:%d: spin_trylock(%s:%p) already locked by %s/%d\n", \ + __FILE__,__LINE__, (x)->module, \ + (x), (x)->owner, (x)->oline); \ + (x)->babble--; \ + } \ + (x)->lock = 1; \ + (x)->owner = __FILE__; \ + (x)->oline = __LINE__; \ + 1; \ + }) + +#define spin_unlock_wait(x) \ + do { \ + CHECK_LOCK(x); \ + if ((x)->lock&&(x)->babble) { \ + printk("%s:%d: spin_unlock_wait(%s:%p) owned by %s/%d\n", \ + __FILE__,__LINE__, (x)->module, (x), \ + (x)->owner, (x)->oline); \ + (x)->babble--; \ + }\ + } while (0) + +#define _raw_spin_unlock(x) \ + do { \ + CHECK_LOCK(x); \ + if (!(x)->lock&&(x)->babble) { \ + printk("%s:%d: spin_unlock(%s:%p) not locked\n", \ + __FILE__,__LINE__, (x)->module, (x));\ + (x)->babble--; \ + } \ + (x)->lock = 0; \ + } while (0) +#else /* * gcc versions before ~2.95 have a nasty bug with empty initializers. 
*/ #if (__GNUC__ > 2) typedef struct { } spinlock_t; - typedef struct { } rwlock_t; #define SPIN_LOCK_UNLOCKED (spinlock_t) { } - #define RW_LOCK_UNLOCKED (rwlock_t) { } #else typedef struct { int gcc_is_buggy; } spinlock_t; - typedef struct { int gcc_is_buggy; } rwlock_t; #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } - #define RW_LOCK_UNLOCKED (rwlock_t) { 0 } #endif /* @@ -72,6 +162,18 @@ #define _raw_spin_trylock(lock) ((void)(lock), 1) #define spin_unlock_wait(lock) do { (void)(lock); } while(0) #define _raw_spin_unlock(lock) do { (void)(lock); } while(0) +#endif /* CONFIG_DEBUG_SPINLOCK */ + +/* RW spinlocks: No debug version */ + +#if (__GNUC__ > 2) + typedef struct { } rwlock_t; + #define RW_LOCK_UNLOCKED (rwlock_t) { } +#else + typedef struct { int gcc_is_buggy; } rwlock_t; + #define RW_LOCK_UNLOCKED (rwlock_t) { 0 } +#endif + #define rwlock_init(lock) do { (void)(lock); } while(0) #define _raw_read_lock(lock) do { (void)(lock); } while(0) #define _raw_read_unlock(lock) do { (void)(lock); } while(0) -- cgit v1.2.3 From 9a3e1a9676060802c78d70332a18da28b8e1e480 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 5 Feb 2003 16:59:24 -0800 Subject: [PATCH] Fix signed use of i_blocks in ext3 truncate Patch from "Stephen C. Tweedie" Fix "h_buffer_credits<0" assert failure during truncate. The bug occurs when the "i_blocks" count in the file's inode overflows past 2^31. That works fine most of the time, because i_blocks is an unsigned long, and should go up to 2^32; but there's a place in truncate where ext3 calculates the size of the next transaction chunk for the delete, and that mistakenly uses a signed long instead. Because the huge i_blocks gets cast to a negative value, ext3 does not reserve enough credits for the transaction and the above error results. This is usually only possible on filesystems corrupted for other reasons, but it is reproducible if you create a single, non-sparse file larger than 1TB on ext3 and then try to delete it. 
--- fs/ext3/inode.c | 47 +++++++++++++++++++++++++++++++---------------- include/linux/ext3_jbd.h | 6 +++--- 2 files changed, 34 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index ccdb52c9cc77..24897acf33da 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c @@ -99,6 +99,34 @@ int ext3_forget(handle_t *handle, int is_metadata, return err; } +/* + * Work out how many blocks we need to progress with the next chunk of a + * truncate transaction. + */ + +static unsigned long blocks_for_truncate(struct inode *inode) +{ + unsigned long needed; + + needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9); + + /* Give ourselves just enough room to cope with inodes in which + * i_blocks is corrupt: we've seen disk corruptions in the past + * which resulted in random data in an inode which looked enough + * like a regular file for ext3 to try to delete it. Things + * will go a bit crazy if that happens, but at least we should + * try not to panic the whole kernel. */ + if (needed < 2) + needed = 2; + + /* But we need to bound the transaction so we don't overflow the + * journal. */ + if (needed > EXT3_MAX_TRANS_DATA) + needed = EXT3_MAX_TRANS_DATA; + + return EXT3_DATA_TRANS_BLOCKS + needed; +} + /* * Truncate transactions can be complex and absolutely huge. 
So we need to * be able to restart the transaction at a conventient checkpoint to make @@ -112,14 +140,9 @@ int ext3_forget(handle_t *handle, int is_metadata, static handle_t *start_transaction(struct inode *inode) { - long needed; handle_t *result; - needed = inode->i_blocks; - if (needed > EXT3_MAX_TRANS_DATA) - needed = EXT3_MAX_TRANS_DATA; - - result = ext3_journal_start(inode, EXT3_DATA_TRANS_BLOCKS + needed); + result = ext3_journal_start(inode, blocks_for_truncate(inode)); if (!IS_ERR(result)) return result; @@ -135,14 +158,9 @@ static handle_t *start_transaction(struct inode *inode) */ static int try_to_extend_transaction(handle_t *handle, struct inode *inode) { - long needed; - if (handle->h_buffer_credits > EXT3_RESERVE_TRANS_BLOCKS) return 0; - needed = inode->i_blocks; - if (needed > EXT3_MAX_TRANS_DATA) - needed = EXT3_MAX_TRANS_DATA; - if (!ext3_journal_extend(handle, EXT3_RESERVE_TRANS_BLOCKS + needed)) + if (!ext3_journal_extend(handle, blocks_for_truncate(inode))) return 0; return 1; } @@ -154,11 +172,8 @@ static int try_to_extend_transaction(handle_t *handle, struct inode *inode) */ static int ext3_journal_test_restart(handle_t *handle, struct inode *inode) { - long needed = inode->i_blocks; - if (needed > EXT3_MAX_TRANS_DATA) - needed = EXT3_MAX_TRANS_DATA; jbd_debug(2, "restarting handle %p\n", handle); - return ext3_journal_restart(handle, EXT3_DATA_TRANS_BLOCKS + needed); + return ext3_journal_restart(handle, blocks_for_truncate(inode)); } /* diff --git a/include/linux/ext3_jbd.h b/include/linux/ext3_jbd.h index 13508f6053b9..7ac910d15863 100644 --- a/include/linux/ext3_jbd.h +++ b/include/linux/ext3_jbd.h @@ -28,7 +28,7 @@ * indirection blocks, the group and superblock summaries, and the data * block to complete the transaction. */ -#define EXT3_SINGLEDATA_TRANS_BLOCKS 8 +#define EXT3_SINGLEDATA_TRANS_BLOCKS 8U /* Extended attributes may touch two data buffers, two bitmap buffers, * and two group and summaries. 
*/ @@ -58,7 +58,7 @@ extern int ext3_writepage_trans_blocks(struct inode *inode); * start off at the maximum transaction size and grow the transaction * optimistically as we go. */ -#define EXT3_MAX_TRANS_DATA 64 +#define EXT3_MAX_TRANS_DATA 64U /* We break up a large truncate or write transaction once the handle's * buffer credits gets this low, we need either to extend the @@ -67,7 +67,7 @@ extern int ext3_writepage_trans_blocks(struct inode *inode); * one block, plus two quota updates. Quota allocations are not * needed. */ -#define EXT3_RESERVE_TRANS_BLOCKS 12 +#define EXT3_RESERVE_TRANS_BLOCKS 12U #define EXT3_INDEX_EXTRA_TRANS_BLOCKS 8 -- cgit v1.2.3 From ebf5ebe31d2cd1e0f13e5b65deb0b4af7afd9dc1 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 5 Feb 2003 20:49:30 -0800 Subject: [PATCH] signal-fixes-2.5.59-A4 this is the current threading patchset, which accumulated up during the past two weeks. It consists of a biggest set of changes from Roland, to make threaded signals work. There were still tons of testcases and boundary conditions (mostly in the signal/exit/ptrace area) that we did not handle correctly. Roland's thread-signal semantics/behavior/ptrace fixes: - fix signal delivery race with do_exit() => signals are re-queued to the 'process' if do_exit() finds pending unhandled ones. This prevents signals getting lost upon thread-sys_exit(). - a non-main thread has died on one processor and gone to TASK_ZOMBIE, but before it's gotten to release_task a sys_wait4 on the other processor reaps it. It's only because it's ptraced that this gets through eligible_child. Somewhere in there the main thread is also dying so it reparents the child thread to hit that case. This means that there is a race where P might be totally invalid. - forget_original_parent is not doing the right thing when the group leader dies, i.e. reparenting threads to init when there is a zombie group leader. 
Perhaps it doesn't matter for any practical purpose without ptrace, though it makes for ppid=1 for each thread in core dumps, which looks funny. Incidentally, SIGCHLD here really should be p->exit_signal. - one of the gdb tests makes a questionable assumption about what kill will do when it has some threads stopped by ptrace and others running. exit races: 1. Processor A is in sys_wait4 case TASK_STOPPED considering task P. Processor B is about to resume P and then switch to it. While A is inside that case block, B starts running P and it clears P->exit_code, or takes a pending fatal signal and sets it to a new value. Depending on the interleaving, the possible failure modes are: a. A gets to its put_user after B has cleared P->exit_code => returns with WIFSTOPPED, WSTOPSIG==0 b. A gets to its put_user after B has set P->exit_code anew => returns with e.g. WIFSTOPPED, WSTOPSIG==SIGKILL A can spend an arbitrarily long time in that case block, because there's getrusage and put_user that can take page faults, and write_lock'ing of the tasklist_lock that can block. But even if it's short the race is there in principle. 2. This is new with NPTL, i.e. CLONE_THREAD. Two processors A and B are both in sys_wait4 case TASK_STOPPED considering task P. Both get through their tests and fetches of P->exit_code before either gets to P->exit_code = 0. => two threads return the same pid from waitpid. In other interleavings where one processor gets to its put_user after the other has cleared P->exit_code, it's like case 1(a). 3. SMP races with stop/cont signals First, take: kill(pid, SIGSTOP); kill(pid, SIGCONT); or: kill(pid, SIGSTOP); kill(pid, SIGKILL); It's possible for this to leave the process stopped with a pending SIGCONT/SIGKILL. That's a state that should never be possible. Moreover, kill(pid, SIGKILL) without any repetition should always be enough to kill a process. 
(Likewise SIGCONT when you know it's sequenced after the last stop signal, must be sufficient to resume a process.) 4. take: kill(pid, SIGKILL); // or any fatal signal kill(pid, SIGCONT); // or SIGKILL it's possible for this to cause pid to be reaped with status 0 instead of its true termination status. The equivalent scenario happens when the process being killed is in an _exit call or a trap-induced fatal signal before the kills. plus i've done stability fixes for bugs that popped up during beta-testing, and minor tidying of Roland's changes: - a rare tasklist corruption during exec, causing some very spurious and colorful crashes. - a copy_process()-related dereference of already freed thread structure if hit with a SIGKILL in the wrong moment. - SMP spinlock deadlocks in the signal code this patchset has been tested quite well in the 2.4 backport of the threading changes - and i've done some stresstesting on 2.5.59 SMP as well, and did an x86 UP testcompile + testboot as well. --- fs/exec.c | 6 +- include/linux/sched.h | 10 +- kernel/exit.c | 148 +++++-- kernel/fork.c | 24 +- kernel/signal.c | 1058 +++++++++++++++++++++++++++++-------------------- kernel/suspend.c | 3 +- 6 files changed, 779 insertions(+), 470 deletions(-) (limited to 'include/linux') diff --git a/fs/exec.c b/fs/exec.c index 028fbda85a71..0b41239937b7 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -587,7 +587,7 @@ static inline int de_thread(struct signal_struct *oldsig) return -EAGAIN; } oldsig->group_exit = 1; - __broadcast_thread_group(current, SIGKILL); + zap_other_threads(current); /* * Account for the thread group leader hanging around: @@ -659,7 +659,8 @@ static inline int de_thread(struct signal_struct *oldsig) current->ptrace = ptrace; __ptrace_link(current, parent); } - + + list_del(¤t->tasks); list_add_tail(¤t->tasks, &init_task.tasks); current->exit_signal = SIGCHLD; state = leader->state; @@ -680,6 +681,7 @@ out: newsig->group_exit = 0; newsig->group_exit_code = 0; 
newsig->group_exit_task = NULL; + newsig->group_stop_count = 0; memcpy(newsig->action, current->sig->action, sizeof(newsig->action)); init_sigpending(&newsig->shared_pending); diff --git a/include/linux/sched.h b/include/linux/sched.h index 648d4d3ace3c..d41f7a24fc14 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -235,6 +235,9 @@ struct signal_struct { int group_exit; int group_exit_code; struct task_struct *group_exit_task; + + /* thread group stop support, overloads group_exit_code too */ + int group_stop_count; }; /* @@ -508,7 +511,6 @@ extern int in_egroup_p(gid_t); extern void proc_caches_init(void); extern void flush_signals(struct task_struct *); extern void flush_signal_handlers(struct task_struct *); -extern void sig_exit(int, int, struct siginfo *); extern int dequeue_signal(sigset_t *mask, siginfo_t *info); extern void block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask); @@ -525,7 +527,7 @@ extern void do_notify_parent(struct task_struct *, int); extern void force_sig(int, struct task_struct *); extern void force_sig_specific(int, struct task_struct *); extern int send_sig(int, struct task_struct *, int); -extern int __broadcast_thread_group(struct task_struct *p, int sig); +extern void zap_other_threads(struct task_struct *p); extern int kill_pg(pid_t, int, int); extern int kill_sl(pid_t, int, int); extern int kill_proc(pid_t, int, int); @@ -590,6 +592,8 @@ extern void exit_files(struct task_struct *); extern void exit_sighand(struct task_struct *); extern void __exit_sighand(struct task_struct *); +extern NORET_TYPE void do_group_exit(int); + extern void reparent_to_init(void); extern void daemonize(void); extern task_t *child_reaper; @@ -762,6 +766,8 @@ static inline void cond_resched_lock(spinlock_t * lock) extern FASTCALL(void recalc_sigpending_tsk(struct task_struct *t)); extern void recalc_sigpending(void); +extern void signal_wake_up(struct task_struct *t, int resume_stopped); + /* * Wrappers for 
p->thread_info->cpu access. No-op on UP. */ diff --git a/kernel/exit.c b/kernel/exit.c index 057c562f62b1..25281033be8d 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -647,7 +647,7 @@ fake_volatile: exit_namespace(tsk); exit_thread(); - if (current->leader) + if (tsk->leader) disassociate_ctty(1); module_put(tsk->thread_info->exec_domain->module); @@ -657,8 +657,31 @@ fake_volatile: tsk->exit_code = code; exit_notify(); preempt_disable(); - if (current->exit_signal == -1) - release_task(current); + if (signal_pending(tsk) && !tsk->sig->group_exit + && !thread_group_empty(tsk)) { + /* + * This occurs when there was a race between our exit + * syscall and a group signal choosing us as the one to + * wake up. It could be that we are the only thread + * alerted to check for pending signals, but another thread + * should be woken now to take the signal since we will not. + * Now we'll wake all the threads in the group just to make + * sure someone gets all the pending signals. + */ + struct task_struct *t; + read_lock(&tasklist_lock); + spin_lock_irq(&tsk->sig->siglock); + for (t = next_thread(tsk); t != tsk; t = next_thread(t)) + if (!signal_pending(t) && !(t->flags & PF_EXITING)) { + recalc_sigpending_tsk(t); + if (signal_pending(t)) + signal_wake_up(t, 0); + } + spin_unlock_irq(&tsk->sig->siglock); + read_unlock(&tasklist_lock); + } + if (tsk->exit_signal == -1) + release_task(tsk); schedule(); BUG(); /* @@ -710,31 +733,44 @@ task_t *next_thread(task_t *p) } /* - * this kills every thread in the thread group. Note that any externally - * wait4()-ing process will get the correct exit code - even if this - * thread is not the thread group leader. + * Take down every thread in the group. This is called by fatal signals + * as well as by sys_exit_group (below). 
*/ -asmlinkage long sys_exit_group(int error_code) +NORET_TYPE void +do_group_exit(int exit_code) { - unsigned int exit_code = (error_code & 0xff) << 8; - - if (!thread_group_empty(current)) { - struct signal_struct *sig = current->sig; + BUG_ON(exit_code & 0x80); /* core dumps don't get here */ + if (current->sig->group_exit) + exit_code = current->sig->group_exit_code; + else if (!thread_group_empty(current)) { + struct signal_struct *const sig = current->sig; + read_lock(&tasklist_lock); spin_lock_irq(&sig->siglock); - if (sig->group_exit) { - spin_unlock_irq(&sig->siglock); - - /* another thread was faster: */ - do_exit(sig->group_exit_code); - } + if (sig->group_exit) + /* Another thread got here before we took the lock. */ + exit_code = sig->group_exit_code; + else { sig->group_exit = 1; sig->group_exit_code = exit_code; - __broadcast_thread_group(current, SIGKILL); + zap_other_threads(current); + } spin_unlock_irq(&sig->siglock); + read_unlock(&tasklist_lock); } do_exit(exit_code); + /* NOTREACHED */ +} + +/* + * this kills every thread in the thread group. Note that any externally + * wait4()-ing process will get the correct exit code - even if this + * thread is not the thread group leader. + */ +asmlinkage long sys_exit_group(int error_code) +{ + do_group_exit((error_code & 0xff) << 8); } static int eligible_child(pid_t pid, int options, task_t *p) @@ -800,6 +836,8 @@ repeat: int ret; list_for_each(_p,&tsk->children) { + int exit_code; + p = list_entry(_p,struct task_struct,sibling); ret = eligible_child(pid, options, p); @@ -813,20 +851,69 @@ repeat: continue; if (!(options & WUNTRACED) && !(p->ptrace & PT_PTRACED)) continue; + if (ret == 2 && !(p->ptrace & PT_PTRACED) && + p->sig && p->sig->group_stop_count > 0) + /* + * A group stop is in progress and + * we are the group leader. We won't + * report until all threads have + * stopped. 
+ */ + continue; read_unlock(&tasklist_lock); /* move to end of parent's list to avoid starvation */ write_lock_irq(&tasklist_lock); remove_parent(p); add_parent(p, p->parent); + + /* + * This uses xchg to be atomic with + * the thread resuming and setting it. + * It must also be done with the write + * lock held to prevent a race with the + * TASK_ZOMBIE case (below). + */ + exit_code = xchg(&p->exit_code, 0); + if (unlikely(p->state > TASK_STOPPED)) { + /* + * The task resumed and then died. + * Let the next iteration catch it + * in TASK_ZOMBIE. Note that + * exit_code might already be zero + * here if it resumed and did + * _exit(0). The task itself is + * dead and won't touch exit_code + * again; other processors in + * this function are locked out. + */ + p->exit_code = exit_code; + exit_code = 0; + } + if (unlikely(exit_code == 0)) { + /* + * Another thread in this function + * got to it first, or it resumed, + * or it resumed and then died. + */ + write_unlock_irq(&tasklist_lock); + continue; + } + /* + * Make sure this doesn't get reaped out from + * under us while we are examining it below. + * We don't want to keep holding onto the + * tasklist_lock while we call getrusage and + * possibly take page faults for user memory. + */ + get_task_struct(p); write_unlock_irq(&tasklist_lock); retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0; if (!retval && stat_addr) - retval = put_user((p->exit_code << 8) | 0x7f, stat_addr); - if (!retval) { - p->exit_code = 0; + retval = put_user((exit_code << 8) | 0x7f, stat_addr); + if (!retval) retval = p->pid; - } + put_task_struct(p); goto end_wait4; case TASK_ZOMBIE: /* @@ -841,6 +928,13 @@ repeat: state = xchg(&p->state, TASK_DEAD); if (state != TASK_ZOMBIE) continue; + if (unlikely(p->exit_signal == -1)) + /* + * This can only happen in a race with + * a ptraced thread dying on another + * processor. + */ + continue; read_unlock(&tasklist_lock); retval = ru ? 
getrusage(p, RUSAGE_BOTH, ru) : 0; @@ -857,11 +951,17 @@ repeat: retval = p->pid; if (p->real_parent != p->parent) { write_lock_irq(&tasklist_lock); + /* Double-check with lock held. */ + if (p->real_parent != p->parent) { __ptrace_unlink(p); - do_notify_parent(p, SIGCHLD); + do_notify_parent( + p, p->exit_signal); p->state = TASK_ZOMBIE; + p = NULL; + } write_unlock_irq(&tasklist_lock); - } else + } + if (p != NULL) release_task(p); goto end_wait4; default: diff --git a/kernel/fork.c b/kernel/fork.c index 4fc3fcd5dacb..c042b5a8eaec 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -680,6 +680,7 @@ static inline int copy_sighand(unsigned long clone_flags, struct task_struct * t sig->group_exit = 0; sig->group_exit_code = 0; sig->group_exit_task = NULL; + sig->group_stop_count = 0; memcpy(sig->action, current->sig->action, sizeof(sig->action)); sig->curr_target = NULL; init_sigpending(&sig->shared_pending); @@ -801,7 +802,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, spin_lock_init(&p->alloc_lock); spin_lock_init(&p->switch_lock); - clear_tsk_thread_flag(p,TIF_SIGPENDING); + clear_tsk_thread_flag(p, TIF_SIGPENDING); init_sigpending(&p->pending); p->it_real_value = p->it_virt_value = p->it_prof_value = 0; @@ -910,6 +911,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, */ if (sigismember(¤t->pending.signal, SIGKILL)) { write_unlock_irq(&tasklist_lock); + retval = -EINTR; goto bad_fork_cleanup_namespace; } @@ -934,6 +936,17 @@ static struct task_struct *copy_process(unsigned long clone_flags, } p->tgid = current->tgid; p->group_leader = current->group_leader; + + if (current->sig->group_stop_count > 0) { + /* + * There is an all-stop in progress for the group. + * We ourselves will stop as soon as we check signals. + * Make the new thread part of that group stop too. 
+ */ + current->sig->group_stop_count++; + set_tsk_thread_flag(p, TIF_SIGPENDING); + } + spin_unlock(¤t->sig->siglock); } @@ -1036,8 +1049,13 @@ struct task_struct *do_fork(unsigned long clone_flags, init_completion(&vfork); } - if (p->ptrace & PT_PTRACED) - send_sig(SIGSTOP, p, 1); + if (p->ptrace & PT_PTRACED) { + /* + * We'll start up with an immediate SIGSTOP. + */ + sigaddset(&p->pending.signal, SIGSTOP); + set_tsk_thread_flag(p, TIF_SIGPENDING); + } wake_up_forked_process(p); /* do this last */ ++total_forks; diff --git a/kernel/signal.c b/kernel/signal.c index 7c485d01a4b0..809ea104b63f 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -55,7 +55,7 @@ int max_queued_signals = 1024; | SIGALRM | load-balance | kill-all | | SIGTERM | load-balance | kill-all | | SIGCHLD | load-balance | ignore | -| SIGCONT | specific | continue-all | +| SIGCONT | load-balance | ignore | | SIGSTOP | n/a | stop-all | | SIGTSTP | load-balance | stop-all | | SIGTTIN | load-balance | stop-all | @@ -98,26 +98,11 @@ int max_queued_signals = 1024; #endif #if SIGRTMIN > BITS_PER_LONG -#define M(sig) (1ULL << (sig)) +#define M(sig) (1ULL << ((sig)-1)) #else -#define M(sig) (1UL << (sig)) +#define M(sig) (1UL << ((sig)-1)) #endif -#define T(sig, mask) (M(sig) & mask) - -#define SIG_USER_SPECIFIC_MASK (\ - M(SIGILL) | M(SIGTRAP) | M(SIGABRT) | M(SIGBUS) | \ - M(SIGFPE) | M(SIGSEGV) | M(SIGPIPE) | M(SIGXFSZ) | \ - M(SIGPROF) | M(SIGSYS) | M_SIGSTKFLT | M(SIGCONT) | \ - M_SIGEMT ) - -#define SIG_USER_LOAD_BALANCE_MASK (\ - M(SIGHUP) | M(SIGINT) | M(SIGQUIT) | M(SIGUSR1) | \ - M(SIGUSR2) | M(SIGALRM) | M(SIGTERM) | M(SIGCHLD) | \ - M(SIGURG) | M(SIGVTALRM) | M(SIGPOLL) | M(SIGWINCH) | \ - M(SIGPWR) | M(SIGTSTP) | M(SIGTTIN) | M(SIGTTOU) ) - -#define SIG_KERNEL_SPECIFIC_MASK (\ - M(SIGCHLD) | M(SIGURG) | M(SIGWINCH) ) +#define T(sig, mask) (M(sig) & (mask)) #define SIG_KERNEL_BROADCAST_MASK (\ M(SIGHUP) | M(SIGINT) | M(SIGQUIT) | M(SIGILL) | \ @@ -132,34 +117,37 @@ int max_queued_signals = 
1024; #define SIG_KERNEL_ONLY_MASK (\ M(SIGKILL) | M(SIGSTOP) ) +#define SIG_KERNEL_STOP_MASK (\ + M(SIGSTOP) | M(SIGTSTP) | M(SIGTTIN) | M(SIGTTOU) ) + #define SIG_KERNEL_COREDUMP_MASK (\ M(SIGQUIT) | M(SIGILL) | M(SIGTRAP) | M(SIGABRT) | \ M(SIGFPE) | M(SIGSEGV) | M(SIGBUS) | M(SIGSYS) | \ M(SIGXCPU) | M(SIGXFSZ) | M_SIGEMT ) -#define sig_user_specific(sig) \ - (((sig) < SIGRTMIN) && T(sig, SIG_USER_SPECIFIC_MASK)) -#define sig_user_load_balance(sig) \ - (((sig) >= SIGRTMIN) || T(sig, SIG_USER_LOAD_BALANCE_MASK)) -#define sig_kernel_specific(sig) \ - (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_SPECIFIC_MASK)) -#define sig_kernel_broadcast(sig) \ - (((sig) >= SIGRTMIN) || T(sig, SIG_KERNEL_BROADCAST_MASK)) +#define SIG_KERNEL_IGNORE_MASK (\ + M(SIGCONT) | M(SIGCHLD) | M(SIGWINCH) | M(SIGURG) ) + #define sig_kernel_only(sig) \ (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK)) #define sig_kernel_coredump(sig) \ (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK)) +#define sig_kernel_ignore(sig) \ + (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_IGNORE_MASK)) +#define sig_kernel_stop(sig) \ + (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK)) -#define sig_user_defined(t, sig) \ - (((t)->sig->action[(sig)-1].sa.sa_handler != SIG_DFL) && \ - ((t)->sig->action[(sig)-1].sa.sa_handler != SIG_IGN)) +#define sig_user_defined(t, signr) \ + (((t)->sig->action[(signr)-1].sa.sa_handler != SIG_DFL) && \ + ((t)->sig->action[(signr)-1].sa.sa_handler != SIG_IGN)) -#define sig_ignored(t, sig) \ - (((sig) != SIGCHLD) && \ - ((t)->sig->action[(sig)-1].sa.sa_handler == SIG_IGN)) +#define sig_ignored(t, signr) \ + (!((t)->ptrace & PT_PTRACED) && \ + (t)->sig->action[(signr)-1].sa.sa_handler == SIG_IGN) -static int -__send_sig_info(int sig, struct siginfo *info, struct task_struct *p); +#define sig_fatal(t, signr) \ + (!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \ + (t)->sig->action[(signr)-1].sa.sa_handler == SIG_DFL) /* * Re-calculate pending state from the set of locally 
pending @@ -193,9 +181,10 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked) #define PENDING(p,b) has_pending_signals(&(p)->signal, (b)) -void recalc_sigpending_tsk(struct task_struct *t) +inline void recalc_sigpending_tsk(struct task_struct *t) { - if (PENDING(&t->pending, &t->blocked) || + if (t->sig->group_stop_count > 0 || + PENDING(&t->pending, &t->blocked) || PENDING(&t->sig->shared_pending, &t->blocked)) set_tsk_thread_flag(t, TIF_SIGPENDING); else @@ -204,11 +193,7 @@ void recalc_sigpending_tsk(struct task_struct *t) void recalc_sigpending(void) { - if (PENDING(¤t->pending, ¤t->blocked) || - PENDING(¤t->sig->shared_pending, ¤t->blocked)) - set_thread_flag(TIF_SIGPENDING); - else - clear_thread_flag(TIF_SIGPENDING); + recalc_sigpending_tsk(current); } /* Given the mask, find the first available signal that should be serviced. */ @@ -337,23 +322,6 @@ flush_signal_handlers(struct task_struct *t) } } -/* - * sig_exit - cause the current task to exit due to a signal. - */ - -void -sig_exit(int sig, int exit_code, struct siginfo *info) -{ - sigaddset(¤t->pending.signal, sig); - recalc_sigpending(); - current->flags |= PF_SIGNALED; - - if (current->sig->group_exit) - exit_code = current->sig->group_exit_code; - - do_exit(exit_code); - /* NOTREACHED */ -} /* Notify the system that a driver wants to block all signals for this * process, and wants to be notified if any signals at all were to be @@ -473,32 +441,74 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, */ int dequeue_signal(sigset_t *mask, siginfo_t *info) { + int signr = __dequeue_signal(¤t->pending, mask, info); + if (!signr) + signr = __dequeue_signal(¤t->sig->shared_pending, + mask, info); + return signr; +} + +/* + * Tell a process that it has a new active signal.. + * + * NOTE! we rely on the previous spin_lock to + * lock interrupts for us! 
We can only be called with + * "siglock" held, and the local interrupt must + * have been disabled when that got acquired! + * + * No need to set need_resched since signal event passing + * goes through ->blocked + */ +inline void signal_wake_up(struct task_struct *t, int resume) +{ + set_tsk_thread_flag(t,TIF_SIGPENDING); + + /* + * If the task is running on a different CPU + * force a reschedule on the other CPU to make + * it notice the new signal quickly. + * + * The code below is a tad loose and might occasionally + * kick the wrong CPU if we catch the process in the + * process of changing - but no harm is done by that + * other than doing an extra (lightweight) IPI interrupt. + */ + if (t->state == TASK_RUNNING) + kick_if_running(t); /* - * Here we handle shared pending signals. To implement the full - * semantics we need to unqueue and resend them. It will likely - * get into our own pending queue. + * If resume is set, we want to wake it up in the TASK_STOPPED case. + * We don't check for TASK_STOPPED because there is a race with it + * executing another processor and just now entering stopped state. + * By calling wake_up_process any time resume is set, we ensure + * the process will wake up and handle its stop or death signal. */ - if (current->sig->shared_pending.head) { - int signr = __dequeue_signal(&current->sig->shared_pending, mask, info); - if (signr) - __send_sig_info(signr, info, current); + if ((t->state & TASK_INTERRUPTIBLE) || + (resume && t->state < TASK_ZOMBIE)) { + wake_up_process(t); + return; } - return __dequeue_signal(&current->pending, mask, info); } -static int rm_from_queue(int sig, struct sigpending *s) +/* + * Remove signals in mask from the pending set and queue. + * Returns 1 if any signals were found. + * + * All callers must be holding the siglock.
+ */ +static int rm_from_queue(unsigned long mask, struct sigpending *s) { struct sigqueue *q, **pp; - if (!sigismember(&s->signal, sig)) + if (!sigtestsetmask(&s->signal, mask)) return 0; - sigdelset(&s->signal, sig); + sigdelsetmask(&s->signal, mask); pp = &s->head; while ((q = *pp) != NULL) { - if (q->info.si_signo == sig) { + if (q->info.si_signo < SIGRTMIN && + (mask & sigmask (q->info.si_signo))) { if ((*pp = q->next) == NULL) s->tail = pp; kmem_cache_free(sigqueue_cachep,q); @@ -510,112 +520,101 @@ static int rm_from_queue(int sig, struct sigpending *s) return 1; } -/* - * Remove signal sig from t->pending. - * Returns 1 if sig was found. - * - * All callers must be holding the siglock. - */ -static int rm_sig_from_queue(int sig, struct task_struct *t) -{ - return rm_from_queue(sig, &t->pending); -} - /* * Bad permissions for sending the signal */ -static inline int bad_signal(int sig, struct siginfo *info, struct task_struct *t) +static inline int check_kill_permission(int sig, struct siginfo *info, + struct task_struct *t) { - return (!info || ((unsigned long)info != 1 && + int error = -EINVAL; + if (sig < 0 || sig > _NSIG) + return error; + error = -EPERM; + if ((!info || ((unsigned long)info != 1 && (unsigned long)info != 2 && SI_FROMUSER(info))) && ((sig != SIGCONT) || (current->session != t->session)) && (current->euid ^ t->suid) && (current->euid ^ t->uid) && (current->uid ^ t->suid) && (current->uid ^ t->uid) - && !capable(CAP_KILL); + && !capable(CAP_KILL)) + return error; + return security_task_kill(t, info, sig); } +/* forward decl */ +static void do_notify_parent_cldstop(struct task_struct *tsk, + struct task_struct *parent); + /* - * Signal type: - * < 0 : global action (kill - spread to all non-blocked threads) - * = 0 : ignored - * > 0 : wake up. + * Handle magic process-wide effects of stop/continue signals, and SIGKILL. 
+ * Unlike the signal actions, these happen immediately at signal-generation + * time regardless of blocking, ignoring, or handling. This does the + * actual continuing for SIGCONT, but not the actual stopping for stop + * signals. The process stop is done as a signal action for SIG_DFL. */ -static int signal_type(int sig, struct signal_struct *signals) +static void handle_stop_signal(int sig, struct task_struct *p) { - unsigned long handler; - - if (!signals) - return 0; - - handler = (unsigned long) signals->action[sig-1].sa.sa_handler; - if (handler > 1) - return 1; - - /* "Ignore" handler.. Illogical, but that has an implicit handler for SIGCHLD */ - if (handler == 1) - return sig == SIGCHLD; - - /* Default handler. Normally lethal, but.. */ - switch (sig) { - - /* Ignored */ - case SIGCONT: case SIGWINCH: - case SIGCHLD: case SIGURG: - return 0; - - /* Implicit behaviour */ - case SIGTSTP: case SIGTTIN: case SIGTTOU: - return 1; + struct task_struct *t; - /* Implicit actions (kill or do special stuff) */ - default: - return -1; + if (sig_kernel_stop(sig)) { + /* + * This is a stop signal. Remove SIGCONT from all queues. + */ + rm_from_queue(sigmask(SIGCONT), &p->sig->shared_pending); + t = p; + do { + rm_from_queue(sigmask(SIGCONT), &t->pending); + t = next_thread(t); + } while (t != p); } -} - - -/* - * Determine whether a signal should be posted or not. - * - * Signals with SIG_IGN can be ignored, except for the - * special case of a SIGCHLD. - * - * Some signals with SIG_DFL default to a non-action. + else if (sig == SIGCONT) { + /* + * Remove all stop signals from all queues, + * and wake all threads. */ -static int ignored_signal(int sig, struct task_struct *t) -{ - /* Don't ignore traced or blocked signals */ - if ((t->ptrace & PT_PTRACED) || sigismember(&t->blocked, sig)) - return 0; - - return signal_type(sig, t->sig) == 0; -} - -/* - * Handle TASK_STOPPED cases etc implicit behaviour - * of certain magical signals. 
- * - * SIGKILL gets spread out to every thread. + if (unlikely(p->sig->group_stop_count > 0)) { + /* + * There was a group stop in progress. We'll + * pretend it finished before we got here. We are + * obliged to report it to the parent: if the + * SIGSTOP happened "after" this SIGCONT, then it + * would have cleared this pending SIGCONT. If it + * happened "before" this SIGCONT, then the parent + * got the SIGCHLD about the stop finishing before + * the continue happened. We do the notification + * now, and it's as if the stop had finished and + * the SIGCHLD was pending on entry to this kill. + */ + p->sig->group_stop_count = 0; + if (p->ptrace & PT_PTRACED) + do_notify_parent_cldstop(p, p->parent); + else + do_notify_parent_cldstop( + p->group_leader, + p->group_leader->real_parent); + } + rm_from_queue(SIG_KERNEL_STOP_MASK, &p->sig->shared_pending); + t = p; + do { + rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending); + if (t->state == TASK_STOPPED) { + /* + * If there is a handler for SIGCONT, we + * must make sure that no thread returns to + * user mode before we post the signal, in + * case it was the only thread eligible to + * run the signal handler--then it must not + * do anything between resuming and running + * the handler. With the TIF_SIGPENDING flag + * set, the thread will pause and acquire the + * siglock that we hold now and until we've + * queued the pending signal. */ -static void handle_stop_signal(int sig, struct task_struct *t) -{ - switch (sig) { - case SIGKILL: case SIGCONT: - /* Wake up the process if stopped. 
*/ - if (t->state == TASK_STOPPED) - wake_up_process(t); - t->exit_code = 0; - rm_sig_from_queue(SIGSTOP, t); - rm_sig_from_queue(SIGTSTP, t); - rm_sig_from_queue(SIGTTOU, t); - rm_sig_from_queue(SIGTTIN, t); - break; - - case SIGSTOP: case SIGTSTP: - case SIGTTIN: case SIGTTOU: - /* If we're stopping again, cancel SIGCONT */ - rm_sig_from_queue(SIGCONT, t); - break; + if (sig_user_defined(p, SIGCONT)) + set_tsk_thread_flag(t, TIF_SIGPENDING); + wake_up_process(t); + } + t = next_thread(t); + } while (t != p); } } @@ -678,51 +677,12 @@ out_set: return 0; } -/* - * Tell a process that it has a new active signal.. - * - * NOTE! we rely on the previous spin_lock to - * lock interrupts for us! We can only be called with - * "siglock" held, and the local interrupt must - * have been disabled when that got acquired! - * - * No need to set need_resched since signal event passing - * goes through ->blocked - */ -inline void signal_wake_up(struct task_struct *t) -{ - set_tsk_thread_flag(t,TIF_SIGPENDING); - - /* - * If the task is running on a different CPU - * force a reschedule on the other CPU to make - * it notice the new signal quickly. - * - * The code below is a tad loose and might occasionally - * kick the wrong CPU if we catch the process in the - * process of changing - but no harm is done by that - * other than doing an extra (lightweight) IPI interrupt. 
- */ - if (t->state == TASK_RUNNING) - kick_if_running(t); - if (t->state & TASK_INTERRUPTIBLE) { - wake_up_process(t); - return; - } -} - -static int deliver_signal(int sig, struct siginfo *info, struct task_struct *t) -{ - int retval = send_signal(sig, info, &t->pending); - - if (!retval && !sigismember(&t->blocked, sig)) - signal_wake_up(t); +#define LEGACY_QUEUE(sigptr, sig) \ + (((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig))) - return retval; -} static int -specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t, int shared) +specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t) { int ret; @@ -732,49 +692,21 @@ specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t, int if (!spin_is_locked(&t->sig->siglock)) BUG(); #endif - ret = -EINVAL; - if (sig < 0 || sig > _NSIG) - goto out; - /* The somewhat baroque permissions check... */ - ret = -EPERM; - if (bad_signal(sig, info, t)) - goto out; - ret = security_task_kill(t, info, sig); - if (ret) - goto out; - - /* The null signal is a permissions and process existence probe. - No signal is actually delivered. Same goes for zombies. */ - ret = 0; - if (!sig || !t->sig) - goto out; - handle_stop_signal(sig, t); - - /* Optimize away the signal, if it's a signal that can be - handled immediately (ie non-blocked and untraced) and - that is ignored (either explicitly or by default). */ - - if (ignored_signal(sig, t)) - goto out; - -#define LEGACY_QUEUE(sigptr, sig) \ - (((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig))) + /* Short-circuit ignored signals. */ + if (sig_ignored(t, sig)) + return 0; - if (!shared) { /* Support queueing exactly one non-rt signal, so that we can get more detailed information about the cause of the signal. 
*/ if (LEGACY_QUEUE(&t->pending, sig)) - goto out; + return 0; + + ret = send_signal(sig, info, &t->pending); + if (!ret && !sigismember(&t->blocked, sig)) + signal_wake_up(t, sig == SIGKILL); - ret = deliver_signal(sig, info, t); - } else { - if (LEGACY_QUEUE(&t->sig->shared_pending, sig)) - goto out; - ret = send_signal(sig, info, &t->sig->shared_pending); - } -out: return ret; } @@ -794,26 +726,12 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t) t->sig->action[sig-1].sa.sa_handler = SIG_DFL; sigdelset(&t->blocked, sig); recalc_sigpending_tsk(t); - ret = __send_sig_info(sig, info, t); + ret = specific_send_sig_info(sig, info, t); spin_unlock_irqrestore(&t->sig->siglock, flags); return ret; } -static int -__specific_force_sig_info(int sig, struct task_struct *t) -{ - if (!t->sig) - return -ESRCH; - - if (t->sig->action[sig-1].sa.sa_handler == SIG_IGN) - t->sig->action[sig-1].sa.sa_handler = SIG_DFL; - sigdelset(&t->blocked, sig); - recalc_sigpending_tsk(t); - - return specific_send_sig_info(sig, (void *)2, t, 0); -} - void force_sig_specific(int sig, struct task_struct *t) { @@ -824,157 +742,182 @@ force_sig_specific(int sig, struct task_struct *t) t->sig->action[sig-1].sa.sa_handler = SIG_DFL; sigdelset(&t->blocked, sig); recalc_sigpending_tsk(t); - specific_send_sig_info(sig, (void *)2, t, 0); + specific_send_sig_info(sig, (void *)2, t); spin_unlock_irqrestore(&t->sig->siglock, flags); } -#define can_take_signal(p, sig) \ - (((unsigned long) p->sig->action[sig-1].sa.sa_handler > 1) && \ - !sigismember(&p->blocked, sig) && (task_curr(p) || !signal_pending(p))) +/* + * Test if P wants to take SIG. After we've checked all threads with this, + * it's equivalent to finding no threads not blocking SIG. Any threads not + * blocking SIG were ruled out because they are not running and already + * have pending signals. 
Such threads will dequeue from the shared queue + * as soon as they're available, so putting the signal on the shared queue + * will be equivalent to sending it to one such thread. + */ +#define wants_signal(sig, p) (!sigismember(&(p)->blocked, sig) \ + && (p)->state < TASK_STOPPED \ + && !((p)->flags & PF_EXITING) \ + && (task_curr(p) || !signal_pending(p))) -static inline -int load_balance_thread_group(struct task_struct *p, int sig, - struct siginfo *info) +static inline int +__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) { - struct task_struct *tmp; + struct task_struct *t; int ret; +#if CONFIG_SMP + if (!spin_is_locked(&p->sig->siglock)) + BUG(); +#endif + handle_stop_signal(sig, p); + + /* Short-circuit ignored signals. */ + if (sig_ignored(p, sig)) + return 0; + + if (LEGACY_QUEUE(&p->sig->shared_pending, sig)) + /* This is a non-RT signal and we already have one queued. */ + return 0; + /* - * if the specified thread is not blocking this signal - * then deliver it. + * Put this signal on the shared-pending queue, or fail with EAGAIN. + * We always use the shared queue for process-wide signals, + * to avoid several races. */ - if (can_take_signal(p, sig)) - return specific_send_sig_info(sig, info, p, 0); + ret = send_signal(sig, info, &p->sig->shared_pending); + if (unlikely(ret)) + return ret; /* + * Now find a thread we can wake up to take the signal off the queue. + * + * If the main thread wants the signal, it gets first crack. + * Probably the least surprising to the average bear. + */ + if (p->state < TASK_ZOMBIE && + (sig_kernel_only(sig) || wants_signal(sig, p))) + t = p; + else if (thread_group_empty(p)) + /* + * There is just one thread and it does not need to be woken. + * It will dequeue unblocked signals before it runs again. + */ + return 0; + else { + /* * Otherwise try to find a suitable thread. - * If no such thread is found then deliver to - * the original thread. 
*/ - - tmp = p->sig->curr_target; - - if (!tmp || tmp->tgid != p->tgid) + t = p->sig->curr_target; + if (t == NULL) /* restart balancing at this thread */ - p->sig->curr_target = p; - - else for (;;) { - if (thread_group_empty(p)) - BUG(); - if (!tmp || tmp->tgid != p->tgid) - BUG(); + t = p->sig->curr_target = p; + BUG_ON(t->tgid != p->tgid); + while (!wants_signal(sig, t)) { + t = next_thread(t); + if (t == p->sig->curr_target) /* - * Do not send signals that are ignored or blocked, - * or to not-running threads that are overworked: + * No thread needs to be woken. + * Any eligible threads will see + * the signal in the queue soon. */ - if (!can_take_signal(tmp, sig)) { - tmp = next_thread(tmp); - p->sig->curr_target = tmp; - if (tmp == p) - break; - continue; + return 0; } - ret = specific_send_sig_info(sig, info, tmp, 0); - return ret; + p->sig->curr_target = t; } + /* - * No suitable thread was found - put the signal - * into the shared-pending queue. + * Found a killable thread. If the signal will be fatal, + * then start taking the whole group down immediately. */ - return specific_send_sig_info(sig, info, p, 1); -} - -int __broadcast_thread_group(struct task_struct *p, int sig) -{ - struct task_struct *tmp; - struct list_head *l; - struct pid *pid; - int err = 0; - - for_each_task_pid(p->tgid, PIDTYPE_TGID, tmp, l, pid) - err = __specific_force_sig_info(sig, tmp); - - return err; -} + if (sig_fatal(p, sig) && !p->sig->group_exit && + !sigismember(&t->real_blocked, sig) && + (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) { + /* + * This signal will be fatal to the whole group. + */ + if (!sig_kernel_coredump(sig)) { + /* + * Start a group exit and wake everybody up. + * This way we don't have other threads + * running and doing things after a slower + * thread has the fatal signal pending. 
+ */ + p->sig->group_exit = 1; + p->sig->group_exit_code = sig; + p->sig->group_stop_count = 0; + t = p; + do { + sigaddset(&t->pending.signal, SIGKILL); + signal_wake_up(t, 1); + t = next_thread(t); + } while (t != p); + return 0; + } -struct task_struct * find_unblocked_thread(struct task_struct *p, int signr) -{ - struct task_struct *tmp; - struct list_head *l; - struct pid *pid; + /* + * There will be a core dump. We make all threads other + * than the chosen one go into a group stop so that nothing + * happens until it gets scheduled, takes the signal off + * the shared queue, and does the core dump. This is a + * little more complicated than strictly necessary, but it + * keeps the signal state that winds up in the core dump + * unchanged from the death state, e.g. which thread had + * the core-dump signal unblocked. + */ + rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending); + rm_from_queue(SIG_KERNEL_STOP_MASK, &p->sig->shared_pending); + p->sig->group_stop_count = 0; + p->sig->group_exit_task = t; + t = p; + do { + p->sig->group_stop_count++; + signal_wake_up(t, 0); + t = next_thread(t); + } while (t != p); + wake_up_process(p->sig->group_exit_task); + return 0; + } - for_each_task_pid(p->tgid, PIDTYPE_TGID, tmp, l, pid) - if (!sigismember(&tmp->blocked, signr)) - return tmp; - return NULL; + /* + * The signal is already in the shared-pending queue. + * Tell the chosen thread to wake up and dequeue it. + */ + signal_wake_up(t, sig == SIGKILL); + return 0; } -static int -__send_sig_info(int sig, struct siginfo *info, struct task_struct *p) +/* + * Nuke all other threads in the group. 
+ */ +void zap_other_threads(struct task_struct *p) { struct task_struct *t; - int ret = 0; - -#if CONFIG_SMP - if (!spin_is_locked(&p->sig->siglock)) - BUG(); -#endif - /* not a thread group - normal signal behavior */ - if (thread_group_empty(p) || !sig) - goto out_send; - - if (sig_user_defined(p, sig)) { - if (sig_user_specific(sig)) - goto out_send; - if (sig_user_load_balance(sig)) { - ret = load_balance_thread_group(p, sig, info); - goto out_unlock; - } - /* must not happen */ - BUG(); - } - /* optimize away ignored signals: */ - if (sig_ignored(p, sig)) - goto out_unlock; + p->sig->group_stop_count = 0; - if (sig_kernel_specific(sig) || - ((p->ptrace & PT_PTRACED) && !sig_kernel_only(sig))) - goto out_send; + if (thread_group_empty(p)) + return; - /* Does any of the threads unblock the signal? */ - t = find_unblocked_thread(p, sig); - if (!t) { - ret = specific_send_sig_info(sig, info, p, 1); - goto out_unlock; - } - if (sigismember(&t->real_blocked,sig)) { - ret = specific_send_sig_info(sig, info, t, 0); - goto out_unlock; + for (t = next_thread(p); t != p; t = next_thread(t)) { + sigaddset(&t->pending.signal, SIGKILL); + rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending); + signal_wake_up(t, 1); } - if (sig_kernel_broadcast(sig) || sig_kernel_coredump(sig)) { - ret = __broadcast_thread_group(p, sig); - goto out_unlock; - } - - /* must not happen */ - BUG(); -out_send: - ret = specific_send_sig_info(sig, info, p, 0); -out_unlock: - return ret; } int -send_sig_info(int sig, struct siginfo *info, struct task_struct *p) +group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) { unsigned long flags; int ret; - spin_lock_irqsave(&p->sig->siglock, flags); - ret = __send_sig_info(sig, info, p); - spin_unlock_irqrestore(&p->sig->siglock, flags); + ret = check_kill_permission(sig, info, p); + if (!ret && sig && p->sig) { + spin_lock_irqsave(&p->sig->siglock, flags); + ret = __group_send_sig_info(sig, info, p); + 
spin_unlock_irqrestore(&p->sig->siglock, flags); + } return ret; } @@ -995,7 +938,7 @@ int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp) return -EINVAL; for_each_task_pid(pgrp, PIDTYPE_PGID, p, l, pid) { - err = send_sig_info(sig, info, p); + err = group_send_sig_info(sig, info, p); if (retval) retval = err; } @@ -1037,7 +980,7 @@ kill_sl_info(int sig, struct siginfo *info, pid_t sid) for_each_task_pid(sid, PIDTYPE_SID, p, l, pid) { if (!p->leader) continue; - err = send_sig_info(sig, info, p); + err = group_send_sig_info(sig, info, p); if (retval) retval = err; } @@ -1056,7 +999,7 @@ kill_proc_info(int sig, struct siginfo *info, pid_t pid) p = find_task_by_pid(pid); error = -ESRCH; if (p) - error = send_sig_info(sig, info, p); + error = group_send_sig_info(sig, info, p); read_unlock(&tasklist_lock); return error; } @@ -1079,8 +1022,8 @@ static int kill_something_info(int sig, struct siginfo *info, int pid) read_lock(&tasklist_lock); for_each_process(p) { - if (p->pid > 1 && p != current) { - int err = send_sig_info(sig, info, p); + if (p->pid > 1 && p->tgid != current->tgid) { + int err = group_send_sig_info(sig, info, p); ++count; if (err != -EPERM) retval = err; @@ -1099,6 +1042,22 @@ static int kill_something_info(int sig, struct siginfo *info, int pid) * These are for backward compatibility with the rest of the kernel source. */ +int +send_sig_info(int sig, struct siginfo *info, struct task_struct *p) +{ + /* XXX should nix these interfaces and update the kernel */ + if (T(sig, SIG_KERNEL_BROADCAST_MASK)) + /* XXX do callers really always hold the tasklist_lock?? */ + return group_send_sig_info(sig, info, p); + else { + int error; + spin_lock_irq(&p->sig->siglock); + error = specific_send_sig_info(sig, info, p); + spin_unlock_irq(&p->sig->siglock); + return error; + } +} + int send_sig(int sig, struct task_struct *p, int priv) { @@ -1133,9 +1092,10 @@ kill_proc(pid_t pid, int sig, int priv) * Joy. Or not. 
Pthread wants us to wake up every thread * in our parent group. */ -static inline void __wake_up_parent(struct task_struct *p) +static inline void __wake_up_parent(struct task_struct *p, + struct task_struct *parent) { - struct task_struct *parent = p->parent, *tsk = parent; + struct task_struct *tsk = parent; /* * Fortunately this is not necessary for thread groups: @@ -1162,6 +1122,7 @@ void do_notify_parent(struct task_struct *tsk, int sig) struct siginfo info; unsigned long flags; int why, status; + struct signal_struct *psig; if (sig == -1) BUG(); @@ -1200,10 +1161,34 @@ void do_notify_parent(struct task_struct *tsk, int sig) info.si_code = why; info.si_status = status; - spin_lock_irqsave(&tsk->parent->sig->siglock, flags); - __send_sig_info(sig, &info, tsk->parent); - __wake_up_parent(tsk); - spin_unlock_irqrestore(&tsk->parent->sig->siglock, flags); + psig = tsk->parent->sig; + spin_lock_irqsave(&psig->siglock, flags); + if (sig == SIGCHLD && tsk->state != TASK_STOPPED && + (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN || + (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) { + /* + * We are exiting and our parent doesn't care. POSIX.1 + * defines special semantics for setting SIGCHLD to SIG_IGN + * or setting the SA_NOCLDWAIT flag: we should be reaped + * automatically and not left for our parent's wait4 call. + * Rather than having the parent do it as a magic kind of + * signal handler, we just set this to tell do_exit that we + * can be cleaned up without becoming a zombie. Note that + * we still call __wake_up_parent in this case, because a + * blocked sys_wait4 might now return -ECHILD. + * + * Whether we send SIGCHLD or not for SA_NOCLDWAIT + * is implementation-defined: we do (if you don't want + * it, just use SIG_IGN instead). 
+ */ + tsk->exit_signal = -1; + if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) + sig = 0; + } + if (sig > 0 && sig <= _NSIG) + __group_send_sig_info(sig, &info, tsk->parent); + __wake_up_parent(tsk, tsk->parent); + spin_unlock_irqrestore(&psig->siglock, flags); } @@ -1224,6 +1209,149 @@ notify_parent(struct task_struct *tsk, int sig) } } +static void +do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent) +{ + struct siginfo info; + unsigned long flags; + + info.si_signo = SIGCHLD; + info.si_errno = 0; + info.si_pid = tsk->pid; + info.si_uid = tsk->uid; + + /* FIXME: find out whether or not this is supposed to be c*time. */ + info.si_utime = tsk->utime; + info.si_stime = tsk->stime; + + info.si_status = tsk->exit_code & 0x7f; + info.si_code = CLD_STOPPED; + + spin_lock_irqsave(&parent->sig->siglock, flags); + if (parent->sig->action[SIGCHLD-1].sa.sa_handler != SIG_IGN && + !(parent->sig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP)) + __group_send_sig_info(SIGCHLD, &info, parent); + /* + * Even if SIGCHLD is not generated, we must wake up wait4 calls. + */ + __wake_up_parent(tsk, parent); + spin_unlock_irqrestore(&parent->sig->siglock, flags); +} + +static void +finish_stop(int stop_count) +{ + /* + * If there are no other threads in the group, or if there is + * a group stop in progress and we are the last to stop, + * report to the parent. When ptraced, every thread reports itself. + */ + if (stop_count < 0 || (current->ptrace & PT_PTRACED)) { + read_lock(&tasklist_lock); + do_notify_parent_cldstop(current, current->parent); + read_unlock(&tasklist_lock); + } + else if (stop_count == 0) { + read_lock(&tasklist_lock); + do_notify_parent_cldstop(current->group_leader, + current->group_leader->real_parent); + read_unlock(&tasklist_lock); + } + + schedule(); + /* + * Now we don't run again until continued. + */ + current->exit_code = 0; +} + +/* + * This performs the stopping for SIGSTOP and other stop signals. 
+ * We have to stop all threads in the thread group. + */ +static void +do_signal_stop(int signr) +{ + struct signal_struct *sig = current->sig; + int stop_count = -1; + + if (sig->group_stop_count > 0) { + /* + * There is a group stop in progress. We don't need to + * start another one. + */ + spin_lock_irq(&sig->siglock); + if (unlikely(sig->group_stop_count == 0)) { + BUG_ON(!sig->group_exit); + spin_unlock_irq(&sig->siglock); + return; + } + signr = sig->group_exit_code; + stop_count = --sig->group_stop_count; + current->exit_code = signr; + set_current_state(TASK_STOPPED); + spin_unlock_irq(&sig->siglock); + } + else if (thread_group_empty(current)) { + /* + * No locks needed in this case. + */ + current->exit_code = signr; + set_current_state(TASK_STOPPED); + } + else { + /* + * There is no group stop already in progress. + * We must initiate one now. + */ + struct task_struct *t; + read_lock(&tasklist_lock); + spin_lock_irq(&sig->siglock); + + if (unlikely(sig->group_exit)) { + /* + * There is a group exit in progress now. + * We'll just ignore the stop and process the + * associated fatal signal. + */ + spin_unlock_irq(&sig->siglock); + read_unlock(&tasklist_lock); + return; + } + + if (sig->group_stop_count == 0) { + sig->group_exit_code = signr; + stop_count = 0; + for (t = next_thread(current); t != current; + t = next_thread(t)) + /* + * Setting state to TASK_STOPPED for a group + * stop is always done with the siglock held, + * so this check has no races. + */ + if (t->state < TASK_STOPPED) { + stop_count++; + signal_wake_up(t, 0); + } + sig->group_stop_count = stop_count; + } + else { + /* A race with another thread while unlocked. 
*/ + signr = sig->group_exit_code; + stop_count = --sig->group_stop_count; + } + + current->exit_code = signr; + set_current_state(TASK_STOPPED); + + spin_unlock_irq(&sig->siglock); + read_unlock(&tasklist_lock); + } + + finish_stop(stop_count); +} + + #ifndef HAVE_ARCH_GET_SIGNAL_TO_DELIVER int get_signal_to_deliver(siginfo_t *info, struct pt_regs *regs) @@ -1235,6 +1363,28 @@ int get_signal_to_deliver(siginfo_t *info, struct pt_regs *regs) struct k_sigaction *ka; spin_lock_irq(&current->sig->siglock); + if (unlikely(current->sig->group_stop_count > 0)) { + int stop_count; + if (current->sig->group_exit_task == current) { + /* + * Group stop is so we can do a core dump. + */ + current->sig->group_exit_task = NULL; + goto dequeue; + } + /* + * There is a group stop in progress. We stop + * without any associated signal being in our queue. + */ + stop_count = --current->sig->group_stop_count; + signr = current->sig->group_exit_code; + current->exit_code = signr; + set_current_state(TASK_STOPPED); + spin_unlock_irq(&current->sig->siglock); + finish_stop(stop_count); + continue; + } + dequeue: signr = dequeue_signal(mask, info); spin_unlock_irq(&current->sig->siglock); @@ -1242,6 +1392,16 @@ int get_signal_to_deliver(siginfo_t *info, struct pt_regs *regs) break; if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) { + /* + * If there is a group stop in progress, + * we must participate in the bookkeeping. + */ + if (current->sig->group_stop_count > 0) { + spin_lock_irq(&current->sig->siglock); + --current->sig->group_stop_count; + spin_unlock_irq(&current->sig->siglock); + } + /* Let the debugger run. */ current->exit_code = signr; set_current_state(TASK_STOPPED); @@ -1254,10 +1414,6 @@ int get_signal_to_deliver(siginfo_t *info, struct pt_regs *regs) continue; current->exit_code = 0; - /* The debugger continued. Ignore SIGSTOP. */ - if (signr == SIGSTOP) - continue; - /* Update the siginfo structure. Is this good?
*/ if (signr != info->si_signo) { info->si_signo = signr; @@ -1269,61 +1425,69 @@ int get_signal_to_deliver(siginfo_t *info, struct pt_regs *regs) /* If the (new) signal is now blocked, requeue it. */ if (sigismember(&current->blocked, signr)) { - send_sig_info(signr, info, current); + spin_lock_irq(&current->sig->siglock); + specific_send_sig_info(signr, info, current); + spin_unlock_irq(&current->sig->siglock); continue; } } ka = &current->sig->action[signr-1]; - if (ka->sa.sa_handler == SIG_IGN) { - if (signr != SIGCHLD) - continue; - /* Check for SIGCHLD: it's special. */ - while (sys_wait4(-1, NULL, WNOHANG, NULL) > 0) - /* nothing */; + if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ continue; - } + if (ka->sa.sa_handler != SIG_DFL) /* Run the handler. */ + return signr; - if (ka->sa.sa_handler == SIG_DFL) { - int exit_code = signr; + /* + * Now we are doing the default action for this signal. + */ + if (sig_kernel_ignore(signr)) /* Default is nothing. */ + continue; /* Init gets no signals it doesn't want. */ if (current->pid == 1) continue; - switch (signr) { - case SIGCONT: case SIGCHLD: case SIGWINCH: case SIGURG: - continue; - - case SIGTSTP: case SIGTTIN: case SIGTTOU: - if (is_orphaned_pgrp(current->pgrp)) - continue; - /* FALLTHRU */ - - case SIGSTOP: { - struct signal_struct *sig; - set_current_state(TASK_STOPPED); - current->exit_code = signr; - sig = current->parent->sig; - if (sig && !(sig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP)) - notify_parent(current, SIGCHLD); - schedule(); + if (sig_kernel_stop(signr)) { + /* + * The default action is to stop all threads in + * the thread group. The job control signals + * do nothing in an orphaned pgrp, but SIGSTOP + * always works.
+ */ + if (signr == SIGSTOP || + !is_orphaned_pgrp(current->pgrp)) + do_signal_stop(signr); continue; } - case SIGQUIT: case SIGILL: case SIGTRAP: - case SIGABRT: case SIGFPE: case SIGSEGV: - case SIGBUS: case SIGSYS: case SIGXCPU: case SIGXFSZ: - if (do_coredump(signr, exit_code, regs)) - exit_code |= 0x80; - /* FALLTHRU */ - - default: - sig_exit(signr, exit_code, info); + /* + * Anything else is fatal, maybe with a core dump. + */ + current->flags |= PF_SIGNALED; + if (sig_kernel_coredump(signr) && + do_coredump(signr, signr, regs)) { + /* + * That killed all other threads in the group and + * synchronized with their demise, so there can't + * be any more left to kill now. The group_exit + * flags are set by do_coredump. Note that + * thread_group_empty won't always be true yet, + * because those threads were blocked in __exit_mm + * and we just let them go to finish dying. + */ + const int code = signr | 0x80; + BUG_ON(!current->sig->group_exit); + BUG_ON(current->sig->group_exit_code != code); + do_exit(code); /* NOTREACHED */ } - } - return signr; + + /* + * Death signals, no core dump. + */ + do_group_exit(signr); + /* NOTREACHED */ } return 0; } @@ -1435,12 +1599,17 @@ long do_sigpending(void *set, unsigned long sigsetsize) goto out; spin_lock_irq(&current->sig->siglock); - sigandsets(&pending, &current->blocked, &current->pending.signal); + sigorsets(&pending, &current->pending.signal, + &current->sig->shared_pending.signal); spin_unlock_irq(&current->sig->siglock); + /* Outside the lock because only this thread touches it.
*/ + sigandsets(&pending, ¤t->blocked, &pending); + error = -EFAULT; if (!copy_to_user(set, &pending, sigsetsize)) error = 0; + out: return error; } @@ -1628,9 +1797,17 @@ sys_tkill(int pid, int sig) p = find_task_by_pid(pid); error = -ESRCH; if (p) { - spin_lock_irq(&p->sig->siglock); - error = specific_send_sig_info(sig, &info, p, 0); - spin_unlock_irq(&p->sig->siglock); + error = check_kill_permission(sig, &info, p); + /* + * The null signal is a permissions and process existence + * probe. No signal is actually delivered. + */ + if (!error && sig && p->sig) { + spin_lock_irq(&p->sig->siglock); + handle_stop_signal(sig, p); + error = specific_send_sig_info(sig, &info, p); + spin_unlock_irq(&p->sig->siglock); + } } read_unlock(&tasklist_lock); return error; @@ -1664,7 +1841,17 @@ do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact) k = ¤t->sig->action[sig-1]; + read_lock(&tasklist_lock); spin_lock_irq(¤t->sig->siglock); + if (signal_pending(current)) { + /* + * If there might be a fatal signal pending on multiple + * threads, make sure we take it before changing the action. + */ + spin_unlock_irq(¤t->sig->siglock); + read_unlock(&tasklist_lock); + return -ERESTARTSYS; + } if (oact) *oact = *k; @@ -1683,25 +1870,22 @@ do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact) * pending and whose default action is to ignore the signal * (for example, SIGCHLD), shall cause the pending signal to * be discarded, whether or not it is blocked" - * - * Note the silly behaviour of SIGCHLD: SIG_IGN means that the - * signal isn't actually ignored, but does automatic child - * reaping, while SIG_DFL is explicitly said by POSIX to force - * the signal to be ignored. 
*/ - if (k->sa.sa_handler == SIG_IGN - || (k->sa.sa_handler == SIG_DFL - && (sig == SIGCONT || - sig == SIGCHLD || - sig == SIGWINCH || - sig == SIGURG))) { - if (rm_sig_from_queue(sig, current)) - recalc_sigpending(); + if (k->sa.sa_handler == SIG_IGN || + (k->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) { + struct task_struct *t = current; + rm_from_queue(sigmask(sig), &t->sig->shared_pending); + do { + rm_from_queue(sigmask(sig), &t->pending); + recalc_sigpending_tsk(t); + t = next_thread(t); + } while (t != current); } } - spin_unlock_irq(¤t->sig->siglock); + read_unlock(&tasklist_lock); + return 0; } diff --git a/kernel/suspend.c b/kernel/suspend.c index 3c55c284b537..40efb8d02db6 100644 --- a/kernel/suspend.c +++ b/kernel/suspend.c @@ -65,7 +65,6 @@ #include #include -extern void signal_wake_up(struct task_struct *t); extern int sys_sync(void); unsigned char software_suspend_enabled = 0; @@ -220,7 +219,7 @@ int freeze_processes(void) without locking */ p->flags |= PF_FREEZE; spin_lock_irqsave(&p->sig->siglock, flags); - signal_wake_up(p); + signal_wake_up(p, 0); spin_unlock_irqrestore(&p->sig->siglock, flags); todo++; } while_each_thread(g, p); -- cgit v1.2.3 From ea4e0b5bc6a86a370d5b7089af4d9aa901cecf9c Mon Sep 17 00:00:00 2001 From: Matthew Dobson Date: Wed, 5 Feb 2003 22:55:32 -0800 Subject: [PATCH] Broken CLEAR_BITMAP() macro The CLEAR_BITMAP() macro in include/linux/types.h is broken and doesn't round the bitmap size to the proper 'long' boundary. This fixes it by creating a macro BITS_TO_LONGS that just rounds a number of bits up to the closest number of unsigned longs. This makes the DECLARE & CLEAR _BITMAP macros more readable and fixes the bug. 
--- include/linux/types.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/types.h b/include/linux/types.h index 94ceb057eb64..f1c0ce5eb845 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -4,10 +4,12 @@ #ifdef __KERNEL__ #include +#define BITS_TO_LONGS(bits) \ + (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG) #define DECLARE_BITMAP(name,bits) \ - unsigned long name[((bits)+BITS_PER_LONG-1)/BITS_PER_LONG] + unsigned long name[BITS_TO_LONGS(bits)] #define CLEAR_BITMAP(name,bits) \ - memset(name, 0, ((bits)+BITS_PER_LONG-1)/8) + memset(name, 0, BITS_TO_LONGS(bits)*sizeof(unsigned long)) #endif #include -- cgit v1.2.3 From 6b932555f7693040e3f8034dff9042d8b236dc3e Mon Sep 17 00:00:00 2001 From: "Andries E. Brouwer" Date: Thu, 6 Feb 2003 00:22:48 -0800 Subject: [PATCH] Remove dead code In struct char_dev the fields openers and sem are unused. The file char_dev.c claims that it is called differently. --- fs/char_dev.c | 9 +++------ include/linux/fs.h | 2 -- 2 files changed, 3 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/fs/char_dev.c b/fs/char_dev.c index ff34b5e336cd..ec9489c3a387 100644 --- a/fs/char_dev.c +++ b/fs/char_dev.c @@ -1,5 +1,5 @@ /* - * linux/fs/block_dev.c + * linux/fs/char_dev.c * * Copyright (C) 1991, 1992 Linus Torvalds */ @@ -38,16 +38,13 @@ static kmem_cache_t * cdev_cachep; ((struct char_device *) kmem_cache_alloc(cdev_cachep, SLAB_KERNEL)) #define destroy_cdev(cdev) kmem_cache_free(cdev_cachep, (cdev)) -static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) +static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags) { - struct char_device * cdev = (struct char_device *) foo; + struct char_device *cdev = (struct char_device *) foo; if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == SLAB_CTOR_CONSTRUCTOR) - { memset(cdev, 0, sizeof(*cdev)); - sema_init(&cdev->sem, 1); - } } void __init 
cdev_cache_init(void) diff --git a/include/linux/fs.h b/include/linux/fs.h index 9a17c9819ae9..037c1fe2ad6c 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -332,8 +332,6 @@ struct char_device { struct list_head hash; atomic_t count; dev_t dev; - atomic_t openers; - struct semaphore sem; }; struct block_device { -- cgit v1.2.3 From 45c1a159b85b3b30afd26a77b4be312226bba416 Mon Sep 17 00:00:00 2001 From: Daniel Jacobowitz Date: Thu, 6 Feb 2003 04:32:29 -0500 Subject: Add PTRACE_O_TRACEVFORKDONE and PTRACE_O_TRACEEXIT facilities. --- include/linux/ptrace.h | 4 ++++ include/linux/sched.h | 2 ++ kernel/exit.c | 3 +++ kernel/fork.c | 6 ++++-- kernel/ptrace.c | 13 ++++++++++++- 5 files changed, 25 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index b56bbe7ca800..706b420fb5c9 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -35,12 +35,16 @@ #define PTRACE_O_TRACEVFORK 0x00000004 #define PTRACE_O_TRACECLONE 0x00000008 #define PTRACE_O_TRACEEXEC 0x00000010 +#define PTRACE_O_TRACEVFORKDONE 0x00000020 +#define PTRACE_O_TRACEEXIT 0x00000040 /* Wait extended result codes for the above trace options. 
*/ #define PTRACE_EVENT_FORK 1 #define PTRACE_EVENT_VFORK 2 #define PTRACE_EVENT_CLONE 3 #define PTRACE_EVENT_EXEC 4 +#define PTRACE_EVENT_VFORK_DONE 5 +#define PTRACE_EVENT_EXIT 6 #include #include diff --git a/include/linux/sched.h b/include/linux/sched.h index a325e5a8c645..c424a353a748 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -441,6 +441,8 @@ do { if (atomic_dec_and_test(&(tsk)->usage)) __put_task_struct(tsk); } while(0) #define PT_TRACE_VFORK 0x00000020 #define PT_TRACE_CLONE 0x00000040 #define PT_TRACE_EXEC 0x00000080 +#define PT_TRACE_VFORK_DONE 0x00000100 +#define PT_TRACE_EXIT 0x00000200 #if CONFIG_SMP extern void set_cpus_allowed(task_t *p, unsigned long new_mask); diff --git a/kernel/exit.c b/kernel/exit.c index 03801540a5e6..5cb58a1d2075 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -653,6 +653,9 @@ NORET_TYPE void do_exit(long code) profile_exit_task(tsk); + if (unlikely(current->ptrace & PT_TRACE_EXIT)) + ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP); + fake_volatile: acct_process(code); __exit_mm(tsk); diff --git a/kernel/fork.c b/kernel/fork.c index 347f957b080e..3da82d978812 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1046,9 +1046,11 @@ struct task_struct *do_fork(unsigned long clone_flags, ptrace_notify ((trace << 8) | SIGTRAP); } - if (clone_flags & CLONE_VFORK) + if (clone_flags & CLONE_VFORK) { wait_for_completion(&vfork); - else + if (unlikely (current->ptrace & PT_TRACE_VFORK_DONE)) + ptrace_notify ((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP); + } else /* * Let the child process run first, to avoid most of the * COW overhead when the child exec()s afterwards. 
diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 9f3769bfdc7e..14d158864d9e 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -277,9 +277,20 @@ static int ptrace_setoptions(struct task_struct *child, long data) else child->ptrace &= ~PT_TRACE_EXEC; + if (data & PTRACE_O_TRACEVFORKDONE) + child->ptrace |= PT_TRACE_VFORK_DONE; + else + child->ptrace &= ~PT_TRACE_VFORK_DONE; + + if (data & PTRACE_O_TRACEEXIT) + child->ptrace |= PT_TRACE_EXIT; + else + child->ptrace &= ~PT_TRACE_EXIT; + if ((data & (PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK | PTRACE_O_TRACECLONE - | PTRACE_O_TRACEEXEC)) + | PTRACE_O_TRACEEXEC | PTRACE_O_TRACEEXIT + | PTRACE_O_TRACEVFORKDONE)) != data) return -EINVAL; -- cgit v1.2.3 From da11be46accaae5bbe089161b00e43a67097308d Mon Sep 17 00:00:00 2001 From: Steven Cole Date: Thu, 6 Feb 2003 16:11:08 -0800 Subject: [PATCH] Spelling fixes for consistent, dependent, persistent This fixes the following common misspellings and their variants. consistant -> consistent dependant -> dependent persistant -> persistent --- Documentation/filesystems/ext3.txt | 2 +- Documentation/isdn/HiSax.cert | 2 +- Documentation/md.txt | 4 ++-- Documentation/scsi/ChangeLog.sym53c8xx | 2 +- Documentation/scsi/ibmmca.txt | 4 ++-- Documentation/sparc/sbus_drivers.txt | 4 ++-- Documentation/tipar.txt | 4 ++-- Documentation/usb/silverlink.txt | 2 +- arch/arm/mach-integrator/pci_v3.c | 2 +- arch/arm/mach-sa1100/stork.c | 2 +- arch/arm/mm/proc-arm920.S | 2 +- arch/arm/mm/proc-arm922.S | 2 +- arch/arm/mm/proc-arm926.S | 2 +- arch/cris/drivers/serial.c | 2 +- arch/cris/kernel/ptrace.c | 2 +- arch/ia64/kernel/smpboot.c | 2 +- arch/ia64/sn/io/sn1/pcibr.c | 4 ++-- arch/ia64/sn/io/sn2/pcibr/pcibr_dvr.c | 4 ++-- arch/m68k/mac/macints.c | 2 +- arch/m68k/mac/via.c | 2 +- arch/m68knommu/platform/68360/uCquicc/crt0_ram.S | 2 +- arch/m68knommu/platform/68360/uCquicc/crt0_rom.S | 2 +- arch/parisc/kernel/entry.S | 2 +- arch/ppc/boot/simple/Makefile | 2 +- 
arch/ppc/boot/simple/misc.c | 2 +- arch/ppc/kernel/pci.c | 2 +- arch/ppc64/kernel/pci.c | 2 +- arch/sh/kernel/io.c | 2 +- arch/sparc/kernel/entry.S | 2 +- arch/sparc/kernel/sys_sparc.c | 2 +- arch/sparc/kernel/sys_sunos.c | 2 +- arch/sparc/math-emu/math.c | 2 +- arch/sparc/mm/sun4c.c | 2 +- arch/sparc64/kernel/sbus.c | 2 +- arch/sparc64/kernel/winfixup.S | 6 +++--- arch/sparc64/math-emu/math.c | 2 +- arch/v850/kernel/rte_mb_a_pci.c | 4 ++-- drivers/atm/lanai.c | 2 +- drivers/block/nbd.c | 2 +- drivers/char/ip2/i2ellis.c | 2 +- drivers/char/ip2/i2os.h | 2 +- drivers/char/ip2main.c | 2 +- drivers/char/nvram.c | 2 +- drivers/char/rio/riotty.c | 4 ++-- drivers/char/rtc.c | 2 +- drivers/hotplug/cpci_hotplug_pci.c | 2 +- drivers/hotplug/cpqphp_core.c | 2 +- drivers/hotplug/cpqphp_pci.c | 2 +- drivers/ide/pci/sis5513.c | 4 ++-- drivers/isdn/eicon/eicon.h | 2 +- drivers/isdn/hisax/isdnl2.c | 20 ++++++++++---------- drivers/isdn/hisax/l3dss1.c | 2 +- drivers/isdn/hisax/l3ni1.c | 2 +- drivers/isdn/hysdn/hysdn_boot.c | 2 +- drivers/media/radio/radio-zoltrix.c | 2 +- drivers/mtd/chips/jedec.c | 2 +- drivers/net/acenic.c | 2 +- drivers/net/declance.c | 2 +- drivers/net/e1000/e1000_osdep.h | 2 +- drivers/net/hamradio/6pack.c | 8 ++++---- drivers/net/hamradio/scc.c | 2 +- drivers/net/hamradio/yam.c | 2 +- drivers/net/rrunner.c | 2 +- drivers/net/sgiseeq.c | 2 +- drivers/net/sk98lin/skvpd.c | 2 +- drivers/net/sk98lin/skxmac2.c | 4 ++-- drivers/net/skfp/cfm.c | 6 +++--- drivers/net/skfp/ecm.c | 8 ++++---- drivers/net/skfp/h/osdef1st.h | 2 +- drivers/net/skfp/pcmplc.c | 8 ++++---- drivers/net/skfp/rmt.c | 6 +++--- drivers/net/skfp/skfddi.c | 4 ++-- drivers/net/wan/lmc/lmc_main.c | 2 +- drivers/net/wan/lmc/lmc_ver.h | 2 +- drivers/net/wireless/airo.c | 2 +- drivers/net/wireless/orinoco.h | 2 +- drivers/sbus/char/aurora.c | 2 +- drivers/sbus/char/bbc_envctrl.c | 2 +- drivers/scsi/aacraid/aachba.c | 4 ++-- drivers/scsi/aic7xxx/aic79xx_inline.h | 2 +- drivers/scsi/aic7xxx/aic79xx_osm.h 
| 2 +- drivers/scsi/aic7xxx/aic7xxx_inline.h | 2 +- drivers/scsi/aic7xxx/aic7xxx_osm.h | 2 +- drivers/scsi/megaraid.c | 2 +- drivers/scsi/qla1280.c | 2 +- drivers/scsi/sym53c8xx_2/sym_glue.c | 4 ++-- drivers/scsi/sym53c8xx_2/sym_glue.h | 2 +- drivers/usb/misc/atmsar.c | 2 +- drivers/usb/serial/safe_serial.c | 2 +- drivers/usb/storage/usb.c | 2 +- drivers/video/skeletonfb.c | 2 +- fs/befs/ChangeLog | 2 +- fs/partitions/ldm.c | 2 +- include/asm-alpha/pci.h | 12 ++++++------ include/asm-cris/io.h | 2 +- include/asm-generic/rmap.h | 2 +- include/asm-generic/rtc.h | 2 +- include/asm-mips/isadep.h | 2 +- include/asm-mips64/r10kcache.h | 2 +- include/asm-ppc/io.h | 2 +- include/asm-ppc/system.h | 2 +- include/asm-ppc64/system.h | 2 +- include/asm-v850/pci.h | 2 +- include/linux/agp_backend.h | 2 +- include/linux/apm_bios.h | 2 +- include/linux/isdnif.h | 2 +- include/linux/sdla_x25.h | 2 +- net/irda/iriap.c | 2 +- net/irda/irlmp.c | 4 ++-- net/irda/irnet/irnet.h | 2 +- sound/core/hwdep.c | 2 +- sound/core/seq/seq_midi_emul.c | 4 ++-- sound/oss/ac97_codec.c | 2 +- sound/oss/maestro.c | 2 +- 114 files changed, 158 insertions(+), 158 deletions(-) (limited to 'include/linux') diff --git a/Documentation/filesystems/ext3.txt b/Documentation/filesystems/ext3.txt index 84e8ca1f4418..12c01af809ac 100644 --- a/Documentation/filesystems/ext3.txt +++ b/Documentation/filesystems/ext3.txt @@ -78,7 +78,7 @@ design to add journaling capabilities on a block device. The ext3 filesystem code will inform the JBD of modifications it is performing (Call a transaction). the journal support the transactions start and stop, and in case of crash, the journal can replayed the transactions -to put the partition on a consistant state fastly. +to put the partition on a consistent state fastly. handles represent a single atomic update to a filesystem. JBD can handle external journal on a block device. 
diff --git a/Documentation/isdn/HiSax.cert b/Documentation/isdn/HiSax.cert index 2e3523ca7d22..f2a6fcb8efee 100644 --- a/Documentation/isdn/HiSax.cert +++ b/Documentation/isdn/HiSax.cert @@ -27,7 +27,7 @@ These tests included all layers 1-3 and as well all functional tests for the layer 1. Because all hardware based on these chips are complete ISDN solutions in one chip all cards and USB-TAs using these chips are to be regarded as approved for those tests. Some additional electrical tests -of the layer 1 which are independant of the driver and related to a +of the layer 1 which are independent of the driver and related to a special hardware used will be regarded as approved if at least one solution has been tested including those electrical tests. So if cards or tas have been completely approved for any other os, the approval diff --git a/Documentation/md.txt b/Documentation/md.txt index 0df89447bb0e..cecc9beba2fb 100644 --- a/Documentation/md.txt +++ b/Documentation/md.txt @@ -9,7 +9,7 @@ device with the following kernel command lines: for old raid arrays without persistent superblocks: md=,,,,dev0,dev1,...,devn -for raid arrays with persistant superblocks +for raid arrays with persistent superblocks md=,dev0,dev1,...,devn md device no. = the number of the md device ... @@ -21,7 +21,7 @@ md device no. = the number of the md device ... raid level = -1 linear mode 0 striped mode - other modes are only supported with persistant super blocks + other modes are only supported with persistent super blocks chunk size factor = (raid-0 and raid-1 only) Set the chunk size as 4k << n. diff --git a/Documentation/scsi/ChangeLog.sym53c8xx b/Documentation/scsi/ChangeLog.sym53c8xx index 423ceecfd9f3..8dc00328bf94 100644 --- a/Documentation/scsi/ChangeLog.sym53c8xx +++ b/Documentation/scsi/ChangeLog.sym53c8xx @@ -529,7 +529,7 @@ Sun Nov 1 14H00 1998 Gerard Roudier (groudier@club-internet.fr) * version pre-sym53c8xx-0.13 - Some rewrite of the device detection code. 
This code had been patched too much and needed to be face-lifted a bit. - Remove all platform dependant fix-ups that was not needed or + Remove all platform dependent fix-ups that was not needed or conflicted with some other driver code as work-arounds. Reread the NVRAM before the calling of ncr_attach(). This spares stack space and so allows to handle more boards. diff --git a/Documentation/scsi/ibmmca.txt b/Documentation/scsi/ibmmca.txt index 912d10209c1c..3957eb3034a4 100644 --- a/Documentation/scsi/ibmmca.txt +++ b/Documentation/scsi/ibmmca.txt @@ -906,7 +906,7 @@ to does not offer more space, invalid memory accesses destabilized the kernel. 3) version 4.0 is only valid for kernel 2.4.0 or later. This is necessary - to remove old kernel version dependant waste from the driver. 3.2d is + to remove old kernel version dependent waste from the driver. 3.2d is only distributed with older kernels but keeps compatibility with older kernel versions. 4.0 and higher versions cannot be used with older kernels anymore!! You must have at least kernel 2.4.0!! @@ -1382,7 +1382,7 @@ 9 Disclaimer ------------ - Beside the GNU General Public License and the dependant disclaimers and disclaimers + Beside the GNU General Public License and the dependent disclaimers and disclaimers concerning the Linux-kernel in special, this SCSI-driver comes without any warranty. 
Its functionality is tested as good as possible on certain machines and combinations of computer hardware, which does not exclude, diff --git a/Documentation/sparc/sbus_drivers.txt b/Documentation/sparc/sbus_drivers.txt index fc8ab4a8b10e..876195dc2aef 100644 --- a/Documentation/sparc/sbus_drivers.txt +++ b/Documentation/sparc/sbus_drivers.txt @@ -208,7 +208,7 @@ like the following: char *mem; /* Address in the CPU space */ u32 busa; /* Address in the SBus space */ - mem = (char *) sbus_alloc_consistant(sdev, MYMEMSIZE, &busa); + mem = (char *) sbus_alloc_consistent(sdev, MYMEMSIZE, &busa); Then mem is used when CPU accesses this memory and u32 is fed to the device so that it can do DVMA. This is typically @@ -216,7 +216,7 @@ done with an sbus_writel() into some device register. Do not forget to free the DVMA resources once you are done: - sbus_free_consistant(sdev, MYMEMSIZE, mem, busa); + sbus_free_consistent(sdev, MYMEMSIZE, mem, busa); Streaming DVMA is more interesting. First you allocate some memory suitable for it or pin down some user pages. Then it all works diff --git a/Documentation/tipar.txt b/Documentation/tipar.txt index 73e5548357cf..773b9a2483eb 100644 --- a/Documentation/tipar.txt +++ b/Documentation/tipar.txt @@ -12,7 +12,7 @@ INTRODUCTION: This is a driver for the very common home-made parallel link cable, a cable designed for connecting TI8x/9x graphing calculators (handhelds) to a computer or workstation (Alpha, Sparc). Given that driver is built on parport, the -parallel port abstraction layer, this driver is independant of the platform. +parallel port abstraction layer, this driver is independent of the platform. It can also be used with another device plugged on the same port (such as a ZIP drive). I have a 100MB ZIP and both of them work fine ! @@ -90,4 +90,4 @@ You can also mail JB at jb@jblache.org. He packaged these drivers for Debian. CREDITS: The code is based on tidev.c & parport.c. 
-The driver has been developed independantly of Texas Instruments. +The driver has been developed independently of Texas Instruments. diff --git a/Documentation/usb/silverlink.txt b/Documentation/usb/silverlink.txt index ae4b9ab3985b..d720415ebb34 100644 --- a/Documentation/usb/silverlink.txt +++ b/Documentation/usb/silverlink.txt @@ -73,4 +73,4 @@ this driver but he better knows the Mac OS-X driver. CREDITS: The code is based on dabusb.c, printer.c and scanner.c ! -The driver has been developed independantly of Texas Instruments. +The driver has been developed independently of Texas Instruments. diff --git a/arch/arm/mach-integrator/pci_v3.c b/arch/arm/mach-integrator/pci_v3.c index f2f73e15a11b..46dd55038145 100644 --- a/arch/arm/mach-integrator/pci_v3.c +++ b/arch/arm/mach-integrator/pci_v3.c @@ -120,7 +120,7 @@ * function = which function * offset = configuration space register we are interested in * - * description: this routine will generate a platform dependant config + * description: this routine will generate a platform dependent config * address. * * calls: none diff --git a/arch/arm/mach-sa1100/stork.c b/arch/arm/mach-sa1100/stork.c index 0c34c9d61bb8..9ac3a0a10c7d 100644 --- a/arch/arm/mach-sa1100/stork.c +++ b/arch/arm/mach-sa1100/stork.c @@ -181,7 +181,7 @@ static void storkClockTS(void) { storkSetLatchB(STORK_TOUCH_SCREEN_DCLK); udelay(10); /* hmm wait 200ns (min) - ok this ought to be udelay(1) but that doesn't get */ - /* consistant values so I'm using 10 (urgh) */ + /* consistent values so I'm using 10 (urgh) */ storkClearLatchB(STORK_TOUCH_SCREEN_DCLK); udelay(10); } diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index 09a2b12fad9b..cbeaaf9a0853 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S @@ -297,7 +297,7 @@ ENTRY(cpu_arm920_dcache_clean_entry) * * This is a little misleading, it is not intended to clean out * the i-cache but to make sure that any data written to the - * range is made consistant. 
This means that when we execute code + * range is made consistent. This means that when we execute code * in that region, everything works as we expect. * * This generally means writing back data in the Dcache and diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S index 7bfbc8965019..f115e476bc7b 100644 --- a/arch/arm/mm/proc-arm922.S +++ b/arch/arm/mm/proc-arm922.S @@ -298,7 +298,7 @@ ENTRY(cpu_arm922_dcache_clean_entry) * * This is a little misleading, it is not intended to clean out * the i-cache but to make sure that any data written to the - * range is made consistant. This means that when we execute code + * range is made consistent. This means that when we execute code * in that region, everything works as we expect. * * This generally means writing back data in the Dcache and diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index 24b5d4f70217..a2cdcdd3d9f3 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S @@ -147,7 +147,7 @@ cpu_arm926_cache_clean_invalidate_all_r2: * * This is a little misleading, it is not intended to clean out * the i-cache but to make sure that any data written to the - * range is made consistant. This means that when we execute code + * range is made consistent. This means that when we execute code * in that region, everything works as we expect. 
* * This generally means writing back data in the Dcache and diff --git a/arch/cris/drivers/serial.c b/arch/cris/drivers/serial.c index 412f7519c488..13bcee964f09 100644 --- a/arch/cris/drivers/serial.c +++ b/arch/cris/drivers/serial.c @@ -318,7 +318,7 @@ static char *serial_version = "$Revision: 1.3 $"; #include -/* non-arch dependant serial structures are in linux/serial.h */ +/* non-arch dependent serial structures are in linux/serial.h */ #include /* while we keep our own stuff (struct e100_serial) in a local .h file */ #include "serial.h" diff --git a/arch/cris/kernel/ptrace.c b/arch/cris/kernel/ptrace.c index b4e1f9282694..c8a066c4ee4c 100644 --- a/arch/cris/kernel/ptrace.c +++ b/arch/cris/kernel/ptrace.c @@ -18,7 +18,7 @@ * PTRACE_DETACH works more simple in 2.4.10 * * Revision 1.6 2001/07/25 16:08:47 bjornw - * PTRACE_ATTACH bulk moved into arch-independant code in 2.4.7 + * PTRACE_ATTACH bulk moved into arch-independent code in 2.4.7 * * Revision 1.5 2001/03/26 14:24:28 orjanf * * Changed loop condition. diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index 15d820d88c64..cb5a89f68763 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -551,7 +551,7 @@ __cpu_up (unsigned int cpu) } /* - * Assume that CPU's have been discovered by some platform-dependant interface. For + * Assume that CPU's have been discovered by some platform-dependent interface. For * SoftSDV/Lion, that would be ACPI. * * Setup of the IPI irq handler is done in irq.c:init_IRQ_SMP(). diff --git a/arch/ia64/sn/io/sn1/pcibr.c b/arch/ia64/sn/io/sn1/pcibr.c index ac30ed1badd8..6029f26613fe 100644 --- a/arch/ia64/sn/io/sn1/pcibr.c +++ b/arch/ia64/sn/io/sn1/pcibr.c @@ -4811,7 +4811,7 @@ pcibr_dmamap_alloc(devfs_handle_t pconn_vhdl, /* Device is capable of A64 operations, * and the attributes of the DMA are - * consistant with any previous DMA + * consistent with any previous DMA * mappings using shared resources. 
*/ @@ -4853,7 +4853,7 @@ pcibr_dmamap_alloc(devfs_handle_t pconn_vhdl, if (!pcibr_try_set_device(pcibr_soft, slot, flags, BRIDGE_DEV_D32_BITS)) { /* User desires DIRECT A32 operations, * and the attributes of the DMA are - * consistant with any previous DMA + * consistent with any previous DMA * mappings using shared resources. * Mapping calls may fail if target * is outside the direct32 range. diff --git a/arch/ia64/sn/io/sn2/pcibr/pcibr_dvr.c b/arch/ia64/sn/io/sn2/pcibr/pcibr_dvr.c index 867178f42fab..54939d8508c9 100644 --- a/arch/ia64/sn/io/sn2/pcibr/pcibr_dvr.c +++ b/arch/ia64/sn/io/sn2/pcibr/pcibr_dvr.c @@ -3216,7 +3216,7 @@ pcibr_dmamap_alloc(devfs_handle_t pconn_vhdl, /* Device is capable of A64 operations, * and the attributes of the DMA are - * consistant with any previous DMA + * consistent with any previous DMA * mappings using shared resources. */ @@ -3266,7 +3266,7 @@ pcibr_dmamap_alloc(devfs_handle_t pconn_vhdl, if (!pcibr_try_set_device(pcibr_soft, slot, flags, BRIDGE_DEV_D32_BITS)) { /* User desires DIRECT A32 operations, * and the attributes of the DMA are - * consistant with any previous DMA + * consistent with any previous DMA * mappings using shared resources. * Mapping calls may fail if target * is outside the direct32 range. diff --git a/arch/m68k/mac/macints.c b/arch/m68k/mac/macints.c index 4f8ea17f57ac..b0a647a964fc 100644 --- a/arch/m68k/mac/macints.c +++ b/arch/m68k/mac/macints.c @@ -749,7 +749,7 @@ void mac_scc_dispatch(int irq, void *dev_id, struct pt_regs *regs) /* */ /* Note that we're ignoring scc_mask for now. */ /* If we actually mask the ints then we tend to */ - /* get hammered by very persistant SCC irqs, */ + /* get hammered by very persistent SCC irqs, */ /* and since they're autovector interrupts they */ /* pretty much kill the system. 
*/ diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c index 53b448deb37a..5b43e574046d 100644 --- a/arch/m68k/mac/via.c +++ b/arch/m68k/mac/via.c @@ -48,7 +48,7 @@ __u8 rbv_clear; * just hit the combined register (ie, vIER|rIER) but that seems to * break on AV Macs...probably because they actually decode more than * eight address bits. Why can't Apple engineers at least be - * _consistantly_ lazy? - 1999-05-21 (jmt) + * _consistently_ lazy? - 1999-05-21 (jmt) */ static int gIER,gIFR,gBufA,gBufB; diff --git a/arch/m68knommu/platform/68360/uCquicc/crt0_ram.S b/arch/m68knommu/platform/68360/uCquicc/crt0_ram.S index c8d9142cdd24..56027aac42f8 100644 --- a/arch/m68knommu/platform/68360/uCquicc/crt0_ram.S +++ b/arch/m68knommu/platform/68360/uCquicc/crt0_ram.S @@ -358,7 +358,7 @@ _ramend: .long 0 /* (Unassigned, Reserved) - 62. */ .long 0 /* (Unassigned, Reserved) - 63. */ /* The assignment of these vectors to the CPM is */ - /* dependant on the configuration of the CPM vba */ + /* dependent on the configuration of the CPM vba */ /* fields. */ .long 0 /* (User-Defined Vectors 1) CPM Error - 64. */ .long 0 /* (User-Defined Vectors 2) CPM Parallel IO PC11- 65. */ diff --git a/arch/m68knommu/platform/68360/uCquicc/crt0_rom.S b/arch/m68knommu/platform/68360/uCquicc/crt0_rom.S index 6c0df839eb7a..ed9187c740ac 100644 --- a/arch/m68knommu/platform/68360/uCquicc/crt0_rom.S +++ b/arch/m68knommu/platform/68360/uCquicc/crt0_rom.S @@ -367,7 +367,7 @@ _ramend: .long 0 /* (Unassigned, Reserved) - 62. */ .long 0 /* (Unassigned, Reserved) - 63. */ /* The assignment of these vectors to the CPM is */ - /* dependant on the configuration of the CPM vba */ + /* dependent on the configuration of the CPM vba */ /* fields. */ .long 0 /* (User-Defined Vectors 1) CPM Error - 64. */ .long 0 /* (User-Defined Vectors 2) CPM Parallel IO PC11- 65. 
*/ diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index 2b858b1b67a0..84064c3a02bd 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -689,7 +689,7 @@ syscall_exit_rfi: * (we don't store them in the sigcontext), so set them * to "proper" values now (otherwise we'll wind up restoring * whatever was last stored in the task structure, which might - * be inconsistant if an interrupt occured while on the gateway + * be inconsistent if an interrupt occured while on the gateway * page) Note that we may be "trashing" values the user put in * them, but we don't support the the user changing them. */ diff --git a/arch/ppc/boot/simple/Makefile b/arch/ppc/boot/simple/Makefile index 82f57d14b810..40cd523f7587 100644 --- a/arch/ppc/boot/simple/Makefile +++ b/arch/ppc/boot/simple/Makefile @@ -19,7 +19,7 @@ # ENTRYPOINT which the image should be loaded at. The optimal setting # for ENTRYPOINT is the link address. # (4) It is advisable to pass in the memory size using BI_MEMSIZE and -# get_mem_size(), which is memory controller dependant. Add in the correct +# get_mem_size(), which is memory controller dependent. Add in the correct # XXX_memory.o file for this to work, as well as editing the $(MISC) file. boot: zImage diff --git a/arch/ppc/boot/simple/misc.c b/arch/ppc/boot/simple/misc.c index e204557f2494..6b2909ed4eae 100644 --- a/arch/ppc/boot/simple/misc.c +++ b/arch/ppc/boot/simple/misc.c @@ -82,7 +82,7 @@ decompress_kernel(unsigned long load_addr, int num_words, unsigned long cksum) #if defined(CONFIG_LOPEC) || defined(CONFIG_PAL4) /* - * Call get_mem_size(), which is memory controller dependant, + * Call get_mem_size(), which is memory controller dependent, * and we must have the correct file linked in here. 
*/ TotalMemory = get_mem_size(); diff --git a/arch/ppc/kernel/pci.c b/arch/ppc/kernel/pci.c index 90df3328b3e8..ee68f236114b 100644 --- a/arch/ppc/kernel/pci.c +++ b/arch/ppc/kernel/pci.c @@ -1069,7 +1069,7 @@ pcibios_init(void) if (ppc_md.pci_swizzle && ppc_md.pci_map_irq) pci_fixup_irqs(ppc_md.pci_swizzle, ppc_md.pci_map_irq); - /* Call machine dependant fixup */ + /* Call machine dependent fixup */ if (ppc_md.pcibios_fixup) ppc_md.pcibios_fixup(); diff --git a/arch/ppc64/kernel/pci.c b/arch/ppc64/kernel/pci.c index 049a5e58ffd7..c459ee120097 100644 --- a/arch/ppc64/kernel/pci.c +++ b/arch/ppc64/kernel/pci.c @@ -413,7 +413,7 @@ pcibios_init(void) next_busno = hose->last_busno+1; } - /* Call machine dependant fixup */ + /* Call machine dependent fixup */ if (ppc_md.pcibios_fixup) { ppc_md.pcibios_fixup(); } diff --git a/arch/sh/kernel/io.c b/arch/sh/kernel/io.c index 1b0f0918dfd6..1f49c739d1bc 100644 --- a/arch/sh/kernel/io.c +++ b/arch/sh/kernel/io.c @@ -4,7 +4,7 @@ * Copyright (C) 2000 Stuart Menefy * * Provide real functions which expand to whatever the header file defined. - * Also definitions of machine independant IO functions. + * Also definitions of machine independent IO functions. */ #include diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S index d4c1e922b59f..4434547f406d 100644 --- a/arch/sparc/kernel/entry.S +++ b/arch/sparc/kernel/entry.S @@ -1889,7 +1889,7 @@ C_LABEL(kill_user_windows): wr %o4, 0x0, %psr ! the uwinmask state WRITE_PAUSE ! burn them cycles 1: - ld [%g6 + TI_UWINMASK], %o0 ! get consistant state + ld [%g6 + TI_UWINMASK], %o0 ! get consistent state orcc %g0, %o0, %g0 ! did an interrupt come in? be 4f ! yep, we are done rd %wim, %o3 ! 
get current wim diff --git a/arch/sparc/kernel/sys_sparc.c b/arch/sparc/kernel/sys_sparc.c index d060b430c08c..fbf31d498e8e 100644 --- a/arch/sparc/kernel/sys_sparc.c +++ b/arch/sparc/kernel/sys_sparc.c @@ -409,7 +409,7 @@ sparc_sigaction (int sig, const struct old_sigaction *act, ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); if (!ret && oact) { - /* In the clone() case we could copy half consistant + /* In the clone() case we could copy half consistent * state to the user, however this could sleep and * deadlock us if we held the signal lock on SMP. So for * now I take the easy way out and do no locking. diff --git a/arch/sparc/kernel/sys_sunos.c b/arch/sparc/kernel/sys_sunos.c index 1babc9d4d326..a81ca4978702 100644 --- a/arch/sparc/kernel/sys_sunos.c +++ b/arch/sparc/kernel/sys_sunos.c @@ -1168,7 +1168,7 @@ sunos_sigaction(int sig, const struct old_sigaction *act, ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); if (!ret && oact) { - /* In the clone() case we could copy half consistant + /* In the clone() case we could copy half consistent * state to the user, however this could sleep and * deadlock us if we held the signal lock on SMP. So for * now I take the easy way out and do no locking. diff --git a/arch/sparc/math-emu/math.c b/arch/sparc/math-emu/math.c index 050431cfcd87..cd15e4f41ef2 100644 --- a/arch/sparc/math-emu/math.c +++ b/arch/sparc/math-emu/math.c @@ -203,7 +203,7 @@ int do_mathemu(struct pt_regs *regs, struct task_struct *fpt) } /* All routines returning an exception to raise should detect - * such exceptions _before_ rounding to be consistant with + * such exceptions _before_ rounding to be consistent with * the behavior of the hardware in the implemented cases * (and thus with the recommendations in the V9 architecture * manual). 
diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c index 74c23da4a7b0..5b165f0d3b1b 100644 --- a/arch/sparc/mm/sun4c.c +++ b/arch/sparc/mm/sun4c.c @@ -1042,7 +1042,7 @@ static struct thread_info *sun4c_alloc_thread_info(void) get_locked_segment(addr); /* We are changing the virtual color of the page(s) - * so we must flush the cache to guarentee consistancy. + * so we must flush the cache to guarentee consistency. */ sun4c_flush_page(pages); #ifndef CONFIG_SUN4 diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c index 27ffa86806f9..9a5f8a99cfdc 100644 --- a/arch/sparc64/kernel/sbus.c +++ b/arch/sparc64/kernel/sbus.c @@ -767,7 +767,7 @@ unsigned int sbus_build_irq(void *buscookie, unsigned int ino) } imap += reg_base; - /* SYSIO inconsistancy. For external SLOTS, we have to select + /* SYSIO inconsistency. For external SLOTS, we have to select * the right ICLR register based upon the lower SBUS irq level * bits. */ diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S index a633e009aae9..707e85e8f59b 100644 --- a/arch/sparc64/kernel/winfixup.S +++ b/arch/sparc64/kernel/winfixup.S @@ -60,7 +60,7 @@ fill_fixup: sll %g2, 3, %g2 ! NORMAL-->OTHER wrpr %g0, 0x0, %canrestore ! Standard etrap stuff. - wrpr %g2, 0x0, %wstate ! This must be consistant. + wrpr %g2, 0x0, %wstate ! This must be consistent. wrpr %g0, 0x0, %otherwin ! We know this. mov PRIMARY_CONTEXT, %g1 ! Change contexts... stxa %g0, [%g1] ASI_DMMU ! Back into the nucleus. @@ -181,7 +181,7 @@ fill_fixup_mna: sll %g2, 3, %g2 ! NORMAL-->OTHER wrpr %g0, 0x0, %canrestore ! Standard etrap stuff. - wrpr %g2, 0x0, %wstate ! This must be consistant. + wrpr %g2, 0x0, %wstate ! This must be consistent. wrpr %g0, 0x0, %otherwin ! We know this. mov PRIMARY_CONTEXT, %g1 ! Change contexts... stxa %g0, [%g1] ASI_DMMU ! Back into the nucleus. @@ -287,7 +287,7 @@ fill_fixup_dax: sll %g2, 3, %g2 ! NORMAL-->OTHER wrpr %g0, 0x0, %canrestore ! Standard etrap stuff. 
- wrpr %g2, 0x0, %wstate ! This must be consistant. + wrpr %g2, 0x0, %wstate ! This must be consistent. wrpr %g0, 0x0, %otherwin ! We know this. mov PRIMARY_CONTEXT, %g1 ! Change contexts... stxa %g0, [%g1] ASI_DMMU ! Back into the nucleus. diff --git a/arch/sparc64/math-emu/math.c b/arch/sparc64/math-emu/math.c index f45419845cde..48dfa37ddd72 100644 --- a/arch/sparc64/math-emu/math.c +++ b/arch/sparc64/math-emu/math.c @@ -88,7 +88,7 @@ #define FSR_CEXC_MASK (0x1fUL << FSR_CEXC_SHIFT) /* All routines returning an exception to raise should detect - * such exceptions _before_ rounding to be consistant with + * such exceptions _before_ rounding to be consistent with * the behavior of the hardware in the implemented cases * (and thus with the recommendations in the V9 architecture * manual). diff --git a/arch/v850/kernel/rte_mb_a_pci.c b/arch/v850/kernel/rte_mb_a_pci.c index c56b0d28bb98..6fc07ccd955e 100644 --- a/arch/v850/kernel/rte_mb_a_pci.c +++ b/arch/v850/kernel/rte_mb_a_pci.c @@ -689,7 +689,7 @@ void pci_unmap_single (struct pci_dev *pdev, dma_addr_t dma_addr, size_t size, free_dma_mapping (mapping); } -/* Make physical memory consistant for a single streaming mode DMA +/* Make physical memory consistent for a single streaming mode DMA translation after a transfer. If you perform a pci_map_single() but wish to interrogate the @@ -731,7 +731,7 @@ pci_unmap_sg (struct pci_dev *pdev, struct scatterlist *sg, int sg_len,int dir) BUG (); } -/* Make physical memory consistant for a set of streaming mode DMA +/* Make physical memory consistent for a set of streaming mode DMA translations after a transfer. The same as pci_dma_sync_single but for a scatter-gather list, same rules and usage. 
*/ diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c index 8ca6798f0796..e6788148675b 100644 --- a/drivers/atm/lanai.c +++ b/drivers/atm/lanai.c @@ -632,7 +632,7 @@ static inline void reset_board(const struct lanai_dev *lanai) * anytime it wants to consult its table of vccs - for instance * when handling an incoming PDU. This also explains why we would * probably want the write_lock while in _change_qos - to prevent - * handling of PDUs while possibly in an inconsistant state. + * handling of PDUs while possibly in an inconsistent state. * Also, _send would grab the lock for reading. * * One problem with this is that _open and _close could no longer diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index c71a04f5d457..0f9f7a9de7ba 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -24,7 +24,7 @@ * 01-3-11 Make nbd work with new Linux block layer code. It now supports * plugging like all the other block devices. Also added in MSG_MORE to * reduce number of partial TCP segments sent. - * 01-12-6 Fix deadlock condition by making queue locks independant of + * 01-12-6 Fix deadlock condition by making queue locks independent of * the transmit lock. * 02-10-11 Allow hung xmit to be aborted via SIGKILL & various fixes. * diff --git a/drivers/char/ip2/i2ellis.c b/drivers/char/ip2/i2ellis.c index 443f414eea68..f834d05ccc97 100644 --- a/drivers/char/ip2/i2ellis.c +++ b/drivers/char/ip2/i2ellis.c @@ -773,7 +773,7 @@ iiWriteBuf16(i2eBordStrPtr pB, unsigned char *address, int count) // // Writes 'count' bytes from 'address' to the data fifo specified by the board // structure pointer pB. Should count happen to be odd, an extra pad byte is -// sent (identity unknown...). This is to be consistant with the 16-bit version. +// sent (identity unknown...). This is to be consistent with the 16-bit version. // Uses 8-bit (byte) operations. Is called indirectly through pB->i2eWriteBuf. 
// //****************************************************************************** diff --git a/drivers/char/ip2/i2os.h b/drivers/char/ip2/i2os.h index 8466d774700e..c83e901ea63f 100644 --- a/drivers/char/ip2/i2os.h +++ b/drivers/char/ip2/i2os.h @@ -8,7 +8,7 @@ * PACKAGE: Linux tty Device Driver for IntelliPort II family of multiport * serial I/O controllers. * -* DESCRIPTION: Defines, definitions and includes which are heavily dependant +* DESCRIPTION: Defines, definitions and includes which are heavily dependent * on O/S, host, compiler, etc. This file is tailored for: * Linux v2.0.0 and later * Gnu gcc c2.7.2 diff --git a/drivers/char/ip2main.c b/drivers/char/ip2main.c index 244399a6ae04..7ca1e640b7cc 100644 --- a/drivers/char/ip2main.c +++ b/drivers/char/ip2main.c @@ -21,7 +21,7 @@ // // 1.2.14 /\/\|=mhw=|\/\/ // Added bounds checking to ip2_ipl_ioctl to avoid potential terroristic acts. -// Changed the definition of ip2trace to be more consistant with kernel style +// Changed the definition of ip2trace to be more consistent with kernel style // Thanks to Andreas Dilger for these updates // // 1.2.13 /\/\|=mhw=|\/\/ diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c index d1d8717f31bb..3565715eb561 100644 --- a/drivers/char/nvram.c +++ b/drivers/char/nvram.c @@ -11,7 +11,7 @@ * "NVRAM" (NV stands for non-volatile). * * The data are supplied as a (seekable) character device, /dev/nvram. The - * size of this file is dependant on the controller. The usual size is 114, + * size of this file is dependent on the controller. The usual size is 114, * the number of freely available bytes in the memory (i.e., not used by the * RTC itself). 
* diff --git a/drivers/char/rio/riotty.c b/drivers/char/rio/riotty.c index 15868bfbb285..3e0de643fe2e 100644 --- a/drivers/char/rio/riotty.c +++ b/drivers/char/rio/riotty.c @@ -1287,7 +1287,7 @@ register caddr_t arg; } /* - ttyseth -- set hardware dependant tty settings + ttyseth -- set hardware dependent tty settings */ void ttyseth(PortP, s, sg) @@ -1342,7 +1342,7 @@ struct old_sgttyb *sg; } /* - ttyseth_pv -- set hardware dependant tty settings using either the + ttyseth_pv -- set hardware dependent tty settings using either the POSIX termios structure or the System V termio structure. sysv = 0 => (POSIX): struct termios *sg sysv != 0 => (System V): struct termio *sg diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c index 1e5f4c6ec5a1..cf6e7b75dbac 100644 --- a/drivers/char/rtc.c +++ b/drivers/char/rtc.c @@ -486,7 +486,7 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel) yrs = 73; } #endif - /* These limits and adjustments are independant of + /* These limits and adjustments are independent of * whether the chip is in binary mode or not. */ if (yrs > 169) { diff --git a/drivers/hotplug/cpci_hotplug_pci.c b/drivers/hotplug/cpci_hotplug_pci.c index da4d0d77b13b..822a7b9353e6 100644 --- a/drivers/hotplug/cpci_hotplug_pci.c +++ b/drivers/hotplug/cpci_hotplug_pci.c @@ -341,7 +341,7 @@ static int cpci_configure_dev(struct pci_bus *bus, struct pci_dev *dev) /* * Need to explicitly set irq field to 0 so that it'll get assigned - * by the pcibios platform dependant code called by pci_enable_device. + * by the pcibios platform dependent code called by pci_enable_device. */ dev->irq = 0; diff --git a/drivers/hotplug/cpqphp_core.c b/drivers/hotplug/cpqphp_core.c index 5e641fd1c6e3..15b4f37424e6 100644 --- a/drivers/hotplug/cpqphp_core.c +++ b/drivers/hotplug/cpqphp_core.c @@ -42,7 +42,7 @@ #include "cpqphp.h" #include "cpqphp_nvram.h" -#include "../../arch/i386/pci/pci.h" /* horrible hack showing how processor dependant we are... 
*/ +#include "../../arch/i386/pci/pci.h" /* horrible hack showing how processor dependent we are... */ /* Global variables */ diff --git a/drivers/hotplug/cpqphp_pci.c b/drivers/hotplug/cpqphp_pci.c index 1d3bb90efdf2..e8e5328b7666 100644 --- a/drivers/hotplug/cpqphp_pci.c +++ b/drivers/hotplug/cpqphp_pci.c @@ -36,7 +36,7 @@ #include #include "cpqphp.h" #include "cpqphp_nvram.h" -#include "../../arch/i386/pci/pci.h" /* horrible hack showing how processor dependant we are... */ +#include "../../arch/i386/pci/pci.h" /* horrible hack showing how processor dependent we are... */ u8 cpqhp_nic_irq; diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/pci/sis5513.c index c91a7f93c6d5..9812cb5bef09 100644 --- a/drivers/ide/pci/sis5513.c +++ b/drivers/ide/pci/sis5513.c @@ -22,7 +22,7 @@ /* * TODO: - * - Get ridden of SisHostChipInfo[] completness dependancy. + * - Get ridden of SisHostChipInfo[] completness dependency. * - Study drivers/ide/ide-timing.h. * - Are there pre-ATA_16 SiS5513 chips ? -> tune init code for them * or remove ATA_00 define @@ -64,7 +64,7 @@ /* Miscellaneaous flags */ #define SIS5513_LATENCY 0x01 -/* registers layout and init values are chipset family dependant */ +/* registers layout and init values are chipset family dependent */ /* 1/ define families */ #define ATA_00 0x00 #define ATA_16 0x01 diff --git a/drivers/isdn/eicon/eicon.h b/drivers/isdn/eicon/eicon.h index f2c6a043a056..4070b06f2380 100644 --- a/drivers/isdn/eicon/eicon.h +++ b/drivers/isdn/eicon/eicon.h @@ -305,7 +305,7 @@ typedef struct { * Per card driver data */ typedef struct eicon_card { - eicon_hwif hwif; /* Hardware dependant interface */ + eicon_hwif hwif; /* Hardware dependent interface */ DESCRIPTOR *d; /* IDI Descriptor */ u_char ptype; /* Protocol type (1TR6 or Euro) */ u_char bus; /* Bustype (ISA, MCA, PCI) */ diff --git a/drivers/isdn/hisax/isdnl2.c b/drivers/isdn/hisax/isdnl2.c index be30c6ea972f..87142482cf05 100644 --- a/drivers/isdn/hisax/isdnl2.c +++ 
b/drivers/isdn/hisax/isdnl2.c @@ -1445,7 +1445,7 @@ l2_tei_remove(struct FsmInst *fi, int event, void *arg) } static void -l2_st14_persistant_da(struct FsmInst *fi, int event, void *arg) +l2_st14_persistent_da(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; @@ -1456,7 +1456,7 @@ l2_st14_persistant_da(struct FsmInst *fi, int event, void *arg) } static void -l2_st5_persistant_da(struct FsmInst *fi, int event, void *arg) +l2_st5_persistent_da(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; @@ -1469,7 +1469,7 @@ l2_st5_persistant_da(struct FsmInst *fi, int event, void *arg) } static void -l2_st6_persistant_da(struct FsmInst *fi, int event, void *arg) +l2_st6_persistent_da(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; @@ -1480,7 +1480,7 @@ l2_st6_persistant_da(struct FsmInst *fi, int event, void *arg) } static void -l2_persistant_da(struct FsmInst *fi, int event, void *arg) +l2_persistent_da(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; @@ -1615,14 +1615,14 @@ static struct FsmNode L2FnList[] __initdata = {ST_L2_6, EV_L2_FRAME_ERROR, l2_frame_error}, {ST_L2_7, EV_L2_FRAME_ERROR, l2_frame_error_reest}, {ST_L2_8, EV_L2_FRAME_ERROR, l2_frame_error_reest}, - {ST_L2_1, EV_L1_DEACTIVATE, l2_st14_persistant_da}, + {ST_L2_1, EV_L1_DEACTIVATE, l2_st14_persistent_da}, {ST_L2_2, EV_L1_DEACTIVATE, l2_st24_tei_remove}, {ST_L2_3, EV_L1_DEACTIVATE, l2_st3_tei_remove}, - {ST_L2_4, EV_L1_DEACTIVATE, l2_st14_persistant_da}, - {ST_L2_5, EV_L1_DEACTIVATE, l2_st5_persistant_da}, - {ST_L2_6, EV_L1_DEACTIVATE, l2_st6_persistant_da}, - {ST_L2_7, EV_L1_DEACTIVATE, l2_persistant_da}, - {ST_L2_8, EV_L1_DEACTIVATE, l2_persistant_da}, + {ST_L2_4, EV_L1_DEACTIVATE, l2_st14_persistent_da}, + {ST_L2_5, EV_L1_DEACTIVATE, l2_st5_persistent_da}, + {ST_L2_6, EV_L1_DEACTIVATE, l2_st6_persistent_da}, + {ST_L2_7, EV_L1_DEACTIVATE, l2_persistent_da}, + {ST_L2_8, EV_L1_DEACTIVATE, 
l2_persistent_da}, }; #define L2_FN_COUNT (sizeof(L2FnList)/sizeof(struct FsmNode)) diff --git a/drivers/isdn/hisax/l3dss1.c b/drivers/isdn/hisax/l3dss1.c index 0ba4d72ff48b..75e4706ce9b7 100644 --- a/drivers/isdn/hisax/l3dss1.c +++ b/drivers/isdn/hisax/l3dss1.c @@ -2169,7 +2169,7 @@ static void l3dss1_redir_req_early(struct l3_process *pc, u8 pr, void *arg) /***********************************************/ /* handle special commands for this protocol. */ -/* Examples are call independant services like */ +/* Examples are call independent services like */ /* remote operations with dummy callref. */ /***********************************************/ static int l3dss1_cmd_global(struct PStack *st, isdn_ctrl *ic) diff --git a/drivers/isdn/hisax/l3ni1.c b/drivers/isdn/hisax/l3ni1.c index ca1caf72abe0..b9001580be28 100644 --- a/drivers/isdn/hisax/l3ni1.c +++ b/drivers/isdn/hisax/l3ni1.c @@ -2024,7 +2024,7 @@ static void l3ni1_redir_req_early(struct l3_process *pc, u8 pr, void *arg) /***********************************************/ /* handle special commands for this protocol. */ -/* Examples are call independant services like */ +/* Examples are call independent services like */ /* remote operations with dummy callref. */ /***********************************************/ static int l3ni1_cmd_global(struct PStack *st, isdn_ctrl *ic) diff --git a/drivers/isdn/hysdn/hysdn_boot.c b/drivers/isdn/hysdn/hysdn_boot.c index 25e4fe68fa3f..bf8756c0954f 100644 --- a/drivers/isdn/hysdn/hysdn_boot.c +++ b/drivers/isdn/hysdn/hysdn_boot.c @@ -78,7 +78,7 @@ DecryptBuf(struct boot_data *boot, int cnt) } /* DecryptBuf */ /********************************************************************************/ -/* pof_handle_data executes the required actions dependant on the active record */ +/* pof_handle_data executes the required actions dependent on the active record */ /* id. If successful 0 is returned, a negative value shows an error. 
*/ /********************************************************************************/ static int diff --git a/drivers/media/radio/radio-zoltrix.c b/drivers/media/radio/radio-zoltrix.c index 2c309c5e8ed1..70a3ee406503 100644 --- a/drivers/media/radio/radio-zoltrix.c +++ b/drivers/media/radio/radio-zoltrix.c @@ -2,7 +2,7 @@ * (c) 1998 C. van Schaik * * BUGS - * Due to the inconsistancy in reading from the signal flags + * Due to the inconsistency in reading from the signal flags * it is difficult to get an accurate tuned signal. * * It seems that the card is not linear to 0 volume. It cuts off diff --git a/drivers/mtd/chips/jedec.c b/drivers/mtd/chips/jedec.c index b95e3b58d895..337d0d22bfb0 100644 --- a/drivers/mtd/chips/jedec.c +++ b/drivers/mtd/chips/jedec.c @@ -608,7 +608,7 @@ static int flash_erase(struct mtd_info *mtd, struct erase_info *instr) /* Poll the flash for erasure completion, specs say this can take as long as 480 seconds to do all the sectors (for a 2 meg flash). - Erasure time is dependant on chip age, temp and wear.. */ + Erasure time is dependent on chip age, temp and wear.. */ /* This being a generic routine assumes a 32 bit bus. It does read32s and bundles interleved chips into the same grouping. This will work diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c index bcf04cb0be4a..5d0ef4856b07 100644 --- a/drivers/net/acenic.c +++ b/drivers/net/acenic.c @@ -482,7 +482,7 @@ static inline void tasklet_init(struct tasklet_struct *tasklet, * * One advantagous side effect of this allocation approach is that the * entire rx processing can be done without holding any spin lock - * since the rx rings and registers are totally independant of the tx + * since the rx rings and registers are totally independent of the tx * ring and its registers. This of course includes the kmalloc's of * new skb's. Thus start_xmit can run in parallel with rx processing * and the memory allocation on SMP systems. 
diff --git a/drivers/net/declance.c b/drivers/net/declance.c index 98f558dd7f90..e57ccf08b4d6 100644 --- a/drivers/net/declance.c +++ b/drivers/net/declance.c @@ -279,7 +279,7 @@ struct lance_private { lp->tx_old - lp->tx_new-1) /* The lance control ports are at an absolute address, machine and tc-slot - * dependant. + * dependent. * DECstations do only 32-bit access and the LANCE uses 16 bit addresses, * so we have to give the structure an extra member making rap pointing * at the right address diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h index aa1dd7dd8d01..0d68940f9b98 100644 --- a/drivers/net/e1000/e1000_osdep.h +++ b/drivers/net/e1000/e1000_osdep.h @@ -27,7 +27,7 @@ *******************************************************************************/ -/* glue for the OS independant part of e1000 +/* glue for the OS independent part of e1000 * includes register access macros */ diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index b4fb6e2e8de3..38b55e119c90 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -113,7 +113,7 @@ struct sixpack { /* 6pack stuff */ unsigned char tx_delay; - unsigned char persistance; + unsigned char persistence; unsigned char slottime; unsigned char duplex; unsigned char led_state; @@ -302,7 +302,7 @@ static void sp_encaps(struct sixpack *sp, unsigned char *icp, int len) switch (p[0]) { case 1: sp->tx_delay = p[1]; return; - case 2: sp->persistance = p[1]; return; + case 2: sp->persistence = p[1]; return; case 3: sp->slottime = p[1]; return; case 4: /* ignored */ return; case 5: sp->duplex = p[1]; return; @@ -392,7 +392,7 @@ static void sp_xmit_on_air(unsigned long channel) random = random * 17 + 41; - if (((sp->status1 & SIXP_DCD_MASK) == 0) && (random < sp->persistance)) { + if (((sp->status1 & SIXP_DCD_MASK) == 0) && (random < sp->persistence)) { sp->led_state = 0x70; sp->tty->driver.write(sp->tty, 0, &sp->led_state, 1); sp->tx_enable = 1; @@ -469,7 
+469,7 @@ static int sp_open(struct net_device *dev) sp->duplex = 0; sp->tx_delay = SIXP_TXDELAY; - sp->persistance = SIXP_PERSIST; + sp->persistence = SIXP_PERSIST; sp->slottime = SIXP_SLOTTIME; sp->led_state = 0x60; sp->status = 1; diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c index 3487fac205e7..f1ed6647615c 100644 --- a/drivers/net/hamradio/scc.c +++ b/drivers/net/hamradio/scc.c @@ -1703,7 +1703,7 @@ static int scc_net_tx(struct sk_buff *skb, struct net_device *dev) /* * Start transmission if the trx state is idle or - * t_idle hasn't expired yet. Use dwait/persistance/slottime + * t_idle hasn't expired yet. Use dwait/persistence/slottime * algorithm for normal halfduplex operation. */ diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c index d685b95b294c..66348071a670 100644 --- a/drivers/net/hamradio/yam.c +++ b/drivers/net/hamradio/yam.c @@ -34,7 +34,7 @@ * 0.5 F6FBB 01.08.98 Shared IRQs, /proc/net and network statistics * 0.6 F6FBB 25.08.98 Added 1200Bds format * 0.7 F6FBB 12.09.98 Added to the kernel configuration - * 0.8 F6FBB 14.10.98 Fixed slottime/persistance timing bug + * 0.8 F6FBB 14.10.98 Fixed slottime/persistence timing bug * OK1ZIA 2.09.01 Fixed "kfree_skb on hard IRQ" * using dev_kfree_skb_any(). (important in 2.4 kernel) * diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c index c04703381230..e5465e5a08ec 100644 --- a/drivers/net/rrunner.c +++ b/drivers/net/rrunner.c @@ -386,7 +386,7 @@ static int rr_reset(struct net_device *dev) writel(0, ®s->CmdRing[i]); /* - * Why 32 ? is this not cache line size dependant? + * Why 32 ? is this not cache line size dependent? 
*/ writel(RBURST_64|WBURST_64, ®s->PciState); wmb(); diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c index 73b6b55f7475..a59f1872ed2a 100644 --- a/drivers/net/sgiseeq.c +++ b/drivers/net/sgiseeq.c @@ -524,7 +524,7 @@ static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev) * 2) Do no allow the HPC to look at a new descriptor until * we have completely set up it's state. This means, do * not clear HPCDMA_EOX in the current last descritptor - * until the one we are adding looks consistant and could + * until the one we are adding looks consistent and could * be processes right now. * 3) The tx interrupt code must notice when we've added a new * entry and the HPC got to the end of the chain before we diff --git a/drivers/net/sk98lin/skvpd.c b/drivers/net/sk98lin/skvpd.c index f3683e3af4e8..0ac799a0f99e 100644 --- a/drivers/net/sk98lin/skvpd.c +++ b/drivers/net/sk98lin/skvpd.c @@ -88,7 +88,7 @@ * Revision 1.9 1998/09/16 07:33:52 malthoff * remove memcmp() by SK_MEMCMP and * memcpy() by SK_MEMCPY() to be - * independant from the 'C' Standard Library. + * independent from the 'C' Standard Library. * * Revision 1.8 1998/08/19 12:52:35 malthoff * compiler fix: use SK_VPD_KEY instead of S_VPD. diff --git a/drivers/net/sk98lin/skxmac2.c b/drivers/net/sk98lin/skxmac2.c index 32dc2aa38460..67c2fa53361a 100644 --- a/drivers/net/sk98lin/skxmac2.c +++ b/drivers/net/sk98lin/skxmac2.c @@ -194,7 +194,7 @@ * * Revision 1.12 1998/10/14 14:45:04 malthoff * Remove SKERR_SIRQ_E0xx and SKERR_SIRQ_E0xxMSG by - * SKERR_HWI_Exx and SKERR_HWI_E0xxMSG to be independant + * SKERR_HWI_Exx and SKERR_HWI_E0xxMSG to be independent * from the Sirq module. * * Revision 1.11 1998/10/14 13:59:01 gklug @@ -826,7 +826,7 @@ int Port) /* Port Index (MAC_1 + n) */ for (i = 0; i < 3; i++) { /* * The following 2 statements are together endianess - * independant. Remember this when changing. + * independent. Remember this when changing. 
*/ SK_IN16(IoC, (B2_MAC_2 + Port * 8 + i * 2), &SWord); XM_OUT16(IoC, Port, (XM_SA + i * 2), SWord); diff --git a/drivers/net/skfp/cfm.c b/drivers/net/skfp/cfm.c index 7a51b765b4a5..91eb36f3ec3a 100644 --- a/drivers/net/skfp/cfm.c +++ b/drivers/net/skfp/cfm.c @@ -21,15 +21,15 @@ */ /* - * Hardware independant state machine implemantation + * Hardware independent state machine implemantation * The following external SMT functions are referenced : * * queue_event() * - * The following external HW dependant functions are referenced : + * The following external HW dependent functions are referenced : * config_mux() * - * The following HW dependant events are required : + * The following HW dependent events are required : * NONE */ diff --git a/drivers/net/skfp/ecm.c b/drivers/net/skfp/ecm.c index 9dcca7f5581e..7eaab1c3f9d2 100644 --- a/drivers/net/skfp/ecm.c +++ b/drivers/net/skfp/ecm.c @@ -17,23 +17,23 @@ /* SMT ECM Entity Coordination Management - Hardware independant state machine + Hardware independent state machine */ /* - * Hardware independant state machine implemantation + * Hardware independent state machine implemantation * The following external SMT functions are referenced : * * queue_event() * smt_timer_start() * smt_timer_stop() * - * The following external HW dependant functions are referenced : + * The following external HW dependent functions are referenced : * sm_pm_bypass_req() * sm_pm_ls_latch() * sm_pm_get_ls() * - * The following HW dependant events are required : + * The following HW dependent events are required : * NONE * */ diff --git a/drivers/net/skfp/h/osdef1st.h b/drivers/net/skfp/h/osdef1st.h index a6866248abc0..5359eb53008d 100644 --- a/drivers/net/skfp/h/osdef1st.h +++ b/drivers/net/skfp/h/osdef1st.h @@ -13,7 +13,7 @@ ******************************************************************************/ /* - * Operating system-dependant definitions that have to be defined + * Operating system-dependent definitions that have to be defined * 
before any other header files are included. */ diff --git a/drivers/net/skfp/pcmplc.c b/drivers/net/skfp/pcmplc.c index b19b2a81b6ac..4be8f3a9e8f4 100644 --- a/drivers/net/skfp/pcmplc.c +++ b/drivers/net/skfp/pcmplc.c @@ -20,19 +20,19 @@ */ /* - * Hardware independant state machine implemantation + * Hardware independent state machine implemantation * The following external SMT functions are referenced : * * queue_event() * smt_timer_start() * smt_timer_stop() * - * The following external HW dependant functions are referenced : + * The following external HW dependent functions are referenced : * sm_pm_control() * sm_ph_linestate() * sm_pm_ls_latch() * - * The following HW dependant events are required : + * The following HW dependent events are required : * PC_QLS * PC_ILS * PC_HLS @@ -714,7 +714,7 @@ int cmd; mib = phy->mib ; /* - * general transitions independant of state + * general transitions independent of state */ switch (cmd) { case PC_STOP : diff --git a/drivers/net/skfp/rmt.c b/drivers/net/skfp/rmt.c index 473eb0c9cdfe..5771dc42ced8 100644 --- a/drivers/net/skfp/rmt.c +++ b/drivers/net/skfp/rmt.c @@ -20,18 +20,18 @@ */ /* - * Hardware independant state machine implemantation + * Hardware independent state machine implemantation * The following external SMT functions are referenced : * * queue_event() * smt_timer_start() * smt_timer_stop() * - * The following external HW dependant functions are referenced : + * The following external HW dependent functions are referenced : * sm_ma_control() * sm_mac_check_beacon_claim() * - * The following HW dependant events are required : + * The following HW dependent events are required : * RM_RING_OP * RM_RING_NON_OP * RM_MY_BEACON diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c index 52f9d11ceba6..4a430f99b947 100644 --- a/drivers/net/skfp/skfddi.c +++ b/drivers/net/skfp/skfddi.c @@ -33,7 +33,7 @@ * The driver architecture is based on the DEC FDDI driver by * Lawrence V. 
Stefani and several ethernet drivers. * I also used an existing Windows NT miniport driver. - * All hardware dependant fuctions are handled by the SysKonnect + * All hardware dependent fuctions are handled by the SysKonnect * Hardware Module. * The only headerfiles that are directly related to this source * are skfddi.c, h/types.h, h/osdef1st.h, h/targetos.h. @@ -1729,7 +1729,7 @@ u_long dma_master(struct s_smc * smc, void *virt, int len, int flag) * dma_complete * * The hardware module calls this routine when it has completed a DMA - * transfer. If the operating system dependant module has set up the DMA + * transfer. If the operating system dependent module has set up the DMA * channel via dma_master() (e.g. Windows NT or AIX) it should clean up * the DMA channel. * Args diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c index 6f65949dd7ea..bf13c8182eb8 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ b/drivers/net/wan/lmc/lmc_main.c @@ -1074,7 +1074,7 @@ int lmc_probe (struct net_device *dev) /*fold00*/ * This prevents taking someone else's device. * * Check either the subvendor or the subdevice, some systems reverse - * the setting in the bois, seems to be version and arch dependant? + * the setting in the bois, seems to be version and arch dependent? * Fix the two variables * */ diff --git a/drivers/net/wan/lmc/lmc_ver.h b/drivers/net/wan/lmc/lmc_ver.h index 1e9e2f5f7ddb..dd1c592813e4 100644 --- a/drivers/net/wan/lmc/lmc_ver.h +++ b/drivers/net/wan/lmc/lmc_ver.h @@ -25,7 +25,7 @@ * made the souce code not only hard to read but version problems hard * to track down. If I'm overiding a function/etc with something in * this file it will be prefixed by "LMC_" which will mean look - * here for the version dependant change that's been done. + * here for the version dependent change that's been done. 
* */ diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index d1e1ae8f6c1b..ec86ccb3bfda 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c @@ -5144,7 +5144,7 @@ static int airo_set_scan(struct net_device *dev, /*------------------------------------------------------------------*/ /* - * Translate scan data returned from the card to a card independant + * Translate scan data returned from the card to a card independent * format that the Wireless Tools will understand - Jean II */ static inline char *airo_translate_scan(struct net_device *dev, diff --git a/drivers/net/wireless/orinoco.h b/drivers/net/wireless/orinoco.h index 8317270a46da..8608626def40 100644 --- a/drivers/net/wireless/orinoco.h +++ b/drivers/net/wireless/orinoco.h @@ -36,7 +36,7 @@ struct orinoco_key { struct orinoco_private { - void *card; /* Pointer to card dependant structure */ + void *card; /* Pointer to card dependent structure */ int (*hard_reset)(struct orinoco_private *); /* Synchronisation stuff */ diff --git a/drivers/sbus/char/aurora.c b/drivers/sbus/char/aurora.c index cb00080d2fc9..1a27aa713c88 100644 --- a/drivers/sbus/char/aurora.c +++ b/drivers/sbus/char/aurora.c @@ -1046,7 +1046,7 @@ static void aurora_change_speed(struct Aurora_board *bp, struct Aurora_port *por &bp->r[chip]->r[CD180_MSVR]); } - /* Now we must calculate some speed dependant things. */ + /* Now we must calculate some speed dependent things. */ /* Set baud rate for port. */ tmp = (((bp->oscfreq + baud/2) / baud + diff --git a/drivers/sbus/char/bbc_envctrl.c b/drivers/sbus/char/bbc_envctrl.c index de7c9eadb63d..a5ac67cc498f 100644 --- a/drivers/sbus/char/bbc_envctrl.c +++ b/drivers/sbus/char/bbc_envctrl.c @@ -30,7 +30,7 @@ static int errno; * * The max1617 is capable of being programmed with power-off * temperature values, one low limit and one high limit. These - * can be controlled independantly for the cpu or ambient temperature. 
+ * can be controlled independently for the cpu or ambient temperature. * If a limit is violated, the power is simply shut off. The frequency * with which the max1617 does temperature sampling can be controlled * as well. diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 416cf07afb38..8e3ad622cb8b 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -392,8 +392,8 @@ static char *container_types[] = { * Arguments: [1] pointer to void [1] int * * Purpose: Sets SCSI inquiry data strings for vendor, product - * and revision level. Allows strings to be set in platform dependant - * files instead of in OS dependant driver source. + * and revision level. Allows strings to be set in platform dependent + * files instead of in OS dependent driver source. */ static void setinqstr(int devtype, void *data, int tindex) diff --git a/drivers/scsi/aic7xxx/aic79xx_inline.h b/drivers/scsi/aic7xxx/aic79xx_inline.h index e41701472bfc..bdca5c17b33f 100644 --- a/drivers/scsi/aic7xxx/aic79xx_inline.h +++ b/drivers/scsi/aic7xxx/aic79xx_inline.h @@ -769,7 +769,7 @@ ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb) ahd_setup_scb_common(ahd, scb); /* - * Make sure our data is consistant from the + * Make sure our data is consistent from the * perspective of the adapter. */ ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.h b/drivers/scsi/aic7xxx/aic79xx_osm.h index 5913588f86d2..e7c7c0896113 100644 --- a/drivers/scsi/aic7xxx/aic79xx_osm.h +++ b/drivers/scsi/aic7xxx/aic79xx_osm.h @@ -321,7 +321,7 @@ struct ahd_cmd { /* * A per probed device structure used to deal with some error recovery * scenarios that the Linux mid-layer code just doesn't know how to - * handle. The structure allocated for a device only becomes persistant + * handle. 
The structure allocated for a device only becomes persistent * after a successfully completed inquiry command to the target when * that inquiry data indicates a lun is present. */ diff --git a/drivers/scsi/aic7xxx/aic7xxx_inline.h b/drivers/scsi/aic7xxx/aic7xxx_inline.h index 53de3904b10b..c38b0cb97d36 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_inline.h +++ b/drivers/scsi/aic7xxx/aic7xxx_inline.h @@ -460,7 +460,7 @@ ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb) ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag; /* - * Make sure our data is consistant from the + * Make sure our data is consistent from the * perspective of the adapter. */ ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h index 4baa42a415b6..e400114b9ca7 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm.h +++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h @@ -334,7 +334,7 @@ struct ahc_cmd { /* * A per probed device structure used to deal with some error recovery * scenarios that the Linux mid-layer code just doesn't know how to - * handle. The structure allocated for a device only becomes persistant + * handle. The structure allocated for a device only becomes persistent * after a successfully completed inquiry command to the target when * that inquiry data indicates a lun is present. */ diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index bb942cd73b6e..640ba4d96e69 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c @@ -443,7 +443,7 @@ * Mon Aug 6 14:59:29 BST 2001 - "Michael Johnson" * * Make the HP print formatting and check for buggy firmware runtime not - * ifdef dependant. + * ifdef dependent. 
* * * Version 1.17d diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c index 7c781dbfc344..dc61815d69e2 100644 --- a/drivers/scsi/qla1280.c +++ b/drivers/scsi/qla1280.c @@ -107,7 +107,7 @@ - Provide compat macros for pci_enable_device(), pci_find_subsys() and scsi_set_pci_device() - Call scsi_set_pci_device() for all devices - - Reduce size of kernel version dependant device probe code + - Reduce size of kernel version dependent device probe code - Move duplicate probe/init code to separate function - Handle error if qla1280_mem_alloc() fails - Kill OFFSET() macro and use Linux's PCI definitions instead diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c index 0356142db1aa..df8fa6f3de3f 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c @@ -1843,7 +1843,7 @@ static void sym_free_resources(hcb_p np) pci_unmap_mem(np->s.ram_va, np->ram_ws); #endif /* - * Free O/S independant resources. + * Free O/S independent resources. */ sym_hcb_free(np); @@ -2043,7 +2043,7 @@ sym_attach (Scsi_Host_Template *tpnt, int unit, sym_device *dev) } /* - * Perform O/S independant stuff. + * Perform O/S independent stuff. */ if (sym_hcb_attach(np, fw, nvram)) goto attach_failed; diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.h b/drivers/scsi/sym53c8xx_2/sym_glue.h index 94bcef34d906..b711718b9df6 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.h +++ b/drivers/scsi/sym53c8xx_2/sym_glue.h @@ -188,7 +188,7 @@ typedef struct sym_sccb *sccb_p; typedef struct sym_shcb *shcb_p; /* - * Define a reference to the O/S dependant IO request. + * Define a reference to the O/S dependent IO request. 
*/ typedef Scsi_Cmnd *cam_ccb_p; /* Generic */ typedef Scsi_Cmnd *cam_scsiio_p;/* SCSI I/O */ diff --git a/drivers/usb/misc/atmsar.c b/drivers/usb/misc/atmsar.c index 5f6a7f033361..fce7041d51a2 100644 --- a/drivers/usb/misc/atmsar.c +++ b/drivers/usb/misc/atmsar.c @@ -45,7 +45,7 @@ * - No more in-buffer rewriting for cloned buffers. * - Removed the PII specific CFLAGS in the Makefile. * - * 0.2.1: - removed dependancy on alloc_tx. tis presented problems when + * 0.2.1: - removed dependency on alloc_tx. tis presented problems when * using this with the br2684 code. * * 0.2: - added AAL0 reassembly diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c index fd8ae959defb..3cce9b7d1e73 100644 --- a/drivers/usb/serial/safe_serial.c +++ b/drivers/usb/serial/safe_serial.c @@ -256,7 +256,7 @@ static void safe_read_bulk_callback (struct urb *urb, struct pt_regs *regs) } tty_flip_buffer_push (port->tty); } else { - err ("%s - inconsistant lengths %d:%d", __FUNCTION__, + err ("%s - inconsistent lengths %d:%d", __FUNCTION__, actual_length, length); } } else { diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index aa4af2b0a948..5fb910c1a6e2 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c @@ -706,7 +706,7 @@ static int storage_probe(struct usb_interface *intf, /* * Set the handler pointers based on the protocol - * Again, this data is persistant across reattachments + * Again, this data is persistent across reattachments */ switch (ss->protocol) { case US_PR_CB: diff --git a/drivers/video/skeletonfb.c b/drivers/video/skeletonfb.c index 57b69168e37e..4d0bd916ec75 100644 --- a/drivers/video/skeletonfb.c +++ b/drivers/video/skeletonfb.c @@ -513,7 +513,7 @@ void xxxfb_poll(struct fb_info *info, poll_table *wait) * for a graphics card take a specific amount of time. 
* Often we have to wait for the accelerator to finish * its operation before we can write to the framebuffer - * so we can have consistant display output. + * so we can have consistent display output. * * @info: frame buffer structure that represents a single frame buffer */ diff --git a/fs/befs/ChangeLog b/fs/befs/ChangeLog index 8e09a0bd8ebb..6774a4e815b2 100644 --- a/fs/befs/ChangeLog +++ b/fs/befs/ChangeLog @@ -60,7 +60,7 @@ Version 0.63 (2002-01-31) * Documentation improvements in source. [WD] -* Makefile fix for independant module when CONFIG_MODVERSION is set in +* Makefile fix for independent module when CONFIG_MODVERSION is set in kernel config [Pavel Roskin ] * Compile warning fix for namei.c. [Sergey S. Kostyliov ] diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c index 9a33c941cd5c..e0d448b3a6c9 100644 --- a/fs/partitions/ldm.c +++ b/fs/partitions/ldm.c @@ -510,7 +510,7 @@ static BOOL ldm_validate_vmdb (struct block_device *bdev, unsigned long base, /* Are there uncommitted transactions? */ if (BE16(data + 0x10) != 0x01) { - ldm_crit ("Database is not in a consistant state. Aborting."); + ldm_crit ("Database is not in a consistent state. Aborting."); goto out; } diff --git a/include/asm-alpha/pci.h b/include/asm-alpha/pci.h index 923f913e544a..472a96bdd49b 100644 --- a/include/asm-alpha/pci.h +++ b/include/asm-alpha/pci.h @@ -68,16 +68,16 @@ extern inline void pcibios_penalize_isa_irq(int irq) decisions. */ #define PCI_DMA_BUS_IS_PHYS 0 -/* Allocate and map kernel buffer using consistant mode DMA for PCI +/* Allocate and map kernel buffer using consistent mode DMA for PCI device. Returns non-NULL cpu-view pointer to the buffer if successful and sets *DMA_ADDRP to the pci side dma address as well, else DMA_ADDRP is undefined. */ extern void *pci_alloc_consistent(struct pci_dev *, size_t, dma_addr_t *); -/* Free and unmap a consistant DMA buffer. CPU_ADDR and DMA_ADDR must - be values that were returned from pci_alloc_consistant. 
SIZE must - be the same as what as passed into pci_alloc_consistant. +/* Free and unmap a consistent DMA buffer. CPU_ADDR and DMA_ADDR must + be values that were returned from pci_alloc_consistent. SIZE must + be the same as what as passed into pci_alloc_consistent. References to the memory and mappings assosciated with CPU_ADDR or DMA_ADDR past this call are illegal. */ @@ -139,7 +139,7 @@ extern int pci_map_sg(struct pci_dev *, struct scatterlist *, int, int); extern void pci_unmap_sg(struct pci_dev *, struct scatterlist *, int, int); -/* Make physical memory consistant for a single streaming mode DMA +/* Make physical memory consistent for a single streaming mode DMA translation after a transfer. If you perform a pci_map_single() but wish to interrogate the @@ -155,7 +155,7 @@ pci_dma_sync_single(struct pci_dev *dev, dma_addr_t dma_addr, long size, /* Nothing to do. */ } -/* Make physical memory consistant for a set of streaming mode DMA +/* Make physical memory consistent for a set of streaming mode DMA translations after a transfer. The same as pci_dma_sync_single but for a scatter-gather list, same rules and usage. 
*/ diff --git a/include/asm-cris/io.h b/include/asm-cris/io.h index 607b6291b366..82a06f841c9f 100644 --- a/include/asm-cris/io.h +++ b/include/asm-cris/io.h @@ -246,7 +246,7 @@ extern inline void * ioremap (unsigned long offset, unsigned long size) #define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void *)(b),(c),(d)) -/* The following is junk needed for the arch-independant code but which +/* The following is junk needed for the arch-independent code but which * we never use in the CRIS port */ diff --git a/include/asm-generic/rmap.h b/include/asm-generic/rmap.h index d96b2e3fed98..5932b91fa2e7 100644 --- a/include/asm-generic/rmap.h +++ b/include/asm-generic/rmap.h @@ -3,7 +3,7 @@ /* * linux/include/asm-generic/rmap.h * - * Architecture dependant parts of the reverse mapping code, + * Architecture dependent parts of the reverse mapping code, * this version should work for most architectures with a * 'normal' page table layout. * diff --git a/include/asm-generic/rtc.h b/include/asm-generic/rtc.h index 845641b06b0c..001667ef0d35 100644 --- a/include/asm-generic/rtc.h +++ b/include/asm-generic/rtc.h @@ -147,7 +147,7 @@ static inline int set_rtc_time(struct rtc_time *time) yrs = 73; } #endif - /* These limits and adjustments are independant of + /* These limits and adjustments are independent of * whether the chip is in binary mode or not. */ if (yrs > 169) { diff --git a/include/asm-mips/isadep.h b/include/asm-mips/isadep.h index 3cd1eb8eb58a..b3453bb3ba34 100644 --- a/include/asm-mips/isadep.h +++ b/include/asm-mips/isadep.h @@ -1,5 +1,5 @@ /* - * Various ISA level dependant constants. + * Various ISA level dependent constants. * Most of the following constants reflect the different layout * of Coprocessor 0 registers. 
* diff --git a/include/asm-mips64/r10kcache.h b/include/asm-mips64/r10kcache.h index 564ac6cc79aa..984f2f6e6ea2 100644 --- a/include/asm-mips64/r10kcache.h +++ b/include/asm-mips64/r10kcache.h @@ -25,7 +25,7 @@ #define ic_lsize 64 #define dc_lsize 32 -/* These are configuration dependant. */ +/* These are configuration dependent. */ #define scache_size() ({ \ unsigned long __res; \ __res = (read_32bit_cp0_register(CP0_CONFIG) >> 16) & 3; \ diff --git a/include/asm-ppc/io.h b/include/asm-ppc/io.h index 29eaaae63187..1616aede8ce5 100644 --- a/include/asm-ppc/io.h +++ b/include/asm-ppc/io.h @@ -36,7 +36,7 @@ #define _IO_BASE isa_io_base #define _ISA_MEM_BASE isa_mem_base #define PCI_DRAM_OFFSET pci_dram_offset -#endif /* Platform-dependant I/O */ +#endif /* Platform-dependent I/O */ extern unsigned long isa_io_base; extern unsigned long isa_mem_base; diff --git a/include/asm-ppc/system.h b/include/asm-ppc/system.h index ca47022e92ea..837f9bc6bab5 100644 --- a/include/asm-ppc/system.h +++ b/include/asm-ppc/system.h @@ -22,7 +22,7 @@ * mb() prevents loads and stores being reordered across this point. * rmb() prevents loads being reordered across this point. * wmb() prevents stores being reordered across this point. - * read_barrier_depends() prevents data-dependant loads being reordered + * read_barrier_depends() prevents data-dependent loads being reordered * across this point (nop on PPC). * * We can use the eieio instruction for wmb, but since it doesn't diff --git a/include/asm-ppc64/system.h b/include/asm-ppc64/system.h index 68f31b97c314..c78f830ed823 100644 --- a/include/asm-ppc64/system.h +++ b/include/asm-ppc64/system.h @@ -25,7 +25,7 @@ * mb() prevents loads and stores being reordered across this point. * rmb() prevents loads being reordered across this point. * wmb() prevents stores being reordered across this point. 
- * read_barrier_depends() prevents data-dependant loads being reordered + * read_barrier_depends() prevents data-dependent loads being reordered * across this point (nop on PPC). * * We can use the eieio instruction for wmb, but since it doesn't diff --git a/include/asm-v850/pci.h b/include/asm-v850/pci.h index 5ec5944d2b37..b915819c609b 100644 --- a/include/asm-v850/pci.h +++ b/include/asm-v850/pci.h @@ -36,7 +36,7 @@ extern void pci_unmap_single (struct pci_dev *pdev, dma_addr_t dma_addr, size_t size, int dir); -/* Make physical memory consistant for a single streaming mode DMA +/* Make physical memory consistent for a single streaming mode DMA translation after a transfer. If you perform a pci_map_single() but wish to interrogate the diff --git a/include/linux/agp_backend.h b/include/linux/agp_backend.h index e8fec2776624..36568e4a3d14 100644 --- a/include/linux/agp_backend.h +++ b/include/linux/agp_backend.h @@ -160,7 +160,7 @@ extern agp_memory *agp_allocate_memory(size_t, u32); * an u32 argument of the type of memory to be allocated. * Every agp bridge device will allow you to allocate * AGP_NORMAL_MEMORY which maps to physical ram. Any other - * type is device dependant. + * type is device dependent. * * It returns NULL whenever memory is unavailable. 
* diff --git a/include/linux/apm_bios.h b/include/linux/apm_bios.h index ceffd587b7a8..b3b981af768d 100644 --- a/include/linux/apm_bios.h +++ b/include/linux/apm_bios.h @@ -45,7 +45,7 @@ struct apm_bios_info { #define APM_BIOS_DISENGAGED 0x0010 /* - * Data for APM that is persistant across module unload/load + * Data for APM that is persistent across module unload/load */ struct apm_info { struct apm_bios_info bios; diff --git a/include/linux/isdnif.h b/include/linux/isdnif.h index fed344ec7a41..06265081fa48 100644 --- a/include/linux/isdnif.h +++ b/include/linux/isdnif.h @@ -62,7 +62,7 @@ /* */ /* The proceed command holds a incoming call in a state to leave processes */ /* enough time to check whether ist should be accepted. */ -/* The PROT_IO Command extends the interface to make protocol dependant */ +/* The PROT_IO Command extends the interface to make protocol dependent */ /* features available (call diversion, call waiting...). */ /* */ /* The PROT_IO Command is executed with the desired driver id and the arg */ diff --git a/include/linux/sdla_x25.h b/include/linux/sdla_x25.h index 9827e74faaf2..c110c1a835f7 100644 --- a/include/linux/sdla_x25.h +++ b/include/linux/sdla_x25.h @@ -157,7 +157,7 @@ typedef struct X25Cmd #define X25RES_PROTO_VIOLATION 0x41 /* protocol violation occured */ #define X25RES_PKT_TIMEOUT 0x42 /* X.25 packet time out */ #define X25RES_PKT_RETRY_LIMIT 0x43 /* X.25 packet retry limit exceeded */ -/*----- Command-dependant results -----*/ +/*----- Command-dependent results -----*/ #define X25RES_LINK_DISC 0x00 /* HDLC_LINK_STATUS */ #define X25RES_LINK_IN_ABM 0x01 /* HDLC_LINK_STATUS */ #define X25RES_NO_DATA 0x01 /* HDLC_READ/READ_TRACE_DATA*/ diff --git a/net/irda/iriap.c b/net/irda/iriap.c index edf9a77c078c..d996cb5e0496 100644 --- a/net/irda/iriap.c +++ b/net/irda/iriap.c @@ -990,7 +990,7 @@ int irias_proc_read(char *buf, char **start, off_t offset, int len) len += sprintf(buf+len, "\n"); /* Careful for priority inversions here ! 
- * All other uses of attrib spinlock are independant of + * All other uses of attrib spinlock are independent of * the object spinlock, so we are safe. Jean II */ spin_lock(&obj->attribs->hb_spinlock); diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c index ba525b82f994..3ab62fa1d884 100644 --- a/net/irda/irlmp.c +++ b/net/irda/irlmp.c @@ -1623,7 +1623,7 @@ int irlmp_slsap_inuse(__u8 slsap_sel) ASSERT(lap->magic == LMP_LAP_MAGIC, return TRUE;); /* Careful for priority inversions here ! - * All other uses of attrib spinlock are independant of + * All other uses of attrib spinlock are independent of * the object spinlock, so we are safe. Jean II */ spin_lock(&lap->lsaps->hb_spinlock); @@ -1786,7 +1786,7 @@ int irlmp_proc_read(char *buf, char **start, off_t offset, int len) len += sprintf(buf+len, "\n"); /* Careful for priority inversions here ! - * All other uses of attrib spinlock are independant of + * All other uses of attrib spinlock are independent of * the object spinlock, so we are safe. Jean II */ spin_lock(&lap->lsaps->hb_spinlock); diff --git a/net/irda/irnet/irnet.h b/net/irda/irnet/irnet.h index 612030769391..97381500fd20 100644 --- a/net/irda/irnet/irnet.h +++ b/net/irda/irnet/irnet.h @@ -284,7 +284,7 @@ /* * This set of flags enable and disable all the various warning, * error and debug message of this driver. 
- * Each section can be enabled and disabled independantly + * Each section can be enabled and disabled independently */ /* In the PPP part */ #define DEBUG_CTRL_TRACE 0 /* Control channel */ diff --git a/sound/core/hwdep.c b/sound/core/hwdep.c index 5826ea7d5419..be0245dbfdf0 100644 --- a/sound/core/hwdep.c +++ b/sound/core/hwdep.c @@ -334,7 +334,7 @@ static int snd_hwdep_dev_register(snd_device_t *device) if ((err = snd_register_device(SNDRV_DEVICE_TYPE_HWDEP, hwdep->card, hwdep->device, &snd_hwdep_reg, name)) < 0) { - snd_printk(KERN_ERR "unable to register hardware dependant device %i:%i\n", + snd_printk(KERN_ERR "unable to register hardware dependent device %i:%i\n", hwdep->card->number, hwdep->device); snd_hwdep_devices[idx] = NULL; up(®ister_mutex); diff --git a/sound/core/seq/seq_midi_emul.c b/sound/core/seq/seq_midi_emul.c index 8afa4df16428..0bc8ee757b05 100644 --- a/sound/core/seq/seq_midi_emul.c +++ b/sound/core/seq/seq_midi_emul.c @@ -60,7 +60,7 @@ static void reset_all_channels(snd_midi_channel_set_t *chset); /* - * Process an event in a driver independant way. This means dealing + * Process an event in a driver independent way. This means dealing * with RPN, NRPN, SysEx etc that are defined for common midi applications * such as GM, GS and XG. * There modes that this module will run in are: @@ -258,7 +258,7 @@ note_off(snd_midi_op_t *ops, void *drv, snd_midi_channel_t *chan, int note, int } /* - * Do all driver independant operations for this controler and pass + * Do all driver independent operations for this controler and pass * events that need to take place immediately to the driver. 
*/ static void diff --git a/sound/oss/ac97_codec.c b/sound/oss/ac97_codec.c index 62aa94ef6df2..689cac0755ec 100644 --- a/sound/oss/ac97_codec.c +++ b/sound/oss/ac97_codec.c @@ -446,7 +446,7 @@ static void ac97_set_mixer(struct ac97_codec *codec, unsigned int oss_mixer, uns } /* read or write the recmask, the ac97 can really have left and right recording - inputs independantly set, but OSS doesn't seem to want us to express that to + inputs independently set, but OSS doesn't seem to want us to express that to the user. the caller guarantees that we have a supported bit set, and they must be holding the card's spinlock */ static int ac97_recmask_io(struct ac97_codec *codec, int rw, int mask) diff --git a/sound/oss/maestro.c b/sound/oss/maestro.c index 287b0619a414..7a22f5f26e84 100644 --- a/sound/oss/maestro.c +++ b/sound/oss/maestro.c @@ -793,7 +793,7 @@ static unsigned int ac97_oss_rm[] = { /* read or write the recmask the ac97 can really have left and right recording - inputs independantly set, but OSS doesn't seem to + inputs independently set, but OSS doesn't seem to want us to express that to the user. the caller guarantees that we have a supported bit set, and they must be holding the card's spinlock */ -- cgit v1.2.3 From 8eae299835cf161a93a5acd890cebf0f83f2a2ce Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Thu, 6 Feb 2003 20:25:24 -0800 Subject: Split up "struct signal_struct" into "signal" and "sighand" parts. This is required to make the old LinuxThread semantics work together with the fixed-for-POSIX full signal sharing. A traditional CLONE_SIGHAND thread (LinuxThread) will not see any other shared signal state, while a new-style CLONE_THREAD thread will share all of it. This way the two methods don't confuse each other. 
--- arch/i386/kernel/init_task.c | 1 + arch/i386/kernel/signal.c | 22 +-- arch/i386/kernel/vm86.c | 4 +- drivers/block/loop.c | 4 +- drivers/char/n_tty.c | 2 +- drivers/scsi/scsi_error.c | 4 +- drivers/usb/storage/usb.c | 4 +- fs/autofs/waitq.c | 12 +- fs/autofs4/waitq.c | 12 +- fs/exec.c | 107 ++++++++------ fs/jbd/journal.c | 4 +- fs/lockd/clntproc.c | 18 +-- fs/lockd/svc.c | 12 +- fs/nfsd/nfssvc.c | 8 +- fs/proc/array.c | 8 +- include/linux/init_task.h | 9 +- include/linux/sched.h | 18 ++- include/linux/slab.h | 3 +- kernel/exit.c | 34 ++--- kernel/fork.c | 65 ++++++--- kernel/kmod.c | 12 +- kernel/signal.c | 322 +++++++++++++++++++++++-------------------- kernel/workqueue.c | 8 +- mm/pdflush.c | 4 +- net/sunrpc/clnt.c | 10 +- net/sunrpc/sched.c | 12 +- net/sunrpc/svc.c | 4 +- security/capability.c | 2 +- 28 files changed, 410 insertions(+), 315 deletions(-) (limited to 'include/linux') diff --git a/arch/i386/kernel/init_task.c b/arch/i386/kernel/init_task.c index 4eb40a9582c7..a2a7181dac62 100644 --- a/arch/i386/kernel/init_task.c +++ b/arch/i386/kernel/init_task.c @@ -11,6 +11,7 @@ static struct fs_struct init_fs = INIT_FS; static struct files_struct init_files = INIT_FILES; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); +static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); /* diff --git a/arch/i386/kernel/signal.c b/arch/i386/kernel/signal.c index 42160fc0322d..90561449cc07 100644 --- a/arch/i386/kernel/signal.c +++ b/arch/i386/kernel/signal.c @@ -37,11 +37,11 @@ sys_sigsuspend(int history0, int history1, old_sigset_t mask) sigset_t saveset; mask &= _BLOCKABLE; - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); saveset = current->blocked; siginitset(¤t->blocked, mask); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); regs->eax = -EINTR; while (1) { @@ -66,11 +66,11 @@ sys_rt_sigsuspend(sigset_t 
*unewset, size_t sigsetsize) return -EFAULT; sigdelsetmask(&newset, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); saveset = current->blocked; current->blocked = newset; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); regs->eax = -EINTR; while (1) { @@ -224,10 +224,10 @@ asmlinkage int sys_sigreturn(unsigned long __unused) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (restore_sigcontext(regs, &frame->sc, &eax)) goto badframe; @@ -252,10 +252,10 @@ asmlinkage int sys_rt_sigreturn(unsigned long __unused) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &eax)) goto badframe; @@ -513,7 +513,7 @@ static void handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset, struct pt_regs * regs) { - struct k_sigaction *ka = ¤t->sig->action[sig-1]; + struct k_sigaction *ka = ¤t->sighand->action[sig-1]; /* Are we from a system call? 
*/ if (regs->orig_eax >= 0) { @@ -547,11 +547,11 @@ handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset, ka->sa.sa_handler = SIG_DFL; if (!(ka->sa.sa_flags & SA_NODEFER)) { - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); sigaddset(¤t->blocked,sig); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); } } diff --git a/arch/i386/kernel/vm86.c b/arch/i386/kernel/vm86.c index facb01379561..269cf00ddf4e 100644 --- a/arch/i386/kernel/vm86.c +++ b/arch/i386/kernel/vm86.c @@ -512,10 +512,10 @@ int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno return 1; /* we let this handle by the calling routine */ if (current->ptrace & PT_PTRACED) { unsigned long flags; - spin_lock_irqsave(¤t->sig->siglock, flags); + spin_lock_irqsave(¤t->sighand->siglock, flags); sigdelset(¤t->blocked, SIGTRAP); recalc_sigpending(); - spin_unlock_irqrestore(¤t->sig->siglock, flags); + spin_unlock_irqrestore(¤t->sighand->siglock, flags); } send_sig(SIGTRAP, current, 1); current->thread.trap_no = trapno; diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 86d653d168df..71ae15a0c6fd 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -584,10 +584,10 @@ static int loop_thread(void *data) hence, it mustn't be stopped at all because it could be indirectly used during suspension */ - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); sigfillset(¤t->blocked); flush_signals(current); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); set_user_nice(current, -20); diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c index 34304a4dcb7f..ccf36427d4b0 100644 --- a/drivers/char/n_tty.c +++ b/drivers/char/n_tty.c @@ -787,7 +787,7 @@ static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp, int is_ignored(int sig) { return (sigismember(¤t->blocked, sig) || - 
current->sig->action[sig-1].sa.sa_handler == SIG_IGN); + current->sighand->action[sig-1].sa.sa_handler == SIG_IGN); } static void n_tty_set_termios(struct tty_struct *tty, struct termios * old) diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 28ef5461ec78..f21af2d1c1e2 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -1575,10 +1575,10 @@ void scsi_error_handler(void *data) int rtn; DECLARE_MUTEX_LOCKED(sem); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); sigfillset(¤t->blocked); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); lock_kernel(); diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index 5fb910c1a6e2..e4a98569d058 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c @@ -301,12 +301,12 @@ static int usb_stor_control_thread(void * __us) daemonize(); /* avoid getting signals */ - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); flush_signals(current); current->flags |= PF_IOTHREAD; sigfillset(¤t->blocked); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); /* set our name for identification purposes */ sprintf(current->comm, "usb-storage"); diff --git a/fs/autofs/waitq.c b/fs/autofs/waitq.c index c212015631b9..6c82dc144b33 100644 --- a/fs/autofs/waitq.c +++ b/fs/autofs/waitq.c @@ -70,10 +70,10 @@ static int autofs_write(struct file *file, const void *addr, int bytes) /* Keep the currently executing process from receiving a SIGPIPE unless it was already supposed to get one */ if (wr == -EPIPE && !sigpipe) { - spin_lock_irqsave(¤t->sig->siglock, flags); + spin_lock_irqsave(¤t->sighand->siglock, flags); sigdelset(¤t->pending.signal, SIGPIPE); recalc_sigpending(); - spin_unlock_irqrestore(¤t->sig->siglock, flags); + spin_unlock_irqrestore(¤t->sighand->siglock, flags); } return (bytes > 0); @@ -161,18 +161,18 @@ int autofs_wait(struct 
autofs_sb_info *sbi, struct qstr *name) sigset_t oldset; unsigned long irqflags; - spin_lock_irqsave(¤t->sig->siglock, irqflags); + spin_lock_irqsave(¤t->sighand->siglock, irqflags); oldset = current->blocked; siginitsetinv(¤t->blocked, SHUTDOWN_SIGS & ~oldset.sig[0]); recalc_sigpending(); - spin_unlock_irqrestore(¤t->sig->siglock, irqflags); + spin_unlock_irqrestore(¤t->sighand->siglock, irqflags); interruptible_sleep_on(&wq->queue); - spin_lock_irqsave(¤t->sig->siglock, irqflags); + spin_lock_irqsave(¤t->sighand->siglock, irqflags); current->blocked = oldset; recalc_sigpending(); - spin_unlock_irqrestore(¤t->sig->siglock, irqflags); + spin_unlock_irqrestore(¤t->sighand->siglock, irqflags); } else { DPRINTK(("autofs_wait: skipped sleeping\n")); } diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c index 7af5f71e16b9..c1b7279cae81 100644 --- a/fs/autofs4/waitq.c +++ b/fs/autofs4/waitq.c @@ -74,10 +74,10 @@ static int autofs4_write(struct file *file, const void *addr, int bytes) /* Keep the currently executing process from receiving a SIGPIPE unless it was already supposed to get one */ if (wr == -EPIPE && !sigpipe) { - spin_lock_irqsave(¤t->sig->siglock, flags); + spin_lock_irqsave(¤t->sighand->siglock, flags); sigdelset(¤t->pending.signal, SIGPIPE); recalc_sigpending(); - spin_unlock_irqrestore(¤t->sig->siglock, flags); + spin_unlock_irqrestore(¤t->sighand->siglock, flags); } return (bytes > 0); @@ -198,18 +198,18 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct qstr *name, sigset_t oldset; unsigned long irqflags; - spin_lock_irqsave(¤t->sig->siglock, irqflags); + spin_lock_irqsave(¤t->sighand->siglock, irqflags); oldset = current->blocked; siginitsetinv(¤t->blocked, SHUTDOWN_SIGS & ~oldset.sig[0]); recalc_sigpending(); - spin_unlock_irqrestore(¤t->sig->siglock, irqflags); + spin_unlock_irqrestore(¤t->sighand->siglock, irqflags); interruptible_sleep_on(&wq->queue); - spin_lock_irqsave(¤t->sig->siglock, irqflags); + spin_lock_irqsave(¤t->sighand->siglock, 
irqflags); current->blocked = oldset; recalc_sigpending(); - spin_unlock_irqrestore(¤t->sig->siglock, irqflags); + spin_unlock_irqrestore(¤t->sighand->siglock, irqflags); } else { DPRINTK(("autofs_wait: skipped sleeping\n")); } diff --git a/fs/exec.c b/fs/exec.c index 0b41239937b7..a63d5c43da1f 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -559,31 +559,61 @@ static inline void put_proc_dentry(struct dentry *dentry) * disturbing other processes. (Other processes might share the signal * table via the CLONE_SIGHAND option to clone().) */ -static inline int de_thread(struct signal_struct *oldsig) +static inline int de_thread(struct task_struct *tsk) { - struct signal_struct *newsig; + struct signal_struct *newsig, *oldsig = tsk->signal; + struct sighand_struct *newsighand, *oldsighand = tsk->sighand; + spinlock_t *lock = &oldsighand->siglock; int count; - if (atomic_read(¤t->sig->count) <= 1) + /* + * If we don't share sighandlers, then we aren't sharing anything + * and we can just re-use it all. 
+ */ + if (atomic_read(&oldsighand->count) <= 1) return 0; - newsig = kmem_cache_alloc(sigact_cachep, GFP_KERNEL); - if (!newsig) + newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL); + if (!newsighand) return -ENOMEM; + spin_lock_init(&newsighand->siglock); + atomic_set(&newsighand->count, 1); + memcpy(newsighand->action, oldsighand->action, sizeof(newsighand->action)); + + /* + * See if we need to allocate a new signal structure + */ + newsig = NULL; + if (atomic_read(&oldsig->count) > 1) { + newsig = kmem_cache_alloc(signal_cachep, GFP_KERNEL); + if (!newsig) { + kmem_cache_free(sighand_cachep, newsighand); + return -ENOMEM; + } + atomic_set(&newsig->count, 1); + newsig->group_exit = 0; + newsig->group_exit_code = 0; + newsig->group_exit_task = NULL; + newsig->group_stop_count = 0; + init_sigpending(&newsig->shared_pending); + } + if (thread_group_empty(current)) - goto out; + goto no_thread_group; /* * Kill all other threads in the thread group: */ - spin_lock_irq(&oldsig->siglock); + spin_lock_irq(lock); if (oldsig->group_exit) { /* * Another group action in progress, just * return so that the signal is processed. 
*/ - spin_unlock_irq(&oldsig->siglock); - kmem_cache_free(sigact_cachep, newsig); + spin_unlock_irq(lock); + kmem_cache_free(sighand_cachep, newsighand); + if (newsig) + kmem_cache_free(signal_cachep, newsig); return -EAGAIN; } oldsig->group_exit = 1; @@ -598,13 +628,13 @@ static inline int de_thread(struct signal_struct *oldsig) while (atomic_read(&oldsig->count) > count) { oldsig->group_exit_task = current; current->state = TASK_UNINTERRUPTIBLE; - spin_unlock_irq(&oldsig->siglock); + spin_unlock_irq(lock); schedule(); - spin_lock_irq(&oldsig->siglock); + spin_lock_irq(lock); if (oldsig->group_exit_task) BUG(); } - spin_unlock_irq(&oldsig->siglock); + spin_unlock_irq(lock); /* * At this point all other threads have exited, all we have to @@ -675,32 +705,29 @@ static inline int de_thread(struct signal_struct *oldsig) release_task(leader); } -out: - spin_lock_init(&newsig->siglock); - atomic_set(&newsig->count, 1); - newsig->group_exit = 0; - newsig->group_exit_code = 0; - newsig->group_exit_task = NULL; - newsig->group_stop_count = 0; - memcpy(newsig->action, current->sig->action, sizeof(newsig->action)); - init_sigpending(&newsig->shared_pending); +no_thread_group: write_lock_irq(&tasklist_lock); - spin_lock(&oldsig->siglock); - spin_lock(&newsig->siglock); + spin_lock(&oldsighand->siglock); + spin_lock(&newsighand->siglock); if (current == oldsig->curr_target) oldsig->curr_target = next_thread(current); - current->sig = newsig; + if (newsig) + current->signal = newsig; + current->sighand = newsighand; init_sigpending(¤t->pending); recalc_sigpending(); - spin_unlock(&newsig->siglock); - spin_unlock(&oldsig->siglock); + spin_unlock(&newsighand->siglock); + spin_unlock(&oldsighand->siglock); write_unlock_irq(&tasklist_lock); - if (atomic_dec_and_test(&oldsig->count)) - kmem_cache_free(sigact_cachep, oldsig); + if (newsig && atomic_dec_and_test(&oldsig->count)) + kmem_cache_free(signal_cachep, oldsig); + + if (atomic_dec_and_test(&oldsighand->count)) + 
kmem_cache_free(sighand_cachep, oldsighand); if (!thread_group_empty(current)) BUG(); @@ -746,21 +773,20 @@ int flush_old_exec(struct linux_binprm * bprm) { char * name; int i, ch, retval; - struct signal_struct * oldsig = current->sig; /* * Release all of the old mmap stuff */ retval = exec_mmap(bprm->mm); if (retval) - goto mmap_failed; + goto out; /* * Make sure we have a private signal table and that * we are unassociated from the previous thread group. */ - retval = de_thread(oldsig); + retval = de_thread(current); if (retval) - goto flush_failed; + goto out; /* This is the point of no return */ @@ -794,14 +820,7 @@ int flush_old_exec(struct linux_binprm * bprm) return 0; -mmap_failed: -flush_failed: - spin_lock_irq(¤t->sig->siglock); - if (current->sig != oldsig) { - kmem_cache_free(sigact_cachep, current->sig); - current->sig = oldsig; - } - spin_unlock_irq(¤t->sig->siglock); +out: return retval; } @@ -885,7 +904,7 @@ void compute_creds(struct linux_binprm *bprm) if (must_not_trace_exec(current) || atomic_read(¤t->fs->count) > 1 || atomic_read(¤t->files->count) > 1 - || atomic_read(¤t->sig->count) > 1) { + || atomic_read(¤t->sighand->count) > 1) { if(!capable(CAP_SETUID)) { bprm->e_uid = current->uid; bprm->e_gid = current->gid; @@ -1302,8 +1321,8 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs) } mm->dumpable = 0; init_completion(&mm->core_done); - current->sig->group_exit = 1; - current->sig->group_exit_code = exit_code; + current->signal->group_exit = 1; + current->signal->group_exit_code = exit_code; coredump_wait(mm); if (current->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump) @@ -1330,7 +1349,7 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs) retval = binfmt->core_dump(signr, regs, file); - current->sig->group_exit_code |= 0x80; + current->signal->group_exit_code |= 0x80; close_fail: filp_close(file, NULL); fail_unlock: diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c index 3117885c3f6e..a106e23956f7 100644 
--- a/fs/jbd/journal.c +++ b/fs/jbd/journal.c @@ -205,10 +205,10 @@ int kjournald(void *arg) lock_kernel(); daemonize(); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); sigfillset(¤t->blocked); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); sprintf(current->comm, "kjournald"); diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c index 404ac2d3a95b..c4c4e0595163 100644 --- a/fs/lockd/clntproc.c +++ b/fs/lockd/clntproc.c @@ -139,7 +139,7 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl) } /* Keep the old signal mask */ - spin_lock_irqsave(¤t->sig->siglock, flags); + spin_lock_irqsave(¤t->sighand->siglock, flags); oldset = current->blocked; /* If we're cleaning up locks because the process is exiting, @@ -149,7 +149,7 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl) && (current->flags & PF_EXITING)) { sigfillset(¤t->blocked); /* Mask all signals */ recalc_sigpending(); - spin_unlock_irqrestore(¤t->sig->siglock, flags); + spin_unlock_irqrestore(¤t->sighand->siglock, flags); call = nlmclnt_alloc_call(); if (!call) { @@ -158,7 +158,7 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl) } call->a_flags = RPC_TASK_ASYNC; } else { - spin_unlock_irqrestore(¤t->sig->siglock, flags); + spin_unlock_irqrestore(¤t->sighand->siglock, flags); memset(call, 0, sizeof(*call)); locks_init_lock(&call->a_args.lock.fl); locks_init_lock(&call->a_res.lock.fl); @@ -183,10 +183,10 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl) kfree(call); out_restore: - spin_lock_irqsave(¤t->sig->siglock, flags); + spin_lock_irqsave(¤t->sighand->siglock, flags); current->blocked = oldset; recalc_sigpending(); - spin_unlock_irqrestore(¤t->sig->siglock, flags); + spin_unlock_irqrestore(¤t->sighand->siglock, flags); done: dprintk("lockd: clnt proc returns %d\n", status); @@ -588,11 +588,11 @@ nlmclnt_cancel(struct nlm_host *host, struct file_lock *fl) int 
status; /* Block all signals while setting up call */ - spin_lock_irqsave(¤t->sig->siglock, flags); + spin_lock_irqsave(¤t->sighand->siglock, flags); oldset = current->blocked; sigfillset(¤t->blocked); recalc_sigpending(); - spin_unlock_irqrestore(¤t->sig->siglock, flags); + spin_unlock_irqrestore(¤t->sighand->siglock, flags); req = nlmclnt_alloc_call(); if (!req) @@ -607,10 +607,10 @@ nlmclnt_cancel(struct nlm_host *host, struct file_lock *fl) if (status < 0) kfree(req); - spin_lock_irqsave(¤t->sig->siglock, flags); + spin_lock_irqsave(¤t->sighand->siglock, flags); current->blocked = oldset; recalc_sigpending(); - spin_unlock_irqrestore(¤t->sig->siglock, flags); + spin_unlock_irqrestore(¤t->sighand->siglock, flags); return status; } diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index f608fbc8354b..a0cafbdfbb0a 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c @@ -101,10 +101,10 @@ lockd(struct svc_rqst *rqstp) sprintf(current->comm, "lockd"); /* Process request with signals blocked. */ - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); siginitsetinv(¤t->blocked, sigmask(SIGKILL)); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); /* kick rpciod */ rpciod_up(); @@ -126,9 +126,9 @@ lockd(struct svc_rqst *rqstp) { long timeout = MAX_SCHEDULE_TIMEOUT; if (signalled()) { - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); flush_signals(current); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (nlmsvc_ops) { nlmsvc_invalidate_all(); grace_period_expire = set_grace_period(); @@ -297,9 +297,9 @@ lockd_down(void) "lockd_down: lockd failed to exit, clearing pid\n"); nlmsvc_pid = 0; } - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); out: up(&nlmsvc_sema); } diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index 
94f48ae35e95..3919e77036e3 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -189,10 +189,10 @@ nfsd(struct svc_rqst *rqstp) */ for (;;) { /* Block all but the shutdown signals */ - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); siginitsetinv(¤t->blocked, SHUTDOWN_SIGS); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); /* * Find a socket with data available and call its @@ -210,10 +210,10 @@ nfsd(struct svc_rqst *rqstp) exp_readlock(); /* Process request with signals blocked. */ - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); siginitsetinv(¤t->blocked, ALLOWED_SIGS); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); svc_process(serv, rqstp); diff --git a/fs/proc/array.c b/fs/proc/array.c index e135ac5a1080..df1501a0f332 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -190,16 +190,16 @@ static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign, sigemptyset(catch); read_lock(&tasklist_lock); - if (p->sig) { - spin_lock_irq(&p->sig->siglock); - k = p->sig->action; + if (p->sighand) { + spin_lock_irq(&p->sighand->siglock); + k = p->sighand->action; for (i = 1; i <= _NSIG; ++i, ++k) { if (k->sa.sa_handler == SIG_IGN) sigaddset(ign, i); else if (k->sa.sa_handler != SIG_DFL) sigaddset(catch, i); } - spin_unlock_irq(&p->sig->siglock); + spin_unlock_irq(&p->sighand->siglock); } read_unlock(&tasklist_lock); } diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 77bc3a1340ac..11483636b4d6 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -44,10 +44,14 @@ } #define INIT_SIGNALS(sig) { \ + .count = ATOMIC_INIT(1), \ + .shared_pending = { NULL, &sig.shared_pending.head, {{0}}}, \ +} + +#define INIT_SIGHAND(sighand) { \ .count = ATOMIC_INIT(1), \ .action = { {{0,}}, }, \ .siglock = SPIN_LOCK_UNLOCKED, \ - .shared_pending = { NULL, &sig.shared_pending.head, 
{{0}}}, \ } /* @@ -90,7 +94,8 @@ .thread = INIT_THREAD, \ .fs = &init_fs, \ .files = &init_files, \ - .sig = &init_signals, \ + .signal = &init_signals, \ + .sighand = &init_sighand, \ .pending = { NULL, &tsk.pending.head, {{0}}}, \ .blocked = {{0}}, \ .alloc_lock = SPIN_LOCK_UNLOCKED, \ diff --git a/include/linux/sched.h b/include/linux/sched.h index d41f7a24fc14..78970007590f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -220,10 +220,21 @@ struct mm_struct { extern int mmlist_nr; -struct signal_struct { +struct sighand_struct { atomic_t count; struct k_sigaction action[_NSIG]; spinlock_t siglock; +}; + +/* + * NOTE! "signal_struct" does not have it's own + * locking, because a shared signal_struct always + * implies a shared sighand_struct, so locking + * sighand_struct is always a proper superset of + * the locking of signal_struct. + */ +struct signal_struct { + atomic_t count; /* current thread group signal load-balancing target: */ task_t *curr_target; @@ -378,7 +389,8 @@ struct task_struct { /* namespace */ struct namespace *namespace; /* signal handlers */ - struct signal_struct *sig; + struct signal_struct *signal; + struct sighand_struct *sighand; sigset_t blocked, real_blocked; struct sigpending pending; @@ -589,6 +601,8 @@ extern void exit_thread(void); extern void exit_mm(struct task_struct *); extern void exit_files(struct task_struct *); +extern void exit_signal(struct task_struct *); +extern void __exit_signal(struct task_struct *); extern void exit_sighand(struct task_struct *); extern void __exit_sighand(struct task_struct *); diff --git a/include/linux/slab.h b/include/linux/slab.h index 220a672af798..c136265fd3cd 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -71,7 +71,8 @@ extern kmem_cache_t *files_cachep; extern kmem_cache_t *filp_cachep; extern kmem_cache_t *dquot_cachep; extern kmem_cache_t *fs_cachep; -extern kmem_cache_t *sigact_cachep; +extern kmem_cache_t *signal_cachep; +extern kmem_cache_t 
*sighand_cachep; extern kmem_cache_t *bio_cachep; #endif /* __KERNEL__ */ diff --git a/kernel/exit.c b/kernel/exit.c index cee8991011f7..febad08ae9ef 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -76,6 +76,7 @@ void release_task(struct task_struct * p) if (unlikely(p->ptrace)) __ptrace_unlink(p); BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children)); + __exit_signal(p); __exit_sighand(p); proc_dentry = __unhash_process(p); @@ -546,7 +547,7 @@ static void exit_notify(struct task_struct *tsk) { struct task_struct *t; - if (signal_pending(tsk) && !tsk->sig->group_exit + if (signal_pending(tsk) && !tsk->signal->group_exit && !thread_group_empty(tsk)) { /* * This occurs when there was a race between our exit @@ -558,14 +559,14 @@ static void exit_notify(struct task_struct *tsk) * sure someone gets all the pending signals. */ read_lock(&tasklist_lock); - spin_lock_irq(&tsk->sig->siglock); + spin_lock_irq(&tsk->sighand->siglock); for (t = next_thread(tsk); t != tsk; t = next_thread(t)) if (!signal_pending(t) && !(t->flags & PF_EXITING)) { recalc_sigpending_tsk(t); if (signal_pending(t)) signal_wake_up(t, 0); } - spin_unlock_irq(&tsk->sig->siglock); + spin_unlock_irq(&tsk->sighand->siglock); read_unlock(&tasklist_lock); } @@ -708,9 +709,9 @@ task_t *next_thread(task_t *p) struct list_head *tmp, *head = &link->pidptr->task_list; #if CONFIG_SMP - if (!p->sig) + if (!p->sighand) BUG(); - if (!spin_is_locked(&p->sig->siglock) && + if (!spin_is_locked(&p->sighand->siglock) && !rwlock_is_locked(&tasklist_lock)) BUG(); #endif @@ -730,21 +731,22 @@ do_group_exit(int exit_code) { BUG_ON(exit_code & 0x80); /* core dumps don't get here */ - if (current->sig->group_exit) - exit_code = current->sig->group_exit_code; + if (current->signal->group_exit) + exit_code = current->signal->group_exit_code; else if (!thread_group_empty(current)) { - struct signal_struct *const sig = current->sig; + struct signal_struct *const sig = current->signal; + struct sighand_struct 
*const sighand = current->sighand; read_lock(&tasklist_lock); - spin_lock_irq(&sig->siglock); + spin_lock_irq(&sighand->siglock); if (sig->group_exit) /* Another thread got here before we took the lock. */ exit_code = sig->group_exit_code; else { - sig->group_exit = 1; - sig->group_exit_code = exit_code; + sig->group_exit = 1; + sig->group_exit_code = exit_code; zap_other_threads(current); } - spin_unlock_irq(&sig->siglock); + spin_unlock_irq(&sighand->siglock); read_unlock(&tasklist_lock); } @@ -838,8 +840,8 @@ static int wait_task_zombie(task_t *p, unsigned int *stat_addr, struct rusage *r retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0; if (!retval && stat_addr) { - if (p->sig->group_exit) - retval = put_user(p->sig->group_exit_code, stat_addr); + if (p->signal->group_exit) + retval = put_user(p->signal->group_exit_code, stat_addr); else retval = put_user(p->exit_code, stat_addr); } @@ -879,7 +881,7 @@ static int wait_task_stopped(task_t *p, int delayed_group_leader, if (!p->exit_code) return 0; if (delayed_group_leader && !(p->ptrace & PT_PTRACED) && - p->sig && p->sig->group_stop_count > 0) + p->signal && p->signal->group_stop_count > 0) /* * A group stop is in progress and this is the group leader. * We won't report until all threads have stopped. 
@@ -1004,7 +1006,7 @@ repeat: if (options & __WNOTHREAD) break; tsk = next_thread(tsk); - if (tsk->sig != current->sig) + if (tsk->signal != current->signal) BUG(); } while (tsk != current); read_unlock(&tasklist_lock); diff --git a/kernel/fork.c b/kernel/fork.c index c042b5a8eaec..988a195bcc93 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -665,23 +665,39 @@ out_release: static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk) { - struct signal_struct *sig; + struct sighand_struct *sig; - if (clone_flags & CLONE_SIGHAND) { - atomic_inc(¤t->sig->count); + if (clone_flags & (CLONE_SIGHAND | CLONE_THREAD)) { + atomic_inc(¤t->sighand->count); return 0; } - sig = kmem_cache_alloc(sigact_cachep, GFP_KERNEL); - tsk->sig = sig; + sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL); + tsk->sighand = sig; if (!sig) return -1; spin_lock_init(&sig->siglock); atomic_set(&sig->count, 1); + memcpy(sig->action, current->sighand->action, sizeof(sig->action)); + return 0; +} + +static inline int copy_signal(unsigned long clone_flags, struct task_struct * tsk) +{ + struct signal_struct *sig; + + if (clone_flags & CLONE_THREAD) { + atomic_inc(¤t->signal->count); + return 0; + } + sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL); + tsk->signal = sig; + if (!sig) + return -1; + atomic_set(&sig->count, 1); sig->group_exit = 0; sig->group_exit_code = 0; sig->group_exit_task = NULL; sig->group_stop_count = 0; - memcpy(sig->action, current->sig->action, sizeof(sig->action)); sig->curr_target = NULL; init_sigpending(&sig->shared_pending); @@ -831,8 +847,10 @@ static struct task_struct *copy_process(unsigned long clone_flags, goto bad_fork_cleanup_files; if (copy_sighand(clone_flags, p)) goto bad_fork_cleanup_fs; - if (copy_mm(clone_flags, p)) + if (copy_signal(clone_flags, p)) goto bad_fork_cleanup_sighand; + if (copy_mm(clone_flags, p)) + goto bad_fork_cleanup_signal; if (copy_namespace(clone_flags, p)) goto bad_fork_cleanup_mm; retval = copy_thread(0, 
clone_flags, stack_start, stack_size, p, regs); @@ -923,31 +941,31 @@ static struct task_struct *copy_process(unsigned long clone_flags, p->parent = p->real_parent; if (clone_flags & CLONE_THREAD) { - spin_lock(¤t->sig->siglock); + spin_lock(¤t->sighand->siglock); /* * Important: if an exit-all has been started then * do not create this new thread - the whole thread * group is supposed to exit anyway. */ - if (current->sig->group_exit) { - spin_unlock(¤t->sig->siglock); + if (current->signal->group_exit) { + spin_unlock(¤t->sighand->siglock); write_unlock_irq(&tasklist_lock); goto bad_fork_cleanup_namespace; } p->tgid = current->tgid; p->group_leader = current->group_leader; - if (current->sig->group_stop_count > 0) { + if (current->signal->group_stop_count > 0) { /* * There is an all-stop in progress for the group. * We ourselves will stop as soon as we check signals. * Make the new thread part of that group stop too. */ - current->sig->group_stop_count++; + current->signal->group_stop_count++; set_tsk_thread_flag(p, TIF_SIGPENDING); } - spin_unlock(¤t->sig->siglock); + spin_unlock(¤t->sighand->siglock); } SET_LINKS(p); @@ -977,6 +995,8 @@ bad_fork_cleanup_namespace: exit_namespace(p); bad_fork_cleanup_mm: exit_mm(p); +bad_fork_cleanup_signal: + exit_signal(p); bad_fork_cleanup_sighand: exit_sighand(p); bad_fork_cleanup_fs: @@ -1077,8 +1097,11 @@ struct task_struct *do_fork(unsigned long clone_flags, return p; } -/* SLAB cache for signal_struct structures (tsk->sig) */ -kmem_cache_t *sigact_cachep; +/* SLAB cache for signal_struct structures (tsk->signal) */ +kmem_cache_t *signal_cachep; + +/* SLAB cache for sighand_struct structures (tsk->sighand) */ +kmem_cache_t *sighand_cachep; /* SLAB cache for files_struct structures (tsk->files) */ kmem_cache_t *files_cachep; @@ -1094,11 +1117,17 @@ kmem_cache_t *mm_cachep; void __init proc_caches_init(void) { - sigact_cachep = kmem_cache_create("signal_act", + sighand_cachep = kmem_cache_create("sighand_cache", + 
sizeof(struct sighand_struct), 0, + SLAB_HWCACHE_ALIGN, NULL, NULL); + if (!sighand_cachep) + panic("Cannot create sighand SLAB cache"); + + signal_cachep = kmem_cache_create("signal_cache", sizeof(struct signal_struct), 0, SLAB_HWCACHE_ALIGN, NULL, NULL); - if (!sigact_cachep) - panic("Cannot create signal action SLAB cache"); + if (!signal_cachep) + panic("Cannot create signal SLAB cache"); files_cachep = kmem_cache_create("files_cache", sizeof(struct files_struct), 0, diff --git a/kernel/kmod.c b/kernel/kmod.c index 6a9a2c8f937c..2b85eff87f43 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c @@ -111,12 +111,12 @@ int exec_usermodehelper(char *program_path, char *argv[], char *envp[]) as the super user right after the execve fails if you time the signal just right. */ - spin_lock_irq(&curtask->sig->siglock); + spin_lock_irq(&curtask->sighand->siglock); sigemptyset(&curtask->blocked); flush_signals(curtask); flush_signal_handlers(curtask); recalc_sigpending(); - spin_unlock_irq(&curtask->sig->siglock); + spin_unlock_irq(&curtask->sighand->siglock); for (i = 0; i < curtask->files->max_fds; i++ ) { if (curtask->files->fd[i]) close(i); @@ -239,20 +239,20 @@ int request_module(const char * module_name) } /* Block everything but SIGKILL/SIGSTOP */ - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); tmpsig = current->blocked; siginitsetinv(¤t->blocked, sigmask(SIGKILL) | sigmask(SIGSTOP)); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); waitpid_result = waitpid(pid, NULL, __WCLONE); atomic_dec(&kmod_concurrent); /* Allow signals again.. */ - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); current->blocked = tmpsig; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (waitpid_result != pid) { printk(KERN_ERR "request_module[%s]: waitpid(%d,...) 
failed, errno %d\n", diff --git a/kernel/signal.c b/kernel/signal.c index e8ff3bb6324e..a095215cffb1 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -138,16 +138,16 @@ int max_queued_signals = 1024; (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK)) #define sig_user_defined(t, signr) \ - (((t)->sig->action[(signr)-1].sa.sa_handler != SIG_DFL) && \ - ((t)->sig->action[(signr)-1].sa.sa_handler != SIG_IGN)) + (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \ + ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN)) #define sig_ignored(t, signr) \ (!((t)->ptrace & PT_PTRACED) && \ - (t)->sig->action[(signr)-1].sa.sa_handler == SIG_IGN) + (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_IGN) #define sig_fatal(t, signr) \ (!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \ - (t)->sig->action[(signr)-1].sa.sa_handler == SIG_DFL) + (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL) /* * Re-calculate pending state from the set of locally pending @@ -183,9 +183,9 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked) inline void recalc_sigpending_tsk(struct task_struct *t) { - if (t->sig->group_stop_count > 0 || + if (t->signal->group_stop_count > 0 || PENDING(&t->pending, &t->blocked) || - PENDING(&t->sig->shared_pending, &t->blocked)) + PENDING(&t->signal->shared_pending, &t->blocked)) set_tsk_thread_flag(t, TIF_SIGPENDING); else clear_tsk_thread_flag(t, TIF_SIGPENDING); @@ -265,20 +265,41 @@ flush_signals(struct task_struct *t) */ void __exit_sighand(struct task_struct *tsk) { - struct signal_struct * sig = tsk->sig; + struct sighand_struct * sighand = tsk->sighand; + + /* Ok, we're done with the signal handlers */ + tsk->sighand = NULL; + if (atomic_dec_and_test(&sighand->count)) + kmem_cache_free(sighand_cachep, sighand); +} + +void exit_sighand(struct task_struct *tsk) +{ + write_lock_irq(&tasklist_lock); + __exit_sighand(tsk); + write_unlock_irq(&tasklist_lock); +} + +/* + * This function expects 
the tasklist_lock write-locked. + */ +void __exit_signal(struct task_struct *tsk) +{ + struct signal_struct * sig = tsk->signal; + struct sighand_struct * sighand = tsk->sighand; if (!sig) BUG(); if (!atomic_read(&sig->count)) BUG(); - spin_lock(&sig->siglock); + spin_lock(&sighand->siglock); if (atomic_dec_and_test(&sig->count)) { if (tsk == sig->curr_target) sig->curr_target = next_thread(tsk); - tsk->sig = NULL; - spin_unlock(&sig->siglock); + tsk->signal = NULL; + spin_unlock(&sighand->siglock); flush_sigqueue(&sig->shared_pending); - kmem_cache_free(sigact_cachep, sig); + kmem_cache_free(signal_cachep, sig); } else { /* * If there is any task waiting for the group exit @@ -290,17 +311,17 @@ void __exit_sighand(struct task_struct *tsk) } if (tsk == sig->curr_target) sig->curr_target = next_thread(tsk); - tsk->sig = NULL; - spin_unlock(&sig->siglock); + tsk->signal = NULL; + spin_unlock(&sighand->siglock); } clear_tsk_thread_flag(tsk,TIF_SIGPENDING); flush_sigqueue(&tsk->pending); } -void exit_sighand(struct task_struct *tsk) +void exit_signal(struct task_struct *tsk) { write_lock_irq(&tasklist_lock); - __exit_sighand(tsk); + __exit_signal(tsk); write_unlock_irq(&tasklist_lock); } @@ -312,7 +333,7 @@ void flush_signal_handlers(struct task_struct *t) { int i; - struct k_sigaction *ka = &t->sig->action[0]; + struct k_sigaction *ka = &t->sighand->action[0]; for (i = _NSIG ; i != 0 ; i--) { if (ka->sa.sa_handler != SIG_IGN) ka->sa.sa_handler = SIG_DFL; @@ -336,11 +357,11 @@ block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask) { unsigned long flags; - spin_lock_irqsave(¤t->sig->siglock, flags); + spin_lock_irqsave(¤t->sighand->siglock, flags); current->notifier_mask = mask; current->notifier_data = priv; current->notifier = notifier; - spin_unlock_irqrestore(¤t->sig->siglock, flags); + spin_unlock_irqrestore(¤t->sighand->siglock, flags); } /* Notify the system that blocking has ended. 
*/ @@ -350,11 +371,11 @@ unblock_all_signals(void) { unsigned long flags; - spin_lock_irqsave(¤t->sig->siglock, flags); + spin_lock_irqsave(¤t->sighand->siglock, flags); current->notifier = NULL; current->notifier_data = NULL; recalc_sigpending(); - spin_unlock_irqrestore(¤t->sig->siglock, flags); + spin_unlock_irqrestore(¤t->sighand->siglock, flags); } static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info) @@ -443,7 +464,7 @@ int dequeue_signal(sigset_t *mask, siginfo_t *info) { int signr = __dequeue_signal(¤t->pending, mask, info); if (!signr) - signr = __dequeue_signal(¤t->sig->shared_pending, + signr = __dequeue_signal(¤t->signal->shared_pending, mask, info); return signr; } @@ -559,7 +580,7 @@ static void handle_stop_signal(int sig, struct task_struct *p) /* * This is a stop signal. Remove SIGCONT from all queues. */ - rm_from_queue(sigmask(SIGCONT), &p->sig->shared_pending); + rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending); t = p; do { rm_from_queue(sigmask(SIGCONT), &t->pending); @@ -570,8 +591,8 @@ static void handle_stop_signal(int sig, struct task_struct *p) /* * Remove all stop signals from all queues, * and wake all threads. - */ - if (unlikely(p->sig->group_stop_count > 0)) { + */ + if (unlikely(p->signal->group_stop_count > 0)) { /* * There was a group stop in progress. We'll * pretend it finished before we got here. We are @@ -584,7 +605,7 @@ static void handle_stop_signal(int sig, struct task_struct *p) * now, and it's as if the stop had finished and * the SIGCHLD was pending on entry to this kill. 
*/ - p->sig->group_stop_count = 0; + p->signal->group_stop_count = 0; if (p->ptrace & PT_PTRACED) do_notify_parent_cldstop(p, p->parent); else @@ -592,7 +613,7 @@ static void handle_stop_signal(int sig, struct task_struct *p) p->group_leader, p->group_leader->real_parent); } - rm_from_queue(SIG_KERNEL_STOP_MASK, &p->sig->shared_pending); + rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending); t = p; do { rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending); @@ -608,7 +629,7 @@ static void handle_stop_signal(int sig, struct task_struct *p) * set, the thread will pause and acquire the * siglock that we hold now and until we've * queued the pending signal. - */ + */ if (sig_user_defined(p, SIGCONT)) set_tsk_thread_flag(t, TIF_SIGPENDING); wake_up_process(t); @@ -646,23 +667,23 @@ static int send_signal(int sig, struct siginfo *info, struct sigpending *signals *signals->tail = q; signals->tail = &q->next; switch ((unsigned long) info) { - case 0: - q->info.si_signo = sig; - q->info.si_errno = 0; - q->info.si_code = SI_USER; - q->info.si_pid = current->pid; - q->info.si_uid = current->uid; - break; - case 1: - q->info.si_signo = sig; - q->info.si_errno = 0; - q->info.si_code = SI_KERNEL; - q->info.si_pid = 0; - q->info.si_uid = 0; - break; - default: - copy_siginfo(&q->info, info); - break; + case 0: + q->info.si_signo = sig; + q->info.si_errno = 0; + q->info.si_code = SI_USER; + q->info.si_pid = current->pid; + q->info.si_uid = current->uid; + break; + case 1: + q->info.si_signo = sig; + q->info.si_errno = 0; + q->info.si_code = SI_KERNEL; + q->info.si_pid = 0; + q->info.si_uid = 0; + break; + default: + copy_siginfo(&q->info, info); + break; } } else if (sig >= SIGRTMIN && info && (unsigned long)info != 1 && info->si_code != SI_USER) @@ -689,7 +710,7 @@ specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t) if (!irqs_disabled()) BUG(); #if CONFIG_SMP - if (!spin_is_locked(&t->sig->siglock)) + if (!spin_is_locked(&t->sighand->siglock)) 
BUG(); #endif @@ -697,10 +718,10 @@ specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t) if (sig_ignored(t, sig)) return 0; - /* Support queueing exactly one non-rt signal, so that we - can get more detailed information about the cause of - the signal. */ - if (LEGACY_QUEUE(&t->pending, sig)) + /* Support queueing exactly one non-rt signal, so that we + can get more detailed information about the cause of + the signal. */ + if (LEGACY_QUEUE(&t->pending, sig)) return 0; ret = send_signal(sig, info, &t->pending); @@ -721,13 +742,13 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t) unsigned long int flags; int ret; - spin_lock_irqsave(&t->sig->siglock, flags); - if (t->sig->action[sig-1].sa.sa_handler == SIG_IGN) - t->sig->action[sig-1].sa.sa_handler = SIG_DFL; + spin_lock_irqsave(&t->sighand->siglock, flags); + if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) + t->sighand->action[sig-1].sa.sa_handler = SIG_DFL; sigdelset(&t->blocked, sig); recalc_sigpending_tsk(t); ret = specific_send_sig_info(sig, info, t); - spin_unlock_irqrestore(&t->sig->siglock, flags); + spin_unlock_irqrestore(&t->sighand->siglock, flags); return ret; } @@ -737,13 +758,13 @@ force_sig_specific(int sig, struct task_struct *t) { unsigned long int flags; - spin_lock_irqsave(&t->sig->siglock, flags); - if (t->sig->action[sig-1].sa.sa_handler == SIG_IGN) - t->sig->action[sig-1].sa.sa_handler = SIG_DFL; + spin_lock_irqsave(&t->sighand->siglock, flags); + if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) + t->sighand->action[sig-1].sa.sa_handler = SIG_DFL; sigdelset(&t->blocked, sig); recalc_sigpending_tsk(t); specific_send_sig_info(sig, (void *)2, t); - spin_unlock_irqrestore(&t->sig->siglock, flags); + spin_unlock_irqrestore(&t->sighand->siglock, flags); } /* @@ -766,7 +787,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) int ret; #if CONFIG_SMP - if (!spin_is_locked(&p->sig->siglock)) + if 
(!spin_is_locked(&p->sighand->siglock)) BUG(); #endif handle_stop_signal(sig, p); @@ -775,7 +796,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) if (sig_ignored(p, sig)) return 0; - if (LEGACY_QUEUE(&p->sig->shared_pending, sig)) + if (LEGACY_QUEUE(&p->signal->shared_pending, sig)) /* This is a non-RT signal and we already have one queued. */ return 0; @@ -784,7 +805,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) * We always use the shared queue for process-wide signals, * to avoid several races. */ - ret = send_signal(sig, info, &p->sig->shared_pending); + ret = send_signal(sig, info, &p->signal->shared_pending); if (unlikely(ret)) return ret; @@ -804,32 +825,32 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) return 0; else { /* - * Otherwise try to find a suitable thread. - */ - t = p->sig->curr_target; + * Otherwise try to find a suitable thread. + */ + t = p->signal->curr_target; if (t == NULL) - /* restart balancing at this thread */ - t = p->sig->curr_target = p; + /* restart balancing at this thread */ + t = p->signal->curr_target = p; BUG_ON(t->tgid != p->tgid); while (!wants_signal(sig, t)) { t = next_thread(t); - if (t == p->sig->curr_target) - /* + if (t == p->signal->curr_target) + /* * No thread needs to be woken. * Any eligible threads will see * the signal in the queue soon. - */ + */ return 0; } - p->sig->curr_target = t; + p->signal->curr_target = t; } /* * Found a killable thread. If the signal will be fatal, * then start taking the whole group down immediately. */ - if (sig_fatal(p, sig) && !p->sig->group_exit && + if (sig_fatal(p, sig) && !p->signal->group_exit && !sigismember(&t->real_blocked, sig) && (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) { /* @@ -842,9 +863,9 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) * running and doing things after a slower * thread has the fatal signal pending. 
*/ - p->sig->group_exit = 1; - p->sig->group_exit_code = sig; - p->sig->group_stop_count = 0; + p->signal->group_exit = 1; + p->signal->group_exit_code = sig; + p->signal->group_stop_count = 0; t = p; do { sigaddset(&t->pending.signal, SIGKILL); @@ -865,16 +886,16 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) * the core-dump signal unblocked. */ rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending); - rm_from_queue(SIG_KERNEL_STOP_MASK, &p->sig->shared_pending); - p->sig->group_stop_count = 0; - p->sig->group_exit_task = t; + rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending); + p->signal->group_stop_count = 0; + p->signal->group_exit_task = t; t = p; do { - p->sig->group_stop_count++; + p->signal->group_stop_count++; signal_wake_up(t, 0); t = next_thread(t); } while (t != p); - wake_up_process(p->sig->group_exit_task); + wake_up_process(p->signal->group_exit_task); return 0; } @@ -893,7 +914,7 @@ void zap_other_threads(struct task_struct *p) { struct task_struct *t; - p->sig->group_stop_count = 0; + p->signal->group_stop_count = 0; if (thread_group_empty(p)) return; @@ -912,10 +933,10 @@ group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) int ret; ret = check_kill_permission(sig, info, p); - if (!ret && sig && p->sig) { - spin_lock_irqsave(&p->sig->siglock, flags); + if (!ret && sig && p->sighand) { + spin_lock_irqsave(&p->sighand->siglock, flags); ret = __group_send_sig_info(sig, info, p); - spin_unlock_irqrestore(&p->sig->siglock, flags); + spin_unlock_irqrestore(&p->sighand->siglock, flags); } return ret; @@ -1050,9 +1071,9 @@ send_sig_info(int sig, struct siginfo *info, struct task_struct *p) return group_send_sig_info(sig, info, p); else { int error; - spin_lock_irq(&p->sig->siglock); + spin_lock_irq(&p->sighand->siglock); error = specific_send_sig_info(sig, info, p); - spin_unlock_irq(&p->sig->siglock); + spin_unlock_irq(&p->sighand->siglock); return error; } } @@ -1107,7 +1128,7 @@ static inline 
void __wake_up_parent(struct task_struct *p, do { wake_up_interruptible(&tsk->wait_chldexit); tsk = next_thread(tsk); - if (tsk->sig != parent->sig) + if (tsk->signal != parent->signal) BUG(); } while (tsk != parent); } @@ -1121,7 +1142,7 @@ void do_notify_parent(struct task_struct *tsk, int sig) struct siginfo info; unsigned long flags; int why, status; - struct signal_struct *psig; + struct sighand_struct *psig; if (sig == -1) BUG(); @@ -1160,7 +1181,7 @@ void do_notify_parent(struct task_struct *tsk, int sig) info.si_code = why; info.si_status = status; - psig = tsk->parent->sig; + psig = tsk->parent->sighand; spin_lock_irqsave(&psig->siglock, flags); if (sig == SIGCHLD && tsk->state != TASK_STOPPED && (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN || @@ -1213,6 +1234,7 @@ do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent) { struct siginfo info; unsigned long flags; + struct sighand_struct *sighand; info.si_signo = SIGCHLD; info.si_errno = 0; @@ -1226,15 +1248,16 @@ do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent) info.si_status = tsk->exit_code & 0x7f; info.si_code = CLD_STOPPED; - spin_lock_irqsave(&parent->sig->siglock, flags); - if (parent->sig->action[SIGCHLD-1].sa.sa_handler != SIG_IGN && - !(parent->sig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP)) + sighand = parent->sighand; + spin_lock_irqsave(&sighand->siglock, flags); + if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN && + !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP)) __group_send_sig_info(SIGCHLD, &info, parent); /* * Even if SIGCHLD is not generated, we must wake up wait4 calls. 
*/ __wake_up_parent(tsk, parent); - spin_unlock_irqrestore(&parent->sig->siglock, flags); + spin_unlock_irqrestore(&sighand->siglock, flags); } static void @@ -1271,7 +1294,8 @@ finish_stop(int stop_count) static void do_signal_stop(int signr) { - struct signal_struct *sig = current->sig; + struct signal_struct *sig = current->signal; + struct sighand_struct *sighand = current->sighand; int stop_count = -1; if (sig->group_stop_count > 0) { @@ -1279,17 +1303,17 @@ do_signal_stop(int signr) * There is a group stop in progress. We don't need to * start another one. */ - spin_lock_irq(&sig->siglock); + spin_lock_irq(&sighand->siglock); if (unlikely(sig->group_stop_count == 0)) { BUG_ON(!sig->group_exit); - spin_unlock_irq(&sig->siglock); + spin_unlock_irq(&sighand->siglock); return; } signr = sig->group_exit_code; stop_count = --sig->group_stop_count; current->exit_code = signr; set_current_state(TASK_STOPPED); - spin_unlock_irq(&sig->siglock); + spin_unlock_irq(&sighand->siglock); } else if (thread_group_empty(current)) { /* @@ -1305,7 +1329,7 @@ do_signal_stop(int signr) */ struct task_struct *t; read_lock(&tasklist_lock); - spin_lock_irq(&sig->siglock); + spin_lock_irq(&sighand->siglock); if (unlikely(sig->group_exit)) { /* @@ -1313,7 +1337,7 @@ do_signal_stop(int signr) * We'll just ignore the stop and process the * associated fatal signal. 
*/ - spin_unlock_irq(&sig->siglock); + spin_unlock_irq(&sighand->siglock); read_unlock(&tasklist_lock); return; } @@ -1343,7 +1367,7 @@ do_signal_stop(int signr) current->exit_code = signr; set_current_state(TASK_STOPPED); - spin_unlock_irq(&sig->siglock); + spin_unlock_irq(&sighand->siglock); read_unlock(&tasklist_lock); } @@ -1361,31 +1385,31 @@ int get_signal_to_deliver(siginfo_t *info, struct pt_regs *regs) unsigned long signr = 0; struct k_sigaction *ka; - spin_lock_irq(¤t->sig->siglock); - if (unlikely(current->sig->group_stop_count > 0)) { + spin_lock_irq(¤t->sighand->siglock); + if (unlikely(current->signal->group_stop_count > 0)) { int stop_count; - if (current->sig->group_exit_task == current) { + if (current->signal->group_exit_task == current) { /* * Group stop is so we can do a core dump. */ - current->sig->group_exit_task = NULL; + current->signal->group_exit_task = NULL; goto dequeue; } /* * There is a group stop in progress. We stop * without any associated signal being in our queue. */ - stop_count = --current->sig->group_stop_count; - signr = current->sig->group_exit_code; + stop_count = --current->signal->group_stop_count; + signr = current->signal->group_exit_code; current->exit_code = signr; set_current_state(TASK_STOPPED); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); finish_stop(stop_count); continue; } dequeue: signr = dequeue_signal(mask, info); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (!signr) break; @@ -1395,10 +1419,10 @@ int get_signal_to_deliver(siginfo_t *info, struct pt_regs *regs) * If there is a group stop in progress, * we must participate in the bookkeeping. 
*/ - if (current->sig->group_stop_count > 0) { - spin_lock_irq(¤t->sig->siglock); - --current->sig->group_stop_count; - spin_unlock_irq(¤t->sig->siglock); + if (current->signal->group_stop_count > 0) { + spin_lock_irq(¤t->sighand->siglock); + --current->signal->group_stop_count; + spin_unlock_irq(¤t->sighand->siglock); } /* Let the debugger run. */ @@ -1424,14 +1448,14 @@ int get_signal_to_deliver(siginfo_t *info, struct pt_regs *regs) /* If the (new) signal is now blocked, requeue it. */ if (sigismember(¤t->blocked, signr)) { - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); specific_send_sig_info(signr, info, current); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); continue; } } - ka = ¤t->sig->action[signr-1]; + ka = ¤t->sighand->action[signr-1]; if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ continue; if (ka->sa.sa_handler != SIG_DFL) /* Run the handler. */ @@ -1443,9 +1467,9 @@ int get_signal_to_deliver(siginfo_t *info, struct pt_regs *regs) if (sig_kernel_ignore(signr)) /* Default is nothing. */ continue; - /* Init gets no signals it doesn't want. */ - if (current->pid == 1) - continue; + /* Init gets no signals it doesn't want. */ + if (current->pid == 1) + continue; if (sig_kernel_stop(signr)) { /* @@ -1457,8 +1481,8 @@ int get_signal_to_deliver(siginfo_t *info, struct pt_regs *regs) if (signr == SIGSTOP || !is_orphaned_pgrp(current->pgrp)) do_signal_stop(signr); - continue; - } + continue; + } /* * Anything else is fatal, maybe with a core dump. @@ -1476,8 +1500,8 @@ int get_signal_to_deliver(siginfo_t *info, struct pt_regs *regs) * and we just let them go to finish dying. 
*/ const int code = signr | 0x80; - BUG_ON(!current->sig->group_exit); - BUG_ON(current->sig->group_exit_code != code); + BUG_ON(!current->signal->group_exit); + BUG_ON(current->signal->group_exit_code != code); do_exit(code); /* NOTREACHED */ } @@ -1549,7 +1573,7 @@ sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset, size_t sigsetsize) goto out; sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); old_set = current->blocked; error = 0; @@ -1569,15 +1593,15 @@ sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset, size_t sigsetsize) current->blocked = new_set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (error) goto out; if (oset) goto set_old; } else if (oset) { - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); old_set = current->blocked; - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); set_old: error = -EFAULT; @@ -1597,10 +1621,10 @@ long do_sigpending(void *set, unsigned long sigsetsize) if (sigsetsize > sizeof(sigset_t)) goto out; - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); sigorsets(&pending, ¤t->pending.signal, - ¤t->sig->shared_pending.signal); - spin_unlock_irq(¤t->sig->siglock); + ¤t->signal->shared_pending.signal); + spin_unlock_irq(¤t->sighand->siglock); /* Outside the lock because only this thread touches it. 
*/ sigandsets(&pending, ¤t->blocked, &pending); @@ -1714,7 +1738,7 @@ sys_rt_sigtimedwait(const sigset_t *uthese, siginfo_t *uinfo, return -EINVAL; } - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); sig = dequeue_signal(&these, &info); if (!sig) { timeout = MAX_SCHEDULE_TIMEOUT; @@ -1729,19 +1753,19 @@ sys_rt_sigtimedwait(const sigset_t *uthese, siginfo_t *uinfo, current->real_blocked = current->blocked; sigandsets(¤t->blocked, ¤t->blocked, &these); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); current->state = TASK_INTERRUPTIBLE; timeout = schedule_timeout(timeout); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); sig = dequeue_signal(&these, &info); current->blocked = current->real_blocked; siginitset(¤t->real_blocked, 0); recalc_sigpending(); } } - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (sig) { ret = sig; @@ -1801,11 +1825,11 @@ sys_tkill(int pid, int sig) * The null signal is a permissions and process existence * probe. No signal is actually delivered. */ - if (!error && sig && p->sig) { - spin_lock_irq(&p->sig->siglock); + if (!error && sig && p->sighand) { + spin_lock_irq(&p->sighand->siglock); handle_stop_signal(sig, p); error = specific_send_sig_info(sig, &info, p); - spin_unlock_irq(&p->sig->siglock); + spin_unlock_irq(&p->sighand->siglock); } } read_unlock(&tasklist_lock); @@ -1838,15 +1862,15 @@ do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact) if (sig < 1 || sig > _NSIG || (act && sig_kernel_only(sig))) return -EINVAL; - k = ¤t->sig->action[sig-1]; + k = ¤t->sighand->action[sig-1]; - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); if (signal_pending(current)) { /* * If there might be a fatal signal pending on multiple * threads, make sure we take it before changing the action. 
*/ - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); return -ERESTARTNOINTR; } @@ -1875,20 +1899,20 @@ do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact) * dance to maintain the lock hierarchy. */ struct task_struct *t = current; - spin_unlock_irq(&t->sig->siglock); + spin_unlock_irq(&t->sighand->siglock); read_lock(&tasklist_lock); - spin_lock_irq(&t->sig->siglock); + spin_lock_irq(&t->sighand->siglock); *k = *act; sigdelsetmask(&k->sa.sa_mask, sigmask(SIGKILL) | sigmask(SIGSTOP)); - rm_from_queue(sigmask(sig), &t->sig->shared_pending); + rm_from_queue(sigmask(sig), &t->signal->shared_pending); do { rm_from_queue(sigmask(sig), &t->pending); recalc_sigpending_tsk(t); t = next_thread(t); } while (t != current); - spin_unlock_irq(¤t->sig->siglock); - read_unlock(&tasklist_lock); + spin_unlock_irq(¤t->sighand->siglock); + read_unlock(&tasklist_lock); return 0; } @@ -1897,7 +1921,7 @@ do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact) sigmask(SIGKILL) | sigmask(SIGSTOP)); } - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); return 0; } @@ -1926,7 +1950,7 @@ do_sigaltstack (const stack_t *uss, stack_t *uoss, unsigned long sp) goto out; error = -EPERM; - if (on_sig_stack (sp)) + if (on_sig_stack(sp)) goto out; error = -EINVAL; @@ -1984,9 +2008,9 @@ sys_sigprocmask(int how, old_sigset_t *set, old_sigset_t *oset) error = -EFAULT; if (copy_from_user(&new_set, set, sizeof(*set))) goto out; - new_set &= ~(sigmask(SIGKILL)|sigmask(SIGSTOP)); + new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP)); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); old_set = current->blocked.sig[0]; error = 0; @@ -2006,7 +2030,7 @@ sys_sigprocmask(int how, old_sigset_t *set, old_sigset_t *oset) } recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (error) goto out; if (oset) @@ -2068,13 +2092,13 @@ sys_ssetmask(int 
newmask) { int old; - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); old = current->blocked.sig[0]; siginitset(¤t->blocked, newmask & ~(sigmask(SIGKILL)| sigmask(SIGSTOP))); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); return old; } diff --git a/kernel/workqueue.c b/kernel/workqueue.c index fb10d360c436..8fd97c6764a4 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -180,10 +180,10 @@ static int worker_thread(void *__startup) set_user_nice(current, -10); set_cpus_allowed(current, 1UL << cpu); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); siginitsetinv(¤t->blocked, sigmask(SIGCHLD)); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); complete(&startup->done); @@ -213,10 +213,10 @@ static int worker_thread(void *__startup) /* SIGCHLD - auto-reaping */ ; /* zap all other signals */ - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); flush_signals(current); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); } } remove_wait_queue(&cwq->more_work, &wait); diff --git a/mm/pdflush.c b/mm/pdflush.c index 141ce97a6ea9..c38ad46c58c7 100644 --- a/mm/pdflush.c +++ b/mm/pdflush.c @@ -90,10 +90,10 @@ static int __pdflush(struct pdflush_work *my_work) strcpy(current->comm, "pdflush"); /* interruptible sleep, so block all signals */ - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); siginitsetinv(¤t->blocked, 0); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); current->flags |= PF_FLUSHER; my_work->fn = NULL; diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 54c2eb392856..f0da5234b109 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -233,27 +233,27 @@ void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset) /* Turn off various signals */ if (clnt->cl_intr) 
{ - struct k_sigaction *action = current->sig->action; + struct k_sigaction *action = current->sighand->action; if (action[SIGINT-1].sa.sa_handler == SIG_DFL) sigallow |= sigmask(SIGINT); if (action[SIGQUIT-1].sa.sa_handler == SIG_DFL) sigallow |= sigmask(SIGQUIT); } - spin_lock_irqsave(¤t->sig->siglock, irqflags); + spin_lock_irqsave(¤t->sighand->siglock, irqflags); *oldset = current->blocked; siginitsetinv(¤t->blocked, sigallow & ~oldset->sig[0]); recalc_sigpending(); - spin_unlock_irqrestore(¤t->sig->siglock, irqflags); + spin_unlock_irqrestore(¤t->sighand->siglock, irqflags); } void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset) { unsigned long irqflags; - spin_lock_irqsave(¤t->sig->siglock, irqflags); + spin_lock_irqsave(¤t->sighand->siglock, irqflags); current->blocked = *oldset; recalc_sigpending(); - spin_unlock_irqrestore(¤t->sig->siglock, irqflags); + spin_unlock_irqrestore(¤t->sighand->siglock, irqflags); } /* diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index c999d5b2008b..c8e5364ed1ea 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -964,10 +964,10 @@ rpciod(void *ptr) daemonize(); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); siginitsetinv(¤t->blocked, sigmask(SIGKILL)); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); strcpy(current->comm, "rpciod"); @@ -1022,9 +1022,9 @@ rpciod_killall(void) } } - spin_lock_irqsave(¤t->sig->siglock, flags); + spin_lock_irqsave(¤t->sighand->siglock, flags); recalc_sigpending(); - spin_unlock_irqrestore(¤t->sig->siglock, flags); + spin_unlock_irqrestore(¤t->sighand->siglock, flags); } /* @@ -1100,9 +1100,9 @@ rpciod_down(void) } interruptible_sleep_on(&rpciod_killer); } - spin_lock_irqsave(¤t->sig->siglock, flags); + spin_lock_irqsave(¤t->sighand->siglock, flags); recalc_sigpending(); - spin_unlock_irqrestore(¤t->sig->siglock, flags); + spin_unlock_irqrestore(¤t->sighand->siglock, flags); out: 
up(&rpciod_sema); MOD_DEC_USE_COUNT; diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index aacd138fb911..beadf395b863 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -235,9 +235,9 @@ svc_register(struct svc_serv *serv, int proto, unsigned short port) } if (!port) { - spin_lock_irqsave(¤t->sig->siglock, flags); + spin_lock_irqsave(¤t->sighand->siglock, flags); recalc_sigpending(); - spin_unlock_irqrestore(¤t->sig->siglock, flags); + spin_unlock_irqrestore(¤t->sighand->siglock, flags); } return error; diff --git a/security/capability.c b/security/capability.c index cf6d2440a21d..d9b00d69fe41 100644 --- a/security/capability.c +++ b/security/capability.c @@ -133,7 +133,7 @@ void cap_bprm_compute_creds (struct linux_binprm *bprm) if (must_not_trace_exec (current) || atomic_read (¤t->fs->count) > 1 || atomic_read (¤t->files->count) > 1 - || atomic_read (¤t->sig->count) > 1) { + || atomic_read (¤t->sighand->count) > 1) { if (!capable (CAP_SETPCAP)) { new_permitted = cap_intersect (new_permitted, current-> -- cgit v1.2.3 From 631da088f602364f696a50d001b5c2f3d0026070 Mon Sep 17 00:00:00 2001 From: Chris Wedgwood Date: Fri, 7 Feb 2003 00:24:40 -0800 Subject: [PATCH] signal locking update Accomodate the signal locking moving from "tsk->sig" to "tsk->sighand". 
--- arch/alpha/kernel/signal.c | 24 ++++++++++++------------ arch/arm/kernel/signal.c | 24 ++++++++++++------------ arch/ia64/ia32/ia32_signal.c | 12 ++++++------ arch/ia64/kernel/signal.c | 12 ++++++------ arch/m68knommu/kernel/signal.c | 20 ++++++++++---------- arch/parisc/kernel/signal.c | 12 ++++++------ arch/ppc/kernel/signal.c | 20 ++++++++++---------- arch/ppc64/kernel/signal.c | 12 ++++++------ arch/ppc64/kernel/signal32.c | 20 ++++++++++---------- arch/s390/kernel/signal.c | 20 ++++++++++---------- arch/s390x/kernel/linux32.c | 8 ++++---- arch/s390x/kernel/signal.c | 20 ++++++++++---------- arch/s390x/kernel/signal32.c | 20 ++++++++++---------- arch/sparc/kernel/signal.c | 32 ++++++++++++++++---------------- arch/sparc/kernel/sys_sunos.c | 8 ++++---- arch/sparc64/kernel/power.c | 4 ++-- arch/sparc64/kernel/signal.c | 24 ++++++++++++------------ arch/sparc64/kernel/signal32.c | 32 ++++++++++++++++---------------- arch/sparc64/kernel/sys_sparc32.c | 8 ++++---- arch/sparc64/kernel/sys_sunos32.c | 8 ++++---- arch/sparc64/solaris/signal.c | 16 ++++++++-------- arch/um/kernel/signal_kern.c | 20 ++++++++++---------- arch/v850/kernel/signal.c | 20 ++++++++++---------- arch/x86_64/ia32/ia32_signal.c | 12 ++++++------ arch/x86_64/kernel/signal.c | 12 ++++++------ drivers/block/nbd.c | 12 ++++++------ drivers/bluetooth/bt3c_cs.c | 8 ++++---- drivers/char/ftape/lowlevel/fdc-io.c | 8 ++++---- drivers/macintosh/adb.c | 4 ++-- drivers/md/md.c | 4 ++-- drivers/media/video/saa5249.c | 8 ++++---- drivers/mtd/devices/blkmtd.c | 4 ++-- drivers/mtd/mtdblock.c | 4 ++-- drivers/net/8139too.c | 8 ++++---- drivers/net/irda/sir_kthread.c | 4 ++-- fs/afs/cmservice.c | 4 ++-- fs/afs/internal.h | 4 ++-- fs/afs/kafsasyncd.c | 4 ++-- fs/afs/kafstimod.c | 4 ++-- fs/jffs/intrep.c | 8 ++++---- fs/jffs2/os-linux.h | 2 +- fs/jfs/jfs_logmgr.c | 4 ++-- fs/jfs/jfs_txnmgr.c | 8 ++++---- fs/ncpfs/sock.c | 8 ++++---- fs/smbfs/smbiod.c | 4 ++-- fs/xfs/pagebuf/page_buf.c | 4 ++-- 
include/linux/sched.h | 2 +- kernel/suspend.c | 4 ++-- net/rxrpc/internal.h | 4 ++-- net/rxrpc/krxiod.c | 4 ++-- net/rxrpc/krxsecd.c | 4 ++-- net/rxrpc/krxtimod.c | 4 ++-- 52 files changed, 280 insertions(+), 280 deletions(-) (limited to 'include/linux') diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c index a986bc9a2db8..c17cb0f62c69 100644 --- a/arch/alpha/kernel/signal.c +++ b/arch/alpha/kernel/signal.c @@ -63,7 +63,7 @@ osf_sigprocmask(int how, unsigned long newmask, long a2, long a3, unsigned long block, unblock; newmask &= _BLOCKABLE; - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); oldmask = current->blocked.sig[0]; unblock = oldmask & ~newmask; @@ -76,7 +76,7 @@ osf_sigprocmask(int how, unsigned long newmask, long a2, long a3, sigemptyset(¤t->blocked); current->blocked.sig[0] = newmask; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); (®s)->r0 = 0; /* special no error return */ } @@ -150,11 +150,11 @@ do_sigsuspend(old_sigset_t mask, struct pt_regs *reg, struct switch_stack *sw) sigset_t oldset; mask &= _BLOCKABLE; - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); oldset = current->blocked; siginitset(¤t->blocked, mask); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); while (1) { current->state = TASK_INTERRUPTIBLE; @@ -177,11 +177,11 @@ do_rt_sigsuspend(sigset_t *uset, size_t sigsetsize, return -EFAULT; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); oldset = current->blocked; current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); while (1) { current->state = TASK_INTERRUPTIBLE; @@ -284,10 +284,10 @@ do_sigreturn(struct sigframe *frame, struct pt_regs *regs, goto give_sigsegv; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + 
spin_lock_irq(¤t->sighand->siglock); current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (restore_sigcontext(&frame->sc, regs, sw)) goto give_sigsegv; @@ -323,10 +323,10 @@ do_rt_sigreturn(struct rt_sigframe *frame, struct pt_regs *regs, goto give_sigsegv; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (restore_sigcontext(&frame->uc.uc_mcontext, regs, sw)) goto give_sigsegv; @@ -562,11 +562,11 @@ handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info, ka->sa.sa_handler = SIG_DFL; if (!(ka->sa.sa_flags & SA_NODEFER)) { - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); sigaddset(¤t->blocked,sig); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); } } diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 51f8711be5d0..c70ebebb1eb1 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -59,11 +59,11 @@ asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t m sigset_t saveset; mask &= _BLOCKABLE; - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); saveset = current->blocked; siginitset(¤t->blocked, mask); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); regs->ARM_r0 = -EINTR; while (1) { @@ -87,11 +87,11 @@ sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize, struct pt_regs *regs) return -EFAULT; sigdelsetmask(&newset, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); saveset = current->blocked; current->blocked = newset; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + 
spin_unlock_irq(¤t->sighand->siglock); regs->ARM_r0 = -EINTR; while (1) { @@ -207,10 +207,10 @@ asmlinkage int sys_sigreturn(struct pt_regs *regs) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (restore_sigcontext(regs, &frame->sc)) goto badframe; @@ -247,10 +247,10 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) goto badframe; @@ -477,12 +477,12 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, ka->sa.sa_handler = SIG_DFL; if (!(ka->sa.sa_flags & SA_NODEFER)) { - spin_lock_irq(&tsk->sig->siglock); + spin_lock_irq(&tsk->sighand->siglock); sigorsets(&tsk->blocked, &tsk->blocked, &ka->sa.sa_mask); sigaddset(&tsk->blocked, sig); recalc_sigpending(); - spin_unlock_irq(&tsk->sig->siglock); + spin_unlock_irq(&tsk->sighand->siglock); } return; } @@ -521,9 +521,9 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall) unsigned long signr = 0; struct k_sigaction *ka; - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); signr = dequeue_signal(¤t->blocked, &info); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (!signr) break; diff --git a/arch/ia64/ia32/ia32_signal.c b/arch/ia64/ia32/ia32_signal.c index f2d006240df2..de4213cf1a5a 100644 --- a/arch/ia64/ia32/ia32_signal.c +++ b/arch/ia64/ia32/ia32_signal.c @@ -479,13 +479,13 @@ ia32_rt_sigsuspend (compat_sigset_t *uset, unsigned int sigsetsize, struct sigsc sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + 
spin_lock_irq(¤t->sighand->siglock); { oldset = current->blocked; current->blocked = set; recalc_sigpending(); } - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); /* * The return below usually returns to the signal handler. We need to pre-set the @@ -1007,10 +1007,10 @@ sys32_sigreturn (int arg0, int arg1, int arg2, int arg3, int arg4, int arg5, int goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); current->blocked = (sigset_t) set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (restore_sigcontext_ia32(regs, &frame->sc, &eax)) goto badframe; @@ -1038,10 +1038,10 @@ sys32_rt_sigreturn (int arg0, int arg1, int arg2, int arg3, int arg4, int arg5, goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (restore_sigcontext_ia32(regs, &frame->uc.uc_mcontext, &eax)) goto badframe; diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c index 8ff4fe33902a..626725da43f7 100644 --- a/arch/ia64/kernel/signal.c +++ b/arch/ia64/kernel/signal.c @@ -68,13 +68,13 @@ ia64_rt_sigsuspend (sigset_t *uset, size_t sigsetsize, struct sigscratch *scr) sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); { oldset = current->blocked; current->blocked = set; recalc_sigpending(); } - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); /* * The return below usually returns to the signal handler. 
We need to @@ -274,12 +274,12 @@ ia64_rt_sigreturn (struct sigscratch *scr) sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); { current->blocked = set; recalc_sigpending(); } - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (restore_sigcontext(sc, scr)) goto give_sigsegv; @@ -465,13 +465,13 @@ handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigse ka->sa.sa_handler = SIG_DFL; if (!(ka->sa.sa_flags & SA_NODEFER)) { - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); { sigorsets(¤t->blocked, ¤t->blocked, &ka->sa.sa_mask); sigaddset(¤t->blocked, sig); recalc_sigpending(); } - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); } return 1; } diff --git a/arch/m68knommu/kernel/signal.c b/arch/m68knommu/kernel/signal.c index 3b9d312da583..2d51c175074e 100644 --- a/arch/m68knommu/kernel/signal.c +++ b/arch/m68knommu/kernel/signal.c @@ -63,11 +63,11 @@ asmlinkage int do_sigsuspend(struct pt_regs *regs) sigset_t saveset; mask &= _BLOCKABLE; - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); saveset = current->blocked; siginitset(¤t->blocked, mask); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); regs->d0 = -EINTR; while (1) { @@ -93,11 +93,11 @@ do_rt_sigsuspend(struct pt_regs *regs) return -EFAULT; sigdelsetmask(&newset, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); saveset = current->blocked; current->blocked = newset; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); regs->d0 = -EINTR; while (1) { @@ -370,10 +370,10 @@ asmlinkage int do_sigreturn(unsigned long __unused) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); current->blocked = set; recalc_sigpending(); - 
spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (restore_sigcontext(regs, &frame->sc, frame + 1, &d0)) goto badframe; @@ -399,10 +399,10 @@ asmlinkage int do_rt_sigreturn(unsigned long __unused) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (rt_restore_ucontext(regs, sw, &frame->uc, &d0)) goto badframe; @@ -738,11 +738,11 @@ handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info, ka->sa.sa_handler = SIG_DFL; if (!(ka->sa.sa_flags & SA_NODEFER)) { - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); sigaddset(¤t->blocked,sig); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); } } diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index 0fd358285653..47c9c81ad69f 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c @@ -118,11 +118,11 @@ sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize, struct pt_regs *regs) #endif sigdelsetmask(&newset, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); saveset = current->blocked; current->blocked = newset; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); regs->gr[28] = -EINTR; while (1) { @@ -177,10 +177,10 @@ sys_rt_sigreturn(struct pt_regs *regs, int in_syscall) goto give_sigsegv; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); /* Good thing we saved the old gr[30], eh? 
*/ if (restore_sigcontext(&frame->uc.uc_mcontext, regs)) @@ -407,11 +407,11 @@ handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset, ka->sa.sa_handler = SIG_DFL; if (!(ka->sa.sa_flags & SA_NODEFER)) { - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); sigaddset(¤t->blocked,sig); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); } return 1; } diff --git a/arch/ppc/kernel/signal.c b/arch/ppc/kernel/signal.c index fddae70728fd..0878aed66721 100644 --- a/arch/ppc/kernel/signal.c +++ b/arch/ppc/kernel/signal.c @@ -65,11 +65,11 @@ sys_sigsuspend(old_sigset_t mask, int p2, int p3, int p4, int p6, int p7, sigset_t saveset; mask &= _BLOCKABLE; - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); saveset = current->blocked; siginitset(¤t->blocked, mask); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); regs->result = -EINTR; regs->ccr |= 0x10000000; @@ -96,11 +96,11 @@ sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize, int p3, int p4, int p6, return -EFAULT; sigdelsetmask(&newset, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); saveset = current->blocked; current->blocked = newset; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); regs->result = -EINTR; regs->ccr |= 0x10000000; @@ -208,10 +208,10 @@ int sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, || copy_from_user(&st, &rt_sf->uc.uc_stack, sizeof(st))) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (regs->msr & MSR_FP) giveup_fpu(current); @@ -311,10 +311,10 @@ int sys_sigreturn(int r3, int r4, int r5, int 
r6, int r7, int r8, set.sig[1] = sigctx._unused[3]; #endif sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (regs->msr & MSR_FP ) giveup_fpu(current); @@ -450,11 +450,11 @@ handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset, ka->sa.sa_handler = SIG_DFL; if (!(ka->sa.sa_flags & SA_NODEFER)) { - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); sigaddset(¤t->blocked,sig); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); } return; diff --git a/arch/ppc64/kernel/signal.c b/arch/ppc64/kernel/signal.c index acb5577269f6..c07f30d4ccac 100644 --- a/arch/ppc64/kernel/signal.c +++ b/arch/ppc64/kernel/signal.c @@ -112,11 +112,11 @@ long sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize, int p3, int p4, int return -EFAULT; sigdelsetmask(&newset, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); saveset = current->blocked; current->blocked = newset; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); regs->result = -EINTR; regs->gpr[3] = EINTR; @@ -164,10 +164,10 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5, || copy_from_user(&st, &rt_sf->uc.uc_stack, sizeof(st))) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (regs->msr & MSR_FP) giveup_fpu(current); @@ -333,11 +333,11 @@ static void handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset, ka->sa.sa_handler = SIG_DFL; if (!(ka->sa.sa_flags & SA_NODEFER)) { - 
spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); sigaddset(¤t->blocked,sig); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); } return; diff --git a/arch/ppc64/kernel/signal32.c b/arch/ppc64/kernel/signal32.c index 8dd332ded5ff..3999fd4ea35f 100644 --- a/arch/ppc64/kernel/signal32.c +++ b/arch/ppc64/kernel/signal32.c @@ -126,11 +126,11 @@ long sys32_sigsuspend(old_sigset_t mask, int p2, int p3, int p4, int p6, sigset_t saveset; mask &= _BLOCKABLE; - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); saveset = current->blocked; siginitset(¤t->blocked, mask); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); regs->result = -EINTR; regs->gpr[3] = EINTR; @@ -268,10 +268,10 @@ long sys32_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5, */ set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32); sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (regs->msr & MSR_FP ) giveup_fpu(current); /* Last stacked signal - restore registers */ @@ -487,10 +487,10 @@ long sys32_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5, */ sigdelsetmask(&set, ~_BLOCKABLE); /* update the current based on the sigmask found in the rt_stackframe */ - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); /* If currently owning the floating point - give them up */ if (regs->msr & MSR_FP) @@ -863,11 +863,11 @@ int sys32_rt_sigsuspend(sigset32_t* unewset, size_t sigsetsize, int p3, sigdelsetmask(&newset, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); 
+ spin_lock_irq(¤t->sighand->siglock); saveset = current->blocked; current->blocked = newset; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); regs->result = -EINTR; regs->gpr[3] = EINTR; @@ -1055,11 +1055,11 @@ static void handle_signal32(unsigned long sig, siginfo_t *info, ka->sa.sa_handler = SIG_DFL; if (!(ka->sa.sa_flags & SA_NODEFER)) { - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); sigaddset(¤t->blocked,sig); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); } return; diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 1f2b732fb96a..8c69d545b82f 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -61,11 +61,11 @@ sys_sigsuspend(struct pt_regs * regs, int history0, int history1, sigset_t saveset; mask &= _BLOCKABLE; - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); saveset = current->blocked; siginitset(¤t->blocked, mask); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); regs->gprs[2] = -EINTR; while (1) { @@ -89,11 +89,11 @@ sys_rt_sigsuspend(struct pt_regs * regs,sigset_t *unewset, size_t sigsetsize) return -EFAULT; sigdelsetmask(&newset, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); saveset = current->blocked; current->blocked = newset; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); regs->gprs[2] = -EINTR; while (1) { @@ -194,10 +194,10 @@ asmlinkage long sys_sigreturn(struct pt_regs *regs) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (restore_sigregs(regs, &frame->sregs)) 
goto badframe; @@ -220,10 +220,10 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (restore_sigregs(regs, &frame->uc.uc_mcontext)) goto badframe; @@ -427,11 +427,11 @@ handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset, ka->sa.sa_handler = SIG_DFL; if (!(ka->sa.sa_flags & SA_NODEFER)) { - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); sigaddset(¤t->blocked,sig); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); } } diff --git a/arch/s390x/kernel/linux32.c b/arch/s390x/kernel/linux32.c index ee1dcc5d8f61..298c903a2d7c 100644 --- a/arch/s390x/kernel/linux32.c +++ b/arch/s390x/kernel/linux32.c @@ -1725,7 +1725,7 @@ sys32_rt_sigtimedwait(compat_sigset_t *uthese, siginfo_t32 *uinfo, return -EINVAL; } - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); sig = dequeue_signal(&these, &info); if (!sig) { /* None ready -- temporarily unblock those we're interested @@ -1733,7 +1733,7 @@ sys32_rt_sigtimedwait(compat_sigset_t *uthese, siginfo_t32 *uinfo, current->real_blocked = current->blocked; sigandsets(¤t->blocked, ¤t->blocked, &these); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); timeout = MAX_SCHEDULE_TIMEOUT; if (uts) @@ -1743,13 +1743,13 @@ sys32_rt_sigtimedwait(compat_sigset_t *uthese, siginfo_t32 *uinfo, current->state = TASK_INTERRUPTIBLE; timeout = schedule_timeout(timeout); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); sig = dequeue_signal(&these, &info); current->blocked = current->real_blocked; siginitset(¤t->real_blocked, 0); recalc_sigpending(); } - 
spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (sig) { ret = sig; diff --git a/arch/s390x/kernel/signal.c b/arch/s390x/kernel/signal.c index bb9291563f5f..957197b9b35c 100644 --- a/arch/s390x/kernel/signal.c +++ b/arch/s390x/kernel/signal.c @@ -60,11 +60,11 @@ sys_sigsuspend(struct pt_regs * regs,int history0, int history1, old_sigset_t ma sigset_t saveset; mask &= _BLOCKABLE; - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); saveset = current->blocked; siginitset(¤t->blocked, mask); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); regs->gprs[2] = -EINTR; while (1) { @@ -88,11 +88,11 @@ sys_rt_sigsuspend(struct pt_regs * regs,sigset_t *unewset, size_t sigsetsize) return -EFAULT; sigdelsetmask(&newset, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); saveset = current->blocked; current->blocked = newset; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); regs->gprs[2] = -EINTR; while (1) { @@ -188,10 +188,10 @@ asmlinkage long sys_sigreturn(struct pt_regs *regs) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (restore_sigregs(regs, &frame->sregs)) goto badframe; @@ -214,10 +214,10 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (restore_sigregs(regs, &frame->uc.uc_mcontext)) goto badframe; @@ -421,11 +421,11 @@ handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset, ka->sa.sa_handler = SIG_DFL; if 
(!(ka->sa.sa_flags & SA_NODEFER)) { - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); sigaddset(¤t->blocked,sig); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); } } diff --git a/arch/s390x/kernel/signal32.c b/arch/s390x/kernel/signal32.c index 11f5e3baed07..9757d092bbfb 100644 --- a/arch/s390x/kernel/signal32.c +++ b/arch/s390x/kernel/signal32.c @@ -112,11 +112,11 @@ sys32_sigsuspend(struct pt_regs * regs,int history0, int history1, old_sigset_t sigset_t saveset; mask &= _BLOCKABLE; - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); saveset = current->blocked; siginitset(¤t->blocked, mask); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); regs->gprs[2] = -EINTR; while (1) { @@ -147,11 +147,11 @@ sys32_rt_sigsuspend(struct pt_regs * regs,compat_sigset_t *unewset, size_t sigse } sigdelsetmask(&newset, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); saveset = current->blocked; current->blocked = newset; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); regs->gprs[2] = -EINTR; while (1) { @@ -345,10 +345,10 @@ asmlinkage long sys32_sigreturn(struct pt_regs *regs) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (restore_sigregs32(regs, &frame->sregs)) goto badframe; @@ -375,10 +375,10 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if 
(restore_sigregs32(regs, &frame->uc.uc_mcontext)) goto badframe; @@ -588,11 +588,11 @@ handle_signal32(unsigned long sig, siginfo_t *info, sigset_t *oldset, ka->sa.sa_handler = SIG_DFL; if (!(ka->sa.sa_flags & SA_NODEFER)) { - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); sigaddset(¤t->blocked,sig); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); } } diff --git a/arch/sparc/kernel/signal.c b/arch/sparc/kernel/signal.c index 9ea6c0e50af4..afaa3df1b6c0 100644 --- a/arch/sparc/kernel/signal.c +++ b/arch/sparc/kernel/signal.c @@ -104,11 +104,11 @@ asmlinkage void _sigpause_common(old_sigset_t set, struct pt_regs *regs) sigset_t saveset; set &= _BLOCKABLE; - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); saveset = current->blocked; siginitset(¤t->blocked, set); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); regs->pc = regs->npc; regs->npc += 4; @@ -161,11 +161,11 @@ asmlinkage void do_rt_sigsuspend(sigset_t *uset, size_t sigsetsize, } sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); oldset = current->blocked; current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); regs->pc = regs->npc; regs->npc += 4; @@ -267,10 +267,10 @@ static inline void do_new_sigreturn (struct pt_regs *regs) goto segv_and_exit; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); return; segv_and_exit: @@ -314,10 +314,10 @@ asmlinkage void do_sigreturn(struct pt_regs *regs) goto segv_and_exit; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + 
spin_lock_irq(¤t->sighand->siglock); current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); regs->pc = pc; regs->npc = npc; @@ -384,10 +384,10 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs) do_sigaltstack(&st, NULL, (unsigned long)sf); sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); return; segv: send_sig(SIGSEGV, current, 1); @@ -967,10 +967,10 @@ asmlinkage int svr4_setcontext (svr4_ucontext_t *c, struct pt_regs *regs) set.sig[3] = setv.sigbits[3]; } sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); regs->pc = pc; regs->npc = npc | 1; err |= __get_user(regs->y, &((*gr) [SVR4_Y])); @@ -1007,11 +1007,11 @@ handle_signal(unsigned long signr, struct k_sigaction *ka, if(ka->sa.sa_flags & SA_ONESHOT) ka->sa.sa_handler = SIG_DFL; if(!(ka->sa.sa_flags & SA_NOMASK)) { - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); sigaddset(¤t->blocked, signr); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); } } @@ -1066,9 +1066,9 @@ asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs, sigset_t *mask = ¤t->blocked; unsigned long signr = 0; - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); signr = dequeue_signal(mask, &info); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (!signr) break; diff --git a/arch/sparc/kernel/sys_sunos.c b/arch/sparc/kernel/sys_sunos.c index a81ca4978702..383012ad6d90 100644 --- a/arch/sparc/kernel/sys_sunos.c +++ 
b/arch/sparc/kernel/sys_sunos.c @@ -281,11 +281,11 @@ asmlinkage unsigned long sunos_sigblock(unsigned long blk_mask) { unsigned long old; - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); old = current->blocked.sig[0]; current->blocked.sig[0] |= (blk_mask & _BLOCKABLE); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); return old; } @@ -293,11 +293,11 @@ asmlinkage unsigned long sunos_sigsetmask(unsigned long newmask) { unsigned long retval; - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); retval = current->blocked.sig[0]; current->blocked.sig[0] = (newmask & _BLOCKABLE); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); return retval; } diff --git a/arch/sparc64/kernel/power.c b/arch/sparc64/kernel/power.c index e0da1f38b54a..042bb08bbb21 100644 --- a/arch/sparc64/kernel/power.c +++ b/arch/sparc64/kernel/power.c @@ -70,9 +70,9 @@ static int powerd(void *__unused) again: while (button_pressed == 0) { - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); flush_signals(current); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); interruptible_sleep_on(&powerd_wait); } diff --git a/arch/sparc64/kernel/signal.c b/arch/sparc64/kernel/signal.c index d90f860410b2..fc16c629e2b7 100644 --- a/arch/sparc64/kernel/signal.c +++ b/arch/sparc64/kernel/signal.c @@ -70,10 +70,10 @@ asmlinkage void sparc64_set_context(struct pt_regs *regs) goto do_sigsegv; } sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); } if (test_thread_flag(TIF_32BIT)) { pc &= 0xffffffff; @@ -257,11 +257,11 @@ asmlinkage void _sigpause_common(old_sigset_t set, struct pt_regs *regs) } #endif set &= _BLOCKABLE; - 
spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); saveset = current->blocked; siginitset(¤t->blocked, set); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (test_thread_flag(TIF_32BIT)) { regs->tpc = (regs->tnpc & 0xffffffff); @@ -317,11 +317,11 @@ asmlinkage void do_rt_sigsuspend(sigset_t *uset, size_t sigsetsize, struct pt_re } sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); oldset = current->blocked; current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (test_thread_flag(TIF_32BIT)) { regs->tpc = (regs->tnpc & 0xffffffff); @@ -428,10 +428,10 @@ void do_rt_sigreturn(struct pt_regs *regs) set_fs(old_fs); sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); return; segv: send_sig(SIGSEGV, current, 1); @@ -564,11 +564,11 @@ static inline void handle_signal(unsigned long signr, struct k_sigaction *ka, if (ka->sa.sa_flags & SA_ONESHOT) ka->sa.sa_handler = SIG_DFL; if (!(ka->sa.sa_flags & SA_NOMASK)) { - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); sigaddset(¤t->blocked,signr); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); } } @@ -619,9 +619,9 @@ static int do_signal(sigset_t *oldset, struct pt_regs * regs, sigset_t *mask = ¤t->blocked; unsigned long signr = 0; - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); signr = dequeue_signal(mask, &info); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (!signr) break; diff --git a/arch/sparc64/kernel/signal32.c b/arch/sparc64/kernel/signal32.c index 
b2a6edc8ed8d..8b5de881a810 100644 --- a/arch/sparc64/kernel/signal32.c +++ b/arch/sparc64/kernel/signal32.c @@ -144,11 +144,11 @@ asmlinkage void _sigpause32_common(compat_old_sigset_t set, struct pt_regs *regs sigset_t saveset; set &= _BLOCKABLE; - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); saveset = current->blocked; siginitset(¤t->blocked, set); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); regs->tpc = regs->tnpc; regs->tnpc += 4; @@ -199,11 +199,11 @@ asmlinkage void do_rt_sigsuspend32(u32 uset, size_t sigsetsize, struct pt_regs * case 1: set.sig[0] = set32.sig[0] + (((long)set32.sig[1]) << 32); } sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); oldset = current->blocked; current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); regs->tpc = regs->tnpc; regs->tnpc += 4; @@ -312,10 +312,10 @@ void do_new_sigreturn32(struct pt_regs *regs) case 1: set.sig[0] = seta[0] + (((long)seta[1]) << 32); } sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); return; segv: @@ -359,10 +359,10 @@ asmlinkage void do_sigreturn32(struct pt_regs *regs) case 1: set.sig[0] = seta[0] + (((long)seta[1]) << 32); } sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (test_thread_flag(TIF_32BIT)) { pc &= 0xffffffff; @@ -461,10 +461,10 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs) case 1: set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32); } sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + 
spin_lock_irq(¤t->sighand->siglock); current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); return; segv: do_exit(SIGSEGV); @@ -1059,10 +1059,10 @@ asmlinkage int svr4_setcontext(svr4_ucontext_t *c, struct pt_regs *regs) set_fs(old_fs); sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); regs->tpc = pc; regs->tnpc = npc | 1; if (test_thread_flag(TIF_32BIT)) { @@ -1241,11 +1241,11 @@ static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka, if (ka->sa.sa_flags & SA_ONESHOT) ka->sa.sa_handler = SIG_DFL; if (!(ka->sa.sa_flags & SA_NOMASK)) { - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); sigaddset(¤t->blocked,signr); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); } } @@ -1288,9 +1288,9 @@ int do_signal32(sigset_t *oldset, struct pt_regs * regs, sigset_t *mask = ¤t->blocked; unsigned long signr = 0; - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); signr = dequeue_signal(mask, &info); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (!signr) break; diff --git a/arch/sparc64/kernel/sys_sparc32.c b/arch/sparc64/kernel/sys_sparc32.c index 62e4753a5eca..b01db9baf0d6 100644 --- a/arch/sparc64/kernel/sys_sparc32.c +++ b/arch/sparc64/kernel/sys_sparc32.c @@ -1812,7 +1812,7 @@ sys32_rt_sigtimedwait(compat_sigset_t *uthese, siginfo_t32 *uinfo, return -EINVAL; } - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); sig = dequeue_signal(&these, &info); if (!sig) { timeout = MAX_SCHEDULE_TIMEOUT; @@ -1827,19 +1827,19 @@ sys32_rt_sigtimedwait(compat_sigset_t *uthese, siginfo_t32 *uinfo, current->real_blocked = 
current->blocked; sigandsets(¤t->blocked, ¤t->blocked, &these); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); current->state = TASK_INTERRUPTIBLE; timeout = schedule_timeout(timeout); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); sig = dequeue_signal(&these, &info); current->blocked = current->real_blocked; siginitset(¤t->real_blocked, 0); recalc_sigpending(); } } - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (sig) { ret = sig; diff --git a/arch/sparc64/kernel/sys_sunos32.c b/arch/sparc64/kernel/sys_sunos32.c index 8d0b518b3227..b3341bbbd2fe 100644 --- a/arch/sparc64/kernel/sys_sunos32.c +++ b/arch/sparc64/kernel/sys_sunos32.c @@ -238,11 +238,11 @@ asmlinkage u32 sunos_sigblock(u32 blk_mask) { u32 old; - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); old = (u32) current->blocked.sig[0]; current->blocked.sig[0] |= (blk_mask & _BLOCKABLE); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); return old; } @@ -250,11 +250,11 @@ asmlinkage u32 sunos_sigsetmask(u32 newmask) { u32 retval; - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); retval = (u32) current->blocked.sig[0]; current->blocked.sig[0] = (newmask & _BLOCKABLE); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); return retval; } diff --git a/arch/sparc64/solaris/signal.c b/arch/sparc64/solaris/signal.c index bb2d2b30c645..72f126c3dcae 100644 --- a/arch/sparc64/solaris/signal.c +++ b/arch/sparc64/solaris/signal.c @@ -99,16 +99,16 @@ static inline long solaris_signal(int sig, u32 arg) static long solaris_sigset(int sig, u32 arg) { if (arg != 2) /* HOLD */ { - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); sigdelsetmask(¤t->blocked, _S(sig)); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + 
spin_unlock_irq(¤t->sighand->siglock); return sig_handler (sig, arg, 0); } else { - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); sigaddsetmask(¤t->blocked, (_S(sig) & ~_BLOCKABLE)); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); return 0; } } @@ -120,10 +120,10 @@ static inline long solaris_sighold(int sig) static inline long solaris_sigrelse(int sig) { - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); sigdelsetmask(¤t->blocked, _S(sig)); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); return 0; } @@ -311,10 +311,10 @@ asmlinkage int solaris_sigpending(int which, u32 set) u32 tmp[4]; switch (which) { case 1: /* sigpending */ - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); sigandsets(&s, ¤t->blocked, ¤t->pending.signal); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); break; case 2: /* sigfillset - I just set signals which have linux equivalents */ sigfillset(&s); diff --git a/arch/um/kernel/signal_kern.c b/arch/um/kernel/signal_kern.c index bd4572688650..d640ff441ff8 100644 --- a/arch/um/kernel/signal_kern.c +++ b/arch/um/kernel/signal_kern.c @@ -95,12 +95,12 @@ static int handle_signal(struct pt_regs *regs, unsigned long signr, ka->sa.sa_handler = SIG_DFL; if (!(ka->sa.sa_flags & SA_NODEFER)) { - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); sigorsets(¤t->blocked, ¤t->blocked, &ka->sa.sa_mask); sigaddset(¤t->blocked, signr); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); } sp = PT_REGS_SP(regs); @@ -186,11 +186,11 @@ int sys_sigsuspend(int history0, int history1, old_sigset_t mask) sigset_t saveset; mask &= _BLOCKABLE; - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); saveset = current->blocked; siginitset(¤t->blocked, mask); 
recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); while (1) { current->state = TASK_INTERRUPTIBLE; @@ -212,11 +212,11 @@ int sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize) return -EFAULT; sigdelsetmask(&newset, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); saveset = current->blocked; current->blocked = newset; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); while (1) { current->state = TASK_INTERRUPTIBLE; @@ -242,13 +242,13 @@ int sys_sigreturn(struct pt_regs regs) void *mask = sp_to_mask(PT_REGS_SP(¤t->thread.regs)); int sig_size = (_NSIG_WORDS - 1) * sizeof(unsigned long); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); copy_from_user(¤t->blocked.sig[0], sc_sigmask(sc), sizeof(current->blocked.sig[0])); copy_from_user(¤t->blocked.sig[1], mask, sig_size); sigdelsetmask(¤t->blocked, ~_BLOCKABLE); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); copy_sc_from_user(¤t->thread.regs, sc, &signal_frame_sc.common.arch); return(PT_REGS_SYSCALL_RET(¤t->thread.regs)); @@ -260,11 +260,11 @@ int sys_rt_sigreturn(struct pt_regs regs) void *fp; int sig_size = _NSIG_WORDS * sizeof(unsigned long); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); copy_from_user(¤t->blocked, &uc->uc_sigmask, sig_size); sigdelsetmask(¤t->blocked, ~_BLOCKABLE); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); fp = (void *) (((unsigned long) uc) + sizeof(struct ucontext)); copy_sc_from_user(¤t->thread.regs, &uc->uc_mcontext, &signal_frame_si.common.arch); diff --git a/arch/v850/kernel/signal.c b/arch/v850/kernel/signal.c index d7ec3892e848..324a2fe2fe21 100644 --- a/arch/v850/kernel/signal.c +++ b/arch/v850/kernel/signal.c @@ -50,11 +50,11 @@ sys_sigsuspend(old_sigset_t mask, struct pt_regs *regs) sigset_t 
saveset; mask &= _BLOCKABLE; - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); saveset = current->blocked; siginitset(¤t->blocked, mask); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); regs->gpr[GPR_RVAL] = -EINTR; while (1) { @@ -78,11 +78,11 @@ sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize, if (copy_from_user(&newset, unewset, sizeof(newset))) return -EFAULT; sigdelsetmask(&newset, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); saveset = current->blocked; current->blocked = newset; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); regs->gpr[GPR_RVAL] = -EINTR; while (1) { @@ -188,10 +188,10 @@ asmlinkage int sys_sigreturn(struct pt_regs *regs) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (restore_sigcontext(regs, &frame->sc, &rval)) goto badframe; @@ -216,10 +216,10 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &rval)) goto badframe; @@ -472,11 +472,11 @@ handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset, ka->sa.sa_handler = SIG_DFL; if (!(ka->sa.sa_flags & SA_NODEFER)) { - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); sigaddset(¤t->blocked,sig); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); } } diff --git a/arch/x86_64/ia32/ia32_signal.c 
b/arch/x86_64/ia32/ia32_signal.c index c3d9c99d760e..a638bc480cff 100644 --- a/arch/x86_64/ia32/ia32_signal.c +++ b/arch/x86_64/ia32/ia32_signal.c @@ -83,11 +83,11 @@ sys32_sigsuspend(int history0, int history1, old_sigset_t mask, struct pt_regs r sigset_t saveset; mask &= _BLOCKABLE; - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); saveset = current->blocked; siginitset(¤t->blocked, mask); recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); regs.rax = -EINTR; while (1) { @@ -243,10 +243,10 @@ asmlinkage long sys32_sigreturn(struct pt_regs regs) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (ia32_restore_sigcontext(®s, &frame->sc, &eax)) goto badframe; @@ -270,10 +270,10 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs regs) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); current->blocked = set; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); if (ia32_restore_sigcontext(®s, &frame->uc.uc_mcontext, &eax)) goto badframe; diff --git a/arch/x86_64/kernel/signal.c b/arch/x86_64/kernel/signal.c index a2c0878a1c16..3f3582cd270b 100644 --- a/arch/x86_64/kernel/signal.c +++ b/arch/x86_64/kernel/signal.c @@ -52,11 +52,11 @@ sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize, struct pt_regs regs) return -EFAULT; sigdelsetmask(&newset, ~_BLOCKABLE); - spin_lock_irq(¤t->sig->siglock); + spin_lock_irq(¤t->sighand->siglock); saveset = current->blocked; current->blocked = newset; recalc_sigpending(); - spin_unlock_irq(¤t->sig->siglock); + spin_unlock_irq(¤t->sighand->siglock); #if DEBUG_SIG printk("rt_sigsuspend savset(%lx) newset(%lx) regs(%p) rip(%lx)\n", saveset, newset, ®s, 
regs.rip); @@ -155,10 +155,10 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs regs) } sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(&current->sig->siglock); + spin_lock_irq(&current->sighand->siglock); current->blocked = set; recalc_sigpending(); - spin_unlock_irq(&current->sig->siglock); + spin_unlock_irq(&current->sighand->siglock); if (restore_sigcontext(&regs, &frame->uc.uc_mcontext, &eax)) { goto badframe; @@ -401,11 +401,11 @@ handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset, ka->sa.sa_handler = SIG_DFL; if (!(ka->sa.sa_flags & SA_NODEFER)) { - spin_lock_irq(&current->sig->siglock); + spin_lock_irq(&current->sighand->siglock); sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); sigaddset(&current->blocked,sig); recalc_sigpending(); - spin_unlock_irq(&current->sig->siglock); + spin_unlock_irq(&current->sighand->siglock); } } diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 0f9f7a9de7ba..d765900b200b 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -118,12 +118,12 @@ static int nbd_xmit(int send, struct socket *sock, char *buf, int size, int msg_ set_fs(get_ds()); /* Allow interception of SIGKILL only * Don't allow other signals to interrupt the transmission */ - spin_lock_irqsave(&current->sig->siglock, flags); + spin_lock_irqsave(&current->sighand->siglock, flags); oldset = current->blocked; sigfillset(&current->blocked); sigdelsetmask(&current->blocked, sigmask(SIGKILL)); recalc_sigpending(); - spin_unlock_irqrestore(&current->sig->siglock, flags); + spin_unlock_irqrestore(&current->sighand->siglock, flags); do { @@ -146,11 +146,11 @@ static int nbd_xmit(int send, struct socket *sock, char *buf, int size, int msg_ if (signal_pending(current)) { siginfo_t info; - spin_lock_irqsave(&current->sig->siglock, flags); + spin_lock_irqsave(&current->sighand->siglock, flags); printk(KERN_WARNING "NBD (pid %d: %s) got signal %d\n", current->pid, current->comm, dequeue_signal(&current->blocked, &info)); - spin_unlock_irqrestore(&current->sig->siglock, flags); + spin_unlock_irqrestore(&current->sighand->siglock, flags); result = -EINTR; 
break; } @@ -166,10 +166,10 @@ static int nbd_xmit(int send, struct socket *sock, char *buf, int size, int msg_ buf += result; } while (size > 0); - spin_lock_irqsave(&current->sig->siglock, flags); + spin_lock_irqsave(&current->sighand->siglock, flags); current->blocked = oldset; recalc_sigpending(); - spin_unlock_irqrestore(&current->sig->siglock, flags); + spin_unlock_irqrestore(&current->sighand->siglock, flags); set_fs(oldfs); return result; diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c index e959f7814225..de14aeafa20c 100644 --- a/drivers/bluetooth/bt3c_cs.c +++ b/drivers/bluetooth/bt3c_cs.c @@ -528,19 +528,19 @@ static int bt3c_firmware_load(bt3c_info_t *info) } /* Block signals, everything but SIGKILL/SIGSTOP */ - spin_lock_irq(&current->sig->siglock); + spin_lock_irq(&current->sighand->siglock); tmpsig = current->blocked; siginitsetinv(&current->blocked, sigmask(SIGKILL) | sigmask(SIGSTOP)); recalc_sigpending(); - spin_unlock_irq(&current->sig->siglock); + spin_unlock_irq(&current->sighand->siglock); result = waitpid(pid, NULL, __WCLONE); /* Allow signals again */ - spin_lock_irq(&current->sig->siglock); + spin_lock_irq(&current->sighand->siglock); current->blocked = tmpsig; recalc_sigpending(); - spin_unlock_irq(&current->sig->siglock); + spin_unlock_irq(&current->sighand->siglock); if (result != pid) { printk(KERN_WARNING "bt3c_cs: Waiting for pid %d failed (errno=%d).\n", pid, -result); diff --git a/drivers/char/ftape/lowlevel/fdc-io.c b/drivers/char/ftape/lowlevel/fdc-io.c index c58ea6f76c72..99e4b8fb8b7b 100644 --- a/drivers/char/ftape/lowlevel/fdc-io.c +++ b/drivers/char/ftape/lowlevel/fdc-io.c @@ -386,11 +386,11 @@ int fdc_interrupt_wait(unsigned int time) /* timeout time will be up to USPT microseconds too long ! 
*/ timeout = (1000 * time + FT_USPT - 1) / FT_USPT; - spin_lock_irq(&current->sig->siglock); + spin_lock_irq(&current->sighand->siglock); old_sigmask = current->blocked; sigfillset(&current->blocked); recalc_sigpending(); - spin_unlock_irq(&current->sig->siglock); + spin_unlock_irq(&current->sighand->siglock); current->state = TASK_INTERRUPTIBLE; add_wait_queue(&ftape_wait_intr, &wait); @@ -398,10 +398,10 @@ int fdc_interrupt_wait(unsigned int time) timeout = schedule_timeout(timeout); } - spin_lock_irq(&current->sig->siglock); + spin_lock_irq(&current->sighand->siglock); current->blocked = old_sigmask; recalc_sigpending(); - spin_unlock_irq(&current->sig->siglock); + spin_unlock_irq(&current->sighand->siglock); remove_wait_queue(&ftape_wait_intr, &wait); /* the following IS necessary. True: as well diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c index 3dace6456209..6e994acf13b6 100644 --- a/drivers/macintosh/adb.c +++ b/drivers/macintosh/adb.c @@ -246,10 +246,10 @@ adb_probe_task(void *x) { strcpy(current->comm, "kadbprobe"); - spin_lock_irq(&current->sig->siglock); + spin_lock_irq(&current->sighand->siglock); sigfillset(&current->blocked); flush_signals(current); - spin_unlock_irq(&current->sig->siglock); + spin_unlock_irq(&current->sighand->siglock); printk(KERN_INFO "adb: starting probe task...\n"); do_adb_reset_bus(); diff --git a/drivers/md/md.c b/drivers/md/md.c index e05f1b3daeba..9f1d5d0878e5 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -2444,9 +2444,9 @@ static struct block_device_operations md_fops = static inline void flush_curr_signals(void) { - spin_lock(&current->sig->siglock); + spin_lock(&current->sighand->siglock); flush_signals(current); - spin_unlock(&current->sig->siglock); + spin_unlock(&current->sighand->siglock); } int md_thread(void * arg) diff --git a/drivers/media/video/saa5249.c b/drivers/media/video/saa5249.c index 30041f612215..897b3382d0d9 100644 --- a/drivers/media/video/saa5249.c +++ b/drivers/media/video/saa5249.c @@ -280,17 +280,17 @@ static void jdelay(unsigned long delay) { sigset_t oldblocked = 
current->blocked; - spin_lock_irq(&current->sig->siglock); + spin_lock_irq(&current->sighand->siglock); sigfillset(&current->blocked); recalc_sigpending(); - spin_unlock_irq(&current->sig->siglock); + spin_unlock_irq(&current->sighand->siglock); current->state = TASK_INTERRUPTIBLE; schedule_timeout(delay); - spin_lock_irq(&current->sig->siglock); + spin_lock_irq(&current->sighand->siglock); current->blocked = oldblocked; recalc_sigpending(); - spin_unlock_irq(&current->sig->siglock); + spin_unlock_irq(&current->sighand->siglock); } diff --git a/drivers/mtd/devices/blkmtd.c b/drivers/mtd/devices/blkmtd.c index d609dfc4f8ef..7d738397f667 100644 --- a/drivers/mtd/devices/blkmtd.c +++ b/drivers/mtd/devices/blkmtd.c @@ -305,10 +305,10 @@ static int write_queue_task(void *data) DEBUG(1, "blkmtd: writetask: starting (pid = %d)\n", tsk->pid); daemonize(); strcpy(tsk->comm, "blkmtdd"); - spin_lock_irq(&tsk->sig->siglock); + spin_lock_irq(&tsk->sighand->siglock); sigfillset(&tsk->blocked); recalc_sigpending(); - spin_unlock_irq(&tsk->sig->siglock); + spin_unlock_irq(&tsk->sighand->siglock); if(alloc_kiovec(1, &iobuf)) { printk("blkmtd: write_queue_task cant allocate kiobuf\n"); diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c index bec639985ebc..0aec2e6709e7 100644 --- a/drivers/mtd/mtdblock.c +++ b/drivers/mtd/mtdblock.c @@ -453,10 +453,10 @@ int mtdblock_thread(void *dummy) /* we might get involved when memory gets low, so use PF_MEMALLOC */ tsk->flags |= PF_MEMALLOC; strcpy(tsk->comm, "mtdblockd"); - spin_lock_irq(&tsk->sig->siglock); + spin_lock_irq(&tsk->sighand->siglock); sigfillset(&tsk->blocked); recalc_sigpending(); - spin_unlock_irq(&tsk->sig->siglock); + spin_unlock_irq(&tsk->sighand->siglock); daemonize(); while (!leaving) { diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c index 8fcf0527a01d..e1e8d997983e 100644 --- a/drivers/net/8139too.c +++ b/drivers/net/8139too.c @@ -1589,10 +1589,10 @@ static int rtl8139_thread (void *data) unsigned long timeout; daemonize(); - 
spin_lock_irq(&current->sig->siglock); + spin_lock_irq(&current->sighand->siglock); sigemptyset(&current->blocked); recalc_sigpending(); - spin_unlock_irq(&current->sig->siglock); + spin_unlock_irq(&current->sighand->siglock); strncpy (current->comm, dev->name, sizeof(current->comm) - 1); current->comm[sizeof(current->comm) - 1] = '\0'; @@ -1604,9 +1604,9 @@ static int rtl8139_thread (void *data) } while (!signal_pending (current) && (timeout > 0)); if (signal_pending (current)) { - spin_lock_irq(&current->sig->siglock); + spin_lock_irq(&current->sighand->siglock); flush_signals(current); - spin_unlock_irq(&current->sig->siglock); + spin_unlock_irq(&current->sighand->siglock); } if (tp->time_to_die) diff --git a/drivers/net/irda/sir_kthread.c b/drivers/net/irda/sir_kthread.c index 80fd4a071836..3f2a538981d9 100644 --- a/drivers/net/irda/sir_kthread.c +++ b/drivers/net/irda/sir_kthread.c @@ -116,10 +116,10 @@ static int irda_thread(void *startup) daemonize(); strcpy(current->comm, "kIrDAd"); - spin_lock_irq(&current->sig->siglock); + spin_lock_irq(&current->sighand->siglock); sigfillset(&current->blocked); recalc_sigpending(); - spin_unlock_irq(&current->sig->siglock); + spin_unlock_irq(&current->sighand->siglock); set_fs(KERNEL_DS); diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c index b3eca7db8051..99960d002026 100644 --- a/fs/afs/cmservice.c +++ b/fs/afs/cmservice.c @@ -127,10 +127,10 @@ static int kafscmd(void *arg) complete(&kafscmd_alive); /* only certain signals are of interest */ - spin_lock_irq(&current->sig->siglock); + spin_lock_irq(&current->sighand->siglock); siginitsetinv(&current->blocked,0); recalc_sigpending(); - spin_unlock_irq(&current->sig->siglock); + spin_unlock_irq(&current->sighand->siglock); /* loop around looking for things to attend to */ do { diff --git a/fs/afs/internal.h b/fs/afs/internal.h index a875684e3d4b..7de072e495c0 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -46,9 +46,9 @@ static inline void afs_discard_my_signals(void) while (signal_pending(current)) { siginfo_t sinfo; - spin_lock_irq(&current->sig->siglock); + 
spin_lock_irq(&current->sighand->siglock); dequeue_signal(&current->blocked,&sinfo); - spin_unlock_irq(&current->sig->siglock); + spin_unlock_irq(&current->sighand->siglock); } } diff --git a/fs/afs/kafsasyncd.c b/fs/afs/kafsasyncd.c index caedebc20095..e546a6da5015 100644 --- a/fs/afs/kafsasyncd.c +++ b/fs/afs/kafsasyncd.c @@ -101,10 +101,10 @@ static int kafsasyncd(void *arg) complete(&kafsasyncd_alive); /* only certain signals are of interest */ - spin_lock_irq(&current->sig->siglock); + spin_lock_irq(&current->sighand->siglock); siginitsetinv(&current->blocked,0); recalc_sigpending(); - spin_unlock_irq(&current->sig->siglock); + spin_unlock_irq(&current->sighand->siglock); /* loop around looking for things to attend to */ do { diff --git a/fs/afs/kafstimod.c b/fs/afs/kafstimod.c index 0d3f30a73657..2b0f5a9d84e9 100644 --- a/fs/afs/kafstimod.c +++ b/fs/afs/kafstimod.c @@ -78,10 +78,10 @@ static int kafstimod(void *arg) complete(&kafstimod_alive); /* only certain signals are of interest */ - spin_lock_irq(&current->sig->siglock); + spin_lock_irq(&current->sighand->siglock); siginitsetinv(&current->blocked,0); recalc_sigpending(); - spin_unlock_irq(&current->sig->siglock); + spin_unlock_irq(&current->sighand->siglock); /* loop around looking for things to attend to */ loop: diff --git a/fs/jffs/intrep.c b/fs/jffs/intrep.c index d5b053e5b73a..6cf3d86a5d79 100644 --- a/fs/jffs/intrep.c +++ b/fs/jffs/intrep.c @@ -3347,10 +3347,10 @@ jffs_garbage_collect_thread(void *ptr) current->session = 1; current->pgrp = 1; init_completion(&c->gc_thread_comp); /* barrier */ - spin_lock_irq(&current->sig->siglock); + spin_lock_irq(&current->sighand->siglock); siginitsetinv (&current->blocked, sigmask(SIGHUP) | sigmask(SIGKILL) | sigmask(SIGSTOP) | sigmask(SIGCONT)); recalc_sigpending(); - spin_unlock_irq(&current->sig->siglock); + spin_unlock_irq(&current->sighand->siglock); strcpy(current->comm, "jffs_gcd"); D1(printk (KERN_NOTICE "jffs_garbage_collect_thread(): Starting infinite loop.\n")); @@ -3378,9 +3378,9 @@ jffs_garbage_collect_thread(void *ptr) siginfo_t info; unsigned long signr = 0; 
- spin_lock_irq(&current->sig->siglock); + spin_lock_irq(&current->sighand->siglock); signr = dequeue_signal(&current->blocked, &info); - spin_unlock_irq(&current->sig->siglock); + spin_unlock_irq(&current->sighand->siglock); switch(signr) { case SIGSTOP: diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h index b1654cff562b..a5c35fdb51c8 100644 --- a/fs/jffs2/os-linux.h +++ b/fs/jffs2/os-linux.h @@ -54,7 +54,7 @@ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,40) #define current_sig_lock current->sigmask_lock #else -#define current_sig_lock current->sig->siglock +#define current_sig_lock current->sighand->siglock #endif static inline void jffs2_init_inode_info(struct jffs2_inode_info *f) diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c index e3d931ff7ca2..360139794557 100644 --- a/fs/jfs/jfs_logmgr.c +++ b/fs/jfs/jfs_logmgr.c @@ -2139,10 +2139,10 @@ int jfsIOWait(void *arg) unlock_kernel(); - spin_lock_irq(&current->sig->siglock); + spin_lock_irq(&current->sighand->siglock); sigfillset(&current->blocked); recalc_sigpending(); - spin_unlock_irq(&current->sig->siglock); + spin_unlock_irq(&current->sighand->siglock); complete(&jfsIOwait); diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c index f85bb58be45b..6af148d0387c 100644 --- a/fs/jfs/jfs_txnmgr.c +++ b/fs/jfs/jfs_txnmgr.c @@ -2780,10 +2780,10 @@ int jfs_lazycommit(void *arg) jfsCommitTask = current; - spin_lock_irq(&current->sig->siglock); + spin_lock_irq(&current->sighand->siglock); sigfillset(&current->blocked); recalc_sigpending(); - spin_unlock_irq(&current->sig->siglock); + spin_unlock_irq(&current->sighand->siglock); LAZY_LOCK_INIT(); TxAnchor.unlock_queue = TxAnchor.unlock_tail = 0; @@ -2985,10 +2985,10 @@ int jfs_sync(void *arg) unlock_kernel(); - spin_lock_irq(&current->sig->siglock); + spin_lock_irq(&current->sighand->siglock); sigfillset(&current->blocked); recalc_sigpending(); - spin_unlock_irq(&current->sig->siglock); + spin_unlock_irq(&current->sighand->siglock); complete(&jfsIOwait); diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c index 93ba7610dde0..f01c538eb282 100644 --- a/fs/ncpfs/sock.c +++ 
b/fs/ncpfs/sock.c @@ -745,7 +745,7 @@ static int ncp_do_request(struct ncp_server *server, int size, sigset_t old_set; unsigned long mask, flags; - spin_lock_irqsave(&current->sig->siglock, flags); + spin_lock_irqsave(&current->sighand->siglock, flags); old_set = current->blocked; if (current->flags & PF_EXITING) mask = 0; @@ -764,7 +764,7 @@ static int ncp_do_request(struct ncp_server *server, int size, } siginitsetinv(&current->blocked, mask); recalc_sigpending(); - spin_unlock_irqrestore(&current->sig->siglock, flags); + spin_unlock_irqrestore(&current->sighand->siglock, flags); fs = get_fs(); set_fs(get_ds()); @@ -773,10 +773,10 @@ static int ncp_do_request(struct ncp_server *server, int size, set_fs(fs); - spin_lock_irqsave(&current->sig->siglock, flags); + spin_lock_irqsave(&current->sighand->siglock, flags); current->blocked = old_set; recalc_sigpending(); - spin_unlock_irqrestore(&current->sig->siglock, flags); + spin_unlock_irqrestore(&current->sighand->siglock, flags); } DDPRINTK("do_ncp_rpc_call returned %d\n", result); diff --git a/fs/smbfs/smbiod.c b/fs/smbfs/smbiod.c index 5f7d1d5969fe..41d5bbd8a334 100644 --- a/fs/smbfs/smbiod.c +++ b/fs/smbfs/smbiod.c @@ -285,10 +285,10 @@ static int smbiod(void *unused) MOD_INC_USE_COUNT; daemonize(); - spin_lock_irq(&current->sig->siglock); + spin_lock_irq(&current->sighand->siglock); siginitsetinv(&current->blocked, sigmask(SIGKILL)); recalc_sigpending(); - spin_unlock_irq(&current->sig->siglock); + spin_unlock_irq(&current->sighand->siglock); strcpy(current->comm, "smbiod"); diff --git a/fs/xfs/pagebuf/page_buf.c b/fs/xfs/pagebuf/page_buf.c index 4c60a8799fcb..d6b027eb2022 100644 --- a/fs/xfs/pagebuf/page_buf.c +++ b/fs/xfs/pagebuf/page_buf.c @@ -1581,10 +1581,10 @@ pagebuf_daemon( daemonize(); /* Avoid signals */ - spin_lock_irq(&current->sig->siglock); + spin_lock_irq(&current->sighand->siglock); sigfillset(&current->blocked); recalc_sigpending(); - spin_unlock_irq(&current->sig->siglock); + spin_unlock_irq(&current->sighand->siglock); strcpy(current->comm, "pagebufd"); current->flags |= PF_MEMALLOC; diff --git 
a/include/linux/sched.h b/include/linux/sched.h index ea5d949f946c..975dd5dca713 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -778,7 +778,7 @@ static inline void cond_resched_lock(spinlock_t * lock) /* Reevaluate whether the task has signals pending delivery. This is required every time the blocked sigset_t changes. - callers must hold sig->siglock. */ + callers must hold sighand->siglock. */ extern FASTCALL(void recalc_sigpending_tsk(struct task_struct *t)); extern void recalc_sigpending(void); diff --git a/kernel/suspend.c b/kernel/suspend.c index 40efb8d02db6..8ed7bde5aa18 100644 --- a/kernel/suspend.c +++ b/kernel/suspend.c @@ -218,9 +218,9 @@ int freeze_processes(void) /* FIXME: smp problem here: we may not access other process' flags without locking */ p->flags |= PF_FREEZE; - spin_lock_irqsave(&p->sig->siglock, flags); + spin_lock_irqsave(&p->sighand->siglock, flags); signal_wake_up(p, 0); - spin_unlock_irqrestore(&p->sig->siglock, flags); + spin_unlock_irqrestore(&p->sighand->siglock, flags); todo++; } while_each_thread(g, p); read_unlock(&tasklist_lock); diff --git a/net/rxrpc/internal.h b/net/rxrpc/internal.h index 9e4553cc1aea..b0ee06b71a7e 100644 --- a/net/rxrpc/internal.h +++ b/net/rxrpc/internal.h @@ -54,9 +54,9 @@ static inline void rxrpc_discard_my_signals(void) while (signal_pending(current)) { siginfo_t sinfo; - spin_lock_irq(&current->sig->siglock); + spin_lock_irq(&current->sighand->siglock); dequeue_signal(&current->blocked,&sinfo); - spin_unlock_irq(&current->sig->siglock); + spin_unlock_irq(&current->sighand->siglock); } } diff --git a/net/rxrpc/krxiod.c b/net/rxrpc/krxiod.c index d246585b8f07..ae9987d6155f 100644 --- a/net/rxrpc/krxiod.c +++ b/net/rxrpc/krxiod.c @@ -47,10 +47,10 @@ static int rxrpc_krxiod(void *arg) daemonize(); /* only certain signals are of interest */ - spin_lock_irq(&current->sig->siglock); + spin_lock_irq(&current->sighand->siglock); siginitsetinv(&current->blocked,0); recalc_sigpending(); - spin_unlock_irq(&current->sig->siglock); + 
spin_unlock_irq(&current->sighand->siglock); /* loop around waiting for work to do */ do { diff --git a/net/rxrpc/krxsecd.c b/net/rxrpc/krxsecd.c index 4e35bd351412..39f4eac9f224 100644 --- a/net/rxrpc/krxsecd.c +++ b/net/rxrpc/krxsecd.c @@ -59,10 +59,10 @@ static int rxrpc_krxsecd(void *arg) daemonize(); /* only certain signals are of interest */ - spin_lock_irq(&current->sig->siglock); + spin_lock_irq(&current->sighand->siglock); siginitsetinv(&current->blocked,0); recalc_sigpending(); - spin_unlock_irq(&current->sig->siglock); + spin_unlock_irq(&current->sighand->siglock); /* loop around waiting for work to do */ do { diff --git a/net/rxrpc/krxtimod.c b/net/rxrpc/krxtimod.c index 8eb61e64fa55..c6df1edf38ff 100644 --- a/net/rxrpc/krxtimod.c +++ b/net/rxrpc/krxtimod.c @@ -77,10 +77,10 @@ static int krxtimod(void *arg) complete(&krxtimod_alive); /* only certain signals are of interest */ - spin_lock_irq(&current->sig->siglock); + spin_lock_irq(&current->sighand->siglock); siginitsetinv(&current->blocked,0); recalc_sigpending(); - spin_unlock_irq(&current->sig->siglock); + spin_unlock_irq(&current->sighand->siglock); /* loop around looking for things to attend to */ loop: -- cgit v1.2.3