Diffstat (limited to 'fs')
-rw-r--r--   fs/befs/ChangeLog    |  6
-rw-r--r--   fs/befs/btree.c      | 12
-rw-r--r--   fs/befs/datastream.c |  2
-rw-r--r--   fs/binfmt_aout.c     |  1
-rw-r--r--   fs/jfs/jfs_logmgr.c  | 98
-rw-r--r--   fs/jfs/jfs_logmgr.h  |  1
-rw-r--r--   fs/jfs/jfs_txnmgr.c  |  3
-rw-r--r--   fs/jfs/jfs_umount.c  |  4
-rw-r--r--   fs/proc/array.c      | 95
-rw-r--r--   fs/proc/base.c       |  1
10 files changed, 151 insertions, 72 deletions
diff --git a/fs/befs/ChangeLog b/fs/befs/ChangeLog
index 6774a4e815b2..33eb4822c609 100644
--- a/fs/befs/ChangeLog
+++ b/fs/befs/ChangeLog
@@ -102,7 +102,7 @@ Version 0.6 (2001-12-15)
 
 	The option is, simply enough, 'debug'. (super.c, debug.c) [WD]
 
-* Removed notion of btree handle from btree.c. It was unessisary, as the
+* Removed notion of btree handle from btree.c. It was unnecessary, as the
 	linux VFS doesn't allow us to keep any state between calls. Updated
 	dir.c, namei.c befs_fs.h to account for it. [WD]
 
@@ -312,9 +312,9 @@ Version 0.4 (2001-10-28)
 ==========
 * Fixed a misunderstanding of the inode fields. This fixed the problmem
 	with wrong file sizes from du and others.
-	The i_blocks field of the inode struct is not the nuber of blocks for the
+	The i_blocks field of the inode struct is not the number of blocks for the
 	inode, it is the number of blocks for the file. Also, i_blksize is not
-	nessisarily the size of the inode, although in practice it works out.
+	necessarily the size of the inode, although in practice it works out.
 	Changed to blocksize of filesystem. (fs/befs/inode.c)
 
diff --git a/fs/befs/btree.c b/fs/befs/btree.c
index 42a81113a396..c5ed07677b69 100644
--- a/fs/befs/btree.c
+++ b/fs/befs/btree.c
@@ -56,9 +56,9 @@
 /* Note:
  *
  * The book states 2 confusing things about befs b+trees. First,
- * it states that the overflow feild of node headers is used by internal nodes
- * to point to another node that "effectivly continues this one". Here is what
- * I belive that means. Each key in internal nodes points to another node that
+ * it states that the overflow field of node headers is used by internal nodes
+ * to point to another node that "effectively continues this one". Here is what
+ * I believe that means. Each key in internal nodes points to another node that
  * contains key values less than itself. Inspection reveals that the last key
  * in the internal node is not the last key in the index. Keys that are
  * greater than the last key in the internal node go into the overflow node.
@@ -124,7 +124,7 @@ static int befs_compare_strings(const void *key1, int keylen1,
  * @sup: Buffer in which to place the btree superblock
  *
  * Calls befs_read_datastream to read in the btree superblock and
- * makes sure it is in cpu byteorder, byteswapping if nessisary.
+ * makes sure it is in cpu byteorder, byteswapping if necessary.
  *
  * On success, returns BEFS_OK and *@sup contains the btree superblock,
  * in cpu byte order.
@@ -179,8 +179,8 @@ befs_bt_read_super(struct super_block *sb, befs_data_stream * ds,
  * @node_off: Starting offset (in bytes) of the node in @ds
  *
  * Calls befs_read_datastream to read in the indicated btree node and
- * makes sure its header feilds are in cpu byteorder, byteswapping if
- * nessisary.
+ * makes sure its header fields are in cpu byteorder, byteswapping if
+ * necessary.
  * Note: node->bh must be NULL when this function called first
  * time. Don't forget brelse(node->bh) after last call.
  *
diff --git a/fs/befs/datastream.c b/fs/befs/datastream.c
index e03b73e5a418..75289900ccd2 100644
--- a/fs/befs/datastream.c
+++ b/fs/befs/datastream.c
@@ -229,7 +229,7 @@ befs_count_blocks(struct super_block * sb, befs_data_stream * ds)
 
 	Algorithm:
 	  Linear search. Checks each element of array[] to see if it
-	  contains the blockno-th filesystem block. This is nessisary
+	  contains the blockno-th filesystem block. This is necessary
 	  because the block runs map variable amounts of data. Simply
 	  keeps a count of the number of blocks searched so far (sum),
 	  incrementing this by the length of each block run as we come
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
index a369757dc78c..91d24995dfcf 100644
--- a/fs/binfmt_aout.c
+++ b/fs/binfmt_aout.c
@@ -24,6 +24,7 @@
 #include <linux/binfmts.h>
 #include <linux/personality.h>
 #include <linux/init.h>
+#include <linux/ptrace.h>
 
 #include <asm/system.h>
 #include <asm/uaccess.h>
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index b76e1f3ff16d..fc218ce7c597 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -97,7 +97,7 @@ DECLARE_WAIT_QUEUE_HEAD(jfs_IO_thread_wait);
 #define LOGGC_LOCK_INIT(log)    spin_lock_init(&(log)->gclock)
 #define LOGGC_LOCK(log)         spin_lock_irq(&(log)->gclock)
 #define LOGGC_UNLOCK(log)       spin_unlock_irq(&(log)->gclock)
-#define LOGGC_WAKEUP(tblk)      wake_up(&(tblk)->gcwait)
+#define LOGGC_WAKEUP(tblk)      wake_up_all(&(tblk)->gcwait)
 
 /*
  *      log sync serialization (per log)
@@ -511,7 +511,6 @@ lmWriteRecord(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
                 tblk->bp = log->bp;
                 tblk->pn = log->page;
                 tblk->eor = log->eor;
-                init_waitqueue_head(&tblk->gcwait);
 
                 /* enqueue transaction to commit queue */
                 tblk->cqnext = NULL;
@@ -831,6 +830,12 @@ void lmPostGC(struct lbuf * bp)
 
                 tblk->flag &= ~tblkGC_QUEUE;
                 tblk->cqnext = 0;
 
+                if (tblk == log->flush_tblk) {
+                        /* we can stop flushing the log now */
+                        clear_bit(log_FLUSH, &log->flag);
+                        log->flush_tblk = NULL;
+                }
+
                 jfs_info("lmPostGC: tblk = 0x%p, flag = 0x%x", tblk,
                          tblk->flag);
@@ -843,10 +848,10 @@ void lmPostGC(struct lbuf * bp)
                 /* state transition: COMMIT -> COMMITTED */
                 tblk->flag |= tblkGC_COMMITTED;
 
-                if (tblk->flag & tblkGC_READY) {
+                if (tblk->flag & tblkGC_READY)
                         log->gcrtc--;
-                        LOGGC_WAKEUP(tblk);
-                }
+
+                LOGGC_WAKEUP(tblk);
         }
 
         /* was page full before pageout ?
@@ -892,6 +897,7 @@ void lmPostGC(struct lbuf * bp)
         else {
                 log->cflag &= ~logGC_PAGEOUT;
                 clear_bit(log_FLUSH, &log->flag);
+                WARN_ON(log->flush_tblk);
         }
 
         //LOGGC_UNLOCK(log);
@@ -1307,7 +1313,8 @@ int lmLogInit(struct jfs_log * log)
 
         INIT_LIST_HEAD(&log->synclist);
 
-        log->cqueue.head = log->cqueue.tail = 0;
+        log->cqueue.head = log->cqueue.tail = NULL;
+        log->flush_tblk = NULL;
 
         log->count = 0;
 
@@ -1395,38 +1402,78 @@ int lmLogClose(struct super_block *sb, struct jfs_log * log)
  *
  * FUNCTION:    initiate write of any outstanding transactions to the journal
  *              and optionally wait until they are all written to disk
+ *
+ *      wait == 0  flush until latest txn is committed, don't wait
+ *      wait == 1  flush until latest txn is committed, wait
+ *      wait > 1   flush until all txn's are complete, wait
  */
 void jfs_flush_journal(struct jfs_log *log, int wait)
 {
         int i;
+        struct tblock *target;
 
         jfs_info("jfs_flush_journal: log:0x%p wait=%d", log, wait);
 
-        /*
-         * This ensures that we will keep writing to the journal as long
-         * as there are unwritten commit records
-         */
-        set_bit(log_FLUSH, &log->flag);
-
-        /*
-         * Initiate I/O on outstanding transactions
-         */
         LOGGC_LOCK(log);
-        if (log->cqueue.head && !(log->cflag & logGC_PAGEOUT)) {
-                log->cflag |= logGC_PAGEOUT;
-                lmGCwrite(log, 0);
+
+        target = log->cqueue.head;
+
+        if (target) {
+                /*
+                 * This ensures that we will keep writing to the journal as long
+                 * as there are unwritten commit records
+                 */
+
+                if (test_bit(log_FLUSH, &log->flag)) {
+                        /*
+                         * We're already flushing.
+                         * if flush_tblk is NULL, we are flushing everything,
+                         * so leave it that way.  Otherwise, update it to the
+                         * latest transaction
+                         */
+                        if (log->flush_tblk)
+                                log->flush_tblk = target;
+                } else {
+                        /* Only flush until latest transaction is committed */
+                        log->flush_tblk = target;
+                        set_bit(log_FLUSH, &log->flag);
+
+                        /*
+                         * Initiate I/O on outstanding transactions
+                         */
+                        if (!(log->cflag & logGC_PAGEOUT)) {
+                                log->cflag |= logGC_PAGEOUT;
+                                lmGCwrite(log, 0);
+                        }
+                }
+        }
+        if ((wait > 1) || test_bit(log_SYNCBARRIER, &log->flag)) {
+                /* Flush until all activity complete */
+                set_bit(log_FLUSH, &log->flag);
+                log->flush_tblk = NULL;
+        }
+
+        if (wait && target && !(target->flag & tblkGC_COMMITTED)) {
+                DECLARE_WAITQUEUE(__wait, current);
+
+                add_wait_queue(&target->gcwait, &__wait);
+                set_current_state(TASK_UNINTERRUPTIBLE);
+                LOGGC_UNLOCK(log);
+                schedule();
+                current->state = TASK_RUNNING;
+                LOGGC_LOCK(log);
+                remove_wait_queue(&target->gcwait, &__wait);
         }
         LOGGC_UNLOCK(log);
 
-        if (!wait)
+        if (wait < 2)
                 return;
 
+        /*
+         * If there was recent activity, we may need to wait
+         * for the lazycommit thread to catch up
+         */
         if (log->cqueue.head || !list_empty(&log->synclist)) {
-                /*
-                 * If there was very recent activity, we may need to wait
-                 * for the lazycommit thread to catch up
-                 */
-
                 for (i = 0; i < 800; i++) {     /* Too much? */
                         current->state = TASK_INTERRUPTIBLE;
                         schedule_timeout(HZ / 4);
@@ -1437,7 +1484,6 @@ void jfs_flush_journal(struct jfs_log *log, int wait)
 
         }
         assert(log->cqueue.head == NULL);
         assert(list_empty(&log->synclist));
-        clear_bit(log_FLUSH, &log->flag);
 }
 
@@ -1467,7 +1513,7 @@ int lmLogShutdown(struct jfs_log * log)
 
         jfs_info("lmLogShutdown: log:0x%p", log);
 
-        jfs_flush_journal(log, 1);
+        jfs_flush_journal(log, 2);
 
         /*
          * We need to make sure all of the "written" metapages
diff --git a/fs/jfs/jfs_logmgr.h b/fs/jfs/jfs_logmgr.h
index 45eb3316d991..97983cb47c0f 100644
--- a/fs/jfs/jfs_logmgr.h
+++ b/fs/jfs/jfs_logmgr.h
@@ -403,6 +403,7 @@ struct jfs_log {
                 struct tblock *head;
                 struct tblock *tail;
         } cqueue;
+        struct tblock *flush_tblk;      /* tblk we're waiting on for flush */
         int gcrtc;              /* 4: GC_READY transaction count */
         struct tblock *gclrt;   /* 4: latest GC_READY transaction */
         spinlock_t gclock;      /* 4: group commit lock */
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index f86efd5f14c7..13226284c648 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -2741,8 +2741,7 @@ void txLazyCommit(struct tblock * tblk)
         if (tblk->flag & tblkGC_READY)
                 log->gcrtc--;
 
-        if (tblk->flag & tblkGC_READY)
-                wake_up(&tblk->gcwait); // LOGGC_WAKEUP
+        wake_up_all(&tblk->gcwait);     // LOGGC_WAKEUP
 
         /*
          * Can't release log->gclock until we've tested tblk->flag
diff --git a/fs/jfs/jfs_umount.c b/fs/jfs/jfs_umount.c
index b40511e4cf18..9e5c1b84ea36 100644
--- a/fs/jfs/jfs_umount.c
+++ b/fs/jfs/jfs_umount.c
@@ -69,7 +69,7 @@ int jfs_umount(struct super_block *sb)
         /*
          * Wait for outstanding transactions to be written to log:
          */
-        jfs_flush_journal(log, 1);
+        jfs_flush_journal(log, 2);
 
         /*
          * close fileset inode allocation map (aka fileset inode)
@@ -149,7 +149,7 @@ int jfs_umount_rw(struct super_block *sb)
          *
          * remove file system from log active file system list.
          */
-        jfs_flush_journal(log, 1);
+        jfs_flush_journal(log, 2);
 
         /*
          * Make sure all metadata makes it to disk
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 5cba4d1a70ff..37fb278f96d7 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -180,51 +180,74 @@ static inline char * task_state(struct task_struct *p, char *buffer)
         return buffer;
 }
 
+static char * render_sigset_t(const char *header, sigset_t *set, char *buffer)
+{
+        int i, len;
+
+        len = strlen(header);
+        memcpy(buffer, header, len);
+        buffer += len;
+
+        i = _NSIG;
+        do {
+                int x = 0;
+
+                i -= 4;
+                if (sigismember(set, i+1)) x |= 1;
+                if (sigismember(set, i+2)) x |= 2;
+                if (sigismember(set, i+3)) x |= 4;
+                if (sigismember(set, i+4)) x |= 8;
+                *buffer++ = (x < 10 ? '0' : 'a' - 10) + x;
+        } while (i >= 4);
+
+        *buffer++ = '\n';
+        *buffer = 0;
+        return buffer;
+}
+
 static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign,
                                     sigset_t *catch)
 {
         struct k_sigaction *k;
         int i;
 
-        sigemptyset(ign);
-        sigemptyset(catch);
+        k = p->sighand->action;
+        for (i = 1; i <= _NSIG; ++i, ++k) {
+                if (k->sa.sa_handler == SIG_IGN)
+                        sigaddset(ign, i);
+                else if (k->sa.sa_handler != SIG_DFL)
+                        sigaddset(catch, i);
+        }
+}
+
+static inline char * task_sig(struct task_struct *p, char *buffer)
+{
+        sigset_t pending, shpending, blocked, ignored, caught;
+        sigemptyset(&pending);
+        sigemptyset(&shpending);
+        sigemptyset(&blocked);
+        sigemptyset(&ignored);
+        sigemptyset(&caught);
+
+        /* Gather all the data with the appropriate locks held */
         read_lock(&tasklist_lock);
         if (p->sighand) {
                 spin_lock_irq(&p->sighand->siglock);
-                k = p->sighand->action;
-                for (i = 1; i <= _NSIG; ++i, ++k) {
-                        if (k->sa.sa_handler == SIG_IGN)
-                                sigaddset(ign, i);
-                        else if (k->sa.sa_handler != SIG_DFL)
-                                sigaddset(catch, i);
-                }
+                pending = p->pending.signal;
+                shpending = p->signal->shared_pending.signal;
+                blocked = p->blocked;
+                collect_sigign_sigcatch(p, &ignored, &caught);
                 spin_unlock_irq(&p->sighand->siglock);
         }
         read_unlock(&tasklist_lock);
-}
-
-static inline char * task_sig(struct task_struct *p, char *buffer)
-{
-        sigset_t ign, catch;
-
-        buffer += sprintf(buffer, "SigPnd:\t");
-        buffer = render_sigset_t(&p->pending.signal, buffer);
-        *buffer++ = '\n';
-        buffer += sprintf(buffer, "ShdPnd:\t");
-        buffer = render_sigset_t(&p->signal->shared_pending.signal, buffer);
-        *buffer++ = '\n';
-        buffer += sprintf(buffer, "SigBlk:\t");
-        buffer = render_sigset_t(&p->blocked, buffer);
-        *buffer++ = '\n';
-        collect_sigign_sigcatch(p, &ign, &catch);
-        buffer += sprintf(buffer, "SigIgn:\t");
-        buffer = render_sigset_t(&ign, buffer);
-        *buffer++ = '\n';
-        buffer += sprintf(buffer, "SigCgt:\t");	/* Linux 2.0 uses "SigCgt" */
-        buffer = render_sigset_t(&catch, buffer);
-        *buffer++ = '\n';
+        /* render them all */
+        buffer = render_sigset_t("SigPnd:\t", &pending, buffer);
+        buffer = render_sigset_t("ShdPnd:\t", &shpending, buffer);
+        buffer = render_sigset_t("SigBlk:\t", &blocked, buffer);
+        buffer = render_sigset_t("SigIgn:\t", &ignored, buffer);
+        buffer = render_sigset_t("SigCgt:\t", &caught, buffer);
 
         return buffer;
 }
@@ -293,7 +316,15 @@ int proc_pid_stat(struct task_struct *task, char * buffer)
 
         wchan = get_wchan(task);
 
-        collect_sigign_sigcatch(task, &sigign, &sigcatch);
+        sigemptyset(&sigign);
+        sigemptyset(&sigcatch);
+        read_lock(&tasklist_lock);
+        if (task->sighand) {
+                spin_lock_irq(&task->sighand->siglock);
+                collect_sigign_sigcatch(task, &sigign, &sigcatch);
+                spin_unlock_irq(&task->sighand->siglock);
+        }
+        read_unlock(&tasklist_lock);
 
         /* scale priority and nice values from timeslices to -20..20 */
         /* to make it look like a "normal" Unix priority/nice value */
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 164fb87ea678..cfb54c010ef9 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -31,6 +31,7 @@
 #include <linux/kallsyms.h>
 #include <linux/mount.h>
 #include <linux/security.h>
+#include <linux/ptrace.h>
 
 /*
  * For hysterical raisins we keep the same inumbers as in the old procfs.
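Note on the fs/proc/array.c change: the new render_sigset_t() takes the "SigPnd:"/"SigBlk:"-style header itself and emits the 64-bit signal mask as 16 hex digits, one nibble per four signals, highest-numbered signals first. Below is a minimal userspace sketch of that rendering loop, not kernel code; KERNEL_NSIG, the render_sigset() name and the demo signals are assumptions made for illustration, with POSIX sigset_t/sigismember() standing in for the kernel's types.

    #include <signal.h>
    #include <stdio.h>
    #include <string.h>

    #define KERNEL_NSIG 64  /* assumed width of the kernel's signal mask */

    /* mimic the patched render_sigset_t(): header, 16 hex digits, newline */
    static char *render_sigset(const char *header, const sigset_t *set,
                               char *buffer)
    {
            size_t len = strlen(header);
            int i = KERNEL_NSIG;

            memcpy(buffer, header, len);
            buffer += len;

            do {
                    int x = 0;

                    i -= 4;  /* one hex digit per four signals, highest first */
                    if (sigismember(set, i + 1)) x |= 1;
                    if (sigismember(set, i + 2)) x |= 2;
                    if (sigismember(set, i + 3)) x |= 4;
                    if (sigismember(set, i + 4)) x |= 8;
                    *buffer++ = (x < 10 ? '0' : 'a' - 10) + x;
            } while (i >= 4);

            *buffer++ = '\n';
            *buffer = '\0';
            return buffer;
    }

    int main(void)
    {
            sigset_t blocked;
            char line[80];

            sigemptyset(&blocked);
            sigaddset(&blocked, SIGINT);   /* signal 2  -> mask bit 1  */
            sigaddset(&blocked, SIGTERM);  /* signal 15 -> mask bit 14 */

            render_sigset("SigBlk:\t", &blocked, line);
            fputs(line, stdout);  /* prints: SigBlk:<tab>0000000000004002 */
            return 0;
    }

The output format matches what /proc/<pid>/status shows for SigPnd, ShdPnd, SigBlk, SigIgn and SigCgt: a plain big-endian hex dump of the mask, with signal 1 in the least significant bit.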
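Note on the fs/jfs/jfs_logmgr.c change: once jfs_flush_journal() can also sleep on the target transaction's gcwait queue, two sleepers may be queued on the same tblock (the transaction's committer and the flusher), so waking only one of them with wake_up() could strand the other; that is why LOGGC_WAKEUP() and txLazyCommit() switch to wake_up_all(). The sketch below is a userspace pthreads analogy, not JFS code; the thread names and the 'committed' flag are invented stand-ins for tblkGC_COMMITTED, and pthread_cond_broadcast() plays the role of wake_up_all() (build with -pthread).

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  gcwait = PTHREAD_COND_INITIALIZER;
    static int committed;               /* stands in for tblkGC_COMMITTED */

    /* both the "committer" and the "flusher" sleep on the same queue */
    static void *waiter(void *name)
    {
            pthread_mutex_lock(&lock);
            while (!committed)          /* re-check the flag after every wakeup */
                    pthread_cond_wait(&gcwait, &lock);
            pthread_mutex_unlock(&lock);
            printf("%s saw the commit\n", (char *)name);
            return NULL;
    }

    int main(void)
    {
            pthread_t flusher, committer;

            pthread_create(&flusher, NULL, waiter, "flusher");
            pthread_create(&committer, NULL, waiter, "committer");

            pthread_mutex_lock(&lock);
            committed = 1;
            /* wake *all* sleepers, analogous to wake_up_all() */
            pthread_cond_broadcast(&gcwait);
            pthread_mutex_unlock(&lock);

            pthread_join(flusher, NULL);
            pthread_join(committer, NULL);
            return 0;
    }

With a single-waiter wakeup (pthread_cond_signal(), analogous to the old wake_up()), only one of the two threads would be guaranteed to run after the commit, which mirrors the hang this patch avoids.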