author    Linus Torvalds <torvalds@athlon.transmeta.com>  2002-02-04 18:03:52 -0800
committer Linus Torvalds <torvalds@athlon.transmeta.com>  2002-02-04 18:03:52 -0800
commit    b56566c61ebe66fc5c9aa33385ba5b13bf6a8957 (patch)
tree      098a056a41285753cbd8b993e292b82c55a2fc9a /kernel
parent    44e8778cf19e61151a4819b771186c7e4c49bff1 (diff)
v2.4.2.2 -> v2.4.2.3
- Alan Cox: continued merging
- Urban Widmark: smbfs fix (d_add on already hashed dentry - no-no).
- Andrew Morton: 3c59x update
- Jeff Garzik: network driver cleanups and fixes
- Gérard Roudier: sym-ncr drivers update
- Jens Axboe: more loop cleanups and fixes
- David Miller: sparc update, some networking fixes
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/pm.c      62
-rw-r--r--  kernel/sched.c   28
-rw-r--r--  kernel/sys.c    120
3 files changed, 165 insertions(+), 45 deletions(-)
diff --git a/kernel/pm.c b/kernel/pm.c
index eb7c6f615bd8..a520cae1595f 100644
--- a/kernel/pm.c
+++ b/kernel/pm.c
@@ -25,7 +25,19 @@
int pm_active;
-static spinlock_t pm_devs_lock = SPIN_LOCK_UNLOCKED;
+/*
+ * Locking notes:
+ * pm_devs_lock can be a semaphore provided pm ops are not called
+ * from an interrupt handler (already a bad idea so no change here). Each
+ * change must be protected so that an unlink of an entry doesn't clash
+ * with a pm send - which is permitted to sleep in the current architecture.
+ *
+ * Module unloads clashing with pm events now work out safely, the module
+ * unload path will block until the event has been sent. It may well block
+ * until a resume but that will be fine.
+ */
+
+static DECLARE_MUTEX(pm_devs_lock);
static LIST_HEAD(pm_devs);
/**
@@ -45,16 +57,14 @@ struct pm_dev *pm_register(pm_dev_t type,
{
struct pm_dev *dev = kmalloc(sizeof(struct pm_dev), GFP_KERNEL);
if (dev) {
- unsigned long flags;
-
memset(dev, 0, sizeof(*dev));
dev->type = type;
dev->id = id;
dev->callback = callback;
- spin_lock_irqsave(&pm_devs_lock, flags);
+ down(&pm_devs_lock);
list_add(&dev->entry, &pm_devs);
- spin_unlock_irqrestore(&pm_devs_lock, flags);
+ up(&pm_devs_lock);
}
return dev;
}
@@ -70,12 +80,18 @@ struct pm_dev *pm_register(pm_dev_t type,
void pm_unregister(struct pm_dev *dev)
{
if (dev) {
- unsigned long flags;
-
- spin_lock_irqsave(&pm_devs_lock, flags);
+ down(&pm_devs_lock);
list_del(&dev->entry);
- spin_unlock_irqrestore(&pm_devs_lock, flags);
+ up(&pm_devs_lock);
+
+ kfree(dev);
+ }
+}
+static void __pm_unregister(struct pm_dev *dev)
+{
+ if (dev) {
+ list_del(&dev->entry);
kfree(dev);
}
}
@@ -97,13 +113,15 @@ void pm_unregister_all(pm_callback callback)
if (!callback)
return;
+ down(&pm_devs_lock);
entry = pm_devs.next;
while (entry != &pm_devs) {
struct pm_dev *dev = list_entry(entry, struct pm_dev, entry);
entry = entry->next;
if (dev->callback == callback)
- pm_unregister(dev);
+ __pm_unregister(dev);
}
+ up(&pm_devs_lock);
}
/**
@@ -119,6 +137,13 @@ void pm_unregister_all(pm_callback callback)
*
 * BUGS: what stops two power management requests occurring in parallel
 * and conflicting?
+ *
+ * WARNING: Calling pm_send directly is not generally recommended: in
+ * particular there is no locking against the pm_dev going away. The
+ * caller must maintain all needed locking or have 'inside knowledge'
+ * on the safety. Also remember that this function is not locked against
+ * pm_unregister. This means that you must handle SMP races on callback
+ * execution and unload yourself.
*/
int pm_send(struct pm_dev *dev, pm_request_t rqst, void *data)
@@ -183,6 +208,12 @@ static void pm_undo_all(struct pm_dev *last)
* during the processing of this request are restored to their
* previous state.
*
+ * WARNING: This function takes pm_devs_lock. The lock is not dropped until
+ * the callbacks have completed. This prevents races against the pm locking
+ * functions and against the module-unload pm_unregister code. It does
+ * mean, however, that you must not issue pm_ functions within the callback
+ * or you will deadlock and users will hate you.
+ *
* Zero is returned on success. If a suspend fails then the status
* from the device that vetoes the suspend is returned.
*
@@ -192,7 +223,10 @@ static void pm_undo_all(struct pm_dev *last)
int pm_send_all(pm_request_t rqst, void *data)
{
- struct list_head *entry = pm_devs.next;
+ struct list_head *entry;
+
+ down(&pm_devs_lock);
+ entry = pm_devs.next;
while (entry != &pm_devs) {
struct pm_dev *dev = list_entry(entry, struct pm_dev, entry);
if (dev->callback) {
@@ -203,11 +237,13 @@ int pm_send_all(pm_request_t rqst, void *data)
*/
if (rqst == PM_SUSPEND)
pm_undo_all(dev);
+ up(&pm_devs_lock);
return status;
}
}
entry = entry->next;
}
+ up(&pm_devs_lock);
return 0;
}
@@ -222,6 +258,10 @@ int pm_send_all(pm_request_t rqst, void *data)
* of the list.
*
* To search from the beginning pass %NULL as the @from value.
+ *
+ * The caller MUST hold pm_devs_lock when calling this
+ * function. The instant that the lock is dropped, all pointers returned
+ * may become invalid.
*/
struct pm_dev *pm_find(pm_dev_t type, struct pm_dev *from)
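The new rules above (take pm_devs_lock around any list walk, and never issue pm_ functions from inside a callback) are easy to get wrong, so here is a minimal illustrative sketch of a compliant suspend loop. It assumes code with access to pm_devs_lock, i.e. code living in pm.c itself; the helper name suspend_sys_devices() is hypothetical and not part of this patch, and (void *)3 is just the suspend state value APM-style callers pass.

static int suspend_sys_devices(void)
{
        struct pm_dev *dev = NULL;
        int status = 0;

        down(&pm_devs_lock);    /* pm_find() pointers are only valid under the lock */
        while ((dev = pm_find(PM_SYS_DEV, dev)) != NULL) {
                /* pm_send() itself takes no locks, so calling it here is
                 * safe; the callback must not call pm_ functions or it
                 * will deadlock against us. */
                status = pm_send(dev, PM_SUSPEND, (void *)3);
                if (status)
                        break;  /* a device vetoed the suspend */
        }
        up(&pm_devs_lock);      /* from here on, 'dev' may be stale */
        return status;
}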
diff --git a/kernel/sched.c b/kernel/sched.c
index 33ec2ec77178..a15880d83e73 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -359,6 +359,32 @@ static void process_timeout(unsigned long __data)
wake_up_process(p);
}
+/**
+ * schedule_timeout - sleep until timeout
+ * @timeout: timeout value in jiffies
+ *
+ * Make the current task sleep until @timeout jiffies have
+ * elapsed. The routine will return immediately unless
+ * the current task state has been set (see set_current_state()).
+ *
+ * You can set the task state as follows -
+ *
+ * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
+ * pass before the routine returns. The routine will return 0.
+ *
+ * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
+ * delivered to the current task. In this case the remaining time
+ * in jiffies will be returned, or 0 if the timer expired in time.
+ *
+ * The current task state is guaranteed to be TASK_RUNNING when this
+ * routine returns.
+ *
+ * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
+ * the CPU away without a bound on the timeout. In this case the return
+ * value will be %MAX_SCHEDULE_TIMEOUT.
+ *
+ * In all cases the return value is guaranteed to be non-negative.
+ */
signed long schedule_timeout(signed long timeout)
{
struct timer_list timer;
@@ -541,7 +567,7 @@ move_rr_back:
}
default:
del_from_runqueue(prev);
- case TASK_RUNNING:
+ case TASK_RUNNING:;
}
prev->need_resched = 0;
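The schedule_timeout() kernel-doc added above spells out a contract that is worth seeing in use: set the task state first, then sleep, then inspect the return value. A minimal sketch of that calling convention follows; the helper name wait_half_second() is made up for illustration.

static void wait_half_second(void)
{
        signed long left;

        /* The task state must be set first, or schedule_timeout()
         * returns immediately, as documented above. */
        set_current_state(TASK_INTERRUPTIBLE);
        left = schedule_timeout(HZ / 2);        /* HZ jiffies == one second */
        if (left)
                printk(KERN_DEBUG "woken early by a signal, %ld jiffies left\n", left);
        /* current->state is guaranteed to be TASK_RUNNING here */
}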
diff --git a/kernel/sys.c b/kernel/sys.c
index 38eb5dee9ce8..65e7bbcda823 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -330,6 +330,12 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void * arg)
return 0;
}
+static void deferred_cad(void *dummy)
+{
+ notifier_call_chain(&reboot_notifier_list, SYS_RESTART, NULL);
+ machine_restart(NULL);
+}
+
/*
* This function gets called by ctrl-alt-del - ie the keyboard interrupt.
* As it's called within an interrupt, it may NOT sync: the only choice
@@ -337,10 +343,13 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void * arg)
*/
void ctrl_alt_del(void)
{
- if (C_A_D) {
- notifier_call_chain(&reboot_notifier_list, SYS_RESTART, NULL);
- machine_restart(NULL);
- } else
+ static struct tq_struct cad_tq = {
+ routine: deferred_cad,
+ };
+
+ if (C_A_D)
+ schedule_task(&cad_tq);
+ else
kill_proc(1, SIGINT, 1);
}
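The ctrl_alt_del() change above is an instance of a general 2.4 pattern: an interrupt handler may not sleep, but the reboot notifier chain and machine_restart() may, so the work is handed off to keventd via a task queue. A hedged sketch of the same pattern, with hypothetical names (my_work(), my_tq, my_interrupt()):

static void my_work(void *data)
{
        /* Runs later in keventd's process context, where sleeping is allowed. */
}

static struct tq_struct my_tq = {
        routine: my_work,       /* same GNU-style initializer the patch uses */
};

static void my_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        schedule_task(&my_tq);  /* defer: we may NOT sleep in irq context */
}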
@@ -367,12 +376,14 @@ asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
{
int old_rgid = current->gid;
int old_egid = current->egid;
+ int new_rgid = old_rgid;
+ int new_egid = old_egid;
if (rgid != (gid_t) -1) {
if ((old_rgid == rgid) ||
(current->egid==rgid) ||
capable(CAP_SETGID))
- current->gid = rgid;
+ new_rgid = rgid;
else
return -EPERM;
}
@@ -381,18 +392,22 @@ asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
(current->egid == egid) ||
(current->sgid == egid) ||
capable(CAP_SETGID))
- current->fsgid = current->egid = egid;
+ new_egid = egid;
else {
- current->gid = old_rgid;
return -EPERM;
}
}
+ if (new_egid != old_egid)
+ {
+ current->dumpable = 0;
+ wmb();
+ }
if (rgid != (gid_t) -1 ||
(egid != (gid_t) -1 && egid != old_rgid))
- current->sgid = current->egid;
- current->fsgid = current->egid;
- if (current->egid != old_egid)
- current->dumpable = 0;
+ current->sgid = new_egid;
+ current->fsgid = new_egid;
+ current->egid = new_egid;
+ current->gid = new_rgid;
return 0;
}
@@ -406,14 +421,25 @@ asmlinkage long sys_setgid(gid_t gid)
int old_egid = current->egid;
if (capable(CAP_SETGID))
+ {
+ if(old_egid != gid)
+ {
+ current->dumpable=0;
+ wmb();
+ }
current->gid = current->egid = current->sgid = current->fsgid = gid;
+ }
else if ((gid == current->gid) || (gid == current->sgid))
+ {
+ if(old_egid != gid)
+ {
+ current->dumpable=0;
+ wmb();
+ }
current->egid = current->fsgid = gid;
+ }
else
return -EPERM;
-
- if (current->egid != old_egid)
- current->dumpable = 0;
return 0;
}
@@ -463,7 +489,7 @@ extern inline void cap_emulate_setxuid(int old_ruid, int old_euid,
}
}
-static int set_user(uid_t new_ruid)
+static int set_user(uid_t new_ruid, int dumpclear)
{
struct user_struct *new_user, *old_user;
@@ -479,6 +505,11 @@ static int set_user(uid_t new_ruid)
atomic_dec(&old_user->processes);
atomic_inc(&new_user->processes);
+ if(dumpclear)
+ {
+ current->dumpable = 0;
+ wmb();
+ }
current->uid = new_ruid;
current->user = new_user;
free_uid(old_user);
@@ -525,16 +556,19 @@ asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
return -EPERM;
}
- if (new_ruid != old_ruid && set_user(new_ruid) < 0)
+ if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0)
return -EAGAIN;
+ if (new_euid != old_euid)
+ {
+ current->dumpable=0;
+ wmb();
+ }
current->fsuid = current->euid = new_euid;
if (ruid != (uid_t) -1 ||
(euid != (uid_t) -1 && euid != old_ruid))
current->suid = current->euid;
current->fsuid = current->euid;
- if (current->euid != old_euid)
- current->dumpable = 0;
if (!issecure(SECURE_NO_SETUID_FIXUP)) {
cap_emulate_setxuid(old_ruid, old_euid, old_suid);
@@ -559,21 +593,26 @@ asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
asmlinkage long sys_setuid(uid_t uid)
{
int old_euid = current->euid;
- int old_ruid, old_suid, new_ruid;
+ int old_ruid, old_suid, new_ruid, new_suid;
old_ruid = new_ruid = current->uid;
old_suid = current->suid;
+ new_suid = old_suid;
+
if (capable(CAP_SETUID)) {
- if (uid != old_ruid && set_user(uid) < 0)
+ if (uid != old_ruid && set_user(uid, old_euid != uid) < 0)
return -EAGAIN;
- current->suid = uid;
- } else if ((uid != current->uid) && (uid != current->suid))
+ new_suid = uid;
+ } else if ((uid != current->uid) && (uid != new_suid))
return -EPERM;
- current->fsuid = current->euid = uid;
-
if (old_euid != uid)
+ {
current->dumpable = 0;
+ wmb();
+ }
+ current->fsuid = current->euid = uid;
+ current->suid = new_suid;
if (!issecure(SECURE_NO_SETUID_FIXUP)) {
cap_emulate_setxuid(old_ruid, old_euid, old_suid);
@@ -605,12 +644,15 @@ asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
return -EPERM;
}
if (ruid != (uid_t) -1) {
- if (ruid != current->uid && set_user(ruid) < 0)
+ if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0)
return -EAGAIN;
}
if (euid != (uid_t) -1) {
if (euid != current->euid)
+ {
current->dumpable = 0;
+ wmb();
+ }
current->euid = euid;
current->fsuid = euid;
}
@@ -640,7 +682,7 @@ asmlinkage long sys_getresuid(uid_t *ruid, uid_t *euid, uid_t *suid)
*/
asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
{
- if (!capable(CAP_SETGID)) {
+ if (!capable(CAP_SETGID)) {
if ((rgid != (gid_t) -1) && (rgid != current->gid) &&
(rgid != current->egid) && (rgid != current->sgid))
return -EPERM;
@@ -651,14 +693,17 @@ asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
(sgid != current->egid) && (sgid != current->sgid))
return -EPERM;
}
- if (rgid != (gid_t) -1)
- current->gid = rgid;
if (egid != (gid_t) -1) {
if (egid != current->egid)
+ {
current->dumpable = 0;
+ wmb();
+ }
current->egid = egid;
current->fsgid = egid;
}
+ if (rgid != (gid_t) -1)
+ current->gid = rgid;
if (sgid != (gid_t) -1)
current->sgid = sgid;
return 0;
@@ -690,9 +735,14 @@ asmlinkage long sys_setfsuid(uid_t uid)
if (uid == current->uid || uid == current->euid ||
uid == current->suid || uid == current->fsuid ||
capable(CAP_SETUID))
+ {
+ if (uid != old_fsuid)
+ {
+ current->dumpable = 0;
+ wmb();
+ }
current->fsuid = uid;
- if (current->fsuid != old_fsuid)
- current->dumpable = 0;
+ }
/* We emulate fsuid by essentially doing a scaled-down version
* of what we did in setresuid and friends. However, we only
@@ -727,10 +777,14 @@ asmlinkage long sys_setfsgid(gid_t gid)
if (gid == current->gid || gid == current->egid ||
gid == current->sgid || gid == current->fsgid ||
capable(CAP_SETGID))
+ {
+ if (gid != old_fsgid)
+ {
+ current->dumpable = 0;
+ wmb();
+ }
current->fsgid = gid;
- if (current->fsgid != old_fsgid)
- current->dumpable = 0;
-
+ }
return old_fsgid;
}
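Every id-changing path in this patch repeats one ordering idiom: clear current->dumpable and issue wmb() before the new ids are installed, so that on SMP another CPU can never observe the new (possibly privileged) ids while the task still appears dumpable. A condensed, illustrative form of the idiom; new_id and old_id stand for whichever credential is being changed:

if (new_id != old_id) {
        current->dumpable = 0;  /* revoke core-dump/ptrace trust first...    */
        wmb();                  /* ...and order the store so no observer     */
}                               /* sees the new ids with dumpable still set  */
current->euid = new_id;         /* only now publish the new credentials      */
current->fsuid = new_id;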