author    Vojtech Pavlik <vojtech@suse.cz>  2002-11-15 10:15:29 +0100
committer Vojtech Pavlik <vojtech@suse.cz>  2002-11-15 10:15:29 +0100
commit    26c692941cd9362f55a9a0ca71eed2381c593124 (patch)
tree      ae2f274fd89856dee98d35c1d153b8b9544c74cb /kernel
parent    cdd78a965de150f55dc6bc2836995e5f8bc991a9 (diff)
parent    b11523f3840b25ad8be50e33f8729cd922447dce (diff)
Merge suse.cz:/home/vojtech/bk/linus into suse.cz:/home/vojtech/bk/input
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile       |    8
-rw-r--r--  kernel/exec_domain.c  |    4
-rw-r--r--  kernel/intermodule.c  |  183
-rw-r--r--  kernel/kmod.c         |    2
-rw-r--r--  kernel/ksyms.c        |    8
-rw-r--r--  kernel/module.c       | 2110
-rw-r--r--  kernel/suspend.c      |    6
-rw-r--r--  kernel/sys.c          |    2
8 files changed, 1142 insertions(+), 1181 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index daf6cbd5d42a..bc0f6371f222 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -4,18 +4,18 @@
export-objs = signal.o sys.o kmod.o workqueue.o ksyms.o pm.o exec_domain.o \
printk.o platform.o suspend.o dma.o module.o cpufreq.o \
- profile.o rcupdate.o
+ profile.o rcupdate.o intermodule.o
obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
- module.o exit.o itimer.o time.o softirq.o resource.o \
+ exit.o itimer.o time.o softirq.o resource.o \
sysctl.o capability.o ptrace.o timer.o user.o \
signal.o sys.o kmod.o workqueue.o futex.o platform.o pid.o \
- rcupdate.o
+ rcupdate.o intermodule.o
obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
obj-$(CONFIG_SMP) += cpu.o
obj-$(CONFIG_UID16) += uid16.o
-obj-$(CONFIG_MODULES) += ksyms.o
+obj-$(CONFIG_MODULES) += ksyms.o module.o
obj-$(CONFIG_KALLSYMS) += kallsyms.o
obj-$(CONFIG_PM) += pm.o
obj-$(CONFIG_CPU_FREQ) += cpufreq.o
diff --git a/kernel/exec_domain.c b/kernel/exec_domain.c
index a62818a2188b..e0b31f7f5243 100644
--- a/kernel/exec_domain.c
+++ b/kernel/exec_domain.c
@@ -210,8 +210,8 @@ get_exec_domain_list(char *page)
read_lock(&exec_domains_lock);
for (ep = exec_domains; ep && len < PAGE_SIZE - 80; ep = ep->next)
len += sprintf(page + len, "%d-%d\t%-16s\t[%s]\n",
- ep->pers_low, ep->pers_high, ep->name,
- ep->module ? ep->module->name : "kernel");
+ ep->pers_low, ep->pers_high, ep->name,
+ module_name(ep->module));
read_unlock(&exec_domains_lock);
return (len);
}
diff --git a/kernel/intermodule.c b/kernel/intermodule.c
new file mode 100644
index 000000000000..a6cd1d08afa4
--- /dev/null
+++ b/kernel/intermodule.c
@@ -0,0 +1,183 @@
+/* Deprecated, do not use. Moved from module.c to here. --RR */
+
+/* Written by Keith Owens <kaos@ocs.com.au> Oct 2000 */
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+/* inter_module functions are always available, even when the kernel is
+ * compiled without modules. Consumers of inter_module_xxx routines
+ * will always work, even when both are built into the kernel, this
+ * approach removes lots of #ifdefs in mainline code.
+ */
+
+static struct list_head ime_list = LIST_HEAD_INIT(ime_list);
+static spinlock_t ime_lock = SPIN_LOCK_UNLOCKED;
+static int kmalloc_failed;
+
+struct inter_module_entry {
+ struct list_head list;
+ const char *im_name;
+ struct module *owner;
+ const void *userdata;
+};
+
+/**
+ * inter_module_register - register a new set of inter module data.
+ * @im_name: an arbitrary string to identify the data, must be unique
+ * @owner: module that is registering the data, always use THIS_MODULE
+ * @userdata: pointer to arbitrary userdata to be registered
+ *
+ * Description: Check that the im_name has not already been registered,
+ * complain if it has. For new data, add it to the inter_module_entry
+ * list.
+ */
+void inter_module_register(const char *im_name, struct module *owner, const void *userdata)
+{
+ struct list_head *tmp;
+ struct inter_module_entry *ime, *ime_new;
+
+ if (!(ime_new = kmalloc(sizeof(*ime), GFP_KERNEL))) {
+ /* Overloaded kernel, not fatal */
+ printk(KERN_ERR
+ "Aiee, inter_module_register: cannot kmalloc entry for '%s'\n",
+ im_name);
+ kmalloc_failed = 1;
+ return;
+ }
+ memset(ime_new, 0, sizeof(*ime_new));
+ ime_new->im_name = im_name;
+ ime_new->owner = owner;
+ ime_new->userdata = userdata;
+
+ spin_lock(&ime_lock);
+ list_for_each(tmp, &ime_list) {
+ ime = list_entry(tmp, struct inter_module_entry, list);
+ if (strcmp(ime->im_name, im_name) == 0) {
+ spin_unlock(&ime_lock);
+ kfree(ime_new);
+ /* Program logic error, fatal */
+ printk(KERN_ERR "inter_module_register: duplicate im_name '%s'", im_name);
+ BUG();
+ }
+ }
+ list_add(&(ime_new->list), &ime_list);
+ spin_unlock(&ime_lock);
+}
+
+/**
+ * inter_module_unregister - unregister a set of inter module data.
+ * @im_name: an arbitrary string to identify the data, must be unique
+ *
+ * Description: Check that the im_name has been registered, complain if
+ * it has not. For existing data, remove it from the
+ * inter_module_entry list.
+ */
+void inter_module_unregister(const char *im_name)
+{
+ struct list_head *tmp;
+ struct inter_module_entry *ime;
+
+ spin_lock(&ime_lock);
+ list_for_each(tmp, &ime_list) {
+ ime = list_entry(tmp, struct inter_module_entry, list);
+ if (strcmp(ime->im_name, im_name) == 0) {
+ list_del(&(ime->list));
+ spin_unlock(&ime_lock);
+ kfree(ime);
+ return;
+ }
+ }
+ spin_unlock(&ime_lock);
+ if (kmalloc_failed) {
+ printk(KERN_ERR
+ "inter_module_unregister: no entry for '%s', "
+ "probably caused by previous kmalloc failure\n",
+ im_name);
+ return;
+ }
+ else {
+ /* Program logic error, fatal */
+ printk(KERN_ERR "inter_module_unregister: no entry for '%s'", im_name);
+ BUG();
+ }
+}
+
+/**
+ * inter_module_get - return arbitrary userdata from another module.
+ * @im_name: an arbitrary string to identify the data, must be unique
+ *
+ * Description: If the im_name has not been registered, return NULL.
+ * Try to increment the use count on the owning module, if that fails
+ * then return NULL. Otherwise return the userdata.
+ */
+const void *inter_module_get(const char *im_name)
+{
+ struct list_head *tmp;
+ struct inter_module_entry *ime;
+ const void *result = NULL;
+
+ spin_lock(&ime_lock);
+ list_for_each(tmp, &ime_list) {
+ ime = list_entry(tmp, struct inter_module_entry, list);
+ if (strcmp(ime->im_name, im_name) == 0) {
+ if (try_inc_mod_count(ime->owner))
+ result = ime->userdata;
+ break;
+ }
+ }
+ spin_unlock(&ime_lock);
+ return(result);
+}
+
+/**
+ * inter_module_get_request - im get with automatic request_module.
+ * @im_name: an arbitrary string to identify the data, must be unique
+ * @modname: module that is expected to register im_name
+ *
+ * Description: If inter_module_get fails, do request_module then retry.
+ */
+const void *inter_module_get_request(const char *im_name, const char *modname)
+{
+ const void *result = inter_module_get(im_name);
+ if (!result) {
+ request_module(modname);
+ result = inter_module_get(im_name);
+ }
+ return(result);
+}
+
+/**
+ * inter_module_put - release use of data from another module.
+ * @im_name: an arbitrary string to identify the data, must be unique
+ *
+ * Description: If the im_name has not been registered, complain,
+ * otherwise decrement the use count on the owning module.
+ */
+void inter_module_put(const char *im_name)
+{
+ struct list_head *tmp;
+ struct inter_module_entry *ime;
+
+ spin_lock(&ime_lock);
+ list_for_each(tmp, &ime_list) {
+ ime = list_entry(tmp, struct inter_module_entry, list);
+ if (strcmp(ime->im_name, im_name) == 0) {
+ if (ime->owner)
+ __MOD_DEC_USE_COUNT(ime->owner);
+ spin_unlock(&ime_lock);
+ return;
+ }
+ }
+ spin_unlock(&ime_lock);
+ printk(KERN_ERR "inter_module_put: no entry for '%s'", im_name);
+ BUG();
+}
+
+EXPORT_SYMBOL(inter_module_register);
+EXPORT_SYMBOL(inter_module_unregister);
+EXPORT_SYMBOL(inter_module_get);
+EXPORT_SYMBOL(inter_module_get_request);
+EXPORT_SYMBOL(inter_module_put);
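
The kerneldoc above describes the intended pairing: one module registers a named pointer, another looks it up (which pins the owner's use count via try_inc_mod_count()) and drops it again when done. A minimal usage sketch, with the foo_ops structure and the "foo_ops"/"foo" names purely hypothetical:

	#include <linux/module.h>
	#include <linux/init.h>
	#include <linux/errno.h>

	struct foo_ops {
		int (*do_foo)(int arg);
	};

	static int do_foo(int arg) { return arg * 2; }
	static struct foo_ops foo_ops = { .do_foo = do_foo };

	/* Producer: publish the ops table under a unique name. */
	static int __init foo_init(void)
	{
		/* A duplicate name is treated as a fatal logic error (BUG()). */
		inter_module_register("foo_ops", THIS_MODULE, &foo_ops);
		return 0;
	}

	static void __exit foo_exit(void)
	{
		inter_module_unregister("foo_ops");
	}

	module_init(foo_init);
	module_exit(foo_exit);

	/* Consumer (usually a different module): look it up, use it, put it. */
	static int call_foo(void)
	{
		const struct foo_ops *ops;

		/* Falls back to request_module("foo") if nothing is registered. */
		ops = inter_module_get_request("foo_ops", "foo");
		if (!ops)
			return -ENODEV;
		ops->do_foo(21);
		inter_module_put("foo_ops");
		return 0;
	}

Because intermodule.o is now always built in (see the Makefile hunk above), this pairing works whether the producer and consumer are modular or built into the kernel, as the file comment notes.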
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 755e5807e815..dd655dde199d 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -155,7 +155,7 @@ char modprobe_path[256] = "/sbin/modprobe";
static int exec_modprobe(void * module_name)
{
static char * envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
- char *argv[] = { modprobe_path, "-s", "-k", "--", (char*)module_name, NULL };
+ char *argv[] = { modprobe_path, "--", (char*)module_name, NULL };
int ret;
if (!system_running)
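
With this hunk, a demand-load request from kernel code ends up running "/sbin/modprobe -- <name>" with the fixed environment shown above, without the old "-s -k" flags. A minimal sketch of the calling side (the "foo" module name is hypothetical; as in inter_module_get_request() above, callers typically just retry their lookup afterwards rather than relying on the return value):

	#include <linux/kmod.h>

	static void ensure_foo_loaded(void)
	{
		/* Kicks off exec_modprobe() (shown above) in a kernel thread
		 * and waits for "/sbin/modprobe -- foo" to exit; may sleep. */
		request_module("foo");
	}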
diff --git a/kernel/ksyms.c b/kernel/ksyms.c
index 65f76c0b2ab3..8356f35808a5 100644
--- a/kernel/ksyms.c
+++ b/kernel/ksyms.c
@@ -71,14 +71,6 @@ __attribute__((section("__ksymtab"))) = {
};
#endif
-
-EXPORT_SYMBOL(inter_module_register);
-EXPORT_SYMBOL(inter_module_unregister);
-EXPORT_SYMBOL(inter_module_get);
-EXPORT_SYMBOL(inter_module_get_request);
-EXPORT_SYMBOL(inter_module_put);
-EXPORT_SYMBOL(try_inc_mod_count);
-
/* process memory management */
EXPORT_SYMBOL(do_mmap_pgoff);
EXPORT_SYMBOL(do_munmap);
diff --git a/kernel/module.c b/kernel/module.c
index 9921cb0e9c28..668406fa22cd 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1,1376 +1,1160 @@
+/* Rewritten by Rusty Russell, on the backs of many others...
+ Copyright (C) 2001 Rusty Russell, 2002 Rusty Russell IBM.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
#include <linux/config.h>
-#include <linux/mm.h>
#include <linux/module.h>
-#include <asm/module.h>
-#include <asm/uaccess.h>
-#include <linux/kallsyms.h>
-#include <linux/vmalloc.h>
-#include <linux/smp_lock.h>
-#include <asm/pgalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
-#include <linux/kmod.h>
+#include <linux/vmalloc.h>
+#include <linux/elf.h>
#include <linux/seq_file.h>
-#include <linux/fs.h>
+#include <linux/fcntl.h>
+#include <linux/rcupdate.h>
+#include <linux/cpu.h>
+#include <asm/uaccess.h>
+#include <asm/semaphore.h>
+#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
-/*
- * Originally by Anonymous (as far as I know...)
- * Linux version by Bas Laarhoven <bas@vimec.nl>
- * 0.99.14 version by Jon Tombs <jon@gtex02.us.es>,
- * Heavily modified by Bjorn Ekwall <bj0rn@blox.se> May 1994 (C)
- * Rewritten by Richard Henderson <rth@tamu.edu> Dec 1996
- * Add MOD_INITIALIZING Keith Owens <kaos@ocs.com.au> Nov 1999
- * Add kallsyms support, Keith Owens <kaos@ocs.com.au> Apr 2000
- * Add asm/module support, IA64 has special requirements. Keith Owens <kaos@ocs.com.au> Sep 2000
- * Fix assorted bugs in module verification. Keith Owens <kaos@ocs.com.au> Sep 2000
- * Fix sys_init_module race, Andrew Morton <andrewm@uow.edu.au> Oct 2000
- * http://www.uwsg.iu.edu/hypermail/linux/kernel/0008.3/0379.html
- * Replace xxx_module_symbol with inter_module_xxx. Keith Owens <kaos@ocs.com.au> Oct 2000
- * Add a module list lock for kernel fault race fixing. Alan Cox <alan@redhat.com>
- *
- * This source is covered by the GNU GPL, the same as all kernel sources.
- */
-
-#if defined(CONFIG_MODULES) || defined(CONFIG_KALLSYMS)
-
-extern struct module_symbol __start___ksymtab[];
-extern struct module_symbol __stop___ksymtab[];
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(fmt , a...)
+#endif
extern const struct exception_table_entry __start___ex_table[];
extern const struct exception_table_entry __stop___ex_table[];
+extern const struct kernel_symbol __start___ksymtab[];
+extern const struct kernel_symbol __stop___ksymtab[];
+
+/* Protects extables and symbol tables */
+spinlock_t modlist_lock = SPIN_LOCK_UNLOCKED;
-extern const char __start___kallsyms[] __attribute__((weak));
-extern const char __stop___kallsyms[] __attribute__((weak));
+/* The exception and symbol tables: start with kernel only. */
+LIST_HEAD(extables);
+static LIST_HEAD(symbols);
-/* modutils uses these exported symbols to figure out if
- kallsyms support is present */
+static struct exception_table kernel_extable;
+static struct kernel_symbol_group kernel_symbols;
-EXPORT_SYMBOL(__start___kallsyms);
-EXPORT_SYMBOL(__stop___kallsyms);
+/* List of modules, protected by module_mutex */
+static DECLARE_MUTEX(module_mutex);
+LIST_HEAD(modules); /* FIXME: Accessed w/o lock on oops by some archs */
-struct module kernel_module =
+/* Convenient structure for holding init and core sizes */
+struct sizes
{
- .size_of_struct = sizeof(struct module),
- .name = "",
- .uc = {ATOMIC_INIT(1)},
- .flags = MOD_RUNNING,
- .syms = __start___ksymtab,
- .ex_table_start = __start___ex_table,
- .ex_table_end = __stop___ex_table,
- .kallsyms_start = __start___kallsyms,
- .kallsyms_end = __stop___kallsyms,
+ unsigned long init_size;
+ unsigned long core_size;
};
-struct module *module_list = &kernel_module;
-
-#endif /* defined(CONFIG_MODULES) || defined(CONFIG_KALLSYMS) */
-
-/* inter_module functions are always available, even when the kernel is
- * compiled without modules. Consumers of inter_module_xxx routines
- * will always work, even when both are built into the kernel, this
- * approach removes lots of #ifdefs in mainline code.
- */
-
-static struct list_head ime_list = LIST_HEAD_INIT(ime_list);
-static spinlock_t ime_lock = SPIN_LOCK_UNLOCKED;
-static int kmalloc_failed;
-
-/*
- * This lock prevents modifications that might race the kernel fault
- * fixups. It does not prevent reader walks that the modules code
- * does. The kernel lock does that.
- *
- * Since vmalloc fault fixups occur in any context this lock is taken
- * irqsave at all times.
- */
+/* Find a symbol, return value and the symbol group */
+static unsigned long __find_symbol(const char *name,
+ struct kernel_symbol_group **group)
+{
+ struct kernel_symbol_group *ks;
-spinlock_t modlist_lock = SPIN_LOCK_UNLOCKED;
+ list_for_each_entry(ks, &symbols, list) {
+ unsigned int i;
-/**
- * inter_module_register - register a new set of inter module data.
- * @im_name: an arbitrary string to identify the data, must be unique
- * @owner: module that is registering the data, always use THIS_MODULE
- * @userdata: pointer to arbitrary userdata to be registered
- *
- * Description: Check that the im_name has not already been registered,
- * complain if it has. For new data, add it to the inter_module_entry
- * list.
- */
-void inter_module_register(const char *im_name, struct module *owner, const void *userdata)
-{
- struct list_head *tmp;
- struct inter_module_entry *ime, *ime_new;
-
- if (!(ime_new = kmalloc(sizeof(*ime), GFP_KERNEL))) {
- /* Overloaded kernel, not fatal */
- printk(KERN_ERR
- "Aiee, inter_module_register: cannot kmalloc entry for '%s'\n",
- im_name);
- kmalloc_failed = 1;
- return;
- }
- memset(ime_new, 0, sizeof(*ime_new));
- ime_new->im_name = im_name;
- ime_new->owner = owner;
- ime_new->userdata = userdata;
-
- spin_lock(&ime_lock);
- list_for_each(tmp, &ime_list) {
- ime = list_entry(tmp, struct inter_module_entry, list);
- if (strcmp(ime->im_name, im_name) == 0) {
- spin_unlock(&ime_lock);
- kfree(ime_new);
- /* Program logic error, fatal */
- printk(KERN_ERR "inter_module_register: duplicate im_name '%s'", im_name);
- BUG();
+ for (i = 0; i < ks->num_syms; i++) {
+ if (strcmp(ks->syms[i].name, name) == 0) {
+ *group = ks;
+ return ks->syms[i].value;
+ }
}
}
- list_add(&(ime_new->list), &ime_list);
- spin_unlock(&ime_lock);
+ DEBUGP("Failed to find symbol %s\n", name);
+ return 0;
}
-/**
- * inter_module_unregister - unregister a set of inter module data.
- * @im_name: an arbitrary string to identify the data, must be unique
- *
- * Description: Check that the im_name has been registered, complain if
- * it has not. For existing data, remove it from the
- * inter_module_entry list.
- */
-void inter_module_unregister(const char *im_name)
+/* Find a symbol in this elf symbol table */
+static unsigned long find_local_symbol(Elf_Shdr *sechdrs,
+ unsigned int symindex,
+ const char *strtab,
+ const char *name)
{
- struct list_head *tmp;
- struct inter_module_entry *ime;
-
- spin_lock(&ime_lock);
- list_for_each(tmp, &ime_list) {
- ime = list_entry(tmp, struct inter_module_entry, list);
- if (strcmp(ime->im_name, im_name) == 0) {
- list_del(&(ime->list));
- spin_unlock(&ime_lock);
- kfree(ime);
- return;
- }
- }
- spin_unlock(&ime_lock);
- if (kmalloc_failed) {
- printk(KERN_ERR
- "inter_module_unregister: no entry for '%s', "
- "probably caused by previous kmalloc failure\n",
- im_name);
- return;
+ unsigned int i;
+ Elf_Sym *sym = (void *)sechdrs[symindex].sh_offset;
+
+ /* Search (defined) internal symbols first. */
+ for (i = 1; i < sechdrs[symindex].sh_size/sizeof(*sym); i++) {
+ if (sym[i].st_shndx != SHN_UNDEF
+ && strcmp(name, strtab + sym[i].st_name) == 0)
+ return sym[i].st_value;
}
- else {
- /* Program logic error, fatal */
- printk(KERN_ERR "inter_module_unregister: no entry for '%s'", im_name);
- BUG();
+ return 0;
+}
+
+/* Search for module by name: must hold module_mutex. */
+static struct module *find_module(const char *name)
+{
+ struct module *mod;
+
+ list_for_each_entry(mod, &modules, list) {
+ if (strcmp(mod->name, name) == 0)
+ return mod;
}
+ return NULL;
}
-/**
- * inter_module_get - return arbitrary userdata from another module.
- * @im_name: an arbitrary string to identify the data, must be unique
- *
- * Description: If the im_name has not been registered, return NULL.
- * Try to increment the use count on the owning module, if that fails
- * then return NULL. Otherwise return the userdata.
- */
-const void *inter_module_get(const char *im_name)
+#ifdef CONFIG_MODULE_UNLOAD
+/* Init the unload section of the module. */
+static void module_unload_init(struct module *mod)
{
- struct list_head *tmp;
- struct inter_module_entry *ime;
- const void *result = NULL;
-
- spin_lock(&ime_lock);
- list_for_each(tmp, &ime_list) {
- ime = list_entry(tmp, struct inter_module_entry, list);
- if (strcmp(ime->im_name, im_name) == 0) {
- if (try_inc_mod_count(ime->owner))
- result = ime->userdata;
- break;
+ unsigned int i;
+
+ INIT_LIST_HEAD(&mod->modules_which_use_me);
+ for (i = 0; i < NR_CPUS; i++)
+ atomic_set(&mod->ref[i].count, 0);
+ /* Backwards compatibility macros put refcount during init. */
+ mod->waiter = current;
+}
+
+/* modules using other modules */
+struct module_use
+{
+ struct list_head list;
+ struct module *module_which_uses;
+};
+
+/* Does a already use b? */
+static int already_uses(struct module *a, struct module *b)
+{
+ struct module_use *use;
+
+ list_for_each_entry(use, &b->modules_which_use_me, list) {
+ if (use->module_which_uses == a) {
+ DEBUGP("%s uses %s!\n", a->name, b->name);
+ return 1;
}
}
- spin_unlock(&ime_lock);
- return(result);
+ DEBUGP("%s does not use %s!\n", a->name, b->name);
+ return 0;
}
-/**
- * inter_module_get_request - im get with automatic request_module.
- * @im_name: an arbitrary string to identify the data, must be unique
- * @modname: module that is expected to register im_name
- *
- * Description: If inter_module_get fails, do request_module then retry.
- */
-const void *inter_module_get_request(const char *im_name, const char *modname)
+/* Module a uses b */
+static int use_module(struct module *a, struct module *b)
{
- const void *result = inter_module_get(im_name);
- if (!result) {
- request_module(modname);
- result = inter_module_get(im_name);
+ struct module_use *use;
+ if (b == NULL || already_uses(a, b)) return 1;
+
+ DEBUGP("Allocating new usage for %s.\n", a->name);
+ use = kmalloc(sizeof(*use), GFP_ATOMIC);
+ if (!use) {
+ printk("%s: out of memory loading\n", a->name);
+ return 0;
}
- return(result);
+
+ use->module_which_uses = a;
+ list_add(&use->list, &b->modules_which_use_me);
+ try_module_get(b); /* Can't fail */
+ return 1;
}
-/**
- * inter_module_put - release use of data from another module.
- * @im_name: an arbitrary string to identify the data, must be unique
- *
- * Description: If the im_name has not been registered, complain,
- * otherwise decrement the use count on the owning module.
- */
-void inter_module_put(const char *im_name)
+/* Clear the unload stuff of the module. */
+static void module_unload_free(struct module *mod)
{
- struct list_head *tmp;
- struct inter_module_entry *ime;
-
- spin_lock(&ime_lock);
- list_for_each(tmp, &ime_list) {
- ime = list_entry(tmp, struct inter_module_entry, list);
- if (strcmp(ime->im_name, im_name) == 0) {
- if (ime->owner)
- __MOD_DEC_USE_COUNT(ime->owner);
- spin_unlock(&ime_lock);
- return;
+ struct module *i;
+
+ list_for_each_entry(i, &modules, list) {
+ struct module_use *use;
+
+ list_for_each_entry(use, &i->modules_which_use_me, list) {
+ if (use->module_which_uses == mod) {
+ DEBUGP("%s unusing %s\n", mod->name, i->name);
+ module_put(i);
+ list_del(&use->list);
+ kfree(use);
+ /* There can be at most one match. */
+ break;
+ }
}
}
- spin_unlock(&ime_lock);
- printk(KERN_ERR "inter_module_put: no entry for '%s'", im_name);
- BUG();
}
+#ifdef CONFIG_SMP
+/* Thread to stop each CPU in user context. */
+enum stopref_state {
+ STOPREF_WAIT,
+ STOPREF_PREPARE,
+ STOPREF_DISABLE_IRQ,
+ STOPREF_EXIT,
+};
+
+static enum stopref_state stopref_state;
+static unsigned int stopref_num_threads;
+static atomic_t stopref_thread_ack;
-#if defined(CONFIG_MODULES) /* The rest of the source */
+static int stopref(void *cpu)
+{
+ int irqs_disabled = 0;
+ int prepared = 0;
-static long get_mod_name(const char *user_name, char **buf);
-static void put_mod_name(char *buf);
-struct module *find_module(const char *name);
-void free_module(struct module *, int tag_freed);
+ sprintf(current->comm, "kmodule%lu\n", (unsigned long)cpu);
+ /* Highest priority we can manage, and move to right CPU. */
+#if 0 /* FIXME */
+ struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+ setscheduler(current->pid, SCHED_FIFO, &param);
+#endif
+ set_cpus_allowed(current, 1 << (unsigned long)cpu);
+
+ /* Ack: we are alive */
+ atomic_inc(&stopref_thread_ack);
+
+ /* Simple state machine */
+ while (stopref_state != STOPREF_EXIT) {
+ if (stopref_state == STOPREF_DISABLE_IRQ && !irqs_disabled) {
+ local_irq_disable();
+ irqs_disabled = 1;
+ /* Ack: irqs disabled. */
+ atomic_inc(&stopref_thread_ack);
+ } else if (stopref_state == STOPREF_PREPARE && !prepared) {
+ /* Everyone is in place, hold CPU. */
+ preempt_disable();
+ prepared = 1;
+ atomic_inc(&stopref_thread_ack);
+ }
+ if (irqs_disabled || prepared)
+ cpu_relax();
+ else
+ yield();
+ }
-/*
- * Called at boot time
- */
+ /* Ack: we are exiting. */
+ atomic_inc(&stopref_thread_ack);
-void __init init_modules(void)
-{
- kernel_module.nsyms = __stop___ksymtab - __start___ksymtab;
+ if (irqs_disabled)
+ local_irq_enable();
+ if (prepared)
+ preempt_enable();
- arch_init_modules(&kernel_module);
+ return 0;
}
-/*
- * Copy the name of a module from user space.
- */
-
-static inline long
-get_mod_name(const char *user_name, char **buf)
+/* Change the thread state */
+static void stopref_set_state(enum stopref_state state, int sleep)
{
- unsigned long page;
- long retval;
-
- page = __get_free_page(GFP_KERNEL);
- if (!page)
- return -ENOMEM;
-
- retval = strncpy_from_user((char *)page, user_name, PAGE_SIZE);
- if (retval > 0) {
- if (retval < PAGE_SIZE) {
- *buf = (char *)page;
- return retval;
- }
- retval = -ENAMETOOLONG;
- } else if (!retval)
- retval = -EINVAL;
-
- free_page(page);
- return retval;
+ atomic_set(&stopref_thread_ack, 0);
+ wmb();
+ stopref_state = state;
+ while (atomic_read(&stopref_thread_ack) != stopref_num_threads) {
+ if (sleep)
+ yield();
+ else
+ cpu_relax();
+ }
}
-static inline void
-put_mod_name(char *buf)
+/* Stop the machine. Disables irqs. */
+static int stop_refcounts(void)
{
- free_page((unsigned long)buf);
-}
+ unsigned int i, cpu;
+ unsigned long old_allowed;
+ int ret = 0;
-/*
- * Allocate space for a module.
- */
+ /* One thread per cpu. We'll do our own. */
+ cpu = smp_processor_id();
-asmlinkage unsigned long
-sys_create_module(const char *name_user, size_t size)
-{
- char *name;
- long namelen, error;
- struct module *mod;
- unsigned long flags;
+ /* FIXME: racy with set_cpus_allowed. */
+ old_allowed = current->cpus_allowed;
+ set_cpus_allowed(current, 1 << (unsigned long)cpu);
- if (!capable(CAP_SYS_MODULE))
- return -EPERM;
- lock_kernel();
- if ((namelen = get_mod_name(name_user, &name)) < 0) {
- error = namelen;
- goto err0;
- }
- if (size < sizeof(struct module)+namelen) {
- error = -EINVAL;
- goto err1;
- }
- if (find_module(name) != NULL) {
- error = -EEXIST;
- goto err1;
+ atomic_set(&stopref_thread_ack, 0);
+ stopref_num_threads = 0;
+ stopref_state = STOPREF_WAIT;
+
+ /* No CPUs can come up or down during this. */
+ down(&cpucontrol);
+
+ for (i = 0; i < NR_CPUS; i++) {
+ if (i == cpu || !cpu_online(i))
+ continue;
+ ret = kernel_thread(stopref, (void *)(long)i, CLONE_KERNEL);
+ if (ret < 0)
+ break;
+ stopref_num_threads++;
}
- if ((mod = (struct module *)module_map(size)) == NULL) {
- error = -ENOMEM;
- goto err1;
+
+ /* Wait for them all to come to life. */
+ while (atomic_read(&stopref_thread_ack) != stopref_num_threads)
+ yield();
+
+ /* If some failed, kill them all. */
+ if (ret < 0) {
+ stopref_set_state(STOPREF_EXIT, 1);
+ up(&cpucontrol);
+ return ret;
}
- memset(mod, 0, sizeof(*mod));
- mod->size_of_struct = sizeof(*mod);
- mod->name = (char *)(mod + 1);
- mod->size = size;
- memcpy((char*)(mod+1), name, namelen+1);
+ /* Don't schedule us away at this point, please. */
+ preempt_disable();
- put_mod_name(name);
+ /* Now they are all scheduled, make them hold the CPUs, ready. */
+ stopref_set_state(STOPREF_PREPARE, 0);
- spin_lock_irqsave(&modlist_lock, flags);
- mod->next = module_list;
- module_list = mod; /* link it in */
- spin_unlock_irqrestore(&modlist_lock, flags);
+ /* Make them disable irqs. */
+ stopref_set_state(STOPREF_DISABLE_IRQ, 0);
- error = (long) mod;
- goto err0;
-err1:
- put_mod_name(name);
-err0:
- unlock_kernel();
- return error;
+ local_irq_disable();
+ return 0;
}
-/*
- * Initialize a module.
- */
+/* Restart the machine. Re-enables irqs. */
+static void restart_refcounts(void)
+{
+ stopref_set_state(STOPREF_EXIT, 0);
+ local_irq_enable();
+ preempt_enable();
+ up(&cpucontrol);
+}
+#else /* ...!SMP */
+static inline int stop_refcounts(void)
+{
+ local_irq_disable();
+ return 0;
+}
+static inline void restart_refcounts(void)
+{
+ local_irq_enable();
+}
+#endif
+
+static unsigned int module_refcount(struct module *mod)
+{
+ unsigned int i, total = 0;
+
+ for (i = 0; i < NR_CPUS; i++)
+ total += atomic_read(&mod->ref[i].count);
+ return total;
+}
+
+/* This exists whether we can unload or not */
+static void free_module(struct module *mod);
asmlinkage long
-sys_init_module(const char *name_user, struct module *mod_user)
+sys_delete_module(const char *name_user, unsigned int flags)
{
- struct module mod_tmp, *mod;
- char *name, *n_name, *name_tmp = NULL;
- long namelen, n_namelen, i, error;
- unsigned long mod_user_size;
- struct module_ref *dep;
+ struct module *mod;
+ char name[MODULE_NAME_LEN];
+ int ret;
if (!capable(CAP_SYS_MODULE))
return -EPERM;
- lock_kernel();
- if ((namelen = get_mod_name(name_user, &name)) < 0) {
- error = namelen;
- goto err0;
- }
- if ((mod = find_module(name)) == NULL) {
- error = -ENOENT;
- goto err1;
- }
- /* Check module header size. We allow a bit of slop over the
- size we are familiar with to cope with a version of insmod
- for a newer kernel. But don't over do it. */
- if ((error = get_user(mod_user_size, &mod_user->size_of_struct)) != 0)
- goto err1;
- if (mod_user_size < (unsigned long)&((struct module *)0L)->persist_start
- || mod_user_size > sizeof(struct module) + 16*sizeof(void*)) {
- printk(KERN_ERR "init_module: Invalid module header size.\n"
- KERN_ERR "A new version of the modutils is likely "
- "needed.\n");
- error = -EINVAL;
- goto err1;
- }
+ if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
+ return -EFAULT;
+ name[MODULE_NAME_LEN-1] = '\0';
- /* Hold the current contents while we play with the user's idea
- of righteousness. */
- mod_tmp = *mod;
- name_tmp = kmalloc(strlen(mod->name) + 1, GFP_KERNEL); /* Where's kstrdup()? */
- if (name_tmp == NULL) {
- error = -ENOMEM;
- goto err1;
- }
- strcpy(name_tmp, mod->name);
+ if (down_interruptible(&module_mutex) != 0)
+ return -EINTR;
- error = copy_from_user(mod, mod_user, mod_user_size);
- if (error) {
- error = -EFAULT;
- goto err2;
+ mod = find_module(name);
+ if (!mod) {
+ ret = -ENOENT;
+ goto out;
}
- /* Sanity check the size of the module. */
- error = -EINVAL;
-
- if (mod->size > mod_tmp.size) {
- printk(KERN_ERR "init_module: Size of initialized module "
- "exceeds size of created module.\n");
- goto err2;
+ /* Already dying? */
+ if (!mod->live) {
+ DEBUGP("%s already dying\n", mod->name);
+ ret = -EBUSY;
+ goto out;
}
- /* Make sure all interesting pointers are sane. */
-
- if (!mod_bound(mod->name, namelen, mod)) {
- printk(KERN_ERR "init_module: mod->name out of bounds.\n");
- goto err2;
- }
- if (mod->nsyms && !mod_bound(mod->syms, mod->nsyms, mod)) {
- printk(KERN_ERR "init_module: mod->syms out of bounds.\n");
- goto err2;
- }
- if (mod->ndeps && !mod_bound(mod->deps, mod->ndeps, mod)) {
- printk(KERN_ERR "init_module: mod->deps out of bounds.\n");
- goto err2;
- }
- if (mod->init && !mod_bound((unsigned long)mod->init, 0, mod)) {
- printk(KERN_ERR "init_module: mod->init out of bounds.\n");
- goto err2;
- }
- if (mod->cleanup && !mod_bound((unsigned long)mod->cleanup, 0, mod)) {
- printk(KERN_ERR "init_module: mod->cleanup out of bounds.\n");
- goto err2;
- }
- if (mod->ex_table_start > mod->ex_table_end
- || (mod->ex_table_start &&
- !((unsigned long)mod->ex_table_start >= ((unsigned long)mod + mod->size_of_struct)
- && ((unsigned long)mod->ex_table_end
- < (unsigned long)mod + mod->size)))
- || (((unsigned long)mod->ex_table_start
- - (unsigned long)mod->ex_table_end)
- % sizeof(struct exception_table_entry))) {
- printk(KERN_ERR "init_module: mod->ex_table_* invalid.\n");
- goto err2;
- }
- if (mod->flags & ~MOD_AUTOCLEAN) {
- printk(KERN_ERR "init_module: mod->flags invalid.\n");
- goto err2;
- }
- if (mod_member_present(mod, can_unload)
- && mod->can_unload && !mod_bound((unsigned long)mod->can_unload, 0, mod)) {
- printk(KERN_ERR "init_module: mod->can_unload out of bounds.\n");
- goto err2;
- }
- if (mod_member_present(mod, kallsyms_end)) {
- if (mod->kallsyms_end &&
- (!mod_bound(mod->kallsyms_start, 0, mod) ||
- !mod_bound(mod->kallsyms_end, 0, mod))) {
- printk(KERN_ERR "init_module: mod->kallsyms out of bounds.\n");
- goto err2;
- }
- if (mod->kallsyms_start > mod->kallsyms_end) {
- printk(KERN_ERR "init_module: mod->kallsyms invalid.\n");
- goto err2;
- }
- }
- if (mod_member_present(mod, archdata_end)) {
- if (mod->archdata_end &&
- (!mod_bound(mod->archdata_start, 0, mod) ||
- !mod_bound(mod->archdata_end, 0, mod))) {
- printk(KERN_ERR "init_module: mod->archdata out of bounds.\n");
- goto err2;
- }
- if (mod->archdata_start > mod->archdata_end) {
- printk(KERN_ERR "init_module: mod->archdata invalid.\n");
- goto err2;
- }
+ if (!mod->exit || mod->unsafe) {
+ /* This module can't be removed */
+ ret = -EBUSY;
+ goto out;
}
- if (mod_member_present(mod, kernel_data) && mod->kernel_data) {
- printk(KERN_ERR "init_module: mod->kernel_data must be zero.\n");
- goto err2;
+ if (!list_empty(&mod->modules_which_use_me)) {
+ /* Other modules depend on us: get rid of them first. */
+ ret = -EWOULDBLOCK;
+ goto out;
}
- /* Check that the user isn't doing something silly with the name. */
+ /* Stop the machine so refcounts can't move: irqs disabled. */
+ DEBUGP("Stopping refcounts...\n");
+ ret = stop_refcounts();
+ if (ret != 0)
+ goto out;
- if ((n_namelen = get_mod_name(mod->name - (unsigned long)mod
- + (unsigned long)mod_user,
- &n_name)) < 0) {
- printk(KERN_ERR "init_module: get_mod_name failure.\n");
- error = n_namelen;
- goto err2;
- }
- if (namelen != n_namelen || strcmp(n_name, mod_tmp.name) != 0) {
- printk(KERN_ERR "init_module: changed module name to "
- "`%s' from `%s'\n",
- n_name, mod_tmp.name);
- goto err3;
+ /* If it's not unused, quit unless we are told to block. */
+ if ((flags & O_NONBLOCK) && module_refcount(mod) != 0)
+ ret = -EWOULDBLOCK;
+ else {
+ mod->waiter = current;
+ mod->live = 0;
}
+ restart_refcounts();
- /* Ok, that's about all the sanity we can stomach; copy the rest. */
+ if (ret != 0)
+ goto out;
- if (copy_from_user((char *)mod+mod_user_size,
- (char *)mod_user+mod_user_size,
- mod->size-mod_user_size)) {
- error = -EFAULT;
- goto err3;
+ /* Since we might sleep for some time, drop the semaphore first */
+ up(&module_mutex);
+ for (;;) {
+ DEBUGP("Looking at refcount...\n");
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (module_refcount(mod) == 0)
+ break;
+ schedule();
}
+ current->state = TASK_RUNNING;
- if (module_arch_init(mod))
- goto err3;
-
- /* On some machines it is necessary to do something here
- to make the I and D caches consistent. */
- flush_icache_range((unsigned long)mod, (unsigned long)mod + mod->size);
+ DEBUGP("Regrabbing mutex...\n");
+ down(&module_mutex);
- mod->next = mod_tmp.next;
- mod->refs = NULL;
+ /* Final destruction, now no one is using it. */
+ mod->exit();
+ free_module(mod);
+ ret = 0;
- /* Sanity check the module's dependents */
- for (i = 0, dep = mod->deps; i < mod->ndeps; ++i, ++dep) {
- struct module *o, *d = dep->dep;
+ out:
+ up(&module_mutex);
+ return ret;
+}
- /* Make sure the indicated dependencies are really modules. */
- if (d == mod) {
- printk(KERN_ERR "init_module: self-referential "
- "dependency in mod->deps.\n");
- goto err3;
- }
+static void print_unload_info(struct seq_file *m, struct module *mod)
+{
+ struct module_use *use;
- /* Scan the current modules for this dependency */
- for (o = module_list; o != &kernel_module && o != d; o = o->next)
- ;
+ seq_printf(m, " %u", module_refcount(mod));
- if (o != d) {
- printk(KERN_ERR "init_module: found dependency that is "
- "(no longer?) a module.\n");
- goto err3;
- }
- }
+ list_for_each_entry(use, &mod->modules_which_use_me, list)
+ seq_printf(m, " %s", use->module_which_uses->name);
- /* Update module references. */
- for (i = 0, dep = mod->deps; i < mod->ndeps; ++i, ++dep) {
- struct module *d = dep->dep;
+ if (mod->unsafe)
+ seq_printf(m, " [unsafe]");
- dep->ref = mod;
- dep->next_ref = d->refs;
- d->refs = dep;
- /* Being referenced by a dependent module counts as a
- use as far as kmod is concerned. */
- d->flags |= MOD_USED_ONCE;
- }
+ if (!mod->exit)
+ seq_printf(m, " [permanent]");
- /* Free our temporary memory. */
- put_mod_name(n_name);
- put_mod_name(name);
-
- /* Initialize the module. */
- atomic_set(&mod->uc.usecount,1);
- mod->flags |= MOD_INITIALIZING;
- if (mod->init && (error = mod->init()) != 0) {
- atomic_set(&mod->uc.usecount,0);
- mod->flags &= ~MOD_INITIALIZING;
- if (error > 0) /* Buggy module */
- error = -EBUSY;
- goto err0;
- }
- atomic_dec(&mod->uc.usecount);
-
- /* And set it running. */
- mod->flags = (mod->flags | MOD_RUNNING) & ~MOD_INITIALIZING;
- error = 0;
- goto err0;
-
-err3:
- put_mod_name(n_name);
-err2:
- *mod = mod_tmp;
- strcpy((char *)mod->name, name_tmp); /* We know there is room for this */
-err1:
- put_mod_name(name);
-err0:
- unlock_kernel();
- kfree(name_tmp);
- return error;
+ seq_printf(m, "\n");
}
-static spinlock_t unload_lock = SPIN_LOCK_UNLOCKED;
-int try_inc_mod_count(struct module *mod)
+void __symbol_put(const char *symbol)
{
- int res = 1;
- if (mod) {
- spin_lock(&unload_lock);
- if (mod->flags & MOD_DELETED)
- res = 0;
- else
- __MOD_INC_USE_COUNT(mod);
- spin_unlock(&unload_lock);
- }
- return res;
+ struct kernel_symbol_group *ksg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&modlist_lock, flags);
+ if (!__find_symbol(symbol, &ksg))
+ BUG();
+ module_put(ksg->owner);
+ spin_unlock_irqrestore(&modlist_lock, flags);
}
+EXPORT_SYMBOL(__symbol_put);
-asmlinkage long
-sys_delete_module(const char *name_user)
+void symbol_put_addr(void *addr)
{
- struct module *mod, *next;
- char *name;
- long error;
- int something_changed;
-
- if (!capable(CAP_SYS_MODULE))
- return -EPERM;
-
- lock_kernel();
- if (name_user) {
- if ((error = get_mod_name(name_user, &name)) < 0)
- goto out;
- error = -ENOENT;
- if ((mod = find_module(name)) == NULL) {
- put_mod_name(name);
- goto out;
- }
- put_mod_name(name);
- error = -EBUSY;
- if (mod->refs != NULL)
- goto out;
-
- spin_lock(&unload_lock);
- if (!__MOD_IN_USE(mod)) {
- mod->flags |= MOD_DELETED;
- spin_unlock(&unload_lock);
- free_module(mod, 0);
- error = 0;
- } else {
- spin_unlock(&unload_lock);
- }
- goto out;
- }
+ struct kernel_symbol_group *ks;
+ unsigned long flags;
- /* Do automatic reaping */
-restart:
- something_changed = 0;
-
- for (mod = module_list; mod != &kernel_module; mod = next) {
- next = mod->next;
- spin_lock(&unload_lock);
- if (mod->refs == NULL
- && (mod->flags & MOD_AUTOCLEAN)
- && (mod->flags & MOD_RUNNING)
- && !(mod->flags & MOD_DELETED)
- && (mod->flags & MOD_USED_ONCE)
- && !__MOD_IN_USE(mod)) {
- if ((mod->flags & MOD_VISITED)
- && !(mod->flags & MOD_JUST_FREED)) {
- spin_unlock(&unload_lock);
- mod->flags &= ~MOD_VISITED;
- } else {
- mod->flags |= MOD_DELETED;
- spin_unlock(&unload_lock);
- free_module(mod, 1);
- something_changed = 1;
+ spin_lock_irqsave(&modlist_lock, flags);
+ list_for_each_entry(ks, &symbols, list) {
+ unsigned int i;
+
+ for (i = 0; i < ks->num_syms; i++) {
+ if (ks->syms[i].value == (unsigned long)addr) {
+ module_put(ks->owner);
+ spin_unlock_irqrestore(&modlist_lock, flags);
+ return;
}
- } else {
- spin_unlock(&unload_lock);
}
}
-
- if (something_changed)
- goto restart;
-
- for (mod = module_list; mod != &kernel_module; mod = mod->next)
- mod->flags &= ~MOD_JUST_FREED;
-
- error = 0;
-out:
- unlock_kernel();
- return error;
+ spin_unlock_irqrestore(&modlist_lock, flags);
+ BUG();
}
+EXPORT_SYMBOL_GPL(symbol_put_addr);
-/* Query various bits about modules. */
-
-static int
-qm_modules(char *buf, size_t bufsize, size_t *ret)
+#else /* !CONFIG_MODULE_UNLOAD */
+static void print_unload_info(struct seq_file *m, struct module *mod)
{
- struct module *mod;
- size_t nmod, space, len;
-
- nmod = space = 0;
-
- for (mod=module_list; mod != &kernel_module; mod=mod->next, ++nmod) {
- len = strlen(mod->name)+1;
- if (len > bufsize)
- goto calc_space_needed;
- if (copy_to_user(buf, mod->name, len))
- return -EFAULT;
- buf += len;
- bufsize -= len;
- space += len;
- }
-
- if (put_user(nmod, ret))
- return -EFAULT;
- else
- return 0;
-
-calc_space_needed:
- space += len;
- while ((mod = mod->next) != &kernel_module)
- space += strlen(mod->name)+1;
-
- if (put_user(space, ret))
- return -EFAULT;
- else
- return -ENOSPC;
+ seq_printf(m, "\n");
}
-static int
-qm_deps(struct module *mod, char *buf, size_t bufsize, size_t *ret)
+static inline void module_unload_free(struct module *mod)
{
- size_t i, space, len;
-
- if (mod == &kernel_module)
- return -EINVAL;
- if (!MOD_CAN_QUERY(mod))
- if (put_user(0, ret))
- return -EFAULT;
- else
- return 0;
-
- space = 0;
- for (i = 0; i < mod->ndeps; ++i) {
- const char *dep_name = mod->deps[i].dep->name;
-
- len = strlen(dep_name)+1;
- if (len > bufsize)
- goto calc_space_needed;
- if (copy_to_user(buf, dep_name, len))
- return -EFAULT;
- buf += len;
- bufsize -= len;
- space += len;
- }
-
- if (put_user(i, ret))
- return -EFAULT;
- else
- return 0;
-
-calc_space_needed:
- space += len;
- while (++i < mod->ndeps)
- space += strlen(mod->deps[i].dep->name)+1;
-
- if (put_user(space, ret))
- return -EFAULT;
- else
- return -ENOSPC;
}
-static int
-qm_refs(struct module *mod, char *buf, size_t bufsize, size_t *ret)
+static inline int use_module(struct module *a, struct module *b)
{
- size_t nrefs, space, len;
- struct module_ref *ref;
-
- if (mod == &kernel_module)
- return -EINVAL;
- if (!MOD_CAN_QUERY(mod))
- if (put_user(0, ret))
- return -EFAULT;
- else
- return 0;
-
- space = 0;
- for (nrefs = 0, ref = mod->refs; ref ; ++nrefs, ref = ref->next_ref) {
- const char *ref_name = ref->ref->name;
-
- len = strlen(ref_name)+1;
- if (len > bufsize)
- goto calc_space_needed;
- if (copy_to_user(buf, ref_name, len))
- return -EFAULT;
- buf += len;
- bufsize -= len;
- space += len;
- }
-
- if (put_user(nrefs, ret))
- return -EFAULT;
- else
- return 0;
-
-calc_space_needed:
- space += len;
- while ((ref = ref->next_ref) != NULL)
- space += strlen(ref->ref->name)+1;
+ return try_module_get(b);
+}
- if (put_user(space, ret))
- return -EFAULT;
- else
- return -ENOSPC;
+static inline void module_unload_init(struct module *mod)
+{
}
-static int
-qm_symbols(struct module *mod, char *buf, size_t bufsize, size_t *ret)
+asmlinkage long
+sys_delete_module(const char *name_user, unsigned int flags)
{
- size_t i, space, len;
- struct module_symbol *s;
- char *strings;
- unsigned long *vals;
-
- if (!MOD_CAN_QUERY(mod))
- if (put_user(0, ret))
- return -EFAULT;
- else
- return 0;
+ return -ENOSYS;
+}
- space = mod->nsyms * 2*sizeof(void *);
+#endif /* CONFIG_MODULE_UNLOAD */
- i = len = 0;
- s = mod->syms;
+/* Find a symbol for this module (ie. resolve internals first).
+ If we find one, record usage. Must be holding module_mutex. */
+unsigned long find_symbol_internal(Elf_Shdr *sechdrs,
+ unsigned int symindex,
+ const char *strtab,
+ const char *name,
+ struct module *mod,
+ struct kernel_symbol_group **ksg)
+{
+ unsigned long ret;
- if (space > bufsize)
- goto calc_space_needed;
+ ret = find_local_symbol(sechdrs, symindex, strtab, name);
+ if (ret) {
+ *ksg = NULL;
+ return ret;
+ }
+ /* Look in other modules... */
+ spin_lock_irq(&modlist_lock);
+ ret = __find_symbol(name, ksg);
+ if (ret) {
+ /* This can fail due to OOM, or module unloading */
+ if (!use_module(mod, (*ksg)->owner))
+ ret = 0;
+ }
+ spin_unlock_irq(&modlist_lock);
+ return ret;
+}
- if (!access_ok(VERIFY_WRITE, buf, space))
- return -EFAULT;
+/* Free a module, remove from lists, etc (must hold module mutex). */
+static void free_module(struct module *mod)
+{
+ /* Delete from various lists */
+ list_del(&mod->list);
+ spin_lock_irq(&modlist_lock);
+ list_del(&mod->symbols.list);
+ list_del(&mod->extable.list);
+ spin_unlock_irq(&modlist_lock);
+
+ /* These may be NULL, but that's OK */
+ module_free(mod, mod->module_init);
+ module_free(mod, mod->module_core);
+
+ /* Module unload stuff */
+ module_unload_free(mod);
+
+ /* Finally, free the module structure */
+ kfree(mod);
+}
- bufsize -= space;
- vals = (unsigned long *)buf;
- strings = buf+space;
+void *__symbol_get(const char *symbol)
+{
+ struct kernel_symbol_group *ksg;
+ unsigned long value, flags;
- for (; i < mod->nsyms ; ++i, ++s, vals += 2) {
- len = strlen(s->name)+1;
- if (len > bufsize)
- goto calc_space_needed;
+ spin_lock_irqsave(&modlist_lock, flags);
+ value = __find_symbol(symbol, &ksg);
+ if (value && !try_module_get(ksg->owner))
+ value = 0;
+ spin_unlock_irqrestore(&modlist_lock, flags);
- if (copy_to_user(strings, s->name, len)
- || __put_user(s->value, vals+0)
- || __put_user(space, vals+1))
- return -EFAULT;
+ return (void *)value;
+}
+EXPORT_SYMBOL_GPL(__symbol_get);
+
+/* Transfer one ELF section to the correct (init or core) area. */
+static void *copy_section(const char *name,
+ void *base,
+ Elf_Shdr *sechdr,
+ struct module *mod,
+ struct sizes *used)
+{
+ void *dest;
+ unsigned long *use;
- strings += len;
- bufsize -= len;
- space += len;
+ /* Only copy to init section if there is one */
+ if (strstr(name, ".init") && mod->module_init) {
+ dest = mod->module_init;
+ use = &used->init_size;
+ } else {
+ dest = mod->module_core;
+ use = &used->core_size;
}
- if (put_user(i, ret))
- return -EFAULT;
- else
- return 0;
-calc_space_needed:
- for (; i < mod->nsyms; ++i, ++s)
- space += strlen(s->name)+1;
+ /* Align up */
+ *use = ALIGN(*use, sechdr->sh_addralign);
+ dest += *use;
+ *use += sechdr->sh_size;
- if (put_user(space, ret))
- return -EFAULT;
- else
- return -ENOSPC;
+ /* May not actually be in the file (eg. bss). */
+ if (sechdr->sh_type != SHT_NOBITS)
+ memcpy(dest, base + sechdr->sh_offset, sechdr->sh_size);
+
+ return dest;
}
-static int
-qm_info(struct module *mod, char *buf, size_t bufsize, size_t *ret)
+/* Look for the special symbols */
+static int grab_private_symbols(Elf_Shdr *sechdrs,
+ unsigned int symbolsec,
+ const char *strtab,
+ struct module *mod)
{
- int error = 0;
-
- if (mod == &kernel_module)
- return -EINVAL;
-
- if (sizeof(struct module_info) <= bufsize) {
- struct module_info info;
- info.addr = (unsigned long)mod;
- info.size = mod->size;
- info.flags = mod->flags;
-
- /* usecount is one too high here - report appropriately to
- compensate for locking */
- info.usecount = (mod_member_present(mod, can_unload)
- && mod->can_unload ? -1 : atomic_read(&mod->uc.usecount)-1);
-
- if (copy_to_user(buf, &info, sizeof(struct module_info)))
- return -EFAULT;
- } else
- error = -ENOSPC;
-
- if (put_user(sizeof(struct module_info), ret))
- return -EFAULT;
+ Elf_Sym *sym = (void *)sechdrs[symbolsec].sh_offset;
+ unsigned int i;
+
+ for (i = 1; i < sechdrs[symbolsec].sh_size/sizeof(*sym); i++) {
+ if (strcmp("__initfn", strtab + sym[i].st_name) == 0)
+ mod->init = (void *)sym[i].st_value;
+#ifdef CONFIG_MODULE_UNLOAD
+ if (strcmp("__exitfn", strtab + sym[i].st_name) == 0)
+ mod->exit = (void *)sym[i].st_value;
+#endif
+ }
- return error;
+ return 0;
}
-asmlinkage long
-sys_query_module(const char *name_user, int which, char *buf, size_t bufsize,
- size_t *ret)
+/* Deal with the given section */
+static int handle_section(const char *name,
+ Elf_Shdr *sechdrs,
+ unsigned int strindex,
+ unsigned int symindex,
+ unsigned int i,
+ struct module *mod)
{
- struct module *mod;
- int err;
-
- lock_kernel();
- if (name_user == NULL)
- mod = &kernel_module;
- else {
- long namelen;
- char *name;
+ int ret;
+ const char *strtab = (char *)sechdrs[strindex].sh_offset;
- if ((namelen = get_mod_name(name_user, &name)) < 0) {
- err = namelen;
- goto out;
- }
- err = -ENOENT;
- if ((mod = find_module(name)) == NULL) {
- put_mod_name(name);
- goto out;
- }
- put_mod_name(name);
- }
-
- /* __MOD_ touches the flags. We must avoid that */
-
- atomic_inc(&mod->uc.usecount);
-
- switch (which)
- {
- case 0:
- err = 0;
- break;
- case QM_MODULES:
- err = qm_modules(buf, bufsize, ret);
+ switch (sechdrs[i].sh_type) {
+ case SHT_REL:
+ ret = apply_relocate(sechdrs, strtab, symindex, i, mod);
break;
- case QM_DEPS:
- err = qm_deps(mod, buf, bufsize, ret);
+ case SHT_RELA:
+ ret = apply_relocate_add(sechdrs, strtab, symindex, i, mod);
break;
- case QM_REFS:
- err = qm_refs(mod, buf, bufsize, ret);
- break;
- case QM_SYMBOLS:
- err = qm_symbols(mod, buf, bufsize, ret);
- break;
- case QM_INFO:
- err = qm_info(mod, buf, bufsize, ret);
+ case SHT_SYMTAB:
+ ret = grab_private_symbols(sechdrs, i, strtab, mod);
break;
default:
- err = -EINVAL;
- break;
+ DEBUGP("Ignoring section %u: %s\n", i,
+ sechdrs[i].sh_type==SHT_NULL ? "NULL":
+ sechdrs[i].sh_type==SHT_PROGBITS ? "PROGBITS":
+ sechdrs[i].sh_type==SHT_SYMTAB ? "SYMTAB":
+ sechdrs[i].sh_type==SHT_STRTAB ? "STRTAB":
+ sechdrs[i].sh_type==SHT_RELA ? "RELA":
+ sechdrs[i].sh_type==SHT_HASH ? "HASH":
+ sechdrs[i].sh_type==SHT_DYNAMIC ? "DYNAMIC":
+ sechdrs[i].sh_type==SHT_NOTE ? "NOTE":
+ sechdrs[i].sh_type==SHT_NOBITS ? "NOBITS":
+ sechdrs[i].sh_type==SHT_REL ? "REL":
+ sechdrs[i].sh_type==SHT_SHLIB ? "SHLIB":
+ sechdrs[i].sh_type==SHT_DYNSYM ? "DYNSYM":
+ sechdrs[i].sh_type==SHT_NUM ? "NUM":
+ "UNKNOWN");
+ ret = 0;
}
- atomic_dec(&mod->uc.usecount);
-
-out:
- unlock_kernel();
- return err;
+ return ret;
}
-/*
- * Copy the kernel symbol table to user space. If the argument is
- * NULL, just return the size of the table.
- *
- * This call is obsolete. New programs should use query_module+QM_SYMBOLS
- * which does not arbitrarily limit the length of symbols.
- */
-
-asmlinkage long
-sys_get_kernel_syms(struct kernel_sym *table)
+/* Figure out total size desired for the common vars */
+static unsigned long read_commons(void *start, Elf_Shdr *sechdr)
{
- struct module *mod;
- int i;
- struct kernel_sym ksym;
-
- lock_kernel();
- for (mod = module_list, i = 0; mod; mod = mod->next) {
- /* include the count for the module name! */
- i += mod->nsyms + 1;
+ unsigned long size, i, max_align;
+ Elf_Sym *sym;
+
+ size = max_align = 0;
+
+ for (sym = start + sechdr->sh_offset, i = 0;
+ i < sechdr->sh_size / sizeof(Elf_Sym);
+ i++) {
+ if (sym[i].st_shndx == SHN_COMMON) {
+ /* Value encodes alignment. */
+ if (sym[i].st_value > max_align)
+ max_align = sym[i].st_value;
+ /* Pad to required alignment */
+ size = ALIGN(size, sym[i].st_value) + sym[i].st_size;
+ }
}
- if (table == NULL)
- goto out;
-
- /* So that we don't give the user our stack content */
- memset (&ksym, 0, sizeof (ksym));
-
- for (mod = module_list, i = 0; mod; mod = mod->next) {
- struct module_symbol *msym;
- unsigned int j;
-
- if (!MOD_CAN_QUERY(mod))
- continue;
+ /* Now, add in max alignment requirement (with align
+ attribute, this could be large), so we know we have space
+ whatever the start alignment is */
+ return size + max_align;
+}
- /* magic: write module info as a pseudo symbol */
- ksym.value = (unsigned long)mod;
- ksym.name[0] = '#';
- strncpy(ksym.name+1, mod->name, sizeof(ksym.name)-1);
- ksym.name[sizeof(ksym.name)-1] = '\0';
+/* Change all symbols so that st_value encodes the pointer directly. */
+static void simplify_symbols(Elf_Shdr *sechdrs,
+ unsigned int symindex,
+ unsigned int strindex,
+ void *common,
+ struct module *mod)
+{
+ unsigned int i;
+ Elf_Sym *sym;
+
+ /* First simplify defined symbols, so if they become the
+ "answer" to undefined symbols, copying their st_value us
+ correct. */
+ for (sym = (void *)sechdrs[symindex].sh_offset, i = 0;
+ i < sechdrs[symindex].sh_size / sizeof(Elf_Sym);
+ i++) {
+ switch (sym[i].st_shndx) {
+ case SHN_COMMON:
+ /* Value encodes alignment. */
+ common = (void *)ALIGN((unsigned long)common,
+ sym[i].st_value);
+ /* Change it to encode pointer */
+ sym[i].st_value = (unsigned long)common;
+ common += sym[i].st_size;
+ break;
- if (copy_to_user(table, &ksym, sizeof(ksym)) != 0)
- goto out;
- ++i, ++table;
+ case SHN_ABS:
+ /* Don't need to do anything */
+ DEBUGP("Absolute symbol: 0x%08lx\n",
+ (long)sym[i].st_value);
+ break;
- if (mod->nsyms == 0)
- continue;
+ case SHN_UNDEF:
+ break;
- for (j = 0, msym = mod->syms; j < mod->nsyms; ++j, ++msym) {
- ksym.value = msym->value;
- strncpy(ksym.name, msym->name, sizeof(ksym.name));
- ksym.name[sizeof(ksym.name)-1] = '\0';
+ default:
+ sym[i].st_value
+ = (unsigned long)
+ (sechdrs[sym[i].st_shndx].sh_offset
+ + sym[i].st_value);
+ }
+ }
- if (copy_to_user(table, &ksym, sizeof(ksym)) != 0)
- goto out;
- ++i, ++table;
+ /* Now try to resolve undefined symbols */
+ for (sym = (void *)sechdrs[symindex].sh_offset, i = 0;
+ i < sechdrs[symindex].sh_size / sizeof(Elf_Sym);
+ i++) {
+ if (sym[i].st_shndx == SHN_UNDEF) {
+ /* Look for symbol */
+ struct kernel_symbol_group *ksg = NULL;
+ const char *strtab
+ = (char *)sechdrs[strindex].sh_offset;
+
+ sym[i].st_value
+ = find_symbol_internal(sechdrs,
+ symindex,
+ strtab,
+ strtab + sym[i].st_name,
+ mod,
+ &ksg);
+ /* We fake up "__this_module" */
+ if (strcmp(strtab+sym[i].st_name, "__this_module")==0)
+ sym[i].st_value = (unsigned long)mod;
}
}
-out:
- unlock_kernel();
- return i;
}
-/*
- * Look for a module by name, ignoring modules marked for deletion.
- */
-
-struct module *
-find_module(const char *name)
+/* Get the total allocation size of the init and non-init sections */
+static struct sizes get_sizes(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ const char *secstrings)
{
- struct module *mod;
+ struct sizes ret = { 0, 0 };
+ unsigned i;
- for (mod = module_list; mod ; mod = mod->next) {
- if (mod->flags & MOD_DELETED)
- continue;
- if (!strcmp(mod->name, name))
- break;
+ /* Everything marked ALLOC (this includes the exported
+ symbols) */
+ for (i = 1; i < hdr->e_shnum; i++) {
+ unsigned long *add;
+
+ /* If it's called *.init*, and we're init, we're interested */
+ if (strstr(secstrings + sechdrs[i].sh_name, ".init") != 0)
+ add = &ret.init_size;
+ else
+ add = &ret.core_size;
+
+ if (sechdrs[i].sh_flags & SHF_ALLOC) {
+ /* Pad up to required alignment */
+ *add = ALIGN(*add, sechdrs[i].sh_addralign ?: 1);
+ *add += sechdrs[i].sh_size;
+ }
}
- return mod;
+ return ret;
}
-/*
- * Free the given module.
- */
-
-void
-free_module(struct module *mod, int tag_freed)
+/* Allocate and load the module */
+static struct module *load_module(void *umod,
+ unsigned long len,
+ const char *uargs)
{
- struct module_ref *dep;
- unsigned i;
- unsigned long flags;
+ Elf_Ehdr *hdr;
+ Elf_Shdr *sechdrs;
+ char *secstrings;
+ unsigned int i, symindex, exportindex, strindex, setupindex, exindex,
+ modnameindex;
+ long arglen;
+ unsigned long common_length;
+ struct sizes sizes, used;
+ struct module *mod;
+ int err = 0;
+ void *ptr = NULL; /* Stops spurious gcc uninitialized warning */
- /* Let the module clean up. */
+ DEBUGP("load_module: umod=%p, len=%lu, uargs=%p\n",
+ umod, len, uargs);
+ if (len < sizeof(*hdr))
+ return ERR_PTR(-ENOEXEC);
- if (mod->flags & MOD_RUNNING)
- {
- if(mod->cleanup)
- mod->cleanup();
- mod->flags &= ~MOD_RUNNING;
+ /* Suck in entire file: we'll want most of it. */
+ /* vmalloc barfs on "unusual" numbers. Check here */
+ if (len > 64 * 1024 * 1024 || (hdr = vmalloc(len)) == NULL)
+ return ERR_PTR(-ENOMEM);
+ if (copy_from_user(hdr, umod, len) != 0) {
+ err = -EFAULT;
+ goto free_hdr;
}
- /* Remove the module from the dependency lists. */
-
- for (i = 0, dep = mod->deps; i < mod->ndeps; ++i, ++dep) {
- struct module_ref **pp;
- for (pp = &dep->dep->refs; *pp != dep; pp = &(*pp)->next_ref)
- continue;
- *pp = dep->next_ref;
- if (tag_freed && dep->dep->refs == NULL)
- dep->dep->flags |= MOD_JUST_FREED;
+ /* Sanity checks against insmoding binaries or wrong arch,
+ weird elf version */
+ if (memcmp(hdr->e_ident, ELFMAG, 4) != 0
+ || hdr->e_type != ET_REL
+ || !elf_check_arch(hdr)
+ || hdr->e_shentsize != sizeof(*sechdrs)) {
+ err = -ENOEXEC;
+ goto free_hdr;
}
- /* And from the main module list. */
-
- spin_lock_irqsave(&modlist_lock, flags);
- if (mod == module_list) {
- module_list = mod->next;
- } else {
- struct module *p;
- for (p = module_list; p->next != mod; p = p->next)
- continue;
- p->next = mod->next;
+ /* Convenience variables */
+ sechdrs = (void *)hdr + hdr->e_shoff;
+ secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+ /* May not export symbols, or have setup params, so these may
+ not exist */
+ exportindex = setupindex = 0;
+
+ /* And these should exist, but gcc whinges if we don't init them */
+ symindex = strindex = exindex = modnameindex = 0;
+
+ /* Find where important sections are */
+ for (i = 1; i < hdr->e_shnum; i++) {
+ if (sechdrs[i].sh_type == SHT_SYMTAB) {
+ /* Internal symbols */
+ DEBUGP("Symbol table in section %u\n", i);
+ symindex = i;
+ } else if (strcmp(secstrings+sechdrs[i].sh_name, ".modulename")
+ == 0) {
+ /* This module's name */
+ DEBUGP("Module name in section %u\n", i);
+ modnameindex = i;
+ } else if (strcmp(secstrings+sechdrs[i].sh_name, "__ksymtab")
+ == 0) {
+ /* Exported symbols. */
+ DEBUGP("EXPORT table in section %u\n", i);
+ exportindex = i;
+ } else if (strcmp(secstrings + sechdrs[i].sh_name, ".strtab")
+ == 0) {
+ /* Strings */
+ DEBUGP("String table found in section %u\n", i);
+ strindex = i;
+ } else if (strcmp(secstrings+sechdrs[i].sh_name, ".setup.init")
+ == 0) {
+ /* Setup parameter info */
+ DEBUGP("Setup table found in section %u\n", i);
+ setupindex = i;
+ } else if (strcmp(secstrings+sechdrs[i].sh_name, "__ex_table")
+ == 0) {
+ /* Exception table */
+ DEBUGP("Exception table found in section %u\n", i);
+ exindex = i;
+ }
+#ifndef CONFIG_MODULE_UNLOAD
+ /* Don't load .exit sections */
+ if (strstr(secstrings+sechdrs[i].sh_name, ".exit"))
+ sechdrs[i].sh_flags &= ~(unsigned long)SHF_ALLOC;
+#endif
}
- spin_unlock_irqrestore(&modlist_lock, flags);
-
- /* And free the memory. */
-
- module_unmap(mod);
-}
-/*
- * Called by the /proc file system to return a current list of modules.
- */
-static void *m_start(struct seq_file *m, loff_t *pos)
-{
- struct module *v;
- loff_t n = *pos;
- lock_kernel();
- for (v = module_list; v && n--; v = v->next)
- ;
- return v;
-}
-static void *m_next(struct seq_file *m, void *p, loff_t *pos)
-{
- struct module *v = p;
- (*pos)++;
- return v->next;
-}
-static void m_stop(struct seq_file *m, void *p)
-{
- unlock_kernel();
-}
-static int m_show(struct seq_file *m, void *p)
-{
- struct module *mod = p;
- struct module_ref *ref = mod->refs;
+ if (!modnameindex) {
+ DEBUGP("Module has no name!\n");
+ err = -ENOEXEC;
+ goto free_hdr;
+ }
- if (mod == &kernel_module)
- return 0;
+ /* Now allocate space for the module proper, and copy name and args. */
+ err = strlen_user(uargs);
+ if (err < 0)
+ goto free_hdr;
+ arglen = err;
- seq_printf(m, "%-20s%8lu", mod->name, mod->size);
- if (mod->flags & MOD_RUNNING)
- seq_printf(m, "%4ld",
- (mod_member_present(mod, can_unload)
- && mod->can_unload
- ? -1L : (long)atomic_read(&mod->uc.usecount)));
-
- if (mod->flags & MOD_DELETED)
- seq_puts(m, " (deleted)");
- else if (mod->flags & MOD_RUNNING) {
- if (mod->flags & MOD_AUTOCLEAN)
- seq_puts(m, " (autoclean)");
- if (!(mod->flags & MOD_USED_ONCE))
- seq_puts(m, " (unused)");
- } else if (mod->flags & MOD_INITIALIZING)
- seq_puts(m, " (initializing)");
- else
- seq_puts(m, " (uninitialized)");
- if (ref) {
- char c;
- seq_putc(m, ' ');
- for (c = '[' ; ref; c = ' ', ref = ref->next_ref)
- seq_printf(m, "%c%s", c, ref->ref->name);
- seq_putc(m, ']');
+ mod = kmalloc(sizeof(*mod) + arglen+1, GFP_KERNEL);
+ if (!mod) {
+ err = -ENOMEM;
+ goto free_hdr;
}
- seq_putc(m, '\n');
- return 0;
-}
-struct seq_operations modules_op = {
- .start = m_start,
- .next = m_next,
- .stop = m_stop,
- .show = m_show
-};
-
-/*
- * Called by the /proc file system to return a current list of ksyms.
- */
+ memset(mod, 0, sizeof(*mod) + arglen+1);
+ if (copy_from_user(mod->args, uargs, arglen) != 0) {
+ err = -EFAULT;
+ goto free_mod;
+ }
+ strncpy(mod->name, (char *)hdr + sechdrs[modnameindex].sh_offset,
+ sizeof(mod->name)-1);
-struct mod_sym {
- struct module *mod;
- int index;
-};
+ if (find_module(mod->name)) {
+ err = -EEXIST;
+ goto free_mod;
+ }
-/* iterator */
+ /* Initialize the lists, since they will be list_del'd if init fails */
+ INIT_LIST_HEAD(&mod->extable.list);
+ INIT_LIST_HEAD(&mod->list);
+ INIT_LIST_HEAD(&mod->symbols.list);
+ mod->symbols.owner = mod;
+ mod->live = 0;
+ module_unload_init(mod);
+
+ /* How much space will we need? (Common area in core) */
+ sizes = get_sizes(hdr, sechdrs, secstrings);
+ common_length = read_commons(hdr, &sechdrs[symindex]);
+ sizes.core_size += common_length;
+
+ /* Set these up: arch's can add to them */
+ mod->core_size = sizes.core_size;
+ mod->init_size = sizes.init_size;
+
+ /* Allocate (this is arch specific) */
+ ptr = module_core_alloc(hdr, sechdrs, secstrings, mod);
+ if (IS_ERR(ptr))
+ goto free_mod;
+
+ mod->module_core = ptr;
+
+ ptr = module_init_alloc(hdr, sechdrs, secstrings, mod);
+ if (IS_ERR(ptr))
+ goto free_core;
+ mod->module_init = ptr;
+
+ /* Transfer each section which requires ALLOC, and set sh_offset
+ fields to absolute addresses. */
+ used.core_size = common_length;
+ used.init_size = 0;
+ for (i = 1; i < hdr->e_shnum; i++) {
+ if (sechdrs[i].sh_flags & SHF_ALLOC) {
+ ptr = copy_section(secstrings + sechdrs[i].sh_name,
+ hdr, &sechdrs[i], mod, &used);
+ if (IS_ERR(ptr))
+ goto cleanup;
+ sechdrs[i].sh_offset = (unsigned long)ptr;
+ } else {
+ sechdrs[i].sh_offset += (unsigned long)hdr;
+ }
+ }
+ /* Don't use more than we allocated! */
+ if (used.init_size > mod->init_size || used.core_size > mod->core_size)
+ BUG();
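copy_section() hands each SHF_ALLOC section its final home in either the core or init region sized earlier by get_sizes(), and the BUG() asserts that the layout pass and the copy pass agreed. The placement rule is just an aligned bump allocation; the helper below is a sketch, not code from the patch (sh_addralign is a power of two per the ELF spec, which is what the mask trick relies on).

/* Sketch: reserve `size` bytes inside a region, honouring the section's
 * alignment, and return the offset at which it was placed. */
static unsigned long reserve_in_region(unsigned long *used,
				       unsigned long size,
				       unsigned long align)
{
	unsigned long off;

	if (align == 0)
		align = 1;
	off = (*used + align - 1) & ~(align - 1);	/* round up */
	*used = off + size;
	return off;
}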
-static void *s_start(struct seq_file *m, loff_t *pos)
-{
- struct mod_sym *p = kmalloc(sizeof(*p), GFP_KERNEL);
- struct module *v;
- loff_t n = *pos;
+ /* Fix up syms, so that st_value is a pointer to location. */
+ simplify_symbols(sechdrs, symindex, strindex, mod->module_core, mod);
- if (!p)
- return ERR_PTR(-ENOMEM);
- lock_kernel();
- for (v = module_list; v; n -= v->nsyms, v = v->next) {
- if (n < v->nsyms) {
- p->mod = v;
- p->index = n;
- return p;
- }
+ /* Set up EXPORTed symbols */
+ if (exportindex) {
+ mod->symbols.num_syms = (sechdrs[exportindex].sh_size
+ / sizeof(*mod->symbols.syms));
+ mod->symbols.syms = (void *)sechdrs[exportindex].sh_offset;
}
- unlock_kernel();
- kfree(p);
- return NULL;
-}
-static void *s_next(struct seq_file *m, void *p, loff_t *pos)
-{
- struct mod_sym *v = p;
- (*pos)++;
- if (++v->index >= v->mod->nsyms) {
- do {
- v->mod = v->mod->next;
- if (!v->mod) {
- unlock_kernel();
- kfree(p);
- return NULL;
- }
- } while (!v->mod->nsyms);
- v->index = 0;
+ /* Set up exception table */
+ if (exindex) {
+ /* FIXME: Sort exception table. */
+ mod->extable.num_entries = (sechdrs[exindex].sh_size
+ / sizeof(struct
+ exception_table_entry));
+ mod->extable.entry = (void *)sechdrs[exindex].sh_offset;
}
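With the FIXME above unresolved, the module's exception table is left unsorted, so a fault-time lookup over these entries has to be a linear scan rather than the binary search used on the sorted kernel table. The sketch below shows such a scan; the two-word layout (insn, fixup) follows the common x86 definition, and the names are illustrative since struct exception_table_entry is per-architecture.

/* Sketch: field names follow the usual x86 layout. */
struct extable_entry_sketch {
	unsigned long insn;	/* address of the faulting instruction */
	unsigned long fixup;	/* address to resume at */
};

static unsigned long search_table(const struct extable_entry_sketch *e,
				  unsigned num, unsigned long fault_ip)
{
	unsigned i;

	/* Table is not sorted yet (see the FIXME above), so scan linearly. */
	for (i = 0; i < num; i++)
		if (e[i].insn == fault_ip)
			return e[i].fixup;
	return 0;
}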
- return p;
-}
-static void s_stop(struct seq_file *m, void *p)
-{
- if (p && !IS_ERR(p)) {
- unlock_kernel();
- kfree(p);
+ /* Now handle each section. */
+ for (i = 1; i < hdr->e_shnum; i++) {
+ err = handle_section(secstrings + sechdrs[i].sh_name,
+ sechdrs, strindex, symindex, i, mod);
+ if (err < 0)
+ goto cleanup;
}
-}
-static int s_show(struct seq_file *m, void *p)
-{
- struct mod_sym *v = p;
- struct module_symbol *sym;
+ err = module_finalize(hdr, sechdrs, mod);
+ if (err < 0)
+ goto cleanup;
+
+#if 0 /* Needs param support */
+ /* Size of section 0 is 0, so this works well */
+ err = parse_args(mod->args,
+ (struct kernel_param *)
+ sechdrs[setupindex].sh_offset,
+ sechdrs[setupindex].sh_size
+ / sizeof(struct kernel_param),
+ NULL);
+ if (err < 0)
+ goto cleanup;
+#endif
- if (!MOD_CAN_QUERY(v->mod))
- return 0;
- sym = &v->mod->syms[v->index];
- if (*v->mod->name)
- seq_printf(m, "%0*lx %s\t[%s]\n", (int)(2*sizeof(void*)),
- sym->value, sym->name, v->mod->name);
- else
- seq_printf(m, "%0*lx %s\n", (int)(2*sizeof(void*)),
- sym->value, sym->name);
- return 0;
-}
+ /* Get rid of temporary copy */
+ vfree(hdr);
-struct seq_operations ksyms_op = {
- .start = s_start,
- .next = s_next,
- .stop = s_stop,
- .show = s_show
-};
+ /* Done! */
+ return mod;
-#define MODLIST_SIZE 4096
+ cleanup:
+ module_unload_free(mod);
+ module_free(mod, mod->module_init);
+ free_core:
+ module_free(mod, mod->module_core);
+ free_mod:
+ kfree(mod);
+ free_hdr:
+ vfree(hdr);
+ if (err < 0) return ERR_PTR(err);
+ else return ptr;
+}
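The tail of load_module() is the usual kernel unwind ladder: each label releases exactly what had been acquired before the failing step, in reverse order, and the function returns an ERR_PTR either from err or from the ERR_PTR-carrying ptr that triggered the jump. A minimal sketch of the idiom in isolation (struct foo, FOO_BUF_SIZE and foo_register() are made up for illustration):

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/err.h>

#define FOO_BUF_SIZE 4096

struct foo {
	void *buf;
};

extern int foo_register(struct foo *f);	/* hypothetical */

static struct foo *foo_create(void)
{
	struct foo *f;
	int err;

	f = kmalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return ERR_PTR(-ENOMEM);

	f->buf = vmalloc(FOO_BUF_SIZE);
	if (!f->buf) {
		err = -ENOMEM;
		goto free_f;
	}

	err = foo_register(f);
	if (err < 0)
		goto free_buf;

	return f;			/* success: everything stays allocated */

 free_buf:
	vfree(f->buf);
 free_f:
	kfree(f);
	return ERR_PTR(err);
}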
-/*
- * this function isn't smp safe but that's not really a problem; it's
- * called from oops context only and any locking could actually prevent
- * the oops from going out; the line that is generated is informational
- * only and should NEVER prevent the real oops from going out.
- */
-void print_modules(void)
+/* This is where the real work happens */
+asmlinkage long
+sys_init_module(void *umod,
+ unsigned long len,
+ const char *uargs)
{
- static char modlist[MODLIST_SIZE];
- struct module *this_mod;
- int pos = 0;
-
- this_mod = module_list;
- while (this_mod) {
- if (this_mod->name)
- pos += snprintf(modlist+pos, MODLIST_SIZE-pos-1,
- "%s ", this_mod->name);
- this_mod = this_mod->next;
- }
- printk("%s\n",modlist);
-}
+ struct module *mod;
+ int ret;
-#else /* CONFIG_MODULES */
+ /* Must have permission */
+ if (!capable(CAP_SYS_MODULE))
+ return -EPERM;
-/* Dummy syscalls for people who don't want modules */
+ /* Only one module load at a time, please */
+ if (down_interruptible(&module_mutex) != 0)
+ return -EINTR;
-asmlinkage unsigned long
-sys_create_module(const char *name_user, size_t size)
-{
- return -ENOSYS;
-}
+ /* Do all the hard work */
+ mod = load_module(umod, len, uargs);
+ if (IS_ERR(mod)) {
+ up(&module_mutex);
+ return PTR_ERR(mod);
+ }
-asmlinkage long
-sys_init_module(const char *name_user, struct module *mod_user)
-{
- return -ENOSYS;
-}
+ /* Flush the instruction cache, since we've played with text */
+ if (mod->module_init)
+ flush_icache_range((unsigned long)mod->module_init,
+ (unsigned long)mod->module_init
+ + mod->init_size);
+ flush_icache_range((unsigned long)mod->module_core,
+ (unsigned long)mod->module_core + mod->core_size);
+
+ /* Now sew it into exception list (just in case...). */
+ spin_lock_irq(&modlist_lock);
+ list_add(&mod->extable.list, &extables);
+ spin_unlock_irq(&modlist_lock);
+
+ /* Start the module */
+ ret = mod->init ? mod->init() : 0;
+ if (ret < 0) {
+ /* Init routine failed: abort. Try to protect us from
+ buggy refcounters. */
+ synchronize_kernel();
+ if (mod->unsafe) {
+ printk(KERN_ERR "%s: module is now stuck!\n",
+ mod->name);
+ /* Mark it "live" so that they can force
+ deletion later, and we don't keep getting
+ woken on every decrement. */
+ mod->live = 1;
+ } else
+ free_module(mod);
+ up(&module_mutex);
+ return ret;
+ }
-asmlinkage long
-sys_delete_module(const char *name_user)
-{
- return -ENOSYS;
-}
+ /* Now it's a first class citizen! */
+ spin_lock_irq(&modlist_lock);
+ list_add(&mod->symbols.list, &kernel_symbols.list);
+ spin_unlock_irq(&modlist_lock);
+ list_add(&mod->list, &modules);
-asmlinkage long
-sys_query_module(const char *name_user, int which, char *buf, size_t bufsize,
- size_t *ret)
-{
- /* Let the program know about the new interface. Not that
- it'll do them much good. */
- if (which == 0)
- return 0;
+ module_free(mod, mod->module_init);
+ mod->module_init = NULL;
- return -ENOSYS;
+ /* All ok! */
+ mod->live = 1;
+ up(&module_mutex);
+ return 0;
}
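The new sys_init_module() takes the raw ELF image, its length and a single argument string, which makes the user-space side of loading almost trivial: read the object file into memory and issue one system call. Module utilities normally do this; the program below is only a sketch of the raw interface introduced here.

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(int argc, char **argv)
{
	FILE *f;
	long len;
	void *image;

	if (argc < 2) {
		fprintf(stderr, "usage: %s module.o [args]\n", argv[0]);
		return 1;
	}
	f = fopen(argv[1], "rb");
	if (!f) {
		perror(argv[1]);
		return 1;
	}
	fseek(f, 0, SEEK_END);
	len = ftell(f);
	rewind(f);
	image = len > 0 ? malloc(len) : NULL;
	if (!image || fread(image, 1, len, f) != (size_t)len) {
		fprintf(stderr, "could not read %s\n", argv[1]);
		return 1;
	}
	fclose(f);

	/* Third argument is the module's option string (may be empty). */
	if (syscall(__NR_init_module, image, (unsigned long)len,
		    argc > 2 ? argv[2] : "") != 0) {
		perror("init_module");
		return 1;
	}
	return 0;
}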
-asmlinkage long
-sys_get_kernel_syms(struct kernel_sym *table)
+/* Called by the /proc file system to return a current list of
+ modules. Al Viro came up with this interface as an "improvement".
+ God save us from any more such interface improvements. */
+static void *m_start(struct seq_file *m, loff_t *pos)
{
- return -ENOSYS;
+ struct list_head *i;
+ loff_t n = 0;
+
+ down(&module_mutex);
+ list_for_each(i, &modules) {
+ if (n++ == *pos)
+ break;
+ }
+ if (i == &modules)
+ return NULL;
+ return i;
}
-int try_inc_mod_count(struct module *mod)
+static void *m_next(struct seq_file *m, void *p, loff_t *pos)
{
- return 1;
+ struct list_head *i = p;
+ (*pos)++;
+ if (i->next == &modules)
+ return NULL;
+ return i->next;
}
-void print_modules(void)
+static void m_stop(struct seq_file *m, void *p)
{
+ up(&module_mutex);
}
-#endif /* CONFIG_MODULES */
-
-
-#if defined(CONFIG_MODULES) || defined(CONFIG_KALLSYMS)
-
-#define MAX_SYMBOL_SIZE 512
-
-static void
-address_to_exported_symbol(unsigned long address, const char **mod_name,
- const char **sym_name, unsigned long *sym_start,
- unsigned long *sym_end)
+static int m_show(struct seq_file *m, void *p)
{
- struct module *this_mod;
- int i;
-
- for (this_mod = module_list; this_mod; this_mod = this_mod->next) {
- /* walk the symbol list of this module. Only symbols
- who's address is smaller than the searched for address
- are relevant; and only if it's better than the best so far */
- for (i = 0; i < this_mod->nsyms; i++)
- if ((this_mod->syms[i].value <= address) &&
- (*sym_start < this_mod->syms[i].value)) {
- *sym_start = this_mod->syms[i].value;
- *sym_name = this_mod->syms[i].name;
- *mod_name = this_mod->name;
- if (i + 1 < this_mod->nsyms)
- *sym_end = this_mod->syms[i+1].value;
- else
- *sym_end = (unsigned long) this_mod + this_mod->size;
- }
- }
+ struct module *mod = list_entry(p, struct module, list);
+ seq_printf(m, "%s %lu",
+ mod->name, mod->init_size + mod->core_size);
+ print_unload_info(m, mod);
+ return 0;
}
+struct seq_operations modules_op = {
+ .start = m_start,
+ .next = m_next,
+ .stop = m_stop,
+ .show = m_show
+};
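modules_op is a standard seq_file iterator: m_start() takes module_mutex and walks the list out to *pos, m_next() steps, m_stop() releases the mutex, and m_show() emits one line per module. The /proc side that consumes it lives outside this file; a typical hookup looks roughly like the sketch below (the fops name is illustrative).

#include <linux/fs.h>
#include <linux/seq_file.h>

static int modules_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &modules_op);
}

static struct file_operations proc_modules_operations = {
	.open		= modules_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};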
-void
-print_symbol(const char *fmt, unsigned long address)
+static int __init init(void)
{
- /* static to not take up stackspace; if we race here too bad */
- static char buffer[MAX_SYMBOL_SIZE];
-
- const char *mod_name = NULL, *sec_name = NULL, *sym_name = NULL;
- unsigned long mod_start, mod_end, sec_start, sec_end,
- sym_start, sym_end;
- char *tag = "";
-
- memset(buffer, 0, MAX_SYMBOL_SIZE);
-
- sym_start = 0;
- if (!kallsyms_address_to_symbol(address, &mod_name, &mod_start, &mod_end, &sec_name, &sec_start, &sec_end, &sym_name, &sym_start, &sym_end)) {
- tag = "E ";
- address_to_exported_symbol(address, &mod_name, &sym_name, &sym_start, &sym_end);
- }
-
- if (sym_start) {
- if (*mod_name)
- snprintf(buffer, MAX_SYMBOL_SIZE - 1, "%s%s+%#x/%#x [%s]",
- tag, sym_name,
- (unsigned int)(address - sym_start),
- (unsigned int)(sym_end - sym_start),
- mod_name);
- else
- snprintf(buffer, MAX_SYMBOL_SIZE - 1, "%s%s+%#x/%#x",
- tag, sym_name,
- (unsigned int)(address - sym_start),
- (unsigned int)(sym_end - sym_start));
- printk(fmt, buffer);
- }
-#if 0
- else {
- printk(fmt, "[unresolved]");
- }
-#endif
+ /* Add kernel symbols to symbol table */
+ kernel_symbols.num_syms = (__stop___ksymtab - __start___ksymtab);
+ kernel_symbols.syms = __start___ksymtab;
+ list_add(&kernel_symbols.list, &symbols);
+
+ /* Add kernel exception table to exception tables */
+	kernel_extable.num_entries = (__stop___ex_table - __start___ex_table);
+ kernel_extable.entry = __start___ex_table;
+ list_add(&kernel_extable.list, &extables);
+ return 0;
}
-#endif
+/* Obsolete lvalue for broken code which asks about usage */
+int module_dummy_usage = 1;
+
+/* Call this at boot */
+__initcall(init);
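init() seeds the global symbol and exception lists with the kernel's own __ksymtab and __ex_table contents, so symbol resolution and fault fixup treat the kernel like any other entry on those lists, and __initcall(init) queues it to run once during boot. The entries come from EXPORT_SYMBOL, which in this scheme amounts to emitting an { address, name } pair into the __ksymtab section; the macro below is a simplified sketch of that idea, not the real definition in <linux/module.h>.

/* Simplified sketch of what an EXPORT_SYMBOL-style macro emits: a
 * symbol record placed in the __ksymtab section, which the linker
 * script collects between __start___ksymtab and __stop___ksymtab. */
struct kernel_symbol_sketch {
	unsigned long value;
	const char *name;
};

#define EXPORT_SYMBOL_SKETCH(sym)					\
	static const char __kstrtab_##sym[] = #sym;			\
	static const struct kernel_symbol_sketch __ksymtab_##sym	\
	__attribute__((section("__ksymtab")))				\
	= { (unsigned long)&sym, __kstrtab_##sym }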
diff --git a/kernel/suspend.c b/kernel/suspend.c
index 1f2aa93db988..838efbc16787 100644
--- a/kernel/suspend.c
+++ b/kernel/suspend.c
@@ -103,7 +103,7 @@ static int nr_copy_pages_check;
static int resume_status = 0;
static char resume_file[256] = ""; /* For resume= kernel option */
-static kdev_t resume_device;
+static dev_t resume_device;
/* Local variables that should not be affected by save */
unsigned int nr_copy_pages __nosavedata = 0;
@@ -1143,13 +1143,13 @@ static int read_suspend_image(const char * specialfile, int noresume)
unsigned long scratch_page = 0;
int error;
- resume_device = to_kdev_t(name_to_dev_t(specialfile));
+ resume_device = name_to_dev_t(specialfile);
scratch_page = get_zeroed_page(GFP_ATOMIC);
cur = (void *) scratch_page;
if (cur) {
struct block_device *bdev;
printk("Resuming from device %s\n", __bdevname(resume_device));
- bdev = bdget(kdev_t_to_nr(resume_device));
+ bdev = bdget(resume_device);
if (!bdev) {
printk("No such block device ?!\n");
BUG();
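The suspend changes are straightforward fallout of the block layer moving away from kdev_t: name_to_dev_t() already yields a dev_t, and bdget() takes one directly, so the to_kdev_t()/kdev_t_to_nr() round trip disappears. Decomposing the number still uses the usual helpers, as in this small sketch:

#include <linux/types.h>
#include <linux/kdev_t.h>	/* MAJOR(), MINOR() */
#include <linux/kernel.h>

static void report_resume_device(dev_t dev)
{
	/* dev_t is used as-is now; no kdev_t wrapping or unwrapping. */
	printk("resume device is %u:%u\n", MAJOR(dev), MINOR(dev));
}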
diff --git a/kernel/sys.c b/kernel/sys.c
index 22c1c93ec6da..5997c8b85e6e 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -206,6 +206,8 @@ cond_syscall(sys_acct)
cond_syscall(sys_lookup_dcookie)
cond_syscall(sys_swapon)
cond_syscall(sys_swapoff)
+cond_syscall(sys_init_module)
+cond_syscall(sys_delete_module)
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
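With module.c now built only when CONFIG_MODULES is set, the two module syscalls need fallbacks for modeless kernels, which is what the new cond_syscall() lines provide: a default definition that returns -ENOSYS unless the real one is linked in. The weak-symbol sketch below shows the general idea only; the actual cond_syscall() macro in kernel/sys.c may be implemented differently.

#include <linux/errno.h>
#include <linux/linkage.h>

/* Sketch of the fallback idea: a weak definition that loses to the real
 * sys_init_module() when kernel/module.o is part of the link. */
asmlinkage long __attribute__((weak))
sys_init_module(void *umod, unsigned long len, const char *uargs)
{
	return -ENOSYS;
}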