| field | value | date |
|---|---|---|
| author | Andrew Morton <akpm@osdl.org> | 2004-03-06 08:48:11 -0800 |
| committer | Jaroslav Kysela <perex@suse.cz> | 2004-03-06 08:48:11 -0800 |
| commit | 20e39386f528478e1da126050db7ec739d760c1b | |
| tree | e0b44fa5365f9a6b38619b00755c7b369acba75f | |
| parent | 38ace63203dddd7a039c530c4f34e4591168ed0f | |
[PATCH] fastcall / regparm fixes
From: Gerd Knorr <kraxel@suse.de>
Current versions of gcc error out if a function's declaration and definition disagree about the register-passing convention.
The patch adds a new `fastcall' declaration primitive and uses it in all the FASTCALL functions we could find. A number of inconsistencies were fixed up along the way.
36 files changed, 118 insertions(+), 115 deletions(-)
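For reference, the mechanism the patch standardizes is gcc's `regparm(3)` attribute on i386: `FASTCALL(x)` wraps a prototype, and the new `fastcall` keyword marks the corresponding definition, so declaration and definition agree that the first three arguments are passed in registers. Below is a minimal sketch of that pattern using the macro definitions added in include/asm-i386/linkage.h; the `do_thing()` function is hypothetical and exists only to illustrate usage.

```c
/* Macro definitions as added by this patch (include/asm-i386/linkage.h). */
#define FASTCALL(x)	x __attribute__((regparm(3)))	/* wraps a whole prototype */
#define fastcall	__attribute__((regparm(3)))	/* annotates a definition */

/* Hypothetical example function, used only to show the pattern. */
/* Declaration, typically in a header: up to three args arrive in registers. */
extern int FASTCALL(do_thing(int a, int b));

/*
 * The definition must use the same calling convention.  Writing it with the
 * bare `fastcall' keyword keeps it in sync; leaving the attribute off here is
 * exactly the declaration/definition mismatch that current gcc rejects.
 */
int fastcall do_thing(int a, int b)
{
	return a + b;
}
```

On architectures that do not provide their own definitions, include/linux/linkage.h falls back to `#define FASTCALL(x) x` and an empty `fastcall`, so the annotations compile away to the normal calling convention.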
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c index 7aea2c8f5a44..b862de291977 100644 --- a/arch/i386/kernel/process.c +++ b/arch/i386/kernel/process.c @@ -493,7 +493,7 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs) * the task-switch, and shows up in ret_from_fork in entry.S, * for example. */ -struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p) +struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p) { struct thread_struct *prev = &prev_p->thread, *next = &next_p->thread; diff --git a/arch/i386/kernel/signal.c b/arch/i386/kernel/signal.c index edd844bdb0c1..ff0cf6addcd1 100644 --- a/arch/i386/kernel/signal.c +++ b/arch/i386/kernel/signal.c @@ -551,7 +551,7 @@ handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset, * want to handle. Thus you cannot kill init even with a SIGKILL even by * mistake. */ -int do_signal(struct pt_regs *regs, sigset_t *oldset) +int fastcall do_signal(struct pt_regs *regs, sigset_t *oldset) { siginfo_t info; int signr; diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c index 54e7082c7c29..9f7987a6cc8d 100644 --- a/arch/i386/kernel/smp.c +++ b/arch/i386/kernel/smp.c @@ -150,7 +150,7 @@ inline void __send_IPI_shortcut(unsigned int shortcut, int vector) apic_write_around(APIC_ICR, cfg); } -void send_IPI_self(int vector) +void fastcall send_IPI_self(int vector) { __send_IPI_shortcut(APIC_DEST_SELF, vector); } diff --git a/arch/i386/kernel/vm86.c b/arch/i386/kernel/vm86.c index 3fb0b829b3f5..a5af5ec33039 100644 --- a/arch/i386/kernel/vm86.c +++ b/arch/i386/kernel/vm86.c @@ -95,7 +95,7 @@ #define VM86_REGS_SIZE2 (sizeof(struct kernel_vm86_regs) - VM86_REGS_SIZE1) struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs)); -struct pt_regs * save_v86_state(struct kernel_vm86_regs * regs) +struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs) { struct tss_struct *tss; struct pt_regs *ret; diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c index a525174f11f0..41fc5f9df013 100644 --- a/drivers/net/ns83820.c +++ b/drivers/net/ns83820.c @@ -598,7 +598,7 @@ static inline int rx_refill(struct net_device *ndev, int gfp) } static void FASTCALL(rx_refill_atomic(struct net_device *ndev)); -static void rx_refill_atomic(struct net_device *ndev) +static void fastcall rx_refill_atomic(struct net_device *ndev) { rx_refill(ndev, GFP_ATOMIC); } @@ -620,7 +620,7 @@ static inline void clear_rx_desc(struct ns83820 *dev, unsigned i) } static void FASTCALL(phy_intr(struct net_device *ndev)); -static void phy_intr(struct net_device *ndev) +static void fastcall phy_intr(struct net_device *ndev) { struct ns83820 *dev = PRIV(ndev); static char *speeds[] = { "10", "100", "1000", "1000(?)", "1000F" }; @@ -807,7 +807,7 @@ static void ns83820_cleanup_rx(struct ns83820 *dev) } static void FASTCALL(ns83820_rx_kick(struct net_device *ndev)); -static void ns83820_rx_kick(struct net_device *ndev) +static void fastcall ns83820_rx_kick(struct net_device *ndev) { struct ns83820 *dev = PRIV(ndev); /*if (nr_rx_empty(dev) >= NR_RX_DESC/4)*/ { @@ -829,7 +829,7 @@ static void ns83820_rx_kick(struct net_device *ndev) * */ static void FASTCALL(rx_irq(struct net_device *ndev)); -static void rx_irq(struct net_device *ndev) +static void fastcall rx_irq(struct net_device *ndev) { struct ns83820 *dev = PRIV(ndev); struct rx_info *info = &dev->rx_info; @@ -312,7 +312,7 @@ void wait_for_all_aios(struct kioctx *ctx) /* wait_on_sync_kiocb: * 
Waits on the given sync kiocb to complete. */ -ssize_t wait_on_sync_kiocb(struct kiocb *iocb) +ssize_t fastcall wait_on_sync_kiocb(struct kiocb *iocb) { while (iocb->ki_users) { set_current_state(TASK_UNINTERRUPTIBLE); @@ -331,7 +331,7 @@ ssize_t wait_on_sync_kiocb(struct kiocb *iocb) * go away, they will call put_ioctx and release any pinned memory * associated with the request (held via struct page * references). */ -void exit_aio(struct mm_struct *mm) +void fastcall exit_aio(struct mm_struct *mm) { struct kioctx *ctx = mm->ioctx_list; mm->ioctx_list = NULL; @@ -356,7 +356,7 @@ void exit_aio(struct mm_struct *mm) * Called when the last user of an aio context has gone away, * and the struct needs to be freed. */ -void __put_ioctx(struct kioctx *ctx) +void fastcall __put_ioctx(struct kioctx *ctx) { unsigned nr_events = ctx->max_reqs; @@ -383,7 +383,7 @@ void __put_ioctx(struct kioctx *ctx) * req (after submitting it) and aio_complete() freeing the req. */ static struct kiocb *FASTCALL(__aio_get_req(struct kioctx *ctx)); -static struct kiocb *__aio_get_req(struct kioctx *ctx) +static struct kiocb fastcall *__aio_get_req(struct kioctx *ctx) { struct kiocb *req = NULL; struct aio_ring *ring; @@ -509,7 +509,7 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req) * Returns true if this put was the last user of the kiocb, * false if the request is still in use. */ -int aio_put_req(struct kiocb *req) +int fastcall aio_put_req(struct kiocb *req) { struct kioctx *ctx = req->ki_ctx; int ret; @@ -596,7 +596,7 @@ static void aio_kick_handler(void *data) unuse_mm(ctx->mm); } -void kick_iocb(struct kiocb *iocb) +void fastcall kick_iocb(struct kiocb *iocb) { struct kioctx *ctx = iocb->ki_ctx; @@ -622,7 +622,7 @@ void kick_iocb(struct kiocb *iocb) * Returns true if this is the last user of the request. The * only other user of the request can be the cancellation code. */ -int aio_complete(struct kiocb *iocb, long res, long res2) +int fastcall aio_complete(struct kiocb *iocb, long res, long res2) { struct kioctx *ctx = iocb->ki_ctx; struct aio_ring_info *info; @@ -985,7 +985,7 @@ asmlinkage long sys_io_destroy(aio_context_t ctx) return -EINVAL; } -int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, +int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, struct iocb *iocb) { struct kiocb *req; diff --git a/fs/buffer.c b/fs/buffer.c index d46f1daf7620..9ac73dcf0bd3 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -97,7 +97,7 @@ void wake_up_buffer(struct buffer_head *bh) } EXPORT_SYMBOL(wake_up_buffer); -void unlock_buffer(struct buffer_head *bh) +void fastcall unlock_buffer(struct buffer_head *bh) { /* * unlock_buffer against a zero-count bh is a bug, if the page @@ -1256,7 +1256,7 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size) * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock, * mapping->page_lock and the global inode_lock. 
*/ -void mark_buffer_dirty(struct buffer_head *bh) +void fastcall mark_buffer_dirty(struct buffer_head *bh) { if (!buffer_uptodate(bh)) buffer_error(); diff --git a/fs/fcntl.c b/fs/fcntl.c index 7e824087e71f..f1eb8bb7da04 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -19,7 +19,7 @@ #include <asm/siginfo.h> #include <asm/uaccess.h> -void set_close_on_exec(unsigned int fd, int flag) +void fastcall set_close_on_exec(unsigned int fd, int flag) { struct files_struct *files = current->files; spin_lock(&files->file_lock); diff --git a/fs/file_table.c b/fs/file_table.c index 6af997fc8e01..339f7dce4a8e 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -152,7 +152,7 @@ void close_private_file(struct file *file) EXPORT_SYMBOL(close_private_file); -void fput(struct file *file) +void fastcall fput(struct file *file) { if (atomic_dec_and_test(&file->f_count)) __fput(file); @@ -163,7 +163,7 @@ EXPORT_SYMBOL(fput); /* __fput is called from task context when aio completion releases the last * last use of a struct file *. Do not use otherwise. */ -void __fput(struct file *file) +void fastcall __fput(struct file *file) { struct dentry *dentry = file->f_dentry; struct vfsmount *mnt = file->f_vfsmnt; @@ -192,7 +192,7 @@ void __fput(struct file *file) mntput(mnt); } -struct file *fget(unsigned int fd) +struct file fastcall *fget(unsigned int fd) { struct file *file; struct files_struct *files = current->files; @@ -214,7 +214,7 @@ EXPORT_SYMBOL(fget); * and a flag is returned to be passed to the corresponding fput_light(). * There must not be a cloning between an fget_light/fput_light pair. */ -struct file *fget_light(unsigned int fd, int *fput_needed) +struct file fastcall *fget_light(unsigned int fd, int *fput_needed) { struct file *file; struct files_struct *files = current->files; diff --git a/fs/namei.c b/fs/namei.c index bddfed612696..28c2be94ac6b 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -571,7 +571,7 @@ fail: * * We expect 'base' to be positive and a directory. */ -int link_path_walk(const char * name, struct nameidata *nd) +int fastcall link_path_walk(const char * name, struct nameidata *nd) { struct path next; struct inode *inode; @@ -771,7 +771,7 @@ return_err: return err; } -int path_walk(const char * name, struct nameidata *nd) +int fastcall path_walk(const char * name, struct nameidata *nd) { current->total_link_count = 0; return link_path_walk(name, nd); @@ -858,7 +858,7 @@ walk_init_root(const char *name, struct nameidata *nd) return 1; } -int path_lookup(const char *name, unsigned int flags, struct nameidata *nd) +int fastcall path_lookup(const char *name, unsigned int flags, struct nameidata *nd) { nd->last_type = LAST_ROOT; /* if there are only slashes... */ nd->flags = flags; @@ -971,7 +971,7 @@ access: * that namei follows links, while lnamei does not. * SMP-safe */ -int __user_walk(const char __user *name, unsigned flags, struct nameidata *nd) +int fastcall __user_walk(const char __user *name, unsigned flags, struct nameidata *nd) { char *tmp = getname(name); int err = PTR_ERR(tmp); diff --git a/fs/open.c b/fs/open.c index e2adcbcb99b1..f37efc83c6fc 100644 --- a/fs/open.c +++ b/fs/open.c @@ -890,7 +890,7 @@ static inline void __put_unused_fd(struct files_struct *files, unsigned int fd) files->next_fd = fd; } -void put_unused_fd(unsigned int fd) +void fastcall put_unused_fd(unsigned int fd) { struct files_struct *files = current->files; spin_lock(&files->file_lock); @@ -913,7 +913,7 @@ EXPORT_SYMBOL(put_unused_fd); * will follow. 
*/ -void fd_install(unsigned int fd, struct file * file) +void fastcall fd_install(unsigned int fd, struct file * file) { struct files_struct *files = current->files; spin_lock(&files->file_lock); diff --git a/include/asm-i386/linkage.h b/include/asm-i386/linkage.h index 9bca2cdf3c72..e48009fd93c7 100644 --- a/include/asm-i386/linkage.h +++ b/include/asm-i386/linkage.h @@ -3,6 +3,7 @@ #define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0))) #define FASTCALL(x) x __attribute__((regparm(3))) +#define fastcall __attribute__((regparm(3))) #ifdef CONFIG_X86_ALIGNMENT_16 #define __ALIGN .align 16,0x90 diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h index edbf16ce86ab..268dabd46653 100644 --- a/include/asm-i386/smp.h +++ b/include/asm-i386/smp.h @@ -38,7 +38,6 @@ extern int cpu_sibling_map[]; extern void smp_flush_tlb(void); extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs); -extern void smp_send_reschedule(int cpu); extern void smp_invalidate_rcv(void); /* Process an NMI */ extern void (*mtrr_hook) (void); extern void zap_low_mappings (void); diff --git a/include/asm-um/linkage.h b/include/asm-um/linkage.h index 1504efe84e24..27011652b015 100644 --- a/include/asm-um/linkage.h +++ b/include/asm-um/linkage.h @@ -2,5 +2,6 @@ #define __ASM_LINKAGE_H #define FASTCALL(x) x __attribute__((regparm(3))) +#define fastcall __attribute__((regparm(3))) #endif diff --git a/include/linux/linkage.h b/include/linux/linkage.h index f85642639950..09955c0ce848 100644 --- a/include/linux/linkage.h +++ b/include/linux/linkage.h @@ -37,6 +37,7 @@ #ifndef FASTCALL #define FASTCALL(x) x +#define fastcall #endif #endif diff --git a/include/linux/sched.h b/include/linux/sched.h index 9dd6606bb9cc..72d6edfe401a 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -602,7 +602,7 @@ extern void do_timer(struct pt_regs *); extern int FASTCALL(wake_up_state(struct task_struct * tsk, unsigned int state)); extern int FASTCALL(wake_up_process(struct task_struct * tsk)); #ifdef CONFIG_SMP - extern void FASTCALL(kick_process(struct task_struct * tsk)); + extern void kick_process(struct task_struct *tsk); #else static inline void kick_process(struct task_struct *tsk) { } #endif diff --git a/include/linux/smp.h b/include/linux/smp.h index 9f7feff3ee9e..312ec58a4025 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -30,7 +30,7 @@ extern void smp_send_stop(void); /* * sends a 'reschedule' event to another CPU: */ -extern void FASTCALL(smp_send_reschedule(int cpu)); +extern void smp_send_reschedule(int cpu); /* diff --git a/kernel/exit.c b/kernel/exit.c index bbde654c70a7..d4d6d707c6d6 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -386,7 +386,7 @@ static inline void close_files(struct files_struct * files) } } -void put_files_struct(struct files_struct *files) +void fastcall put_files_struct(struct files_struct *files) { if (atomic_dec_and_test(&files->count)) { close_files(files); @@ -810,7 +810,7 @@ asmlinkage long sys_exit(int error_code) do_exit((error_code&0xff)<<8); } -task_t *next_thread(task_t *p) +task_t fastcall *next_thread(task_t *p) { struct pid_link *link = p->pids + PIDTYPE_TGID; struct list_head *tmp, *head = &link->pidptr->task_list; diff --git a/kernel/fork.c b/kernel/fork.c index c0c180375a84..1c334ebca506 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -91,7 +91,7 @@ void __put_task_struct(struct task_struct *tsk) free_task(tsk); } -void add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait) +void fastcall add_wait_queue(wait_queue_head_t *q, 
wait_queue_t * wait) { unsigned long flags; @@ -103,7 +103,7 @@ void add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait) EXPORT_SYMBOL(add_wait_queue); -void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait) +void fastcall add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait) { unsigned long flags; @@ -115,7 +115,7 @@ void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait) EXPORT_SYMBOL(add_wait_queue_exclusive); -void remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait) +void fastcall remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait) { unsigned long flags; @@ -139,7 +139,7 @@ EXPORT_SYMBOL(remove_wait_queue); * stops them from bleeding out - it would still allow subsequent * loads to move into the the critical region). */ -void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state) +void fastcall prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state) { unsigned long flags; @@ -153,7 +153,7 @@ void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state) EXPORT_SYMBOL(prepare_to_wait); -void +void fastcall prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state) { unsigned long flags; @@ -168,7 +168,7 @@ prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state) EXPORT_SYMBOL(prepare_to_wait_exclusive); -void finish_wait(wait_queue_head_t *q, wait_queue_t *wait) +void fastcall finish_wait(wait_queue_head_t *q, wait_queue_t *wait) { unsigned long flags; @@ -418,7 +418,7 @@ struct mm_struct * mm_alloc(void) * is dropped: either by a lazy thread or by * mmput. Free the page directory and the mm. */ -void __mmdrop(struct mm_struct *mm) +void fastcall __mmdrop(struct mm_struct *mm) { BUG_ON(mm == &init_mm); mm_free_pgd(mm); diff --git a/kernel/pid.c b/kernel/pid.c index 4bb0bc0159d4..4c85144759c5 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -57,7 +57,7 @@ static pidmap_t *map_limit = pidmap_array + PIDMAP_ENTRIES; static spinlock_t pidmap_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED; -inline void free_pidmap(int pid) +fastcall void free_pidmap(int pid) { pidmap_t *map = pidmap_array + pid / BITS_PER_PAGE; int offset = pid & BITS_PER_PAGE_MASK; @@ -146,7 +146,7 @@ failure: return -1; } -inline struct pid *find_pid(enum pid_type type, int nr) +fastcall struct pid *find_pid(enum pid_type type, int nr) { struct list_head *elem, *bucket = &pid_hash[type][pid_hashfn(nr)]; struct pid *pid; @@ -159,14 +159,14 @@ inline struct pid *find_pid(enum pid_type type, int nr) return NULL; } -void link_pid(task_t *task, struct pid_link *link, struct pid *pid) +void fastcall link_pid(task_t *task, struct pid_link *link, struct pid *pid) { atomic_inc(&pid->count); list_add_tail(&link->pid_chain, &pid->task_list); link->pidptr = pid; } -int attach_pid(task_t *task, enum pid_type type, int nr) +int fastcall attach_pid(task_t *task, enum pid_type type, int nr) { struct pid *pid = find_pid(type, nr); @@ -209,7 +209,7 @@ static void _detach_pid(task_t *task, enum pid_type type) __detach_pid(task, type); } -void detach_pid(task_t *task, enum pid_type type) +void fastcall detach_pid(task_t *task, enum pid_type type) { int nr = __detach_pid(task, type); diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index 58bf4ca820a1..a4011dc46b6c 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c @@ -66,7 +66,7 @@ static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL}; * The read-side of critical section that use call_rcu() for updation must * be 
protected by rcu_read_lock()/rcu_read_unlock(). */ -void call_rcu(struct rcu_head *head, void (*func)(void *arg), void *arg) +void fastcall call_rcu(struct rcu_head *head, void (*func)(void *arg), void *arg) { int cpu; unsigned long flags; diff --git a/kernel/sched.c b/kernel/sched.c index 4a97717f748f..4f6fbe7d0b95 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -33,6 +33,7 @@ #include <linux/suspend.h> #include <linux/blkdev.h> #include <linux/delay.h> +#include <linux/smp.h> #include <linux/timer.h> #include <linux/rcupdate.h> #include <linux/cpu.h> @@ -700,7 +701,7 @@ repeat_lock_task: return success; } -int wake_up_process(task_t * p) +int fastcall wake_up_process(task_t * p) { return try_to_wake_up(p, TASK_STOPPED | TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0); @@ -708,7 +709,7 @@ int wake_up_process(task_t * p) EXPORT_SYMBOL(wake_up_process); -int wake_up_state(task_t *p, unsigned int state) +int fastcall wake_up_state(task_t *p, unsigned int state) { return try_to_wake_up(p, state, 0); } @@ -717,7 +718,7 @@ int wake_up_state(task_t *p, unsigned int state) * Perform scheduler related setup for a newly forked process p. * p is forked by current. */ -void sched_fork(task_t *p) +void fastcall sched_fork(task_t *p) { /* * We mark the process as running here, but have not actually @@ -773,7 +774,7 @@ void sched_fork(task_t *p) * This function will do some initial scheduler statistics housekeeping * that must be done for every newly created process. */ -void wake_up_forked_process(task_t * p) +void fastcall wake_up_forked_process(task_t * p) { unsigned long flags; runqueue_t *rq = task_rq_lock(current, &flags); @@ -817,7 +818,7 @@ void wake_up_forked_process(task_t * p) * artificially, because any timeslice recovered here * was given away by the parent in the first place.) */ -void sched_exit(task_t * p) +void fastcall sched_exit(task_t * p) { unsigned long flags; runqueue_t *rq; @@ -1796,7 +1797,7 @@ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, * @mode: which threads * @nr_exclusive: how many wake-one or wake-many threads to wake up */ -void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) +void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) { unsigned long flags; @@ -1810,7 +1811,7 @@ EXPORT_SYMBOL(__wake_up); /* * Same as __wake_up but called with the spinlock in wait_queue_head_t held. */ -void __wake_up_locked(wait_queue_head_t *q, unsigned int mode) +void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode) { __wake_up_common(q, mode, 1, 0); } @@ -1828,7 +1829,7 @@ void __wake_up_locked(wait_queue_head_t *q, unsigned int mode) * * On UP it can prevent extra preemption. 
*/ -void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) +void fastcall __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) { unsigned long flags; @@ -1845,7 +1846,7 @@ void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */ -void complete(struct completion *x) +void fastcall complete(struct completion *x) { unsigned long flags; @@ -1858,7 +1859,7 @@ void complete(struct completion *x) EXPORT_SYMBOL(complete); -void complete_all(struct completion *x) +void fastcall complete_all(struct completion *x) { unsigned long flags; @@ -1869,7 +1870,7 @@ void complete_all(struct completion *x) spin_unlock_irqrestore(&x->wait.lock, flags); } -void wait_for_completion(struct completion *x) +void fastcall wait_for_completion(struct completion *x) { might_sleep(); spin_lock_irq(&x->wait.lock); @@ -1907,7 +1908,7 @@ EXPORT_SYMBOL(wait_for_completion); __remove_wait_queue(q, &wait); \ spin_unlock_irqrestore(&q->lock, flags); -void interruptible_sleep_on(wait_queue_head_t *q) +void fastcall interruptible_sleep_on(wait_queue_head_t *q) { SLEEP_ON_VAR @@ -1920,7 +1921,7 @@ void interruptible_sleep_on(wait_queue_head_t *q) EXPORT_SYMBOL(interruptible_sleep_on); -long interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout) +long fastcall interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout) { SLEEP_ON_VAR @@ -1935,7 +1936,7 @@ long interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout) EXPORT_SYMBOL(interruptible_sleep_on_timeout); -void sleep_on(wait_queue_head_t *q) +void fastcall sleep_on(wait_queue_head_t *q) { SLEEP_ON_VAR @@ -1948,7 +1949,7 @@ void sleep_on(wait_queue_head_t *q) EXPORT_SYMBOL(sleep_on); -long sleep_on_timeout(wait_queue_head_t *q, long timeout) +long fastcall sleep_on_timeout(wait_queue_head_t *q, long timeout) { SLEEP_ON_VAR diff --git a/kernel/signal.c b/kernel/signal.c index 7cbb9c60dd00..77a96ad541f6 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -213,7 +213,7 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked) #define PENDING(p,b) has_pending_signals(&(p)->signal, (b)) -inline void recalc_sigpending_tsk(struct task_struct *t) +fastcall void recalc_sigpending_tsk(struct task_struct *t) { if (t->signal->group_stop_count > 0 || PENDING(&t->pending, &t->blocked) || diff --git a/kernel/softirq.c b/kernel/softirq.c index 386ddad307ea..7e350a660b56 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -130,7 +130,7 @@ EXPORT_SYMBOL(local_bh_enable); /* * This function must run with irqs disabled! 
*/ -inline void raise_softirq_irqoff(unsigned int nr) +inline fastcall void raise_softirq_irqoff(unsigned int nr) { __raise_softirq_irqoff(nr); @@ -149,7 +149,7 @@ inline void raise_softirq_irqoff(unsigned int nr) EXPORT_SYMBOL(raise_softirq_irqoff); -void raise_softirq(unsigned int nr) +void fastcall raise_softirq(unsigned int nr) { unsigned long flags; @@ -179,7 +179,7 @@ struct tasklet_head static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec) = { NULL }; static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec) = { NULL }; -void __tasklet_schedule(struct tasklet_struct *t) +void fastcall __tasklet_schedule(struct tasklet_struct *t) { unsigned long flags; @@ -192,7 +192,7 @@ void __tasklet_schedule(struct tasklet_struct *t) EXPORT_SYMBOL(__tasklet_schedule); -void __tasklet_hi_schedule(struct tasklet_struct *t) +void fastcall __tasklet_hi_schedule(struct tasklet_struct *t) { unsigned long flags; diff --git a/kernel/timer.c b/kernel/timer.c index f88763c4d820..6f1ca8842250 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -997,7 +997,7 @@ static void process_timeout(unsigned long __data) * * In all cases the return value is guaranteed to be non-negative. */ -signed long schedule_timeout(signed long timeout) +fastcall signed long schedule_timeout(signed long timeout) { struct timer_list timer; unsigned long expire; diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 56f9065c3ec8..eee885ccf539 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -78,7 +78,7 @@ static void __queue_work(struct cpu_workqueue_struct *cwq, * We queue the work to the CPU it was submitted, but there is no * guarantee that it will be processed by that CPU. */ -int queue_work(struct workqueue_struct *wq, struct work_struct *work) +int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work) { int ret = 0, cpu = get_cpu(); @@ -99,7 +99,7 @@ static void delayed_work_timer_fn(unsigned long __data) __queue_work(wq->cpu_wq + smp_processor_id(), work); } -int queue_delayed_work(struct workqueue_struct *wq, +int fastcall queue_delayed_work(struct workqueue_struct *wq, struct work_struct *work, unsigned long delay) { int ret = 0; @@ -203,7 +203,7 @@ static int worker_thread(void *__cwq) * This function used to run the workqueues itself. Now we just wait for the * helper threads to do it. 
*/ -void flush_workqueue(struct workqueue_struct *wq) +void fastcall flush_workqueue(struct workqueue_struct *wq) { struct cpu_workqueue_struct *cwq; int cpu; @@ -310,12 +310,12 @@ void destroy_workqueue(struct workqueue_struct *wq) static struct workqueue_struct *keventd_wq; -int schedule_work(struct work_struct *work) +int fastcall schedule_work(struct work_struct *work) { return queue_work(keventd_wq, work); } -int schedule_delayed_work(struct work_struct *work, unsigned long delay) +int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay) { return queue_delayed_work(keventd_wq, work, delay); } diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c index 8bd3c393b7c7..84c2c4fc58dc 100644 --- a/lib/rwsem-spinlock.c +++ b/lib/rwsem-spinlock.c @@ -29,7 +29,7 @@ void rwsemtrace(struct rw_semaphore *sem, const char *str) /* * initialise the semaphore */ -void init_rwsem(struct rw_semaphore *sem) +void fastcall init_rwsem(struct rw_semaphore *sem) { sem->activity = 0; spin_lock_init(&sem->wait_lock); @@ -117,7 +117,7 @@ static inline struct rw_semaphore *__rwsem_wake_one_writer(struct rw_semaphore * /* * get a read lock on the semaphore */ -void __down_read(struct rw_semaphore *sem) +void fastcall __down_read(struct rw_semaphore *sem) { struct rwsem_waiter waiter; struct task_struct *tsk; @@ -162,7 +162,7 @@ void __down_read(struct rw_semaphore *sem) /* * trylock for reading -- returns 1 if successful, 0 if contention */ -int __down_read_trylock(struct rw_semaphore *sem) +int fastcall __down_read_trylock(struct rw_semaphore *sem) { int ret = 0; rwsemtrace(sem,"Entering __down_read_trylock"); @@ -185,7 +185,7 @@ int __down_read_trylock(struct rw_semaphore *sem) * get a write lock on the semaphore * - note that we increment the waiting count anyway to indicate an exclusive lock */ -void __down_write(struct rw_semaphore *sem) +void fastcall __down_write(struct rw_semaphore *sem) { struct rwsem_waiter waiter; struct task_struct *tsk; @@ -230,7 +230,7 @@ void __down_write(struct rw_semaphore *sem) /* * trylock for writing -- returns 1 if successful, 0 if contention */ -int __down_write_trylock(struct rw_semaphore *sem) +int fastcall __down_write_trylock(struct rw_semaphore *sem) { int ret = 0; rwsemtrace(sem,"Entering __down_write_trylock"); @@ -252,7 +252,7 @@ int __down_write_trylock(struct rw_semaphore *sem) /* * release a read lock on the semaphore */ -void __up_read(struct rw_semaphore *sem) +void fastcall __up_read(struct rw_semaphore *sem) { rwsemtrace(sem,"Entering __up_read"); @@ -269,7 +269,7 @@ void __up_read(struct rw_semaphore *sem) /* * release a write lock on the semaphore */ -void __up_write(struct rw_semaphore *sem) +void fastcall __up_write(struct rw_semaphore *sem) { rwsemtrace(sem,"Entering __up_write"); @@ -288,7 +288,7 @@ void __up_write(struct rw_semaphore *sem) * downgrade a write lock into a read lock * - just wake up any readers at the front of the queue */ -void __downgrade_write(struct rw_semaphore *sem) +void fastcall __downgrade_write(struct rw_semaphore *sem) { rwsemtrace(sem,"Entering __downgrade_write"); diff --git a/lib/rwsem.c b/lib/rwsem.c index 34998acee1b2..95469d7fb796 100644 --- a/lib/rwsem.c +++ b/lib/rwsem.c @@ -162,7 +162,7 @@ static inline struct rw_semaphore *rwsem_down_failed_common(struct rw_semaphore /* * wait for the read lock to be granted */ -struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem) +struct rw_semaphore fastcall *rwsem_down_read_failed(struct rw_semaphore *sem) { struct rwsem_waiter waiter; 
@@ -178,7 +178,7 @@ struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem) /* * wait for the write lock to be granted */ -struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem) +struct rw_semaphore fastcall *rwsem_down_write_failed(struct rw_semaphore *sem) { struct rwsem_waiter waiter; @@ -195,7 +195,7 @@ struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem) * handle waking up a waiter on the semaphore * - up_read has decremented the active part of the count if we come here */ -struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem) +struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem) { rwsemtrace(sem,"Entering rwsem_wake"); @@ -217,7 +217,7 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem) * - caller incremented waiting part of count, and discovered it to be still negative * - just wake up any readers at the front of the queue */ -struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem) +struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem) { rwsemtrace(sem,"Entering rwsem_downgrade_wake"); diff --git a/mm/filemap.c b/mm/filemap.c index 4c457c718065..51175013eb21 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -292,7 +292,7 @@ static wait_queue_head_t *page_waitqueue(struct page *page) return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)]; } -void wait_on_page_bit(struct page *page, int bit_nr) +void fastcall wait_on_page_bit(struct page *page, int bit_nr) { wait_queue_head_t *waitqueue = page_waitqueue(page); DEFINE_WAIT(wait); @@ -324,7 +324,7 @@ EXPORT_SYMBOL(wait_on_page_bit); * the clear_bit and the read of the waitqueue (to avoid SMP races with a * parallel wait_on_page_locked()). */ -void unlock_page(struct page *page) +void fastcall unlock_page(struct page *page) { wait_queue_head_t *waitqueue = page_waitqueue(page); smp_mb__before_clear_bit(); @@ -365,7 +365,7 @@ EXPORT_SYMBOL(end_page_writeback); * chances are that on the second loop, the block layer's plug list is empty, * so sync_page() will then return in state TASK_UNINTERRUPTIBLE. */ -void __lock_page(struct page *page) +void fastcall __lock_page(struct page *page) { wait_queue_head_t *wqh = page_waitqueue(page); DEFINE_WAIT(wait); @@ -953,7 +953,7 @@ asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count) * and schedules an I/O to read in its contents from disk. 
*/ static int FASTCALL(page_cache_read(struct file * file, unsigned long offset)); -static int page_cache_read(struct file * file, unsigned long offset) +static int fastcall page_cache_read(struct file * file, unsigned long offset) { struct address_space *mapping = file->f_mapping; struct page *page; diff --git a/mm/highmem.c b/mm/highmem.c index b7d63e710bdd..9fc1106994ef 100644 --- a/mm/highmem.c +++ b/mm/highmem.c @@ -147,7 +147,7 @@ start: return vaddr; } -void *kmap_high(struct page *page) +void fastcall *kmap_high(struct page *page) { unsigned long vaddr; @@ -170,7 +170,7 @@ void *kmap_high(struct page *page) EXPORT_SYMBOL(kmap_high); -void kunmap_high(struct page *page) +void fastcall kunmap_high(struct page *page) { unsigned long vaddr; unsigned long nr; diff --git a/mm/memory.c b/mm/memory.c index 2bcb04faad18..2540e2dcf7d7 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -145,7 +145,7 @@ void clear_page_tables(struct mmu_gather *tlb, unsigned long first, int nr) } while (--nr); } -pte_t * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address) +pte_t fastcall * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address) { if (!pmd_present(*pmd)) { struct page *new; @@ -171,7 +171,7 @@ out: return pte_offset_map(pmd, address); } -pte_t * pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address) +pte_t fastcall * pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address) { if (!pmd_present(*pmd)) { pte_t *new; @@ -1646,7 +1646,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma, * On a two-level page table, this ends up actually being entirely * optimized away. */ -pmd_t *__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) +pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) { pmd_t *new; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 36f83d9779be..6ac432fb6029 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -443,7 +443,7 @@ void drain_local_pages(void) * Free a 0-order page */ static void FASTCALL(free_hot_cold_page(struct page *page, int cold)); -static void free_hot_cold_page(struct page *page, int cold) +static void fastcall free_hot_cold_page(struct page *page, int cold) { struct zone *zone = page_zone(page); struct per_cpu_pages *pcp; @@ -462,12 +462,12 @@ static void free_hot_cold_page(struct page *page, int cold) put_cpu(); } -void free_hot_page(struct page *page) +void fastcall free_hot_page(struct page *page) { free_hot_cold_page(page, 0); } -void free_cold_page(struct page *page) +void fastcall free_cold_page(struct page *page) { free_hot_cold_page(page, 1); } @@ -532,7 +532,7 @@ static struct page *buffered_rmqueue(struct zone *zone, int order, int cold) * sized machine, GFP_HIGHMEM and GFP_KERNEL requests basically leave the DMA * zone untouched. */ -struct page * +struct page * fastcall __alloc_pages(unsigned int gfp_mask, unsigned int order, struct zonelist *zonelist) { @@ -685,7 +685,7 @@ EXPORT_SYMBOL(__alloc_pages); /* * Common helper functions. 
*/ -unsigned long __get_free_pages(unsigned int gfp_mask, unsigned int order) +fastcall unsigned long __get_free_pages(unsigned int gfp_mask, unsigned int order) { struct page * page; @@ -697,7 +697,7 @@ unsigned long __get_free_pages(unsigned int gfp_mask, unsigned int order) EXPORT_SYMBOL(__get_free_pages); -unsigned long get_zeroed_page(unsigned int gfp_mask) +fastcall unsigned long get_zeroed_page(unsigned int gfp_mask) { struct page * page; @@ -726,7 +726,7 @@ void __pagevec_free(struct pagevec *pvec) free_hot_cold_page(pvec->pages[i], pvec->cold); } -void __free_pages(struct page *page, unsigned int order) +fastcall void __free_pages(struct page *page, unsigned int order) { if (!PageReserved(page) && put_page_testzero(page)) { if (order == 0) @@ -738,7 +738,7 @@ void __free_pages(struct page *page, unsigned int order) EXPORT_SYMBOL(__free_pages); -void free_pages(unsigned long addr, unsigned int order) +fastcall void free_pages(unsigned long addr, unsigned int order) { if (addr != 0) { BUG_ON(!virt_addr_valid(addr)); diff --git a/mm/rmap.c b/mm/rmap.c index 4c8447e8ca89..eb315c2f498f 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -112,7 +112,7 @@ pte_chain_encode(struct pte_chain *pte_chain, int idx) * If the page has a single-entry pte_chain, collapse that back to a PageDirect * representation. This way, it's only done under memory pressure. */ -int page_referenced(struct page * page) +int fastcall page_referenced(struct page * page) { struct pte_chain *pc; int referenced = 0; @@ -165,7 +165,7 @@ int page_referenced(struct page * page) * Add a new pte reverse mapping to a page. * The caller needs to hold the mm->page_table_lock. */ -struct pte_chain * +struct pte_chain * fastcall page_add_rmap(struct page *page, pte_t *ptep, struct pte_chain *pte_chain) { pte_addr_t pte_paddr = ptep_to_paddr(ptep); @@ -221,7 +221,7 @@ out: * the page. * Caller needs to hold the mm->page_table_lock. */ -void page_remove_rmap(struct page *page, pte_t *ptep) +void fastcall page_remove_rmap(struct page *page, pte_t *ptep) { pte_addr_t pte_paddr = ptep_to_paddr(ptep); struct pte_chain *pc; @@ -293,7 +293,7 @@ out_unlock: * mm->page_table_lock try_to_unmap_one(), trylock */ static int FASTCALL(try_to_unmap_one(struct page *, pte_addr_t)); -static int try_to_unmap_one(struct page * page, pte_addr_t paddr) +static int fastcall try_to_unmap_one(struct page * page, pte_addr_t paddr) { pte_t *ptep = rmap_ptep_map(paddr); unsigned long address = ptep_to_address(ptep); @@ -382,7 +382,7 @@ out_unlock: * SWAP_AGAIN - we missed a trylock, try again later * SWAP_FAIL - the page is unswappable */ -int try_to_unmap(struct page * page) +int fastcall try_to_unmap(struct page * page) { struct pte_chain *pc, *next_pc, *start; int ret = SWAP_SUCCESS; diff --git a/mm/slab.c b/mm/slab.c index d6a09da814b0..fa20943653d6 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2134,7 +2134,7 @@ EXPORT_SYMBOL(kmem_cache_alloc); * * Currently only used for dentry validation. */ -int kmem_ptr_validate(kmem_cache_t *cachep, void *ptr) +int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr) { unsigned long addr = (unsigned long) ptr; unsigned long min_addr = PAGE_OFFSET; diff --git a/mm/swap.c b/mm/swap.c index a714334ecb36..2f0cb0a78852 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -76,7 +76,7 @@ int rotate_reclaimable_page(struct page *page) /* * FIXME: speed this up? 
*/ -void activate_page(struct page *page) +void fastcall activate_page(struct page *page) { struct zone *zone = page_zone(page); @@ -97,7 +97,7 @@ void activate_page(struct page *page) * inactive,referenced -> active,unreferenced * active,unreferenced -> active,referenced */ -void mark_page_accessed(struct page *page) +void fastcall mark_page_accessed(struct page *page) { if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) { activate_page(page); @@ -116,7 +116,7 @@ EXPORT_SYMBOL(mark_page_accessed); static DEFINE_PER_CPU(struct pagevec, lru_add_pvecs) = { 0, }; static DEFINE_PER_CPU(struct pagevec, lru_add_active_pvecs) = { 0, }; -void lru_cache_add(struct page *page) +void fastcall lru_cache_add(struct page *page) { struct pagevec *pvec = &get_cpu_var(lru_add_pvecs); @@ -126,7 +126,7 @@ void lru_cache_add(struct page *page) put_cpu_var(lru_add_pvecs); } -void lru_cache_add_active(struct page *page) +void fastcall lru_cache_add_active(struct page *page) { struct pagevec *pvec = &get_cpu_var(lru_add_active_pvecs); @@ -152,7 +152,7 @@ void lru_add_drain(void) * This path almost never happens for VM activity - pages are normally * freed via pagevecs. But it gets used by networking. */ -void __page_cache_release(struct page *page) +void fastcall __page_cache_release(struct page *page) { unsigned long flags; struct zone *zone = page_zone(page); diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index aacfaafc6478..4679a0242b70 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c @@ -409,7 +409,7 @@ int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb) return len; } -void __rfcomm_dlc_throttle(struct rfcomm_dlc *d) +void fastcall __rfcomm_dlc_throttle(struct rfcomm_dlc *d) { BT_DBG("dlc %p state %ld", d, d->state); @@ -420,7 +420,7 @@ void __rfcomm_dlc_throttle(struct rfcomm_dlc *d) rfcomm_schedule(RFCOMM_SCHED_TX); } -void __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d) +void fastcall __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d) { BT_DBG("dlc %p state %ld", d, d->state); |
