diff options
 fs/buffer.c         | 9
 kernel/exit.c       | 2
 kernel/fork.c       | 2
 kernel/timer.c      | 9
 mm/page-writeback.c | 10
 mm/rmap.c           | 13
 mm/swap.c           | 15
 net/ipv4/icmp.c     | 2
8 files changed, 27 insertions, 35 deletions
diff --git a/fs/buffer.c b/fs/buffer.c index e2a89caf51fd..56c9f4e03bdd 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -1337,7 +1337,7 @@ static void bh_lru_install(struct buffer_head *bh) check_irqs_on(); bh_lru_lock(); - lru = &per_cpu(bh_lrus, smp_processor_id()); + lru = &__get_cpu_var(bh_lrus); if (lru->bhs[0] != bh) { struct buffer_head *bhs[BH_LRU_SIZE]; int in; @@ -1381,7 +1381,7 @@ lookup_bh_lru(struct block_device *bdev, sector_t block, int size) check_irqs_on(); bh_lru_lock(); - lru = &per_cpu(bh_lrus, smp_processor_id()); + lru = &__get_cpu_var(bh_lrus); for (i = 0; i < BH_LRU_SIZE; i++) { struct buffer_head *bh = lru->bhs[i]; @@ -1474,15 +1474,14 @@ EXPORT_SYMBOL(__bread); */ static void invalidate_bh_lru(void *arg) { - const int cpu = get_cpu(); - struct bh_lru *b = &per_cpu(bh_lrus, cpu); + struct bh_lru *b = &get_cpu_var(bh_lrus); int i; for (i = 0; i < BH_LRU_SIZE; i++) { brelse(b->bhs[i]); b->bhs[i] = NULL; } - put_cpu(); + put_cpu_var(bh_lrus); } static void invalidate_bh_lrus(void) diff --git a/kernel/exit.c b/kernel/exit.c index 7dee095b31bd..367854d246ef 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -41,7 +41,7 @@ static void __unhash_process(struct task_struct *p) detach_pid(p, PIDTYPE_PGID); detach_pid(p, PIDTYPE_SID); if (p->pid) - per_cpu(process_counts, smp_processor_id())--; + __get_cpu_var(process_counts)--; } REMOVE_LINKS(p); diff --git a/kernel/fork.c b/kernel/fork.c index 5ef2dca02354..2abbc9c2da23 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1006,7 +1006,7 @@ struct task_struct *copy_process(unsigned long clone_flags, attach_pid(p, PIDTYPE_PGID, p->pgrp); attach_pid(p, PIDTYPE_SID, p->session); if (p->pid) - per_cpu(process_counts, smp_processor_id())++; + __get_cpu_var(process_counts)++; } else link_pid(p, p->pids + PIDTYPE_TGID, &p->group_leader->pids[PIDTYPE_TGID].pid); diff --git a/kernel/timer.c b/kernel/timer.c index ad3758c663d4..7bce7a7cb2c2 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -156,8 +156,7 @@ static 
void internal_add_timer(tvec_base_t *base, struct timer_list *timer) */ void add_timer(struct timer_list *timer) { - int cpu = get_cpu(); - tvec_base_t *base = &per_cpu(tvec_bases, cpu); + tvec_base_t *base = &get_cpu_var(tvec_bases); unsigned long flags; BUG_ON(timer_pending(timer) || !timer->function); @@ -168,7 +167,7 @@ void add_timer(struct timer_list *timer) internal_add_timer(base, timer); timer->base = base; spin_unlock_irqrestore(&base->lock, flags); - put_cpu(); + put_cpu_var(tvec_bases); } /*** @@ -231,7 +230,7 @@ int mod_timer(struct timer_list *timer, unsigned long expires) return 1; spin_lock_irqsave(&timer->lock, flags); - new_base = &per_cpu(tvec_bases, smp_processor_id()); + new_base = &__get_cpu_var(tvec_bases); repeat: old_base = timer->base; @@ -789,7 +788,7 @@ seqlock_t xtime_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED; */ static void run_timer_softirq(struct softirq_action *h) { - tvec_base_t *base = &per_cpu(tvec_bases, smp_processor_id()); + tvec_base_t *base = &__get_cpu_var(tvec_bases); if (time_after_eq(jiffies, base->timer_jiffies)) __run_timers(base); diff --git a/mm/page-writeback.c b/mm/page-writeback.c index db2e96ea0fdf..1bdfbac68637 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -213,21 +213,19 @@ void balance_dirty_pages(struct address_space *mapping) void balance_dirty_pages_ratelimited(struct address_space *mapping) { static DEFINE_PER_CPU(int, ratelimits) = 0; - int cpu; long ratelimit; ratelimit = ratelimit_pages; if (dirty_exceeded) ratelimit = 8; - cpu = get_cpu(); - if (per_cpu(ratelimits, cpu)++ >= ratelimit) { - per_cpu(ratelimits, cpu) = 0; - put_cpu(); + if (get_cpu_var(ratelimits)++ >= ratelimit) { + __get_cpu_var(ratelimits) = 0; + put_cpu_var(ratelimits); balance_dirty_pages(mapping); return; } - put_cpu(); + put_cpu_var(ratelimits); } /* diff --git a/mm/rmap.c b/mm/rmap.c index a6040d1e3f89..06377cb9c907 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -477,16 +477,15 @@ DEFINE_PER_CPU(struct 
pte_chain *, local_pte_chain) = 0; */ void __pte_chain_free(struct pte_chain *pte_chain) { - int cpu = get_cpu(); struct pte_chain **pte_chainp; + pte_chainp = &get_cpu_var(local_pte_chain); if (pte_chain->next_and_idx) pte_chain->next_and_idx = 0; - pte_chainp = &per_cpu(local_pte_chain, cpu); if (*pte_chainp) kmem_cache_free(pte_chain_cache, *pte_chainp); *pte_chainp = pte_chain; - put_cpu(); + put_cpu_var(local_pte_chain); } /* @@ -501,21 +500,19 @@ void __pte_chain_free(struct pte_chain *pte_chain) */ struct pte_chain *pte_chain_alloc(int gfp_flags) { - int cpu; struct pte_chain *ret; struct pte_chain **pte_chainp; if (gfp_flags & __GFP_WAIT) might_sleep(); - cpu = get_cpu(); - pte_chainp = &per_cpu(local_pte_chain, cpu); + pte_chainp = &get_cpu_var(local_pte_chain); if (*pte_chainp) { ret = *pte_chainp; *pte_chainp = NULL; - put_cpu(); + put_cpu_var(local_pte_chain); } else { - put_cpu(); + put_cpu_var(local_pte_chain); ret = kmem_cache_alloc(pte_chain_cache, gfp_flags); } return ret; diff --git a/mm/swap.c b/mm/swap.c index f6442275cda5..5818b0a5a72d 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -112,35 +112,34 @@ static DEFINE_PER_CPU(struct pagevec, lru_add_active_pvecs) = { 0, }; void lru_cache_add(struct page *page) { - struct pagevec *pvec = &per_cpu(lru_add_pvecs, get_cpu()); + struct pagevec *pvec = &get_cpu_var(lru_add_pvecs); page_cache_get(page); if (!pagevec_add(pvec, page)) __pagevec_lru_add(pvec); - put_cpu(); + put_cpu_var(lru_add_pvecs); } void lru_cache_add_active(struct page *page) { - struct pagevec *pvec = &per_cpu(lru_add_active_pvecs, get_cpu()); + struct pagevec *pvec = &get_cpu_var(lru_add_active_pvecs); page_cache_get(page); if (!pagevec_add(pvec, page)) __pagevec_lru_add_active(pvec); - put_cpu(); + put_cpu_var(lru_add_active_pvecs); } void lru_add_drain(void) { - int cpu = get_cpu(); - struct pagevec *pvec = &per_cpu(lru_add_pvecs, cpu); + struct pagevec *pvec = &get_cpu_var(lru_add_pvecs); if (pagevec_count(pvec)) 
__pagevec_lru_add(pvec); - pvec = &per_cpu(lru_add_active_pvecs, cpu); + pvec = &__get_cpu_var(lru_add_active_pvecs); if (pagevec_count(pvec)) __pagevec_lru_add_active(pvec); - put_cpu(); + put_cpu_var(lru_add_pvecs); } /* diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 42155224b6e7..c9f456eaa745 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -228,7 +228,7 @@ static struct icmp_control icmp_pointers[NR_ICMP_TYPES+1]; * On SMP we have one ICMP socket per-cpu. */ static DEFINE_PER_CPU(struct socket *, __icmp_socket) = NULL; -#define icmp_socket per_cpu(__icmp_socket, smp_processor_id()) +#define icmp_socket __get_cpu_var(__icmp_socket) static __inline__ void icmp_xmit_lock(void) { |
