| field | value |
|---|---|
| author | Linus Torvalds <torvalds@athlon.transmeta.com>, 2002-02-04 17:50:31 -0800 |
| committer | Linus Torvalds <torvalds@athlon.transmeta.com>, 2002-02-04 17:50:31 -0800 |
| commit | 3192b2dcbe00fdfd6a50be32c8c626cf26b66076 (patch) |
| tree | db7568051c1a846868c8eb0e4c8b2c35e7ae3d70 |
| parent | 43e9282d088b07f03fc16a5325ff74cf49fb2990 (diff) |
v2.4.0.1 -> v2.4.0.2
- ISDN fixes
- VM balancing tuning
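
Most of the patch is the "VM balancing tuning" item: the per-mm `swap_cnt` field (recomputed as `rss >> 4` on every pass) is removed from `mm_struct`, and `swap_out()` instead hands each mm a scan quota from the new `swap_amount()` helper, always penalizing the allocating process first. A stand-alone sketch of that quota calculation, lifted from the mm/vmscan.c hunks below — the `SWAP_SHIFT`/`SWAP_MIN` constants and the `rss` field come from the patch, while the harness around them is illustrative only:

```c
/* Sketch of the new scan-quota accounting from mm/vmscan.c (this patch).
 * SWAP_SHIFT, SWAP_MIN and the rss field are taken from the diff below;
 * the struct and main() are a stand-alone illustration, not kernel code. */
#include <stdio.h>

#define SWAP_SHIFT 5   /* scan roughly rss/32 pages of an mm per pass */
#define SWAP_MIN   8   /* ...but never fewer than 8 pages */

struct mm_like { unsigned long rss; };

static int swap_amount(const struct mm_like *mm)
{
	int nr = mm->rss >> SWAP_SHIFT;
	return nr < SWAP_MIN ? SWAP_MIN : nr;
}

int main(void)
{
	struct mm_like small = { .rss = 100 }, large = { .rss = 100000 };

	/* a small mm is clamped to SWAP_MIN, a large mm scales with its rss */
	printf("small: %d pages/pass, large: %d pages/pass\n",
	       swap_amount(&small), swap_amount(&large));
	return 0;
}
```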
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | Makefile | 2 |
| -rw-r--r-- | arch/i386/Makefile | 2 |
| -rw-r--r-- | drivers/isdn/hisax/md5sums.asc | 12 |
| -rw-r--r-- | drivers/scsi/megaraid.c | 1 |
| -rw-r--r-- | drivers/scsi/ppa.c | 2 |
| -rw-r--r-- | fs/nfs/flushd.c | 14 |
| -rw-r--r-- | include/linux/sched.h | 1 |
| -rw-r--r-- | include/linux/swap.h | 2 |
| -rw-r--r-- | kernel/fork.c | 1 |
| -rw-r--r-- | mm/filemap.c | 11 |
| -rw-r--r-- | mm/page_alloc.c | 46 |
| -rw-r--r-- | mm/slab.c | 2 |
| -rw-r--r-- | mm/vmscan.c | 218 |
| -rw-r--r-- | net/ipv4/igmp.c | 2 |
| -rw-r--r-- | net/ipv4/tcp.c | 2 |
| -rw-r--r-- | net/ipv4/tcp_input.c | 2 |
16 files changed, 122 insertions, 198 deletions
```diff
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 4
 SUBLEVEL = 1
-EXTRAVERSION =-pre1
+EXTRAVERSION =-pre2
 
 KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
diff --git a/arch/i386/Makefile b/arch/i386/Makefile
index 25b523026811..f7941bcbe7d5 100644
--- a/arch/i386/Makefile
+++ b/arch/i386/Makefile
@@ -50,7 +50,7 @@ ifdef CONFIG_M686
 CFLAGS += -march=i686
 endif
 
-ifdef CONFIG_M686FXSR
+ifdef CONFIG_MPENTIUMIII
 CFLAGS += -march=i686
 endif
diff --git a/drivers/isdn/hisax/md5sums.asc b/drivers/isdn/hisax/md5sums.asc
index 4cc1f8b83329..5ffd61db0905 100644
--- a/drivers/isdn/hisax/md5sums.asc
+++ b/drivers/isdn/hisax/md5sums.asc
@@ -10,7 +10,7 @@ ca7bd9bac39203f3074f3f093948cc3c isac.c
 a2ad619fd404b3149099a2984de9d23c isdnl1.c
 d2a78e407f3d94876deac160c6f9aae6 isdnl2.c
-a109841c2e75b11fc8ef2c8718e24c3e isdnl3.c
+e7932ca7ae39c497c17f13a2e1434fcd isdnl3.c
 afb5f2f4ac296d6de45c856993b161e1 tei.c
 00023e2a482cb86a26ea870577ade5d6 callc.c
 a1834e9b2ec068440cff2e899eff4710 cert.c
@@ -25,9 +25,9 @@ a1834e9b2ec068440cff2e899eff4710 cert.c
 Version: 2.6.3i
 Charset: noconv
 
-iQCVAwUBOlMTgDpxHvX/mS9tAQFSbgP/W9y6tnnWHTRLGqyr3EY1OHZiQXERkAAu
-hp+Y8PIoX1GgAh4yZ7xhYwUsk6y0z5USdGuhC9ZHh+oZd57lPsJMnhkEZR5BVsYT
-r7jHwelP527+QCLkVUCHIVIWUW0ANzeZBhDV2vefkFb+gWLiZsBhaHssbcKGsMNG
-Ak4xS1ByqsM=
-=lsIJ
+iQCVAwUBOlxeLTpxHvX/mS9tAQH6RwP8DhyvqAnXFV6WIGi16iQ3vKikkPoqnDQs
+GEn5uCW0dPYKlwthD2Grj/JbMYZhOmCFuDxF7ufJnjTSDe/D8XNe2wngxzAiwcIe
+WjCrT8X95cuP3HZHscbFTEinVV0GAnoI0ZEgs5eBDhVHDqILLYMaTFBQaRH3jgXc
+i5VH88jPfUM=
+=qc+J
 -----END PGP SIGNATURE-----
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 264ae2fe04c6..106e39b8aacc 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -149,7 +149,6 @@
 #include <linux/version.h>
 
 #ifdef MODULE
-#include <linux/modversions.h>
 #include <linux/module.h>
 
 char kernel_version[] = UTS_RELEASE;
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index d82eed0b77df..fc0af0cbffaa 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -222,8 +222,8 @@ int ppa_detect(Scsi_Host_Template * host)
     printk(" supported by the imm (ZIP Plus) driver. If the\n");
     printk(" cable is marked with \"AutoDetect\", this is what has\n");
     printk(" happened.\n");
-    return 0;
     spin_lock_irq(&io_request_lock);
+    return 0;
     }
     try_again = 1;
     goto retry_entry;
diff --git a/fs/nfs/flushd.c b/fs/nfs/flushd.c
index 700504dedac6..c03465c02846 100644
--- a/fs/nfs/flushd.c
+++ b/fs/nfs/flushd.c
@@ -71,18 +71,17 @@ int nfs_reqlist_init(struct nfs_server *server)
     int status = 0;
 
     dprintk("NFS: writecache_init\n");
+
+    /* Create the RPC task */
+    if (!(task = rpc_new_task(server->client, NULL, RPC_TASK_ASYNC)))
+        return -ENOMEM;
+
     spin_lock(&nfs_flushd_lock);
     cache = server->rw_requests;
 
     if (cache->task)
         goto out_unlock;
 
-    /* Create the RPC task */
-    status = -ENOMEM;
-    task = rpc_new_task(server->client, NULL, RPC_TASK_ASYNC);
-    if (!task)
-        goto out_unlock;
-
     task->tk_calldata = server;
 
     cache->task = task;
@@ -99,6 +98,7 @@ int nfs_reqlist_init(struct nfs_server *server)
     return 0;
 
 out_unlock:
     spin_unlock(&nfs_flushd_lock);
+    rpc_release_task(task);
     return status;
 }
@@ -195,7 +195,9 @@ void inode_remove_flushd(struct inode *inode)
     if (*q) {
         *q = inode->u.nfs_i.hash_next;
         NFS_FLAGS(inode) &= ~NFS_INO_FLUSH;
+        spin_unlock(&nfs_flushd_lock);
         iput(inode);
+        return;
     }
 out:
     spin_unlock(&nfs_flushd_lock);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f946b153276e..4198025856d3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -219,7 +219,6 @@ struct mm_struct {
     unsigned long rss, total_vm, locked_vm;
     unsigned long def_flags;
     unsigned long cpu_vm_mask;
-    unsigned long swap_cnt;  /* number of pages to swap on next pass */
     unsigned long swap_address;
 
     /* Architecture-specific MM context */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index e001de887148..afb1d96b38f9 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -107,7 +107,7 @@ extern wait_queue_head_t kreclaimd_wait;
 extern int page_launder(int, int);
 extern int free_shortage(void);
 extern int inactive_shortage(void);
-extern void wakeup_kswapd(int);
+extern void wakeup_kswapd(void);
 extern int try_to_free_pages(unsigned int gfp_mask);
 
 /* linux/mm/page_io.c */
diff --git a/kernel/fork.c b/kernel/fork.c
index f58c2a87fe56..cee975bdc29c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -134,7 +134,6 @@ static inline int dup_mmap(struct mm_struct * mm)
     mm->mmap_cache = NULL;
     mm->map_count = 0;
     mm->cpu_vm_mask = 0;
-    mm->swap_cnt = 0;
     mm->swap_address = 0;
     pprev = &mm->mmap;
     for (mpnt = current->mm->mmap ; mpnt ; mpnt = mpnt->vm_next) {
diff --git a/mm/filemap.c b/mm/filemap.c
index dedd7911e3fc..e16b34b6fb12 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -306,7 +306,7 @@ inside:
      */
     age_page_up(page);
     if (inactive_shortage() > inactive_target / 2 && free_shortage())
-        wakeup_kswapd(0);
+        wakeup_kswapd();
 not_found:
     return page;
 }
@@ -1835,7 +1835,8 @@ static long madvise_fixup_start(struct vm_area_struct * vma,
     n->vm_end = end;
     setup_read_behavior(n, behavior);
     n->vm_raend = 0;
-    get_file(n->vm_file);
+    if (n->vm_file)
+        get_file(n->vm_file);
     if (n->vm_ops && n->vm_ops->open)
         n->vm_ops->open(n);
     lock_vma_mappings(vma);
@@ -1861,7 +1862,8 @@ static long madvise_fixup_end(struct vm_area_struct * vma,
     n->vm_pgoff += (n->vm_start - vma->vm_start) >> PAGE_SHIFT;
     setup_read_behavior(n, behavior);
     n->vm_raend = 0;
-    get_file(n->vm_file);
+    if (n->vm_file)
+        get_file(n->vm_file);
     if (n->vm_ops && n->vm_ops->open)
         n->vm_ops->open(n);
     lock_vma_mappings(vma);
@@ -1893,7 +1895,8 @@ static long madvise_fixup_middle(struct vm_area_struct * vma,
     right->vm_pgoff += (right->vm_start - left->vm_start) >> PAGE_SHIFT;
     left->vm_raend = 0;
     right->vm_raend = 0;
-    atomic_add(2, &vma->vm_file->f_count);
+    if (vma->vm_file)
+        atomic_add(2, &vma->vm_file->f_count);
     if (vma->vm_ops && vma->vm_ops->open) {
         vma->vm_ops->open(left);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b67aa49130d0..537386a112bd 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -16,6 +16,7 @@
 #include <linux/interrupt.h>
 #include <linux/pagemap.h>
 #include <linux/bootmem.h>
+#include <linux/slab.h>
 
 int nr_swap_pages;
 int nr_active_pages;
@@ -303,7 +304,7 @@ struct page * __alloc_pages(zonelist_t *zonelist, unsigned long order)
      * an inactive page shortage, wake up kswapd.
      */
     if (inactive_shortage() > inactive_target / 2 && free_shortage())
-        wakeup_kswapd(0);
+        wakeup_kswapd();
     /*
      * If we are about to get low on free pages and cleaning
      * the inactive_dirty pages would fix the situation,
@@ -379,7 +380,7 @@ try_again:
      * - if we don't have __GFP_IO set, kswapd may be
      *   able to free some memory we can't free ourselves
      */
-    wakeup_kswapd(0);
+    wakeup_kswapd();
     if (gfp_mask & __GFP_WAIT) {
         __set_current_state(TASK_RUNNING);
         current->policy |= SCHED_YIELD;
@@ -404,7 +405,7 @@ try_again:
      * - we're doing a higher-order allocation
      *   --> move pages to the free list until we succeed
      * - we're /really/ tight on memory
-     *   --> wait on the kswapd waitqueue until memory is freed
+     *   --> try to free pages ourselves with page_launder
      */
     if (!(current->flags & PF_MEMALLOC)) {
         /*
@@ -443,36 +444,23 @@ try_again:
     /*
      * When we arrive here, we are really tight on memory.
      *
-     * We wake up kswapd and sleep until kswapd wakes us
-     * up again. After that we loop back to the start.
-     *
-     * We have to do this because something else might eat
-     * the memory kswapd frees for us and we need to be
-     * reliable. Note that we don't loop back for higher
-     * order allocations since it is possible that kswapd
-     * simply cannot free a large enough contiguous area
-     * of memory *ever*.
-     */
-    if ((gfp_mask & (__GFP_WAIT|__GFP_IO)) == (__GFP_WAIT|__GFP_IO)) {
-        wakeup_kswapd(1);
-        memory_pressure++;
-        if (!order)
-            goto try_again;
-    /*
-     * If __GFP_IO isn't set, we can't wait on kswapd because
-     * kswapd just might need some IO locks /we/ are holding ...
-     *
-     * SUBTLE: The scheduling point above makes sure that
-     * kswapd does get the chance to free memory we can't
-     * free ourselves...
+     * We try to free pages ourselves by:
+     *   - shrinking the i/d caches.
+     *   - reclaiming unused memory from the slab caches.
+     *   - swapping/syncing pages to disk (done by page_launder)
+     *   - moving clean pages from the inactive dirty list to
+     *     the inactive clean list. (done by page_launder)
      */
-    } else if (gfp_mask & __GFP_WAIT) {
-        try_to_free_pages(gfp_mask);
-        memory_pressure++;
+    if (gfp_mask & __GFP_WAIT) {
+        shrink_icache_memory(6, gfp_mask);
+        shrink_dcache_memory(6, gfp_mask);
+        kmem_cache_reap(gfp_mask);
+
+        page_launder(gfp_mask, 1);
+
         if (!order)
             goto try_again;
     }
-    }
 
     /*
diff --git a/mm/slab.c b/mm/slab.c
index b3bd852d1c6c..f6f8be1dbf41 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1702,7 +1702,7 @@ static void enable_all_cpucaches (void)
  * kmem_cache_reap - Reclaim memory from caches.
  * @gfp_mask: the type of memory required.
  *
- * Called from try_to_free_page().
+ * Called from do_try_to_free_pages() and __alloc_pages()
  */
 void kmem_cache_reap (int gfp_mask)
 {
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 947fde568a4c..d9d14754b8fd 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -35,45 +35,21 @@
  * using a process that no longer actually exists (it might
  * have died while we slept).
  */
-static int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, unsigned long address, pte_t * page_table)
+static void try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, unsigned long address, pte_t * page_table, struct page *page)
 {
     pte_t pte;
     swp_entry_t entry;
-    struct page * page;
-    int onlist;
-
-    pte = *page_table;
-    if (!pte_present(pte))
-        goto out_failed;
-    page = pte_page(pte);
-    if ((!VALID_PAGE(page)) || PageReserved(page))
-        goto out_failed;
-
-    if (!mm->swap_cnt)
-        return 1;
-    mm->swap_cnt--;
-
-    onlist = PageActive(page);
     /* Don't look at this pte if it's been accessed recently. */
     if (ptep_test_and_clear_young(page_table)) {
-        age_page_up(page);
-        goto out_failed;
+        page->age += PAGE_AGE_ADV;
+        if (page->age > PAGE_AGE_MAX)
+            page->age = PAGE_AGE_MAX;
+        return;
     }
-    if (!onlist)
-        /* The page is still mapped, so it can't be freeable... */
-        age_page_down_ageonly(page);
-
-    /*
-     * If the page is in active use by us, or if the page
-     * is in active use by others, don't unmap it or
-     * (worse) start unneeded IO.
-     */
-    if (page->age > 0)
-        goto out_failed;
 
     if (TryLockPage(page))
-        goto out_failed;
+        return;
 
     /* From this point on, the odds are that we're going to
      * nuke this pte, so read and clear the pte. This hook
@@ -87,9 +63,6 @@ static int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, un
      * Is the page already in the swap cache? If so, then
      * we can just drop our reference to it without doing
      * any IO - it's already up-to-date on disk.
-     *
-     * Return 0, as we didn't actually free any real
-     * memory, and we should just continue our scan.
      */
     if (PageSwapCache(page)) {
         entry.val = page->index;
@@ -103,8 +76,7 @@ drop_pte:
         mm->rss--;
         deactivate_page(page);
         page_cache_release(page);
-out_failed:
-    return 0;
+        return;
     }
 
     /*
@@ -153,34 +125,20 @@ out_failed:
 out_unlock_restore:
     set_pte(page_table, pte);
     UnlockPage(page);
-    return 0;
+    return;
 }
 
-/*
- * A new implementation of swap_out().  We do not swap complete processes,
- * but only a small number of blocks, before we continue with the next
- * process.  The number of blocks actually swapped is determined on the
- * number of page faults, that this process actually had in the last time,
- * so we won't swap heavily used processes all the time ...
- *
- * Note: the priority argument is a hint on much CPU to waste with the
- *       swap block search, not a hint, of how much blocks to swap with
- *       each process.
- *
- * (C) 1993 Kai Petzke, wpp@marie.physik.tu-berlin.de
- */
-
-static inline int swap_out_pmd(struct mm_struct * mm, struct vm_area_struct * vma, pmd_t *dir, unsigned long address, unsigned long end)
+static int swap_out_pmd(struct mm_struct * mm, struct vm_area_struct * vma, pmd_t *dir, unsigned long address, unsigned long end, int count)
 {
     pte_t * pte;
     unsigned long pmd_end;
 
     if (pmd_none(*dir))
-        return 0;
+        return count;
     if (pmd_bad(*dir)) {
         pmd_ERROR(*dir);
         pmd_clear(dir);
-        return 0;
+        return count;
     }
 
     pte = pte_offset(dir, address);
@@ -190,28 +148,33 @@ static inline int swap_out_pmd(struct mm_struct * mm, struct vm_area_struct * vm
         end = pmd_end;
 
     do {
-        int result;
-        mm->swap_address = address + PAGE_SIZE;
-        result = try_to_swap_out(mm, vma, address, pte);
-        if (result)
-            return result;
+        if (pte_present(*pte)) {
+            struct page *page = pte_page(*pte);
+
+            if (VALID_PAGE(page) && !PageReserved(page)) {
+                try_to_swap_out(mm, vma, address, pte, page);
+                if (--count)
+                    break;
+            }
+        }
         address += PAGE_SIZE;
         pte++;
     } while (address && (address < end));
-    return 0;
+    mm->swap_address = address + PAGE_SIZE;
+    return count;
 }
 
-static inline int swap_out_pgd(struct mm_struct * mm, struct vm_area_struct * vma, pgd_t *dir, unsigned long address, unsigned long end)
+static inline int swap_out_pgd(struct mm_struct * mm, struct vm_area_struct * vma, pgd_t *dir, unsigned long address, unsigned long end, int count)
 {
     pmd_t * pmd;
     unsigned long pgd_end;
 
     if (pgd_none(*dir))
-        return 0;
+        return count;
     if (pgd_bad(*dir)) {
         pgd_ERROR(*dir);
         pgd_clear(dir);
-        return 0;
+        return count;
     }
 
     pmd = pmd_offset(dir, address);
@@ -221,23 +184,23 @@ static inline int swap_out_pgd(struct mm_struct * mm, struct vm_area_struct * vm
         end = pgd_end;
 
     do {
-        int result = swap_out_pmd(mm, vma, pmd, address, end);
-        if (result)
-            return result;
+        count = swap_out_pmd(mm, vma, pmd, address, end, count);
+        if (!count)
+            break;
         address = (address + PMD_SIZE) & PMD_MASK;
         pmd++;
     } while (address && (address < end));
-    return 0;
+    return count;
 }
 
-static int swap_out_vma(struct mm_struct * mm, struct vm_area_struct * vma, unsigned long address)
+static int swap_out_vma(struct mm_struct * mm, struct vm_area_struct * vma, unsigned long address, int count)
 {
     pgd_t *pgdir;
     unsigned long end;
 
     /* Don't swap out areas which are locked down */
     if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
-        return 0;
+        return count;
 
     pgdir = pgd_offset(mm, address);
@@ -245,18 +208,17 @@ static int swap_out_vma(struct mm_struct * mm, struct vm_area_struct * vma, unsi
     if (address >= end)
         BUG();
     do {
-        int result = swap_out_pgd(mm, vma, pgdir, address, end);
-        if (result)
-            return result;
+        count = swap_out_pgd(mm, vma, pgdir, address, end, count);
+        if (!count)
+            break;
         address = (address + PGDIR_SIZE) & PGDIR_MASK;
         pgdir++;
     } while (address && (address < end));
-    return 0;
+    return count;
 }
 
-static int swap_out_mm(struct mm_struct * mm)
+static int swap_out_mm(struct mm_struct * mm, int count)
 {
-    int result = 0;
     unsigned long address;
     struct vm_area_struct* vma;
@@ -270,15 +232,14 @@ static int swap_out_mm(struct mm_struct * mm)
      */
     spin_lock(&mm->page_table_lock);
     address = mm->swap_address;
-    mm->swap_cnt = mm->rss >> 4;
     vma = find_vma(mm, address);
     if (vma) {
         if (address < vma->vm_start)
             address = vma->vm_start;
 
         for (;;) {
-            result = swap_out_vma(mm, vma, address);
-            if (result)
+            count = swap_out_vma(mm, vma, address, count);
+            if (!count)
                 goto out_unlock;
             vma = vma->vm_next;
             if (!vma)
@@ -288,30 +249,39 @@ static int swap_out_mm(struct mm_struct * mm)
         }
         /* Reset to 0 when we reach the end of address space */
         mm->swap_address = 0;
-        mm->swap_cnt = 0;
     }
 
out_unlock:
     spin_unlock(&mm->page_table_lock);
-    return result;
+    return !count;
 }
 
 /*
- * Select the task with maximal swap_cnt and try to swap out a page.
  * N.B. This function returns only 0 or 1.  Return values != 1 from
  * the lower level routines result in continued processing.
  */
 #define SWAP_SHIFT 5
 #define SWAP_MIN 8
 
+static inline int swap_amount(struct mm_struct *mm)
+{
+    int nr = mm->rss >> SWAP_SHIFT;
+    return nr < SWAP_MIN ? SWAP_MIN : nr;
+}
+
 static int swap_out(unsigned int priority, int gfp_mask)
 {
     int counter;
     int retval = 0;
+    struct mm_struct *mm = current->mm;
 
+    /* Always start by trying to penalize the process that is allocating memory */
+    if (mm)
+        retval = swap_out_mm(mm, swap_amount(mm));
+
+    /* Then, look at the other mm's */
     counter = mmlist_nr >> priority;
     do {
         struct list_head *p;
-        struct mm_struct *mm;
 
         spin_lock(&mmlist_lock);
         p = init_mm.mmlist.next;
@@ -327,13 +297,14 @@ static int swap_out(unsigned int priority, int gfp_mask)
         atomic_inc(&mm->mm_users);
         spin_unlock(&mmlist_lock);
 
-        retval |= swap_out_mm(mm);
+        /* Walk about 6% of the address space each time */
+        retval |= swap_out_mm(mm, swap_amount(mm));
         mmput(mm);
 
     } while (--counter >= 0);
     return retval;
 
empty:
-    spin_lock(&mmlist_lock);
+    spin_unlock(&mmlist_lock);
     return 0;
 }
@@ -816,33 +787,35 @@ int inactive_shortage(void)
  * really care about latency. In that case we don't try
  * to free too many pages.
  */
+#define DEF_PRIORITY (6)
 static int refill_inactive(unsigned int gfp_mask, int user)
 {
-    int priority, count, start_count;
+    int count, start_count, maxtry;
 
     count = inactive_shortage() + free_shortage();
     if (user)
         count = (1 << page_cluster);
     start_count = count;
 
-    /* Always trim SLAB caches when memory gets low. */
-    kmem_cache_reap(gfp_mask);
-
-    priority = 6;
+    maxtry = 6;
     do {
         if (current->need_resched) {
             __set_current_state(TASK_RUNNING);
             schedule();
         }
 
-        while (refill_inactive_scan(priority, 1)) {
+        while (refill_inactive_scan(DEF_PRIORITY, 1)) {
             if (--count <= 0)
                 goto done;
         }
 
         /* If refill_inactive_scan failed, try to page stuff out.. */
-        swap_out(priority, gfp_mask);
-    } while (!inactive_shortage());
+        swap_out(DEF_PRIORITY, gfp_mask);
+
+        if (--maxtry <= 0)
+            return 0;
+
+    } while (inactive_shortage());
 
done:
     return (count < start_count);
@@ -872,20 +845,14 @@ static int do_try_to_free_pages(unsigned int gfp_mask, int user)
     ret += refill_inactive(gfp_mask, user);
 
     /*
-     * Delete pages from the inode and dentry cache
-     * if memory is low.
+     * Delete pages from the inode and dentry caches and
+     * reclaim unused slab cache if memory is low.
      */
     if (free_shortage()) {
-        shrink_dcache_memory(6, gfp_mask);
-        shrink_icache_memory(6, gfp_mask);
-    } else {
-
-        /*
-         * Reclaim unused slab cache memory.
-         */
+        shrink_dcache_memory(DEF_PRIORITY, gfp_mask);
+        shrink_icache_memory(DEF_PRIORITY, gfp_mask);
         kmem_cache_reap(gfp_mask);
-        ret = 1;
-    }
+    }
 
     return ret;
 }
@@ -938,13 +905,8 @@ int kswapd(void *unused)
         static int recalc = 0;
 
         /* If needed, try to free some memory. */
-        if (inactive_shortage() || free_shortage()) {
-            int wait = 0;
-            /* Do we need to do some synchronous flushing? */
-            if (waitqueue_active(&kswapd_done))
-                wait = 1;
-            do_try_to_free_pages(GFP_KSWAPD, wait);
-        }
+        if (inactive_shortage() || free_shortage())
+            do_try_to_free_pages(GFP_KSWAPD, 0);
 
         /*
          * Do some (very minimal) background scanning. This
@@ -952,7 +914,7 @@ int kswapd(void *unused)
          * every minute. This clears old referenced bits
          * and moves unused pages to the inactive list.
          */
-        refill_inactive_scan(6, 0);
+        refill_inactive_scan(DEF_PRIORITY, 0);
 
         /* Once a second, recalculate some VM stats. */
         if (time_after(jiffies, recalc + HZ)) {
@@ -960,11 +922,6 @@ int kswapd(void *unused)
             recalculate_vm_stats();
         }
 
-        /*
-         * Wake up everybody waiting for free memory
-         * and unplug the disk queue.
-         */
-        wake_up_all(&kswapd_done);
         run_task_queue(&tq_disk);
 
         /*
@@ -995,33 +952,10 @@ int kswapd(void *unused)
     }
 }
 
-void wakeup_kswapd(int block)
+void wakeup_kswapd(void)
 {
-    DECLARE_WAITQUEUE(wait, current);
-
-    if (current == kswapd_task)
-        return;
-
-    if (!block) {
-        if (waitqueue_active(&kswapd_wait))
-            wake_up(&kswapd_wait);
-        return;
-    }
-
-    /*
-     * Kswapd could wake us up before we get a chance
-     * to sleep, so we have to be very careful here to
-     * prevent SMP races...
-     */
-    __set_current_state(TASK_UNINTERRUPTIBLE);
-    add_wait_queue(&kswapd_done, &wait);
-
-    if (waitqueue_active(&kswapd_wait))
-        wake_up(&kswapd_wait);
-    schedule();
-
-    remove_wait_queue(&kswapd_done, &wait);
-    __set_current_state(TASK_RUNNING);
+    if (current != kswapd_task)
+        wake_up_process(kswapd_task);
 }
 
 /*
@@ -1046,7 +980,7 @@ DECLARE_WAIT_QUEUE_HEAD(kreclaimd_wait);
 /*
  * Kreclaimd will move pages from the inactive_clean list to the
  * free list, in order to keep atomic allocations possible under
- * all circumstances. Even when kswapd is blocked on IO.
+ * all circumstances.
  */
 int kreclaimd(void *unused)
 {
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 1d8002bdd818..81fae9233744 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -504,8 +504,8 @@ void ip_mc_inc_group(struct in_device *in_dev, u32 addr)
     im->timer.function=&igmp_timer_expire;
     im->unsolicit_count = IGMP_Unsolicited_Report_Count;
     im->reporter = 0;
-    im->loaded = 0;
 #endif
+    im->loaded = 0;
     write_lock_bh(&in_dev->lock);
     im->next=in_dev->mc_list;
     in_dev->mc_list=im;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b370fcdf9dc3..6660e0f7292f 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -954,7 +954,7 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, int size)
          */
         skb = sk->write_queue.prev;
         if (tp->send_head &&
-            (mss_now - skb->len) > 0) {
+            (mss_now > skb->len)) {
             copy = skb->len;
             if (skb_tailroom(skb) > 0) {
                 int last_byte_was_odd = (copy % 4);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 4e3eab087f8f..a3f83272b9fc 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1705,7 +1705,7 @@ static __inline__ void tcp_ack_packets_out(struct sock *sk, struct tcp_opt *tp)
     if ((__s32)when < (__s32)tp->rttvar)
         when = tp->rttvar;
 
-    tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, when);
+    tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, min(when, TCP_RTO_MAX));
 }
 }
```
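
The other visible behaviour change sits in the mm/page_alloc.c and mm/vmscan.c hunks above: `wakeup_kswapd()` drops its `block` argument and the `kswapd_done` waitqueue, so allocators no longer sleep waiting for kswapd; a `__GFP_WAIT` allocation that still cannot be satisfied now reclaims on its own behalf. A stand-alone model of that last-resort ordering — the function names and call order mirror the patch, but the stub bodies and the tracing are illustrative only:

```c
/* Model of the new last-resort path in __alloc_pages(): instead of
 * wakeup_kswapd(1) and sleeping, a __GFP_WAIT allocator does the
 * reclaim work itself. Stubs below only trace the call order. */
#include <stdio.h>

#define __GFP_WAIT 0x01

static void shrink_icache_memory(int prio, int gfp) { (void)gfp; printf("shrink_icache_memory(prio=%d)\n", prio); }
static void shrink_dcache_memory(int prio, int gfp) { (void)gfp; printf("shrink_dcache_memory(prio=%d)\n", prio); }
static void kmem_cache_reap(int gfp)                { (void)gfp; printf("kmem_cache_reap()\n"); }
static void page_launder(int gfp, int sync)         { (void)gfp; printf("page_launder(sync=%d)\n", sync); }

static void last_resort_reclaim(int gfp_mask)
{
	if (gfp_mask & __GFP_WAIT) {
		shrink_icache_memory(6, gfp_mask);
		shrink_dcache_memory(6, gfp_mask);
		kmem_cache_reap(gfp_mask);
		page_launder(gfp_mask, 1);
		/* order-0 allocations then loop back to try_again */
	}
}

int main(void)
{
	last_resort_reclaim(__GFP_WAIT);
	return 0;
}
```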
