From 01a8d063e419a1ed197d8454b89d06400938fa84 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Mon, 19 Aug 2002 04:07:40 -0700 Subject: [PATCH] NUMA-Q disable irqbalance Here's a patch from Andrea's tree that uses IRQ_BALANCE_INTERVAL to define how often interrupts are balanced, staying independent from HZ. It also makes sure that there _is_ a change to the configuration before it actually writes it. It reminds me of the mod_timer optimization. --- arch/i386/kernel/io_apic.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c index fdea2bd53f9b..ed8a1f9275f6 100644 --- a/arch/i386/kernel/io_apic.c +++ b/arch/i386/kernel/io_apic.c @@ -220,6 +220,9 @@ extern unsigned long irq_affinity [NR_IRQS]; ((1 << cpu) & (allowed_mask)) #if CONFIG_SMP + +#define IRQ_BALANCE_INTERVAL (HZ/50) + static unsigned long move(int curr_cpu, unsigned long allowed_mask, unsigned long now, int direction) { int search_idle = 1; @@ -254,8 +257,9 @@ static inline void balance_irq(int irq) if (clustered_apic_mode) return; - if (entry->timestamp != now) { + if (unlikely(time_after(now, entry->timestamp + IRQ_BALANCE_INTERVAL))) { unsigned long allowed_mask; + unsigned int new_cpu; int random_number; rdtscl(random_number); @@ -263,8 +267,11 @@ static inline void balance_irq(int irq) allowed_mask = cpu_online_map & irq_affinity[irq]; entry->timestamp = now; - entry->cpu = move(entry->cpu, allowed_mask, now, random_number); - set_ioapic_affinity(irq, 1 << entry->cpu); + new_cpu = move(entry->cpu, allowed_mask, now, random_number); + if (entry->cpu != new_cpu) { + entry->cpu = new_cpu; + set_ioapic_affinity(irq, 1 << new_cpu); + } } } #else /* !SMP */ -- cgit v1.2.3 From ac31cf7092e7e8a1d1f6057ad3550482dc50bd4c Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Mon, 19 Aug 2002 04:07:45 -0700 Subject: [PATCH] fix link problem in ips driver --- drivers/scsi/ips.c | 6 +++--- 1 file changed, 3 insertions(+), 3 
deletions(-) diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c index 3d22bcaf7072..00b9da6e2268 100644 --- a/drivers/scsi/ips.c +++ b/drivers/scsi/ips.c @@ -326,21 +326,21 @@ IPS_DEFINE_COMPAT_TABLE( Compatable ); /* Version Compatability Ta name: ips_hot_plug_name, id_table: ips_pci_table, probe: ips_insert_device, - remove: ips_remove_device, + remove: __devexit_p(ips_remove_device), }; struct pci_driver ips_pci_driver_5i = { name: ips_hot_plug_name, id_table: ips_pci_table_5i, probe: ips_insert_device, - remove: ips_remove_device, + remove: __devexit_p(ips_remove_device), }; struct pci_driver ips_pci_driver_i960 = { name: ips_hot_plug_name, id_table: ips_pci_table_i960, probe: ips_insert_device, - remove: ips_remove_device, + remove: __devexit_p(ips_remove_device), }; #endif -- cgit v1.2.3 From 9bdedfceabd0892444c6581997a34278e41fd80d Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Mon, 19 Aug 2002 06:04:46 -0700 Subject: [PATCH] Fix a race between __page_cache_release() and shrink_cache() __page_cache_release() needs to recheck the page count inside the LRU lock, because shrink_cache() may have found the page on the LRU and incremented its refcount again. Which is carefully documented over __pagevec_release(). Duh. 
--- mm/swap.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/mm/swap.c b/mm/swap.c index 10e6d4a3683b..1d9eba6744e8 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -81,15 +81,18 @@ void __page_cache_release(struct page *page) unsigned long flags; spin_lock_irqsave(&_pagemap_lru_lock, flags); - if (!TestClearPageLRU(page)) - BUG(); - if (PageActive(page)) - del_page_from_active_list(page); - else - del_page_from_inactive_list(page); + if (TestClearPageLRU(page)) { + if (PageActive(page)) + del_page_from_active_list(page); + else + del_page_from_inactive_list(page); + } + if (page_count(page) != 0) + page = NULL; spin_unlock_irqrestore(&_pagemap_lru_lock, flags); } - __free_pages_ok(page, 0); + if (page) + __free_pages_ok(page, 0); } /* -- cgit v1.2.3 From 891975a10ed395bf2997c75e2a6776523404a0ef Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Mon, 19 Aug 2002 06:04:50 -0700 Subject: [PATCH] fix uniprocessor lockups I have a test_and_set_bit(PG_chainlock, page->flags) in page reclaim. Which works fine on SMP. But on uniprocessor, we made pte_chain_unlock() a no-op, so all pages end up with PG_chainlock set. refill_inactive() cannot move any pages onto the inactive list and the machine dies. The patch removes the test_and_set_bit optimisation in there and just uses pte_chain_lock(). If we want that (dubious) optimisation back then let's do it right and create pte_chain_trylock(). 
--- mm/vmscan.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/mm/vmscan.c b/mm/vmscan.c index 53b337114308..2a28170d78e4 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -398,10 +398,7 @@ static /* inline */ void refill_inactive(const int nr_pages_in) page = list_entry(l_hold.prev, struct page, lru); list_del(&page->lru); if (page->pte.chain) { - if (test_and_set_bit(PG_chainlock, &page->flags)) { - list_add(&page->lru, &l_active); - continue; - } + pte_chain_lock(page); if (page->pte.chain && page_referenced(page)) { pte_chain_unlock(page); list_add(&page->lru, &l_active); -- cgit v1.2.3 From 0ee29e6010df87ffb563f2f9bfdf019e0976c6cb Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Mon, 19 Aug 2002 06:04:55 -0700 Subject: [PATCH] Fix a BUG in try_to_unmap() try_to_unmap() dies if the to-be-unmapped page has a null ->mapping. But the preceding logic in shrink_cache() forgot about the rarely-occurring pages which have buffers and no ->mapping. --- mm/vmscan.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/mm/vmscan.c b/mm/vmscan.c index 2a28170d78e4..d2dc38fd7bec 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -132,13 +132,15 @@ shrink_list(struct list_head *page_list, int nr_pages, zone_t *classzone, goto activate_locked; } + mapping = page->mapping; + /* * Anonymous process memory without backing store. Try to * allocate it some swap space here. * * XXX: implement swap clustering ? */ - if (page->pte.chain && !page->mapping && !PagePrivate(page)) { + if (page->pte.chain && !mapping && !PagePrivate(page)) { pte_chain_unlock(page); if (!add_to_swap(page)) goto activate_locked; @@ -149,7 +151,7 @@ shrink_list(struct list_head *page_list, int nr_pages, zone_t *classzone, * The page is mapped into the page tables of one or more * processes. Try to unmap it here. 
*/ - if (page->pte.chain) { + if (page->pte.chain && mapping) { switch (try_to_unmap(page)) { case SWAP_ERROR: case SWAP_FAIL: @@ -163,7 +165,6 @@ shrink_list(struct list_head *page_list, int nr_pages, zone_t *classzone, } } pte_chain_unlock(page); - mapping = page->mapping; /* * FIXME: this is CPU-inefficient for shared mappings. -- cgit v1.2.3 From 6424ea03c9ac36098e59c89264494ee69818ed5c Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Mon, 19 Aug 2002 18:10:50 -0700 Subject: [PATCH] ROMFS superblock cleanup. This patch from Christoph Hellwig divorces ROMFS from the struct superblock union, as has been done to various other filesystems during 2.5 --- fs/romfs/inode.c | 21 +++++++++++++-------- include/linux/romfs_fs_sb.h | 10 ---------- 2 files changed, 13 insertions(+), 18 deletions(-) delete mode 100644 include/linux/romfs_fs_sb.h diff --git a/fs/romfs/inode.c b/fs/romfs/inode.c index 8ae9ce03cab6..c527558c8aea 100644 --- a/fs/romfs/inode.c +++ b/fs/romfs/inode.c @@ -83,6 +83,12 @@ struct romfs_inode_info { struct inode vfs_inode; }; +/* instead of private superblock data */ +static inline unsigned long romfs_maxsize(struct super_block *sb) +{ + return (unsigned long)sb->u.generic_sbp; +} + static inline struct romfs_inode_info *ROMFS_I(struct inode *inode) { return list_entry(inode, struct romfs_inode_info, vfs_inode); @@ -113,7 +119,6 @@ static int romfs_fill_super(struct super_block *s, void *data, int silent) /* I would parse the options here, but there are none.. 
:) */ sb_set_blocksize(s, ROMBSIZE); - s->u.generic_sbp = (void *) 0; s->s_maxbytes = 0xFFFFFFFF; bh = sb_bread(s, 0); @@ -139,7 +144,7 @@ static int romfs_fill_super(struct super_block *s, void *data, int silent) } s->s_magic = ROMFS_MAGIC; - s->u.romfs_sb.s_maxsize = sz; + s->u.generic_sbp = (void *)sz; s->s_flags |= MS_RDONLY; @@ -175,7 +180,7 @@ romfs_statfs(struct super_block *sb, struct statfs *buf) buf->f_type = ROMFS_MAGIC; buf->f_bsize = ROMBSIZE; buf->f_bfree = buf->f_bavail = buf->f_ffree; - buf->f_blocks = (sb->u.romfs_sb.s_maxsize+ROMBSIZE-1)>>ROMBSBITS; + buf->f_blocks = (romfs_maxsize(sb)+ROMBSIZE-1)>>ROMBSBITS; buf->f_namelen = ROMFS_MAXFN; return 0; } @@ -188,7 +193,7 @@ romfs_strnlen(struct inode *i, unsigned long offset, unsigned long count) struct buffer_head *bh; unsigned long avail, maxsize, res; - maxsize = i->i_sb->u.romfs_sb.s_maxsize; + maxsize = romfs_maxsize(i->i_sb); if (offset >= maxsize) return -1; @@ -230,7 +235,7 @@ romfs_copyfrom(struct inode *i, void *dest, unsigned long offset, unsigned long struct buffer_head *bh; unsigned long avail, maxsize, res; - maxsize = i->i_sb->u.romfs_sb.s_maxsize; + maxsize = romfs_maxsize(i->i_sb); if (offset >= maxsize || count > maxsize || offset+count>maxsize) return -1; @@ -275,8 +280,8 @@ romfs_readdir(struct file *filp, void *dirent, filldir_t filldir) char fsname[ROMFS_MAXFN]; /* XXX dynamic? 
*/ lock_kernel(); - - maxoff = i->i_sb->u.romfs_sb.s_maxsize; + + maxoff = romfs_maxsize(i->i_sb); offset = filp->f_pos; if (!offset) { @@ -339,7 +344,7 @@ romfs_lookup(struct inode *dir, struct dentry *dentry) if (romfs_copyfrom(dir, &ri, offset, ROMFH_SIZE) <= 0) goto out; - maxoff = dir->i_sb->u.romfs_sb.s_maxsize; + maxoff = romfs_maxsize(dir->i_sb); offset = ntohl(ri.spec) & ROMFH_MASK; /* OK, now find the file whose name is in "dentry" in the diff --git a/include/linux/romfs_fs_sb.h b/include/linux/romfs_fs_sb.h deleted file mode 100644 index 02da2280a6df..000000000000 --- a/include/linux/romfs_fs_sb.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef __ROMFS_FS_SB -#define __ROMFS_FS_SB - -/* romfs superblock in-core data */ - -struct romfs_sb_info { - unsigned long s_maxsize; -}; - -#endif -- cgit v1.2.3 From 136b6223bfb668b502f6ef20e0106d27dbf9495c Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Mon, 19 Aug 2002 18:10:54 -0700 Subject: [PATCH] UFS superblock cleanup. This one from Brian Gerst separates UFS from the struct superblock union. 
--- fs/ufs/balloc.c | 48 ++++++++--------- fs/ufs/cylinder.c | 73 +++++++++++++------------- fs/ufs/dir.c | 8 +-- fs/ufs/ialloc.c | 20 +++---- fs/ufs/inode.c | 18 +++---- fs/ufs/namei.c | 2 +- fs/ufs/super.c | 138 ++++++++++++++++++++++++++++--------------------- fs/ufs/swab.h | 24 ++++----- fs/ufs/truncate.c | 10 ++-- fs/ufs/util.h | 26 +++++----- include/linux/ufs_fs.h | 10 +++- 11 files changed, 203 insertions(+), 174 deletions(-) diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c index 921fd31d5236..8b622b001d46 100644 --- a/fs/ufs/balloc.c +++ b/fs/ufs/balloc.c @@ -47,7 +47,7 @@ void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count unsigned cgno, bit, end_bit, bbase, blkmap, i, blkno, cylno; sb = inode->i_sb; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; usb1 = ubh_get_usb_first(USPI_UBH); UFSD(("ENTER, fragment %u, count %u\n", fragment, count)) @@ -89,7 +89,7 @@ void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count fs32_add(sb, &ucg->cg_cs.cs_nffree, count); fs32_add(sb, &usb1->fs_cstotal.cs_nffree, count); - fs32_add(sb, &sb->fs_cs(cgno).cs_nffree, count); + fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count); blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase); ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1); @@ -100,12 +100,12 @@ void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) { fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb); fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, uspi->s_fpb); - fs32_sub(sb, &sb->fs_cs(cgno).cs_nffree, uspi->s_fpb); - if ((sb->u.ufs_sb.s_flags & UFS_CG_MASK) == UFS_CG_44BSD) + fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb); + if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) ufs_clusteracct (sb, ucpi, blkno, 1); fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1); fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1); - fs32_add(sb, &sb->fs_cs(cgno).cs_nbfree, 1); + fs32_add(sb, 
&UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1); cylno = ufs_cbtocylno (bbase); fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(bbase)), 1); fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1); @@ -141,7 +141,7 @@ void ufs_free_blocks (struct inode * inode, unsigned fragment, unsigned count) { unsigned overflow, cgno, bit, end_bit, blkno, i, cylno; sb = inode->i_sb; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; usb1 = ubh_get_usb_first(USPI_UBH); UFSD(("ENTER, fragment %u, count %u\n", fragment, count)) @@ -184,13 +184,13 @@ do_more: ufs_error(sb, "ufs_free_blocks", "freeing free fragment"); } ubh_setblock(UCPI_UBH, ucpi->c_freeoff, blkno); - if ((sb->u.ufs_sb.s_flags & UFS_CG_MASK) == UFS_CG_44BSD) + if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) ufs_clusteracct (sb, ucpi, blkno, 1); DQUOT_FREE_BLOCK(inode, uspi->s_fpb); fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1); fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1); - fs32_add(sb, &sb->fs_cs(cgno).cs_nbfree, 1); + fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1); cylno = ufs_cbtocylno(i); fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(i)), 1); fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1); @@ -247,7 +247,7 @@ unsigned ufs_new_fragments (struct inode * inode, u32 * p, unsigned fragment, UFSD(("ENTER, ino %lu, fragment %u, goal %u, count %u\n", inode->i_ino, fragment, goal, count)) sb = inode->i_sb; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; usb1 = ubh_get_usb_first(USPI_UBH); *err = -ENOSPC; @@ -285,7 +285,7 @@ unsigned ufs_new_fragments (struct inode * inode, u32 * p, unsigned fragment, return 0; } } - + /* * There is not enough space for user on the device */ @@ -293,8 +293,8 @@ unsigned ufs_new_fragments (struct inode * inode, u32 * p, unsigned fragment, unlock_super (sb); UFSD(("EXIT (FAILED)\n")) return 0; - } - + } + if (goal >= uspi->s_size) goal = 0; if (goal == 0) @@ -407,12 +407,12 @@ unsigned ufs_add_fragments (struct inode * inode, unsigned fragment, UFSD(("ENTER, fragment %u, 
oldcount %u, newcount %u\n", fragment, oldcount, newcount)) sb = inode->i_sb; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; usb1 = ubh_get_usb_first (USPI_UBH); count = newcount - oldcount; cgno = ufs_dtog(fragment); - if (sb->fs_cs(cgno).cs_nffree < count) + if (UFS_SB(sb)->fs_cs(cgno).cs_nffree < count) return 0; if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb) return 0; @@ -453,7 +453,7 @@ unsigned ufs_add_fragments (struct inode * inode, unsigned fragment, } fs32_sub(sb, &ucg->cg_cs.cs_nffree, count); - fs32_sub(sb, &sb->fs_cs(cgno).cs_nffree, count); + fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count); fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count); ubh_mark_buffer_dirty (USPI_UBH); @@ -470,7 +470,7 @@ unsigned ufs_add_fragments (struct inode * inode, unsigned fragment, } #define UFS_TEST_FREE_SPACE_CG \ - ucg = (struct ufs_cylinder_group *) sb->u.ufs_sb.s_ucg[cgno]->b_data; \ + ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \ if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \ goto cg_found; \ for (k = count; k < uspi->s_fpb; k++) \ @@ -490,7 +490,7 @@ unsigned ufs_alloc_fragments (struct inode * inode, unsigned cgno, UFSD(("ENTER, ino %lu, cgno %u, goal %u, count %u\n", inode->i_ino, cgno, goal, count)) sb = inode->i_sb; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; usb1 = ubh_get_usb_first(USPI_UBH); oldcg = cgno; @@ -557,7 +557,7 @@ cg_found: fs32_add(sb, &ucg->cg_cs.cs_nffree, i); fs32_add(sb, &usb1->fs_cstotal.cs_nffree, i); - fs32_add(sb, &sb->fs_cs(cgno).cs_nffree, i); + fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i); fs32_add(sb, &ucg->cg_frsum[i], 1); goto succed; } @@ -574,7 +574,7 @@ cg_found: fs32_sub(sb, &ucg->cg_cs.cs_nffree, count); fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count); - fs32_sub(sb, &sb->fs_cs(cgno).cs_nffree, count); + fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count); fs32_sub(sb, &ucg->cg_frsum[allocsize], 1); if (count != allocsize) @@ -606,7 +606,7 @@ unsigned 
ufs_alloccg_block (struct inode * inode, UFSD(("ENTER, goal %u\n", goal)) sb = inode->i_sb; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; usb1 = ubh_get_usb_first(USPI_UBH); ucg = ubh_get_ucg(UCPI_UBH); @@ -633,7 +633,7 @@ norot: gotit: blkno = ufs_fragstoblks(result); ubh_clrblock (UCPI_UBH, ucpi->c_freeoff, blkno); - if ((sb->u.ufs_sb.s_flags & UFS_CG_MASK) == UFS_CG_44BSD) + if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) ufs_clusteracct (sb, ucpi, blkno, -1); if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) { *err = -EDQUOT; @@ -642,7 +642,7 @@ gotit: fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1); fs32_sub(sb, &usb1->fs_cstotal.cs_nbfree, 1); - fs32_sub(sb, &sb->fs_cs(ucpi->c_cgx).cs_nbfree, 1); + fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1); cylno = ufs_cbtocylno(result); fs16_sub(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(result)), 1); fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1); @@ -663,7 +663,7 @@ unsigned ufs_bitmap_search (struct super_block * sb, UFSD(("ENTER, cg %u, goal %u, count %u\n", ucpi->c_cgx, goal, count)) - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; usb1 = ubh_get_usb_first (USPI_UBH); ucg = ubh_get_ucg(UCPI_UBH); @@ -729,7 +729,7 @@ void ufs_clusteracct(struct super_block * sb, struct ufs_sb_private_info * uspi; int i, start, end, forw, back; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; if (uspi->s_contigsumsize <= 0) return; diff --git a/fs/ufs/cylinder.c b/fs/ufs/cylinder.c index daf11e4dcf66..105a695aad8f 100644 --- a/fs/ufs/cylinder.c +++ b/fs/ufs/cylinder.c @@ -36,26 +36,27 @@ static void ufs_read_cylinder (struct super_block * sb, unsigned cgno, unsigned bitmap_nr) { + struct ufs_sb_info * sbi = UFS_SB(sb); struct ufs_sb_private_info * uspi; struct ufs_cg_private_info * ucpi; struct ufs_cylinder_group * ucg; unsigned i, j; UFSD(("ENTER, cgno %u, bitmap_nr %u\n", cgno, bitmap_nr)) - uspi = sb->u.ufs_sb.s_uspi; - ucpi = sb->u.ufs_sb.s_ucpi[bitmap_nr]; - ucg = (struct ufs_cylinder_group 
*)sb->u.ufs_sb.s_ucg[cgno]->b_data; + uspi = sbi->s_uspi; + ucpi = sbi->s_ucpi[bitmap_nr]; + ucg = (struct ufs_cylinder_group *)sbi->s_ucg[cgno]->b_data; UCPI_UBH->fragment = ufs_cgcmin(cgno); UCPI_UBH->count = uspi->s_cgsize >> sb->s_blocksize_bits; /* * We have already the first fragment of cylinder group block in buffer */ - UCPI_UBH->bh[0] = sb->u.ufs_sb.s_ucg[cgno]; + UCPI_UBH->bh[0] = sbi->s_ucg[cgno]; for (i = 1; i < UCPI_UBH->count; i++) if (!(UCPI_UBH->bh[i] = sb_bread(sb, UCPI_UBH->fragment + i))) goto failed; - sb->u.ufs_sb.s_cgno[bitmap_nr] = cgno; + sbi->s_cgno[bitmap_nr] = cgno; ucpi->c_cgx = fs32_to_cpu(sb, ucg->cg_cgx); ucpi->c_ncyl = fs16_to_cpu(sb, ucg->cg_ncyl); @@ -77,8 +78,8 @@ static void ufs_read_cylinder (struct super_block * sb, failed: for (j = 1; j < i; j++) - brelse (sb->u.ufs_sb.s_ucg[j]); - sb->u.ufs_sb.s_cgno[bitmap_nr] = UFS_CGNO_EMPTY; + brelse (sbi->s_ucg[j]); + sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY; ufs_error (sb, "ufs_read_cylinder", "can't read cylinder group block %u", cgno); } @@ -88,6 +89,7 @@ failed: */ void ufs_put_cylinder (struct super_block * sb, unsigned bitmap_nr) { + struct ufs_sb_info * sbi = UFS_SB(sb); struct ufs_sb_private_info * uspi; struct ufs_cg_private_info * ucpi; struct ufs_cylinder_group * ucg; @@ -95,15 +97,15 @@ void ufs_put_cylinder (struct super_block * sb, unsigned bitmap_nr) UFSD(("ENTER, bitmap_nr %u\n", bitmap_nr)) - uspi = sb->u.ufs_sb.s_uspi; - if (sb->u.ufs_sb.s_cgno[bitmap_nr] == UFS_CGNO_EMPTY) { + uspi = sbi->s_uspi; + if (sbi->s_cgno[bitmap_nr] == UFS_CGNO_EMPTY) { UFSD(("EXIT\n")) return; } - ucpi = sb->u.ufs_sb.s_ucpi[bitmap_nr]; + ucpi = sbi->s_ucpi[bitmap_nr]; ucg = ubh_get_ucg(UCPI_UBH); - if (uspi->s_ncg > UFS_MAX_GROUP_LOADED && bitmap_nr >= sb->u.ufs_sb.s_cg_loaded) { + if (uspi->s_ncg > UFS_MAX_GROUP_LOADED && bitmap_nr >= sbi->s_cg_loaded) { ufs_panic (sb, "ufs_put_cylinder", "internal error"); return; } @@ -119,7 +121,7 @@ void ufs_put_cylinder (struct super_block * sb, unsigned 
bitmap_nr) brelse (UCPI_UBH->bh[i]); } - sb->u.ufs_sb.s_cgno[bitmap_nr] = UFS_CGNO_EMPTY; + sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY; UFSD(("EXIT\n")) } @@ -132,13 +134,14 @@ void ufs_put_cylinder (struct super_block * sb, unsigned bitmap_nr) struct ufs_cg_private_info * ufs_load_cylinder ( struct super_block * sb, unsigned cgno) { + struct ufs_sb_info * sbi = UFS_SB(sb); struct ufs_sb_private_info * uspi; struct ufs_cg_private_info * ucpi; unsigned cg, i, j; UFSD(("ENTER, cgno %u\n", cgno)) - uspi = sb->u.ufs_sb.s_uspi; + uspi = sbi->s_uspi; if (cgno >= uspi->s_ncg) { ufs_panic (sb, "ufs_load_cylinder", "internal error, high number of cg"); return NULL; @@ -146,61 +149,61 @@ struct ufs_cg_private_info * ufs_load_cylinder ( /* * Cylinder group number cg it in cache and it was last used */ - if (sb->u.ufs_sb.s_cgno[0] == cgno) { + if (sbi->s_cgno[0] == cgno) { UFSD(("EXIT\n")) - return sb->u.ufs_sb.s_ucpi[0]; + return sbi->s_ucpi[0]; } /* * Number of cylinder groups is not higher than UFS_MAX_GROUP_LOADED */ if (uspi->s_ncg <= UFS_MAX_GROUP_LOADED) { - if (sb->u.ufs_sb.s_cgno[cgno] != UFS_CGNO_EMPTY) { - if (sb->u.ufs_sb.s_cgno[cgno] != cgno) { + if (sbi->s_cgno[cgno] != UFS_CGNO_EMPTY) { + if (sbi->s_cgno[cgno] != cgno) { ufs_panic (sb, "ufs_load_cylinder", "internal error, wrong number of cg in cache"); UFSD(("EXIT (FAILED)\n")) return NULL; } else { UFSD(("EXIT\n")) - return sb->u.ufs_sb.s_ucpi[cgno]; + return sbi->s_ucpi[cgno]; } } else { ufs_read_cylinder (sb, cgno, cgno); UFSD(("EXIT\n")) - return sb->u.ufs_sb.s_ucpi[cgno]; + return sbi->s_ucpi[cgno]; } } /* * Cylinder group number cg is in cache but it was not last used, * we will move to the first position */ - for (i = 0; i < sb->u.ufs_sb.s_cg_loaded && sb->u.ufs_sb.s_cgno[i] != cgno; i++); - if (i < sb->u.ufs_sb.s_cg_loaded && sb->u.ufs_sb.s_cgno[i] == cgno) { - cg = sb->u.ufs_sb.s_cgno[i]; - ucpi = sb->u.ufs_sb.s_ucpi[i]; + for (i = 0; i < sbi->s_cg_loaded && sbi->s_cgno[i] != cgno; i++); + if (i < 
sbi->s_cg_loaded && sbi->s_cgno[i] == cgno) { + cg = sbi->s_cgno[i]; + ucpi = sbi->s_ucpi[i]; for (j = i; j > 0; j--) { - sb->u.ufs_sb.s_cgno[j] = sb->u.ufs_sb.s_cgno[j-1]; - sb->u.ufs_sb.s_ucpi[j] = sb->u.ufs_sb.s_ucpi[j-1]; + sbi->s_cgno[j] = sbi->s_cgno[j-1]; + sbi->s_ucpi[j] = sbi->s_ucpi[j-1]; } - sb->u.ufs_sb.s_cgno[0] = cg; - sb->u.ufs_sb.s_ucpi[0] = ucpi; + sbi->s_cgno[0] = cg; + sbi->s_ucpi[0] = ucpi; /* * Cylinder group number cg is not in cache, we will read it from disk * and put it to the first position */ } else { - if (sb->u.ufs_sb.s_cg_loaded < UFS_MAX_GROUP_LOADED) - sb->u.ufs_sb.s_cg_loaded++; + if (sbi->s_cg_loaded < UFS_MAX_GROUP_LOADED) + sbi->s_cg_loaded++; else ufs_put_cylinder (sb, UFS_MAX_GROUP_LOADED-1); - ucpi = sb->u.ufs_sb.s_ucpi[sb->u.ufs_sb.s_cg_loaded - 1]; - for (j = sb->u.ufs_sb.s_cg_loaded - 1; j > 0; j--) { - sb->u.ufs_sb.s_cgno[j] = sb->u.ufs_sb.s_cgno[j-1]; - sb->u.ufs_sb.s_ucpi[j] = sb->u.ufs_sb.s_ucpi[j-1]; + ucpi = sbi->s_ucpi[sbi->s_cg_loaded - 1]; + for (j = sbi->s_cg_loaded - 1; j > 0; j--) { + sbi->s_cgno[j] = sbi->s_cgno[j-1]; + sbi->s_ucpi[j] = sbi->s_ucpi[j-1]; } - sb->u.ufs_sb.s_ucpi[0] = ucpi; + sbi->s_ucpi[0] = ucpi; ufs_read_cylinder (sb, cgno, 0); } UFSD(("EXIT\n")) - return sb->u.ufs_sb.s_ucpi[0]; + return sbi->s_ucpi[0]; } diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c index 49764ea033de..7253a3c12780 100644 --- a/fs/ufs/dir.c +++ b/fs/ufs/dir.c @@ -67,7 +67,7 @@ ufs_readdir (struct file * filp, void * dirent, filldir_t filldir) lock_kernel(); sb = inode->i_sb; - flags = sb->u.ufs_sb.s_flags; + flags = UFS_SB(sb)->s_flags; UFSD(("ENTER, ino %lu f_pos %lu\n", inode->i_ino, (unsigned long) filp->f_pos)) @@ -308,8 +308,8 @@ int ufs_check_dir_entry (const char * function, struct inode * dir, error_msg = "reclen is too small for namlen"; else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize) error_msg = "directory entry across blocks"; - else if (fs32_to_cpu(sb, de->d_ino) > (sb->u.ufs_sb.s_uspi->s_ipg * - 
sb->u.ufs_sb.s_uspi->s_ncg)) + else if (fs32_to_cpu(sb, de->d_ino) > (UFS_SB(sb)->s_uspi->s_ipg * + UFS_SB(sb)->s_uspi->s_ncg)) error_msg = "inode out of bounds"; if (error_msg != NULL) @@ -386,7 +386,7 @@ int ufs_add_link(struct dentry *dentry, struct inode *inode) UFSD(("ENTER, name %s, namelen %u\n", name, namelen)) sb = dir->i_sb; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; if (!namelen) return -EINVAL; diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c index 302d09d27bf1..8c9596404f9a 100644 --- a/fs/ufs/ialloc.c +++ b/fs/ufs/ialloc.c @@ -71,7 +71,7 @@ void ufs_free_inode (struct inode * inode) UFSD(("ENTER, ino %lu\n", inode->i_ino)) sb = inode->i_sb; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; usb1 = ubh_get_usb_first(USPI_UBH); ino = inode->i_ino; @@ -112,12 +112,12 @@ void ufs_free_inode (struct inode * inode) ucpi->c_irotor = ino; fs32_add(sb, &ucg->cg_cs.cs_nifree, 1); fs32_add(sb, &usb1->fs_cstotal.cs_nifree, 1); - fs32_add(sb, &sb->fs_cs(cg).cs_nifree, 1); + fs32_add(sb, &UFS_SB(sb)->fs_cs(cg).cs_nifree, 1); if (is_directory) { fs32_sub(sb, &ucg->cg_cs.cs_ndir, 1); fs32_sub(sb, &usb1->fs_cstotal.cs_ndir, 1); - fs32_sub(sb, &sb->fs_cs(cg).cs_ndir, 1); + fs32_sub(sb, &UFS_SB(sb)->fs_cs(cg).cs_ndir, 1); } } @@ -146,6 +146,7 @@ void ufs_free_inode (struct inode * inode) struct inode * ufs_new_inode(struct inode * dir, int mode) { struct super_block * sb; + struct ufs_sb_info * sbi; struct ufs_sb_private_info * uspi; struct ufs_super_block_first * usb1; struct ufs_cg_private_info * ucpi; @@ -164,7 +165,8 @@ struct inode * ufs_new_inode(struct inode * dir, int mode) if (!inode) return ERR_PTR(-ENOMEM); ufsi = UFS_I(inode); - uspi = sb->u.ufs_sb.s_uspi; + sbi = UFS_SB(sb); + uspi = sbi->s_uspi; usb1 = ubh_get_usb_first(USPI_UBH); lock_super (sb); @@ -173,7 +175,7 @@ struct inode * ufs_new_inode(struct inode * dir, int mode) * Try to place the inode in its parent directory */ i = ufs_inotocg(dir->i_ino); - if (sb->fs_cs(i).cs_nifree) 
{ + if (sbi->fs_cs(i).cs_nifree) { cg = i; goto cg_found; } @@ -185,7 +187,7 @@ struct inode * ufs_new_inode(struct inode * dir, int mode) i += j; if (i >= uspi->s_ncg) i -= uspi->s_ncg; - if (sb->fs_cs(i).cs_nifree) { + if (sbi->fs_cs(i).cs_nifree) { cg = i; goto cg_found; } @@ -199,7 +201,7 @@ struct inode * ufs_new_inode(struct inode * dir, int mode) i++; if (i >= uspi->s_ncg) i = 0; - if (sb->fs_cs(i).cs_nifree) { + if (sbi->fs_cs(i).cs_nifree) { cg = i; goto cg_found; } @@ -235,12 +237,12 @@ cg_found: fs32_sub(sb, &ucg->cg_cs.cs_nifree, 1); fs32_sub(sb, &usb1->fs_cstotal.cs_nifree, 1); - fs32_sub(sb, &sb->fs_cs(cg).cs_nifree, 1); + fs32_sub(sb, &sbi->fs_cs(cg).cs_nifree, 1); if (S_ISDIR(mode)) { fs32_add(sb, &ucg->cg_cs.cs_ndir, 1); fs32_add(sb, &usb1->fs_cstotal.cs_ndir, 1); - fs32_add(sb, &sb->fs_cs(cg).cs_ndir, 1); + fs32_add(sb, &sbi->fs_cs(cg).cs_ndir, 1); } ubh_mark_buffer_dirty (USPI_UBH); diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c index d740f00123cd..d406a3c62bc7 100644 --- a/fs/ufs/inode.c +++ b/fs/ufs/inode.c @@ -52,7 +52,7 @@ static int ufs_block_to_path(struct inode *inode, long i_block, int offsets[4]) { - struct ufs_sb_private_info *uspi = inode->i_sb->u.ufs_sb.s_uspi; + struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi; int ptrs = uspi->s_apb; int ptrs_bits = uspi->s_apbshift; const long direct_blocks = UFS_NDADDR, @@ -86,7 +86,7 @@ int ufs_frag_map(struct inode *inode, int frag) { struct ufs_inode_info *ufsi = UFS_I(inode); struct super_block *sb = inode->i_sb; - struct ufs_sb_private_info *uspi = sb->u.ufs_sb.s_uspi; + struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; int mask = uspi->s_apbmask>>uspi->s_fpbshift; int shift = uspi->s_apbshift-uspi->s_fpbshift; int offsets[4], *p; @@ -137,7 +137,7 @@ static struct buffer_head * ufs_inode_getfrag (struct inode *inode, inode->i_ino, fragment, new_fragment, required)) sb = inode->i_sb; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; block = ufs_fragstoblks (fragment); 
blockoff = ufs_fragnum (fragment); p = ufsi->i_u1.i_data + block; @@ -243,7 +243,7 @@ static struct buffer_head * ufs_block_getfrag (struct inode *inode, u32 * p; sb = inode->i_sb; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; block = ufs_fragstoblks (fragment); blockoff = ufs_fragnum (fragment); @@ -313,7 +313,7 @@ out: static int ufs_getfrag_block (struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create) { struct super_block * sb = inode->i_sb; - struct ufs_sb_private_info * uspi = sb->u.ufs_sb.s_uspi; + struct ufs_sb_private_info * uspi = UFS_SB(sb)->s_uspi; struct buffer_head * bh; int ret, err, new; unsigned long ptr, phys; @@ -483,8 +483,8 @@ void ufs_read_inode (struct inode * inode) UFSD(("ENTER, ino %lu\n", inode->i_ino)) sb = inode->i_sb; - uspi = sb->u.ufs_sb.s_uspi; - flags = sb->u.ufs_sb.s_flags; + uspi = UFS_SB(sb)->s_uspi; + flags = UFS_SB(sb)->s_flags; if (inode->i_ino < UFS_ROOTINO || inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) { @@ -579,8 +579,8 @@ static int ufs_update_inode(struct inode * inode, int do_sync) UFSD(("ENTER, ino %lu\n", inode->i_ino)) sb = inode->i_sb; - uspi = sb->u.ufs_sb.s_uspi; - flags = sb->u.ufs_sb.s_flags; + uspi = UFS_SB(sb)->s_uspi; + flags = UFS_SB(sb)->s_flags; if (inode->i_ino < UFS_ROOTINO || inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) { diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c index faea20eb2069..50f0e8c2705b 100644 --- a/fs/ufs/namei.c +++ b/fs/ufs/namei.c @@ -139,7 +139,7 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry, if (IS_ERR(inode)) goto out; - if (l > sb->u.ufs_sb.s_uspi->s_maxsymlinklen) { + if (l > UFS_SB(sb)->s_uspi->s_maxsymlinklen) { /* slow symlink */ inode->i_op = &page_symlink_inode_operations; inode->i_mapping->a_ops = &ufs_aops; diff --git a/fs/ufs/super.c b/fs/ufs/super.c index c60ae7e1fcde..eda1ea41f86a 100644 --- a/fs/ufs/super.c +++ b/fs/ufs/super.c @@ -189,7 +189,7 @@ void ufs_error (struct super_block * sb, const char * function, 
struct ufs_super_block_first * usb1; va_list args; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; usb1 = ubh_get_usb_first(USPI_UBH); if (!(sb->s_flags & MS_RDONLY)) { @@ -201,7 +201,7 @@ void ufs_error (struct super_block * sb, const char * function, va_start (args, fmt); vsprintf (error_buf, fmt, args); va_end (args); - switch (sb->u.ufs_sb.s_mount_opt & UFS_MOUNT_ONERROR) { + switch (UFS_SB(sb)->s_mount_opt & UFS_MOUNT_ONERROR) { case UFS_MOUNT_ONERROR_PANIC: panic ("UFS-fs panic (device %s): %s: %s\n", sb->s_id, function, error_buf); @@ -221,7 +221,7 @@ void ufs_panic (struct super_block * sb, const char * function, struct ufs_super_block_first * usb1; va_list args; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; usb1 = ubh_get_usb_first(USPI_UBH); if (!(sb->s_flags & MS_RDONLY)) { @@ -317,6 +317,7 @@ static int ufs_parse_options (char * options, unsigned * mount_options) * Read on-disk structures associated with cylinder groups */ int ufs_read_cylinder_structures (struct super_block * sb) { + struct ufs_sb_info * sbi = UFS_SB(sb); struct ufs_sb_private_info * uspi; struct ufs_buffer_head * ubh; unsigned char * base, * space; @@ -324,7 +325,7 @@ int ufs_read_cylinder_structures (struct super_block * sb) { UFSD(("ENTER\n")) - uspi = sb->u.ufs_sb.s_uspi; + uspi = sbi->s_uspi; /* * Read cs structures from (usually) first data block @@ -343,7 +344,7 @@ int ufs_read_cylinder_structures (struct super_block * sb) { if (!ubh) goto failed; ubh_ubhcpymem (space, ubh, size); - sb->u.ufs_sb.s_csp[ufs_fragstoblks(i)] = (struct ufs_csum *)space; + sbi->s_csp[ufs_fragstoblks(i)] = (struct ufs_csum *)space; space += size; ubh_brelse (ubh); ubh = NULL; @@ -353,41 +354,41 @@ int ufs_read_cylinder_structures (struct super_block * sb) { * Read cylinder group (we read only first fragment from block * at this time) and prepare internal data structures for cg caching. 
*/ - if (!(sb->u.ufs_sb.s_ucg = kmalloc (sizeof(struct buffer_head *) * uspi->s_ncg, GFP_KERNEL))) + if (!(sbi->s_ucg = kmalloc (sizeof(struct buffer_head *) * uspi->s_ncg, GFP_KERNEL))) goto failed; for (i = 0; i < uspi->s_ncg; i++) - sb->u.ufs_sb.s_ucg[i] = NULL; + sbi->s_ucg[i] = NULL; for (i = 0; i < UFS_MAX_GROUP_LOADED; i++) { - sb->u.ufs_sb.s_ucpi[i] = NULL; - sb->u.ufs_sb.s_cgno[i] = UFS_CGNO_EMPTY; + sbi->s_ucpi[i] = NULL; + sbi->s_cgno[i] = UFS_CGNO_EMPTY; } for (i = 0; i < uspi->s_ncg; i++) { UFSD(("read cg %u\n", i)) - if (!(sb->u.ufs_sb.s_ucg[i] = sb_bread(sb, ufs_cgcmin(i)))) + if (!(sbi->s_ucg[i] = sb_bread(sb, ufs_cgcmin(i)))) goto failed; - if (!ufs_cg_chkmagic (sb, (struct ufs_cylinder_group *) sb->u.ufs_sb.s_ucg[i]->b_data)) + if (!ufs_cg_chkmagic (sb, (struct ufs_cylinder_group *) sbi->s_ucg[i]->b_data)) goto failed; #ifdef UFS_SUPER_DEBUG_MORE - ufs_print_cylinder_stuff(sb, (struct ufs_cylinder_group *) sb->u.ufs_sb.s_ucg[i]->b_data); + ufs_print_cylinder_stuff(sb, (struct ufs_cylinder_group *) sbi->s_ucg[i]->b_data); #endif } for (i = 0; i < UFS_MAX_GROUP_LOADED; i++) { - if (!(sb->u.ufs_sb.s_ucpi[i] = kmalloc (sizeof(struct ufs_cg_private_info), GFP_KERNEL))) + if (!(sbi->s_ucpi[i] = kmalloc (sizeof(struct ufs_cg_private_info), GFP_KERNEL))) goto failed; - sb->u.ufs_sb.s_cgno[i] = UFS_CGNO_EMPTY; + sbi->s_cgno[i] = UFS_CGNO_EMPTY; } - sb->u.ufs_sb.s_cg_loaded = 0; + sbi->s_cg_loaded = 0; UFSD(("EXIT\n")) return 1; failed: if (base) kfree (base); - if (sb->u.ufs_sb.s_ucg) { + if (sbi->s_ucg) { for (i = 0; i < uspi->s_ncg; i++) - if (sb->u.ufs_sb.s_ucg[i]) brelse (sb->u.ufs_sb.s_ucg[i]); - kfree (sb->u.ufs_sb.s_ucg); + if (sbi->s_ucg[i]) brelse (sbi->s_ucg[i]); + kfree (sbi->s_ucg); for (i = 0; i < UFS_MAX_GROUP_LOADED; i++) - if (sb->u.ufs_sb.s_ucpi[i]) kfree (sb->u.ufs_sb.s_ucpi[i]); + if (sbi->s_ucpi[i]) kfree (sbi->s_ucpi[i]); } UFSD(("EXIT (FAILED)\n")) return 0; @@ -398,6 +399,7 @@ failed: * write them back to disk */ void 
ufs_put_cylinder_structures (struct super_block * sb) { + struct ufs_sb_info * sbi = UFS_SB(sb); struct ufs_sb_private_info * uspi; struct ufs_buffer_head * ubh; unsigned char * base, * space; @@ -405,11 +407,11 @@ void ufs_put_cylinder_structures (struct super_block * sb) { UFSD(("ENTER\n")) - uspi = sb->u.ufs_sb.s_uspi; + uspi = sbi->s_uspi; size = uspi->s_cssize; blks = (size + uspi->s_fsize - 1) >> uspi->s_fshift; - base = space = (char*) sb->u.ufs_sb.s_csp[0]; + base = space = (char*) sbi->s_csp[0]; for (i = 0; i < blks; i += uspi->s_fpb) { size = uspi->s_bsize; if (i + uspi->s_fpb > blks) @@ -421,21 +423,22 @@ void ufs_put_cylinder_structures (struct super_block * sb) { ubh_mark_buffer_dirty (ubh); ubh_brelse (ubh); } - for (i = 0; i < sb->u.ufs_sb.s_cg_loaded; i++) { + for (i = 0; i < sbi->s_cg_loaded; i++) { ufs_put_cylinder (sb, i); - kfree (sb->u.ufs_sb.s_ucpi[i]); + kfree (sbi->s_ucpi[i]); } for (; i < UFS_MAX_GROUP_LOADED; i++) - kfree (sb->u.ufs_sb.s_ucpi[i]); + kfree (sbi->s_ucpi[i]); for (i = 0; i < uspi->s_ncg; i++) - brelse (sb->u.ufs_sb.s_ucg[i]); - kfree (sb->u.ufs_sb.s_ucg); + brelse (sbi->s_ucg[i]); + kfree (sbi->s_ucg); kfree (base); UFSD(("EXIT\n")) } static int ufs_fill_super(struct super_block *sb, void *data, int silent) { + struct ufs_sb_info * sbi; struct ufs_sb_private_info * uspi; struct ufs_super_block_first * usb1; struct ufs_super_block_second * usb2; @@ -451,6 +454,12 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent) UFSD(("ENTER\n")) + sbi = kmalloc(sizeof(struct ufs_sb_info), GFP_KERNEL); + if (!sbi) + goto failed_nomem; + sb->u.generic_sbp = sbi; + memset(sbi, 0, sizeof(struct ufs_sb_info)); + UFSD(("flag %u\n", (int)(sb->s_flags & MS_RDONLY))) #ifndef CONFIG_UFS_FS_WRITE @@ -464,22 +473,22 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent) * Set default mount options * Parse mount options */ - sb->u.ufs_sb.s_mount_opt = 0; - ufs_set_opt (sb->u.ufs_sb.s_mount_opt, 
ONERROR_LOCK); - if (!ufs_parse_options ((char *) data, &sb->u.ufs_sb.s_mount_opt)) { + sbi->s_mount_opt = 0; + ufs_set_opt (sbi->s_mount_opt, ONERROR_LOCK); + if (!ufs_parse_options ((char *) data, &sbi->s_mount_opt)) { printk("wrong mount options\n"); goto failed; } - if (!(sb->u.ufs_sb.s_mount_opt & UFS_MOUNT_UFSTYPE)) { + if (!(sbi->s_mount_opt & UFS_MOUNT_UFSTYPE)) { printk("You didn't specify the type of your ufs filesystem\n\n" "mount -t ufs -o ufstype=" "sun|sunx86|44bsd|old|hp|nextstep|netxstep-cd|openstep ...\n\n" ">>>WARNING<<< Wrong ufstype may corrupt your filesystem, " "default is ufstype=old\n"); - ufs_set_opt (sb->u.ufs_sb.s_mount_opt, UFSTYPE_OLD); + ufs_set_opt (sbi->s_mount_opt, UFSTYPE_OLD); } - sb->u.ufs_sb.s_uspi = uspi = + sbi->s_uspi = uspi = kmalloc (sizeof(struct ufs_sb_private_info), GFP_KERNEL); if (!uspi) goto failed; @@ -488,7 +497,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent) this but as I don't know which I'll let those in the know loosen the rules */ - switch (sb->u.ufs_sb.s_mount_opt & UFS_MOUNT_UFSTYPE) { + switch (sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) { case UFS_MOUNT_UFSTYPE_44BSD: UFSD(("ufstype=44bsd\n")) uspi->s_fsize = block_size = 512; @@ -596,7 +605,10 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent) } again: - sb_set_blocksize(sb, block_size); + if (sb_set_blocksize(sb, block_size)) { + printk(KERN_ERR "UFS: failed to set blocksize\n"); + goto failed; + } /* * read ufs super block from device @@ -617,7 +629,7 @@ again: case UFS_MAGIC_LFN: case UFS_MAGIC_FEA: case UFS_MAGIC_4GB: - sb->u.ufs_sb.s_bytesex = BYTESEX_LE; + sbi->s_bytesex = BYTESEX_LE; goto magic_found; } switch (__constant_be32_to_cpu(usb3->fs_magic)) { @@ -625,13 +637,13 @@ again: case UFS_MAGIC_LFN: case UFS_MAGIC_FEA: case UFS_MAGIC_4GB: - sb->u.ufs_sb.s_bytesex = BYTESEX_BE; + sbi->s_bytesex = BYTESEX_BE; goto magic_found; } - if ((((sb->u.ufs_sb.s_mount_opt & UFS_MOUNT_UFSTYPE) == 
UFS_MOUNT_UFSTYPE_NEXTSTEP) - || ((sb->u.ufs_sb.s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_NEXTSTEP_CD) - || ((sb->u.ufs_sb.s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_OPENSTEP)) + if ((((sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_NEXTSTEP) + || ((sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_NEXTSTEP_CD) + || ((sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_OPENSTEP)) && uspi->s_sbbase < 256) { ubh_brelse_uspi(uspi); ubh = NULL; @@ -652,32 +664,32 @@ magic_found: uspi->s_fshift = fs32_to_cpu(sb, usb1->fs_fshift); if (uspi->s_fsize & (uspi->s_fsize - 1)) { - printk("ufs_read_super: fragment size %u is not a power of 2\n", + printk(KERN_ERR "ufs_read_super: fragment size %u is not a power of 2\n", uspi->s_fsize); goto failed; } if (uspi->s_fsize < 512) { - printk("ufs_read_super: fragment size %u is too small\n", + printk(KERN_ERR "ufs_read_super: fragment size %u is too small\n", uspi->s_fsize); goto failed; } if (uspi->s_fsize > 4096) { - printk("ufs_read_super: fragment size %u is too large\n", + printk(KERN_ERR "ufs_read_super: fragment size %u is too large\n", uspi->s_fsize); goto failed; } if (uspi->s_bsize & (uspi->s_bsize - 1)) { - printk("ufs_read_super: block size %u is not a power of 2\n", + printk(KERN_ERR "ufs_read_super: block size %u is not a power of 2\n", uspi->s_bsize); goto failed; } if (uspi->s_bsize < 4096) { - printk("ufs_read_super: block size %u is too small\n", + printk(KERN_ERR "ufs_read_super: block size %u is too small\n", uspi->s_bsize); goto failed; } if (uspi->s_bsize / uspi->s_fsize > 8) { - printk("ufs_read_super: too many fragments per block (%u)\n", + printk(KERN_ERR "ufs_read_super: too many fragments per block (%u)\n", uspi->s_bsize / uspi->s_fsize); goto failed; } @@ -801,12 +813,12 @@ magic_found: uspi->s_bpf = uspi->s_fsize << 3; uspi->s_bpfshift = uspi->s_fshift + 3; uspi->s_bpfmask = uspi->s_bpf - 1; - if ((sb->u.ufs_sb.s_mount_opt & UFS_MOUNT_UFSTYPE) == + if 
((sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_44BSD) uspi->s_maxsymlinklen = fs32_to_cpu(sb, usb3->fs_u2.fs_44.fs_maxsymlinklen); - sb->u.ufs_sb.s_flags = flags; + sbi->s_flags = flags; inode = iget(sb, UFS_ROOTINO); if (!inode || is_bad_inode(inode)) @@ -831,8 +843,14 @@ dalloc_failed: failed: if (ubh) ubh_brelse_uspi (uspi); if (uspi) kfree (uspi); + if (sbi) kfree(sbi); + sb->u.generic_sbp = NULL; UFSD(("EXIT (FAILED)\n")) return -EINVAL; + +failed_nomem: + UFSD(("EXIT (NOMEM)\n")) + return -ENOMEM; } void ufs_write_super (struct super_block * sb) { @@ -844,8 +862,8 @@ void ufs_write_super (struct super_block * sb) { lock_kernel(); UFSD(("ENTER\n")) - flags = sb->u.ufs_sb.s_flags; - uspi = sb->u.ufs_sb.s_uspi; + flags = UFS_SB(sb)->s_flags; + uspi = UFS_SB(sb)->s_uspi; usb1 = ubh_get_usb_first(USPI_UBH); usb3 = ubh_get_usb_third(USPI_UBH); @@ -864,17 +882,17 @@ void ufs_write_super (struct super_block * sb) { void ufs_put_super (struct super_block * sb) { - struct ufs_sb_private_info * uspi; + struct ufs_sb_info * sbi = UFS_SB(sb); UFSD(("ENTER\n")) - uspi = sb->u.ufs_sb.s_uspi; - if (!(sb->s_flags & MS_RDONLY)) ufs_put_cylinder_structures (sb); - ubh_brelse_uspi (uspi); - kfree (sb->u.ufs_sb.s_uspi); + ubh_brelse_uspi (sbi->s_uspi); + kfree (sbi->s_uspi); + kfree (sbi); + sb->u.generic_sbp = NULL; return; } @@ -887,8 +905,8 @@ int ufs_remount (struct super_block * sb, int * mount_flags, char * data) unsigned new_mount_opt, ufstype; unsigned flags; - uspi = sb->u.ufs_sb.s_uspi; - flags = sb->u.ufs_sb.s_flags; + uspi = UFS_SB(sb)->s_uspi; + flags = UFS_SB(sb)->s_flags; usb1 = ubh_get_usb_first(USPI_UBH); usb3 = ubh_get_usb_third(USPI_UBH); @@ -896,7 +914,7 @@ int ufs_remount (struct super_block * sb, int * mount_flags, char * data) * Allow the "check" option to be passed as a remount option. 
* It is not possible to change ufstype option during remount */ - ufstype = sb->u.ufs_sb.s_mount_opt & UFS_MOUNT_UFSTYPE; + ufstype = UFS_SB(sb)->s_mount_opt & UFS_MOUNT_UFSTYPE; new_mount_opt = 0; ufs_set_opt (new_mount_opt, ONERROR_LOCK); if (!ufs_parse_options (data, &new_mount_opt)) @@ -910,7 +928,7 @@ int ufs_remount (struct super_block * sb, int * mount_flags, char * data) } if ((*mount_flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) { - sb->u.ufs_sb.s_mount_opt = new_mount_opt; + UFS_SB(sb)->s_mount_opt = new_mount_opt; return 0; } @@ -950,7 +968,7 @@ int ufs_remount (struct super_block * sb, int * mount_flags, char * data) sb->s_flags &= ~MS_RDONLY; #endif } - sb->u.ufs_sb.s_mount_opt = new_mount_opt; + UFS_SB(sb)->s_mount_opt = new_mount_opt; return 0; } @@ -961,7 +979,7 @@ int ufs_statfs (struct super_block * sb, struct statfs * buf) lock_kernel(); - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; usb1 = ubh_get_usb_first (USPI_UBH); buf->f_type = UFS_MAGIC; diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h index 4fc781eba71f..a19000f8d6f4 100644 --- a/fs/ufs/swab.h +++ b/fs/ufs/swab.h @@ -25,7 +25,7 @@ enum { static __inline u64 fs64_to_cpu(struct super_block *sbp, u64 n) { - if (sbp->u.ufs_sb.s_bytesex == BYTESEX_LE) + if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE) return le64_to_cpu(n); else return be64_to_cpu(n); @@ -34,7 +34,7 @@ fs64_to_cpu(struct super_block *sbp, u64 n) static __inline u64 cpu_to_fs64(struct super_block *sbp, u64 n) { - if (sbp->u.ufs_sb.s_bytesex == BYTESEX_LE) + if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE) return cpu_to_le64(n); else return cpu_to_be64(n); @@ -43,7 +43,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n) static __inline u32 fs64_add(struct super_block *sbp, u32 *n, int d) { - if (sbp->u.ufs_sb.s_bytesex == BYTESEX_LE) + if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE) return *n = cpu_to_le64(le64_to_cpu(*n)+d); else return *n = cpu_to_be64(be64_to_cpu(*n)+d); @@ -52,7 +52,7 @@ fs64_add(struct super_block *sbp, u32 *n, int d) 
static __inline u32 fs64_sub(struct super_block *sbp, u32 *n, int d) { - if (sbp->u.ufs_sb.s_bytesex == BYTESEX_LE) + if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE) return *n = cpu_to_le64(le64_to_cpu(*n)-d); else return *n = cpu_to_be64(be64_to_cpu(*n)-d); @@ -61,7 +61,7 @@ fs64_sub(struct super_block *sbp, u32 *n, int d) static __inline u32 fs32_to_cpu(struct super_block *sbp, u32 n) { - if (sbp->u.ufs_sb.s_bytesex == BYTESEX_LE) + if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE) return le32_to_cpu(n); else return be32_to_cpu(n); @@ -70,7 +70,7 @@ fs32_to_cpu(struct super_block *sbp, u32 n) static __inline u32 cpu_to_fs32(struct super_block *sbp, u32 n) { - if (sbp->u.ufs_sb.s_bytesex == BYTESEX_LE) + if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE) return cpu_to_le32(n); else return cpu_to_be32(n); @@ -79,7 +79,7 @@ cpu_to_fs32(struct super_block *sbp, u32 n) static __inline u32 fs32_add(struct super_block *sbp, u32 *n, int d) { - if (sbp->u.ufs_sb.s_bytesex == BYTESEX_LE) + if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE) return *n = cpu_to_le32(le32_to_cpu(*n)+d); else return *n = cpu_to_be32(be32_to_cpu(*n)+d); @@ -88,7 +88,7 @@ fs32_add(struct super_block *sbp, u32 *n, int d) static __inline u32 fs32_sub(struct super_block *sbp, u32 *n, int d) { - if (sbp->u.ufs_sb.s_bytesex == BYTESEX_LE) + if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE) return *n = cpu_to_le32(le32_to_cpu(*n)-d); else return *n = cpu_to_be32(be32_to_cpu(*n)-d); @@ -97,7 +97,7 @@ fs32_sub(struct super_block *sbp, u32 *n, int d) static __inline u16 fs16_to_cpu(struct super_block *sbp, u16 n) { - if (sbp->u.ufs_sb.s_bytesex == BYTESEX_LE) + if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE) return le16_to_cpu(n); else return be16_to_cpu(n); @@ -106,7 +106,7 @@ fs16_to_cpu(struct super_block *sbp, u16 n) static __inline u16 cpu_to_fs16(struct super_block *sbp, u16 n) { - if (sbp->u.ufs_sb.s_bytesex == BYTESEX_LE) + if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE) return cpu_to_le16(n); else return cpu_to_be16(n); @@ -115,7 +115,7 @@ 
cpu_to_fs16(struct super_block *sbp, u16 n) static __inline u16 fs16_add(struct super_block *sbp, u16 *n, int d) { - if (sbp->u.ufs_sb.s_bytesex == BYTESEX_LE) + if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE) return *n = cpu_to_le16(le16_to_cpu(*n)+d); else return *n = cpu_to_be16(be16_to_cpu(*n)+d); @@ -124,7 +124,7 @@ fs16_add(struct super_block *sbp, u16 *n, int d) static __inline u16 fs16_sub(struct super_block *sbp, u16 *n, int d) { - if (sbp->u.ufs_sb.s_bytesex == BYTESEX_LE) + if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE) return *n = cpu_to_le16(le16_to_cpu(*n)-d); else return *n = cpu_to_be16(be16_to_cpu(*n)-d); diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c index 6b87c6f26702..636bdbdbf3ce 100644 --- a/fs/ufs/truncate.c +++ b/fs/ufs/truncate.c @@ -82,7 +82,7 @@ static int ufs_trunc_direct (struct inode * inode) UFSD(("ENTER\n")) sb = inode->i_sb; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; frag_to_free = 0; free_count = 0; @@ -212,7 +212,7 @@ static int ufs_trunc_indirect (struct inode * inode, unsigned offset, u32 * p) UFSD(("ENTER\n")) sb = inode->i_sb; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; frag_to_free = 0; free_count = 0; @@ -306,7 +306,7 @@ static int ufs_trunc_dindirect (struct inode * inode, unsigned offset, u32 * p) UFSD(("ENTER\n")) sb = inode->i_sb; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; dindirect_block = (DIRECT_BLOCK > offset) ? 
((DIRECT_BLOCK - offset) >> uspi->s_apbshift) : 0; @@ -374,7 +374,7 @@ static int ufs_trunc_tindirect (struct inode * inode) UFSD(("ENTER\n")) sb = inode->i_sb; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; retry = 0; tindirect_block = (DIRECT_BLOCK > (UFS_NDADDR + uspi->s_apb + uspi->s_2apb)) @@ -435,7 +435,7 @@ void ufs_truncate (struct inode * inode) UFSD(("ENTER\n")) sb = inode->i_sb; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) return; diff --git a/fs/ufs/util.h b/fs/ufs/util.h index 2ce89b83801c..426b26874f2e 100644 --- a/fs/ufs/util.h +++ b/fs/ufs/util.h @@ -31,7 +31,7 @@ static inline s32 ufs_get_fs_state(struct super_block *sb, struct ufs_super_block_first *usb1, struct ufs_super_block_third *usb3) { - switch (sb->u.ufs_sb.s_flags & UFS_ST_MASK) { + switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) { case UFS_ST_SUN: return fs32_to_cpu(sb, usb3->fs_u2.fs_sun.fs_state); case UFS_ST_SUNx86: @@ -46,7 +46,7 @@ static inline void ufs_set_fs_state(struct super_block *sb, struct ufs_super_block_first *usb1, struct ufs_super_block_third *usb3, s32 value) { - switch (sb->u.ufs_sb.s_flags & UFS_ST_MASK) { + switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) { case UFS_ST_SUN: usb3->fs_u2.fs_sun.fs_state = cpu_to_fs32(sb, value); break; @@ -63,7 +63,7 @@ static inline u32 ufs_get_fs_npsect(struct super_block *sb, struct ufs_super_block_first *usb1, struct ufs_super_block_third *usb3) { - if ((sb->u.ufs_sb.s_flags & UFS_ST_MASK) == UFS_ST_SUNx86) + if ((UFS_SB(sb)->s_flags & UFS_ST_MASK) == UFS_ST_SUNx86) return fs32_to_cpu(sb, usb3->fs_u2.fs_sunx86.fs_npsect); else return fs32_to_cpu(sb, usb1->fs_u1.fs_sun.fs_npsect); @@ -74,7 +74,7 @@ ufs_get_fs_qbmask(struct super_block *sb, struct ufs_super_block_third *usb3) { u64 tmp; - switch (sb->u.ufs_sb.s_flags & UFS_ST_MASK) { + switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) { case UFS_ST_SUN: ((u32 *)&tmp)[0] = 
usb3->fs_u2.fs_sun.fs_qbmask[0]; ((u32 *)&tmp)[1] = usb3->fs_u2.fs_sun.fs_qbmask[1]; @@ -97,7 +97,7 @@ ufs_get_fs_qfmask(struct super_block *sb, struct ufs_super_block_third *usb3) { u64 tmp; - switch (sb->u.ufs_sb.s_flags & UFS_ST_MASK) { + switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) { case UFS_ST_SUN: ((u32 *)&tmp)[0] = usb3->fs_u2.fs_sun.fs_qfmask[0]; ((u32 *)&tmp)[1] = usb3->fs_u2.fs_sun.fs_qfmask[1]; @@ -118,7 +118,7 @@ ufs_get_fs_qfmask(struct super_block *sb, struct ufs_super_block_third *usb3) static inline u16 ufs_get_de_namlen(struct super_block *sb, struct ufs_dir_entry *de) { - if ((sb->u.ufs_sb.s_flags & UFS_DE_MASK) == UFS_DE_OLD) + if ((UFS_SB(sb)->s_flags & UFS_DE_MASK) == UFS_DE_OLD) return fs16_to_cpu(sb, de->d_u.d_namlen); else return de->d_u.d_44.d_namlen; /* XXX this seems wrong */ @@ -127,7 +127,7 @@ ufs_get_de_namlen(struct super_block *sb, struct ufs_dir_entry *de) static inline void ufs_set_de_namlen(struct super_block *sb, struct ufs_dir_entry *de, u16 value) { - if ((sb->u.ufs_sb.s_flags & UFS_DE_MASK) == UFS_DE_OLD) + if ((UFS_SB(sb)->s_flags & UFS_DE_MASK) == UFS_DE_OLD) de->d_u.d_namlen = cpu_to_fs16(sb, value); else de->d_u.d_44.d_namlen = value; /* XXX this seems wrong */ @@ -136,7 +136,7 @@ ufs_set_de_namlen(struct super_block *sb, struct ufs_dir_entry *de, u16 value) static inline void ufs_set_de_type(struct super_block *sb, struct ufs_dir_entry *de, int mode) { - if ((sb->u.ufs_sb.s_flags & UFS_DE_MASK) != UFS_DE_44BSD) + if ((UFS_SB(sb)->s_flags & UFS_DE_MASK) != UFS_DE_44BSD) return; /* @@ -172,7 +172,7 @@ ufs_set_de_type(struct super_block *sb, struct ufs_dir_entry *de, int mode) static inline u32 ufs_get_inode_uid(struct super_block *sb, struct ufs_inode *inode) { - switch (sb->u.ufs_sb.s_flags & UFS_UID_MASK) { + switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) { case UFS_UID_EFT: return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_uid); case UFS_UID_44BSD: @@ -185,7 +185,7 @@ ufs_get_inode_uid(struct super_block *sb, struct ufs_inode 
*inode) static inline void ufs_set_inode_uid(struct super_block *sb, struct ufs_inode *inode, u32 value) { - switch (sb->u.ufs_sb.s_flags & UFS_UID_MASK) { + switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) { case UFS_UID_EFT: inode->ui_u3.ui_sun.ui_uid = cpu_to_fs32(sb, value); break; @@ -199,7 +199,7 @@ ufs_set_inode_uid(struct super_block *sb, struct ufs_inode *inode, u32 value) static inline u32 ufs_get_inode_gid(struct super_block *sb, struct ufs_inode *inode) { - switch (sb->u.ufs_sb.s_flags & UFS_UID_MASK) { + switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) { case UFS_UID_EFT: return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_gid); case UFS_UID_44BSD: @@ -212,7 +212,7 @@ ufs_get_inode_gid(struct super_block *sb, struct ufs_inode *inode) static inline void ufs_set_inode_gid(struct super_block *sb, struct ufs_inode *inode, u32 value) { - switch (sb->u.ufs_sb.s_flags & UFS_UID_MASK) { + switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) { case UFS_UID_EFT: inode->ui_u3.ui_sun.ui_gid = cpu_to_fs32(sb, value); break; @@ -481,7 +481,7 @@ static inline void ufs_fragacct (struct super_block * sb, unsigned blockmap, struct ufs_sb_private_info * uspi; unsigned fragsize, pos; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; fragsize = 0; for (pos = 0; pos < uspi->s_fpb; pos++) { diff --git a/include/linux/ufs_fs.h b/include/linux/ufs_fs.h index 7ba4e3e66e4e..dd9bc72d795e 100644 --- a/include/linux/ufs_fs.h +++ b/include/linux/ufs_fs.h @@ -33,6 +33,9 @@ #include #include +#include +#include + #define UFS_BBLOCK 0 #define UFS_BBSIZE 8192 #define UFS_SBLOCK 8192 @@ -398,7 +401,7 @@ struct ufs_super_block { * Convert cylinder group to base address of its global summary info. */ #define fs_cs(indx) \ - u.ufs_sb.s_csp[(indx) >> uspi->s_csshift][(indx) & ~uspi->s_csmask] + s_csp[(indx) >> uspi->s_csshift][(indx) & ~uspi->s_csmask] /* * Cylinder group block for a file system. 
@@ -780,7 +783,10 @@ extern struct inode_operations ufs_fast_symlink_inode_operations; /* truncate.c */ extern void ufs_truncate (struct inode *); -#include +static inline struct ufs_sb_info *UFS_SB(struct super_block *sb) +{ + return sb->u.generic_sbp; +} static inline struct ufs_inode_info *UFS_I(struct inode *inode) { -- cgit v1.2.3 From 7e2e73a7ab2520d658985ac0e24287f8a524171d Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Mon, 19 Aug 2002 18:10:58 -0700 Subject: [PATCH] struct superblock cleanups. Finally, this chunk removes the references to the UFS & ROMFS entries in struct superblock, leaving just ext3 and hpfs as the only remaining fs's to be fixed up. --- include/linux/fs.h | 4 ---- 1 file changed, 4 deletions(-) diff --git a/include/linux/fs.h b/include/linux/fs.h index ec0f6edac31b..f773053fdbc5 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -624,8 +624,6 @@ extern void __kill_fasync(struct fasync_struct *, int, int); #include #include -#include -#include extern struct list_head super_blocks; extern spinlock_t sb_lock; @@ -670,8 +668,6 @@ struct super_block { union { struct ext3_sb_info ext3_sb; struct hpfs_sb_info hpfs_sb; - struct ufs_sb_info ufs_sb; - struct romfs_sb_info romfs_sb; void *generic_sbp; } u; /* -- cgit v1.2.3 From 1edfa64279794d193f64339fc97d49d858824588 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 19 Aug 2002 18:15:30 -0700 Subject: [PATCH] O(1) sys_exit(), threading, scalable-exit-2.5.31-A6 This fixes the ptrace wait4() anomaly that can be observed in any previous Linux kernel i could get my hands at. If the parent still has other children (that are being traced by somebody), we wait for them or return immediately without an error in case of WNOHANG. 
--- kernel/exit.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/exit.c b/kernel/exit.c index f2390db88ab6..6526b6b94849 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -731,7 +731,7 @@ repeat: tsk = next_thread(tsk); } while (tsk != current); read_unlock(&tasklist_lock); - if (flag) { + if (flag || !list_empty(¤t->ptrace_children)) { retval = 0; if (options & WNOHANG) goto end_wait4; -- cgit v1.2.3 From e4039bb24e43c6c5d1a2b406d6c2a6191580e1fd Mon Sep 17 00:00:00 2001 From: Robert Love Date: Mon, 19 Aug 2002 22:23:02 -0700 Subject: [PATCH] spinlock.h cleanup - cleanup #defines: I do not follow the rationale behind the odd line-wrapped defines at the beginning of the file. If we have to use multiple lines, then we might as well do so cleanly and according to normal practice... - Remove a level of indirection: do not have spin_lock_foo use spin_lock - just explicitly call what is needed. - we do not need to define the spin_lock functions twice, once for CONFIG_PREEMPT and once for !CONFIG_PREEMPT. Defining them once with the preempt macros will optimize away fine. - cleanup preempt.h too - other misc. cleanup, improved comments, reordering, etc. --- include/linux/preempt.h | 18 ++-- include/linux/spinlock.h | 272 +++++++++++++++++++++++++++++++++-------------- 2 files changed, 201 insertions(+), 89 deletions(-) diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 3864d46eadba..b4ff1a7c881c 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -1,9 +1,14 @@ #ifndef __LINUX_PREEMPT_H #define __LINUX_PREEMPT_H +/* + * include/linux/preempt.h - macros for accessing and manipulating + * preempt_count (used for kernel preemption, interrupt count, etc.) 
+ */ + #include -#define preempt_count() (current_thread_info()->preempt_count) +#define preempt_count() (current_thread_info()->preempt_count) #define inc_preempt_count() \ do { \ @@ -31,17 +36,16 @@ do { \ barrier(); \ } while (0) -#define preempt_enable() \ +#define preempt_check_resched() \ do { \ - preempt_enable_no_resched(); \ if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \ preempt_schedule(); \ } while (0) -#define preempt_check_resched() \ +#define preempt_enable() \ do { \ - if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \ - preempt_schedule(); \ + preempt_enable_no_resched(); \ + preempt_check_resched(); \ } while (0) #define inc_preempt_count_non_preempt() do { } while (0) @@ -50,7 +54,7 @@ do { \ #else #define preempt_disable() do { } while (0) -#define preempt_enable_no_resched() do {} while(0) +#define preempt_enable_no_resched() do { } while (0) #define preempt_enable() do { } while (0) #define preempt_check_resched() do { } while (0) diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 749d3054b2dc..6de41e91171f 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -1,52 +1,23 @@ #ifndef __LINUX_SPINLOCK_H #define __LINUX_SPINLOCK_H +/* + * include/linux/spinlock.h - generic locking declarations + */ + #include #include #include #include #include #include +#include #include /* - * These are the generic versions of the spinlocks and read-write - * locks.. 
+ * Must define these before including other files, inline functions need them */ -#define spin_lock_irqsave(lock, flags) do { local_irq_save(flags); spin_lock(lock); } while (0) -#define spin_lock_irq(lock) do { local_irq_disable(); spin_lock(lock); } while (0) -#define spin_lock_bh(lock) do { local_bh_disable(); spin_lock(lock); } while (0) - -#define read_lock_irqsave(lock, flags) do { local_irq_save(flags); read_lock(lock); } while (0) -#define read_lock_irq(lock) do { local_irq_disable(); read_lock(lock); } while (0) -#define read_lock_bh(lock) do { local_bh_disable(); read_lock(lock); } while (0) - -#define write_lock_irqsave(lock, flags) do { local_irq_save(flags); write_lock(lock); } while (0) -#define write_lock_irq(lock) do { local_irq_disable(); write_lock(lock); } while (0) -#define write_lock_bh(lock) do { local_bh_disable(); write_lock(lock); } while (0) - -#define spin_unlock_irqrestore(lock, flags) do { _raw_spin_unlock(lock); local_irq_restore(flags); preempt_enable(); } while (0) -#define _raw_spin_unlock_irqrestore(lock, flags) do { _raw_spin_unlock(lock); local_irq_restore(flags); } while (0) -#define spin_unlock_irq(lock) do { _raw_spin_unlock(lock); local_irq_enable(); preempt_enable(); } while (0) -#define spin_unlock_bh(lock) do { spin_unlock(lock); local_bh_enable(); } while (0) - -#define read_unlock_irqrestore(lock, flags) do { _raw_read_unlock(lock); local_irq_restore(flags); preempt_enable(); } while (0) -#define read_unlock_irq(lock) do { _raw_read_unlock(lock); local_irq_enable(); preempt_enable(); } while (0) -#define read_unlock_bh(lock) do { read_unlock(lock); local_bh_enable(); } while (0) - -#define write_unlock_irqrestore(lock, flags) do { _raw_write_unlock(lock); local_irq_restore(flags); preempt_enable(); } while (0) -#define write_unlock_irq(lock) do { _raw_write_unlock(lock); local_irq_enable(); preempt_enable(); } while (0) -#define write_unlock_bh(lock) do { write_unlock(lock); local_bh_enable(); } while (0) -#define 
spin_trylock_bh(lock) ({ int __r; local_bh_disable();\ - __r = spin_trylock(lock); \ - if (!__r) local_bh_enable(); \ - __r; }) - -/* Must define these before including other files, inline functions need them */ - -#include - #define LOCK_SECTION_NAME \ ".text.lock." __stringify(KBUILD_BASENAME) @@ -60,11 +31,17 @@ #define LOCK_SECTION_END \ ".previous\n\t" +/* + * If CONFIG_SMP is set, pull in the _raw_* definitions + */ #ifdef CONFIG_SMP #include -#elif !defined(spin_lock_init) /* !SMP and spin_lock_init not previously - defined (e.g. by including asm/spinlock.h */ +/* + * !CONFIG_SMP and spin_lock_init not previously defined + * (e.g. by including include/asm/spinlock.h) + */ +#elif !defined(spin_lock_init) #ifndef CONFIG_PREEMPT # define atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic) @@ -72,55 +49,42 @@ #endif /* - * Your basic spinlocks, allowing only a single CPU anywhere - * - * Most gcc versions have a nasty bug with empty initializers. + * gcc versions before ~2.95 have a nasty bug with empty initializers. */ #if (__GNUC__ > 2) typedef struct { } spinlock_t; -# define SPIN_LOCK_UNLOCKED (spinlock_t) { } + typedef struct { } rwlock_t; + #define SPIN_LOCK_UNLOCKED (spinlock_t) { } + #define RW_LOCK_UNLOCKED (rwlock_t) { } #else typedef struct { int gcc_is_buggy; } spinlock_t; -# define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } + typedef struct { int gcc_is_buggy; } rwlock_t; + #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } + #define RW_LOCK_UNLOCKED (rwlock_t) { 0 } #endif +/* + * If CONFIG_SMP is unset, declare the _raw_* definitions as nops + */ #define spin_lock_init(lock) do { (void)(lock); } while(0) -#define _raw_spin_lock(lock) (void)(lock) /* Not "unused variable". 
*/ +#define _raw_spin_lock(lock) (void)(lock) #define spin_is_locked(lock) ((void)(lock), 0) #define _raw_spin_trylock(lock) ((void)(lock), 1) #define spin_unlock_wait(lock) do { (void)(lock); } while(0) #define _raw_spin_unlock(lock) do { (void)(lock); } while(0) - -/* - * Read-write spinlocks, allowing multiple readers - * but only one writer. - * - * NOTE! it is quite common to have readers in interrupts - * but no interrupt writers. For those circumstances we - * can "mix" irq-safe locks - any writer needs to get a - * irq-safe write-lock, but readers can get non-irqsafe - * read-locks. - * - * Most gcc versions have a nasty bug with empty initializers. - */ -#if (__GNUC__ > 2) - typedef struct { } rwlock_t; - #define RW_LOCK_UNLOCKED (rwlock_t) { } -#else - typedef struct { int gcc_is_buggy; } rwlock_t; - #define RW_LOCK_UNLOCKED (rwlock_t) { 0 } -#endif - #define rwlock_init(lock) do { } while(0) -#define _raw_read_lock(lock) (void)(lock) /* Not "unused variable". */ +#define _raw_read_lock(lock) (void)(lock) #define _raw_read_unlock(lock) do { } while(0) -#define _raw_write_lock(lock) (void)(lock) /* Not "unused variable". */ +#define _raw_write_lock(lock) (void)(lock) #define _raw_write_unlock(lock) do { } while(0) #endif /* !SMP */ -#ifdef CONFIG_PREEMPT - +/* + * Define the various spin_lock and rw_lock methods. Note we define these + * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various + * methods are defined as nops in the case they are not required. + */ #define spin_lock(lock) \ do { \ preempt_disable(); \ @@ -129,31 +93,175 @@ do { \ #define spin_trylock(lock) ({preempt_disable(); _raw_spin_trylock(lock) ? 
\ 1 : ({preempt_enable(); 0;});}) + #define spin_unlock(lock) \ do { \ _raw_spin_unlock(lock); \ preempt_enable(); \ } while (0) -#define read_lock(lock) ({preempt_disable(); _raw_read_lock(lock);}) -#define read_unlock(lock) ({_raw_read_unlock(lock); preempt_enable();}) -#define write_lock(lock) ({preempt_disable(); _raw_write_lock(lock);}) -#define write_unlock(lock) ({_raw_write_unlock(lock); preempt_enable();}) +#define read_lock(lock) \ +do { \ + preempt_disable(); \ + _raw_read_lock(lock); \ +} while(0) + +#define read_unlock(lock) \ +do { \ + _raw_read_unlock(lock); \ + preempt_enable(); \ +} while(0) + +#define write_lock(lock) \ +do { \ + preempt_disable(); \ + _raw_write_lock(lock); \ +} while(0) + +#define write_unlock(lock) \ +do { \ + _raw_write_unlock(lock); \ + preempt_enable(); \ +} while(0) + #define write_trylock(lock) ({preempt_disable();_raw_write_trylock(lock) ? \ 1 : ({preempt_enable(); 0;});}) -#else +#define spin_lock_irqsave(lock, flags) \ +do { \ + local_irq_save(flags); \ + preempt_disable(); \ + _raw_spin_lock(lock); \ +} while (0) -#define spin_lock(lock) _raw_spin_lock(lock) -#define spin_trylock(lock) _raw_spin_trylock(lock) -#define spin_unlock(lock) _raw_spin_unlock(lock) +#define spin_lock_irq(lock) \ +do { \ + local_irq_disable(); \ + preempt_disable(); \ + _raw_spin_lock(lock); \ +} while (0) -#define read_lock(lock) _raw_read_lock(lock) -#define read_unlock(lock) _raw_read_unlock(lock) -#define write_lock(lock) _raw_write_lock(lock) -#define write_unlock(lock) _raw_write_unlock(lock) -#define write_trylock(lock) _raw_write_trylock(lock) -#endif +#define spin_lock_bh(lock) \ +do { \ + local_bh_disable(); \ + preempt_disable(); \ + _raw_spin_lock(lock); \ +} while (0) + +#define read_lock_irqsave(lock, flags) \ +do { \ + local_irq_save(flags); \ + preempt_disable(); \ + _raw_read_lock(lock); \ +} while (0) + +#define read_lock_irq(lock) \ +do { \ + local_irq_disable(); \ + preempt_disable(); \ + _raw_read_lock(lock); \ +} while 
(0) + +#define read_lock_bh(lock) \ +do { \ + local_bh_disable(); \ + preempt_disable(); \ + _raw_read_lock(lock); \ +} while (0) + +#define write_lock_irqsave(lock, flags) \ +do { \ + local_irq_save(flags); \ + preempt_disable(); \ + _raw_write_lock(lock); \ +} while (0) + +#define write_lock_irq(lock) \ +do { \ + local_irq_disable(); \ + preempt_disable(); \ + _raw_write_lock(lock); \ +} while (0) + +#define write_lock_bh(lock) \ +do { \ + local_bh_disable(); \ + preempt_disable(); \ + _raw_write_lock(lock); \ +} while (0) + +#define spin_unlock_irqrestore(lock, flags) \ +do { \ + _raw_spin_unlock(lock); \ + local_irq_restore(flags); \ + preempt_enable(); \ +} while (0) + +#define _raw_spin_unlock_irqrestore(lock, flags) \ +do { \ + _raw_spin_unlock(lock); \ + local_irq_restore(flags); \ +} while (0) + +#define spin_unlock_irq(lock) \ +do { \ + _raw_spin_unlock(lock); \ + local_irq_enable(); \ + preempt_enable(); \ +} while (0) + +#define spin_unlock_bh(lock) \ +do { \ + _raw_spin_unlock(lock); \ + preempt_enable(); \ + local_bh_enable(); \ +} while (0) + +#define read_unlock_irqrestore(lock, flags) \ +do { \ + _raw_read_unlock(lock); \ + local_irq_restore(flags); \ + preempt_enable(); \ +} while (0) + +#define read_unlock_irq(lock) \ +do { \ + _raw_read_unlock(lock); \ + local_irq_enable(); \ + preempt_enable(); \ +} while (0) + +#define read_unlock_bh(lock) \ +do { \ + _raw_read_unlock(lock); \ + preempt_enable(); \ + local_bh_enable(); \ +} while (0) + +#define write_unlock_irqrestore(lock, flags) \ +do { \ + _raw_write_unlock(lock); \ + local_irq_restore(flags); \ + preempt_enable(); \ +} while (0) + +#define write_unlock_irq(lock) \ +do { \ + _raw_write_unlock(lock); \ + local_irq_enable(); \ + preempt_enable(); \ +} while (0) + +#define write_unlock_bh(lock) \ +do { \ + _raw_write_unlock(lock); \ + preempt_enable(); \ + local_bh_enable(); \ +} while (0) + +#define spin_trylock_bh(lock) ({ local_bh_disable(); preempt_disable(); \ + _raw_spin_trylock(lock) ? 
1 : \ + ({preempt_enable(); local_bh_enable(); 0;});}) /* "lock on reference count zero" */ #ifndef ATOMIC_DEC_AND_LOCK -- cgit v1.2.3 From d9a4ea27ed612fcf988e9c20fdf71310ce1bdea8 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 19 Aug 2002 22:24:14 -0700 Subject: [PATCH] Improve NFS READ reply sanity checking - Fix the check for whether or not the received message length has somehow been truncated: we need to use req->rq_received rather than the receive buffer length (req->rq_rlen). - Ensure that we set res->eof correctly. In particular, we need to clear it if we find ourselves attempting to recover from a truncated READ. - Don't set PageUptodate() on those pages that are the victim of message truncation. --- fs/nfs/nfs2xdr.c | 21 +++++++++++++++------ fs/nfs/nfs3xdr.c | 9 +++++++-- fs/nfs/read.c | 9 +++++++-- 3 files changed, 29 insertions(+), 10 deletions(-) diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c index 4883b923d5f1..e2272ca7e202 100644 --- a/fs/nfs/nfs2xdr.c +++ b/fs/nfs/nfs2xdr.c @@ -233,6 +233,7 @@ nfs_xdr_readargs(struct rpc_rqst *req, u32 *p, struct nfs_readargs *args) static int nfs_xdr_readres(struct rpc_rqst *req, u32 *p, struct nfs_readres *res) { + struct xdr_buf *rcvbuf = &req->rq_rcv_buf; struct iovec *iov = req->rq_rvec; int status, count, recvd, hdrlen; @@ -241,25 +242,33 @@ nfs_xdr_readres(struct rpc_rqst *req, u32 *p, struct nfs_readres *res) p = xdr_decode_fattr(p, res->fattr); count = ntohl(*p++); + res->eof = 0; + if (rcvbuf->page_len) { + u32 end = page_offset(rcvbuf->pages[0]) + rcvbuf->page_base + count; + if (end >= res->fattr->size) + res->eof = 1; + } hdrlen = (u8 *) p - (u8 *) iov->iov_base; - if (iov->iov_len > hdrlen) { + if (iov->iov_len < hdrlen) { + printk(KERN_WARNING "NFS: READ reply header overflowed:" + "length %d > %d\n", hdrlen, iov->iov_len); + return -errno_NFSERR_IO; + } else if (iov->iov_len != hdrlen) { dprintk("NFS: READ header is short. 
iovec will be shifted.\n"); xdr_shift_buf(&req->rq_rcv_buf, iov->iov_len - hdrlen); } - recvd = req->rq_rlen - hdrlen; + recvd = req->rq_received - hdrlen; if (count > recvd) { printk(KERN_WARNING "NFS: server cheating in read reply: " "count %d > recvd %d\n", count, recvd); count = recvd; + res->eof = 0; } dprintk("RPC: readres OK count %d\n", count); - if (count < res->count) { + if (count < res->count) res->count = count; - res->eof = 1; /* Silly NFSv3ism which can't be helped */ - } else - res->eof = 0; return count; } diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index 4ebce82ad8e0..2c72ae9f2361 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c @@ -793,16 +793,21 @@ nfs3_xdr_readres(struct rpc_rqst *req, u32 *p, struct nfs_readres *res) } hdrlen = (u8 *) p - (u8 *) iov->iov_base; - if (iov->iov_len > hdrlen) { + if (iov->iov_len < hdrlen) { + printk(KERN_WARNING "NFS: READ reply header overflowed:" + "length %d > %d\n", hdrlen, iov->iov_len); + return -errno_NFSERR_IO; + } else if (iov->iov_len != hdrlen) { dprintk("NFS: READ header is short. 
iovec will be shifted.\n"); xdr_shift_buf(&req->rq_rcv_buf, iov->iov_len - hdrlen); } - recvd = req->rq_rlen - hdrlen; + recvd = req->rq_received - hdrlen; if (count > recvd) { printk(KERN_WARNING "NFS: server cheating in read reply: " "count %d > recvd %d\n", count, recvd); count = recvd; + res->eof = 0; } if (count < res->count) diff --git a/fs/nfs/read.c b/fs/nfs/read.c index b6231a33cb39..8d5bbe64cb8d 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -424,9 +424,14 @@ nfs_readpage_result(struct rpc_task *task) memset(p + count, 0, PAGE_CACHE_SIZE - count); kunmap(page); count = 0; - } else + if (data->res.eof) + SetPageUptodate(page); + else + SetPageError(page); + } else { count -= PAGE_CACHE_SIZE; - SetPageUptodate(page); + SetPageUptodate(page); + } } else SetPageError(page); flush_dcache_page(page); -- cgit v1.2.3 From 29ceefc7a304497d15d8d3ab71179046c7464557 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 19 Aug 2002 22:24:20 -0700 Subject: [PATCH] Improve READDIR/READDIRPLUS sanity checking.. - Use req->rq_received to determine the message length instead of assuming that it goes to the end of the page. - If the server returned an illegal record so that we cannot make progress by retrying the request on a fresh page, truncate the entire listing and return a syslog error. 
--- fs/nfs/nfs2xdr.c | 26 ++++++++++++++++++++------ fs/nfs/nfs3xdr.c | 27 ++++++++++++++++++++------- 2 files changed, 40 insertions(+), 13 deletions(-) diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c index e2272ca7e202..09d6baa1d99e 100644 --- a/fs/nfs/nfs2xdr.c +++ b/fs/nfs/nfs2xdr.c @@ -393,7 +393,7 @@ nfs_xdr_readdirres(struct rpc_rqst *req, u32 *p, void *dummy) struct xdr_buf *rcvbuf = &req->rq_rcv_buf; struct iovec *iov = rcvbuf->head; struct page **page; - int hdrlen; + int hdrlen, recvd; int status, nr; unsigned int len, pglen; u32 *end, *entry; @@ -402,17 +402,24 @@ nfs_xdr_readdirres(struct rpc_rqst *req, u32 *p, void *dummy) return -nfs_stat_to_errno(status); hdrlen = (u8 *) p - (u8 *) iov->iov_base; - if (iov->iov_len > hdrlen) { + if (iov->iov_len < hdrlen) { + printk(KERN_WARNING "NFS: READDIR reply header overflowed:" + "length %d > %d\n", hdrlen, iov->iov_len); + return -errno_NFSERR_IO; + } else if (iov->iov_len != hdrlen) { dprintk("NFS: READDIR header is short. iovec will be shifted.\n"); xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen); } pglen = rcvbuf->page_len; + recvd = req->rq_received - hdrlen; + if (pglen > recvd) + pglen = recvd; page = rcvbuf->pages; p = kmap(*page); end = (u32 *)((char *)p + pglen); + entry = p; for (nr = 0; *p++; nr++) { - entry = p - 1; if (p + 2 > end) goto short_pkt; p++; /* fileid */ @@ -425,14 +432,21 @@ nfs_xdr_readdirres(struct rpc_rqst *req, u32 *p, void *dummy) } if (p + 2 > end) goto short_pkt; + entry = p; } + if (!nr) + goto short_pkt; + out: kunmap(*page); return nr; short_pkt: - printk(KERN_NOTICE "NFS: short packet in readdir reply!\n"); entry[0] = entry[1] = 0; - kunmap(*page); - return nr; + /* truncate listing ? 
*/ + if (!nr) { + printk(KERN_NOTICE "NFS: readdir reply truncated!\n"); + entry[1] = 1; + } + goto out; err_unmap: kunmap(*page); return -errno_NFSERR_IO; diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index 2c72ae9f2361..0efc06b7cc14 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c @@ -504,7 +504,7 @@ nfs3_xdr_readdirres(struct rpc_rqst *req, u32 *p, struct nfs3_readdirres *res) struct xdr_buf *rcvbuf = &req->rq_rcv_buf; struct iovec *iov = rcvbuf->head; struct page **page; - int hdrlen; + int hdrlen, recvd; int status, nr; unsigned int len, pglen; u32 *entry, *end; @@ -523,17 +523,24 @@ nfs3_xdr_readdirres(struct rpc_rqst *req, u32 *p, struct nfs3_readdirres *res) } hdrlen = (u8 *) p - (u8 *) iov->iov_base; - if (iov->iov_len > hdrlen) { + if (iov->iov_len < hdrlen) { + printk(KERN_WARNING "NFS: READDIR reply header overflowed:" + "length %d > %d\n", hdrlen, iov->iov_len); + return -errno_NFSERR_IO; + } else if (iov->iov_len != hdrlen) { dprintk("NFS: READDIR header is short. iovec will be shifted.\n"); xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen); } pglen = rcvbuf->page_len; + recvd = req->rq_received - hdrlen; + if (pglen > recvd) + pglen = recvd; page = rcvbuf->pages; p = kmap(*page); end = (u32 *)((char *)p + pglen); + entry = p; for (nr = 0; *p++; nr++) { - entry = p - 1; if (p + 3 > end) goto short_pkt; p += 2; /* inode # */ @@ -570,15 +577,21 @@ nfs3_xdr_readdirres(struct rpc_rqst *req, u32 *p, struct nfs3_readdirres *res) if (p + 2 > end) goto short_pkt; + entry = p; } + if (!nr) + goto short_pkt; + out: kunmap(*page); return nr; short_pkt: - printk(KERN_NOTICE "NFS: short packet in readdir reply!\n"); - /* truncate listing */ entry[0] = entry[1] = 0; - kunmap(*page); - return nr; + /* truncate listing ? 
*/ + if (!nr) { + printk(KERN_NOTICE "NFS: readdir reply truncated!\n"); + entry[1] = 1; + } + goto out; err_unmap: kunmap(*page); return -errno_NFSERR_IO; -- cgit v1.2.3 From e3adef999631dc7067fe4c34d86b8819d5a76e97 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Tue, 20 Aug 2002 00:01:08 -0700 Subject: Move x86 big-kernel-lock implementation into , since it was generic. Remove all architecture-specific files. --- include/asm-alpha/smplock.h | 56 ------------------------- include/asm-arm/smplock.h | 60 --------------------------- include/asm-cris/smplock.h | 25 ------------ include/asm-generic/smplock.h | 50 ----------------------- include/asm-i386/smplock.h | 58 -------------------------- include/asm-ia64/smplock.h | 58 -------------------------- include/asm-m68k/smplock.h | 51 ----------------------- include/asm-mips/smplock.h | 54 ------------------------ include/asm-mips64/smplock.h | 56 ------------------------- include/asm-parisc/smplock.h | 49 ---------------------- include/asm-ppc/smplock.h | 68 ------------------------------- include/asm-ppc64/smplock.h | 55 ------------------------- include/asm-s390/smplock.h | 62 ---------------------------- include/asm-s390x/smplock.h | 62 ---------------------------- include/asm-sh/smplock.h | 23 ----------- include/asm-sparc/smplock.h | 55 ------------------------- include/asm-sparc64/smplock.h | 60 --------------------------- include/asm-x86_64/smplock.h | 95 ------------------------------------------- include/linux/smp_lock.h | 54 +++++++++++++++++++++++- 19 files changed, 53 insertions(+), 998 deletions(-) delete mode 100644 include/asm-alpha/smplock.h delete mode 100644 include/asm-arm/smplock.h delete mode 100644 include/asm-cris/smplock.h delete mode 100644 include/asm-generic/smplock.h delete mode 100644 include/asm-i386/smplock.h delete mode 100644 include/asm-ia64/smplock.h delete mode 100644 include/asm-m68k/smplock.h delete mode 100644 include/asm-mips/smplock.h delete mode 100644 
include/asm-mips64/smplock.h delete mode 100644 include/asm-parisc/smplock.h delete mode 100644 include/asm-ppc/smplock.h delete mode 100644 include/asm-ppc64/smplock.h delete mode 100644 include/asm-s390/smplock.h delete mode 100644 include/asm-s390x/smplock.h delete mode 100644 include/asm-sh/smplock.h delete mode 100644 include/asm-sparc/smplock.h delete mode 100644 include/asm-sparc64/smplock.h delete mode 100644 include/asm-x86_64/smplock.h diff --git a/include/asm-alpha/smplock.h b/include/asm-alpha/smplock.h deleted file mode 100644 index cfd36450cb24..000000000000 --- a/include/asm-alpha/smplock.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - * - * - * Default SMP lock implementation - */ - -#include -#include -#include - -extern spinlock_t kernel_flag; - -#define kernel_locked() spin_is_locked(&kernel_flag) - -/* - * Release global kernel lock and global interrupt lock - */ -static __inline__ void release_kernel_lock(struct task_struct *task) -{ - if (unlikely(task->lock_depth >= 0)) - spin_unlock(&kernel_flag); -} - -/* - * Re-acquire the kernel lock - */ -static __inline__ void reacquire_kernel_lock(struct task_struct *task) -{ - if (unlikely(task->lock_depth >= 0)) - spin_lock(&kernel_flag); -} - -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, - * so we only need to worry about other - * CPU's. 
- */ -static __inline__ void lock_kernel(void) -{ -#ifdef CONFIG_PREEMPT - if (current->lock_depth == -1) - spin_lock(&kernel_flag); - ++current->lock_depth; -#else - if (!++current->lock_depth) - spin_lock(&kernel_flag); -#endif -} - -static __inline__ void unlock_kernel(void) -{ - if (--current->lock_depth < 0) - spin_unlock(&kernel_flag); -} diff --git a/include/asm-arm/smplock.h b/include/asm-arm/smplock.h deleted file mode 100644 index 7b70d4629ad4..000000000000 --- a/include/asm-arm/smplock.h +++ /dev/null @@ -1,60 +0,0 @@ -/* - * - * - * Default SMP lock implementation - */ -#include -#include -#include - -extern spinlock_t kernel_flag; - -#ifdef CONFIG_PREEMPT -#define kernel_locked() preempt_get_count() -#else -#define kernel_locked() spin_is_locked(&kernel_flag) -#endif - -/* - * Release global kernel lock and global interrupt lock - */ -#define release_kernel_lock(task, cpu) \ -do { \ - if (unlikely(task->lock_depth >= 0)) \ - spin_unlock(&kernel_flag); \ -} while (0) - -/* - * Re-acquire the kernel lock - */ -#define reacquire_kernel_lock(task) \ -do { \ - if (unlikely(task->lock_depth >= 0)) \ - spin_lock(&kernel_flag); \ -} while (0) - - -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, - * so we only need to worry about other - * CPU's. - */ -static inline void lock_kernel(void) -{ -#ifdef CONFIG_PREEMPT - if (current->lock_depth == -1) - spin_lock(&kernel_flag); - ++current->lock_depth; -#else - if (!++current->lock_depth) - spin_lock(&kernel_flag); -#endif -} - -static inline void unlock_kernel(void) -{ - if (--current->lock_depth < 0) - spin_unlock(&kernel_flag); -} diff --git a/include/asm-cris/smplock.h b/include/asm-cris/smplock.h deleted file mode 100644 index 398562059dbd..000000000000 --- a/include/asm-cris/smplock.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef __ASM_CRIS_SMPLOCK_H -#define __ASM_CRIS_SMPLOCK_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. 
See the file "COPYING" in the main directory of this archive - * for more details. - */ - -#include - -#ifndef CONFIG_SMP - -#define lock_kernel() do { } while(0) -#define unlock_kernel() do { } while(0) -#define release_kernel_lock(task, cpu, depth) ((depth) = 1) -#define reacquire_kernel_lock(task, cpu, depth) do { } while(0) - -#else - -#error "We do not support SMP on CRIS" - -#endif - -#endif diff --git a/include/asm-generic/smplock.h b/include/asm-generic/smplock.h deleted file mode 100644 index f02afc9ffd6e..000000000000 --- a/include/asm-generic/smplock.h +++ /dev/null @@ -1,50 +0,0 @@ -/* - * - * - * Default SMP lock implementation - */ -#include -#include - -extern spinlock_t kernel_flag; - -#define kernel_locked() spin_is_locked(&kernel_flag) - -/* - * Release global kernel lock and global interrupt lock - */ -#define release_kernel_lock(task) \ -do { \ - if (task->lock_depth >= 0) \ - spin_unlock(&kernel_flag); \ - local_irq_enable(); \ -} while (0) - -/* - * Re-acquire the kernel lock - */ -#define reacquire_kernel_lock(task) \ -do { \ - if (task->lock_depth >= 0) \ - spin_lock(&kernel_flag); \ -} while (0) - - -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, - * so we only need to worry about other - * CPU's. 
- */ -extern __inline__ void lock_kernel(void) -{ - if (!++current->lock_depth) - spin_lock(&kernel_flag); -} - -extern __inline__ void unlock_kernel(void) -{ - if (--current->lock_depth < 0) - spin_unlock(&kernel_flag); -} diff --git a/include/asm-i386/smplock.h b/include/asm-i386/smplock.h deleted file mode 100644 index 2134982b9d93..000000000000 --- a/include/asm-i386/smplock.h +++ /dev/null @@ -1,58 +0,0 @@ -/* - * - * - * i386 SMP lock implementation - */ -#include -#include -#include -#include - -extern spinlock_t kernel_flag; - -#define kernel_locked() (current->lock_depth >= 0) - -#define get_kernel_lock() spin_lock(&kernel_flag) -#define put_kernel_lock() spin_unlock(&kernel_flag) - -/* - * Release global kernel lock and global interrupt lock - */ -#define release_kernel_lock(task) \ -do { \ - if (unlikely(task->lock_depth >= 0)) \ - put_kernel_lock(); \ -} while (0) - -/* - * Re-acquire the kernel lock - */ -#define reacquire_kernel_lock(task) \ -do { \ - if (unlikely(task->lock_depth >= 0)) \ - get_kernel_lock(); \ -} while (0) - - -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, - * so we only need to worry about other - * CPU's. 
- */ -static __inline__ void lock_kernel(void) -{ - int depth = current->lock_depth+1; - if (!depth) - get_kernel_lock(); - current->lock_depth = depth; -} - -static __inline__ void unlock_kernel(void) -{ - if (current->lock_depth < 0) - BUG(); - if (--current->lock_depth < 0) - put_kernel_lock(); -} diff --git a/include/asm-ia64/smplock.h b/include/asm-ia64/smplock.h deleted file mode 100644 index 103185f86e30..000000000000 --- a/include/asm-ia64/smplock.h +++ /dev/null @@ -1,58 +0,0 @@ -/* - * - * - * Default SMP lock implementation - */ -#include -#include -#include - -#include -#include - -extern spinlock_t kernel_flag; - -#ifdef CONFIG_SMP -# define kernel_locked() spin_is_locked(&kernel_flag) -#else -# define kernel_locked() (1) -#endif - -/* - * Release global kernel lock and global interrupt lock - */ -#define release_kernel_lock(task) \ -do { \ - if (unlikely(task->lock_depth >= 0)) \ - spin_unlock(&kernel_flag); \ -} while (0) - -/* - * Re-acquire the kernel lock - */ -#define reacquire_kernel_lock(task) \ -do { \ - if (unlikely(task->lock_depth >= 0)) \ - spin_lock(&kernel_flag); \ -} while (0) - -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, - * so we only need to worry about other - * CPU's. 
- */ -static __inline__ void -lock_kernel(void) -{ - if (!++current->lock_depth) - spin_lock(&kernel_flag); -} - -static __inline__ void -unlock_kernel(void) -{ - if (--current->lock_depth < 0) - spin_unlock(&kernel_flag); -} diff --git a/include/asm-m68k/smplock.h b/include/asm-m68k/smplock.h deleted file mode 100644 index 3e98a6afd154..000000000000 --- a/include/asm-m68k/smplock.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * - * - * Default SMP lock implementation - */ -#include -#include - -extern spinlock_t kernel_flag; - -#define kernel_locked() spin_is_locked(&kernel_flag) - -/* - * Release global kernel lock and global interrupt lock - */ -#define release_kernel_lock(task, cpu) \ -do { \ - if (task->lock_depth >= 0) \ - spin_unlock(&kernel_flag); \ - release_irqlock(cpu); \ - local_irq_enable(); \ -} while (0) - -/* - * Re-acquire the kernel lock - */ -#define reacquire_kernel_lock(task) \ -do { \ - if (task->lock_depth >= 0) \ - spin_lock(&kernel_flag); \ -} while (0) - - -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, - * so we only need to worry about other - * CPU's. - */ -extern __inline__ void lock_kernel(void) -{ - if (!++current->lock_depth) - spin_lock(&kernel_flag); -} - -extern __inline__ void unlock_kernel(void) -{ - if (--current->lock_depth < 0) - spin_unlock(&kernel_flag); -} diff --git a/include/asm-mips/smplock.h b/include/asm-mips/smplock.h deleted file mode 100644 index 43da07e41222..000000000000 --- a/include/asm-mips/smplock.h +++ /dev/null @@ -1,54 +0,0 @@ -/* $Id: smplock.h,v 1.2 1999/10/09 00:01:43 ralf Exp $ - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- * - * Default SMP lock implementation - */ -#include -#include - -extern spinlock_t kernel_flag; - -#define kernel_locked() spin_is_locked(&kernel_flag) - -/* - * Release global kernel lock and global interrupt lock - */ -#define release_kernel_lock(task, cpu) \ -do { \ - if (task->lock_depth >= 0) \ - spin_unlock(&kernel_flag); \ - release_irqlock(cpu); \ - local_irq_enable(); \ -} while (0) - -/* - * Re-acquire the kernel lock - */ -#define reacquire_kernel_lock(task) \ -do { \ - if (task->lock_depth >= 0) \ - spin_lock(&kernel_flag); \ -} while (0) - - -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, - * so we only need to worry about other - * CPU's. - */ -extern __inline__ void lock_kernel(void) -{ - if (!++current->lock_depth) - spin_lock(&kernel_flag); -} - -extern __inline__ void unlock_kernel(void) -{ - if (--current->lock_depth < 0) - spin_unlock(&kernel_flag); -} diff --git a/include/asm-mips64/smplock.h b/include/asm-mips64/smplock.h deleted file mode 100644 index 68345b04d68f..000000000000 --- a/include/asm-mips64/smplock.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - * - * - * Default SMP lock implementation - */ -#ifndef _ASM_SMPLOCK_H -#define _ASM_SMPLOCK_H - -#include -#include -#include - -extern spinlock_t kernel_flag; - -#define kernel_locked() spin_is_locked(&kernel_flag) - -/* - * Release global kernel lock and global interrupt lock - */ -static __inline__ void release_kernel_lock(struct task_struct *task, int cpu) -{ - if (task->lock_depth >= 0) - spin_unlock(&kernel_flag); - release_irqlock(cpu); - local_irq_enable(); -} - -/* - * Re-acquire the kernel lock - */ -static __inline__ void reacquire_kernel_lock(struct task_struct *task) -{ - if (task->lock_depth >= 0) - spin_lock(&kernel_flag); -} - -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, - * so we only need to worry about other - * CPU's. 
- */ -static __inline__ void lock_kernel(void) -{ - if (!++current->lock_depth) - spin_lock(&kernel_flag); -} - -static __inline__ void unlock_kernel(void) -{ - if (--current->lock_depth < 0) - spin_unlock(&kernel_flag); -} - -#endif /* _ASM_SMPLOCK_H */ diff --git a/include/asm-parisc/smplock.h b/include/asm-parisc/smplock.h deleted file mode 100644 index 06fb015d5cb9..000000000000 --- a/include/asm-parisc/smplock.h +++ /dev/null @@ -1,49 +0,0 @@ -/* - * - * - * Default SMP lock implementation - */ -#include -#include - -extern spinlock_t kernel_flag; - -/* - * Release global kernel lock and global interrupt lock - */ -#define release_kernel_lock(task, cpu) \ -do { \ - if (task->lock_depth >= 0) \ - spin_unlock(&kernel_flag); \ - release_irqlock(cpu); \ - local_irq_enable(); \ -} while (0) - -/* - * Re-acquire the kernel lock - */ -#define reacquire_kernel_lock(task) \ -do { \ - if (task->lock_depth >= 0) \ - spin_lock(&kernel_flag); \ -} while (0) - - -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, - * so we only need to worry about other - * CPU's. 
- */ -extern __inline__ void lock_kernel(void) -{ - if (!++current->lock_depth) - spin_lock(&kernel_flag); -} - -extern __inline__ void unlock_kernel(void) -{ - if (--current->lock_depth < 0) - spin_unlock(&kernel_flag); -} diff --git a/include/asm-ppc/smplock.h b/include/asm-ppc/smplock.h deleted file mode 100644 index 8e8ec92af714..000000000000 --- a/include/asm-ppc/smplock.h +++ /dev/null @@ -1,68 +0,0 @@ -/* - * BK Id: %F% %I% %G% %U% %#% - */ -/* - * - * - * Default SMP lock implementation - */ -#ifdef __KERNEL__ -#ifndef __ASM_SMPLOCK_H__ -#define __ASM_SMPLOCK_H__ - -#include -#include - -extern spinlock_t kernel_flag; - -#ifdef CONFIG_SMP -#define kernel_locked() spin_is_locked(&kernel_flag) -#elif defined(CONFIG_PREEMPT) -#define kernel_locked() preempt_count() -#endif - -/* - * Release global kernel lock and global interrupt lock - */ -#define release_kernel_lock(task) \ -do { \ - if (unlikely(task->lock_depth >= 0)) \ - spin_unlock(&kernel_flag); \ -} while (0) - -/* - * Re-acquire the kernel lock - */ -#define reacquire_kernel_lock(task) \ -do { \ - if (unlikely(task->lock_depth >= 0)) \ - spin_lock(&kernel_flag); \ -} while (0) - - -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, - * so we only need to worry about other - * CPU's. 
- */ -static __inline__ void lock_kernel(void) -{ -#ifdef CONFIG_PREEMPT - if (current->lock_depth == -1) - spin_lock(&kernel_flag); - ++current->lock_depth; -#else - if (!++current->lock_depth) - spin_lock(&kernel_flag); -#endif /* CONFIG_PREEMPT */ -} - -static __inline__ void unlock_kernel(void) -{ - if (--current->lock_depth < 0) - spin_unlock(&kernel_flag); -} -#endif /* __ASM_SMPLOCK_H__ */ -#endif /* __KERNEL__ */ diff --git a/include/asm-ppc64/smplock.h b/include/asm-ppc64/smplock.h deleted file mode 100644 index 16b0b2f72b0c..000000000000 --- a/include/asm-ppc64/smplock.h +++ /dev/null @@ -1,55 +0,0 @@ -/* - * - * - * Default SMP lock implementation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ -#include -#include - -extern spinlock_t kernel_flag; - -#define kernel_locked() spin_is_locked(&kernel_flag) - -/* - * Release global kernel lock and global interrupt lock - */ -#define release_kernel_lock(task) \ -do { \ - if (unlikely(task->lock_depth >= 0)) \ - spin_unlock(&kernel_flag); \ -} while (0) - -/* - * Re-acquire the kernel lock - */ -#define reacquire_kernel_lock(task) \ -do { \ - if (unlikely(task->lock_depth >= 0)) \ - spin_lock(&kernel_flag); \ -} while (0) - -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, - * so we only need to worry about other - * CPU's. 
- */ -static __inline__ void lock_kernel(void) -{ - if (!++current->lock_depth) - spin_lock(&kernel_flag); -} - -static __inline__ void unlock_kernel(void) -{ - if (current->lock_depth < 0) - BUG(); - if (--current->lock_depth < 0) - spin_unlock(&kernel_flag); -} diff --git a/include/asm-s390/smplock.h b/include/asm-s390/smplock.h deleted file mode 100644 index a12df4a3f882..000000000000 --- a/include/asm-s390/smplock.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - * include/asm-s390/smplock.h - * - * S390 version - * - * Derived from "include/asm-i386/smplock.h" - */ - -#include -#include - -extern spinlock_t kernel_flag; - -#define kernel_locked() spin_is_locked(&kernel_flag) - -/* - * Release global kernel lock and global interrupt lock - */ -#define release_kernel_lock(task, cpu) \ -do { \ - if (task->lock_depth >= 0) \ - spin_unlock(&kernel_flag); \ - release_irqlock(cpu); \ - local_irq_enable(); \ -} while (0) - -/* - * Re-acquire the kernel lock - */ -#define reacquire_kernel_lock(task) \ -do { \ - if (task->lock_depth >= 0) \ - spin_lock(&kernel_flag); \ -} while (0) - - -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, - * so we only need to worry about other - * CPU's. - */ -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, - * so we only need to worry about other - * CPU's. 
- */ -extern __inline__ void lock_kernel(void) -{ - if (!++current->lock_depth) - spin_lock(&kernel_flag); -} - -extern __inline__ void unlock_kernel(void) -{ - if (--current->lock_depth < 0) - spin_unlock(&kernel_flag); -} - diff --git a/include/asm-s390x/smplock.h b/include/asm-s390x/smplock.h deleted file mode 100644 index a12df4a3f882..000000000000 --- a/include/asm-s390x/smplock.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - * include/asm-s390/smplock.h - * - * S390 version - * - * Derived from "include/asm-i386/smplock.h" - */ - -#include -#include - -extern spinlock_t kernel_flag; - -#define kernel_locked() spin_is_locked(&kernel_flag) - -/* - * Release global kernel lock and global interrupt lock - */ -#define release_kernel_lock(task, cpu) \ -do { \ - if (task->lock_depth >= 0) \ - spin_unlock(&kernel_flag); \ - release_irqlock(cpu); \ - local_irq_enable(); \ -} while (0) - -/* - * Re-acquire the kernel lock - */ -#define reacquire_kernel_lock(task) \ -do { \ - if (task->lock_depth >= 0) \ - spin_lock(&kernel_flag); \ -} while (0) - - -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, - * so we only need to worry about other - * CPU's. - */ -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, - * so we only need to worry about other - * CPU's. - */ -extern __inline__ void lock_kernel(void) -{ - if (!++current->lock_depth) - spin_lock(&kernel_flag); -} - -extern __inline__ void unlock_kernel(void) -{ - if (--current->lock_depth < 0) - spin_unlock(&kernel_flag); -} - diff --git a/include/asm-sh/smplock.h b/include/asm-sh/smplock.h deleted file mode 100644 index 33499815d011..000000000000 --- a/include/asm-sh/smplock.h +++ /dev/null @@ -1,23 +0,0 @@ -#ifndef __ASM_SH_SMPLOCK_H -#define __ASM_SH_SMPLOCK_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- */ - -#include - -#ifndef CONFIG_SMP - -#define lock_kernel() do { } while(0) -#define unlock_kernel() do { } while(0) -#define release_kernel_lock(task, cpu, depth) ((depth) = 1) -#define reacquire_kernel_lock(task, cpu, depth) do { } while(0) - -#else -#error "We do not support SMP on SH" -#endif /* CONFIG_SMP */ - -#endif /* __ASM_SH_SMPLOCK_H */ diff --git a/include/asm-sparc/smplock.h b/include/asm-sparc/smplock.h deleted file mode 100644 index bd931bb5c511..000000000000 --- a/include/asm-sparc/smplock.h +++ /dev/null @@ -1,55 +0,0 @@ -/* - * - * - * Default SMP lock implementation - */ -#include -#include -#include - -extern spinlock_t kernel_flag; - -#define kernel_locked() \ - (spin_is_locked(&kernel_flag) &&\ - (current->lock_depth >= 0)) - -/* - * Release global kernel lock and global interrupt lock - */ -#define release_kernel_lock(task, cpu) \ -do { \ - if (unlikely(task->lock_depth >= 0)) { \ - spin_unlock(&kernel_flag); \ - release_irqlock(cpu); \ - local_irq_enable(); \ - } \ -} while (0) - -/* - * Re-acquire the kernel lock - */ -#define reacquire_kernel_lock(task) \ -do { \ - if (unlikely(task->lock_depth >= 0)) \ - spin_lock(&kernel_flag); \ -} while (0) - - -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, - * so we only need to worry about other - * CPU's. 
- */ -#define lock_kernel() \ -do { \ - if (!++current->lock_depth) \ - spin_lock(&kernel_flag); \ -} while(0) - -#define unlock_kernel() \ -do { \ - if (--current->lock_depth < 0) \ - spin_unlock(&kernel_flag); \ -} while(0) diff --git a/include/asm-sparc64/smplock.h b/include/asm-sparc64/smplock.h deleted file mode 100644 index b7edf0156893..000000000000 --- a/include/asm-sparc64/smplock.h +++ /dev/null @@ -1,60 +0,0 @@ -/* - * - * - * Default SMP lock implementation - */ -#include -#include -#include - -extern spinlock_t kernel_flag; - -#ifdef CONFIG_SMP -#define kernel_locked() \ - (spin_is_locked(&kernel_flag) &&\ - (current->lock_depth >= 0)) -#else -#ifdef CONFIG_PREEMPT -#define kernel_locked() preempt_get_count() -#else -#define kernel_locked() 1 -#endif -#endif - -/* - * Release global kernel lock and global interrupt lock - */ -#define release_kernel_lock(task) \ -do { \ - if (unlikely(task->lock_depth >= 0)) \ - spin_unlock(&kernel_flag); \ -} while (0) - -/* - * Re-acquire the kernel lock - */ -#define reacquire_kernel_lock(task) \ -do { \ - if (unlikely(task->lock_depth >= 0)) \ - spin_lock(&kernel_flag); \ -} while (0) - - -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, - * so we only need to worry about other - * CPU's. 
- */ -#define lock_kernel() \ -do { \ - if (!++current->lock_depth) \ - spin_lock(&kernel_flag); \ -} while(0) - -#define unlock_kernel() \ -do { \ - if (--current->lock_depth < 0) \ - spin_unlock(&kernel_flag); \ -} while(0) diff --git a/include/asm-x86_64/smplock.h b/include/asm-x86_64/smplock.h deleted file mode 100644 index 6c0b652a63a2..000000000000 --- a/include/asm-x86_64/smplock.h +++ /dev/null @@ -1,95 +0,0 @@ -/* - * - */ -#include -#include -#include -#include - -extern spinlock_t kernel_flag; - -#ifdef CONFIG_SMP -#define kernel_locked() spin_is_locked(&kernel_flag) -#define check_irq_holder(cpu) \ - if (global_irq_holder == (cpu)) \ - BUG(); -#else -#ifdef CONFIG_PREEMPT -#define kernel_locked() preempt_get_count() -#define global_irq_holder 0 -#define check_irq_holder(cpu) do {} while(0) -#else -#define kernel_locked() 1 -#define check_irq_holder(cpu) \ - if (global_irq_holder == (cpu)) \ - BUG(); -#endif -#endif - -/* - * Release global kernel lock and global interrupt lock - */ -#define release_kernel_lock(task, cpu) \ -do { \ - if (unlikely(task->lock_depth >= 0)) { \ - spin_unlock(&kernel_flag); \ - check_irq_holder(cpu); \ - } \ -} while (0) - -/* - * Re-acquire the kernel lock - */ -#define reacquire_kernel_lock(task) \ -do { \ - if (unlikely(task->lock_depth >= 0)) \ - spin_lock(&kernel_flag); \ -} while (0) - - -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, - * so we only need to worry about other - * CPU's. 
- */ -extern __inline__ void lock_kernel(void) -{ -#ifdef CONFIG_PREEMPT - if (current->lock_depth == -1) - spin_lock(&kernel_flag); - ++current->lock_depth; -#else -#if 1 - if (!++current->lock_depth) - spin_lock(&kernel_flag); -#else - __asm__ __volatile__( - "incl %1\n\t" - "jne 9f" - spin_lock_string - "\n9:" - :"=m" (__dummy_lock(&kernel_flag)), - "=m" (current->lock_depth)); -#endif -#endif -} - -extern __inline__ void unlock_kernel(void) -{ - if (current->lock_depth < 0) - BUG(); -#if 1 - if (--current->lock_depth < 0) - spin_unlock(&kernel_flag); -#else - __asm__ __volatile__( - "decl %1\n\t" - "jns 9f\n\t" - spin_unlock_string - "\n9:" - :"=m" (__dummy_lock(&kernel_flag)), - "=m" (current->lock_depth)); -#endif -} diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h index cfb23f363e61..40f5358fc856 100644 --- a/include/linux/smp_lock.h +++ b/include/linux/smp_lock.h @@ -13,7 +13,59 @@ #else -#include +#include +#include +#include +#include + +extern spinlock_t kernel_flag; + +#define kernel_locked() (current->lock_depth >= 0) + +#define get_kernel_lock() spin_lock(&kernel_flag) +#define put_kernel_lock() spin_unlock(&kernel_flag) + +/* + * Release global kernel lock and global interrupt lock + */ +#define release_kernel_lock(task) \ +do { \ + if (unlikely(task->lock_depth >= 0)) \ + put_kernel_lock(); \ +} while (0) + +/* + * Re-acquire the kernel lock + */ +#define reacquire_kernel_lock(task) \ +do { \ + if (unlikely(task->lock_depth >= 0)) \ + get_kernel_lock(); \ +} while (0) + + +/* + * Getting the big kernel lock. + * + * This cannot happen asynchronously, + * so we only need to worry about other + * CPU's. 
+ */ +static __inline__ void lock_kernel(void) +{ + int depth = current->lock_depth+1; + if (!depth) + get_kernel_lock(); + current->lock_depth = depth; +} + +static __inline__ void unlock_kernel(void) +{ + if (current->lock_depth < 0) + BUG(); + if (--current->lock_depth < 0) + put_kernel_lock(); +} #endif /* CONFIG_SMP */ -- cgit v1.2.3 From d17e9bb6daa227e88f596fd0918e3af20e423261 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Tue, 20 Aug 2002 00:09:57 -0700 Subject: Hmm.. It was never correct to directly include <asm/smplock.h>, but some files still did (and got the wrong results on UP). Since they didn't actually _use_ the BKL anyway, remove the include. --- drivers/ieee1394/ieee1394_core.c | 1 - mm/rmap.c | 1 - 2 files changed, 2 deletions(-) diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c index 347346599671..febc92067e94 100644 --- a/drivers/ieee1394/ieee1394_core.c +++ b/drivers/ieee1394/ieee1394_core.c @@ -22,7 +22,6 @@ #include #include #include -#include #include "ieee1394_types.h" #include "ieee1394.h" diff --git a/mm/rmap.c b/mm/rmap.c index 714395e12878..c39ca4425719 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -28,7 +28,6 @@ #include #include -#include #include #include -- cgit v1.2.3