From 6424ea03c9ac36098e59c89264494ee69818ed5c Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Mon, 19 Aug 2002 18:10:50 -0700 Subject: [PATCH] ROMFS superblock cleanup. This patch from Christoph Hellwig divorces ROMFS from the struct superblock union, as has been done to various other filesystems during 2.5. --- include/linux/romfs_fs_sb.h | 10 ---------- 1 file changed, 10 deletions(-) delete mode 100644 include/linux/romfs_fs_sb.h (limited to 'include/linux') diff --git a/include/linux/romfs_fs_sb.h b/include/linux/romfs_fs_sb.h deleted file mode 100644 index 02da2280a6df..000000000000 --- a/include/linux/romfs_fs_sb.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef __ROMFS_FS_SB -#define __ROMFS_FS_SB - -/* romfs superblock in-core data */ - -struct romfs_sb_info { - unsigned long s_maxsize; -}; - -#endif -- cgit v1.2.3 From 136b6223bfb668b502f6ef20e0106d27dbf9495c Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Mon, 19 Aug 2002 18:10:54 -0700 Subject: [PATCH] UFS superblock cleanup. This one from Brian Gerst separates UFS from the struct superblock union. --- fs/ufs/balloc.c | 48 ++++++++--------- fs/ufs/cylinder.c | 73 +++++++++++++------------- fs/ufs/dir.c | 8 +-- fs/ufs/ialloc.c | 20 +++---- fs/ufs/inode.c | 18 +++---- fs/ufs/namei.c | 2 +- fs/ufs/super.c | 138 ++++++++++++++++++++++++++++--------------------- fs/ufs/swab.h | 24 ++++----- fs/ufs/truncate.c | 10 ++-- fs/ufs/util.h | 26 +++++----- include/linux/ufs_fs.h | 10 +++- 11 files changed, 203 insertions(+), 174 deletions(-) (limited to 'include/linux') diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c index 921fd31d5236..8b622b001d46 100644 --- a/fs/ufs/balloc.c +++ b/fs/ufs/balloc.c @@ -47,7 +47,7 @@ void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count unsigned cgno, bit, end_bit, bbase, blkmap, i, blkno, cylno; sb = inode->i_sb; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; usb1 = ubh_get_usb_first(USPI_UBH); UFSD(("ENTER, fragment %u, count %u\n", fragment, count)) @@ -89,7 +89,7 @@ void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count fs32_add(sb, &ucg->cg_cs.cs_nffree, count); fs32_add(sb, &usb1->fs_cstotal.cs_nffree, count); - fs32_add(sb, &sb->fs_cs(cgno).cs_nffree, count); + fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count); blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase); ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1); @@ -100,12 +100,12 @@ void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) { fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb); fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, uspi->s_fpb); - fs32_sub(sb, &sb->fs_cs(cgno).cs_nffree, uspi->s_fpb); - if ((sb->u.ufs_sb.s_flags & UFS_CG_MASK) == UFS_CG_44BSD) + fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb); + if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) ufs_clusteracct (sb, ucpi, blkno, 1); fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1); fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1); - fs32_add(sb, &sb->fs_cs(cgno).cs_nbfree, 1); + fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1); cylno = ufs_cbtocylno (bbase); fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(bbase)), 1); fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1); @@ -141,7 +141,7 @@ void ufs_free_blocks (struct inode * inode, unsigned fragment, unsigned count) { unsigned overflow, cgno, bit, end_bit, blkno, i, cylno; sb = inode->i_sb; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; usb1 = ubh_get_usb_first(USPI_UBH);
UFSD(("ENTER, fragment %u, count %u\n", fragment, count)) @@ -184,13 +184,13 @@ do_more: ufs_error(sb, "ufs_free_blocks", "freeing free fragment"); } ubh_setblock(UCPI_UBH, ucpi->c_freeoff, blkno); - if ((sb->u.ufs_sb.s_flags & UFS_CG_MASK) == UFS_CG_44BSD) + if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) ufs_clusteracct (sb, ucpi, blkno, 1); DQUOT_FREE_BLOCK(inode, uspi->s_fpb); fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1); fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1); - fs32_add(sb, &sb->fs_cs(cgno).cs_nbfree, 1); + fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1); cylno = ufs_cbtocylno(i); fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(i)), 1); fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1); @@ -247,7 +247,7 @@ unsigned ufs_new_fragments (struct inode * inode, u32 * p, unsigned fragment, UFSD(("ENTER, ino %lu, fragment %u, goal %u, count %u\n", inode->i_ino, fragment, goal, count)) sb = inode->i_sb; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; usb1 = ubh_get_usb_first(USPI_UBH); *err = -ENOSPC; @@ -285,7 +285,7 @@ unsigned ufs_new_fragments (struct inode * inode, u32 * p, unsigned fragment, return 0; } } - + /* * There is not enough space for user on the device */ @@ -293,8 +293,8 @@ unsigned ufs_new_fragments (struct inode * inode, u32 * p, unsigned fragment, unlock_super (sb); UFSD(("EXIT (FAILED)\n")) return 0; - } - + } + if (goal >= uspi->s_size) goal = 0; if (goal == 0) @@ -407,12 +407,12 @@ unsigned ufs_add_fragments (struct inode * inode, unsigned fragment, UFSD(("ENTER, fragment %u, oldcount %u, newcount %u\n", fragment, oldcount, newcount)) sb = inode->i_sb; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; usb1 = ubh_get_usb_first (USPI_UBH); count = newcount - oldcount; cgno = ufs_dtog(fragment); - if (sb->fs_cs(cgno).cs_nffree < count) + if (UFS_SB(sb)->fs_cs(cgno).cs_nffree < count) return 0; if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb) return 0; @@ -453,7 +453,7 @@ unsigned ufs_add_fragments (struct inode * inode, unsigned fragment, } fs32_sub(sb, &ucg->cg_cs.cs_nffree, count); - fs32_sub(sb, &sb->fs_cs(cgno).cs_nffree, count); + fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count); fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count); ubh_mark_buffer_dirty (USPI_UBH); @@ -470,7 +470,7 @@ unsigned ufs_add_fragments (struct inode * inode, unsigned fragment, } #define UFS_TEST_FREE_SPACE_CG \ - ucg = (struct ufs_cylinder_group *) sb->u.ufs_sb.s_ucg[cgno]->b_data; \ + ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \ if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \ goto cg_found; \ for (k = count; k < uspi->s_fpb; k++) \ @@ -490,7 +490,7 @@ unsigned ufs_alloc_fragments (struct inode * inode, unsigned cgno, UFSD(("ENTER, ino %lu, cgno %u, goal %u, count %u\n", inode->i_ino, cgno, goal, count)) sb = inode->i_sb; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; usb1 = ubh_get_usb_first(USPI_UBH); oldcg = cgno; @@ -557,7 +557,7 @@ cg_found: fs32_add(sb, &ucg->cg_cs.cs_nffree, i); fs32_add(sb, &usb1->fs_cstotal.cs_nffree, i); - fs32_add(sb, &sb->fs_cs(cgno).cs_nffree, i); + fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i); fs32_add(sb, &ucg->cg_frsum[i], 1); goto succed; } @@ -574,7 +574,7 @@ cg_found: fs32_sub(sb, &ucg->cg_cs.cs_nffree, count); fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count); - fs32_sub(sb, &sb->fs_cs(cgno).cs_nffree, count); + fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count); fs32_sub(sb, &ucg->cg_frsum[allocsize], 1); if (count != allocsize) @@ -606,7 +606,7 @@ unsigned 
ufs_alloccg_block (struct inode * inode, UFSD(("ENTER, goal %u\n", goal)) sb = inode->i_sb; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; usb1 = ubh_get_usb_first(USPI_UBH); ucg = ubh_get_ucg(UCPI_UBH); @@ -633,7 +633,7 @@ norot: gotit: blkno = ufs_fragstoblks(result); ubh_clrblock (UCPI_UBH, ucpi->c_freeoff, blkno); - if ((sb->u.ufs_sb.s_flags & UFS_CG_MASK) == UFS_CG_44BSD) + if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) ufs_clusteracct (sb, ucpi, blkno, -1); if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) { *err = -EDQUOT; @@ -642,7 +642,7 @@ gotit: fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1); fs32_sub(sb, &usb1->fs_cstotal.cs_nbfree, 1); - fs32_sub(sb, &sb->fs_cs(ucpi->c_cgx).cs_nbfree, 1); + fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1); cylno = ufs_cbtocylno(result); fs16_sub(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(result)), 1); fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1); @@ -663,7 +663,7 @@ unsigned ufs_bitmap_search (struct super_block * sb, UFSD(("ENTER, cg %u, goal %u, count %u\n", ucpi->c_cgx, goal, count)) - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; usb1 = ubh_get_usb_first (USPI_UBH); ucg = ubh_get_ucg(UCPI_UBH); @@ -729,7 +729,7 @@ void ufs_clusteracct(struct super_block * sb, struct ufs_sb_private_info * uspi; int i, start, end, forw, back; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; if (uspi->s_contigsumsize <= 0) return; diff --git a/fs/ufs/cylinder.c b/fs/ufs/cylinder.c index daf11e4dcf66..105a695aad8f 100644 --- a/fs/ufs/cylinder.c +++ b/fs/ufs/cylinder.c @@ -36,26 +36,27 @@ static void ufs_read_cylinder (struct super_block * sb, unsigned cgno, unsigned bitmap_nr) { + struct ufs_sb_info * sbi = UFS_SB(sb); struct ufs_sb_private_info * uspi; struct ufs_cg_private_info * ucpi; struct ufs_cylinder_group * ucg; unsigned i, j; UFSD(("ENTER, cgno %u, bitmap_nr %u\n", cgno, bitmap_nr)) - uspi = sb->u.ufs_sb.s_uspi; - ucpi = sb->u.ufs_sb.s_ucpi[bitmap_nr]; - ucg = (struct ufs_cylinder_group *)sb->u.ufs_sb.s_ucg[cgno]->b_data; + uspi = sbi->s_uspi; + ucpi = sbi->s_ucpi[bitmap_nr]; + ucg = (struct ufs_cylinder_group *)sbi->s_ucg[cgno]->b_data; UCPI_UBH->fragment = ufs_cgcmin(cgno); UCPI_UBH->count = uspi->s_cgsize >> sb->s_blocksize_bits; /* * We have already the first fragment of cylinder group block in buffer */ - UCPI_UBH->bh[0] = sb->u.ufs_sb.s_ucg[cgno]; + UCPI_UBH->bh[0] = sbi->s_ucg[cgno]; for (i = 1; i < UCPI_UBH->count; i++) if (!(UCPI_UBH->bh[i] = sb_bread(sb, UCPI_UBH->fragment + i))) goto failed; - sb->u.ufs_sb.s_cgno[bitmap_nr] = cgno; + sbi->s_cgno[bitmap_nr] = cgno; ucpi->c_cgx = fs32_to_cpu(sb, ucg->cg_cgx); ucpi->c_ncyl = fs16_to_cpu(sb, ucg->cg_ncyl); @@ -77,8 +78,8 @@ static void ufs_read_cylinder (struct super_block * sb, failed: for (j = 1; j < i; j++) - brelse (sb->u.ufs_sb.s_ucg[j]); - sb->u.ufs_sb.s_cgno[bitmap_nr] = UFS_CGNO_EMPTY; + brelse (sbi->s_ucg[j]); + sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY; ufs_error (sb, "ufs_read_cylinder", "can't read cylinder group block %u", cgno); } @@ -88,6 +89,7 @@ failed: */ void ufs_put_cylinder (struct super_block * sb, unsigned bitmap_nr) { + struct ufs_sb_info * sbi = UFS_SB(sb); struct ufs_sb_private_info * uspi; struct ufs_cg_private_info * ucpi; struct ufs_cylinder_group * ucg; @@ -95,15 +97,15 @@ void ufs_put_cylinder (struct super_block * sb, unsigned bitmap_nr) UFSD(("ENTER, bitmap_nr %u\n", bitmap_nr)) - uspi = sb->u.ufs_sb.s_uspi; - if (sb->u.ufs_sb.s_cgno[bitmap_nr] == UFS_CGNO_EMPTY) { + uspi = sbi->s_uspi; + if (sbi->s_cgno[bitmap_nr] == 
UFS_CGNO_EMPTY) { UFSD(("EXIT\n")) return; } - ucpi = sb->u.ufs_sb.s_ucpi[bitmap_nr]; + ucpi = sbi->s_ucpi[bitmap_nr]; ucg = ubh_get_ucg(UCPI_UBH); - if (uspi->s_ncg > UFS_MAX_GROUP_LOADED && bitmap_nr >= sb->u.ufs_sb.s_cg_loaded) { + if (uspi->s_ncg > UFS_MAX_GROUP_LOADED && bitmap_nr >= sbi->s_cg_loaded) { ufs_panic (sb, "ufs_put_cylinder", "internal error"); return; } @@ -119,7 +121,7 @@ void ufs_put_cylinder (struct super_block * sb, unsigned bitmap_nr) brelse (UCPI_UBH->bh[i]); } - sb->u.ufs_sb.s_cgno[bitmap_nr] = UFS_CGNO_EMPTY; + sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY; UFSD(("EXIT\n")) } @@ -132,13 +134,14 @@ void ufs_put_cylinder (struct super_block * sb, unsigned bitmap_nr) struct ufs_cg_private_info * ufs_load_cylinder ( struct super_block * sb, unsigned cgno) { + struct ufs_sb_info * sbi = UFS_SB(sb); struct ufs_sb_private_info * uspi; struct ufs_cg_private_info * ucpi; unsigned cg, i, j; UFSD(("ENTER, cgno %u\n", cgno)) - uspi = sb->u.ufs_sb.s_uspi; + uspi = sbi->s_uspi; if (cgno >= uspi->s_ncg) { ufs_panic (sb, "ufs_load_cylinder", "internal error, high number of cg"); return NULL; @@ -146,61 +149,61 @@ struct ufs_cg_private_info * ufs_load_cylinder ( /* * Cylinder group number cg it in cache and it was last used */ - if (sb->u.ufs_sb.s_cgno[0] == cgno) { + if (sbi->s_cgno[0] == cgno) { UFSD(("EXIT\n")) - return sb->u.ufs_sb.s_ucpi[0]; + return sbi->s_ucpi[0]; } /* * Number of cylinder groups is not higher than UFS_MAX_GROUP_LOADED */ if (uspi->s_ncg <= UFS_MAX_GROUP_LOADED) { - if (sb->u.ufs_sb.s_cgno[cgno] != UFS_CGNO_EMPTY) { - if (sb->u.ufs_sb.s_cgno[cgno] != cgno) { + if (sbi->s_cgno[cgno] != UFS_CGNO_EMPTY) { + if (sbi->s_cgno[cgno] != cgno) { ufs_panic (sb, "ufs_load_cylinder", "internal error, wrong number of cg in cache"); UFSD(("EXIT (FAILED)\n")) return NULL; } else { UFSD(("EXIT\n")) - return sb->u.ufs_sb.s_ucpi[cgno]; + return sbi->s_ucpi[cgno]; } } else { ufs_read_cylinder (sb, cgno, cgno); UFSD(("EXIT\n")) - return sb->u.ufs_sb.s_ucpi[cgno]; + return sbi->s_ucpi[cgno]; } } /* * Cylinder group number cg is in cache but it was not last used, * we will move to the first position */ - for (i = 0; i < sb->u.ufs_sb.s_cg_loaded && sb->u.ufs_sb.s_cgno[i] != cgno; i++); - if (i < sb->u.ufs_sb.s_cg_loaded && sb->u.ufs_sb.s_cgno[i] == cgno) { - cg = sb->u.ufs_sb.s_cgno[i]; - ucpi = sb->u.ufs_sb.s_ucpi[i]; + for (i = 0; i < sbi->s_cg_loaded && sbi->s_cgno[i] != cgno; i++); + if (i < sbi->s_cg_loaded && sbi->s_cgno[i] == cgno) { + cg = sbi->s_cgno[i]; + ucpi = sbi->s_ucpi[i]; for (j = i; j > 0; j--) { - sb->u.ufs_sb.s_cgno[j] = sb->u.ufs_sb.s_cgno[j-1]; - sb->u.ufs_sb.s_ucpi[j] = sb->u.ufs_sb.s_ucpi[j-1]; + sbi->s_cgno[j] = sbi->s_cgno[j-1]; + sbi->s_ucpi[j] = sbi->s_ucpi[j-1]; } - sb->u.ufs_sb.s_cgno[0] = cg; - sb->u.ufs_sb.s_ucpi[0] = ucpi; + sbi->s_cgno[0] = cg; + sbi->s_ucpi[0] = ucpi; /* * Cylinder group number cg is not in cache, we will read it from disk * and put it to the first position */ } else { - if (sb->u.ufs_sb.s_cg_loaded < UFS_MAX_GROUP_LOADED) - sb->u.ufs_sb.s_cg_loaded++; + if (sbi->s_cg_loaded < UFS_MAX_GROUP_LOADED) + sbi->s_cg_loaded++; else ufs_put_cylinder (sb, UFS_MAX_GROUP_LOADED-1); - ucpi = sb->u.ufs_sb.s_ucpi[sb->u.ufs_sb.s_cg_loaded - 1]; - for (j = sb->u.ufs_sb.s_cg_loaded - 1; j > 0; j--) { - sb->u.ufs_sb.s_cgno[j] = sb->u.ufs_sb.s_cgno[j-1]; - sb->u.ufs_sb.s_ucpi[j] = sb->u.ufs_sb.s_ucpi[j-1]; + ucpi = sbi->s_ucpi[sbi->s_cg_loaded - 1]; + for (j = sbi->s_cg_loaded - 1; j > 0; j--) { + sbi->s_cgno[j] = sbi->s_cgno[j-1]; + sbi->s_ucpi[j] = 
sbi->s_ucpi[j-1]; } - sb->u.ufs_sb.s_ucpi[0] = ucpi; + sbi->s_ucpi[0] = ucpi; ufs_read_cylinder (sb, cgno, 0); } UFSD(("EXIT\n")) - return sb->u.ufs_sb.s_ucpi[0]; + return sbi->s_ucpi[0]; } diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c index 49764ea033de..7253a3c12780 100644 --- a/fs/ufs/dir.c +++ b/fs/ufs/dir.c @@ -67,7 +67,7 @@ ufs_readdir (struct file * filp, void * dirent, filldir_t filldir) lock_kernel(); sb = inode->i_sb; - flags = sb->u.ufs_sb.s_flags; + flags = UFS_SB(sb)->s_flags; UFSD(("ENTER, ino %lu f_pos %lu\n", inode->i_ino, (unsigned long) filp->f_pos)) @@ -308,8 +308,8 @@ int ufs_check_dir_entry (const char * function, struct inode * dir, error_msg = "reclen is too small for namlen"; else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize) error_msg = "directory entry across blocks"; - else if (fs32_to_cpu(sb, de->d_ino) > (sb->u.ufs_sb.s_uspi->s_ipg * - sb->u.ufs_sb.s_uspi->s_ncg)) + else if (fs32_to_cpu(sb, de->d_ino) > (UFS_SB(sb)->s_uspi->s_ipg * + UFS_SB(sb)->s_uspi->s_ncg)) error_msg = "inode out of bounds"; if (error_msg != NULL) @@ -386,7 +386,7 @@ int ufs_add_link(struct dentry *dentry, struct inode *inode) UFSD(("ENTER, name %s, namelen %u\n", name, namelen)) sb = dir->i_sb; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; if (!namelen) return -EINVAL; diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c index 302d09d27bf1..8c9596404f9a 100644 --- a/fs/ufs/ialloc.c +++ b/fs/ufs/ialloc.c @@ -71,7 +71,7 @@ void ufs_free_inode (struct inode * inode) UFSD(("ENTER, ino %lu\n", inode->i_ino)) sb = inode->i_sb; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; usb1 = ubh_get_usb_first(USPI_UBH); ino = inode->i_ino; @@ -112,12 +112,12 @@ void ufs_free_inode (struct inode * inode) ucpi->c_irotor = ino; fs32_add(sb, &ucg->cg_cs.cs_nifree, 1); fs32_add(sb, &usb1->fs_cstotal.cs_nifree, 1); - fs32_add(sb, &sb->fs_cs(cg).cs_nifree, 1); + fs32_add(sb, &UFS_SB(sb)->fs_cs(cg).cs_nifree, 1); if (is_directory) { fs32_sub(sb, &ucg->cg_cs.cs_ndir, 1); fs32_sub(sb, &usb1->fs_cstotal.cs_ndir, 1); - fs32_sub(sb, &sb->fs_cs(cg).cs_ndir, 1); + fs32_sub(sb, &UFS_SB(sb)->fs_cs(cg).cs_ndir, 1); } } @@ -146,6 +146,7 @@ void ufs_free_inode (struct inode * inode) struct inode * ufs_new_inode(struct inode * dir, int mode) { struct super_block * sb; + struct ufs_sb_info * sbi; struct ufs_sb_private_info * uspi; struct ufs_super_block_first * usb1; struct ufs_cg_private_info * ucpi; @@ -164,7 +165,8 @@ struct inode * ufs_new_inode(struct inode * dir, int mode) if (!inode) return ERR_PTR(-ENOMEM); ufsi = UFS_I(inode); - uspi = sb->u.ufs_sb.s_uspi; + sbi = UFS_SB(sb); + uspi = sbi->s_uspi; usb1 = ubh_get_usb_first(USPI_UBH); lock_super (sb); @@ -173,7 +175,7 @@ struct inode * ufs_new_inode(struct inode * dir, int mode) * Try to place the inode in its parent directory */ i = ufs_inotocg(dir->i_ino); - if (sb->fs_cs(i).cs_nifree) { + if (sbi->fs_cs(i).cs_nifree) { cg = i; goto cg_found; } @@ -185,7 +187,7 @@ struct inode * ufs_new_inode(struct inode * dir, int mode) i += j; if (i >= uspi->s_ncg) i -= uspi->s_ncg; - if (sb->fs_cs(i).cs_nifree) { + if (sbi->fs_cs(i).cs_nifree) { cg = i; goto cg_found; } @@ -199,7 +201,7 @@ struct inode * ufs_new_inode(struct inode * dir, int mode) i++; if (i >= uspi->s_ncg) i = 0; - if (sb->fs_cs(i).cs_nifree) { + if (sbi->fs_cs(i).cs_nifree) { cg = i; goto cg_found; } @@ -235,12 +237,12 @@ cg_found: fs32_sub(sb, &ucg->cg_cs.cs_nifree, 1); fs32_sub(sb, &usb1->fs_cstotal.cs_nifree, 1); - fs32_sub(sb, &sb->fs_cs(cg).cs_nifree, 1); + fs32_sub(sb, 
&sbi->fs_cs(cg).cs_nifree, 1); if (S_ISDIR(mode)) { fs32_add(sb, &ucg->cg_cs.cs_ndir, 1); fs32_add(sb, &usb1->fs_cstotal.cs_ndir, 1); - fs32_add(sb, &sb->fs_cs(cg).cs_ndir, 1); + fs32_add(sb, &sbi->fs_cs(cg).cs_ndir, 1); } ubh_mark_buffer_dirty (USPI_UBH); diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c index d740f00123cd..d406a3c62bc7 100644 --- a/fs/ufs/inode.c +++ b/fs/ufs/inode.c @@ -52,7 +52,7 @@ static int ufs_block_to_path(struct inode *inode, long i_block, int offsets[4]) { - struct ufs_sb_private_info *uspi = inode->i_sb->u.ufs_sb.s_uspi; + struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi; int ptrs = uspi->s_apb; int ptrs_bits = uspi->s_apbshift; const long direct_blocks = UFS_NDADDR, @@ -86,7 +86,7 @@ int ufs_frag_map(struct inode *inode, int frag) { struct ufs_inode_info *ufsi = UFS_I(inode); struct super_block *sb = inode->i_sb; - struct ufs_sb_private_info *uspi = sb->u.ufs_sb.s_uspi; + struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; int mask = uspi->s_apbmask>>uspi->s_fpbshift; int shift = uspi->s_apbshift-uspi->s_fpbshift; int offsets[4], *p; @@ -137,7 +137,7 @@ static struct buffer_head * ufs_inode_getfrag (struct inode *inode, inode->i_ino, fragment, new_fragment, required)) sb = inode->i_sb; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; block = ufs_fragstoblks (fragment); blockoff = ufs_fragnum (fragment); p = ufsi->i_u1.i_data + block; @@ -243,7 +243,7 @@ static struct buffer_head * ufs_block_getfrag (struct inode *inode, u32 * p; sb = inode->i_sb; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; block = ufs_fragstoblks (fragment); blockoff = ufs_fragnum (fragment); @@ -313,7 +313,7 @@ out: static int ufs_getfrag_block (struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create) { struct super_block * sb = inode->i_sb; - struct ufs_sb_private_info * uspi = sb->u.ufs_sb.s_uspi; + struct ufs_sb_private_info * uspi = UFS_SB(sb)->s_uspi; struct buffer_head * bh; int ret, err, new; unsigned long ptr, phys; @@ -483,8 +483,8 @@ void ufs_read_inode (struct inode * inode) UFSD(("ENTER, ino %lu\n", inode->i_ino)) sb = inode->i_sb; - uspi = sb->u.ufs_sb.s_uspi; - flags = sb->u.ufs_sb.s_flags; + uspi = UFS_SB(sb)->s_uspi; + flags = UFS_SB(sb)->s_flags; if (inode->i_ino < UFS_ROOTINO || inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) { @@ -579,8 +579,8 @@ static int ufs_update_inode(struct inode * inode, int do_sync) UFSD(("ENTER, ino %lu\n", inode->i_ino)) sb = inode->i_sb; - uspi = sb->u.ufs_sb.s_uspi; - flags = sb->u.ufs_sb.s_flags; + uspi = UFS_SB(sb)->s_uspi; + flags = UFS_SB(sb)->s_flags; if (inode->i_ino < UFS_ROOTINO || inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) { diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c index faea20eb2069..50f0e8c2705b 100644 --- a/fs/ufs/namei.c +++ b/fs/ufs/namei.c @@ -139,7 +139,7 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry, if (IS_ERR(inode)) goto out; - if (l > sb->u.ufs_sb.s_uspi->s_maxsymlinklen) { + if (l > UFS_SB(sb)->s_uspi->s_maxsymlinklen) { /* slow symlink */ inode->i_op = &page_symlink_inode_operations; inode->i_mapping->a_ops = &ufs_aops; diff --git a/fs/ufs/super.c b/fs/ufs/super.c index c60ae7e1fcde..eda1ea41f86a 100644 --- a/fs/ufs/super.c +++ b/fs/ufs/super.c @@ -189,7 +189,7 @@ void ufs_error (struct super_block * sb, const char * function, struct ufs_super_block_first * usb1; va_list args; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; usb1 = ubh_get_usb_first(USPI_UBH); if (!(sb->s_flags & MS_RDONLY)) { @@ -201,7 +201,7 @@ void 
ufs_error (struct super_block * sb, const char * function, va_start (args, fmt); vsprintf (error_buf, fmt, args); va_end (args); - switch (sb->u.ufs_sb.s_mount_opt & UFS_MOUNT_ONERROR) { + switch (UFS_SB(sb)->s_mount_opt & UFS_MOUNT_ONERROR) { case UFS_MOUNT_ONERROR_PANIC: panic ("UFS-fs panic (device %s): %s: %s\n", sb->s_id, function, error_buf); @@ -221,7 +221,7 @@ void ufs_panic (struct super_block * sb, const char * function, struct ufs_super_block_first * usb1; va_list args; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; usb1 = ubh_get_usb_first(USPI_UBH); if (!(sb->s_flags & MS_RDONLY)) { @@ -317,6 +317,7 @@ static int ufs_parse_options (char * options, unsigned * mount_options) * Read on-disk structures associated with cylinder groups */ int ufs_read_cylinder_structures (struct super_block * sb) { + struct ufs_sb_info * sbi = UFS_SB(sb); struct ufs_sb_private_info * uspi; struct ufs_buffer_head * ubh; unsigned char * base, * space; @@ -324,7 +325,7 @@ int ufs_read_cylinder_structures (struct super_block * sb) { UFSD(("ENTER\n")) - uspi = sb->u.ufs_sb.s_uspi; + uspi = sbi->s_uspi; /* * Read cs structures from (usually) first data block @@ -343,7 +344,7 @@ int ufs_read_cylinder_structures (struct super_block * sb) { if (!ubh) goto failed; ubh_ubhcpymem (space, ubh, size); - sb->u.ufs_sb.s_csp[ufs_fragstoblks(i)] = (struct ufs_csum *)space; + sbi->s_csp[ufs_fragstoblks(i)] = (struct ufs_csum *)space; space += size; ubh_brelse (ubh); ubh = NULL; @@ -353,41 +354,41 @@ int ufs_read_cylinder_structures (struct super_block * sb) { * Read cylinder group (we read only first fragment from block * at this time) and prepare internal data structures for cg caching. */ - if (!(sb->u.ufs_sb.s_ucg = kmalloc (sizeof(struct buffer_head *) * uspi->s_ncg, GFP_KERNEL))) + if (!(sbi->s_ucg = kmalloc (sizeof(struct buffer_head *) * uspi->s_ncg, GFP_KERNEL))) goto failed; for (i = 0; i < uspi->s_ncg; i++) - sb->u.ufs_sb.s_ucg[i] = NULL; + sbi->s_ucg[i] = NULL; for (i = 0; i < UFS_MAX_GROUP_LOADED; i++) { - sb->u.ufs_sb.s_ucpi[i] = NULL; - sb->u.ufs_sb.s_cgno[i] = UFS_CGNO_EMPTY; + sbi->s_ucpi[i] = NULL; + sbi->s_cgno[i] = UFS_CGNO_EMPTY; } for (i = 0; i < uspi->s_ncg; i++) { UFSD(("read cg %u\n", i)) - if (!(sb->u.ufs_sb.s_ucg[i] = sb_bread(sb, ufs_cgcmin(i)))) + if (!(sbi->s_ucg[i] = sb_bread(sb, ufs_cgcmin(i)))) goto failed; - if (!ufs_cg_chkmagic (sb, (struct ufs_cylinder_group *) sb->u.ufs_sb.s_ucg[i]->b_data)) + if (!ufs_cg_chkmagic (sb, (struct ufs_cylinder_group *) sbi->s_ucg[i]->b_data)) goto failed; #ifdef UFS_SUPER_DEBUG_MORE - ufs_print_cylinder_stuff(sb, (struct ufs_cylinder_group *) sb->u.ufs_sb.s_ucg[i]->b_data); + ufs_print_cylinder_stuff(sb, (struct ufs_cylinder_group *) sbi->s_ucg[i]->b_data); #endif } for (i = 0; i < UFS_MAX_GROUP_LOADED; i++) { - if (!(sb->u.ufs_sb.s_ucpi[i] = kmalloc (sizeof(struct ufs_cg_private_info), GFP_KERNEL))) + if (!(sbi->s_ucpi[i] = kmalloc (sizeof(struct ufs_cg_private_info), GFP_KERNEL))) goto failed; - sb->u.ufs_sb.s_cgno[i] = UFS_CGNO_EMPTY; + sbi->s_cgno[i] = UFS_CGNO_EMPTY; } - sb->u.ufs_sb.s_cg_loaded = 0; + sbi->s_cg_loaded = 0; UFSD(("EXIT\n")) return 1; failed: if (base) kfree (base); - if (sb->u.ufs_sb.s_ucg) { + if (sbi->s_ucg) { for (i = 0; i < uspi->s_ncg; i++) - if (sb->u.ufs_sb.s_ucg[i]) brelse (sb->u.ufs_sb.s_ucg[i]); - kfree (sb->u.ufs_sb.s_ucg); + if (sbi->s_ucg[i]) brelse (sbi->s_ucg[i]); + kfree (sbi->s_ucg); for (i = 0; i < UFS_MAX_GROUP_LOADED; i++) - if (sb->u.ufs_sb.s_ucpi[i]) kfree (sb->u.ufs_sb.s_ucpi[i]); + if 
(sbi->s_ucpi[i]) kfree (sbi->s_ucpi[i]); } UFSD(("EXIT (FAILED)\n")) return 0; @@ -398,6 +399,7 @@ failed: * write them back to disk */ void ufs_put_cylinder_structures (struct super_block * sb) { + struct ufs_sb_info * sbi = UFS_SB(sb); struct ufs_sb_private_info * uspi; struct ufs_buffer_head * ubh; unsigned char * base, * space; @@ -405,11 +407,11 @@ void ufs_put_cylinder_structures (struct super_block * sb) { UFSD(("ENTER\n")) - uspi = sb->u.ufs_sb.s_uspi; + uspi = sbi->s_uspi; size = uspi->s_cssize; blks = (size + uspi->s_fsize - 1) >> uspi->s_fshift; - base = space = (char*) sb->u.ufs_sb.s_csp[0]; + base = space = (char*) sbi->s_csp[0]; for (i = 0; i < blks; i += uspi->s_fpb) { size = uspi->s_bsize; if (i + uspi->s_fpb > blks) @@ -421,21 +423,22 @@ void ufs_put_cylinder_structures (struct super_block * sb) { ubh_mark_buffer_dirty (ubh); ubh_brelse (ubh); } - for (i = 0; i < sb->u.ufs_sb.s_cg_loaded; i++) { + for (i = 0; i < sbi->s_cg_loaded; i++) { ufs_put_cylinder (sb, i); - kfree (sb->u.ufs_sb.s_ucpi[i]); + kfree (sbi->s_ucpi[i]); } for (; i < UFS_MAX_GROUP_LOADED; i++) - kfree (sb->u.ufs_sb.s_ucpi[i]); + kfree (sbi->s_ucpi[i]); for (i = 0; i < uspi->s_ncg; i++) - brelse (sb->u.ufs_sb.s_ucg[i]); - kfree (sb->u.ufs_sb.s_ucg); + brelse (sbi->s_ucg[i]); + kfree (sbi->s_ucg); kfree (base); UFSD(("EXIT\n")) } static int ufs_fill_super(struct super_block *sb, void *data, int silent) { + struct ufs_sb_info * sbi; struct ufs_sb_private_info * uspi; struct ufs_super_block_first * usb1; struct ufs_super_block_second * usb2; @@ -451,6 +454,12 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent) UFSD(("ENTER\n")) + sbi = kmalloc(sizeof(struct ufs_sb_info), GFP_KERNEL); + if (!sbi) + goto failed_nomem; + sb->u.generic_sbp = sbi; + memset(sbi, 0, sizeof(struct ufs_sb_info)); + UFSD(("flag %u\n", (int)(sb->s_flags & MS_RDONLY))) #ifndef CONFIG_UFS_FS_WRITE @@ -464,22 +473,22 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent) * Set default mount options * Parse mount options */ - sb->u.ufs_sb.s_mount_opt = 0; - ufs_set_opt (sb->u.ufs_sb.s_mount_opt, ONERROR_LOCK); - if (!ufs_parse_options ((char *) data, &sb->u.ufs_sb.s_mount_opt)) { + sbi->s_mount_opt = 0; + ufs_set_opt (sbi->s_mount_opt, ONERROR_LOCK); + if (!ufs_parse_options ((char *) data, &sbi->s_mount_opt)) { printk("wrong mount options\n"); goto failed; } - if (!(sb->u.ufs_sb.s_mount_opt & UFS_MOUNT_UFSTYPE)) { + if (!(sbi->s_mount_opt & UFS_MOUNT_UFSTYPE)) { printk("You didn't specify the type of your ufs filesystem\n\n" "mount -t ufs -o ufstype=" "sun|sunx86|44bsd|old|hp|nextstep|netxstep-cd|openstep ...\n\n" ">>>WARNING<<< Wrong ufstype may corrupt your filesystem, " "default is ufstype=old\n"); - ufs_set_opt (sb->u.ufs_sb.s_mount_opt, UFSTYPE_OLD); + ufs_set_opt (sbi->s_mount_opt, UFSTYPE_OLD); } - sb->u.ufs_sb.s_uspi = uspi = + sbi->s_uspi = uspi = kmalloc (sizeof(struct ufs_sb_private_info), GFP_KERNEL); if (!uspi) goto failed; @@ -488,7 +497,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent) this but as I don't know which I'll let those in the know loosen the rules */ - switch (sb->u.ufs_sb.s_mount_opt & UFS_MOUNT_UFSTYPE) { + switch (sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) { case UFS_MOUNT_UFSTYPE_44BSD: UFSD(("ufstype=44bsd\n")) uspi->s_fsize = block_size = 512; @@ -596,7 +605,10 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent) } again: - sb_set_blocksize(sb, block_size); + if (sb_set_blocksize(sb, block_size)) { + 
printk(KERN_ERR "UFS: failed to set blocksize\n"); + goto failed; + } /* * read ufs super block from device @@ -617,7 +629,7 @@ again: case UFS_MAGIC_LFN: case UFS_MAGIC_FEA: case UFS_MAGIC_4GB: - sb->u.ufs_sb.s_bytesex = BYTESEX_LE; + sbi->s_bytesex = BYTESEX_LE; goto magic_found; } switch (__constant_be32_to_cpu(usb3->fs_magic)) { @@ -625,13 +637,13 @@ again: case UFS_MAGIC_LFN: case UFS_MAGIC_FEA: case UFS_MAGIC_4GB: - sb->u.ufs_sb.s_bytesex = BYTESEX_BE; + sbi->s_bytesex = BYTESEX_BE; goto magic_found; } - if ((((sb->u.ufs_sb.s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_NEXTSTEP) - || ((sb->u.ufs_sb.s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_NEXTSTEP_CD) - || ((sb->u.ufs_sb.s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_OPENSTEP)) + if ((((sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_NEXTSTEP) + || ((sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_NEXTSTEP_CD) + || ((sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_OPENSTEP)) && uspi->s_sbbase < 256) { ubh_brelse_uspi(uspi); ubh = NULL; @@ -652,32 +664,32 @@ magic_found: uspi->s_fshift = fs32_to_cpu(sb, usb1->fs_fshift); if (uspi->s_fsize & (uspi->s_fsize - 1)) { - printk("ufs_read_super: fragment size %u is not a power of 2\n", + printk(KERN_ERR "ufs_read_super: fragment size %u is not a power of 2\n", uspi->s_fsize); goto failed; } if (uspi->s_fsize < 512) { - printk("ufs_read_super: fragment size %u is too small\n", + printk(KERN_ERR "ufs_read_super: fragment size %u is too small\n", uspi->s_fsize); goto failed; } if (uspi->s_fsize > 4096) { - printk("ufs_read_super: fragment size %u is too large\n", + printk(KERN_ERR "ufs_read_super: fragment size %u is too large\n", uspi->s_fsize); goto failed; } if (uspi->s_bsize & (uspi->s_bsize - 1)) { - printk("ufs_read_super: block size %u is not a power of 2\n", + printk(KERN_ERR "ufs_read_super: block size %u is not a power of 2\n", uspi->s_bsize); goto failed; } if (uspi->s_bsize < 4096) { - printk("ufs_read_super: block size %u is too small\n", + printk(KERN_ERR "ufs_read_super: block size %u is too small\n", uspi->s_bsize); goto failed; } if (uspi->s_bsize / uspi->s_fsize > 8) { - printk("ufs_read_super: too many fragments per block (%u)\n", + printk(KERN_ERR "ufs_read_super: too many fragments per block (%u)\n", uspi->s_bsize / uspi->s_fsize); goto failed; } @@ -801,12 +813,12 @@ magic_found: uspi->s_bpf = uspi->s_fsize << 3; uspi->s_bpfshift = uspi->s_fshift + 3; uspi->s_bpfmask = uspi->s_bpf - 1; - if ((sb->u.ufs_sb.s_mount_opt & UFS_MOUNT_UFSTYPE) == + if ((sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_44BSD) uspi->s_maxsymlinklen = fs32_to_cpu(sb, usb3->fs_u2.fs_44.fs_maxsymlinklen); - sb->u.ufs_sb.s_flags = flags; + sbi->s_flags = flags; inode = iget(sb, UFS_ROOTINO); if (!inode || is_bad_inode(inode)) @@ -831,8 +843,14 @@ dalloc_failed: failed: if (ubh) ubh_brelse_uspi (uspi); if (uspi) kfree (uspi); + if (sbi) kfree(sbi); + sb->u.generic_sbp = NULL; UFSD(("EXIT (FAILED)\n")) return -EINVAL; + +failed_nomem: + UFSD(("EXIT (NOMEM)\n")) + return -ENOMEM; } void ufs_write_super (struct super_block * sb) { @@ -844,8 +862,8 @@ void ufs_write_super (struct super_block * sb) { lock_kernel(); UFSD(("ENTER\n")) - flags = sb->u.ufs_sb.s_flags; - uspi = sb->u.ufs_sb.s_uspi; + flags = UFS_SB(sb)->s_flags; + uspi = UFS_SB(sb)->s_uspi; usb1 = ubh_get_usb_first(USPI_UBH); usb3 = ubh_get_usb_third(USPI_UBH); @@ -864,17 +882,17 @@ void ufs_write_super (struct super_block * sb) { void ufs_put_super (struct super_block * sb) 
{ - struct ufs_sb_private_info * uspi; + struct ufs_sb_info * sbi = UFS_SB(sb); UFSD(("ENTER\n")) - uspi = sb->u.ufs_sb.s_uspi; - if (!(sb->s_flags & MS_RDONLY)) ufs_put_cylinder_structures (sb); - ubh_brelse_uspi (uspi); - kfree (sb->u.ufs_sb.s_uspi); + ubh_brelse_uspi (sbi->s_uspi); + kfree (sbi->s_uspi); + kfree (sbi); + sb->u.generic_sbp = NULL; return; } @@ -887,8 +905,8 @@ int ufs_remount (struct super_block * sb, int * mount_flags, char * data) unsigned new_mount_opt, ufstype; unsigned flags; - uspi = sb->u.ufs_sb.s_uspi; - flags = sb->u.ufs_sb.s_flags; + uspi = UFS_SB(sb)->s_uspi; + flags = UFS_SB(sb)->s_flags; usb1 = ubh_get_usb_first(USPI_UBH); usb3 = ubh_get_usb_third(USPI_UBH); @@ -896,7 +914,7 @@ int ufs_remount (struct super_block * sb, int * mount_flags, char * data) * Allow the "check" option to be passed as a remount option. * It is not possible to change ufstype option during remount */ - ufstype = sb->u.ufs_sb.s_mount_opt & UFS_MOUNT_UFSTYPE; + ufstype = UFS_SB(sb)->s_mount_opt & UFS_MOUNT_UFSTYPE; new_mount_opt = 0; ufs_set_opt (new_mount_opt, ONERROR_LOCK); if (!ufs_parse_options (data, &new_mount_opt)) @@ -910,7 +928,7 @@ int ufs_remount (struct super_block * sb, int * mount_flags, char * data) } if ((*mount_flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) { - sb->u.ufs_sb.s_mount_opt = new_mount_opt; + UFS_SB(sb)->s_mount_opt = new_mount_opt; return 0; } @@ -950,7 +968,7 @@ int ufs_remount (struct super_block * sb, int * mount_flags, char * data) sb->s_flags &= ~MS_RDONLY; #endif } - sb->u.ufs_sb.s_mount_opt = new_mount_opt; + UFS_SB(sb)->s_mount_opt = new_mount_opt; return 0; } @@ -961,7 +979,7 @@ int ufs_statfs (struct super_block * sb, struct statfs * buf) lock_kernel(); - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; usb1 = ubh_get_usb_first (USPI_UBH); buf->f_type = UFS_MAGIC; diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h index 4fc781eba71f..a19000f8d6f4 100644 --- a/fs/ufs/swab.h +++ b/fs/ufs/swab.h @@ -25,7 +25,7 @@ enum { static __inline u64 fs64_to_cpu(struct super_block *sbp, u64 n) { - if (sbp->u.ufs_sb.s_bytesex == BYTESEX_LE) + if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE) return le64_to_cpu(n); else return be64_to_cpu(n); @@ -34,7 +34,7 @@ fs64_to_cpu(struct super_block *sbp, u64 n) static __inline u64 cpu_to_fs64(struct super_block *sbp, u64 n) { - if (sbp->u.ufs_sb.s_bytesex == BYTESEX_LE) + if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE) return cpu_to_le64(n); else return cpu_to_be64(n); @@ -43,7 +43,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n) static __inline u32 fs64_add(struct super_block *sbp, u32 *n, int d) { - if (sbp->u.ufs_sb.s_bytesex == BYTESEX_LE) + if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE) return *n = cpu_to_le64(le64_to_cpu(*n)+d); else return *n = cpu_to_be64(be64_to_cpu(*n)+d); @@ -52,7 +52,7 @@ fs64_add(struct super_block *sbp, u32 *n, int d) static __inline u32 fs64_sub(struct super_block *sbp, u32 *n, int d) { - if (sbp->u.ufs_sb.s_bytesex == BYTESEX_LE) + if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE) return *n = cpu_to_le64(le64_to_cpu(*n)-d); else return *n = cpu_to_be64(be64_to_cpu(*n)-d); @@ -61,7 +61,7 @@ fs64_sub(struct super_block *sbp, u32 *n, int d) static __inline u32 fs32_to_cpu(struct super_block *sbp, u32 n) { - if (sbp->u.ufs_sb.s_bytesex == BYTESEX_LE) + if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE) return le32_to_cpu(n); else return be32_to_cpu(n); @@ -70,7 +70,7 @@ fs32_to_cpu(struct super_block *sbp, u32 n) static __inline u32 cpu_to_fs32(struct super_block *sbp, u32 n) { - if (sbp->u.ufs_sb.s_bytesex == BYTESEX_LE) + if 
(UFS_SB(sbp)->s_bytesex == BYTESEX_LE) return cpu_to_le32(n); else return cpu_to_be32(n); @@ -79,7 +79,7 @@ cpu_to_fs32(struct super_block *sbp, u32 n) static __inline u32 fs32_add(struct super_block *sbp, u32 *n, int d) { - if (sbp->u.ufs_sb.s_bytesex == BYTESEX_LE) + if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE) return *n = cpu_to_le32(le32_to_cpu(*n)+d); else return *n = cpu_to_be32(be32_to_cpu(*n)+d); @@ -88,7 +88,7 @@ fs32_add(struct super_block *sbp, u32 *n, int d) static __inline u32 fs32_sub(struct super_block *sbp, u32 *n, int d) { - if (sbp->u.ufs_sb.s_bytesex == BYTESEX_LE) + if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE) return *n = cpu_to_le32(le32_to_cpu(*n)-d); else return *n = cpu_to_be32(be32_to_cpu(*n)-d); @@ -97,7 +97,7 @@ fs32_sub(struct super_block *sbp, u32 *n, int d) static __inline u16 fs16_to_cpu(struct super_block *sbp, u16 n) { - if (sbp->u.ufs_sb.s_bytesex == BYTESEX_LE) + if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE) return le16_to_cpu(n); else return be16_to_cpu(n); @@ -106,7 +106,7 @@ fs16_to_cpu(struct super_block *sbp, u16 n) static __inline u16 cpu_to_fs16(struct super_block *sbp, u16 n) { - if (sbp->u.ufs_sb.s_bytesex == BYTESEX_LE) + if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE) return cpu_to_le16(n); else return cpu_to_be16(n); @@ -115,7 +115,7 @@ cpu_to_fs16(struct super_block *sbp, u16 n) static __inline u16 fs16_add(struct super_block *sbp, u16 *n, int d) { - if (sbp->u.ufs_sb.s_bytesex == BYTESEX_LE) + if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE) return *n = cpu_to_le16(le16_to_cpu(*n)+d); else return *n = cpu_to_be16(be16_to_cpu(*n)+d); @@ -124,7 +124,7 @@ fs16_add(struct super_block *sbp, u16 *n, int d) static __inline u16 fs16_sub(struct super_block *sbp, u16 *n, int d) { - if (sbp->u.ufs_sb.s_bytesex == BYTESEX_LE) + if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE) return *n = cpu_to_le16(le16_to_cpu(*n)-d); else return *n = cpu_to_be16(be16_to_cpu(*n)-d); diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c index 6b87c6f26702..636bdbdbf3ce 100644 --- a/fs/ufs/truncate.c +++ b/fs/ufs/truncate.c @@ -82,7 +82,7 @@ static int ufs_trunc_direct (struct inode * inode) UFSD(("ENTER\n")) sb = inode->i_sb; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; frag_to_free = 0; free_count = 0; @@ -212,7 +212,7 @@ static int ufs_trunc_indirect (struct inode * inode, unsigned offset, u32 * p) UFSD(("ENTER\n")) sb = inode->i_sb; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; frag_to_free = 0; free_count = 0; @@ -306,7 +306,7 @@ static int ufs_trunc_dindirect (struct inode * inode, unsigned offset, u32 * p) UFSD(("ENTER\n")) sb = inode->i_sb; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; dindirect_block = (DIRECT_BLOCK > offset) ? 
((DIRECT_BLOCK - offset) >> uspi->s_apbshift) : 0; @@ -374,7 +374,7 @@ static int ufs_trunc_tindirect (struct inode * inode) UFSD(("ENTER\n")) sb = inode->i_sb; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; retry = 0; tindirect_block = (DIRECT_BLOCK > (UFS_NDADDR + uspi->s_apb + uspi->s_2apb)) @@ -435,7 +435,7 @@ void ufs_truncate (struct inode * inode) UFSD(("ENTER\n")) sb = inode->i_sb; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) return; diff --git a/fs/ufs/util.h b/fs/ufs/util.h index 2ce89b83801c..426b26874f2e 100644 --- a/fs/ufs/util.h +++ b/fs/ufs/util.h @@ -31,7 +31,7 @@ static inline s32 ufs_get_fs_state(struct super_block *sb, struct ufs_super_block_first *usb1, struct ufs_super_block_third *usb3) { - switch (sb->u.ufs_sb.s_flags & UFS_ST_MASK) { + switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) { case UFS_ST_SUN: return fs32_to_cpu(sb, usb3->fs_u2.fs_sun.fs_state); case UFS_ST_SUNx86: @@ -46,7 +46,7 @@ static inline void ufs_set_fs_state(struct super_block *sb, struct ufs_super_block_first *usb1, struct ufs_super_block_third *usb3, s32 value) { - switch (sb->u.ufs_sb.s_flags & UFS_ST_MASK) { + switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) { case UFS_ST_SUN: usb3->fs_u2.fs_sun.fs_state = cpu_to_fs32(sb, value); break; @@ -63,7 +63,7 @@ static inline u32 ufs_get_fs_npsect(struct super_block *sb, struct ufs_super_block_first *usb1, struct ufs_super_block_third *usb3) { - if ((sb->u.ufs_sb.s_flags & UFS_ST_MASK) == UFS_ST_SUNx86) + if ((UFS_SB(sb)->s_flags & UFS_ST_MASK) == UFS_ST_SUNx86) return fs32_to_cpu(sb, usb3->fs_u2.fs_sunx86.fs_npsect); else return fs32_to_cpu(sb, usb1->fs_u1.fs_sun.fs_npsect); @@ -74,7 +74,7 @@ ufs_get_fs_qbmask(struct super_block *sb, struct ufs_super_block_third *usb3) { u64 tmp; - switch (sb->u.ufs_sb.s_flags & UFS_ST_MASK) { + switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) { case UFS_ST_SUN: ((u32 *)&tmp)[0] = usb3->fs_u2.fs_sun.fs_qbmask[0]; ((u32 *)&tmp)[1] = usb3->fs_u2.fs_sun.fs_qbmask[1]; @@ -97,7 +97,7 @@ ufs_get_fs_qfmask(struct super_block *sb, struct ufs_super_block_third *usb3) { u64 tmp; - switch (sb->u.ufs_sb.s_flags & UFS_ST_MASK) { + switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) { case UFS_ST_SUN: ((u32 *)&tmp)[0] = usb3->fs_u2.fs_sun.fs_qfmask[0]; ((u32 *)&tmp)[1] = usb3->fs_u2.fs_sun.fs_qfmask[1]; @@ -118,7 +118,7 @@ ufs_get_fs_qfmask(struct super_block *sb, struct ufs_super_block_third *usb3) static inline u16 ufs_get_de_namlen(struct super_block *sb, struct ufs_dir_entry *de) { - if ((sb->u.ufs_sb.s_flags & UFS_DE_MASK) == UFS_DE_OLD) + if ((UFS_SB(sb)->s_flags & UFS_DE_MASK) == UFS_DE_OLD) return fs16_to_cpu(sb, de->d_u.d_namlen); else return de->d_u.d_44.d_namlen; /* XXX this seems wrong */ @@ -127,7 +127,7 @@ ufs_get_de_namlen(struct super_block *sb, struct ufs_dir_entry *de) static inline void ufs_set_de_namlen(struct super_block *sb, struct ufs_dir_entry *de, u16 value) { - if ((sb->u.ufs_sb.s_flags & UFS_DE_MASK) == UFS_DE_OLD) + if ((UFS_SB(sb)->s_flags & UFS_DE_MASK) == UFS_DE_OLD) de->d_u.d_namlen = cpu_to_fs16(sb, value); else de->d_u.d_44.d_namlen = value; /* XXX this seems wrong */ @@ -136,7 +136,7 @@ ufs_set_de_namlen(struct super_block *sb, struct ufs_dir_entry *de, u16 value) static inline void ufs_set_de_type(struct super_block *sb, struct ufs_dir_entry *de, int mode) { - if ((sb->u.ufs_sb.s_flags & UFS_DE_MASK) != UFS_DE_44BSD) + if ((UFS_SB(sb)->s_flags & UFS_DE_MASK) != UFS_DE_44BSD) return; /* @@ -172,7 +172,7 @@ 
ufs_set_de_type(struct super_block *sb, struct ufs_dir_entry *de, int mode) static inline u32 ufs_get_inode_uid(struct super_block *sb, struct ufs_inode *inode) { - switch (sb->u.ufs_sb.s_flags & UFS_UID_MASK) { + switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) { case UFS_UID_EFT: return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_uid); case UFS_UID_44BSD: @@ -185,7 +185,7 @@ ufs_get_inode_uid(struct super_block *sb, struct ufs_inode *inode) static inline void ufs_set_inode_uid(struct super_block *sb, struct ufs_inode *inode, u32 value) { - switch (sb->u.ufs_sb.s_flags & UFS_UID_MASK) { + switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) { case UFS_UID_EFT: inode->ui_u3.ui_sun.ui_uid = cpu_to_fs32(sb, value); break; @@ -199,7 +199,7 @@ ufs_set_inode_uid(struct super_block *sb, struct ufs_inode *inode, u32 value) static inline u32 ufs_get_inode_gid(struct super_block *sb, struct ufs_inode *inode) { - switch (sb->u.ufs_sb.s_flags & UFS_UID_MASK) { + switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) { case UFS_UID_EFT: return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_gid); case UFS_UID_44BSD: @@ -212,7 +212,7 @@ ufs_get_inode_gid(struct super_block *sb, struct ufs_inode *inode) static inline void ufs_set_inode_gid(struct super_block *sb, struct ufs_inode *inode, u32 value) { - switch (sb->u.ufs_sb.s_flags & UFS_UID_MASK) { + switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) { case UFS_UID_EFT: inode->ui_u3.ui_sun.ui_gid = cpu_to_fs32(sb, value); break; @@ -481,7 +481,7 @@ static inline void ufs_fragacct (struct super_block * sb, unsigned blockmap, struct ufs_sb_private_info * uspi; unsigned fragsize, pos; - uspi = sb->u.ufs_sb.s_uspi; + uspi = UFS_SB(sb)->s_uspi; fragsize = 0; for (pos = 0; pos < uspi->s_fpb; pos++) { diff --git a/include/linux/ufs_fs.h b/include/linux/ufs_fs.h index 7ba4e3e66e4e..dd9bc72d795e 100644 --- a/include/linux/ufs_fs.h +++ b/include/linux/ufs_fs.h @@ -33,6 +33,9 @@ #include #include +#include +#include + #define UFS_BBLOCK 0 #define UFS_BBSIZE 8192 #define UFS_SBLOCK 8192 @@ -398,7 +401,7 @@ struct ufs_super_block { * Convert cylinder group to base address of its global summary info. */ #define fs_cs(indx) \ - u.ufs_sb.s_csp[(indx) >> uspi->s_csshift][(indx) & ~uspi->s_csmask] + s_csp[(indx) >> uspi->s_csshift][(indx) & ~uspi->s_csmask] /* * Cylinder group block for a file system. @@ -780,7 +783,10 @@ extern struct inode_operations ufs_fast_symlink_inode_operations; /* truncate.c */ extern void ufs_truncate (struct inode *); -#include +static inline struct ufs_sb_info *UFS_SB(struct super_block *sb) +{ + return sb->u.generic_sbp; +} static inline struct ufs_inode_info *UFS_I(struct inode *inode) { -- cgit v1.2.3 From 7e2e73a7ab2520d658985ac0e24287f8a524171d Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Mon, 19 Aug 2002 18:10:58 -0700 Subject: [PATCH] struct superblock cleanups. Finally, this chunk removes the references to the UFS & ROMFS entries in struct superblock, leaving just ext3 and hpfs as the only remaining fs's to be fixed up. 
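Every filesystem converted this way follows the same pattern, visible in the UFS diff above: allocate the private info at mount time, hang it off the union's generic_sbp pointer, and wrap access in a small inline helper. A minimal sketch of that pattern, using ROMFS for illustration (the ROMFS_SB() helper and romfs_fill_super_sketch() below are assumptions mirroring UFS_SB() and ufs_fill_super(); they are not code from these patches):

/*
 * Sketch only: how a filesystem keeps its private state after leaving
 * the struct super_block union.  ROMFS_SB() mirrors the UFS_SB()
 * helper added above and is hypothetical.
 */
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>

struct romfs_sb_info {
	unsigned long s_maxsize;	/* formerly sb->u.romfs_sb.s_maxsize */
};

static inline struct romfs_sb_info *ROMFS_SB(struct super_block *sb)
{
	return sb->u.generic_sbp;
}

static int romfs_fill_super_sketch(struct super_block *sb, void *data, int silent)
{
	struct romfs_sb_info *sbi;

	sbi = kmalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;
	memset(sbi, 0, sizeof(*sbi));
	sb->u.generic_sbp = sbi;	/* put_super frees this again */
	/* ... read the on-disk superblock, then fill in the fields ... */
	ROMFS_SB(sb)->s_maxsize = 0;
	return 0;
}

The teardown is the mirror image, exactly as in ufs_put_super() above: free the structure and reset u.generic_sbp to NULL so nothing dangles.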
--- include/linux/fs.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/fs.h b/include/linux/fs.h index ec0f6edac31b..f773053fdbc5 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -624,8 +624,6 @@ extern void __kill_fasync(struct fasync_struct *, int, int); #include #include -#include -#include extern struct list_head super_blocks; extern spinlock_t sb_lock; @@ -670,8 +668,6 @@ struct super_block { union { struct ext3_sb_info ext3_sb; struct hpfs_sb_info hpfs_sb; - struct ufs_sb_info ufs_sb; - struct romfs_sb_info romfs_sb; void *generic_sbp; } u; /* -- cgit v1.2.3 From e4039bb24e43c6c5d1a2b406d6c2a6191580e1fd Mon Sep 17 00:00:00 2001 From: Robert Love Date: Mon, 19 Aug 2002 22:23:02 -0700 Subject: [PATCH] spinlock.h cleanup - cleanup #defines: I do not follow the rationale behind the odd line-wrapped defines at the beginning of the file. If we have to use multiple lines, then we might as well do so cleanly and according to normal practice... - Remove a level of indirection: do not have spin_lock_foo use spin_lock - just explicitly call what is needed. - we do not need to define the spin_lock functions twice, once for CONFIG_PREEMPT and once for !CONFIG_PREEMPT. Defining them once with the preempt macros will optimize away fine. - cleanup preempt.h too - other misc. cleanup, improved comments, reordering, etc. --- include/linux/preempt.h | 18 ++-- include/linux/spinlock.h | 272 +++++++++++++++++++++++++++++++++-------------- 2 files changed, 201 insertions(+), 89 deletions(-) (limited to 'include/linux') diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 3864d46eadba..b4ff1a7c881c 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -1,9 +1,14 @@ #ifndef __LINUX_PREEMPT_H #define __LINUX_PREEMPT_H +/* + * include/linux/preempt.h - macros for accessing and manipulating + * preempt_count (used for kernel preemption, interrupt count, etc.) + */ + #include -#define preempt_count() (current_thread_info()->preempt_count) +#define preempt_count() (current_thread_info()->preempt_count) #define inc_preempt_count() \ do { \ @@ -31,17 +36,16 @@ do { \ barrier(); \ } while (0) -#define preempt_enable() \ +#define preempt_check_resched() \ do { \ - preempt_enable_no_resched(); \ if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \ preempt_schedule(); \ } while (0) -#define preempt_check_resched() \ +#define preempt_enable() \ do { \ - if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \ - preempt_schedule(); \ + preempt_enable_no_resched(); \ + preempt_check_resched(); \ } while (0) #define inc_preempt_count_non_preempt() do { } while (0) @@ -50,7 +54,7 @@ do { \ #else #define preempt_disable() do { } while (0) -#define preempt_enable_no_resched() do {} while(0) +#define preempt_enable_no_resched() do { } while (0) #define preempt_enable() do { } while (0) #define preempt_check_resched() do { } while (0) diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 749d3054b2dc..6de41e91171f 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -1,52 +1,23 @@ #ifndef __LINUX_SPINLOCK_H #define __LINUX_SPINLOCK_H +/* + * include/linux/spinlock.h - generic locking declarations + */ + #include #include #include #include #include #include +#include #include /* - * These are the generic versions of the spinlocks and read-write - * locks.. 
+ * Must define these before including other files, inline functions need them */ -#define spin_lock_irqsave(lock, flags) do { local_irq_save(flags); spin_lock(lock); } while (0) -#define spin_lock_irq(lock) do { local_irq_disable(); spin_lock(lock); } while (0) -#define spin_lock_bh(lock) do { local_bh_disable(); spin_lock(lock); } while (0) - -#define read_lock_irqsave(lock, flags) do { local_irq_save(flags); read_lock(lock); } while (0) -#define read_lock_irq(lock) do { local_irq_disable(); read_lock(lock); } while (0) -#define read_lock_bh(lock) do { local_bh_disable(); read_lock(lock); } while (0) - -#define write_lock_irqsave(lock, flags) do { local_irq_save(flags); write_lock(lock); } while (0) -#define write_lock_irq(lock) do { local_irq_disable(); write_lock(lock); } while (0) -#define write_lock_bh(lock) do { local_bh_disable(); write_lock(lock); } while (0) - -#define spin_unlock_irqrestore(lock, flags) do { _raw_spin_unlock(lock); local_irq_restore(flags); preempt_enable(); } while (0) -#define _raw_spin_unlock_irqrestore(lock, flags) do { _raw_spin_unlock(lock); local_irq_restore(flags); } while (0) -#define spin_unlock_irq(lock) do { _raw_spin_unlock(lock); local_irq_enable(); preempt_enable(); } while (0) -#define spin_unlock_bh(lock) do { spin_unlock(lock); local_bh_enable(); } while (0) - -#define read_unlock_irqrestore(lock, flags) do { _raw_read_unlock(lock); local_irq_restore(flags); preempt_enable(); } while (0) -#define read_unlock_irq(lock) do { _raw_read_unlock(lock); local_irq_enable(); preempt_enable(); } while (0) -#define read_unlock_bh(lock) do { read_unlock(lock); local_bh_enable(); } while (0) - -#define write_unlock_irqrestore(lock, flags) do { _raw_write_unlock(lock); local_irq_restore(flags); preempt_enable(); } while (0) -#define write_unlock_irq(lock) do { _raw_write_unlock(lock); local_irq_enable(); preempt_enable(); } while (0) -#define write_unlock_bh(lock) do { write_unlock(lock); local_bh_enable(); } while (0) -#define spin_trylock_bh(lock) ({ int __r; local_bh_disable();\ - __r = spin_trylock(lock); \ - if (!__r) local_bh_enable(); \ - __r; }) - -/* Must define these before including other files, inline functions need them */ - -#include - #define LOCK_SECTION_NAME \ ".text.lock." __stringify(KBUILD_BASENAME) @@ -60,11 +31,17 @@ #define LOCK_SECTION_END \ ".previous\n\t" +/* + * If CONFIG_SMP is set, pull in the _raw_* definitions + */ #ifdef CONFIG_SMP #include -#elif !defined(spin_lock_init) /* !SMP and spin_lock_init not previously - defined (e.g. by including asm/spinlock.h */ +/* + * !CONFIG_SMP and spin_lock_init not previously defined + * (e.g. by including include/asm/spinlock.h) + */ +#elif !defined(spin_lock_init) #ifndef CONFIG_PREEMPT # define atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic) @@ -72,55 +49,42 @@ #endif /* - * Your basic spinlocks, allowing only a single CPU anywhere - * - * Most gcc versions have a nasty bug with empty initializers. + * gcc versions before ~2.95 have a nasty bug with empty initializers. 
*/ #if (__GNUC__ > 2) typedef struct { } spinlock_t; -# define SPIN_LOCK_UNLOCKED (spinlock_t) { } + typedef struct { } rwlock_t; + #define SPIN_LOCK_UNLOCKED (spinlock_t) { } + #define RW_LOCK_UNLOCKED (rwlock_t) { } #else typedef struct { int gcc_is_buggy; } spinlock_t; -# define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } + typedef struct { int gcc_is_buggy; } rwlock_t; + #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } + #define RW_LOCK_UNLOCKED (rwlock_t) { 0 } #endif +/* + * If CONFIG_SMP is unset, declare the _raw_* definitions as nops + */ #define spin_lock_init(lock) do { (void)(lock); } while(0) -#define _raw_spin_lock(lock) (void)(lock) /* Not "unused variable". */ +#define _raw_spin_lock(lock) (void)(lock) #define spin_is_locked(lock) ((void)(lock), 0) #define _raw_spin_trylock(lock) ((void)(lock), 1) #define spin_unlock_wait(lock) do { (void)(lock); } while(0) #define _raw_spin_unlock(lock) do { (void)(lock); } while(0) - -/* - * Read-write spinlocks, allowing multiple readers - * but only one writer. - * - * NOTE! it is quite common to have readers in interrupts - * but no interrupt writers. For those circumstances we - * can "mix" irq-safe locks - any writer needs to get a - * irq-safe write-lock, but readers can get non-irqsafe - * read-locks. - * - * Most gcc versions have a nasty bug with empty initializers. - */ -#if (__GNUC__ > 2) - typedef struct { } rwlock_t; - #define RW_LOCK_UNLOCKED (rwlock_t) { } -#else - typedef struct { int gcc_is_buggy; } rwlock_t; - #define RW_LOCK_UNLOCKED (rwlock_t) { 0 } -#endif - #define rwlock_init(lock) do { } while(0) -#define _raw_read_lock(lock) (void)(lock) /* Not "unused variable". */ +#define _raw_read_lock(lock) (void)(lock) #define _raw_read_unlock(lock) do { } while(0) -#define _raw_write_lock(lock) (void)(lock) /* Not "unused variable". */ +#define _raw_write_lock(lock) (void)(lock) #define _raw_write_unlock(lock) do { } while(0) #endif /* !SMP */ -#ifdef CONFIG_PREEMPT - +/* + * Define the various spin_lock and rw_lock methods. Note we define these + * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various + * methods are defined as nops in the case they are not required. + */ #define spin_lock(lock) \ do { \ preempt_disable(); \ @@ -129,31 +93,175 @@ do { \ #define spin_trylock(lock) ({preempt_disable(); _raw_spin_trylock(lock) ? \ 1 : ({preempt_enable(); 0;});}) + #define spin_unlock(lock) \ do { \ _raw_spin_unlock(lock); \ preempt_enable(); \ } while (0) -#define read_lock(lock) ({preempt_disable(); _raw_read_lock(lock);}) -#define read_unlock(lock) ({_raw_read_unlock(lock); preempt_enable();}) -#define write_lock(lock) ({preempt_disable(); _raw_write_lock(lock);}) -#define write_unlock(lock) ({_raw_write_unlock(lock); preempt_enable();}) +#define read_lock(lock) \ +do { \ + preempt_disable(); \ + _raw_read_lock(lock); \ +} while(0) + +#define read_unlock(lock) \ +do { \ + _raw_read_unlock(lock); \ + preempt_enable(); \ +} while(0) + +#define write_lock(lock) \ +do { \ + preempt_disable(); \ + _raw_write_lock(lock); \ +} while(0) + +#define write_unlock(lock) \ +do { \ + _raw_write_unlock(lock); \ + preempt_enable(); \ +} while(0) + #define write_trylock(lock) ({preempt_disable();_raw_write_trylock(lock) ? 
\ 1 : ({preempt_enable(); 0;});}) -#else +#define spin_lock_irqsave(lock, flags) \ +do { \ + local_irq_save(flags); \ + preempt_disable(); \ + _raw_spin_lock(lock); \ +} while (0) -#define spin_lock(lock) _raw_spin_lock(lock) -#define spin_trylock(lock) _raw_spin_trylock(lock) -#define spin_unlock(lock) _raw_spin_unlock(lock) +#define spin_lock_irq(lock) \ +do { \ + local_irq_disable(); \ + preempt_disable(); \ + _raw_spin_lock(lock); \ +} while (0) -#define read_lock(lock) _raw_read_lock(lock) -#define read_unlock(lock) _raw_read_unlock(lock) -#define write_lock(lock) _raw_write_lock(lock) -#define write_unlock(lock) _raw_write_unlock(lock) -#define write_trylock(lock) _raw_write_trylock(lock) -#endif +#define spin_lock_bh(lock) \ +do { \ + local_bh_disable(); \ + preempt_disable(); \ + _raw_spin_lock(lock); \ +} while (0) + +#define read_lock_irqsave(lock, flags) \ +do { \ + local_irq_save(flags); \ + preempt_disable(); \ + _raw_read_lock(lock); \ +} while (0) + +#define read_lock_irq(lock) \ +do { \ + local_irq_disable(); \ + preempt_disable(); \ + _raw_read_lock(lock); \ +} while (0) + +#define read_lock_bh(lock) \ +do { \ + local_bh_disable(); \ + preempt_disable(); \ + _raw_read_lock(lock); \ +} while (0) + +#define write_lock_irqsave(lock, flags) \ +do { \ + local_irq_save(flags); \ + preempt_disable(); \ + _raw_write_lock(lock); \ +} while (0) + +#define write_lock_irq(lock) \ +do { \ + local_irq_disable(); \ + preempt_disable(); \ + _raw_write_lock(lock); \ +} while (0) + +#define write_lock_bh(lock) \ +do { \ + local_bh_disable(); \ + preempt_disable(); \ + _raw_write_lock(lock); \ +} while (0) + +#define spin_unlock_irqrestore(lock, flags) \ +do { \ + _raw_spin_unlock(lock); \ + local_irq_restore(flags); \ + preempt_enable(); \ +} while (0) + +#define _raw_spin_unlock_irqrestore(lock, flags) \ +do { \ + _raw_spin_unlock(lock); \ + local_irq_restore(flags); \ +} while (0) + +#define spin_unlock_irq(lock) \ +do { \ + _raw_spin_unlock(lock); \ + local_irq_enable(); \ + preempt_enable(); \ +} while (0) + +#define spin_unlock_bh(lock) \ +do { \ + _raw_spin_unlock(lock); \ + preempt_enable(); \ + local_bh_enable(); \ +} while (0) + +#define read_unlock_irqrestore(lock, flags) \ +do { \ + _raw_read_unlock(lock); \ + local_irq_restore(flags); \ + preempt_enable(); \ +} while (0) + +#define read_unlock_irq(lock) \ +do { \ + _raw_read_unlock(lock); \ + local_irq_enable(); \ + preempt_enable(); \ +} while (0) + +#define read_unlock_bh(lock) \ +do { \ + _raw_read_unlock(lock); \ + preempt_enable(); \ + local_bh_enable(); \ +} while (0) + +#define write_unlock_irqrestore(lock, flags) \ +do { \ + _raw_write_unlock(lock); \ + local_irq_restore(flags); \ + preempt_enable(); \ +} while (0) + +#define write_unlock_irq(lock) \ +do { \ + _raw_write_unlock(lock); \ + local_irq_enable(); \ + preempt_enable(); \ +} while (0) + +#define write_unlock_bh(lock) \ +do { \ + _raw_write_unlock(lock); \ + preempt_enable(); \ + local_bh_enable(); \ +} while (0) + +#define spin_trylock_bh(lock) ({ local_bh_disable(); preempt_disable(); \ + _raw_spin_trylock(lock) ? 1 : \ + ({preempt_enable(); local_bh_enable(); 0;});}) /* "lock on reference count zero" */ #ifndef ATOMIC_DEC_AND_LOCK -- cgit v1.2.3 From e3adef999631dc7067fe4c34d86b8819d5a76e97 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Tue, 20 Aug 2002 00:01:08 -0700 Subject: Move x86 big-kernel-lock implementation into <linux/smp_lock.h>, since it was generic. Remove all architecture-specific files.
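The per-architecture copies removed below differed only trivially; a condensed sketch of the logic being consolidated (taken from the deleted asm-alpha version shown below, not a verbatim copy of the new <linux/smp_lock.h>):

/*
 * Condensed sketch of the big kernel lock being consolidated; based on
 * the per-arch smplock.h copies deleted below.
 */
extern spinlock_t kernel_flag;

#define kernel_locked()		spin_is_locked(&kernel_flag)

/*
 * Recursive acquire: task->lock_depth starts at -1, so only the
 * outermost lock_kernel() actually takes the spinlock.
 */
static inline void lock_kernel(void)
{
#ifdef CONFIG_PREEMPT
	if (current->lock_depth == -1)
		spin_lock(&kernel_flag);
	++current->lock_depth;
#else
	if (!++current->lock_depth)
		spin_lock(&kernel_flag);
#endif
}

static inline void unlock_kernel(void)
{
	if (--current->lock_depth < 0)
		spin_unlock(&kernel_flag);
}

Since the implementation was effectively identical everywhere, one generic copy in include/linux/smp_lock.h replaces all eighteen per-architecture headers.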
--- include/asm-alpha/smplock.h | 56 ------------------------- include/asm-arm/smplock.h | 60 --------------------------- include/asm-cris/smplock.h | 25 ------------ include/asm-generic/smplock.h | 50 ----------------------- include/asm-i386/smplock.h | 58 -------------------------- include/asm-ia64/smplock.h | 58 -------------------------- include/asm-m68k/smplock.h | 51 ----------------------- include/asm-mips/smplock.h | 54 ------------------------ include/asm-mips64/smplock.h | 56 ------------------------- include/asm-parisc/smplock.h | 49 ---------------------- include/asm-ppc/smplock.h | 68 ------------------------------- include/asm-ppc64/smplock.h | 55 ------------------------- include/asm-s390/smplock.h | 62 ---------------------------- include/asm-s390x/smplock.h | 62 ---------------------------- include/asm-sh/smplock.h | 23 ----------- include/asm-sparc/smplock.h | 55 ------------------------- include/asm-sparc64/smplock.h | 60 --------------------------- include/asm-x86_64/smplock.h | 95 ------------------------------------------- include/linux/smp_lock.h | 54 +++++++++++++++++++++++- 19 files changed, 53 insertions(+), 998 deletions(-) delete mode 100644 include/asm-alpha/smplock.h delete mode 100644 include/asm-arm/smplock.h delete mode 100644 include/asm-cris/smplock.h delete mode 100644 include/asm-generic/smplock.h delete mode 100644 include/asm-i386/smplock.h delete mode 100644 include/asm-ia64/smplock.h delete mode 100644 include/asm-m68k/smplock.h delete mode 100644 include/asm-mips/smplock.h delete mode 100644 include/asm-mips64/smplock.h delete mode 100644 include/asm-parisc/smplock.h delete mode 100644 include/asm-ppc/smplock.h delete mode 100644 include/asm-ppc64/smplock.h delete mode 100644 include/asm-s390/smplock.h delete mode 100644 include/asm-s390x/smplock.h delete mode 100644 include/asm-sh/smplock.h delete mode 100644 include/asm-sparc/smplock.h delete mode 100644 include/asm-sparc64/smplock.h delete mode 100644 include/asm-x86_64/smplock.h (limited to 'include/linux') diff --git a/include/asm-alpha/smplock.h b/include/asm-alpha/smplock.h deleted file mode 100644 index cfd36450cb24..000000000000 --- a/include/asm-alpha/smplock.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - * - * - * Default SMP lock implementation - */ - -#include -#include -#include - -extern spinlock_t kernel_flag; - -#define kernel_locked() spin_is_locked(&kernel_flag) - -/* - * Release global kernel lock and global interrupt lock - */ -static __inline__ void release_kernel_lock(struct task_struct *task) -{ - if (unlikely(task->lock_depth >= 0)) - spin_unlock(&kernel_flag); -} - -/* - * Re-acquire the kernel lock - */ -static __inline__ void reacquire_kernel_lock(struct task_struct *task) -{ - if (unlikely(task->lock_depth >= 0)) - spin_lock(&kernel_flag); -} - -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, - * so we only need to worry about other - * CPU's. 
- */ -static __inline__ void lock_kernel(void) -{ -#ifdef CONFIG_PREEMPT - if (current->lock_depth == -1) - spin_lock(&kernel_flag); - ++current->lock_depth; -#else - if (!++current->lock_depth) - spin_lock(&kernel_flag); -#endif -} - -static __inline__ void unlock_kernel(void) -{ - if (--current->lock_depth < 0) - spin_unlock(&kernel_flag); -} diff --git a/include/asm-arm/smplock.h b/include/asm-arm/smplock.h deleted file mode 100644 index 7b70d4629ad4..000000000000 --- a/include/asm-arm/smplock.h +++ /dev/null @@ -1,60 +0,0 @@ -/* - * - * - * Default SMP lock implementation - */ -#include -#include -#include - -extern spinlock_t kernel_flag; - -#ifdef CONFIG_PREEMPT -#define kernel_locked() preempt_get_count() -#else -#define kernel_locked() spin_is_locked(&kernel_flag) -#endif - -/* - * Release global kernel lock and global interrupt lock - */ -#define release_kernel_lock(task, cpu) \ -do { \ - if (unlikely(task->lock_depth >= 0)) \ - spin_unlock(&kernel_flag); \ -} while (0) - -/* - * Re-acquire the kernel lock - */ -#define reacquire_kernel_lock(task) \ -do { \ - if (unlikely(task->lock_depth >= 0)) \ - spin_lock(&kernel_flag); \ -} while (0) - - -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, - * so we only need to worry about other - * CPU's. - */ -static inline void lock_kernel(void) -{ -#ifdef CONFIG_PREEMPT - if (current->lock_depth == -1) - spin_lock(&kernel_flag); - ++current->lock_depth; -#else - if (!++current->lock_depth) - spin_lock(&kernel_flag); -#endif -} - -static inline void unlock_kernel(void) -{ - if (--current->lock_depth < 0) - spin_unlock(&kernel_flag); -} diff --git a/include/asm-cris/smplock.h b/include/asm-cris/smplock.h deleted file mode 100644 index 398562059dbd..000000000000 --- a/include/asm-cris/smplock.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef __ASM_CRIS_SMPLOCK_H -#define __ASM_CRIS_SMPLOCK_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ - -#include - -#ifndef CONFIG_SMP - -#define lock_kernel() do { } while(0) -#define unlock_kernel() do { } while(0) -#define release_kernel_lock(task, cpu, depth) ((depth) = 1) -#define reacquire_kernel_lock(task, cpu, depth) do { } while(0) - -#else - -#error "We do not support SMP on CRIS" - -#endif - -#endif diff --git a/include/asm-generic/smplock.h b/include/asm-generic/smplock.h deleted file mode 100644 index f02afc9ffd6e..000000000000 --- a/include/asm-generic/smplock.h +++ /dev/null @@ -1,50 +0,0 @@ -/* - * - * - * Default SMP lock implementation - */ -#include -#include - -extern spinlock_t kernel_flag; - -#define kernel_locked() spin_is_locked(&kernel_flag) - -/* - * Release global kernel lock and global interrupt lock - */ -#define release_kernel_lock(task) \ -do { \ - if (task->lock_depth >= 0) \ - spin_unlock(&kernel_flag); \ - local_irq_enable(); \ -} while (0) - -/* - * Re-acquire the kernel lock - */ -#define reacquire_kernel_lock(task) \ -do { \ - if (task->lock_depth >= 0) \ - spin_lock(&kernel_flag); \ -} while (0) - - -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, - * so we only need to worry about other - * CPU's. 
- */ -extern __inline__ void lock_kernel(void) -{ - if (!++current->lock_depth) - spin_lock(&kernel_flag); -} - -extern __inline__ void unlock_kernel(void) -{ - if (--current->lock_depth < 0) - spin_unlock(&kernel_flag); -} diff --git a/include/asm-i386/smplock.h b/include/asm-i386/smplock.h deleted file mode 100644 index 2134982b9d93..000000000000 --- a/include/asm-i386/smplock.h +++ /dev/null @@ -1,58 +0,0 @@ -/* - * - * - * i386 SMP lock implementation - */ -#include -#include -#include -#include - -extern spinlock_t kernel_flag; - -#define kernel_locked() (current->lock_depth >= 0) - -#define get_kernel_lock() spin_lock(&kernel_flag) -#define put_kernel_lock() spin_unlock(&kernel_flag) - -/* - * Release global kernel lock and global interrupt lock - */ -#define release_kernel_lock(task) \ -do { \ - if (unlikely(task->lock_depth >= 0)) \ - put_kernel_lock(); \ -} while (0) - -/* - * Re-acquire the kernel lock - */ -#define reacquire_kernel_lock(task) \ -do { \ - if (unlikely(task->lock_depth >= 0)) \ - get_kernel_lock(); \ -} while (0) - - -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, - * so we only need to worry about other - * CPU's. - */ -static __inline__ void lock_kernel(void) -{ - int depth = current->lock_depth+1; - if (!depth) - get_kernel_lock(); - current->lock_depth = depth; -} - -static __inline__ void unlock_kernel(void) -{ - if (current->lock_depth < 0) - BUG(); - if (--current->lock_depth < 0) - put_kernel_lock(); -} diff --git a/include/asm-ia64/smplock.h b/include/asm-ia64/smplock.h deleted file mode 100644 index 103185f86e30..000000000000 --- a/include/asm-ia64/smplock.h +++ /dev/null @@ -1,58 +0,0 @@ -/* - * - * - * Default SMP lock implementation - */ -#include -#include -#include - -#include -#include - -extern spinlock_t kernel_flag; - -#ifdef CONFIG_SMP -# define kernel_locked() spin_is_locked(&kernel_flag) -#else -# define kernel_locked() (1) -#endif - -/* - * Release global kernel lock and global interrupt lock - */ -#define release_kernel_lock(task) \ -do { \ - if (unlikely(task->lock_depth >= 0)) \ - spin_unlock(&kernel_flag); \ -} while (0) - -/* - * Re-acquire the kernel lock - */ -#define reacquire_kernel_lock(task) \ -do { \ - if (unlikely(task->lock_depth >= 0)) \ - spin_lock(&kernel_flag); \ -} while (0) - -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, - * so we only need to worry about other - * CPU's. - */ -static __inline__ void -lock_kernel(void) -{ - if (!++current->lock_depth) - spin_lock(&kernel_flag); -} - -static __inline__ void -unlock_kernel(void) -{ - if (--current->lock_depth < 0) - spin_unlock(&kernel_flag); -} diff --git a/include/asm-m68k/smplock.h b/include/asm-m68k/smplock.h deleted file mode 100644 index 3e98a6afd154..000000000000 --- a/include/asm-m68k/smplock.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * - * - * Default SMP lock implementation - */ -#include -#include - -extern spinlock_t kernel_flag; - -#define kernel_locked() spin_is_locked(&kernel_flag) - -/* - * Release global kernel lock and global interrupt lock - */ -#define release_kernel_lock(task, cpu) \ -do { \ - if (task->lock_depth >= 0) \ - spin_unlock(&kernel_flag); \ - release_irqlock(cpu); \ - local_irq_enable(); \ -} while (0) - -/* - * Re-acquire the kernel lock - */ -#define reacquire_kernel_lock(task) \ -do { \ - if (task->lock_depth >= 0) \ - spin_lock(&kernel_flag); \ -} while (0) - - -/* - * Getting the big kernel lock. 
- * - * This cannot happen asynchronously, - * so we only need to worry about other - * CPU's. - */ -extern __inline__ void lock_kernel(void) -{ - if (!++current->lock_depth) - spin_lock(&kernel_flag); -} - -extern __inline__ void unlock_kernel(void) -{ - if (--current->lock_depth < 0) - spin_unlock(&kernel_flag); -} diff --git a/include/asm-mips/smplock.h b/include/asm-mips/smplock.h deleted file mode 100644 index 43da07e41222..000000000000 --- a/include/asm-mips/smplock.h +++ /dev/null @@ -1,54 +0,0 @@ -/* $Id: smplock.h,v 1.2 1999/10/09 00:01:43 ralf Exp $ - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Default SMP lock implementation - */ -#include -#include - -extern spinlock_t kernel_flag; - -#define kernel_locked() spin_is_locked(&kernel_flag) - -/* - * Release global kernel lock and global interrupt lock - */ -#define release_kernel_lock(task, cpu) \ -do { \ - if (task->lock_depth >= 0) \ - spin_unlock(&kernel_flag); \ - release_irqlock(cpu); \ - local_irq_enable(); \ -} while (0) - -/* - * Re-acquire the kernel lock - */ -#define reacquire_kernel_lock(task) \ -do { \ - if (task->lock_depth >= 0) \ - spin_lock(&kernel_flag); \ -} while (0) - - -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, - * so we only need to worry about other - * CPU's. - */ -extern __inline__ void lock_kernel(void) -{ - if (!++current->lock_depth) - spin_lock(&kernel_flag); -} - -extern __inline__ void unlock_kernel(void) -{ - if (--current->lock_depth < 0) - spin_unlock(&kernel_flag); -} diff --git a/include/asm-mips64/smplock.h b/include/asm-mips64/smplock.h deleted file mode 100644 index 68345b04d68f..000000000000 --- a/include/asm-mips64/smplock.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - * - * - * Default SMP lock implementation - */ -#ifndef _ASM_SMPLOCK_H -#define _ASM_SMPLOCK_H - -#include -#include -#include - -extern spinlock_t kernel_flag; - -#define kernel_locked() spin_is_locked(&kernel_flag) - -/* - * Release global kernel lock and global interrupt lock - */ -static __inline__ void release_kernel_lock(struct task_struct *task, int cpu) -{ - if (task->lock_depth >= 0) - spin_unlock(&kernel_flag); - release_irqlock(cpu); - local_irq_enable(); -} - -/* - * Re-acquire the kernel lock - */ -static __inline__ void reacquire_kernel_lock(struct task_struct *task) -{ - if (task->lock_depth >= 0) - spin_lock(&kernel_flag); -} - -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, - * so we only need to worry about other - * CPU's. 
- */ -static __inline__ void lock_kernel(void) -{ - if (!++current->lock_depth) - spin_lock(&kernel_flag); -} - -static __inline__ void unlock_kernel(void) -{ - if (--current->lock_depth < 0) - spin_unlock(&kernel_flag); -} - -#endif /* _ASM_SMPLOCK_H */ diff --git a/include/asm-parisc/smplock.h b/include/asm-parisc/smplock.h deleted file mode 100644 index 06fb015d5cb9..000000000000 --- a/include/asm-parisc/smplock.h +++ /dev/null @@ -1,49 +0,0 @@ -/* - * - * - * Default SMP lock implementation - */ -#include -#include - -extern spinlock_t kernel_flag; - -/* - * Release global kernel lock and global interrupt lock - */ -#define release_kernel_lock(task, cpu) \ -do { \ - if (task->lock_depth >= 0) \ - spin_unlock(&kernel_flag); \ - release_irqlock(cpu); \ - local_irq_enable(); \ -} while (0) - -/* - * Re-acquire the kernel lock - */ -#define reacquire_kernel_lock(task) \ -do { \ - if (task->lock_depth >= 0) \ - spin_lock(&kernel_flag); \ -} while (0) - - -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, - * so we only need to worry about other - * CPU's. - */ -extern __inline__ void lock_kernel(void) -{ - if (!++current->lock_depth) - spin_lock(&kernel_flag); -} - -extern __inline__ void unlock_kernel(void) -{ - if (--current->lock_depth < 0) - spin_unlock(&kernel_flag); -} diff --git a/include/asm-ppc/smplock.h b/include/asm-ppc/smplock.h deleted file mode 100644 index 8e8ec92af714..000000000000 --- a/include/asm-ppc/smplock.h +++ /dev/null @@ -1,68 +0,0 @@ -/* - * BK Id: %F% %I% %G% %U% %#% - */ -/* - * - * - * Default SMP lock implementation - */ -#ifdef __KERNEL__ -#ifndef __ASM_SMPLOCK_H__ -#define __ASM_SMPLOCK_H__ - -#include -#include - -extern spinlock_t kernel_flag; - -#ifdef CONFIG_SMP -#define kernel_locked() spin_is_locked(&kernel_flag) -#elif defined(CONFIG_PREEMPT) -#define kernel_locked() preempt_count() -#endif - -/* - * Release global kernel lock and global interrupt lock - */ -#define release_kernel_lock(task) \ -do { \ - if (unlikely(task->lock_depth >= 0)) \ - spin_unlock(&kernel_flag); \ -} while (0) - -/* - * Re-acquire the kernel lock - */ -#define reacquire_kernel_lock(task) \ -do { \ - if (unlikely(task->lock_depth >= 0)) \ - spin_lock(&kernel_flag); \ -} while (0) - - -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, - * so we only need to worry about other - * CPU's. - */ -static __inline__ void lock_kernel(void) -{ -#ifdef CONFIG_PREEMPT - if (current->lock_depth == -1) - spin_lock(&kernel_flag); - ++current->lock_depth; -#else - if (!++current->lock_depth) - spin_lock(&kernel_flag); -#endif /* CONFIG_PREEMPT */ -} - -static __inline__ void unlock_kernel(void) -{ - if (--current->lock_depth < 0) - spin_unlock(&kernel_flag); -} -#endif /* __ASM_SMPLOCK_H__ */ -#endif /* __KERNEL__ */ diff --git a/include/asm-ppc64/smplock.h b/include/asm-ppc64/smplock.h deleted file mode 100644 index 16b0b2f72b0c..000000000000 --- a/include/asm-ppc64/smplock.h +++ /dev/null @@ -1,55 +0,0 @@ -/* - * - * - * Default SMP lock implementation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ -#include -#include - -extern spinlock_t kernel_flag; - -#define kernel_locked() spin_is_locked(&kernel_flag) - -/* - * Release global kernel lock and global interrupt lock - */ -#define release_kernel_lock(task) \ -do { \ - if (unlikely(task->lock_depth >= 0)) \ - spin_unlock(&kernel_flag); \ -} while (0) - -/* - * Re-acquire the kernel lock - */ -#define reacquire_kernel_lock(task) \ -do { \ - if (unlikely(task->lock_depth >= 0)) \ - spin_lock(&kernel_flag); \ -} while (0) - -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, - * so we only need to worry about other - * CPU's. - */ -static __inline__ void lock_kernel(void) -{ - if (!++current->lock_depth) - spin_lock(&kernel_flag); -} - -static __inline__ void unlock_kernel(void) -{ - if (current->lock_depth < 0) - BUG(); - if (--current->lock_depth < 0) - spin_unlock(&kernel_flag); -} diff --git a/include/asm-s390/smplock.h b/include/asm-s390/smplock.h deleted file mode 100644 index a12df4a3f882..000000000000 --- a/include/asm-s390/smplock.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - * include/asm-s390/smplock.h - * - * S390 version - * - * Derived from "include/asm-i386/smplock.h" - */ - -#include -#include - -extern spinlock_t kernel_flag; - -#define kernel_locked() spin_is_locked(&kernel_flag) - -/* - * Release global kernel lock and global interrupt lock - */ -#define release_kernel_lock(task, cpu) \ -do { \ - if (task->lock_depth >= 0) \ - spin_unlock(&kernel_flag); \ - release_irqlock(cpu); \ - local_irq_enable(); \ -} while (0) - -/* - * Re-acquire the kernel lock - */ -#define reacquire_kernel_lock(task) \ -do { \ - if (task->lock_depth >= 0) \ - spin_lock(&kernel_flag); \ -} while (0) - - -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, - * so we only need to worry about other - * CPU's. - */ -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, - * so we only need to worry about other - * CPU's. - */ -extern __inline__ void lock_kernel(void) -{ - if (!++current->lock_depth) - spin_lock(&kernel_flag); -} - -extern __inline__ void unlock_kernel(void) -{ - if (--current->lock_depth < 0) - spin_unlock(&kernel_flag); -} - diff --git a/include/asm-s390x/smplock.h b/include/asm-s390x/smplock.h deleted file mode 100644 index a12df4a3f882..000000000000 --- a/include/asm-s390x/smplock.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - * include/asm-s390/smplock.h - * - * S390 version - * - * Derived from "include/asm-i386/smplock.h" - */ - -#include -#include - -extern spinlock_t kernel_flag; - -#define kernel_locked() spin_is_locked(&kernel_flag) - -/* - * Release global kernel lock and global interrupt lock - */ -#define release_kernel_lock(task, cpu) \ -do { \ - if (task->lock_depth >= 0) \ - spin_unlock(&kernel_flag); \ - release_irqlock(cpu); \ - local_irq_enable(); \ -} while (0) - -/* - * Re-acquire the kernel lock - */ -#define reacquire_kernel_lock(task) \ -do { \ - if (task->lock_depth >= 0) \ - spin_lock(&kernel_flag); \ -} while (0) - - -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, - * so we only need to worry about other - * CPU's. - */ -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, - * so we only need to worry about other - * CPU's. 
- */ -extern __inline__ void lock_kernel(void) -{ - if (!++current->lock_depth) - spin_lock(&kernel_flag); -} - -extern __inline__ void unlock_kernel(void) -{ - if (--current->lock_depth < 0) - spin_unlock(&kernel_flag); -} - diff --git a/include/asm-sh/smplock.h b/include/asm-sh/smplock.h deleted file mode 100644 index 33499815d011..000000000000 --- a/include/asm-sh/smplock.h +++ /dev/null @@ -1,23 +0,0 @@ -#ifndef __ASM_SH_SMPLOCK_H -#define __ASM_SH_SMPLOCK_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ - -#include - -#ifndef CONFIG_SMP - -#define lock_kernel() do { } while(0) -#define unlock_kernel() do { } while(0) -#define release_kernel_lock(task, cpu, depth) ((depth) = 1) -#define reacquire_kernel_lock(task, cpu, depth) do { } while(0) - -#else -#error "We do not support SMP on SH" -#endif /* CONFIG_SMP */ - -#endif /* __ASM_SH_SMPLOCK_H */ diff --git a/include/asm-sparc/smplock.h b/include/asm-sparc/smplock.h deleted file mode 100644 index bd931bb5c511..000000000000 --- a/include/asm-sparc/smplock.h +++ /dev/null @@ -1,55 +0,0 @@ -/* - * - * - * Default SMP lock implementation - */ -#include -#include -#include - -extern spinlock_t kernel_flag; - -#define kernel_locked() \ - (spin_is_locked(&kernel_flag) &&\ - (current->lock_depth >= 0)) - -/* - * Release global kernel lock and global interrupt lock - */ -#define release_kernel_lock(task, cpu) \ -do { \ - if (unlikely(task->lock_depth >= 0)) { \ - spin_unlock(&kernel_flag); \ - release_irqlock(cpu); \ - local_irq_enable(); \ - } \ -} while (0) - -/* - * Re-acquire the kernel lock - */ -#define reacquire_kernel_lock(task) \ -do { \ - if (unlikely(task->lock_depth >= 0)) \ - spin_lock(&kernel_flag); \ -} while (0) - - -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, - * so we only need to worry about other - * CPU's. - */ -#define lock_kernel() \ -do { \ - if (!++current->lock_depth) \ - spin_lock(&kernel_flag); \ -} while(0) - -#define unlock_kernel() \ -do { \ - if (--current->lock_depth < 0) \ - spin_unlock(&kernel_flag); \ -} while(0) diff --git a/include/asm-sparc64/smplock.h b/include/asm-sparc64/smplock.h deleted file mode 100644 index b7edf0156893..000000000000 --- a/include/asm-sparc64/smplock.h +++ /dev/null @@ -1,60 +0,0 @@ -/* - * - * - * Default SMP lock implementation - */ -#include -#include -#include - -extern spinlock_t kernel_flag; - -#ifdef CONFIG_SMP -#define kernel_locked() \ - (spin_is_locked(&kernel_flag) &&\ - (current->lock_depth >= 0)) -#else -#ifdef CONFIG_PREEMPT -#define kernel_locked() preempt_get_count() -#else -#define kernel_locked() 1 -#endif -#endif - -/* - * Release global kernel lock and global interrupt lock - */ -#define release_kernel_lock(task) \ -do { \ - if (unlikely(task->lock_depth >= 0)) \ - spin_unlock(&kernel_flag); \ -} while (0) - -/* - * Re-acquire the kernel lock - */ -#define reacquire_kernel_lock(task) \ -do { \ - if (unlikely(task->lock_depth >= 0)) \ - spin_lock(&kernel_flag); \ -} while (0) - - -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, - * so we only need to worry about other - * CPU's. 
- */ -#define lock_kernel() \ -do { \ - if (!++current->lock_depth) \ - spin_lock(&kernel_flag); \ -} while(0) - -#define unlock_kernel() \ -do { \ - if (--current->lock_depth < 0) \ - spin_unlock(&kernel_flag); \ -} while(0) diff --git a/include/asm-x86_64/smplock.h b/include/asm-x86_64/smplock.h deleted file mode 100644 index 6c0b652a63a2..000000000000 --- a/include/asm-x86_64/smplock.h +++ /dev/null @@ -1,95 +0,0 @@ -/* - * - */ -#include -#include -#include -#include - -extern spinlock_t kernel_flag; - -#ifdef CONFIG_SMP -#define kernel_locked() spin_is_locked(&kernel_flag) -#define check_irq_holder(cpu) \ - if (global_irq_holder == (cpu)) \ - BUG(); -#else -#ifdef CONFIG_PREEMPT -#define kernel_locked() preempt_get_count() -#define global_irq_holder 0 -#define check_irq_holder(cpu) do {} while(0) -#else -#define kernel_locked() 1 -#define check_irq_holder(cpu) \ - if (global_irq_holder == (cpu)) \ - BUG(); -#endif -#endif - -/* - * Release global kernel lock and global interrupt lock - */ -#define release_kernel_lock(task, cpu) \ -do { \ - if (unlikely(task->lock_depth >= 0)) { \ - spin_unlock(&kernel_flag); \ - check_irq_holder(cpu); \ - } \ -} while (0) - -/* - * Re-acquire the kernel lock - */ -#define reacquire_kernel_lock(task) \ -do { \ - if (unlikely(task->lock_depth >= 0)) \ - spin_lock(&kernel_flag); \ -} while (0) - - -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, - * so we only need to worry about other - * CPU's. - */ -extern __inline__ void lock_kernel(void) -{ -#ifdef CONFIG_PREEMPT - if (current->lock_depth == -1) - spin_lock(&kernel_flag); - ++current->lock_depth; -#else -#if 1 - if (!++current->lock_depth) - spin_lock(&kernel_flag); -#else - __asm__ __volatile__( - "incl %1\n\t" - "jne 9f" - spin_lock_string - "\n9:" - :"=m" (__dummy_lock(&kernel_flag)), - "=m" (current->lock_depth)); -#endif -#endif -} - -extern __inline__ void unlock_kernel(void) -{ - if (current->lock_depth < 0) - BUG(); -#if 1 - if (--current->lock_depth < 0) - spin_unlock(&kernel_flag); -#else - __asm__ __volatile__( - "decl %1\n\t" - "jns 9f\n\t" - spin_unlock_string - "\n9:" - :"=m" (__dummy_lock(&kernel_flag)), - "=m" (current->lock_depth)); -#endif -} diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h index cfb23f363e61..40f5358fc856 100644 --- a/include/linux/smp_lock.h +++ b/include/linux/smp_lock.h @@ -13,7 +13,59 @@ #else -#include +#include +#include +#include +#include + +extern spinlock_t kernel_flag; + +#define kernel_locked() (current->lock_depth >= 0) + +#define get_kernel_lock() spin_lock(&kernel_flag) +#define put_kernel_lock() spin_unlock(&kernel_flag) + +/* + * Release global kernel lock and global interrupt lock + */ +#define release_kernel_lock(task) \ +do { \ + if (unlikely(task->lock_depth >= 0)) \ + put_kernel_lock(); \ +} while (0) + +/* + * Re-acquire the kernel lock + */ +#define reacquire_kernel_lock(task) \ +do { \ + if (unlikely(task->lock_depth >= 0)) \ + get_kernel_lock(); \ +} while (0) + + +/* + * Getting the big kernel lock. + * + * This cannot happen asynchronously, + * so we only need to worry about other + * CPU's. + */ +static __inline__ void lock_kernel(void) +{ + int depth = current->lock_depth+1; + if (!depth) + get_kernel_lock(); + current->lock_depth = depth; +} + +static __inline__ void unlock_kernel(void) +{ + if (current->lock_depth < 0) + BUG(); + if (--current->lock_depth < 0) + put_kernel_lock(); +} #endif /* CONFIG_SMP */ -- cgit v1.2.3
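
A note on the generic BKL above: the lock_kernel()/unlock_kernel() pair that now lives in include/linux/smp_lock.h keeps the big kernel lock recursive through the per-task lock_depth counter. The depth starts at -1, only the transition to 0 actually takes kernel_flag, and only the transition back below 0 releases it, so nested calls within one task just count. A minimal userspace sketch of that discipline follows; the pthread_mutex_t standing in for kernel_flag and the thread-local int standing in for current->lock_depth are assumptions made for illustration, not kernel code.

/*
 * Userspace sketch of the BKL depth-counting discipline.  The mutex
 * stands in for kernel_flag; the thread-local counter stands in for
 * current->lock_depth (which starts at -1, i.e. not held).
 */
#include <assert.h>
#include <pthread.h>

static pthread_mutex_t kernel_flag = PTHREAD_MUTEX_INITIALIZER;
static __thread int lock_depth = -1;

static void lock_kernel(void)
{
	if (!++lock_depth)		/* only the outermost call locks */
		pthread_mutex_lock(&kernel_flag);
}

static void unlock_kernel(void)
{
	assert(lock_depth >= 0);	/* mirrors the BUG() check above */
	if (--lock_depth < 0)		/* only the outermost call unlocks */
		pthread_mutex_unlock(&kernel_flag);
}

int main(void)
{
	lock_kernel();
	lock_kernel();		/* nested: bumps the depth, no second lock */
	unlock_kernel();
	unlock_kernel();	/* depth back to -1, mutex released */
	return 0;
}

The assert() plays the role of the BUG() in unlock_kernel(): reaching it with the lock not held means the caller's lock/unlock pairs are unbalanced.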