summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Documentation/filesystems/afs.txt155
-rw-r--r--Documentation/filesystems/xfs.txt6
-rw-r--r--Documentation/kernel-parameters.txt24
-rw-r--r--Makefile2
-rw-r--r--arch/alpha/kernel/irq.c14
-rw-r--r--arch/alpha/kernel/irq_alpha.c32
-rw-r--r--arch/alpha/kernel/irq_i8259.c18
-rw-r--r--arch/alpha/kernel/irq_pyxis.c14
-rw-r--r--arch/alpha/kernel/irq_srm.c14
-rw-r--r--arch/alpha/kernel/setup.c32
-rw-r--r--arch/alpha/kernel/sys_alcor.c90
-rw-r--r--arch/alpha/kernel/sys_cabriolet.c168
-rw-r--r--arch/alpha/kernel/sys_dp264.c202
-rw-r--r--arch/alpha/kernel/sys_eb64p.c76
-rw-r--r--arch/alpha/kernel/sys_eiger.c48
-rw-r--r--arch/alpha/kernel/sys_jensen.c40
-rw-r--r--arch/alpha/kernel/sys_miata.c30
-rw-r--r--arch/alpha/kernel/sys_mikasa.c76
-rw-r--r--arch/alpha/kernel/sys_nautilus.c32
-rw-r--r--arch/alpha/kernel/sys_noritake.c76
-rw-r--r--arch/alpha/kernel/sys_rawhide.c48
-rw-r--r--arch/alpha/kernel/sys_ruffian.c34
-rw-r--r--arch/alpha/kernel/sys_rx164.c46
-rw-r--r--arch/alpha/kernel/sys_sable.c88
-rw-r--r--arch/alpha/kernel/sys_sio.c182
-rw-r--r--arch/alpha/kernel/sys_sx164.c34
-rw-r--r--arch/alpha/kernel/sys_takara.c46
-rw-r--r--arch/alpha/kernel/sys_titan.c50
-rw-r--r--arch/alpha/kernel/sys_wildfire.c50
-rw-r--r--arch/alpha/kernel/systbls.S1
-rw-r--r--arch/alpha/lib/ev6-stxncpy.S2
-rw-r--r--arch/alpha/lib/stxncpy.S2
-rw-r--r--arch/i386/Config.help11
-rw-r--r--arch/i386/Makefile2
-rw-r--r--arch/i386/config.in5
-rw-r--r--arch/i386/kernel/Makefile1
-rw-r--r--arch/i386/kernel/apic.c19
-rw-r--r--arch/i386/kernel/entry.S1
-rw-r--r--arch/i386/kernel/i386_ksyms.c10
-rw-r--r--arch/i386/kernel/io_apic.c6
-rw-r--r--arch/i386/kernel/mpparse.c17
-rw-r--r--arch/i386/kernel/nmi.c12
-rw-r--r--arch/i386/kernel/profile.c45
-rw-r--r--arch/i386/kernel/smpboot.c1
-rw-r--r--arch/i386/kernel/time.c5
-rw-r--r--arch/i386/kernel/traps.c37
-rw-r--r--arch/i386/mach-generic/do_timer.h3
-rw-r--r--arch/i386/mach-generic/mach_apic.h46
-rw-r--r--arch/i386/mach-summit/mach_apic.h57
-rw-r--r--arch/i386/mach-visws/do_timer.h3
-rw-r--r--arch/i386/oprofile/Config.help6
-rw-r--r--arch/i386/oprofile/Config.in9
-rw-r--r--arch/i386/oprofile/Makefile16
-rw-r--r--arch/i386/oprofile/init.c30
-rw-r--r--arch/i386/oprofile/nmi_int.c258
-rw-r--r--arch/i386/oprofile/op_counter.h29
-rw-r--r--arch/i386/oprofile/op_model_athlon.c149
-rw-r--r--arch/i386/oprofile/op_model_ppro.c133
-rw-r--r--arch/i386/oprofile/op_x86_model.h52
-rw-r--r--arch/i386/oprofile/timer_int.c57
-rw-r--r--arch/m68k/atari/stram.c5
-rw-r--r--arch/sh/kernel/mach_7751se.c86
-rw-r--r--arch/sh/kernel/mach_adx.c68
-rw-r--r--arch/sh/kernel/mach_bigsur.c70
-rw-r--r--arch/sh/kernel/mach_cat68701.c70
-rw-r--r--arch/sh/kernel/mach_dc.c86
-rw-r--r--arch/sh/kernel/mach_dmida.c60
-rw-r--r--arch/sh/kernel/mach_ec3104.c58
-rw-r--r--arch/sh/kernel/mach_hp600.c240
-rw-r--r--arch/sh/kernel/mach_se.c86
-rw-r--r--arch/sh/kernel/mach_unknown.c66
-rw-r--r--arch/sh/kernel/setup.c20
-rw-r--r--arch/sh/kernel/setup_cqreek.c84
-rw-r--r--arch/sh/kernel/setup_dc.c14
-rw-r--r--arch/sh/kernel/setup_ec3104.c14
-rw-r--r--arch/sh/kernel/setup_hd64465.c14
-rw-r--r--arch/sh/kernel/setup_sh2000.c66
-rw-r--r--arch/sh/stboards/mach.c68
-rw-r--r--arch/sparc64/kernel/pci.c126
-rw-r--r--arch/sparc64/kernel/pci_impl.h133
-rw-r--r--arch/sparc64/kernel/semaphore.c181
-rw-r--r--arch/sparc64/kernel/signal32.c18
-rw-r--r--arch/sparc64/kernel/sparc64_ksyms.c8
-rw-r--r--arch/sparc64/kernel/sys_sparc32.c16
-rw-r--r--arch/sparc64/kernel/unaligned.c4
-rw-r--r--arch/sparc64/lib/Makefile2
-rw-r--r--arch/sparc64/lib/ipcsum.S32
-rw-r--r--arch/sparc64/mm/fault.c2
-rw-r--r--arch/um/Makefile5
-rw-r--r--arch/um/Makefile-i3862
-rw-r--r--arch/um/config.in5
-rw-r--r--arch/um/drivers/chan_user.c2
-rw-r--r--arch/um/drivers/harddog_kern.c4
-rw-r--r--arch/um/drivers/hostaudio_kern.c1
-rw-r--r--arch/um/drivers/line.c25
-rw-r--r--arch/um/drivers/mconsole_kern.c28
-rw-r--r--arch/um/drivers/mconsole_user.c10
-rw-r--r--arch/um/drivers/mmapper_kern.c3
-rw-r--r--arch/um/drivers/net_kern.c25
-rw-r--r--arch/um/drivers/port_kern.c14
-rw-r--r--arch/um/drivers/ssl.c52
-rw-r--r--arch/um/drivers/stdio_console.c30
-rw-r--r--arch/um/drivers/ubd_kern.c197
-rw-r--r--arch/um/drivers/ubd_user.c4
-rw-r--r--arch/um/drivers/xterm.c1
-rw-r--r--arch/um/include/2_5compat.h2
-rw-r--r--arch/um/include/irq_user.h4
-rw-r--r--arch/um/include/kern_util.h10
-rw-r--r--arch/um/include/line.h5
-rw-r--r--arch/um/include/mconsole.h2
-rw-r--r--arch/um/include/sigio.h2
-rw-r--r--arch/um/include/tempfile.h21
-rw-r--r--arch/um/include/time_user.h6
-rw-r--r--arch/um/include/user_util.h18
-rw-r--r--arch/um/kernel/Makefile13
-rw-r--r--arch/um/kernel/exec_kern.c3
-rw-r--r--arch/um/kernel/exitcode.c3
-rw-r--r--arch/um/kernel/frame.c4
-rw-r--r--arch/um/kernel/helper.c1
-rw-r--r--arch/um/kernel/initrd_kern.c1
-rw-r--r--arch/um/kernel/irq.c29
-rw-r--r--arch/um/kernel/irq_user.c162
-rw-r--r--arch/um/kernel/ksyms.c1
-rw-r--r--arch/um/kernel/mem.c45
-rw-r--r--arch/um/kernel/mem_user.c5
-rw-r--r--arch/um/kernel/process.c14
-rw-r--r--arch/um/kernel/process_kern.c57
-rw-r--r--arch/um/kernel/sigio_kern.c13
-rw-r--r--arch/um/kernel/sigio_user.c59
-rw-r--r--arch/um/kernel/signal_user.c29
-rw-r--r--arch/um/kernel/smp.c157
-rw-r--r--arch/um/kernel/syscall_kern.c15
-rw-r--r--arch/um/kernel/syscall_user.c13
-rw-r--r--arch/um/kernel/tempfile.c79
-rw-r--r--arch/um/kernel/time.c44
-rw-r--r--arch/um/kernel/time_kern.c32
-rw-r--r--arch/um/kernel/trap_kern.c15
-rw-r--r--arch/um/kernel/trap_user.c92
-rw-r--r--arch/um/kernel/tty_log.c6
-rw-r--r--arch/um/kernel/um_arch.c36
-rw-r--r--arch/um/kernel/umid.c4
-rw-r--r--arch/um/kernel/user_util.c82
-rw-r--r--arch/um/main.c13
-rw-r--r--arch/um/ptproxy/proxy.c7
-rw-r--r--arch/um/sys-i386/bugs.c1
-rw-r--r--arch/um/sys-i386/ptrace_user.c1
-rw-r--r--arch/um/sys-ppc/miscthings.c3
-rw-r--r--arch/um/uml.lds.S3
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/acorn/block/fd1772.c5
-rw-r--r--drivers/acorn/block/mfmhd.c12
-rw-r--r--drivers/base/core.c92
-rw-r--r--drivers/block/DAC960.c3
-rw-r--r--drivers/block/Makefile4
-rw-r--r--drivers/block/acsi.c18
-rw-r--r--drivers/block/amiflop.c11
-rw-r--r--drivers/block/ataflop.c20
-rw-r--r--drivers/block/blkpg.c310
-rw-r--r--drivers/block/block_ioctl.c83
-rw-r--r--drivers/block/cciss.c9
-rw-r--r--drivers/block/cpqarray.c7
-rw-r--r--drivers/block/floppy.c12
-rw-r--r--drivers/block/genhd.c85
-rw-r--r--drivers/block/ioctl.c215
-rw-r--r--drivers/block/ll_rw_blk.c15
-rw-r--r--drivers/block/loop.c2
-rw-r--r--drivers/block/nbd.c3
-rw-r--r--drivers/block/paride/pcd.c3
-rw-r--r--drivers/block/paride/pd.c6
-rw-r--r--drivers/block/paride/pf.c6
-rw-r--r--drivers/block/ps2esdi.c16
-rw-r--r--drivers/block/rd.c9
-rw-r--r--drivers/block/scsi_ioctl.c215
-rw-r--r--drivers/block/swim3.c2
-rw-r--r--drivers/block/swim_iop.c2
-rw-r--r--drivers/block/umem.c16
-rw-r--r--drivers/block/xd.c15
-rw-r--r--drivers/block/z2ram.c3
-rw-r--r--drivers/bluetooth/Config.help18
-rw-r--r--drivers/bluetooth/Config.in22
-rw-r--r--drivers/bluetooth/Makefile16
-rw-r--r--drivers/bluetooth/bluecard_cs.c6
-rw-r--r--drivers/bluetooth/bt3c_cs.c4
-rw-r--r--drivers/bluetooth/dtl1_cs.c6
-rw-r--r--drivers/bluetooth/hci_bcsp.c14
-rw-r--r--drivers/bluetooth/hci_h4.c8
-rw-r--r--drivers/bluetooth/hci_ldisc.c20
-rw-r--r--drivers/bluetooth/hci_usb.c18
-rw-r--r--drivers/bluetooth/hci_vhci.c10
-rw-r--r--drivers/cdrom/aztcd.c3
-rw-r--r--drivers/cdrom/cdu31a.c3
-rw-r--r--drivers/cdrom/cm206.c3
-rw-r--r--drivers/cdrom/gscd.c3
-rw-r--r--drivers/cdrom/mcd.c3
-rw-r--r--drivers/cdrom/mcdx.c3
-rw-r--r--drivers/cdrom/optcd.c3
-rw-r--r--drivers/cdrom/sbpcd.c3
-rw-r--r--drivers/cdrom/sjcd.c3
-rw-r--r--drivers/cdrom/sonycd535.c3
-rw-r--r--drivers/ide/ide-cd.c433
-rw-r--r--drivers/ide/ide-cd.h7
-rw-r--r--drivers/ide/ide-disk.c2
-rw-r--r--drivers/ide/ide-floppy.c2
-rw-r--r--drivers/ide/ide-probe.c3
-rw-r--r--drivers/ide/ide.c2
-rw-r--r--drivers/ide/legacy/hd.c14
-rw-r--r--drivers/md/md.c26
-rw-r--r--drivers/md/raid0.c39
-rw-r--r--drivers/media/video/bttv-driver.c1
-rw-r--r--drivers/message/i2o/i2o_block.c3
-rw-r--r--drivers/mtd/Config.help60
-rw-r--r--drivers/mtd/Config.in5
-rw-r--r--drivers/mtd/Makefile11
-rw-r--r--drivers/mtd/bootldr.c214
-rw-r--r--drivers/mtd/cmdline.c343
-rw-r--r--drivers/mtd/ftl.c3
-rw-r--r--drivers/mtd/maps/Config.help109
-rw-r--r--drivers/mtd/maps/Config.in12
-rw-r--r--drivers/mtd/maps/Makefile20
-rw-r--r--drivers/mtd/maps/autcpu12-nvram.c179
-rw-r--r--drivers/mtd/maps/ceiva.c408
-rw-r--r--drivers/mtd/maps/dc21285.c25
-rw-r--r--drivers/mtd/maps/edb7312.c202
-rw-r--r--drivers/mtd/maps/epxa10db-flash.c233
-rw-r--r--drivers/mtd/maps/fortunet.c309
-rw-r--r--drivers/mtd/maps/impa7.c234
-rw-r--r--drivers/mtd/maps/iq80310.c5
-rw-r--r--drivers/mtd/maps/pci.c385
-rw-r--r--drivers/mtd/maps/pcmciamtd.c893
-rw-r--r--drivers/mtd/maps/sa1100-flash.c1554
-rw-r--r--drivers/mtd/mtdblock.c5
-rw-r--r--drivers/mtd/mtdblock_ro.c4
-rw-r--r--drivers/mtd/mtdconcat.c675
-rw-r--r--drivers/mtd/nftlcore.c4
-rw-r--r--drivers/oprofile/buffer_sync.c394
-rw-r--r--drivers/oprofile/buffer_sync.h19
-rw-r--r--drivers/oprofile/cpu_buffer.c135
-rw-r--r--drivers/oprofile/cpu_buffer.h45
-rw-r--r--drivers/oprofile/event_buffer.c186
-rw-r--r--drivers/oprofile/event_buffer.h42
-rw-r--r--drivers/oprofile/oprof.c153
-rw-r--r--drivers/oprofile/oprof.h34
-rw-r--r--drivers/oprofile/oprofile_files.c91
-rw-r--r--drivers/oprofile/oprofile_stats.c77
-rw-r--r--drivers/oprofile/oprofile_stats.h31
-rw-r--r--drivers/oprofile/oprofilefs.c306
-rw-r--r--drivers/pcmcia/bulkmem.c26
-rw-r--r--drivers/s390/block/dasd.c4
-rw-r--r--drivers/s390/block/dasd_genhd.c3
-rw-r--r--drivers/s390/block/xpram.c18
-rw-r--r--drivers/sbus/char/jsflash.c50
-rw-r--r--drivers/scsi/BusLogic.c66
-rw-r--r--drivers/scsi/BusLogic.h2
-rw-r--r--drivers/scsi/aacraid/linit.c43
-rw-r--r--drivers/scsi/advansys.c60
-rw-r--r--drivers/scsi/advansys.h2
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_linux.c28
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_linux_host.h2
-rw-r--r--drivers/scsi/dpt_i2o.c23
-rw-r--r--drivers/scsi/dpti.h5
-rw-r--r--drivers/scsi/eata.c77
-rw-r--r--drivers/scsi/eata.h2
-rw-r--r--drivers/scsi/fcal.c23
-rw-r--r--drivers/scsi/fcal.h2
-rw-r--r--drivers/scsi/hosts.c1
-rw-r--r--drivers/scsi/hosts.h12
-rw-r--r--drivers/scsi/ips.c10
-rw-r--r--drivers/scsi/ips.h6
-rw-r--r--drivers/scsi/ncr53c8xx.c88
-rw-r--r--drivers/scsi/ncr53c8xx.h2
-rw-r--r--drivers/scsi/pluto.c25
-rw-r--r--drivers/scsi/pluto.h2
-rw-r--r--drivers/scsi/qla1280.c51
-rw-r--r--drivers/scsi/qla1280.h3
-rw-r--r--drivers/scsi/scsi.c44
-rw-r--r--drivers/scsi/scsi.h7
-rw-r--r--drivers/scsi/scsi_scan.c8
-rw-r--r--drivers/scsi/sd.c3
-rw-r--r--drivers/scsi/sg.c4
-rw-r--r--drivers/scsi/sr.c37
-rw-r--r--drivers/scsi/st.c124
-rw-r--r--drivers/scsi/st.h1
-rw-r--r--drivers/scsi/sym53c8xx.c89
-rw-r--r--drivers/scsi/sym53c8xx.h2
-rw-r--r--drivers/scsi/sym53c8xx_2/sym53c8xx.h3
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c100
-rw-r--r--fs/Config.help15
-rw-r--r--fs/Config.in11
-rw-r--r--fs/Makefile5
-rw-r--r--fs/afs/Makefile36
-rw-r--r--fs/afs/cache-layout.h224
-rw-r--r--fs/afs/callback.c168
-rw-r--r--fs/afs/cell.c452
-rw-r--r--fs/afs/cell.h63
-rw-r--r--fs/afs/cmservice.c639
-rw-r--r--fs/afs/cmservice.h27
-rw-r--r--fs/afs/dir.c642
-rw-r--r--fs/afs/errors.h34
-rw-r--r--fs/afs/file.c143
-rw-r--r--fs/afs/fsclient.c816
-rw-r--r--fs/afs/fsclient.h53
-rw-r--r--fs/afs/inode.c418
-rw-r--r--fs/afs/internal.h127
-rw-r--r--fs/afs/kafsasyncd.c260
-rw-r--r--fs/afs/kafsasyncd.h49
-rw-r--r--fs/afs/kafstimod.c211
-rw-r--r--fs/afs/kafstimod.h45
-rw-r--r--fs/afs/main.c193
-rw-r--r--fs/afs/misc.c39
-rw-r--r--fs/afs/mntpt.c112
-rw-r--r--fs/afs/mount.h23
-rw-r--r--fs/afs/proc.c739
-rw-r--r--fs/afs/server.c489
-rw-r--r--fs/afs/server.h97
-rw-r--r--fs/afs/super.c595
-rw-r--r--fs/afs/super.h43
-rw-r--r--fs/afs/transport.h21
-rw-r--r--fs/afs/types.h152
-rw-r--r--fs/afs/vlclient.c662
-rw-r--r--fs/afs/vlclient.h95
-rw-r--r--fs/afs/vlocation.c824
-rw-r--r--fs/afs/vnode.c316
-rw-r--r--fs/afs/vnode.h88
-rw-r--r--fs/afs/volume.c430
-rw-r--r--fs/afs/volume.h92
-rw-r--r--fs/block_dev.c119
-rw-r--r--fs/buffer.c7
-rw-r--r--fs/dcache.c1
-rw-r--r--fs/dcookies.c323
-rw-r--r--fs/jfs/jfs_dtree.c6
-rw-r--r--fs/jfs/jfs_mount.c4
-rw-r--r--fs/nfs/Makefile1
-rw-r--r--fs/nfs/file.c6
-rw-r--r--fs/nfs/inode.c578
-rw-r--r--fs/nfs/nfs2xdr.c29
-rw-r--r--fs/nfs/nfs3proc.c40
-rw-r--r--fs/nfs/nfs3xdr.c18
-rw-r--r--fs/nfs/nfs4proc.c1577
-rw-r--r--fs/nfs/nfs4renewd.c110
-rw-r--r--fs/nfs/nfs4state.c81
-rw-r--r--fs/nfs/nfs4xdr.c1777
-rw-r--r--fs/nfs/proc.c53
-rw-r--r--fs/partitions/check.c454
-rw-r--r--fs/proc/proc_misc.c1
-rw-r--r--fs/xfs/linux/xfs_aops.c242
-rw-r--r--fs/xfs/linux/xfs_fs_subr.c2
-rw-r--r--fs/xfs/linux/xfs_globals.c9
-rw-r--r--fs/xfs/linux/xfs_globals.h1
-rw-r--r--fs/xfs/linux/xfs_ioctl.c10
-rw-r--r--fs/xfs/linux/xfs_iops.c64
-rw-r--r--fs/xfs/linux/xfs_iops.h1
-rw-r--r--fs/xfs/linux/xfs_linux.h4
-rw-r--r--fs/xfs/linux/xfs_lrw.c285
-rw-r--r--fs/xfs/linux/xfs_lrw.h8
-rw-r--r--fs/xfs/linux/xfs_super.c195
-rw-r--r--fs/xfs/linux/xfs_super.h10
-rw-r--r--fs/xfs/linux/xfs_sysctl.c61
-rw-r--r--fs/xfs/linux/xfs_sysctl.h15
-rw-r--r--fs/xfs/linux/xfs_vfs.h10
-rw-r--r--fs/xfs/linux/xfs_vnode.c46
-rw-r--r--fs/xfs/linux/xfs_vnode.h27
-rw-r--r--fs/xfs/pagebuf/page_buf.c27
-rw-r--r--fs/xfs/pagebuf/page_buf.h45
-rw-r--r--fs/xfs/pagebuf/page_buf_locking.c81
-rw-r--r--fs/xfs/support/debug.c39
-rw-r--r--fs/xfs/support/move.c4
-rw-r--r--fs/xfs/support/move.h5
-rw-r--r--fs/xfs/support/uuid.c4
-rw-r--r--fs/xfs/xfs_alloc.c22
-rw-r--r--fs/xfs/xfs_alloc_btree.c36
-rw-r--r--fs/xfs/xfs_arch.h4
-rw-r--r--fs/xfs/xfs_attr.c6
-rw-r--r--fs/xfs/xfs_attr_fetch.c2
-rw-r--r--fs/xfs/xfs_attr_leaf.c92
-rw-r--r--fs/xfs/xfs_bmap.c98
-rw-r--r--fs/xfs/xfs_bmap_btree.c321
-rw-r--r--fs/xfs/xfs_bmap_btree.h55
-rw-r--r--fs/xfs/xfs_btree.c6
-rw-r--r--fs/xfs/xfs_buf_item.c2
-rw-r--r--fs/xfs/xfs_clnt.h9
-rw-r--r--fs/xfs/xfs_da_btree.c40
-rw-r--r--fs/xfs/xfs_dir.c10
-rw-r--r--fs/xfs/xfs_dir2.c6
-rw-r--r--fs/xfs/xfs_dir2_block.c14
-rw-r--r--fs/xfs/xfs_dir2_data.c2
-rw-r--r--fs/xfs/xfs_dir2_leaf.c30
-rw-r--r--fs/xfs/xfs_dir2_node.c24
-rw-r--r--fs/xfs/xfs_dir2_sf.c32
-rw-r--r--fs/xfs/xfs_dir2_sf.h2
-rw-r--r--fs/xfs/xfs_dir2_trace.c4
-rw-r--r--fs/xfs/xfs_dir_leaf.c74
-rw-r--r--fs/xfs/xfs_dir_sf.h2
-rw-r--r--fs/xfs/xfs_dquot.c14
-rw-r--r--fs/xfs/xfs_error.c16
-rw-r--r--fs/xfs/xfs_fs.h12
-rw-r--r--fs/xfs/xfs_fsops.c16
-rw-r--r--fs/xfs/xfs_ialloc.c4
-rw-r--r--fs/xfs/xfs_ialloc_btree.c36
-rw-r--r--fs/xfs/xfs_iget.c69
-rw-r--r--fs/xfs/xfs_inode.c143
-rw-r--r--fs/xfs/xfs_inode.h24
-rw-r--r--fs/xfs/xfs_inode_item.c42
-rw-r--r--fs/xfs/xfs_inode_item.h5
-rw-r--r--fs/xfs/xfs_itable.c2
-rw-r--r--fs/xfs/xfs_log.c66
-rw-r--r--fs/xfs/xfs_log_recover.c66
-rw-r--r--fs/xfs/xfs_mount.c106
-rw-r--r--fs/xfs/xfs_mount.h15
-rw-r--r--fs/xfs/xfs_qm.c37
-rw-r--r--fs/xfs/xfs_qm_syscalls.c6
-rw-r--r--fs/xfs/xfs_quota_priv.h2
-rw-r--r--fs/xfs/xfs_rtalloc.c4
-rw-r--r--fs/xfs/xfs_rw.c4
-rw-r--r--fs/xfs/xfs_rw.h2
-rw-r--r--fs/xfs/xfs_trans.c1
-rw-r--r--fs/xfs/xfs_trans.h5
-rw-r--r--fs/xfs/xfs_trans_buf.c4
-rw-r--r--fs/xfs/xfs_trans_inode.c14
-rw-r--r--fs/xfs/xfs_types.h32
-rw-r--r--fs/xfs/xfs_utils.c4
-rw-r--r--fs/xfs/xfs_utils.h2
-rw-r--r--fs/xfs/xfs_vfsops.c209
-rw-r--r--fs/xfs/xfs_vnodeops.c27
-rw-r--r--fs/xfs/xfsidbg.c315
-rw-r--r--include/asm-i386/apicdef.h8
-rw-r--r--include/asm-i386/hw_irq.h49
-rw-r--r--include/asm-i386/mpspec.h6
-rw-r--r--include/asm-i386/msr.h6
-rw-r--r--include/asm-i386/nmi.h49
-rw-r--r--include/asm-i386/smp.h14
-rw-r--r--include/asm-i386/smpboot.h9
-rw-r--r--include/asm-i386/unistd.h2
-rw-r--r--include/asm-sparc64/checksum.h41
-rw-r--r--include/asm-sparc64/irq.h2
-rw-r--r--include/asm-sparc64/semaphore.h182
-rw-r--r--include/asm-um/cache.h3
-rw-r--r--include/asm-um/smp.h17
-rw-r--r--include/asm-um/thread_info.h4
-rw-r--r--include/linux/blkdev.h13
-rw-r--r--include/linux/blkpg.h1
-rw-r--r--include/linux/cdrom.h1
-rw-r--r--include/linux/dcache.h3
-rw-r--r--include/linux/dcookies.h69
-rw-r--r--include/linux/device.h3
-rw-r--r--include/linux/fs.h2
-rw-r--r--include/linux/genhd.h21
-rw-r--r--include/linux/mtd/concat.h23
-rw-r--r--include/linux/nfs.h2
-rw-r--r--include/linux/nfs3.h5
-rw-r--r--include/linux/nfs4_mount.h70
-rw-r--r--include/linux/nfs_fs.h49
-rw-r--r--include/linux/nfs_fs_sb.h10
-rw-r--r--include/linux/nfs_mount.h8
-rw-r--r--include/linux/nfs_xdr.h262
-rw-r--r--include/linux/oprofile.h98
-rw-r--r--include/linux/profile.h67
-rw-r--r--include/linux/rcupdate.h134
-rw-r--r--include/linux/sched.h4
-rw-r--r--include/linux/vcache.h2
-rw-r--r--include/net/bluetooth/bluetooth.h80
-rw-r--r--include/net/bluetooth/hci.h4
-rw-r--r--include/net/bluetooth/hci_core.h2
-rw-r--r--include/rxrpc/call.h218
-rw-r--r--include/rxrpc/connection.h83
-rw-r--r--include/rxrpc/krxiod.h27
-rw-r--r--include/rxrpc/krxsecd.h22
-rw-r--r--include/rxrpc/krxtimod.h45
-rw-r--r--include/rxrpc/message.h72
-rw-r--r--include/rxrpc/packet.h128
-rw-r--r--include/rxrpc/peer.h80
-rw-r--r--include/rxrpc/rxrpc.h29
-rw-r--r--include/rxrpc/transport.h115
-rw-r--r--include/rxrpc/types.h39
-rw-r--r--include/sound/asound.h3
-rw-r--r--include/sound/core.h1
-rw-r--r--include/sound/info.h6
-rw-r--r--include/sound/version.h2
-rw-r--r--init/main.c22
-rw-r--r--kernel/Makefile8
-rw-r--r--kernel/exit.c10
-rw-r--r--kernel/futex.c29
-rw-r--r--kernel/profile.c121
-rw-r--r--kernel/rcupdate.c242
-rw-r--r--kernel/sched.c5
-rw-r--r--kernel/sys.c2
-rw-r--r--kernel/timer.c4
-rw-r--r--mm/mmap.c8
-rw-r--r--mm/vcache.c19
-rw-r--r--net/Makefile3
-rw-r--r--net/bluetooth/Config.help14
-rw-r--r--net/bluetooth/Makefile12
-rw-r--r--net/bluetooth/af_bluetooth.c111
-rw-r--r--net/bluetooth/bnep/Config.help6
-rw-r--r--net/bluetooth/bnep/Config.in8
-rw-r--r--net/bluetooth/bnep/Makefile2
-rw-r--r--net/bluetooth/bnep/core.c16
-rw-r--r--net/bluetooth/bnep/netdev.c12
-rw-r--r--net/bluetooth/bnep/sock.c8
-rw-r--r--net/bluetooth/hci_conn.c2
-rw-r--r--net/bluetooth/hci_core.c40
-rw-r--r--net/bluetooth/hci_event.c20
-rw-r--r--net/bluetooth/hci_sock.c28
-rw-r--r--net/bluetooth/l2cap.c94
-rw-r--r--net/bluetooth/lib.c8
-rw-r--r--net/bluetooth/rfcomm/Config.help4
-rw-r--r--net/bluetooth/rfcomm/Config.in6
-rw-r--r--net/bluetooth/rfcomm/Makefile4
-rw-r--r--net/bluetooth/rfcomm/core.c24
-rw-r--r--net/bluetooth/rfcomm/sock.c54
-rw-r--r--net/bluetooth/rfcomm/tty.c2
-rw-r--r--net/bluetooth/sco.c70
-rw-r--r--net/bluetooth/syms.c33
-rw-r--r--net/rxrpc/Makefile33
-rw-r--r--net/rxrpc/call.c2122
-rw-r--r--net/rxrpc/connection.c687
-rw-r--r--net/rxrpc/internal.h107
-rw-r--r--net/rxrpc/krxiod.c262
-rw-r--r--net/rxrpc/krxsecd.c278
-rw-r--r--net/rxrpc/krxtimod.c210
-rw-r--r--net/rxrpc/main.c127
-rw-r--r--net/rxrpc/peer.c380
-rw-r--r--net/rxrpc/proc.c612
-rw-r--r--net/rxrpc/rxrpc_syms.c51
-rw-r--r--net/rxrpc/sysctl.c73
-rw-r--r--net/rxrpc/transport.c824
-rw-r--r--sound/core/control.c38
-rw-r--r--sound/core/info_oss.c2
-rw-r--r--sound/core/init.c2
-rw-r--r--sound/core/ioctl32/ioctl32.c81
-rw-r--r--sound/core/ioctl32/ioctl32.h38
-rw-r--r--sound/core/ioctl32/pcm32.c53
-rw-r--r--sound/core/memory.c8
-rw-r--r--sound/core/oss/mixer_oss.c174
-rw-r--r--sound/core/oss/pcm_oss.c4
-rw-r--r--sound/core/pcm_lib.c66
-rw-r--r--sound/core/pcm_native.c134
-rw-r--r--sound/core/rawmidi.c8
-rw-r--r--sound/core/seq/oss/seq_oss_readq.c7
-rw-r--r--sound/core/seq/oss/seq_oss_synth.c4
-rw-r--r--sound/core/seq/oss/seq_oss_writeq.c4
-rw-r--r--sound/core/seq/seq.c4
-rw-r--r--sound/core/seq/seq_clientmgr.c8
-rw-r--r--sound/core/seq/seq_fifo.c4
-rw-r--r--sound/core/seq/seq_lock.c2
-rw-r--r--sound/core/seq/seq_lock.h9
-rw-r--r--sound/core/seq/seq_memory.c12
-rw-r--r--sound/core/sound.c11
-rw-r--r--sound/core/sound_oss.c12
-rw-r--r--sound/core/timer.c4
-rw-r--r--sound/isa/es18xx.c24
-rw-r--r--sound/isa/opl3sa2.c1
-rw-r--r--sound/isa/sb/emu8000.c2
-rw-r--r--sound/isa/wavefront/wavefront_synth.c2
-rw-r--r--sound/pci/Config.help2
-rw-r--r--sound/pci/Config.in21
-rw-r--r--sound/pci/emu10k1/emufx.c27
-rw-r--r--sound/pci/ens1370.c6
-rw-r--r--sound/pci/ymfpci/ymfpci_main.c6
-rw-r--r--sound/usb/usbaudio.c28
-rw-r--r--sound/usb/usbmixer.c381
-rw-r--r--sound/usb/usbmixer_maps.c100
-rw-r--r--sound/usb/usbquirks.h66
562 files changed, 36937 insertions, 7792 deletions
diff --git a/Documentation/filesystems/afs.txt b/Documentation/filesystems/afs.txt
new file mode 100644
index 000000000000..2f4237dfb8c7
--- /dev/null
+++ b/Documentation/filesystems/afs.txt
@@ -0,0 +1,155 @@
+ kAFS: AFS FILESYSTEM
+ ====================
+
+ABOUT
+=====
+
+This filesystem provides a fairly simple AFS filesystem driver. It is under
+development and only provides very basic facilities. It does not yet support
+the following AFS features:
+
+ (*) Write support.
+ (*) Communications security.
+ (*) Local caching.
+ (*) pioctl() system call.
+ (*) Automatic mounting of embedded mountpoints.
+
+
+USAGE
+=====
+
+When inserting the driver modules the root cell must be specified along with a
+list of volume location server IP addresses:
+
+ insmod rxrpc.o
+ insmod kafs.o rootcell=cambridge.redhat.com:172.16.18.73:172.16.18.91
+
+The first module is a driver for the RxRPC remote operation protocol, and the
+second is the actual filesystem driver for the AFS filesystem.
+
+Once the module has been loaded, more modules can be added by the following
+procedure:
+
+ echo add grand.central.org 18.7.14.88:128.2.191.224 >/proc/fs/afs/cells
+
+Where the parameters to the "add" command are the name of a cell and a list of
+volume location servers within that cell.
+
+Filesystems can be mounted anywhere by commands similar to the following:
+
+ mount -t afs "%cambridge.redhat.com:root.afs." /afs
+ mount -t afs "#cambridge.redhat.com:root.cell." /afs/cambridge
+ mount -t afs "#root.afs." /afs
+ mount -t afs "#root.cell." /afs/cambridge
+
+ NB: When using this on Linux 2.4, the mount command has to be different,
+ since the filesystem doesn't have access to the device name argument:
+
+ mount -t afs none /afs -ovol="#root.afs."
+
+Where the initial character is either a hash or a percent symbol depending on
+whether you definitely want a R/W volume (hash) or whether you'd prefer a R/O
+volume, but are willing to use a R/W volume instead (percent).
+
+The name of the volume can be suffixes with ".backup" or ".readonly" to
+specify connection to only volumes of those types.
+
+The name of the cell is optional, and if not given during a mount, then the
+named volume will be looked up in the cell specified during insmod.
+
+Additional cells can be added through /proc (see later section).
+
+
+MOUNTPOINTS
+===========
+
+AFS has a concept of mountpoints. These are specially formatted symbolic links
+(of the same form as the "device name" passed to mount). kAFS presents these
+to the user as directories that have special properties:
+
+ (*) They cannot be listed. Running a program like "ls" on them will incur an
+ EREMOTE error (Object is remote).
+
+ (*) Other objects can't be looked up inside of them. This also incurs an
+ EREMOTE error.
+
+ (*) They can be queried with the readlink() system call, which will return
+ the name of the mountpoint to which they point. The "readlink" program
+ will also work.
+
+ (*) They can be mounted on (which symbolic links can't).
+
+
+PROC FILESYSTEM
+===============
+
+The rxrpc module creates a number of files in various places in the /proc
+filesystem:
+
+ (*) Firstly, some information files are made available in a directory called
+ "/proc/net/rxrpc/". These list the extant transport endpoint, peer,
+ connection and call records.
+
+ (*) Secondly, some control files are made available in a directory called
+ "/proc/sys/rxrpc/". Currently, all these files can be used for is to
+ turn on various levels of tracing.
+
+The AFS modules creates a "/proc/fs/afs/" directory and populates it:
+
+ (*) A "cells" file that lists cells currently known to the afs module.
+
+ (*) A directory per cell that contains files that list volume location
+ servers, volumes, and active servers known within that cell.
+
+
+THE CELL DATABASE
+=================
+
+The filesystem maintains an internal database of all the cells it knows and
+the IP addresses of the volume location servers for those cells. The cell to
+which the computer belongs is added to the database when insmod is performed
+by the "rootcell=" argument.
+
+Further cells can be added by commands similar to the following:
+
+ echo add CELLNAME VLADDR[:VLADDR][:VLADDR]... >/proc/fs/afs/cells
+ echo add grand.central.org 18.7.14.88:128.2.191.224 >/proc/fs/afs/cells
+
+No other cell database operations are available at this time.
+
+
+EXAMPLES
+========
+
+Here's what I use to test this. Some of the names and IP addresses are local
+to my internal DNS. My "root.afs" partition has a mount point within it for
+some public volumes volumes.
+
+insmod -S /tmp/rxrpc.o
+insmod -S /tmp/kafs.o rootcell=cambridge.redhat.com:172.16.18.73:172.16.18.91
+
+mount -t afs \%root.afs. /afs
+mount -t afs \%cambridge.redhat.com:root.cell. /afs/cambridge.redhat.com/
+
+echo add grand.central.org 18.7.14.88:128.2.191.224 > /proc/fs/afs/cells
+mount -t afs "#grand.central.org:root.cell." /afs/grand.central.org/
+mount -t afs "#grand.central.org:root.archive." /afs/grand.central.org/archive
+mount -t afs "#grand.central.org:root.contrib." /afs/grand.central.org/contrib
+mount -t afs "#grand.central.org:root.doc." /afs/grand.central.org/doc
+mount -t afs "#grand.central.org:root.project." /afs/grand.central.org/project
+mount -t afs "#grand.central.org:root.service." /afs/grand.central.org/service
+mount -t afs "#grand.central.org:root.software." /afs/grand.central.org/software
+mount -t afs "#grand.central.org:root.user." /afs/grand.central.org/user
+
+umount /afs/grand.central.org/user
+umount /afs/grand.central.org/software
+umount /afs/grand.central.org/service
+umount /afs/grand.central.org/project
+umount /afs/grand.central.org/doc
+umount /afs/grand.central.org/contrib
+umount /afs/grand.central.org/archive
+umount /afs/grand.central.org
+umount /afs/cambridge.redhat.com
+umount /afs
+rmmod kafs
+rmmod rxrpc
diff --git a/Documentation/filesystems/xfs.txt b/Documentation/filesystems/xfs.txt
index 50be3f374c27..33538b8704f7 100644
--- a/Documentation/filesystems/xfs.txt
+++ b/Documentation/filesystems/xfs.txt
@@ -29,12 +29,6 @@ When mounting an XFS filesystem, the following options are accepted.
The preferred buffered I/O size can also be altered on an
individual file basis using the ioctl(2) system call.
- irixsgid
- Do not inherit the ISGID bit on subdirectories of ISGID
- directories, if the process creating the subdirectory
- is not a member of the parent directory group ID.
- This matches IRIX behavior.
-
logbufs=value
Set the number of in-memory log buffers. Valid numbers range
from 2-8 inclusive.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 3f9fb426bf4f..54668d2be0d2 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -23,6 +23,7 @@ restrictions referred to are that the relevant option is valid if:
HW Appropriate hardware is enabled.
IA-32 IA-32 aka i386 architecture is enabled.
IA-64 IA-64 architecture is enabled.
+ IP_PNP IP DCHP, BOOTP, or RARP is enabled.
ISAPNP ISA PnP code is enabled.
ISDN Appropriate ISDN support is enabled.
JOY Appropriate joystick support is enabled.
@@ -257,7 +258,7 @@ running once the system is up.
initrd= [BOOT] Specify the location of the initial ramdisk.
- ip= [PNP]
+ ip= [IP_PNP]
isapnp= [ISAPNP] Specify RDP, reset, pci_scan and verbosity.
@@ -279,10 +280,14 @@ running once the system is up.
kbd-reset [VT]
- keep_initrd [HW, ARM]
+ keepinitrd [HW, ARM]
load_ramdisk= [RAM] List of ramdisks to load from floppy.
+ lockd.udpport= [NFS]
+
+ lockd.tcpport= [NFS]
+
logi_busmouse= [HW, MOUSE]
lp=0 [LP] Specify parallel ports to use, e.g,
@@ -323,6 +328,8 @@ running once the system is up.
to off as the mainboard support is not always present.
You must activate it as a boot option
+ mca-pentium [BUGS=IA-32]
+
mcd= [HW,CD]
mcdx= [HW,CD]
@@ -335,6 +342,11 @@ running once the system is up.
megaraid= [HW,SCSI]
+ mem=exactmap [KNL,BOOT,IA-32] enable setting of an exact
+ e820 memory map, as specified by the user.
+ Such mem=exactmap lines can be constructed
+ based on BIOS output or other requirements.
+
mem=nn[KMG] [KNL,BOOT] force use of a specific amount of
memory; to be used when the kernel is not able
to see the whole system memory or for test.
@@ -390,7 +402,9 @@ running once the system is up.
nohlt [BUGS=ARM]
- no-hlt [BUGS=IA-32]
+ no-hlt [BUGS=IA-32] Tells the kernel that the hlt
+ instruction doesn't work correctly and not to
+ use it.
noht [SMP,IA-32] Disables P4 Xeon(tm) HyperThreading.
@@ -537,6 +551,10 @@ running once the system is up.
root= [KNL] root filesystem.
+ rootflags= [KNL] set root filesystem mount option string
+
+ rootfstype= [KNL] set root filesystem type
+
rw [KNL] Mount root device read-write on boot.
S [KNL] run init in single mode.
diff --git a/Makefile b/Makefile
index b3df7fb5a849..e94c4aca82ee 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 2
PATCHLEVEL = 5
-SUBLEVEL = 42
+SUBLEVEL = 43
EXTRAVERSION =
# *DOCUMENTATION*
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index ff03ca642089..611a52d60188 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -62,13 +62,13 @@ no_irq_ack(unsigned int irq)
}
struct hw_interrupt_type no_irq_type = {
- typename: "none",
- startup: no_irq_startup,
- shutdown: no_irq_enable_disable,
- enable: no_irq_enable_disable,
- disable: no_irq_enable_disable,
- ack: no_irq_ack,
- end: no_irq_enable_disable,
+ .typename = "none",
+ .startup = no_irq_startup,
+ .shutdown = no_irq_enable_disable,
+ .enable = no_irq_enable_disable,
+ .disable = no_irq_enable_disable,
+ .ack = no_irq_ack,
+ .end = no_irq_enable_disable,
};
int
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c
index c765add9fa67..476f17980249 100644
--- a/arch/alpha/kernel/irq_alpha.c
+++ b/arch/alpha/kernel/irq_alpha.c
@@ -213,19 +213,19 @@ static void rtc_enable_disable(unsigned int irq) { }
static unsigned int rtc_startup(unsigned int irq) { return 0; }
struct irqaction timer_irqaction = {
- handler: timer_interrupt,
- flags: SA_INTERRUPT,
- name: "timer",
+ .handler = timer_interrupt,
+ .flags = SA_INTERRUPT,
+ .name = "timer",
};
static struct hw_interrupt_type rtc_irq_type = {
- typename: "RTC",
- startup: rtc_startup,
- shutdown: rtc_enable_disable,
- enable: rtc_enable_disable,
- disable: rtc_enable_disable,
- ack: rtc_enable_disable,
- end: rtc_enable_disable,
+ .typename = "RTC",
+ .startup = rtc_startup,
+ .shutdown = rtc_enable_disable,
+ .enable = rtc_enable_disable,
+ .disable = rtc_enable_disable,
+ .ack = rtc_enable_disable,
+ .end = rtc_enable_disable,
};
void __init
@@ -238,16 +238,16 @@ init_rtc_irq(void)
/* Dummy irqactions. */
struct irqaction isa_cascade_irqaction = {
- handler: no_action,
- name: "isa-cascade"
+ .handler = no_action,
+ .name = "isa-cascade"
};
struct irqaction timer_cascade_irqaction = {
- handler: no_action,
- name: "timer-cascade"
+ .handler = no_action,
+ .name = "timer-cascade"
};
struct irqaction halt_switch_irqaction = {
- handler: no_action,
- name: "halt-switch"
+ .handler = no_action,
+ .name = "halt-switch"
};
diff --git a/arch/alpha/kernel/irq_i8259.c b/arch/alpha/kernel/irq_i8259.c
index 4635ae7ac2ad..f308e98ff210 100644
--- a/arch/alpha/kernel/irq_i8259.c
+++ b/arch/alpha/kernel/irq_i8259.c
@@ -85,21 +85,21 @@ i8259a_end_irq(unsigned int irq)
}
struct hw_interrupt_type i8259a_irq_type = {
- typename: "XT-PIC",
- startup: i8259a_startup_irq,
- shutdown: i8259a_disable_irq,
- enable: i8259a_enable_irq,
- disable: i8259a_disable_irq,
- ack: i8259a_mask_and_ack_irq,
- end: i8259a_end_irq,
+ .typename = "XT-PIC",
+ .startup = i8259a_startup_irq,
+ .shutdown = i8259a_disable_irq,
+ .enable = i8259a_enable_irq,
+ .disable = i8259a_disable_irq,
+ .ack = i8259a_mask_and_ack_irq,
+ .end = i8259a_end_irq,
};
void __init
init_i8259a_irqs(void)
{
static struct irqaction cascade = {
- handler: no_action,
- name: "cascade",
+ .handler = no_action,
+ .name = "cascade",
};
long i;
diff --git a/arch/alpha/kernel/irq_pyxis.c b/arch/alpha/kernel/irq_pyxis.c
index 65414a7f50ee..146a20b9e3d5 100644
--- a/arch/alpha/kernel/irq_pyxis.c
+++ b/arch/alpha/kernel/irq_pyxis.c
@@ -71,13 +71,13 @@ pyxis_mask_and_ack_irq(unsigned int irq)
}
static struct hw_interrupt_type pyxis_irq_type = {
- typename: "PYXIS",
- startup: pyxis_startup_irq,
- shutdown: pyxis_disable_irq,
- enable: pyxis_enable_irq,
- disable: pyxis_disable_irq,
- ack: pyxis_mask_and_ack_irq,
- end: pyxis_end_irq,
+ .typename = "PYXIS",
+ .startup = pyxis_startup_irq,
+ .shutdown = pyxis_disable_irq,
+ .enable = pyxis_enable_irq,
+ .disable = pyxis_disable_irq,
+ .ack = pyxis_mask_and_ack_irq,
+ .end = pyxis_end_irq,
};
void
diff --git a/arch/alpha/kernel/irq_srm.c b/arch/alpha/kernel/irq_srm.c
index 51806c9962e4..b51316cfabf6 100644
--- a/arch/alpha/kernel/irq_srm.c
+++ b/arch/alpha/kernel/irq_srm.c
@@ -49,13 +49,13 @@ srm_end_irq(unsigned int irq)
/* Handle interrupts from the SRM, assuming no additional weirdness. */
static struct hw_interrupt_type srm_irq_type = {
- typename: "SRM",
- startup: srm_startup_irq,
- shutdown: srm_disable_irq,
- enable: srm_enable_irq,
- disable: srm_disable_irq,
- ack: srm_disable_irq,
- end: srm_end_irq,
+ .typename = "SRM",
+ .startup = srm_startup_irq,
+ .shutdown = srm_disable_irq,
+ .enable = srm_enable_irq,
+ .disable = srm_disable_irq,
+ .ack = srm_disable_irq,
+ .end = srm_end_irq,
};
void __init
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index bec5ea08989f..594e139ebfc6 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -112,12 +112,12 @@ char saved_command_line[COMMAND_LINE_SIZE];
*/
struct screen_info screen_info = {
- orig_x: 0,
- orig_y: 25,
- orig_video_cols: 80,
- orig_video_lines: 25,
- orig_video_isVGA: 1,
- orig_video_points: 16
+ .orig_x = 0,
+ .orig_y = 25,
+ .orig_video_cols = 80,
+ .orig_video_lines = 25,
+ .orig_video_isVGA = 1,
+ .orig_video_points = 16
};
/*
@@ -452,12 +452,12 @@ static int __init srm_console_setup(struct console *co, char *options)
}
static struct console srmcons = {
- name: "srm0",
- write: srm_console_write,
- device: srm_console_device,
- setup: srm_console_setup,
- flags: CON_PRINTBUFFER | CON_ENABLED, /* fake it out */
- index: -1,
+ .name = "srm0",
+ .write = srm_console_write,
+ .device = srm_console_device,
+ .setup = srm_console_setup,
+ .flags = CON_PRINTBUFFER | CON_ENABLED, /* fake it out */
+ .index = -1,
};
#else
@@ -1150,10 +1150,10 @@ c_stop(struct seq_file *f, void *v)
}
struct seq_operations cpuinfo_op = {
- start: c_start,
- next: c_next,
- stop: c_stop,
- show: show_cpuinfo,
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = show_cpuinfo,
};
diff --git a/arch/alpha/kernel/sys_alcor.c b/arch/alpha/kernel/sys_alcor.c
index fa32465ab44f..9eda25b63086 100644
--- a/arch/alpha/kernel/sys_alcor.c
+++ b/arch/alpha/kernel/sys_alcor.c
@@ -91,13 +91,13 @@ alcor_end_irq(unsigned int irq)
}
static struct hw_interrupt_type alcor_irq_type = {
- typename: "ALCOR",
- startup: alcor_startup_irq,
- shutdown: alcor_disable_irq,
- enable: alcor_enable_irq,
- disable: alcor_disable_irq,
- ack: alcor_mask_and_ack_irq,
- end: alcor_end_irq,
+ .typename = "ALCOR",
+ .startup = alcor_startup_irq,
+ .shutdown = alcor_disable_irq,
+ .enable = alcor_enable_irq,
+ .disable = alcor_disable_irq,
+ .ack = alcor_mask_and_ack_irq,
+ .end = alcor_end_irq,
};
static void
@@ -245,29 +245,29 @@ alcor_kill_arch(int mode)
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_ALCOR)
struct alpha_machine_vector alcor_mv __initmv = {
- vector_name: "Alcor",
+ .vector_name = "Alcor",
DO_EV5_MMU,
DO_DEFAULT_RTC,
DO_CIA_IO,
DO_CIA_BUS,
- machine_check: cia_machine_check,
- max_dma_address: ALPHA_ALCOR_MAX_DMA_ADDRESS,
- min_io_address: EISA_DEFAULT_IO_BASE,
- min_mem_address: CIA_DEFAULT_MEM_BASE,
-
- nr_irqs: 48,
- device_interrupt: alcor_device_interrupt,
-
- init_arch: cia_init_arch,
- init_irq: alcor_init_irq,
- init_rtc: common_init_rtc,
- init_pci: cia_init_pci,
- kill_arch: alcor_kill_arch,
- pci_map_irq: alcor_map_irq,
- pci_swizzle: common_swizzle,
-
- sys: { cia: {
- gru_int_req_bits: ALCOR_GRU_INT_REQ_BITS
+ .machine_check = cia_machine_check,
+ .max_dma_address = ALPHA_ALCOR_MAX_DMA_ADDRESS,
+ .min_io_address = EISA_DEFAULT_IO_BASE,
+ .min_mem_address = CIA_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 48,
+ .device_interrupt = alcor_device_interrupt,
+
+ .init_arch = cia_init_arch,
+ .init_irq = alcor_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = cia_init_pci,
+ .kill_arch = alcor_kill_arch,
+ .pci_map_irq = alcor_map_irq,
+ .pci_swizzle = common_swizzle,
+
+ .sys = { .cia = {
+ .gru_int_req_bits = ALCOR_GRU_INT_REQ_BITS
}}
};
ALIAS_MV(alcor)
@@ -275,29 +275,29 @@ ALIAS_MV(alcor)
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_XLT)
struct alpha_machine_vector xlt_mv __initmv = {
- vector_name: "XLT",
+ .vector_name = "XLT",
DO_EV5_MMU,
DO_DEFAULT_RTC,
DO_CIA_IO,
DO_CIA_BUS,
- machine_check: cia_machine_check,
- max_dma_address: ALPHA_MAX_DMA_ADDRESS,
- min_io_address: EISA_DEFAULT_IO_BASE,
- min_mem_address: CIA_DEFAULT_MEM_BASE,
-
- nr_irqs: 48,
- device_interrupt: alcor_device_interrupt,
-
- init_arch: cia_init_arch,
- init_irq: alcor_init_irq,
- init_rtc: common_init_rtc,
- init_pci: cia_init_pci,
- kill_arch: alcor_kill_arch,
- pci_map_irq: alcor_map_irq,
- pci_swizzle: common_swizzle,
-
- sys: { cia: {
- gru_int_req_bits: XLT_GRU_INT_REQ_BITS
+ .machine_check = cia_machine_check,
+ .max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+ .min_io_address = EISA_DEFAULT_IO_BASE,
+ .min_mem_address = CIA_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 48,
+ .device_interrupt = alcor_device_interrupt,
+
+ .init_arch = cia_init_arch,
+ .init_irq = alcor_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = cia_init_pci,
+ .kill_arch = alcor_kill_arch,
+ .pci_map_irq = alcor_map_irq,
+ .pci_swizzle = common_swizzle,
+
+ .sys = { .cia = {
+ .gru_int_req_bits = XLT_GRU_INT_REQ_BITS
}}
};
ALIAS_MV(xlt)
diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c
index 618cd340a31c..68fb947bb057 100644
--- a/arch/alpha/kernel/sys_cabriolet.c
+++ b/arch/alpha/kernel/sys_cabriolet.c
@@ -73,13 +73,13 @@ cabriolet_end_irq(unsigned int irq)
}
static struct hw_interrupt_type cabriolet_irq_type = {
- typename: "CABRIOLET",
- startup: cabriolet_startup_irq,
- shutdown: cabriolet_disable_irq,
- enable: cabriolet_enable_irq,
- disable: cabriolet_disable_irq,
- ack: cabriolet_disable_irq,
- end: cabriolet_end_irq,
+ .typename = "CABRIOLET",
+ .startup = cabriolet_startup_irq,
+ .shutdown = cabriolet_disable_irq,
+ .enable = cabriolet_enable_irq,
+ .disable = cabriolet_disable_irq,
+ .ack = cabriolet_disable_irq,
+ .end = cabriolet_end_irq,
};
static void
@@ -321,26 +321,26 @@ alphapc164_init_pci(void)
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_CABRIOLET)
struct alpha_machine_vector cabriolet_mv __initmv = {
- vector_name: "Cabriolet",
+ .vector_name = "Cabriolet",
DO_EV4_MMU,
DO_DEFAULT_RTC,
DO_APECS_IO,
DO_APECS_BUS,
- machine_check: apecs_machine_check,
- max_dma_address: ALPHA_MAX_DMA_ADDRESS,
- min_io_address: DEFAULT_IO_BASE,
- min_mem_address: APECS_AND_LCA_DEFAULT_MEM_BASE,
-
- nr_irqs: 35,
- device_interrupt: cabriolet_device_interrupt,
-
- init_arch: apecs_init_arch,
- init_irq: cabriolet_init_irq,
- init_rtc: common_init_rtc,
- init_pci: cabriolet_init_pci,
- kill_arch: NULL,
- pci_map_irq: cabriolet_map_irq,
- pci_swizzle: common_swizzle,
+ .machine_check = apecs_machine_check,
+ .max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 35,
+ .device_interrupt = cabriolet_device_interrupt,
+
+ .init_arch = apecs_init_arch,
+ .init_irq = cabriolet_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = cabriolet_init_pci,
+ .kill_arch = NULL,
+ .pci_map_irq = cabriolet_map_irq,
+ .pci_swizzle = common_swizzle,
};
#ifndef CONFIG_ALPHA_EB64P
ALIAS_MV(cabriolet)
@@ -349,101 +349,101 @@ ALIAS_MV(cabriolet)
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EB164)
struct alpha_machine_vector eb164_mv __initmv = {
- vector_name: "EB164",
+ .vector_name = "EB164",
DO_EV5_MMU,
DO_DEFAULT_RTC,
DO_CIA_IO,
DO_CIA_BUS,
- machine_check: cia_machine_check,
- max_dma_address: ALPHA_MAX_DMA_ADDRESS,
- min_io_address: DEFAULT_IO_BASE,
- min_mem_address: CIA_DEFAULT_MEM_BASE,
-
- nr_irqs: 35,
- device_interrupt: cabriolet_device_interrupt,
-
- init_arch: cia_init_arch,
- init_irq: cabriolet_init_irq,
- init_rtc: common_init_rtc,
- init_pci: cia_cab_init_pci,
- pci_map_irq: cabriolet_map_irq,
- pci_swizzle: common_swizzle,
+ .machine_check = cia_machine_check,
+ .max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = CIA_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 35,
+ .device_interrupt = cabriolet_device_interrupt,
+
+ .init_arch = cia_init_arch,
+ .init_irq = cabriolet_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = cia_cab_init_pci,
+ .pci_map_irq = cabriolet_map_irq,
+ .pci_swizzle = common_swizzle,
};
ALIAS_MV(eb164)
#endif
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EB66P)
struct alpha_machine_vector eb66p_mv __initmv = {
- vector_name: "EB66+",
+ .vector_name = "EB66+",
DO_EV4_MMU,
DO_DEFAULT_RTC,
DO_LCA_IO,
DO_LCA_BUS,
- machine_check: lca_machine_check,
- max_dma_address: ALPHA_MAX_DMA_ADDRESS,
- min_io_address: DEFAULT_IO_BASE,
- min_mem_address: APECS_AND_LCA_DEFAULT_MEM_BASE,
-
- nr_irqs: 35,
- device_interrupt: cabriolet_device_interrupt,
-
- init_arch: lca_init_arch,
- init_irq: cabriolet_init_irq,
- init_rtc: common_init_rtc,
- init_pci: cabriolet_init_pci,
- pci_map_irq: eb66p_map_irq,
- pci_swizzle: common_swizzle,
+ .machine_check = lca_machine_check,
+ .max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 35,
+ .device_interrupt = cabriolet_device_interrupt,
+
+ .init_arch = lca_init_arch,
+ .init_irq = cabriolet_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = cabriolet_init_pci,
+ .pci_map_irq = eb66p_map_irq,
+ .pci_swizzle = common_swizzle,
};
ALIAS_MV(eb66p)
#endif
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_LX164)
struct alpha_machine_vector lx164_mv __initmv = {
- vector_name: "LX164",
+ .vector_name = "LX164",
DO_EV5_MMU,
DO_DEFAULT_RTC,
DO_PYXIS_IO,
DO_CIA_BUS,
- machine_check: cia_machine_check,
- max_dma_address: ALPHA_MAX_DMA_ADDRESS,
- min_io_address: DEFAULT_IO_BASE,
- min_mem_address: DEFAULT_MEM_BASE,
- pci_dac_offset: PYXIS_DAC_OFFSET,
-
- nr_irqs: 35,
- device_interrupt: cabriolet_device_interrupt,
-
- init_arch: pyxis_init_arch,
- init_irq: cabriolet_init_irq,
- init_rtc: common_init_rtc,
- init_pci: alphapc164_init_pci,
- pci_map_irq: alphapc164_map_irq,
- pci_swizzle: common_swizzle,
+ .machine_check = cia_machine_check,
+ .max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = DEFAULT_MEM_BASE,
+ .pci_dac_offset = PYXIS_DAC_OFFSET,
+
+ .nr_irqs = 35,
+ .device_interrupt = cabriolet_device_interrupt,
+
+ .init_arch = pyxis_init_arch,
+ .init_irq = cabriolet_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = alphapc164_init_pci,
+ .pci_map_irq = alphapc164_map_irq,
+ .pci_swizzle = common_swizzle,
};
ALIAS_MV(lx164)
#endif
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_PC164)
struct alpha_machine_vector pc164_mv __initmv = {
- vector_name: "PC164",
+ .vector_name = "PC164",
DO_EV5_MMU,
DO_DEFAULT_RTC,
DO_CIA_IO,
DO_CIA_BUS,
- machine_check: cia_machine_check,
- max_dma_address: ALPHA_MAX_DMA_ADDRESS,
- min_io_address: DEFAULT_IO_BASE,
- min_mem_address: CIA_DEFAULT_MEM_BASE,
-
- nr_irqs: 35,
- device_interrupt: pc164_device_interrupt,
-
- init_arch: cia_init_arch,
- init_irq: pc164_init_irq,
- init_rtc: common_init_rtc,
- init_pci: alphapc164_init_pci,
- pci_map_irq: alphapc164_map_irq,
- pci_swizzle: common_swizzle,
+ .machine_check = cia_machine_check,
+ .max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = CIA_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 35,
+ .device_interrupt = pc164_device_interrupt,
+
+ .init_arch = cia_init_arch,
+ .init_irq = pc164_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = alphapc164_init_pci,
+ .pci_map_irq = alphapc164_map_irq,
+ .pci_swizzle = common_swizzle,
};
ALIAS_MV(pc164)
#endif
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c
index 3ff877f92dde..20d9ef272bf3 100644
--- a/arch/alpha/kernel/sys_dp264.c
+++ b/arch/alpha/kernel/sys_dp264.c
@@ -197,25 +197,25 @@ clipper_set_affinity(unsigned int irq, unsigned long affinity)
}
static struct hw_interrupt_type dp264_irq_type = {
- typename: "DP264",
- startup: dp264_startup_irq,
- shutdown: dp264_disable_irq,
- enable: dp264_enable_irq,
- disable: dp264_disable_irq,
- ack: dp264_disable_irq,
- end: dp264_end_irq,
- set_affinity: dp264_set_affinity,
+ .typename = "DP264",
+ .startup = dp264_startup_irq,
+ .shutdown = dp264_disable_irq,
+ .enable = dp264_enable_irq,
+ .disable = dp264_disable_irq,
+ .ack = dp264_disable_irq,
+ .end = dp264_end_irq,
+ .set_affinity = dp264_set_affinity,
};
static struct hw_interrupt_type clipper_irq_type = {
- typename: "CLIPPER",
- startup: clipper_startup_irq,
- shutdown: clipper_disable_irq,
- enable: clipper_enable_irq,
- disable: clipper_disable_irq,
- ack: clipper_disable_irq,
- end: clipper_end_irq,
- set_affinity: clipper_set_affinity,
+ .typename = "CLIPPER",
+ .startup = clipper_startup_irq,
+ .shutdown = clipper_disable_irq,
+ .enable = clipper_enable_irq,
+ .disable = clipper_disable_irq,
+ .ack = clipper_disable_irq,
+ .end = clipper_end_irq,
+ .set_affinity = clipper_set_affinity,
};
static void
@@ -566,100 +566,100 @@ webbrick_init_arch(void)
*/
struct alpha_machine_vector dp264_mv __initmv = {
- vector_name: "DP264",
+ .vector_name = "DP264",
DO_EV6_MMU,
DO_DEFAULT_RTC,
DO_TSUNAMI_IO,
DO_TSUNAMI_BUS,
- machine_check: tsunami_machine_check,
- max_dma_address: ALPHA_MAX_DMA_ADDRESS,
- min_io_address: DEFAULT_IO_BASE,
- min_mem_address: DEFAULT_MEM_BASE,
- pci_dac_offset: TSUNAMI_DAC_OFFSET,
-
- nr_irqs: 64,
- device_interrupt: dp264_device_interrupt,
-
- init_arch: tsunami_init_arch,
- init_irq: dp264_init_irq,
- init_rtc: common_init_rtc,
- init_pci: dp264_init_pci,
- kill_arch: tsunami_kill_arch,
- pci_map_irq: dp264_map_irq,
- pci_swizzle: common_swizzle,
+ .machine_check = tsunami_machine_check,
+ .max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = DEFAULT_MEM_BASE,
+ .pci_dac_offset = TSUNAMI_DAC_OFFSET,
+
+ .nr_irqs = 64,
+ .device_interrupt = dp264_device_interrupt,
+
+ .init_arch = tsunami_init_arch,
+ .init_irq = dp264_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = dp264_init_pci,
+ .kill_arch = tsunami_kill_arch,
+ .pci_map_irq = dp264_map_irq,
+ .pci_swizzle = common_swizzle,
};
ALIAS_MV(dp264)
struct alpha_machine_vector monet_mv __initmv = {
- vector_name: "Monet",
+ .vector_name = "Monet",
DO_EV6_MMU,
DO_DEFAULT_RTC,
DO_TSUNAMI_IO,
DO_TSUNAMI_BUS,
- machine_check: tsunami_machine_check,
- max_dma_address: ALPHA_MAX_DMA_ADDRESS,
- min_io_address: DEFAULT_IO_BASE,
- min_mem_address: DEFAULT_MEM_BASE,
- pci_dac_offset: TSUNAMI_DAC_OFFSET,
-
- nr_irqs: 64,
- device_interrupt: dp264_device_interrupt,
-
- init_arch: tsunami_init_arch,
- init_irq: dp264_init_irq,
- init_rtc: common_init_rtc,
- init_pci: monet_init_pci,
- kill_arch: tsunami_kill_arch,
- pci_map_irq: monet_map_irq,
- pci_swizzle: monet_swizzle,
+ .machine_check = tsunami_machine_check,
+ .max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = DEFAULT_MEM_BASE,
+ .pci_dac_offset = TSUNAMI_DAC_OFFSET,
+
+ .nr_irqs = 64,
+ .device_interrupt = dp264_device_interrupt,
+
+ .init_arch = tsunami_init_arch,
+ .init_irq = dp264_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = monet_init_pci,
+ .kill_arch = tsunami_kill_arch,
+ .pci_map_irq = monet_map_irq,
+ .pci_swizzle = monet_swizzle,
};
struct alpha_machine_vector webbrick_mv __initmv = {
- vector_name: "Webbrick",
+ .vector_name = "Webbrick",
DO_EV6_MMU,
DO_DEFAULT_RTC,
DO_TSUNAMI_IO,
DO_TSUNAMI_BUS,
- machine_check: tsunami_machine_check,
- max_dma_address: ALPHA_MAX_DMA_ADDRESS,
- min_io_address: DEFAULT_IO_BASE,
- min_mem_address: DEFAULT_MEM_BASE,
- pci_dac_offset: TSUNAMI_DAC_OFFSET,
-
- nr_irqs: 64,
- device_interrupt: dp264_device_interrupt,
-
- init_arch: webbrick_init_arch,
- init_irq: dp264_init_irq,
- init_rtc: common_init_rtc,
- init_pci: common_init_pci,
- kill_arch: tsunami_kill_arch,
- pci_map_irq: webbrick_map_irq,
- pci_swizzle: common_swizzle,
+ .machine_check = tsunami_machine_check,
+ .max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = DEFAULT_MEM_BASE,
+ .pci_dac_offset = TSUNAMI_DAC_OFFSET,
+
+ .nr_irqs = 64,
+ .device_interrupt = dp264_device_interrupt,
+
+ .init_arch = webbrick_init_arch,
+ .init_irq = dp264_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = common_init_pci,
+ .kill_arch = tsunami_kill_arch,
+ .pci_map_irq = webbrick_map_irq,
+ .pci_swizzle = common_swizzle,
};
struct alpha_machine_vector clipper_mv __initmv = {
- vector_name: "Clipper",
+ .vector_name = "Clipper",
DO_EV6_MMU,
DO_DEFAULT_RTC,
DO_TSUNAMI_IO,
DO_TSUNAMI_BUS,
- machine_check: tsunami_machine_check,
- max_dma_address: ALPHA_MAX_DMA_ADDRESS,
- min_io_address: DEFAULT_IO_BASE,
- min_mem_address: DEFAULT_MEM_BASE,
- pci_dac_offset: TSUNAMI_DAC_OFFSET,
-
- nr_irqs: 64,
- device_interrupt: dp264_device_interrupt,
-
- init_arch: tsunami_init_arch,
- init_irq: clipper_init_irq,
- init_rtc: common_init_rtc,
- init_pci: common_init_pci,
- kill_arch: tsunami_kill_arch,
- pci_map_irq: clipper_map_irq,
- pci_swizzle: common_swizzle,
+ .machine_check = tsunami_machine_check,
+ .max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = DEFAULT_MEM_BASE,
+ .pci_dac_offset = TSUNAMI_DAC_OFFSET,
+
+ .nr_irqs = 64,
+ .device_interrupt = dp264_device_interrupt,
+
+ .init_arch = tsunami_init_arch,
+ .init_irq = clipper_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = common_init_pci,
+ .kill_arch = tsunami_kill_arch,
+ .pci_map_irq = clipper_map_irq,
+ .pci_swizzle = common_swizzle,
};
/* Sharks strongly resemble Clipper, at least as far
@@ -668,27 +668,27 @@ struct alpha_machine_vector clipper_mv __initmv = {
*/
struct alpha_machine_vector shark_mv __initmv = {
- vector_name: "Shark",
+ .vector_name = "Shark",
DO_EV6_MMU,
DO_DEFAULT_RTC,
DO_TSUNAMI_IO,
DO_TSUNAMI_BUS,
- machine_check: tsunami_machine_check,
- max_dma_address: ALPHA_MAX_DMA_ADDRESS,
- min_io_address: DEFAULT_IO_BASE,
- min_mem_address: DEFAULT_MEM_BASE,
- pci_dac_offset: TSUNAMI_DAC_OFFSET,
-
- nr_irqs: 64,
- device_interrupt: dp264_device_interrupt,
-
- init_arch: tsunami_init_arch,
- init_irq: clipper_init_irq,
- init_rtc: common_init_rtc,
- init_pci: common_init_pci,
- kill_arch: tsunami_kill_arch,
- pci_map_irq: clipper_map_irq,
- pci_swizzle: common_swizzle,
+ .machine_check = tsunami_machine_check,
+ .max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = DEFAULT_MEM_BASE,
+ .pci_dac_offset = TSUNAMI_DAC_OFFSET,
+
+ .nr_irqs = 64,
+ .device_interrupt = dp264_device_interrupt,
+
+ .init_arch = tsunami_init_arch,
+ .init_irq = clipper_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = common_init_pci,
+ .kill_arch = tsunami_kill_arch,
+ .pci_map_irq = clipper_map_irq,
+ .pci_swizzle = common_swizzle,
};
/* No alpha_mv alias for webbrick/monet/clipper, since we compile them
diff --git a/arch/alpha/kernel/sys_eb64p.c b/arch/alpha/kernel/sys_eb64p.c
index 665ea9a4b990..eb92418ff98b 100644
--- a/arch/alpha/kernel/sys_eb64p.c
+++ b/arch/alpha/kernel/sys_eb64p.c
@@ -71,13 +71,13 @@ eb64p_end_irq(unsigned int irq)
}
static struct hw_interrupt_type eb64p_irq_type = {
- typename: "EB64P",
- startup: eb64p_startup_irq,
- shutdown: eb64p_disable_irq,
- enable: eb64p_enable_irq,
- disable: eb64p_disable_irq,
- ack: eb64p_disable_irq,
- end: eb64p_end_irq,
+ .typename = "EB64P",
+ .startup = eb64p_startup_irq,
+ .shutdown = eb64p_disable_irq,
+ .enable = eb64p_enable_irq,
+ .disable = eb64p_disable_irq,
+ .ack = eb64p_disable_irq,
+ .end = eb64p_end_irq,
};
static void
@@ -208,51 +208,51 @@ eb64p_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EB64P)
struct alpha_machine_vector eb64p_mv __initmv = {
- vector_name: "EB64+",
+ .vector_name = "EB64+",
DO_EV4_MMU,
DO_DEFAULT_RTC,
DO_APECS_IO,
DO_APECS_BUS,
- machine_check: apecs_machine_check,
- max_dma_address: ALPHA_MAX_DMA_ADDRESS,
- min_io_address: DEFAULT_IO_BASE,
- min_mem_address: APECS_AND_LCA_DEFAULT_MEM_BASE,
-
- nr_irqs: 32,
- device_interrupt: eb64p_device_interrupt,
-
- init_arch: apecs_init_arch,
- init_irq: eb64p_init_irq,
- init_rtc: common_init_rtc,
- init_pci: common_init_pci,
- kill_arch: NULL,
- pci_map_irq: eb64p_map_irq,
- pci_swizzle: common_swizzle,
+ .machine_check = apecs_machine_check,
+ .max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 32,
+ .device_interrupt = eb64p_device_interrupt,
+
+ .init_arch = apecs_init_arch,
+ .init_irq = eb64p_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = common_init_pci,
+ .kill_arch = NULL,
+ .pci_map_irq = eb64p_map_irq,
+ .pci_swizzle = common_swizzle,
};
ALIAS_MV(eb64p)
#endif
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EB66)
struct alpha_machine_vector eb66_mv __initmv = {
- vector_name: "EB66",
+ .vector_name = "EB66",
DO_EV4_MMU,
DO_DEFAULT_RTC,
DO_LCA_IO,
DO_LCA_BUS,
- machine_check: lca_machine_check,
- max_dma_address: ALPHA_MAX_DMA_ADDRESS,
- min_io_address: DEFAULT_IO_BASE,
- min_mem_address: APECS_AND_LCA_DEFAULT_MEM_BASE,
-
- nr_irqs: 32,
- device_interrupt: eb64p_device_interrupt,
-
- init_arch: lca_init_arch,
- init_irq: eb64p_init_irq,
- init_rtc: common_init_rtc,
- init_pci: common_init_pci,
- pci_map_irq: eb64p_map_irq,
- pci_swizzle: common_swizzle,
+ .machine_check = lca_machine_check,
+ .max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 32,
+ .device_interrupt = eb64p_device_interrupt,
+
+ .init_arch = lca_init_arch,
+ .init_irq = eb64p_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = common_init_pci,
+ .pci_map_irq = eb64p_map_irq,
+ .pci_swizzle = common_swizzle,
};
ALIAS_MV(eb66)
#endif
diff --git a/arch/alpha/kernel/sys_eiger.c b/arch/alpha/kernel/sys_eiger.c
index 68c17b45f76f..5e223221d1b8 100644
--- a/arch/alpha/kernel/sys_eiger.c
+++ b/arch/alpha/kernel/sys_eiger.c
@@ -81,13 +81,13 @@ eiger_end_irq(unsigned int irq)
}
static struct hw_interrupt_type eiger_irq_type = {
- typename: "EIGER",
- startup: eiger_startup_irq,
- shutdown: eiger_disable_irq,
- enable: eiger_enable_irq,
- disable: eiger_disable_irq,
- ack: eiger_disable_irq,
- end: eiger_end_irq,
+ .typename = "EIGER",
+ .startup = eiger_startup_irq,
+ .shutdown = eiger_disable_irq,
+ .enable = eiger_enable_irq,
+ .disable = eiger_disable_irq,
+ .ack = eiger_disable_irq,
+ .end = eiger_end_irq,
};
static void
@@ -225,26 +225,26 @@ eiger_swizzle(struct pci_dev *dev, u8 *pinp)
*/
struct alpha_machine_vector eiger_mv __initmv = {
- vector_name: "Eiger",
+ .vector_name = "Eiger",
DO_EV6_MMU,
DO_DEFAULT_RTC,
DO_TSUNAMI_IO,
DO_TSUNAMI_BUS,
- machine_check: tsunami_machine_check,
- max_dma_address: ALPHA_MAX_DMA_ADDRESS,
- min_io_address: DEFAULT_IO_BASE,
- min_mem_address: DEFAULT_MEM_BASE,
- pci_dac_offset: TSUNAMI_DAC_OFFSET,
-
- nr_irqs: 128,
- device_interrupt: eiger_device_interrupt,
-
- init_arch: tsunami_init_arch,
- init_irq: eiger_init_irq,
- init_rtc: common_init_rtc,
- init_pci: common_init_pci,
- kill_arch: tsunami_kill_arch,
- pci_map_irq: eiger_map_irq,
- pci_swizzle: eiger_swizzle,
+ .machine_check = tsunami_machine_check,
+ .max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = DEFAULT_MEM_BASE,
+ .pci_dac_offset = TSUNAMI_DAC_OFFSET,
+
+ .nr_irqs = 128,
+ .device_interrupt = eiger_device_interrupt,
+
+ .init_arch = tsunami_init_arch,
+ .init_irq = eiger_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = common_init_pci,
+ .kill_arch = tsunami_kill_arch,
+ .pci_map_irq = eiger_map_irq,
+ .pci_swizzle = eiger_swizzle,
};
ALIAS_MV(eiger)
diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c
index 3b03b8e7ee80..f6c5de49843c 100644
--- a/arch/alpha/kernel/sys_jensen.c
+++ b/arch/alpha/kernel/sys_jensen.c
@@ -119,13 +119,13 @@ jensen_local_end(unsigned int irq)
}
static struct hw_interrupt_type jensen_local_irq_type = {
- typename: "LOCAL",
- startup: jensen_local_startup,
- shutdown: jensen_local_shutdown,
- enable: jensen_local_enable,
- disable: jensen_local_disable,
- ack: jensen_local_ack,
- end: jensen_local_end,
+ .typename = "LOCAL",
+ .startup = jensen_local_startup,
+ .shutdown = jensen_local_shutdown,
+ .enable = jensen_local_enable,
+ .disable = jensen_local_disable,
+ .ack = jensen_local_ack,
+ .end = jensen_local_end,
};
static void
@@ -252,21 +252,21 @@ jensen_machine_check (u64 vector, u64 la, struct pt_regs *regs)
*/
struct alpha_machine_vector jensen_mv __initmv = {
- vector_name: "Jensen",
+ .vector_name = "Jensen",
DO_EV4_MMU,
IO_LITE(JENSEN,jensen),
BUS(jensen),
- machine_check: jensen_machine_check,
- max_dma_address: ALPHA_MAX_DMA_ADDRESS,
- rtc_port: 0x170,
-
- nr_irqs: 16,
- device_interrupt: jensen_device_interrupt,
-
- init_arch: jensen_init_arch,
- init_irq: jensen_init_irq,
- init_rtc: common_init_rtc,
- init_pci: NULL,
- kill_arch: NULL,
+ .machine_check = jensen_machine_check,
+ .max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+ .rtc_port = 0x170,
+
+ .nr_irqs = 16,
+ .device_interrupt = jensen_device_interrupt,
+
+ .init_arch = jensen_init_arch,
+ .init_irq = jensen_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = NULL,
+ .kill_arch = NULL,
};
ALIAS_MV(jensen)
diff --git a/arch/alpha/kernel/sys_miata.c b/arch/alpha/kernel/sys_miata.c
index fefda1f8dd6a..e2c7ddbeef2f 100644
--- a/arch/alpha/kernel/sys_miata.c
+++ b/arch/alpha/kernel/sys_miata.c
@@ -261,26 +261,26 @@ miata_kill_arch(int mode)
*/
struct alpha_machine_vector miata_mv __initmv = {
- vector_name: "Miata",
+ .vector_name = "Miata",
DO_EV5_MMU,
DO_DEFAULT_RTC,
DO_PYXIS_IO,
DO_CIA_BUS,
- machine_check: cia_machine_check,
- max_dma_address: ALPHA_MAX_DMA_ADDRESS,
- min_io_address: DEFAULT_IO_BASE,
- min_mem_address: DEFAULT_MEM_BASE,
- pci_dac_offset: PYXIS_DAC_OFFSET,
+ .machine_check = cia_machine_check,
+ .max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = DEFAULT_MEM_BASE,
+ .pci_dac_offset = PYXIS_DAC_OFFSET,
- nr_irqs: 48,
- device_interrupt: pyxis_device_interrupt,
+ .nr_irqs = 48,
+ .device_interrupt = pyxis_device_interrupt,
- init_arch: pyxis_init_arch,
- init_irq: miata_init_irq,
- init_rtc: common_init_rtc,
- init_pci: miata_init_pci,
- kill_arch: miata_kill_arch,
- pci_map_irq: miata_map_irq,
- pci_swizzle: miata_swizzle,
+ .init_arch = pyxis_init_arch,
+ .init_irq = miata_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = miata_init_pci,
+ .kill_arch = miata_kill_arch,
+ .pci_map_irq = miata_map_irq,
+ .pci_swizzle = miata_swizzle,
};
ALIAS_MV(miata)
diff --git a/arch/alpha/kernel/sys_mikasa.c b/arch/alpha/kernel/sys_mikasa.c
index fb7bf56c367d..f06e342c71d9 100644
--- a/arch/alpha/kernel/sys_mikasa.c
+++ b/arch/alpha/kernel/sys_mikasa.c
@@ -70,13 +70,13 @@ mikasa_end_irq(unsigned int irq)
}
static struct hw_interrupt_type mikasa_irq_type = {
- typename: "MIKASA",
- startup: mikasa_startup_irq,
- shutdown: mikasa_disable_irq,
- enable: mikasa_enable_irq,
- disable: mikasa_disable_irq,
- ack: mikasa_disable_irq,
- end: mikasa_end_irq,
+ .typename = "MIKASA",
+ .startup = mikasa_startup_irq,
+ .shutdown = mikasa_disable_irq,
+ .enable = mikasa_enable_irq,
+ .disable = mikasa_disable_irq,
+ .ack = mikasa_disable_irq,
+ .end = mikasa_end_irq,
};
static void
@@ -217,51 +217,51 @@ mikasa_apecs_machine_check(unsigned long vector, unsigned long la_ptr,
#if defined(CONFIG_ALPHA_GENERIC) || !defined(CONFIG_ALPHA_PRIMO)
struct alpha_machine_vector mikasa_mv __initmv = {
- vector_name: "Mikasa",
+ .vector_name = "Mikasa",
DO_EV4_MMU,
DO_DEFAULT_RTC,
DO_APECS_IO,
DO_APECS_BUS,
- machine_check: mikasa_apecs_machine_check,
- max_dma_address: ALPHA_MAX_DMA_ADDRESS,
- min_io_address: DEFAULT_IO_BASE,
- min_mem_address: APECS_AND_LCA_DEFAULT_MEM_BASE,
-
- nr_irqs: 32,
- device_interrupt: mikasa_device_interrupt,
-
- init_arch: apecs_init_arch,
- init_irq: mikasa_init_irq,
- init_rtc: common_init_rtc,
- init_pci: common_init_pci,
- kill_arch: NULL,
- pci_map_irq: mikasa_map_irq,
- pci_swizzle: common_swizzle,
+ .machine_check = mikasa_apecs_machine_check,
+ .max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 32,
+ .device_interrupt = mikasa_device_interrupt,
+
+ .init_arch = apecs_init_arch,
+ .init_irq = mikasa_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = common_init_pci,
+ .kill_arch = NULL,
+ .pci_map_irq = mikasa_map_irq,
+ .pci_swizzle = common_swizzle,
};
ALIAS_MV(mikasa)
#endif
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_PRIMO)
struct alpha_machine_vector mikasa_primo_mv __initmv = {
- vector_name: "Mikasa-Primo",
+ .vector_name = "Mikasa-Primo",
DO_EV5_MMU,
DO_DEFAULT_RTC,
DO_CIA_IO,
DO_CIA_BUS,
- machine_check: cia_machine_check,
- max_dma_address: ALPHA_MAX_DMA_ADDRESS,
- min_io_address: DEFAULT_IO_BASE,
- min_mem_address: CIA_DEFAULT_MEM_BASE,
-
- nr_irqs: 32,
- device_interrupt: mikasa_device_interrupt,
-
- init_arch: cia_init_arch,
- init_irq: mikasa_init_irq,
- init_rtc: common_init_rtc,
- init_pci: cia_init_pci,
- pci_map_irq: mikasa_map_irq,
- pci_swizzle: common_swizzle,
+ .machine_check = cia_machine_check,
+ .max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = CIA_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 32,
+ .device_interrupt = mikasa_device_interrupt,
+
+ .init_arch = cia_init_arch,
+ .init_irq = mikasa_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = cia_init_pci,
+ .pci_map_irq = mikasa_map_irq,
+ .pci_swizzle = common_swizzle,
};
ALIAS_MV(mikasa_primo)
#endif
diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c
index cbe8bbb8e5f0..bc4538a99ce5 100644
--- a/arch/alpha/kernel/sys_nautilus.c
+++ b/arch/alpha/kernel/sys_nautilus.c
@@ -510,25 +510,25 @@ nautilus_machine_check(unsigned long vector, unsigned long la_ptr,
*/
struct alpha_machine_vector nautilus_mv __initmv = {
- vector_name: "Nautilus",
+ .vector_name = "Nautilus",
DO_EV6_MMU,
DO_DEFAULT_RTC,
DO_IRONGATE_IO,
DO_IRONGATE_BUS,
- machine_check: nautilus_machine_check,
- max_dma_address: ALPHA_NAUTILUS_MAX_DMA_ADDRESS,
- min_io_address: DEFAULT_IO_BASE,
- min_mem_address: IRONGATE_DEFAULT_MEM_BASE,
-
- nr_irqs: 16,
- device_interrupt: isa_device_interrupt,
-
- init_arch: irongate_init_arch,
- init_irq: nautilus_init_irq,
- init_rtc: common_init_rtc,
- init_pci: common_init_pci,
- kill_arch: nautilus_kill_arch,
- pci_map_irq: nautilus_map_irq,
- pci_swizzle: common_swizzle,
+ .machine_check = nautilus_machine_check,
+ .max_dma_address = ALPHA_NAUTILUS_MAX_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = IRONGATE_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 16,
+ .device_interrupt = isa_device_interrupt,
+
+ .init_arch = irongate_init_arch,
+ .init_irq = nautilus_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = common_init_pci,
+ .kill_arch = nautilus_kill_arch,
+ .pci_map_irq = nautilus_map_irq,
+ .pci_swizzle = common_swizzle,
};
ALIAS_MV(nautilus)
diff --git a/arch/alpha/kernel/sys_noritake.c b/arch/alpha/kernel/sys_noritake.c
index 16ea1329f3e1..3e3aecf94b36 100644
--- a/arch/alpha/kernel/sys_noritake.c
+++ b/arch/alpha/kernel/sys_noritake.c
@@ -68,13 +68,13 @@ noritake_startup_irq(unsigned int irq)
}
static struct hw_interrupt_type noritake_irq_type = {
- typename: "NORITAKE",
- startup: noritake_startup_irq,
- shutdown: noritake_disable_irq,
- enable: noritake_enable_irq,
- disable: noritake_disable_irq,
- ack: noritake_disable_irq,
- end: noritake_enable_irq,
+ .typename = "NORITAKE",
+ .startup = noritake_startup_irq,
+ .shutdown = noritake_disable_irq,
+ .enable = noritake_enable_irq,
+ .disable = noritake_disable_irq,
+ .ack = noritake_disable_irq,
+ .end = noritake_enable_irq,
};
static void
@@ -299,51 +299,51 @@ noritake_apecs_machine_check(unsigned long vector, unsigned long la_ptr,
#if defined(CONFIG_ALPHA_GENERIC) || !defined(CONFIG_ALPHA_PRIMO)
struct alpha_machine_vector noritake_mv __initmv = {
- vector_name: "Noritake",
+ .vector_name = "Noritake",
DO_EV4_MMU,
DO_DEFAULT_RTC,
DO_APECS_IO,
DO_APECS_BUS,
- machine_check: noritake_apecs_machine_check,
- max_dma_address: ALPHA_MAX_DMA_ADDRESS,
- min_io_address: EISA_DEFAULT_IO_BASE,
- min_mem_address: APECS_AND_LCA_DEFAULT_MEM_BASE,
-
- nr_irqs: 48,
- device_interrupt: noritake_device_interrupt,
-
- init_arch: apecs_init_arch,
- init_irq: noritake_init_irq,
- init_rtc: common_init_rtc,
- init_pci: common_init_pci,
- kill_arch: NULL,
- pci_map_irq: noritake_map_irq,
- pci_swizzle: noritake_swizzle,
+ .machine_check = noritake_apecs_machine_check,
+ .max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+ .min_io_address = EISA_DEFAULT_IO_BASE,
+ .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 48,
+ .device_interrupt = noritake_device_interrupt,
+
+ .init_arch = apecs_init_arch,
+ .init_irq = noritake_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = common_init_pci,
+ .kill_arch = NULL,
+ .pci_map_irq = noritake_map_irq,
+ .pci_swizzle = noritake_swizzle,
};
ALIAS_MV(noritake)
#endif
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_PRIMO)
struct alpha_machine_vector noritake_primo_mv __initmv = {
- vector_name: "Noritake-Primo",
+ .vector_name = "Noritake-Primo",
DO_EV5_MMU,
DO_DEFAULT_RTC,
DO_CIA_IO,
DO_CIA_BUS,
- machine_check: cia_machine_check,
- max_dma_address: ALPHA_MAX_DMA_ADDRESS,
- min_io_address: EISA_DEFAULT_IO_BASE,
- min_mem_address: CIA_DEFAULT_MEM_BASE,
-
- nr_irqs: 48,
- device_interrupt: noritake_device_interrupt,
-
- init_arch: cia_init_arch,
- init_irq: noritake_init_irq,
- init_rtc: common_init_rtc,
- init_pci: cia_init_pci,
- pci_map_irq: noritake_map_irq,
- pci_swizzle: noritake_swizzle,
+ .machine_check = cia_machine_check,
+ .max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+ .min_io_address = EISA_DEFAULT_IO_BASE,
+ .min_mem_address = CIA_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 48,
+ .device_interrupt = noritake_device_interrupt,
+
+ .init_arch = cia_init_arch,
+ .init_irq = noritake_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = cia_init_pci,
+ .pci_map_irq = noritake_map_irq,
+ .pci_swizzle = noritake_swizzle,
};
ALIAS_MV(noritake_primo)
#endif
diff --git a/arch/alpha/kernel/sys_rawhide.c b/arch/alpha/kernel/sys_rawhide.c
index 6518097f7bd4..6ad2ccfd1878 100644
--- a/arch/alpha/kernel/sys_rawhide.c
+++ b/arch/alpha/kernel/sys_rawhide.c
@@ -124,13 +124,13 @@ rawhide_end_irq(unsigned int irq)
}
static struct hw_interrupt_type rawhide_irq_type = {
- typename: "RAWHIDE",
- startup: rawhide_startup_irq,
- shutdown: rawhide_disable_irq,
- enable: rawhide_enable_irq,
- disable: rawhide_disable_irq,
- ack: rawhide_mask_and_ack_irq,
- end: rawhide_end_irq,
+ .typename = "RAWHIDE",
+ .startup = rawhide_startup_irq,
+ .shutdown = rawhide_disable_irq,
+ .enable = rawhide_enable_irq,
+ .disable = rawhide_disable_irq,
+ .ack = rawhide_mask_and_ack_irq,
+ .end = rawhide_end_irq,
};
static void
@@ -246,26 +246,26 @@ rawhide_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
*/
struct alpha_machine_vector rawhide_mv __initmv = {
- vector_name: "Rawhide",
+ .vector_name = "Rawhide",
DO_EV5_MMU,
DO_DEFAULT_RTC,
DO_MCPCIA_IO,
DO_MCPCIA_BUS,
- machine_check: mcpcia_machine_check,
- max_dma_address: ALPHA_MAX_DMA_ADDRESS,
- min_io_address: DEFAULT_IO_BASE,
- min_mem_address: MCPCIA_DEFAULT_MEM_BASE,
- pci_dac_offset: MCPCIA_DAC_OFFSET,
-
- nr_irqs: 128,
- device_interrupt: rawhide_srm_device_interrupt,
-
- init_arch: mcpcia_init_arch,
- init_irq: rawhide_init_irq,
- init_rtc: common_init_rtc,
- init_pci: common_init_pci,
- kill_arch: NULL,
- pci_map_irq: rawhide_map_irq,
- pci_swizzle: common_swizzle,
+ .machine_check = mcpcia_machine_check,
+ .max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = MCPCIA_DEFAULT_MEM_BASE,
+ .pci_dac_offset = MCPCIA_DAC_OFFSET,
+
+ .nr_irqs = 128,
+ .device_interrupt = rawhide_srm_device_interrupt,
+
+ .init_arch = mcpcia_init_arch,
+ .init_irq = rawhide_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = common_init_pci,
+ .kill_arch = NULL,
+ .pci_map_irq = rawhide_map_irq,
+ .pci_swizzle = common_swizzle,
};
ALIAS_MV(rawhide)
diff --git a/arch/alpha/kernel/sys_ruffian.c b/arch/alpha/kernel/sys_ruffian.c
index a1966a07b8b3..7f7977c20ecd 100644
--- a/arch/alpha/kernel/sys_ruffian.c
+++ b/arch/alpha/kernel/sys_ruffian.c
@@ -212,26 +212,26 @@ ruffian_get_bank_size(unsigned long offset)
*/
struct alpha_machine_vector ruffian_mv __initmv = {
- vector_name: "Ruffian",
+ .vector_name = "Ruffian",
DO_EV5_MMU,
DO_DEFAULT_RTC,
DO_PYXIS_IO,
DO_CIA_BUS,
- machine_check: cia_machine_check,
- max_dma_address: ALPHA_RUFFIAN_MAX_DMA_ADDRESS,
- min_io_address: DEFAULT_IO_BASE,
- min_mem_address: DEFAULT_MEM_BASE,
- pci_dac_offset: PYXIS_DAC_OFFSET,
-
- nr_irqs: 48,
- device_interrupt: pyxis_device_interrupt,
-
- init_arch: pyxis_init_arch,
- init_irq: ruffian_init_irq,
- init_rtc: ruffian_init_rtc,
- init_pci: cia_init_pci,
- kill_arch: ruffian_kill_arch,
- pci_map_irq: ruffian_map_irq,
- pci_swizzle: ruffian_swizzle,
+ .machine_check = cia_machine_check,
+ .max_dma_address = ALPHA_RUFFIAN_MAX_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = DEFAULT_MEM_BASE,
+ .pci_dac_offset = PYXIS_DAC_OFFSET,
+
+ .nr_irqs = 48,
+ .device_interrupt = pyxis_device_interrupt,
+
+ .init_arch = pyxis_init_arch,
+ .init_irq = ruffian_init_irq,
+ .init_rtc = ruffian_init_rtc,
+ .init_pci = cia_init_pci,
+ .kill_arch = ruffian_kill_arch,
+ .pci_map_irq = ruffian_map_irq,
+ .pci_swizzle = ruffian_swizzle,
};
ALIAS_MV(ruffian)
diff --git a/arch/alpha/kernel/sys_rx164.c b/arch/alpha/kernel/sys_rx164.c
index 48214ff70abc..cdf8b666d3ab 100644
--- a/arch/alpha/kernel/sys_rx164.c
+++ b/arch/alpha/kernel/sys_rx164.c
@@ -73,13 +73,13 @@ rx164_end_irq(unsigned int irq)
}
static struct hw_interrupt_type rx164_irq_type = {
- typename: "RX164",
- startup: rx164_startup_irq,
- shutdown: rx164_disable_irq,
- enable: rx164_enable_irq,
- disable: rx164_disable_irq,
- ack: rx164_disable_irq,
- end: rx164_end_irq,
+ .typename = "RX164",
+ .startup = rx164_startup_irq,
+ .shutdown = rx164_disable_irq,
+ .enable = rx164_enable_irq,
+ .disable = rx164_disable_irq,
+ .ack = rx164_disable_irq,
+ .end = rx164_end_irq,
};
static void
@@ -197,25 +197,25 @@ rx164_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
*/
struct alpha_machine_vector rx164_mv __initmv = {
- vector_name: "RX164",
+ .vector_name = "RX164",
DO_EV5_MMU,
DO_DEFAULT_RTC,
DO_POLARIS_IO,
DO_POLARIS_BUS,
- machine_check: polaris_machine_check,
- max_dma_address: ALPHA_MAX_DMA_ADDRESS,
- min_io_address: DEFAULT_IO_BASE,
- min_mem_address: DEFAULT_MEM_BASE,
-
- nr_irqs: 40,
- device_interrupt: rx164_device_interrupt,
-
- init_arch: polaris_init_arch,
- init_irq: rx164_init_irq,
- init_rtc: common_init_rtc,
- init_pci: common_init_pci,
- kill_arch: NULL,
- pci_map_irq: rx164_map_irq,
- pci_swizzle: common_swizzle,
+ .machine_check = polaris_machine_check,
+ .max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = DEFAULT_MEM_BASE,
+
+ .nr_irqs = 40,
+ .device_interrupt = rx164_device_interrupt,
+
+ .init_arch = polaris_init_arch,
+ .init_irq = rx164_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = common_init_pci,
+ .kill_arch = NULL,
+ .pci_map_irq = rx164_map_irq,
+ .pci_swizzle = common_swizzle,
};
ALIAS_MV(rx164)
diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c
index dc35fe7ea125..587a4bc4a00f 100644
--- a/arch/alpha/kernel/sys_sable.c
+++ b/arch/alpha/kernel/sys_sable.c
@@ -185,13 +185,13 @@ sable_mask_and_ack_irq(unsigned int irq)
}
static struct hw_interrupt_type sable_irq_type = {
- typename: "SABLE",
- startup: sable_startup_irq,
- shutdown: sable_disable_irq,
- enable: sable_enable_irq,
- disable: sable_disable_irq,
- ack: sable_mask_and_ack_irq,
- end: sable_end_irq,
+ .typename = "SABLE",
+ .startup = sable_startup_irq,
+ .shutdown = sable_disable_irq,
+ .enable = sable_enable_irq,
+ .disable = sable_disable_irq,
+ .ack = sable_mask_and_ack_irq,
+ .end = sable_end_irq,
};
static void
@@ -284,29 +284,29 @@ sable_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
#undef GAMMA_BIAS
#define GAMMA_BIAS 0
struct alpha_machine_vector sable_mv __initmv = {
- vector_name: "Sable",
+ .vector_name = "Sable",
DO_EV4_MMU,
DO_DEFAULT_RTC,
DO_T2_IO,
DO_T2_BUS,
- machine_check: t2_machine_check,
- max_dma_address: ALPHA_SABLE_MAX_DMA_ADDRESS,
- min_io_address: EISA_DEFAULT_IO_BASE,
- min_mem_address: T2_DEFAULT_MEM_BASE,
-
- nr_irqs: 40,
- device_interrupt: sable_srm_device_interrupt,
-
- init_arch: t2_init_arch,
- init_irq: sable_init_irq,
- init_rtc: common_init_rtc,
- init_pci: common_init_pci,
- kill_arch: NULL,
- pci_map_irq: sable_map_irq,
- pci_swizzle: common_swizzle,
-
- sys: { t2: {
- gamma_bias: 0
+ .machine_check = t2_machine_check,
+ .max_dma_address = ALPHA_SABLE_MAX_DMA_ADDRESS,
+ .min_io_address = EISA_DEFAULT_IO_BASE,
+ .min_mem_address = T2_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 40,
+ .device_interrupt = sable_srm_device_interrupt,
+
+ .init_arch = t2_init_arch,
+ .init_irq = sable_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = common_init_pci,
+ .kill_arch = NULL,
+ .pci_map_irq = sable_map_irq,
+ .pci_swizzle = common_swizzle,
+
+ .sys = { .t2 = {
+ .gamma_bias = 0
} }
};
ALIAS_MV(sable)
@@ -316,28 +316,28 @@ ALIAS_MV(sable)
#undef GAMMA_BIAS
#define GAMMA_BIAS _GAMMA_BIAS
struct alpha_machine_vector sable_gamma_mv __initmv = {
- vector_name: "Sable-Gamma",
+ .vector_name = "Sable-Gamma",
DO_EV5_MMU,
DO_DEFAULT_RTC,
DO_T2_IO,
DO_T2_BUS,
- machine_check: t2_machine_check,
- max_dma_address: ALPHA_SABLE_MAX_DMA_ADDRESS,
- min_io_address: EISA_DEFAULT_IO_BASE,
- min_mem_address: T2_DEFAULT_MEM_BASE,
-
- nr_irqs: 40,
- device_interrupt: sable_srm_device_interrupt,
-
- init_arch: t2_init_arch,
- init_irq: sable_init_irq,
- init_rtc: common_init_rtc,
- init_pci: common_init_pci,
- pci_map_irq: sable_map_irq,
- pci_swizzle: common_swizzle,
-
- sys: { t2: {
- gamma_bias: _GAMMA_BIAS
+ .machine_check = t2_machine_check,
+ .max_dma_address = ALPHA_SABLE_MAX_DMA_ADDRESS,
+ .min_io_address = EISA_DEFAULT_IO_BASE,
+ .min_mem_address = T2_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 40,
+ .device_interrupt = sable_srm_device_interrupt,
+
+ .init_arch = t2_init_arch,
+ .init_irq = sable_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = common_init_pci,
+ .pci_map_irq = sable_map_irq,
+ .pci_swizzle = common_swizzle,
+
+ .sys = { .t2 = {
+ .gamma_bias = _GAMMA_BIAS
} }
};
ALIAS_MV(sable_gamma)
diff --git a/arch/alpha/kernel/sys_sio.c b/arch/alpha/kernel/sys_sio.c
index a371399a2e3a..dfa9fe57ad9a 100644
--- a/arch/alpha/kernel/sys_sio.c
+++ b/arch/alpha/kernel/sys_sio.c
@@ -252,30 +252,30 @@ alphabook1_init_pci(void)
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_BOOK1)
struct alpha_machine_vector alphabook1_mv __initmv = {
- vector_name: "AlphaBook1",
+ .vector_name = "AlphaBook1",
DO_EV4_MMU,
DO_DEFAULT_RTC,
DO_LCA_IO,
DO_LCA_BUS,
- machine_check: lca_machine_check,
- max_dma_address: ALPHA_MAX_DMA_ADDRESS,
- min_io_address: DEFAULT_IO_BASE,
- min_mem_address: APECS_AND_LCA_DEFAULT_MEM_BASE,
-
- nr_irqs: 16,
- device_interrupt: isa_device_interrupt,
-
- init_arch: alphabook1_init_arch,
- init_irq: sio_init_irq,
- init_rtc: common_init_rtc,
- init_pci: alphabook1_init_pci,
- kill_arch: NULL,
- pci_map_irq: noname_map_irq,
- pci_swizzle: common_swizzle,
-
- sys: { sio: {
+ .machine_check = lca_machine_check,
+ .max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 16,
+ .device_interrupt = isa_device_interrupt,
+
+ .init_arch = alphabook1_init_arch,
+ .init_irq = sio_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = alphabook1_init_pci,
+ .kill_arch = NULL,
+ .pci_map_irq = noname_map_irq,
+ .pci_swizzle = common_swizzle,
+
+ .sys = { .sio = {
/* NCR810 SCSI is 14, PCMCIA controller is 15. */
- route_tab: 0x0e0f0a0a,
+ .route_tab = 0x0e0f0a0a,
}}
};
ALIAS_MV(alphabook1)
@@ -283,28 +283,28 @@ ALIAS_MV(alphabook1)
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_AVANTI)
struct alpha_machine_vector avanti_mv __initmv = {
- vector_name: "Avanti",
+ .vector_name = "Avanti",
DO_EV4_MMU,
DO_DEFAULT_RTC,
DO_APECS_IO,
DO_APECS_BUS,
- machine_check: apecs_machine_check,
- max_dma_address: ALPHA_MAX_DMA_ADDRESS,
- min_io_address: DEFAULT_IO_BASE,
- min_mem_address: APECS_AND_LCA_DEFAULT_MEM_BASE,
-
- nr_irqs: 16,
- device_interrupt: isa_device_interrupt,
-
- init_arch: apecs_init_arch,
- init_irq: sio_init_irq,
- init_rtc: common_init_rtc,
- init_pci: noname_init_pci,
- pci_map_irq: noname_map_irq,
- pci_swizzle: common_swizzle,
-
- sys: { sio: {
- route_tab: 0x0b0a0e0f,
+ .machine_check = apecs_machine_check,
+ .max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 16,
+ .device_interrupt = isa_device_interrupt,
+
+ .init_arch = apecs_init_arch,
+ .init_irq = sio_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = noname_init_pci,
+ .pci_map_irq = noname_map_irq,
+ .pci_swizzle = common_swizzle,
+
+ .sys = { .sio = {
+ .route_tab = 0x0b0a0e0f,
}}
};
ALIAS_MV(avanti)
@@ -312,27 +312,27 @@ ALIAS_MV(avanti)
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_NONAME)
struct alpha_machine_vector noname_mv __initmv = {
- vector_name: "Noname",
+ .vector_name = "Noname",
DO_EV4_MMU,
DO_DEFAULT_RTC,
DO_LCA_IO,
DO_LCA_BUS,
- machine_check: lca_machine_check,
- max_dma_address: ALPHA_MAX_DMA_ADDRESS,
- min_io_address: DEFAULT_IO_BASE,
- min_mem_address: APECS_AND_LCA_DEFAULT_MEM_BASE,
-
- nr_irqs: 16,
- device_interrupt: srm_device_interrupt,
-
- init_arch: lca_init_arch,
- init_irq: sio_init_irq,
- init_rtc: common_init_rtc,
- init_pci: noname_init_pci,
- pci_map_irq: noname_map_irq,
- pci_swizzle: common_swizzle,
-
- sys: { sio: {
+ .machine_check = lca_machine_check,
+ .max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 16,
+ .device_interrupt = srm_device_interrupt,
+
+ .init_arch = lca_init_arch,
+ .init_irq = sio_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = noname_init_pci,
+ .pci_map_irq = noname_map_irq,
+ .pci_swizzle = common_swizzle,
+
+ .sys = { .sio = {
/* For UDB, the only available PCI slot must not map to IRQ 9,
since that's the builtin MSS sound chip. That PCI slot
will map to PIRQ1 (for INTA at least), so we give it IRQ 15
@@ -342,7 +342,7 @@ struct alpha_machine_vector noname_mv __initmv = {
they are co-indicated when the platform type "Noname" is
selected... :-( */
- route_tab: 0x0b0a0f0d,
+ .route_tab = 0x0b0a0f0d,
}}
};
ALIAS_MV(noname)
@@ -350,28 +350,28 @@ ALIAS_MV(noname)
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_P2K)
struct alpha_machine_vector p2k_mv __initmv = {
- vector_name: "Platform2000",
+ .vector_name = "Platform2000",
DO_EV4_MMU,
DO_DEFAULT_RTC,
DO_LCA_IO,
DO_LCA_BUS,
- machine_check: lca_machine_check,
- max_dma_address: ALPHA_MAX_DMA_ADDRESS,
- min_io_address: DEFAULT_IO_BASE,
- min_mem_address: APECS_AND_LCA_DEFAULT_MEM_BASE,
-
- nr_irqs: 16,
- device_interrupt: srm_device_interrupt,
-
- init_arch: lca_init_arch,
- init_irq: sio_init_irq,
- init_rtc: common_init_rtc,
- init_pci: noname_init_pci,
- pci_map_irq: p2k_map_irq,
- pci_swizzle: common_swizzle,
-
- sys: { sio: {
- route_tab: 0x0b0a090f,
+ .machine_check = lca_machine_check,
+ .max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 16,
+ .device_interrupt = srm_device_interrupt,
+
+ .init_arch = lca_init_arch,
+ .init_irq = sio_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = noname_init_pci,
+ .pci_map_irq = p2k_map_irq,
+ .pci_swizzle = common_swizzle,
+
+ .sys = { .sio = {
+ .route_tab = 0x0b0a090f,
}}
};
ALIAS_MV(p2k)
@@ -379,28 +379,28 @@ ALIAS_MV(p2k)
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_XL)
struct alpha_machine_vector xl_mv __initmv = {
- vector_name: "XL",
+ .vector_name = "XL",
DO_EV4_MMU,
DO_DEFAULT_RTC,
DO_APECS_IO,
BUS(apecs),
- machine_check: apecs_machine_check,
- max_dma_address: ALPHA_XL_MAX_DMA_ADDRESS,
- min_io_address: DEFAULT_IO_BASE,
- min_mem_address: XL_DEFAULT_MEM_BASE,
-
- nr_irqs: 16,
- device_interrupt: isa_device_interrupt,
-
- init_arch: apecs_init_arch,
- init_irq: sio_init_irq,
- init_rtc: common_init_rtc,
- init_pci: noname_init_pci,
- pci_map_irq: noname_map_irq,
- pci_swizzle: common_swizzle,
-
- sys: { sio: {
- route_tab: 0x0b0a090f,
+ .machine_check = apecs_machine_check,
+ .max_dma_address = ALPHA_XL_MAX_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = XL_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 16,
+ .device_interrupt = isa_device_interrupt,
+
+ .init_arch = apecs_init_arch,
+ .init_irq = sio_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = noname_init_pci,
+ .pci_map_irq = noname_map_irq,
+ .pci_swizzle = common_swizzle,
+
+ .sys = { .sio = {
+ .route_tab = 0x0b0a090f,
}}
};
ALIAS_MV(xl)
diff --git a/arch/alpha/kernel/sys_sx164.c b/arch/alpha/kernel/sys_sx164.c
index efcf059d3d8c..ebd4d89e8f5b 100644
--- a/arch/alpha/kernel/sys_sx164.c
+++ b/arch/alpha/kernel/sys_sx164.c
@@ -154,26 +154,26 @@ sx164_init_arch(void)
*/
struct alpha_machine_vector sx164_mv __initmv = {
- vector_name: "SX164",
+ .vector_name = "SX164",
DO_EV5_MMU,
DO_DEFAULT_RTC,
DO_PYXIS_IO,
DO_CIA_BUS,
- machine_check: cia_machine_check,
- max_dma_address: ALPHA_MAX_DMA_ADDRESS,
- min_io_address: DEFAULT_IO_BASE,
- min_mem_address: DEFAULT_MEM_BASE,
- pci_dac_offset: PYXIS_DAC_OFFSET,
-
- nr_irqs: 48,
- device_interrupt: pyxis_device_interrupt,
-
- init_arch: sx164_init_arch,
- init_irq: sx164_init_irq,
- init_rtc: common_init_rtc,
- init_pci: sx164_init_pci,
- kill_arch: NULL,
- pci_map_irq: sx164_map_irq,
- pci_swizzle: common_swizzle,
+ .machine_check = cia_machine_check,
+ .max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = DEFAULT_MEM_BASE,
+ .pci_dac_offset = PYXIS_DAC_OFFSET,
+
+ .nr_irqs = 48,
+ .device_interrupt = pyxis_device_interrupt,
+
+ .init_arch = sx164_init_arch,
+ .init_irq = sx164_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = sx164_init_pci,
+ .kill_arch = NULL,
+ .pci_map_irq = sx164_map_irq,
+ .pci_swizzle = common_swizzle,
};
ALIAS_MV(sx164)
diff --git a/arch/alpha/kernel/sys_takara.c b/arch/alpha/kernel/sys_takara.c
index 6af89e81b028..6159efaf6386 100644
--- a/arch/alpha/kernel/sys_takara.c
+++ b/arch/alpha/kernel/sys_takara.c
@@ -75,13 +75,13 @@ takara_end_irq(unsigned int irq)
}
static struct hw_interrupt_type takara_irq_type = {
- typename: "TAKARA",
- startup: takara_startup_irq,
- shutdown: takara_disable_irq,
- enable: takara_enable_irq,
- disable: takara_disable_irq,
- ack: takara_disable_irq,
- end: takara_end_irq,
+ .typename = "TAKARA",
+ .startup = takara_startup_irq,
+ .shutdown = takara_disable_irq,
+ .enable = takara_enable_irq,
+ .disable = takara_disable_irq,
+ .ack = takara_disable_irq,
+ .end = takara_end_irq,
};
static void
@@ -269,25 +269,25 @@ takara_init_pci(void)
*/
struct alpha_machine_vector takara_mv __initmv = {
- vector_name: "Takara",
+ .vector_name = "Takara",
DO_EV5_MMU,
DO_DEFAULT_RTC,
DO_CIA_IO,
DO_CIA_BUS,
- machine_check: cia_machine_check,
- max_dma_address: ALPHA_MAX_DMA_ADDRESS,
- min_io_address: DEFAULT_IO_BASE,
- min_mem_address: CIA_DEFAULT_MEM_BASE,
-
- nr_irqs: 128,
- device_interrupt: takara_device_interrupt,
-
- init_arch: cia_init_arch,
- init_irq: takara_init_irq,
- init_rtc: common_init_rtc,
- init_pci: takara_init_pci,
- kill_arch: NULL,
- pci_map_irq: takara_map_irq,
- pci_swizzle: takara_swizzle,
+ .machine_check = cia_machine_check,
+ .max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = CIA_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 128,
+ .device_interrupt = takara_device_interrupt,
+
+ .init_arch = cia_init_arch,
+ .init_irq = takara_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = takara_init_pci,
+ .kill_arch = NULL,
+ .pci_map_irq = takara_map_irq,
+ .pci_swizzle = takara_swizzle,
};
ALIAS_MV(takara)
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c
index c6ed45836e7d..b7d03baa0ed5 100644
--- a/arch/alpha/kernel/sys_titan.c
+++ b/arch/alpha/kernel/sys_titan.c
@@ -152,14 +152,14 @@ privateer_set_affinity(unsigned int irq, unsigned long affinity)
}
static struct hw_interrupt_type privateer_irq_type = {
- typename: "PRIVATEER",
- startup: privateer_startup_irq,
- shutdown: privateer_disable_irq,
- enable: privateer_enable_irq,
- disable: privateer_disable_irq,
- ack: privateer_disable_irq,
- end: privateer_end_irq,
- set_affinity: privateer_set_affinity,
+ .typename = "PRIVATEER",
+ .startup = privateer_startup_irq,
+ .shutdown = privateer_disable_irq,
+ .enable = privateer_enable_irq,
+ .disable = privateer_disable_irq,
+ .ack = privateer_disable_irq,
+ .end = privateer_end_irq,
+ .set_affinity = privateer_set_affinity,
};
static void
@@ -367,26 +367,26 @@ privateer_machine_check(unsigned long vector, unsigned long la_ptr,
*/
struct alpha_machine_vector privateer_mv __initmv = {
- vector_name: "PRIVATEER",
+ .vector_name = "PRIVATEER",
DO_EV6_MMU,
DO_DEFAULT_RTC,
DO_TITAN_IO,
DO_TITAN_BUS,
- machine_check: privateer_machine_check,
- max_dma_address: ALPHA_MAX_DMA_ADDRESS,
- min_io_address: DEFAULT_IO_BASE,
- min_mem_address: DEFAULT_MEM_BASE,
- pci_dac_offset: TITAN_DAC_OFFSET,
-
- nr_irqs: 80, /* 64 + 16 */
- device_interrupt: privateer_device_interrupt,
-
- init_arch: titan_init_arch,
- init_irq: privateer_init_irq,
- init_rtc: common_init_rtc,
- init_pci: privateer_init_pci,
- kill_arch: titan_kill_arch,
- pci_map_irq: privateer_map_irq,
- pci_swizzle: common_swizzle,
+ .machine_check = privateer_machine_check,
+ .max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = DEFAULT_MEM_BASE,
+ .pci_dac_offset = TITAN_DAC_OFFSET,
+
+ .nr_irqs = 80, /* 64 + 16 */
+ .device_interrupt = privateer_device_interrupt,
+
+ .init_arch = titan_init_arch,
+ .init_irq = privateer_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = privateer_init_pci,
+ .kill_arch = titan_kill_arch,
+ .pci_map_irq = privateer_map_irq,
+ .pci_swizzle = common_swizzle,
};
ALIAS_MV(privateer)
diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c
index bccbbdc0c678..90316b56980a 100644
--- a/arch/alpha/kernel/sys_wildfire.c
+++ b/arch/alpha/kernel/sys_wildfire.c
@@ -158,13 +158,13 @@ wildfire_end_irq(unsigned int irq)
}
static struct hw_interrupt_type wildfire_irq_type = {
- typename: "WILDFIRE",
- startup: wildfire_startup_irq,
- shutdown: wildfire_disable_irq,
- enable: wildfire_enable_irq,
- disable: wildfire_disable_irq,
- ack: wildfire_mask_and_ack_irq,
- end: wildfire_end_irq,
+ .typename = "WILDFIRE",
+ .startup = wildfire_startup_irq,
+ .shutdown = wildfire_disable_irq,
+ .enable = wildfire_enable_irq,
+ .disable = wildfire_disable_irq,
+ .ack = wildfire_mask_and_ack_irq,
+ .end = wildfire_end_irq,
};
static void __init
@@ -173,8 +173,8 @@ wildfire_init_irq_per_pca(int qbbno, int pcano)
int i, irq_bias;
unsigned long io_bias;
static struct irqaction isa_enable = {
- handler: no_action,
- name: "isa_enable",
+ .handler = no_action,
+ .name = "isa_enable",
};
irq_bias = qbbno * (WILDFIRE_PCA_PER_QBB * WILDFIRE_IRQ_PER_PCA)
@@ -333,25 +333,25 @@ wildfire_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
*/
struct alpha_machine_vector wildfire_mv __initmv = {
- vector_name: "WILDFIRE",
+ .vector_name = "WILDFIRE",
DO_EV6_MMU,
DO_DEFAULT_RTC,
DO_WILDFIRE_IO,
DO_WILDFIRE_BUS,
- machine_check: wildfire_machine_check,
- max_dma_address: ALPHA_MAX_DMA_ADDRESS,
- min_io_address: DEFAULT_IO_BASE,
- min_mem_address: DEFAULT_MEM_BASE,
-
- nr_irqs: WILDFIRE_NR_IRQS,
- device_interrupt: wildfire_device_interrupt,
-
- init_arch: wildfire_init_arch,
- init_irq: wildfire_init_irq,
- init_rtc: common_init_rtc,
- init_pci: common_init_pci,
- kill_arch: wildfire_kill_arch,
- pci_map_irq: wildfire_map_irq,
- pci_swizzle: common_swizzle,
+ .machine_check = wildfire_machine_check,
+ .max_dma_address = ALPHA_MAX_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = DEFAULT_MEM_BASE,
+
+ .nr_irqs = WILDFIRE_NR_IRQS,
+ .device_interrupt = wildfire_device_interrupt,
+
+ .init_arch = wildfire_init_arch,
+ .init_irq = wildfire_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = common_init_pci,
+ .kill_arch = wildfire_kill_arch,
+ .pci_map_irq = wildfire_map_irq,
+ .pci_swizzle = common_swizzle,
};
ALIAS_MV(wildfire)
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index 59a91186ace6..e7fea3935f5b 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -4,6 +4,7 @@
* The system call table.
*/
+#include <linux/config.h> /* CONFIG_OSF4_COMPAT */
#include <asm/unistd.h>
.data
diff --git a/arch/alpha/lib/ev6-stxncpy.S b/arch/alpha/lib/ev6-stxncpy.S
index 0d9aba3865d6..2116a5dd42f9 100644
--- a/arch/alpha/lib/ev6-stxncpy.S
+++ b/arch/alpha/lib/ev6-stxncpy.S
@@ -253,7 +253,7 @@ $u_loop:
stq_u t0, -8(a0) # U : save the current word
beq a2, $u_eoc # U :
- ldq_u t2, 0(a1) # U : Latency=3 load high word for next time
+ ldq_u t2, 8(a1) # U : Latency=3 load high word for next time
addq a1, 8, a1 # E :
extqh t2, a1, t0 # U : extract low bits (2 cycle stall)
diff --git a/arch/alpha/lib/stxncpy.S b/arch/alpha/lib/stxncpy.S
index 9f0d16737cf0..f6fcef4c42a5 100644
--- a/arch/alpha/lib/stxncpy.S
+++ b/arch/alpha/lib/stxncpy.S
@@ -210,7 +210,7 @@ $u_loop:
addq a0, 8, a0 # .. e1 :
extql t2, a1, t1 # e0 : extract high bits for next time
beq a2, $u_eoc # .. e1 :
- ldq_u t2, 0(a1) # e0 : load high word for next time
+ ldq_u t2, 8(a1) # e0 : load high word for next time
addq a1, 8, a1 # .. e1 :
nop # e0 :
cmpbge zero, t2, t8 # e1 : test new word for eos (stall)
diff --git a/arch/i386/Config.help b/arch/i386/Config.help
index 299dea0c6536..be811e61b06f 100644
--- a/arch/i386/Config.help
+++ b/arch/i386/Config.help
@@ -73,6 +73,12 @@ CONFIG_X86_CYCLONE
If you are suffering from time skew using a multi-CEC system, say YES.
Otherwise it is safe to say NO.
+CONFIG_X86_SUMMIT
+ This option is needed for IBM systems that use the Summit/EXA chipset.
+ In particular, it is needed for the x440.
+
+ If you don't have one of these computers, you should say N here.
+
CONFIG_X86_UP_IOAPIC
An IO-APIC (I/O Advanced Programmable Interrupt Controller) is an
SMP-capable replacement for PC-style interrupt controllers. Most
@@ -1048,6 +1054,11 @@ CONFIG_DEBUG_OBSOLETE
Say Y here if you want to reduce the chances of the tree compiling,
and are prepared to dig into driver internals to fix compile errors.
+Profiling support
+CONFIG_PROFILING
+ Say Y here to enable the extended profiling support mechanisms used
+ by profilers such as OProfile.
+
Software Suspend
CONFIG_SOFTWARE_SUSPEND
Enable the possibility of suspending the machine. It doesn't need APM.
diff --git a/arch/i386/Makefile b/arch/i386/Makefile
index 9d822af70fcb..4df45365d6f0 100644
--- a/arch/i386/Makefile
+++ b/arch/i386/Makefile
@@ -57,6 +57,8 @@ core-y += arch/i386/kernel/ \
arch/i386/$(MACHINE)/
drivers-$(CONFIG_MATH_EMULATION) += arch/i386/math-emu/
drivers-$(CONFIG_PCI) += arch/i386/pci/
+# FIXME: is drivers- right ?
+drivers-$(CONFIG_OPROFILE) += arch/i386/oprofile/
CFLAGS += -Iarch/i386/$(MACHINE)
AFLAGS += -Iarch/i386/$(MACHINE)
diff --git a/arch/i386/config.in b/arch/i386/config.in
index 784e35d23bce..1026d1a0a1f4 100644
--- a/arch/i386/config.in
+++ b/arch/i386/config.in
@@ -172,7 +172,8 @@ else
if [ "$CONFIG_X86_NUMA" = "y" ]; then
#Platform Choices
bool 'Multiquad (IBM/Sequent) NUMAQ support' CONFIG_X86_NUMAQ
- if [ "$CONFIG_X86_NUMAQ" = "y" ]; then
+ bool 'IBM x440 (Summit/EXA) support' CONFIG_X86_SUMMIT
+ if [ "$CONFIG_X86_NUMAQ" = "y" -o "$CONFIG_X86_SUMMIT" = "y" ]; then
define_bool CONFIG_CLUSTERED_APIC y
fi
# Common NUMA Features
@@ -442,6 +443,8 @@ source drivers/usb/Config.in
source net/bluetooth/Config.in
+source arch/i386/oprofile/Config.in
+
mainmenu_option next_comment
comment 'Kernel hacking'
if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index d201c60ac5c2..55f9312b7f39 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
obj-$(CONFIG_X86_IO_APIC) += io_apic.o
obj-$(CONFIG_SOFTWARE_SUSPEND) += suspend.o
obj-$(CONFIG_X86_NUMAQ) += numaq.o
+obj-$(CONFIG_PROFILING) += profile.o
EXTRA_AFLAGS := -traditional
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index c2f56438f749..d7998626bcc0 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -31,6 +31,7 @@
#include <asm/pgalloc.h>
#include <asm/desc.h>
#include <asm/arch_hooks.h>
+#include "mach_apic.h"
void __init apic_intr_init(void)
{
@@ -328,15 +329,13 @@ void __init setup_local_APIC (void)
* Put the APIC into flat delivery mode.
* Must be "all ones" explicitly for 82489DX.
*/
- apic_write_around(APIC_DFR, 0xffffffff);
+ apic_write_around(APIC_DFR, APIC_DFR_VALUE);
/*
* Set up the logical destination ID.
*/
value = apic_read(APIC_LDR);
- value &= ~APIC_LDR_MASK;
- value |= (1<<(smp_processor_id()+24));
- apic_write_around(APIC_LDR, value);
+ apic_write_around(APIC_LDR, calculate_ldr(value));
}
/*
@@ -1008,17 +1007,9 @@ int setup_profiling_timer(unsigned int multiplier)
inline void smp_local_timer_interrupt(struct pt_regs * regs)
{
- int user = user_mode(regs);
int cpu = smp_processor_id();
- /*
- * The profiling function is SMP safe. (nothing can mess
- * around with "current", and the profiling counters are
- * updated with atomic operations). This is especially
- * useful with a profiling multiplier != 1
- */
- if (!user)
- x86_do_profile(regs->eip);
+ x86_do_profile(regs);
if (--prof_counter[cpu] <= 0) {
/*
@@ -1036,7 +1027,7 @@ inline void smp_local_timer_interrupt(struct pt_regs * regs)
}
#ifdef CONFIG_SMP
- update_process_times(user);
+ update_process_times(user_mode(regs));
#endif
}
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index 557b684431c5..e873703e0c34 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -736,6 +736,7 @@ ENTRY(sys_call_table)
.long sys_alloc_hugepages /* 250 */
.long sys_free_hugepages
.long sys_exit_group
+ .long sys_lookup_dcookie
.rept NR_syscalls-(.-sys_call_table)/4
.long sys_ni_syscall
diff --git a/arch/i386/kernel/i386_ksyms.c b/arch/i386/kernel/i386_ksyms.c
index 79c204a1f476..f9dc46f34e6d 100644
--- a/arch/i386/kernel/i386_ksyms.c
+++ b/arch/i386/kernel/i386_ksyms.c
@@ -29,6 +29,7 @@
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
+#include <asm/nmi.h>
extern void dump_thread(struct pt_regs *, struct user *);
extern spinlock_t rtc_lock;
@@ -151,6 +152,10 @@ EXPORT_SYMBOL(smp_call_function);
EXPORT_SYMBOL(flush_tlb_page);
#endif
+#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PM)
+EXPORT_SYMBOL_GPL(set_nmi_pm_callback);
+EXPORT_SYMBOL_GPL(unset_nmi_pm_callback);
+#endif
#ifdef CONFIG_X86_IO_APIC
EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
#endif
@@ -167,6 +172,11 @@ EXPORT_SYMBOL(get_wchan);
EXPORT_SYMBOL(rtc_lock);
+EXPORT_SYMBOL_GPL(register_profile_notifier);
+EXPORT_SYMBOL_GPL(unregister_profile_notifier);
+EXPORT_SYMBOL_GPL(set_nmi_callback);
+EXPORT_SYMBOL_GPL(unset_nmi_callback);
+
#undef memcpy
#undef memset
extern void * memset(void *,int,__kernel_size_t);
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 25c6ad13b85b..5bc54a30c2f8 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -35,6 +35,7 @@
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/desc.h>
+#include "mach_apic.h"
#undef APIC_LOCKUP_DEBUG
@@ -1151,7 +1152,7 @@ static void __init setup_ioapic_ids_from_mpc (void)
old_id = mp_ioapics[apic].mpc_apicid;
- if (mp_ioapics[apic].mpc_apicid >= 0xf) {
+ if (mp_ioapics[apic].mpc_apicid >= APIC_BROADCAST_ID) {
printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
apic, mp_ioapics[apic].mpc_apicid);
printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
@@ -1164,7 +1165,8 @@ static void __init setup_ioapic_ids_from_mpc (void)
* system must have a unique ID or we get lots of nice
* 'stuck on smp_invalidate_needed IPI wait' messages.
*/
- if (phys_id_present_map & (1 << mp_ioapics[apic].mpc_apicid)) {
+ if (check_apicid_used(phys_id_present_map,
+ mp_ioapics[apic].mpc_apicid)) {
printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
apic, mp_ioapics[apic].mpc_apicid);
for (i = 0; i < 0xf; i++)
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c
index 2f6f8b504523..c9bdad8cb3f1 100644
--- a/arch/i386/kernel/mpparse.c
+++ b/arch/i386/kernel/mpparse.c
@@ -30,6 +30,7 @@
#include <asm/mpspec.h>
#include <asm/pgalloc.h>
#include <asm/io_apic.h>
+#include "mach_apic.h"
/* Have we found an MP table */
int smp_found_config;
@@ -69,6 +70,9 @@ static unsigned int __initdata num_processors;
/* Bitmask of physically existing CPUs */
unsigned long phys_cpu_present_map;
+int x86_summit = 0;
+u8 raw_phys_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
+
/*
* Intel MP BIOS table parsing routines:
*/
@@ -189,7 +193,7 @@ void __init MP_processor_info (struct mpc_config_processor *m)
if (clustered_apic_mode) {
phys_cpu_present_map |= (logical_apicid&0xf) << (4*quad);
} else {
- phys_cpu_present_map |= 1 << m->mpc_apicid;
+ phys_cpu_present_map |= apicid_to_cpu_present(m->mpc_apicid);
}
/*
* Validate version
@@ -199,6 +203,7 @@ void __init MP_processor_info (struct mpc_config_processor *m)
ver = 0x10;
}
apic_version[m->mpc_apicid] = ver;
+ raw_phys_apicid[num_processors - 1] = m->mpc_apicid;
}
static void __init MP_bus_info (struct mpc_config_bus *m)
@@ -356,6 +361,7 @@ static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, \
static int __init smp_read_mpc(struct mp_config_table *mpc)
{
char str[16];
+ char oem[10];
int count=sizeof(*mpc);
unsigned char *mpt=((unsigned char *)mpc)+count;
@@ -380,14 +386,16 @@ static int __init smp_read_mpc(struct mp_config_table *mpc)
printk(KERN_ERR "SMP mptable: null local APIC address!\n");
return 0;
}
- memcpy(str,mpc->mpc_oem,8);
- str[8]=0;
- printk("OEM ID: %s ",str);
+ memcpy(oem,mpc->mpc_oem,8);
+ oem[8]=0;
+ printk("OEM ID: %s ",oem);
memcpy(str,mpc->mpc_productid,12);
str[12]=0;
printk("Product ID: %s ",str);
+ summit_check(oem, str);
+
printk("APIC at: 0x%lX\n",mpc->mpc_lapic);
/*
@@ -465,6 +473,7 @@ static int __init smp_read_mpc(struct mp_config_table *mpc)
}
++mpc_record;
}
+ clustered_apic_check();
if (!num_processors)
printk(KERN_ERR "SMP mptable: no processors registered!\n");
return num_processors;
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index fb2026daf3f1..fbae2c8deeaf 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -175,6 +175,18 @@ static int nmi_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *data)
return 0;
}
+struct pm_dev * set_nmi_pm_callback(pm_callback callback)
+{
+ apic_pm_unregister(nmi_pmdev);
+ return apic_pm_register(PM_SYS_DEV, 0, callback);
+}
+
+void unset_nmi_pm_callback(struct pm_dev * dev)
+{
+ apic_pm_unregister(dev);
+ nmi_pmdev = apic_pm_register(PM_SYS_DEV, 0, nmi_pm_callback);
+}
+
static void nmi_pm_init(void)
{
if (!nmi_pmdev)
diff --git a/arch/i386/kernel/profile.c b/arch/i386/kernel/profile.c
new file mode 100644
index 000000000000..334af20585cb
--- /dev/null
+++ b/arch/i386/kernel/profile.c
@@ -0,0 +1,45 @@
+/*
+ * linux/arch/i386/kernel/profile.c
+ *
+ * (C) 2002 John Levon <levon@movementarian.org>
+ *
+ */
+
+#include <linux/profile.h>
+#include <linux/spinlock.h>
+#include <linux/notifier.h>
+#include <linux/irq.h>
+#include <asm/hw_irq.h>
+
+static struct notifier_block * profile_listeners;
+static rwlock_t profile_lock = RW_LOCK_UNLOCKED;
+
+int register_profile_notifier(struct notifier_block * nb)
+{
+ int err;
+ write_lock_irq(&profile_lock);
+ err = notifier_chain_register(&profile_listeners, nb);
+ write_unlock_irq(&profile_lock);
+ return err;
+}
+
+
+int unregister_profile_notifier(struct notifier_block * nb)
+{
+ int err;
+ write_lock_irq(&profile_lock);
+ err = notifier_chain_unregister(&profile_listeners, nb);
+ write_unlock_irq(&profile_lock);
+ return err;
+}
+
+
+void x86_profile_hook(struct pt_regs * regs)
+{
+ /* we would not even need this lock if
+ * we had a global cli() on register/unregister
+ */
+ read_lock(&profile_lock);
+ notifier_call_chain(&profile_listeners, 0, regs);
+ read_unlock(&profile_lock);
+}
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 9d513dc1ceb2..acc6d8e48075 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -51,6 +51,7 @@
#include <asm/desc.h>
#include <asm/arch_hooks.h>
#include "smpboot_hooks.h"
+#include "mach_apic.h"
/* Set if we find a B stepping CPU */
static int __initdata smp_b_stepping;
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index 4e3b4f1cb4b3..cf53d2c1d50a 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -64,11 +64,6 @@ extern spinlock_t i8259A_lock;
#include "do_timer.h"
-/*
- * for x86_do_profile()
- */
-#include <linux/irq.h>
-
u64 jiffies_64;
unsigned long cpu_khz; /* Detected as we calibrate the TSC */
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 68ed7969fe6d..3eeb2c41814b 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -40,6 +40,7 @@
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/i387.h>
+#include <asm/nmi.h>
#include <asm/smp.h>
#include <asm/pgalloc.h>
@@ -478,17 +479,16 @@ static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
return;
}
#endif
- printk("Uhhuh. NMI received for unknown reason %02x.\n", reason);
+ printk("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
+ reason, smp_processor_id());
printk("Dazed and confused, but trying to continue\n");
printk("Do you have a strange power saving mode enabled?\n");
}
-asmlinkage void do_nmi(struct pt_regs * regs, long error_code)
+static void default_do_nmi(struct pt_regs * regs)
{
unsigned char reason = inb(0x61);
-
- ++nmi_count(smp_processor_id());
-
+
if (!(reason & 0xc0)) {
#if CONFIG_X86_LOCAL_APIC
/*
@@ -517,6 +517,33 @@ asmlinkage void do_nmi(struct pt_regs * regs, long error_code)
inb(0x71); /* dummy */
}
+static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
+{
+ return 0;
+}
+
+static nmi_callback_t nmi_callback = dummy_nmi_callback;
+
+asmlinkage void do_nmi(struct pt_regs * regs, long error_code)
+{
+ int cpu = smp_processor_id();
+
+ ++nmi_count(cpu);
+
+ if (!nmi_callback(regs, cpu))
+ default_do_nmi(regs);
+}
+
+void set_nmi_callback(nmi_callback_t callback)
+{
+ nmi_callback = callback;
+}
+
+void unset_nmi_callback(void)
+{
+ nmi_callback = dummy_nmi_callback;
+}
+
/*
* Our handling of the processor debug registers is non-trivial.
* We do not clear them on entry and exit from the kernel. Therefore
diff --git a/arch/i386/mach-generic/do_timer.h b/arch/i386/mach-generic/do_timer.h
index 7ee964b2ebf2..4a24f8ad0635 100644
--- a/arch/i386/mach-generic/do_timer.h
+++ b/arch/i386/mach-generic/do_timer.h
@@ -20,8 +20,7 @@ static inline void do_timer_interrupt_hook(struct pt_regs *regs)
* system, in that case we have to call the local interrupt handler.
*/
#ifndef CONFIG_X86_LOCAL_APIC
- if (!user_mode(regs))
- x86_do_profile(regs->eip);
+ x86_do_profile(regs);
#else
if (!using_apic_timer)
smp_local_timer_interrupt(regs);
diff --git a/arch/i386/mach-generic/mach_apic.h b/arch/i386/mach-generic/mach_apic.h
new file mode 100644
index 000000000000..f7be859e761e
--- /dev/null
+++ b/arch/i386/mach-generic/mach_apic.h
@@ -0,0 +1,46 @@
+#ifndef __ASM_MACH_APIC_H
+#define __ASM_MACH_APIC_H
+
+static inline unsigned long calculate_ldr(unsigned long old)
+{
+ unsigned long id;
+
+ id = 1UL << smp_processor_id();
+ return ((old & ~APIC_LDR_MASK) | SET_APIC_LOGICAL_ID(id));
+}
+
+#define APIC_DFR_VALUE (APIC_DFR_FLAT)
+
+#ifdef CONFIG_SMP
+ #define TARGET_CPUS (clustered_apic_mode ? 0xf : cpu_online_map)
+#else
+ #define TARGET_CPUS 0x01
+#endif
+
+#define APIC_BROADCAST_ID 0x0F
+#define check_apicid_used(bitmap, apicid) (bitmap & (1 << apicid))
+
+static inline void summit_check(char *oem, char *productid)
+{
+}
+
+static inline void clustered_apic_check(void)
+{
+ printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
+ (clustered_apic_mode ? "NUMA-Q" : "Flat"), nr_ioapics);
+}
+
+static inline int cpu_present_to_apicid(int mps_cpu)
+{
+ if (clustered_apic_mode)
+ return ( ((mps_cpu/4)*16) + (1<<(mps_cpu%4)) );
+ else
+ return mps_cpu;
+}
+
+static inline unsigned long apicid_to_cpu_present(int apicid)
+{
+ return (1ul << apicid);
+}
+
+#endif /* __ASM_MACH_APIC_H */
diff --git a/arch/i386/mach-summit/mach_apic.h b/arch/i386/mach-summit/mach_apic.h
new file mode 100644
index 000000000000..4cc36cd80092
--- /dev/null
+++ b/arch/i386/mach-summit/mach_apic.h
@@ -0,0 +1,57 @@
+#ifndef __ASM_MACH_APIC_H
+#define __ASM_MACH_APIC_H
+
+extern int x86_summit;
+
+#define XAPIC_DEST_CPUS_MASK 0x0Fu
+#define XAPIC_DEST_CLUSTER_MASK 0xF0u
+
+#define xapic_phys_to_log_apicid(phys_apic) ( (1ul << ((phys_apic) & 0x3)) |\
+ ((phys_apic) & XAPIC_DEST_CLUSTER_MASK) )
+
+static inline unsigned long calculate_ldr(unsigned long old)
+{
+ unsigned long id;
+
+ if (x86_summit)
+ id = xapic_phys_to_log_apicid(hard_smp_processor_id());
+ else
+ id = 1UL << smp_processor_id();
+ return ((old & ~APIC_LDR_MASK) | SET_APIC_LOGICAL_ID(id));
+}
+
+#define APIC_DFR_VALUE (x86_summit ? APIC_DFR_CLUSTER : APIC_DFR_FLAT)
+#define TARGET_CPUS (x86_summit ? XAPIC_DEST_CPUS_MASK : cpu_online_map)
+
+#define APIC_BROADCAST_ID (x86_summit ? 0xFF : 0x0F)
+#define check_apicid_used(bitmap, apicid) (0)
+
+static inline void summit_check(char *oem, char *productid)
+{
+	if (!strncmp(oem, "IBM ENSW", 8) && !strncmp(productid, "VIGIL SMP", 9))
+ x86_summit = 1;
+}
+
+static inline void clustered_apic_check(void)
+{
+ printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
+ (x86_summit ? "Summit" : "Flat"), nr_ioapics);
+}
+
+static inline int cpu_present_to_apicid(int mps_cpu)
+{
+ if (x86_summit)
+ return (int) raw_phys_apicid[mps_cpu];
+ else
+ return mps_cpu;
+}
+
+static inline unsigned long apicid_to_cpu_present(int apicid)
+{
+ if (x86_summit)
+ return (1ul << (((apicid >> 4) << 2) | (apicid & 0x3)));
+ else
+ return (1ul << apicid);
+}
+
+#endif /* __ASM_MACH_APIC_H */
diff --git a/arch/i386/mach-visws/do_timer.h b/arch/i386/mach-visws/do_timer.h
index b2c1cbed5cb9..d19c7063e17d 100644
--- a/arch/i386/mach-visws/do_timer.h
+++ b/arch/i386/mach-visws/do_timer.h
@@ -15,8 +15,7 @@ static inline void do_timer_interrupt_hook(struct pt_regs *regs)
* system, in that case we have to call the local interrupt handler.
*/
#ifndef CONFIG_X86_LOCAL_APIC
- if (!user_mode(regs))
- x86_do_profile(regs->eip);
+ x86_do_profile(regs);
#else
if (!using_apic_timer)
smp_local_timer_interrupt(regs);
diff --git a/arch/i386/oprofile/Config.help b/arch/i386/oprofile/Config.help
new file mode 100644
index 000000000000..38f8ae424d37
--- /dev/null
+++ b/arch/i386/oprofile/Config.help
@@ -0,0 +1,6 @@
+CONFIG_OPROFILE
+ OProfile is a profiling system capable of profiling the
+ whole system, including the kernel, kernel modules, libraries,
+ and applications.
+
+ If unsure, say N.
diff --git a/arch/i386/oprofile/Config.in b/arch/i386/oprofile/Config.in
new file mode 100644
index 000000000000..dccdec27046c
--- /dev/null
+++ b/arch/i386/oprofile/Config.in
@@ -0,0 +1,9 @@
+if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
+ mainmenu_option next_comment
+ comment 'Profiling support'
+ bool 'Profiling support (EXPERIMENTAL)' CONFIG_PROFILING
+ if [ "$CONFIG_PROFILING" = "y" ]; then
+ tristate ' OProfile system profiling (EXPERIMENTAL)' CONFIG_OPROFILE
+ fi
+ endmenu
+fi
diff --git a/arch/i386/oprofile/Makefile b/arch/i386/oprofile/Makefile
new file mode 100644
index 000000000000..2c26e7ef9c94
--- /dev/null
+++ b/arch/i386/oprofile/Makefile
@@ -0,0 +1,16 @@
+vpath %.c = . $(TOPDIR)/drivers/oprofile
+
+obj-$(CONFIG_OPROFILE) += oprofile.o
+
+DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
+ oprof.o cpu_buffer.o buffer_sync.o \
+ event_buffer.o oprofile_files.o \
+ oprofilefs.o oprofile_stats.o )
+
+oprofile-objs := $(DRIVER_OBJS) init.o timer_int.o
+
+ifdef CONFIG_X86_LOCAL_APIC
+oprofile-objs += nmi_int.o op_model_athlon.o op_model_ppro.o
+endif
+
+include $(TOPDIR)/Rules.make
diff --git a/arch/i386/oprofile/init.c b/arch/i386/oprofile/init.c
new file mode 100644
index 000000000000..0ca31295e1f2
--- /dev/null
+++ b/arch/i386/oprofile/init.c
@@ -0,0 +1,30 @@
+/**
+ * @file init.c
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/oprofile.h>
+#include <linux/init.h>
+
+/* We support CPUs that have performance counters like the Pentium Pro
+ * with NMI mode samples. Other x86 CPUs use a simple interrupt keyed
+ * off the timer interrupt, which cannot profile interrupts-disabled
+ * code unlike the NMI-based code.
+ */
+
+extern int nmi_init(struct oprofile_operations ** ops, enum oprofile_cpu * cpu);
+extern void timer_init(struct oprofile_operations ** ops, enum oprofile_cpu * cpu);
+
+int __init oprofile_arch_init(struct oprofile_operations ** ops, enum oprofile_cpu * cpu)
+{
+#ifdef CONFIG_X86_LOCAL_APIC
+ if (!nmi_init(ops, cpu))
+#endif
+ timer_init(ops, cpu);
+ return 0;
+}
diff --git a/arch/i386/oprofile/nmi_int.c b/arch/i386/oprofile/nmi_int.c
new file mode 100644
index 000000000000..3f1938fcdea2
--- /dev/null
+++ b/arch/i386/oprofile/nmi_int.c
@@ -0,0 +1,258 @@
+/**
+ * @file nmi_int.c
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/notifier.h>
+#include <linux/smp.h>
+#include <linux/oprofile.h>
+#include <linux/pm.h>
+#include <asm/thread_info.h>
+#include <asm/nmi.h>
+#include <asm/ptrace.h>
+#include <asm/msr.h>
+#include <asm/apic.h>
+#include <asm/bitops.h>
+#include <asm/processor.h>
+
+#include "op_counter.h"
+#include "op_x86_model.h"
+
+static struct op_x86_model_spec const * model;
+static struct op_msrs cpu_msrs[NR_CPUS];
+static unsigned long saved_lvtpc[NR_CPUS];
+static unsigned long kernel_only;
+
+static int nmi_start(void);
+static void nmi_stop(void);
+
+static struct pm_dev * oprofile_pmdev;
+
+/* We're at risk of causing big trouble unless we
+ * make sure to not cause any NMI interrupts when
+ * suspended.
+ */
+static int oprofile_pm_callback(struct pm_dev * dev,
+ pm_request_t rqst, void * data)
+{
+ switch (rqst) {
+ case PM_SUSPEND:
+ nmi_stop();
+ break;
+ case PM_RESUME:
+ nmi_start();
+ break;
+ }
+ return 0;
+}
+
+
+// FIXME: kernel_only
+static int nmi_callback(struct pt_regs * regs, int cpu)
+{
+ return (model->check_ctrs(cpu, &cpu_msrs[cpu], regs));
+}
+
+
+static void nmi_save_registers(struct op_msrs * msrs)
+{
+ unsigned int const nr_ctrs = model->num_counters;
+ unsigned int const nr_ctrls = model->num_controls;
+ struct op_msr_group * counters = &msrs->counters;
+ struct op_msr_group * controls = &msrs->controls;
+ int i;
+
+ for (i = 0; i < nr_ctrs; ++i) {
+ rdmsr(counters->addrs[i],
+ counters->saved[i].low,
+ counters->saved[i].high);
+ }
+
+ for (i = 0; i < nr_ctrls; ++i) {
+ rdmsr(controls->addrs[i],
+ controls->saved[i].low,
+ controls->saved[i].high);
+ }
+}
+
+
+static void nmi_cpu_setup(void * dummy)
+{
+ int cpu = smp_processor_id();
+ struct op_msrs * msrs = &cpu_msrs[cpu];
+ model->fill_in_addresses(msrs);
+ nmi_save_registers(msrs);
+ spin_lock(&oprofilefs_lock);
+ model->setup_ctrs(msrs);
+ spin_unlock(&oprofilefs_lock);
+ saved_lvtpc[cpu] = apic_read(APIC_LVTPC);
+ apic_write(APIC_LVTPC, APIC_DM_NMI);
+}
+
+
+static int nmi_setup(void)
+{
+ /* We walk a thin line between law and rape here.
+ * We need to be careful to install our NMI handler
+ * without actually triggering any NMIs as this will
+ * break the core code horrifically.
+ */
+ smp_call_function(nmi_cpu_setup, NULL, 0, 1);
+ nmi_cpu_setup(0);
+ set_nmi_callback(nmi_callback);
+ oprofile_pmdev = set_nmi_pm_callback(oprofile_pm_callback);
+ return 0;
+}
+
+
+static void nmi_restore_registers(struct op_msrs * msrs)
+{
+ unsigned int const nr_ctrs = model->num_counters;
+ unsigned int const nr_ctrls = model->num_controls;
+ struct op_msr_group * counters = &msrs->counters;
+ struct op_msr_group * controls = &msrs->controls;
+ int i;
+
+ for (i = 0; i < nr_ctrls; ++i) {
+ wrmsr(controls->addrs[i],
+ controls->saved[i].low,
+ controls->saved[i].high);
+ }
+
+ for (i = 0; i < nr_ctrs; ++i) {
+ wrmsr(counters->addrs[i],
+ counters->saved[i].low,
+ counters->saved[i].high);
+ }
+}
+
+
+static void nmi_cpu_shutdown(void * dummy)
+{
+ int cpu = smp_processor_id();
+ struct op_msrs * msrs = &cpu_msrs[cpu];
+ apic_write(APIC_LVTPC, saved_lvtpc[cpu]);
+ nmi_restore_registers(msrs);
+}
+
+
+static void nmi_shutdown(void)
+{
+ unset_nmi_pm_callback(oprofile_pmdev);
+ unset_nmi_callback();
+ smp_call_function(nmi_cpu_shutdown, NULL, 0, 1);
+ nmi_cpu_shutdown(0);
+}
+
+
+static void nmi_cpu_start(void * dummy)
+{
+ struct op_msrs const * msrs = &cpu_msrs[smp_processor_id()];
+ model->start(msrs);
+}
+
+
+static int nmi_start(void)
+{
+ smp_call_function(nmi_cpu_start, NULL, 0, 1);
+ nmi_cpu_start(0);
+ return 0;
+}
+
+
+static void nmi_cpu_stop(void * dummy)
+{
+ struct op_msrs const * msrs = &cpu_msrs[smp_processor_id()];
+ model->stop(msrs);
+}
+
+
+static void nmi_stop(void)
+{
+ smp_call_function(nmi_cpu_stop, NULL, 0, 1);
+ nmi_cpu_stop(0);
+}
+
+
+struct op_counter_config counter_config[OP_MAX_COUNTER];
+
+static int nmi_create_files(struct super_block * sb, struct dentry * root)
+{
+ int i;
+
+ for (i = 0; i < model->num_counters; ++i) {
+ struct dentry * dir;
+ char buf[2];
+
+ snprintf(buf, 2, "%d", i);
+ dir = oprofilefs_mkdir(sb, root, buf);
+ oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
+ oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
+ oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
+ oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
+ oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
+ oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
+ }
+
+ oprofilefs_create_ulong(sb, root, "kernel_only", &kernel_only);
+ return 0;
+}
+
+
+struct oprofile_operations nmi_ops = {
+ .create_files = nmi_create_files,
+ .setup = nmi_setup,
+ .shutdown = nmi_shutdown,
+ .start = nmi_start,
+ .stop = nmi_stop
+};
+
+
+int __init nmi_init(struct oprofile_operations ** ops, enum oprofile_cpu * cpu)
+{
+ __u8 vendor = current_cpu_data.x86_vendor;
+ __u8 family = current_cpu_data.x86;
+ __u8 cpu_model = current_cpu_data.x86_model;
+
+ if (!cpu_has_apic)
+ return 0;
+
+ switch (vendor) {
+ case X86_VENDOR_AMD:
+ /* Needs to be at least an Athlon (or hammer in 32bit mode) */
+ if (family < 6)
+ return 0;
+ model = &op_athlon_spec;
+ *cpu = OPROFILE_CPU_ATHLON;
+ break;
+
+ case X86_VENDOR_INTEL:
+ /* Less than a P6-class processor */
+ if (family != 6)
+ return 0;
+
+ if (cpu_model > 5) {
+ *cpu = OPROFILE_CPU_PIII;
+ } else if (cpu_model > 2) {
+ *cpu = OPROFILE_CPU_PII;
+ } else {
+ *cpu = OPROFILE_CPU_PPRO;
+ }
+
+ model = &op_ppro_spec;
+ break;
+
+ default:
+ return 0;
+ }
+
+ *ops = &nmi_ops;
+ printk(KERN_INFO "oprofile: using NMI interrupt.\n");
+ return 1;
+}
diff --git a/arch/i386/oprofile/op_counter.h b/arch/i386/oprofile/op_counter.h
new file mode 100644
index 000000000000..0417e4daf562
--- /dev/null
+++ b/arch/i386/oprofile/op_counter.h
@@ -0,0 +1,29 @@
+/**
+ * @file op_counter.h
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon
+ */
+
+#ifndef OP_COUNTER_H
+#define OP_COUNTER_H
+
+#define OP_MAX_COUNTER 4
+
+/* Per-perfctr configuration as set via
+ * oprofilefs.
+ */
+struct op_counter_config {
+ unsigned long count;
+ unsigned long enabled;
+ unsigned long event;
+ unsigned long kernel;
+ unsigned long user;
+ unsigned long unit_mask;
+};
+
+extern struct op_counter_config counter_config[];
+
+#endif /* OP_COUNTER_H */
diff --git a/arch/i386/oprofile/op_model_athlon.c b/arch/i386/oprofile/op_model_athlon.c
new file mode 100644
index 000000000000..9d0b71c3d047
--- /dev/null
+++ b/arch/i386/oprofile/op_model_athlon.c
@@ -0,0 +1,149 @@
+/**
+ * @file op_model_athlon.h
+ * athlon / K7 model-specific MSR operations
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon
+ * @author Philippe Elie
+ * @author Graydon Hoare
+ */
+
+#include <linux/oprofile.h>
+#include <asm/ptrace.h>
+#include <asm/msr.h>
+
+#include "op_x86_model.h"
+#include "op_counter.h"
+
+#define NUM_COUNTERS 4
+#define NUM_CONTROLS 4
+
+#define CTR_READ(l,h,msrs,c) do {rdmsr(msrs->counters.addrs[(c)], (l), (h));} while (0)
+#define CTR_WRITE(l,msrs,c) do {wrmsr(msrs->counters.addrs[(c)], -(unsigned int)(l), -1);} while (0)
+#define CTR_OVERFLOWED(n) (!((n) & (1U<<31)))
+
+#define CTRL_READ(l,h,msrs,c) do {rdmsr(msrs->controls.addrs[(c)], (l), (h));} while (0)
+#define CTRL_WRITE(l,h,msrs,c) do {wrmsr(msrs->controls.addrs[(c)], (l), (h));} while (0)
+#define CTRL_SET_ACTIVE(n) (n |= (1<<22))
+#define CTRL_SET_INACTIVE(n) (n &= ~(1<<22))
+#define CTRL_CLEAR(x) (x &= (1<<21))
+#define CTRL_SET_ENABLE(val) (val |= 1<<20)
+#define CTRL_SET_USR(val,u) (val |= ((u & 1) << 16))
+#define CTRL_SET_KERN(val,k) (val |= ((k & 1) << 17))
+#define CTRL_SET_UM(val, m) (val |= (m << 8))
+#define CTRL_SET_EVENT(val, e) (val |= e)
+
+static unsigned long reset_value[NUM_COUNTERS];
+
+static void athlon_fill_in_addresses(struct op_msrs * const msrs)
+{
+ msrs->counters.addrs[0] = MSR_K7_PERFCTR0;
+ msrs->counters.addrs[1] = MSR_K7_PERFCTR1;
+ msrs->counters.addrs[2] = MSR_K7_PERFCTR2;
+ msrs->counters.addrs[3] = MSR_K7_PERFCTR3;
+
+ msrs->controls.addrs[0] = MSR_K7_EVNTSEL0;
+ msrs->controls.addrs[1] = MSR_K7_EVNTSEL1;
+ msrs->controls.addrs[2] = MSR_K7_EVNTSEL2;
+ msrs->controls.addrs[3] = MSR_K7_EVNTSEL3;
+}
+
+
+static void athlon_setup_ctrs(struct op_msrs const * const msrs)
+{
+ unsigned int low, high;
+ int i;
+
+ /* clear all counters */
+ for (i = 0 ; i < NUM_CONTROLS; ++i) {
+ CTRL_READ(low, high, msrs, i);
+ CTRL_CLEAR(low);
+ CTRL_WRITE(low, high, msrs, i);
+ }
+
+ /* avoid a false detection of ctr overflows in NMI handler */
+ for (i = 0; i < NUM_COUNTERS; ++i) {
+ CTR_WRITE(1, msrs, i);
+ }
+
+ /* enable active counters */
+ for (i = 0; i < NUM_COUNTERS; ++i) {
+ if (counter_config[i].event) {
+ reset_value[i] = counter_config[i].count;
+
+ CTR_WRITE(counter_config[i].count, msrs, i);
+
+ CTRL_READ(low, high, msrs, i);
+ CTRL_CLEAR(low);
+ CTRL_SET_ENABLE(low);
+ CTRL_SET_USR(low, counter_config[i].user);
+ CTRL_SET_KERN(low, counter_config[i].kernel);
+ CTRL_SET_UM(low, counter_config[i].unit_mask);
+ CTRL_SET_EVENT(low, counter_config[i].event);
+ CTRL_WRITE(low, high, msrs, i);
+ } else {
+ reset_value[i] = 0;
+ }
+ }
+}
+
+
+static int athlon_check_ctrs(unsigned int const cpu,
+ struct op_msrs const * const msrs,
+ struct pt_regs * const regs)
+{
+ unsigned int low, high;
+ int handled = 0;
+ int i;
+ for (i = 0 ; i < NUM_COUNTERS; ++i) {
+ CTR_READ(low, high, msrs, i);
+ if (CTR_OVERFLOWED(low)) {
+ oprofile_add_sample(regs->eip, i, cpu);
+ CTR_WRITE(reset_value[i], msrs, i);
+ handled = 1;
+ }
+ }
+ return handled;
+}
+
+
+static void athlon_start(struct op_msrs const * const msrs)
+{
+ unsigned int low, high;
+ int i;
+ for (i = 0 ; i < NUM_COUNTERS ; ++i) {
+ if (reset_value[i]) {
+ CTRL_READ(low, high, msrs, i);
+ CTRL_SET_ACTIVE(low);
+ CTRL_WRITE(low, high, msrs, i);
+ }
+ }
+}
+
+
+static void athlon_stop(struct op_msrs const * const msrs)
+{
+ unsigned int low,high;
+ int i;
+
+ /* Subtle: stop on all counters to avoid race with
+ * setting our pm callback */
+ for (i = 0 ; i < NUM_COUNTERS ; ++i) {
+ CTRL_READ(low, high, msrs, i);
+ CTRL_SET_INACTIVE(low);
+ CTRL_WRITE(low, high, msrs, i);
+ }
+}
+
+
+struct op_x86_model_spec const op_athlon_spec = {
+ .num_counters = NUM_COUNTERS,
+ .num_controls = NUM_CONTROLS,
+ .fill_in_addresses = &athlon_fill_in_addresses,
+ .setup_ctrs = &athlon_setup_ctrs,
+ .check_ctrs = &athlon_check_ctrs,
+ .start = &athlon_start,
+ .stop = &athlon_stop
+};
diff --git a/arch/i386/oprofile/op_model_ppro.c b/arch/i386/oprofile/op_model_ppro.c
new file mode 100644
index 000000000000..9252e01bef1e
--- /dev/null
+++ b/arch/i386/oprofile/op_model_ppro.c
@@ -0,0 +1,133 @@
+/**
+ * @file op_model_ppro.h
+ * pentium pro / P6 model-specific MSR operations
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon
+ * @author Philippe Elie
+ * @author Graydon Hoare
+ */
+
+#include <linux/oprofile.h>
+#include <asm/ptrace.h>
+#include <asm/msr.h>
+
+#include "op_x86_model.h"
+#include "op_counter.h"
+
+#define NUM_COUNTERS 2
+#define NUM_CONTROLS 2
+
+#define CTR_READ(l,h,msrs,c) do {rdmsr(msrs->counters.addrs[(c)], (l), (h));} while (0)
+#define CTR_WRITE(l,msrs,c) do {wrmsr(msrs->counters.addrs[(c)], -(u32)(l), -1);} while (0)
+#define CTR_OVERFLOWED(n) (!((n) & (1U<<31)))
+
+#define CTRL_READ(l,h,msrs,c) do {rdmsr((msrs->controls.addrs[(c)]), (l), (h));} while (0)
+#define CTRL_WRITE(l,h,msrs,c) do {wrmsr((msrs->controls.addrs[(c)]), (l), (h));} while (0)
+#define CTRL_SET_ACTIVE(n) (n |= (1<<22))
+#define CTRL_SET_INACTIVE(n) (n &= ~(1<<22))
+#define CTRL_CLEAR(x) (x &= (1<<21))
+#define CTRL_SET_ENABLE(val) (val |= 1<<20)
+#define CTRL_SET_USR(val,u) (val |= ((u & 1) << 16))
+#define CTRL_SET_KERN(val,k) (val |= ((k & 1) << 17))
+#define CTRL_SET_UM(val, m) (val |= (m << 8))
+#define CTRL_SET_EVENT(val, e) (val |= e)
+
+static unsigned long reset_value[NUM_COUNTERS];
+
+static void ppro_fill_in_addresses(struct op_msrs * const msrs)
+{
+ msrs->counters.addrs[0] = MSR_P6_PERFCTR0;
+ msrs->counters.addrs[1] = MSR_P6_PERFCTR1;
+
+ msrs->controls.addrs[0] = MSR_P6_EVNTSEL0;
+ msrs->controls.addrs[1] = MSR_P6_EVNTSEL1;
+}
+
+
+static void ppro_setup_ctrs(struct op_msrs const * const msrs)
+{
+ unsigned int low, high;
+ int i;
+
+ /* clear all counters */
+ for (i = 0 ; i < NUM_CONTROLS; ++i) {
+ CTRL_READ(low, high, msrs, i);
+ CTRL_CLEAR(low);
+ CTRL_WRITE(low, high, msrs, i);
+ }
+
+ /* avoid a false detection of ctr overflows in NMI handler */
+ for (i = 0; i < NUM_COUNTERS; ++i) {
+ CTR_WRITE(1, msrs, i);
+ }
+
+ /* enable active counters */
+ for (i = 0; i < NUM_COUNTERS; ++i) {
+ if (counter_config[i].event) {
+ reset_value[i] = counter_config[i].count;
+
+ CTR_WRITE(counter_config[i].count, msrs, i);
+
+ CTRL_READ(low, high, msrs, i);
+ CTRL_CLEAR(low);
+ CTRL_SET_ENABLE(low);
+ CTRL_SET_USR(low, counter_config[i].user);
+ CTRL_SET_KERN(low, counter_config[i].kernel);
+ CTRL_SET_UM(low, counter_config[i].unit_mask);
+ CTRL_SET_EVENT(low, counter_config[i].event);
+ CTRL_WRITE(low, high, msrs, i);
+ }
+ }
+}
+
+
+static int ppro_check_ctrs(unsigned int const cpu,
+ struct op_msrs const * const msrs,
+ struct pt_regs * const regs)
+{
+ unsigned int low, high;
+ int i;
+ int handled = 0;
+
+ for (i = 0 ; i < NUM_COUNTERS; ++i) {
+ CTR_READ(low, high, msrs, i);
+ if (CTR_OVERFLOWED(low)) {
+ oprofile_add_sample(regs->eip, i, cpu);
+ CTR_WRITE(reset_value[i], msrs, i);
+ handled = 1;
+ }
+ }
+ return handled;
+}
+
+
+static void ppro_start(struct op_msrs const * const msrs)
+{
+ unsigned int low,high;
+ CTRL_READ(low, high, msrs, 0);
+ CTRL_SET_ACTIVE(low);
+ CTRL_WRITE(low, high, msrs, 0);
+}
+
+
+static void ppro_stop(struct op_msrs const * const msrs)
+{
+ unsigned int low,high;
+ CTRL_READ(low, high, msrs, 0);
+ CTRL_SET_INACTIVE(low);
+ CTRL_WRITE(low, high, msrs, 0);
+}
+
+
+struct op_x86_model_spec const op_ppro_spec = {
+ .num_counters = NUM_COUNTERS,
+ .num_controls = NUM_CONTROLS,
+ .fill_in_addresses = &ppro_fill_in_addresses,
+ .setup_ctrs = &ppro_setup_ctrs,
+ .check_ctrs = &ppro_check_ctrs,
+ .start = &ppro_start,
+ .stop = &ppro_stop
+};
diff --git a/arch/i386/oprofile/op_x86_model.h b/arch/i386/oprofile/op_x86_model.h
new file mode 100644
index 000000000000..cd24c76cdea7
--- /dev/null
+++ b/arch/i386/oprofile/op_x86_model.h
@@ -0,0 +1,52 @@
+/**
+ * @file op_x86_model.h
+ * interface to x86 model-specific MSR operations
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author Graydon Hoare
+ */
+
+#ifndef OP_X86_MODEL_H
+#define OP_X86_MODEL_H
+
+/* will need re-working for Pentium IV */
+#define MAX_MSR 4
+
+struct op_saved_msr {
+ unsigned int high;
+ unsigned int low;
+};
+
+struct op_msr_group {
+ unsigned int addrs[MAX_MSR];
+ struct op_saved_msr saved[MAX_MSR];
+};
+
+struct op_msrs {
+ struct op_msr_group counters;
+ struct op_msr_group controls;
+};
+
+struct pt_regs;
+
+/* The model vtable abstracts the differences between
+ * various x86 CPU model's perfctr support.
+ */
+struct op_x86_model_spec {
+ unsigned int const num_counters;
+ unsigned int const num_controls;
+ void (*fill_in_addresses)(struct op_msrs * const msrs);
+ void (*setup_ctrs)(struct op_msrs const * const msrs);
+ int (*check_ctrs)(unsigned int const cpu,
+ struct op_msrs const * const msrs,
+ struct pt_regs * const regs);
+ void (*start)(struct op_msrs const * const msrs);
+ void (*stop)(struct op_msrs const * const msrs);
+};
+
+extern struct op_x86_model_spec const op_ppro_spec;
+extern struct op_x86_model_spec const op_athlon_spec;
+
+#endif /* OP_X86_MODEL_H */
diff --git a/arch/i386/oprofile/timer_int.c b/arch/i386/oprofile/timer_int.c
new file mode 100644
index 000000000000..1159b7597eef
--- /dev/null
+++ b/arch/i386/oprofile/timer_int.c
@@ -0,0 +1,57 @@
+/**
+ * @file timer_int.c
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/notifier.h>
+#include <linux/smp.h>
+#include <linux/irq.h>
+#include <linux/oprofile.h>
+#include <asm/ptrace.h>
+
+#include "op_counter.h"
+
+static int timer_notify(struct notifier_block * self, unsigned long val, void * data)
+{
+ struct pt_regs * regs = (struct pt_regs *)data;
+ int cpu = smp_processor_id();
+
+ oprofile_add_sample(regs->eip, 0, cpu);
+ return 0;
+}
+
+
+static struct notifier_block timer_notifier = {
+ .notifier_call = timer_notify,
+};
+
+
+static int timer_start(void)
+{
+ return register_profile_notifier(&timer_notifier);
+}
+
+
+static void timer_stop(void)
+{
+ unregister_profile_notifier(&timer_notifier);
+}
+
+
+static struct oprofile_operations timer_ops = {
+ .start = timer_start,
+ .stop = timer_stop
+};
+
+
+void __init timer_init(struct oprofile_operations ** ops, enum oprofile_cpu * cpu)
+{
+ *ops = &timer_ops;
+ *cpu = OPROFILE_CPU_TIMER;
+ printk(KERN_INFO "oprofile: using timer interrupt.\n");
+}
diff --git a/arch/m68k/atari/stram.c b/arch/m68k/atari/stram.c
index f40e6f70df3b..202952ba6723 100644
--- a/arch/m68k/atari/stram.c
+++ b/arch/m68k/atari/stram.c
@@ -1021,8 +1021,6 @@ static int stram_open( struct inode *inode, struct file *filp )
printk( KERN_NOTICE "Only kernel can open ST-RAM device\n" );
return( -EPERM );
}
- if (MINOR(inode->i_rdev) != STRAM_MINOR)
- return( -ENXIO );
if (refcnt)
return( -EBUSY );
++refcnt;
@@ -1057,7 +1055,7 @@ int __init stram_device_init(void)
if (!max_swap_size)
/* swapping not enabled */
return -ENXIO;
- stram_disk = alloc_disk();
+ stram_disk = alloc_disk(1);
if (!stram_disk)
return -ENOMEM;
@@ -1070,7 +1068,6 @@ int __init stram_device_init(void)
blk_init_queue(BLK_DEFAULT_QUEUE(STRAM_MAJOR), do_stram_request);
stram_disk->major = STRAM_MAJOR;
stram_disk->first_minor = STRAM_MINOR;
- stram_disk->minor_shift = 0;
stram_disk->fops = &stram_fops;
sprintf(stram_disk->disk_name, "stram");
set_capacity(stram_disk, (swap_end - swap_start)/512);
diff --git a/arch/sh/kernel/mach_7751se.c b/arch/sh/kernel/mach_7751se.c
index da24c384b0c4..d3f5aaf8d271 100644
--- a/arch/sh/kernel/mach_7751se.c
+++ b/arch/sh/kernel/mach_7751se.c
@@ -27,52 +27,52 @@ void init_7751se_IRQ(void);
*/
struct sh_machine_vector mv_7751se __initmv = {
- mv_name: "7751 SolutionEngine",
-
- mv_nr_irqs: 72,
-
- mv_inb: sh7751se_inb,
- mv_inw: sh7751se_inw,
- mv_inl: sh7751se_inl,
- mv_outb: sh7751se_outb,
- mv_outw: sh7751se_outw,
- mv_outl: sh7751se_outl,
-
- mv_inb_p: sh7751se_inb_p,
- mv_inw_p: sh7751se_inw,
- mv_inl_p: sh7751se_inl,
- mv_outb_p: sh7751se_outb_p,
- mv_outw_p: sh7751se_outw,
- mv_outl_p: sh7751se_outl,
-
- mv_insb: sh7751se_insb,
- mv_insw: sh7751se_insw,
- mv_insl: sh7751se_insl,
- mv_outsb: sh7751se_outsb,
- mv_outsw: sh7751se_outsw,
- mv_outsl: sh7751se_outsl,
-
- mv_readb: sh7751se_readb,
- mv_readw: sh7751se_readw,
- mv_readl: sh7751se_readl,
- mv_writeb: sh7751se_writeb,
- mv_writew: sh7751se_writew,
- mv_writel: sh7751se_writel,
-
- mv_ioremap: generic_ioremap,
- mv_iounmap: generic_iounmap,
-
- mv_isa_port2addr: sh7751se_isa_port2addr,
-
- mv_init_arch: setup_7751se,
- mv_init_irq: init_7751se_IRQ,
+ .mv_name = "7751 SolutionEngine",
+
+ .mv_nr_irqs = 72,
+
+ .mv_inb = sh7751se_inb,
+ .mv_inw = sh7751se_inw,
+ .mv_inl = sh7751se_inl,
+ .mv_outb = sh7751se_outb,
+ .mv_outw = sh7751se_outw,
+ .mv_outl = sh7751se_outl,
+
+ .mv_inb_p = sh7751se_inb_p,
+ .mv_inw_p = sh7751se_inw,
+ .mv_inl_p = sh7751se_inl,
+ .mv_outb_p = sh7751se_outb_p,
+ .mv_outw_p = sh7751se_outw,
+ .mv_outl_p = sh7751se_outl,
+
+ .mv_insb = sh7751se_insb,
+ .mv_insw = sh7751se_insw,
+ .mv_insl = sh7751se_insl,
+ .mv_outsb = sh7751se_outsb,
+ .mv_outsw = sh7751se_outsw,
+ .mv_outsl = sh7751se_outsl,
+
+ .mv_readb = sh7751se_readb,
+ .mv_readw = sh7751se_readw,
+ .mv_readl = sh7751se_readl,
+ .mv_writeb = sh7751se_writeb,
+ .mv_writew = sh7751se_writew,
+ .mv_writel = sh7751se_writel,
+
+ .mv_ioremap = generic_ioremap,
+ .mv_iounmap = generic_iounmap,
+
+ .mv_isa_port2addr = sh7751se_isa_port2addr,
+
+ .mv_init_arch = setup_7751se,
+ .mv_init_irq = init_7751se_IRQ,
#ifdef CONFIG_HEARTBEAT
- mv_heartbeat: heartbeat_7751se,
+ .mv_heartbeat = heartbeat_7751se,
#endif
- mv_rtc_gettimeofday: sh_rtc_gettimeofday,
- mv_rtc_settimeofday: sh_rtc_settimeofday,
+ .mv_rtc_gettimeofday = sh_rtc_gettimeofday,
+ .mv_rtc_settimeofday = sh_rtc_settimeofday,
- mv_hw_7751se: 1,
+ .mv_hw_7751se = 1,
};
ALIAS_MV(7751se)
diff --git a/arch/sh/kernel/mach_adx.c b/arch/sh/kernel/mach_adx.c
index e2c246ef4701..4aec80cf1e35 100644
--- a/arch/sh/kernel/mach_adx.c
+++ b/arch/sh/kernel/mach_adx.c
@@ -25,49 +25,49 @@ extern void init_adx_IRQ(void);
*/
struct sh_machine_vector mv_adx __initmv = {
- mv_name: "A&D_ADX",
+ .mv_name = "A&D_ADX",
- mv_nr_irqs: 48,
+ .mv_nr_irqs = 48,
- mv_inb: adx_inb,
- mv_inw: adx_inw,
- mv_inl: adx_inl,
- mv_outb: adx_outb,
- mv_outw: adx_outw,
- mv_outl: adx_outl,
+ .mv_inb = adx_inb,
+ .mv_inw = adx_inw,
+ .mv_inl = adx_inl,
+ .mv_outb = adx_outb,
+ .mv_outw = adx_outw,
+ .mv_outl = adx_outl,
- mv_inb_p: adx_inb_p,
- mv_inw_p: adx_inw,
- mv_inl_p: adx_inl,
- mv_outb_p: adx_outb_p,
- mv_outw_p: adx_outw,
- mv_outl_p: adx_outl,
+ .mv_inb_p = adx_inb_p,
+ .mv_inw_p = adx_inw,
+ .mv_inl_p = adx_inl,
+ .mv_outb_p = adx_outb_p,
+ .mv_outw_p = adx_outw,
+ .mv_outl_p = adx_outl,
- mv_insb: adx_insb,
- mv_insw: adx_insw,
- mv_insl: adx_insl,
- mv_outsb: adx_outsb,
- mv_outsw: adx_outsw,
- mv_outsl: adx_outsl,
+ .mv_insb = adx_insb,
+ .mv_insw = adx_insw,
+ .mv_insl = adx_insl,
+ .mv_outsb = adx_outsb,
+ .mv_outsw = adx_outsw,
+ .mv_outsl = adx_outsl,
- mv_readb: adx_readb,
- mv_readw: adx_readw,
- mv_readl: adx_readl,
- mv_writeb: adx_writeb,
- mv_writew: adx_writew,
- mv_writel: adx_writel,
+ .mv_readb = adx_readb,
+ .mv_readw = adx_readw,
+ .mv_readl = adx_readl,
+ .mv_writeb = adx_writeb,
+ .mv_writew = adx_writew,
+ .mv_writel = adx_writel,
- mv_ioremap: adx_ioremap,
- mv_iounmap: adx_iounmap,
+ .mv_ioremap = adx_ioremap,
+ .mv_iounmap = adx_iounmap,
- mv_isa_port2addr: adx_isa_port2addr,
+ .mv_isa_port2addr = adx_isa_port2addr,
- mv_init_arch: setup_adx,
- mv_init_irq: init_adx_IRQ,
+ .mv_init_arch = setup_adx,
+ .mv_init_irq = init_adx_IRQ,
- mv_rtc_gettimeofday: sh_rtc_gettimeofday,
- mv_rtc_settimeofday: sh_rtc_settimeofday,
+ .mv_rtc_gettimeofday = sh_rtc_gettimeofday,
+ .mv_rtc_settimeofday = sh_rtc_settimeofday,
- mv_hw_adx: 1,
+ .mv_hw_adx = 1,
};
ALIAS_MV(adx)
diff --git a/arch/sh/kernel/mach_bigsur.c b/arch/sh/kernel/mach_bigsur.c
index 65d98409f8bd..e4a419c1b22f 100644
--- a/arch/sh/kernel/mach_bigsur.c
+++ b/arch/sh/kernel/mach_bigsur.c
@@ -29,49 +29,49 @@ extern void setup_bigsur(void);
extern void init_bigsur_IRQ(void);
struct sh_machine_vector mv_bigsur __initmv = {
- mv_name: "Big Sur",
- mv_nr_irqs: NR_IRQS, // Defined in <asm/irq.h>
- mv_inb: bigsur_inb,
- mv_inw: bigsur_inw,
- mv_inl: bigsur_inl,
- mv_outb: bigsur_outb,
- mv_outw: bigsur_outw,
- mv_outl: bigsur_outl,
+ .mv_name = "Big Sur",
+ .mv_nr_irqs = NR_IRQS, // Defined in <asm/irq.h>
+ .mv_inb = bigsur_inb,
+ .mv_inw = bigsur_inw,
+ .mv_inl = bigsur_inl,
+ .mv_outb = bigsur_outb,
+ .mv_outw = bigsur_outw,
+ .mv_outl = bigsur_outl,
- mv_inb_p: bigsur_inb_p,
- mv_inw_p: bigsur_inw,
- mv_inl_p: bigsur_inl,
- mv_outb_p: bigsur_outb_p,
- mv_outw_p: bigsur_outw,
- mv_outl_p: bigsur_outl,
+ .mv_inb_p = bigsur_inb_p,
+ .mv_inw_p = bigsur_inw,
+ .mv_inl_p = bigsur_inl,
+ .mv_outb_p = bigsur_outb_p,
+ .mv_outw_p = bigsur_outw,
+ .mv_outl_p = bigsur_outl,
- mv_insb: bigsur_insb,
- mv_insw: bigsur_insw,
- mv_insl: bigsur_insl,
- mv_outsb: bigsur_outsb,
- mv_outsw: bigsur_outsw,
- mv_outsl: bigsur_outsl,
+ .mv_insb = bigsur_insb,
+ .mv_insw = bigsur_insw,
+ .mv_insl = bigsur_insl,
+ .mv_outsb = bigsur_outsb,
+ .mv_outsw = bigsur_outsw,
+ .mv_outsl = bigsur_outsl,
- mv_readb: generic_readb,
- mv_readw: generic_readw,
- mv_readl: generic_readl,
- mv_writeb: generic_writeb,
- mv_writew: generic_writew,
- mv_writel: generic_writel,
+ .mv_readb = generic_readb,
+ .mv_readw = generic_readw,
+ .mv_readl = generic_readl,
+ .mv_writeb = generic_writeb,
+ .mv_writew = generic_writew,
+ .mv_writel = generic_writel,
- mv_ioremap: generic_ioremap,
- mv_iounmap: generic_iounmap,
+ .mv_ioremap = generic_ioremap,
+ .mv_iounmap = generic_iounmap,
- mv_isa_port2addr: bigsur_isa_port2addr,
- mv_irq_demux: bigsur_irq_demux,
+ .mv_isa_port2addr = bigsur_isa_port2addr,
+ .mv_irq_demux = bigsur_irq_demux,
- mv_init_arch: setup_bigsur,
- mv_init_irq: init_bigsur_IRQ,
+ .mv_init_arch = setup_bigsur,
+ .mv_init_irq = init_bigsur_IRQ,
#ifdef CONFIG_HEARTBEAT
- mv_heartbeat: heartbeat_bigsur,
+ .mv_heartbeat = heartbeat_bigsur,
#endif
- mv_rtc_gettimeofday: sh_rtc_gettimeofday,
- mv_rtc_settimeofday: sh_rtc_settimeofday,
+ .mv_rtc_gettimeofday = sh_rtc_gettimeofday,
+ .mv_rtc_settimeofday = sh_rtc_settimeofday,
};
ALIAS_MV(bigsur)
diff --git a/arch/sh/kernel/mach_cat68701.c b/arch/sh/kernel/mach_cat68701.c
index 1caf12563631..352419ece2b4 100644
--- a/arch/sh/kernel/mach_cat68701.c
+++ b/arch/sh/kernel/mach_cat68701.c
@@ -23,50 +23,50 @@
*/
struct sh_machine_vector mv_cat68701 __initmv = {
- mv_name: "CAT-68701",
- mv_nr_irqs: 32,
- mv_inb: cat68701_inb,
- mv_inw: cat68701_inw,
- mv_inl: cat68701_inl,
- mv_outb: cat68701_outb,
- mv_outw: cat68701_outw,
- mv_outl: cat68701_outl,
+ .mv_name = "CAT-68701",
+ .mv_nr_irqs = 32,
+ .mv_inb = cat68701_inb,
+ .mv_inw = cat68701_inw,
+ .mv_inl = cat68701_inl,
+ .mv_outb = cat68701_outb,
+ .mv_outw = cat68701_outw,
+ .mv_outl = cat68701_outl,
- mv_inb_p: cat68701_inb_p,
- mv_inw_p: cat68701_inw,
- mv_inl_p: cat68701_inl,
- mv_outb_p: cat68701_outb_p,
- mv_outw_p: cat68701_outw,
- mv_outl_p: cat68701_outl,
+ .mv_inb_p = cat68701_inb_p,
+ .mv_inw_p = cat68701_inw,
+ .mv_inl_p = cat68701_inl,
+ .mv_outb_p = cat68701_outb_p,
+ .mv_outw_p = cat68701_outw,
+ .mv_outl_p = cat68701_outl,
- mv_insb: cat68701_insb,
- mv_insw: cat68701_insw,
- mv_insl: cat68701_insl,
- mv_outsb: cat68701_outsb,
- mv_outsw: cat68701_outsw,
- mv_outsl: cat68701_outsl,
+ .mv_insb = cat68701_insb,
+ .mv_insw = cat68701_insw,
+ .mv_insl = cat68701_insl,
+ .mv_outsb = cat68701_outsb,
+ .mv_outsw = cat68701_outsw,
+ .mv_outsl = cat68701_outsl,
- mv_readb: cat68701_readb,
- mv_readw: cat68701_readw,
- mv_readl: cat68701_readl,
- mv_writeb: cat68701_writeb,
- mv_writew: cat68701_writew,
- mv_writel: cat68701_writel,
+ .mv_readb = cat68701_readb,
+ .mv_readw = cat68701_readw,
+ .mv_readl = cat68701_readl,
+ .mv_writeb = cat68701_writeb,
+ .mv_writew = cat68701_writew,
+ .mv_writel = cat68701_writel,
- mv_ioremap: cat68701_ioremap,
- mv_iounmap: cat68701_iounmap,
+ .mv_ioremap = cat68701_ioremap,
+ .mv_iounmap = cat68701_iounmap,
- mv_isa_port2addr: cat68701_isa_port2addr,
- mv_irq_demux: cat68701_irq_demux,
+ .mv_isa_port2addr = cat68701_isa_port2addr,
+ .mv_irq_demux = cat68701_irq_demux,
- mv_init_arch: setup_cat68701,
- mv_init_irq: init_cat68701_IRQ,
+ .mv_init_arch = setup_cat68701,
+ .mv_init_irq = init_cat68701_IRQ,
#ifdef CONFIG_HEARTBEAT
- mv_heartbeat: heartbeat_cat68701,
+ .mv_heartbeat = heartbeat_cat68701,
#endif
- mv_rtc_gettimeofday: sh_rtc_gettimeofday,
- mv_rtc_settimeofday: sh_rtc_settimeofday,
+ .mv_rtc_gettimeofday = sh_rtc_gettimeofday,
+ .mv_rtc_settimeofday = sh_rtc_settimeofday,
};
ALIAS_MV(cat68701)
diff --git a/arch/sh/kernel/mach_dc.c b/arch/sh/kernel/mach_dc.c
index 06801a248973..f9eccfad100f 100644
--- a/arch/sh/kernel/mach_dc.c
+++ b/arch/sh/kernel/mach_dc.c
@@ -26,48 +26,48 @@ int aica_rtc_settimeofday(const struct timeval *tv);
*/
struct sh_machine_vector mv_dreamcast __initmv = {
- mv_name: "dreamcast",
-
- mv_nr_irqs: NR_IRQS,
-
- mv_inb: generic_inb,
- mv_inw: generic_inw,
- mv_inl: generic_inl,
- mv_outb: generic_outb,
- mv_outw: generic_outw,
- mv_outl: generic_outl,
-
- mv_inb_p: generic_inb_p,
- mv_inw_p: generic_inw,
- mv_inl_p: generic_inl,
- mv_outb_p: generic_outb_p,
- mv_outw_p: generic_outw,
- mv_outl_p: generic_outl,
-
- mv_insb: generic_insb,
- mv_insw: generic_insw,
- mv_insl: generic_insl,
- mv_outsb: generic_outsb,
- mv_outsw: generic_outsw,
- mv_outsl: generic_outsl,
-
- mv_readb: generic_readb,
- mv_readw: generic_readw,
- mv_readl: generic_readl,
- mv_writeb: generic_writeb,
- mv_writew: generic_writew,
- mv_writel: generic_writel,
-
- mv_ioremap: generic_ioremap,
- mv_iounmap: generic_iounmap,
-
- mv_init_arch: setup_dreamcast,
- mv_isa_port2addr: dreamcast_isa_port2addr,
- mv_irq_demux: systemasic_irq_demux,
-
- mv_rtc_gettimeofday: aica_rtc_gettimeofday,
- mv_rtc_settimeofday: aica_rtc_settimeofday,
-
- mv_hw_dreamcast: 1,
+ .mv_name = "dreamcast",
+
+ .mv_nr_irqs = NR_IRQS,
+
+ .mv_inb = generic_inb,
+ .mv_inw = generic_inw,
+ .mv_inl = generic_inl,
+ .mv_outb = generic_outb,
+ .mv_outw = generic_outw,
+ .mv_outl = generic_outl,
+
+ .mv_inb_p = generic_inb_p,
+ .mv_inw_p = generic_inw,
+ .mv_inl_p = generic_inl,
+ .mv_outb_p = generic_outb_p,
+ .mv_outw_p = generic_outw,
+ .mv_outl_p = generic_outl,
+
+ .mv_insb = generic_insb,
+ .mv_insw = generic_insw,
+ .mv_insl = generic_insl,
+ .mv_outsb = generic_outsb,
+ .mv_outsw = generic_outsw,
+ .mv_outsl = generic_outsl,
+
+ .mv_readb = generic_readb,
+ .mv_readw = generic_readw,
+ .mv_readl = generic_readl,
+ .mv_writeb = generic_writeb,
+ .mv_writew = generic_writew,
+ .mv_writel = generic_writel,
+
+ .mv_ioremap = generic_ioremap,
+ .mv_iounmap = generic_iounmap,
+
+ .mv_init_arch = setup_dreamcast,
+ .mv_isa_port2addr = dreamcast_isa_port2addr,
+ .mv_irq_demux = systemasic_irq_demux,
+
+ .mv_rtc_gettimeofday = aica_rtc_gettimeofday,
+ .mv_rtc_settimeofday = aica_rtc_settimeofday,
+
+ .mv_hw_dreamcast = 1,
};
ALIAS_MV(dreamcast)
diff --git a/arch/sh/kernel/mach_dmida.c b/arch/sh/kernel/mach_dmida.c
index a97c6471ed8b..a94a9d7e9ce4 100644
--- a/arch/sh/kernel/mach_dmida.c
+++ b/arch/sh/kernel/mach_dmida.c
@@ -30,44 +30,44 @@
*/
struct sh_machine_vector mv_dmida __initmv = {
- mv_name: "DMIDA",
+ .mv_name = "DMIDA",
- mv_nr_irqs: HD64465_IRQ_BASE+HD64465_IRQ_NUM,
+ .mv_nr_irqs = HD64465_IRQ_BASE+HD64465_IRQ_NUM,
- mv_inb: hd64465_inb,
- mv_inw: hd64465_inw,
- mv_inl: hd64465_inl,
- mv_outb: hd64465_outb,
- mv_outw: hd64465_outw,
- mv_outl: hd64465_outl,
+ .mv_inb = hd64465_inb,
+ .mv_inw = hd64465_inw,
+ .mv_inl = hd64465_inl,
+ .mv_outb = hd64465_outb,
+ .mv_outw = hd64465_outw,
+ .mv_outl = hd64465_outl,
- mv_inb_p: hd64465_inb_p,
- mv_inw_p: hd64465_inw,
- mv_inl_p: hd64465_inl,
- mv_outb_p: hd64465_outb_p,
- mv_outw_p: hd64465_outw,
- mv_outl_p: hd64465_outl,
+ .mv_inb_p = hd64465_inb_p,
+ .mv_inw_p = hd64465_inw,
+ .mv_inl_p = hd64465_inl,
+ .mv_outb_p = hd64465_outb_p,
+ .mv_outw_p = hd64465_outw,
+ .mv_outl_p = hd64465_outl,
- mv_insb: hd64465_insb,
- mv_insw: hd64465_insw,
- mv_insl: hd64465_insl,
- mv_outsb: hd64465_outsb,
- mv_outsw: hd64465_outsw,
- mv_outsl: hd64465_outsl,
+ .mv_insb = hd64465_insb,
+ .mv_insw = hd64465_insw,
+ .mv_insl = hd64465_insl,
+ .mv_outsb = hd64465_outsb,
+ .mv_outsw = hd64465_outsw,
+ .mv_outsl = hd64465_outsl,
- mv_readb: generic_readb,
- mv_readw: generic_readw,
- mv_readl: generic_readl,
- mv_writeb: generic_writeb,
- mv_writew: generic_writew,
- mv_writel: generic_writel,
+ .mv_readb = generic_readb,
+ .mv_readw = generic_readw,
+ .mv_readl = generic_readl,
+ .mv_writeb = generic_writeb,
+ .mv_writew = generic_writew,
+ .mv_writel = generic_writel,
- mv_irq_demux: hd64465_irq_demux,
+ .mv_irq_demux = hd64465_irq_demux,
- mv_rtc_gettimeofday: sh_rtc_gettimeofday,
- mv_rtc_settimeofday: sh_rtc_settimeofday,
+ .mv_rtc_gettimeofday = sh_rtc_gettimeofday,
+ .mv_rtc_settimeofday = sh_rtc_settimeofday,
- mv_hw_hd64465: 1,
+ .mv_hw_hd64465 = 1,
};
ALIAS_MV(dmida)
diff --git a/arch/sh/kernel/mach_ec3104.c b/arch/sh/kernel/mach_ec3104.c
index 3c10b9baeb9d..46df3a051227 100644
--- a/arch/sh/kernel/mach_ec3104.c
+++ b/arch/sh/kernel/mach_ec3104.c
@@ -28,42 +28,42 @@
*/
struct sh_machine_vector mv_ec3104 __initmv = {
- mv_name: "EC3104",
+ .mv_name = "EC3104",
- mv_nr_irqs: 96,
+ .mv_nr_irqs = 96,
- mv_inb: ec3104_inb,
- mv_inw: ec3104_inw,
- mv_inl: ec3104_inl,
- mv_outb: ec3104_outb,
- mv_outw: ec3104_outw,
- mv_outl: ec3104_outl,
+ .mv_inb = ec3104_inb,
+ .mv_inw = ec3104_inw,
+ .mv_inl = ec3104_inl,
+ .mv_outb = ec3104_outb,
+ .mv_outw = ec3104_outw,
+ .mv_outl = ec3104_outl,
- mv_inb_p: generic_inb_p,
- mv_inw_p: generic_inw,
- mv_inl_p: generic_inl,
- mv_outb_p: generic_outb_p,
- mv_outw_p: generic_outw,
- mv_outl_p: generic_outl,
+ .mv_inb_p = generic_inb_p,
+ .mv_inw_p = generic_inw,
+ .mv_inl_p = generic_inl,
+ .mv_outb_p = generic_outb_p,
+ .mv_outw_p = generic_outw,
+ .mv_outl_p = generic_outl,
- mv_insb: generic_insb,
- mv_insw: generic_insw,
- mv_insl: generic_insl,
- mv_outsb: generic_outsb,
- mv_outsw: generic_outsw,
- mv_outsl: generic_outsl,
+ .mv_insb = generic_insb,
+ .mv_insw = generic_insw,
+ .mv_insl = generic_insl,
+ .mv_outsb = generic_outsb,
+ .mv_outsw = generic_outsw,
+ .mv_outsl = generic_outsl,
- mv_readb: generic_readb,
- mv_readw: generic_readw,
- mv_readl: generic_readl,
- mv_writeb: generic_writeb,
- mv_writew: generic_writew,
- mv_writel: generic_writel,
+ .mv_readb = generic_readb,
+ .mv_readw = generic_readw,
+ .mv_readl = generic_readl,
+ .mv_writeb = generic_writeb,
+ .mv_writew = generic_writew,
+ .mv_writel = generic_writel,
- mv_irq_demux: ec3104_irq_demux,
+ .mv_irq_demux = ec3104_irq_demux,
- mv_rtc_gettimeofday: sh_rtc_gettimeofday,
- mv_rtc_settimeofday: sh_rtc_settimeofday,
+ .mv_rtc_gettimeofday = sh_rtc_gettimeofday,
+ .mv_rtc_settimeofday = sh_rtc_settimeofday,
};
ALIAS_MV(ec3104)
diff --git a/arch/sh/kernel/mach_hp600.c b/arch/sh/kernel/mach_hp600.c
index 66b96da9703a..50ca56ce6b89 100644
--- a/arch/sh/kernel/mach_hp600.c
+++ b/arch/sh/kernel/mach_hp600.c
@@ -24,135 +24,135 @@
*/
struct sh_machine_vector mv_hp620 __initmv = {
- mv_name: "hp620",
-
- mv_nr_irqs: HD64461_IRQBASE+HD64461_IRQ_NUM,
-
- mv_inb: hd64461_inb,
- mv_inw: hd64461_inw,
- mv_inl: hd64461_inl,
- mv_outb: hd64461_outb,
- mv_outw: hd64461_outw,
- mv_outl: hd64461_outl,
-
- mv_inb_p: hd64461_inb_p,
- mv_inw_p: hd64461_inw,
- mv_inl_p: hd64461_inl,
- mv_outb_p: hd64461_outb_p,
- mv_outw_p: hd64461_outw,
- mv_outl_p: hd64461_outl,
-
- mv_insb: hd64461_insb,
- mv_insw: hd64461_insw,
- mv_insl: hd64461_insl,
- mv_outsb: hd64461_outsb,
- mv_outsw: hd64461_outsw,
- mv_outsl: hd64461_outsl,
-
- mv_readb: generic_readb,
- mv_readw: generic_readw,
- mv_readl: generic_readl,
- mv_writeb: generic_writeb,
- mv_writew: generic_writew,
- mv_writel: generic_writel,
-
- mv_irq_demux: hd64461_irq_demux,
-
- mv_rtc_gettimeofday: sh_rtc_gettimeofday,
- mv_rtc_settimeofday: sh_rtc_settimeofday,
-
- mv_hw_hp600: 1,
- mv_hw_hp620: 1,
- mv_hw_hd64461: 1,
+ .mv_name = "hp620",
+
+ .mv_nr_irqs = HD64461_IRQBASE+HD64461_IRQ_NUM,
+
+ .mv_inb = hd64461_inb,
+ .mv_inw = hd64461_inw,
+ .mv_inl = hd64461_inl,
+ .mv_outb = hd64461_outb,
+ .mv_outw = hd64461_outw,
+ .mv_outl = hd64461_outl,
+
+ .mv_inb_p = hd64461_inb_p,
+ .mv_inw_p = hd64461_inw,
+ .mv_inl_p = hd64461_inl,
+ .mv_outb_p = hd64461_outb_p,
+ .mv_outw_p = hd64461_outw,
+ .mv_outl_p = hd64461_outl,
+
+ .mv_insb = hd64461_insb,
+ .mv_insw = hd64461_insw,
+ .mv_insl = hd64461_insl,
+ .mv_outsb = hd64461_outsb,
+ .mv_outsw = hd64461_outsw,
+ .mv_outsl = hd64461_outsl,
+
+ .mv_readb = generic_readb,
+ .mv_readw = generic_readw,
+ .mv_readl = generic_readl,
+ .mv_writeb = generic_writeb,
+ .mv_writew = generic_writew,
+ .mv_writel = generic_writel,
+
+ .mv_irq_demux = hd64461_irq_demux,
+
+ .mv_rtc_gettimeofday = sh_rtc_gettimeofday,
+ .mv_rtc_settimeofday = sh_rtc_settimeofday,
+
+ .mv_hw_hp600 = 1,
+ .mv_hw_hp620 = 1,
+ .mv_hw_hd64461 = 1,
};
ALIAS_MV(hp620)
struct sh_machine_vector mv_hp680 __initmv = {
- mv_name: "hp680",
-
- mv_nr_irqs: HD64461_IRQBASE+HD64461_IRQ_NUM,
-
- mv_inb: hd64461_inb,
- mv_inw: hd64461_inw,
- mv_inl: hd64461_inl,
- mv_outb: hd64461_outb,
- mv_outw: hd64461_outw,
- mv_outl: hd64461_outl,
-
- mv_inb_p: hd64461_inb_p,
- mv_inw_p: hd64461_inw,
- mv_inl_p: hd64461_inl,
- mv_outb_p: hd64461_outb_p,
- mv_outw_p: hd64461_outw,
- mv_outl_p: hd64461_outl,
-
- mv_insb: hd64461_insb,
- mv_insw: hd64461_insw,
- mv_insl: hd64461_insl,
- mv_outsb: hd64461_outsb,
- mv_outsw: hd64461_outsw,
- mv_outsl: hd64461_outsl,
-
- mv_readb: generic_readb,
- mv_readw: generic_readw,
- mv_readl: generic_readl,
- mv_writeb: generic_writeb,
- mv_writew: generic_writew,
- mv_writel: generic_writel,
-
- mv_irq_demux: hd64461_irq_demux,
-
- mv_rtc_gettimeofday: sh_rtc_gettimeofday,
- mv_rtc_settimeofday: sh_rtc_settimeofday,
-
- mv_hw_hp600: 1,
- mv_hw_hp680: 1,
- mv_hw_hd64461: 1,
+ .mv_name = "hp680",
+
+ .mv_nr_irqs = HD64461_IRQBASE+HD64461_IRQ_NUM,
+
+ .mv_inb = hd64461_inb,
+ .mv_inw = hd64461_inw,
+ .mv_inl = hd64461_inl,
+ .mv_outb = hd64461_outb,
+ .mv_outw = hd64461_outw,
+ .mv_outl = hd64461_outl,
+
+ .mv_inb_p = hd64461_inb_p,
+ .mv_inw_p = hd64461_inw,
+ .mv_inl_p = hd64461_inl,
+ .mv_outb_p = hd64461_outb_p,
+ .mv_outw_p = hd64461_outw,
+ .mv_outl_p = hd64461_outl,
+
+ .mv_insb = hd64461_insb,
+ .mv_insw = hd64461_insw,
+ .mv_insl = hd64461_insl,
+ .mv_outsb = hd64461_outsb,
+ .mv_outsw = hd64461_outsw,
+ .mv_outsl = hd64461_outsl,
+
+ .mv_readb = generic_readb,
+ .mv_readw = generic_readw,
+ .mv_readl = generic_readl,
+ .mv_writeb = generic_writeb,
+ .mv_writew = generic_writew,
+ .mv_writel = generic_writel,
+
+ .mv_irq_demux = hd64461_irq_demux,
+
+ .mv_rtc_gettimeofday = sh_rtc_gettimeofday,
+ .mv_rtc_settimeofday = sh_rtc_settimeofday,
+
+ .mv_hw_hp600 = 1,
+ .mv_hw_hp680 = 1,
+ .mv_hw_hd64461 = 1,
};
ALIAS_MV(hp680)
struct sh_machine_vector mv_hp690 __initmv = {
- mv_name: "hp690",
-
- mv_nr_irqs: HD64461_IRQBASE+HD64461_IRQ_NUM,
-
- mv_inb: hd64461_inb,
- mv_inw: hd64461_inw,
- mv_inl: hd64461_inl,
- mv_outb: hd64461_outb,
- mv_outw: hd64461_outw,
- mv_outl: hd64461_outl,
-
- mv_inb_p: hd64461_inb_p,
- mv_inw_p: hd64461_inw,
- mv_inl_p: hd64461_inl,
- mv_outb_p: hd64461_outb_p,
- mv_outw_p: hd64461_outw,
- mv_outl_p: hd64461_outl,
-
- mv_insb: hd64461_insb,
- mv_insw: hd64461_insw,
- mv_insl: hd64461_insl,
- mv_outsb: hd64461_outsb,
- mv_outsw: hd64461_outsw,
- mv_outsl: hd64461_outsl,
-
- mv_readb: generic_readb,
- mv_readw: generic_readw,
- mv_readl: generic_readl,
- mv_writeb: generic_writeb,
- mv_writew: generic_writew,
- mv_writel: generic_writel,
-
- mv_irq_demux: hd64461_irq_demux,
-
- mv_rtc_gettimeofday: sh_rtc_gettimeofday,
- mv_rtc_settimeofday: sh_rtc_settimeofday,
-
- mv_hw_hp600: 1,
- mv_hw_hp690: 1,
- mv_hw_hd64461: 1,
+ .mv_name = "hp690",
+
+ .mv_nr_irqs = HD64461_IRQBASE+HD64461_IRQ_NUM,
+
+ .mv_inb = hd64461_inb,
+ .mv_inw = hd64461_inw,
+ .mv_inl = hd64461_inl,
+ .mv_outb = hd64461_outb,
+ .mv_outw = hd64461_outw,
+ .mv_outl = hd64461_outl,
+
+ .mv_inb_p = hd64461_inb_p,
+ .mv_inw_p = hd64461_inw,
+ .mv_inl_p = hd64461_inl,
+ .mv_outb_p = hd64461_outb_p,
+ .mv_outw_p = hd64461_outw,
+ .mv_outl_p = hd64461_outl,
+
+ .mv_insb = hd64461_insb,
+ .mv_insw = hd64461_insw,
+ .mv_insl = hd64461_insl,
+ .mv_outsb = hd64461_outsb,
+ .mv_outsw = hd64461_outsw,
+ .mv_outsl = hd64461_outsl,
+
+ .mv_readb = generic_readb,
+ .mv_readw = generic_readw,
+ .mv_readl = generic_readl,
+ .mv_writeb = generic_writeb,
+ .mv_writew = generic_writew,
+ .mv_writel = generic_writel,
+
+ .mv_irq_demux = hd64461_irq_demux,
+
+ .mv_rtc_gettimeofday = sh_rtc_gettimeofday,
+ .mv_rtc_settimeofday = sh_rtc_settimeofday,
+
+ .mv_hw_hp600 = 1,
+ .mv_hw_hp690 = 1,
+ .mv_hw_hd64461 = 1,
};
ALIAS_MV(hp690)
diff --git a/arch/sh/kernel/mach_se.c b/arch/sh/kernel/mach_se.c
index 3cdc7169de49..c1241da421cb 100644
--- a/arch/sh/kernel/mach_se.c
+++ b/arch/sh/kernel/mach_se.c
@@ -27,58 +27,58 @@ void init_se_IRQ(void);
*/
struct sh_machine_vector mv_se __initmv = {
- mv_name: "SolutionEngine",
+ .mv_name = "SolutionEngine",
#if defined(__SH4__)
- mv_nr_irqs: 48,
+ .mv_nr_irqs = 48,
#elif defined(CONFIG_CPU_SUBTYPE_SH7708)
- mv_nr_irqs: 32,
+ .mv_nr_irqs = 32,
#elif defined(CONFIG_CPU_SUBTYPE_SH7709)
- mv_nr_irqs: 61,
+ .mv_nr_irqs = 61,
#endif
- mv_inb: se_inb,
- mv_inw: se_inw,
- mv_inl: se_inl,
- mv_outb: se_outb,
- mv_outw: se_outw,
- mv_outl: se_outl,
-
- mv_inb_p: se_inb_p,
- mv_inw_p: se_inw,
- mv_inl_p: se_inl,
- mv_outb_p: se_outb_p,
- mv_outw_p: se_outw,
- mv_outl_p: se_outl,
-
- mv_insb: se_insb,
- mv_insw: se_insw,
- mv_insl: se_insl,
- mv_outsb: se_outsb,
- mv_outsw: se_outsw,
- mv_outsl: se_outsl,
-
- mv_readb: se_readb,
- mv_readw: se_readw,
- mv_readl: se_readl,
- mv_writeb: se_writeb,
- mv_writew: se_writew,
- mv_writel: se_writel,
-
- mv_ioremap: generic_ioremap,
- mv_iounmap: generic_iounmap,
-
- mv_isa_port2addr: se_isa_port2addr,
-
- mv_init_arch: setup_se,
- mv_init_irq: init_se_IRQ,
+ .mv_inb = se_inb,
+ .mv_inw = se_inw,
+ .mv_inl = se_inl,
+ .mv_outb = se_outb,
+ .mv_outw = se_outw,
+ .mv_outl = se_outl,
+
+ .mv_inb_p = se_inb_p,
+ .mv_inw_p = se_inw,
+ .mv_inl_p = se_inl,
+ .mv_outb_p = se_outb_p,
+ .mv_outw_p = se_outw,
+ .mv_outl_p = se_outl,
+
+ .mv_insb = se_insb,
+ .mv_insw = se_insw,
+ .mv_insl = se_insl,
+ .mv_outsb = se_outsb,
+ .mv_outsw = se_outsw,
+ .mv_outsl = se_outsl,
+
+ .mv_readb = se_readb,
+ .mv_readw = se_readw,
+ .mv_readl = se_readl,
+ .mv_writeb = se_writeb,
+ .mv_writew = se_writew,
+ .mv_writel = se_writel,
+
+ .mv_ioremap = generic_ioremap,
+ .mv_iounmap = generic_iounmap,
+
+ .mv_isa_port2addr = se_isa_port2addr,
+
+ .mv_init_arch = setup_se,
+ .mv_init_irq = init_se_IRQ,
#ifdef CONFIG_HEARTBEAT
- mv_heartbeat: heartbeat_se,
+ .mv_heartbeat = heartbeat_se,
#endif
- mv_rtc_gettimeofday: sh_rtc_gettimeofday,
- mv_rtc_settimeofday: sh_rtc_settimeofday,
+ .mv_rtc_gettimeofday = sh_rtc_gettimeofday,
+ .mv_rtc_settimeofday = sh_rtc_settimeofday,
- mv_hw_se: 1,
+ .mv_hw_se = 1,
};
ALIAS_MV(se)
diff --git a/arch/sh/kernel/mach_unknown.c b/arch/sh/kernel/mach_unknown.c
index ce2f32e95674..75cff0d139af 100644
--- a/arch/sh/kernel/mach_unknown.c
+++ b/arch/sh/kernel/mach_unknown.c
@@ -23,50 +23,50 @@
*/
struct sh_machine_vector mv_unknown __initmv = {
- mv_name: "Unknown",
+ .mv_name = "Unknown",
#if defined(__SH4__)
- mv_nr_irqs: 48,
+ .mv_nr_irqs = 48,
#elif defined(CONFIG_CPU_SUBTYPE_SH7708)
- mv_nr_irqs: 32,
+ .mv_nr_irqs = 32,
#elif defined(CONFIG_CPU_SUBTYPE_SH7709)
- mv_nr_irqs: 61,
+ .mv_nr_irqs = 61,
#endif
- mv_inb: unknown_inb,
- mv_inw: unknown_inw,
- mv_inl: unknown_inl,
- mv_outb: unknown_outb,
- mv_outw: unknown_outw,
- mv_outl: unknown_outl,
+ .mv_inb = unknown_inb,
+ .mv_inw = unknown_inw,
+ .mv_inl = unknown_inl,
+ .mv_outb = unknown_outb,
+ .mv_outw = unknown_outw,
+ .mv_outl = unknown_outl,
- mv_inb_p: unknown_inb_p,
- mv_inw_p: unknown_inw_p,
- mv_inl_p: unknown_inl_p,
- mv_outb_p: unknown_outb_p,
- mv_outw_p: unknown_outw_p,
- mv_outl_p: unknown_outl_p,
+ .mv_inb_p = unknown_inb_p,
+ .mv_inw_p = unknown_inw_p,
+ .mv_inl_p = unknown_inl_p,
+ .mv_outb_p = unknown_outb_p,
+ .mv_outw_p = unknown_outw_p,
+ .mv_outl_p = unknown_outl_p,
- mv_insb: unknown_insb,
- mv_insw: unknown_insw,
- mv_insl: unknown_insl,
- mv_outsb: unknown_outsb,
- mv_outsw: unknown_outsw,
- mv_outsl: unknown_outsl,
+ .mv_insb = unknown_insb,
+ .mv_insw = unknown_insw,
+ .mv_insl = unknown_insl,
+ .mv_outsb = unknown_outsb,
+ .mv_outsw = unknown_outsw,
+ .mv_outsl = unknown_outsl,
- mv_readb: unknown_readb,
- mv_readw: unknown_readw,
- mv_readl: unknown_readl,
- mv_writeb: unknown_writeb,
- mv_writew: unknown_writew,
- mv_writel: unknown_writel,
+ .mv_readb = unknown_readb,
+ .mv_readw = unknown_readw,
+ .mv_readl = unknown_readl,
+ .mv_writeb = unknown_writeb,
+ .mv_writew = unknown_writew,
+ .mv_writel = unknown_writel,
- mv_ioremap: unknown_ioremap,
- mv_iounmap: unknown_iounmap,
+ .mv_ioremap = unknown_ioremap,
+ .mv_iounmap = unknown_iounmap,
- mv_isa_port2addr: unknown_isa_port2addr,
+ .mv_isa_port2addr = unknown_isa_port2addr,
- mv_rtc_gettimeofday: sh_rtc_gettimeofday,
- mv_rtc_settimeofday: sh_rtc_settimeofday,
+ .mv_rtc_gettimeofday = sh_rtc_gettimeofday,
+ .mv_rtc_settimeofday = sh_rtc_settimeofday,
};
ALIAS_MV(unknown)
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index f1ab9da0197d..6043bb426965 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -171,12 +171,12 @@ static int __init sh_console_setup(struct console *co, char *options)
}
static struct console sh_console = {
- name: "bios",
- write: sh_console_write,
- device: sh_console_device,
- setup: sh_console_setup,
- flags: CON_PRINTBUFFER,
- index: -1,
+ .name = "bios",
+ .write = sh_console_write,
+ .device = sh_console_device,
+ .setup = sh_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
};
void sh_console_init(void)
@@ -548,9 +548,9 @@ static void c_stop(struct seq_file *m, void *v)
{
}
struct seq_operations cpuinfo_op = {
- start: c_start,
- next: c_next,
- stop: c_stop,
- show: show_cpuinfo,
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = show_cpuinfo,
};
#endif
diff --git a/arch/sh/kernel/setup_cqreek.c b/arch/sh/kernel/setup_cqreek.c
index 9281a6923dfe..2e55b9cd53f4 100644
--- a/arch/sh/kernel/setup_cqreek.c
+++ b/arch/sh/kernel/setup_cqreek.c
@@ -200,53 +200,53 @@ void __init setup_cqreek(void)
*/
struct sh_machine_vector mv_cqreek __initmv = {
- mv_name: "CqREEK",
+ .mv_name = "CqREEK",
#if defined(__SH4__)
- mv_nr_irqs: 48,
+ .mv_nr_irqs = 48,
#elif defined(CONFIG_CPU_SUBTYPE_SH7708)
- mv_nr_irqs: 32,
+ .mv_nr_irqs = 32,
#elif defined(CONFIG_CPU_SUBTYPE_SH7709)
- mv_nr_irqs: 61,
+ .mv_nr_irqs = 61,
#endif
- mv_inb: generic_inb,
- mv_inw: generic_inw,
- mv_inl: generic_inl,
- mv_outb: generic_outb,
- mv_outw: generic_outw,
- mv_outl: generic_outl,
-
- mv_inb_p: generic_inb_p,
- mv_inw_p: generic_inw_p,
- mv_inl_p: generic_inl_p,
- mv_outb_p: generic_outb_p,
- mv_outw_p: generic_outw_p,
- mv_outl_p: generic_outl_p,
-
- mv_insb: generic_insb,
- mv_insw: generic_insw,
- mv_insl: generic_insl,
- mv_outsb: generic_outsb,
- mv_outsw: generic_outsw,
- mv_outsl: generic_outsl,
-
- mv_readb: generic_readb,
- mv_readw: generic_readw,
- mv_readl: generic_readl,
- mv_writeb: generic_writeb,
- mv_writew: generic_writew,
- mv_writel: generic_writel,
-
- mv_init_arch: setup_cqreek,
- mv_init_irq: init_cqreek_IRQ,
-
- mv_isa_port2addr: cqreek_port2addr,
-
- mv_ioremap: generic_ioremap,
- mv_iounmap: generic_iounmap,
-
- mv_rtc_gettimeofday: sh_rtc_gettimeofday,
- mv_rtc_settimeofday: sh_rtc_settimeofday,
+ .mv_inb = generic_inb,
+ .mv_inw = generic_inw,
+ .mv_inl = generic_inl,
+ .mv_outb = generic_outb,
+ .mv_outw = generic_outw,
+ .mv_outl = generic_outl,
+
+ .mv_inb_p = generic_inb_p,
+ .mv_inw_p = generic_inw_p,
+ .mv_inl_p = generic_inl_p,
+ .mv_outb_p = generic_outb_p,
+ .mv_outw_p = generic_outw_p,
+ .mv_outl_p = generic_outl_p,
+
+ .mv_insb = generic_insb,
+ .mv_insw = generic_insw,
+ .mv_insl = generic_insl,
+ .mv_outsb = generic_outsb,
+ .mv_outsw = generic_outsw,
+ .mv_outsl = generic_outsl,
+
+ .mv_readb = generic_readb,
+ .mv_readw = generic_readw,
+ .mv_readl = generic_readl,
+ .mv_writeb = generic_writeb,
+ .mv_writew = generic_writew,
+ .mv_writel = generic_writel,
+
+ .mv_init_arch = setup_cqreek,
+ .mv_init_irq = init_cqreek_IRQ,
+
+ .mv_isa_port2addr = cqreek_port2addr,
+
+ .mv_ioremap = generic_ioremap,
+ .mv_iounmap = generic_iounmap,
+
+ .mv_rtc_gettimeofday = sh_rtc_gettimeofday,
+ .mv_rtc_settimeofday = sh_rtc_settimeofday,
};
ALIAS_MV(cqreek)
diff --git a/arch/sh/kernel/setup_dc.c b/arch/sh/kernel/setup_dc.c
index 537b040dadd5..f18c35e6febe 100644
--- a/arch/sh/kernel/setup_dc.c
+++ b/arch/sh/kernel/setup_dc.c
@@ -123,13 +123,13 @@ static void shutdown_systemasic_irq(unsigned int irq)
}
static struct hw_interrupt_type systemasic_int = {
- typename: "System ASIC",
- startup: startup_systemasic_irq,
- shutdown: shutdown_systemasic_irq,
- enable: enable_systemasic_irq,
- disable: disable_systemasic_irq,
- ack: ack_systemasic_irq,
- end: end_systemasic_irq,
+ .typename = "System ASIC",
+ .startup = startup_systemasic_irq,
+ .shutdown = shutdown_systemasic_irq,
+ .enable = enable_systemasic_irq,
+ .disable = disable_systemasic_irq,
+ .ack = ack_systemasic_irq,
+ .end = end_systemasic_irq,
};
/*
diff --git a/arch/sh/kernel/setup_ec3104.c b/arch/sh/kernel/setup_ec3104.c
index 4c8c8bd7217b..c864e34f6b4d 100644
--- a/arch/sh/kernel/setup_ec3104.c
+++ b/arch/sh/kernel/setup_ec3104.c
@@ -185,13 +185,13 @@ static void shutdown_ec3104_irq(unsigned int irq)
}
static struct hw_interrupt_type ec3104_int = {
- typename: "EC3104",
- enable: enable_ec3104_irq,
- disable: disable_ec3104_irq,
- ack: mask_and_ack_ec3104_irq,
- end: end_ec3104_irq,
- startup: startup_ec3104_irq,
- shutdown: shutdown_ec3104_irq,
+ .typename = "EC3104",
+ .enable = enable_ec3104_irq,
+ .disable = disable_ec3104_irq,
+ .ack = mask_and_ack_ec3104_irq,
+ .end = end_ec3104_irq,
+ .startup = startup_ec3104_irq,
+ .shutdown = shutdown_ec3104_irq,
};
/* Yuck. the _demux API is ugly */
diff --git a/arch/sh/kernel/setup_hd64465.c b/arch/sh/kernel/setup_hd64465.c
index bcf5f42a9344..857f9da68834 100644
--- a/arch/sh/kernel/setup_hd64465.c
+++ b/arch/sh/kernel/setup_hd64465.c
@@ -89,13 +89,13 @@ static void shutdown_hd64465_irq(unsigned int irq)
static struct hw_interrupt_type hd64465_irq_type = {
- typename: "HD64465-IRQ",
- startup: startup_hd64465_irq,
- shutdown: shutdown_hd64465_irq,
- enable: enable_hd64465_irq,
- disable: disable_hd64465_irq,
- ack: mask_and_ack_hd64465,
- end: end_hd64465_irq
+ .typename = "HD64465-IRQ",
+ .startup = startup_hd64465_irq,
+ .shutdown = shutdown_hd64465_irq,
+ .enable = enable_hd64465_irq,
+ .disable = disable_hd64465_irq,
+ .ack = mask_and_ack_hd64465,
+ .end = end_hd64465_irq
};
diff --git a/arch/sh/kernel/setup_sh2000.c b/arch/sh/kernel/setup_sh2000.c
index ce0770069d60..ad8604916446 100644
--- a/arch/sh/kernel/setup_sh2000.c
+++ b/arch/sh/kernel/setup_sh2000.c
@@ -48,48 +48,48 @@ int __init setup_sh2000(void)
*/
struct sh_machine_vector mv_sh2000 __initmv = {
- mv_name: "sh2000",
+ .mv_name = "sh2000",
- mv_nr_irqs: 80,
+ .mv_nr_irqs = 80,
- mv_inb: generic_inb,
- mv_inw: generic_inw,
- mv_inl: generic_inl,
- mv_outb: generic_outb,
- mv_outw: generic_outw,
- mv_outl: generic_outl,
+ .mv_inb = generic_inb,
+ .mv_inw = generic_inw,
+ .mv_inl = generic_inl,
+ .mv_outb = generic_outb,
+ .mv_outw = generic_outw,
+ .mv_outl = generic_outl,
- mv_inb_p: generic_inb_p,
- mv_inw_p: generic_inw_p,
- mv_inl_p: generic_inl_p,
- mv_outb_p: generic_outb_p,
- mv_outw_p: generic_outw_p,
- mv_outl_p: generic_outl_p,
+ .mv_inb_p = generic_inb_p,
+ .mv_inw_p = generic_inw_p,
+ .mv_inl_p = generic_inl_p,
+ .mv_outb_p = generic_outb_p,
+ .mv_outw_p = generic_outw_p,
+ .mv_outl_p = generic_outl_p,
- mv_insb: generic_insb,
- mv_insw: generic_insw,
- mv_insl: generic_insl,
- mv_outsb: generic_outsb,
- mv_outsw: generic_outsw,
- mv_outsl: generic_outsl,
+ .mv_insb = generic_insb,
+ .mv_insw = generic_insw,
+ .mv_insl = generic_insl,
+ .mv_outsb = generic_outsb,
+ .mv_outsw = generic_outsw,
+ .mv_outsl = generic_outsl,
- mv_readb: generic_readb,
- mv_readw: generic_readw,
- mv_readl: generic_readl,
- mv_writeb: generic_writeb,
- mv_writew: generic_writew,
- mv_writel: generic_writel,
+ .mv_readb = generic_readb,
+ .mv_readw = generic_readw,
+ .mv_readl = generic_readl,
+ .mv_writeb = generic_writeb,
+ .mv_writew = generic_writew,
+ .mv_writel = generic_writel,
- mv_init_arch: setup_sh2000,
+ .mv_init_arch = setup_sh2000,
- mv_isa_port2addr: sh2000_isa_port2addr,
+ .mv_isa_port2addr = sh2000_isa_port2addr,
- mv_ioremap: generic_ioremap,
- mv_iounmap: generic_iounmap,
+ .mv_ioremap = generic_ioremap,
+ .mv_iounmap = generic_iounmap,
- mv_rtc_gettimeofday: sh_rtc_gettimeofday,
- mv_rtc_settimeofday: sh_rtc_settimeofday,
+ .mv_rtc_gettimeofday = sh_rtc_gettimeofday,
+ .mv_rtc_settimeofday = sh_rtc_settimeofday,
- mv_hw_sh2000: 1,
+ .mv_hw_sh2000 = 1,
};
ALIAS_MV(sh2000)
diff --git a/arch/sh/stboards/mach.c b/arch/sh/stboards/mach.c
index 614dd2a889dc..879bfe9e5a73 100644
--- a/arch/sh/stboards/mach.c
+++ b/arch/sh/stboards/mach.c
@@ -26,52 +26,52 @@ void heartbeat_harp(void);
*/
struct sh_machine_vector mv_harp __initmv = {
- mv_name: "STB1 Harp",
+ .mv_name = "STB1 Harp",
- mv_nr_irqs: 89 + HD64465_IRQ_NUM,
+ .mv_nr_irqs = 89 + HD64465_IRQ_NUM,
- mv_inb: hd64465_inb,
- mv_inw: hd64465_inw,
- mv_inl: hd64465_inl,
- mv_outb: hd64465_outb,
- mv_outw: hd64465_outw,
- mv_outl: hd64465_outl,
+ .mv_inb = hd64465_inb,
+ .mv_inw = hd64465_inw,
+ .mv_inl = hd64465_inl,
+ .mv_outb = hd64465_outb,
+ .mv_outw = hd64465_outw,
+ .mv_outl = hd64465_outl,
- mv_inb_p: hd64465_inb_p,
- mv_inw_p: hd64465_inw,
- mv_inl_p: hd64465_inl,
- mv_outb_p: hd64465_outb_p,
- mv_outw_p: hd64465_outw,
- mv_outl_p: hd64465_outl,
+ .mv_inb_p = hd64465_inb_p,
+ .mv_inw_p = hd64465_inw,
+ .mv_inl_p = hd64465_inl,
+ .mv_outb_p = hd64465_outb_p,
+ .mv_outw_p = hd64465_outw,
+ .mv_outl_p = hd64465_outl,
- mv_insb: hd64465_insb,
- mv_insw: hd64465_insw,
- mv_insl: hd64465_insl,
- mv_outsb: hd64465_outsb,
- mv_outsw: hd64465_outsw,
- mv_outsl: hd64465_outsl,
+ .mv_insb = hd64465_insb,
+ .mv_insw = hd64465_insw,
+ .mv_insl = hd64465_insl,
+ .mv_outsb = hd64465_outsb,
+ .mv_outsw = hd64465_outsw,
+ .mv_outsl = hd64465_outsl,
- mv_readb: generic_readb,
- mv_readw: generic_readw,
- mv_readl: generic_readl,
- mv_writeb: generic_writeb,
- mv_writew: generic_writew,
- mv_writel: generic_writel,
+ .mv_readb = generic_readb,
+ .mv_readw = generic_readw,
+ .mv_readl = generic_readl,
+ .mv_writeb = generic_writeb,
+ .mv_writew = generic_writew,
+ .mv_writel = generic_writel,
- mv_ioremap: generic_ioremap,
- mv_iounmap: generic_iounmap,
+ .mv_ioremap = generic_ioremap,
+ .mv_iounmap = generic_iounmap,
- mv_isa_port2addr: hd64465_isa_port2addr,
+ .mv_isa_port2addr = hd64465_isa_port2addr,
- mv_init_arch: setup_harp,
+ .mv_init_arch = setup_harp,
#ifdef CONFIG_PCI
- mv_init_irq: init_harp_irq,
+ .mv_init_irq = init_harp_irq,
#endif
#ifdef CONFIG_HEARTBEAT
- mv_heartbeat: heartbeat_harp,
+ .mv_heartbeat = heartbeat_harp,
#endif
- mv_rtc_gettimeofday: sh_rtc_gettimeofday,
- mv_rtc_settimeofday: sh_rtc_settimeofday,
+ .mv_rtc_gettimeofday = sh_rtc_gettimeofday,
+ .mv_rtc_settimeofday = sh_rtc_settimeofday,
};
ALIAS_MV(harp)
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
index 1a2f15996a2e..1aff826a51b0 100644
--- a/arch/sparc64/kernel/pci.c
+++ b/arch/sparc64/kernel/pci.c
@@ -72,11 +72,135 @@ unsigned char pci_highest_busnum = 0;
*/
int pci_device_reorder = 0;
-spinlock_t pci_poke_lock = SPIN_LOCK_UNLOCKED;
volatile int pci_poke_in_progress;
volatile int pci_poke_cpu = -1;
volatile int pci_poke_faulted;
+static spinlock_t pci_poke_lock = SPIN_LOCK_UNLOCKED;
+
+void pci_config_read8(u8 *addr, u8 *ret)
+{
+ unsigned long flags;
+ u8 byte;
+
+ spin_lock_irqsave(&pci_poke_lock, flags);
+ pci_poke_cpu = smp_processor_id();
+ pci_poke_in_progress = 1;
+ pci_poke_faulted = 0;
+ __asm__ __volatile__("membar #Sync\n\t"
+ "lduba [%1] %2, %0\n\t"
+ "membar #Sync"
+ : "=r" (byte)
+ : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
+ : "memory");
+ pci_poke_in_progress = 0;
+ pci_poke_cpu = -1;
+ if (!pci_poke_faulted)
+ *ret = byte;
+ spin_unlock_irqrestore(&pci_poke_lock, flags);
+}
+
+void pci_config_read16(u16 *addr, u16 *ret)
+{
+ unsigned long flags;
+ u16 word;
+
+ spin_lock_irqsave(&pci_poke_lock, flags);
+ pci_poke_cpu = smp_processor_id();
+ pci_poke_in_progress = 1;
+ pci_poke_faulted = 0;
+ __asm__ __volatile__("membar #Sync\n\t"
+ "lduha [%1] %2, %0\n\t"
+ "membar #Sync"
+ : "=r" (word)
+ : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
+ : "memory");
+ pci_poke_in_progress = 0;
+ pci_poke_cpu = -1;
+ if (!pci_poke_faulted)
+ *ret = word;
+ spin_unlock_irqrestore(&pci_poke_lock, flags);
+}
+
+void pci_config_read32(u32 *addr, u32 *ret)
+{
+ unsigned long flags;
+ u32 dword;
+
+ spin_lock_irqsave(&pci_poke_lock, flags);
+ pci_poke_cpu = smp_processor_id();
+ pci_poke_in_progress = 1;
+ pci_poke_faulted = 0;
+ __asm__ __volatile__("membar #Sync\n\t"
+ "lduwa [%1] %2, %0\n\t"
+ "membar #Sync"
+ : "=r" (dword)
+ : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
+ : "memory");
+ pci_poke_in_progress = 0;
+ pci_poke_cpu = -1;
+ if (!pci_poke_faulted)
+ *ret = dword;
+ spin_unlock_irqrestore(&pci_poke_lock, flags);
+}
+
+void pci_config_write8(u8 *addr, u8 val)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pci_poke_lock, flags);
+ pci_poke_cpu = smp_processor_id();
+ pci_poke_in_progress = 1;
+ pci_poke_faulted = 0;
+ __asm__ __volatile__("membar #Sync\n\t"
+ "stba %0, [%1] %2\n\t"
+ "membar #Sync"
+ : /* no outputs */
+ : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
+ : "memory");
+ pci_poke_in_progress = 0;
+ pci_poke_cpu = -1;
+ spin_unlock_irqrestore(&pci_poke_lock, flags);
+}
+
+void pci_config_write16(u16 *addr, u16 val)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pci_poke_lock, flags);
+ pci_poke_cpu = smp_processor_id();
+ pci_poke_in_progress = 1;
+ pci_poke_faulted = 0;
+ __asm__ __volatile__("membar #Sync\n\t"
+ "stha %0, [%1] %2\n\t"
+ "membar #Sync"
+ : /* no outputs */
+ : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
+ : "memory");
+ pci_poke_in_progress = 0;
+ pci_poke_cpu = -1;
+ spin_unlock_irqrestore(&pci_poke_lock, flags);
+}
+
+void pci_config_write32(u32 *addr, u32 val)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pci_poke_lock, flags);
+ pci_poke_cpu = smp_processor_id();
+ pci_poke_in_progress = 1;
+ pci_poke_faulted = 0;
+ __asm__ __volatile__("membar #Sync\n\t"
+ "stwa %0, [%1] %2\n\t"
+ "membar #Sync"
+ : /* no outputs */
+ : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
+ : "memory");
+ pci_poke_in_progress = 0;
+ pci_poke_cpu = -1;
+ spin_unlock_irqrestore(&pci_poke_lock, flags);
+}
+
/* Probe for all PCI controllers in the system. */
extern void sabre_init(int, char *);
extern void psycho_init(int, char *);
diff --git a/arch/sparc64/kernel/pci_impl.h b/arch/sparc64/kernel/pci_impl.h
index 3d59584891cf..3453ea3e57e6 100644
--- a/arch/sparc64/kernel/pci_impl.h
+++ b/arch/sparc64/kernel/pci_impl.h
@@ -42,132 +42,11 @@ extern void pci_scan_for_master_abort(struct pci_controller_info *, struct pci_p
extern void pci_scan_for_parity_error(struct pci_controller_info *, struct pci_pbm_info *, struct pci_bus *);
/* Configuration space access. */
-extern spinlock_t pci_poke_lock;
-extern volatile int pci_poke_in_progress;
-extern volatile int pci_poke_cpu;
-extern volatile int pci_poke_faulted;
-
-static __inline__ void pci_config_read8(u8 *addr, u8 *ret)
-{
- unsigned long flags;
- u8 byte;
-
- spin_lock_irqsave(&pci_poke_lock, flags);
- pci_poke_cpu = smp_processor_id();
- pci_poke_in_progress = 1;
- pci_poke_faulted = 0;
- __asm__ __volatile__("membar #Sync\n\t"
- "lduba [%1] %2, %0\n\t"
- "membar #Sync"
- : "=r" (byte)
- : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
- : "memory");
- pci_poke_in_progress = 0;
- pci_poke_cpu = -1;
- if (!pci_poke_faulted)
- *ret = byte;
- spin_unlock_irqrestore(&pci_poke_lock, flags);
-}
-
-static __inline__ void pci_config_read16(u16 *addr, u16 *ret)
-{
- unsigned long flags;
- u16 word;
-
- spin_lock_irqsave(&pci_poke_lock, flags);
- pci_poke_cpu = smp_processor_id();
- pci_poke_in_progress = 1;
- pci_poke_faulted = 0;
- __asm__ __volatile__("membar #Sync\n\t"
- "lduha [%1] %2, %0\n\t"
- "membar #Sync"
- : "=r" (word)
- : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
- : "memory");
- pci_poke_in_progress = 0;
- pci_poke_cpu = -1;
- if (!pci_poke_faulted)
- *ret = word;
- spin_unlock_irqrestore(&pci_poke_lock, flags);
-}
-
-static __inline__ void pci_config_read32(u32 *addr, u32 *ret)
-{
- unsigned long flags;
- u32 dword;
-
- spin_lock_irqsave(&pci_poke_lock, flags);
- pci_poke_cpu = smp_processor_id();
- pci_poke_in_progress = 1;
- pci_poke_faulted = 0;
- __asm__ __volatile__("membar #Sync\n\t"
- "lduwa [%1] %2, %0\n\t"
- "membar #Sync"
- : "=r" (dword)
- : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
- : "memory");
- pci_poke_in_progress = 0;
- pci_poke_cpu = -1;
- if (!pci_poke_faulted)
- *ret = dword;
- spin_unlock_irqrestore(&pci_poke_lock, flags);
-}
-
-static __inline__ void pci_config_write8(u8 *addr, u8 val)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&pci_poke_lock, flags);
- pci_poke_cpu = smp_processor_id();
- pci_poke_in_progress = 1;
- pci_poke_faulted = 0;
- __asm__ __volatile__("membar #Sync\n\t"
- "stba %0, [%1] %2\n\t"
- "membar #Sync"
- : /* no outputs */
- : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
- : "memory");
- pci_poke_in_progress = 0;
- pci_poke_cpu = -1;
- spin_unlock_irqrestore(&pci_poke_lock, flags);
-}
-
-static __inline__ void pci_config_write16(u16 *addr, u16 val)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&pci_poke_lock, flags);
- pci_poke_cpu = smp_processor_id();
- pci_poke_in_progress = 1;
- pci_poke_faulted = 0;
- __asm__ __volatile__("membar #Sync\n\t"
- "stha %0, [%1] %2\n\t"
- "membar #Sync"
- : /* no outputs */
- : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
- : "memory");
- pci_poke_in_progress = 0;
- pci_poke_cpu = -1;
- spin_unlock_irqrestore(&pci_poke_lock, flags);
-}
-
-static __inline__ void pci_config_write32(u32 *addr, u32 val)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&pci_poke_lock, flags);
- pci_poke_cpu = smp_processor_id();
- pci_poke_in_progress = 1;
- pci_poke_faulted = 0;
- __asm__ __volatile__("membar #Sync\n\t"
- "stwa %0, [%1] %2\n\t"
- "membar #Sync"
- : /* no outputs */
- : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
- : "memory");
- pci_poke_in_progress = 0;
- pci_poke_cpu = -1;
- spin_unlock_irqrestore(&pci_poke_lock, flags);
-}
+extern void pci_config_read8(u8 *addr, u8 *ret);
+extern void pci_config_read16(u16 *addr, u16 *ret);
+extern void pci_config_read32(u32 *addr, u32 *ret);
+extern void pci_config_write8(u8 *addr, u8 val);
+extern void pci_config_write16(u16 *addr, u16 val);
+extern void pci_config_write32(u32 *addr, u32 val);
#endif /* !(PCI_IMPL_H) */
diff --git a/arch/sparc64/kernel/semaphore.c b/arch/sparc64/kernel/semaphore.c
index bfb72f4041fd..4ce2f9369019 100644
--- a/arch/sparc64/kernel/semaphore.c
+++ b/arch/sparc64/kernel/semaphore.c
@@ -40,13 +40,57 @@ static __inline__ int __sem_update_count(struct semaphore *sem, int incr)
return old_count;
}
-void __up(struct semaphore *sem)
+static void __up(struct semaphore *sem)
{
__sem_update_count(sem, 1);
wake_up(&sem->wait);
}
-void __down(struct semaphore * sem)
+void up(struct semaphore *sem)
+{
+ /* This atomically does:
+ * old_val = sem->count;
+ * new_val = sem->count + 1;
+ * sem->count = new_val;
+ * if (old_val < 0)
+ * __up(sem);
+ *
+ * The (old_val < 0) test is equivalent to
+ * the more straightforward (new_val <= 0),
+ * but it is easier to test the former because
+ * of how the CAS instruction works.
+ */
+
+ __asm__ __volatile__("\n"
+" ! up sem(%0)\n"
+" membar #StoreLoad | #LoadLoad\n"
+"1: lduw [%0], %%g5\n"
+" add %%g5, 1, %%g7\n"
+" cas [%0], %%g5, %%g7\n"
+" cmp %%g5, %%g7\n"
+" bne,pn %%icc, 1b\n"
+" addcc %%g7, 1, %%g0\n"
+" ble,pn %%icc, 3f\n"
+" membar #StoreLoad | #StoreStore\n"
+"2:\n"
+" .subsection 2\n"
+"3: mov %0, %%g5\n"
+" save %%sp, -160, %%sp\n"
+" mov %%g1, %%l1\n"
+" mov %%g2, %%l2\n"
+" mov %%g3, %%l3\n"
+" call %1\n"
+" mov %%g5, %%o0\n"
+" mov %%l1, %%g1\n"
+" mov %%l2, %%g2\n"
+" ba,pt %%xcc, 2b\n"
+" restore %%l3, %%g0, %%g3\n"
+" .previous\n"
+ : : "r" (sem), "i" (__up)
+ : "g5", "g7", "memory", "cc");
+}
+
+static void __down(struct semaphore * sem)
{
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
@@ -64,7 +108,90 @@ void __down(struct semaphore * sem)
wake_up(&sem->wait);
}
-int __down_interruptible(struct semaphore * sem)
+void down(struct semaphore *sem)
+{
+ /* This atomically does:
+ * old_val = sem->count;
+ * new_val = sem->count - 1;
+ * sem->count = new_val;
+ * if (old_val < 1)
+ * __down(sem);
+ *
+ * The (old_val < 1) test is equivalent to
+ * the more straightforward (new_val < 0),
+ * but it is easier to test the former because
+ * of how the CAS instruction works.
+ */
+
+ __asm__ __volatile__("\n"
+" ! down sem(%0)\n"
+"1: lduw [%0], %%g5\n"
+" sub %%g5, 1, %%g7\n"
+" cas [%0], %%g5, %%g7\n"
+" cmp %%g5, %%g7\n"
+" bne,pn %%icc, 1b\n"
+" cmp %%g7, 1\n"
+" bl,pn %%icc, 3f\n"
+" membar #StoreLoad | #StoreStore\n"
+"2:\n"
+" .subsection 2\n"
+"3: mov %0, %%g5\n"
+" save %%sp, -160, %%sp\n"
+" mov %%g1, %%l1\n"
+" mov %%g2, %%l2\n"
+" mov %%g3, %%l3\n"
+" call %1\n"
+" mov %%g5, %%o0\n"
+" mov %%l1, %%g1\n"
+" mov %%l2, %%g2\n"
+" ba,pt %%xcc, 2b\n"
+" restore %%l3, %%g0, %%g3\n"
+" .previous\n"
+ : : "r" (sem), "i" (__down)
+ : "g5", "g7", "memory", "cc");
+}
+
+int down_trylock(struct semaphore *sem)
+{
+ int ret;
+
+ /* This atomically does:
+ * old_val = sem->count;
+ * new_val = sem->count - 1;
+ * if (old_val < 1) {
+ * ret = 1;
+ * } else {
+ * sem->count = new_val;
+ * ret = 0;
+ * }
+ *
+ * The (old_val < 1) test is equivalent to
+ * the more straightforward (new_val < 0),
+ * but it is easier to test the former because
+ * of how the CAS instruction works.
+ */
+
+ __asm__ __volatile__("\n"
+" ! down_trylock sem(%1) ret(%0)\n"
+"1: lduw [%1], %%g5\n"
+" sub %%g5, 1, %%g7\n"
+" cmp %%g5, 1\n"
+" bl,pn %%icc, 2f\n"
+" mov 1, %0\n"
+" cas [%1], %%g5, %%g7\n"
+" cmp %%g5, %%g7\n"
+" bne,pn %%icc, 1b\n"
+" mov 0, %0\n"
+" membar #StoreLoad | #StoreStore\n"
+"2:\n"
+ : "=&r" (ret)
+ : "r" (sem)
+ : "g5", "g7", "memory", "cc");
+
+ return ret;
+}
+
+static int __down_interruptible(struct semaphore * sem)
{
int retval = 0;
struct task_struct *tsk = current;
@@ -87,3 +214,51 @@ int __down_interruptible(struct semaphore * sem)
wake_up(&sem->wait);
return retval;
}
+
+int down_interruptible(struct semaphore *sem)
+{
+ int ret = 0;
+
+ /* This atomically does:
+ * old_val = sem->count;
+ * new_val = sem->count - 1;
+ * sem->count = new_val;
+ * if (old_val < 1)
+ * ret = __down_interruptible(sem);
+ *
+ * The (old_val < 1) test is equivalent to
+ * the more straightforward (new_val < 0),
+ * but it is easier to test the former because
+ * of how the CAS instruction works.
+ */
+
+ __asm__ __volatile__("\n"
+" ! down_interruptible sem(%2) ret(%0)\n"
+"1: lduw [%2], %%g5\n"
+" sub %%g5, 1, %%g7\n"
+" cas [%2], %%g5, %%g7\n"
+" cmp %%g5, %%g7\n"
+" bne,pn %%icc, 1b\n"
+" cmp %%g7, 1\n"
+" bl,pn %%icc, 3f\n"
+" membar #StoreLoad | #StoreStore\n"
+"2:\n"
+" .subsection 2\n"
+"3: mov %2, %%g5\n"
+" save %%sp, -160, %%sp\n"
+" mov %%g1, %%l1\n"
+" mov %%g2, %%l2\n"
+" mov %%g3, %%l3\n"
+" call %3\n"
+" mov %%g5, %%o0\n"
+" mov %%l1, %%g1\n"
+" mov %%l2, %%g2\n"
+" mov %%l3, %%g3\n"
+" ba,pt %%xcc, 2b\n"
+" restore %%o0, %%g0, %0\n"
+" .previous\n"
+ : "=r" (ret)
+ : "0" (ret), "r" (sem), "i" (__down_interruptible)
+ : "g5", "g7", "memory", "cc");
+ return ret;
+}
diff --git a/arch/sparc64/kernel/signal32.c b/arch/sparc64/kernel/signal32.c
index 1dad12270c22..84a28f319c7b 100644
--- a/arch/sparc64/kernel/signal32.c
+++ b/arch/sparc64/kernel/signal32.c
@@ -230,7 +230,7 @@ asmlinkage void do_rt_sigsuspend32(u32 uset, size_t sigsetsize, struct pt_regs *
}
}
-static inline int restore_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t *fpu)
+static int restore_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t *fpu)
{
unsigned long *fpregs = current_thread_info()->fpregs;
unsigned long fprs;
@@ -477,7 +477,7 @@ static int invalid_frame_pointer(void *fp, int fplen)
return 0;
}
-static inline void *get_sigframe(struct sigaction *sa, struct pt_regs *regs, unsigned long framesize)
+static void *get_sigframe(struct sigaction *sa, struct pt_regs *regs, unsigned long framesize)
{
unsigned long sp;
@@ -645,7 +645,7 @@ sigsegv:
}
-static inline int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t *fpu)
+static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t *fpu)
{
unsigned long *fpregs = current_thread_info()->fpregs;
unsigned long fprs;
@@ -665,8 +665,8 @@ static inline int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t *fpu)
return err;
}
-static inline void new_setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
- int signo, sigset_t *oldset)
+static void new_setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
+ int signo, sigset_t *oldset)
{
struct new_signal_frame32 *sf;
int sigframe_size;
@@ -790,7 +790,7 @@ sigsegv:
}
/* Setup a Solaris stack frame */
-static inline void
+static void
setup_svr4_frame32(struct sigaction *sa, unsigned long pc, unsigned long npc,
struct pt_regs *regs, int signr, sigset_t *oldset)
{
@@ -1089,9 +1089,9 @@ sigsegv:
do_exit(SIGSEGV);
}
-static inline void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
- unsigned long signr, sigset_t *oldset,
- siginfo_t *info)
+static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
+ unsigned long signr, sigset_t *oldset,
+ siginfo_t *info)
{
struct rt_signal_frame32 *sf;
int sigframe_size;
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 31110addbd92..a68ab5b7b15e 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -161,9 +161,10 @@ EXPORT_SYMBOL(smp_call_function);
#endif
/* semaphores */
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__up);
+EXPORT_SYMBOL(down);
+EXPORT_SYMBOL(down_trylock);
+EXPORT_SYMBOL(down_interruptible);
+EXPORT_SYMBOL(up);
/* Atomic counter implementation. */
EXPORT_SYMBOL(__atomic_add);
@@ -332,6 +333,7 @@ EXPORT_SYMBOL(__strncmp);
EXPORT_SYMBOL(__memmove);
EXPORT_SYMBOL(csum_partial_copy_sparc64);
+EXPORT_SYMBOL(ip_fast_csum);
/* Moving data to/from userspace. */
EXPORT_SYMBOL(__copy_to_user);
diff --git a/arch/sparc64/kernel/sys_sparc32.c b/arch/sparc64/kernel/sys_sparc32.c
index af4932750a33..9b62bbe3a655 100644
--- a/arch/sparc64/kernel/sys_sparc32.c
+++ b/arch/sparc64/kernel/sys_sparc32.c
@@ -273,7 +273,7 @@ struct itimerval32
struct timeval32 it_value;
};
-static inline long get_tv32(struct timeval *o, struct timeval32 *i)
+static long get_tv32(struct timeval *o, struct timeval32 *i)
{
return (!access_ok(VERIFY_READ, tv32, sizeof(*tv32)) ||
(__get_user(o->tv_sec, &i->tv_sec) |
@@ -296,7 +296,7 @@ static inline long get_it32(struct itimerval *o, struct itimerval32 *i)
__get_user(o->it_value.tv_usec, &i->it_value.tv_usec)));
}
-static inline long put_it32(struct itimerval32 *o, struct itimerval *i)
+static long put_it32(struct itimerval32 *o, struct itimerval *i)
{
return (!access_ok(VERIFY_WRITE, i32, sizeof(*i32)) ||
(__put_user(i->it_interval.tv_sec, &o->it_interval.tv_sec) |
@@ -890,7 +890,7 @@ asmlinkage long sys32_fcntl64(unsigned int fd, unsigned int cmd, unsigned long a
return sys32_fcntl(fd, cmd, arg);
}
-static inline int put_statfs (struct statfs32 *ubuf, struct statfs *kbuf)
+static int put_statfs (struct statfs32 *ubuf, struct statfs *kbuf)
{
int err;
@@ -1272,8 +1272,7 @@ out:
* 64-bit unsigned longs.
*/
-static inline int
-get_fd_set32(unsigned long n, unsigned long *fdset, u32 *ufdset)
+static int get_fd_set32(unsigned long n, unsigned long *fdset, u32 *ufdset)
{
if (ufdset) {
unsigned long odd;
@@ -1303,8 +1302,7 @@ get_fd_set32(unsigned long n, unsigned long *fdset, u32 *ufdset)
return 0;
}
-static inline void
-set_fd_set32(unsigned long n, u32 *ufdset, unsigned long *fdset)
+static void set_fd_set32(unsigned long n, u32 *ufdset, unsigned long *fdset)
{
unsigned long odd;
@@ -2209,8 +2207,8 @@ static inline int iov_from_user32_to_kern(struct iovec *kiov,
return tot_len;
}
-static inline int msghdr_from_user32_to_kern(struct msghdr *kmsg,
- struct msghdr32 *umsg)
+static int msghdr_from_user32_to_kern(struct msghdr *kmsg,
+ struct msghdr32 *umsg)
{
u32 tmp1, tmp2, tmp3;
int err;
diff --git a/arch/sparc64/kernel/unaligned.c b/arch/sparc64/kernel/unaligned.c
index d2be1f3629de..d5f8f8eb0360 100644
--- a/arch/sparc64/kernel/unaligned.c
+++ b/arch/sparc64/kernel/unaligned.c
@@ -149,8 +149,8 @@ static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
}
}
-static inline unsigned long compute_effective_address(struct pt_regs *regs,
- unsigned int insn, unsigned int rd)
+static unsigned long compute_effective_address(struct pt_regs *regs,
+ unsigned int insn, unsigned int rd)
{
unsigned int rs1 = (insn >> 14) & 0x1f;
unsigned int rs2 = insn & 0x1f;
diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile
index 6d3e2a7aeaca..421f1e403bcf 100644
--- a/arch/sparc64/lib/Makefile
+++ b/arch/sparc64/lib/Makefile
@@ -10,6 +10,6 @@ obj-y := PeeCeeI.o blockops.o debuglocks.o strlen.o strncmp.o \
VIScopy.o VISbzero.o VISmemset.o VIScsum.o VIScsumcopy.o \
VIScsumcopyusr.o VISsave.o atomic.o rwlock.o bitops.o \
dec_and_lock.o U3memcpy.o U3copy_from_user.o U3copy_to_user.o \
- U3copy_in_user.o mcount.o
+ U3copy_in_user.o mcount.o ipcsum.o
include $(TOPDIR)/Rules.make
diff --git a/arch/sparc64/lib/ipcsum.S b/arch/sparc64/lib/ipcsum.S
new file mode 100644
index 000000000000..e7d349facf35
--- /dev/null
+++ b/arch/sparc64/lib/ipcsum.S
@@ -0,0 +1,32 @@
+ .text
+ .align 32
+ .globl ip_fast_csum
+ip_fast_csum: /* %o0 = iph, %o1 = ihl */
+ sub %o1, 4, %g7
+ lduw [%o0 + 0x00], %o2
+ lduw [%o0 + 0x04], %g2
+ lduw [%o0 + 0x08], %g3
+ addcc %g2, %o2, %o2
+ lduw [%o0 + 0x0c], %g2
+ addccc %g3, %o2, %o2
+ lduw [%o0 + 0x10], %g3
+
+ addccc %g2, %o2, %o2
+ addc %o2, %g0, %o2
+1: addcc %g3, %o2, %o2
+ add %o0, 4, %o0
+ addccc %o2, %g0, %o2
+ subcc %g7, 1, %g7
+ be,a,pt %icc, 2f
+ sll %o2, 16, %g2
+
+ lduw [%o0 + 0x10], %g3
+ ba,pt %xcc, 1b
+ nop
+2: addcc %o2, %g2, %g2
+ srl %g2, 16, %o2
+ addc %o2, %g0, %o2
+ xnor %g0, %o2, %o2
+ set 0xffff, %o1
+ retl
+ and %o2, %o1, %o0
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index f34184be0c71..e5aa61b9f52d 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -222,7 +222,7 @@ static void do_fault_siginfo(int code, int sig, unsigned long address)
extern int handle_ldf_stq(u32, struct pt_regs *);
extern int handle_ld_nf(u32, struct pt_regs *);
-static inline unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
+static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
{
if (!insn) {
if (!regs->tpc || (regs->tpc & 0x3))
diff --git a/arch/um/Makefile b/arch/um/Makefile
index c8889424b773..f4265f94529b 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -43,9 +43,8 @@ ARCH_INCLUDE = $(TOPDIR)/$(ARCH_DIR)/include
# in CFLAGS. Otherwise, it would cause ld to complain about the two different
# errnos.
-CFLAGS += $(DEBUG) $(PROFILE) $(ARCH_CFLAGS) -D__arch_um__ \
- -DSUBARCH=\"$(SUBARCH)\" -D_LARGEFILE64_SOURCE -I$(ARCH_INCLUDE) \
- -Derrno=kernel_errno
+CFLAGS += $(DEBUG) $(PROFILE) -D__arch_um__ -DSUBARCH=\"$(SUBARCH)\" \
+ -D_LARGEFILE64_SOURCE -I$(ARCH_INCLUDE) -Derrno=kernel_errno
LINK_WRAPS = -Wl,--wrap,malloc -Wl,--wrap,free -Wl,--wrap,calloc
diff --git a/arch/um/Makefile-i386 b/arch/um/Makefile-i386
index efdac9e0747f..958d1faba8c4 100644
--- a/arch/um/Makefile-i386
+++ b/arch/um/Makefile-i386
@@ -4,7 +4,7 @@ else
TOP_ADDR = 0xc0000000
endif
-ARCH_CFLAGS = -U__$(SUBARCH)__ -U$(SUBARCH)
+CFLAGS += -U__$(SUBARCH)__ -U$(SUBARCH)
ELF_ARCH = $(SUBARCH)
ELF_FORMAT = elf32-$(SUBARCH)
diff --git a/arch/um/config.in b/arch/um/config.in
index 32cac03d468f..e8719b1a6e20 100644
--- a/arch/um/config.in
+++ b/arch/um/config.in
@@ -28,8 +28,13 @@ tristate 'Host filesystem' CONFIG_HOSTFS
bool 'Management console' CONFIG_MCONSOLE
dep_bool 'Magic SysRq key' CONFIG_MAGIC_SYSRQ $CONFIG_MCONSOLE
bool '2G/2G host address space split' CONFIG_HOST_2G_2G
+
bool 'Symmetric multi-processing support' CONFIG_UML_SMP
define_bool CONFIG_SMP $CONFIG_UML_SMP
+if [ "$CONFIG_SMP" = "y" ]; then
+ int 'Maximum number of CPUs (2-32)' CONFIG_NR_CPUS 32
+fi
+
int 'Nesting level' CONFIG_NEST_LEVEL 0
int 'Kernel address space size (in .5G units)' CONFIG_KERNEL_HALF_GIGS 1
bool 'Highmem support' CONFIG_HIGHMEM
diff --git a/arch/um/drivers/chan_user.c b/arch/um/drivers/chan_user.c
index c1ac4d8fbb26..79879f30aef5 100644
--- a/arch/um/drivers/chan_user.c
+++ b/arch/um/drivers/chan_user.c
@@ -155,6 +155,8 @@ static void tracer_winch_handler(int sig)
errno);
}
+/* Called only by the tracing thread during initialization */
+
void setup_tracer_winch(void)
{
int err;
diff --git a/arch/um/drivers/harddog_kern.c b/arch/um/drivers/harddog_kern.c
index dd1214f116ab..efe03723db54 100644
--- a/arch/um/drivers/harddog_kern.c
+++ b/arch/um/drivers/harddog_kern.c
@@ -51,8 +51,8 @@
MODULE_LICENSE("GPL");
+/* Locked by the BKL in harddog_open and harddog_release */
static int timer_alive;
-
static int harddog_in_fd = -1;
static int harddog_out_fd = -1;
@@ -67,6 +67,7 @@ static int harddog_open(struct inode *inode, struct file *file)
int err;
char *sock = NULL;
+ lock_kernel();
if(timer_alive)
return -EBUSY;
#ifdef CONFIG_HARDDOG_NOWAYOUT
@@ -80,6 +81,7 @@ static int harddog_open(struct inode *inode, struct file *file)
if(err) return(err);
timer_alive = 1;
+ unlock_kernel();
return 0;
}
diff --git a/arch/um/drivers/hostaudio_kern.c b/arch/um/drivers/hostaudio_kern.c
index 5ae1c44294f6..d5c950b4bb83 100644
--- a/arch/um/drivers/hostaudio_kern.c
+++ b/arch/um/drivers/hostaudio_kern.c
@@ -15,6 +15,7 @@
#include "init.h"
#include "hostaudio.h"
+/* Only changed from linux_main at boot time */
char *dsp = HOSTAUDIO_DEV_DSP;
char *mixer = HOSTAUDIO_DEV_MIXER;
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index 50e9adfa5e75..837c07f41669 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -99,19 +99,27 @@ int line_write(struct line *lines, struct tty_struct *tty, const char *buf,
i = minor(tty->device) - tty->driver.minor_start;
line = &lines[i];
+ down(&line->sem);
if(line->head != line->tail){
local_irq_save(flags);
buffer_data(line, buf, len);
err = flush_buffer(line);
local_irq_restore(flags);
- if(err <= 0) return(len);
+ if(err <= 0)
+ goto out;
}
else {
n = write_chan(&line->chan_list, buf, len,
line->driver->write_irq);
- if(n < 0) return(n);
- if(n < len) buffer_data(line, buf + n, len - n);
+ if(n < 0){
+ len = n;
+ goto out;
+ }
+ if(n < len)
+ buffer_data(line, buf + n, len - n);
}
+ out:
+ up(&line->sem);
return(len);
}
@@ -249,6 +257,7 @@ void line_close(struct line *lines, struct tty_struct *tty)
else n = minor(tty->device) - tty->driver.minor_start;
line = &lines[n];
+ down(&line->sem);
line->count--;
/* I don't like this, but I can't think of anything better. What's
@@ -261,6 +270,7 @@ void line_close(struct line *lines, struct tty_struct *tty)
line->tty = NULL;
if(line->count == 0)
line_disable(line, -1);
+ up(&line->sem);
}
void close_lines(struct line *lines, int nlines)
@@ -343,8 +353,6 @@ void line_register_devfs(struct lines *set, struct line_driver *line_driver,
driver->write_room = line_write_room;
driver->init_termios = tty_std_termios;
- driver->refcount = &set->refcount;
-
if (tty_register_driver(driver))
panic("line_register_devfs : Couldn't register driver\n");
@@ -408,16 +416,18 @@ void winch_interrupt(int irq, void *data, struct pt_regs *unused)
reactivate_fd(winch->fd, WINCH_IRQ);
}
+DECLARE_MUTEX(winch_handler_sem);
LIST_HEAD(winch_handlers);
void register_winch_irq(int fd, int tty_fd, int pid, void *line)
{
struct winch *winch;
+ down(&winch_handler_sem);
winch = kmalloc(sizeof(*winch), GFP_KERNEL);
if(winch == NULL){
printk("register_winch_irq - kmalloc failed\n");
- return;
+ goto out;
}
*winch = ((struct winch) { list : LIST_HEAD_INIT(winch->list),
fd : fd,
@@ -429,6 +439,8 @@ void register_winch_irq(int fd, int tty_fd, int pid, void *line)
SA_INTERRUPT | SA_SHIRQ | SA_SAMPLE_RANDOM,
"winch", winch) < 0)
printk("register_winch_irq - failed to register IRQ\n");
+ out:
+ up(&winch_handler_sem);
}
static void winch_cleanup(void)
@@ -439,7 +451,6 @@ static void winch_cleanup(void)
list_for_each(ele, &winch_handlers){
winch = list_entry(ele, struct winch, list);
close(winch->fd);
- free_irq_by_fd(winch->fd);
if(winch->pid != -1) os_kill_process(winch->pid);
}
}
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index a90786aff133..c2822cb872c2 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -40,6 +40,11 @@ static struct notifier_block reboot_notifier = {
priority: 0,
};
+/* Safe without explicit locking for now. Tasklets provide their own
+ * locking, and the interrupt handler is safe because it can't interrupt
+ * itself and it can only happen on CPU 0.
+ */
+
LIST_HEAD(mc_requests);
void mc_work_proc(void *unused)
@@ -49,12 +54,12 @@ void mc_work_proc(void *unused)
int done;
do {
- save_flags(flags);
+ local_save_flags(flags);
req = list_entry(mc_requests.next, struct mconsole_entry,
list);
list_del(&req->list);
done = list_empty(&mc_requests);
- restore_flags(flags);
+ local_irq_restore(flags);
req->request.cmd->handler(&req->request);
kfree(req);
} while(!done);
@@ -152,6 +157,8 @@ void mconsole_stop(struct mc_request *req)
mconsole_reply(req, "", 0, 0);
}
+/* This list is populated by __initcall routines. */
+
LIST_HEAD(mconsole_devices);
void mconsole_register_dev(struct mc_device *new)
@@ -224,7 +231,10 @@ void mconsole_sysrq(struct mc_request *req)
}
#endif
-static char *notify_socket = NULL;
+/* Changed by mconsole_setup, which is __setup, and called before SMP is
+ * active.
+ */
+static char *notify_socket = NULL;
int mconsole_init(void)
{
@@ -301,6 +311,18 @@ static int create_proc_mconsole(void)
return(0);
}
+static spinlock_t notify_spinlock = SPIN_LOCK_UNLOCKED;
+
+void lock_notify(void)
+{
+ spin_lock(&notify_spinlock);
+}
+
+void unlock_notify(void)
+{
+ spin_unlock(&notify_spinlock);
+}
+
__initcall(create_proc_mconsole);
#define NOTIFY "=notify:"
diff --git a/arch/um/drivers/mconsole_user.c b/arch/um/drivers/mconsole_user.c
index d60be6eead37..11b09a96f454 100644
--- a/arch/um/drivers/mconsole_user.c
+++ b/arch/um/drivers/mconsole_user.c
@@ -30,6 +30,7 @@ static struct mconsole_command commands[] = {
{ "go", mconsole_go, 1 },
};
+/* Initialized in mconsole_init, which is an initcall */
char mconsole_socket_name[256];
int mconsole_reply_v0(struct mc_request *req, char *reply)
@@ -162,16 +163,21 @@ int mconsole_notify(char *sock_name, int type, const void *data, int len)
{
struct sockaddr_un target;
struct mconsole_notify packet;
- int n, err;
+ int n, err = 0;
+ lock_notify();
if(notify_sock < 0){
notify_sock = socket(PF_UNIX, SOCK_DGRAM, 0);
if(notify_sock < 0){
printk("mconsole_notify - socket failed, errno = %d\n",
errno);
- return(-errno);
+ err = -errno;
}
}
+ unlock_notify();
+
+ if(err)
+ return(err);
target.sun_family = AF_UNIX;
strcpy(target.sun_path, sock_name);
diff --git a/arch/um/drivers/mmapper_kern.c b/arch/um/drivers/mmapper_kern.c
index d03082be380d..dc2937a0ed2e 100644
--- a/arch/um/drivers/mmapper_kern.c
+++ b/arch/um/drivers/mmapper_kern.c
@@ -15,13 +15,14 @@
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/init.h>
+#include <linux/smp_lock.h>
#include <asm/uaccess.h>
#include <asm/irq.h>
-#include <asm/smplock.h>
#include <asm/pgtable.h>
#include "mem_user.h"
#include "user_util.h"
+/* These are set in mmapper_init, which is called at boot time */
static unsigned long mmapper_size;
static unsigned long p_buf = 0;
static char *v_buf = NULL;
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index a83f84c367d4..deef1a0a75e3 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -27,6 +27,7 @@
#include "init.h"
#include "irq_user.h"
+static spinlock_t opened_lock = SPIN_LOCK_UNLOCKED;
LIST_HEAD(opened);
static int uml_net_rx(struct net_device *dev)
@@ -118,7 +119,9 @@ static int uml_net_open(struct net_device *dev)
lp->tl.data = (unsigned long) &lp->user;
netif_start_queue(dev);
+ spin_lock(&opened_lock);
list_add(&lp->list, &opened);
+ spin_unlock(&opened_lock);
MOD_INC_USE_COUNT;
out:
spin_unlock(&lp->lock);
@@ -135,8 +138,10 @@ static int uml_net_close(struct net_device *dev)
free_irq(dev->irq, dev);
if(lp->close != NULL) (*lp->close)(lp->fd, &lp->user);
lp->fd = -1;
+ spin_lock(&opened_lock);
list_del(&lp->list);
-
+ spin_unlock(&opened_lock);
+
MOD_DEC_USE_COUNT;
spin_unlock(&lp->lock);
return 0;
@@ -245,6 +250,7 @@ void uml_net_user_timer_expire(unsigned long _conn)
#endif
}
+static spinlock_t devices_lock = SPIN_LOCK_UNLOCKED;
static struct list_head devices = LIST_HEAD_INIT(devices);
static int eth_configure(int n, void *init, char *mac,
@@ -261,7 +267,10 @@ static int eth_configure(int n, void *init, char *mac,
return(1);
}
+ spin_lock(&devices_lock);
list_add(&device->list, &devices);
+ spin_unlock(&devices_lock);
+
device->index = n;
size = transport->private_size + sizeof(struct uml_net_private) +
@@ -373,12 +382,16 @@ static struct uml_net *find_device(int n)
struct uml_net *device;
struct list_head *ele;
+ spin_lock(&devices_lock);
list_for_each(ele, &devices){
device = list_entry(ele, struct uml_net, list);
if(device->index == n)
- return(device);
+ goto out;
}
- return(NULL);
+ device = NULL;
+ out:
+ spin_unlock(&devices_lock);
+ return(device);
}
static int eth_parse(char *str, int *index_out, char **str_out)
@@ -418,8 +431,12 @@ struct eth_init {
int index;
};
+/* Filled in at boot time. Will need locking if the transports become
+ * modular.
+ */
struct list_head transports = LIST_HEAD_INIT(transports);
+/* Filled in during early boot */
struct list_head eth_cmd_line = LIST_HEAD_INIT(eth_cmd_line);
static int check_transport(struct transport *transport, char *eth, int n,
@@ -519,8 +536,6 @@ __uml_help(eth_setup,
" Configure a network device.\n\n"
);
-int ndev = 0;
-
static int eth_init(void)
{
struct list_head *ele, *next;
diff --git a/arch/um/drivers/port_kern.c b/arch/um/drivers/port_kern.c
index 0acb925a70f6..c9783f449d68 100644
--- a/arch/um/drivers/port_kern.c
+++ b/arch/um/drivers/port_kern.c
@@ -62,8 +62,6 @@ static void pipe_interrupt(int irq, void *data, struct pt_regs *regs)
up(&conn->port->sem);
}
-struct list_head ports = LIST_HEAD_INIT(ports);
-
static void port_interrupt(int irq, void *data, struct pt_regs *regs)
{
struct port_list *port = data;
@@ -107,6 +105,9 @@ static void port_interrupt(int irq, void *data, struct pt_regs *regs)
reactivate_fd(port->fd, ACCEPT_IRQ);
}
+DECLARE_MUTEX(ports_sem);
+struct list_head ports = LIST_HEAD_INIT(ports);
+
void *port_data(int port_num)
{
struct list_head *ele;
@@ -114,6 +115,7 @@ void *port_data(int port_num)
struct port_dev *dev;
int fd;
+ down(&ports_sem);
list_for_each(ele, &ports){
port = list_entry(ele, struct port_list, list);
if(port->port == port_num) goto found;
@@ -121,7 +123,7 @@ void *port_data(int port_num)
port = kmalloc(sizeof(struct port_list), GFP_KERNEL);
if(port == NULL){
printk(KERN_ERR "Allocation of port list failed\n");
- return(NULL);
+ goto out;
}
fd = port_listen_fd(port_num);
@@ -151,18 +153,21 @@ void *port_data(int port_num)
dev = kmalloc(sizeof(struct port_dev), GFP_KERNEL);
if(dev == NULL){
printk(KERN_ERR "Allocation of port device entry failed\n");
- return(NULL);
+ goto out;
}
*dev = ((struct port_dev) { port : port,
fd : -1,
helper_pid : -1 });
+ up(&ports_sem);
return(dev);
out_free:
kfree(port);
out_close:
os_close_file(fd);
+ out:
+ up(&ports_sem);
return(NULL);
}
@@ -184,7 +189,6 @@ static void free_port(void)
list_for_each(ele, &ports){
port = list_entry(ele, struct port_list, list);
- free_irq(ACCEPT_IRQ, port);
os_close_file(port->fd);
}
}
diff --git a/arch/um/drivers/ssl.c b/arch/um/drivers/ssl.c
index 5d52974e3726..1558c30bb963 100644
--- a/arch/um/drivers/ssl.c
+++ b/arch/um/drivers/ssl.c
@@ -24,10 +24,13 @@
static int ssl_version = 1;
-static struct tty_driver ssl_driver;
-
+/* Referenced only by tty_driver below - presumably it's locked correctly
+ * by the tty driver.
+ */
static int ssl_refcount = 0;
+static struct tty_driver ssl_driver;
+
#define NR_PORTS 64
void ssl_announce(char *dev_name, int dev)
@@ -58,7 +61,10 @@ static struct line_driver driver = {
symlink_to : "tts",
};
-static struct line serial_lines[NR_PORTS] =
+/* The array is initialized by line_init, which is an initcall. The
+ * individual elements are protected by individual semaphores.
+ */
+static struct line serial_lines[NR_PORTS] =
{ [0 ... NR_PORTS - 1] = LINE_INIT(CONFIG_SSL_CHAN, &driver) };
static struct lines lines = LINES_INIT(NR_PORTS);
@@ -153,6 +159,27 @@ void ssl_hangup(struct tty_struct *tty)
{
}
+static struct tty_driver ssl_driver = {
+ refcount : &ssl_refcount,
+ open : ssl_open,
+ close : ssl_close,
+ write : ssl_write,
+ put_char : ssl_put_char,
+ flush_chars : ssl_flush_chars,
+ chars_in_buffer : ssl_chars_in_buffer,
+ flush_buffer : ssl_flush_buffer,
+ ioctl : ssl_ioctl,
+ throttle : ssl_throttle,
+ unthrottle : ssl_unthrottle,
+ set_termios : ssl_set_termios,
+ stop : ssl_stop,
+ start : ssl_start,
+ hangup : ssl_hangup
+};
+
+/* Changed by ssl_init and referenced by ssl_exit, which are both serialized
+ * by being an initcall and exitcall, respectively.
+ */
static int ssl_init_done = 0;
int ssl_init(void)
@@ -162,25 +189,6 @@ int ssl_init(void)
printk(KERN_INFO "Initializing software serial port version %d\n",
ssl_version);
- ssl_driver = ((struct tty_driver)
- {
- refcount : &ssl_refcount,
- open : ssl_open,
- close : ssl_close,
- write : ssl_write,
- put_char : ssl_put_char,
- flush_chars : ssl_flush_chars,
- chars_in_buffer : ssl_chars_in_buffer,
- flush_buffer : ssl_flush_buffer,
- ioctl : ssl_ioctl,
- throttle : ssl_throttle,
- unthrottle : ssl_unthrottle,
- set_termios : ssl_set_termios,
- stop : ssl_stop,
- start : ssl_start,
- hangup : ssl_hangup
- });
-
line_register_devfs(&lines, &driver, &ssl_driver, serial_lines,
sizeof(serial_lines)/sizeof(serial_lines[0]));
diff --git a/arch/um/drivers/stdio_console.c b/arch/um/drivers/stdio_console.c
index 3ef550107a93..6d54a6226b4a 100644
--- a/arch/um/drivers/stdio_console.c
+++ b/arch/um/drivers/stdio_console.c
@@ -32,8 +32,14 @@
#define MAX_TTYS (8)
+/* Referenced only by tty_driver below - presumably it's locked correctly
+ * by the tty driver.
+ */
+
static struct tty_driver console_driver;
+static int console_refcount = 0;
+
static struct chan_ops init_console_ops = {
init : NULL,
open : NULL,
@@ -88,6 +94,9 @@ static struct line_driver driver = {
static struct lines console_lines = LINES_INIT(MAX_TTYS);
+/* The array is initialized by line_init, which is an initcall. The
+ * individual elements are protected by individual semaphores.
+ */
struct line vts[MAX_TTYS] = { LINE_INIT(CONFIG_CON_ZERO_CHAN, &driver),
[ 1 ... MAX_TTYS - 1 ] =
LINE_INIT(CONFIG_CON_CHAN, &driver) };
@@ -130,15 +139,6 @@ int stdio_init(void)
printk(KERN_INFO "Initializing stdio console driver\n");
- console_driver = ((struct tty_driver)
- {
- open : con_open,
- close : con_close,
- write : con_write,
- chars_in_buffer : chars_in_buffer,
- set_termios : set_termios
- });
-
line_register_devfs(&console_lines, &driver, &console_driver, vts,
sizeof(vts)/sizeof(vts[0]));
@@ -157,9 +157,20 @@ __initcall(stdio_init);
static void console_write(struct console *console, const char *string,
unsigned len)
{
+ if(con_init_done) down(&vts[console->index].sem);
console_write_chan(&vts[console->index].chan_list, string, len);
+ if(con_init_done) up(&vts[console->index].sem);
}
+static struct tty_driver console_driver = {
+ refcount : &console_refcount,
+ open : con_open,
+ close : con_close,
+ write : con_write,
+ chars_in_buffer : chars_in_buffer,
+ set_termios : set_termios
+};
+
static kdev_t console_device(struct console *c)
{
return mk_kdev(TTY_MAJOR, c->index);
@@ -193,7 +204,6 @@ __channel_help(console_chan_setup, "con");
static void console_exit(void)
{
if(!con_init_done) return;
- line_close(vts, NULL);
close_lines(vts, sizeof(vts)/sizeof(vts[0]));
}
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 36995c3f84f6..68c8e1ca81d1 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -25,6 +25,7 @@
#include "linux/vmalloc.h"
#include "linux/blkpg.h"
#include "linux/genhd.h"
+#include "linux/spinlock.h"
#include "asm/segment.h"
#include "asm/uaccess.h"
#include "asm/irq.h"
@@ -41,7 +42,9 @@
#include "2_5compat.h"
#include "os.h"
-static spinlock_t ubd_lock;
+static spinlock_t ubd_io_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t ubd_lock = SPIN_LOCK_UNLOCKED;
+
static void (*do_ubd)(void);
static int ubd_open(struct inode * inode, struct file * filp);
@@ -62,9 +65,12 @@ static struct block_device_operations ubd_blops = {
.revalidate = ubd_revalidate,
};
+/* Protected by the queue_lock */
static request_queue_t *ubd_queue;
+/* Protected by ubd_lock */
static int fake_major = 0;
+
static struct gendisk *ubd_gendisk[MAX_DEV];
static struct gendisk *fake_gendisk[MAX_DEV];
@@ -74,6 +80,9 @@ static struct gendisk *fake_gendisk[MAX_DEV];
#define OPEN_FLAGS ((struct openflags) { .r = 1, .w = 1, .s = 0, .c = 0 })
#endif
+/* Not protected - changed only in ubd_setup_common and then only to
+ * to enable O_SYNC.
+ */
static struct openflags global_openflags = OPEN_FLAGS;
struct cow {
@@ -130,12 +139,7 @@ static int ubd0_init(void)
__initcall(ubd0_init);
-static struct hd_driveid ubd_id = {
- .cyls = 0,
- .heads = 128,
- .sectors = 32,
-};
-
+/* Only changed by fake_ide_setup which is a setup */
static int fake_ide = 0;
static struct proc_dir_entry *proc_ide_root = NULL;
static struct proc_dir_entry *proc_ide = NULL;
@@ -161,7 +165,6 @@ static int proc_ide_read_media(char *page, char **start, off_t off, int count,
else len = count;
*start = page + off;
return len;
-
}
static void make_ide_entries(char *dev_name)
@@ -170,7 +173,9 @@ static void make_ide_entries(char *dev_name)
char name[64];
if(!fake_ide) return;
+
if(proc_ide_root == NULL) make_proc_ide();
+
dir = proc_mkdir(dev_name, proc_ide);
ent = create_proc_entry("media", S_IFREG|S_IRUGO, dir);
if(!ent) return;
@@ -199,7 +204,7 @@ static int ubd_setup_common(char *str, int *index_out)
{
struct openflags flags = global_openflags;
char *backing_file;
- int n;
+ int n, err;
if(index_out) *index_out = -1;
n = *str++;
@@ -224,12 +229,22 @@ static int ubd_setup_common(char *str, int *index_out)
return(1);
}
- fake_major = major;
+ err = 1;
+ spin_lock(&ubd_lock);
+ if(!fake_major_allowed){
+ printk(KERN_ERR "Can't assign a fake major twice\n");
+ goto out1;
+ }
+
+ fake_major = major;
fake_major_allowed = 0;
printk(KERN_INFO "Setting extra ubd major number to %d\n",
major);
- return(0);
+ err = 0;
+ out1:
+ spin_unlock(&ubd_lock);
+ return(err);
}
if(n < '0'){
@@ -247,9 +262,12 @@ static int ubd_setup_common(char *str, int *index_out)
return(1);
}
+ err = 1;
+ spin_lock(&ubd_lock);
+
if(ubd_dev[n].file != NULL){
printk(KERN_ERR "ubd_setup : device already configured\n");
- return(1);
+ goto out2;
}
if(index_out) *index_out = n;
@@ -264,8 +282,10 @@ static int ubd_setup_common(char *str, int *index_out)
}
if(*str++ != '='){
printk(KERN_ERR "ubd_setup : Expected '='\n");
- return(1);
+ goto out2;
}
+
+ err = 0;
backing_file = strchr(str, ',');
if(backing_file){
*backing_file = '\0';
@@ -276,7 +296,9 @@ static int ubd_setup_common(char *str, int *index_out)
ubd_dev[n].is_dir = 1;
ubd_dev[n].cow.file = backing_file;
ubd_dev[n].boot_openflags = flags;
- return(0);
+ out2:
+ spin_unlock(&ubd_lock);
+ return(err);
}
static int ubd_setup(char *str)
@@ -317,8 +339,12 @@ __uml_help(fakehd,
static void do_ubd_request(request_queue_t * q);
+/* Only changed by ubd_init, which is an initcall. */
int thread_fd = -1;
+/* Changed by ubd_handler, which is serialized because interrupts only
+ * happen on CPU 0.
+ */
int intr_count = 0;
static void ubd_finish(int error)
@@ -326,7 +352,9 @@ static void ubd_finish(int error)
int nsect;
if(error){
+ spin_lock(&ubd_io_lock);
end_request(CURRENT, 0);
+ spin_unlock(&ubd_io_lock);
return;
}
nsect = CURRENT->current_nr_sectors;
@@ -335,7 +363,9 @@ static void ubd_finish(int error)
CURRENT->errors = 0;
CURRENT->nr_sectors -= nsect;
CURRENT->current_nr_sectors = 0;
+ spin_lock(&ubd_io_lock);
end_request(CURRENT, 1);
+ spin_unlock(&ubd_io_lock);
}
static void ubd_handler(void)
@@ -349,9 +379,9 @@ static void ubd_handler(void)
if(n != sizeof(req)){
printk(KERN_ERR "Pid %d - spurious interrupt in ubd_handler, "
"errno = %d\n", os_getpid(), -n);
- spin_lock(&ubd_lock);
+ spin_lock(&ubd_io_lock);
end_request(CURRENT, 0);
- spin_unlock(&ubd_lock);
+ spin_unlock(&ubd_io_lock);
return;
}
@@ -359,11 +389,9 @@ static void ubd_handler(void)
(req.length != (CURRENT->current_nr_sectors) << 9))
panic("I/O op mismatch");
- spin_lock(&ubd_lock);
ubd_finish(req.error);
reactivate_fd(thread_fd, UBD_IRQ);
do_ubd_request(ubd_queue);
- spin_unlock(&ubd_lock);
}
static void ubd_intr(int irq, void *dev, struct pt_regs *unused)
@@ -371,6 +399,7 @@ static void ubd_intr(int irq, void *dev, struct pt_regs *unused)
ubd_handler();
}
+/* Only changed by ubd_init, which is an initcall. */
static int io_pid = -1;
void kill_io_thread(void)
@@ -380,8 +409,6 @@ void kill_io_thread(void)
__uml_exitcall(kill_io_thread);
-int sync = 0;
-
static int ubd_file_size(struct ubd *dev, __u64 *size_out)
{
char *file;
@@ -390,6 +417,7 @@ static int ubd_file_size(struct ubd *dev, __u64 *size_out)
return(os_file_size(file, size_out));
}
+/* Initialized in an initcall, and unchanged thereafter */
devfs_handle_t ubd_dir_handle;
devfs_handle_t ubd_fake_dir_handle;
@@ -402,14 +430,13 @@ static int ubd_add(int n)
u64 size;
if (!dev->file)
- return -1;
+ goto out;
- disk = alloc_disk();
+ disk = alloc_disk(1 << UBD_SHIFT);
if (!disk)
return -1;
disk->major = MAJOR_NR;
disk->first_minor = n << UBD_SHIFT;
- disk->minor_shift = UBD_SHIFT;
disk->fops = &ubd_blops;
if (fakehd_set)
sprintf(disk->disk_name, "hd%c", n + 'a');
@@ -417,14 +444,13 @@ static int ubd_add(int n)
sprintf(disk->disk_name, "ubd%d", n);
if (fake_major) {
- fake_disk = alloc_disk();
+ fake_disk = alloc_disk(1 << UBD_SHIFT);
if (!fake_disk) {
put_disk(disk);
return -1;
}
fake_disk->major = fake_major;
fake_disk->first_minor = n << UBD_SHIFT;
- fake_disk->minor_shift = UBD_SHIFT;
fake_disk->fops = &ubd_blops;
sprintf(fake_disk->disk_name, "ubd%d", n);
fake_gendisk[n] = fake_disk;
@@ -443,23 +469,32 @@ static int ubd_add(int n)
MAJOR_NR, n << UBD_SHIFT,
S_IFBLK | S_IRUSR | S_IWUSR | S_IRGRP |S_IWGRP,
&ubd_blops, NULL);
- add_disk(disk);
+ if(real == NULL)
+ goto out;
+ ubd_dev[n].real = real;
+
if (fake_major) {
fake = devfs_register(ubd_fake_dir_handle, name,
DEVFS_FL_REMOVABLE, fake_major,
n << UBD_SHIFT,
S_IFBLK | S_IRUSR | S_IWUSR | S_IRGRP |
S_IWGRP, &ubd_blops, NULL);
- add_disk(fake_disk);
- if(fake == NULL) return(-1);
+ if(fake == NULL)
+ goto out_unregister;
+
ubd_dev[n].fake = fake;
+ add_disk(fake_disk);
}
- if(real == NULL) return(-1);
- ubd_dev[n].real = real;
-
+ add_disk(disk);
make_ide_entries(disk->disk_name);
return(0);
+
+ out_unregister:
+ devfs_unregister(real);
+ ubd_dev[n].real = NULL;
+ out:
+ return(-1);
}
static int ubd_config(char *str)
@@ -478,24 +513,29 @@ static int ubd_config(char *str)
}
if(n == -1) return(0);
+ spin_lock(&ubd_lock);
err = ubd_add(n);
- if(err){
+ if(err)
ubd_dev[n].file = NULL;
- return(err);
- }
+ spin_unlock(&ubd_lock);
- return(0);
+ return(err);
}
static int ubd_remove(char *str)
{
struct ubd *dev;
- int n;
+ int n, err;
- if(!isdigit(*str)) return(-1);
+ if(!isdigit(*str))
+ return(-1);
n = *str - '0';
- if(n > MAX_DEV) return(-1);
+ if(n > MAX_DEV)
+ return(-1);
dev = &ubd_dev[n];
+
+ err = 0;
+ spin_lock(&ubd_lock);
del_gendisk(ubd_gendisk[n]);
put_disk(ubd_gendisk[n]);
ubd_gendisk[n] = NULL;
@@ -504,12 +544,20 @@ static int ubd_remove(char *str)
put_disk(fake_gendisk[n]);
fake_gendisk[n] = NULL;
}
- if(dev->file == NULL) return(0);
- if(dev->count > 0) return(-1);
- if(dev->real != NULL) devfs_unregister(dev->real);
- if(dev->fake != NULL) devfs_unregister(dev->fake);
+ if(dev->file == NULL)
+ goto out;
+ err = -1;
+ if(dev->count > 0)
+ goto out;
+ if(dev->real != NULL)
+ devfs_unregister(dev->real);
+ if(dev->fake != NULL)
+ devfs_unregister(dev->fake);
*dev = ((struct ubd) DEFAULT_UBD);
- return(0);
+ err = 0;
+ out:
+ spin_unlock(&ubd_lock);
+ return(err);
}
static struct mc_device ubd_mc = {
@@ -541,7 +589,7 @@ int ubd_init(void)
return -1;
}
ubd_queue = BLK_DEFAULT_QUEUE(MAJOR_NR);
- INIT_QUEUE(ubd_queue, do_ubd_request, &ubd_lock);
+ blk_init_queue(ubd_queue, do_ubd_request, &ubd_io_lock);
elevator_init(ubd_queue, &elevator_noop);
if(fake_major != 0){
char name[sizeof("ubd_nnn\0")];
@@ -566,7 +614,7 @@ int ubd_driver_init(void){
unsigned long stack;
int err;
- if(sync){
+ if(global_openflags.s){
printk(KERN_INFO "ubd : Synchronous mode\n");
return(0);
}
@@ -590,9 +638,9 @@ device_initcall(ubd_driver_init);
static void ubd_close(struct ubd *dev)
{
- close_fd(dev->fd);
+ os_close_file(dev->fd);
if(dev->cow.file != NULL) {
- close_fd(dev->cow.fd);
+ os_close_file(dev->cow.fd);
vfree(dev->cow.bitmap);
dev->cow.bitmap = NULL;
}
@@ -644,22 +692,17 @@ static int ubd_open_dev(struct ubd *dev)
}
return(0);
error:
- close_fd(dev->fd);
+ os_close_file(dev->fd);
return(err);
}
static int ubd_open(struct inode *inode, struct file *filp)
{
- struct ubd *dev;
- int n, offset, err;
-
- n = DEVICE_NR(inode->i_rdev);
- dev = &ubd_dev[n];
- if(n > MAX_DEV)
- return -ENODEV;
- offset = n << UBD_SHIFT;
+ int n = DEVICE_NR(inode->i_rdev);
+ struct ubd *dev = &ubd_dev[n];
+ int err;
if(dev->is_dir == 1)
- return(0);
+ goto out;
if(dev->count == 0){
dev->openflags = dev->boot_openflags;
@@ -668,36 +711,26 @@ static int ubd_open(struct inode *inode, struct file *filp)
if(err){
printk(KERN_ERR "ubd%d: Can't open \"%s\": "
"errno = %d\n", n, dev->file, -err);
- return(err);
+ goto out;
}
- if(err) return(err);
}
dev->count++;
if((filp->f_mode & FMODE_WRITE) && !dev->openflags.w){
if(--dev->count == 0) ubd_close(dev);
- return -EROFS;
+ err = -EROFS;
}
- return(0);
+ out:
+ return(err);
}
static int ubd_release(struct inode * inode, struct file * file)
{
- int n, offset;
-
- n = DEVICE_NR(inode->i_rdev);
- offset = n << UBD_SHIFT;
- if(n > MAX_DEV)
- return -ENODEV;
-
+ int n = DEVICE_NR(inode->i_rdev);
if(--ubd_dev[n].count == 0)
ubd_close(&ubd_dev[n]);
-
return(0);
}
-int cow_read = 0;
-int cow_write = 0;
-
void cowify_req(struct io_thread_req *req, struct ubd *dev)
{
int i, update_bitmap, sector = req->offset >> 9;
@@ -710,14 +743,12 @@ void cowify_req(struct io_thread_req *req, struct ubd *dev)
dev->cow.bitmap)){
ubd_set_bit(i, (unsigned char *)
&req->sector_mask);
- cow_read++;
}
}
}
else {
update_bitmap = 0;
for(i = 0; i < req->length >> 9; i++){
- cow_write++;
ubd_set_bit(i, (unsigned char *)
&req->sector_mask);
if(!ubd_test_bit(sector + i, (unsigned char *)
@@ -752,13 +783,17 @@ static int prepare_request(struct request *req, struct io_thread_req *io_req)
if(dev->is_dir){
strcpy(req->buffer, "HOSTFS:");
strcat(req->buffer, dev->file);
+ spin_lock(&ubd_io_lock);
end_request(req, 1);
+ spin_unlock(&ubd_io_lock);
return(1);
}
if((rq_data_dir(req) == WRITE) && !dev->openflags.w){
printk("Write attempted on readonly ubd device %d\n", n);
+ spin_lock(&ubd_io_lock);
end_request(req, 0);
+ spin_unlock(&ubd_io_lock);
return(1);
}
@@ -819,6 +854,12 @@ static int ubd_ioctl(struct inode * inode, struct file * file,
struct hd_geometry *loc = (struct hd_geometry *) arg;
struct ubd *dev;
int n, min, err;
+ struct hd_driveid ubd_id = {
+ .cyls = 0,
+ .heads = 128,
+ .sectors = 32,
+ };
+
if(!inode) return(-EINVAL);
min = minor(inode->i_rdev);
@@ -892,8 +933,11 @@ static int ubd_revalidate(kdev_t rdev)
n = minor(rdev) >> UBD_SHIFT;
dev = &ubd_dev[n];
+
+ err = 0;
+ spin_lock(&ubd_lock);
if(dev->is_dir)
- return(0);
+ goto out;
err = ubd_file_size(dev, &size);
if (!err) {
@@ -902,7 +946,8 @@ static int ubd_revalidate(kdev_t rdev)
set_capacity(fake_gendisk[n], size / 512);
dev->size = size;
}
-
+ out:
+ spin_unlock(&ubd_lock);
return err;
}
diff --git a/arch/um/drivers/ubd_user.c b/arch/um/drivers/ubd_user.c
index 59eb46ff73a2..8a4b6f52888c 100644
--- a/arch/um/drivers/ubd_user.c
+++ b/arch/um/drivers/ubd_user.c
@@ -533,8 +533,12 @@ void do_io(struct io_thread_req *req)
return;
}
+/* Changed in start_io_thread, which is serialized by being called only
+ * from ubd_init, which is an initcall.
+ */
int kernel_fd = -1;
+/* Only changed by the io thread */
int io_count = 0;
int io_thread(void *arg)
diff --git a/arch/um/drivers/xterm.c b/arch/um/drivers/xterm.c
index 9f4a6296fe66..d3f69887f3c2 100644
--- a/arch/um/drivers/xterm.c
+++ b/arch/um/drivers/xterm.c
@@ -44,6 +44,7 @@ void *xterm_init(char *str, int device, struct chan_opts *opts)
return(data);
}
+/* Only changed by xterm_setup, which is a setup */
static char *terminal_emulator = "xterm";
static char *title_switch = "-T";
static char *exec_switch = "-e";
diff --git a/arch/um/include/2_5compat.h b/arch/um/include/2_5compat.h
index 3802d4fafb48..6d36d9c30b91 100644
--- a/arch/um/include/2_5compat.h
+++ b/arch/um/include/2_5compat.h
@@ -20,8 +20,6 @@
next : NULL \
}
-#define INIT_QUEUE(queue, request, lock) blk_init_queue(queue, request, lock)
-
#define INIT_HARDSECT(arr, maj, sizes)
#define SET_PRI(task) do ; while(0)
diff --git a/arch/um/include/irq_user.h b/arch/um/include/irq_user.h
index 4852f2c3613e..52bed5175229 100644
--- a/arch/um/include/irq_user.h
+++ b/arch/um/include/irq_user.h
@@ -18,7 +18,9 @@ extern void forward_interrupts(int pid);
extern void init_irq_signals(int on_sigstack);
extern void forward_ipi(int fd, int pid);
extern void free_irq_later(int irq, void *dev_id);
-
+extern int activate_ipi(int fd, int pid);
+extern unsigned long irq_lock(void);
+extern void irq_unlock(unsigned long flags);
#endif
/*
diff --git a/arch/um/include/kern_util.h b/arch/um/include/kern_util.h
index 07511e1e7e6e..34e107d545da 100644
--- a/arch/um/include/kern_util.h
+++ b/arch/um/include/kern_util.h
@@ -50,12 +50,8 @@ extern int pid_to_processor_id(int pid);
extern void block_signals(void);
extern void unblock_signals(void);
extern void deliver_signals(void *t);
-extern void lock_syscall(void);
-extern void unlock_syscall(void);
-extern void lock_trap(void);
-extern void unlock_trap(void);
-extern void lock_pid(void);
-extern void unlock_pid(void);
+extern int next_syscall_index(int max);
+extern int next_trap_index(int max);
extern void default_idle(void);
extern void finish_fork(void);
extern void paging_init(void);
@@ -121,7 +117,7 @@ extern void arch_switch(void);
extern int is_valid_pid(int pid);
extern void free_irq(unsigned int, void *);
extern int um_in_interrupt(void);
-
+extern int cpu(void);
#endif
/*
diff --git a/arch/um/include/line.h b/arch/um/include/line.h
index 593355064fe7..4d45c270a80e 100644
--- a/arch/um/include/line.h
+++ b/arch/um/include/line.h
@@ -61,12 +61,9 @@ struct line {
struct lines {
int num;
- int refcount;
};
-#define LINES_INIT(n) \
- { num : n, \
- refcount : 0 }
+#define LINES_INIT(n) { num : n }
extern void line_interrupt(int irq, void *data, struct pt_regs *unused);
extern void line_write_interrupt(int irq, void *data, struct pt_regs *unused);
diff --git a/arch/um/include/mconsole.h b/arch/um/include/mconsole.h
index 192aab5b8e79..8f82ef7201ea 100644
--- a/arch/um/include/mconsole.h
+++ b/arch/um/include/mconsole.h
@@ -77,6 +77,8 @@ extern int mconsole_get_request(int fd, struct mc_request *req);
extern int mconsole_notify(char *sock_name, int type, const void *data,
int len);
extern char *mconsole_notify_socket(void);
+extern void lock_notify(void);
+extern void unlock_notify(void);
#endif
diff --git a/arch/um/include/sigio.h b/arch/um/include/sigio.h
index 23aef0140b7b..37d76e29a147 100644
--- a/arch/um/include/sigio.h
+++ b/arch/um/include/sigio.h
@@ -11,6 +11,8 @@ extern int register_sigio_fd(int fd);
extern int read_sigio_fd(int fd);
extern int add_sigio_fd(int fd, int read);
extern int ignore_sigio_fd(int fd);
+extern void sigio_lock(void);
+extern void sigio_unlock(void);
#endif
diff --git a/arch/um/include/tempfile.h b/arch/um/include/tempfile.h
new file mode 100644
index 000000000000..e36d9e0f5105
--- /dev/null
+++ b/arch/um/include/tempfile.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
+ * Licensed under the GPL
+ */
+
+#ifndef __TEMPFILE_H__
+#define __TEMPFILE_H__
+
+extern int make_tempfile(const char *template, char **tempname, int do_unlink);
+
+#endif
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-file-style: "linux"
+ * End:
+ */
diff --git a/arch/um/include/time_user.h b/arch/um/include/time_user.h
index d68caf278f05..d49a34f0bee8 100644
--- a/arch/um/include/time_user.h
+++ b/arch/um/include/time_user.h
@@ -7,11 +7,11 @@
#define __TIME_USER_H__
extern void timer(void);
-extern void get_profile_timer(void);
-extern void disable_profile_timer(void);
extern void switch_timers(int to_real);
extern void user_time_init(void);
-extern void set_timers(int set_signal);
extern void idle_sleep(int secs);
+extern void enable_timer(void);
+extern void time_lock(void);
+extern void time_unlock(void);
#endif
diff --git a/arch/um/include/user_util.h b/arch/um/include/user_util.h
index c95f3c45620c..5f50aed753b2 100644
--- a/arch/um/include/user_util.h
+++ b/arch/um/include/user_util.h
@@ -48,32 +48,21 @@ extern unsigned long brk_start;
extern int pty_output_sigio;
extern int pty_close_sigio;
-extern void *open_maps(void);
-extern void close_maps(void *fd);
-extern unsigned long get_brk(void);
extern void stop(void);
-extern int proc_start_thread(unsigned long ip, unsigned long sp);
extern void stack_protections(unsigned long address);
extern void task_protections(unsigned long address);
-extern void abandon_proc_space(int (*proc)(void *), unsigned long sp);
extern int signals(int (*init_proc)(void *), void *sp);
-extern int __personality(int);
extern int wait_for_stop(int pid, int sig, int cont_type, void *relay);
extern void *add_signal_handler(int sig, void (*handler)(int));
-extern void signal_init(void);
extern int start_fork_tramp(void *arg, unsigned long temp_stack,
int clone_flags, int (*tramp)(void *));
extern void trace_myself(void);
extern int clone_and_wait(int (*fn)(void *), void *arg, void *sp, int flags);
-extern int input_loop(void);
-extern void continue_execing_proc(int pid);
extern int linux_main(int argc, char **argv);
extern void remap_data(void *segment_start, void *segment_end, int w);
extern void set_cmdline(char *cmd);
extern void input_cb(void (*proc)(void *), void *arg, int arg_len);
-extern void setup_input(void);
extern int get_pty(void);
-extern void save_signal_state(int *sig_ptr);
extern void *um_kmalloc(int size);
extern int raw(int fd, int complain);
extern int switcheroo(int fd, int prot, void *from, void *to, int size);
@@ -82,15 +71,11 @@ extern void setup_hostinfo(void);
extern void add_arg(char *cmd_line, char *arg);
extern void init_new_thread(void *sig_stack, void (*usr1_handler)(int));
extern void attach_process(int pid);
-extern void calc_sigframe_size(void);
extern int fork_tramp(void *sig_stack);
extern void do_exec(int old_pid, int new_pid);
extern void tracer_panic(char *msg, ...);
-extern void close_fd(int);
-extern int make_tempfile(const char *template, char **tempname, int do_unlink);
extern char *get_umid(int only_if_set);
extern void do_longjmp(void *p);
-extern void term_handler(int sig);
extern void suspend_new_thread(int fd);
extern int detach(int pid, int sig);
extern int attach(int pid);
@@ -99,12 +84,9 @@ extern int cont(int pid);
extern void check_ptrace(void);
extern void check_sigio(void);
extern int run_kernel_thread(int (*fn)(void *), void *arg, void **jmp_ptr);
-extern int user_read(int fd, char *buf, int len);
-extern int user_write(int fd, char *buf, int len);
extern void write_sigio_workaround(void);
extern void arch_check_bugs(void);
extern int arch_handle_signal(int sig, struct uml_pt_regs *regs);
-extern unsigned long pid_pc(int pid);
extern int arch_fixup(unsigned long address, void *sc_ptr);
extern void forward_pending_sigio(int target);
diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
index 76d39acd2837..0fb628ac28ba 100644
--- a/arch/um/kernel/Makefile
+++ b/arch/um/kernel/Makefile
@@ -3,9 +3,9 @@ EXTRA_TARGETS := unmap_fin.o
obj-y = config.o exec_kern.o exec_user.o exitcode.o frame_kern.o frame.o \
helper.o init_task.o irq.o irq_user.o ksyms.o mem.o mem_user.o \
- process.o process_kern.o ptrace.o reboot.o resource.o setup.o \
- sigio_user.o sigio_kern.o signal_kern.o signal_user.o smp.o \
- syscall_kern.o syscall_user.o sysrq.o sys_call_table.o time.o \
+ process.o process_kern.o ptrace.o reboot.o resource.o sigio_user.o \
+ sigio_kern.o signal_kern.o signal_user.o smp.o syscall_kern.o \
+ syscall_user.o sysrq.o sys_call_table.o tempfile.o time.o \
time_kern.o tlb.o trap_kern.o trap_user.o uaccess_user.o um_arch.o \
umid.o user_util.o
@@ -14,8 +14,8 @@ obj-$(CONFIG_BLK_DEV_INITRD) += initrd_kern.o initrd_user.o
# user_syms.o not included here because Rules.make has its own ideas about
# building anything in export-objs
-USER_OBJS := $(filter %_user.o,$(obj-y)) config.o frame.o helper.o process.o \
- time.o tty_log.o umid.o user_util.o user_syms.o
+USER_OBJS := $(filter %_user.o,$(obj-y)) config.o helper.o process.o \
+ tempfile.o time.o tty_log.o umid.o user_util.o user_syms.o
USER_OBJS := $(foreach file,$(USER_OBJS),arch/um/kernel/$(file))
export-objs := ksyms.o process_kern.o signal_kern.o gprof_syms.o gmon_syms.o
@@ -51,6 +51,9 @@ arch/um/kernel/unmap.o: arch/um/kernel/unmap.c
arch/um/kernel/unmap_fin.o : arch/um/kernel/unmap.o
ld -r -o $@ $< -lc -L/usr/lib
+arch/um/kernel/frame.o: arch/um/kernel/frame.c
+ $(CC) $(CFLAGS_$(notdir $@)) -c -o $@ $<
+
QUOTE = 'my $$config=`cat $(TOPDIR)/.config`; $$config =~ s/"/\\"/g ; while(<STDIN>) { $$_ =~ s/CONFIG/$$config/; print $$_ }'
arch/um/kernel/config.c : arch/um/kernel/config.c.in $(TOPDIR)/.config
diff --git a/arch/um/kernel/exec_kern.c b/arch/um/kernel/exec_kern.c
index 34a2e527a3f8..353bfa4da610 100644
--- a/arch/um/kernel/exec_kern.c
+++ b/arch/um/kernel/exec_kern.c
@@ -17,6 +17,7 @@
#include "tlb.h"
#include "2_5compat.h"
#include "os.h"
+#include "time_user.h"
/* See comment above fork_tramp for why sigstop is defined and used like
* this
@@ -28,7 +29,6 @@ static int exec_tramp(void *sig_stack)
{
int sig = sigstop;
- block_signals();
init_new_thread(sig_stack, NULL);
kill(os_getpid(), sig);
return(0);
@@ -62,6 +62,7 @@ void flush_thread(void)
unprotect_stack((unsigned long) current->thread_info);
os_usr1_process(os_getpid());
+ enable_timer();
free_page(stack);
protect(uml_reserved, high_physmem - uml_reserved, 1, 1, 0, 1);
task_protections((unsigned long) current->thread_info);
diff --git a/arch/um/kernel/exitcode.c b/arch/um/kernel/exitcode.c
index 6c5c30e9c380..788f914d8510 100644
--- a/arch/um/kernel/exitcode.c
+++ b/arch/um/kernel/exitcode.c
@@ -8,6 +8,9 @@
#include "linux/proc_fs.h"
#include "asm/uaccess.h"
+/* If read and write race, the read will still atomically read a valid
+ * value.
+ */
int uml_exitcode = 0;
static int read_proc_exitcode(char *page, char **start, off_t off,
diff --git a/arch/um/kernel/frame.c b/arch/um/kernel/frame.c
index a5d77c0ee15d..28793041426a 100644
--- a/arch/um/kernel/frame.c
+++ b/arch/um/kernel/frame.c
@@ -130,6 +130,7 @@ static void child_common(void *sp, int size, sighandler_t handler, int flags)
os_stop_process(os_getpid());
}
+/* Changed only during early boot */
struct sc_frame signal_frame_sc;
struct sc_frame_raw {
@@ -142,6 +143,7 @@ struct sc_frame_raw {
struct arch_frame_data_raw arch;
};
+/* Changed only during early boot */
static struct sc_frame_raw *raw_sc = NULL;
static void sc_handler(int sig, struct sigcontext sc)
@@ -163,6 +165,7 @@ static int sc_child(void *arg)
return(-1);
}
+/* Changed only during early boot */
struct si_frame signal_frame_si;
struct si_frame_raw {
@@ -175,6 +178,7 @@ struct si_frame_raw {
unsigned long sp;
};
+/* Changed only during early boot */
static struct si_frame_raw *raw_si = NULL;
static void si_handler(int sig, siginfo_t *si)
diff --git a/arch/um/kernel/helper.c b/arch/um/kernel/helper.c
index 324e08974d33..5d8fb7bba2b1 100644
--- a/arch/um/kernel/helper.c
+++ b/arch/um/kernel/helper.c
@@ -22,6 +22,7 @@ struct helper_data {
int fd;
};
+/* Debugging aid, changed only from gdb */
int helper_pause = 0;
static void helper_hup(int sig)
diff --git a/arch/um/kernel/initrd_kern.c b/arch/um/kernel/initrd_kern.c
index dc6cff88b7a6..a8f7e70f1a17 100644
--- a/arch/um/kernel/initrd_kern.c
+++ b/arch/um/kernel/initrd_kern.c
@@ -13,6 +13,7 @@
#include "init.h"
#include "os.h"
+/* Changed by uml_initrd_setup, which is a setup */
static char *initrd __initdata = NULL;
static int __init read_initrd(void)
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index 865ecec8d905..56454f28f03b 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -78,6 +78,7 @@ struct hw_interrupt_type no_irq_type = {
end_none
};
+/* Not changed */
volatile unsigned long irq_err_count;
/*
@@ -87,6 +88,7 @@ volatile unsigned long irq_err_count;
int get_irq_list(char *buf)
{
int i, j;
+ unsigned long flags;
struct irqaction * action;
char *p = buf;
@@ -96,9 +98,10 @@ int get_irq_list(char *buf)
*p++ = '\n';
for (i = 0 ; i < NR_IRQS ; i++) {
+ spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
- continue;
+ goto end;
p += sprintf(p, "%3d: ",i);
#ifndef CONFIG_SMP
p += sprintf(p, "%10u ", kstat_irqs(i));
@@ -113,6 +116,8 @@ int get_irq_list(char *buf)
for (action=action->next; action; action = action->next)
p += sprintf(p, ", %s", action->name);
*p++ = '\n';
+ end:
+ spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
p += sprintf(p, "\n");
#ifdef notdef
@@ -548,11 +553,15 @@ void free_irq(unsigned int irq, void *dev_id)
}
}
+/* These are initialized by sysctl_init, which is called from init/main.c */
static struct proc_dir_entry * root_irq_dir;
static struct proc_dir_entry * irq_dir [NR_IRQS];
static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
-unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };
+/* These are read and written as longs, so a read won't see a partial write
+ * even during a race.
+ */
+static unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };
#define HEX_DIGITS 8
@@ -679,6 +688,7 @@ static void register_irq_proc (unsigned int irq)
smp_affinity_entry[irq] = entry;
}
+/* Read and written as a long */
unsigned long prof_cpu_mask = -1;
void __init init_irq_proc (void)
@@ -704,6 +714,21 @@ void __init init_irq_proc (void)
register_irq_proc(i);
}
+static spinlock_t irq_spinlock = SPIN_LOCK_UNLOCKED;
+
+unsigned long irq_lock(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&irq_spinlock, flags);
+ return(flags);
+}
+
+void irq_unlock(unsigned long flags)
+{
+ spin_unlock_irqrestore(&irq_spinlock, flags);
+}
+
unsigned long probe_irq_on(void)
{
return(0);
diff --git a/arch/um/kernel/irq_user.c b/arch/um/kernel/irq_user.c
index 32099fc7603b..ef56eef4f6c1 100644
--- a/arch/um/kernel/irq_user.c
+++ b/arch/um/kernel/irq_user.c
@@ -111,40 +111,20 @@ static void maybe_sigio_broken(int fd, int type)
int activate_fd(int irq, int fd, int type, void *dev_id)
{
- struct irq_fd *new_fd;
- int pid, retval, events, err;
+ struct pollfd *tmp_pfd;
+ struct irq_fd *new_fd, *irq_fd;
+ unsigned long flags;
+ int pid, events, err, n, size;
+
+ pid = os_getpid();
+ err = os_set_fd_async(fd, pid);
+ if(err < 0)
+ goto out;
- for(new_fd = active_fds;new_fd;new_fd = new_fd->next){
- if((new_fd->fd == fd) && (new_fd->type == type)){
- printk("Registering fd %d twice\n", fd);
- printk("Irqs : %d, %d\n", new_fd->irq, irq);
- printk("Ids : 0x%x, 0x%x\n", new_fd->id, dev_id);
- return(-EIO);
- }
- }
- pid = cpu_tasks[0].pid;
- if((retval = os_set_fd_async(fd, pid)) != 0)
- return(retval);
new_fd = um_kmalloc(sizeof(*new_fd));
err = -ENOMEM;
- if(new_fd == NULL) return(err);
- pollfds_num++;
- if(pollfds_num > pollfds_size){
- struct pollfd *tmp_pfd;
-
- tmp_pfd = um_kmalloc(pollfds_num * sizeof(pollfds[0]));
- if(tmp_pfd == NULL){
- pollfds_num--;
- goto out_irq;
- }
- if(pollfds != NULL){
- memcpy(tmp_pfd, pollfds,
- sizeof(pollfds[0]) * pollfds_size);
- kfree(pollfds);
- }
- pollfds = tmp_pfd;
- pollfds_size = pollfds_num;
- }
+ if(new_fd == NULL)
+ goto out;
if(type == IRQ_READ) events = POLLIN | POLLPRI;
else events = POLLOUT;
@@ -158,29 +138,90 @@ int activate_fd(int irq, int fd, int type, void *dev_id)
current_events: 0,
freed : 0 } );
- *last_irq_ptr = new_fd;
- last_irq_ptr = &new_fd->next;
+ /* Critical section - locked by a spinlock because this stuff can
+ * be changed from interrupt handlers. The stuff above is done
+ * outside the lock because it allocates memory.
+ */
+
+ /* Actually, it only looks like it can be called from interrupt
+ * context. The culprit is reactivate_fd, which calls
+ * maybe_sigio_broken, which calls write_sigio_workaround,
+ * which calls activate_fd. However, write_sigio_workaround should
+ * only be called once, at boot time. That would make it clear that
+ * this is called only from process context, and can be locked with
+ * a semaphore.
+ */
+ flags = irq_lock();
+ for(irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next){
+ if((irq_fd->fd == fd) && (irq_fd->type == type)){
+ printk("Registering fd %d twice\n", fd);
+ printk("Irqs : %d, %d\n", irq_fd->irq, irq);
+ printk("Ids : 0x%x, 0x%x\n", irq_fd->id, dev_id);
+ goto out_unlock;
+ }
+ }
+
+ n = pollfds_num;
+ if(n == pollfds_size){
+ while(1){
+ /* Here we have to drop the lock in order to call
+ * kmalloc, which might sleep. If something else
+ * came in and changed the pollfds array, we free
+ * the buffer and try again.
+ */
+ irq_unlock(flags);
+ size = (pollfds_num + 1) * sizeof(pollfds[0]);
+ tmp_pfd = um_kmalloc(size);
+ flags = irq_lock();
+ if(tmp_pfd == NULL)
+ goto out_unlock;
+ if(n == pollfds_size)
+ break;
+ kfree(tmp_pfd);
+ }
+ if(pollfds != NULL){
+ memcpy(tmp_pfd, pollfds,
+ sizeof(pollfds[0]) * pollfds_size);
+ kfree(pollfds);
+ }
+ pollfds = tmp_pfd;
+ pollfds_size++;
+ }
if(type == IRQ_WRITE) events = 0;
- pollfds[pollfds_num - 1] = ((struct pollfd) { fd : fd,
- events : events,
- revents : 0 });
+ pollfds[pollfds_num] = ((struct pollfd) { fd : fd,
+ events : events,
+ revents : 0 });
+ pollfds_num++;
+ *last_irq_ptr = new_fd;
+ last_irq_ptr = &new_fd->next;
+
+ irq_unlock(flags);
+
+ /* This calls activate_fd, so it has to be outside the critical
+ * section.
+ */
maybe_sigio_broken(fd, type);
return(0);
- out_irq:
+ out_unlock:
+ irq_unlock(flags);
+ out_free:
kfree(new_fd);
+ out:
return(err);
}
static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
{
struct irq_fd **prev;
+ unsigned long flags;
int i = 0;
+ flags = irq_lock();
prev = &active_fds;
while(*prev != NULL){
if((*test)(*prev, arg)){
@@ -190,7 +231,7 @@ static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
printk("free_irq_by_cb - mismatch between "
"active_fds and pollfds, fd %d vs %d\n",
(*prev)->fd, pollfds[i].fd);
- return;
+ goto out;
}
memcpy(&pollfds[i], &pollfds[i + 1],
(pollfds_num - i - 1) * sizeof(pollfds[0]));
@@ -206,6 +247,8 @@ static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
prev = &(*prev)->next;
i++;
}
+ out:
+ irq_unlock(flags);
}
struct irq_and_dev {
@@ -242,29 +285,33 @@ static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
{
struct irq_fd *irq;
int i = 0;
-
+
for(irq=active_fds; irq != NULL; irq = irq->next){
if((irq->fd == fd) && (irq->irq == irqnum)) break;
i++;
}
if(irq == NULL){
printk("find_irq_by_fd doesn't have descriptor %d\n", fd);
- return(NULL);
+ goto out;
}
if((pollfds[i].fd != -1) && (pollfds[i].fd != fd)){
printk("find_irq_by_fd - mismatch between active_fds and "
"pollfds, fd %d vs %d, need %d\n", irq->fd,
pollfds[i].fd, fd);
- return(NULL);
+ irq = NULL;
+ goto out;
}
*index_out = i;
+ out:
return(irq);
}
void free_irq_later(int irq, void *dev_id)
{
struct irq_fd *irq_fd;
+ unsigned long flags;
+ flags = irq_lock();
for(irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next){
if((irq_fd->irq == irq) && (irq_fd->id == dev_id))
break;
@@ -272,30 +319,48 @@ void free_irq_later(int irq, void *dev_id)
if(irq_fd == NULL){
printk("free_irq_later found no irq, irq = %d, "
"dev_id = 0x%p\n", irq, dev_id);
- return;
+ goto out;
}
irq_fd->freed = 1;
+ out:
+ irq_unlock(flags);
}
void reactivate_fd(int fd, int irqnum)
{
struct irq_fd *irq;
+ unsigned long flags;
int i;
+ flags = irq_lock();
irq = find_irq_by_fd(fd, irqnum, &i);
- if(irq == NULL) return;
+ if(irq == NULL){
+ irq_unlock(flags);
+ return;
+ }
pollfds[i].fd = irq->fd;
+
+ irq_unlock(flags);
+
+ /* This calls activate_fd, so it has to be outside the critical
+ * section.
+ */
maybe_sigio_broken(fd, irq->type);
}
void deactivate_fd(int fd, int irqnum)
{
struct irq_fd *irq;
+ unsigned long flags;
int i;
+ flags = irq_lock();
irq = find_irq_by_fd(fd, irqnum, &i);
- if(irq == NULL) return;
+ if(irq == NULL)
+ goto out;
pollfds[i].fd = -1;
+ out:
+ irq_unlock(flags);
}
void forward_ipi(int fd, int pid)
@@ -313,7 +378,9 @@ void forward_ipi(int fd, int pid)
void forward_interrupts(int pid)
{
struct irq_fd *irq;
+ unsigned long flags;
+ flags = irq_lock();
for(irq=active_fds;irq != NULL;irq = irq->next){
if(fcntl(irq->fd, F_SETOWN, pid) < 0){
int save_errno = errno;
@@ -328,6 +395,7 @@ void forward_interrupts(int pid)
}
irq->pid = pid;
}
+ irq_unlock(flags);
}
void init_irq_signals(int on_sigstack)
@@ -339,10 +407,10 @@ void init_irq_signals(int on_sigstack)
if(timer_irq_inited) h = (__sighandler_t) alarm_handler;
else h = boot_timer_handler;
- set_handler(SIGVTALRM, h, flags | SA_NODEFER | SA_RESTART,
- SIGUSR1, SIGIO, SIGWINCH, -1);
+ set_handler(SIGVTALRM, h, flags | SA_RESTART,
+ SIGUSR1, SIGIO, SIGWINCH, SIGALRM, -1);
set_handler(SIGIO, (__sighandler_t) sig_handler, flags | SA_RESTART,
- SIGUSR1, SIGIO, SIGWINCH, -1);
+ SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1);
signal(SIGWINCH, SIG_IGN);
}
diff --git a/arch/um/kernel/ksyms.c b/arch/um/kernel/ksyms.c
index dd6d9491bbed..a5cbc3bb257e 100644
--- a/arch/um/kernel/ksyms.c
+++ b/arch/um/kernel/ksyms.c
@@ -20,6 +20,7 @@ EXPORT_SYMBOL(__const_udelay);
EXPORT_SYMBOL(sys_waitpid);
EXPORT_SYMBOL(task_size);
EXPORT_SYMBOL(__do_copy_from_user);
+EXPORT_SYMBOL(__do_copy_to_user);
EXPORT_SYMBOL(__do_strncpy_from_user);
EXPORT_SYMBOL(__do_strnlen_user);
EXPORT_SYMBOL(flush_tlb_range);
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index da7f4b318255..5a582a8de7d1 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -26,33 +26,32 @@
#include "kern.h"
#include "init.h"
+/* Changed during early boot */
+pgd_t swapper_pg_dir[1024];
unsigned long high_physmem;
-unsigned long low_physmem;
-
unsigned long vm_start;
unsigned long vm_end;
-
unsigned long highmem;
-
-pgd_t swapper_pg_dir[1024];
-
unsigned long *empty_zero_page = NULL;
-
unsigned long *empty_bad_page = NULL;
+/* Not modified */
const char bad_pmd_string[] = "Bad pmd in pte_alloc: %08lx\n";
extern char __init_begin, __init_end;
extern long physmem_size;
+/* Not changed by UML */
mmu_gather_t mmu_gathers[NR_CPUS];
+/* Changed during early boot */
int kmalloc_ok = 0;
#define NREGIONS (phys_region_index(0xffffffff) - phys_region_index(0x0) + 1)
struct mem_region *regions[NREGIONS] = { [ 0 ... NREGIONS - 1 ] = NULL };
#define REGION_SIZE ((0xffffffff & ~REGION_MASK) + 1)
+/* Changed during early boot */
static unsigned long brk_end;
static void map_cb(void *unused)
@@ -108,6 +107,7 @@ void mem_init(void)
}
#if CONFIG_HIGHMEM
+/* Changed during early boot */
pte_t *kmap_pte;
pgprot_t kmap_prot;
@@ -187,18 +187,22 @@ int init_maps(struct mem_region *region)
return(0);
}
+DECLARE_MUTEX(regions_sem);
+
static int setup_one_range(int fd, char *driver, unsigned long start,
unsigned long pfn, int len,
struct mem_region *region)
{
int i;
+ down(&regions_sem);
for(i = 0; i < NREGIONS; i++){
if(regions[i] == NULL) break;
}
if(i == NREGIONS){
printk("setup_range : no free regions\n");
- return(-1);
+ i = -1;
+ goto out;
}
if(fd == -1)
@@ -216,6 +220,8 @@ static int setup_one_range(int fd, char *driver, unsigned long start,
len : len,
fd : fd } );
regions[i] = region;
+ out:
+ up(&regions_sem);
return(i);
}
@@ -373,7 +379,8 @@ void show_mem(void)
printk("%d pages swap cached\n", cached);
}
-unsigned long kmem_top = 0;
+/* Changed during early boot */
+static unsigned long kmem_top = 0;
unsigned long get_kmem_end(void)
{
@@ -428,8 +435,10 @@ struct page *arch_validate(struct page *page, int mask, int order)
goto again;
}
+DECLARE_MUTEX(vm_reserved_sem);
static struct list_head vm_reserved = LIST_HEAD_INIT(vm_reserved);
+/* Static structures, linked in to the list in early boot */
static struct vm_reserved head = {
list : LIST_HEAD_INIT(head.list),
start : 0,
@@ -455,7 +464,9 @@ int reserve_vm(unsigned long start, unsigned long end, void *e)
{
struct vm_reserved *entry = e, *reserved, *prev;
struct list_head *ele;
+ int err;
+ down(&vm_reserved_sem);
list_for_each(ele, &vm_reserved){
reserved = list_entry(ele, struct vm_reserved, list);
if(reserved->start >= end) goto found;
@@ -469,13 +480,17 @@ int reserve_vm(unsigned long start, unsigned long end, void *e)
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if(entry == NULL){
printk("reserve_vm : Failed to allocate entry\n");
- return(-ENOMEM);
+ err = -ENOMEM;
+ goto out;
}
*entry = ((struct vm_reserved)
{ list : LIST_HEAD_INIT(entry->list),
start : start,
end : end });
list_add(&entry->list, &prev->list);
+ err = 0;
+ out:
+ up(&vm_reserved_sem);
return(0);
}
@@ -486,6 +501,7 @@ unsigned long get_vm(unsigned long len)
unsigned long start;
int err;
+ down(&vm_reserved_sem);
list_for_each(ele, &vm_reserved){
this = list_entry(ele, struct vm_reserved, list);
next = list_entry(ele->next, struct vm_reserved, list);
@@ -493,8 +509,10 @@ unsigned long get_vm(unsigned long len)
(this->end + len + PAGE_SIZE <= next->start))
goto found;
}
+ up(&vm_reserved_sem);
return(0);
found:
+ up(&vm_reserved_sem);
start = (unsigned long) ROUND_UP(this->end) + PAGE_SIZE;
err = reserve_vm(start, start + len, NULL);
if(err) return(0);
@@ -533,7 +551,11 @@ struct iomem {
unsigned long size;
};
-struct iomem iomem_regions[NREGIONS] = { [ 0 ... NREGIONS - 1 ] =
+/* iomem regions can only be added on the command line at the moment.
+ * Locking will be needed when they can be added via mconsole.
+ */
+
+struct iomem iomem_regions[NREGIONS] = { [ 0 ... NREGIONS - 1 ] =
{ name : NULL,
fd : -1,
size : 0 } };
@@ -569,6 +591,7 @@ __initcall(setup_iomem);
#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
+/* Changed during early boot */
static struct mem_region physmem_region;
static struct vm_reserved physmem_reserved;
diff --git a/arch/um/kernel/mem_user.c b/arch/um/kernel/mem_user.c
index 8e036687f717..af857510d17a 100644
--- a/arch/um/kernel/mem_user.c
+++ b/arch/um/kernel/mem_user.c
@@ -46,10 +46,9 @@
#include "mem_user.h"
#include "init.h"
#include "os.h"
+#include "tempfile.h"
-struct mem_region physmem_region;
-
-struct mem_region *mem_list = &physmem_region;
+extern struct mem_region physmem_region;
#define TEMPNAME_TEMPLATE "vm_file-XXXXXX"
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index c3731bfd4547..d410aaa7332b 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -48,23 +48,23 @@ void init_new_thread(void *sig_stack, void (*usr1_handler)(int))
flags = SA_ONSTACK;
}
set_handler(SIGSEGV, (__sighandler_t) sig_handler, flags,
- SIGUSR1, SIGIO, SIGWINCH, -1);
+ SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1);
set_handler(SIGTRAP, (__sighandler_t) sig_handler, flags,
- SIGUSR1, SIGIO, SIGWINCH, -1);
+ SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1);
set_handler(SIGFPE, (__sighandler_t) sig_handler, flags,
- SIGUSR1, SIGIO, SIGWINCH, -1);
+ SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1);
set_handler(SIGILL, (__sighandler_t) sig_handler, flags,
- SIGUSR1, SIGIO, SIGWINCH, -1);
+ SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1);
set_handler(SIGBUS, (__sighandler_t) sig_handler, flags,
- SIGUSR1, SIGIO, SIGWINCH, -1);
+ SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1);
set_handler(SIGWINCH, (__sighandler_t) sig_handler, flags,
- SIGUSR1, SIGIO, SIGWINCH, -1);
+ SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1);
set_handler(SIGUSR2, (__sighandler_t) sig_handler,
SA_NOMASK | flags, -1);
if(usr1_handler) set_handler(SIGUSR1, usr1_handler, flags, -1);
signal(SIGCHLD, SIG_IGN);
signal(SIGHUP, SIG_IGN);
- set_timers(1); /* XXX A bit of a race here */
+
init_irq_signals(sig_stack != NULL);
}
diff --git a/arch/um/kernel/process_kern.c b/arch/um/kernel/process_kern.c
index 8bd714887a3f..79a5b35e9344 100644
--- a/arch/um/kernel/process_kern.c
+++ b/arch/um/kernel/process_kern.c
@@ -41,6 +41,10 @@
#include "2_5compat.h"
#include "os.h"
+/* This is a per-cpu array. A processor only modifies its entry and it only
+ * cares about its entry, so it's OK if another processor is modifying its
+ * entry.
+ */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };
struct task_struct *get_task(int pid, int require)
@@ -86,7 +90,7 @@ int pid_to_processor_id(int pid)
{
int i;
- for(i = 0; i < num_online_cpus(); i++){
+ for(i = 0; i < ncpus; i++){
if(cpu_tasks[i].pid == pid) return(i);
}
return(-1);
@@ -152,12 +156,19 @@ static void new_thread_handler(int sig)
current->thread.regs.regs.sc = (void *) (&sig + 1);
suspend_new_thread(current->thread.switch_pipe[0]);
+ block_signals();
+#ifdef CONFIG_SMP
+ schedule_tail(NULL);
+#endif
+ enable_timer();
free_page(current->thread.temp_stack);
set_cmdline("(kernel thread)");
force_flush_all();
current->thread.prev_sched = NULL;
change_sig(SIGUSR1, 1);
+ change_sig(SIGVTALRM, 1);
+ change_sig(SIGPROF, 1);
unblock_signals();
if(!run_kernel_thread(fn, arg, &current->thread.jmp))
do_exit(0);
@@ -165,7 +176,9 @@ static void new_thread_handler(int sig)
static int new_thread_proc(void *stack)
{
- block_signals();
+ change_sig(SIGIO, 0);
+ change_sig(SIGVTALRM, 0);
+ change_sig(SIGPROF, 0);
init_new_thread(stack, new_thread_handler);
os_usr1_process(os_getpid());
return(0);
@@ -204,6 +217,9 @@ void *switch_to(void *prev, void *next, void *last)
unsigned long flags;
int vtalrm, alrm, prof, err, cpu;
char c;
+ /* jailing and SMP are incompatible, so this doesn't need to be
+ * made per-cpu
+ */
static int reading;
from = prev;
@@ -229,7 +245,7 @@ void *switch_to(void *prev, void *next, void *last)
set_current(to);
reading = 0;
- err = user_write(to->thread.switch_pipe[1], &c, sizeof(c));
+ err = os_write_file(to->thread.switch_pipe[1], &c, sizeof(c));
if(err != sizeof(c))
panic("write of switch_pipe failed, errno = %d", -err);
@@ -237,7 +253,7 @@ void *switch_to(void *prev, void *next, void *last)
if((from->state == TASK_ZOMBIE) || (from->state == TASK_DEAD))
os_kill_process(os_getpid());
- err = user_read(from->thread.switch_pipe[0], &c, sizeof(c));
+ err = os_read_file(from->thread.switch_pipe[0], &c, sizeof(c));
if(err != sizeof(c))
panic("read of switch_pipe failed, errno = %d", -err);
@@ -298,13 +314,16 @@ void exit_thread(void)
* onto the signal frame.
*/
-extern int hit_me;
-
void finish_fork_handler(int sig)
{
current->thread.regs.regs.sc = (void *) (&sig + 1);
suspend_new_thread(current->thread.switch_pipe[0]);
-
+
+#ifdef CONFIG_SMP
+ schedule_tail(NULL);
+#endif
+ enable_timer();
+ change_sig(SIGVTALRM, 1);
force_flush_all();
if(current->mm != current->parent->mm)
protect(uml_reserved, high_physmem - uml_reserved, 1, 1, 0, 1);
@@ -313,7 +332,6 @@ void finish_fork_handler(int sig)
current->thread.prev_sched = NULL;
free_page(current->thread.temp_stack);
- block_signals();
change_sig(SIGUSR1, 0);
set_user_mode(current);
}
@@ -339,7 +357,9 @@ int fork_tramp(void *stack)
{
int sig = sigusr1;
- block_signals();
+ change_sig(SIGIO, 0);
+ change_sig(SIGVTALRM, 0);
+ change_sig(SIGPROF, 0);
init_new_thread(stack, finish_fork_handler);
kill(os_getpid(), sig);
@@ -474,7 +494,7 @@ int current_pid(void)
void default_idle(void)
{
- if(current->thread_info->cpu == 0) idle_timer();
+ idle_timer();
atomic_inc(&init_mm.mm_count);
current->mm = &init_mm;
@@ -644,6 +664,7 @@ char *uml_strdup(char *string)
return(new);
}
+/* Changed by jail_setup, which is a setup */
int jail = 0;
int __init jail_setup(char *line, int *add)
@@ -708,17 +729,14 @@ static void mprotect_kernel_mem(int w)
mprotect_kernel_vm(w);
}
-int jail_timer_off = 0;
-
+/* No SMP problems since jailing and SMP are incompatible */
void unprotect_kernel_mem(void)
{
mprotect_kernel_mem(1);
- jail_timer_off = 0;
}
void protect_kernel_mem(void)
{
- jail_timer_off = 1;
mprotect_kernel_mem(0);
}
@@ -749,9 +767,11 @@ void set_thread_sc(void *sc)
int smp_sigio_handler(void)
{
+ int cpu = current->thread_info->cpu;
#ifdef CONFIG_SMP
- IPI_handler(hard_smp_processor_id());
- if (hard_smp_processor_id() != 0) return(1);
+ IPI_handler(cpu);
+ if(cpu != 0)
+ return(1);
#endif
return(0);
}
@@ -761,6 +781,11 @@ int um_in_interrupt(void)
return(in_interrupt());
}
+int cpu(void)
+{
+ return(current->thread_info->cpu);
+}
+
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically
diff --git a/arch/um/kernel/sigio_kern.c b/arch/um/kernel/sigio_kern.c
index fc37b78b4009..02272e623614 100644
--- a/arch/um/kernel/sigio_kern.c
+++ b/arch/um/kernel/sigio_kern.c
@@ -11,6 +11,7 @@
#include "sigio.h"
#include "irq_user.h"
+/* Protected by sigio_lock() called from write_sigio_workaround */
static int sigio_irq_fd = -1;
void sigio_interrupt(int irq, void *data, struct pt_regs *unused)
@@ -31,6 +32,18 @@ int write_sigio_irq(int fd)
return(0);
}
+static spinlock_t sigio_spinlock = SPIN_LOCK_UNLOCKED;
+
+void sigio_lock(void)
+{
+ spin_lock(&sigio_spinlock);
+}
+
+void sigio_unlock(void)
+{
+ spin_unlock(&sigio_spinlock);
+}
+
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically
diff --git a/arch/um/kernel/sigio_user.c b/arch/um/kernel/sigio_user.c
index fd3b70704621..740608075b7c 100644
--- a/arch/um/kernel/sigio_user.c
+++ b/arch/um/kernel/sigio_user.c
@@ -21,9 +21,11 @@
#include "helper.h"
#include "os.h"
+/* Changed during early boot */
int pty_output_sigio = 0;
int pty_close_sigio = 0;
+/* Used as a flag during SIGIO testing early in boot */
static int got_sigio = 0;
void __init handler(int sig)
@@ -151,7 +153,15 @@ void __init check_sigio(void)
check_one_sigio(tty_close);
}
+/* Protected by sigio_lock(), also used by sigio_cleanup, which is an
+ * exitcall.
+ */
static int write_sigio_pid = -1;
+
+/* These arrays are initialized before the sigio thread is started, and
+ * the descriptors closed after it is killed. So, it can't see them change.
+ * On the UML side, they are changed under the sigio_lock.
+ */
static int write_sigio_fds[2] = { -1, -1 };
static int sigio_private[2] = { -1, -1 };
@@ -161,6 +171,9 @@ struct pollfds {
int used;
};
+/* Protected by sigio_lock(). Used by the sigio thread, but the UML thread
+ * synchronizes with it.
+ */
struct pollfds current_poll = {
poll : NULL,
size : 0,
@@ -217,8 +230,6 @@ static int write_sigio_thread(void *unused)
}
}
-/* XXX SMP locking needed here too */
-
static int need_poll(int n)
{
if(n <= next_poll.size){
@@ -260,25 +271,31 @@ static void update_thread(void)
set_signals(flags);
return;
fail:
+ sigio_lock();
if(write_sigio_pid != -1) kill(write_sigio_pid, SIGKILL);
write_sigio_pid = -1;
close(sigio_private[0]);
close(sigio_private[1]);
close(write_sigio_fds[0]);
close(write_sigio_fds[1]);
+ sigio_unlock();
set_signals(flags);
}
int add_sigio_fd(int fd, int read)
{
- int err, i, n, events;
+ int err = 0, i, n, events;
- for(i = 0; i < current_poll.used; i++)
- if(current_poll.poll[i].fd == fd) return(0);
+ sigio_lock();
+ for(i = 0; i < current_poll.used; i++){
+ if(current_poll.poll[i].fd == fd)
+ goto out;
+ }
n = current_poll.used + 1;
err = need_poll(n);
- if(err) return(err);
+ if(err)
+ goto out;
for(i = 0; i < current_poll.used; i++)
next_poll.poll[i] = current_poll.poll[i];
@@ -290,21 +307,26 @@ int add_sigio_fd(int fd, int read)
events : events,
revents : 0 });
update_thread();
- return(0);
+ out:
+ sigio_unlock();
+ return(err);
}
int ignore_sigio_fd(int fd)
{
struct pollfd *p;
- int err, i, n = 0;
+ int err = 0, i, n = 0;
+ sigio_lock();
for(i = 0; i < current_poll.used; i++){
if(current_poll.poll[i].fd == fd) break;
}
- if(i == current_poll.used) return(0);
+ if(i == current_poll.used)
+ goto out;
err = need_poll(current_poll.used - 1);
- if(err) return(err);
+ if(err)
+ goto out;
for(i = 0; i < current_poll.used; i++){
p = &current_poll.poll[i];
@@ -312,11 +334,14 @@ int ignore_sigio_fd(int fd)
}
if(n == i){
printk("ignore_sigio_fd : fd %d not found\n", fd);
- return(-1);
+ err = -1;
+ goto out;
}
update_thread();
- return(0);
+ out:
+ sigio_unlock();
+ return(err);
}
static int setup_initial_poll(int fd)
@@ -342,14 +367,15 @@ void write_sigio_workaround(void)
unsigned long stack;
int err;
- if(write_sigio_pid != -1) return;
+ sigio_lock();
+ if(write_sigio_pid != -1)
+ goto out;
- /* XXX This needs SMP locking */
err = os_pipe(write_sigio_fds, 1, 1);
if(err){
printk("write_sigio_workaround - os_pipe 1 failed, "
"errno = %d\n", -err);
- return;
+ goto out;
}
err = os_pipe(sigio_private, 1, 1);
if(err){
@@ -368,6 +394,8 @@ void write_sigio_workaround(void)
if(write_sigio_irq(write_sigio_fds[0]))
goto out_kill;
+ out:
+ sigio_unlock();
return;
out_kill:
@@ -379,6 +407,7 @@ void write_sigio_workaround(void)
out_close1:
close(write_sigio_fds[0]);
close(write_sigio_fds[1]);
+ sigio_unlock();
}
int read_sigio_fd(int fd)
diff --git a/arch/um/kernel/signal_user.c b/arch/um/kernel/signal_user.c
index b5b329614493..5e4ae282919a 100644
--- a/arch/um/kernel/signal_user.c
+++ b/arch/um/kernel/signal_user.c
@@ -19,8 +19,6 @@
#include "sysdep/sigcontext.h"
#include "sigcontext.h"
-extern int kern_timer_on;
-
void set_sigstack(void *sig_stack, int size)
{
stack_t stack;
@@ -57,7 +55,7 @@ int change_sig(int signal, int on)
sigemptyset(&sigset);
sigaddset(&sigset, signal);
sigprocmask(on ? SIG_UNBLOCK : SIG_BLOCK, &sigset, &old);
- return(sigismember(&old, signal));
+ return(!sigismember(&old, signal));
}
static void change_signals(int type)
@@ -65,12 +63,8 @@ static void change_signals(int type)
sigset_t mask;
sigemptyset(&mask);
- if(type == SIG_BLOCK) kern_timer_on = 0;
- else {
- kern_timer_on = 1;
- sigaddset(&mask, SIGVTALRM);
- sigaddset(&mask, SIGALRM);
- }
+ sigaddset(&mask, SIGVTALRM);
+ sigaddset(&mask, SIGALRM);
sigaddset(&mask, SIGIO);
sigaddset(&mask, SIGPROF);
if(sigprocmask(type, &mask, NULL) < 0)
@@ -97,7 +91,6 @@ static int disable_mask(sigset_t *mask)
sigs = sigismember(mask, SIGIO) ? 1 << SIGIO_BIT : 0;
sigs |= sigismember(mask, SIGVTALRM) ? 1 << SIGVTALRM_BIT : 0;
sigs |= sigismember(mask, SIGALRM) ? 1 << SIGVTALRM_BIT : 0;
- if(!kern_timer_on) sigs |= 1 << SIGVTALRM_BIT;
return(sigs);
}
@@ -116,21 +109,27 @@ int set_signals(int disable)
int ret;
sigemptyset(&mask);
- if(!(disable & (1 << SIGIO_BIT))) sigaddset(&mask, SIGIO);
+ if(!(disable & (1 << SIGIO_BIT)))
+ sigaddset(&mask, SIGIO);
if(!(disable & (1 << SIGVTALRM_BIT))){
- kern_timer_on = 1;
sigaddset(&mask, SIGVTALRM);
sigaddset(&mask, SIGALRM);
}
if(sigprocmask(SIG_UNBLOCK, &mask, &mask) < 0)
panic("Failed to enable signals");
+
ret = disable_mask(&mask);
+
sigemptyset(&mask);
- if(disable & (1 << SIGIO_BIT)) sigaddset(&mask, SIGIO);
- if(disable & (1 << SIGVTALRM_BIT))
- kern_timer_on = 0;
+ if(disable & (1 << SIGIO_BIT))
+ sigaddset(&mask, SIGIO);
+ if(disable & (1 << SIGVTALRM_BIT)){
+ sigaddset(&mask, SIGVTALRM);
+ sigaddset(&mask, SIGALRM);
+ }
if(sigprocmask(SIG_BLOCK, &mask, NULL) < 0)
panic("Failed to block signals");
+
return(ret);
}
diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c
index d53644005d4f..689db0e805d2 100644
--- a/arch/um/kernel/smp.c
+++ b/arch/um/kernel/smp.c
@@ -5,7 +5,7 @@
#include "linux/config.h"
-/* CPU online map */
+/* CPU online map, set by smp_boot_cpus */
unsigned long cpu_online_map = 1;
#ifdef CONFIG_SMP
@@ -21,25 +21,32 @@ unsigned long cpu_online_map = 1;
#include "user_util.h"
#include "kern_util.h"
#include "kern.h"
+#include "irq_user.h"
#include "os.h"
-/* The 'big kernel lock' */
-spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
-
-/* Per CPU bogomips and other parameters */
+/* Per CPU bogomips and other parameters
+ * The only piece used here is the ipi pipe, which is set before SMP is
+ * started and never changed.
+ */
struct cpuinfo_um cpu_data[NR_CPUS];
spinlock_t um_bh_lock = SPIN_LOCK_UNLOCKED;
atomic_t global_bh_count;
+/* Not used by UML */
unsigned char global_irq_holder = NO_PROC_ID;
unsigned volatile long global_irq_lock;
/* Set when the idlers are all forked */
int smp_threads_ready = 0;
+
+/* A statistic, can be a little off */
int num_reschedules_sent = 0;
+/* Small, random number, never changed */
+unsigned long cache_decay_ticks = 5;
+
void smp_send_reschedule(int cpu)
{
write(cpu_data[cpu].ipi_pipe[1], "R", 1);
@@ -83,30 +90,24 @@ void synchronize_bh(void)
void smp_send_stop(void)
{
- printk(KERN_INFO "Stopping all CPUs\n");
-}
+ int i;
+ printk(KERN_INFO "Stopping all CPUs...");
+ for(i = 0; i < num_online_cpus(); i++){
+ if(i == current->thread_info->cpu)
+ continue;
+ write(cpu_data[i].ipi_pipe[1], "S", 1);
+ }
+ printk("done\n");
+}
-static atomic_t smp_commenced = ATOMIC_INIT(0);
+static unsigned long smp_commenced_mask;
static volatile unsigned long smp_callin_map = 0;
-void smp_commence(void)
+static int idle_proc(void *cpup)
{
- printk("All CPUs are go!\n");
-
- wmb();
- atomic_set(&smp_commenced, 1);
-}
-
-static int idle_proc(void *unused)
-{
- int cpu, err;
-
- set_current(current);
- del_from_runqueue(current);
- unhash_process(current);
+ int cpu = (int) cpup, err;
- cpu = current->processor;
err = os_pipe(cpu_data[cpu].ipi_pipe, 1, 1);
if(err)
panic("CPU#%d failed to create IPI pipe, errno = %d", cpu,
@@ -115,46 +116,41 @@ static int idle_proc(void *unused)
activate_ipi(cpu_data[cpu].ipi_pipe[0], current->thread.extern_pid);
wmb();
- if (test_and_set_bit(current->processor, &smp_callin_map)) {
- printk("huh, CPU#%d already present??\n", current->processor);
+ if (test_and_set_bit(cpu, &smp_callin_map)) {
+ printk("huh, CPU#%d already present??\n", cpu);
BUG();
}
- while (!atomic_read(&smp_commenced))
+ while (!test_bit(cpu, &smp_commenced_mask))
cpu_relax();
- init_idle();
+ set_bit(cpu, &cpu_online_map);
default_idle();
return(0);
}
-int inited_cpus = 1;
-
-static int idle_thread(int (*fn)(void *), int cpu)
+static struct task_struct *idle_thread(int cpu)
{
- struct task_struct *p;
- int pid;
+ struct task_struct *new_task;
unsigned char c;
- current->thread.request.u.thread.proc = fn;
- current->thread.request.u.thread.arg = NULL;
- p = do_fork(CLONE_VM | CLONE_PID, 0, NULL, 0);
- if(IS_ERR(p)) panic("do_fork failed in idle_thread");
-
- cpu_tasks[cpu].pid = p->thread.extern_pid;
- cpu_tasks[cpu].task = p;
- inited_cpus++;
- init_tasks[cpu] = p;
- p->processor = cpu;
- p->cpus_allowed = 1 << cpu;
- p->cpus_runnable = p->cpus_allowed;
- write(p->thread.switch_pipe[1], &c, sizeof(c));
- return(p->thread.extern_pid);
+ current->thread.request.u.thread.proc = idle_proc;
+ current->thread.request.u.thread.arg = (void *) cpu;
+ new_task = do_fork(CLONE_VM | CLONE_IDLETASK, 0, NULL, 0, NULL);
+ if(IS_ERR(new_task)) panic("do_fork failed in idle_thread");
+
+ cpu_tasks[cpu] = ((struct cpu_task)
+ { .pid = new_task->thread.extern_pid,
+ .task = new_task } );
+ write(new_task->thread.switch_pipe[1], &c, sizeof(c));
+ return(new_task);
}
-void smp_boot_cpus(void)
+void smp_prepare_cpus(unsigned int maxcpus)
{
- int err;
+ struct task_struct *idle;
+ unsigned long waittime;
+ int err, cpu;
set_bit(0, &cpu_online_map);
set_bit(0, &smp_callin_map);
@@ -164,46 +160,32 @@ void smp_boot_cpus(void)
activate_ipi(cpu_data[0].ipi_pipe[0], current->thread.extern_pid);
- if(ncpus < 1){
- printk(KERN_INFO "ncpus set to 1\n");
- ncpus = 1;
- }
- else if(ncpus > NR_CPUS){
- printk(KERN_INFO
- "ncpus can't be greater than NR_CPUS, set to %d\n",
- NR_CPUS);
- ncpus = NR_CPUS;
- }
-
- if(ncpus > 1){
- int i, pid;
-
- printk(KERN_INFO "Starting up other processors:\n");
- for(i=1;i<ncpus;i++){
- int waittime;
-
- /* Do this early, for hard_smp_processor_id() */
- cpu_tasks[i].pid = -1;
- set_bit(i, &cpu_online_map);
+ for(cpu = 1; cpu < ncpus; cpu++){
+ printk("Booting processor %d...\n", cpu);
+
+ idle = idle_thread(cpu);
- pid = idle_thread(idle_proc, i);
- printk(KERN_INFO "\t#%d - idle thread pid = %d.. ",
- i, pid);
+ init_idle(idle, cpu);
+ unhash_process(idle);
- waittime = 200000000;
- while (waittime-- && !test_bit(i, &smp_callin_map))
- cpu_relax();
+ waittime = 200000000;
+ while (waittime-- && !test_bit(cpu, &smp_callin_map))
+ cpu_relax();
- if (test_bit(i, &smp_callin_map))
- printk("online\n");
- else {
- printk("failed\n");
- clear_bit(i, &cpu_online_map);
- }
- }
+ if (test_bit(cpu, &smp_callin_map))
+ printk("done\n");
+ else printk("failed\n");
}
}
+int __cpu_up(unsigned int cpu)
+{
+ set_bit(cpu, &smp_commenced_mask);
+ while (!test_bit(cpu, &cpu_online_map))
+ mb();
+ return(0);
+}
+
int setup_profiling_timer(unsigned int multiplier)
{
printk(KERN_INFO "setup_profiling_timer\n");
@@ -225,7 +207,13 @@ void IPI_handler(int cpu)
break;
case 'R':
- current->need_resched = 1;
+ set_tsk_need_resched(current);
+ break;
+
+ case 'S':
+ printk("CPU#%d stopping\n", cpu);
+ while(1)
+ pause();
break;
default:
@@ -269,7 +257,8 @@ int smp_call_function(void (*_func)(void *info), void *_info, int nonatomic,
info = _info;
for (i=0;i<NR_CPUS;i++)
- if (i != current->processor && test_bit(i, &cpu_online_map))
+ if((i != current->thread_info->cpu) &&
+ test_bit(i, &cpu_online_map))
write(cpu_data[i].ipi_pipe[1], "C", 1);
while (atomic_read(&scf_started) != cpus)
diff --git a/arch/um/kernel/syscall_kern.c b/arch/um/kernel/syscall_kern.c
index 1aea2a006788..b15340a6b837 100644
--- a/arch/um/kernel/syscall_kern.c
+++ b/arch/um/kernel/syscall_kern.c
@@ -384,6 +384,7 @@ static int check_bogosity(struct pt_regs *regs)
return(0);
}
+/* Unlocked, I don't care if this is a bit off */
int nsyscalls = 0;
extern syscall_handler_t *sys_call_table[];
@@ -417,14 +418,18 @@ long execute_syscall(void *r)
spinlock_t syscall_lock = SPIN_LOCK_UNLOCKED;
-void lock_syscall(void)
-{
- spin_lock(&syscall_lock);
-}
+static int syscall_index = 0;
-void unlock_syscall(void)
+int next_syscall_index(int limit)
{
+ int ret;
+
+ spin_lock(&syscall_lock);
+ ret = syscall_index;
+ if(++syscall_index == limit)
+ syscall_index = 0;
spin_unlock(&syscall_lock);
+ return(ret);
}
/*
diff --git a/arch/um/kernel/syscall_user.c b/arch/um/kernel/syscall_user.c
index 921045f804b7..5da5aefce6d8 100644
--- a/arch/um/kernel/syscall_user.c
+++ b/arch/um/kernel/syscall_user.c
@@ -34,21 +34,14 @@ struct {
struct timeval end;
} syscall_record[1024];
-int syscall_index = 0;
-
-extern int kern_timer_on;
-
void syscall_handler(int sig, struct uml_pt_regs *regs)
{
void *sc;
long result;
- int index, syscall;
+ int index, max, syscall;
- lock_syscall();
- if(syscall_index == 1024) syscall_index = 0;
- index = syscall_index;
- syscall_index++;
- unlock_syscall();
+ max = sizeof(syscall_record)/sizeof(syscall_record[0]);
+ index = next_syscall_index(max);
syscall = regs->syscall;
sc = regs->sc;
diff --git a/arch/um/kernel/tempfile.c b/arch/um/kernel/tempfile.c
new file mode 100644
index 000000000000..0869e1a85ec9
--- /dev/null
+++ b/arch/um/kernel/tempfile.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
+ * Licensed under the GPL
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/param.h>
+#include "init.h"
+
+char *tempdir = NULL;
+
+static void __init find_tempdir(void)
+{
+ char *dirs[] = { "TMP", "TEMP", "TMPDIR", NULL };
+ int i;
+ char *dir = NULL;
+
+ if(tempdir != NULL) return; /* We've already been called */
+ for(i = 0; dirs[i]; i++){
+ dir = getenv(dirs[i]);
+ if(dir != NULL) break;
+ }
+ if(dir == NULL) dir = "/tmp";
+ else if(*dir == '\0') dir = NULL;
+ if(dir != NULL) {
+ tempdir = malloc(strlen(dir) + 2);
+ if(tempdir == NULL){
+ fprintf(stderr, "Failed to malloc tempdir, "
+ "errno = %d\n", errno);
+ return;
+ }
+ strcpy(tempdir, dir);
+ strcat(tempdir, "/");
+ }
+}
+
+int make_tempfile(const char *template, char **out_tempname, int do_unlink)
+{
+ char tempname[MAXPATHLEN];
+ int fd;
+
+ find_tempdir();
+ if (*template != '/')
+ strcpy(tempname, tempdir);
+ else
+ *tempname = 0;
+ strcat(tempname, template);
+ if((fd = mkstemp(tempname)) < 0){
+ fprintf(stderr, "open - cannot create %s: %s\n", tempname,
+ strerror(errno));
+ return -1;
+ }
+ if(do_unlink && (unlink(tempname) < 0)){
+ perror("unlink");
+ return -1;
+ }
+ if(out_tempname){
+ if((*out_tempname = strdup(tempname)) == NULL){
+ perror("strdup");
+ return -1;
+ }
+ }
+ return(fd);
+}
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-file-style: "linux"
+ * End:
+ */
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index a9d4669132de..2ab57626b43f 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -14,33 +14,15 @@
#include "user.h"
#include "process.h"
#include "signal_user.h"
+#include "time_user.h"
extern struct timeval xtime;
-void timer_handler(int sig, struct uml_pt_regs *regs)
-{
- timer_irq(regs);
-}
-
void timer(void)
{
gettimeofday(&xtime, NULL);
}
-static struct itimerval profile_interval;
-
-void get_profile_timer(void)
-{
- getitimer(ITIMER_PROF, &profile_interval);
- profile_interval.it_value = profile_interval.it_interval;
-}
-
-void disable_profile_timer(void)
-{
- struct itimerval interval = ((struct itimerval) { { 0, 0 }, { 0, 0 }});
- setitimer(ITIMER_PROF, &interval, NULL);
-}
-
static void set_interval(int timer_type)
{
struct itimerval interval;
@@ -53,6 +35,15 @@ static void set_interval(int timer_type)
panic("setitimer failed - errno = %d\n", errno);
}
+void enable_timer(void)
+{
+ struct itimerval enable = ((struct itimerval) { { 0, 1000000/hz() },
+ { 0, 1000000/hz() }});
+ if(setitimer(ITIMER_VIRTUAL, &enable, NULL))
+ printk("enable_timer - setitimer failed, errno = %d\n",
+ errno);
+}
+
void switch_timers(int to_real)
{
struct itimerval disable = ((struct itimerval) { { 0, 0 }, { 0, 0 }});
@@ -79,8 +70,9 @@ void idle_timer(void)
{
if(signal(SIGVTALRM, SIG_IGN) == SIG_ERR)
panic("Couldn't unset SIGVTALRM handler");
+
set_handler(SIGALRM, (__sighandler_t) alarm_handler,
- SA_NODEFER | SA_RESTART, SIGUSR1, SIGIO, SIGWINCH, -1);
+ SA_RESTART, SIGUSR1, SIGIO, SIGWINCH, SIGVTALRM, -1);
set_interval(ITIMER_REAL);
}
@@ -98,28 +90,24 @@ void time_init(void)
set_interval(ITIMER_VIRTUAL);
}
-void set_timers(int set_signal)
-{
- if(set_signal)
- set_interval(ITIMER_VIRTUAL);
- if(setitimer(ITIMER_PROF, &profile_interval, NULL) == -1)
- panic("setitimer ITIMER_PROF failed - errno = %d\n", errno);
-}
-
struct timeval local_offset = { 0, 0 };
void do_gettimeofday(struct timeval *tv)
{
+ time_lock();
gettimeofday(tv, NULL);
timeradd(tv, &local_offset, tv);
+ time_unlock();
}
void do_settimeofday(struct timeval *tv)
{
struct timeval now;
+ time_lock();
gettimeofday(&now, NULL);
timersub(tv, &now, &local_offset);
+ time_unlock();
}
void idle_sleep(int secs)
diff --git a/arch/um/kernel/time_kern.c b/arch/um/kernel/time_kern.c
index 4c96bd9637e7..93199e99c6f8 100644
--- a/arch/um/kernel/time_kern.c
+++ b/arch/um/kernel/time_kern.c
@@ -27,21 +27,21 @@ int hz(void)
return(HZ);
}
+/* Changed at early boot */
int timer_irq_inited = 0;
-/* kern_timer_on and missed_ticks are modified after kernel memory has been
+/* missed_ticks will be modified after kernel memory has been
* write-protected, so this puts it in a section which will be left
* write-enabled.
*/
-int __attribute__ ((__section__ (".unprotected"))) kern_timer_on = 0;
-int __attribute__ ((__section__ (".unprotected"))) missed_ticks = 0;
+int __attribute__ ((__section__ (".unprotected"))) missed_ticks[NR_CPUS];
void timer_irq(struct uml_pt_regs *regs)
{
- int ticks = missed_ticks;
+ int cpu = current->thread_info->cpu, ticks = missed_ticks[cpu];
if(!timer_irq_inited) return;
- missed_ticks = 0;
+ missed_ticks[cpu] = 0;
while(ticks--) do_IRQ(TIMER_IRQ, regs);
}
@@ -86,6 +86,7 @@ long um_stime(int * tptr)
return 0;
}
+/* XXX Needs to be moved under sys-i386 */
void __delay(um_udelay_t time)
{
/* Stolen from the i386 __loop_delay */
@@ -116,6 +117,27 @@ void __const_udelay(um_udelay_t usecs)
for(i=0;i<n;i++) ;
}
+void timer_handler(int sig, struct uml_pt_regs *regs)
+{
+#ifdef CONFIG_SMP
+ update_process_times(user_context(UPT_SP(regs)));
+#endif
+ if(current->thread_info->cpu == 0)
+ timer_irq(regs);
+}
+
+static spinlock_t timer_spinlock = SPIN_LOCK_UNLOCKED;
+
+void time_lock(void)
+{
+ spin_lock(&timer_spinlock);
+}
+
+void time_unlock(void)
+{
+ spin_unlock(&timer_spinlock);
+}
+
int __init timer_init(void)
{
int err;
diff --git a/arch/um/kernel/trap_kern.c b/arch/um/kernel/trap_kern.c
index 8f104d7a1853..158ea30937b3 100644
--- a/arch/um/kernel/trap_kern.c
+++ b/arch/um/kernel/trap_kern.c
@@ -171,14 +171,18 @@ void trap_init(void)
spinlock_t trap_lock = SPIN_LOCK_UNLOCKED;
-void lock_trap(void)
-{
- spin_lock(&trap_lock);
-}
+static int trap_index = 0;
-void unlock_trap(void)
+int next_trap_index(int limit)
{
+ int ret;
+
+ spin_lock(&trap_lock);
+ ret = trap_index;
+ if(++trap_index == limit)
+ trap_index = 0;
spin_unlock(&trap_lock);
+ return(ret);
}
extern int debugger_pid;
@@ -209,6 +213,7 @@ static struct chan_opts opts = {
tramp_stack : 0,
};
+/* Accessed by the tracing thread, which automatically serializes access */
static void *xterm_data;
static int xterm_fd;
diff --git a/arch/um/kernel/trap_user.c b/arch/um/kernel/trap_user.c
index 90a6df7a43ec..8971a61c8310 100644
--- a/arch/um/kernel/trap_user.c
+++ b/arch/um/kernel/trap_user.c
@@ -70,10 +70,10 @@ void kill_child_dead(int pid)
while(waitpid(pid, NULL, 0) > 0) kill(pid, SIGCONT);
}
+/* Changed early in boot, and then only read */
int debug = 0;
int debug_stop = 1;
int debug_parent = 0;
-
int honeypot = 0;
static int signal_tramp(void *arg)
@@ -90,7 +90,6 @@ static int signal_tramp(void *arg)
signal(SIGUSR1, SIG_IGN);
change_sig(SIGCHLD, 0);
signal(SIGSEGV, (__sighandler_t) sig_handler);
- set_timers(0);
set_cmdline("(idle thread)");
set_init_pid(os_getpid());
proc = arg;
@@ -99,6 +98,7 @@ static int signal_tramp(void *arg)
static void last_ditch_exit(int sig)
{
+ kmalloc_ok = 0;
signal(SIGINT, SIG_DFL);
signal(SIGTERM, SIG_DFL);
signal(SIGHUP, SIG_DFL);
@@ -142,33 +142,20 @@ static void sleeping_process_signal(int pid, int sig)
}
}
-#ifdef CONFIG_SMP
-#error need to make these arrays
-#endif
-
+/* Accessed only by the tracing thread */
int debugger_pid = -1;
int debugger_parent = -1;
int debugger_fd = -1;
int gdb_pid = -1;
struct {
- unsigned long address;
- int is_write;
- int pid;
- unsigned long sp;
- int is_user;
-} segfault_record[1024];
-
-int segfault_index = 0;
-
-struct {
int pid;
int signal;
unsigned long addr;
struct timeval time;
-} signal_record[1024];
+} signal_record[1024][32];
-int signal_index = 0;
+int signal_index[32];
int nsignals = 0;
int debug_trace = 0;
extern int io_nsignals, io_count, intr_count;
@@ -188,7 +175,7 @@ int signals(int (*init_proc)(void *), void *sp)
signal(SIGPIPE, SIG_IGN);
setup_tracer_winch();
tracing_pid = os_getpid();
- printk("tracing thread pid = %d\n", tracing_pid);
+ printf("tracing thread pid = %d\n", tracing_pid);
pid = clone(signal_tramp, sp, CLONE_FILES | SIGCHLD, init_proc);
n = waitpid(pid, &status, WUNTRACED);
@@ -207,7 +194,7 @@ int signals(int (*init_proc)(void *), void *sp)
set_handler(SIGTERM, last_ditch_exit, SA_ONESHOT | SA_NODEFER, -1);
set_handler(SIGHUP, last_ditch_exit, SA_ONESHOT | SA_NODEFER, -1);
if(debug_trace){
- printk("Tracing thread pausing to be attached\n");
+ printf("Tracing thread pausing to be attached\n");
stop();
}
if(debug){
@@ -219,14 +206,14 @@ int signals(int (*init_proc)(void *), void *sp)
init_parent_proxy(debugger_parent);
err = attach(debugger_parent);
if(err){
- printk("Failed to attach debugger parent %d, "
+ printf("Failed to attach debugger parent %d, "
"errno = %d\n", debugger_parent, err);
debugger_parent = -1;
}
else {
if(ptrace(PTRACE_SYSCALL, debugger_parent,
0, 0) < 0){
- printk("Failed to continue debugger "
+ printf("Failed to continue debugger "
"parent, errno = %d\n", errno);
debugger_parent = -1;
}
@@ -237,7 +224,7 @@ int signals(int (*init_proc)(void *), void *sp)
while(1){
if((pid = waitpid(-1, &status, WUNTRACED)) <= 0){
if(errno != ECHILD){
- printk("wait failed - errno = %d\n", errno);
+ printf("wait failed - errno = %d\n", errno);
}
continue;
}
@@ -259,36 +246,36 @@ int signals(int (*init_proc)(void *), void *sp)
if(WIFEXITED(status)) ;
#ifdef notdef
{
- printk("Child %d exited with status %d\n", pid,
+ printf("Child %d exited with status %d\n", pid,
WEXITSTATUS(status));
}
#endif
else if(WIFSIGNALED(status)){
sig = WTERMSIG(status);
if(sig != 9){
- printk("Child %d exited with signal %d\n", pid,
+ printf("Child %d exited with signal %d\n", pid,
sig);
}
}
else if(WIFSTOPPED(status)){
+ proc_id = pid_to_processor_id(pid);
sig = WSTOPSIG(status);
- if(signal_index == 1024){
- signal_index = 0;
+ if(signal_index[proc_id] == 1024){
+ signal_index[proc_id] = 0;
last_index = 1023;
}
- else last_index = signal_index - 1;
+ else last_index = signal_index[proc_id] - 1;
if(((sig == SIGPROF) || (sig == SIGVTALRM) ||
(sig == SIGALRM)) &&
- (signal_record[last_index].signal == sig) &&
- (signal_record[last_index].pid == pid))
- signal_index = last_index;
- signal_record[signal_index].pid = pid;
- gettimeofday(&signal_record[signal_index].time, NULL);
+ (signal_record[proc_id][last_index].signal == sig)&&
+ (signal_record[proc_id][last_index].pid == pid))
+ signal_index[proc_id] = last_index;
+ signal_record[proc_id][signal_index[proc_id]].pid = pid;
+ gettimeofday(&signal_record[proc_id][signal_index[proc_id]].time, NULL);
eip = ptrace(PTRACE_PEEKUSER, pid, PT_IP_OFFSET, 0);
- signal_record[signal_index].addr = eip;
- signal_record[signal_index++].signal = sig;
+ signal_record[proc_id][signal_index[proc_id]].addr = eip;
+ signal_record[proc_id][signal_index[proc_id]++].signal = sig;
- proc_id = pid_to_processor_id(pid);
if(proc_id == -1){
sleeping_process_signal(pid, sig);
continue;
@@ -314,7 +301,7 @@ int signals(int (*init_proc)(void *), void *sp)
ptrace(PTRACE_KILL, pid, 0, 0);
return(op == OP_REBOOT);
case OP_NONE:
- printk("Detaching pid %d\n", pid);
+ printf("Detaching pid %d\n", pid);
detach(pid, SIGSTOP);
continue;
default:
@@ -413,22 +400,30 @@ __uml_setup("honeypot", uml_honeypot_setup,
" UML. This implies 'jail'.\n\n"
);
+/* Unlocked - don't care if this is a bit off */
int nsegfaults = 0;
+struct {
+ unsigned long address;
+ int is_write;
+ int pid;
+ unsigned long sp;
+ int is_user;
+} segfault_record[1024];
+
void segv_handler(int sig, struct uml_pt_regs *regs)
{
struct sigcontext *context = regs->sc;
- int index;
+ int index, max;
if(regs->is_user && !SEGV_IS_FIXABLE(context)){
bad_segv(SC_FAULT_ADDR(context), SC_IP(context),
SC_FAULT_WRITE(context));
return;
}
- lock_trap();
- index = segfault_index++;
- if(segfault_index == 1024) segfault_index = 0;
- unlock_trap();
+ max = sizeof(segfault_record)/sizeof(segfault_record[0]);
+ index = next_trap_index(max);
+
nsegfaults++;
segfault_record[index].address = SC_FAULT_ADDR(context);
segfault_record[index].pid = os_getpid();
@@ -439,8 +434,6 @@ void segv_handler(int sig, struct uml_pt_regs *regs)
regs->is_user, context);
}
-extern int kern_timer_on;
-
struct signal_info {
void (*handler)(int, struct uml_pt_regs *);
int is_irq;
@@ -471,7 +464,7 @@ void sig_handler_common(int sig, struct sigcontext *sc)
{
struct uml_pt_regs save_regs, *r;
struct signal_info *info;
- int save_errno = errno, save_timer = kern_timer_on, is_user;
+ int save_errno = errno, is_user;
unprotect_kernel_mem();
@@ -488,7 +481,6 @@ void sig_handler_common(int sig, struct sigcontext *sc)
(*info->handler)(sig, r);
- kern_timer_on = save_timer;
if(is_user){
interrupt_end();
block_signals();
@@ -505,19 +497,15 @@ void sig_handler(int sig, struct sigcontext sc)
sig_handler_common(sig, &sc);
}
-extern int timer_irq_inited, missed_ticks;
-
-extern int jail_timer_off;
+extern int timer_irq_inited, missed_ticks[];
void alarm_handler(int sig, struct sigcontext sc)
{
int user;
if(!timer_irq_inited) return;
- missed_ticks++;
+ missed_ticks[cpu()]++;
user = user_context(SC_SP(&sc));
- if(!user && !kern_timer_on) return;
- if(!user && jail_timer_off) return;
if(sig == SIGALRM)
switch_timers(0);
diff --git a/arch/um/kernel/tty_log.c b/arch/um/kernel/tty_log.c
index 1472a99a7686..7d53a6ea2ab0 100644
--- a/arch/um/kernel/tty_log.c
+++ b/arch/um/kernel/tty_log.c
@@ -17,8 +17,8 @@
#define TTY_LOG_DIR "./"
-char *tty_log_dir = TTY_LOG_DIR;
-
+/* Set early in boot and then unchanged */
+static char *tty_log_dir = TTY_LOG_DIR;
static int tty_log_fd = -1;
#define TTY_LOG_OPEN 1
@@ -104,7 +104,7 @@ static int __init set_tty_log_fd(char *name, int *add)
tty_log_fd = strtoul(name, &end, 0);
if(*end != '\0'){
- printk("set_tty_log_dir - strtoul failed on '%s'\n", name);
+ printk("set_tty_log_fd - strtoul failed on '%s'\n", name);
tty_log_fd = -1;
}
return 0;
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 5452d3f2563c..b7d436981572 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -37,6 +37,11 @@
#define DEFAULT_COMMAND_LINE "root=6200"
+struct cpuinfo_um boot_cpu_data = {
+ .loops_per_jiffy = 0,
+ .ipi_pipe = { -1, -1 }
+};
+
unsigned long thread_saved_pc(struct task_struct *task)
{
return(os_process_pc(task->thread.extern_pid));
@@ -119,6 +124,7 @@ static int start_kernel_proc(void *unused)
#define SIZE ((CONFIG_NEST_LEVEL + CONFIG_KERNEL_HALF_GIGS) * 0x20000000)
#define START (TOP - SIZE)
+/* Set in main */
unsigned long host_task_size;
unsigned long task_size;
@@ -129,17 +135,21 @@ void set_task_sizes(int arg)
task_size = START;
}
+/* Set in early boot */
unsigned long uml_physmem;
unsigned long uml_reserved;
-
unsigned long start_vm;
unsigned long end_vm;
-
int ncpus = 1;
+/* Pointer set in linux_main, the array itself is private to each thread,
+ * and changed at address space creation time so this poses no concurrency
+ * problems.
+ */
static char *argv1_begin = NULL;
static char *argv1_end = NULL;
+/* Set in early boot */
static int have_root __initdata = 0;
long physmem_size = 32 * 1024 * 1024;
@@ -258,8 +268,9 @@ static void __init uml_postsetup(void)
}
extern int debug_trace;
-unsigned long brk_start;
+/* Set during early boot */
+unsigned long brk_start;
static struct vm_reserved kernel_vm_reserved;
#define MIN_VMALLOC (32 * 1024 * 1024)
@@ -316,11 +327,12 @@ int linux_main(int argc, char **argv)
end_vm = start_vm + virtmem_size;
if(virtmem_size < physmem_size)
- printk(KERN_INFO "Kernel virtual memory size shrunk to %ld "
- "bytes\n", virtmem_size);
+ printf("Kernel virtual memory size shrunk to %ld bytes\n",
+ virtmem_size);
err = reserve_vm(high_physmem, end_vm, &kernel_vm_reserved);
- if(err) panic("Failed to reserve VM area for kernel VM\n");
+ if(err)
+ tracer_panic("Failed to reserve VM area for kernel VM\n");
uml_postsetup();
@@ -365,18 +377,6 @@ void __init check_bugs(void)
check_sigio();
}
-spinlock_t pid_lock = SPIN_LOCK_UNLOCKED;
-
-void lock_pid(void)
-{
- spin_lock(&pid_lock);
-}
-
-void unlock_pid(void)
-{
- spin_unlock(&pid_lock);
-}
-
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically
diff --git a/arch/um/kernel/umid.c b/arch/um/kernel/umid.c
index ca6735f415ba..2ba2049510b3 100644
--- a/arch/um/kernel/umid.c
+++ b/arch/um/kernel/umid.c
@@ -21,9 +21,13 @@
#define UMID_LEN 64
#define UML_DIR "~/.uml/"
+/* Changed by set_umid and make_umid, which are run early in boot */
static char umid[UMID_LEN] = { 0 };
+
+/* Changed by set_uml_dir and make_uml_dir, which are run early in boot */
static char *uml_dir = UML_DIR;
+/* Changed by set_umid */
static int umid_is_random = 1;
static int umid_inited = 0;
diff --git a/arch/um/kernel/user_util.c b/arch/um/kernel/user_util.c
index 0445cde3cb52..2ed1491bded2 100644
--- a/arch/um/kernel/user_util.c
+++ b/arch/um/kernel/user_util.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2000, 2001 Jeff Dike (jdike@karaya.com)
+ * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
* Licensed under the GPL
*/
@@ -32,6 +32,7 @@
#define COMMAND_LINE_SIZE _POSIX_ARG_MAX
+/* Changed in linux_main and setup_arch, which run before SMP is started */
char saved_command_line[COMMAND_LINE_SIZE] = { 0 };
char command_line[COMMAND_LINE_SIZE] = { 0 };
@@ -184,85 +185,6 @@ void setup_hostinfo(void)
host.release, host.version, host.machine);
}
-void close_fd(int fd)
-{
- close(fd);
-}
-
-char *tempdir = NULL;
-
-static void __init find_tempdir(void)
-{
- char *dirs[] = { "TMP", "TEMP", "TMPDIR", NULL };
- int i;
- char *dir = NULL;
-
- if(tempdir != NULL) return; /* We've already been called */
- for(i = 0; dirs[i]; i++){
- dir = getenv(dirs[i]);
- if(dir != NULL) break;
- }
- if(dir == NULL) dir = "/tmp";
- else if(*dir == '\0') dir = NULL;
- if(dir != NULL) {
- tempdir = malloc(strlen(dir) + 2);
- if(tempdir == NULL){
- fprintf(stderr, "Failed to malloc tempdir, "
- "errno = %d\n", errno);
- return;
- }
- strcpy(tempdir, dir);
- strcat(tempdir, "/");
- }
-}
-
-int make_tempfile(const char *template, char **out_tempname, int do_unlink)
-{
- char tempname[MAXPATHLEN];
- int fd;
-
- find_tempdir();
- if (*template != '/')
- strcpy(tempname, tempdir);
- else
- *tempname = 0;
- strcat(tempname, template);
- if((fd = mkstemp(tempname)) < 0){
- fprintf(stderr, "open - cannot create %s: %s\n", tempname,
- strerror(errno));
- return -1;
- }
- if(do_unlink && (unlink(tempname) < 0)){
- perror("unlink");
- return -1;
- }
- if(out_tempname){
- if((*out_tempname = strdup(tempname)) == NULL){
- perror("strdup");
- return -1;
- }
- }
- return(fd);
-}
-
-int user_read(int fd, char *buf, int len)
-{
- int err;
-
- err = read(fd, buf, len);
- if(err < 0) return(-errno);
- else return(err);
-}
-
-int user_write(int fd, char *buf, int len)
-{
- int err;
-
- err = write(fd, buf, len);
- if(err < 0) return(-errno);
- else return(err);
-}
-
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically
diff --git a/arch/um/main.c b/arch/um/main.c
index 7005f4d7bc3b..204c7a189fdd 100644
--- a/arch/um/main.c
+++ b/arch/um/main.c
@@ -18,15 +18,22 @@
#include "user.h"
#include "init.h"
+/* Set in set_stklim, which is called from main and __wrap_malloc.
+ * __wrap_malloc only calls it if main hasn't started.
+ */
unsigned long stacksizelim;
+/* Set in main */
char *linux_prog;
#define PGD_BOUND (4 * 1024 * 1024)
#define STACKSIZE (8 * 1024 * 1024)
#define THREAD_NAME_LEN (256)
-char padding[THREAD_NAME_LEN] = { [ 0 ... THREAD_NAME_LEN - 2] = ' ', '\0' };
+/* Never changed */
+static char padding[THREAD_NAME_LEN] = {
+ [ 0 ... THREAD_NAME_LEN - 2] = ' ', '\0'
+};
static void set_stklim(void)
{
@@ -129,7 +136,8 @@ int main(int argc, char **argv, char **envp)
return(uml_exitcode);
}
-int allocating_monbuf = 0;
+/* Changed in __wrap___monstartup and __wrap_malloc very early */
+static int allocating_monbuf = 0;
#ifdef PROFILING
extern void __real___monstartup (unsigned long, unsigned long);
@@ -146,6 +154,7 @@ void __wrap___monstartup (unsigned long lowpc, unsigned long highpc)
extern void *__real_malloc(int);
extern unsigned long host_task_size;
+/* Set in __wrap_malloc early */
static void *gmon_buf = NULL;
void *__wrap_malloc(int size)
diff --git a/arch/um/ptproxy/proxy.c b/arch/um/ptproxy/proxy.c
index 78789b9403e8..d5cd8c8919ba 100644
--- a/arch/um/ptproxy/proxy.c
+++ b/arch/um/ptproxy/proxy.c
@@ -30,6 +30,7 @@ Jeff Dike (jdike@karaya.com) : Modified for integration into uml
#include "user_util.h"
#include "user.h"
#include "os.h"
+#include "tempfile.h"
static int debugger_wait(debugger_state *debugger, int *status, int options,
int (*syscall)(debugger_state *debugger, pid_t child),
@@ -122,6 +123,7 @@ int debugger_syscall(debugger_state *debugger, pid_t child)
return(0);
}
+/* Used by the tracing thread */
static debugger_state parent;
static int parent_syscall(debugger_state *debugger, int pid);
@@ -174,10 +176,7 @@ void debugger_cancelled_return(debugger_state *debugger, int result)
syscall_continue(debugger->pid);
}
-#ifdef CONFIG_SMP
-#error need to make these arrays
-#endif
-
+/* Used by the tracing thread */
static debugger_state debugger;
static debugee_state debugee;
diff --git a/arch/um/sys-i386/bugs.c b/arch/um/sys-i386/bugs.c
index 678b04a127c9..5b01a07368cc 100644
--- a/arch/um/sys-i386/bugs.c
+++ b/arch/um/sys-i386/bugs.c
@@ -15,6 +15,7 @@
#define MAXTOKEN 64
+/* Set during early boot */
int cpu_has_cmov = 1;
int cpu_has_xmm = 0;
diff --git a/arch/um/sys-i386/ptrace_user.c b/arch/um/sys-i386/ptrace_user.c
index 45a45edd28dc..659db8a6e2d8 100644
--- a/arch/um/sys-i386/ptrace_user.c
+++ b/arch/um/sys-i386/ptrace_user.c
@@ -59,6 +59,7 @@ static void read_debugregs(int pid, unsigned long *regs)
}
}
+/* Accessed only by the tracing thread */
static unsigned long kernel_debugregs[8] = { [ 0 ... 7 ] = 0 };
static int debugregs_seq = 0;
diff --git a/arch/um/sys-ppc/miscthings.c b/arch/um/sys-ppc/miscthings.c
index 2377b5930ea8..373061c50129 100644
--- a/arch/um/sys-ppc/miscthings.c
+++ b/arch/um/sys-ppc/miscthings.c
@@ -2,9 +2,6 @@
#include "linux/stddef.h" // for NULL
#include "linux/elf.h" // for AT_NULL
-/* unsigned int local_bh_count[NR_CPUS]; */
-unsigned long isa_io_base = 0;
-
/* The following function nicked from arch/ppc/kernel/process.c and
* adapted slightly */
/*
diff --git a/arch/um/uml.lds.S b/arch/um/uml.lds.S
index f6c40ec0d143..212f614b7621 100644
--- a/arch/um/uml.lds.S
+++ b/arch/um/uml.lds.S
@@ -67,6 +67,9 @@ SECTIONS
__setup_start = .;
.setup.init : { *(.setup.init) }
__setup_end = .;
+ __per_cpu_start = . ;
+ .data.percpu : { *(.data.percpu) }
+ __per_cpu_end = . ;
__initcall_start = .;
.initcall.init : {
*(.initcall1.init)
diff --git a/drivers/Makefile b/drivers/Makefile
index 958ecc4df984..2224ec9303f6 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -38,7 +38,7 @@ obj-$(CONFIG_I2O) += message/
obj-$(CONFIG_I2C) += i2c/
obj-$(CONFIG_PHONE) += telephony/
obj-$(CONFIG_MD) += md/
-obj-$(CONFIG_BLUEZ) += bluetooth/
+obj-$(CONFIG_BT) += bluetooth/
obj-$(CONFIG_HOTPLUG_PCI) += hotplug/
obj-$(CONFIG_ISDN_BOOL) += isdn/
diff --git a/drivers/acorn/block/fd1772.c b/drivers/acorn/block/fd1772.c
index 85b5b9cd8859..1d55917a10f8 100644
--- a/drivers/acorn/block/fd1772.c
+++ b/drivers/acorn/block/fd1772.c
@@ -1470,9 +1470,6 @@ static int floppy_open(struct inode *inode, struct file *filp)
int drive = minor(inode->i_rdev) & 3;
int old_dev;
- if ((minor(inode->i_rdev) >> 2) > NUM_DISK_TYPES)
- return -ENXIO;
-
old_dev = fd_device[drive];
if (fd_ref[drive])
@@ -1547,7 +1544,7 @@ int fd1772_init(void)
return 0;
for (i = 0; i < FD_MAX_UNITS; i++) {
- disks[i] = alloc_disk();
+ disks[i] = alloc_disk(1);
if (!disks[i])
goto out;
}
diff --git a/drivers/acorn/block/mfmhd.c b/drivers/acorn/block/mfmhd.c
index 32bef8806190..d4c01d605559 100644
--- a/drivers/acorn/block/mfmhd.c
+++ b/drivers/acorn/block/mfmhd.c
@@ -1186,14 +1186,6 @@ static int mfm_ioctl(struct inode *inode, struct file *file, u_int cmd, u_long a
return 0;
}
-static int mfm_open(struct inode *inode, struct file *file)
-{
- int dev = DEVICE_NR(minor(inode->i_rdev));
- if (dev >= mfm_drives)
- return -ENODEV;
- return 0;
-}
-
/*
* This is to handle various kernel command line parameters
* specific to this driver.
@@ -1239,7 +1231,6 @@ void xd_set_geometry(struct block_device *bdev, unsigned char secsptrack,
static struct block_device_operations mfm_fops =
{
.owner = THIS_MODULE,
- .open = mfm_open,
.ioctl = mfm_ioctl,
};
@@ -1336,12 +1327,11 @@ static int __init mfm_init (void)
goto out3;
for (i = 0; i < mfm_drives; i++) {
- struct gendisk *disk = alloc_disk();
+ struct gendisk *disk = alloc_disk(64);
if (!disk)
goto Enomem;
disk->major = MAJOR_NR;
disk->first_minor = i << 6;
- disk->minor_shift = 6;
disk->fops = &mfm_fops;
sprintf(disk->disk_name, "mfm%c", 'a'+i);
mfm_gendisk[i] = disk;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 4fc859d3ab57..83c31723d844 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -149,36 +149,16 @@ void driver_detach(struct device_driver * drv)
spin_unlock(&device_lock);
}
-/**
- * device_register - register a device
- * @dev: pointer to the device structure
- *
- * First, make sure that the device has a parent, create
- * a directory for it, then add it to the parent's list of
- * children.
- *
- * Maintains a global list of all devices, in depth-first ordering.
- * The head for that list is device_root.g_list.
- */
-int device_register(struct device *dev)
+int device_add(struct device *dev)
{
int error;
if (!dev || !strlen(dev->bus_id))
return -EINVAL;
- INIT_LIST_HEAD(&dev->node);
- INIT_LIST_HEAD(&dev->children);
- INIT_LIST_HEAD(&dev->g_list);
- INIT_LIST_HEAD(&dev->driver_list);
- INIT_LIST_HEAD(&dev->bus_list);
- INIT_LIST_HEAD(&dev->intf_list);
- spin_lock_init(&dev->lock);
- atomic_set(&dev->refcount,2);
- dev->present = 1;
spin_lock(&device_lock);
+ dev->present = 1;
if (dev->parent) {
- get_device_locked(dev->parent);
list_add_tail(&dev->g_list,&dev->parent->g_list);
list_add_tail(&dev->node,&dev->parent->children);
} else
@@ -209,10 +189,48 @@ int device_register(struct device *dev)
list_del_init(&dev->g_list);
list_del_init(&dev->node);
spin_unlock(&device_lock);
- if (dev->parent)
- put_device(dev->parent);
}
- put_device(dev);
+ return error;
+}
+
+void device_initialize(struct device *dev)
+{
+ INIT_LIST_HEAD(&dev->node);
+ INIT_LIST_HEAD(&dev->children);
+ INIT_LIST_HEAD(&dev->g_list);
+ INIT_LIST_HEAD(&dev->driver_list);
+ INIT_LIST_HEAD(&dev->bus_list);
+ INIT_LIST_HEAD(&dev->intf_list);
+ spin_lock_init(&dev->lock);
+ atomic_set(&dev->refcount,1);
+ if (dev->parent)
+ get_device(dev->parent);
+}
+
+/**
+ * device_register - register a device
+ * @dev: pointer to the device structure
+ *
+ * First, make sure that the device has a parent, create
+ * a directory for it, then add it to the parent's list of
+ * children.
+ *
+ * Maintains a global list of all devices, in depth-first ordering.
+ * The head for that list is device_root.g_list.
+ */
+int device_register(struct device *dev)
+{
+ int error;
+
+ if (!dev || !strlen(dev->bus_id))
+ return -EINVAL;
+
+ device_initialize(dev);
+ if (dev->parent)
+ get_device(dev->parent);
+ error = device_add(dev);
+ if (error && dev->parent)
+ put_device(dev->parent);
return error;
}
@@ -257,16 +275,7 @@ void put_device(struct device * dev)
put_device(parent);
}
-/**
- * device_unregister - unlink device
- * @dev: device going away
- *
- * The device has been removed from the system, so we disavow knowledge
- * of it. It might not be the final reference to the device, so we mark
- * it as !present, so no more references to it can be acquired.
- * In the end, we decrement the final reference count for it.
- */
-void device_unregister(struct device * dev)
+void device_del(struct device * dev)
{
spin_lock(&device_lock);
dev->present = 0;
@@ -293,7 +302,20 @@ void device_unregister(struct device * dev)
/* remove the driverfs directory */
device_remove_dir(dev);
+}
+/**
+ * device_unregister - unlink device
+ * @dev: device going away
+ *
+ * The device has been removed from the system, so we disavow knowledge
+ * of it. It might not be the final reference to the device, so we mark
+ * it as !present, so no more references to it can be acquired.
+ * In the end, we decrement the final reference count for it.
+ */
+void device_unregister(struct device * dev)
+{
+ device_del(dev);
put_device(dev);
}
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 24a1ee66d93b..1c1a72e440e6 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -1962,7 +1962,6 @@ static boolean DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
sprintf(disk->disk_name, "rd/c%dd%d", Controller->ControllerNumber, n);
disk->major = MajorNumber;
disk->first_minor = n << DAC960_MaxPartitionsBits;
- disk->minor_shift = DAC960_MaxPartitionsBits;
disk->fops = &DAC960_BlockDeviceOperations;
}
/*
@@ -2200,7 +2199,7 @@ static void DAC960_DetectControllers(DAC960_HardwareType_T HardwareType)
}
memset(Controller, 0, sizeof(DAC960_Controller_T));
for (i = 0; i < DAC960_MaxLogicalDrives; i++) {
- Controller->disks[i] = alloc_disk();
+ Controller->disks[i] = alloc_disk(1<<DAC960_MaxPartitionsBits);
if (!Controller->disks[i])
goto Enomem;
}
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index eff7ee947ea7..6c22bb8963d6 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -9,9 +9,9 @@
#
export-objs := elevator.o ll_rw_blk.o loop.o genhd.o acsi.o \
- block_ioctl.o deadline-iosched.o
+ scsi_ioctl.o deadline-iosched.o
-obj-y := elevator.o ll_rw_blk.o blkpg.o genhd.o block_ioctl.o deadline-iosched.o
+obj-y := elevator.o ll_rw_blk.o ioctl.o genhd.o scsi_ioctl.o deadline-iosched.o
obj-$(CONFIG_MAC_FLOPPY) += swim3.o
obj-$(CONFIG_BLK_DEV_FD) += floppy.o
diff --git a/drivers/block/acsi.c b/drivers/block/acsi.c
index 5d36adb832e2..520eeabab869 100644
--- a/drivers/block/acsi.c
+++ b/drivers/block/acsi.c
@@ -1086,8 +1086,6 @@ static int acsi_ioctl( struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg )
{
int dev = DEVICE_NR(inode->i_rdev);
- if (dev >= NDevices)
- return -EINVAL;
switch (cmd) {
case HDIO_GETGEO:
/* HDIO_GETGEO is supported more for getting the partition's
@@ -1130,13 +1128,8 @@ static int acsi_ioctl( struct inode *inode, struct file *file,
static int acsi_open( struct inode * inode, struct file * filp )
{
- int device;
- struct acsi_info_struct *aip;
-
- device = DEVICE_NR(inode->i_rdev);
- if (device >= NDevices)
- return -ENXIO;
- aip = &acsi_info[device];
+ int device = DEVICE_NR(inode->i_rdev);
+ struct acsi_info_struct *aip = &acsi_info[device];
if (access_count[device] == 0 && aip->removable) {
#if 0
@@ -1729,7 +1722,7 @@ int acsi_init( void )
#endif
err = -ENOMEM;
for( i = 0; i < NDevices; ++i ) {
- acsi_gendisk[i] = alloc_disk();
+ acsi_gendisk[i] = alloc_disk(16);
if (!acsi_gendisk[i])
goto out4;
}
@@ -1739,7 +1732,10 @@ int acsi_init( void )
sprintf(disk->disk_name, "ad%c", 'a'+i);
disk->major = MAJOR_NR;
disk->first_minor = i << 4;
- disk->minor_shift = (acsi_info[i].type==HARDDISK)?4:0;
+ if (acsi_info[i].type != HARDDISK) {
+ disk->minor_shift = 0;
+ disk->minors = 1;
+ }
disk->fops = &acsi_fops;
set_capacity(disk, acsi_info[i].size);
add_disk(disk);
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 22790c4145fe..9e6ae34ae194 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1607,9 +1607,6 @@ static int floppy_open(struct inode *inode, struct file *filp)
if (!kdev_same(old_dev, inode->i_rdev))
return -EBUSY;
- if (unit[drive].type->code == FD_NODRIVE)
- return -ENODEV;
-
if (filp && filp->f_mode & 3) {
check_disk_change(inode->i_bdev);
if (filp->f_mode & 2 ) {
@@ -1683,11 +1680,6 @@ static int amiga_floppy_change(kdev_t dev)
int changed;
static int first_time = 1;
- if (major(dev) != MAJOR_NR) {
- printk(KERN_CRIT "floppy_change: not a floppy\n");
- return 0;
- }
-
if (first_time)
changed = first_time--;
else {
@@ -1735,7 +1727,7 @@ static int __init fd_probe_drives(void)
fd_probe(drive);
if (unit[drive].type->code == FD_NODRIVE)
continue;
- disk = alloc_disk();
+ disk = alloc_disk(1);
if (!disk) {
unit[drive].type->code = FD_NODRIVE;
continue;
@@ -1751,7 +1743,6 @@ static int __init fd_probe_drives(void)
printk("fd%d ",drive);
disk->major = MAJOR_NR;
disk->first_minor = drive;
- disk->minor_shift = 0;
disk->fops = &floppy_fops;
sprintf(disk->disk_name, "fd%d", drive);
set_capacity(disk, 880*2);
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 12f3ae02b317..c2ae35bb875b 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1358,12 +1358,6 @@ static int fd_device[4] = { 0,0,0,0 };
static int check_floppy_change (kdev_t dev)
{
unsigned int drive = minor(dev) & 0x03;
-
- if (major(dev) != MAJOR_NR) {
- printk(KERN_ERR "floppy_changed: not a floppy\n");
- return 0;
- }
-
if (test_bit (drive, &fake_change)) {
/* simulated change (e.g. after formatting) */
return 1;
@@ -1855,17 +1849,11 @@ static void __init config_types( void )
static int floppy_open( struct inode *inode, struct file *filp )
{
- int drive, type;
- int old_dev;
+ int drive = minor(inode->i_rdev) & 3;
+ int type = minor(inode->i_rdev) >> 2;
+ int old_dev = fd_device[drive];
- drive = minor(inode->i_rdev) & 3;
- type = minor(inode->i_rdev) >> 2;
DPRINT(("fd_open: type=%d\n",type));
- if (drive >= FD_MAX_UNITS || type > NUM_DISK_MINORS)
- return -ENXIO;
-
- old_dev = fd_device[drive];
-
if (fd_ref[drive] && old_dev != minor(inode->i_rdev))
return -EBUSY;
@@ -1949,7 +1937,7 @@ int __init atari_floppy_init (void)
}
for (i = 0; i < FD_MAX_UNITS; i++) {
- unit[i].disk = alloc_disk();
+ unit[i].disk = alloc_disk(1);
if (!unit[i].disk)
goto Enomem;
}
diff --git a/drivers/block/blkpg.c b/drivers/block/blkpg.c
deleted file mode 100644
index d5ba72a8ac86..000000000000
--- a/drivers/block/blkpg.c
+++ /dev/null
@@ -1,310 +0,0 @@
-/*
- * Partition table and disk geometry handling
- *
- * This obsoletes the partition-handling code in genhd.c:
- * Userspace can look at a disk in arbitrary format and tell
- * the kernel what partitions there are on the disk, and how
- * these should be numbered.
- * It also allows one to repartition a disk that is being used.
- *
- * A single ioctl with lots of subfunctions:
- *
- * Device number stuff:
- * get_whole_disk() (given the device number of a partition, find
- * the device number of the encompassing disk)
- * get_all_partitions() (given the device number of a disk, return the
- * device numbers of all its known partitions)
- *
- * Partition stuff:
- * add_partition()
- * delete_partition()
- * test_partition_in_use() (also for test_disk_in_use)
- *
- * Geometry stuff:
- * get_geometry()
- * set_geometry()
- * get_bios_drivedata()
- *
- * For today, only the partition stuff - aeb, 990515
- */
-
-#include <linux/errno.h>
-#include <linux/fs.h> /* for BLKROSET, ... */
-#include <linux/sched.h> /* for capable() */
-#include <linux/blk.h> /* for set_device_ro() */
-#include <linux/blkpg.h>
-#include <linux/genhd.h>
-#include <linux/module.h> /* for EXPORT_SYMBOL */
-#include <linux/backing-dev.h>
-#include <linux/buffer_head.h>
-
-#include <asm/uaccess.h>
-
-/*
- * What is the data describing a partition?
- *
- * 1. a device number (kdev_t)
- * 2. a starting sector and number of sectors (hd_struct)
- * given in the part[] array of the gendisk structure for the drive.
- *
- * The number of sectors is replicated in the sizes[] array of
- * the gendisk structure for the major, which again is copied to
- * the blk_size[][] array.
- * (However, hd_struct has the number of 512-byte sectors,
- * g->sizes[] and blk_size[][] have the number of 1024-byte blocks.)
- * Note that several drives may have the same major.
- */
-
-/*
- * Add a partition.
- *
- * returns: EINVAL: bad parameters
- * ENXIO: cannot find drive
- * EBUSY: proposed partition overlaps an existing one
- * or has the same number as an existing one
- * 0: all OK.
- */
-int add_partition(struct block_device *bdev, struct blkpg_partition *p)
-{
- struct gendisk *g;
- long long ppstart, pplength;
- int part, i;
-
- /* convert bytes to sectors */
- ppstart = (p->start >> 9);
- pplength = (p->length >> 9);
-
- /* check for fit in a hd_struct */
- if (sizeof(sector_t) == sizeof(long) &&
- sizeof(long long) > sizeof(long)) {
- long pstart, plength;
- pstart = ppstart;
- plength = pplength;
- if (pstart != ppstart || plength != pplength
- || pstart < 0 || plength < 0)
- return -EINVAL;
- }
-
- /* find the drive major */
- g = get_gendisk(bdev->bd_dev, &part);
- if (!g)
- return -ENXIO;
-
- /* existing drive? */
-
- /* drive and partition number OK? */
- if (bdev != bdev->bd_contains)
- return -EINVAL;
- if (part)
- BUG();
- if (p->pno <= 0 || p->pno >= (1 << g->minor_shift))
- return -EINVAL;
-
- /* partition number in use? */
- if (g->part[p->pno - 1].nr_sects != 0)
- return -EBUSY;
-
- /* overlap? */
- for (i = 0; i < (1<<g->minor_shift) - 1; i++)
- if (!(ppstart+pplength <= g->part[i].start_sect ||
- ppstart >= g->part[i].start_sect + g->part[i].nr_sects))
- return -EBUSY;
-
- /* all seems OK */
- g->part[p->pno - 1].start_sect = ppstart;
- g->part[p->pno - 1].nr_sects = pplength;
- update_partition(g, p->pno);
- return 0;
-}
-
-/*
- * Delete a partition given by partition number
- *
- * returns: EINVAL: bad parameters
- * ENXIO: cannot find partition
- * EBUSY: partition is busy
- * 0: all OK.
- *
- * Note that the dev argument refers to the entire disk, not the partition.
- */
-int del_partition(struct block_device *bdev, struct blkpg_partition *p)
-{
- struct gendisk *g;
- struct block_device *bdevp;
- int part;
- int holder;
-
- /* find the drive major */
- g = get_gendisk(bdev->bd_dev, &part);
- if (!g)
- return -ENXIO;
- if (bdev != bdev->bd_contains)
- return -EINVAL;
- if (part)
- BUG();
- if (p->pno <= 0 || p->pno >= (1 << g->minor_shift))
- return -EINVAL;
-
- /* existing drive and partition? */
- if (g->part[p->pno - 1].nr_sects == 0)
- return -ENXIO;
-
- /* partition in use? Incomplete check for now. */
- bdevp = bdget(MKDEV(g->major, g->first_minor + p->pno));
- if (!bdevp)
- return -ENOMEM;
- if (bd_claim(bdevp, &holder) < 0) {
- bdput(bdevp);
- return -EBUSY;
- }
-
- /* all seems OK */
- fsync_bdev(bdevp);
- invalidate_bdev(bdevp, 0);
-
- g->part[p->pno - 1].start_sect = 0;
- g->part[p->pno - 1].nr_sects = 0;
- update_partition(g, p->pno);
- bd_release(bdevp);
- bdput(bdevp);
-
- return 0;
-}
-
-int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg *arg)
-{
- struct blkpg_ioctl_arg a;
- struct blkpg_partition p;
- int len;
-
- if (copy_from_user(&a, arg, sizeof(struct blkpg_ioctl_arg)))
- return -EFAULT;
-
- switch (a.op) {
- case BLKPG_ADD_PARTITION:
- case BLKPG_DEL_PARTITION:
- len = a.datalen;
- if (len < sizeof(struct blkpg_partition))
- return -EINVAL;
- if (copy_from_user(&p, a.data, sizeof(struct blkpg_partition)))
- return -EFAULT;
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
- if (a.op == BLKPG_ADD_PARTITION)
- return add_partition(bdev, &p);
- else
- return del_partition(bdev, &p);
- default:
- return -EINVAL;
- }
-}
-
-/*
- * Common ioctl's for block devices
- */
-int blk_ioctl(struct block_device *bdev, unsigned int cmd, unsigned long arg)
-{
- request_queue_t *q;
- u64 ullval = 0;
- int intval;
- unsigned short usval;
- kdev_t dev = to_kdev_t(bdev->bd_dev);
- int holder;
- struct backing_dev_info *bdi;
-
- switch (cmd) {
- case BLKROSET:
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
- if (get_user(intval, (int *)(arg)))
- return -EFAULT;
- set_device_ro(dev, intval);
- return 0;
- case BLKROGET:
- intval = (bdev_read_only(bdev) != 0);
- return put_user(intval, (int *)(arg));
-
- case BLKRASET:
- case BLKFRASET:
- if(!capable(CAP_SYS_ADMIN))
- return -EACCES;
- bdi = blk_get_backing_dev_info(bdev);
- if (bdi == NULL)
- return -ENOTTY;
- bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
- return 0;
-
- case BLKRAGET:
- case BLKFRAGET:
- if (!arg)
- return -EINVAL;
- bdi = blk_get_backing_dev_info(bdev);
- if (bdi == NULL)
- return -ENOTTY;
- return put_user((bdi->ra_pages * PAGE_CACHE_SIZE) / 512,
- (long *)arg);
-
- case BLKSECTGET:
- if ((q = bdev_get_queue(bdev)) == NULL)
- return -EINVAL;
-
- usval = q->max_sectors;
- blk_put_queue(q);
- return put_user(usval, (unsigned short *)arg);
-
- case BLKFLSBUF:
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
- fsync_bdev(bdev);
- invalidate_bdev(bdev, 0);
- return 0;
-
- case BLKSSZGET:
- /* get block device hardware sector size */
- intval = bdev_hardsect_size(bdev);
- return put_user(intval, (int *) arg);
-
- case BLKGETSIZE:
- {
- unsigned long ret;
- /* size in sectors, works up to 2 TB */
- ullval = bdev->bd_inode->i_size;
- ret = ullval >> 9;
- if ((u64)ret != (ullval >> 9))
- return -EFBIG;
- return put_user(ret, (unsigned long *) arg);
- }
-
- case BLKGETSIZE64:
- /* size in bytes */
- ullval = bdev->bd_inode->i_size;
- return put_user(ullval, (u64 *) arg);
-
- case BLKPG:
- return blkpg_ioctl(bdev, (struct blkpg_ioctl_arg *) arg);
- case BLKBSZGET:
- /* get the logical block size (cf. BLKSSZGET) */
- intval = block_size(bdev);
- return put_user(intval, (int *) arg);
-
- case BLKBSZSET:
- /* set the logical block size */
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
- if (!arg)
- return -EINVAL;
- if (get_user(intval, (int *) arg))
- return -EFAULT;
- if (intval > PAGE_SIZE || intval < 512 ||
- (intval & (intval - 1)))
- return -EINVAL;
- if (bd_claim(bdev, &holder) < 0)
- return -EBUSY;
- set_blocksize(bdev, intval);
- bd_release(bdev);
- return 0;
-
- default:
- return -EINVAL;
- }
-}
diff --git a/drivers/block/block_ioctl.c b/drivers/block/block_ioctl.c
deleted file mode 100644
index edde76503d60..000000000000
--- a/drivers/block/block_ioctl.c
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public Licens
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
- *
- */
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/config.h>
-#include <linux/swap.h>
-#include <linux/init.h>
-#include <linux/smp_lock.h>
-#include <linux/module.h>
-#include <linux/blk.h>
-#include <linux/completion.h>
-
-#include <linux/cdrom.h>
-
-int blk_do_rq(request_queue_t *q, struct request *rq)
-{
- DECLARE_COMPLETION(wait);
- int err = 0;
-
- rq->flags |= REQ_NOMERGE;
- rq->waiting = &wait;
- elv_add_request(q, rq, 1);
- generic_unplug_device(q);
- wait_for_completion(&wait);
-
- /*
- * for now, never retry anything
- */
- if (rq->errors)
- err = -EIO;
-
- return err;
-}
-
-int block_ioctl(struct block_device *bdev, unsigned int cmd, unsigned long arg)
-{
- request_queue_t *q;
- struct request *rq;
- int close = 0, err;
-
- q = bdev_get_queue(bdev);
- if (!q)
- return -ENXIO;
-
- switch (cmd) {
- case CDROMCLOSETRAY:
- close = 1;
- case CDROMEJECT:
- rq = blk_get_request(q, WRITE, __GFP_WAIT);
- rq->flags = REQ_BLOCK_PC;
- memset(rq->cmd, 0, sizeof(rq->cmd));
- rq->cmd[0] = GPCMD_START_STOP_UNIT;
- rq->cmd[4] = 0x02 + (close != 0);
- err = blk_do_rq(q, rq);
- blk_put_request(rq);
- break;
- default:
- err = -ENOTTY;
- }
-
- blk_put_queue(q);
- return err;
-}
-
-EXPORT_SYMBOL(block_ioctl);
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index ebd7a216810f..caa30e1c6e84 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -740,7 +740,7 @@ static int revalidate_allvol(kdev_t dev)
for(i=0; i< NWD; i++) {
struct gendisk *disk = hba[ctlr]->gendisk[i];
- if (disk->part)
+ if (disk->flags & GENHD_FL_UP)
del_gendisk(disk);
}
@@ -792,7 +792,7 @@ static int deregister_disk(int ctlr, int logvol)
spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
/* invalidate the devices and deregister the disk */
- if (disk->part)
+ if (disk->flags & GENHD_FL_UP)
del_gendisk(disk);
/* check to see if it was the last disk */
if (logvol == h->highest_lun) {
@@ -2274,7 +2274,7 @@ static int alloc_cciss_hba(void)
struct gendisk *disk[NWD];
int i, n;
for (n = 0; n < NWD; n++) {
- disk[n] = alloc_disk();
+ disk[n] = alloc_disk(1 << NWD_SHIFT);
if (!disk[n])
goto out;
}
@@ -2447,7 +2447,6 @@ static int __init cciss_init_one(struct pci_dev *pdev,
sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
disk->major = MAJOR_NR + i;
disk->first_minor = j << NWD_SHIFT;
- disk->minor_shift = NWD_SHIFT;
if( !(drv->nr_blocks))
continue;
(BLK_DEFAULT_QUEUE(MAJOR_NR + i))->hardsect_size = drv->block_size;
@@ -2500,7 +2499,7 @@ static void __devexit cciss_remove_one (struct pci_dev *pdev)
/* remove it from the disk list */
for (j = 0; j < NWD; j++) {
struct gendisk *disk = hba[i]->gendisk[j];
- if (disk->part)
+ if (disk->flags & GENHD_FL_UP)
del_gendisk(disk);
}
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 7bfa29a5bc89..c3b1c4b17ea7 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -304,7 +304,7 @@ static void __exit cpqarray_exit(void)
kfree(hba[i]->cmd_pool_bits);
for (j = 0; j < NWD; j++) {
- if (ida_gendisk[i][j]->part)
+ if (ida_gendisk[i][j]->flags & GENHD_FL_UP)
del_gendisk(ida_gendisk[i][j]);
put_disk(ida_gendisk[i][j]);
}
@@ -358,7 +358,7 @@ static int __init cpqarray_init(void)
}
num_cntlrs_reg++;
for (j=0; j<NWD; j++) {
- ida_gendisk[i][j] = alloc_disk();
+ ida_gendisk[i][j] = alloc_disk(1 << NWD_SHIFT);
if (!ida_gendisk[i][j])
goto Enomem2;
}
@@ -405,7 +405,6 @@ static int __init cpqarray_init(void)
sprintf(disk->disk_name, "ida/c%dd%d", i, j);
disk->major = MAJOR_NR + i;
disk->first_minor = j<<NWD_SHIFT;
- disk->minor_shift = NWD_SHIFT;
disk->flags = GENHD_FL_DEVFS;
disk->fops = &ida_fops;
if (!drv->nr_blks)
@@ -1428,7 +1427,7 @@ static int revalidate_allvol(kdev_t dev)
*/
for (i = 0; i < NWD; i++) {
struct gendisk *disk = ida_gendisk[ctlr][i];
- if (disk->part)
+ if (disk->flags & GENHD_FL_UP)
del_gendisk(disk);
}
memset(hba[ctlr]->drv, 0, sizeof(drv_info_t)*NWD);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 924e1e011f76..3fde460ce7ea 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3488,16 +3488,6 @@ static int fd_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
loc.start = 0;
return _COPYOUT(loc);
}
-
- case BLKGETSIZE:
- ECALL(get_floppy_geometry(drive, type, &g));
- return put_user(g->size, (unsigned long *) param);
-
- case BLKGETSIZE64:
- ECALL(get_floppy_geometry(drive, type, &g));
- return put_user((u64)g->size << 9, (u64 *) param);
- /* BLKRRPART is not defined as floppies don't have
- * partition tables */
}
/* convert the old style command into a new style command */
@@ -4240,7 +4230,7 @@ int __init floppy_init(void)
raw_cmd = NULL;
for (i=0; i<N_DRIVE; i++) {
- disks[i] = alloc_disk();
+ disks[i] = alloc_disk(1);
if (!disks[i])
goto Enomem;
}
diff --git a/drivers/block/genhd.c b/drivers/block/genhd.c
index 3f6d259165f3..449e69061bbc 100644
--- a/drivers/block/genhd.c
+++ b/drivers/block/genhd.c
@@ -57,34 +57,13 @@ EXPORT_SYMBOL(blk_set_probe); /* Will go away */
* This function registers the partitioning information in @gp
* with the kernel.
*/
-static void add_gendisk(struct gendisk *gp)
+void add_disk(struct gendisk *disk)
{
- struct hd_struct *p = NULL;
-
- if (gp->minor_shift) {
- size_t size = sizeof(struct hd_struct)*((1<<gp->minor_shift)-1);
- p = kmalloc(size, GFP_KERNEL);
- if (!p) {
- printk(KERN_ERR "out of memory; no partitions for %s\n",
- gp->disk_name);
- gp->minor_shift = 0;
- } else
- memset(p, 0, size);
- }
- gp->part = p;
-
write_lock(&gendisk_lock);
- list_add(&gp->list, &gendisks[gp->major].list);
- if (gp->minor_shift)
- list_add_tail(&gp->full_list, &gendisk_list);
- else
- INIT_LIST_HEAD(&gp->full_list);
+ list_add(&disk->list, &gendisks[disk->major].list);
+ list_add_tail(&disk->full_list, &gendisk_list);
write_unlock(&gendisk_lock);
-}
-
-void add_disk(struct gendisk *disk)
-{
- add_gendisk(disk);
+ disk->flags |= GENHD_FL_UP;
register_disk(disk);
}
@@ -118,6 +97,8 @@ get_gendisk(dev_t dev, int *part)
read_lock(&gendisk_lock);
if (gendisks[major].get) {
disk = gendisks[major].get(minor);
+ if (disk)
+ get_disk(disk);
read_unlock(&gendisk_lock);
return disk;
}
@@ -125,8 +106,9 @@ get_gendisk(dev_t dev, int *part)
disk = list_entry(p, struct gendisk, list);
if (disk->first_minor > minor)
continue;
- if (disk->first_minor + (1<<disk->minor_shift) <= minor)
+ if (disk->first_minor + disk->minors <= minor)
continue;
+ get_disk(disk);
read_unlock(&gendisk_lock);
*part = minor - disk->first_minor;
return disk;
@@ -135,8 +117,6 @@ get_gendisk(dev_t dev, int *part)
return NULL;
}
-EXPORT_SYMBOL(get_gendisk);
-
#ifdef CONFIG_PROC_FS
/* iterator */
static void *part_start(struct seq_file *part, loff_t *pos)
@@ -173,7 +153,7 @@ static int show_partition(struct seq_file *part, void *v)
seq_puts(part, "major minor #blocks name\n\n");
/* Don't show non-partitionable devices or empty devices */
- if (!get_capacity(sgp))
+ if (!get_capacity(sgp) || sgp->minors == 1)
return 0;
/* show the full disk and all non-0 size partitions of it */
@@ -181,7 +161,7 @@ static int show_partition(struct seq_file *part, void *v)
sgp->major, sgp->first_minor,
(unsigned long long)get_capacity(sgp) >> 1,
disk_name(sgp, 0, buf));
- for (n = 0; n < (1<<sgp->minor_shift) - 1; n++) {
+ for (n = 0; n < sgp->minors - 1; n++) {
if (sgp->part[n].nr_sects == 0)
continue;
seq_printf(part, "%4d %4d %10llu %s\n",
@@ -210,6 +190,10 @@ struct device_class disk_devclass = {
.name = "disk",
};
+static struct bus_type disk_bus = {
+ name: "block",
+};
+
int __init device_init(void)
{
int i;
@@ -218,6 +202,7 @@ int __init device_init(void)
INIT_LIST_HEAD(&gendisks[i].list);
blk_dev_init();
devclass_register(&disk_devclass);
+ bus_register(&disk_bus);
return 0;
}
@@ -225,17 +210,51 @@ __initcall(device_init);
EXPORT_SYMBOL(disk_devclass);
-struct gendisk *alloc_disk(void)
+static void disk_release(struct device *dev)
+{
+ struct gendisk *disk = dev->driver_data;
+ kfree(disk->part);
+ kfree(disk);
+}
+
+struct gendisk *alloc_disk(int minors)
{
struct gendisk *disk = kmalloc(sizeof(struct gendisk), GFP_KERNEL);
- if (disk)
+ if (disk) {
memset(disk, 0, sizeof(struct gendisk));
+ if (minors > 1) {
+ int size = (minors - 1) * sizeof(struct hd_struct);
+ disk->part = kmalloc(size, GFP_KERNEL);
+ if (!disk->part) {
+ kfree(disk);
+ return NULL;
+ }
+ memset(disk->part, 0, size);
+ }
+ disk->minors = minors;
+ while (minors >>= 1)
+ disk->minor_shift++;
+ INIT_LIST_HEAD(&disk->full_list);
+ disk->disk_dev.bus = &disk_bus;
+ disk->disk_dev.release = disk_release;
+ disk->disk_dev.driver_data = disk;
+ device_initialize(&disk->disk_dev);
+ }
+ return disk;
+}
+
+struct gendisk *get_disk(struct gendisk *disk)
+{
+ atomic_inc(&disk->disk_dev.refcount);
return disk;
}
void put_disk(struct gendisk *disk)
{
- kfree(disk);
+ if (disk)
+ put_device(&disk->disk_dev);
}
+
EXPORT_SYMBOL(alloc_disk);
+EXPORT_SYMBOL(get_disk);
EXPORT_SYMBOL(put_disk);
diff --git a/drivers/block/ioctl.c b/drivers/block/ioctl.c
new file mode 100644
index 000000000000..e420c691763d
--- /dev/null
+++ b/drivers/block/ioctl.c
@@ -0,0 +1,215 @@
+#include <linux/sched.h> /* for capable() */
+#include <linux/blk.h> /* for set_device_ro() */
+#include <linux/blkpg.h>
+#include <linux/backing-dev.h>
+#include <linux/buffer_head.h>
+#include <asm/uaccess.h>
+
+static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg *arg)
+{
+ struct block_device *bdevp;
+ int holder;
+ struct gendisk *disk;
+ struct blkpg_ioctl_arg a;
+ struct blkpg_partition p;
+ long long start, length;
+ int part;
+ int i;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (copy_from_user(&a, arg, sizeof(struct blkpg_ioctl_arg)))
+ return -EFAULT;
+ if (copy_from_user(&p, a.data, sizeof(struct blkpg_partition)))
+ return -EFAULT;
+ disk = bdev->bd_disk;
+ if (bdev != bdev->bd_contains)
+ return -EINVAL;
+ part = p.pno;
+ if (part <= 0 || part >= disk->minors)
+ return -EINVAL;
+ switch (a.op) {
+ case BLKPG_ADD_PARTITION:
+ start = p.start >> 9;
+ length = p.length >> 9;
+ /* check for fit in a hd_struct */
+ if (sizeof(sector_t) == sizeof(long) &&
+ sizeof(long long) > sizeof(long)) {
+ long pstart = start, plength = length;
+ if (pstart != start || plength != length
+ || pstart < 0 || plength < 0)
+ return -EINVAL;
+ }
+ /* partition number in use? */
+ if (disk->part[part - 1].nr_sects != 0)
+ return -EBUSY;
+ /* overlap? */
+ for (i = 0; i < disk->minors - 1; i++) {
+ struct hd_struct *s = &disk->part[i];
+ if (!(start+length <= s->start_sect ||
+ start >= s->start_sect + s->nr_sects))
+ return -EBUSY;
+ }
+ /* all seems OK */
+ add_partition(disk, part, start, length);
+ return 0;
+ case BLKPG_DEL_PARTITION:
+ if (disk->part[part - 1].nr_sects == 0)
+ return -ENXIO;
+ /* partition in use? Incomplete check for now. */
+ bdevp = bdget(MKDEV(disk->major, disk->first_minor) + part);
+ if (!bdevp)
+ return -ENOMEM;
+ if (bd_claim(bdevp, &holder) < 0) {
+ bdput(bdevp);
+ return -EBUSY;
+ }
+ /* all seems OK */
+ fsync_bdev(bdevp);
+ invalidate_bdev(bdevp, 0);
+
+ delete_partition(disk, part);
+ bd_release(bdevp);
+ bdput(bdevp);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int blkdev_reread_part(struct block_device *bdev)
+{
+ struct gendisk *disk = bdev->bd_disk;
+ int res;
+
+ if (disk->minors == 1 || bdev != bdev->bd_contains)
+ return -EINVAL;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (down_trylock(&bdev->bd_sem))
+ return -EBUSY;
+ res = rescan_partitions(disk, bdev);
+ up(&bdev->bd_sem);
+ return res;
+}
+
+static int put_ushort(unsigned long arg, unsigned short val)
+{
+ return put_user(val, (unsigned short *)arg);
+}
+
+static int put_int(unsigned long arg, int val)
+{
+ return put_user(val, (int *)arg);
+}
+
+static int put_long(unsigned long arg, long val)
+{
+ return put_user(val, (long *)arg);
+}
+
+static int put_ulong(unsigned long arg, unsigned long val)
+{
+ return put_user(val, (unsigned long *)arg);
+}
+
+static int put_u64(unsigned long arg, u64 val)
+{
+ return put_user(val, (u64 *)arg);
+}
+
+int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd,
+ unsigned long arg)
+{
+ struct block_device *bdev = inode->i_bdev;
+ struct backing_dev_info *bdi;
+ int holder;
+ int ret, n;
+
+ switch (cmd) {
+ case BLKELVGET:
+ case BLKELVSET:
+ /* deprecated, use the /proc/iosched interface instead */
+ return -ENOTTY;
+ case BLKRAGET:
+ case BLKFRAGET:
+ if (!arg)
+ return -EINVAL;
+ bdi = blk_get_backing_dev_info(bdev);
+ if (bdi == NULL)
+ return -ENOTTY;
+ return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
+ case BLKROGET:
+ return put_int(arg, bdev_read_only(bdev) != 0);
+ case BLKBSZGET: /* get the logical block size (cf. BLKSSZGET) */
+ return put_int(arg, block_size(bdev));
+ case BLKSSZGET: /* get block device hardware sector size */
+ return put_int(arg, bdev_hardsect_size(bdev));
+ case BLKSECTGET:
+ return put_ushort(arg, bdev->bd_queue->max_sectors);
+ case BLKRASET:
+ case BLKFRASET:
+ if(!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ bdi = blk_get_backing_dev_info(bdev);
+ if (bdi == NULL)
+ return -ENOTTY;
+ bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
+ return 0;
+ case BLKBSZSET:
+ /* set the logical block size */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (!arg)
+ return -EINVAL;
+ if (get_user(n, (int *) arg))
+ return -EFAULT;
+ if (n > PAGE_SIZE || n < 512 || (n & (n - 1)))
+ return -EINVAL;
+ if (bd_claim(bdev, &holder) < 0)
+ return -EBUSY;
+ set_blocksize(bdev, n);
+ bd_release(bdev);
+ return 0;
+ case BLKPG:
+ return blkpg_ioctl(bdev, (struct blkpg_ioctl_arg *) arg);
+ case BLKRRPART:
+ return blkdev_reread_part(bdev);
+ case BLKGETSIZE:
+ if ((bdev->bd_inode->i_size >> 9) > ~0UL)
+ return -EFBIG;
+ return put_ulong(arg, bdev->bd_inode->i_size >> 9);
+ case BLKGETSIZE64:
+ return put_u64(arg, bdev->bd_inode->i_size);
+ case BLKFLSBUF:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (bdev->bd_op->ioctl) {
+ ret = bdev->bd_op->ioctl(inode, file, cmd, arg);
+ if (ret != -EINVAL)
+ return ret;
+ }
+ fsync_bdev(bdev);
+ invalidate_bdev(bdev, 0);
+ return 0;
+ case BLKROSET:
+ if (bdev->bd_op->ioctl) {
+ ret = bdev->bd_op->ioctl(inode, file, cmd, arg);
+ if (ret != -EINVAL)
+ return ret;
+ }
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (get_user(n, (int *)(arg)))
+ return -EFAULT;
+ set_device_ro(to_kdev_t(bdev->bd_dev), n);
+ return 0;
+ default:
+ if (bdev->bd_op->ioctl) {
+ ret = bdev->bd_op->ioctl(inode, file, cmd, arg);
+ if (ret != -EINVAL)
+ return ret;
+ }
+ }
+ return -ENOTTY;
+}
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index ea56c1d8456c..eb877e50a8d1 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -1427,7 +1427,19 @@ void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
int rw = rq_data_dir(rq);
unsigned int index;
- index = disk_index(rq->rq_dev);
+ if (!rq->rq_disk)
+ return;
+
+ if (rw == READ) {
+ rq->rq_disk->rio += new_io;
+ rq->rq_disk->reads += nr_sectors;
+ } else if (rw == WRITE) {
+ rq->rq_disk->wio += new_io;
+ rq->rq_disk->writes += nr_sectors;
+ }
+
+ index = rq->rq_disk->first_minor >> rq->rq_disk->minor_shift;
+
if ((index >= DK_MAX_DISK) || (major >= DK_MAX_MAJOR))
return;
@@ -1747,6 +1759,7 @@ get_rq:
req->waiting = NULL;
req->bio = req->biotail = bio;
req->rq_dev = to_kdev_t(bio->bi_bdev->bd_dev);
+ req->rq_disk = bio->bi_bdev->bd_disk;
add_request(q, req, insert_here);
out:
if (freereq)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index e39755017faf..14fa8720f8db 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1075,7 +1075,7 @@ int __init loop_init(void)
goto out_mem;
for (i = 0; i < max_loop; i++) {
- disks[i] = alloc_disk();
+ disks[i] = alloc_disk(1);
if (!disks[i])
goto out_mem2;
}
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index be27027d32b8..27726bd0246a 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -507,7 +507,7 @@ static int __init nbd_init(void)
}
for (i = 0; i < MAX_NBD; i++) {
- struct gendisk *disk = alloc_disk();
+ struct gendisk *disk = alloc_disk(1);
if (!disk)
goto out;
nbd_dev[i].disk = disk;
@@ -537,7 +537,6 @@ static int __init nbd_init(void)
nbd_bytesizes[i] = 0x7ffffc00; /* 2GB */
disk->major = MAJOR_NR;
disk->first_minor = i;
- disk->minor_shift = 0;
disk->fops = &nbd_fops;
sprintf(disk->disk_name, "nbd%d", i);
set_capacity(disk, 0x3ffffe);
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 0e4bac2bd1ef..95bedb2a580c 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -281,7 +281,7 @@ static void pcd_init_units(void)
pcd_drive_count = 0;
for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
- struct gendisk *disk = alloc_disk();
+ struct gendisk *disk = alloc_disk(1);
if (!disk)
continue;
cd->disk = disk;
@@ -303,7 +303,6 @@ static void pcd_init_units(void)
cd->info.mask = 0;
disk->major = major;
disk->first_minor = unit;
- disk->minor_shift = 0;
strcpy(disk->disk_name, cd->name); /* umm... */
disk->fops = &pcd_bdops;
}
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 7fdf4a3e4b2a..2278ee4928ab 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -350,9 +350,6 @@ static int pd_open(struct inode *inode, struct file *file)
int unit = DEVICE_NR(inode->i_rdev);
struct pd_unit *disk = pd + unit;
- if (unit >= PD_UNITS || !disk->present)
- return -ENODEV;
-
disk->access++;
if (disk->removable) {
@@ -703,14 +700,13 @@ static int pd_detect(void)
}
for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) {
if (disk->present) {
- struct gendisk *p = alloc_disk();
+ struct gendisk *p = alloc_disk(1 << PD_BITS);
if (!p) {
disk->present = 0;
k--;
continue;
}
strcpy(p->disk_name, disk->name);
- p->minor_shift = PD_BITS;
p->fops = &pd_fops;
p->major = major;
p->first_minor = unit << PD_BITS;
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index becf37efd5ec..9598323b5694 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -308,7 +308,7 @@ void pf_init_units(void)
pf_drive_count = 0;
for (unit = 0, pf = units; unit < PF_UNITS; unit++, pf++) {
- struct gendisk *disk = alloc_disk();
+ struct gendisk *disk = alloc_disk(1);
if (!disk)
continue;
pf->disk = disk;
@@ -320,7 +320,6 @@ void pf_init_units(void)
disk->major = MAJOR_NR;
disk->first_minor = unit;
strcpy(disk->disk_name, pf->name);
- disk->minor_shift = 0;
disk->fops = &pf_fops;
if (!(*drives[unit])[D_PRT])
pf_drive_count++;
@@ -332,9 +331,6 @@ static int pf_open(struct inode *inode, struct file *file)
int unit = DEVICE_NR(inode->i_rdev);
struct pf_unit *pf = units + unit;
- if ((unit >= PF_UNITS) || (!pf->present))
- return -ENODEV;
-
pf_identify(pf);
if (pf->media_status == PF_NM)
diff --git a/drivers/block/ps2esdi.c b/drivers/block/ps2esdi.c
index 770fbfd4613f..0015ef9a1cd2 100644
--- a/drivers/block/ps2esdi.c
+++ b/drivers/block/ps2esdi.c
@@ -89,9 +89,6 @@ static void (*current_int_handler) (u_int) = NULL;
static void ps2esdi_normal_interrupt_handler(u_int);
static void ps2esdi_initial_reset_int_handler(u_int);
static void ps2esdi_geometry_int_handler(u_int);
-
-static int ps2esdi_open(struct inode *inode, struct file *file);
-
static int ps2esdi_ioctl(struct inode *inode, struct file *file,
u_int cmd, u_long arg);
@@ -141,7 +138,6 @@ static struct ps2esdi_i_struct ps2esdi_info[MAX_HD] =
static struct block_device_operations ps2esdi_fops =
{
.owner = THIS_MODULE,
- .open = ps2esdi_open,
.ioctl = ps2esdi_ioctl,
};
@@ -421,13 +417,12 @@ static int __init ps2esdi_geninit(void)
error = -ENOMEM;
for (i = 0; i < ps2esdi_drives; i++) {
- struct gendisk *disk = alloc_disk();
+ struct gendisk *disk = alloc_disk(64);
if (!disk)
goto err_out4;
disk->major = MAJOR_NR;
disk->first_minor = i<<6;
sprintf(disk->disk_name, "ed%c", 'a'+i);
- disk->minor_shift = 6;
disk->fops = &ps2esdi_fops;
ps2esdi_gendisk[i] = disk;
}
@@ -1076,15 +1071,6 @@ static void dump_cmd_complete_status(u_int int_ret_code)
}
-
-static int ps2esdi_open(struct inode *inode, struct file *file)
-{
- int dev = DEVICE_NR(inode->i_rdev);
- if (dev >= ps2esdi_drives)
- return -ENODEV;
- return 0;
-}
-
static int ps2esdi_ioctl(struct inode *inode,
struct file *file, u_int cmd, u_long arg)
{
diff --git a/drivers/block/rd.c b/drivers/block/rd.c
index a0e60c5972a6..bbd247fa29dc 100644
--- a/drivers/block/rd.c
+++ b/drivers/block/rd.c
@@ -291,8 +291,6 @@ static int rd_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
if (cmd != BLKFLSBUF)
return -EINVAL;
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
/* special: we want to release the ramdisk memory,
it's not like with the other blockdevices where
this ioctl only flushes away the buffer cache. */
@@ -383,6 +381,7 @@ static int rd_open(struct inode * inode, struct file * filp)
rd_bdev[unit]->bd_inode->i_mapping->a_ops = &ramdisk_aops;
rd_bdev[unit]->bd_inode->i_size = rd_length[unit];
rd_bdev[unit]->bd_queue = &blk_dev[MAJOR_NR].request_queue;
+ rd_bdev[unit]->bd_disk = get_disk(rd_disks[unit]);
}
return 0;
@@ -431,17 +430,16 @@ static int __init rd_init (void)
}
#ifdef CONFIG_BLK_DEV_INITRD
- initrd_disk = alloc_disk();
+ initrd_disk = alloc_disk(1);
if (!initrd_disk)
return -ENOMEM;
initrd_disk->major = MAJOR_NR;
initrd_disk->first_minor = INITRD_MINOR;
- initrd_disk->minor_shift = 0;
initrd_disk->fops = &rd_bd_op;
sprintf(initrd_disk->disk_name, "initrd");
#endif
for (i = 0; i < NUM_RAMDISKS; i++) {
- rd_disks[i] = alloc_disk();
+ rd_disks[i] = alloc_disk(1);
if (!rd_disks[i])
goto out;
}
@@ -460,7 +458,6 @@ static int __init rd_init (void)
rd_length[i] = rd_size << 10;
disk->major = MAJOR_NR;
disk->first_minor = i;
- disk->minor_shift = 0;
disk->fops = &rd_bd_op;
sprintf(disk->disk_name, "rd%d", i);
set_capacity(disk, rd_size * 2);
diff --git a/drivers/block/scsi_ioctl.c b/drivers/block/scsi_ioctl.c
new file mode 100644
index 000000000000..1f7ef11b1c62
--- /dev/null
+++ b/drivers/block/scsi_ioctl.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public Licens
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
+ *
+ */
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/config.h>
+#include <linux/swap.h>
+#include <linux/init.h>
+#include <linux/smp_lock.h>
+#include <linux/module.h>
+#include <linux/blk.h>
+#include <linux/completion.h>
+#include <linux/cdrom.h>
+#include <linux/slab.h>
+
+#include <scsi/scsi.h>
+
+#include <asm/uaccess.h>
+
+int blk_do_rq(request_queue_t *q, struct request *rq)
+{
+ DECLARE_COMPLETION(wait);
+ int err = 0;
+
+ rq->flags |= REQ_NOMERGE;
+ rq->waiting = &wait;
+ elv_add_request(q, rq, 1);
+ generic_unplug_device(q);
+ wait_for_completion(&wait);
+
+ /*
+ * for now, never retry anything
+ */
+ if (rq->errors)
+ err = -EIO;
+
+ return err;
+}
+
+#include <scsi/sg.h>
+
+static int sg_get_version(int *p)
+{
+ static int sg_version_num = 30527;
+ return put_user(sg_version_num, p);
+}
+
+static int scsi_get_idlun(request_queue_t *q, int *p)
+{
+ return put_user(0, p);
+}
+
+static int scsi_get_bus(request_queue_t *q, int *p)
+{
+ return put_user(0, p);
+}
+
+static int sg_get_timeout(request_queue_t *q)
+{
+ return HZ;
+}
+
+static int sg_set_timeout(request_queue_t *q, int *p)
+{
+ int timeout;
+ int error = get_user(timeout, p);
+ return error;
+}
+
+static int reserved_size = 0;
+
+static int sg_get_reserved_size(request_queue_t *q, int *p)
+{
+ return put_user(reserved_size, p);
+}
+
+static int sg_set_reserved_size(request_queue_t *q, int *p)
+{
+ int size;
+ int error = get_user(size, p);
+ if (!error)
+ reserved_size = size;
+ return error;
+}
+
+static int sg_emulated_host(request_queue_t *q, int *p)
+{
+ return put_user(1, p);
+}
+
+static int sg_io(request_queue_t *q, struct sg_io_hdr *uptr)
+{
+ int err;
+ struct sg_io_hdr hdr;
+ struct request *rq;
+ void *buffer;
+
+ if (!access_ok(VERIFY_WRITE, uptr, sizeof(*uptr)))
+ return -EFAULT;
+ if (copy_from_user(&hdr, uptr, sizeof(*uptr)))
+ return -EFAULT;
+
+ if ( hdr.cmd_len > sizeof(rq->cmd) )
+ return -EINVAL;
+
+ buffer = NULL;
+ if (hdr.dxfer_len) {
+ unsigned int bytes = (hdr.dxfer_len + 511) & ~511;
+
+ switch (hdr.dxfer_direction) {
+ default:
+ return -EINVAL;
+ case SG_DXFER_TO_DEV:
+ case SG_DXFER_FROM_DEV:
+ case SG_DXFER_TO_FROM_DEV:
+ break;
+ }
+ buffer = kmalloc(bytes, GFP_USER);
+ if (!buffer)
+ return -ENOMEM;
+ if (hdr.dxfer_direction == SG_DXFER_TO_DEV ||
+ hdr.dxfer_direction == SG_DXFER_TO_FROM_DEV)
+ copy_from_user(buffer, hdr.dxferp, hdr.dxfer_len);
+ }
+
+ rq = blk_get_request(q, WRITE, __GFP_WAIT);
+ rq->timeout = 60*HZ;
+ rq->data = buffer;
+ rq->data_len = hdr.dxfer_len;
+ rq->flags = REQ_BLOCK_PC;
+ memset(rq->cmd, 0, sizeof(rq->cmd));
+ copy_from_user(rq->cmd, hdr.cmdp, hdr.cmd_len);
+ err = blk_do_rq(q, rq);
+
+ blk_put_request(rq);
+
+ copy_to_user(uptr, &hdr, sizeof(*uptr));
+ if (buffer) {
+ if (hdr.dxfer_direction == SG_DXFER_FROM_DEV ||
+ hdr.dxfer_direction == SG_DXFER_TO_FROM_DEV)
+ copy_to_user(hdr.dxferp, buffer, hdr.dxfer_len);
+ kfree(buffer);
+ }
+ return err;
+}
+
+int scsi_cmd_ioctl(struct block_device *bdev, unsigned int cmd, unsigned long arg)
+{
+ request_queue_t *q;
+ struct request *rq;
+ int close = 0, err;
+
+ q = bdev_get_queue(bdev);
+ if (!q)
+ return -ENXIO;
+
+ switch (cmd) {
+ case SG_GET_VERSION_NUM:
+ return sg_get_version((int *) arg);
+ case SCSI_IOCTL_GET_IDLUN:
+ return scsi_get_idlun(q, (int *) arg);
+ case SCSI_IOCTL_GET_BUS_NUMBER:
+ return scsi_get_bus(q, (int *) arg);
+ case SG_SET_TIMEOUT:
+ return sg_set_timeout(q, (int *) arg);
+ case SG_GET_TIMEOUT:
+ return sg_get_timeout(q);
+ case SG_GET_RESERVED_SIZE:
+ return sg_get_reserved_size(q, (int *) arg);
+ case SG_SET_RESERVED_SIZE:
+ return sg_set_reserved_size(q, (int *) arg);
+ case SG_EMULATED_HOST:
+ return sg_emulated_host(q, (int *) arg);
+ case SG_IO:
+ return sg_io(q, (struct sg_io_hdr *) arg);
+ case CDROMCLOSETRAY:
+ close = 1;
+ case CDROMEJECT:
+ rq = blk_get_request(q, WRITE, __GFP_WAIT);
+ rq->flags = REQ_BLOCK_PC;
+ rq->data = NULL;
+ rq->data_len = 0;
+ rq->timeout = 60*HZ;
+ memset(rq->cmd, 0, sizeof(rq->cmd));
+ rq->cmd[0] = GPCMD_START_STOP_UNIT;
+ rq->cmd[4] = 0x02 + (close != 0);
+ err = blk_do_rq(q, rq);
+ blk_put_request(rq);
+ break;
+ default:
+ err = -ENOTTY;
+ }
+
+ blk_put_queue(q);
+ return err;
+}
+
+EXPORT_SYMBOL(scsi_cmd_ioctl);
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index b1cb36f3ca5c..2a5f3afefbfa 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -1037,7 +1037,7 @@ int swim3_init(void)
return -ENODEV;
for (i = 0; i < floppy_count; i++) {
- disks[i] = alloc_disk();
+ disks[i] = alloc_disk(1);
if (!disks[i])
goto out;
}
diff --git a/drivers/block/swim_iop.c b/drivers/block/swim_iop.c
index 29c2f1696063..3ec747c3f80f 100644
--- a/drivers/block/swim_iop.c
+++ b/drivers/block/swim_iop.c
@@ -188,7 +188,7 @@ int swimiop_init(void)
printk("SWIM-IOP: detected %d installed drives.\n", floppy_count);
for (i = 0; i < floppy_count; i++) {
- struct gendisk *disk = alloc_disk();
+ struct gendisk *disk = alloc_disk(1);
if (!disk)
continue;
disk->major = MAJOR_NR;
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 53dfd2a7c624..4a2b212f1261 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -864,18 +864,6 @@ static int mm_check_change(kdev_t i_rdev)
return 0;
}
-
-/*
------------------------------------------------------------------------------------
--- mm_open
------------------------------------------------------------------------------------
-*/
-static int mm_open(struct inode *i, struct file *filp)
-{
- if (DEVICE_NR(i->i_rdev) >= num_cards)
- return -ENXIO;
- return 0;
-}
/*
-----------------------------------------------------------------------------------
-- mm_fops
@@ -883,7 +871,6 @@ static int mm_open(struct inode *i, struct file *filp)
*/
static struct block_device_operations mm_fops = {
owner: THIS_MODULE,
- open: mm_open,
ioctl: mm_ioctl,
revalidate: mm_revalidate,
check_media_change: mm_check_change,
@@ -1190,7 +1177,7 @@ int __init mm_init(void)
}
for (i = 0; i < num_cards; i++) {
- mm_gendisk[i] = alloc_disk();
+ mm_gendisk[i] = alloc_disk(1 << MM_SHIFT);
if (!mm_gendisk[i])
goto out;
}
@@ -1203,7 +1190,6 @@ int __init mm_init(void)
spin_lock_init(&cards[i].lock);
disk->major = major_nr;
disk->first_minor = i << MM_SHIFT;
- disk->minor_shift = MM_SHIFT;
disk->fops = &mm_fops;
set_capacity(disk, cards[i].mm_size << 1);
add_disk(disk);
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
index 3e3315e81bde..939901490525 100644
--- a/drivers/block/xd.c
+++ b/drivers/block/xd.c
@@ -130,7 +130,6 @@ static struct gendisk *xd_gendisk[2];
static struct block_device_operations xd_fops = {
owner: THIS_MODULE,
- open: xd_open,
ioctl: xd_ioctl,
};
static DECLARE_WAIT_QUEUE_HEAD(xd_wait_int);
@@ -205,12 +204,11 @@ static int __init xd_init(void)
goto out3;
for (i = 0; i < xd_drives; i++) {
- struct gendisk *disk = alloc_disk();
+ struct gendisk *disk = alloc_disk(64);
if (!disk)
goto Enomem;
disk->major = MAJOR_NR;
disk->first_minor = i<<6;
- disk->minor_shift = 6;
sprintf(disk->disk_name, "xd%c", i+'a');
disk->fops = &xd_fops;
xd_gendisk[i] = disk;
@@ -284,15 +282,6 @@ static u_char __init xd_detect (u_char *controller, unsigned int *address)
return (found);
}
-/* xd_open: open a device */
-static int xd_open (struct inode *inode,struct file *file)
-{
- int dev = DEVICE_NR(inode->i_rdev);
- if (dev >= xd_drives)
- return -ENXIO;
- return 0;
-}
-
/* do_xd_request: handle an incoming request */
static void do_xd_request (request_queue_t * q)
{
@@ -337,8 +326,6 @@ static void do_xd_request (request_queue_t * q)
static int xd_ioctl (struct inode *inode,struct file *file,u_int cmd,u_long arg)
{
int dev = DEVICE_NR(inode->i_rdev);
-
- if (dev >= xd_drives) return -EINVAL;
switch (cmd) {
case HDIO_GETGEO:
{
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 30625811de3e..edb2676680e3 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -365,14 +365,13 @@ z2_init( void )
MAJOR_NR );
return -EBUSY;
}
- z2ram_gendisk = alloc_disk();
+ z2ram_gendisk = alloc_disk(1);
if (!z2ram_gendisk) {
unregister_blkdev( MAJOR_NR, DEVICE_NAME );
return -ENOMEM;
}
z2ram_gendisk->major = MAJOR_NR;
z2ram_gendisk->first_minor = 0;
- z2ram_gendisk->minor_shift = 0;
z2ram_gendisk->fops = &z2_fops;
sprintf(z2ram_gendisk->disk_name, "z2ram");
diff --git a/drivers/bluetooth/Config.help b/drivers/bluetooth/Config.help
index 6196615c7ec2..212fec68fdb3 100644
--- a/drivers/bluetooth/Config.help
+++ b/drivers/bluetooth/Config.help
@@ -1,5 +1,5 @@
HCI UART driver
-CONFIG_BLUEZ_HCIUART
+CONFIG_BT_HCIUART
Bluetooth HCI UART driver.
This driver is required if you want to use Bluetooth devices with
serial port interface. You will also need this driver if you have
@@ -10,7 +10,7 @@ CONFIG_BLUEZ_HCIUART
kernel or say M to compile it as module (hci_uart.o).
HCI UART (H4) protocol support
-CONFIG_BLUEZ_HCIUART_H4
+CONFIG_BT_HCIUART_H4
UART (H4) is serial protocol for communication between Bluetooth
device and host. This protocol is required for most Bluetooth devices
with UART interface, including PCMCIA and CF cards.
@@ -18,7 +18,7 @@ CONFIG_BLUEZ_HCIUART_H4
Say Y here to compile support for HCI UART (H4) protocol.
HCI BCSP protocol support
-CONFIG_BLUEZ_HCIUART_BCSP
+CONFIG_BT_HCIUART_BCSP
BCSP (BlueCore Serial Protocol) is serial protocol for communication
between Bluetooth device and host. This protocol is required for non
USB Bluetooth devices based on CSR BlueCore chip, including PCMCIA and
@@ -27,7 +27,7 @@ CONFIG_BLUEZ_HCIUART_BCSP
Say Y here to compile support for HCI BCSP protocol.
HCI USB driver
-CONFIG_BLUEZ_HCIUSB
+CONFIG_BT_HCIUSB
Bluetooth HCI USB driver.
This driver is required if you want to use Bluetooth devices with
USB interface.
@@ -36,7 +36,7 @@ CONFIG_BLUEZ_HCIUSB
kernel or say M to compile it as module (hci_usb.o).
HCI USB zero packet support
-CONFIG_BLUEZ_USB_ZERO_PACKET
+CONFIG_BT_USB_ZERO_PACKET
Support for USB zero packets.
This option is provided only as a work around for buggy Bluetooth USB
devices. Do _not_ enable it unless you know for sure that your device
@@ -44,7 +44,7 @@ CONFIG_BLUEZ_USB_ZERO_PACKET
Most people should say N here.
HCI VHCI Virtual HCI device driver
-CONFIG_BLUEZ_HCIVHCI
+CONFIG_BT_HCIVHCI
Bluetooth Virtual HCI device driver.
This driver is required if you want to use HCI Emulation software.
@@ -52,7 +52,7 @@ CONFIG_BLUEZ_HCIVHCI
kernel or say M to compile it as module (hci_vhci.o).
HCI DTL1 (PC Card) device driver
-CONFIG_BLUEZ_HCIDTL1
+CONFIG_BT_HCIDTL1
Bluetooth HCI DTL1 (PC Card) driver.
This driver provides support for Bluetooth PCMCIA devices with
Nokia DTL1 interface:
@@ -63,7 +63,7 @@ CONFIG_BLUEZ_HCIDTL1
kernel or say M to compile it as module (dtl1_cs.o).
HCI BT3C (PC Card) device driver
-CONFIG_BLUEZ_HCIBT3C
+CONFIG_BT_HCIBT3C
Bluetooth HCI BT3C (PC Card) driver.
This driver provides support for Bluetooth PCMCIA devices with
3Com BT3C interface:
@@ -77,7 +77,7 @@ CONFIG_BLUEZ_HCIBT3C
kernel or say M to compile it as module (bt3c_cs.o).
HCI BlueCard (PC Card) device driver
-CONFIG_BLUEZ_HCIBLUECARD
+CONFIG_BT_HCIBLUECARD
Bluetooth HCI BlueCard (PC Card) driver.
This driver provides support for Bluetooth PCMCIA devices with
Anycom BlueCard interface:
diff --git a/drivers/bluetooth/Config.in b/drivers/bluetooth/Config.in
index a411065d23b7..46f37275ff76 100644
--- a/drivers/bluetooth/Config.in
+++ b/drivers/bluetooth/Config.in
@@ -1,23 +1,23 @@
mainmenu_option next_comment
comment 'Bluetooth device drivers'
-dep_tristate 'HCI USB driver' CONFIG_BLUEZ_HCIUSB $CONFIG_BLUEZ $CONFIG_USB
-if [ "$CONFIG_BLUEZ_HCIUSB" != "n" ]; then
- bool ' USB zero packet support' CONFIG_BLUEZ_USB_ZERO_PACKET
+dep_tristate 'HCI USB driver' CONFIG_BT_HCIUSB $CONFIG_BT $CONFIG_USB
+if [ "$CONFIG_BT_HCIUSB" != "n" ]; then
+ bool ' USB zero packet support' CONFIG_BT_USB_ZERO_PACKET
fi
-dep_tristate 'HCI UART driver' CONFIG_BLUEZ_HCIUART $CONFIG_BLUEZ
-if [ "$CONFIG_BLUEZ_HCIUART" != "n" ]; then
- bool ' UART (H4) protocol support' CONFIG_BLUEZ_HCIUART_H4
- bool ' BCSP protocol support' CONFIG_BLUEZ_HCIUART_BCSP
+dep_tristate 'HCI UART driver' CONFIG_BT_HCIUART $CONFIG_BT
+if [ "$CONFIG_BT_HCIUART" != "n" ]; then
+ bool ' UART (H4) protocol support' CONFIG_BT_HCIUART_H4
+ bool ' BCSP protocol support' CONFIG_BT_HCIUART_BCSP
fi
-dep_tristate 'HCI DTL1 (PC Card) driver' CONFIG_BLUEZ_HCIDTL1 $CONFIG_PCMCIA $CONFIG_BLUEZ
+dep_tristate 'HCI DTL1 (PC Card) driver' CONFIG_BT_HCIDTL1 $CONFIG_PCMCIA $CONFIG_BT
-dep_tristate 'HCI BT3C (PC Card) driver' CONFIG_BLUEZ_HCIBT3C $CONFIG_PCMCIA $CONFIG_BLUEZ
+dep_tristate 'HCI BT3C (PC Card) driver' CONFIG_BT_HCIBT3C $CONFIG_PCMCIA $CONFIG_BT
-dep_tristate 'HCI BlueCard (PC Card) driver' CONFIG_BLUEZ_HCIBLUECARD $CONFIG_PCMCIA $CONFIG_BLUEZ
+dep_tristate 'HCI BlueCard (PC Card) driver' CONFIG_BT_HCIBLUECARD $CONFIG_PCMCIA $CONFIG_BT
-dep_tristate 'HCI VHCI (Virtual HCI device) driver' CONFIG_BLUEZ_HCIVHCI $CONFIG_BLUEZ
+dep_tristate 'HCI VHCI (Virtual HCI device) driver' CONFIG_BT_HCIVHCI $CONFIG_BT
endmenu
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index 15e76c032f2c..1fbd6f052b07 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -2,16 +2,16 @@
# Makefile for the Linux Bluetooth HCI device drivers.
#
-obj-$(CONFIG_BLUEZ_HCIUSB) += hci_usb.o
-obj-$(CONFIG_BLUEZ_HCIVHCI) += hci_vhci.o
-obj-$(CONFIG_BLUEZ_HCIUART) += hci_uart.o
-obj-$(CONFIG_BLUEZ_HCIDTL1) += dtl1_cs.o
-obj-$(CONFIG_BLUEZ_HCIBT3C) += bt3c_cs.o
-obj-$(CONFIG_BLUEZ_HCIBLUECARD) += bluecard_cs.o
+obj-$(CONFIG_BT_HCIUSB) += hci_usb.o
+obj-$(CONFIG_BT_HCIVHCI) += hci_vhci.o
+obj-$(CONFIG_BT_HCIUART) += hci_uart.o
+obj-$(CONFIG_BT_HCIDTL1) += dtl1_cs.o
+obj-$(CONFIG_BT_HCIBT3C) += bt3c_cs.o
+obj-$(CONFIG_BT_HCIBLUECARD) += bluecard_cs.o
hci_uart-y := hci_ldisc.o
-hci_uart-$(CONFIG_BLUEZ_HCIUART_H4) += hci_h4.o
-hci_uart-$(CONFIG_BLUEZ_HCIUART_BCSP) += hci_bcsp.o
+hci_uart-$(CONFIG_BT_HCIUART_H4) += hci_h4.o
+hci_uart-$(CONFIG_BT_HCIUART_BCSP) += hci_bcsp.o
hci_uart-objs := $(hci_uart-y)
include $(TOPDIR)/Rules.make
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 296d674bc38a..67dcad9138da 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -60,7 +60,7 @@ MODULE_PARM(irq_mask, "i");
MODULE_PARM(irq_list, "1-4i");
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
-MODULE_DESCRIPTION("BlueZ driver for the Anycom BlueCard (LSE039/LSE041)");
+MODULE_DESCRIPTION("Bluetooth driver for the Anycom BlueCard (LSE039/LSE041)");
MODULE_LICENSE("GPL");
@@ -396,7 +396,7 @@ static void bluecard_receive(bluecard_info_t *info, unsigned int offset)
if (info->rx_skb == NULL) {
info->rx_state = RECV_WAIT_PACKET_TYPE;
info->rx_count = 0;
- if (!(info->rx_skb = bluez_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) {
+ if (!(info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) {
printk(KERN_WARNING "bluecard_cs: Can't allocate mem for new packet.\n");
return;
}
@@ -571,7 +571,7 @@ static int bluecard_hci_set_baud_rate(struct hci_dev *hdev, int baud)
/* Ericsson baud rate command */
unsigned char cmd[] = { HCI_COMMAND_PKT, 0x09, 0xfc, 0x01, 0x03 };
- if (!(skb = bluez_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) {
+ if (!(skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) {
printk(KERN_WARNING "bluecard_cs: Can't allocate mem for new packet.\n");
return -1;
}
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 517fd86199a4..460652790b07 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -72,7 +72,7 @@ MODULE_PARM(irq_mask, "i");
MODULE_PARM(irq_list, "1-4i");
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>, Jose Orlando Pereira <jop@di.uminho.pt>");
-MODULE_DESCRIPTION("BlueZ driver for the 3Com Bluetooth PCMCIA card");
+MODULE_DESCRIPTION("Bluetooth driver for the 3Com Bluetooth PCMCIA card");
MODULE_LICENSE("GPL");
@@ -264,7 +264,7 @@ static void bt3c_receive(bt3c_info_t *info)
if (info->rx_skb == NULL) {
info->rx_state = RECV_WAIT_PACKET_TYPE;
info->rx_count = 0;
- if (!(info->rx_skb = bluez_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) {
+ if (!(info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) {
printk(KERN_WARNING "bt3c_cs: Can't allocate mem for new packet.\n");
return;
}
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 80df236ec1dc..63de1e3fc4ac 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -66,7 +66,7 @@ MODULE_PARM(irq_mask, "i");
MODULE_PARM(irq_list, "1-4i");
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
-MODULE_DESCRIPTION("BlueZ driver for Nokia Connectivity Card DTL-1");
+MODULE_DESCRIPTION("Bluetooth driver for Nokia Connectivity Card DTL-1");
MODULE_LICENSE("GPL");
@@ -238,7 +238,7 @@ static void dtl1_receive(dtl1_info_t *info)
/* Allocate packet */
if (info->rx_skb == NULL)
- if (!(info->rx_skb = bluez_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) {
+ if (!(info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) {
printk(KERN_WARNING "dtl1_cs: Can't allocate mem for new packet.\n");
info->rx_state = RECV_WAIT_NSH;
info->rx_count = NSHL;
@@ -433,7 +433,7 @@ static int dtl1_hci_send_frame(struct sk_buff *skb)
nsh.zero = 0;
nsh.len = skb->len;
- s = bluez_skb_alloc(NSHL + skb->len + 1, GFP_ATOMIC);
+ s = bt_skb_alloc(NSHL + skb->len + 1, GFP_ATOMIC);
skb_reserve(s, NSHL);
memcpy(skb_put(s, skb->len), skb->data, skb->len);
if (skb->len & 0x0001)
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index b3f3c71c8287..bfd5401987ec 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -57,7 +57,7 @@
#include "hci_uart.h"
#include "hci_bcsp.h"
-#ifndef HCI_UART_DEBUG
+#ifndef CONFIG_BT_HCIUART_DEBUG
#undef BT_DBG
#define BT_DBG( A... )
#undef BT_DMP
@@ -176,7 +176,7 @@ static struct sk_buff *bcsp_prepare_pkt(struct bcsp_struct *bcsp, u8 *data,
u8 hdr[4], chan;
int rel, i;
-#ifdef CONFIG_BLUEZ_HCIUART_BCSP_TXCRC
+#ifdef CONFIG_BT_HCIUART_BCSP_TXCRC
u16 BCSP_CRC_INIT(bcsp_txmsg_crc);
#endif
@@ -228,7 +228,7 @@ static struct sk_buff *bcsp_prepare_pkt(struct bcsp_struct *bcsp, u8 *data,
BT_DBG("Sending packet with seqno %u", bcsp->msgq_txseq);
bcsp->msgq_txseq = ++(bcsp->msgq_txseq) & 0x07;
}
-#ifdef CONFIG_BLUEZ_HCIUART_BCSP_TXCRC
+#ifdef CONFIG_BT_HCIUART_BCSP_TXCRC
hdr[0] |= 0x40;
#endif
@@ -240,7 +240,7 @@ static struct sk_buff *bcsp_prepare_pkt(struct bcsp_struct *bcsp, u8 *data,
/* Put BCSP header */
for (i = 0; i < 4; i++) {
bcsp_slip_one_byte(nskb, hdr[i]);
-#ifdef CONFIG_BLUEZ_HCIUART_BCSP_TXCRC
+#ifdef CONFIG_BT_HCIUART_BCSP_TXCRC
bcsp_crc_update(&bcsp_txmsg_crc, hdr[i]);
#endif
}
@@ -248,12 +248,12 @@ static struct sk_buff *bcsp_prepare_pkt(struct bcsp_struct *bcsp, u8 *data,
/* Put payload */
for (i = 0; i < len; i++) {
bcsp_slip_one_byte(nskb, data[i]);
-#ifdef CONFIG_BLUEZ_HCIUART_BCSP_TXCRC
+#ifdef CONFIG_BT_HCIUART_BCSP_TXCRC
bcsp_crc_update(&bcsp_txmsg_crc, data[i]);
#endif
}
-#ifdef CONFIG_BLUEZ_HCIUART_BCSP_TXCRC
+#ifdef CONFIG_BT_HCIUART_BCSP_TXCRC
/* Put CRC */
bcsp_txmsg_crc = bcsp_crc_reverse(bcsp_txmsg_crc);
bcsp_slip_one_byte(nskb, (u8) ((bcsp_txmsg_crc >> 8) & 0x00ff));
@@ -611,7 +611,7 @@ static int bcsp_recv(struct hci_uart *hu, void *data, int count)
* Allocate packet. Max len of a BCSP pkt=
* 0xFFF (payload) +4 (header) +2 (crc) */
- bcsp->rx_skb = bluez_skb_alloc(0x1005, GFP_ATOMIC);
+ bcsp->rx_skb = bt_skb_alloc(0x1005, GFP_ATOMIC);
if (!bcsp->rx_skb) {
BT_ERR("Can't allocate mem for new packet");
bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 0e2a25e0526e..521eb19442be 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -23,7 +23,7 @@
*/
/*
- * BlueZ HCI UART(H4) protocol.
+ * Bluetooth HCI UART(H4) protocol.
*
* $Id: hci_h4.c,v 1.3 2002/09/09 01:17:32 maxk Exp $
*/
@@ -56,7 +56,7 @@
#include "hci_uart.h"
#include "hci_h4.h"
-#ifndef HCI_UART_DEBUG
+#ifndef CONFIG_BT_HCIUART_DEBUG
#undef BT_DBG
#define BT_DBG( A... )
#undef BT_DMP
@@ -160,7 +160,7 @@ static int h4_recv(struct hci_uart *hu, void *data, int count)
ptr = data;
while (count) {
if (h4->rx_count) {
- len = MIN(h4->rx_count, count);
+ len = min_t(unsigned int, h4->rx_count, count);
memcpy(skb_put(h4->rx_skb, len), ptr, len);
h4->rx_count -= len; count -= len; ptr += len;
@@ -238,7 +238,7 @@ static int h4_recv(struct hci_uart *hu, void *data, int count)
ptr++; count--;
/* Allocate packet */
- h4->rx_skb = bluez_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
+ h4->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
if (!h4->rx_skb) {
BT_ERR("Can't allocate mem for new packet");
h4->rx_state = H4_W4_PACKET_TYPE;
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 067a25c7fc1f..e5196a92b7ba 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -23,7 +23,7 @@
*/
/*
- * BlueZ HCI UART driver.
+ * Bluetooth HCI UART driver.
*
* $Id: hci_ldisc.c,v 1.5 2002/10/02 18:37:20 maxk Exp $
*/
@@ -55,7 +55,7 @@
#include <net/bluetooth/hci_core.h>
#include "hci_uart.h"
-#ifndef HCI_UART_DEBUG
+#ifndef CONFIG_BT_HCIUART_DEBUG
#undef BT_DBG
#define BT_DBG( A... )
#undef BT_DMP
@@ -507,11 +507,11 @@ static unsigned int hci_uart_tty_poll(struct tty_struct *tty, struct file *filp,
return 0;
}
-#ifdef CONFIG_BLUEZ_HCIUART_H4
+#ifdef CONFIG_BT_HCIUART_H4
int h4_init(void);
int h4_deinit(void);
#endif
-#ifdef CONFIG_BLUEZ_HCIUART_BCSP
+#ifdef CONFIG_BT_HCIUART_BCSP
int bcsp_init(void);
int bcsp_deinit(void);
#endif
@@ -521,7 +521,7 @@ int __init hci_uart_init(void)
static struct tty_ldisc hci_uart_ldisc;
int err;
- BT_INFO("BlueZ HCI UART driver ver %s Copyright (C) 2000,2001 Qualcomm Inc",
+ BT_INFO("Bluetooth HCI UART driver ver %s Copyright (C) 2000,2001 Qualcomm Inc",
VERSION);
BT_INFO("Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>");
@@ -545,10 +545,10 @@ int __init hci_uart_init(void)
return err;
}
-#ifdef CONFIG_BLUEZ_HCIUART_H4
+#ifdef CONFIG_BT_HCIUART_H4
h4_init();
#endif
-#ifdef CONFIG_BLUEZ_HCIUART_BCSP
+#ifdef CONFIG_BT_HCIUART_BCSP
bcsp_init();
#endif
@@ -559,10 +559,10 @@ void hci_uart_cleanup(void)
{
int err;
-#ifdef CONFIG_BLUEZ_HCIUART_H4
+#ifdef CONFIG_BT_HCIUART_H4
h4_deinit();
#endif
-#ifdef CONFIG_BLUEZ_HCIUART_BCSP
+#ifdef CONFIG_BT_HCIUART_BCSP
bcsp_deinit();
#endif
@@ -575,5 +575,5 @@ module_init(hci_uart_init);
module_exit(hci_uart_cleanup);
MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>");
-MODULE_DESCRIPTION("BlueZ HCI UART driver ver " VERSION);
+MODULE_DESCRIPTION("Bluetooth HCI UART driver ver " VERSION);
MODULE_LICENSE("GPL");
diff --git a/drivers/bluetooth/hci_usb.c b/drivers/bluetooth/hci_usb.c
index a94786f7d6ba..027b4463f3c9 100644
--- a/drivers/bluetooth/hci_usb.c
+++ b/drivers/bluetooth/hci_usb.c
@@ -23,7 +23,7 @@
*/
/*
- * BlueZ HCI USB driver.
+ * Bluetooth HCI USB driver.
* Based on original USB Bluetooth driver for Linux kernel
* Copyright (c) 2000 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (c) 2000 Mark Douglas Corner <mcorner@umich.edu>
@@ -59,14 +59,14 @@
#define HCI_MAX_PENDING (HCI_MAX_BULK_RX + HCI_MAX_BULK_TX + 1)
-#ifndef HCI_USB_DEBUG
+#ifndef CONFIG_BT_HCIUSB_DEBUG
#undef BT_DBG
#define BT_DBG( A... )
#undef BT_DMP
#define BT_DMP( A... )
#endif
-#ifndef CONFIG_BLUEZ_USB_ZERO_PACKET
+#ifndef CONFIG_BT_USB_ZERO_PACKET
#undef USB_ZERO_PACKET
#define USB_ZERO_PACKET 0
#endif
@@ -167,7 +167,7 @@ static int hci_usb_rx_submit(struct hci_usb *husb, struct urb *urb)
size = HCI_MAX_FRAME_SIZE;
- if (!(skb = bluez_skb_alloc(size, GFP_ATOMIC))) {
+ if (!(skb = bt_skb_alloc(size, GFP_ATOMIC))) {
usb_free_urb(urb);
return -ENOMEM;
}
@@ -465,7 +465,7 @@ static void hci_usb_interrupt(struct urb *urb)
if (count > len)
goto bad_len;
- skb = bluez_skb_alloc(len, GFP_ATOMIC);
+ skb = bt_skb_alloc(len, GFP_ATOMIC);
if (!skb) {
BT_ERR("%s no memory for event packet", husb->hdev.name);
goto done;
@@ -569,7 +569,7 @@ static void hci_usb_rx_complete(struct urb *urb)
if (count != size) {
BT_ERR("%s corrupted ACL packet: count %d, dlen %d",
husb->hdev.name, count, dlen);
- bluez_dump("hci_usb", skb->data, count);
+ bt_dump("hci_usb", skb->data, count);
husb->hdev.stat.err_rx++;
goto resubmit;
}
@@ -639,7 +639,7 @@ int hci_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
/* Find endpoints that we need */
- ifn = MIN(udev->actconfig->bNumInterfaces, HCI_MAX_IFACE_NUM);
+ ifn = min_t(unsigned int, udev->actconfig->bNumInterfaces, HCI_MAX_IFACE_NUM);
for (i = 0; i < ifn; i++) {
iface = &udev->actconfig->interface[i];
for (a = 0; a < iface->num_altsetting; a++) {
@@ -781,7 +781,7 @@ int hci_usb_init(void)
{
int err;
- BT_INFO("BlueZ HCI USB driver ver %s Copyright (C) 2000,2001 Qualcomm Inc",
+ BT_INFO("Bluetooth HCI USB driver ver %s Copyright (C) 2000,2001 Qualcomm Inc",
VERSION);
BT_INFO("Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>");
@@ -800,5 +800,5 @@ module_init(hci_usb_init);
module_exit(hci_usb_cleanup);
MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>");
-MODULE_DESCRIPTION("BlueZ HCI USB driver ver " VERSION);
+MODULE_DESCRIPTION("Bluetooth HCI USB driver ver " VERSION);
MODULE_LICENSE("GPL");
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index a3ca871e2049..a70eef0fd4d9 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -23,7 +23,7 @@
*/
/*
- * BlueZ HCI virtual device driver.
+ * Bluetooth HCI virtual device driver.
*
* $Id: hci_vhci.c,v 1.3 2002/04/17 17:37:20 maxk Exp $
*/
@@ -136,7 +136,7 @@ static inline ssize_t hci_vhci_get_user(struct hci_vhci_struct *hci_vhci, const
if (count > HCI_MAX_FRAME_SIZE)
return -EINVAL;
- if (!(skb = bluez_skb_alloc(count, GFP_KERNEL)))
+ if (!(skb = bt_skb_alloc(count, GFP_KERNEL)))
return -ENOMEM;
if (copy_from_user(skb_put(skb, count), buf, count)) {
@@ -172,7 +172,7 @@ static inline ssize_t hci_vhci_put_user(struct hci_vhci_struct *hci_vhci,
int len = count, total = 0;
char *ptr = buf;
- len = MIN(skb->len, len);
+ len = min_t(unsigned int, skb->len, len);
if (copy_to_user(ptr, skb->data, len))
return -EFAULT;
total += len;
@@ -331,7 +331,7 @@ static struct miscdevice hci_vhci_miscdev=
int __init hci_vhci_init(void)
{
- BT_INFO("BlueZ VHCI driver ver %s Copyright (C) 2000,2001 Qualcomm Inc",
+ BT_INFO("Bluetooth VHCI driver ver %s Copyright (C) 2000,2001 Qualcomm Inc",
VERSION);
BT_INFO("Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>");
@@ -352,5 +352,5 @@ module_init(hci_vhci_init);
module_exit(hci_vhci_cleanup);
MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>");
-MODULE_DESCRIPTION("BlueZ VHCI driver ver " VERSION);
+MODULE_DESCRIPTION("Bluetooth VHCI driver ver " VERSION);
MODULE_LICENSE("GPL");
diff --git a/drivers/cdrom/aztcd.c b/drivers/cdrom/aztcd.c
index 53f8fe2bafe2..b8e1880d8714 100644
--- a/drivers/cdrom/aztcd.c
+++ b/drivers/cdrom/aztcd.c
@@ -1908,7 +1908,7 @@ static int __init aztcd_init(void)
}
devfs_register(NULL, "aztcd", DEVFS_FL_DEFAULT, MAJOR_NR, 0,
S_IFBLK | S_IRUGO | S_IWUGO, &azt_fops, NULL);
- azt_disk = alloc_disk();
+ azt_disk = alloc_disk(1);
if (!azt_disk)
goto err_out2;
if (register_blkdev(MAJOR_NR, "aztcd", &azt_fops) != 0) {
@@ -1921,7 +1921,6 @@ static int __init aztcd_init(void)
blk_queue_hardsect_size(BLK_DEFAULT_QUEUE(MAJOR_NR), 2048);
azt_disk->major = MAJOR_NR;
azt_disk->first_minor = 0;
- azt_disk->minor_shift = 0;
azt_disk->fops = &azt_fops;
sprintf(azt_disk->disk_name, "aztcd");
add_disk(azt_disk);
diff --git a/drivers/cdrom/cdu31a.c b/drivers/cdrom/cdu31a.c
index 8863cb1254de..f4077094707a 100644
--- a/drivers/cdrom/cdu31a.c
+++ b/drivers/cdrom/cdu31a.c
@@ -3366,12 +3366,11 @@ int __init cdu31a_init(void)
goto errout2;
}
- disk = alloc_disk();
+ disk = alloc_disk(1);
if (!disk)
goto errout1;
disk->major = MAJOR_NR;
disk->first_minor = 0;
- disk->minor_shift = 0;
sprintf(disk->disk_name, "cdu31a");
disk->fops = &scd_bdops;
disk->flags = GENHD_FL_CD;
diff --git a/drivers/cdrom/cm206.c b/drivers/cdrom/cm206.c
index 0da8b3bcdf30..8a83a381bcc1 100644
--- a/drivers/cdrom/cm206.c
+++ b/drivers/cdrom/cm206.c
@@ -1470,12 +1470,11 @@ int __init cm206_init(void)
printk(KERN_INFO "Cannot register for major %d!\n", MAJOR_NR);
goto out_blkdev;
}
- disk = alloc_disk();
+ disk = alloc_disk(1);
if (!disk)
goto out_disk;
disk->major = MAJOR_NR;
disk->first_minor = 0;
- disk->minor_shift = 0;
sprintf(disk->disk_name, "cm206");
disk->fops = &cm206_bdops;
disk->flags = GENHD_FL_CD;
diff --git a/drivers/cdrom/gscd.c b/drivers/cdrom/gscd.c
index 9e8a14ce9374..d82b99f5a4b5 100644
--- a/drivers/cdrom/gscd.c
+++ b/drivers/cdrom/gscd.c
@@ -972,12 +972,11 @@ static int __init gscd_init(void)
i++;
}
- gscd_disk = alloc_disk();
+ gscd_disk = alloc_disk(1);
if (!gscd_disk)
goto err_out1;
gscd_disk->major = MAJOR_NR;
gscd_disk->first_minor = 0;
- gscd_disk->minor_shift = 0;
gscd_disk->fops = &gscd_fops;
sprintf(gscd_disk->disk_name, "gscd");
diff --git a/drivers/cdrom/mcd.c b/drivers/cdrom/mcd.c
index 39eff9436cbf..e6c72eabda52 100644
--- a/drivers/cdrom/mcd.c
+++ b/drivers/cdrom/mcd.c
@@ -1031,7 +1031,7 @@ static void mcd_release(struct cdrom_device_info *cdi)
int __init mcd_init(void)
{
- struct gendisk *disk = alloc_disk();
+ struct gendisk *disk = alloc_disk(1);
int count;
unsigned char result[3];
char msg[80];
@@ -1124,7 +1124,6 @@ int __init mcd_init(void)
disk->major = MAJOR_NR;
disk->first_minor = 0;
- disk->minor_shift = 0;
sprintf(disk->disk_name, "mcd");
disk->fops = &mcd_bdops;
disk->flags = GENHD_FL_CD;
diff --git a/drivers/cdrom/mcdx.c b/drivers/cdrom/mcdx.c
index 7b6aaace0be1..9747c15b926b 100644
--- a/drivers/cdrom/mcdx.c
+++ b/drivers/cdrom/mcdx.c
@@ -1076,7 +1076,7 @@ int __init mcdx_init_drive(int drive)
return 1;
}
- disk = alloc_disk();
+ disk = alloc_disk(1);
if (!disk) {
xwarn("init() malloc failed\n");
kfree(stuffp);
@@ -1221,7 +1221,6 @@ int __init mcdx_init_drive(int drive)
stuffp->info.dev = mk_kdev(MAJOR_NR, drive);
disk->major = MAJOR_NR;
disk->first_minor = drive;
- disk->minor_shift = 0;
strcpy(disk->disk_name, stuffp->info.name);
disk->fops = &mcdx_bdops;
disk->flags = GENHD_FL_CD;
diff --git a/drivers/cdrom/optcd.c b/drivers/cdrom/optcd.c
index baf39fd6f708..6abce539684e 100644
--- a/drivers/cdrom/optcd.c
+++ b/drivers/cdrom/optcd.c
@@ -2010,14 +2010,13 @@ static int __init optcd_init(void)
"optcd: no Optics Storage CDROM Initialization\n");
return -EIO;
}
- optcd_disk = alloc_disk();
+ optcd_disk = alloc_disk(1);
if (!optcd_disk) {
printk(KERN_ERR "optcd: can't allocate disk\n");
return -ENOMEM;
}
optcd_disk->major = MAJOR_NR;
optcd_disk->first_minor = 0;
- optcd_disk->minor_shift = 0;
optcd_disk->fops = &opt_fops;
sprintf(optcd_disk->disk_name, "optcd");
if (!request_region(optcd_port, 4, "optcd")) {
diff --git a/drivers/cdrom/sbpcd.c b/drivers/cdrom/sbpcd.c
index 409aea0c4f0f..22a4ca708c6f 100644
--- a/drivers/cdrom/sbpcd.c
+++ b/drivers/cdrom/sbpcd.c
@@ -5831,10 +5831,9 @@ int __init sbpcd_init(void)
sbpcd_infop->dev = mk_kdev(MAJOR_NR, j);
sbpcd_infop->handle = p;
p->sbpcd_infop = sbpcd_infop;
- disk = alloc_disk();
+ disk = alloc_disk(1);
disk->major = MAJOR_NR;
disk->first_minor = j;
- disk->minor_shift = 0;
disk->fops = &sbpcd_bdops;
strcpy(disk->disk_name, sbpcd_infop->name);
disk->flags = GENHD_FL_CD;
diff --git a/drivers/cdrom/sjcd.c b/drivers/cdrom/sjcd.c
index c04647548625..9dcdda8741b0 100644
--- a/drivers/cdrom/sjcd.c
+++ b/drivers/cdrom/sjcd.c
@@ -1689,14 +1689,13 @@ static int __init sjcd_init(void)
blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), do_sjcd_request, &sjcd_lock);
blk_queue_hardsect_size(BLK_DEFAULT_QUEUE(MAJOR_NR), 2048);
- sjcd_disk = alloc_disk();
+ sjcd_disk = alloc_disk(1);
if (!sjcd_disk) {
printk(KERN_ERR "SJCD: can't allocate disk");
goto out1;
}
sjcd_disk->major = MAJOR_NR,
sjcd_disk->first_minor = 0,
- sjcd_disk->minor_shift = 0,
sjcd_disk->fops = &sjcd_fops,
sprintf(sjcd_disk->disk_name, "sjcd");
diff --git a/drivers/cdrom/sonycd535.c b/drivers/cdrom/sonycd535.c
index d73013c02bad..68e8103a7223 100644
--- a/drivers/cdrom/sonycd535.c
+++ b/drivers/cdrom/sonycd535.c
@@ -1605,12 +1605,11 @@ static int __init sony535_init(void)
}
initialized = 1;
- cdu_disk = alloc_disk();
+ cdu_disk = alloc_disk(1);
if (!cdu_disk)
goto out6;
cdu_disk->major = MAJOR_NR;
cdu_disk->first_minor = 0;
- cdu_disk->minor_shift = 0;
cdu_disk->fops = &cdu_fops;
sprintf(cdu_disk->disk_name, "cdu");
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 012381dd73d9..8fffe423ab14 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -309,6 +309,8 @@
#include <linux/ide.h>
#include <linux/completion.h>
+#include <scsi/scsi.h> /* For SCSI -> ATAPI command conversion */
+
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/byteorder.h>
@@ -332,12 +334,12 @@ static void cdrom_saw_media_change (ide_drive_t *drive)
info->nsectors_buffered = 0;
}
-static int cdrom_log_sense(ide_drive_t *drive, struct packet_command *pc,
+static int cdrom_log_sense(ide_drive_t *drive, struct request *rq,
struct request_sense *sense)
{
int log = 0;
- if (sense == NULL || pc == NULL || pc->quiet)
+ if (!sense || !rq || (rq->flags & REQ_QUIET))
return 0;
switch (sense->sense_key) {
@@ -370,10 +372,9 @@ static int cdrom_log_sense(ide_drive_t *drive, struct packet_command *pc,
static
void cdrom_analyze_sense_data(ide_drive_t *drive,
- struct packet_command *failed_command,
+ struct request *failed_command,
struct request_sense *sense)
{
-
if (!cdrom_log_sense(drive, failed_command, sense))
return;
@@ -382,7 +383,7 @@ void cdrom_analyze_sense_data(ide_drive_t *drive,
* the first toc has not been recorded yet, it will fail with
* 05/24/00 (which is a confusing error)
*/
- if (failed_command && failed_command->c[0] == GPCMD_READ_TOC_PMA_ATIP)
+ if (failed_command && failed_command->cmd[0] == GPCMD_READ_TOC_PMA_ATIP)
if (sense->sense_key == 0x05 && sense->asc == 0x24)
return;
@@ -453,20 +454,20 @@ void cdrom_analyze_sense_data(ide_drive_t *drive,
while (hi > lo) {
mid = (lo + hi) / 2;
if (packet_command_texts[mid].packet_command ==
- failed_command->c[0]) {
+ failed_command->cmd[0]) {
s = packet_command_texts[mid].text;
break;
}
if (packet_command_texts[mid].packet_command >
- failed_command->c[0])
+ failed_command->cmd[0])
hi = mid;
else
lo = mid+1;
}
printk (" The failed \"%s\" packet command was: \n \"", s);
- for (i=0; i<sizeof (failed_command->c); i++)
- printk ("%02x ", failed_command->c[i]);
+ for (i=0; i<sizeof (failed_command->cmd); i++)
+ printk ("%02x ", failed_command->cmd[i]);
printk ("\"\n");
}
@@ -512,30 +513,39 @@ void cdrom_analyze_sense_data(ide_drive_t *drive,
#endif /* not VERBOSE_IDE_CD_ERRORS */
}
+/*
+ * Initialize a ide-cd packet command request
+ */
+static void cdrom_prepare_request(struct request *rq)
+{
+ ide_init_drive_cmd(rq);
+ rq->flags = REQ_PC;
+}
+
static void cdrom_queue_request_sense(ide_drive_t *drive,
struct completion *wait,
- struct request_sense *sense,
- struct packet_command *failed_command)
+ void *sense,
+ struct request *failed_command)
{
struct cdrom_info *info = drive->driver_data;
- struct packet_command *pc = &info->request_sense_pc;
- struct request *rq;
+ struct request *rq = &info->request_sense_request;
if (sense == NULL)
sense = &info->sense_data;
- memset(pc, 0, sizeof(struct packet_command));
- pc->c[0] = GPCMD_REQUEST_SENSE;
- pc->c[4] = pc->buflen = 18;
- pc->buffer = (char *) sense;
- pc->sense = (struct request_sense *) failed_command;
-
/* stuff the sense request in front of our current request */
- rq = &info->request_sense_request;
- ide_init_drive_cmd(rq);
+ cdrom_prepare_request(rq);
+
+ rq->data = sense;
+ rq->cmd[0] = GPCMD_REQUEST_SENSE;
+ rq->cmd[4] = rq->data_len = 18;
+
rq->flags = REQ_SENSE;
- rq->buffer = (char *) pc;
rq->waiting = wait;
+
+ /* NOTE! Save the failed command in "rq->buffer" */
+ rq->buffer = (void *) failed_command;
+
(void) ide_do_drive_cmd(drive, rq, ide_preempt);
}
@@ -630,17 +640,26 @@ static void cdrom_end_request (ide_drive_t *drive, int uptodate)
struct request *rq = HWGROUP(drive)->rq;
if ((rq->flags & REQ_SENSE) && uptodate) {
- struct packet_command *pc = (struct packet_command *) rq->buffer;
- cdrom_analyze_sense_data(drive,
- (struct packet_command *) pc->sense,
- (struct request_sense *) (pc->buffer - pc->c[4]));
+ /* For REQ_SENSE, "rq->buffer" points to the original failed request */
+ struct request *failed = (struct request *) rq->buffer;
+ struct cdrom_info *info = drive->driver_data;
+ void * sense = &info->sense_data;
+
+ if (failed && failed->sense)
+ sense = failed->sense;
+
+ cdrom_analyze_sense_data(drive, failed, sense);
}
+
if (blk_fs_request(rq) && !rq->current_nr_sectors)
uptodate = 1;
ide_end_request(drive, uptodate, rq->hard_cur_sectors);
}
+/* Handle differences between SCSI and ATAPI packet commands */
+static int pre_transform_command(struct request *);
+static void post_transform_command(struct request *);
/* Returns 0 if the request should be continued.
Returns 1 if the request was ended. */
@@ -649,7 +668,6 @@ static int cdrom_decode_status (ide_startstop_t *startstop, ide_drive_t *drive,
{
struct request *rq = HWGROUP(drive)->rq;
int stat, err, sense_key;
- struct packet_command *pc;
/* Check for errors. */
*stat_ret = stat = HWIF(drive)->INB(IDE_STATUS_REG);
@@ -672,16 +690,18 @@ static int cdrom_decode_status (ide_startstop_t *startstop, ide_drive_t *drive,
from the drive (probably while trying
to recover from a former error). Just give up. */
- pc = (struct packet_command *) rq->buffer;
- pc->stat = 1;
- cdrom_end_request(drive, 1);
+ rq->flags |= REQ_FAILED;
+ cdrom_end_request(drive, 0);
*startstop = DRIVER(drive)->error(drive, "request sense failure", stat);
return 1;
} else if (rq->flags & (REQ_PC | REQ_BLOCK_PC)) {
/* All other functions, except for READ. */
struct completion *wait = NULL;
- pc = (struct packet_command *) rq->buffer;
+
+ /* Fix up any SCSI command differences.. */
+ if (rq->flags & REQ_BLOCK_PC)
+ post_transform_command(rq);
/* Check for tray open. */
if (sense_key == NOT_READY) {
@@ -691,7 +711,7 @@ static int cdrom_decode_status (ide_startstop_t *startstop, ide_drive_t *drive,
cdrom_saw_media_change (drive);
/*printk("%s: media changed\n",drive->name);*/
return 0;
- } else if (!pc->quiet) {
+ } else if (!(rq->flags & REQ_QUIET)) {
/* Otherwise, print an error. */
ide_dump_status(drive, "packet command error", stat);
}
@@ -710,11 +730,11 @@ static int cdrom_decode_status (ide_startstop_t *startstop, ide_drive_t *drive,
rq->waiting = NULL;
}
- pc->stat = 1;
- cdrom_end_request(drive, 1);
+ rq->flags |= REQ_FAILED;
+ cdrom_end_request(drive, 0);
if ((stat & ERR_STAT) != 0)
- cdrom_queue_request_sense(drive, wait, pc->sense, pc);
+ cdrom_queue_request_sense(drive, wait, rq->sense, rq);
} else if (blk_fs_request(rq)) {
/* Handle errors from READ and WRITE requests. */
@@ -770,7 +790,6 @@ static int cdrom_decode_status (ide_startstop_t *startstop, ide_drive_t *drive,
static int cdrom_timer_expiry(ide_drive_t *drive)
{
struct request *rq = HWGROUP(drive)->rq;
- struct packet_command *pc = (struct packet_command *) rq->buffer;
unsigned long wait = 0;
/*
@@ -779,7 +798,7 @@ static int cdrom_timer_expiry(ide_drive_t *drive)
* this, but not all commands/drives support that. Let
* ide_timer_expiry keep polling us for these.
*/
- switch (pc->c[0]) {
+ switch (rq->cmd[0]) {
case GPCMD_BLANK:
case GPCMD_FORMAT_UNIT:
case GPCMD_RESERVE_RZONE_TRACK:
@@ -854,12 +873,12 @@ static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive,
* struct packet_command *pc; now packet_command_t *pc;
*/
static ide_startstop_t cdrom_transfer_packet_command (ide_drive_t *drive,
- struct packet_command *pc,
+ struct request *rq,
ide_handler_t *handler)
{
- unsigned char *cmd_buf = pc->c;
- int cmd_len = sizeof(pc->c);
- unsigned int timeout = pc->timeout;
+ unsigned char *cmd_buf = rq->cmd;
+ int cmd_len = sizeof(rq->cmd);
+ unsigned int timeout = rq->timeout;
struct cdrom_info *info = drive->driver_data;
ide_startstop_t startstop;
@@ -1029,6 +1048,7 @@ static ide_startstop_t cdrom_read_intr (ide_drive_t *drive)
if (rq->current_nr_sectors > 0) {
printk ("%s: cdrom_read_intr: data underrun (%d blocks)\n",
drive->name, rq->current_nr_sectors);
+ rq->flags |= REQ_FAILED;
cdrom_end_request(drive, 0);
} else
cdrom_end_request(drive, 1);
@@ -1179,7 +1199,6 @@ static int cdrom_read_from_buffer (ide_drive_t *drive)
*/
static ide_startstop_t cdrom_start_read_continuation (ide_drive_t *drive)
{
- struct packet_command pc;
struct request *rq = HWGROUP(drive)->rq;
int nsect, sector, nframes, frame, nskip;
@@ -1222,11 +1241,10 @@ static ide_startstop_t cdrom_start_read_continuation (ide_drive_t *drive)
(65534 / CD_FRAMESIZE) : 65535);
/* Set up the command */
- memcpy(pc.c, rq->cmd, sizeof(pc.c));
- pc.timeout = WAIT_CMD;
+ rq->timeout = WAIT_CMD;
/* Send the command to the drive and return. */
- return cdrom_transfer_packet_command(drive, &pc, &cdrom_read_intr);
+ return cdrom_transfer_packet_command(drive, rq, &cdrom_read_intr);
}
@@ -1262,7 +1280,6 @@ static ide_startstop_t cdrom_seek_intr (ide_drive_t *drive)
static ide_startstop_t cdrom_start_seek_continuation (ide_drive_t *drive)
{
- struct packet_command pc;
struct request *rq = HWGROUP(drive)->rq;
int sector, frame, nskip;
@@ -1273,11 +1290,11 @@ static ide_startstop_t cdrom_start_seek_continuation (ide_drive_t *drive)
frame = sector / SECTORS_PER_FRAME;
memset(rq->cmd, 0, sizeof(rq->cmd));
- pc.c[0] = GPCMD_SEEK;
- put_unaligned(cpu_to_be32(frame), (unsigned int *) &pc.c[2]);
+ rq->cmd[0] = GPCMD_SEEK;
+ put_unaligned(cpu_to_be32(frame), (unsigned int *) &rq->cmd[2]);
- pc.timeout = WAIT_CMD;
- return cdrom_transfer_packet_command(drive, &pc, &cdrom_seek_intr);
+ rq->timeout = WAIT_CMD;
+ return cdrom_transfer_packet_command(drive, rq, &cdrom_seek_intr);
}
static ide_startstop_t cdrom_start_seek (ide_drive_t *drive, unsigned int block)
@@ -1296,6 +1313,7 @@ static void restore_request (struct request *rq)
{
if (rq->buffer != bio_data(rq->bio)) {
sector_t n = (rq->buffer - (char *) bio_data(rq->bio)) / SECTOR_SIZE;
+
rq->buffer = bio_data(rq->bio);
rq->nr_sectors += n;
rq->sector -= n;
@@ -1352,7 +1370,6 @@ static ide_startstop_t cdrom_pc_intr (ide_drive_t *drive)
{
int ireason, len, stat, thislen;
struct request *rq = HWGROUP(drive)->rq;
- struct packet_command *pc = (struct packet_command *)rq->buffer;
ide_startstop_t startstop;
u8 lowcyl = 0, highcyl = 0;
@@ -1372,16 +1389,16 @@ static ide_startstop_t cdrom_pc_intr (ide_drive_t *drive)
if ((stat & DRQ_STAT) == 0) {
/* Some of the trailing request sense fields are optional, and
some drives don't send them. Sigh. */
- if (pc->c[0] == GPCMD_REQUEST_SENSE &&
- pc->buflen > 0 &&
- pc->buflen <= 5) {
- while (pc->buflen > 0) {
- *pc->buffer++ = 0;
- --pc->buflen;
+ if (rq->cmd[0] == GPCMD_REQUEST_SENSE &&
+ rq->data_len > 0 &&
+ rq->data_len <= 5) {
+ while (rq->data_len > 0) {
+ *(unsigned char *)rq->data++ = 0;
+ --rq->data_len;
}
}
- if (pc->buflen == 0)
+ if (rq->data_len == 0)
cdrom_end_request(drive, 1);
else {
/* Comment this out, because this always happens
@@ -1391,20 +1408,22 @@ static ide_startstop_t cdrom_pc_intr (ide_drive_t *drive)
printk ("%s: cdrom_pc_intr: data underrun %d\n",
drive->name, pc->buflen);
*/
- pc->stat = 1;
- cdrom_end_request(drive, 1);
+ rq->flags |= REQ_FAILED;
+ cdrom_end_request(drive, 0);
}
return ide_stopped;
}
/* Figure out how much data to transfer. */
- thislen = pc->buflen;
+ thislen = rq->data_len;
if (thislen > len) thislen = len;
/* The drive wants to be written to. */
if ((ireason & 3) == 0) {
+ if (!rq->data)
+ goto confused;
/* Transfer the data. */
- HWIF(drive)->atapi_output_bytes(drive, pc->buffer, thislen);
+ HWIF(drive)->atapi_output_bytes(drive, rq->data, thislen);
/* If we haven't moved enough data to satisfy the drive,
add some padding. */
@@ -1415,15 +1434,16 @@ static ide_startstop_t cdrom_pc_intr (ide_drive_t *drive)
}
/* Keep count of how much data we've moved. */
- pc->buffer += thislen;
- pc->buflen -= thislen;
+ rq->data += thislen;
+ rq->data_len -= thislen;
}
/* Same drill for reading. */
else if ((ireason & 3) == 2) {
-
+ if (!rq->data)
+ goto confused;
/* Transfer the data. */
- HWIF(drive)->atapi_input_bytes(drive, pc->buffer, thislen);
+ HWIF(drive)->atapi_input_bytes(drive, rq->data, thislen);
/* If we haven't moved enough data to satisfy the drive,
add some padding. */
@@ -1434,13 +1454,14 @@ static ide_startstop_t cdrom_pc_intr (ide_drive_t *drive)
}
/* Keep count of how much data we've moved. */
- pc->buffer += thislen;
- pc->buflen -= thislen;
+ rq->data += thislen;
+ rq->data_len -= thislen;
} else {
+confused:
printk ("%s: cdrom_pc_intr: The drive "
"appears confused (ireason = 0x%2x)\n",
drive->name, ireason);
- pc->stat = 1;
+ rq->flags |= REQ_FAILED;
}
if (HWGROUP(drive)->handler != NULL)
@@ -1455,13 +1476,12 @@ static ide_startstop_t cdrom_pc_intr (ide_drive_t *drive)
static ide_startstop_t cdrom_do_pc_continuation (ide_drive_t *drive)
{
struct request *rq = HWGROUP(drive)->rq;
- struct packet_command *pc = (struct packet_command *)rq->buffer;
- if (!pc->timeout)
- pc->timeout = WAIT_CMD;
+ if (!rq->timeout)
+ rq->timeout = WAIT_CMD;
/* Send the command to the drive and return. */
- return cdrom_transfer_packet_command(drive, pc, &cdrom_pc_intr);
+ return cdrom_transfer_packet_command(drive, rq, &cdrom_pc_intr);
}
@@ -1469,13 +1489,12 @@ static ide_startstop_t cdrom_do_packet_command (ide_drive_t *drive)
{
int len;
struct request *rq = HWGROUP(drive)->rq;
- struct packet_command *pc = (struct packet_command *)rq->buffer;
struct cdrom_info *info = drive->driver_data;
info->dma = 0;
info->cmd = 0;
- pc->stat = 0;
- len = pc->buflen;
+ rq->flags &= ~REQ_FAILED;
+ len = rq->data_len;
/* Start sending the command to the drive. */
return cdrom_start_packet_command(drive, len, cdrom_do_pc_continuation);
@@ -1496,28 +1515,31 @@ void cdrom_sleep (int time)
}
static
-int cdrom_queue_packet_command(ide_drive_t *drive, struct packet_command *pc)
+int cdrom_queue_packet_command(ide_drive_t *drive, struct request *rq)
{
struct request_sense sense;
- struct request req;
int retries = 10;
+ unsigned int flags = rq->flags;
- if (pc->sense == NULL)
- pc->sense = &sense;
+ if (rq->sense == NULL)
+ rq->sense = &sense;
/* Start of retry loop. */
do {
- ide_init_drive_cmd (&req);
- req.flags = REQ_PC;
- req.buffer = (char *)pc;
- ide_do_drive_cmd(drive, &req, ide_wait);
+ int error;
+ unsigned long time = jiffies;
+ rq->flags = flags;
+
+ error = ide_do_drive_cmd(drive, rq, ide_wait);
+ time = jiffies - time;
+
/* FIXME: we should probably abort/retry or something
* in case of failure */
- if (pc->stat != 0) {
+ if (rq->flags & REQ_FAILED) {
/* The request failed. Retry if it was due to a unit
attention status
(usually means media was changed). */
- struct request_sense *reqbuf = pc->sense;
+ struct request_sense *reqbuf = rq->sense;
if (reqbuf->sense_key == UNIT_ATTENTION)
cdrom_saw_media_change(drive);
@@ -1535,10 +1557,10 @@ int cdrom_queue_packet_command(ide_drive_t *drive, struct packet_command *pc)
}
/* End of retry loop. */
- } while (pc->stat != 0 && retries >= 0);
+ } while ((rq->flags & REQ_FAILED) && retries >= 0);
/* Return an error if the command failed. */
- return pc->stat ? -EIO : 0;
+ return (rq->flags & REQ_FAILED) ? -EIO : 0;
}
/*
@@ -1681,20 +1703,18 @@ static ide_startstop_t cdrom_write_intr(ide_drive_t *drive)
static ide_startstop_t cdrom_start_write_cont(ide_drive_t *drive)
{
- struct packet_command pc; /* packet_command_t pc; */
struct request *rq = HWGROUP(drive)->rq;
unsigned nframes, frame;
nframes = rq->nr_sectors >> 2;
frame = rq->sector >> 2;
- memcpy(pc.c, rq->cmd, sizeof(pc.c));
#if 0 /* the immediate bit */
- pc.c[1] = 1 << 3;
+ rq->cmd[1] = 1 << 3;
#endif
- pc.timeout = 2 * WAIT_CMD;
+ rq->timeout = 2 * WAIT_CMD;
- return cdrom_transfer_packet_command(drive, &pc, cdrom_write_intr);
+ return cdrom_transfer_packet_command(drive, rq, cdrom_write_intr);
}
static ide_startstop_t cdrom_start_write(ide_drive_t *drive, struct request *rq)
@@ -1728,20 +1748,54 @@ static ide_startstop_t cdrom_start_write(ide_drive_t *drive, struct request *rq)
return cdrom_start_packet_command(drive, 32768, cdrom_start_write_cont);
}
+/*
+ * Most of the SCSI commands are supported directly by ATAPI devices.
+ * This transform handles the few exceptions.
+ */
+static int pre_transform_command(struct request *req)
+{
+ u8 *c = req->cmd;
+ /* Transform 6-byte read/write commands to the 10-byte version. */
+ if (c[0] == READ_6 || c[0] == WRITE_6) {
+ c[8] = c[4];
+ c[5] = c[3];
+ c[4] = c[2];
+ c[3] = c[1] & 0x1f;
+ c[2] = 0;
+ c[1] &= 0xe0;
+ c[0] += (READ_10 - READ_6);
+ return 0;
+ }
+
+ /* These also need fixup, not done yet */
+ if (c[0] == MODE_SENSE || c[0] == MODE_SELECT)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void post_transform_command(struct request *req)
+{
+}
+
static ide_startstop_t cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
{
- struct packet_command pc;
ide_startstop_t startstop;
+ struct cdrom_info *info;
- memset(&pc, 0, sizeof(pc));
- memcpy(pc.c, rq->cmd, sizeof(pc.c));
- pc.quiet = 1;
- pc.timeout = 60 * HZ;
- rq->buffer = (char *) &pc;
+ if (pre_transform_command(rq) < 0) {
+ cdrom_end_request(drive, 0);
+ return ide_stopped;
+ }
+
+ rq->flags |= REQ_QUIET;
- startstop = cdrom_do_packet_command(drive);
- if (pc.stat)
- rq->errors++;
+ info = drive->driver_data;
+ info->dma = 0;
+ info->cmd = 0;
+
+ /* Start sending the command to the drive. */
+ startstop = cdrom_start_packet_command(drive, rq->data_len, cdrom_do_pc_continuation);
return startstop;
}
@@ -1757,11 +1811,11 @@ ide_do_rw_cdrom (ide_drive_t *drive, struct request *rq, sector_t block)
if (blk_fs_request(rq)) {
if (CDROM_CONFIG_FLAGS(drive)->seeking) {
- unsigned long elpased = jiffies - info->start_seek;
+ unsigned long elapsed = jiffies - info->start_seek;
int stat = HWIF(drive)->INB(IDE_STATUS_REG);
if ((stat & SEEK_STAT) != SEEK_STAT) {
- if (elpased < IDECD_SEEK_TIMEOUT) {
+ if (elapsed < IDECD_SEEK_TIMEOUT) {
ide_stall_queue(drive, IDECD_SEEK_TIMER);
return ide_stopped;
}
@@ -1781,14 +1835,14 @@ ide_do_rw_cdrom (ide_drive_t *drive, struct request *rq, sector_t block)
return action;
} else if (rq->flags & (REQ_PC | REQ_SENSE)) {
return cdrom_do_packet_command(drive);
+ } else if (rq->flags & REQ_BLOCK_PC) {
+ return cdrom_do_block_pc(drive, rq);
} else if (rq->flags & REQ_SPECIAL) {
/*
* right now this can only be a reset...
*/
cdrom_end_request(drive, 1);
return ide_stopped;
- } else if (rq->flags & REQ_BLOCK_PC) {
- return cdrom_do_block_pc(drive, rq);
}
blk_dump_rq_flags(rq, "ide-cd bad flags");
@@ -1853,23 +1907,23 @@ int msf_to_lba (byte m, byte s, byte f)
static int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense)
{
- struct packet_command pc;
+ struct request req;
struct cdrom_info *info = drive->driver_data;
struct cdrom_device_info *cdi = &info->devinfo;
- memset(&pc, 0, sizeof(pc));
- pc.sense = sense;
+ cdrom_prepare_request(&req);
- pc.c[0] = GPCMD_TEST_UNIT_READY;
+ req.sense = sense;
+ req.cmd[0] = GPCMD_TEST_UNIT_READY;
#if ! STANDARD_ATAPI
/* the Sanyo 3 CD changer uses byte 7 of TEST_UNIT_READY to
switch CDs instead of supporting the LOAD_UNLOAD opcode */
- pc.c[7] = cdi->sanyo_slot % 3;
+ req.cmd[7] = cdi->sanyo_slot % 3;
#endif /* not STANDARD_ATAPI */
- return cdrom_queue_packet_command(drive, &pc);
+ return cdrom_queue_packet_command(drive, &req);
}
@@ -1878,7 +1932,7 @@ static int
cdrom_lockdoor(ide_drive_t *drive, int lockflag, struct request_sense *sense)
{
struct request_sense my_sense;
- struct packet_command pc;
+ struct request req;
int stat;
if (sense == NULL)
@@ -1888,11 +1942,11 @@ cdrom_lockdoor(ide_drive_t *drive, int lockflag, struct request_sense *sense)
if (CDROM_CONFIG_FLAGS(drive)->no_doorlock) {
stat = 0;
} else {
- memset(&pc, 0, sizeof(pc));
- pc.sense = sense;
- pc.c[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
- pc.c[4] = lockflag ? 1 : 0;
- stat = cdrom_queue_packet_command(drive, &pc);
+ cdrom_prepare_request(&req);
+ req.sense = sense;
+ req.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
+ req.cmd[4] = lockflag ? 1 : 0;
+ stat = cdrom_queue_packet_command(drive, &req);
}
/* If we got an illegal field error, the drive
@@ -1922,7 +1976,7 @@ cdrom_lockdoor(ide_drive_t *drive, int lockflag, struct request_sense *sense)
static int cdrom_eject(ide_drive_t *drive, int ejectflag,
struct request_sense *sense)
{
- struct packet_command pc;
+ struct request req;
if (CDROM_CONFIG_FLAGS(drive)->no_eject && !ejectflag)
return -EDRIVE_CANT_DO_THIS;
@@ -1931,12 +1985,12 @@ static int cdrom_eject(ide_drive_t *drive, int ejectflag,
if (CDROM_STATE_FLAGS(drive)->door_locked && ejectflag)
return 0;
- memset(&pc, 0, sizeof (pc));
- pc.sense = sense;
+ cdrom_prepare_request(&req);
- pc.c[0] = GPCMD_START_STOP_UNIT;
- pc.c[4] = 0x02 + (ejectflag != 0);
- return cdrom_queue_packet_command(drive, &pc);
+ req.sense = sense;
+ req.cmd[0] = GPCMD_START_STOP_UNIT;
+ req.cmd[4] = 0x02 + (ejectflag != 0);
+ return cdrom_queue_packet_command(drive, &req);
}
static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
@@ -1948,16 +2002,16 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
} capbuf;
int stat;
- struct packet_command pc;
+ struct request req;
- memset(&pc, 0, sizeof(pc));
- pc.sense = sense;
+ cdrom_prepare_request(&req);
- pc.c[0] = GPCMD_READ_CDVD_CAPACITY;
- pc.buffer = (char *)&capbuf;
- pc.buflen = sizeof(capbuf);
+ req.sense = sense;
+ req.cmd[0] = GPCMD_READ_CDVD_CAPACITY;
+ req.data = (char *)&capbuf;
+ req.data_len = sizeof(capbuf);
- stat = cdrom_queue_packet_command(drive, &pc);
+ stat = cdrom_queue_packet_command(drive, &req);
if (stat == 0)
*capacity = 1 + be32_to_cpu(capbuf.lba);
@@ -1968,24 +2022,24 @@ static int cdrom_read_tocentry(ide_drive_t *drive, int trackno, int msf_flag,
int format, char *buf, int buflen,
struct request_sense *sense)
{
- struct packet_command pc;
+ struct request req;
- memset(&pc, 0, sizeof(pc));
- pc.sense = sense;
+ cdrom_prepare_request(&req);
- pc.buffer = buf;
- pc.buflen = buflen;
- pc.quiet = 1;
- pc.c[0] = GPCMD_READ_TOC_PMA_ATIP;
- pc.c[6] = trackno;
- pc.c[7] = (buflen >> 8);
- pc.c[8] = (buflen & 0xff);
- pc.c[9] = (format << 6);
+ req.sense = sense;
+ req.data = buf;
+ req.data_len = buflen;
+ req.flags |= REQ_QUIET;
+ req.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
+ req.cmd[6] = trackno;
+ req.cmd[7] = (buflen >> 8);
+ req.cmd[8] = (buflen & 0xff);
+ req.cmd[9] = (format << 6);
if (msf_flag)
- pc.c[1] = 2;
+ req.cmd[1] = 2;
- return cdrom_queue_packet_command(drive, &pc);
+ return cdrom_queue_packet_command(drive, &req);
}
@@ -2144,20 +2198,20 @@ static int cdrom_read_toc(ide_drive_t *drive, struct request_sense *sense)
static int cdrom_read_subchannel(ide_drive_t *drive, int format, char *buf,
int buflen, struct request_sense *sense)
{
- struct packet_command pc;
+ struct request req;
- memset(&pc, 0, sizeof(pc));
- pc.sense = sense;
+ cdrom_prepare_request(&req);
- pc.buffer = buf;
- pc.buflen = buflen;
- pc.c[0] = GPCMD_READ_SUBCHANNEL;
- pc.c[1] = 2; /* MSF addressing */
- pc.c[2] = 0x40; /* request subQ data */
- pc.c[3] = format;
- pc.c[7] = (buflen >> 8);
- pc.c[8] = (buflen & 0xff);
- return cdrom_queue_packet_command(drive, &pc);
+ req.sense = sense;
+ req.data = buf;
+ req.data_len = buflen;
+ req.cmd[0] = GPCMD_READ_SUBCHANNEL;
+ req.cmd[1] = 2; /* MSF addressing */
+ req.cmd[2] = 0x40; /* request subQ data */
+ req.cmd[3] = format;
+ req.cmd[7] = (buflen >> 8);
+ req.cmd[8] = (buflen & 0xff);
+ return cdrom_queue_packet_command(drive, &req);
}
/* ATAPI cdrom drives are free to select the speed you request or any slower
@@ -2165,45 +2219,45 @@ static int cdrom_read_subchannel(ide_drive_t *drive, int format, char *buf,
static int cdrom_select_speed(ide_drive_t *drive, int speed,
struct request_sense *sense)
{
- struct packet_command pc;
- memset(&pc, 0, sizeof(pc));
- pc.sense = sense;
+ struct request req;
+ cdrom_prepare_request(&req);
+ req.sense = sense;
if (speed == 0)
speed = 0xffff; /* set to max */
else
speed *= 177; /* Nx to kbytes/s */
- pc.c[0] = GPCMD_SET_SPEED;
+ req.cmd[0] = GPCMD_SET_SPEED;
/* Read Drive speed in kbytes/second MSB */
- pc.c[2] = (speed >> 8) & 0xff;
+ req.cmd[2] = (speed >> 8) & 0xff;
/* Read Drive speed in kbytes/second LSB */
- pc.c[3] = speed & 0xff;
+ req.cmd[3] = speed & 0xff;
if (CDROM_CONFIG_FLAGS(drive)->cd_r ||
CDROM_CONFIG_FLAGS(drive)->cd_rw ||
CDROM_CONFIG_FLAGS(drive)->dvd_r) {
/* Write Drive speed in kbytes/second MSB */
- pc.c[4] = (speed >> 8) & 0xff;
+ req.cmd[4] = (speed >> 8) & 0xff;
/* Write Drive speed in kbytes/second LSB */
- pc.c[5] = speed & 0xff;
+ req.cmd[5] = speed & 0xff;
}
- return cdrom_queue_packet_command(drive, &pc);
+ return cdrom_queue_packet_command(drive, &req);
}
static int cdrom_play_audio(ide_drive_t *drive, int lba_start, int lba_end)
{
struct request_sense sense;
- struct packet_command pc;
+ struct request req;
- memset(&pc, 0, sizeof (pc));
- pc.sense = &sense;
+ cdrom_prepare_request(&req);
- pc.c[0] = GPCMD_PLAY_AUDIO_MSF;
- lba_to_msf(lba_start, &pc.c[3], &pc.c[4], &pc.c[5]);
- lba_to_msf(lba_end-1, &pc.c[6], &pc.c[7], &pc.c[8]);
+ req.sense = &sense;
+ req.cmd[0] = GPCMD_PLAY_AUDIO_MSF;
+ lba_to_msf(lba_start, &req.cmd[3], &req.cmd[4], &req.cmd[5]);
+ lba_to_msf(lba_end-1, &req.cmd[6], &req.cmd[7], &req.cmd[8]);
- return cdrom_queue_packet_command(drive, &pc);
+ return cdrom_queue_packet_command(drive, &req);
}
static int cdrom_get_toc_entry(ide_drive_t *drive, int track,
@@ -2237,7 +2291,7 @@ static int cdrom_get_toc_entry(ide_drive_t *drive, int track,
static int ide_cdrom_packet(struct cdrom_device_info *cdi,
struct cdrom_generic_command *cgc)
{
- struct packet_command pc;
+ struct request req;
ide_drive_t *drive = (ide_drive_t*) cdi->handle;
if (cgc->timeout <= 0)
@@ -2246,18 +2300,21 @@ static int ide_cdrom_packet(struct cdrom_device_info *cdi,
/* here we queue the commands from the uniform CD-ROM
layer. the packet must be complete, as we do not
touch it at all. */
- memset(&pc, 0, sizeof(pc));
- memcpy(pc.c, cgc->cmd, CDROM_PACKET_SIZE);
+ cdrom_prepare_request(&req);
+ memcpy(req.cmd, cgc->cmd, CDROM_PACKET_SIZE);
if (cgc->sense)
memset(cgc->sense, 0, sizeof(struct request_sense));
- pc.buffer = cgc->buffer;
- pc.buflen = cgc->buflen;
- pc.quiet = cgc->quiet;
- pc.timeout = cgc->timeout;
- pc.sense = cgc->sense;
- cgc->stat = cdrom_queue_packet_command(drive, &pc);
+ req.data = cgc->buffer;
+ req.data_len = cgc->buflen;
+ req.timeout = cgc->timeout;
+
+ if (cgc->quiet)
+ req.flags |= REQ_QUIET;
+
+ req.sense = cgc->sense;
+ cgc->stat = cdrom_queue_packet_command(drive, &req);
if (!cgc->stat)
- cgc->buflen -= pc.buflen;
+ cgc->buflen -= req.data_len;
return cgc->stat;
}
@@ -2393,7 +2450,7 @@ int ide_cdrom_reset (struct cdrom_device_info *cdi)
struct request req;
int ret;
- ide_init_drive_cmd (&req);
+ cdrom_prepare_request(&req);
req.flags = REQ_SPECIAL;
ret = ide_do_drive_cmd(drive, &req, ide_wait);
@@ -2969,6 +3026,14 @@ int ide_cdrom_ioctl (ide_drive_t *drive,
struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
{
+ int error;
+
+ /* Try the generic SCSI command ioctl's first.. */
+ error = scsi_cmd_ioctl(inode->i_bdev, cmd, arg);
+ if (error != -ENOTTY)
+ return error;
+
+ /* Then the generic cdrom ioctl's.. */
return cdrom_ioctl(inode, file, cmd, arg);
}
@@ -3128,8 +3193,10 @@ static int ide_cdrom_attach (ide_drive_t *drive)
memset(info, 0, sizeof (struct cdrom_info));
drive->driver_data = info;
DRIVER(drive)->busy++;
+ g->minors = 1;
g->minor_shift = 0;
g->de = drive->de;
+ g->driverfs_dev = &drive->gendev;
g->flags = GENHD_FL_CD;
if (ide_cdrom_setup(drive)) {
struct cdrom_device_info *devinfo = &info->devinfo;
diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h
index 2455e002bfad..1126c6824bef 100644
--- a/drivers/ide/ide-cd.h
+++ b/drivers/ide/ide-cd.h
@@ -105,13 +105,6 @@ struct ide_cd_state_flags {
#define CDROM_STATE_FLAGS(drive) (&(((struct cdrom_info *)(drive->driver_data))->state_flags))
struct packet_command {
- char *buffer;
- int buflen;
- int stat;
- int quiet;
- int timeout;
- struct request_sense *sense;
- unsigned char c[12];
};
/* Structure of a MSF cdrom address. */
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 5ff3daf64280..aecd9a7de7ed 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -1871,8 +1871,10 @@ static int idedisk_attach(ide_drive_t *drive)
goto failed;
}
DRIVER(drive)->busy--;
+ g->minors = 1 << PARTN_BITS;
g->minor_shift = PARTN_BITS;
g->de = drive->de;
+ g->driverfs_dev = &drive->gendev;
g->flags = drive->removable ? GENHD_FL_REMOVABLE : 0;
g->flags |= GENHD_FL_DEVFS;
set_capacity(g, current_capacity(drive));
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 60e3aed69166..f10543ba3d8f 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -2108,7 +2108,9 @@ static int idefloppy_attach (ide_drive_t *drive)
DRIVER(drive)->busy++;
idefloppy_setup (drive, floppy);
DRIVER(drive)->busy--;
+ g->minors = 1 << PARTN_BITS;
g->minor_shift = PARTN_BITS;
+ g->driverfs_dev = &drive->gendev;
g->de = drive->de;
g->flags = drive->removable ? GENHD_FL_REMOVABLE : 0;
g->flags |= GENHD_FL_DEVFS;
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 6277ce3cb1e0..478bffc6aed8 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -986,7 +986,7 @@ static void init_gendisk (ide_hwif_t *hwif)
units = MAX_DRIVES;
for (unit = 0; unit < MAX_DRIVES; unit++) {
- disks[unit] = alloc_disk();
+ disks[unit] = alloc_disk(1 << PARTN_BITS);
if (!disks[unit])
goto err_kmalloc_gd;
}
@@ -996,7 +996,6 @@ static void init_gendisk (ide_hwif_t *hwif)
disk->major = hwif->major;
disk->first_minor = unit << PARTN_BITS;
sprintf(disk->disk_name,"hd%c",'a'+hwif->index*MAX_DRIVES+unit);
- disk->minor_shift = PARTN_BITS;
disk->fops = ide_fops;
hwif->drives[unit].disk = disk;
}
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index e2380bcb9fe8..00830680bb42 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -2639,7 +2639,7 @@ static int ide_ioctl (struct inode *inode, struct file *file,
case CDROMEJECT:
case CDROMCLOSETRAY:
- return block_ioctl(inode->i_bdev, cmd, arg);
+ return scsi_cmd_ioctl(inode->i_bdev, cmd, arg);
case HDIO_GET_BUSSTATE:
if (!capable(CAP_SYS_ADMIN))
diff --git a/drivers/ide/legacy/hd.c b/drivers/ide/legacy/hd.c
index b0f5f104876d..66ed54f354f7 100644
--- a/drivers/ide/legacy/hd.c
+++ b/drivers/ide/legacy/hd.c
@@ -677,21 +677,11 @@ static int hd_ioctl(struct inode * inode, struct file * file,
}
}
-static int hd_open(struct inode * inode, struct file * filp)
-{
- int target = DEVICE_NR(inode->i_rdev);
- if (target >= NR_HD)
- return -ENODEV;
- return 0;
-}
-
/*
* Releasing a block device means we sync() it, so that it can safely
* be forgotten about...
*/
-extern struct block_device_operations hd_fops;
-
static void hd_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
void (*handler)(void) = do_hd;
@@ -705,7 +695,6 @@ static void hd_interrupt(int irq, void *dev_id, struct pt_regs *regs)
}
static struct block_device_operations hd_fops = {
- .open = hd_open,
.ioctl = hd_ioctl,
};
@@ -802,12 +791,11 @@ static int __init hd_init(void)
goto out;
for (drive=0 ; drive < NR_HD ; drive++) {
- struct gendisk *disk = alloc_disk();
+ struct gendisk *disk = alloc_disk(64);
if (!disk)
goto Enomem;
disk->major = MAJOR_NR;
disk->first_minor = drive << 6;
- disk->minor_shift = 6;
disk->fops = &hd_fops;
sprintf(disk->disk_name, "hd%c", 'a'+drive);
hd_gendisk[drive] = disk;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index a40c6af55da5..784e3b69213e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1394,12 +1394,11 @@ static int do_md_run(mddev_t * mddev)
#endif
}
- disk = alloc_disk();
+ disk = alloc_disk(1);
if (!disk)
return -ENOMEM;
disk->major = MD_MAJOR;
disk->first_minor = mdidx(mddev);
- disk->minor_shift = 0;
sprintf(disk->disk_name, "md%d", mdidx(mddev));
disk->fops = &md_fops;
@@ -2732,18 +2731,9 @@ int unregister_md_personality(int pnum)
return 0;
}
-static unsigned int sync_io[DK_MAX_MAJOR][DK_MAX_DISK];
void md_sync_acct(mdk_rdev_t *rdev, unsigned long nr_sectors)
{
- kdev_t dev = to_kdev_t(rdev->bdev->bd_dev);
- unsigned int major = major(dev);
- unsigned int index;
-
- index = disk_index(dev);
- if ((index >= DK_MAX_DISK) || (major >= DK_MAX_MAJOR))
- return;
-
- sync_io[major][index] += nr_sectors;
+ rdev->bdev->bd_disk->sync_io += nr_sectors;
}
static int is_mddev_idle(mddev_t *mddev)
@@ -2755,16 +2745,8 @@ static int is_mddev_idle(mddev_t *mddev)
idle = 1;
ITERATE_RDEV(mddev,rdev,tmp) {
- kdev_t dev = to_kdev_t(rdev->bdev->bd_dev);
- int major = major(dev);
- int idx = disk_index(dev);
-
- if ((idx >= DK_MAX_DISK) || (major >= DK_MAX_MAJOR))
- continue;
-
- curr_events = kstat.dk_drive_rblk[major][idx] +
- kstat.dk_drive_wblk[major][idx] ;
- curr_events -= sync_io[major][idx];
+ struct gendisk *disk = rdev->bdev->bd_disk;
+ curr_events = disk->reads + disk->writes - disk->sync_io;
if ((curr_events - rdev->last_events) > 32) {
rdev->last_events = curr_events;
idle = 0;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 99a221c8ccda..761aed5551c8 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -162,6 +162,29 @@ static int create_strip_zones (mddev_t *mddev)
return 1;
}
+/**
+ * raid0_mergeable_bvec -- tell bio layer whether two requests can be merged
+ * @q: request queue
+ * @bio: the bio that's been built up so far
+ * @biovec: the request that could be merged to it.
+ *
+ * Return 1 if the merge is not permitted (because the
+ * result would cross a chunk boundary), 0 otherwise.
+ */
+static int raid0_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
+{
+ mddev_t *mddev = q->queuedata;
+ sector_t block;
+ unsigned int chunk_size;
+ unsigned int bio_sz;
+
+ chunk_size = mddev->chunk_size >> 10;
+ block = bio->bi_sector >> 1;
+ bio_sz = (bio->bi_size + biovec->bv_len) >> 10;
+
+ return chunk_size < ((block & (chunk_size - 1)) + bio_sz);
+}
+
static int raid0_run (mddev_t *mddev)
{
unsigned cur=0, i=0, nb_zone;
@@ -233,6 +256,8 @@ static int raid0_run (mddev_t *mddev)
conf->hash_table[i++].zone1 = conf->strip_zone + cur;
size -= (conf->smallest->size - zone0_size);
}
+ blk_queue_max_sectors(&mddev->queue, mddev->chunk_size >> 9);
+ blk_queue_merge_bvec(&mddev->queue, raid0_mergeable_bvec);
return 0;
out_free_zone_conf:
@@ -262,13 +287,6 @@ static int raid0_stop (mddev_t *mddev)
return 0;
}
-/*
- * FIXME - We assume some things here :
- * - requested buffers NEVER bigger than chunk size,
- * - requested buffers NEVER cross stripes limits.
- * Of course, those facts may not be valid anymore (and surely won't...)
- * Hey guys, there's some work out there ;-)
- */
static int raid0_make_request (request_queue_t *q, struct bio *bio)
{
mddev_t *mddev = q->queuedata;
@@ -286,13 +304,16 @@ static int raid0_make_request (request_queue_t *q, struct bio *bio)
{
+#if __GNUC__ < 3
+ volatile
+#endif
sector_t x = block;
sector_div(x, (unsigned long)conf->smallest->size);
hash = conf->hash_table + x;
}
- /* Sanity check */
- if (chunk_size < (block & (chunk_size - 1)) + (bio->bi_size >> 10))
+ /* Sanity check -- queue functions should prevent this happening */
+ if (unlikely(chunk_size < (block & (chunk_size - 1)) + (bio->bi_size >> 10)))
goto bad_map;
if (!hash)
diff --git a/drivers/media/video/bttv-driver.c b/drivers/media/video/bttv-driver.c
index 4b87c907f425..8eee8bc1e804 100644
--- a/drivers/media/video/bttv-driver.c
+++ b/drivers/media/video/bttv-driver.c
@@ -31,6 +31,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
+#include <linux/init.h>
#include <linux/kdev_t.h>
#include <asm/io.h>
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index 0980a0b775c6..b6f8af6193f1 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -1647,7 +1647,7 @@ static int i2o_block_init(void)
}
for (i = 0; i < MAX_I2OB; i++) {
- struct gendisk *disk = alloc_disk();
+ struct gendisk *disk = alloc_disk(16);
if (!disk)
goto oom;
i2o_disk[i] = disk;
@@ -1679,7 +1679,6 @@ static int i2o_block_init(void)
struct gendisk *disk = i2ob_disk + i;
disk->major = MAJOR_NR;
disk->first_minor = i<<4;
- disk->minor_shift = 4;
disk->fops = &i2ob_fops;
sprintf(disk->disk_name, "i2o/hd%c", 'a' + i);
}
diff --git a/drivers/mtd/Config.help b/drivers/mtd/Config.help
index 822dc7424d34..83e9b5c7d44b 100644
--- a/drivers/mtd/Config.help
+++ b/drivers/mtd/Config.help
@@ -21,36 +21,62 @@ CONFIG_MTD_PARTITIONS
devices. Partitioning on NFTL 'devices' is a different - that's the
'normal' form of partitioning used on a block device.
+CONFIG_MTD_CONCAT
+ Support for concatenating several MTD devices into a single
+ (virtual) one. This allows you to have -for example- a JFFS(2)
+ file system spanning multiple physical flash chips. If unsure,
+ say 'Y'.
+
CONFIG_MTD_REDBOOT_PARTS
RedBoot is a ROM monitor and bootloader which deals with multiple
- 'images' in flash devices by putting a table in the last erase block
- of the device, similar to a partition table, which gives the
- offsets, lengths and names of all the images stored in the flash.
+ 'images' in flash devices by putting a table in the last erase
+ block of the device, similar to a partition table, which gives
+ the offsets, lengths and names of all the images stored in the
+ flash.
If you need code which can detect and parse this table, and register
MTD 'partitions' corresponding to each image in the table, enable
- this option.
+ this option.
You will still need the parsing functions to be called by the driver
- for your particular device. It won't happen automatically. The
- SA1100 map driver (CONFIG_MTD_SA1100) has an option for this, for
+ for your particular device. It won't happen automatically. The
+ SA1100 map driver (CONFIG_MTD_SA1100) has an option for this, for
example.
-CONFIG_MTD_BOOTLDR_PARTS
- The Compaq bootldr deals with multiple 'images' in flash devices
- by putting a table in one of the first erase blocks of the device,
- similar to a partition table, which gives the offsets, lengths and
- names of all the images stored in the flash.
-
- If you need code which can detect and parse this table, and register
- MTD 'partitions' corresponding to each image in the table, enable
- this option.
-
+CONFIG_MTD_CMDLINE_PARTS
+ Allow generic configuration of the MTD partition tables via the kernel
+ command line. Multiple flash resources are supported for hardware where
+ different kinds of flash memory are available.
+
You will still need the parsing functions to be called by the driver
for your particular device. It won't happen automatically. The
SA1100 map driver (CONFIG_MTD_SA1100) has an option for this, for
example.
+ The format for the command line is as follows:
+
+ mtdparts=<mtddef>[;<mtddef>]
+ <mtddef> := <mtd-id>:<partdef>[,<partdef>]
+ <partdef> := <size>[@offset][<name>][ro]
+ <mtd-id> := unique id used in mapping driver/device
+ <size> := standard linux memsize OR "-" to denote all
+ remaining space
+ <name> := (NAME)
+
+ Due to the way Linux handles the command line, no spaces are
+ allowed in the partition definition, including mtd id's and partition
+ names.
+
+ Examples:
+
+ 1 flash resource (mtd-id "sa1100"), with 1 single writable partition:
+ mtdparts=sa1100:-
+
+ Same flash, but 2 named partitions, the first one being read-only:
+ mtdparts=sa1100:256k(ARMboot)ro,-(root)
+
+ If unsure, say 'N'.
+
CONFIG_MTD_AFS_PARTS
The ARM Firmware Suite allows the user to divide flash devices into
multiple 'images'. Each such image has a header containing its name
@@ -61,7 +87,7 @@ CONFIG_MTD_AFS_PARTS
enable this option.
You will still need the parsing functions to be called by the driver
- for your particular device. It won't happen automatically. The
+ for your particular device. It won't happen automatically. The
'armflash' map driver (CONFIG_MTD_ARMFLASH) does this, for example.
CONFIG_MTD_DEBUG_VERBOSE
diff --git a/drivers/mtd/Config.in b/drivers/mtd/Config.in
index 797f79667844..7e3d3ffd2983 100644
--- a/drivers/mtd/Config.in
+++ b/drivers/mtd/Config.in
@@ -1,5 +1,5 @@
-# $Id: Config.in,v 1.71 2001/10/03 11:38:38 dwmw2 Exp $
+# $Id: Config.in,v 1.74 2002/04/23 13:52:14 mag Exp $
mainmenu_option next_comment
comment 'Memory Technology Devices (MTD)'
@@ -12,9 +12,10 @@ if [ "$CONFIG_MTD" = "y" -o "$CONFIG_MTD" = "m" ]; then
int ' Debugging verbosity (0 = quiet, 3 = noisy)' CONFIG_MTD_DEBUG_VERBOSE 0
fi
dep_tristate ' MTD partitioning support' CONFIG_MTD_PARTITIONS $CONFIG_MTD
+ dep_tristate ' MTD concatenating support' CONFIG_MTD_CONCAT $CONFIG_MTD
dep_tristate ' RedBoot partition table parsing' CONFIG_MTD_REDBOOT_PARTS $CONFIG_MTD_PARTITIONS
+ dep_tristate ' Command line partition table parsing' CONFIG_MTD_CMDLINE_PARTS $CONFIG_MTD_PARTITIONS
if [ "$CONFIG_ARM" = "y" ]; then
- dep_tristate ' Compaq bootldr partition table parsing' CONFIG_MTD_BOOTLDR_PARTS $CONFIG_MTD_PARTITIONS
dep_tristate ' ARM Firmware Suite partition parsing' CONFIG_MTD_AFS_PARTS $CONFIG_MTD_PARTITIONS
fi
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 4b8108198e7e..7ec5dfbb2501 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -1,12 +1,12 @@
#
# Makefile for the memory technology device drivers.
#
-#
-# $Id: Makefile,v 1.63 2001/06/13 09:43:07 dwmw2 Exp $
+# Based on:
+# $Id: Makefile,v 1.66 2002/04/23 13:52:14 mag Exp $
-export-objs := mtdcore.o mtdpart.o redboot.o bootldr.o afs.o
+export-objs := mtdcore.o mtdpart.o redboot.o cmdline.o afs.o mtdconcat.o
-obj-y += chips/ maps/ devices/ nand/
+obj-y += chips/ maps/ devices/ nand/
# *** BIG UGLY NOTE ***
#
@@ -26,9 +26,10 @@ obj-y += chips/ maps/ devices/ nand/
# Core functionality.
obj-$(CONFIG_MTD) += mtdcore.o
+obj-$(CONFIG_MTD_CONCAT) += mtdconcat.o
obj-$(CONFIG_MTD_PARTITIONS) += mtdpart.o
obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
-obj-$(CONFIG_MTD_BOOTLDR_PARTS) += bootldr.o
+obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdline.o
obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
# 'Users' - code which presents functionality to userspace.
diff --git a/drivers/mtd/bootldr.c b/drivers/mtd/bootldr.c
deleted file mode 100644
index 43fcd6bea8b8..000000000000
--- a/drivers/mtd/bootldr.c
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * Read flash partition table from Compaq Bootloader
- *
- * Copyright 2001 Compaq Computer Corporation.
- *
- * $Id: bootldr.c,v 1.6 2001/10/02 15:05:11 dwmw2 Exp $
- *
- * Use consistent with the GNU GPL is permitted,
- * provided that this copyright notice is
- * preserved in its entirety in all copies and derived works.
- *
- * COMPAQ COMPUTER CORPORATION MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
- * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
- * FITNESS FOR ANY PARTICULAR PURPOSE.
- *
- */
-
-/*
- * Maintainer: Jamey Hicks (jamey.hicks@compaq.com)
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <asm/setup.h>
-#include <linux/bootmem.h>
-
-#define FLASH_PARTITION_NAMELEN 32
-enum LFR_FLAGS {
- LFR_SIZE_PREFIX = 1, /* prefix data with 4-byte size */
- LFR_PATCH_BOOTLDR = 2, /* patch bootloader's 0th instruction */
- LFR_KERNEL = 4, /* add BOOTIMG_MAGIC, imgsize and VKERNEL_BASE to head of programmed region (see bootldr.c) */
- LFR_EXPAND = 8 /* expand partition size to fit rest of flash */
-};
-
-// the tags are parsed too early to malloc or alloc_bootmem so we'll fix it
-// for now
-#define MAX_NUM_PARTITIONS 8
-typedef struct FlashRegion {
- char name[FLASH_PARTITION_NAMELEN];
- unsigned long base;
- unsigned long size;
- enum LFR_FLAGS flags;
-} FlashRegion;
-
-typedef struct BootldrFlashPartitionTable {
- int magic; /* should be filled with 0x646c7470 (btlp) BOOTLDR_PARTITION_MAGIC */
- int npartitions;
- struct FlashRegion partition[8];
-} BootldrFlashPartitionTable;
-
-#define BOOTLDR_MAGIC 0x646c7462 /* btld: marks a valid bootldr image */
-#define BOOTLDR_PARTITION_MAGIC 0x646c7470 /* btlp: marks a valid bootldr partition table in params sector */
-
-#define BOOTLDR_MAGIC_OFFSET 0x20 /* offset 0x20 into the bootldr */
-#define BOOTCAP_OFFSET 0X30 /* offset 0x30 into the bootldr */
-
-#define BOOTCAP_WAKEUP (1<<0)
-#define BOOTCAP_PARTITIONS (1<<1) /* partition table stored in params sector */
-#define BOOTCAP_PARAMS_AFTER_BOOTLDR (1<<2) /* params sector right after bootldr sector(s), else in last sector */
-
-static struct BootldrFlashPartitionTable Table;
-static struct BootldrFlashPartitionTable *partition_table = NULL;
-
-
-int parse_bootldr_partitions(struct mtd_info *master, struct mtd_partition **pparts)
-{
- struct mtd_partition *parts;
- int ret, retlen, i;
- int npartitions = 0;
- long partition_table_offset;
- long bootmagic = 0;
- long bootcap = 0;
- int namelen = 0;
-
- char *names;
-
-#if 0
- /* verify bootldr magic */
- ret = master->read(master, BOOTLDR_MAGIC_OFFSET, sizeof(long), &retlen, (void *)&bootmagic);
- if (ret)
- goto out;
- if (bootmagic != BOOTLDR_MAGIC)
- goto out;
- /* see if bootldr supports partition tables and where to find the partition table */
- ret = master->read(master, BOOTCAP_OFFSET, sizeof(long), &retlen, (void *)&bootcap);
- if (ret)
- goto out;
-
- if (!(bootcap & BOOTCAP_PARTITIONS))
- goto out;
- if (bootcap & BOOTCAP_PARAMS_AFTER_BOOTLDR)
- partition_table_offset = master->erasesize;
- else
- partition_table_offset = master->size - master->erasesize;
-
- printk(__FUNCTION__ ": partition_table_offset=%#lx\n", partition_table_offset);
- printk(__FUNCTION__ ": ptable_addr=%#lx\n", ptable_addr);
-
-
- /* Read the partition table */
- partition_table = (struct BootldrFlashPartitionTable *)kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!partition_table)
- return -ENOMEM;
-
- ret = master->read(master, partition_table_offset,
- PAGE_SIZE, &retlen, (void *)partition_table);
- if (ret)
- goto out;
-
-#endif
- if (!partition_table)
- return -ENOMEM;
-
-
- printk(__FUNCTION__ ": magic=%#x\n", partition_table->magic);
- printk(__FUNCTION__ ": numPartitions=%#x\n", partition_table->npartitions);
-
-
- /* check for partition table magic number */
- if (partition_table->magic != BOOTLDR_PARTITION_MAGIC)
- goto out;
- npartitions = (partition_table->npartitions > MAX_NUM_PARTITIONS)?
- MAX_NUM_PARTITIONS:partition_table->npartitions;
-
- printk(__FUNCTION__ ": npartitions=%#x\n", npartitions);
-
- for (i = 0; i < npartitions; i++) {
- namelen += strlen(partition_table->partition[i].name) + 1;
- }
-
- parts = kmalloc(sizeof(*parts)*npartitions + namelen, GFP_KERNEL);
- if (!parts) {
- ret = -ENOMEM;
- goto out;
- }
- names = (char *)&parts[npartitions];
- memset(parts, 0, sizeof(*parts)*npartitions + namelen);
-
-
-
- // from here we use the partition table
- for (i = 0; i < npartitions; i++) {
- struct FlashRegion *partition = &partition_table->partition[i];
- const char *name = partition->name;
- parts[i].name = names;
- names += strlen(name) + 1;
- strcpy(parts[i].name, name);
-
- if (partition->flags & LFR_EXPAND)
- parts[i].size = MTDPART_SIZ_FULL;
- else
- parts[i].size = partition->size;
- parts[i].offset = partition->base;
- parts[i].mask_flags = 0;
-
- printk(" partition %s o=%x s=%x\n",
- parts[i].name, parts[i].offset, parts[i].size);
-
- }
-
- ret = npartitions;
- *pparts = parts;
-
- out:
-#if 0
- if (partition_table)
- kfree(partition_table);
-#endif
-
- return ret;
-}
-
-
-static int __init parse_tag_ptable(const struct tag *tag)
-{
- char buf[128];
- int i;
- int j;
-
- partition_table = &Table;
-
-#ifdef CONFIG_DEBUG_LL
- sprintf(buf,"ptable: magic = = 0x%lx npartitions= %d \n",
- tag->u.ptable.magic,tag->u.ptable.npartitions);
- printascii(buf);
-
- for (i=0; i<tag->u.ptable.npartitions; i++){
- sprintf(buf,"ptable: partition name = %s base= 0x%lx size= 0x%lx flags= 0x%lx\n",
- (char *) (&tag->u.ptable.partition[i].name[0]),
- tag->u.ptable.partition[i].base,
- tag->u.ptable.partition[i].size,
- tag->u.ptable.partition[i].flags);
- printascii(buf);
- }
-#endif
-
- memcpy((void *)partition_table,(void *) (&(tag->u.ptable)),sizeof(partition_table) +
- sizeof(struct FlashRegion)*tag->u.ptable.npartitions);
-
-
- return 0;
-}
-
-__tagtable(ATAG_PTABLE, parse_tag_ptable);
-
-EXPORT_SYMBOL(parse_bootldr_partitions);
-
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Compaq Computer Corporation");
-MODULE_DESCRIPTION("Parsing code for Compaq bootldr partitions");
diff --git a/drivers/mtd/cmdline.c b/drivers/mtd/cmdline.c
new file mode 100644
index 000000000000..4d92157f46de
--- /dev/null
+++ b/drivers/mtd/cmdline.c
@@ -0,0 +1,343 @@
+/*
+ * $Id: cmdline.c,v 1.4 2002/09/13 01:18:38 jamey Exp $
+ *
+ * Read flash partition table from command line
+ *
+ * Copyright 2002 SYSGO Real-Time Solutions GmbH
+ *
+ * The format for the command line is as follows:
+ *
+ * mtdparts=<mtddef>[;<mtddef>]
+ * <mtddef> := <mtd-id>:<partdef>[,<partdef>]
+ * <partdef> := <size>[@offset][<name>][ro]
+ * <mtd-id> := unique id used in mapping driver/device
+ * <size> := standard linux memsize OR "-" to denote all remaining space
+ * <name> := '(' NAME ')'
+ *
+ * Examples:
+ *
+ * 1 NOR Flash, with 1 single writable partition:
+ * edb7312-nor:-
+ *
+ * 1 NOR Flash with 2 partitions, 1 NAND with one
+ * edb7312-nor:256k(ARMboot)ro,-(root);edb7312-nand:-(home)
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <asm/setup.h>
+#include <linux/bootmem.h>
+
+/* error message prefix */
+#define ERRP "mtd: "
+
+/* debug macro */
+#if 0
+#define dbg(x) do { printk("DEBUG-CMDLINE-PART: "); printk x; } while(0)
+#else
+#define dbg(x)
+#endif
+
+
+/* special size referring to all the remaining space in a partition */
+#define SIZE_REMAINING 0xffffffff
+
+struct cmdline_mtd_partition {
+ struct cmdline_mtd_partition *next;
+ char *mtd_id;
+ int num_parts;
+ struct mtd_partition *parts;
+};
+
+/* mtdpart_setup() parses into here */
+static struct cmdline_mtd_partition *partitions;
+
+/* the command line passed to mtdpart_setup() */
+static char *cmdline;
+static int cmdline_parsed = 0;
+
+/*
+ * Parse one partition definition for an MTD. Since there can be many
+ * comma separated partition definitions, this function calls itself
+ * recursively until no more partition definitions are found. Nice side
+ * effect: the memory to keep the mtd_partition structs and the names
+ * is allocated upon the last definition being found. At that point the
+ * syntax has been verified ok.
+ */
+static struct mtd_partition * newpart(char *s,
+ char **retptr,
+ int *num_parts,
+ int this_part,
+ unsigned char **extra_mem_ptr,
+ int extra_mem_size)
+{
+ struct mtd_partition *parts;
+ unsigned long size;
+ unsigned long offset = 0;
+ char *name;
+ int name_len;
+ unsigned char *extra_mem;
+ char delim;
+ unsigned int mask_flags;
+
+ /* fetch the partition size */
+ if (*s == '-')
+ { /* assign all remaining space to this partition */
+ size = SIZE_REMAINING;
+ s++;
+ }
+ else
+ {
+ size = memparse(s, &s);
+ if (size < PAGE_SIZE)
+ {
+ printk(KERN_ERR ERRP "partition size too small (%lx)\n", size);
+ return 0;
+ }
+ }
+
+ /* fetch partition name and flags */
+ mask_flags = 0; /* this is going to be a regular partition */
+ delim = 0;
+ /* check for offset */
+ if (*s == '@')
+ {
+ s++;
+ offset = memparse(s, &s);
+ }
+ /* now look for name */
+ if (*s == '(')
+ {
+ delim = ')';
+ }
+ if (delim)
+ {
+ char *p;
+
+ name = ++s;
+ if ((p = strchr(name, delim)) == 0)
+ {
+ printk(KERN_ERR ERRP "no closing %c found in partition name\n", delim);
+ return 0;
+ }
+ name_len = p - name;
+ s = p + 1;
+ }
+ else
+ {
+ name = NULL;
+ name_len = 13; /* Partition_000 */
+ }
+
+ /* record name length for memory allocation later */
+ extra_mem_size += name_len + 1;
+
+ /* test for options */
+ if (strncmp(s, "ro", 2) == 0)
+ {
+ mask_flags |= MTD_WRITEABLE;
+ s += 2;
+ }
+
+ /* test if more partitions are following */
+ if (*s == ',')
+ {
+ if (size == SIZE_REMAINING)
+ {
+ printk(KERN_ERR ERRP "no partitions allowed after a fill-up partition\n");
+ return 0;
+ }
+ /* more partitions follow, parse them */
+ if ((parts = newpart(s + 1, &s, num_parts,
+ this_part + 1, &extra_mem, extra_mem_size)) == 0)
+ return 0;
+ }
+ else
+ { /* this is the last partition: allocate space for all */
+ int alloc_size;
+
+ *num_parts = this_part + 1;
+ alloc_size = *num_parts * sizeof(struct mtd_partition) +
+ extra_mem_size;
+ parts = kmalloc(alloc_size, GFP_KERNEL);
+ if (!parts)
+ {
+ printk(KERN_ERR ERRP "out of memory\n");
+ return 0;
+ }
+ memset(parts, 0, alloc_size);
+ extra_mem = (unsigned char *)(parts + *num_parts);
+ }
+ /* enter this partition (offset will be calculated later if it is zero at this point) */
+ parts[this_part].size = size;
+ parts[this_part].offset = offset;
+ parts[this_part].mask_flags = mask_flags;
+ if (name)
+ {
+ strncpy(extra_mem, name, name_len);
+ extra_mem[name_len] = 0;
+ }
+ else
+ {
+ sprintf(extra_mem, "Partition_%03d", this_part);
+ }
+ parts[this_part].name = extra_mem;
+ extra_mem += name_len + 1;
+
+ dbg(("partition %d: name <%s>, offset %x, size %x, mask flags %x\n",
+ this_part,
+ parts[this_part].name,
+ parts[this_part].offset,
+ parts[this_part].size,
+ parts[this_part].mask_flags));
+
+ /* return (updated) pointer to extra_mem memory */
+ if (extra_mem_ptr)
+ *extra_mem_ptr = extra_mem;
+
+ /* return (updated) pointer command line string */
+ *retptr = s;
+
+ /* return partition table */
+ return parts;
+}
+
+/*
+ * Parse the command line.
+ */
+static int mtdpart_setup_real(char *s)
+{
+ cmdline_parsed = 1;
+
+ for( ; s != NULL; )
+ {
+ struct cmdline_mtd_partition *this_mtd;
+ struct mtd_partition *parts;
+ int mtd_id_len;
+ int num_parts;
+ char *p, *mtd_id;
+
+ mtd_id = s;
+ /* fetch <mtd-id> */
+ if (!(p = strchr(s, ':')))
+ {
+ printk(KERN_ERR ERRP "no mtd-id\n");
+ return 0;
+ }
+ mtd_id_len = p - mtd_id;
+
+ dbg(("parsing <%s>\n", p+1));
+
+ /*
+ * parse one mtd. have it reserve memory for the
+ * struct cmdline_mtd_partition and the mtd-id string.
+ */
+ parts = newpart(p + 1, /* cmdline */
+ &s, /* out: updated cmdline ptr */
+ &num_parts, /* out: number of parts */
+ 0, /* first partition */
+ (unsigned char**)&this_mtd, /* out: extra mem */
+ mtd_id_len + 1 + sizeof(*this_mtd));
+
+ /* enter results */
+ this_mtd->parts = parts;
+ this_mtd->num_parts = num_parts;
+ this_mtd->mtd_id = (char*)(this_mtd + 1);
+ strncpy(this_mtd->mtd_id, mtd_id, mtd_id_len);
+ this_mtd->mtd_id[mtd_id_len] = 0;
+
+ /* link into chain */
+ this_mtd->next = partitions;
+ partitions = this_mtd;
+
+ dbg(("mtdid=<%s> num_parts=<%d>\n",
+ this_mtd->mtd_id, this_mtd->num_parts));
+
+
+ /* EOS - we're done */
+ if (*s == 0)
+ break;
+
+ /* does another spec follow? */
+ if (*s != ';')
+ {
+ printk(KERN_ERR ERRP "bad character after partition (%c)\n", *s);
+ return 0;
+ }
+ s++;
+ }
+ return 1;
+}
+
+/*
+ * Main function to be called from the MTD mapping driver/device to
+ * obtain the partitioning information. At this point the command line
+ * arguments will actually be parsed and turned to struct mtd_partition
+ * information.
+ */
+int parse_cmdline_partitions(struct mtd_info *master,
+ struct mtd_partition **pparts,
+ const char *mtd_id)
+{
+ unsigned long offset;
+ int i;
+ struct cmdline_mtd_partition *part;
+
+ if (!cmdline)
+ return -EINVAL;
+
+ /* parse command line */
+ if (!cmdline_parsed)
+ mtdpart_setup_real(cmdline);
+
+ for(part = partitions; part; part = part->next)
+ {
+ if (!strcmp(part->mtd_id, mtd_id))
+ {
+ for(i = 0, offset = 0; i < part->num_parts; i++)
+ {
+ if (!part->parts[i].offset)
+ part->parts[i].offset = offset;
+ else
+ offset = part->parts[i].offset;
+ if (part->parts[i].size == SIZE_REMAINING)
+ part->parts[i].size = master->size - offset;
+ if (offset + part->parts[i].size > master->size)
+ {
+ printk(KERN_WARNING ERRP
+ "%s: partitioning exceeds flash size, truncating\n",
+ mtd_id);
+ part->parts[i].size = master->size - offset;
+ part->num_parts = i;
+ }
+ offset += part->parts[i].size;
+ }
+ *pparts = part->parts;
+ return part->num_parts;
+ }
+ }
+ return -EINVAL;
+}
+
+
+/*
+ * This is the handler for our kernel parameter, called from
+ * main.c::checksetup(). Note that we can not yet kmalloc() anything,
+ * so we only save the commandline for later processing.
+ */
+static int __init mtdpart_setup(char *s)
+{
+ cmdline = s;
+ return 1;
+}
+
+__setup("mtdparts=", mtdpart_setup);
+
+EXPORT_SYMBOL(parse_cmdline_partitions);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Marius Groeger <mag@sysgo.de>");
+MODULE_DESCRIPTION("Command line configuration of MTD partitions");
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index 341ad2252885..e40e34d3c7d6 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -1223,7 +1223,7 @@ static void ftl_notify_add(struct mtd_info *mtd)
}
partition = kmalloc(sizeof(partition_t), GFP_KERNEL);
- disk = alloc_disk();
+ disk = alloc_disk(1 << PART_BITS);
if (!partition||!disk) {
printk(KERN_WARNING "No memory to scan for FTL on %s\n",
@@ -1237,7 +1237,6 @@ static void ftl_notify_add(struct mtd_info *mtd)
sprintf(disk->disk_name, "ftl%c", 'a' + device);
disk->major = FTL_MAJOR;
disk->first_minor = device << 4;
- disk->minor_shift = PART_BITS;
disk->fops = &ftl_blk_fops;
partition->mtd = mtd;
partition->disk = disk;
diff --git a/drivers/mtd/maps/Config.help b/drivers/mtd/maps/Config.help
index aaf3a1aa894e..d4cc1af6505e 100644
--- a/drivers/mtd/maps/Config.help
+++ b/drivers/mtd/maps/Config.help
@@ -1,3 +1,32 @@
+CONFIG_MTD_CDB89712
+ This enables access to the flash or ROM chips on the CDB89712 board.
+ If you have such a board, say 'Y'.
+
+CONFIG_MTD_CEIVA
+ This enables access to the flash chips on the Ceiva/Polaroid
+ PhotoMax Digital Picture Frame.
+ If you have such a device, say 'Y'.
+
+CONFIG_MTD_FORTUNET
+ This enables access to the Flash on the FortuNet board. If you
+ have such a board, say 'Y'.
+
+CONFIG_MTD_AUTCPU12
+ This enables access to the NV-RAM on autronix autcpu12 board.
+ If you have such a board, say 'Y'.
+
+CONFIG_MTD_EDB7312
+ This enables access to the CFI Flash on the Cogent EDB7312 board.
+ If you have such a board, say 'Y' here.
+
+CONFIG_MTD_NAND_EDB7312
+ This enables access to the NAND Flash on the Cogent EDB7312 board.
+ If you have such a board, say 'Y' here.
+
+CONFIG_MTD_IMPA7
+ This enables access to the NOR Flash on the impA7 board of
+ implementa GmbH. If you have such a board, say 'Y' here.
+
CONFIG_MTD_SA1100
This enables access to the flash chips on most platforms based on
the SA1100 and SA1110, including the Assabet and the Compaq iPAQ.
@@ -39,6 +68,12 @@ CONFIG_MTD_SUN_UFLASH
CONFIG_MTD_NORA
If you had to ask, you don't have one. Say 'N'.
+CONFIG_MTD_L440GX
+ Support for treating the BIOS flash chip on Intel L440GX motherboards
+ as an MTD device - with this you can reprogram your BIOS.
+
+ BE VERY CAREFUL.
+
CONFIG_MTD_PNC2000
PNC-2000 is the name of Network Camera product from PHOTRON
Ltd. in Japan. It uses CFI-compliant flash.
@@ -50,6 +85,13 @@ CONFIG_MTD_RPXLITE
to communicate with the chips on the RPXLite board. More at
<http://www.embeddedplanet.com/rpx_lite_specification_sheet.htm>.
+CONFIG_MTD_TQM8XXL
+ The TQM8xxL PowerPC board has up to two banks of CFI-compliant
+ chips, currently uses AMD one. This 'mapping' driver supports
+ that arrangement, allowing the CFI probe and command set driver
+ code to communicate with the chips on the TQM8xxL board. More at
+ <http://www.denx.de/embedded-ppc-en.html>.
+
CONFIG_MTD_SC520CDP
The SC520 CDP board has two banks of CFI-compliant chips and one
Dual-in-line JEDEC chip. This 'mapping' driver supports that
@@ -59,7 +101,7 @@ CONFIG_MTD_SBC_GXX
This provides a driver for the on-board flash of Arcom Control
Systems' SBC-GXn family of boards, formerly known as SBC-MediaGX.
By default the flash is split into 3 partitions which are accessed
- as separate MTD devices. This board utilizes Intel StrataFlash.
+ as separate MTD devices. This board utilizes Intel StrataFlash.
More info at
<http://www.arcomcontrols.com/products/icp/pc104/processors/>.
@@ -78,6 +120,11 @@ CONFIG_MTD_NETSC520
demonstration board. If you have one of these boards and would like
to use the flash chips on it, say 'Y'.
+CONFIG_MTD_OCELOT
+ This enables access routines for the boot flash device and for the
+ NVRAM on the Momenco Ocelot board. If you have one of these boards
+ and would like access to either of these, say 'Y'.
+
CONFIG_MTD_ELAN_104NC
This provides a driver for the on-board flash of the Arcom Control
System's ELAN-104NC development board. By default the flash
@@ -91,17 +138,17 @@ CONFIG_MTD_DC21285
<http://developer.intel.com/design/bridge/quicklist/dsc-21285.htm>.
CONFIG_MTD_CSTM_MIPS_IXX
- This provides a mapping driver for the Integrated Tecnology Express,
- Inc (ITE) QED-4N-S01B eval board and the Globespan IVR Reference
- Board. It provides the necessary addressing, length, buswidth, vpp
- code and addition setup of the flash device for these boards. In
- addition, this mapping driver can be used for other boards via
- setting of the CONFIG_MTD_CSTM_MIPS_IXX_START/LEN/BUSWIDTH
- parameters. This mapping will provide one mtd device using one
- partition. The start address can be offset from the beginning of
- flash and the len can be less than the total flash device size to
- allow a window into the flash. Both CFI and JEDEC probes are
- called.
+ This provides a mapping driver for the Integrated Technology
+ Express, Inc (ITE) QED-4N-S01B eval board and the Globespan IVR
+ Reference Board. It provides the necessary addressing, length,
+ buswidth, vpp code and addition setup of the flash device for
+ these boards. In addition, this mapping driver can be used for
+ other boards via setting of the CONFIG_MTD_CSTM_MIPS_IXX_START/
+ LEN/BUSWIDTH parameters. This mapping will provide one mtd device
+ using one partition. The start address can be offset from the
+ beginning of flash and the len can be less than the total flash
+ device size to allow a window into the flash. Both CFI and JEDEC
+ probes are called.
CONFIG_MTD_CSTM_MIPS_IXX_START
This is the physical memory location that the MTD driver will
@@ -141,6 +188,11 @@ CONFIG_MTD_OCTAGON
Computer. More information on the board is available at
<http://www.octagonsystems.com/Products/5066/5066.html>.
+CONFIG_MTD_PCMCIA
+ Map driver for accessing PCMCIA linear flash memory cards. These
+ cards are usually around 4-16MiB in size. This does not include
+ Compact Flash cards which are treated as IDE devices.
+
CONFIG_MTD_VMAX
This provides a 'mapping' driver which supports the way in which
the flash chips are connected in the Tempustech VMAX SBC301 Single
@@ -148,32 +200,21 @@ CONFIG_MTD_VMAX
<http://www.tempustech.com/tt301.htm>.
CONFIG_MTD_CFI_FLAGADM
- Mapping for the Flaga digital module. If you don´t have one, ignore
+ Mapping for the Flaga digital module. If you don't have one, ignore
this setting.
-CONFIG_MTD_OCELOT
- This enables access routines for the boot flash device and for the
- NVRAM on the Momenco Ocelot board. If you have one of these boards
- and would like access to either of these, say 'Y'.
-
-CONFIG_MTD_CDB89712
- This enables access to the flash or ROM chips on the CDB89712 board.
- If you have such a board, say 'Y'.
-
-CONFIG_MTD_L440GX
- Support for treating the BIOS flash chip on Intel L440GX motherboards
- as an MTD device - with this you can reprogram your BIOS.
-
- BE VERY CAREFUL.
-
CONFIG_MTD_SOLUTIONENGINE
This enables access to the flash chips on the Hitachi SolutionEngine and
similar boards. Say 'Y' if you are building a kernel for such a board.
-CONFIG_MTD_TQM8XXL
- The TQM8xxL PowerPC board has up to two banks of CFI-compliant
- chips, currently uses AMD one. This 'mapping' driver supports
- that arrangement, allowing the CFI probe and command set driver
- code to communicate with the chips on the TQM8xxL board. More at
- <http://www.denx.de/embedded-ppc-en.html>.
+CONFIG_MTD_EPXA10DB
+ This enables support for the flash devices on the Altera
+ Excalibur XA10 Development Board. If you are building a kernel
+ for one of these boards then you should say 'Y', otherwise say 'N'.
+
+CONFIG_MTD_PCI
+ Mapping for accessing flash devices on add-in cards like the Intel XScale
+ IQ80310 card, and the Intel EBSA285 card in blank ROM programming mode
+ (please see the manual for the link settings).
+ If you are not sure, say N.
diff --git a/drivers/mtd/maps/Config.in b/drivers/mtd/maps/Config.in
index 7b4cbd4eda85..e0668372fa79 100644
--- a/drivers/mtd/maps/Config.in
+++ b/drivers/mtd/maps/Config.in
@@ -56,8 +56,18 @@ if [ "$CONFIG_ARM" = "y" ]; then
dep_tristate ' CFI Flash device mapped on ARM Integrator/P720T' CONFIG_MTD_ARM_INTEGRATOR $CONFIG_MTD_CFI
dep_tristate ' Cirrus CDB89712 evaluation board mappings' CONFIG_MTD_CDB89712 $CONFIG_MTD_CFI $CONFIG_ARCH_CDB89712
dep_tristate ' CFI Flash device mapped on StrongARM SA11x0' CONFIG_MTD_SA1100 $CONFIG_MTD_CFI $CONFIG_ARCH_SA1100 $CONFIG_MTD_PARTITIONS
- dep_tristate ' CFI Flash device mapped on DC21285 Footbridge' CONFIG_MTD_DC21285 $CONFIG_MTD_CFI $CONFIG_ARCH_FOOTBRIDGE $CONFIG_MTD_PARTITIONS
+ dep_tristate ' CFI Flash device mapped on DC21285 Footbridge' CONFIG_MTD_DC21285 $CONFIG_MTD_CFI $CONFIG_ARCH_FOOTBRIDGE
dep_tristate ' CFI Flash device mapped on the XScale IQ80310 board' CONFIG_MTD_IQ80310 $CONFIG_MTD_CFI $CONFIG_ARCH_IQ80310
+ dep_tristate ' CFI Flash device mapped on Epxa10db' CONFIG_MTD_EPXA10DB $CONFIG_MTD_CFI $CONFIG_MTD_PARTITIONS $CONFIG_ARCH_CAMELOT
+ dep_tristate ' CFI Flash device mapped on the FortuNet board' CONFIG_MTD_FORTUNET $CONFIG_MTD_CFI $CONFIG_MTD_PARTITIONS $CONFIG_SA1100_FORTUNET
+ dep_tristate ' NV-RAM mapping AUTCPU12 board' CONFIG_MTD_AUTCPU12 $CONFIG_ARCH_AUTCPU12
+ dep_tristate ' CFI Flash device mapped on EDB7312' CONFIG_MTD_EDB7312 $CONFIG_MTD_CFI
+ dep_tristate ' JEDEC Flash device mapped on impA7' CONFIG_MTD_IMPA7 $CONFIG_MTD_JEDECPROBE
+ dep_tristate ' JEDEC Flash device mapped on Ceiva/Polaroid PhotoMax Digital Picture Frame' CONFIG_MTD_CEIVA $CONFIG_MTD_JEDECPROBE $CONFIG_ARCH_CEIVA
fi
+# This needs CFI or JEDEC, depending on the cards found.
+dep_tristate ' PCI MTD driver' CONFIG_MTD_PCI $CONFIG_MTD $CONFIG_PCI
+dep_tristate ' PCMCIA MTD driver' CONFIG_MTD_PCMCIA $CONFIG_MTD $CONFIG_PCMCIA
+
endmenu
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index c0bdc2fa8f23..f4acee989d04 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -4,29 +4,37 @@
# $Id: Makefile,v 1.13 2001/08/16 15:16:58 rmk Exp $
# Chip mappings
-obj-$(CONFIG_MTD_CDB89712) += cdb89712.o
+obj-$(CONFIG_MTD_CDB89712) += cdb89712.o
obj-$(CONFIG_MTD_ARM_INTEGRATOR)+= integrator-flash.o
obj-$(CONFIG_MTD_CFI_FLAGADM) += cfi_flagadm.o
-obj-$(CONFIG_MTD_CSTM_MIPS_IXX) += cstm_mips_ixx.o
-obj-$(CONFIG_MTD_DC21285) += dc21285.o
-obj-$(CONFIG_MTD_ELAN_104NC) += elan-104nc.o
+obj-$(CONFIG_MTD_CSTM_MIPS_IXX) += cstm_mips_ixx.o
+obj-$(CONFIG_MTD_DC21285) += dc21285.o
+obj-$(CONFIG_MTD_ELAN_104NC) += elan-104nc.o
+obj-$(CONFIG_MTD_EPXA10DB) += epxa10db-flash.o
obj-$(CONFIG_MTD_IQ80310) += iq80310.o
obj-$(CONFIG_MTD_L440GX) += l440gx.o
obj-$(CONFIG_MTD_NORA) += nora.o
+obj-$(CONFIG_MTD_CEIVA) += ceiva.o
obj-$(CONFIG_MTD_OCTAGON) += octagon-5066.o
obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
obj-$(CONFIG_MTD_PNC2000) += pnc2000.o
+obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o
obj-$(CONFIG_MTD_RPXLITE) += rpxlite.o
obj-$(CONFIG_MTD_TQM8XXL) += tqm8xxl.o
-obj-$(CONFIG_MTD_SA1100) += sa1100-flash.o
+obj-$(CONFIG_MTD_SA1100) += sa1100-flash.o
obj-$(CONFIG_MTD_SBC_GXX) += sbc_gxx.o
obj-$(CONFIG_MTD_SC520CDP) += sc520cdp.o
obj-$(CONFIG_MTD_NETSC520) += netsc520.o
-obj-$(CONFIG_MTD_SUN_UFLASH) += sun_uflash.o
+obj-$(CONFIG_MTD_SUN_UFLASH) += sun_uflash.o
obj-$(CONFIG_MTD_VMAX) += vmax301.o
obj-$(CONFIG_MTD_SCx200_DOCFLASH)+= scx200_docflash.o
obj-$(CONFIG_MTD_DBOX2) += dbox2-flash.o
obj-$(CONFIG_MTD_OCELOT) += ocelot.o
obj-$(CONFIG_MTD_SOLUTIONENGINE)+= solutionengine.o
+obj-$(CONFIG_MTD_PCI) += pci.o
+obj-$(CONFIG_MTD_AUTCPU12) += autcpu12-nvram.o
+obj-$(CONFIG_MTD_EDB7312) += edb7312.o
+obj-$(CONFIG_MTD_IMPA7) += impa7.o
+obj-$(CONFIG_MTD_FORTUNET) += fortunet.o
include $(TOPDIR)/Rules.make
diff --git a/drivers/mtd/maps/autcpu12-nvram.c b/drivers/mtd/maps/autcpu12-nvram.c
new file mode 100644
index 000000000000..db78b01e6438
--- /dev/null
+++ b/drivers/mtd/maps/autcpu12-nvram.c
@@ -0,0 +1,179 @@
+/*
+ * NV-RAM memory access on autcpu12
+ * (C) 2002 Thomas Gleixner (gleixner@autronix.de)
+ *
+ * $Id: autcpu12-nvram.c,v 1.1 2002/02/22 09:30:24 gleixner Exp $
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <asm/io.h>
+#include <asm/sizes.h>
+#include <asm/hardware.h>
+#include <asm/arch/autcpu12.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+__u8 autcpu12_read8(struct map_info *map, unsigned long ofs)
+{
+ return __raw_readb(map->map_priv_1 + ofs);
+}
+
+__u16 autcpu12_read16(struct map_info *map, unsigned long ofs)
+{
+ return __raw_readw(map->map_priv_1 + ofs);
+}
+
+__u32 autcpu12_read32(struct map_info *map, unsigned long ofs)
+{
+ return __raw_readl(map->map_priv_1 + ofs);
+}
+
+void autcpu12_write8(struct map_info *map, __u8 d, unsigned long adr)
+{
+ __raw_writeb(d, map->map_priv_1 + adr);
+ mb();
+}
+
+void autcpu12_write16(struct map_info *map, __u16 d, unsigned long adr)
+{
+ __raw_writew(d, map->map_priv_1 + adr);
+ mb();
+}
+
+void autcpu12_write32(struct map_info *map, __u32 d, unsigned long adr)
+{
+ __raw_writel(d, map->map_priv_1 + adr);
+ mb();
+}
+
+void autcpu12_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
+{
+ memcpy_fromio(to, map->map_priv_1 + from, len);
+}
+
+void autcpu12_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+{
+ while(len) {
+ __raw_writeb(*(unsigned char *) from, map->map_priv_1 + to);
+ from++;
+ to++;
+ len--;
+ }
+}
+
+static struct mtd_info *sram_mtd;
+
+struct map_info autcpu12_sram_map = {
+ name: "SRAM",
+ size: 32768,
+ buswidth: 8,
+ read8: autcpu12_read8,
+ read16: autcpu12_read16,
+ read32: autcpu12_read32,
+ copy_from: autcpu12_copy_from,
+ write8: autcpu12_write8,
+ write16: autcpu12_write16,
+ write32: autcpu12_write32,
+ copy_to: autcpu12_copy_to
+};
+
+static int __init init_autcpu12_sram (void)
+{
+ int err, save0, save1;
+
+ autcpu12_sram_map.map_priv_1 = (unsigned long)ioremap(0x12000000, SZ_128K);
+ if (!autcpu12_sram_map.map_priv_1) {
+ printk("Failed to ioremap autcpu12 NV-RAM space\n");
+ err = -EIO;
+ goto out;
+ }
+
+ /*
+ * Check for 32K/128K
+ * read ofs 0
+ * read ofs 0x10000
+ * Write complement to ofs 0x10000
+ * Read and check result on ofs 0x0
+ * Restore contents
+ */
+ save0 = autcpu12_read32(&autcpu12_sram_map,0);
+ save1 = autcpu12_read32(&autcpu12_sram_map,0x10000);
+ autcpu12_write32(&autcpu12_sram_map,~save0,0x10000);
+ /* if we find this pattern on 0x0, we have 32K size
+ * restore contents and exit
+ */
+ if ( autcpu12_read32(&autcpu12_sram_map,0) != save0) {
+ autcpu12_write32(&autcpu12_sram_map,save0,0x0);
+ goto map;
+ }
+ /* We found a 128K device; restore 0x10000 and set size
+ * to 128K
+ */
+ autcpu12_write32(&autcpu12_sram_map,save1,0x10000);
+ autcpu12_sram_map.size = SZ_128K;
+
+map:
+ sram_mtd = do_map_probe("map_ram", &autcpu12_sram_map);
+ if (!sram_mtd) {
+ printk("NV-RAM probe failed\n");
+ err = -ENXIO;
+ goto out_ioremap;
+ }
+
+ sram_mtd->module = THIS_MODULE;
+ sram_mtd->erasesize = 16;
+
+ if (add_mtd_device(sram_mtd)) {
+ printk("NV-RAM device addition failed\n");
+ err = -ENOMEM;
+ goto out_probe;
+ }
+
+ printk("NV-RAM device size %ldK registered on AUTCPU12\n",autcpu12_sram_map.size/SZ_1K);
+
+ return 0;
+
+out_probe:
+ map_destroy(sram_mtd);
+ sram_mtd = 0;
+
+out_ioremap:
+ iounmap((void *)autcpu12_sram_map.map_priv_1);
+out:
+ return err;
+}
+
+static void __exit cleanup_autcpu12_maps(void)
+{
+ if (sram_mtd) {
+ del_mtd_device(sram_mtd);
+ map_destroy(sram_mtd);
+ iounmap((void *)autcpu12_sram_map.map_priv_1);
+ }
+}
+
+module_init(init_autcpu12_sram);
+module_exit(cleanup_autcpu12_maps);
+
+MODULE_AUTHOR("Thomas Gleixner");
+MODULE_DESCRIPTION("autcpu12 NV-RAM map driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/ceiva.c b/drivers/mtd/maps/ceiva.c
new file mode 100644
index 000000000000..259a9a8b76c0
--- /dev/null
+++ b/drivers/mtd/maps/ceiva.c
@@ -0,0 +1,408 @@
+/*
+ * Ceiva flash memory driver.
+ * Copyright (C) 2002 Rob Scott <rscott@mtrob.fdns.net>
+ *
+ * Note: this driver supports jedec compatible devices. Modification
+ * for CFI compatible devices should be straight forward: change
+ * jedec_probe to cfi_probe.
+ *
+ * Based on: sa1100-flash.c, which has the following copyright:
+ * Flash memory access on SA11x0 based devices
+ *
+ * (C) 2000 Nicolas Pitre <nico@cam.org>
+ *
+ * $Id: ceiva.c,v 1.2 2002/10/14 12:50:22 rmk Exp $
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/concat.h>
+
+#include <asm/hardware.h>
+#include <asm/mach-types.h>
+#include <asm/io.h>
+#include <asm/sizes.h>
+
+/*
+ * This isn't complete yet, so...
+ */
+#define CONFIG_MTD_CEIVA_STATICMAP
+
+static __u8 clps_read8(struct map_info *map, unsigned long ofs)
+{
+ return readb(map->map_priv_1 + ofs);
+}
+
+static __u16 clps_read16(struct map_info *map, unsigned long ofs)
+{
+ return readw(map->map_priv_1 + ofs);
+}
+
+static __u32 clps_read32(struct map_info *map, unsigned long ofs)
+{
+ return readl(map->map_priv_1 + ofs);
+}
+
+static void clps_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
+{
+ memcpy(to, (void *)(map->map_priv_1 + from), len);
+}
+
+static void clps_write8(struct map_info *map, __u8 d, unsigned long adr)
+{
+ writeb(d, map->map_priv_1 + adr);
+}
+
+static void clps_write16(struct map_info *map, __u16 d, unsigned long adr)
+{
+ writew(d, map->map_priv_1 + adr);
+}
+
+static void clps_write32(struct map_info *map, __u32 d, unsigned long adr)
+{
+ writel(d, map->map_priv_1 + adr);
+}
+
+static void clps_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+{
+ memcpy((void *)(map->map_priv_1 + to), from, len);
+}
+
+static struct map_info clps_map __initdata = {
+ name: "clps flash",
+ read8: clps_read8,
+ read16: clps_read16,
+ read32: clps_read32,
+ copy_from: clps_copy_from,
+ write8: clps_write8,
+ write16: clps_write16,
+ write32: clps_write32,
+ copy_to: clps_copy_to,
+};
+
+#ifdef CONFIG_MTD_CEIVA_STATICMAP
+/*
+ * See include/linux/mtd/partitions.h for definition of the mtd_partition
+ * structure.
+ *
+ * Please note:
+ * 1. The flash size given should be the largest flash size that can
+ * be accommodated.
+ *
+ * 2. The bus width must defined in clps_setup_flash.
+ *
+ * The MTD layer will detect flash chip aliasing and reduce the size of
+ * the map accordingly.
+ *
+ */
+
+#ifdef CONFIG_ARCH_CEIVA
+/* Flash / Partition sizing */
+/* For the 28F8003, we use the block mapping to calculate the sizes */
+#define MAX_SIZE_KiB (16 + 8 + 8 + 96 + (7*128))
+#define BOOT_PARTITION_SIZE_KiB (16)
+#define PARAMS_PARTITION_SIZE_KiB (8)
+#define KERNEL_PARTITION_SIZE_KiB (4*128)
+/* Use both remaining portion of first flash, and all of second flash */
+#define ROOT_PARTITION_SIZE_KiB (3*128) + (8*128)
+
+static struct mtd_partition ceiva_partitions[] = {
+ {
+ name: "Ceiva BOOT partition",
+ size: BOOT_PARTITION_SIZE_KiB*1024,
+ offset: 0,
+
+ },{
+ name: "Ceiva parameters partition",
+ size: PARAMS_PARTITION_SIZE_KiB*1024,
+ offset: (16 + 8) * 1024,
+ },{
+ name: "Ceiva kernel partition",
+ size: (KERNEL_PARTITION_SIZE_KiB)*1024,
+ offset: 0x20000,
+
+ },{
+ name: "Ceiva root filesystem partition",
+ offset: MTDPART_OFS_APPEND,
+ size: (ROOT_PARTITION_SIZE_KiB)*1024,
+ }
+};
+#endif
+
+static int __init clps_static_partitions(struct mtd_partition **parts)
+{
+ int nb_parts = 0;
+
+#ifdef CONFIG_ARCH_CEIVA
+ if (machine_is_ceiva()) {
+ *parts = ceiva_partitions;
+ nb_parts = ARRAY_SIZE(ceiva_partitions);
+ }
+#endif
+ return nb_parts;
+}
+#endif
+
+struct clps_info {
+ unsigned long base;
+ unsigned long size;
+ int width;
+ void *vbase;
+ struct map_info *map;
+ struct mtd_info *mtd;
+ struct resource *res;
+};
+
+#define NR_SUBMTD 4
+
+static struct clps_info info[NR_SUBMTD];
+
+static int __init clps_setup_mtd(struct clps_info *clps, int nr, struct mtd_info **rmtd)
+{
+ struct mtd_info *subdev[nr];
+ struct map_info *maps;
+ int i, found = 0, ret = 0;
+
+ /*
+ * Allocate the map_info structs in one go.
+ */
+ maps = kmalloc(sizeof(struct map_info) * nr, GFP_KERNEL);
+ if (!maps)
+ return -ENOMEM;
+
+ /*
+ * Claim and then map the memory regions.
+ */
+ for (i = 0; i < nr; i++) {
+ if (clps[i].base == (unsigned long)-1)
+ break;
+
+ clps[i].res = request_mem_region(clps[i].base, clps[i].size, "clps flash");
+ if (!clps[i].res) {
+ ret = -EBUSY;
+ break;
+ }
+
+ clps[i].map = maps + i;
+ memcpy(clps[i].map, &clps_map, sizeof(struct map_info));
+
+ clps[i].vbase = ioremap(clps[i].base, clps[i].size);
+ if (!clps[i].vbase) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ clps[i].map->map_priv_1 = (unsigned long)clps[i].vbase;
+ clps[i].map->buswidth = clps[i].width;
+ clps[i].map->size = clps[i].size;
+
+ clps[i].mtd = do_map_probe("jedec_probe", clps[i].map);
+ if (clps[i].mtd == NULL) {
+ ret = -ENXIO;
+ break;
+ }
+ clps[i].mtd->module = THIS_MODULE;
+ subdev[i] = clps[i].mtd;
+
+ printk(KERN_INFO "clps flash: JEDEC device at 0x%08lx, %dMiB, "
+ "%d-bit\n", clps[i].base, clps[i].mtd->size >> 20,
+ clps[i].width * 8);
+ found += 1;
+ }
+
+ /*
+ * ENXIO is special. It means we didn't find a chip when
+ * we probed. We need to tear down the mapping, free the
+ * resource and mark it as such.
+ */
+ if (ret == -ENXIO) {
+ iounmap(clps[i].vbase);
+ clps[i].vbase = NULL;
+ release_resource(clps[i].res);
+ clps[i].res = NULL;
+ }
+
+ /*
+ * If we found one device, don't bother with concat support.
+ * If we found multiple devices, use concat if we have it
+ * available, otherwise fail.
+ */
+ if (ret == 0 || ret == -ENXIO) {
+ if (found == 1) {
+ *rmtd = subdev[0];
+ ret = 0;
+ } else if (found > 1) {
+ /*
+ * We detected multiple devices. Concatenate
+ * them together.
+ */
+#ifdef CONFIG_MTD_CONCAT
+ *rmtd = mtd_concat_create(subdev, found,
+ "clps flash");
+ if (*rmtd == NULL)
+ ret = -ENXIO;
+#else
+ printk(KERN_ERR "clps flash: multiple devices "
+ "found but MTD concat support disabled.\n");
+ ret = -ENXIO;
+#endif
+ }
+ }
+
+ /*
+ * If we failed, clean up.
+ */
+ if (ret) {
+ do {
+ if (clps[i].mtd)
+ map_destroy(clps[i].mtd);
+ if (clps[i].vbase)
+ iounmap(clps[i].vbase);
+ if (clps[i].res)
+ release_resource(clps[i].res);
+ } while (i--);
+
+ kfree(maps);
+ }
+
+ return ret;
+}
+
+static void __exit clps_destroy_mtd(struct clps_info *clps, struct mtd_info *mtd)
+{
+ int i;
+
+ del_mtd_partitions(mtd);
+
+ if (mtd != clps[0].mtd)
+ mtd_concat_destroy(mtd);
+
+ for (i = NR_SUBMTD; i >= 0; i--) {
+ if (clps[i].mtd)
+ map_destroy(clps[i].mtd);
+ if (clps[i].vbase)
+ iounmap(clps[i].vbase);
+ if (clps[i].res)
+ release_resource(clps[i].res);
+ }
+ kfree(clps[0].map);
+}
+
+/*
+ * We define the memory space, size, and width for the flash memory
+ * space here.
+ */
+
+static int __init clps_setup_flash(void)
+{
+ int nr;
+
+#ifdef CONFIG_ARCH_CEIVA
+ if (machine_is_ceiva()) {
+ info[0].base = CS0_PHYS_BASE;
+ info[0].size = SZ_32M;
+ info[0].width = CEIVA_FLASH_WIDTH;
+ info[1].base = CS1_PHYS_BASE;
+ info[1].size = SZ_32M;
+ info[1].width = CEIVA_FLASH_WIDTH;
+ nr = 2;
+ }
+#endif
+ return nr;
+}
+
+extern int parse_redboot_partitions(struct mtd_info *master, struct mtd_partition **pparts);
+extern int parse_cmdline_partitions(struct mtd_info *master, struct mtd_partition **pparts, char *);
+
+static struct mtd_partition *parsed_parts;
+
+static void __init clps_locate_partitions(struct mtd_info *mtd)
+{
+ const char *part_type = NULL;
+ int nr_parts = 0;
+ do {
+ /*
+ * Partition selection stuff.
+ */
+#ifdef CONFIG_MTD_CMDLINE_PARTS
+ nr_parts = parse_cmdline_partitions(mtd, &parsed_parts, "clps");
+ if (nr_parts > 0) {
+ part_type = "command line";
+ break;
+ }
+#endif
+#ifdef CONFIG_MTD_REDBOOT_PARTS
+ nr_parts = parse_redboot_partitions(mtd, &parsed_parts);
+ if (nr_parts > 0) {
+ part_type = "RedBoot";
+ break;
+ }
+#endif
+#ifdef CONFIG_MTD_CEIVA_STATICMAP
+ nr_parts = clps_static_partitions(&parsed_parts);
+ if (nr_parts > 0) {
+ part_type = "static";
+ break;
+ }
+ printk("found: %d partitions\n", nr_parts);
+#endif
+ } while (0);
+
+ if (nr_parts == 0) {
+ printk(KERN_NOTICE "clps flash: no partition info "
+ "available, registering whole flash\n");
+ add_mtd_device(mtd);
+ } else {
+ printk(KERN_NOTICE "clps flash: using %s partition "
+ "definition\n", part_type);
+ add_mtd_partitions(mtd, parsed_parts, nr_parts);
+ }
+
+ /* Always succeeds. */
+}
+
+static void __exit clps_destroy_partitions(void)
+{
+ if (parsed_parts)
+ kfree(parsed_parts);
+}
+
+static struct mtd_info *mymtd;
+
+static int __init clps_mtd_init(void)
+{
+ int ret;
+ int nr;
+
+ nr = clps_setup_flash();
+ if (nr < 0)
+ return nr;
+
+ ret = clps_setup_mtd(info, nr, &mymtd);
+ if (ret)
+ return ret;
+
+ clps_locate_partitions(mymtd);
+
+ return 0;
+}
+
+static void __exit clps_mtd_cleanup(void)
+{
+ clps_destroy_mtd(info, mymtd);
+ clps_destroy_partitions();
+}
+
+module_init(clps_mtd_init);
+module_exit(clps_mtd_cleanup);
+
+MODULE_AUTHOR("Rob Scott");
+MODULE_DESCRIPTION("Cirrus Logic JEDEC map driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c
index e7eea7ef53b2..f030f3447302 100644
--- a/drivers/mtd/maps/dc21285.c
+++ b/drivers/mtd/maps/dc21285.c
@@ -5,9 +5,9 @@
*
* This code is GPL
*
- * $Id: dc21285.c,v 1.6 2001/10/02 15:05:14 dwmw2 Exp $
+ * $Id: dc21285.c,v 1.9 2002/10/14 12:22:10 rmk Exp $
*/
-
+#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
@@ -44,15 +44,15 @@ void dc21285_copy_from(struct map_info *map, void *to, unsigned long from, ssize
void dc21285_write8(struct map_info *map, __u8 d, unsigned long adr)
{
- *CSR_ROMWRITEREG = adr;
+ *CSR_ROMWRITEREG = adr & 3;
adr &= ~3;
*(__u8*)(map->map_priv_1 + adr) = d;
}
void dc21285_write16(struct map_info *map, __u16 d, unsigned long adr)
{
- *CSR_ROMWRITEREG = adr;
- adr &= ~1;
+ *CSR_ROMWRITEREG = adr & 3;
+ adr &= ~3;
*(__u16*)(map->map_priv_1 + adr) = d;
}
@@ -131,7 +131,7 @@ int __init init_dc21285(void)
dc21285_map.buswidth*8);
/* Let's map the flash area */
- dc21285_map.map_priv_1 = (unsigned long)__ioremap(DC21285_FLASH, 16*1024*1024, 0);
+ dc21285_map.map_priv_1 = (unsigned long)ioremap(DC21285_FLASH, 16*1024*1024);
if (!dc21285_map.map_priv_1) {
printk("Failed to ioremap\n");
return -EIO;
@@ -139,21 +139,22 @@ int __init init_dc21285(void)
mymtd = do_map_probe("cfi_probe", &dc21285_map);
if (mymtd) {
- int nrparts;
+ int nrparts = 0;
mymtd->module = THIS_MODULE;
/* partition fixup */
+#ifdef CONFIG_MTD_REDBOOT_PARTS
nrparts = parse_redboot_partitions(mymtd, &dc21285_parts);
- if (nrparts <=0) {
+#endif
+ if (nrparts > 0) {
+ add_mtd_partitions(mymtd, dc21285_parts, nrparts);
+ } else if (nrparts == 0) {
printk(KERN_NOTICE "RedBoot partition table failed\n");
- iounmap((void *)dc21285_map.map_priv_1);
- return -ENXIO;
+ add_mtd_device(mymtd);
}
- add_mtd_partitions(mymtd, dc21285_parts, nrparts);
-
/*
* Flash timing is determined with bits 19-16 of the
* CSR_SA110_CNTL. The value is the number of wait cycles, or
diff --git a/drivers/mtd/maps/edb7312.c b/drivers/mtd/maps/edb7312.c
new file mode 100644
index 000000000000..405429d92735
--- /dev/null
+++ b/drivers/mtd/maps/edb7312.c
@@ -0,0 +1,202 @@
+/*
+ * $Id: edb7312.c,v 1.2 2002/09/05 05:11:24 acurtis Exp $
+ *
+ * Handle mapping of the NOR flash on Cogent EDB7312 boards
+ *
+ * Copyright 2002 SYSGO Real-Time Solutions GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/config.h>
+
+#ifdef CONFIG_MTD_PARTITIONS
+#include <linux/mtd/partitions.h>
+#endif
+
+#define WINDOW_ADDR 0x00000000 /* physical properties of flash */
+#define WINDOW_SIZE 0x01000000
+#define BUSWIDTH 2
+#define FLASH_BLOCKSIZE_MAIN 0x20000
+#define FLASH_NUMBLOCKS_MAIN 128
+/* can be "cfi_probe", "jedec_probe", "map_rom", 0 }; */
+#define PROBETYPES { "cfi_probe", 0 }
+
+#define MSG_PREFIX "EDB7312-NOR:" /* prefix for our printk()'s */
+#define MTDID "edb7312-nor" /* for mtdparts= partitioning */
+
+static struct mtd_info *mymtd;
+
+__u8 edb7312nor_read8(struct map_info *map, unsigned long ofs)
+{
+ return __raw_readb(map->map_priv_1 + ofs);
+}
+
+__u16 edb7312nor_read16(struct map_info *map, unsigned long ofs)
+{
+ return __raw_readw(map->map_priv_1 + ofs);
+}
+
+__u32 edb7312nor_read32(struct map_info *map, unsigned long ofs)
+{
+ return __raw_readl(map->map_priv_1 + ofs);
+}
+
+void edb7312nor_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
+{
+ memcpy_fromio(to, map->map_priv_1 + from, len);
+}
+
+void edb7312nor_write8(struct map_info *map, __u8 d, unsigned long adr)
+{
+ __raw_writeb(d, map->map_priv_1 + adr);
+ mb();
+}
+
+void edb7312nor_write16(struct map_info *map, __u16 d, unsigned long adr)
+{
+ __raw_writew(d, map->map_priv_1 + adr);
+ mb();
+}
+
+void edb7312nor_write32(struct map_info *map, __u32 d, unsigned long adr)
+{
+ __raw_writel(d, map->map_priv_1 + adr);
+ mb();
+}
+
+void edb7312nor_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+{
+ memcpy_toio(map->map_priv_1 + to, from, len);
+}
+
+struct map_info edb7312nor_map = {
+ name: "NOR flash on EDB7312",
+ size: WINDOW_SIZE,
+ buswidth: BUSWIDTH,
+ read8: edb7312nor_read8,
+ read16: edb7312nor_read16,
+ read32: edb7312nor_read32,
+ copy_from: edb7312nor_copy_from,
+ write8: edb7312nor_write8,
+ write16: edb7312nor_write16,
+ write32: edb7312nor_write32,
+ copy_to: edb7312nor_copy_to
+};
+
+#ifdef CONFIG_MTD_PARTITIONS
+
+/*
+ * MTD partitioning stuff
+ */
+static struct mtd_partition static_partitions[3] =
+{
+ {
+ name: "ARMboot",
+ size: 0x40000,
+ offset: 0
+ },
+ {
+ name: "Kernel",
+ size: 0x200000,
+ offset: 0x40000
+ },
+ {
+ name: "RootFS",
+ size: 0xDC0000,
+ offset: 0x240000
+ },
+};
+
+#define NB_OF(x) (sizeof (x) / sizeof (x[0]))
+
+#ifdef CONFIG_MTD_CMDLINE_PARTS
+int parse_cmdline_partitions(struct mtd_info *master,
+ struct mtd_partition **pparts,
+ const char *mtd_id);
+#endif
+
+#endif
+
+static int mtd_parts_nb = 0;
+static struct mtd_partition *mtd_parts = 0;
+
+int __init init_edb7312nor(void)
+{
+ static const char *rom_probe_types[] = PROBETYPES;
+ const char **type;
+ const char *part_type = 0;
+
+ printk(KERN_NOTICE MSG_PREFIX "0x%08x at 0x%08x\n",
+ WINDOW_SIZE, WINDOW_ADDR);
+ edb7312nor_map.map_priv_1 = (unsigned long)
+ ioremap(WINDOW_ADDR, WINDOW_SIZE);
+
+ if (!edb7312nor_map.map_priv_1) {
+ printk(MSG_PREFIX "failed to ioremap\n");
+ return -EIO;
+ }
+
+ mymtd = 0;
+ type = rom_probe_types;
+ for(; !mymtd && *type; type++) {
+ mymtd = do_map_probe(*type, &edb7312nor_map);
+ }
+ if (mymtd) {
+ mymtd->module = THIS_MODULE;
+
+#ifdef CONFIG_MTD_PARTITIONS
+#ifdef CONFIG_MTD_CMDLINE_PARTS
+ mtd_parts_nb = parse_cmdline_partitions(mymtd, &mtd_parts, MTDID);
+ if (mtd_parts_nb > 0)
+ part_type = "command line";
+#endif
+ if (mtd_parts_nb == 0)
+ {
+ mtd_parts = static_partitions;
+ mtd_parts_nb = NB_OF(static_partitions);
+ part_type = "static";
+ }
+#endif
+ add_mtd_device(mymtd);
+ if (mtd_parts_nb == 0)
+ printk(KERN_NOTICE MSG_PREFIX "no partition info available\n");
+ else
+ {
+ printk(KERN_NOTICE MSG_PREFIX
+ "using %s partition definition\n", part_type);
+ add_mtd_partitions(mymtd, mtd_parts, mtd_parts_nb);
+ }
+ return 0;
+ }
+
+ iounmap((void *)edb7312nor_map.map_priv_1);
+ return -ENXIO;
+}
+
+static void __exit cleanup_edb7312nor(void)
+{
+ if (mymtd) {
+ del_mtd_device(mymtd);
+ map_destroy(mymtd);
+ }
+ if (edb7312nor_map.map_priv_1) {
+ iounmap((void *)edb7312nor_map.map_priv_1);
+ edb7312nor_map.map_priv_1 = 0;
+ }
+}
+
+module_init(init_edb7312nor);
+module_exit(cleanup_edb7312nor);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Marius Groeger <mag@sysgo.de>");
+MODULE_DESCRIPTION("Generic configurable MTD map driver");
diff --git a/drivers/mtd/maps/epxa10db-flash.c b/drivers/mtd/maps/epxa10db-flash.c
new file mode 100644
index 000000000000..cb4c76e4bb71
--- /dev/null
+++ b/drivers/mtd/maps/epxa10db-flash.c
@@ -0,0 +1,233 @@
+/*
+ * Flash memory access on EPXA based devices
+ *
+ * (C) 2000 Nicolas Pitre <nico@cam.org>
+ * Copyright (C) 2001 Altera Corporation
+ * Copyright (C) 2001 Red Hat, Inc.
+ *
+ * $Id: epxa10db-flash.c,v 1.4 2002/08/22 10:46:19 cdavies Exp $
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/hardware.h>
+#ifdef CONFIG_EPXA10DB
+#define BOARD_NAME "EPXA10DB"
+#else
+#define BOARD_NAME "EPXA1DB"
+#endif
+
+static int nr_parts = 0;
+static struct mtd_partition *parts;
+
+static struct mtd_info *mymtd;
+
+extern int parse_redboot_partitions(struct mtd_info *, struct mtd_partition **);
+static int epxa_default_partitions(struct mtd_info *master, struct mtd_partition **pparts);
+
+static __u8 epxa_read8(struct map_info *map, unsigned long ofs)
+{
+ return __raw_readb(map->map_priv_1 + ofs);
+}
+
+static __u16 epxa_read16(struct map_info *map, unsigned long ofs)
+{
+ return __raw_readw(map->map_priv_1 + ofs);
+}
+
+static __u32 epxa_read32(struct map_info *map, unsigned long ofs)
+{
+ return __raw_readl(map->map_priv_1 + ofs);
+}
+
+static void epxa_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
+{
+ memcpy_fromio(to, (void *)(map->map_priv_1 + from), len);
+}
+
+static void epxa_write8(struct map_info *map, __u8 d, unsigned long adr)
+{
+ __raw_writeb(d, map->map_priv_1 + adr);
+ mb();
+}
+
+static void epxa_write16(struct map_info *map, __u16 d, unsigned long adr)
+{
+ __raw_writew(d, map->map_priv_1 + adr);
+ mb();
+}
+
+static void epxa_write32(struct map_info *map, __u32 d, unsigned long adr)
+{
+ __raw_writel(d, map->map_priv_1 + adr);
+ mb();
+}
+
+static void epxa_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+{
+ memcpy_toio((void *)(map->map_priv_1 + to), from, len);
+}
+
+
+
+static struct map_info epxa_map = {
+ name: "EPXA flash",
+ size: FLASH_SIZE,
+ buswidth: 2,
+ read8: epxa_read8,
+ read16: epxa_read16,
+ read32: epxa_read32,
+ copy_from: epxa_copy_from,
+ write8: epxa_write8,
+ write16: epxa_write16,
+ write32: epxa_write32,
+ copy_to: epxa_copy_to
+};
+
+
+static int __init epxa_mtd_init(void)
+{
+ int i;
+
+ printk(KERN_NOTICE "%s flash device: %x at %x\n", BOARD_NAME, FLASH_SIZE, FLASH_START);
+ epxa_map.map_priv_1 = (unsigned long)ioremap(FLASH_START, FLASH_SIZE);
+ if (!epxa_map.map_priv_1) {
+ printk("Failed to ioremap %s flash\n",BOARD_NAME);
+ return -EIO;
+ }
+
+ mymtd = do_map_probe("cfi_probe", &epxa_map);
+ if (!mymtd) {
+ iounmap((void *)epxa_map.map_priv_1);
+ return -ENXIO;
+ }
+
+ mymtd->module = THIS_MODULE;
+
+ /* Unlock the flash device. */
+ if(mymtd->unlock){
+ for (i=0; i<mymtd->numeraseregions;i++){
+ int j;
+ for(j=0;j<mymtd->eraseregions[i].numblocks;j++){
+ mymtd->unlock(mymtd,mymtd->eraseregions[i].offset + j * mymtd->eraseregions[i].erasesize,mymtd->eraseregions[i].erasesize);
+ }
+ }
+ }
+
+#ifdef CONFIG_MTD_REDBOOT_PARTS
+ nr_parts = parse_redboot_partitions(mymtd, &parts);
+
+ if (nr_parts > 0) {
+ add_mtd_partitions(mymtd, parts, nr_parts);
+ return 0;
+ }
+#endif
+#ifdef CONFIG_MTD_AFS_PARTS
+ nr_parts = parse_afs_partitions(mymtd, &parts);
+
+ if (nr_parts > 0) {
+ add_mtd_partitions(mymtd, parts, nr_parts);
+ return 0;
+ }
+#endif
+
+ /* No recognised partitioning schemes found - use defaults */
+ nr_parts = epxa_default_partitions(mymtd, &parts);
+ if (nr_parts > 0) {
+ add_mtd_partitions(mymtd, parts, nr_parts);
+ return 0;
+ }
+
+ /* If all else fails... */
+ add_mtd_device(mymtd);
+ return 0;
+}
+
+static void __exit epxa_mtd_cleanup(void)
+{
+ if (mymtd) {
+ if (nr_parts)
+ del_mtd_partitions(mymtd);
+ else
+ del_mtd_device(mymtd);
+ map_destroy(mymtd);
+ }
+ if (epxa_map.map_priv_1) {
+ iounmap((void *)epxa_map.map_priv_1);
+ epxa_map.map_priv_1 = 0;
+ }
+}
+
+
+/*
+ * This will do for now, once we decide which bootldr we're finally
+ * going to use then we'll remove this function and do it properly
+ *
+ * Partitions are currently (as offsets from base of flash):
+ * 0x00000000 - 0x003FFFFF - bootloader (!)
+ * 0x00400000 - 0x00FFFFFF - Flashdisk
+ */
+
+static int __init epxa_default_partitions(struct mtd_info *master, struct mtd_partition **pparts)
+{
+ struct mtd_partition *parts;
+ int ret, i;
+ int npartitions = 0;
+ char *names;
+ const char *name = "jffs";
+
+ printk("Using default partitions for %s\n",BOARD_NAME);
+ npartitions=1;
+ parts = kmalloc(npartitions*sizeof(*parts)+strlen(name), GFP_KERNEL);
+ memzero(parts,npartitions*sizeof(*parts)+strlen(name));
+ if (!parts) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ i=0;
+ names = (char *)&parts[npartitions];
+ parts[i].name = names;
+ names += strlen(name) + 1;
+ strcpy(parts[i].name, name);
+
+#ifdef CONFIG_EPXA10DB
+ parts[i].size = FLASH_SIZE-0x00400000;
+ parts[i].offset = 0x00400000;
+#else
+ parts[i].size = FLASH_SIZE-0x00180000;
+ parts[i].offset = 0x00180000;
+#endif
+
+ out:
+ *pparts = parts;
+ return npartitions;
+}
+
+
+module_init(epxa_mtd_init);
+module_exit(epxa_mtd_cleanup);
+
+MODULE_AUTHOR("Clive Davies");
+MODULE_DESCRIPTION("Altera epxa mtd flash map");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/fortunet.c b/drivers/mtd/maps/fortunet.c
new file mode 100644
index 000000000000..98fd322e9523
--- /dev/null
+++ b/drivers/mtd/maps/fortunet.c
@@ -0,0 +1,309 @@
+/* fortunet.c memory map
+ *
+ * $Id: fortunet.c,v 1.2 2002/10/14 12:50:22 rmk Exp $
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+#define MAX_NUM_REGIONS 4
+#define MAX_NUM_PARTITIONS 8
+
+#define DEF_WINDOW_ADDR_PHY 0x00000000
+#define DEF_WINDOW_SIZE 0x00800000 // 8 Mega Bytes
+
+#define MTD_FORTUNET_PK "MTD FortuNet: "
+
+#define MAX_NAME_SIZE 128
+
+struct map_region
+{
+ int window_addr_phyical;
+ int altbuswidth;
+ struct map_info map_info;
+ struct mtd_info *mymtd;
+ struct mtd_partition parts[MAX_NUM_PARTITIONS];
+ char map_name[MAX_NAME_SIZE];
+ char parts_name[MAX_NUM_PARTITIONS][MAX_NAME_SIZE];
+};
+
+static struct map_region map_regions[MAX_NUM_REGIONS];
+static int map_regions_set[MAX_NUM_REGIONS] = {0,0,0,0};
+static int map_regions_parts[MAX_NUM_REGIONS] = {0,0,0,0};
+
+
+__u8 fortunet_read8(struct map_info *map, unsigned long ofs)
+{
+ return *(__u8 *)(map->map_priv_1 + ofs);
+}
+
+__u16 fortunet_read16(struct map_info *map, unsigned long ofs)
+{
+ return *(__u16 *)(map->map_priv_1 + ofs);
+}
+
+__u32 fortunet_read32(struct map_info *map, unsigned long ofs)
+{
+ return *(__u32 *)(map->map_priv_1 + ofs);
+}
+
+void fortunet_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
+{
+ memcpy(to, (void *)(map->map_priv_1 + from), len);
+}
+
+void fortunet_write8(struct map_info *map, __u8 d, unsigned long adr)
+{
+ *(__u8 *)(map->map_priv_1 + adr) = d;
+}
+
+void fortunet_write16(struct map_info *map, __u16 d, unsigned long adr)
+{
+ *(__u16 *)(map->map_priv_1 + adr) = d;
+}
+
+void fortunet_write32(struct map_info *map, __u32 d, unsigned long adr)
+{
+ *(__u32 *)(map->map_priv_1 + adr) = d;
+}
+
+void fortunet_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+{
+ memcpy((void *)(map->map_priv_1 + to), from, len);
+}
+
+struct map_info default_map = {
+ size: DEF_WINDOW_SIZE,
+ buswidth: 4,
+ read8: fortunet_read8,
+ read16: fortunet_read16,
+ read32: fortunet_read32,
+ copy_from: fortunet_copy_from,
+ write8: fortunet_write8,
+ write16: fortunet_write16,
+ write32: fortunet_write32,
+ copy_to: fortunet_copy_to
+};
+
+static char * __init get_string_option(char *dest,int dest_size,char *sor)
+{
+ if(!dest_size)
+ return sor;
+ dest_size--;
+ while(*sor)
+ {
+ if(*sor==',')
+ {
+ sor++;
+ break;
+ }
+ else if(*sor=='\"')
+ {
+ sor++;
+ while(*sor)
+ {
+ if(*sor=='\"')
+ {
+ sor++;
+ break;
+ }
+ *dest = *sor;
+ dest++;
+ sor++;
+ dest_size--;
+ if(!dest_size)
+ {
+ *dest = 0;
+ return sor;
+ }
+ }
+ }
+ else
+ {
+ *dest = *sor;
+ dest++;
+ sor++;
+ dest_size--;
+ if(!dest_size)
+ {
+ *dest = 0;
+ return sor;
+ }
+ }
+ }
+ *dest = 0;
+ return sor;
+}
+
+static int __init MTD_New_Region(char *line)
+{
+ char string[MAX_NAME_SIZE];
+ int params[6];
+ get_options (get_string_option(string,sizeof(string),line),6,params);
+ if(params[0]<1)
+ {
+		printk(MTD_FORTUNET_PK "Bad parameters for MTD Region "
+			" name,region-number[,base,size,buswidth,altbuswidth]\n");
+ return 1;
+ }
+ if((params[1]<0)||(params[1]>=MAX_NUM_REGIONS))
+ {
+ printk(MTD_FORTUNET_PK "Bad region index of %d only have 0..%u regions\n",
+ params[1],MAX_NUM_REGIONS-1);
+ return 1;
+ }
+ memset(&map_regions[params[1]],0,sizeof(map_regions[params[1]]));
+ memcpy(&map_regions[params[1]].map_info,
+ &default_map,sizeof(map_regions[params[1]].map_info));
+ map_regions_set[params[1]] = 1;
+ map_regions[params[1]].window_addr_phyical = DEF_WINDOW_ADDR_PHY;
+ map_regions[params[1]].altbuswidth = 2;
+ map_regions[params[1]].mymtd = NULL;
+ map_regions[params[1]].map_info.name = map_regions[params[1]].map_name;
+ strcpy(map_regions[params[1]].map_info.name,string);
+ if(params[0]>1)
+ {
+ map_regions[params[1]].window_addr_phyical = params[2];
+ }
+ if(params[0]>2)
+ {
+ map_regions[params[1]].map_info.size = params[3];
+ }
+ if(params[0]>3)
+ {
+ map_regions[params[1]].map_info.buswidth = params[4];
+ }
+ if(params[0]>4)
+ {
+ map_regions[params[1]].altbuswidth = params[5];
+ }
+ return 1;
+}
+
+static int __init MTD_New_Partion(char *line)
+{
+ char string[MAX_NAME_SIZE];
+ int params[4];
+ get_options (get_string_option(string,sizeof(string),line),4,params);
+ if(params[0]<3)
+ {
+		printk(MTD_FORTUNET_PK "Bad parameters for MTD Partition "
+			" name,region-number,size,offset\n");
+ return 1;
+ }
+ if((params[1]<0)||(params[1]>=MAX_NUM_REGIONS))
+ {
+ printk(MTD_FORTUNET_PK "Bad region index of %d only have 0..%u regions\n",
+ params[1],MAX_NUM_REGIONS-1);
+ return 1;
+ }
+ if(map_regions_parts[params[1]]>=MAX_NUM_PARTITIONS)
+ {
+		printk(MTD_FORTUNET_PK "Out of space for partition in this region\n");
+ return 1;
+ }
+ map_regions[params[1]].parts[map_regions_parts[params[1]]].name =
+ map_regions[params[1]]. parts_name[map_regions_parts[params[1]]];
+ strcpy(map_regions[params[1]].parts[map_regions_parts[params[1]]].name,string);
+ map_regions[params[1]].parts[map_regions_parts[params[1]]].size =
+ params[2];
+ map_regions[params[1]].parts[map_regions_parts[params[1]]].offset =
+ params[3];
+ map_regions[params[1]].parts[map_regions_parts[params[1]]].mask_flags = 0;
+ map_regions_parts[params[1]]++;
+ return 1;
+}
+
+__setup("MTD_Region=", MTD_New_Region);
+__setup("MTD_Partion=", MTD_New_Partion);
+
+int __init init_fortunet(void)
+{
+ int ix,iy;
+ for(iy=ix=0;ix<MAX_NUM_REGIONS;ix++)
+ {
+ if(map_regions_parts[ix]&&(!map_regions_set[ix]))
+ {
+			printk(MTD_FORTUNET_PK "Region %d is not setup (Setting to default)\n",
+				ix);
+ memset(&map_regions[ix],0,sizeof(map_regions[ix]));
+ memcpy(&map_regions[ix].map_info,&default_map,
+ sizeof(map_regions[ix].map_info));
+ map_regions_set[ix] = 1;
+ map_regions[ix].window_addr_phyical = DEF_WINDOW_ADDR_PHY;
+ map_regions[ix].altbuswidth = 2;
+ map_regions[ix].mymtd = NULL;
+ map_regions[ix].map_info.name = map_regions[ix].map_name;
+ strcpy(map_regions[ix].map_info.name,"FORTUNET");
+ }
+ if(map_regions_set[ix])
+ {
+ iy++;
+			printk(KERN_NOTICE MTD_FORTUNET_PK "%s flash device at physical "
+ " address %x size %x\n",
+ map_regions[ix].map_info.name,
+ map_regions[ix].window_addr_phyical,
+ map_regions[ix].map_info.size);
+ map_regions[ix].map_info.map_priv_1 =
+ (int)ioremap_nocache(
+ map_regions[ix].window_addr_phyical,
+ map_regions[ix].map_info.size);
+ if(!map_regions[ix].map_info.map_priv_1)
+ {
+ printk(MTD_FORTUNET_PK "%s flash failed to ioremap!\n",
+ map_regions[ix].map_info.name);
+ return -ENXIO;
+ }
+			printk(KERN_NOTICE MTD_FORTUNET_PK "%s flash is virtually at: %x\n",
+ map_regions[ix].map_info.name,
+ map_regions[ix].map_info.map_priv_1);
+ map_regions[ix].mymtd = do_map_probe("cfi_probe",
+ &map_regions[ix].map_info);
+ if((!map_regions[ix].mymtd)&&(
+ map_regions[ix].altbuswidth!=map_regions[ix].map_info.buswidth))
+ {
+				printk(KERN_NOTICE MTD_FORTUNET_PK "Trying alternate buswidth "
+ "for %s flash.\n",
+ map_regions[ix].map_info.name);
+ map_regions[ix].map_info.buswidth =
+ map_regions[ix].altbuswidth;
+ map_regions[ix].mymtd = do_map_probe("cfi_probe",
+ &map_regions[ix].map_info);
+ }
+ map_regions[ix].mymtd->module = THIS_MODULE;
+ add_mtd_partitions(map_regions[ix].mymtd,
+ map_regions[ix].parts,map_regions_parts[ix]);
+ }
+ }
+ if(iy)
+ return 0;
+ return -ENXIO;
+}
+
+static void __exit cleanup_fortunet(void)
+{
+ int ix;
+ for(ix=0;ix<MAX_NUM_REGIONS;ix++)
+ {
+ if(map_regions_set[ix])
+ {
+ if( map_regions[ix].mymtd )
+ {
+ del_mtd_partitions( map_regions[ix].mymtd );
+ map_destroy( map_regions[ix].mymtd );
+ }
+ iounmap((void *)map_regions[ix].map_info.map_priv_1);
+ }
+ }
+}
+
+module_init(init_fortunet);
+module_exit(cleanup_fortunet);
+
+MODULE_AUTHOR("FortuNet, Inc.");
+MODULE_DESCRIPTION("MTD map driver for FortuNet boards");
diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c
new file mode 100644
index 000000000000..3dc382bc9511
--- /dev/null
+++ b/drivers/mtd/maps/impa7.c
@@ -0,0 +1,234 @@
+/*
+ * $Id: impa7.c,v 1.2 2002/09/05 05:11:24 acurtis Exp $
+ *
+ * Handle mapping of the NOR flash on implementa A7 boards
+ *
+ * Copyright 2002 SYSGO Real-Time Solutions GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/config.h>
+
+#ifdef CONFIG_MTD_PARTITIONS
+#include <linux/mtd/partitions.h>
+#endif
+
+#define WINDOW_ADDR0 0x00000000 /* physical properties of flash */
+#define WINDOW_SIZE0 0x00800000
+#define WINDOW_ADDR1 0x10000000 /* physical properties of flash */
+#define WINDOW_SIZE1 0x00800000
+#define NUM_FLASHBANKS 2
+#define BUSWIDTH 4
+
+/* can be { "cfi_probe", "jedec_probe", "map_rom", 0 }; */
+#define PROBETYPES { "jedec_probe", 0 }
+
+#define MSG_PREFIX "impA7:" /* prefix for our printk()'s */
+#define MTDID "impa7-%d" /* for mtdparts= partitioning */
+
+static struct mtd_info *impa7_mtd[NUM_FLASHBANKS] = { 0 };
+
+__u8 impa7_read8(struct map_info *map, unsigned long ofs)
+{
+ return __raw_readb(map->map_priv_1 + ofs);
+}
+
+__u16 impa7_read16(struct map_info *map, unsigned long ofs)
+{
+ return __raw_readw(map->map_priv_1 + ofs);
+}
+
+__u32 impa7_read32(struct map_info *map, unsigned long ofs)
+{
+ return __raw_readl(map->map_priv_1 + ofs);
+}
+
+void impa7_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
+{
+ memcpy_fromio(to, map->map_priv_1 + from, len);
+}
+
+void impa7_write8(struct map_info *map, __u8 d, unsigned long adr)
+{
+ __raw_writeb(d, map->map_priv_1 + adr);
+ mb();
+}
+
+void impa7_write16(struct map_info *map, __u16 d, unsigned long adr)
+{
+ __raw_writew(d, map->map_priv_1 + adr);
+ mb();
+}
+
+void impa7_write32(struct map_info *map, __u32 d, unsigned long adr)
+{
+ __raw_writel(d, map->map_priv_1 + adr);
+ mb();
+}
+
+void impa7_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+{
+ memcpy_toio(map->map_priv_1 + to, from, len);
+}
+
+static struct map_info impa7_map[NUM_FLASHBANKS] = {
+ {
+ name: "impA7 NOR Flash Bank #0",
+ size: WINDOW_SIZE0,
+ buswidth: BUSWIDTH,
+ read8: impa7_read8,
+ read16: impa7_read16,
+ read32: impa7_read32,
+ copy_from: impa7_copy_from,
+ write8: impa7_write8,
+ write16: impa7_write16,
+ write32: impa7_write32,
+ copy_to: impa7_copy_to
+ },
+ {
+ name: "impA7 NOR Flash Bank #1",
+ size: WINDOW_SIZE1,
+ buswidth: BUSWIDTH,
+ read8: impa7_read8,
+ read16: impa7_read16,
+ read32: impa7_read32,
+ copy_from: impa7_copy_from,
+ write8: impa7_write8,
+ write16: impa7_write16,
+ write32: impa7_write32,
+ copy_to: impa7_copy_to
+ },
+};
+
+#ifdef CONFIG_MTD_PARTITIONS
+
+/*
+ * MTD partitioning stuff
+ */
+static struct mtd_partition static_partitions[] =
+{
+ {
+ name: "FileSystem",
+ size: 0x800000,
+ offset: 0x00000000
+ },
+};
+
+#define NB_OF(x) (sizeof (x) / sizeof (x[0]))
+
+#ifdef CONFIG_MTD_CMDLINE_PARTS
+int parse_cmdline_partitions(struct mtd_info *master,
+ struct mtd_partition **pparts,
+ const char *mtd_id);
+#endif
+
+#endif
+
+static int mtd_parts_nb = 0;
+static struct mtd_partition *mtd_parts = 0;
+
+int __init init_impa7(void)
+{
+ static const char *rom_probe_types[] = PROBETYPES;
+ const char **type;
+ const char *part_type = 0;
+ int i;
+ static struct { u_long addr; u_long size; } pt[NUM_FLASHBANKS] = {
+ { WINDOW_ADDR0, WINDOW_SIZE0 },
+ { WINDOW_ADDR1, WINDOW_SIZE1 },
+ };
+ char mtdid[10];
+ int devicesfound = 0;
+
+ for(i=0; i<NUM_FLASHBANKS; i++)
+ {
+ printk(KERN_NOTICE MSG_PREFIX "probing 0x%08lx at 0x%08lx\n",
+ pt[i].size, pt[i].addr);
+ impa7_map[i].map_priv_1 = (unsigned long)
+ ioremap(pt[i].addr, pt[i].size);
+
+ if (!impa7_map[i].map_priv_1) {
+ printk(MSG_PREFIX "failed to ioremap\n");
+ return -EIO;
+ }
+
+ impa7_mtd[i] = 0;
+ type = rom_probe_types;
+ for(; !impa7_mtd[i] && *type; type++) {
+ impa7_mtd[i] = do_map_probe(*type, &impa7_map[i]);
+ }
+
+ if (impa7_mtd[i])
+ {
+ impa7_mtd[i]->module = THIS_MODULE;
+ add_mtd_device(impa7_mtd[i]);
+ devicesfound++;
+#ifdef CONFIG_MTD_PARTITIONS
+#ifdef CONFIG_MTD_CMDLINE_PARTS
+ sprintf(mtdid, MTDID, i);
+ mtd_parts_nb = parse_cmdline_partitions(impa7_mtd[i],
+ &mtd_parts,
+ mtdid);
+ if (mtd_parts_nb > 0)
+ part_type = "command line";
+#endif
+ if (mtd_parts_nb <= 0)
+ {
+ mtd_parts = static_partitions;
+ mtd_parts_nb = NB_OF(static_partitions);
+ part_type = "static";
+ }
+ if (mtd_parts_nb <= 0)
+ {
+ printk(KERN_NOTICE MSG_PREFIX
+ "no partition info available\n");
+ }
+ else
+ {
+ printk(KERN_NOTICE MSG_PREFIX
+ "using %s partition definition\n",
+ part_type);
+ add_mtd_partitions(impa7_mtd[i],
+ mtd_parts, mtd_parts_nb);
+ }
+#endif
+ }
+ else
+ iounmap((void *)impa7_map[i].map_priv_1);
+ }
+ return devicesfound == 0 ? -ENXIO : 0;
+}
+
+static void __exit cleanup_impa7(void)
+{
+ int i;
+ for (i=0; i<NUM_FLASHBANKS; i++)
+ {
+ if (impa7_mtd[i])
+ {
+ del_mtd_device(impa7_mtd[i]);
+ map_destroy(impa7_mtd[i]);
+ }
+ if (impa7_map[i].map_priv_1)
+ {
+ iounmap((void *)impa7_map[i].map_priv_1);
+ impa7_map[i].map_priv_1 = 0;
+ }
+ }
+}
+
+module_init(init_impa7);
+module_exit(cleanup_impa7);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pavel Bartusek <pba@sysgo.de>");
+MODULE_DESCRIPTION("MTD map driver for implementa impA7");
diff --git a/drivers/mtd/maps/iq80310.c b/drivers/mtd/maps/iq80310.c
index cb3cb05766d1..3a301135831b 100644
--- a/drivers/mtd/maps/iq80310.c
+++ b/drivers/mtd/maps/iq80310.c
@@ -1,5 +1,5 @@
/*
- * $Id: iq80310.c,v 1.8 2001/10/02 15:05:14 dwmw2 Exp $
+ * $Id: iq80310.c,v 1.9 2002/01/01 22:45:02 rmk Exp $
*
* Mapping for the Intel XScale IQ80310 evaluation board
*
@@ -116,7 +116,7 @@ static int __init init_iq80310(void)
int parsed_nr_parts = 0;
char *part_type = "static";
- iq80310_map.map_priv_1 = (unsigned long)__ioremap(WINDOW_ADDR, WINDOW_SIZE, 0);
+ iq80310_map.map_priv_1 = (unsigned long)ioremap(WINDOW_ADDR, WINDOW_SIZE);
if (!iq80310_map.map_priv_1) {
printk("Failed to ioremap\n");
return -EIO;
@@ -161,7 +161,6 @@ static void __exit cleanup_iq80310(void)
}
if (iq80310_map.map_priv_1)
iounmap((void *)iq80310_map.map_priv_1);
- return 0;
}
module_init(init_iq80310);
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
new file mode 100644
index 000000000000..ccc854980c6f
--- /dev/null
+++ b/drivers/mtd/maps/pci.c
@@ -0,0 +1,385 @@
+/*
+ * linux/drivers/mtd/maps/pci.c
+ *
+ * Copyright (C) 2001 Russell King, All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * $Id: pci.c,v 1.1 2001/09/27 20:28:45 rmk Exp $
+ *
+ * Generic PCI memory map driver. We support the following boards:
+ * - Intel IQ80310 ATU.
+ * - Intel EBSA285 (blank rom programming mode). Tested working 27/09/2001
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+struct map_pci_info;
+
+struct mtd_pci_info {
+ int (*init)(struct pci_dev *dev, struct map_pci_info *map);
+ void (*exit)(struct pci_dev *dev, struct map_pci_info *map);
+ unsigned long (*translate)(struct map_pci_info *map, unsigned long ofs);
+ const char *map_name;
+};
+
+struct map_pci_info {
+ struct map_info map;
+ void *base;
+ void (*exit)(struct pci_dev *dev, struct map_pci_info *map);
+ unsigned long (*translate)(struct map_pci_info *map, unsigned long ofs);
+ struct pci_dev *dev;
+};
+
+/*
+ * Intel IOP80310 Flash driver
+ */
+
+static int
+intel_iq80310_init(struct pci_dev *dev, struct map_pci_info *map)
+{
+ u32 win_base;
+
+ map->map.buswidth = 1;
+ map->map.size = 0x00800000;
+ map->base = ioremap_nocache(pci_resource_start(dev, 0),
+ pci_resource_len(dev, 0));
+
+ if (!map->base)
+ return -ENOMEM;
+
+ /*
+ * We want to base the memory window at Xscale
+ * bus address 0, not 0x1000.
+ */
+ pci_read_config_dword(dev, 0x44, &win_base);
+ pci_write_config_dword(dev, 0x44, 0);
+
+ map->map.map_priv_2 = win_base;
+
+ return 0;
+}
+
+static void
+intel_iq80310_exit(struct pci_dev *dev, struct map_pci_info *map)
+{
+ if (map->base)
+ iounmap((void *)map->base);
+ pci_write_config_dword(dev, 0x44, map->map.map_priv_2);
+}
+
+static unsigned long
+intel_iq80310_translate(struct map_pci_info *map, unsigned long ofs)
+{
+ unsigned long page_addr = ofs & 0x00400000;
+
+ /*
+ * This mundges the flash location so we avoid
+ * the first 80 bytes (they appear to read nonsense).
+ */
+ if (page_addr) {
+ writel(0x00000008, map->base + 0x1558);
+ writel(0x00000000, map->base + 0x1550);
+ } else {
+ writel(0x00000007, map->base + 0x1558);
+ writel(0x00800000, map->base + 0x1550);
+ ofs += 0x00800000;
+ }
+
+ return ofs;
+}
+
+static struct mtd_pci_info intel_iq80310_info = {
+ init: intel_iq80310_init,
+ exit: intel_iq80310_exit,
+ translate: intel_iq80310_translate,
+ map_name: "cfi_probe",
+};
+
+/*
+ * Intel DC21285 driver
+ */
+
+static int
+intel_dc21285_init(struct pci_dev *dev, struct map_pci_info *map)
+{
+ unsigned long base, len;
+
+ base = pci_resource_start(dev, PCI_ROM_RESOURCE);
+ len = pci_resource_len(dev, PCI_ROM_RESOURCE);
+
+ if (!len || !base) {
+ /*
+ * No ROM resource
+ */
+ base = pci_resource_start(dev, 2);
+ len = pci_resource_len(dev, 2);
+
+ /*
+ * We need to re-allocate PCI BAR2 address range to the
+ * PCI ROM BAR, and disable PCI BAR2.
+ */
+ } else {
+ /*
+ * Hmm, if an address was allocated to the ROM resource, but
+ * not enabled, should we be allocating a new resource for it
+ * or simply enabling it?
+ */
+ if (!(pci_resource_flags(dev, PCI_ROM_RESOURCE) &
+ PCI_ROM_ADDRESS_ENABLE)) {
+ u32 val;
+ pci_resource_flags(dev, PCI_ROM_RESOURCE) |= PCI_ROM_ADDRESS_ENABLE;
+ pci_read_config_dword(dev, PCI_ROM_ADDRESS, &val);
+ val |= PCI_ROM_ADDRESS_ENABLE;
+ pci_write_config_dword(dev, PCI_ROM_ADDRESS, val);
+ printk("%s: enabling expansion ROM\n", dev->slot_name);
+ }
+ }
+
+ if (!len || !base)
+ return -ENXIO;
+
+ map->map.buswidth = 4;
+ map->map.size = len;
+ map->base = ioremap_nocache(base, len);
+
+ if (!map->base)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void
+intel_dc21285_exit(struct pci_dev *dev, struct map_pci_info *map)
+{
+ u32 val;
+
+ if (map->base)
+ iounmap((void *)map->base);
+
+ /*
+ * We need to undo the PCI BAR2/PCI ROM BAR address alteration.
+ */
+ pci_resource_flags(dev, PCI_ROM_RESOURCE) &= ~PCI_ROM_ADDRESS_ENABLE;
+ pci_read_config_dword(dev, PCI_ROM_ADDRESS, &val);
+ val &= ~PCI_ROM_ADDRESS_ENABLE;
+ pci_write_config_dword(dev, PCI_ROM_ADDRESS, val);
+}
+
+static unsigned long
+intel_dc21285_translate(struct map_pci_info *map, unsigned long ofs)
+{
+ return ofs & 0x00ffffc0 ? ofs : (ofs ^ (1 << 5));
+}
+
+static struct mtd_pci_info intel_dc21285_info = {
+ init: intel_dc21285_init,
+ exit: intel_dc21285_exit,
+ translate: intel_dc21285_translate,
+ map_name: "jedec_probe",
+};
+
+/*
+ * PCI device ID table
+ */
+
+static struct pci_device_id mtd_pci_ids[] __devinitdata = {
+ {
+ vendor: PCI_VENDOR_ID_INTEL,
+ device: 0x530d,
+ subvendor: PCI_ANY_ID,
+ subdevice: PCI_ANY_ID,
+ class: PCI_CLASS_MEMORY_OTHER << 8,
+ class_mask: 0xffff00,
+ driver_data: (unsigned long)&intel_iq80310_info,
+ },
+ {
+ vendor: PCI_VENDOR_ID_DEC,
+ device: PCI_DEVICE_ID_DEC_21285,
+ subvendor: 0, /* DC21285 defaults to 0 on reset */
+ subdevice: 0, /* DC21285 defaults to 0 on reset */
+ class: 0,
+ class_mask: 0,
+ driver_data: (unsigned long)&intel_dc21285_info,
+ },
+ { 0, }
+};
+
+/*
+ * Generic code follows.
+ */
+
+static u8 mtd_pci_read8(struct map_info *_map, unsigned long ofs)
+{
+ struct map_pci_info *map = (struct map_pci_info *)_map;
+ u8 val = readb(map->base + map->translate(map, ofs));
+// printk("read8 : %08lx => %02x\n", ofs, val);
+ return val;
+}
+
+static u16 mtd_pci_read16(struct map_info *_map, unsigned long ofs)
+{
+ struct map_pci_info *map = (struct map_pci_info *)_map;
+ u16 val = readw(map->base + map->translate(map, ofs));
+// printk("read16: %08lx => %04x\n", ofs, val);
+ return val;
+}
+
+static u32 mtd_pci_read32(struct map_info *_map, unsigned long ofs)
+{
+ struct map_pci_info *map = (struct map_pci_info *)_map;
+ u32 val = readl(map->base + map->translate(map, ofs));
+// printk("read32: %08lx => %08x\n", ofs, val);
+ return val;
+}
+
+static void mtd_pci_copyfrom(struct map_info *_map, void *to, unsigned long from, ssize_t len)
+{
+ struct map_pci_info *map = (struct map_pci_info *)_map;
+ memcpy_fromio(to, map->base + map->translate(map, from), len);
+}
+
+static void mtd_pci_write8(struct map_info *_map, u8 val, unsigned long ofs)
+{
+ struct map_pci_info *map = (struct map_pci_info *)_map;
+// printk("write8 : %08lx <= %02x\n", ofs, val);
+ writeb(val, map->base + map->translate(map, ofs));
+}
+
+static void mtd_pci_write16(struct map_info *_map, u16 val, unsigned long ofs)
+{
+ struct map_pci_info *map = (struct map_pci_info *)_map;
+// printk("write16: %08lx <= %04x\n", ofs, val);
+ writew(val, map->base + map->translate(map, ofs));
+}
+
+static void mtd_pci_write32(struct map_info *_map, u32 val, unsigned long ofs)
+{
+ struct map_pci_info *map = (struct map_pci_info *)_map;
+// printk("write32: %08lx <= %08x\n", ofs, val);
+ writel(val, map->base + map->translate(map, ofs));
+}
+
+static void mtd_pci_copyto(struct map_info *_map, unsigned long to, const void *from, ssize_t len)
+{
+ struct map_pci_info *map = (struct map_pci_info *)_map;
+ memcpy_toio(map->base + map->translate(map, to), from, len);
+}
+
+static struct map_info mtd_pci_map = {
+ read8: mtd_pci_read8,
+ read16: mtd_pci_read16,
+ read32: mtd_pci_read32,
+ copy_from: mtd_pci_copyfrom,
+ write8: mtd_pci_write8,
+ write16: mtd_pci_write16,
+ write32: mtd_pci_write32,
+ copy_to: mtd_pci_copyto,
+};
+
+static int __devinit
+mtd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ struct mtd_pci_info *info = (struct mtd_pci_info *)id->driver_data;
+ struct map_pci_info *map = NULL;
+ struct mtd_info *mtd = NULL;
+ int err;
+
+ err = pci_enable_device(dev);
+ if (err)
+ goto out;
+
+ err = pci_request_regions(dev, "pci mtd");
+ if (err)
+ goto out;
+
+ map = kmalloc(sizeof(*map), GFP_KERNEL);
+ err = -ENOMEM;
+ if (!map)
+ goto release;
+
+ map->map = mtd_pci_map;
+ map->map.name = dev->slot_name;
+ map->dev = dev;
+ map->exit = info->exit;
+ map->translate = info->translate;
+
+ err = info->init(dev, map);
+ if (err)
+ goto release;
+
+ /* tsk - do_map_probe should take const char * */
+ mtd = do_map_probe((char *)info->map_name, &map->map);
+ err = -ENODEV;
+ if (!mtd)
+ goto release;
+
+ mtd->module = THIS_MODULE;
+ add_mtd_device(mtd);
+
+ pci_set_drvdata(dev, mtd);
+
+ return 0;
+
+release:
+ if (mtd)
+ map_destroy(mtd);
+
+ if (map) {
+ map->exit(dev, map);
+ kfree(map);
+ }
+
+ pci_release_regions(dev);
+out:
+ return err;
+}
+
+static void __devexit
+mtd_pci_remove(struct pci_dev *dev)
+{
+ struct mtd_info *mtd = pci_get_drvdata(dev);
+ struct map_pci_info *map = mtd->priv;
+
+ del_mtd_device(mtd);
+ map_destroy(mtd);
+ map->exit(dev, map);
+ kfree(map);
+
+ pci_set_drvdata(dev, NULL);
+ pci_release_regions(dev);
+}
+
+static struct pci_driver mtd_pci_driver = {
+ name: "MTD PCI",
+ probe: mtd_pci_probe,
+ remove: mtd_pci_remove,
+ id_table: mtd_pci_ids,
+};
+
+static int __init mtd_pci_maps_init(void)
+{
+ return pci_module_init(&mtd_pci_driver);
+}
+
+static void __exit mtd_pci_maps_exit(void)
+{
+ pci_unregister_driver(&mtd_pci_driver);
+}
+
+module_init(mtd_pci_maps_init);
+module_exit(mtd_pci_maps_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
+MODULE_DESCRIPTION("Generic PCI map driver");
+MODULE_DEVICE_TABLE(pci, mtd_pci_ids);
+
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
new file mode 100644
index 000000000000..fb87cdd8b873
--- /dev/null
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -0,0 +1,893 @@
+/*
+ * $Id: pcmciamtd.c,v 1.36 2002/10/14 18:49:12 rmk Exp $
+ *
+ * pcmciamtd.c - MTD driver for PCMCIA flash memory cards
+ *
+ * Author: Simon Evans <spse@secret.org.uk>
+ *
+ * Copyright (C) 2002 Simon Evans
+ *
+ * Licence: GPL
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+
+#include <linux/mtd/map.h>
+
+#ifdef CONFIG_MTD_DEBUG
+static int debug = CONFIG_MTD_DEBUG_VERBOSE;
+MODULE_PARM(debug, "i");
+MODULE_PARM_DESC(debug, "Set Debug Level 0=quiet, 5=noisy");
+#undef DEBUG
+#define DEBUG(n, format, arg...) \
+ if (n <= debug) { \
+ printk(KERN_DEBUG __FILE__ ":%s(): " format "\n", __FUNCTION__ , ## arg); \
+ }
+
+#else
+#undef DEBUG
+#define DEBUG(n, arg...)
+static const int debug = 0;
+#endif
+
+#define err(format, arg...) printk(KERN_ERR __FILE__ ": " format "\n" , ## arg)
+#define info(format, arg...) printk(KERN_INFO __FILE__ ": " format "\n" , ## arg)
+#define warn(format, arg...) printk(KERN_WARNING __FILE__ ": " format "\n" , ## arg)
+
+
+#define DRIVER_DESC "PCMCIA Flash memory card driver"
+#define DRIVER_VERSION "$Revision: 1.36 $"
+
+/* Size of the PCMCIA address space: 26 bits = 64 MB */
+#define MAX_PCMCIA_ADDR 0x4000000
+
+/* Per-socket driver state; one instance per attached card, linked into dev_list. */
+struct pcmciamtd_dev {
+	struct list_head list;
+	dev_link_t link;	/* PCMCIA link */
+	caddr_t win_base;	/* ioremapped address of PCMCIA window */
+	unsigned int win_size;	/* size of window */
+	unsigned int cardsize;	/* size of whole card */
+	unsigned int offset;	/* offset into card the window currently points at */
+	struct map_info pcmcia_map;	/* MTD map passed to do_map_probe() */
+	struct mtd_info *mtd_info;	/* registered MTD device, NULL when none */
+	u8 vpp;		/* programming voltage in 1/10 V, from CIS or module param */
+	char mtd_name[sizeof(struct cistpl_vers_1_t)];	/* name built from CISTPL_VERS_1 */
+};
+
+
+static dev_info_t dev_info = "pcmciamtd";
+static LIST_HEAD(dev_list);
+
+/* Module parameters */
+
+/* 2 = do 16-bit transfers, 1 = do 8-bit transfers */
+static int buswidth = 2;
+
+/* Speed of memory accesses, in ns */
+static int mem_speed;
+
+/* Force the size of an SRAM card */
+static int force_size;
+
+/* Force Vpp */
+static int vpp;
+
+/* Set Vpp */
+static int setvpp;
+
+/* Force card to be treated as FLASH, ROM or RAM */
+static int mem_type;
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Simon Evans <spse@secret.org.uk>");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_PARM(buswidth, "i");
+MODULE_PARM_DESC(buswidth, "Set buswidth (1=8 bit, 2=16 bit, default=2)");
+MODULE_PARM(mem_speed, "i");
+MODULE_PARM_DESC(mem_speed, "Set memory access speed in ns");
+MODULE_PARM(force_size, "i");
+MODULE_PARM_DESC(force_size, "Force size of card in MB (1-64)");
+MODULE_PARM(setvpp, "i");
+MODULE_PARM_DESC(setvpp, "Set Vpp (0=Never, 1=On writes, 2=Always on, default=0)");
+MODULE_PARM(vpp, "i");
+MODULE_PARM_DESC(vpp, "Vpp value in 1/10ths eg 33=3.3V 120=12V (Dangerous)");
+MODULE_PARM(mem_type, "i");
+MODULE_PARM_DESC(mem_type, "Set Memory type (0=Flash, 1=RAM, 2=ROM, default=0)");
+
+
+
+/* Report a Card Services failure of call @func (result @ret) via ReportError. */
+static void inline cs_error(client_handle_t handle, int func, int ret)
+{
+	error_info_t err = { func, ret };
+	CardServices(ReportError, handle, &err);
+}
+
+
+/* read/write{8,16} copy_{from,to} routines with window remapping to access whole card */
+
+/*
+ * Make card offset @to visible through the CS memory window and return
+ * the corresponding virtual address, or NULL if MapMemPage fails.  The
+ * window is only moved when @to falls outside the page currently mapped
+ * (cached in dev->offset).
+ * NOTE(review): the mask arithmetic assumes win_size is a power of two;
+ * a force_size module parameter that is not a power of two would break
+ * this — confirm against the window-request loop in pcmciamtd_config().
+ */
+static caddr_t remap_window(struct map_info *map, unsigned long to)
+{
+	struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
+	window_handle_t win = (window_handle_t)map->map_priv_2;
+	memreq_t mrq;
+	int ret;
+
+	mrq.CardOffset = to & ~(dev->win_size-1);	/* align down to window */
+	if(mrq.CardOffset != dev->offset) {
+		DEBUG(2, "Remapping window from 0x%8.8x to 0x%8.8x",
+		      dev->offset, mrq.CardOffset);
+		mrq.Page = 0;
+		if( (ret = CardServices(MapMemPage, win, &mrq)) != CS_SUCCESS) {
+			cs_error(dev->link.handle, MapMemPage, ret);
+			return NULL;
+		}
+		dev->offset = mrq.CardOffset;
+	}
+	return dev->win_base + (to & (dev->win_size-1));
+}
+
+
+/* 8-bit read through the remapping window; returns 0 if remapping fails. */
+static u8 pcmcia_read8_remap(struct map_info *map, unsigned long ofs)
+{
+	caddr_t addr;
+	u8 d;
+
+	addr = remap_window(map, ofs);
+	if(!addr)
+		return 0;
+
+	d = readb(addr);
+	DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02x", ofs, addr, d);
+	return d;
+}
+
+
+/* 16-bit read through the remapping window; returns 0 if remapping fails. */
+static u16 pcmcia_read16_remap(struct map_info *map, unsigned long ofs)
+{
+	caddr_t addr;
+	u16 d;
+
+	addr = remap_window(map, ofs);
+	if(!addr)
+		return 0;
+
+	d = readw(addr);
+	DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04x", ofs, addr, d);
+	return d;
+}
+
+
+/*
+ * Bulk copy from the card, chunked so each memcpy stays within the
+ * current window page; remap_window() moves the window between chunks.
+ * Aborts silently if a remap fails mid-copy.
+ */
+static void pcmcia_copy_from_remap(struct map_info *map, void *to, unsigned long from, ssize_t len)
+{
+	struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
+	unsigned long win_size = dev->win_size;
+
+	DEBUG(3, "to = %p from = %lu len = %u", to, from, len);
+	while(len) {
+		int toread = win_size - (from & (win_size-1));	/* bytes left in this page */
+		caddr_t addr;
+
+		if(toread > len)
+			toread = len;
+
+		addr = remap_window(map, from);
+		if(!addr)
+			return;
+
+		DEBUG(4, "memcpy from %p to %p len = %d", addr, to, toread);
+		memcpy_fromio(to, addr, toread);
+		len -= toread;
+		to += toread;
+		from += toread;
+	}
+}
+
+
+/* 8-bit write through the remapping window; silently dropped on remap failure. */
+static void pcmcia_write8_remap(struct map_info *map, u8 d, unsigned long adr)
+{
+	caddr_t addr = remap_window(map, adr);
+
+	if(!addr)
+		return;
+
+	DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02x", adr, addr, d);
+	writeb(d, addr);
+}
+
+
+/* 16-bit write through the remapping window; silently dropped on remap failure. */
+static void pcmcia_write16_remap(struct map_info *map, u16 d, unsigned long adr)
+{
+	caddr_t addr = remap_window(map, adr);
+	if(!addr)
+		return;
+
+	DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04x", adr, addr, d);
+	writew(d, addr);
+}
+
+
+/*
+ * Bulk copy to the card, chunked per window page (mirror image of
+ * pcmcia_copy_from_remap).  Aborts silently if a remap fails mid-copy.
+ */
+static void pcmcia_copy_to_remap(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+{
+	struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
+	unsigned long win_size = dev->win_size;
+
+	DEBUG(3, "to = %lu from = %p len = %u", to, from, len);
+	while(len) {
+		int towrite = win_size - (to & (win_size-1));	/* bytes left in this page */
+		caddr_t addr;
+
+		if(towrite > len)
+			towrite = len;
+
+		addr = remap_window(map, to);
+		if(!addr)
+			return;
+
+		DEBUG(4, "memcpy from %p to %p len = %d", from, addr, towrite);
+		memcpy_toio(addr, from, towrite);
+		len -= towrite;
+		to += towrite;
+		from += towrite;
+	}
+}
+
+
+/* read/write{8,16} copy_{from,to} routines with direct access */
+
+/* Direct 8-bit read; map_priv_2 holds the window base (card fits in window). */
+static u8 pcmcia_read8(struct map_info *map, unsigned long ofs)
+{
+	caddr_t win_base = (caddr_t)map->map_priv_2;
+	u8 d;
+
+	d = readb(win_base + ofs);
+	DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02x", ofs, win_base + ofs, d);
+	return d;
+}
+
+
+/* Direct 16-bit read; map_priv_2 holds the window base. */
+static u16 pcmcia_read16(struct map_info *map, unsigned long ofs)
+{
+	caddr_t win_base = (caddr_t)map->map_priv_2;
+	u16 d;
+
+	d = readw(win_base + ofs);
+	DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04x", ofs, win_base + ofs, d);
+	return d;
+}
+
+
+/* Direct bulk read; the whole card is visible so no chunking is needed. */
+static void pcmcia_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
+{
+	caddr_t win_base = (caddr_t)map->map_priv_2;
+
+	DEBUG(3, "to = %p from = %lu len = %u", to, from, len);
+	memcpy_fromio(to, win_base + from, len);
+}
+
+
+/* Direct 8-bit write; map_priv_2 holds the window base. */
+static void pcmcia_write8(struct map_info *map, u8 d, unsigned long adr)
+{
+	caddr_t win_base = (caddr_t)map->map_priv_2;
+
+	DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02x", adr, win_base + adr, d);
+	writeb(d, win_base + adr);
+}
+
+
+/* Direct 16-bit write; map_priv_2 holds the window base. */
+static void pcmcia_write16(struct map_info *map, u16 d, unsigned long adr)
+{
+	caddr_t win_base = (caddr_t)map->map_priv_2;
+
+	DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04x", adr, win_base + adr, d);
+	writew(d, win_base + adr);
+}
+
+
+/* Direct bulk write; the whole card is visible so no chunking is needed. */
+static void pcmcia_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+{
+	caddr_t win_base = (caddr_t)map->map_priv_2;
+
+	DEBUG(3, "to = %lu from = %p len = %u", to, from, len);
+	memcpy_toio(win_base + to, from, len);
+}
+
+
+/*
+ * MTD set_vpp hook (installed when setvpp == 1): switch both Vpp pins
+ * between dev->vpp and 0 via a CS ModifyConfiguration call.
+ */
+static void pcmciamtd_set_vpp(struct map_info *map, int on)
+{
+	struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
+	dev_link_t *link = &dev->link;
+	modconf_t mod;
+	int ret;
+
+	mod.Attributes = CONF_VPP1_CHANGE_VALID | CONF_VPP2_CHANGE_VALID;
+	mod.Vcc = 0;
+	mod.Vpp1 = mod.Vpp2 = on ? dev->vpp : 0;
+
+	DEBUG(2, "dev = %p on = %d vpp = %d\n", dev, on, dev->vpp);
+	ret = CardServices(ModifyConfiguration, link->handle, &mod);
+	if(ret != CS_SUCCESS) {
+		cs_error(link->handle, ModifyConfiguration, ret);
+	}
+}
+
+
+/* After a card is removed, pcmciamtd_release() will unregister the
+ * device, and release the PCMCIA configuration. If the device is
+ * still open, this will be postponed until it is closed.
+ */
+
+/*
+ * Release a socket: unregister the MTD device, unmap and release the
+ * memory window, release the CS configuration and clear DEV_CONFIG.
+ * Runs from the link->release timer after card removal, or directly
+ * on configuration failure.
+ *
+ * Fix: the original looked up the device by computing &dev->link while
+ * dev could still be NULL (empty dev_list), i.e. a member access
+ * through a null pointer before the NULL check.  Look up the entry
+ * explicitly instead.
+ */
+static void pcmciamtd_release(u_long arg)
+{
+	dev_link_t *link = (dev_link_t *)arg;
+	struct pcmciamtd_dev *dev = NULL;
+	struct pcmciamtd_dev *curr;
+	int ret;
+	struct list_head *temp1, *temp2;
+
+	DEBUG(3, "link = 0x%p", link);
+	/* Find device in list */
+	list_for_each_safe(temp1, temp2, &dev_list) {
+		curr = list_entry(temp1, struct pcmciamtd_dev, list);
+		if(link == &curr->link) {
+			dev = curr;
+			break;
+		}
+	}
+	if(!dev) {
+		DEBUG(1, "Cant find %p in dev_list", link);
+		return;
+	}
+
+	if(dev->mtd_info) {
+		del_mtd_device(dev->mtd_info);
+		dev->mtd_info = NULL;
+		MOD_DEC_USE_COUNT;
+	}
+	if (link->win) {
+		if(dev->win_base) {
+			iounmap(dev->win_base);
+			dev->win_base = NULL;
+		}
+		CardServices(ReleaseWindow, link->win);
+	}
+	ret = CardServices(ReleaseConfiguration, link->handle);
+	if(ret != CS_SUCCESS)
+		cs_error(link->handle, ReleaseConfiguration, ret);
+
+	link->state &= ~DEV_CONFIG;
+}
+
+
+/*
+ * Walk the card's CIS tuple chain and derive the map settings:
+ * pcmcia_map.size from CISTPL_DEVICE, pcmcia_map.buswidth from
+ * CISTPL_DEVICE_GEO, and mtd_name from CISTPL_VERS_1.  The force_size
+ * and buswidth module parameters override whatever the CIS says.
+ * Sets *new_name when no product name was found and the generic
+ * "PCMCIA Memory card" default is used instead.
+ */
+static void card_settings(struct pcmciamtd_dev *dev, dev_link_t *link, int *new_name)
+{
+	int rc;
+	tuple_t tuple;
+	cisparse_t parse;
+	u_char buf[64];
+
+	tuple.Attributes = 0;
+	tuple.TupleData = (cisdata_t *)buf;
+	tuple.TupleDataMax = sizeof(buf);
+	tuple.TupleOffset = 0;
+	tuple.DesiredTuple = RETURN_FIRST_TUPLE;
+
+	rc = CardServices(GetFirstTuple, link->handle, &tuple);
+	while(rc == CS_SUCCESS) {
+		rc = CardServices(GetTupleData, link->handle, &tuple);
+		if(rc != CS_SUCCESS) {
+			cs_error(link->handle, GetTupleData, rc);
+			break;
+		}
+		rc = CardServices(ParseTuple, link->handle, &tuple, &parse);
+		if(rc != CS_SUCCESS) {
+			cs_error(link->handle, ParseTuple, rc);
+			break;
+		}
+
+		switch(tuple.TupleCode) {
+		case CISTPL_FORMAT: {
+			cistpl_format_t *t = &parse.format;
+			(void)t; /* Shut up, gcc */
+			DEBUG(2, "Format type: %u, Error Detection: %u, offset = %u, length =%u",
+			      t->type, t->edc, t->offset, t->length);
+			break;
+
+		}
+
+		case CISTPL_DEVICE: {
+			cistpl_device_t *t = &parse.device;
+			int i;
+			DEBUG(2, "Common memory:");
+			/* first region's size is taken as the card size */
+			dev->pcmcia_map.size = t->dev[0].size;
+			for(i = 0; i < t->ndev; i++) {
+				DEBUG(2, "Region %d, type = %u", i, t->dev[i].type);
+				DEBUG(2, "Region %d, wp = %u", i, t->dev[i].wp);
+				DEBUG(2, "Region %d, speed = %u ns", i, t->dev[i].speed);
+				DEBUG(2, "Region %d, size = %u bytes", i, t->dev[i].size);
+			}
+			break;
+		}
+
+		case CISTPL_VERS_1: {
+			cistpl_vers_1_t *t = &parse.version_1;
+			int i;
+			/* join all product-info strings with spaces */
+			if(t->ns) {
+				dev->mtd_name[0] = '\0';
+				for(i = 0; i < t->ns; i++) {
+					if(i)
+						strcat(dev->mtd_name, " ");
+					strcat(dev->mtd_name, t->str+t->ofs[i]);
+				}
+			}
+			DEBUG(2, "Found name: %s", dev->mtd_name);
+			break;
+		}
+
+		case CISTPL_JEDEC_C: {
+			cistpl_jedec_t *t = &parse.jedec;
+			int i;
+			for(i = 0; i < t->nid; i++) {
+				DEBUG(2, "JEDEC: 0x%02x 0x%02x", t->id[i].mfr, t->id[i].info);
+			}
+			break;
+		}
+
+		case CISTPL_DEVICE_GEO: {
+			cistpl_device_geo_t *t = &parse.device_geo;
+			int i;
+			/* first region's geometry sets the bus width */
+			dev->pcmcia_map.buswidth = t->geo[0].buswidth;
+			for(i = 0; i < t->ngeo; i++) {
+				DEBUG(2, "region: %d buswidth = %u", i, t->geo[i].buswidth);
+				DEBUG(2, "region: %d erase_block = %u", i, t->geo[i].erase_block);
+				DEBUG(2, "region: %d read_block = %u", i, t->geo[i].read_block);
+				DEBUG(2, "region: %d write_block = %u", i, t->geo[i].write_block);
+				DEBUG(2, "region: %d partition = %u", i, t->geo[i].partition);
+				DEBUG(2, "region: %d interleave = %u", i, t->geo[i].interleave);
+			}
+			break;
+		}
+
+		default:
+			DEBUG(2, "Unknown tuple code %d", tuple.TupleCode);
+		}
+
+		rc = CardServices(GetNextTuple, link->handle, &tuple, &parse);
+	}
+	/* fall back to the maximum PCMCIA address space if the CIS gave no size */
+	if(!dev->pcmcia_map.size)
+		dev->pcmcia_map.size = MAX_PCMCIA_ADDR;
+
+	if(!dev->pcmcia_map.buswidth)
+		dev->pcmcia_map.buswidth = 2;
+
+	/* module parameters override the CIS */
+	if(force_size) {
+		dev->pcmcia_map.size = force_size << 20;
+		DEBUG(2, "size forced to %dM", force_size);
+
+	}
+
+	if(buswidth) {
+		dev->pcmcia_map.buswidth = buswidth;
+		DEBUG(2, "buswidth forced to %d", buswidth);
+	}
+
+	dev->pcmcia_map.name = dev->mtd_name;
+	if(!dev->mtd_name[0]) {
+		strcpy(dev->mtd_name, "PCMCIA Memory card");
+		*new_name = 1;
+	}
+
+	DEBUG(1, "Device: Size: %lu Width:%d Name: %s",
+	      dev->pcmcia_map.size, dev->pcmcia_map.buswidth << 3, dev->mtd_name);
+}
+
+
+/* pcmciamtd_config() is scheduled to run after a CARD_INSERTION event
+ * is received, to configure the PCMCIA socket, and to make the
+ * MTD device available to the system.
+ */
+
+#define CS_CHECK(fn, args...) \
+while ((last_ret=CardServices(last_fn=(fn), args))!=0) goto cs_failed
+
+/*
+ * Configure the socket after card insertion: validate and parse the
+ * CIS, request the largest memory window Card Services will grant,
+ * ioremap it, probe for flash/RAM/ROM and register the resulting MTD
+ * device.  On any failure the socket is torn down via
+ * pcmciamtd_release().
+ *
+ * Fix: the ValidateCIS failure path passed GetTupleData to cs_error(),
+ * mis-reporting which Card Services call actually failed; report
+ * ValidateCIS instead.
+ */
+static void pcmciamtd_config(dev_link_t *link)
+{
+	struct pcmciamtd_dev *dev = link->priv;
+	struct mtd_info *mtd = NULL;
+	cs_status_t status;
+	win_req_t req;
+	int last_ret = 0, last_fn = 0;
+	int ret;
+	int i;
+	config_info_t t;
+	static char *probes[] = { "jedec_probe", "cfi_probe" };
+	cisinfo_t cisinfo;
+	int new_name = 0;
+
+	DEBUG(3, "link=0x%p", link);
+
+	/* Configure card */
+	link->state |= DEV_CONFIG;
+
+	DEBUG(2, "Validating CIS");
+	ret = CardServices(ValidateCIS, link->handle, &cisinfo);
+	if(ret != CS_SUCCESS) {
+		cs_error(link->handle, ValidateCIS, ret);
+	} else {
+		DEBUG(2, "ValidateCIS found %d chains", cisinfo.Chains);
+	}
+
+	card_settings(dev, link, &new_name);
+
+	/* start with the remapping accessors; switched to the direct ones
+	   below if the whole card fits inside the window we obtain */
+	dev->pcmcia_map.read8 = pcmcia_read8_remap;
+	dev->pcmcia_map.read16 = pcmcia_read16_remap;
+	dev->pcmcia_map.copy_from = pcmcia_copy_from_remap;
+	dev->pcmcia_map.write8 = pcmcia_write8_remap;
+	dev->pcmcia_map.write16 = pcmcia_write16_remap;
+	dev->pcmcia_map.copy_to = pcmcia_copy_to_remap;
+	if(setvpp == 1)
+		dev->pcmcia_map.set_vpp = pcmciamtd_set_vpp;
+
+	/* Request a memory window for PCMCIA. Some architectures can map windows up to the maximum
+	   that PCMCIA can support (64MB) - this is ideal and we aim for a window the size of the
+	   whole card - otherwise we try smaller windows until we succeed */
+
+	req.Attributes = WIN_MEMORY_TYPE_CM | WIN_ENABLE;
+	req.Attributes |= (dev->pcmcia_map.buswidth == 1) ? WIN_DATA_WIDTH_8 : WIN_DATA_WIDTH_16;
+	req.Base = 0;
+	req.AccessSpeed = mem_speed;
+	link->win = (window_handle_t)link->handle;
+	req.Size = (force_size) ? force_size << 20 : MAX_PCMCIA_ADDR;
+	dev->win_size = 0;
+
+	/* halve the requested size until RequestWindow succeeds (4KB floor) */
+	do {
+		int ret;
+		DEBUG(2, "requesting window with size = %dKB memspeed = %d",
+		      req.Size >> 10, req.AccessSpeed);
+		link->win = (window_handle_t)link->handle;
+		ret = CardServices(RequestWindow, &link->win, &req);
+		DEBUG(2, "ret = %d dev->win_size = %d", ret, dev->win_size);
+		if(ret) {
+			req.Size >>= 1;
+		} else {
+			DEBUG(2, "Got window of size %dKB", req.Size >> 10);
+			dev->win_size = req.Size;
+			break;
+		}
+	} while(req.Size >= 0x1000);
+
+	DEBUG(2, "dev->win_size = %d", dev->win_size);
+
+	if(!dev->win_size) {
+		err("Cant allocate memory window");
+		pcmciamtd_release((u_long)link);
+		return;
+	}
+	DEBUG(1, "Allocated a window of %dKB", dev->win_size >> 10);
+
+	/* Get write protect status */
+	CS_CHECK(GetStatus, link->handle, &status);
+	DEBUG(2, "status value: 0x%x window handle = 0x%8.8lx",
+	      status.CardState, (unsigned long)link->win);
+	dev->win_base = ioremap(req.Base, req.Size);
+	if(!dev->win_base) {
+		err("ioremap(%lu, %u) failed", req.Base, req.Size);
+		pcmciamtd_release((u_long)link);
+		return;
+	}
+	DEBUG(1, "mapped window dev = %p req.base = 0x%lx base = %p size = 0x%x",
+	      dev, req.Base, dev->win_base, req.Size);
+	dev->cardsize = 0;
+	dev->offset = 0;
+
+	dev->pcmcia_map.map_priv_1 = (unsigned long)dev;
+	dev->pcmcia_map.map_priv_2 = (unsigned long)link->win;
+
+	DEBUG(2, "Getting configuration");
+	CS_CHECK(GetConfigurationInfo, link->handle, &t);
+	DEBUG(2, "Vcc = %d Vpp1 = %d Vpp2 = %d", t.Vcc, t.Vpp1, t.Vpp2);
+	/* vpp module parameter overrides the card's reported Vpp1 */
+	dev->vpp = (vpp) ? vpp : t.Vpp1;
+	link->conf.Attributes = 0;
+	link->conf.Vcc = t.Vcc;
+	if(setvpp == 2) {
+		link->conf.Vpp1 = dev->vpp;
+		link->conf.Vpp2 = dev->vpp;
+	} else {
+		link->conf.Vpp1 = 0;
+		link->conf.Vpp2 = 0;
+	}
+
+	link->conf.IntType = INT_MEMORY;
+	link->conf.ConfigBase = t.ConfigBase;
+	link->conf.Status = t.Status;
+	link->conf.Pin = t.Pin;
+	link->conf.Copy = t.Copy;
+	link->conf.ExtStatus = t.ExtStatus;
+	link->conf.ConfigIndex = 0;
+	link->conf.Present = t.Present;
+	DEBUG(2, "Setting Configuration");
+	ret = CardServices(RequestConfiguration, link->handle, &link->conf);
+	if(ret != CS_SUCCESS) {
+		cs_error(link->handle, RequestConfiguration, ret);
+	}
+
+	link->dev = NULL;
+	link->state &= ~DEV_CONFIG_PENDING;
+
+	/* mem_type selects the probe: 1=RAM, 2=ROM, otherwise try flash probes */
+	if(mem_type == 1) {
+		mtd = do_map_probe("map_ram", &dev->pcmcia_map);
+	} else if(mem_type == 2) {
+		mtd = do_map_probe("map_rom", &dev->pcmcia_map);
+	} else {
+		for(i = 0; i < sizeof(probes) / sizeof(char *); i++) {
+			DEBUG(1, "Trying %s", probes[i]);
+			mtd = do_map_probe(probes[i], &dev->pcmcia_map);
+			if(mtd)
+				break;
+
+			DEBUG(1, "FAILED: %s", probes[i]);
+		}
+	}
+
+	if(!mtd) {
+		DEBUG(1, "Cant find an MTD");
+		pcmciamtd_release((u_long)link);
+		return;
+	}
+
+	dev->mtd_info = mtd;
+	mtd->module = THIS_MODULE;
+	dev->cardsize = mtd->size;
+
+	if(new_name) {
+		int size = 0;
+		char unit = ' ';
+		/* Since we are using a default name, make it better by adding in the
+		   size */
+		if(mtd->size < 1048576) { /* <1MB in size, show size in K */
+			size = mtd->size >> 10;
+			unit = 'K';
+		} else {
+			size = mtd->size >> 20;
+			unit = 'M';
+		}
+		sprintf(mtd->name, "%d%cB %s", size, unit, "PCMCIA Memory card");
+	}
+
+	/* If the memory found fits completely into the mapped PCMCIA window,
+	   use the faster non-remapping read/write functions */
+	if(dev->cardsize <= dev->win_size) {
+		DEBUG(1, "Using non remapping memory functions");
+
+		dev->pcmcia_map.map_priv_2 = (unsigned long)dev->win_base;
+		dev->pcmcia_map.read8 = pcmcia_read8;
+		dev->pcmcia_map.read16 = pcmcia_read16;
+		dev->pcmcia_map.copy_from = pcmcia_copy_from;
+		dev->pcmcia_map.write8 = pcmcia_write8;
+		dev->pcmcia_map.write16 = pcmcia_write16;
+		dev->pcmcia_map.copy_to = pcmcia_copy_to;
+	}
+
+	MOD_INC_USE_COUNT;
+	if(add_mtd_device(mtd)) {
+		dev->mtd_info = NULL;
+		MOD_DEC_USE_COUNT;
+		err("Couldnt register MTD device");
+		pcmciamtd_release((u_long)link);
+		return;
+	}
+	DEBUG(1, "mtd added @ %p mtd->priv = %p", mtd, mtd->priv);
+
+	return;
+
+ cs_failed:
+	cs_error(link->handle, last_fn, last_ret);
+	err("CS Error, exiting");
+	pcmciamtd_release((u_long)link);
+	return;
+}
+
+
+/* The card status event handler. Mostly, this schedules other
+ * stuff to run after an event is received. A CARD_REMOVAL event
+ * also sets some flags to discourage the driver from trying
+ * to talk to the card any more.
+ */
+
+/*
+ * Card Services event callback registered in pcmciamtd_attach().
+ * Insertion configures the socket immediately; removal defers release
+ * to the link->release timer so in-flight accesses can drain.
+ */
+static int pcmciamtd_event(event_t event, int priority,
+		       event_callback_args_t *args)
+{
+	dev_link_t *link = args->client_data;
+
+	DEBUG(1, "event=0x%06x", event);
+	switch (event) {
+	case CS_EVENT_CARD_REMOVAL:
+		DEBUG(2, "EVENT_CARD_REMOVAL");
+		link->state &= ~DEV_PRESENT;
+		if (link->state & DEV_CONFIG)
+			mod_timer(&link->release, jiffies + HZ/20);	/* release in 50ms */
+		break;
+	case CS_EVENT_CARD_INSERTION:
+		DEBUG(2, "EVENT_CARD_INSERTION");
+		link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
+		pcmciamtd_config(link);
+		break;
+	case CS_EVENT_PM_SUSPEND:
+		DEBUG(2, "EVENT_PM_SUSPEND");
+		link->state |= DEV_SUSPEND;
+		/* Fall through... */
+	case CS_EVENT_RESET_PHYSICAL:
+		DEBUG(2, "EVENT_RESET_PHYSICAL");
+		/* get_lock(link); */
+		break;
+	case CS_EVENT_PM_RESUME:
+		DEBUG(2, "EVENT_PM_RESUME");
+		link->state &= ~DEV_SUSPEND;
+		/* Fall through... */
+	case CS_EVENT_CARD_RESET:
+		DEBUG(2, "EVENT_CARD_RESET");
+		/* free_lock(link); */
+		break;
+	default:
+		DEBUG(2, "Unknown event %d", event);
+	}
+	return 0;
+}
+
+
+/* This deletes a driver "instance". The device is de-registered
+ * with Card Services. If it has been released, all local data
+ * structures are freed. Otherwise, the structures will be freed
+ * when the device is released.
+ */
+
+/*
+ * Delete a driver "instance": deregister from Card Services and free
+ * the per-device state.  If the device is still configured, only mark
+ * the link stale; the structures are freed once it is released.
+ *
+ * Fix: as in pcmciamtd_release(), the original evaluated &dev->link
+ * while dev could still be NULL (empty dev_list) before checking it;
+ * do the lookup with an explicit found-device result instead.
+ */
+static void pcmciamtd_detach(dev_link_t *link)
+{
+	int ret;
+	struct pcmciamtd_dev *dev = NULL;
+	struct pcmciamtd_dev *curr;
+	struct list_head *temp1, *temp2;
+
+	DEBUG(3, "link=0x%p", link);
+
+	/* Find device in list */
+	list_for_each_safe(temp1, temp2, &dev_list) {
+		curr = list_entry(temp1, struct pcmciamtd_dev, list);
+		if(link == &curr->link) {
+			dev = curr;
+			break;
+		}
+	}
+	if(!dev) {
+		DEBUG(1, "Cant find %p in dev_list", link);
+		return;
+	}
+
+	del_timer(&link->release);
+
+	if (link->state & DEV_CONFIG) {
+		/* still configured: defer freeing until release */
+		DEBUG(3, "DEV_CONFIG set");
+		link->state |= DEV_STALE_LINK;
+		return;
+	}
+
+	if (link->handle) {
+		DEBUG(2, "Deregistering with card services");
+		ret = CardServices(DeregisterClient, link->handle);
+		if (ret != CS_SUCCESS)
+			cs_error(link->handle, DeregisterClient, ret);
+	}
+	DEBUG(3, "Freeing dev (%p)", dev);
+	list_del(&dev->list);
+	link->priv = NULL;
+	kfree(dev);
+}
+
+
+/* pcmciamtd_attach() creates an "instance" of the driver, allocating
+ * local data structures for one device. The device is registered
+ * with Card Services.
+ */
+
+/*
+ * Allocate and zero a pcmciamtd_dev, add it to dev_list, and register
+ * the event handler with Card Services.  Registration makes CS deliver
+ * a CARD_INSERTION event, which drives pcmciamtd_config().  Returns
+ * the new link, or NULL on allocation/registration failure (the
+ * partially built instance is detached on the latter).
+ */
+static dev_link_t *pcmciamtd_attach(void)
+{
+	struct pcmciamtd_dev *dev;
+	dev_link_t *link;
+	client_reg_t client_reg;
+	int ret;
+
+	/* Create new memory card device */
+	dev = kmalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev) return NULL;
+	DEBUG(1, "dev=0x%p", dev);
+
+	memset(dev, 0, sizeof(*dev));
+	link = &dev->link; link->priv = dev;
+
+	/* deferred-release timer used on card removal */
+	link->release.function = &pcmciamtd_release;
+	link->release.data = (u_long)link;
+
+	link->conf.Attributes = 0;
+	link->conf.IntType = INT_MEMORY;
+
+	list_add(&dev->list, &dev_list);
+
+	/* Register with Card Services */
+	client_reg.dev_info = &dev_info;
+	client_reg.Attributes = INFO_IO_CLIENT | INFO_CARD_SHARE;
+	client_reg.EventMask =
+		CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+		CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+		CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+	client_reg.event_handler = &pcmciamtd_event;
+	client_reg.Version = 0x0210;
+	client_reg.event_callback_args.client_data = link;
+	DEBUG(2, "Calling RegisterClient");
+	ret = CardServices(RegisterClient, &link->handle, &client_reg);
+	if (ret != 0) {
+		cs_error(link->handle, RegisterClient, ret);
+		pcmciamtd_detach(link);
+		return NULL;
+	}
+
+	return link;
+}
+
+
+/*
+ * Module init: verify the Card Services release matches the one we
+ * were built against, sanity-check module parameters (silently falling
+ * back to defaults on bad values), and register the attach/detach
+ * callbacks with the PCMCIA core.
+ */
+static int __init init_pcmciamtd(void)
+{
+	servinfo_t serv;
+
+	info(DRIVER_DESC " " DRIVER_VERSION);
+	CardServices(GetCardServicesInfo, &serv);
+	if (serv.Revision != CS_RELEASE_CODE) {
+		err("Card Services release does not match!");
+		/* NOTE(review): -1 is -EPERM; -ENODEV would be clearer */
+		return -1;
+	}
+
+	if(buswidth && buswidth != 1 && buswidth != 2) {
+		info("bad buswidth (%d), using default", buswidth);
+		buswidth = 2;
+	}
+	if(force_size && (force_size < 1 || force_size > 64)) {
+		info("bad force_size (%d), using default", force_size);
+		force_size = 0;
+	}
+	if(mem_type && mem_type != 1 && mem_type != 2) {
+		info("bad mem_type (%d), using default", mem_type);
+		mem_type = 0;
+	}
+	register_pccard_driver(&dev_info, &pcmciamtd_attach, &pcmciamtd_detach);
+	return 0;
+}
+
+
+/*
+ * Module exit: unregister from the PCMCIA core, then force release and
+ * detach of any devices still left configured in dev_list.
+ */
+static void __exit exit_pcmciamtd(void)
+{
+	struct list_head *temp1, *temp2;
+
+	DEBUG(1, DRIVER_DESC " unloading");
+	unregister_pccard_driver(&dev_info);
+	list_for_each_safe(temp1, temp2, &dev_list) {
+		dev_link_t *link = &list_entry(temp1, struct pcmciamtd_dev, list)->link;
+		if (link && (link->state & DEV_CONFIG)) {
+			pcmciamtd_release((u_long)link);
+			pcmciamtd_detach(link);
+		}
+	}
+}
+
+module_init(init_pcmciamtd);
+module_exit(exit_pcmciamtd);
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index b6c1c0f9efe7..b2592a0a0d63 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -3,28 +3,35 @@
*
* (C) 2000 Nicolas Pitre <nico@cam.org>
*
- * $Id: sa1100-flash.c,v 1.22 2001/10/02 10:04:52 rmk Exp $
+ * $Id: sa1100-flash.c,v 1.28 2002/05/07 13:48:38 abz Exp $
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
+#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
+#include <linux/mtd/concat.h>
#include <asm/hardware.h>
+#include <asm/mach-types.h>
#include <asm/io.h>
+#include <asm/sizes.h>
+#include <asm/arch/h3600.h>
#ifndef CONFIG_ARCH_SA1100
#error This is for SA1100 architecture only
#endif
-
-#define WINDOW_ADDR 0xe8000000
+/*
+ * This isn't complete yet, so...
+ */
+#define CONFIG_MTD_SA1100_STATICMAP 1
static __u8 sa1100_read8(struct map_info *map, unsigned long ofs)
{
@@ -66,33 +73,7 @@ static void sa1100_copy_to(struct map_info *map, unsigned long to, const void *f
memcpy((void *)(map->map_priv_1 + to), from, len);
}
-
-#ifdef CONFIG_SA1100_H3600
-
-static void h3600_set_vpp(struct map_info *map, int vpp)
-{
- if (vpp)
- set_h3600_egpio(EGPIO_H3600_VPP_ON);
- else
- clr_h3600_egpio(EGPIO_H3600_VPP_ON);
-}
-
-#endif
-
-#ifdef CONFIG_SA1100_JORNADA720
-
-static void jornada720_set_vpp(int vpp)
-{
- if (vpp)
- PPSR |= 0x80;
- else
- PPSR &= ~0x80;
- PPDR |= 0x80;
-}
-
-#endif
-
-static struct map_info sa1100_map = {
+static struct map_info sa1100_map __initdata = {
name: "SA1100 flash",
read8: sa1100_read8,
read16: sa1100_read16,
@@ -102,609 +83,1232 @@ static struct map_info sa1100_map = {
write16: sa1100_write16,
write32: sa1100_write32,
copy_to: sa1100_copy_to,
-
- map_priv_1: WINDOW_ADDR,
};
+#ifdef CONFIG_MTD_SA1100_STATICMAP
/*
* Here are partition information for all known SA1100-based devices.
* See include/linux/mtd/partitions.h for definition of the mtd_partition
* structure.
- *
- * The *_max_flash_size is the maximum possible mapped flash size which
- * is not necessarily the actual flash size. It must correspond to the
- * value specified in the mapping definition defined by the
- * "struct map_desc *_io_desc" for the corresponding machine.
+ *
+ * Please note:
+ * 1. We no longer support static flash mappings via the machine io_desc
+ * structure.
+ * 2. The flash size given should be the largest flash size that can
+ * be accommodated.
+ *
+ * The MTD layer will detect flash chip aliasing and reduce the size of
+ * the map accordingly.
+ *
+ * Please keep these in alphabetical order, and formatted as per existing
+ * entries. Thanks.
*/
-#ifdef CONFIG_SA1100_ASSABET
+#ifdef CONFIG_SA1100_ADSBITSY
+static struct mtd_partition adsbitsy_partitions[] = {
+ {
+ name: "bootROM",
+ size: 0x80000,
+ offset: 0,
+ mask_flags: MTD_WRITEABLE, /* force read-only */
+ }, {
+ name: "zImage",
+ size: 0x100000,
+ offset: MTDPART_OFS_APPEND,
+ mask_flags: MTD_WRITEABLE, /* force read-only */
+ }, {
+ name: "ramdisk.gz",
+ size: 0x300000,
+ offset: MTDPART_OFS_APPEND,
+ mask_flags: MTD_WRITEABLE, /* force read-only */
+ }, {
+ name: "User FS",
+ size: MTDPART_SIZ_FULL,
+ offset: MTDPART_OFS_APPEND,
+ }
+};
+#endif
+#ifdef CONFIG_SA1100_ASSABET
/* Phase 4 Assabet has two 28F160B3 flash parts in bank 0: */
-static unsigned long assabet4_max_flash_size = 0x00400000;
static struct mtd_partition assabet4_partitions[] = {
- {
- name: "bootloader",
- size: 0x00020000,
- offset: 0,
- mask_flags: MTD_WRITEABLE
- },{
- name: "bootloader params",
- size: 0x00020000,
- offset: MTDPART_OFS_APPEND,
- mask_flags: MTD_WRITEABLE
- },{
- name: "jffs",
- size: MTDPART_SIZ_FULL,
- offset: MTDPART_OFS_APPEND
- }
+ {
+ name: "bootloader",
+ size: 0x00020000,
+ offset: 0,
+ mask_flags: MTD_WRITEABLE,
+ }, {
+ name: "bootloader params",
+ size: 0x00020000,
+ offset: MTDPART_OFS_APPEND,
+ mask_flags: MTD_WRITEABLE,
+ }, {
+ name: "jffs",
+ size: MTDPART_SIZ_FULL,
+ offset: MTDPART_OFS_APPEND,
+ }
};
/* Phase 5 Assabet has two 28F128J3A flash parts in bank 0: */
-static unsigned long assabet5_max_flash_size = 0x02000000;
static struct mtd_partition assabet5_partitions[] = {
- {
- name: "bootloader",
- size: 0x00040000,
- offset: 0,
- mask_flags: MTD_WRITEABLE
- },{
- name: "bootloader params",
- size: 0x00040000,
- offset: MTDPART_OFS_APPEND,
- mask_flags: MTD_WRITEABLE
- },{
- name: "jffs",
- size: MTDPART_SIZ_FULL,
- offset: MTDPART_OFS_APPEND
- }
+ {
+ name: "bootloader",
+ size: 0x00040000,
+ offset: 0,
+ mask_flags: MTD_WRITEABLE,
+ }, {
+ name: "bootloader params",
+ size: 0x00040000,
+ offset: MTDPART_OFS_APPEND,
+ mask_flags: MTD_WRITEABLE,
+ }, {
+ name: "jffs",
+ size: MTDPART_SIZ_FULL,
+ offset: MTDPART_OFS_APPEND,
+ }
};
-#define assabet_max_flash_size assabet5_max_flash_size
-#define assabet_partitions assabet5_partitions
-
+#define assabet_partitions assabet5_partitions
#endif
-#ifdef CONFIG_SA1100_FLEXANET
-
-/* Flexanet has two 28F128J3A flash parts in bank 0: */
-static unsigned long flexanet_max_flash_size = 0x02000000;
-static struct mtd_partition flexanet_partitions[] = {
- {
- name: "bootloader",
- size: 0x00040000,
- offset: 0,
- mask_flags: MTD_WRITEABLE
- },{
- name: "bootloader params",
- size: 0x00040000,
- offset: MTDPART_OFS_APPEND,
- mask_flags: MTD_WRITEABLE
- },{
- name: "kernel",
- size: 0x000C0000,
- offset: MTDPART_OFS_APPEND,
- mask_flags: MTD_WRITEABLE
- },{
- name: "altkernel",
- size: 0x000C0000,
- offset: MTDPART_OFS_APPEND,
- mask_flags: MTD_WRITEABLE
- },{
- name: "root",
- size: 0x00400000,
- offset: MTDPART_OFS_APPEND,
- mask_flags: MTD_WRITEABLE
- },{
- name: "free1",
- size: 0x00300000,
- offset: MTDPART_OFS_APPEND,
- mask_flags: MTD_WRITEABLE
- },{
- name: "free2",
- size: 0x00300000,
- offset: MTDPART_OFS_APPEND,
- mask_flags: MTD_WRITEABLE
- },{
- name: "free3",
- size: MTDPART_SIZ_FULL,
- offset: MTDPART_OFS_APPEND,
- mask_flags: MTD_WRITEABLE
- }
+#ifdef CONFIG_SA1100_BADGE4
+/*
+ * 1 x Intel 28F320C3BA100 Advanced+ Boot Block Flash (32 Mi bit)
+ * Eight 4 KiW Parameter Bottom Blocks (64 KiB)
+ * Sixty-three 32 KiW Main Blocks (4032 Ki b)
+ */
+static struct mtd_partition badge4_partitions[] = {
+ {
+ name: "BLOB boot loader",
+ offset: 0,
+ size: 0x0000A000
+ }, {
+ name: "params",
+ offset: MTDPART_OFS_APPEND,
+ size: 0x00006000
+ }, {
+ name: "kernel",
+ offset: MTDPART_OFS_APPEND,
+ size: 0x00100000
+ }, {
+ name: "root",
+ offset: MTDPART_OFS_APPEND,
+ size: MTDPART_SIZ_FULL
+ }
};
-
#endif
-#ifdef CONFIG_SA1100_HUW_WEBPANEL
-static unsigned long huw_webpanel_max_flash_size = 0x01000000;
-static struct mtd_partition huw_webpanel_partitions[] = {
- {
- name: "Loader",
- size: 0x00040000,
- offset: 0,
- },{
- name: "Sector 1",
- size: 0x00040000,
- offset: MTDPART_OFS_APPEND,
- },{
- size: MTDPART_SIZ_FULL,
- offset: MTDPART_OFS_APPEND,
+
+#ifdef CONFIG_SA1100_CERF
+#ifdef CONFIG_SA1100_CERF_FLASH_32MB
+static struct mtd_partition cerf_partitions[] = {
+ {
+ name: "firmware",
+ size: 0x00040000,
+ offset: 0,
+ }, {
+ name: "params",
+ size: 0x00040000,
+ offset: 0x00040000,
+ }, {
+ name: "kernel",
+ size: 0x00100000,
+ offset: 0x00080000,
+ }, {
+ name: "rootdisk",
+ size: 0x01E80000,
+ offset: 0x00180000,
}
};
-#endif /* CONFIG_SA1100_HUW_WEBPANEL */
-
-
-#ifdef CONFIG_SA1100_H3600
-
-static unsigned long h3600_max_flash_size = 0x02000000;
-static struct mtd_partition h3600_partitions[] = {
+#elif defined CONFIG_SA1100_CERF_FLASH_16MB
+static struct mtd_partition cerf_partitions[] = {
{
- name: "H3600 boot firmware",
- size: 0x00040000,
- offset: 0,
- mask_flags: MTD_WRITEABLE /* force read-only */
- },{
- name: "H3600 kernel",
- size: 0x00080000,
- offset: 0x40000
- },{
- name: "H3600 params",
- size: 0x00040000,
- offset: 0xC0000
- },{
-#ifdef CONFIG_JFFS2_FS
- name: "H3600 root jffs2",
- offset: 0x00100000,
- size: MTDPART_SIZ_FULL
+ name: "firmware",
+ size: 0x00020000,
+ offset: 0,
+ }, {
+ name: "params",
+ size: 0x00020000,
+ offset: 0x00020000,
+ }, {
+ name: "kernel",
+ size: 0x00100000,
+ offset: 0x00040000,
+ }, {
+ name: "rootdisk",
+ size: 0x00EC0000,
+ offset: 0x00140000,
+ }
+};
+#elif defined CONFIG_SA1100_CERF_FLASH_8MB
+# error "Unwritten type definition"
#else
- name: "H3600 initrd",
- size: 0x00100000,
- offset: 0x00100000
- },{
- name: "H3600 root cramfs",
- size: 0x00300000,
- offset: 0x00200000
- },{
- name: "H3600 usr cramfs",
- size: 0x00800000,
- offset: 0x00500000
- },{
- name: "H3600 usr local",
- offset: 0x00d00000,
- size: MTDPART_SIZ_FULL
+# error "Undefined memory orientation for CERF in sa1100-flash.c"
+#endif
#endif
+
+#ifdef CONFIG_SA1100_CONSUS
+static struct mtd_partition consus_partitions[] = {
+ {
+ name: "Consus boot firmware",
+ offset: 0,
+ size: 0x00040000,
+		mask_flags: MTD_WRITEABLE, /* force read-only */
+ }, {
+ name: "Consus kernel",
+ offset: 0x00040000,
+ size: 0x00100000,
+ mask_flags: 0,
+ }, {
+ name: "Consus disk",
+ offset: 0x00140000,
+ /* The rest (up to 16M) for jffs. We could put 0 and
+ make it find the size automatically, but right now
+ i have 32 megs. jffs will use all 32 megs if given
+ the chance, and this leads to horrible problems
+ when you try to re-flash the image because blob
+ won't erase the whole partition. */
+ size: 0x01000000 - 0x00140000,
+ mask_flags: 0,
+ }, {
+ /* this disk is a secondary disk, which can be used as
+ needed, for simplicity, make it the size of the other
+ consus partition, although realistically it could be
+ the remainder of the disk (depending on the file
+ system used) */
+ name: "Consus disk2",
+ offset: 0x01000000,
+ size: 0x01000000 - 0x00140000,
+ mask_flags: 0,
}
};
+#endif
+#ifdef CONFIG_SA1100_FLEXANET
+/* Flexanet has two 28F128J3A flash parts in bank 0: */
+#define FLEXANET_FLASH_SIZE 0x02000000
+static struct mtd_partition flexanet_partitions[] = {
+ {
+ name: "bootloader",
+ size: 0x00040000,
+ offset: 0,
+ mask_flags: MTD_WRITEABLE,
+ }, {
+ name: "bootloader params",
+ size: 0x00040000,
+ offset: MTDPART_OFS_APPEND,
+ mask_flags: MTD_WRITEABLE,
+ }, {
+ name: "kernel",
+ size: 0x000C0000,
+ offset: MTDPART_OFS_APPEND,
+ mask_flags: MTD_WRITEABLE,
+ }, {
+ name: "altkernel",
+ size: 0x000C0000,
+ offset: MTDPART_OFS_APPEND,
+ mask_flags: MTD_WRITEABLE,
+ }, {
+ name: "root",
+ size: 0x00400000,
+ offset: MTDPART_OFS_APPEND,
+ mask_flags: MTD_WRITEABLE,
+ }, {
+ name: "free1",
+ size: 0x00300000,
+ offset: MTDPART_OFS_APPEND,
+ mask_flags: MTD_WRITEABLE,
+ }, {
+ name: "free2",
+ size: 0x00300000,
+ offset: MTDPART_OFS_APPEND,
+ mask_flags: MTD_WRITEABLE,
+ }, {
+ name: "free3",
+ size: MTDPART_SIZ_FULL,
+ offset: MTDPART_OFS_APPEND,
+ mask_flags: MTD_WRITEABLE,
+ }
+};
#endif
+
#ifdef CONFIG_SA1100_FREEBIRD
-static unsigned long freebird_max_flash_size = 0x02000000;
static struct mtd_partition freebird_partitions[] = {
#if CONFIG_SA1100_FREEBIRD_NEW
- {
- name: "firmware",
- size: 0x00040000,
- offset: 0,
- mask_flags: MTD_WRITEABLE /* force read-only */
- },{
- name: "kernel",
- size: 0x00080000,
- offset: 0x40000
- },{
- name: "params",
- size: 0x00040000,
- offset: 0xC0000
- },{
- name: "initrd",
- size: 0x00100000,
- offset: 0x00100000
- },{
- name: "root cramfs",
- size: 0x00300000,
- offset: 0x00200000
- },{
- name: "usr cramfs",
- size: 0x00C00000,
- offset: 0x00500000
- },{
- name: "local",
- offset: 0x01100000,
- size: MTDPART_SIZ_FULL
+ {
+ name: "firmware",
+ size: 0x00040000,
+ offset: 0,
+ mask_flags: MTD_WRITEABLE, /* force read-only */
+ }, {
+ name: "kernel",
+ size: 0x00080000,
+ offset: 0x00040000,
+ }, {
+ name: "params",
+ size: 0x00040000,
+ offset: 0x000C0000,
+ }, {
+ name: "initrd",
+ size: 0x00100000,
+ offset: 0x00100000,
+ }, {
+ name: "root cramfs",
+ size: 0x00300000,
+ offset: 0x00200000,
+ }, {
+ name: "usr cramfs",
+ size: 0x00C00000,
+ offset: 0x00500000,
+ }, {
+ name: "local",
+ size: MTDPART_SIZ_FULL,
+ offset: 0x01100000,
}
#else
- { offset: 0, size: 0x00040000, },
- { offset: MTDPART_OFS_APPEND, size: 0x000c0000, },
- { offset: MTDPART_OFS_APPEND, size: 0x00400000, },
- { offset: MTDPART_OFS_APPEND, size: MTDPART_SIZ_FULL }
+ {
+ size: 0x00040000,
+ offset: 0,
+ }, {
+ size: 0x000c0000,
+ offset: MTDPART_OFS_APPEND,
+ }, {
+ size: 0x00400000,
+ offset: MTDPART_OFS_APPEND,
+ }, {
+ size: MTDPART_SIZ_FULL,
+ offset: MTDPART_OFS_APPEND,
+ }
#endif
- };
+};
#endif
-
-
-#ifdef CONFIG_SA1100_CERF
-static unsigned long cerf_max_flash_size = 0x01000000;
-static struct mtd_partition cerf_partitions[] = {
- { offset: 0, size: 0x00800000 },
- { offset: MTDPART_OFS_APPEND, size: 0x00800000 }
+#ifdef CONFIG_SA1100_FRODO
+/* Frodo has 2 x 16M 28F128J3A flash chips in bank 0: */
+static struct mtd_partition frodo_partitions[] =
+{
+ {
+ name: "bootloader",
+ size: 0x00040000,
+ offset: 0x00000000,
+ mask_flags: MTD_WRITEABLE
+ }, {
+ name: "bootloader params",
+ size: 0x00040000,
+ offset: MTDPART_OFS_APPEND,
+ mask_flags: MTD_WRITEABLE
+ }, {
+ name: "kernel",
+ size: 0x00100000,
+ offset: MTDPART_OFS_APPEND,
+ mask_flags: MTD_WRITEABLE
+ }, {
+ name: "ramdisk",
+ size: 0x00400000,
+ offset: MTDPART_OFS_APPEND,
+ mask_flags: MTD_WRITEABLE
+ }, {
+ name: "file system",
+ size: MTDPART_SIZ_FULL,
+ offset: MTDPART_OFS_APPEND
+ }
};
-
#endif
#ifdef CONFIG_SA1100_GRAPHICSCLIENT
-
-static unsigned long graphicsclient_max_flash_size = 0x01000000;
static struct mtd_partition graphicsclient_partitions[] = {
- {
- name: "zImage",
- offset: 0,
- size: 0x100000
- },
- {
- name: "ramdisk.gz",
- offset: MTDPART_OFS_APPEND,
- size: 0x300000
- },
- {
- name: "User FS",
- offset: MTDPART_OFS_APPEND,
- size: MTDPART_SIZ_FULL
+ {
+ name: "zImage",
+ size: 0x100000,
+ offset: 0,
+ mask_flags: MTD_WRITEABLE, /* force read-only */
+ }, {
+ name: "ramdisk.gz",
+ size: 0x300000,
+ offset: MTDPART_OFS_APPEND,
+ mask_flags: MTD_WRITEABLE, /* force read-only */
+ }, {
+ name: "User FS",
+ size: MTDPART_SIZ_FULL,
+ offset: MTDPART_OFS_APPEND,
}
};
-
#endif
#ifdef CONFIG_SA1100_GRAPHICSMASTER
-
-static unsigned long graphicsmaster_max_flash_size = 0x01000000;
static struct mtd_partition graphicsmaster_partitions[] = {
- {
- name: "zImage",
- offset: 0,
- size: 0x100000
+ {
+ name: "zImage",
+ size: 0x100000,
+ offset: 0,
+ mask_flags: MTD_WRITEABLE, /* force read-only */
},
- {
- name: "ramdisk.gz",
- offset: MTDPART_OFS_APPEND,
- size: 0x300000
+ {
+ name: "ramdisk.gz",
+ size: 0x300000,
+ offset: MTDPART_OFS_APPEND,
+ mask_flags: MTD_WRITEABLE, /* force read-only */
},
- {
- name: "User FS",
- offset: MTDPART_OFS_APPEND,
- size: MTDPART_SIZ_FULL
+ {
+ name: "User FS",
+ size: MTDPART_SIZ_FULL,
+ offset: MTDPART_OFS_APPEND,
}
};
+#endif
+#ifdef CONFIG_SA1100_H3XXX
+static struct mtd_partition h3xxx_partitions[] = {
+ {
+ name: "H3XXX boot firmware",
+ size: 0x00040000,
+ offset: 0,
+ mask_flags: MTD_WRITEABLE, /* force read-only */
+ }, {
+#ifdef CONFIG_MTD_2PARTS_IPAQ
+ name: "H3XXX root jffs2",
+ size: MTDPART_SIZ_FULL,
+ offset: 0x00040000,
+#else
+ name: "H3XXX kernel",
+ size: 0x00080000,
+ offset: 0x00040000,
+ }, {
+ name: "H3XXX params",
+ size: 0x00040000,
+ offset: 0x000C0000,
+ }, {
+#ifdef CONFIG_JFFS2_FS
+ name: "H3XXX root jffs2",
+ size: MTDPART_SIZ_FULL,
+ offset: 0x00100000,
+#else
+ name: "H3XXX initrd",
+ size: 0x00100000,
+ offset: 0x00100000,
+ }, {
+ name: "H3XXX root cramfs",
+ size: 0x00300000,
+ offset: 0x00200000,
+ }, {
+ name: "H3XXX usr cramfs",
+ size: 0x00800000,
+ offset: 0x00500000,
+ }, {
+ name: "H3XXX usr local",
+ size: MTDPART_SIZ_FULL,
+ offset: 0x00d00000,
+#endif
#endif
+ }
+};
-#ifdef CONFIG_SA1100_PANGOLIN
+static void h3xxx_set_vpp(struct map_info *map, int vpp)
+{
+ assign_h3600_egpio(IPAQ_EGPIO_VPP_ON, vpp);
+}
+#else
+#define h3xxx_set_vpp NULL
+#endif
-static unsigned long pangolin_max_flash_size = 0x04000000;
-static struct mtd_partition pangolin_partitions[] = {
- {
- name: "boot firmware",
- offset: 0x00000000,
- size: 0x00080000,
- mask_flags: MTD_WRITEABLE, /* force read-only */
- },
- {
- name: "kernel",
- offset: 0x00080000,
- size: 0x00100000,
- },
+#ifdef CONFIG_SA1100_HUW_WEBPANEL
+static struct mtd_partition huw_webpanel_partitions[] = {
{
- name: "initrd",
- offset: 0x00180000,
- size: 0x00280000,
- },
+ name: "Loader",
+ size: 0x00040000,
+ offset: 0,
+ }, {
+ name: "Sector 1",
+ size: 0x00040000,
+ offset: MTDPART_OFS_APPEND,
+ }, {
+ size: MTDPART_SIZ_FULL,
+ offset: MTDPART_OFS_APPEND,
+ }
+};
+#endif
+
+#ifdef CONFIG_SA1100_JORNADA720
+static struct mtd_partition jornada720_partitions[] = {
{
- name: "initrd-test",
- offset: 0x00400000,
- size: 0x03C00000,
+ name: "JORNADA720 boot firmware",
+ size: 0x00040000,
+ offset: 0,
+ mask_flags: MTD_WRITEABLE, /* force read-only */
+ }, {
+ name: "JORNADA720 kernel",
+ size: 0x000c0000,
+ offset: 0x00040000,
+ }, {
+ name: "JORNADA720 params",
+ size: 0x00040000,
+ offset: 0x00100000,
+ }, {
+ name: "JORNADA720 initrd",
+ size: 0x00100000,
+ offset: 0x00140000,
+ }, {
+ name: "JORNADA720 root cramfs",
+ size: 0x00300000,
+ offset: 0x00240000,
+ }, {
+ name: "JORNADA720 usr cramfs",
+ size: 0x00800000,
+ offset: 0x00540000,
+ }, {
+ name: "JORNADA720 usr local",
+		size: 0, /* will expand to the end of the flash */
+ offset: 0x00d00000,
}
};
+static void jornada720_set_vpp(int vpp)
+{
+ if (vpp)
+ PPSR |= 0x80;
+ else
+ PPSR &= ~0x80;
+ PPDR |= 0x80;
+}
+#else
+#define jornada720_set_vpp NULL
#endif
-#ifdef CONFIG_SA1100_YOPY
+#ifdef CONFIG_SA1100_PANGOLIN
+static struct mtd_partition pangolin_partitions[] = {
+ {
+ name: "boot firmware",
+ size: 0x00080000,
+ offset: 0x00000000,
+ mask_flags: MTD_WRITEABLE, /* force read-only */
+ }, {
+ name: "kernel",
+ size: 0x00100000,
+ offset: 0x00080000,
+ }, {
+ name: "initrd",
+ size: 0x00280000,
+ offset: 0x00180000,
+ }, {
+ name: "initrd-test",
+ size: 0x03C00000,
+ offset: 0x00400000,
+ }
+};
+#endif
-static unsigned long yopy_max_flash_size = 0x08000000;
-static struct mtd_partition yopy_partitions[] = {
+#ifdef CONFIG_SA1100_PT_SYSTEM3
+/* erase size is 0x40000 == 256k partitions have to have this boundary */
+static struct mtd_partition system3_partitions[] = {
{
- name: "boot firmware",
- offset: 0x00000000,
- size: 0x00040000,
- mask_flags: MTD_WRITEABLE, /* force read-only */
+ name: "BLOB",
+ size: 0x00040000,
+ offset: 0x00000000,
+ mask_flags: MTD_WRITEABLE, /* force read-only */
+ }, {
+ name: "config",
+ size: 0x00040000,
+ offset: MTDPART_OFS_APPEND,
+ }, {
+ name: "kernel",
+ size: 0x00100000,
+ offset: MTDPART_OFS_APPEND,
+ }, {
+ name: "root",
+ size: MTDPART_SIZ_FULL,
+ offset: MTDPART_OFS_APPEND,
+ }
+};
+#endif
+
+#ifdef CONFIG_SA1100_SHANNON
+static struct mtd_partition shannon_partitions[] = {
+ {
+ name: "BLOB boot loader",
+ offset: 0,
+ size: 0x20000
},
{
name: "kernel",
- offset: 0x00080000,
- size: 0x00080000,
+ offset: MTDPART_OFS_APPEND,
+ size: 0xe0000
},
- {
+ {
name: "initrd",
- offset: 0x00100000,
- size: 0x00300000,
- },
- {
- name: "root",
- offset: 0x00400000,
- size: 0x01000000,
- },
+ offset: MTDPART_OFS_APPEND,
+ size: MTDPART_SIZ_FULL
+ }
};
#endif
-#ifdef CONFIG_SA1100_JORNADA720
-
-static unsigned long jornada720_max_flash_size = 0x02000000;
-static struct mtd_partition jornada720_partitions[] = {
+#ifdef CONFIG_SA1100_SHERMAN
+static struct mtd_partition sherman_partitions[] = {
{
- name: "JORNADA720 boot firmware",
- size: 0x00040000,
- offset: 0,
- mask_flags: MTD_WRITEABLE /* force read-only */
- },{
- name: "JORNADA720 kernel",
- size: 0x000c0000,
- offset: 0x40000
- },{
- name: "JORNADA720 params",
- size: 0x00040000,
- offset: 0x100000
- },{
- name: "JORNADA720 initrd",
- size: 0x00100000,
- offset: 0x00140000
- },{
- name: "JORNADA720 root cramfs",
- size: 0x00300000,
- offset: 0x00240000
- },{
- name: "JORNADA720 usr cramfs",
- size: 0x00800000,
- offset: 0x00540000
- },{
- name: "JORNADA720 usr local",
- offset: 0x00d00000,
- size: 0 /* will expand to the end of the flash */
+ size: 0x50000,
+ offset: 0,
+ }, {
+ size: 0x70000,
+ offset: MTDPART_OFS_APPEND,
+ }, {
+ size: 0x600000,
+ offset: MTDPART_OFS_APPEND,
+ }, {
+ size: 0xA0000,
+ offset: MTDPART_OFS_APPEND,
}
};
#endif
-#ifdef CONFIG_SA1100_SHERMAN
-
-static unsigned long sherman_max_flash_size = 0x02000000;
-static struct mtd_partition sherman_partitions[] = {
- { offset: 0, size: 0x50000 },
- { offset: MTDPART_OFS_APPEND, size: 0x70000 },
- { offset: MTDPART_OFS_APPEND, size: 0x600000 },
- { offset: MTDPART_OFS_APPEND, size: 0xA0000 }
-};
-
+#ifdef CONFIG_SA1100_SIMPAD
+static struct mtd_partition simpad_partitions[] = {
+ {
+ name: "SIMpad boot firmware",
+ size: 0x00080000,
+ offset: 0,
+ mask_flags: MTD_WRITEABLE, /* force read-only */
+ }, {
+ name: "SIMpad kernel",
+ size: 0x00100000,
+ offset: 0x00080000,
+ }, {
+#ifdef CONFIG_JFFS2_FS
+ name: "SIMpad root jffs2",
+ size: MTDPART_SIZ_FULL,
+ offset: 0x00180000,
+#else
+ name: "SIMpad initrd",
+ size: 0x00300000,
+ offset: 0x00180000,
+ }, {
+ name: "SIMpad root cramfs",
+ size: 0x00300000,
+ offset: 0x00480000,
+ }, {
+ name: "SIMpad usr cramfs",
+ size: 0x005c0000,
+ offset: 0x00780000,
+ }, {
+ name: "SIMpad usr local",
+ size: MTDPART_SIZ_FULL,
+ offset: 0x00d40000,
#endif
+ }
+};
+#endif /* CONFIG_SA1100_SIMPAD */
#ifdef CONFIG_SA1100_STORK
-
-static unsigned long stork_max_flash_size = 0x02000000;
static struct mtd_partition stork_partitions[] = {
{
- name: "STORK boot firmware",
- size: 0x00040000,
- offset: 0,
- mask_flags: MTD_WRITEABLE /* force read-only */
- },{
- name: "STORK params",
- size: 0x00040000,
- offset: 0x40000
- },{
- name: "STORK kernel",
- size: 0x00100000,
- offset: 0x80000
- },{
+ name: "STORK boot firmware",
+ size: 0x00040000,
+ offset: 0,
+ mask_flags: MTD_WRITEABLE, /* force read-only */
+ }, {
+ name: "STORK params",
+ size: 0x00040000,
+ offset: 0x00040000,
+ }, {
+ name: "STORK kernel",
+ size: 0x00100000,
+ offset: 0x00080000,
+ }, {
#ifdef CONFIG_JFFS2_FS
- name: "STORK root jffs2",
- offset: 0x00180000,
- size: MTDPART_SIZ_FULL
+ name: "STORK root jffs2",
+ offset: 0x00180000,
+ size: MTDPART_SIZ_FULL,
#else
- name: "STORK initrd",
- size: 0x00100000,
- offset: 0x00180000
- },{
- name: "STORK root cramfs",
- size: 0x00300000,
- offset: 0x00280000
- },{
- name: "STORK usr cramfs",
- size: 0x00800000,
- offset: 0x00580000
- },{
- name: "STORK usr local",
- offset: 0x00d80000,
- size: MTDPART_SIZ_FULL
+ name: "STORK initrd",
+ size: 0x00100000,
+ offset: 0x00180000,
+ }, {
+ name: "STORK root cramfs",
+ size: 0x00300000,
+ offset: 0x00280000,
+ }, {
+ name: "STORK usr cramfs",
+ size: 0x00800000,
+ offset: 0x00580000,
+ }, {
+ name: "STORK usr local",
+ offset: 0x00d80000,
+ size: MTDPART_SIZ_FULL,
#endif
}
};
-
#endif
-#define NB_OF(x) (sizeof(x)/sizeof(x[0]))
-
-
-extern int parse_redboot_partitions(struct mtd_info *master, struct mtd_partition **pparts);
-extern int parse_bootldr_partitions(struct mtd_info *master, struct mtd_partition **pparts);
+#ifdef CONFIG_SA1100_TRIZEPS
+static struct mtd_partition trizeps_partitions[] = {
+ {
+ name: "Bootloader & the kernel",
+ size: 0x00200000,
+ offset: 0,
+ }, {
+ name: "Data",
+ size: 0x00400000,
+ offset: MTDPART_OFS_APPEND,
+ }, {
+ size: MTDPART_SIZ_FULL,
+ offset: MTDPART_OFS_APPEND,
+ }
+};
+#endif
-static struct mtd_partition *parsed_parts;
-static struct mtd_info *mymtd;
+#ifdef CONFIG_SA1100_YOPY
+static struct mtd_partition yopy_partitions[] = {
+ {
+ name: "boot firmware",
+ size: 0x00040000,
+ offset: 0x00000000,
+ mask_flags: MTD_WRITEABLE, /* force read-only */
+ }, {
+ name: "kernel",
+ size: 0x00080000,
+ offset: 0x00080000,
+ }, {
+ name: "initrd",
+ size: 0x00300000,
+ offset: 0x00100000,
+ }, {
+ name: "root",
+ size: 0x01000000,
+ offset: 0x00400000,
+ }
+};
+#endif
-int __init sa1100_mtd_init(void)
+static int __init sa1100_static_partitions(struct mtd_partition **parts)
{
- struct mtd_partition *parts;
int nb_parts = 0;
- int parsed_nr_parts = 0;
- char *part_type;
-
- /* Default flash buswidth */
- sa1100_map.buswidth = (MSC0 & MSC_RBW) ? 2 : 4;
- /*
- * Static partition definition selection
- */
- part_type = "static";
+#ifdef CONFIG_SA1100_ADSBITSY
+ if (machine_is_adsbitsy()) {
+ *parts = adsbitsy_partitions;
+ nb_parts = ARRAY_SIZE(adsbitsy_partitions);
+ }
+#endif
#ifdef CONFIG_SA1100_ASSABET
if (machine_is_assabet()) {
- parts = assabet_partitions;
- nb_parts = NB_OF(assabet_partitions);
- sa1100_map.size = assabet_max_flash_size;
+ *parts = assabet_partitions;
+ nb_parts = ARRAY_SIZE(assabet_partitions);
}
#endif
-
-#ifdef CONFIG_SA1100_HUW_WEBPANEL
- if (machine_is_huw_webpanel()) {
- parts = huw_webpanel_partitions;
- nb_parts = NB_OF(huw_webpanel_partitions);
- sa1100_map.size = huw_webpanel_max_flash_size;
+#ifdef CONFIG_SA1100_BADGE4
+ if (machine_is_badge4()) {
+ *parts = badge4_partitions;
+ nb_parts = ARRAY_SIZE(badge4_partitions);
}
#endif
-
-#ifdef CONFIG_SA1100_H3600
- if (machine_is_h3600()) {
- parts = h3600_partitions;
- nb_parts = NB_OF(h3600_partitions);
- sa1100_map.size = h3600_max_flash_size;
- sa1100_map.set_vpp = h3600_set_vpp;
+#ifdef CONFIG_SA1100_CERF
+ if (machine_is_cerf()) {
+ *parts = cerf_partitions;
+ nb_parts = ARRAY_SIZE(cerf_partitions);
+ }
+#endif
+#ifdef CONFIG_SA1100_CONSUS
+ if (machine_is_consus()) {
+ *parts = consus_partitions;
+ nb_parts = ARRAY_SIZE(consus_partitions);
+ }
+#endif
+#ifdef CONFIG_SA1100_FLEXANET
+ if (machine_is_flexanet()) {
+ *parts = flexanet_partitions;
+ nb_parts = ARRAY_SIZE(flexanet_partitions);
}
#endif
#ifdef CONFIG_SA1100_FREEBIRD
if (machine_is_freebird()) {
- parts = freebird_partitions;
- nb_parts = NB_OF(freebird_partitions);
- sa1100_map.size = freebird_max_flash_size;
+ *parts = freebird_partitions;
+ nb_parts = ARRAY_SIZE(freebird_partitions);
}
#endif
-#ifdef CONFIG_SA1100_CERF
- if (machine_is_cerf()) {
- parts = cerf_partitions;
- nb_parts = NB_OF(cerf_partitions);
- sa1100_map.size = cerf_max_flash_size;
+#ifdef CONFIG_SA1100_FRODO
+ if (machine_is_frodo()) {
+ *parts = frodo_partitions;
+ nb_parts = ARRAY_SIZE(frodo_partitions);
}
-#endif
+#endif
#ifdef CONFIG_SA1100_GRAPHICSCLIENT
if (machine_is_graphicsclient()) {
- parts = graphicsclient_partitions;
- nb_parts = NB_OF(graphicsclient_partitions);
- sa1100_map.size = graphicsclient_max_flash_size;
- sa1100_map.buswidth = (MSC1 & MSC_RBW) ? 2:4;
+ *parts = graphicsclient_partitions;
+ nb_parts = ARRAY_SIZE(graphicsclient_partitions);
}
#endif
#ifdef CONFIG_SA1100_GRAPHICSMASTER
if (machine_is_graphicsmaster()) {
- parts = graphicsmaster_partitions;
- nb_parts = NB_OF(graphicsmaster_partitions);
- sa1100_map.size = graphicsmaster_max_flash_size;
- sa1100_map.buswidth = (MSC1 & MSC_RBW) ? 2:4;
+ *parts = graphicsmaster_partitions;
+ nb_parts = ARRAY_SIZE(graphicsmaster_partitions);
}
#endif
-#ifdef CONFIG_SA1100_PANGOLIN
- if (machine_is_pangolin()) {
- parts = pangolin_partitions;
- nb_parts = NB_OF(pangolin_partitions);
- sa1100_map.size = pangolin_max_flash_size;
+#ifdef CONFIG_SA1100_H3XXX
+ if (machine_is_h3xxx()) {
+ *parts = h3xxx_partitions;
+ nb_parts = ARRAY_SIZE(h3xxx_partitions);
+ }
+#endif
+#ifdef CONFIG_SA1100_HUW_WEBPANEL
+ if (machine_is_huw_webpanel()) {
+ *parts = huw_webpanel_partitions;
+ nb_parts = ARRAY_SIZE(huw_webpanel_partitions);
}
#endif
#ifdef CONFIG_SA1100_JORNADA720
if (machine_is_jornada720()) {
- parts = jornada720_partitions;
- nb_parts = NB_OF(jornada720_partitions);
- sa1100_map.size = jornada720_max_flash_size;
- sa1100_map.set_vpp = jornada720_set_vpp;
+ *parts = jornada720_partitions;
+ nb_parts = ARRAY_SIZE(jornada720_partitions);
}
#endif
-#ifdef CONFIG_SA1100_YOPY
- if (machine_is_yopy()) {
- parts = yopy_partitions;
- nb_parts = NB_OF(yopy_partitions);
- sa1100_map.size = yopy_max_flash_size;
+#ifdef CONFIG_SA1100_PANGOLIN
+ if (machine_is_pangolin()) {
+ *parts = pangolin_partitions;
+ nb_parts = ARRAY_SIZE(pangolin_partitions);
+ }
+#endif
+#ifdef CONFIG_SA1100_PT_SYSTEM3
+ if (machine_is_pt_system3()) {
+ *parts = system3_partitions;
+ nb_parts = ARRAY_SIZE(system3_partitions);
+ }
+#endif
+#ifdef CONFIG_SA1100_SHANNON
+ if (machine_is_shannon()) {
+ *parts = shannon_partitions;
+ nb_parts = ARRAY_SIZE(shannon_partitions);
}
#endif
#ifdef CONFIG_SA1100_SHERMAN
if (machine_is_sherman()) {
- parts = sherman_partitions;
- nb_parts = NB_OF(sherman_partitions);
- sa1100_map.size = sherman_max_flash_size;
+ *parts = sherman_partitions;
+ nb_parts = ARRAY_SIZE(sherman_partitions);
}
#endif
-#ifdef CONFIG_SA1100_FLEXANET
- if (machine_is_flexanet()) {
- parts = flexanet_partitions;
- nb_parts = NB_OF(flexanet_partitions);
- sa1100_map.size = flexanet_max_flash_size;
+#ifdef CONFIG_SA1100_SIMPAD
+ if (machine_is_simpad()) {
+ *parts = simpad_partitions;
+ nb_parts = ARRAY_SIZE(simpad_partitions);
}
#endif
#ifdef CONFIG_SA1100_STORK
if (machine_is_stork()) {
- parts = stork_partitions;
- nb_parts = NB_OF(stork_partitions);
- sa1100_map.size = stork_max_flash_size;
+ *parts = stork_partitions;
+ nb_parts = ARRAY_SIZE(stork_partitions);
+ }
+#endif
+#ifdef CONFIG_SA1100_TRIZEPS
+ if (machine_is_trizeps()) {
+ *parts = trizeps_partitions;
+		nb_parts = ARRAY_SIZE(trizeps_partitions);
+ }
+#endif
+#ifdef CONFIG_SA1100_YOPY
+ if (machine_is_yopy()) {
+ *parts = yopy_partitions;
+ nb_parts = ARRAY_SIZE(yopy_partitions);
}
#endif
+ return nb_parts;
+}
+#endif
+
+struct sa_info {
+ unsigned long base;
+ unsigned long size;
+ int width;
+ void *vbase;
+ struct map_info *map;
+ struct mtd_info *mtd;
+ struct resource *res;
+};
+
+#define NR_SUBMTD 4
+
+static struct sa_info info[NR_SUBMTD];
+
+static int __init sa1100_setup_mtd(struct sa_info *sa, int nr, struct mtd_info **rmtd)
+{
+ struct mtd_info *subdev[nr];
+ struct map_info *maps;
+ int i, found = 0, ret = 0;
+
/*
- * Now let's probe for the actual flash. Do it here since
- * specific machine settings might have been set above.
+ * Allocate the map_info structs in one go.
*/
- printk(KERN_NOTICE "SA1100 flash: probing %d-bit flash bus\n", sa1100_map.buswidth*8);
- mymtd = do_map_probe("cfi_probe", &sa1100_map);
- if (!mymtd)
- return -ENXIO;
- mymtd->module = THIS_MODULE;
+ maps = kmalloc(sizeof(struct map_info) * nr, GFP_KERNEL);
+ if (!maps)
+ return -ENOMEM;
/*
- * Dynamic partition selection stuff (might override the static ones)
+ * Claim and then map the memory regions.
*/
-#ifdef CONFIG_MTD_REDBOOT_PARTS
- if (parsed_nr_parts == 0) {
- int ret = parse_redboot_partitions(mymtd, &parsed_parts);
-
- if (ret > 0) {
- part_type = "RedBoot";
- parsed_nr_parts = ret;
+ for (i = 0; i < nr; i++) {
+ if (sa[i].base == (unsigned long)-1)
+ break;
+
+ sa[i].res = request_mem_region(sa[i].base, sa[i].size, "sa1100 flash");
+ if (!sa[i].res) {
+ ret = -EBUSY;
+ break;
+ }
+
+ sa[i].map = maps + i;
+ memcpy(sa[i].map, &sa1100_map, sizeof(struct map_info));
+
+ sa[i].vbase = ioremap(sa[i].base, sa[i].size);
+ if (!sa[i].vbase) {
+ ret = -ENOMEM;
+ break;
}
+
+ sa[i].map->map_priv_1 = (unsigned long)sa[i].vbase;
+ sa[i].map->buswidth = sa[i].width;
+ sa[i].map->size = sa[i].size;
+
+ /*
+ * Now let's probe for the actual flash. Do it here since
+ * specific machine settings might have been set above.
+ */
+ sa[i].mtd = do_map_probe("cfi_probe", sa[i].map);
+ if (sa[i].mtd == NULL) {
+ ret = -ENXIO;
+ break;
+ }
+ sa[i].mtd->module = THIS_MODULE;
+ subdev[i] = sa[i].mtd;
+
+ printk(KERN_INFO "SA1100 flash: CFI device at 0x%08lx, %dMiB, "
+ "%d-bit\n", sa[i].base, sa[i].mtd->size >> 20,
+ sa[i].width * 8);
+ found += 1;
}
+
+ /*
+ * ENXIO is special. It means we didn't find a chip when
+ * we probed. We need to tear down the mapping, free the
+ * resource and mark it as such.
+ */
+ if (ret == -ENXIO) {
+ iounmap(sa[i].vbase);
+ sa[i].vbase = NULL;
+ release_resource(sa[i].res);
+ sa[i].res = NULL;
+ }
+
+ /*
+ * If we found one device, don't bother with concat support.
+ * If we found multiple devices, use concat if we have it
+ * available, otherwise fail.
+ */
+ if (ret == 0 || ret == -ENXIO) {
+ if (found == 1) {
+ *rmtd = subdev[0];
+ ret = 0;
+ } else if (found > 1) {
+ /*
+ * We detected multiple devices. Concatenate
+ * them together.
+ */
+#ifdef CONFIG_MTD_CONCAT
+ *rmtd = mtd_concat_create(subdev, found,
+ "sa1100 flash");
+ if (*rmtd == NULL)
+ ret = -ENXIO;
+#else
+ printk(KERN_ERR "SA1100 flash: multiple devices "
+ "found but MTD concat support disabled.\n");
+ ret = -ENXIO;
#endif
-#ifdef CONFIG_MTD_BOOTLDR_PARTS
- if (parsed_nr_parts == 0) {
- int ret = parse_bootldr_partitions(mymtd, &parsed_parts);
- if (ret > 0) {
- part_type = "Compaq bootldr";
- parsed_nr_parts = ret;
}
}
-#endif
- if (parsed_nr_parts > 0) {
- parts = parsed_parts;
- nb_parts = parsed_nr_parts;
+ /*
+ * If we failed, clean up.
+ */
+ if (ret) {
+ do {
+ if (sa[i].mtd)
+ map_destroy(sa[i].mtd);
+ if (sa[i].vbase)
+ iounmap(sa[i].vbase);
+ if (sa[i].res)
+ release_resource(sa[i].res);
+ } while (i--);
+
+ kfree(maps);
}
- if (nb_parts == 0) {
- printk(KERN_NOTICE "SA1100 flash: no partition info available, registering whole flash at once\n");
- add_mtd_device(mymtd);
+ return ret;
+}
+
+static void __exit sa1100_destroy_mtd(struct sa_info *sa, struct mtd_info *mtd)
+{
+ int i;
+
+ del_mtd_partitions(mtd);
+
+ if (mtd != sa[0].mtd)
+ mtd_concat_destroy(mtd);
+
+	for (i = NR_SUBMTD - 1; i >= 0; i--) {
+ if (sa[i].mtd)
+ map_destroy(sa[i].mtd);
+ if (sa[i].vbase)
+ iounmap(sa[i].vbase);
+ if (sa[i].res)
+ release_resource(sa[i].res);
+ }
+ kfree(sa[0].map);
+}
+
+static int __init sa1100_locate_flash(void)
+{
+ int i, nr = -ENODEV;
+
+ if (machine_is_adsbitsy()) {
+ info[0].base = SA1100_CS1_PHYS;
+ info[0].size = SZ_32M;
+ nr = 1;
+ }
+ if (machine_is_assabet()) {
+ info[0].base = SA1100_CS0_PHYS;
+ info[0].size = SZ_32M;
+ info[1].base = SA1100_CS1_PHYS; /* neponset */
+ info[1].size = SZ_32M;
+ nr = 2;
+ }
+ if (machine_is_badge4()) {
+ info[0].base = SA1100_CS0_PHYS;
+ info[0].size = SZ_4M;
+ nr = 1;
+ }
+ if (machine_is_cerf()) {
+ info[0].base = SA1100_CS0_PHYS;
+ info[0].size = SZ_32M;
+ nr = 1;
+ }
+ if (machine_is_consus()) {
+ info[0].base = SA1100_CS0_PHYS;
+ info[0].size = SZ_32M;
+ nr = 1;
+ }
+ if (machine_is_flexanet()) {
+ info[0].base = SA1100_CS0_PHYS;
+ info[0].size = SZ_32M;
+ nr = 1;
+ }
+ if (machine_is_freebird()) {
+ info[0].base = SA1100_CS0_PHYS;
+ info[0].size = SZ_32M;
+ nr = 1;
+ }
+ if (machine_is_frodo()) {
+ info[0].base = SA1100_CS0_PHYS;
+ info[0].size = SZ_32M;
+ nr = 1;
+ }
+ if (machine_is_graphicsclient()) {
+ info[0].base = SA1100_CS1_PHYS;
+ info[0].size = SZ_32M;
+ nr = 1;
+ }
+ if (machine_is_graphicsmaster()) {
+ info[0].base = SA1100_CS1_PHYS;
+ info[0].size = SZ_16M;
+ nr = 1;
+ }
+ if (machine_is_h3xxx()) {
+ sa1100_map.set_vpp = h3xxx_set_vpp;
+ info[0].base = SA1100_CS0_PHYS;
+ info[0].size = SZ_32M;
+ nr = 1;
+ }
+ if (machine_is_huw_webpanel()) {
+ info[0].base = SA1100_CS0_PHYS;
+ info[0].size = SZ_16M;
+ nr = 1;
+ }
+ if (machine_is_itsy()) {
+ info[0].base = SA1100_CS0_PHYS;
+ info[0].size = SZ_32M;
+ nr = 1;
+ }
+ if (machine_is_jornada720()) {
+ sa1100_map.set_vpp = jornada720_set_vpp;
+ info[0].base = SA1100_CS0_PHYS;
+ info[0].size = SZ_32M;
+ nr = 1;
+ }
+ if (machine_is_nanoengine()) {
+ info[0].base = SA1100_CS0_PHYS;
+		info[0].size = SZ_32M;
+ nr = 1;
+ }
+ if (machine_is_pangolin()) {
+ info[0].base = SA1100_CS0_PHYS;
+ info[0].size = SZ_64M;
+ nr = 1;
+ }
+ if (machine_is_pfs168()) {
+ info[0].base = SA1100_CS0_PHYS;
+ info[0].size = SZ_32M;
+ nr = 1;
+ }
+ if (machine_is_pleb()) {
+ info[0].base = SA1100_CS0_PHYS;
+ info[0].size = SZ_4M;
+ info[1].base = SA1100_CS1_PHYS;
+ info[1].size = SZ_4M;
+ nr = 2;
+ }
+ if (machine_is_pt_system3()) {
+ info[0].base = SA1100_CS0_PHYS;
+ info[0].size = SZ_16M;
+ nr = 1;
+ }
+ if (machine_is_shannon()) {
+ info[0].base = SA1100_CS0_PHYS;
+ info[0].size = SZ_4M;
+ nr = 1;
+ }
+ if (machine_is_sherman()) {
+ info[0].base = SA1100_CS0_PHYS;
+ info[0].size = SZ_32M;
+ nr = 1;
+ }
+ if (machine_is_simpad()) {
+ info[0].base = SA1100_CS0_PHYS;
+ info[0].size = SZ_32M;
+ nr = 1;
+ }
+ if (machine_is_stork()) {
+ info[0].base = SA1100_CS0_PHYS;
+ info[0].size = SZ_32M;
+ nr = 1;
+ }
+ if (machine_is_trizeps()) {
+ info[0].base = SA1100_CS0_PHYS;
+ info[0].size = SZ_16M;
+ nr = 1;
+ }
+ if (machine_is_victor()) {
+ info[0].base = SA1100_CS0_PHYS;
+ info[0].size = SZ_2M;
+ nr = 1;
+ }
+ if (machine_is_yopy()) {
+ info[0].base = SA1100_CS0_PHYS;
+ info[0].size = SZ_64M;
+ info[1].base = SA1100_CS1_PHYS;
+ info[1].size = SZ_64M;
+ nr = 2;
+ }
+
+ if (nr < 0)
+ return nr;
+
+ /*
+ * Retrieve the buswidth from the MSC registers.
+ * We currently only implement CS0 and CS1 here.
+ */
+ for (i = 0; i < nr; i++) {
+ switch (info[i].base) {
+ default:
+ printk(KERN_WARNING "SA1100 flash: unknown base address "
+ "0x%08lx, assuming CS0\n", info[i].base);
+ case SA1100_CS0_PHYS:
+ info[i].width = (MSC0 & MSC_RBW) ? 2 : 4;
+ break;
+
+ case SA1100_CS1_PHYS:
+ info[i].width = ((MSC0 >> 16) & MSC_RBW) ? 2 : 4;
+ break;
+ }
+ }
+
+ return nr;
+}
+
+extern int parse_redboot_partitions(struct mtd_info *master, struct mtd_partition **pparts);
+extern int parse_cmdline_partitions(struct mtd_info *master, struct mtd_partition **pparts, char *);
+
+static struct mtd_partition *parsed_parts;
+
+static void __init sa1100_locate_partitions(struct mtd_info *mtd)
+{
+ const char *part_type = NULL;
+ int nr_parts = 0;
+
+ do {
+ /*
+ * Partition selection stuff.
+ */
+#ifdef CONFIG_MTD_CMDLINE_PARTS
+ nr_parts = parse_cmdline_partitions(mtd, &parsed_parts, "sa1100");
+ if (nr_parts > 0) {
+ part_type = "command line";
+ break;
+ }
+#endif
+#ifdef CONFIG_MTD_REDBOOT_PARTS
+ nr_parts = parse_redboot_partitions(mtd, &parsed_parts);
+ if (nr_parts > 0) {
+ part_type = "RedBoot";
+ break;
+ }
+#endif
+#ifdef CONFIG_MTD_SA1100_STATICMAP
+ nr_parts = sa1100_static_partitions(&parsed_parts);
+ if (nr_parts > 0) {
+ part_type = "static";
+ break;
+ }
+#endif
+ } while (0);
+
+ if (nr_parts == 0) {
+ printk(KERN_NOTICE "SA1100 flash: no partition info "
+ "available, registering whole flash\n");
+ add_mtd_device(mtd);
} else {
- printk(KERN_NOTICE "Using %s partition definition\n", part_type);
- add_mtd_partitions(mymtd, parts, nb_parts);
+ printk(KERN_NOTICE "SA1100 flash: using %s partition "
+ "definition\n", part_type);
+ add_mtd_partitions(mtd, parsed_parts, nr_parts);
}
- return 0;
+
+ /* Always succeeds. */
+}
+
+static void __exit sa1100_destroy_partitions(void)
+{
+ if (parsed_parts)
+ kfree(parsed_parts);
+}
+
+static struct mtd_info *mymtd;
+
+static int __init sa1100_mtd_init(void)
+{
+ int ret;
+ int nr;
+
+ nr = sa1100_locate_flash();
+ if (nr < 0)
+ return nr;
+
+ ret = sa1100_setup_mtd(info, nr, &mymtd);
+ if (ret == 0)
+ sa1100_locate_partitions(mymtd);
+
+ return ret;
}
static void __exit sa1100_mtd_cleanup(void)
{
- if (mymtd) {
- del_mtd_partitions(mymtd);
- map_destroy(mymtd);
- if (parsed_parts)
- kfree(parsed_parts);
- }
+ sa1100_destroy_mtd(info, mymtd);
+ sa1100_destroy_partitions();
}
module_init(sa1100_mtd_init);
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index 1ad148bd3364..a39bcab25891 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -295,7 +295,7 @@ static int mtdblock_open(struct inode *inode, struct file *file)
spin_unlock(&mtdblks_lock);
mtdblk = kmalloc(sizeof(struct mtdblk_dev), GFP_KERNEL);
- disk = alloc_disk();
+ disk = alloc_disk(1);
if (!mtdblk || !disk)
goto Enomem;
memset(mtdblk, 0, sizeof(*mtdblk));
@@ -313,7 +313,6 @@ static int mtdblock_open(struct inode *inode, struct file *file)
}
disk->major = MAJOR_NR;
disk->first_minor = dev;
- disk->minor_shift = 0;
disk->fops = &mtd_fops;
sprintf(disk->disk_name, "mtd%d", dev);
mtdblk->disk = disk;
@@ -518,8 +517,6 @@ static int mtdblock_ioctl(struct inode * inode, struct file * file,
switch (cmd) {
case BLKFLSBUF:
- if(!capable(CAP_SYS_ADMIN))
- return -EACCES;
fsync_bdev(inode->i_bdev);
invalidate_bdev(inode->i_bdev, 0);
down(&mtdblk->cache_sem);
diff --git a/drivers/mtd/mtdblock_ro.c b/drivers/mtd/mtdblock_ro.c
index 65b97e3a11df..1878f540f3b6 100644
--- a/drivers/mtd/mtdblock_ro.c
+++ b/drivers/mtd/mtdblock_ro.c
@@ -201,8 +201,6 @@ static int mtdblock_ioctl(struct inode * inode, struct file * file,
if (!mtd || cmd != BLKFLSBUF)
return -EINVAL;
- if(!capable(CAP_SYS_ADMIN))
- return -EACCES;
fsync_bdev(inode->i_bdev);
invalidate_bdev(inode->i_bdev, 0);
if (mtd->sync)
@@ -224,7 +222,7 @@ int __init init_mtdblock(void)
int i;
for (i = 0; i < MAX_MTD_DEVICES; i++) {
- struct gendisk *disk = alloc_disk();
+ struct gendisk *disk = alloc_disk(1);
if (!disk)
goto out;
disk->major = MAJOR_NR;
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
new file mode 100644
index 000000000000..4c16c0e43e0f
--- /dev/null
+++ b/drivers/mtd/mtdconcat.c
@@ -0,0 +1,675 @@
+/*
+ * MTD device concatenation layer
+ *
+ * (C) 2002 Robert Kaiser <rkaiser@sysgo.de>
+ *
+ * This code is GPL
+ *
+ * $Id: mtdconcat.c,v 1.3 2002/05/21 21:04:25 dwmw2 Exp $
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/concat.h>
+
+/*
+ * Our storage structure:
+ * Subdev points to an array of pointers to struct mtd_info objects
+ * which is allocated along with this structure
+ *
+ */
+struct mtd_concat {
+ struct mtd_info mtd;
+ int num_subdev;
+ struct mtd_info **subdev;
+};
+
+/*
+ * how to calculate the size required for the above structure,
+ * including the pointer array subdev points to:
+ */
+#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev) \
+ ((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))
+
+
+/*
+ * Given a pointer to the MTD object in the mtd_concat structure,
+ * we can retrieve the pointer to that structure with this macro.
+ */
+#define CONCAT(x) ((struct mtd_concat *)(x))
+
+
+/*
+ * MTD methods which look up the relevant subdevice, translate the
+ * effective address and pass through to the subdevice.
+ */
+
+static int concat_read (struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int err = -EINVAL;
+ int i;
+
+ *retlen = 0;
+
+ for(i = 0; i < concat->num_subdev; i++)
+ {
+ struct mtd_info *subdev = concat->subdev[i];
+ size_t size, retsize;
+
+ if (from >= subdev->size)
+ {
+ size = 0;
+ from -= subdev->size;
+ }
+ else
+ {
+ if (from + len > subdev->size)
+ size = subdev->size - from;
+ else
+ size = len;
+
+ err = subdev->read(subdev, from, size, &retsize, buf);
+
+ if(err)
+ break;
+
+ *retlen += retsize;
+ len -= size;
+ if(len == 0)
+ break;
+
+ err = -EINVAL;
+ buf += size;
+ from = 0;
+ }
+ }
+ return err;
+}
+
+static int concat_write (struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int err = -EINVAL;
+ int i;
+
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+
+ *retlen = 0;
+
+ for(i = 0; i < concat->num_subdev; i++)
+ {
+ struct mtd_info *subdev = concat->subdev[i];
+ size_t size, retsize;
+
+ if (to >= subdev->size)
+ {
+ size = 0;
+ to -= subdev->size;
+ }
+ else
+ {
+ if (to + len > subdev->size)
+ size = subdev->size - to;
+ else
+ size = len;
+
+ if (!(subdev->flags & MTD_WRITEABLE))
+ err = -EROFS;
+ else
+ err = subdev->write(subdev, to, size, &retsize, buf);
+
+ if(err)
+ break;
+
+ *retlen += retsize;
+ len -= size;
+ if(len == 0)
+ break;
+
+ err = -EINVAL;
+ buf += size;
+ to = 0;
+ }
+ }
+ return err;
+}
+
+static void concat_erase_callback (struct erase_info *instr)
+{
+ wake_up((wait_queue_head_t *)instr->priv);
+}
+
+static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
+{
+ int err;
+ wait_queue_head_t waitq;
+ DECLARE_WAITQUEUE(wait, current);
+
+ /*
+ * This code was stol^H^H^H^Hinspired by mtdchar.c
+ */
+ init_waitqueue_head(&waitq);
+
+ erase->mtd = mtd;
+ erase->callback = concat_erase_callback;
+ erase->priv = (unsigned long)&waitq;
+
+ /*
+ * FIXME: Allow INTERRUPTIBLE. Which means
+ * not having the wait_queue head on the stack.
+ */
+ err = mtd->erase(mtd, erase);
+ if (!err)
+ {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&waitq, &wait);
+ if (erase->state != MTD_ERASE_DONE && erase->state != MTD_ERASE_FAILED)
+ schedule();
+ remove_wait_queue(&waitq, &wait);
+ set_current_state(TASK_RUNNING);
+
+ err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
+ }
+ return err;
+}
+
+static int concat_erase (struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ struct mtd_info *subdev;
+ int i, err;
+ u_int32_t length;
+ struct erase_info *erase;
+
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+
+ if(instr->addr > concat->mtd.size)
+ return -EINVAL;
+
+ if(instr->len + instr->addr > concat->mtd.size)
+ return -EINVAL;
+
+ /*
+ * Check for proper erase block alignment of the to-be-erased area.
+ * It is easier to do this based on the super device's erase
+ * region info rather than looking at each particular sub-device
+ * in turn.
+ */
+ if (!concat->mtd.numeraseregions)
+ { /* the easy case: device has uniform erase block size */
+ if(instr->addr & (concat->mtd.erasesize - 1))
+ return -EINVAL;
+ if(instr->len & (concat->mtd.erasesize - 1))
+ return -EINVAL;
+ }
+ else
+ { /* device has variable erase size */
+ struct mtd_erase_region_info *erase_regions = concat->mtd.eraseregions;
+
+ /*
+ * Find the erase region where the to-be-erased area begins:
+ */
+ for(i = 0; i < concat->mtd.numeraseregions &&
+ instr->addr >= erase_regions[i].offset; i++)
+ ;
+ --i;
+
+ /*
+ * Now erase_regions[i] is the region in which the
+ * to-be-erased area begins. Verify that the starting
+ * offset is aligned to this region's erase size:
+ */
+ if (instr->addr & (erase_regions[i].erasesize-1))
+ return -EINVAL;
+
+ /*
+ * now find the erase region where the to-be-erased area ends:
+ */
+ for(; i < concat->mtd.numeraseregions &&
+ (instr->addr + instr->len) >= erase_regions[i].offset ; ++i)
+ ;
+ --i;
+ /*
+ * check if the ending offset is aligned to this region's erase size
+ */
+ if ((instr->addr + instr->len) & (erase_regions[i].erasesize-1))
+ return -EINVAL;
+ }
+
+ /* make a local copy of instr to avoid modifying the caller's struct */
+ erase = kmalloc(sizeof(struct erase_info),GFP_KERNEL);
+
+ if (!erase)
+ return -ENOMEM;
+
+ *erase = *instr;
+ length = instr->len;
+
+ /*
+ * find the subdevice where the to-be-erased area begins, adjust
+ * starting offset to be relative to the subdevice start
+ */
+ for(i = 0; i < concat->num_subdev; i++)
+ {
+ subdev = concat->subdev[i];
+ if(subdev->size <= erase->addr)
+ erase->addr -= subdev->size;
+ else
+ break;
+ }
+ if(i >= concat->num_subdev) /* must never happen since size */
+ BUG(); /* limit has been verified above */
+
+ /* now do the erase: */
+ err = 0;
+ for(;length > 0; i++) /* loop for all subevices affected by this request */
+ {
+ subdev = concat->subdev[i]; /* get current subdevice */
+
+ /* limit length to subdevice's size: */
+ if(erase->addr + length > subdev->size)
+ erase->len = subdev->size - erase->addr;
+ else
+ erase->len = length;
+
+ if (!(subdev->flags & MTD_WRITEABLE))
+ {
+ err = -EROFS;
+ break;
+ }
+ length -= erase->len;
+ if ((err = concat_dev_erase(subdev, erase)))
+ {
+ if(err == -EINVAL) /* sanity check: must never happen since */
+ BUG(); /* block alignment has been checked above */
+ break;
+ }
+ /*
+ * erase->addr specifies the offset of the area to be
+ * erased *within the current subdevice*. It can be
+ * non-zero only the first time through this loop, i.e.
+ * for the first subdevice where blocks need to be erased.
+ * All the following erases must begin at the start of the
+ * current subdevice, i.e. at offset zero.
+ */
+ erase->addr = 0;
+ }
+ kfree(erase);
+ if (err)
+ return err;
+
+ instr->state = MTD_ERASE_DONE;
+ if (instr->callback)
+ instr->callback(instr);
+ return 0;
+}
+
+static int concat_lock (struct mtd_info *mtd, loff_t ofs, size_t len)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i, err = -EINVAL;
+
+ if ((len + ofs) > mtd->size)
+ return -EINVAL;
+
+ for(i = 0; i < concat->num_subdev; i++)
+ {
+ struct mtd_info *subdev = concat->subdev[i];
+ size_t size;
+
+ if (ofs >= subdev->size)
+ {
+ size = 0;
+ ofs -= subdev->size;
+ }
+ else
+ {
+ if (ofs + len > subdev->size)
+ size = subdev->size - ofs;
+ else
+ size = len;
+
+ err = subdev->lock(subdev, ofs, size);
+
+ if(err)
+ break;
+
+ len -= size;
+ if(len == 0)
+ break;
+
+ err = -EINVAL;
+ ofs = 0;
+ }
+ }
+ return err;
+}
+
+static int concat_unlock (struct mtd_info *mtd, loff_t ofs, size_t len)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i, err = 0;
+
+ if ((len + ofs) > mtd->size)
+ return -EINVAL;
+
+ for(i = 0; i < concat->num_subdev; i++)
+ {
+ struct mtd_info *subdev = concat->subdev[i];
+ size_t size;
+
+ if (ofs >= subdev->size)
+ {
+ size = 0;
+ ofs -= subdev->size;
+ }
+ else
+ {
+ if (ofs + len > subdev->size)
+ size = subdev->size - ofs;
+ else
+ size = len;
+
+ err = subdev->unlock(subdev, ofs, size);
+
+ if(err)
+ break;
+
+ len -= size;
+ if(len == 0)
+ break;
+
+ err = -EINVAL;
+ ofs = 0;
+ }
+ }
+ return err;
+}
+
+static void concat_sync(struct mtd_info *mtd)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i;
+
+ for(i = 0; i < concat->num_subdev; i++)
+ {
+ struct mtd_info *subdev = concat->subdev[i];
+ subdev->sync(subdev);
+ }
+}
+
+static int concat_suspend(struct mtd_info *mtd)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i, rc = 0;
+
+ for(i = 0; i < concat->num_subdev; i++)
+ {
+ struct mtd_info *subdev = concat->subdev[i];
+ if((rc = subdev->suspend(subdev)) < 0)
+ return rc;
+ }
+ return rc;
+}
+
+static void concat_resume(struct mtd_info *mtd)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i;
+
+ for(i = 0; i < concat->num_subdev; i++)
+ {
+ struct mtd_info *subdev = concat->subdev[i];
+ subdev->resume(subdev);
+ }
+}
+
+/*
+ * This function constructs a virtual MTD device by concatenating
+ * num_devs MTD devices. A pointer to the new device object is
+ * stored to *new_dev upon success. This function does _not_
+ * register any devices: this is the caller's responsibility.
+ */
+struct mtd_info *mtd_concat_create(
+ struct mtd_info *subdev[], /* subdevices to concatenate */
+ int num_devs, /* number of subdevices */
+ char *name) /* name for the new device */
+{
+ int i;
+ size_t size;
+ struct mtd_concat *concat;
+ u_int32_t max_erasesize, curr_erasesize;
+ int num_erase_region;
+
+ printk(KERN_NOTICE "Concatenating MTD devices:\n");
+ for(i = 0; i < num_devs; i++)
+ printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
+ printk(KERN_NOTICE "into device \"%s\"\n", name);
+
+ /* allocate the device structure */
+ size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
+ concat = kmalloc (size, GFP_KERNEL);
+ if(!concat)
+ {
+ printk ("memory allocation error while creating concatenated device \"%s\"\n",
+ name);
+ return NULL;
+ }
+ memset(concat, 0, size);
+ concat->subdev = (struct mtd_info **)(concat + 1);
+
+ /*
+ * Set up the new "super" device's MTD object structure, check for
+ * incompatibilites between the subdevices.
+ */
+ concat->mtd.type = subdev[0]->type;
+ concat->mtd.flags = subdev[0]->flags;
+ concat->mtd.size = subdev[0]->size;
+ concat->mtd.erasesize = subdev[0]->erasesize;
+ concat->mtd.oobblock = subdev[0]->oobblock;
+ concat->mtd.oobsize = subdev[0]->oobsize;
+ concat->mtd.ecctype = subdev[0]->ecctype;
+ concat->mtd.eccsize = subdev[0]->eccsize;
+
+ concat->subdev[0] = subdev[0];
+
+ for(i = 1; i < num_devs; i++)
+ {
+ if(concat->mtd.type != subdev[i]->type)
+ {
+ kfree(concat);
+ printk ("Incompatible device type on \"%s\"\n", subdev[i]->name);
+ return NULL;
+ }
+ if(concat->mtd.flags != subdev[i]->flags)
+ { /*
+ * Expect all flags except MTD_WRITEABLE to be equal on
+ * all subdevices.
+ */
+ if((concat->mtd.flags ^ subdev[i]->flags) & ~MTD_WRITEABLE)
+ {
+ kfree(concat);
+ printk ("Incompatible device flags on \"%s\"\n", subdev[i]->name);
+ return NULL;
+ }
+ else /* if writeable attribute differs, make super device writeable */
+ concat->mtd.flags |= subdev[i]->flags & MTD_WRITEABLE;
+ }
+ concat->mtd.size += subdev[i]->size;
+ if(concat->mtd.oobblock != subdev[i]->oobblock ||
+ concat->mtd.oobsize != subdev[i]->oobsize ||
+ concat->mtd.ecctype != subdev[i]->ecctype ||
+ concat->mtd.eccsize != subdev[i]->eccsize)
+ {
+ kfree(concat);
+ printk ("Incompatible OOB or ECC data on \"%s\"\n", subdev[i]->name);
+ return NULL;
+ }
+ concat->subdev[i] = subdev[i];
+
+ }
+
+ concat->num_subdev = num_devs;
+ concat->mtd.name = name;
+
+ /*
+ * NOTE: for now, we do not provide any readv()/writev() methods
+ * because they are messy to implement and they are not
+ * used to a great extent anyway.
+ */
+ concat->mtd.erase = concat_erase;
+ concat->mtd.read = concat_read;
+ concat->mtd.write = concat_write;
+ concat->mtd.sync = concat_sync;
+ concat->mtd.lock = concat_lock;
+ concat->mtd.unlock = concat_unlock;
+ concat->mtd.suspend = concat_suspend;
+ concat->mtd.resume = concat_resume;
+
+
+ /*
+ * Combine the erase block size info of the subdevices:
+ *
+ * first, walk the map of the new device and see how
+ * many changes in erase size we have
+ */
+ max_erasesize = curr_erasesize = subdev[0]->erasesize;
+ num_erase_region = 1;
+ for(i = 0; i < num_devs; i++)
+ {
+ if(subdev[i]->numeraseregions == 0)
+ { /* current subdevice has uniform erase size */
+ if(subdev[i]->erasesize != curr_erasesize)
+ { /* if it differs from the last subdevice's erase size, count it */
+ ++num_erase_region;
+ curr_erasesize = subdev[i]->erasesize;
+ if(curr_erasesize > max_erasesize)
+ max_erasesize = curr_erasesize;
+ }
+ }
+ else
+ { /* current subdevice has variable erase size */
+ int j;
+ for(j = 0; j < subdev[i]->numeraseregions; j++)
+ { /* walk the list of erase regions, count any changes */
+ if(subdev[i]->eraseregions[j].erasesize != curr_erasesize)
+ {
+ ++num_erase_region;
+ curr_erasesize = subdev[i]->eraseregions[j].erasesize;
+ if(curr_erasesize > max_erasesize)
+ max_erasesize = curr_erasesize;
+ }
+ }
+ }
+ }
+
+ if(num_erase_region == 1)
+ { /*
+ * All subdevices have the same uniform erase size.
+ * This is easy:
+ */
+ concat->mtd.erasesize = curr_erasesize;
+ concat->mtd.numeraseregions = 0;
+ }
+ else
+ { /*
+ * erase block size varies across the subdevices: allocate
+ * space to store the data describing the variable erase regions
+ */
+ struct mtd_erase_region_info *erase_region_p;
+ u_int32_t begin, position;
+
+ concat->mtd.erasesize = max_erasesize;
+ concat->mtd.numeraseregions = num_erase_region;
+ concat->mtd.eraseregions = erase_region_p = kmalloc (
+ num_erase_region * sizeof(struct mtd_erase_region_info), GFP_KERNEL);
+ if(!erase_region_p)
+ {
+ kfree(concat);
+ printk ("memory allocation error while creating erase region list"
+ " for device \"%s\"\n", name);
+ return NULL;
+ }
+
+ /*
+ * walk the map of the new device once more and fill in
+ * in erase region info:
+ */
+ curr_erasesize = subdev[0]->erasesize;
+ begin = position = 0;
+ for(i = 0; i < num_devs; i++)
+ {
+ if(subdev[i]->numeraseregions == 0)
+ { /* current subdevice has uniform erase size */
+ if(subdev[i]->erasesize != curr_erasesize)
+ { /*
+ * fill in an mtd_erase_region_info structure for the area
+ * we have walked so far:
+ */
+ erase_region_p->offset = begin;
+ erase_region_p->erasesize = curr_erasesize;
+ erase_region_p->numblocks = (position - begin) / curr_erasesize;
+ begin = position;
+
+ curr_erasesize = subdev[i]->erasesize;
+ ++erase_region_p;
+ }
+ position += subdev[i]->size;
+ }
+ else
+ { /* current subdevice has variable erase size */
+ int j;
+ for(j = 0; j < subdev[i]->numeraseregions; j++)
+ { /* walk the list of erase regions, count any changes */
+ if(subdev[i]->eraseregions[j].erasesize != curr_erasesize)
+ {
+ erase_region_p->offset = begin;
+ erase_region_p->erasesize = curr_erasesize;
+ erase_region_p->numblocks = (position - begin) / curr_erasesize;
+ begin = position;
+
+ curr_erasesize = subdev[i]->eraseregions[j].erasesize;
+ ++erase_region_p;
+ }
+ position += subdev[i]->eraseregions[j].numblocks * curr_erasesize;
+ }
+ }
+ }
+ /* Now write the final entry */
+ erase_region_p->offset = begin;
+ erase_region_p->erasesize = curr_erasesize;
+ erase_region_p->numblocks = (position - begin) / curr_erasesize;
+ }
+
+ return &concat->mtd;
+}
+
+/*
+ * This function destroys an MTD object obtained from concat_mtd_devs()
+ */
+
+void mtd_concat_destroy(struct mtd_info *mtd)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ if(concat->mtd.numeraseregions)
+ kfree(concat->mtd.eraseregions);
+ kfree(concat);
+}
+
+
+EXPORT_SYMBOL(mtd_concat_create);
+EXPORT_SYMBOL(mtd_concat_destroy);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Robert Kaiser <rkaiser@sysgo.de>");
+MODULE_DESCRIPTION("Generic support for concatenating of MTD devices");
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index 60d26b10740e..292894af8252 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -74,7 +74,7 @@ static void NFTL_setup(struct mtd_info *mtd)
}
nftl = kmalloc(sizeof(struct NFTLrecord), GFP_KERNEL);
- gd = alloc_disk();
+ gd = alloc_disk(1 << NFTL_PARTN_BITS);
if (!nftl || !gd) {
kfree(nftl);
put_disk(gd);
@@ -132,7 +132,6 @@ static void NFTL_setup(struct mtd_info *mtd)
sprintf(gd->disk_name, "nftl%c", 'a' + firstfree);
gd->major = MAJOR_NR;
gd->first_minor = firstfree << NFTL_PARTN_BITS;
- gd->minor_shift = NFTL_PARTN_BITS;
set_capacity(gd, nftl->nr_sects);
nftl->disk = gd;
add_disk(gd);
@@ -771,7 +770,6 @@ static int nftl_ioctl(struct inode * inode, struct file * file, unsigned int cmd
return copy_to_user((void *)arg, &g, sizeof g) ? -EFAULT : 0;
}
case BLKFLSBUF:
- if (!capable(CAP_SYS_ADMIN)) return -EACCES;
fsync_bdev(inode->i_bdev);
invalidate_bdev(inode->i_bdev, 0);
if (nftl->mtd->sync)
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
new file mode 100644
index 000000000000..79b92c1c7965
--- /dev/null
+++ b/drivers/oprofile/buffer_sync.c
@@ -0,0 +1,394 @@
+/**
+ * @file buffer_sync.c
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
+ *
+ * This is the core of the buffer management. Each
+ * CPU buffer is processed and entered into the
+ * global event buffer. Such processing is necessary
+ * in several circumstances, mentioned below.
+ *
+ * The processing does the job of converting the
+ * transitory EIP value into a persistent dentry/offset
+ * value that the profiler can record at its leisure.
+ *
+ * See fs/dcookies.c for a description of the dentry/offset
+ * objects.
+ */
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/timer.h>
+#include <linux/dcookies.h>
+#include <linux/notifier.h>
+#include <linux/profile.h>
+#include <linux/workqueue.h>
+
+#include "event_buffer.h"
+#include "cpu_buffer.h"
+#include "oprofile_stats.h"
+
+#define DEFAULT_EXPIRE (HZ / 4)
+
+static void wq_sync_buffers(void *);
+static DECLARE_WORK(sync_wq, wq_sync_buffers, 0);
+
+static struct timer_list sync_timer;
+static void timer_ping(unsigned long data);
+static void sync_cpu_buffers(void);
+
+
+/* We must make sure to process every entry in the CPU buffers
+ * before a task got the PF_EXITING flag, otherwise we will hold
+ * references to a possibly freed task_struct. We are safe with
+ * samples past the PF_EXITING point in do_exit(), because we
+ * explicitly check for that in cpu_buffer.c
+ */
+static int exit_task_notify(struct notifier_block * self, unsigned long val, void * data)
+{
+ sync_cpu_buffers();
+ return 0;
+}
+
+/* There are two cases of tasks modifying task->mm->mmap list we
+ * must concern ourselves with. First, when a task is about to
+ * exit (exit_mmap()), we should process the buffer to deal with
+ * any samples in the CPU buffer, before we lose the ->mmap information
+ * we need. Second, a task may unmap (part of) an executable mmap,
+ * so we want to process samples before that happens too
+ */
+static int mm_notify(struct notifier_block * self, unsigned long val, void * data)
+{
+ sync_cpu_buffers();
+ return 0;
+}
+
+
+static struct notifier_block exit_task_nb = {
+ .notifier_call = exit_task_notify,
+};
+
+static struct notifier_block exec_unmap_nb = {
+ .notifier_call = mm_notify,
+};
+
+static struct notifier_block exit_mmap_nb = {
+ .notifier_call = mm_notify,
+};
+
+
+int sync_start(void)
+{
+ int err = profile_event_register(EXIT_TASK, &exit_task_nb);
+ if (err)
+ goto out;
+ err = profile_event_register(EXIT_MMAP, &exit_mmap_nb);
+ if (err)
+ goto out2;
+ err = profile_event_register(EXEC_UNMAP, &exec_unmap_nb);
+ if (err)
+ goto out3;
+
+ sync_timer.function = timer_ping;
+ sync_timer.expires = jiffies + DEFAULT_EXPIRE;
+ add_timer(&sync_timer);
+out:
+ return err;
+out3:
+ profile_event_unregister(EXIT_MMAP, &exit_mmap_nb);
+out2:
+ profile_event_unregister(EXIT_TASK, &exit_task_nb);
+ goto out;
+}
+
+
+void sync_stop(void)
+{
+ profile_event_unregister(EXIT_TASK, &exit_task_nb);
+ profile_event_unregister(EXIT_MMAP, &exit_mmap_nb);
+ profile_event_unregister(EXEC_UNMAP, &exec_unmap_nb);
+ del_timer_sync(&sync_timer);
+}
+
+
+/* Optimisation. We can manage without taking the dcookie sem
+ * because we cannot reach this code without at least one
+ * dcookie user still being registered (namely, the reader
+ * of the event buffer). */
+static inline u32 fast_get_dcookie(struct dentry * dentry,
+ struct vfsmount * vfsmnt)
+{
+ u32 cookie;
+
+ if (dentry->d_cookie)
+ return (u32)dentry;
+ get_dcookie(dentry, vfsmnt, &cookie);
+ return cookie;
+}
+
+
+/* Look up the dcookie for the task's first VM_EXECUTABLE mapping,
+ * which corresponds loosely to "application name". This is
+ * not strictly necessary but allows oprofile to associate
+ * shared-library samples with particular applications
+ */
+static u32 get_exec_dcookie(struct mm_struct * mm)
+{
+ u32 cookie = 0;
+ struct vm_area_struct * vma;
+
+ if (!mm)
+ goto out;
+
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ if (!vma->vm_file)
+ continue;
+ if (!(vma->vm_flags & VM_EXECUTABLE))
+ continue;
+ cookie = fast_get_dcookie(vma->vm_file->f_dentry,
+ vma->vm_file->f_vfsmnt);
+ break;
+ }
+
+out:
+ return cookie;
+}
+
+
+/* Convert the EIP value of a sample into a persistent dentry/offset
+ * pair that can then be added to the global event buffer. We make
+ * sure to do this lookup before a mm->mmap modification happens so
+ * we don't lose track.
+ */
+static u32 lookup_dcookie(struct mm_struct * mm, unsigned long addr, off_t * offset)
+{
+ u32 cookie = 0;
+ struct vm_area_struct * vma;
+
+ for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
+ if (!vma)
+ goto out;
+
+ if (!vma->vm_file)
+ continue;
+
+ if (addr < vma->vm_start || addr >= vma->vm_end)
+ continue;
+
+ cookie = fast_get_dcookie(vma->vm_file->f_dentry,
+ vma->vm_file->f_vfsmnt);
+ *offset = (vma->vm_pgoff << PAGE_SHIFT) + addr - vma->vm_start;
+ break;
+ }
+out:
+ return cookie;
+}
+
+
+static u32 last_cookie = ~0UL;
+
+static void add_cpu_switch(int i)
+{
+ add_event_entry(ESCAPE_CODE);
+ add_event_entry(CPU_SWITCH_CODE);
+ add_event_entry(i);
+ last_cookie = ~0UL;
+}
+
+
+static void add_ctx_switch(pid_t pid, u32 cookie)
+{
+ add_event_entry(ESCAPE_CODE);
+ add_event_entry(CTX_SWITCH_CODE);
+ add_event_entry(pid);
+ add_event_entry(cookie);
+}
+
+
+static void add_cookie_switch(u32 cookie)
+{
+ add_event_entry(ESCAPE_CODE);
+ add_event_entry(COOKIE_SWITCH_CODE);
+ add_event_entry(cookie);
+}
+
+
+static void add_sample_entry(unsigned long offset, unsigned long event)
+{
+ add_event_entry(offset);
+ add_event_entry(event);
+}
+
+
+static void add_us_sample(struct mm_struct * mm, struct op_sample * s)
+{
+ u32 cookie;
+ off_t offset;
+
+ cookie = lookup_dcookie(mm, s->eip, &offset);
+
+ if (!cookie)
+ return;
+
+ if (cookie != last_cookie) {
+ add_cookie_switch(cookie);
+ last_cookie = cookie;
+ }
+
+ add_sample_entry(offset, s->event);
+}
+
+
+static inline int is_kernel(unsigned long val)
+{
+ return val > __PAGE_OFFSET;
+}
+
+
+/* Add a sample to the global event buffer. If possible the
+ * sample is converted into a persistent dentry/offset pair
+ * for later lookup from userspace.
+ */
+static void add_sample(struct mm_struct * mm, struct op_sample * s)
+{
+ if (is_kernel(s->eip)) {
+ add_sample_entry(s->eip, s->event);
+ } else if (mm) {
+ add_us_sample(mm, s);
+ }
+}
+
+
+static void release_mm(struct mm_struct * mm)
+{
+ if (mm)
+ up_read(&mm->mmap_sem);
+}
+
+
+/* Take the task's mmap_sem to protect ourselves from
+ * races when we do lookup_dcookie().
+ */
+static struct mm_struct * take_task_mm(struct task_struct * task)
+{
+ struct mm_struct * mm;
+ task_lock(task);
+ mm = task->mm;
+ task_unlock(task);
+
+ /* if task->mm !NULL, mm_count must be at least 1. It cannot
+ * drop to 0 without the task exiting, which will have to sleep
+ * on buffer_sem first. So we do not need to mark mm_count
+ * ourselves.
+ */
+ if (mm) {
+ /* More ugliness. If a task took its mmap
+ * sem then came to sleep on buffer_sem we
+ * will deadlock waiting for it. So we can
+ * but try. This will lose samples :/
+ */
+ if (!down_read_trylock(&mm->mmap_sem)) {
+ /* FIXME: this underestimates samples lost */
+ atomic_inc(&oprofile_stats.sample_lost_mmap_sem);
+ mm = NULL;
+ }
+ }
+
+ return mm;
+}
+
+
+static inline int is_ctx_switch(unsigned long val)
+{
+ return val == ~0UL;
+}
+
+
+/* Sync one of the CPU's buffers into the global event buffer.
+ * Here we need to go through each batch of samples punctuated
+ * by context switch notes, taking the task's mmap_sem and doing
+ * lookup in task->mm->mmap to convert EIP into dcookie/offset
+ * value.
+ */
+static void sync_buffer(struct oprofile_cpu_buffer * cpu_buf)
+{
+ struct mm_struct * mm = 0;
+ struct task_struct * new;
+ u32 cookie;
+ int i;
+
+ for (i=0; i < cpu_buf->pos; ++i) {
+ struct op_sample * s = &cpu_buf->buffer[i];
+
+ if (is_ctx_switch(s->eip)) {
+ new = (struct task_struct *)s->event;
+
+ release_mm(mm);
+ mm = take_task_mm(new);
+
+ cookie = get_exec_dcookie(mm);
+ add_ctx_switch(new->pid, cookie);
+ } else {
+ add_sample(mm, s);
+ }
+ }
+ release_mm(mm);
+
+ cpu_buf->pos = 0;
+}
+
+
+/* Process each CPU's local buffer into the global
+ * event buffer.
+ */
+static void sync_cpu_buffers(void)
+{
+ int i;
+
+ down(&buffer_sem);
+
+ for (i = 0; i < NR_CPUS; ++i) {
+ struct oprofile_cpu_buffer * cpu_buf;
+
+ if (!cpu_possible(i))
+ continue;
+
+ cpu_buf = &cpu_buffer[i];
+
+ /* We take a spin lock even though we might
+ * sleep. It's OK because other users are try
+ * lockers only, and this region is already
+ * protected by buffer_sem. It's raw to prevent
+ * the preempt bogometer firing. Fruity, huh ? */
+ _raw_spin_lock(&cpu_buf->int_lock);
+ add_cpu_switch(i);
+ sync_buffer(cpu_buf);
+ _raw_spin_unlock(&cpu_buf->int_lock);
+ }
+
+ up(&buffer_sem);
+
+ mod_timer(&sync_timer, jiffies + DEFAULT_EXPIRE);
+}
+
+
+static void wq_sync_buffers(void * data)
+{
+ sync_cpu_buffers();
+}
+
+
+/* It is possible that we could have no munmap() or
+ * other events for a period of time. This will lead
+ * the CPU buffers to overflow and lose samples and
+ * context switches. We try to reduce the problem
+ * by timing out when nothing happens for a while.
+ */
+static void timer_ping(unsigned long data)
+{
+ schedule_work(&sync_wq);
+ /* timer is re-added by the scheduled task */
+}
diff --git a/drivers/oprofile/buffer_sync.h b/drivers/oprofile/buffer_sync.h
new file mode 100644
index 000000000000..a8def27d8502
--- /dev/null
+++ b/drivers/oprofile/buffer_sync.h
@@ -0,0 +1,19 @@
+/**
+ * @file buffer_sync.h
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
+ */
+
+#ifndef OPROFILE_BUFFER_SYNC_H
+#define OPROFILE_BUFFER_SYNC_H
+
+/* add the necessary profiling hooks */
+int sync_start(void);
+
+/* remove the hooks */
+void sync_stop(void);
+
+#endif /* OPROFILE_BUFFER_SYNC_H */
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
new file mode 100644
index 000000000000..42af606defd4
--- /dev/null
+++ b/drivers/oprofile/cpu_buffer.c
@@ -0,0 +1,135 @@
+/**
+ * @file cpu_buffer.c
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
+ *
+ * Each CPU has a local buffer that stores PC value/event
+ * pairs. We also log context switches when we notice them.
+ * Eventually each CPU's buffer is processed into the global
+ * event buffer by sync_cpu_buffers().
+ *
+ * We use a local buffer for two reasons: an NMI or similar
+ * interrupt cannot synchronise, and high sampling rates
+ * would lead to catastrophic global synchronisation if
+ * a global buffer was used.
+ */
+
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#include <linux/smp.h>
+
+#include "cpu_buffer.h"
+#include "oprof.h"
+#include "oprofile_stats.h"
+
+struct oprofile_cpu_buffer cpu_buffer[NR_CPUS] __cacheline_aligned;
+
+static unsigned long buffer_size;
+
+static void __free_cpu_buffers(int num)
+{
+ int i;
+
+ for (i=0; i < num; ++i) {
+ struct oprofile_cpu_buffer * b = &cpu_buffer[i];
+
+ if (!cpu_possible(i))
+ continue;
+
+ vfree(b->buffer);
+ }
+}
+
+
+int alloc_cpu_buffers(void)
+{
+ int i;
+
+ buffer_size = fs_cpu_buffer_size;
+
+ for (i=0; i < NR_CPUS; ++i) {
+ struct oprofile_cpu_buffer * b = &cpu_buffer[i];
+
+ if (!cpu_possible(i))
+ continue;
+
+ b->buffer = vmalloc(sizeof(struct op_sample) * buffer_size);
+ if (!b->buffer)
+ goto fail;
+
+ spin_lock_init(&b->int_lock);
+ b->pos = 0;
+ b->last_task = 0;
+ b->sample_received = 0;
+ b->sample_lost_locked = 0;
+ b->sample_lost_overflow = 0;
+ }
+ return 0;
+fail:
+ __free_cpu_buffers(i);
+ return -ENOMEM;
+}
+
+
+void free_cpu_buffers(void)
+{
+ __free_cpu_buffers(NR_CPUS);
+}
+
+
+/* Note we can't use a semaphore here as this is supposed to
+ * be safe from any context. Instead we trylock the CPU's int_lock.
+ * int_lock is taken by the processing code in sync_cpu_buffers()
+ * so we avoid disturbing that.
+ */
+void oprofile_add_sample(unsigned long eip, unsigned long event, int cpu)
+{
+ struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[cpu];
+ struct task_struct * task;
+
+ /* temporary ? */
+ BUG_ON(!oprofile_started);
+
+ cpu_buf->sample_received++;
+
+ if (!spin_trylock(&cpu_buf->int_lock)) {
+ cpu_buf->sample_lost_locked++;
+ return;
+ }
+
+ if (cpu_buf->pos > buffer_size - 2) {
+ cpu_buf->sample_lost_overflow++;
+ goto out;
+ }
+
+ task = current;
+
+ /* notice a task switch */
+ if (cpu_buf->last_task != task) {
+ cpu_buf->last_task = task;
+ if (!(task->flags & PF_EXITING)) {
+ cpu_buf->buffer[cpu_buf->pos].eip = ~0UL;
+ cpu_buf->buffer[cpu_buf->pos].event = (unsigned long)task;
+ cpu_buf->pos++;
+ }
+ }
+
+ /* If the task is exiting it's not safe to take a sample
+ * as the task_struct is about to be freed. We can't just
+ * notify at release_task() time because of CLONE_DETACHED
+ * tasks that release_task() themselves.
+ */
+ if (task->flags & PF_EXITING) {
+ cpu_buf->sample_lost_task_exit++;
+ goto out;
+ }
+
+ cpu_buf->buffer[cpu_buf->pos].eip = eip;
+ cpu_buf->buffer[cpu_buf->pos].event = event;
+ cpu_buf->pos++;
+out:
+ spin_unlock(&cpu_buf->int_lock);
+}
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
new file mode 100644
index 000000000000..87ce0a18550d
--- /dev/null
+++ b/drivers/oprofile/cpu_buffer.h
@@ -0,0 +1,45 @@
+/**
+ * @file cpu_buffer.h
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
+ */
+
+#ifndef OPROFILE_CPU_BUFFER_H
+#define OPROFILE_CPU_BUFFER_H
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+
+struct task_struct;
+
+/* allocate a sample buffer for each CPU */
+int alloc_cpu_buffers(void);
+
+void free_cpu_buffers(void);
+
+/* CPU buffer is composed of such entries (which are
+ * also used for context switch notes)
+ */
+struct op_sample {
+ unsigned long eip;
+ unsigned long event;
+};
+
+struct oprofile_cpu_buffer {
+ spinlock_t int_lock;
+ /* protected by int_lock */
+ unsigned long pos;
+ struct task_struct * last_task;
+ struct op_sample * buffer;
+ unsigned long sample_received;
+ unsigned long sample_lost_locked;
+ unsigned long sample_lost_overflow;
+ unsigned long sample_lost_task_exit;
+} ____cacheline_aligned;
+
+extern struct oprofile_cpu_buffer cpu_buffer[];
+
+#endif /* OPROFILE_CPU_BUFFER_H */
diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
new file mode 100644
index 000000000000..3552be34eca7
--- /dev/null
+++ b/drivers/oprofile/event_buffer.c
@@ -0,0 +1,186 @@
+/**
+ * @file event_buffer.c
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
+ *
+ * This is the global event buffer that the user-space
+ * daemon reads from. The event buffer is an untyped array
+ * of unsigned longs. Entries are prefixed by the
+ * escape value ESCAPE_CODE followed by an identifying code.
+ */
+
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#include <linux/smp.h>
+#include <linux/dcookies.h>
+#include <linux/oprofile.h>
+#include <asm/uaccess.h>
+#include <asm/atomic.h>
+
+#include "event_buffer.h"
+#include "cpu_buffer.h"
+#include "oprof.h"
+#include "oprofile_stats.h"
+
+DECLARE_MUTEX(buffer_sem);
+
+static unsigned long buffer_opened;
+static DECLARE_WAIT_QUEUE_HEAD(buffer_wait);
+static unsigned long * event_buffer;
+static unsigned long buffer_size;
+static unsigned long buffer_watershed;
+static size_t buffer_pos;
+/* atomic_t because wait_event checks it outside of buffer_sem */
+static atomic_t buffer_ready = ATOMIC_INIT(0);
+
+/* Add an entry to the event buffer. When we
+ * get near to the end we wake up the process
+ * sleeping on the read() of the file.
+ */
+void add_event_entry(unsigned long value)
+{
+ /* Buffer full: drop the value and account the loss. */
+ if (buffer_pos == buffer_size) {
+ atomic_inc(&oprofile_stats.event_lost_overflow);
+ return;
+ }
+
+ event_buffer[buffer_pos] = value;
+ /* Crossing the watershed: wake the daemon so it drains the
+  * buffer before entries start getting lost.
+  */
+ if (++buffer_pos == buffer_size - buffer_watershed) {
+ atomic_set(&buffer_ready, 1);
+ wake_up(&buffer_wait);
+ }
+}
+
+
+/* Wake up the waiting process if any. This happens
+ * on "echo 0 >/dev/oprofile/enable" so the daemon
+ * processes the data remaining in the event buffer.
+ */
+void wake_up_buffer_waiter(void)
+{
+ down(&buffer_sem);
+ /* Mark data available and wake any reader blocked in
+  * event_buffer_read(); buffer_sem serialises against it.
+  */
+ atomic_set(&buffer_ready, 1);
+ wake_up(&buffer_wait);
+ up(&buffer_sem);
+}
+
+
+int alloc_event_buffer(void)
+{
+ int err = -ENOMEM;
+
+ /* Snapshot the user-configurable sizes under oprofilefs_lock so a
+  * concurrent write via oprofilefs cannot tear the pair.
+  */
+ spin_lock(&oprofilefs_lock);
+ buffer_size = fs_buffer_size;
+ buffer_watershed = fs_buffer_watershed;
+ spin_unlock(&oprofilefs_lock);
+
+ /* The watershed must lie strictly below the buffer end. */
+ if (buffer_watershed >= buffer_size)
+ return -EINVAL;
+
+ event_buffer = vmalloc(sizeof(unsigned long) * buffer_size);
+ if (!event_buffer)
+ goto out;
+
+ err = 0;
+out:
+ return err;
+}
+
+
+/* Release the global event buffer allocated by alloc_event_buffer(). */
+void free_event_buffer(void)
+{
+ vfree(event_buffer);
+}
+
+
+/* Open of /dev/oprofile/buffer: admin-only, single opener (the daemon). */
+int event_buffer_open(struct inode * inode, struct file * file)
+{
+ int err = -EPERM;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ /* only one open at a time */
+ if (test_and_set_bit(0, &buffer_opened))
+ return -EBUSY;
+
+ /* Register as a user of dcookies
+ * to ensure they persist for the lifetime of
+ * the open event file
+ */
+ err = -EINVAL;
+ file->private_data = dcookie_register();
+ if (!file->private_data)
+ goto out;
+
+ if ((err = oprofile_setup()))
+ goto fail;
+
+ /* NB: the actual start happens from userspace
+ * echo 1 >/dev/oprofile/enable
+ */
+
+ return 0;
+
+fail:
+ dcookie_unregister(file->private_data);
+out:
+ clear_bit(0, &buffer_opened);
+ return err;
+}
+
+
+/* Last close of the event file: stop profiling, tear down buffers,
+ * and reset state so the file can be opened again.
+ */
+int event_buffer_release(struct inode * inode, struct file * file)
+{
+ oprofile_stop();
+ oprofile_shutdown();
+ dcookie_unregister(file->private_data);
+ buffer_pos = 0;
+ atomic_set(&buffer_ready, 0);
+ clear_bit(0, &buffer_opened);
+ return 0;
+}
+
+
+/* Blocking read of the whole event buffer. The daemon must ask for
+ * exactly buffer_size entries at offset 0.
+ */
+ssize_t event_buffer_read(struct file * file, char * buf, size_t count, loff_t * offset)
+{
+ int retval = -EINVAL;
+ size_t const max = buffer_size * sizeof(unsigned long);
+
+ /* handling partial reads is more trouble than it's worth */
+ if (count != max || *offset)
+ return -EINVAL;
+
+ /* wait for the event buffer to fill up with some data */
+ wait_event_interruptible(buffer_wait, atomic_read(&buffer_ready));
+ if (signal_pending(current))
+ return -EINTR;
+
+ /* buffer_sem excludes the sync code and wake_up_buffer_waiter() */
+ down(&buffer_sem);
+
+ atomic_set(&buffer_ready, 0);
+
+ retval = -EFAULT;
+
+ /* only the first buffer_pos entries contain valid data */
+ count = buffer_pos * sizeof(unsigned long);
+
+ if (copy_to_user(buf, event_buffer, count))
+ goto out;
+
+ retval = count;
+ buffer_pos = 0;
+
+out:
+ up(&buffer_sem);
+ return retval;
+}
+
+/* file operations for /dev/oprofile/buffer */
+struct file_operations event_buffer_fops = {
+ .open = event_buffer_open,
+ .release = event_buffer_release,
+ .read = event_buffer_read,
+};
diff --git a/drivers/oprofile/event_buffer.h b/drivers/oprofile/event_buffer.h
new file mode 100644
index 000000000000..11d2ed4dea42
--- /dev/null
+++ b/drivers/oprofile/event_buffer.h
@@ -0,0 +1,42 @@
+/**
+ * @file event_buffer.h
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
+ */
+
+#ifndef EVENT_BUFFER_H
+#define EVENT_BUFFER_H
+
+#include <linux/types.h>
+#include <linux/sem.h>
+
+int alloc_event_buffer(void);
+
+void free_event_buffer(void);
+
+/* wake up the process sleeping on the event file */
+void wake_up_buffer_waiter(void);
+
+/* Each escaped entry is prefixed by ESCAPE_CODE
+ * then one of the following codes, then the
+ * relevant data.
+ */
+/* Parenthesised so the expansion is safe inside any expression. */
+#define ESCAPE_CODE (~0UL)
+#define CTX_SWITCH_CODE 1
+#define CPU_SWITCH_CODE 2
+#define COOKIE_SWITCH_CODE 3
+
+/* add data to the event buffer */
+void add_event_entry(unsigned long data);
+
+extern struct file_operations event_buffer_fops;
+
+/* mutex between sync_cpu_buffers() and the
+ * file reading code.
+ */
+extern struct semaphore buffer_sem;
+
+#endif /* EVENT_BUFFER_H */
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
new file mode 100644
index 000000000000..91e120f1ac75
--- /dev/null
+++ b/drivers/oprofile/oprof.c
@@ -0,0 +1,153 @@
+/**
+ * @file oprof.c
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/notifier.h>
+#include <linux/profile.h>
+#include <linux/oprofile.h>
+
+#include "oprof.h"
+#include "event_buffer.h"
+#include "cpu_buffer.h"
+#include "buffer_sync.h"
+#include "oprofile_stats.h"
+
+struct oprofile_operations * oprofile_ops;
+enum oprofile_cpu oprofile_cpu_type;
+unsigned long oprofile_started;
+static unsigned long is_setup;
+static DECLARE_MUTEX(start_sem);
+
+/* Allocate all buffers and start the sync machinery; called on open of
+ * the event file. Returns 0 on success or a negative errno, undoing
+ * every completed step on failure.
+ */
+int oprofile_setup(void)
+{
+ int err;
+
+ if ((err = alloc_cpu_buffers()))
+ goto out;
+
+ if ((err = alloc_event_buffer()))
+ goto out1;
+
+ /* the arch-specific setup hook is optional */
+ if (oprofile_ops->setup && (err = oprofile_ops->setup()))
+ goto out2;
+
+ /* Note even though this starts part of the
+ * profiling overhead, it's necessary to prevent
+ * us missing task deaths and eventually oopsing
+ * when trying to process the event buffer.
+ */
+ if ((err = sync_start()))
+ goto out3;
+
+ /* publish completion under start_sem so oprofile_start() cannot
+  * observe a half-initialised state
+  */
+ down(&start_sem);
+ is_setup = 1;
+ up(&start_sem);
+ return 0;
+
+out3:
+ if (oprofile_ops->shutdown)
+ oprofile_ops->shutdown();
+out2:
+ free_event_buffer();
+out1:
+ free_cpu_buffers();
+out:
+ return err;
+}
+
+
+/* Actually start profiling (echo 1>/dev/oprofile/enable) */
+int oprofile_start(void)
+{
+ int err = -EINVAL;
+
+ down(&start_sem);
+
+ /* refuse until oprofile_setup() has completed */
+ if (!is_setup)
+ goto out;
+
+ err = 0;
+
+ /* already running: success, nothing to do */
+ if (oprofile_started)
+ goto out;
+
+ if ((err = oprofile_ops->start()))
+ goto out;
+
+ oprofile_started = 1;
+ oprofile_reset_stats();
+out:
+ up(&start_sem);
+ return err;
+}
+
+
+/* echo 0>/dev/oprofile/enable */
+void oprofile_stop(void)
+{
+ down(&start_sem);
+ /* stopping twice is a no-op */
+ if (!oprofile_started)
+ goto out;
+ oprofile_ops->stop();
+ oprofile_started = 0;
+ /* wake up the daemon to read what remains */
+ wake_up_buffer_waiter();
+out:
+ up(&start_sem);
+}
+
+
+/* Tear down everything oprofile_setup() built; called on release of
+ * the event file, after oprofile_stop().
+ */
+void oprofile_shutdown(void)
+{
+ sync_stop();
+ /* the arch-specific shutdown hook is optional */
+ if (oprofile_ops->shutdown)
+ oprofile_ops->shutdown();
+ /* down() is also necessary to synchronise all pending events
+ * before freeing */
+ down(&buffer_sem);
+ is_setup = 0;
+ up(&buffer_sem);
+ free_event_buffer();
+ free_cpu_buffers();
+}
+
+
+/* Module init: wire up the architecture hooks and register oprofilefs. */
+static int __init oprofile_init(void)
+{
+ int err;
+
+ /* Architecture must fill in the interrupt ops and the
+ * logical CPU type.
+ */
+ err = oprofile_arch_init(&oprofile_ops, &oprofile_cpu_type);
+ if (err)
+ goto out;
+
+ /* NOTE(review): if oprofilefs_register() fails, the arch init is not
+  * unwound here — confirm whether an arch-level exit hook is needed.
+  */
+ err = oprofilefs_register();
+ if (err)
+ goto out;
+
+out:
+ return err;
+}
+
+
+/* Module exit: unregister the filesystem registered in oprofile_init(). */
+static void __exit oprofile_exit(void)
+{
+ oprofilefs_unregister();
+}
+
+MODULE_LICENSE("GPL");
+module_init(oprofile_init);
+module_exit(oprofile_exit);
diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h
new file mode 100644
index 000000000000..9f19ba5f39b9
--- /dev/null
+++ b/drivers/oprofile/oprof.h
@@ -0,0 +1,34 @@
+/**
+ * @file oprof.h
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
+ */
+
+#ifndef OPROF_H
+#define OPROF_H
+
+#include <linux/spinlock.h>
+#include <linux/oprofile.h>
+
+int oprofile_setup(void);
+void oprofile_shutdown(void);
+
+int oprofilefs_register(void);
+void oprofilefs_unregister(void);
+
+int oprofile_start(void);
+void oprofile_stop(void);
+
+extern unsigned long fs_buffer_size;
+extern unsigned long fs_cpu_buffer_size;
+extern unsigned long fs_buffer_watershed;
+extern enum oprofile_cpu oprofile_cpu_type;
+extern struct oprofile_operations * oprofile_ops;
+extern unsigned long oprofile_started;
+
+void oprofile_create_files(struct super_block * sb, struct dentry * root);
+
+#endif /* OPROF_H */
diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
new file mode 100644
index 000000000000..22d8bf5994b6
--- /dev/null
+++ b/drivers/oprofile/oprofile_files.c
@@ -0,0 +1,91 @@
+/**
+ * @file oprofile_files.c
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
+ */
+
+#include <linux/oprofile.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <asm/uaccess.h>
+
+#include "oprof.h"
+#include "event_buffer.h"
+#include "oprofile_stats.h"
+
+unsigned long fs_buffer_size = 131072;
+unsigned long fs_cpu_buffer_size = 8192;
+unsigned long fs_buffer_watershed = 32768; /* FIXME: tune */
+
+
+/* open hook for files needing no per-open state */
+static int simple_open(struct inode * inode, struct file * filp)
+{
+ return 0;
+}
+
+
+/* read of "cpu_type": reports the enum oprofile_cpu value as a decimal. */
+static ssize_t cpu_type_read(struct file * file, char * buf, size_t count, loff_t * offset)
+{
+ unsigned long cpu_type = oprofile_cpu_type;
+
+ return oprofilefs_ulong_to_user(&cpu_type, buf, count, offset);
+}
+
+
+static struct file_operations cpu_type_fops = {
+ .open = simple_open,
+ .read = cpu_type_read,
+};
+
+
+/* read of "enable": 1 while profiling is started, 0 otherwise */
+static ssize_t enable_read(struct file * file, char * buf, size_t count, loff_t * offset)
+{
+ return oprofilefs_ulong_to_user(&oprofile_started, buf, count, offset);
+}
+
+
+/* write of "enable": non-zero starts profiling, zero stops it.
+ * Returns count on success, a negative errno on failure.
+ */
+static ssize_t enable_write(struct file *file, char const * buf, size_t count, loff_t * offset)
+{
+ unsigned long val;
+ int retval;
+
+ if (*offset)
+ return -EINVAL;
+
+ retval = oprofilefs_ulong_from_user(&val, buf, count);
+ if (retval)
+ return retval;
+
+ if (val)
+ retval = oprofile_start();
+ else
+ oprofile_stop();
+
+ /* only oprofile_start() can have set an error here */
+ if (retval)
+ return retval;
+ return count;
+}
+
+
+static struct file_operations enable_fops = {
+ .open = simple_open,
+ .read = enable_read,
+ .write = enable_write,
+};
+
+
+/* Populate the oprofilefs root with the core control/stat files and let
+ * the architecture add its own via the optional create_files hook.
+ */
+void oprofile_create_files(struct super_block * sb, struct dentry * root)
+{
+ oprofilefs_create_file(sb, root, "enable", &enable_fops);
+ oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
+ oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size);
+ oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed);
+ oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &fs_cpu_buffer_size);
+ oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops);
+ oprofile_create_stats_files(sb, root);
+ if (oprofile_ops->create_files)
+ oprofile_ops->create_files(sb, root);
+}
diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
new file mode 100644
index 000000000000..479d8315558f
--- /dev/null
+++ b/drivers/oprofile/oprofile_stats.c
@@ -0,0 +1,77 @@
+/**
+ * @file oprofile_stats.c
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon
+ */
+
+#include <linux/oprofile.h>
+#include <linux/smp.h>
+
+#include "oprofile_stats.h"
+#include "cpu_buffer.h"
+
+struct oprofile_stat_struct oprofile_stats;
+
+/* Zero all per-CPU and global statistics counters; called from
+ * oprofile_start() so each run begins with clean stats.
+ */
+void oprofile_reset_stats(void)
+{
+ struct oprofile_cpu_buffer * cpu_buf;
+ int i;
+
+ for (i = 0; i < NR_CPUS; ++i) {
+ if (!cpu_possible(i))
+ continue;
+
+ cpu_buf = &cpu_buffer[i];
+ cpu_buf->sample_received = 0;
+ cpu_buf->sample_lost_locked = 0;
+ cpu_buf->sample_lost_overflow = 0;
+ cpu_buf->sample_lost_task_exit = 0;
+ }
+
+ atomic_set(&oprofile_stats.sample_lost_mmap_sem, 0);
+ atomic_set(&oprofile_stats.event_lost_overflow, 0);
+}
+
+
+/* Create stats/cpuN/ read-only counter files plus the global counters. */
+void oprofile_create_stats_files(struct super_block * sb, struct dentry * root)
+{
+ struct oprofile_cpu_buffer * cpu_buf;
+ struct dentry * cpudir;
+ struct dentry * dir;
+ char buf[10];
+ int i;
+
+ dir = oprofilefs_mkdir(sb, root, "stats");
+ if (!dir)
+ return;
+
+ for (i = 0; i < NR_CPUS; ++i) {
+ if (!cpu_possible(i))
+ continue;
+
+ cpu_buf = &cpu_buffer[i];
+ /* use the whole buffer: a bound of 6 truncated "cpu100"
+  * and above into duplicate "cpu10" entries
+  */
+ snprintf(buf, sizeof(buf), "cpu%d", i);
+ cpudir = oprofilefs_mkdir(sb, dir, buf);
+ if (!cpudir)
+ continue;
+
+ /* Strictly speaking access to these ulongs is racy,
+ * but we can't simply lock them, and they are
+ * informational only.
+ */
+ oprofilefs_create_ro_ulong(sb, cpudir, "sample_received",
+ &cpu_buf->sample_received);
+ oprofilefs_create_ro_ulong(sb, cpudir, "sample_lost_locked",
+ &cpu_buf->sample_lost_locked);
+ oprofilefs_create_ro_ulong(sb, cpudir, "sample_lost_overflow",
+ &cpu_buf->sample_lost_overflow);
+ oprofilefs_create_ro_ulong(sb, cpudir, "sample_lost_task_exit",
+ &cpu_buf->sample_lost_task_exit);
+ }
+
+ oprofilefs_create_ro_atomic(sb, dir, "sample_lost_mmap_sem",
+ &oprofile_stats.sample_lost_mmap_sem);
+ oprofilefs_create_ro_atomic(sb, dir, "event_lost_overflow",
+ &oprofile_stats.event_lost_overflow);
+}
diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
new file mode 100644
index 000000000000..8ca3596c2bef
--- /dev/null
+++ b/drivers/oprofile/oprofile_stats.h
@@ -0,0 +1,31 @@
+/**
+ * @file oprofile_stats.h
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon
+ */
+
+#ifndef OPROFILE_STATS_H
+#define OPROFILE_STATS_H
+
+#include <asm/atomic.h>
+
+struct oprofile_stat_struct {
+ atomic_t sample_lost_mmap_sem;
+ atomic_t event_lost_overflow;
+};
+
+extern struct oprofile_stat_struct oprofile_stats;
+
+/* reset all stats to zero */
+void oprofile_reset_stats(void);
+
+struct super_block;
+struct dentry;
+
+/* create the stats/ dir */
+void oprofile_create_stats_files(struct super_block * sb, struct dentry * root);
+
+#endif /* OPROFILE_STATS_H */
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
new file mode 100644
index 000000000000..a86100975cb8
--- /dev/null
+++ b/drivers/oprofile/oprofilefs.c
@@ -0,0 +1,306 @@
+/**
+ * @file oprofilefs.c
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon
+ *
+ * A simple filesystem for configuration and
+ * access of oprofile.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pagemap.h>
+#include <linux/fs.h>
+#include <linux/dcache.h>
+#include <linux/file.h>
+#include <linux/namei.h>
+#include <linux/oprofile.h>
+#include <asm/uaccess.h>
+
+#include "oprof.h"
+
+#define OPROFILEFS_MAGIC 0x6f70726f
+
+spinlock_t oprofilefs_lock = SPIN_LOCK_UNLOCKED;
+
+/* Allocate a fresh inode on sb with the given mode, owned by root,
+ * timestamped now. Returns NULL on allocation failure.
+ */
+static struct inode * oprofilefs_get_inode(struct super_block * sb, int mode)
+{
+ struct inode * inode = new_inode(sb);
+
+ if (inode) {
+ inode->i_mode = mode;
+ inode->i_uid = 0;
+ inode->i_gid = 0;
+ inode->i_blksize = PAGE_CACHE_SIZE;
+ inode->i_blocks = 0;
+ inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ }
+ return inode;
+}
+
+
+static struct super_operations s_ops = {
+ .statfs = simple_statfs,
+ .drop_inode = generic_delete_inode,
+};
+
+#define TMPBUFSIZE 50
+
+/* Format *val as "%lu\n" and copy it to userspace, honouring the file
+ * offset so repeated reads eventually return 0 (EOF).
+ */
+ssize_t oprofilefs_ulong_to_user(unsigned long * val, char * buf, size_t count, loff_t * offset)
+{
+ char tmpbuf[TMPBUFSIZE];
+ size_t maxlen;
+
+ if (!count)
+ return 0;
+
+ /* format under the lock so we read a consistent value */
+ spin_lock(&oprofilefs_lock);
+ maxlen = snprintf(tmpbuf, TMPBUFSIZE, "%lu\n", *val);
+ spin_unlock(&oprofilefs_lock);
+ /* snprintf may report a length larger than the buffer; clamp it */
+ if (maxlen > TMPBUFSIZE)
+ maxlen = TMPBUFSIZE;
+
+ /* reading past the formatted text: EOF */
+ if (*offset > maxlen)
+ return 0;
+
+ if (count > maxlen - *offset)
+ count = maxlen - *offset;
+
+ if (copy_to_user(buf, tmpbuf + *offset, count))
+ return -EFAULT;
+
+ *offset += count;
+
+ return count;
+}
+
+
+/* Parse a decimal unsigned long written by userspace into *val.
+ * Returns 0 on success, -EINVAL if too long, -EFAULT on copy failure.
+ */
+int oprofilefs_ulong_from_user(unsigned long * val, char const * buf, size_t count)
+{
+ char tmpbuf[TMPBUFSIZE];
+
+ if (!count)
+ return 0;
+
+ /* leave room for the NUL terminator */
+ if (count > TMPBUFSIZE - 1)
+ return -EINVAL;
+
+ memset(tmpbuf, 0x0, TMPBUFSIZE);
+
+ if (copy_from_user(tmpbuf, buf, count))
+ return -EFAULT;
+
+ /* store under the lock to pair with oprofilefs_ulong_to_user() */
+ spin_lock(&oprofilefs_lock);
+ *val = simple_strtoul(tmpbuf, NULL, 10);
+ spin_unlock(&oprofilefs_lock);
+ return 0;
+}
+
+
+/* read hook: the target ulong was stashed in private_data by default_open */
+static ssize_t ulong_read_file(struct file * file, char * buf, size_t count, loff_t * offset)
+{
+ return oprofilefs_ulong_to_user(file->private_data, buf, count, offset);
+}
+
+
+/* write hook: parse userspace input into the ulong behind this file */
+static ssize_t ulong_write_file(struct file * file, char const * buf, size_t count, loff_t * offset)
+{
+ unsigned long * value = file->private_data;
+ int retval;
+
+ /* no partial/offset writes */
+ if (*offset)
+ return -EINVAL;
+
+ retval = oprofilefs_ulong_from_user(value, buf, count);
+
+ if (retval)
+ return retval;
+ return count;
+}
+
+
+/* open hook: hand the pointer stashed in the inode to the file */
+static int default_open(struct inode * inode, struct file * filp)
+{
+ if (inode->u.generic_ip)
+ filp->private_data = inode->u.generic_ip;
+ return 0;
+}
+
+
+static struct file_operations ulong_fops = {
+ .read = ulong_read_file,
+ .write = ulong_write_file,
+ .open = default_open,
+};
+
+
+static struct file_operations ulong_ro_fops = {
+ .read = ulong_read_file,
+ .open = default_open,
+};
+
+
+/* Create a regular file named name under root with the given fops.
+ * Returns the new dentry, or 0 on allocation failure.
+ */
+static struct dentry * __oprofilefs_create_file(struct super_block * sb,
+ struct dentry * root, char const * name, struct file_operations * fops)
+{
+ struct dentry * dentry;
+ struct inode * inode;
+ struct qstr qname;
+ qname.name = name;
+ qname.len = strlen(name);
+ qname.hash = full_name_hash(qname.name, qname.len);
+ dentry = d_alloc(root, &qname);
+ if (!dentry)
+ return 0;
+ inode = oprofilefs_get_inode(sb, S_IFREG | 0644);
+ if (!inode) {
+ dput(dentry);
+ return 0;
+ }
+ inode->i_fop = fops;
+ d_add(dentry, inode);
+ return dentry;
+}
+
+
+/* Create a read-write file backed by the ulong at val.
+ * NOTE(review): -EFAULT for a creation failure is unusual — -ENOMEM
+ * would match the cause; confirm callers before changing.
+ */
+int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
+ char const * name, unsigned long * val)
+{
+ struct dentry * d = __oprofilefs_create_file(sb, root, name, &ulong_fops);
+ if (!d)
+ return -EFAULT;
+
+ d->d_inode->u.generic_ip = val;
+ return 0;
+}
+
+
+/* Create a read-only file backed by the ulong at val. */
+int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
+ char const * name, unsigned long * val)
+{
+ struct dentry * d = __oprofilefs_create_file(sb, root, name, &ulong_ro_fops);
+ if (!d)
+ return -EFAULT;
+
+ d->d_inode->u.generic_ip = val;
+ return 0;
+}
+
+
+/* read hook for atomic_t-backed files: snapshot then format as ulong */
+static ssize_t atomic_read_file(struct file * file, char * buf, size_t count, loff_t * offset)
+{
+ atomic_t * aval = file->private_data;
+ unsigned long val = atomic_read(aval);
+ return oprofilefs_ulong_to_user(&val, buf, count, offset);
+}
+
+
+static struct file_operations atomic_ro_fops = {
+ .read = atomic_read_file,
+ .open = default_open,
+};
+
+
+/* Create a read-only file backed by the atomic_t at val. */
+int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
+ char const * name, atomic_t * val)
+{
+ struct dentry * d = __oprofilefs_create_file(sb, root, name, &atomic_ro_fops);
+ if (!d)
+ return -EFAULT;
+
+ d->d_inode->u.generic_ip = val;
+ return 0;
+}
+
+
+/* Create a file with caller-supplied fops and no backing pointer. */
+int oprofilefs_create_file(struct super_block * sb, struct dentry * root,
+ char const * name, struct file_operations * fops)
+{
+ if (!__oprofilefs_create_file(sb, root, name, fops))
+ return -EFAULT;
+ return 0;
+}
+
+
+/* Create a directory named name under root, using the libfs simple
+ * dir operations. Returns the new dentry, or 0 on allocation failure.
+ */
+struct dentry * oprofilefs_mkdir(struct super_block * sb,
+ struct dentry * root, char const * name)
+{
+ struct dentry * dentry;
+ struct inode * inode;
+ struct qstr qname;
+ qname.name = name;
+ qname.len = strlen(name);
+ qname.hash = full_name_hash(qname.name, qname.len);
+ dentry = d_alloc(root, &qname);
+ if (!dentry)
+ return 0;
+ inode = oprofilefs_get_inode(sb, S_IFDIR | 0755);
+ if (!inode) {
+ dput(dentry);
+ return 0;
+ }
+ inode->i_op = &simple_dir_inode_operations;
+ inode->i_fop = &simple_dir_operations;
+ d_add(dentry, inode);
+ return dentry;
+}
+
+
+/* Fill in the superblock at mount time: create the root directory and
+ * populate it with the oprofile control files.
+ */
+static int oprofilefs_fill_super(struct super_block * sb, void * data, int silent)
+{
+ struct inode * root_inode;
+ struct dentry * root_dentry;
+
+ sb->s_blocksize = PAGE_CACHE_SIZE;
+ sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_magic = OPROFILEFS_MAGIC;
+ sb->s_op = &s_ops;
+
+ root_inode = oprofilefs_get_inode(sb, S_IFDIR | 0755);
+ if (!root_inode)
+ return -ENOMEM;
+ root_inode->i_op = &simple_dir_inode_operations;
+ root_inode->i_fop = &simple_dir_operations;
+ root_dentry = d_alloc_root(root_inode);
+ if (!root_dentry) {
+ iput(root_inode);
+ return -ENOMEM;
+ }
+
+ sb->s_root = root_dentry;
+
+ oprofile_create_files(sb, root_dentry);
+
+ // FIXME: verify kill_litter_super removes our dentries
+ return 0;
+}
+
+
+/* single-instance mount: every mount shares the one superblock */
+static struct super_block * oprofilefs_get_sb(struct file_system_type * fs_type,
+ int flags, char * dev_name, void * data)
+{
+ return get_sb_single(fs_type, flags, data, oprofilefs_fill_super);
+}
+
+
+static struct file_system_type oprofilefs_type = {
+ .owner = THIS_MODULE,
+ .name = "oprofilefs",
+ .get_sb = oprofilefs_get_sb,
+ .kill_sb = kill_litter_super,
+};
+
+
+/* Register the oprofilefs filesystem type with the VFS. */
+int __init oprofilefs_register(void)
+{
+ return register_filesystem(&oprofilefs_type);
+}
+
+
+/* Unregister the filesystem type on module unload. */
+void __exit oprofilefs_unregister(void)
+{
+ unregister_filesystem(&oprofilefs_type);
+}
diff --git a/drivers/pcmcia/bulkmem.c b/drivers/pcmcia/bulkmem.c
index e4757451ab17..bb68d267ad2b 100644
--- a/drivers/pcmcia/bulkmem.c
+++ b/drivers/pcmcia/bulkmem.c
@@ -211,7 +211,7 @@ static void handle_erase_timeout(u_long arg)
retry_erase((erase_busy_t *)arg, MTD_REQ_TIMEOUT);
}
-static int setup_erase_request(client_handle_t handle, eraseq_entry_t *erase)
+static void setup_erase_request(client_handle_t handle, eraseq_entry_t *erase)
{
erase_busy_t *busy;
region_info_t *info;
@@ -229,8 +229,10 @@ static int setup_erase_request(client_handle_t handle, eraseq_entry_t *erase)
else {
erase->State = 1;
busy = kmalloc(sizeof(erase_busy_t), GFP_KERNEL);
- if (!busy)
- return CS_GENERAL_FAILURE;
+ if (!busy) {
+ erase->State = ERASE_FAILED;
+ return;
+ }
busy->erase = erase;
busy->client = handle;
init_timer(&busy->timeout);
@@ -240,7 +242,6 @@ static int setup_erase_request(client_handle_t handle, eraseq_entry_t *erase)
retry_erase(busy, 0);
}
}
- return CS_SUCCESS;
} /* setup_erase_request */
/*======================================================================
@@ -325,7 +326,7 @@ int MTDHelperEntry(int func, void *a1, void *a2)
======================================================================*/
-static int setup_regions(client_handle_t handle, int attr,
+static void setup_regions(client_handle_t handle, int attr,
memory_handle_t *list)
{
int i, code, has_jedec, has_geo;
@@ -340,7 +341,7 @@ static int setup_regions(client_handle_t handle, int attr,
code = (attr) ? CISTPL_DEVICE_A : CISTPL_DEVICE;
if (read_tuple(handle, code, &device) != CS_SUCCESS)
- return CS_GENERAL_FAILURE;
+ return;
code = (attr) ? CISTPL_JEDEC_A : CISTPL_JEDEC_C;
has_jedec = (read_tuple(handle, code, &jedec) == CS_SUCCESS);
if (has_jedec && (device.ndev != jedec.nid)) {
@@ -363,8 +364,10 @@ static int setup_regions(client_handle_t handle, int attr,
if ((device.dev[i].type != CISTPL_DTYPE_NULL) &&
(device.dev[i].size != 0)) {
r = kmalloc(sizeof(*r), GFP_KERNEL);
- if (!r)
- return CS_GENERAL_FAILURE;
+ if (!r) {
+ printk(KERN_NOTICE "cs: setup_regions: kmalloc failed!\n");
+ return;
+ }
r->region_magic = REGION_MAGIC;
r->state = 0;
r->dev_info[0] = '\0';
@@ -389,7 +392,6 @@ static int setup_regions(client_handle_t handle, int attr,
}
offset += device.dev[i].size;
}
- return CS_SUCCESS;
} /* setup_regions */
/*======================================================================
@@ -423,10 +425,8 @@ int pcmcia_get_first_region(client_handle_t handle, region_info_t *rgn)
if ((handle->Attributes & INFO_MASTER_CLIENT) &&
(!(s->state & SOCKET_REGION_INFO))) {
- if (setup_regions(handle, 0, &s->c_region) != CS_SUCCESS)
- return CS_GENERAL_FAILURE;
- if (setup_regions(handle, 1, &s->a_region) != CS_SUCCESS)
- return CS_GENERAL_FAILURE;
+ setup_regions(handle, 0, &s->c_region);
+ setup_regions(handle, 1, &s->a_region);
s->state |= SOCKET_REGION_INFO;
}
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 98c4f53bcec3..2d97de4e1e2a 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2110,8 +2110,6 @@ dasd_open(struct inode *inp, struct file *filp)
dasd_device_t *device;
int rc;
- if ((!inp) || kdev_none(inp->i_rdev))
- return -EINVAL;
if (dasd_probeonly) {
MESSAGE(KERN_INFO,
"No access to device (%d:%d) due to probeonly mode",
@@ -2154,8 +2152,6 @@ dasd_release(struct inode *inp, struct file *filp)
dasd_devmap_t *devmap;
dasd_device_t *device;
- if ((!inp) || kdev_none(inp->i_rdev))
- return -EINVAL;
devmap = dasd_devmap_from_kdev(inp->i_rdev);
device = (devmap != NULL) ?
dasd_get_device(devmap) : ERR_PTR(-ENODEV);
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index be6c7dc5aa0a..67597043b718 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -190,14 +190,13 @@ dasd_gendisk_alloc(int devindex)
}
}
- gdp = alloc_disk();
+ gdp = alloc_disk(1 << DASD_PARTN_BITS);
if (!gdp)
return ERR_PTR(-ENOMEM);
/* Initialize gendisk structure. */
gdp->major = mi->major;
gdp->first_minor = index << DASD_PARTN_BITS;
- gdp->minor_shift = DASD_PARTN_BITS;
gdp->fops = &dasd_device_operations;
/*
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 80f8b7573a41..2c22f4d9e78d 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -324,24 +324,12 @@ fail:
return 0;
}
-/*
- * The file operations
- */
-static int xpram_open (struct inode *inode, struct file *filp)
-{
- if (minor(inode->i_rdev) >= xpram_devs)
- return -ENODEV;
- return 0;
-}
-
static int xpram_ioctl (struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
struct hd_geometry *geo;
unsigned long size;
int idx = minor(inode->i_rdev);
- if (idx >= xpram_devs)
- return -ENODEV;
if (cmd != HDIO_GETGEO)
return -EINVAL;
/*
@@ -350,8 +338,6 @@ static int xpram_ioctl (struct inode *inode, struct file *filp,
* whatever cylinders. Tell also that data starts at sector. 4.
*/
geo = (struct hd_geometry *) arg;
- if (geo == NULL)
- return -EINVAL;
size = (xpram_pages * 8) & ~0x3f;
put_user(size >> 6, &geo->cylinders);
put_user(4, &geo->heads);
@@ -364,7 +350,6 @@ static struct block_device_operations xpram_devops =
{
owner: THIS_MODULE,
ioctl: xpram_ioctl,
- open: xpram_open,
};
/*
@@ -441,7 +426,7 @@ static int __init xpram_setup_blkdev(void)
int i, rc = -ENOMEM;
for (i = 0; i < xpram_devs; i++) {
- struct gendisk *disk = alloc_disk();
+ struct gendisk *disk = alloc_disk(1);
if (!disk)
goto out;
xpram_disks[i] = disk;
@@ -481,7 +466,6 @@ static int __init xpram_setup_blkdev(void)
offset += xpram_devices[i].size;
disk->major = XPRAM_MAJOR;
disk->first_minor = i;
- disk->minor_shift = 0;
disk->fops = &xpram_devops;
sprintf(disk->disk_name, "slram%d", i);
set_capacity(disk, xpram_sizes[i] << 1);
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index 16386c234938..932c1ded3f32 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -104,7 +104,6 @@ static void jsf_outl(unsigned long addr, __u32 data)
struct jsfd_part {
unsigned long dbase;
unsigned long dsize;
- int refcnt;
};
struct jsflash {
@@ -454,54 +453,12 @@ static int jsf_open(struct inode * inode, struct file * filp)
return 0; /* XXX What security? */
}
-static int jsfd_open(struct inode *inode, struct file *file)
-{
- struct jsfd_part *jdp;
- int dev;
-
- if (!inode)
- return -EINVAL;
- dev = MINOR(inode->i_rdev);
- if (dev >= JSF_MAX || (dev & JSF_PART_MASK) >= JSF_NPART) {
- printk(KERN_ALERT "jsfd_open: illegal minor %d\n", dev);
- return -ENODEV;
- }
-
- jdp = &jsf0.dv[dev];
- jdp->refcnt++;
-
- return 0;
-}
-
static int jsf_release(struct inode *inode, struct file *file)
{
jsf0.busy = 0;
return 0;
}
-static int jsfd_release(struct inode *inode, struct file *file)
-{
- struct jsfd_part *jdp;
- int dev;
-
- if (!inode)
- return -ENODEV;
- dev = MINOR(inode->i_rdev);
- if (dev >= JSF_MAX || (dev & JSF_PART_MASK) >= JSF_NPART) {
- printk(KERN_ALERT "jsfd_release: illegal minor %d\n", dev);
- return -ENODEV;
- }
-
- jdp = &jsf0.dv[dev];
- if (jdp->refcnt <= 0) {
- printk(KERN_ALERT "jsfd_release: bad ref on minor %d\n", dev);
- } else {
- --jdp->refcnt;
- }
- /* N.B. Doesn't lo->file need an fput?? */
- return 0;
-}
-
static struct file_operations jsf_fops = {
.owner = THIS_MODULE,
.llseek = jsf_lseek,
@@ -517,8 +474,6 @@ static struct miscdevice jsf_dev = { JSF_MINOR, "jsflash", &jsf_fops };
static struct block_device_operations jsfd_fops = {
.owner = THIS_MODULE,
- .open = jsfd_open,
- .release = jsfd_release,
};
static int jsflash_init(void)
@@ -622,7 +577,7 @@ static int jsfd_init(void)
err = -ENOMEM;
for (i = 0; i < JSF_MAX; i++) {
- struct gendisk *disk = alloc_disk();
+ struct gendisk *disk = alloc_disk(1);
if (!disk)
goto out;
jsfd_disk[i] = disk;
@@ -642,13 +597,10 @@ static int jsfd_init(void)
jsf = &jsf0; /* actually, &jsfv[i >> JSF_PART_BITS] */
jdp = &jsf->dv[i&JSF_PART_MASK];
- jdp->refcnt = 0;
-
disk->major = JSFD_MAJOR;
disk->first_minor = i;
sprintf(disk->disk_name, "jsfd%d", i);
disk->fops = &jsfd_fops;
- disk->minor_shift = 0;
set_capacity(disk, jdp->dsize >> 9);
add_disk(disk);
set_device_ro(MKDEV(JSFD_MAJOR, i), 1);
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index d32420b99220..b4ee7032b7d7 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -2604,21 +2604,21 @@ static boolean BusLogic_TargetDeviceInquiry(BusLogic_HostAdapter_T
through Host Adapter.
*/
-static void BusLogic_ReportTargetDeviceInfo(BusLogic_HostAdapter_T
+/*static void BusLogic_ReportTargetDeviceInfo(BusLogic_HostAdapter_T
*HostAdapter)
{
int TargetID;
- /*
+*/ /*
Inhibit the Target Device Inquiry and Reporting if requested.
*/
- if (BusLogic_MultiMasterHostAdapterP(HostAdapter) &&
+/* if (BusLogic_MultiMasterHostAdapterP(HostAdapter) &&
HostAdapter->DriverOptions != NULL &&
HostAdapter->DriverOptions->LocalOptions.InhibitTargetInquiry)
return;
- /*
+*/ /*
Report on the Target Devices found.
*/
- for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+/* for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
{
BusLogic_TargetFlags_T *TargetFlags = &HostAdapter->TargetFlags[TargetID];
if (TargetFlags->TargetExists && !TargetFlags->TargetInfoReported)
@@ -2674,7 +2674,7 @@ static void BusLogic_ReportTargetDeviceInfo(BusLogic_HostAdapter_T
}
}
}
-
+*/
/*
BusLogic_InitializeHostStructure initializes the fields in the SCSI Host
@@ -2700,6 +2700,49 @@ static void BusLogic_InitializeHostStructure(BusLogic_HostAdapter_T
Host->cmd_per_lun = HostAdapter->UntaggedQueueDepth;
}
+/*
+ BusLogic_SlaveAttach will actually set the queue depth on individual
+ scsi devices as they are permanently added to the device chain. We
+ shamelessly rip off the SelectQueueDepths code to make this work mostly
+ like it used to. Since we don't get called once at the end of the scan
+ but instead get called for each device, we have to do things a bit
+ differently.
+*/
+int BusLogic_SlaveAttach(SCSI_Device_T *Device)
+{
+ BusLogic_HostAdapter_T *HostAdapter =
+ (BusLogic_HostAdapter_T *) Device->host->hostdata;
+ int TargetID = Device->id;
+ int QueueDepth = HostAdapter->QueueDepth[TargetID];
+
+ if (HostAdapter->TargetFlags[TargetID].TaggedQueuingSupported &&
+ (HostAdapter->TaggedQueuingPermitted & (1 << TargetID)))
+ {
+ if (QueueDepth == 0)
+ QueueDepth = BusLogic_MaxAutomaticTaggedQueueDepth;
+ HostAdapter->QueueDepth[TargetID] = QueueDepth;
+ scsi_adjust_queue_depth(Device, MSG_SIMPLE_TAG, QueueDepth);
+ }
+ else
+ {
+ HostAdapter->TaggedQueuingPermitted &= ~(1 << TargetID);
+ QueueDepth = HostAdapter->UntaggedQueueDepth;
+ HostAdapter->QueueDepth[TargetID] = QueueDepth;
+ scsi_adjust_queue_depth(Device, 0, QueueDepth);
+ }
+ QueueDepth = 0;
+ for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+ if (HostAdapter->TargetFlags[TargetID].TargetExists)
+ {
+ QueueDepth += HostAdapter->QueueDepth[TargetID];
+ }
+ if (QueueDepth > HostAdapter->AllocatedCCBs)
+ BusLogic_CreateAdditionalCCBs(HostAdapter,
+ QueueDepth
+ - HostAdapter->AllocatedCCBs,
+ false);
+ return 0;
+}
/*
BusLogic_SelectQueueDepths selects Queue Depths for each Target Device based
@@ -2709,7 +2752,7 @@ static void BusLogic_InitializeHostStructure(BusLogic_HostAdapter_T
since all the Target Devices have now been probed.
*/
-static void BusLogic_SelectQueueDepths(SCSI_Host_T *Host,
+/* static void BusLogic_SelectQueueDepths(SCSI_Host_T *Host,
SCSI_Device_T *DeviceList)
{
BusLogic_HostAdapter_T *HostAdapter =
@@ -2764,8 +2807,8 @@ static void BusLogic_SelectQueueDepths(SCSI_Host_T *Host,
for (Device = DeviceList; Device != NULL; Device = Device->next)
if (Device->host == Host)
Device->queue_depth = HostAdapter->QueueDepth[Device->id];
- /* Allocate an extra CCB for each Target Device for a Bus Device Reset. */
- AllocatedQueueDepth += HostAdapter->TargetDeviceCount;
+*/ /* Allocate an extra CCB for each Target Device for a Bus Device Reset. */
+/* AllocatedQueueDepth += HostAdapter->TargetDeviceCount;
if (AllocatedQueueDepth > HostAdapter->DriverQueueDepth)
AllocatedQueueDepth = HostAdapter->DriverQueueDepth;
BusLogic_CreateAdditionalCCBs(HostAdapter,
@@ -2778,7 +2821,7 @@ static void BusLogic_SelectQueueDepths(SCSI_Host_T *Host,
HostAdapter = HostAdapter->Next)
BusLogic_ReportTargetDeviceInfo(HostAdapter);
}
-
+*/
/*
BusLogic_DetectHostAdapter probes for BusLogic Host Adapters at the standard
@@ -2881,7 +2924,10 @@ int BusLogic_DetectHostAdapter(SCSI_Host_Template_T *HostTemplate)
memcpy(HostAdapter, PrototypeHostAdapter, sizeof(BusLogic_HostAdapter_T));
HostAdapter->SCSI_Host = Host;
HostAdapter->HostNumber = Host->host_no;
+ /*
+ * This function is deprecated
Host->select_queue_depths = BusLogic_SelectQueueDepths;
+ */
/*
Add Host Adapter to the end of the list of registered BusLogic
Host Adapters.
diff --git a/drivers/scsi/BusLogic.h b/drivers/scsi/BusLogic.h
index 00fb8207a3a8..4f064b12903e 100644
--- a/drivers/scsi/BusLogic.h
+++ b/drivers/scsi/BusLogic.h
@@ -60,6 +60,7 @@ extern int BusLogic_ResetCommand(SCSI_Command_T *, unsigned int);
extern int BusLogic_BIOSDiskParameters(SCSI_Disk_T *, struct block_device *,
int *);
extern int BusLogic_ProcDirectoryInfo(char *, char **, off_t, int, int, int);
+extern int BusLogic_SlaveAttach(SCSI_Device_T *);
/*
@@ -76,6 +77,7 @@ extern int BusLogic_ProcDirectoryInfo(char *, char **, off_t, int, int, int);
queuecommand: BusLogic_QueueCommand, /* Queue Command Function */ \
abort: BusLogic_AbortCommand, /* Abort Command Function */ \
reset: BusLogic_ResetCommand, /* Reset Command Function */ \
+ slave_attach: BusLogic_SlaveAttach, /* Configure a SCSI_Device*/ \
bios_param: BusLogic_BIOSDiskParameters, /* BIOS Disk Parameters */ \
unchecked_isa_dma: 1, /* Default Initial Value */ \
max_sectors: 128, /* I/O queue len limit */ \
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 797025c9bfc6..27b28318d974 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -128,7 +128,7 @@ static int aac_eh_device_reset(Scsi_Cmnd* cmd);
static int aac_eh_bus_reset(Scsi_Cmnd* cmd);
static int aac_eh_reset(Scsi_Cmnd* cmd);
-static void aac_queuedepth(struct Scsi_Host *, Scsi_Device *);
+static int aac_slave_attach(Scsi_Device *);
/**
* aac_detect - Probe for aacraid cards
@@ -227,12 +227,6 @@ static int aac_detect(Scsi_Host_Template *template)
* value returned as aac->id.
*/
host_ptr->unique_id = aac_count - 1;
- /*
- * This function is called after the device list has
- * been built to find the tagged queueing depth
- * supported for each device.
- */
- host_ptr->select_queue_depths = aac_queuedepth;
aac = (struct aac_dev *)host_ptr->hostdata;
/* attach a pointer back to Scsi_Host */
aac->scsi_host_ptr = host_ptr;
@@ -520,31 +514,25 @@ static int aac_biosparm(Scsi_Disk *disk, struct block_device *bdev, int *geom)
}
/**
- * aac_queuedepth - compute queue depths
- * @host: SCSI host in question
- * @dev: SCSI device we are considering
+ * aac_slave_attach - do device specific setup
+ * @dev: SCSI device we are attaching
*
- * Selects queue depths for each target device based on the host adapter's
- * total capacity and the queue depth supported by the target device.
- * A queue depth of one automatically disables tagged queueing.
+ * Currently, all we do is set the queue depth on the device.
*/
-static void aac_queuedepth(struct Scsi_Host * host, Scsi_Device * dev )
+static int aac_slave_attach(Scsi_Device * dev )
{
- Scsi_Device * dptr;
- dprintk((KERN_DEBUG "aac_queuedepth.\n"));
- dprintk((KERN_DEBUG "Device # Q Depth Online\n"));
- dprintk((KERN_DEBUG "---------------------------\n"));
- for(dptr = dev; dptr != NULL; dptr = dptr->next)
- {
- if(dptr->host == host)
- {
- dptr->queue_depth = 10;
- dprintk((KERN_DEBUG " %2d %d %d\n",
- dptr->id, dptr->queue_depth, dptr->online));
- }
- }
+ if(dev->tagged_supported)
+ scsi_adjust_queue_depth(dev, MSG_ORDERED_TAG, 128);
+ else
+ scsi_adjust_queue_depth(dev, 0, 1);
+
+ dprintk((KERN_DEBUG "(scsi%d:%d:%d:%d) Tagged Queue depth %2d, "
+ "%s\n", dev->host->host_no, dev->channel,
+ dev->id, dev->lun, dev->new_queue_depth,
+ dev->online ? "OnLine" : "OffLine"));
+ return 0;
}
@@ -693,6 +681,7 @@ static Scsi_Host_Template driver_template = {
ioctl: aac_ioctl,
queuecommand: aac_queuecommand,
bios_param: aac_biosparm,
+ slave_attach: aac_slave_attach,
can_queue: AAC_NUM_IO_FIB,
this_id: 16,
sg_tablesize: 16,
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 3aeaa5953e25..6df9ff071b23 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -673,6 +673,10 @@
3.3GJ (4/15/02):
1. hacks for lk 2.5 series (D. Gilbert)
+ 3.3GJD (10/14/02):
+ 1. change select_queue_depths to slave_attach
+ 2. make cmd_per_lun be sane again
+
I. Known Problems/Fix List (XXX)
1. Need to add memory mapping workaround. Test the memory mapping.
@@ -4208,8 +4212,7 @@ STATIC PortAddr _asc_def_iop_base[];
*/
STATIC void advansys_interrupt(int, void *, struct pt_regs *);
-STATIC void advansys_select_queue_depths(struct Scsi_Host *,
- Scsi_Device *);
+STATIC int advansys_slave_attach(Scsi_Device *);
STATIC void asc_scsi_done_list(Scsi_Cmnd *, int from_isr);
STATIC int asc_execute_scsi_cmnd(Scsi_Cmnd *);
STATIC int asc_build_req(asc_board_t *, Scsi_Cmnd *);
@@ -5307,17 +5310,15 @@ advansys_detect(Scsi_Host_Template *tpnt)
* compiled as a module and 'cmd_per_lun' is zero, the Mid-Level
* SCSI function 'allocate_device' will panic. To allow the driver
* to work as a module in these kernels set 'cmd_per_lun' to 1.
- */
+ *
+ * Note: This is wrong. cmd_per_lun should be set to the depth
+ * you want on untagged devices always.
#ifdef MODULE
+ */
shp->cmd_per_lun = 1;
-#else /* MODULE */
+/* #else
shp->cmd_per_lun = 0;
-#endif /* MODULE */
- /*
- * Use the host 'select_queue_depths' function to determine
- * the number of commands to queue per device.
- */
- shp->select_queue_depths = advansys_select_queue_depths;
+#endif */
/*
* Set the maximum number of scatter-gather elements the
@@ -6346,34 +6347,33 @@ advansys_interrupt(int irq, void *dev_id, struct pt_regs *regs)
* Set the number of commands to queue per device for the
* specified host adapter.
*/
-STATIC void
-advansys_select_queue_depths(struct Scsi_Host *shp, Scsi_Device *devicelist)
+STATIC int
+advansys_slave_attach(Scsi_Device *device)
{
- Scsi_Device *device;
asc_board_t *boardp;
- boardp = ASC_BOARDP(shp);
+ boardp = ASC_BOARDP(device->host);
boardp->flags |= ASC_SELECT_QUEUE_DEPTHS;
- for (device = devicelist; device != NULL; device = device->next) {
- if (device->host != shp) {
- continue;
- }
- /*
- * Save a pointer to the device and set its initial/maximum
- * queue depth.
- */
+ /*
+ * Save a pointer to the device and set its initial/maximum
+ * queue depth. Only save the pointer for a lun0 dev though.
+ */
+ if(device->lun == 0)
boardp->device[device->id] = device;
+ if(device->tagged_supported) {
if (ASC_NARROW_BOARD(boardp)) {
- device->queue_depth =
- boardp->dvc_var.asc_dvc_var.max_dvc_qng[device->id];
+ scsi_adjust_queue_depth(device, MSG_ORDERED_TAG,
+ boardp->dvc_var.asc_dvc_var.max_dvc_qng[device->id]);
} else {
- device->queue_depth =
- boardp->dvc_var.adv_dvc_var.max_dvc_qng;
+ scsi_adjust_queue_depth(device, MSG_ORDERED_TAG,
+ boardp->dvc_var.adv_dvc_var.max_dvc_qng);
}
- ASC_DBG3(1,
- "advansys_select_queue_depths: shp 0x%lx, id %d, depth %d\n",
- (ulong) shp, device->id, device->queue_depth);
+ } else {
+ scsi_adjust_queue_depth(device, 0, device->host->cmd_per_lun);
}
+ ASC_DBG3(1, "advansys_slave_attach: shp 0x%lx, id %d, depth %d\n",
+ (ulong) shp, device->id, device->queue_depth);
+ return 0;
}
/*
@@ -8432,7 +8432,7 @@ asc_prt_driver_conf(struct Scsi_Host *shp, char *cp, int cplen)
continue;
}
len = asc_prt_line(cp, leftlen, " %X:%d",
- i, boardp->device[i]->queue_depth);
+ i, boardp->device[i]->current_queue_depth);
ASC_PRT_NEXT();
}
len = asc_prt_line(cp, leftlen, "\n");
diff --git a/drivers/scsi/advansys.h b/drivers/scsi/advansys.h
index 756e5c64cc78..131ee5378922 100644
--- a/drivers/scsi/advansys.h
+++ b/drivers/scsi/advansys.h
@@ -53,6 +53,7 @@ const char *advansys_info(struct Scsi_Host *);
int advansys_queuecommand(Scsi_Cmnd *, void (* done)(Scsi_Cmnd *));
int advansys_reset(Scsi_Cmnd *);
int advansys_biosparam(Disk *, struct block_device *, int[]);
+static int advansys_slave_attach(Scsi_Device *);
#ifdef CONFIG_PROC_FS
#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,3,28)
extern struct proc_dir_entry proc_scsi_advansys;
@@ -79,6 +80,7 @@ void advansys_setup(char *, int *);
queuecommand: advansys_queuecommand, \
eh_bus_reset_handler: advansys_reset, \
bios_param: advansys_biosparam, \
+ slave_attach: advansys_slave_attach, \
/* \
* Because the driver may control an ISA adapter 'unchecked_isa_dma' \
* must be set. The flag will be cleared in advansys_detect for non-ISA \
diff --git a/drivers/scsi/aic7xxx/aic7xxx_linux.c b/drivers/scsi/aic7xxx/aic7xxx_linux.c
index 70c2109703d5..21e35f436d75 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_linux.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_linux.c
@@ -430,8 +430,7 @@ static void ahc_linux_freeze_sim_queue(struct ahc_softc *ahc);
static void ahc_linux_release_sim_queue(u_long arg);
static int ahc_linux_queue_recovery_cmd(Scsi_Cmnd *cmd, scb_flag flag);
static void ahc_linux_initialize_scsi_bus(struct ahc_softc *ahc);
-static void ahc_linux_select_queue_depth(struct Scsi_Host *host,
- Scsi_Device *scsi_devs);
+static int ahc_linux_slave_attach(Scsi_Device *device);
static void ahc_linux_device_queue_depth(struct ahc_softc *ahc,
Scsi_Device *device);
static struct ahc_linux_target* ahc_linux_alloc_target(struct ahc_softc*,
@@ -1131,7 +1130,6 @@ ahc_linux_register_host(struct ahc_softc *ahc, Scsi_Host_Template *template)
host->can_queue = AHC_MAX_QUEUE;
host->cmd_per_lun = 2;
host->sg_tablesize = AHC_NSEG;
- host->select_queue_depths = ahc_linux_select_queue_depth;
/* XXX No way to communicate the ID for multiple channels */
host->this_id = ahc->our_id;
host->irq = ahc->platform_data->irq;
@@ -1449,25 +1447,17 @@ ahc_platform_abort_scbs(struct ahc_softc *ahc, int target, char channel,
* Sets the queue depth for each SCSI device hanging
* off the input host adapter.
*/
-static void
-ahc_linux_select_queue_depth(struct Scsi_Host * host,
- Scsi_Device * scsi_devs)
+static int
+ahc_linux_slave_attach(Scsi_Device * device)
{
- Scsi_Device *device;
struct ahc_softc *ahc;
u_long flags;
- int scbnum;
- ahc = *((struct ahc_softc **)host->hostdata);
+ ahc = *((struct ahc_softc **)device->host->hostdata);
ahc_lock(ahc, &flags);
- scbnum = 0;
- for (device = scsi_devs; device != NULL; device = device->next) {
- if (device->host == host) {
- ahc_linux_device_queue_depth(ahc, device);
- scbnum += device->queue_depth;
- }
- }
+ ahc_linux_device_queue_depth(ahc, device);
ahc_unlock(ahc, &flags);
+ return 0;
}
/*
@@ -1512,7 +1502,8 @@ ahc_linux_device_queue_depth(struct ahc_softc *ahc, Scsi_Device * device)
}
}
if (tags != 0) {
- device->queue_depth = tags;
+ scsi_adjust_queue_depth(device, MSG_ORDERED_TAG, tags);
+ /* device->queue_depth = tags; */
ahc_set_tags(ahc, &devinfo, AHC_QUEUE_TAGGED);
printf("scsi%d:%c:%d:%d: Tagged Queuing enabled. Depth %d\n",
ahc->platform_data->host->host_no, device->channel + 'A',
@@ -1523,8 +1514,9 @@ ahc_linux_device_queue_depth(struct ahc_softc *ahc, Scsi_Device * device)
* us at any time even though we can only execute them
* serially on the controller/device. This should remove
* some latency.
- */
device->queue_depth = 2;
+ */
+ scsi_adjust_queue_depth(device, 0, device->host->cmd_per_lun);
}
}
diff --git a/drivers/scsi/aic7xxx/aic7xxx_linux_host.h b/drivers/scsi/aic7xxx/aic7xxx_linux_host.h
index 4c3735f5a392..de53201f4df1 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_linux_host.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_linux_host.h
@@ -80,7 +80,7 @@ int ahc_linux_abort(Scsi_Cmnd *);
eh_host_reset_handler: NULL, \
abort: NULL, \
reset: NULL, \
- slave_attach: NULL, \
+ slave_attach: ahc_linux_slave_attach, \
bios_param: AIC7XXX_BIOSPARAM, \
can_queue: 253, /* max simultaneous cmds */\
this_id: -1, /* scsi id of host adapter */\
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 9be5e152413b..a8b2860f9c21 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -48,7 +48,6 @@ MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
#include <linux/proc_fs.h>
#include <linux/blk.h>
#include <linux/delay.h> /* for udelay */
-#include <linux/tqueue.h>
#include <linux/interrupt.h>
#include <linux/kernel.h> /* for printk */
#include <linux/sched.h>
@@ -356,23 +355,20 @@ static void adpt_inquiry(adpt_hba* pHba)
}
-static void adpt_select_queue_depths(struct Scsi_Host *host, Scsi_Device * devicelist)
+static int adpt_slave_attach(Scsi_Device * device)
{
- Scsi_Device *device; /* scsi layer per device information */
+ struct Scsi_Host *host = device->host;
adpt_hba* pHba;
pHba = (adpt_hba *) host->hostdata[0];
- for (device = devicelist; device != NULL; device = device->next) {
- if (device->host != host) {
- continue;
- }
- if (host->can_queue) {
- device->queue_depth = host->can_queue - 1;
- } else {
- device->queue_depth = 1;
- }
+ if (host->can_queue && device->tagged_supported) {
+ scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG,
+ host->can_queue - 1);
+ } else {
+ scsi_adjust_queue_depth(device, 0, 1);
}
+ return 0;
}
static int adpt_queue(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
@@ -2194,11 +2190,10 @@ static s32 adpt_scsi_register(adpt_hba* pHba,Scsi_Host_Template * sht)
host->max_id = 16;
host->max_lun = 256;
host->max_channel = pHba->top_scsi_channel + 1;
- host->cmd_per_lun = 256;
+ host->cmd_per_lun = 1;
host->unique_id = (uint) pHba;
host->sg_tablesize = pHba->sg_tablesize;
host->can_queue = pHba->post_fifo_size;
- host->select_queue_depths = adpt_select_queue_depths;
return 0;
}
diff --git a/drivers/scsi/dpti.h b/drivers/scsi/dpti.h
index dfff119adc2b..f9ff2b603629 100644
--- a/drivers/scsi/dpti.h
+++ b/drivers/scsi/dpti.h
@@ -43,6 +43,7 @@ static int adpt_queue(Scsi_Cmnd * cmd, void (*cmdcomplete) (Scsi_Cmnd *));
static int adpt_abort(Scsi_Cmnd * cmd);
static int adpt_reset(Scsi_Cmnd* cmd);
static int adpt_release(struct Scsi_Host *host);
+static int adpt_slave_attach(Scsi_Device *);
static const char *adpt_info(struct Scsi_Host *pSHost);
static int adpt_bios_param(Disk * disk, struct block_device *dev, int geom[]);
@@ -90,10 +91,11 @@ static int adpt_device_reset(Scsi_Cmnd* cmd);
eh_bus_reset_handler: adpt_bus_reset, \
eh_host_reset_handler: adpt_reset, \
bios_param: adpt_bios_param, \
+ slave_attach: adpt_slave_attach, \
can_queue: MAX_TO_IOP_MESSAGES, /* max simultaneous cmds */\
this_id: 7, /* scsi id of host adapter */\
sg_tablesize: 0, /* max scatter-gather cmds */\
- cmd_per_lun: 256, /* cmds per lun (linked cmds) */\
+ cmd_per_lun: 1, /* cmds per lun (linked cmds) */\
use_clustering: ENABLE_CLUSTERING, \
proc_name: "dpt_i2o" /* this is the name of our proc node*/ \
}
@@ -346,7 +348,6 @@ static s32 adpt_rescan(adpt_hba* pHba);
static s32 adpt_i2o_reparse_lct(adpt_hba* pHba);
static s32 adpt_send_nop(adpt_hba*pHba,u32 m);
static void adpt_i2o_delete_hba(adpt_hba* pHba);
-static void adpt_select_queue_depths(struct Scsi_Host *host, Scsi_Device * devicelist);
static void adpt_inquiry(adpt_hba* pHba);
static void adpt_fail_posted_scbs(adpt_hba* pHba);
static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun);
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index 9690e38bed42..41835b5571a2 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -750,63 +750,34 @@ static int max_queue_depth = CONFIG_SCSI_EATA_MAX_TAGS;
static int max_queue_depth = MAX_CMD_PER_LUN;
#endif
-static void select_queue_depths(struct Scsi_Host *host, Scsi_Device *devlist) {
- Scsi_Device *dev;
- int j, ntag = 0, nuntag = 0, tqd, utqd;
+static int eata2x_slave_attach(Scsi_Device *dev) {
+ int j, tqd, utqd;
+ char *link_suffix = "";
+ struct Scsi_Host *host = dev->host;
j = ((struct hostdata *) host->hostdata)->board_number;
- for(dev = devlist; dev; dev = dev->next) {
-
- if (dev->host != host) continue;
-
- if (TLDEV(dev->type) && (dev->tagged_supported || linked_comm))
- ntag++;
- else
- nuntag++;
- }
-
utqd = MAX_CMD_PER_LUN;
+ tqd = (host->can_queue - utqd);
- tqd = (host->can_queue - utqd * nuntag) / (ntag ? ntag : 1);
-
- if (tqd > max_queue_depth) tqd = max_queue_depth;
-
- if (tqd < MAX_CMD_PER_LUN) tqd = MAX_CMD_PER_LUN;
-
- for(dev = devlist; dev; dev = dev->next) {
- char *tag_suffix = "", *link_suffix = "";
-
- if (dev->host != host) continue;
-
- if (TLDEV(dev->type) && (dev->tagged_supported || linked_comm))
- dev->queue_depth = tqd;
+ if (TLDEV(dev->type) && (dev->tagged_supported || linked_comm)) {
+ if(!dev->tagged_supported)
+ scsi_adjust_queue_depth(dev, 0, tqd);
else
- dev->queue_depth = utqd;
-
- if (TLDEV(dev->type)) {
- if (linked_comm && dev->queue_depth > 2)
- link_suffix = ", sorted";
- else
- link_suffix = ", unsorted";
- }
-
- if (tagged_comm && dev->tagged_supported && TLDEV(dev->type)) {
- dev->tagged_queue = 1;
- dev->current_tag = 1;
- }
-
- if (dev->tagged_supported && TLDEV(dev->type) && dev->tagged_queue)
- tag_suffix = ", soft-tagged";
- else if (dev->tagged_supported && TLDEV(dev->type))
- tag_suffix = ", tagged";
+ scsi_adjust_queue_depth(dev, MSG_SIMPLE_TAG, tqd);
+ } else {
+ scsi_adjust_queue_depth(dev, 0, utqd);
+ }
- printk("%s: scsi%d, channel %d, id %d, lun %d, cmds/lun %d%s%s.\n",
- BN(j), host->host_no, dev->channel, dev->id, dev->lun,
- dev->queue_depth, link_suffix, tag_suffix);
- }
+ if (!dev->simple_tags && dev->new_queue_depth > 2)
+ link_suffix = ", sorted";
+ else if (dev->simple_tags)
+ link_suffix = ", unsorted";
- return;
+ printk("%s: scsi%d, channel %d, id %d, lun %d, cmds/lun %d%s.\n",
+ BN(j), host->host_no, dev->channel, dev->id, dev->lun,
+ dev->new_queue_depth, link_suffix);
+ return 0;
}
static inline int wait_on_busy(unsigned long iobase, unsigned int loop) {
@@ -1071,7 +1042,6 @@ static inline int port_detect \
sh[j]->this_id = (ushort) info.host_addr[3];
sh[j]->can_queue = (ushort) be16_to_cpu(info.queue_size);
sh[j]->cmd_per_lun = MAX_CMD_PER_LUN;
- sh[j]->select_queue_depths = select_queue_depths;
memset(HD(j), 0, sizeof(struct hostdata));
HD(j)->subversion = subversion;
HD(j)->protocol_rev = protocol_rev;
@@ -1542,7 +1512,7 @@ static inline int do_qcomm(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) {
/* Map DMA buffers and SG list */
map_dma(i, j);
- if (SCpnt->device->tagged_queue) {
+ if (SCpnt->device->simple_tags) {
if (HD(j)->target_redo[SCpnt->target][SCpnt->channel] ||
HD(j)->target_to[SCpnt->target][SCpnt->channel])
@@ -1560,8 +1530,7 @@ static inline int do_qcomm(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) {
cpp->mess[1] = SCpnt->device->current_tag++;
}
- if (linked_comm && SCpnt->device->queue_depth > 2
- && TLDEV(SCpnt->device->type)) {
+ if (SCpnt->device->new_queue_depth > 2 && !SCpnt->device->simple_tags) {
HD(j)->cp_stat[i] = READY;
flush_dev(SCpnt->device, SCpnt->request->sector, j, FALSE);
return 0;
@@ -2071,7 +2040,7 @@ static inline void ihdlr(int irq, unsigned int j) {
sync_dma(i, j);
- if (linked_comm && SCpnt->device->queue_depth > 2
+ if (linked_comm && SCpnt->device->new_queue_depth > 2
&& TLDEV(SCpnt->device->type))
flush_dev(SCpnt->device, SCpnt->request->sector, j, TRUE);
diff --git a/drivers/scsi/eata.h b/drivers/scsi/eata.h
index 56f00bf01c7b..471e51ebfb55 100644
--- a/drivers/scsi/eata.h
+++ b/drivers/scsi/eata.h
@@ -12,6 +12,7 @@ int eata2x_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
int eata2x_abort(Scsi_Cmnd *);
int eata2x_reset(Scsi_Cmnd *);
int eata2x_biosparam(Disk *, struct block_device *, int *);
+static int eata2x_slave_attach(Scsi_Device *);
#define EATA_VERSION "7.22.00"
@@ -27,6 +28,7 @@ int eata2x_biosparam(Disk *, struct block_device *, int *);
eh_bus_reset_handler: NULL, \
eh_host_reset_handler: eata2x_reset, \
bios_param: eata2x_biosparam, \
+ slave_attach: eata2x_slave_attach, \
this_id: 7, \
unchecked_isa_dma: 1, \
use_clustering: ENABLE_CLUSTERING, \
diff --git a/drivers/scsi/fcal.c b/drivers/scsi/fcal.c
index 47d20082c1af..77192c05310f 100644
--- a/drivers/scsi/fcal.c
+++ b/drivers/scsi/fcal.c
@@ -70,17 +70,21 @@ static unsigned char target2alpa[] = {
static int fcal_encode_addr(Scsi_Cmnd *SCpnt, u16 *addr, fc_channel *fc, fcp_cmnd *fcmd);
-static void fcal_select_queue_depths(struct Scsi_Host *host, Scsi_Device *devlist)
+int fcal_slave_attach(Scsi_Device *device)
{
- Scsi_Device *device;
+ int depth_to_use;
- for (device = devlist; device; device = device->next) {
- if (device->host != host) continue;
- if (device->tagged_supported)
- device->queue_depth = /* 254 */ 8;
- else
- device->queue_depth = 2;
- }
+ if (device->tagged_supported)
+ depth_to_use = /* 254 */ 8;
+ else
+ depth_to_use = 2;
+
+ scsi_adjust_queue_depth(device,
+ (device->tagged_supported ?
+ MSG_SIMPLE_TAG : 0),
+ depth_to_use);
+
+ return 0;
}
/* Detect all FC Arbitrated Loops attached to the machine.
@@ -165,7 +169,6 @@ int __init fcal_detect(Scsi_Host_Template *tpnt)
#ifdef __sparc_v9__
host->unchecked_isa_dma = 1;
#endif
- host->select_queue_depths = fcal_select_queue_depths;
fc->channels = 1;
fc->targets = 127;
diff --git a/drivers/scsi/fcal.h b/drivers/scsi/fcal.h
index 8246571b9cb8..194e3d0e0637 100644
--- a/drivers/scsi/fcal.h
+++ b/drivers/scsi/fcal.h
@@ -23,6 +23,7 @@ struct fcal {
int fcal_detect(Scsi_Host_Template *);
int fcal_release(struct Scsi_Host *);
int fcal_proc_info (char *, char **, off_t, int, int, int);
+int fcal_slave_attach(Scsi_Device *);
#define FCAL { \
name: "Fibre Channel Arbitrated Loop",\
@@ -30,6 +31,7 @@ int fcal_proc_info (char *, char **, off_t, int, int, int);
release: fcal_release, \
proc_info: fcal_proc_info, \
queuecommand: fcp_scsi_queuecommand, \
+ slave_attach: fcal_slave_attach, \
can_queue: FCAL_CAN_QUEUE, \
this_id: -1, \
sg_tablesize: 1, \
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 1e295555b26f..520f31dfbf82 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -229,7 +229,6 @@ struct Scsi_Host * scsi_register(Scsi_Host_Template * tpnt, int j)
if (!blk_nohighio)
retval->highmem_io = tpnt->highmem_io;
- retval->select_queue_depths = tpnt->select_queue_depths;
retval->max_sectors = tpnt->max_sectors;
retval->use_blk_tcq = tpnt->use_blk_tcq;
diff --git a/drivers/scsi/hosts.h b/drivers/scsi/hosts.h
index 44eb4ac53880..a899c89ded0b 100644
--- a/drivers/scsi/hosts.h
+++ b/drivers/scsi/hosts.h
@@ -266,14 +266,6 @@ typedef struct SHT
*/
int (* bios_param)(Disk *, struct block_device *, int []);
-
- /*
- * Used to set the queue depth for a specific device.
- *
- * Once the slave_attach() function is in full use, this will go away.
- */
- void (*select_queue_depths)(struct Scsi_Host *, Scsi_Device *);
-
/*
* This determines if we will use a non-interrupt driven
* or an interrupt driven scheme, It is set to the maximum number
@@ -384,6 +376,8 @@ struct Scsi_Host
*/
struct Scsi_Host * next;
Scsi_Device * host_queue;
+ struct list_head all_scsi_hosts;
+ struct list_head my_devices;
spinlock_t default_lock;
spinlock_t *host_lock;
@@ -489,8 +483,6 @@ struct Scsi_Host
*/
unsigned int max_host_blocked;
- void (*select_queue_depths)(struct Scsi_Host *, Scsi_Device *);
-
/*
* For SCSI hosts which are PCI devices, set pci_dev so that
* we can do BIOS EDD 3.0 mappings
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 6963127669ce..fa060c14118c 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -1879,10 +1879,12 @@ ips_slave_attach(Scsi_Device *SDptr)
int min;
ha = IPS_HA(SDptr->host);
- min = ha->max_cmds / 4;
- if (min < 8)
- min = ha->max_cmds - 1;
- scsi_adjust_queue_depth(SDptr, MSG_ORDERED_TAG, min);
+ if (SDptr->tagged_supported) {
+ min = ha->max_cmds / 2;
+ if (min <= 16)
+ min = ha->max_cmds - 1;
+ scsi_adjust_queue_depth(SDptr, MSG_ORDERED_TAG, min);
+ }
return 0;
}
diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
index e13b6bc9d4cc..13d2ecb5a6ba 100644
--- a/drivers/scsi/ips.h
+++ b/drivers/scsi/ips.h
@@ -429,7 +429,7 @@
can_queue : 0, \
this_id: -1, \
sg_tablesize : IPS_MAX_SG, \
- cmd_per_lun: 16, \
+ cmd_per_lun: 3, \
present : 0, \
unchecked_isa_dma : 0, \
use_clustering : ENABLE_CLUSTERING, \
@@ -458,7 +458,7 @@
can_queue : 0, \
this_id: -1, \
sg_tablesize : IPS_MAX_SG, \
- cmd_per_lun: 16, \
+ cmd_per_lun: 3, \
present : 0, \
unchecked_isa_dma : 0, \
use_clustering : ENABLE_CLUSTERING, \
@@ -488,7 +488,7 @@
can_queue : 0, \
this_id: -1, \
sg_tablesize : IPS_MAX_SG, \
- cmd_per_lun: 16, \
+ cmd_per_lun: 3, \
present : 0, \
unchecked_isa_dma : 0, \
use_clustering : ENABLE_CLUSTERING, \
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index c056da9c0ed9..26ce03f229c2 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -378,8 +378,6 @@ static Scsi_Host_Template *the_template = NULL;
#define ScsiResult(host_code, scsi_code) (((host_code) << 16) + ((scsi_code) & 0x7f))
-static void ncr53c8xx_select_queue_depths(
- struct Scsi_Host *host, struct scsi_device *devlist);
static void ncr53c8xx_intr(int irq, void *dev_id, struct pt_regs * regs);
static void ncr53c8xx_timeout(unsigned long np);
@@ -3710,7 +3708,6 @@ ncr_attach (Scsi_Host_Template *tpnt, int unit, ncr_device *device)
instance->dma_channel = 0;
instance->cmd_per_lun = MAX_TAGS;
instance->can_queue = (MAX_START-4);
- instance->select_queue_depths = ncr53c8xx_select_queue_depths;
scsi_set_pci_device(instance, device->pdev);
#ifdef SCSI_NCR_INTEGRITY_CHECKING
@@ -8500,56 +8497,57 @@ static void __init ncr_getclock (ncb_p np, int mult)
** Linux select queue depths function
*/
-static void ncr53c8xx_select_queue_depths(struct Scsi_Host *host, struct scsi_device *devlist)
+int ncr53c8xx_slave_attach(Scsi_Device *device)
{
- struct scsi_device *device;
+ struct Scsi_Host *host = device->host;
+ ncb_p np;
+ tcb_p tp;
+ lcb_p lp;
+ int numtags, depth_to_use;
- for (device = devlist; device; device = device->next) {
- ncb_p np;
- tcb_p tp;
- lcb_p lp;
- int numtags;
+ np = ((struct host_data *) host->hostdata)->ncb;
+ tp = &np->target[device->id];
+ lp = tp->lp[device->lun];
- if (device->host != host)
- continue;
+ /*
+ ** Select queue depth from driver setup.
+ ** Donnot use more than configured by user.
+ ** Use at least 2.
+ ** Donnot use more than our maximum.
+ */
+ numtags = device_queue_depth(np->unit, device->id, device->lun);
+ if (numtags > tp->usrtags)
+ numtags = tp->usrtags;
+ if (!device->tagged_supported)
+ numtags = 1;
+ depth_to_use = numtags;
+ if (depth_to_use < 2)
+ depth_to_use = 2;
+ if (depth_to_use > MAX_TAGS)
+ depth_to_use = MAX_TAGS;
- np = ((struct host_data *) host->hostdata)->ncb;
- tp = &np->target[device->id];
- lp = tp->lp[device->lun];
+ scsi_adjust_queue_depth(device,
+ (device->tagged_supported ?
+ MSG_SIMPLE_TAG : 0),
+ depth_to_use);
- /*
- ** Select queue depth from driver setup.
- ** Donnot use more than configured by user.
- ** Use at least 2.
- ** Donnot use more than our maximum.
- */
- numtags = device_queue_depth(np->unit, device->id, device->lun);
- if (numtags > tp->usrtags)
- numtags = tp->usrtags;
- if (!device->tagged_supported)
- numtags = 1;
- device->queue_depth = numtags;
- if (device->queue_depth < 2)
- device->queue_depth = 2;
- if (device->queue_depth > MAX_TAGS)
- device->queue_depth = MAX_TAGS;
-
- /*
- ** Since the queue depth is not tunable under Linux,
- ** we need to know this value in order not to
- ** announce stupid things to user.
- */
- if (lp) {
- lp->numtags = lp->maxtags = numtags;
- lp->scdev_depth = device->queue_depth;
- }
- ncr_setup_tags (np, device->id, device->lun);
+ /*
+ ** Since the queue depth is not tunable under Linux,
+ ** we need to know this value in order not to
+ ** announce stupid things to user.
+ */
+ if (lp) {
+ lp->numtags = lp->maxtags = numtags;
+ lp->scdev_depth = depth_to_use;
+ }
+ ncr_setup_tags (np, device->id, device->lun);
#ifdef DEBUG_NCR53C8XX
-printk("ncr53c8xx_select_queue_depth: host=%d, id=%d, lun=%d, depth=%d\n",
- np->unit, device->id, device->lun, device->queue_depth);
+ printk("ncr53c8xx_select_queue_depth: host=%d, id=%d, lun=%d, depth=%d\n",
+ np->unit, device->id, device->lun, depth_to_use);
#endif
- }
+
+ return 0;
}
/*
diff --git a/drivers/scsi/ncr53c8xx.h b/drivers/scsi/ncr53c8xx.h
index ac4e795a1403..ed4e569aa613 100644
--- a/drivers/scsi/ncr53c8xx.h
+++ b/drivers/scsi/ncr53c8xx.h
@@ -59,6 +59,7 @@ int ncr53c8xx_detect(Scsi_Host_Template *tpnt);
const char *ncr53c8xx_info(struct Scsi_Host *host);
int ncr53c8xx_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
int ncr53c8xx_reset(Scsi_Cmnd *, unsigned int);
+int ncr53c8xx_slave_attach(Scsi_Device *);
#ifdef MODULE
int ncr53c8xx_release(struct Scsi_Host *);
@@ -74,6 +75,7 @@ int ncr53c8xx_release(struct Scsi_Host *);
release: ncr53c8xx_release, \
info: ncr53c8xx_info, \
queuecommand: ncr53c8xx_queue_command,\
+ slave_attach: ncr53c8xx_slave_attach, \
abort: ncr53c8xx_abort, \
reset: ncr53c8xx_reset, \
bios_param: scsicam_bios_param, \
diff --git a/drivers/scsi/pluto.c b/drivers/scsi/pluto.c
index c812e3b291d6..52d03fa98168 100644
--- a/drivers/scsi/pluto.c
+++ b/drivers/scsi/pluto.c
@@ -71,17 +71,21 @@ static void __init pluto_detect_scsi_done(Scsi_Cmnd *SCpnt)
up(&fc_sem);
}
-static void pluto_select_queue_depths(struct Scsi_Host *host, Scsi_Device *devlist)
+int pluto_slave_attach(Scsi_Device *device)
{
- Scsi_Device *device;
-
- for (device = devlist; device; device = device->next) {
- if (device->host != host) continue;
- if (device->tagged_supported)
- device->queue_depth = /* 254 */ 8;
- else
- device->queue_depth = 2;
- }
+ int depth_to_use;
+
+ if (device->tagged_supported)
+ depth_to_use = /* 254 */ 8;
+ else
+ depth_to_use = 2;
+
+ scsi_adjust_queue_depth(device,
+ (device->tagged_supported ?
+ MSG_SIMPLE_TAG : 0),
+ depth_to_use);
+
+ return 0;
}
/* Detect all SSAs attached to the machine.
@@ -241,7 +245,6 @@ int __init pluto_detect(Scsi_Host_Template *tpnt)
host->unchecked_isa_dma = 1;
#endif
- host->select_queue_depths = pluto_select_queue_depths;
fc->channels = inq->channels + 1;
fc->targets = inq->targets;
diff --git a/drivers/scsi/pluto.h b/drivers/scsi/pluto.h
index 01d34564f1b8..558693d10b90 100644
--- a/drivers/scsi/pluto.h
+++ b/drivers/scsi/pluto.h
@@ -41,6 +41,7 @@ struct pluto_inquiry {
int pluto_detect(Scsi_Host_Template *);
int pluto_release(struct Scsi_Host *);
const char * pluto_info(struct Scsi_Host *);
+int pluto_slave_attach(Scsi_Device *);
#define PLUTO { \
name: "Sparc Storage Array 100/200", \
@@ -48,6 +49,7 @@ const char * pluto_info(struct Scsi_Host *);
release: pluto_release, \
info: pluto_info, \
queuecommand: fcp_scsi_queuecommand, \
+ slave_attach: pluto_slave_attach, \
can_queue: PLUTO_CAN_QUEUE, \
this_id: -1, \
sg_tablesize: 1, \
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 68731d7505ec..9417679c4ef1 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -382,8 +382,7 @@ static void qla1280_done(struct scsi_qla_host *, srb_t **, srb_t **);
static void qla1280_next(struct scsi_qla_host *, scsi_lu_t *, int);
static void qla1280_putq_t(scsi_lu_t *, srb_t *);
static void qla1280_done_q_put(srb_t *, srb_t **, srb_t **);
-static void qla1280_device_queue_depth(struct scsi_qla_host *, Scsi_Device *);
-static void qla1280_select_queue_depth(struct Scsi_Host *, Scsi_Device *);
+static int qla1280_slave_attach(Scsi_Device *);
#if STOP_ON_ERROR
static void qla1280_panic(char *, struct Scsi_Host *host);
#endif
@@ -840,7 +839,6 @@ qla1280_do_device_init(struct pci_dev *pdev,
host->can_queue = 0xfffff; /* unlimited */
host->cmd_per_lun = 1;
- host->select_queue_depths = qla1280_select_queue_depth;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,18)
host->base = (unsigned char *)ha->mmpbase;
#else
@@ -1796,7 +1794,7 @@ qla1280_do_dpc(void *p)
}
/**************************************************************************
- * qla1280_device_queue_depth
+ * qla1280_slave_attach
*
* Description:
* Determines the queue depth for a given device. There are two ways
@@ -1806,51 +1804,28 @@ qla1280_do_dpc(void *p)
* as the default queue depth. Otherwise, we use either 4 or 8 as the
* default queue depth (dependent on the number of hardware SCBs).
**************************************************************************/
-static void
-qla1280_device_queue_depth(struct scsi_qla_host *p, Scsi_Device * device)
+static int
+qla1280_slave_attach(Scsi_Device * device)
{
- int default_depth = 3;
+ struct scsi_qla_host *p = (struct scsi_qla_host *)device->host->hostdata;
int bus = device->channel;
int target = device->id;
- device->queue_depth = default_depth;
-
+ if (qla1280_check_for_dead_scsi_bus(p, bus))
+ return 1;
if (device->tagged_supported &&
(p->bus_settings[bus].qtag_enables & (BIT_0 << target))) {
- device->tagged_queue = 1;
- device->current_tag = 0;
- device->queue_depth = p->bus_settings[bus].hiwat;
+ scsi_adjust_queue_depth(device, MSG_ORDERED_TAG,
+ p->bus_settings[bus].hiwat);
/* device->queue_depth = 20; */
printk(KERN_INFO "scsi(%li:%d:%d:%d): Enabled tagged queuing, "
"queue depth %d.\n", p->host_no, device->channel,
- device->id, device->lun, device->queue_depth);
+ device->id, device->lun, device->new_queue_depth);
+ } else {
+ scsi_adjust_queue_depth(device, 0 /* TCQ off */, 3);
}
qla12160_get_target_parameters(p, bus, target, device->lun);
-}
-
-/**************************************************************************
- * qla1280_select_queue_depth
- *
- * Sets the queue depth for each SCSI device hanging off the input
- * host adapter. We use a queue depth of 2 for devices that do not
- * support tagged queueing.
- **************************************************************************/
-static void
-qla1280_select_queue_depth(struct Scsi_Host *host, Scsi_Device * scsi_devs)
-{
- Scsi_Device *device;
- struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
-
- ENTER("qla1280_select_queue_depth");
- for (device = scsi_devs; device != NULL; device = device->next) {
- if (device->host == host)
- qla1280_device_queue_depth (ha, device);
- }
-
- if (scsi_devs)
- qla1280_check_for_dead_scsi_bus(ha, scsi_devs->channel);
-
- LEAVE("qla1280_select_queue_depth");
+ return 0;
}
/*
diff --git a/drivers/scsi/qla1280.h b/drivers/scsi/qla1280.h
index 1139d7d023a7..5dbaf0a89cb8 100644
--- a/drivers/scsi/qla1280.h
+++ b/drivers/scsi/qla1280.h
@@ -1314,6 +1314,7 @@ int qla1280_queuecommand(Scsi_Cmnd *, void (*done) (Scsi_Cmnd *));
int qla1280_abort(Scsi_Cmnd *);
int qla1280_reset(Scsi_Cmnd *, unsigned int);
int qla1280_biosparam(Disk *, struct block_device *, int[]);
+static int qla1280_slave_attach(Scsi_Device *);
void qla1280_intr_handler(int, void *, struct pt_regs *);
void qla1280_setup(char *s, int *dummy);
@@ -1342,7 +1343,7 @@ void qla1280_setup(char *s, int *dummy);
/* use_new_eh_code: 0, */ \
abort: qla1280_abort, \
reset: qla1280_reset, \
- slave_attach: NULL, \
+ slave_attach: qla1280_slave_attach, \
bios_param: qla1280_biosparam, \
can_queue: 255, /* max simultaneous cmds */\
this_id: -1, /* scsi id of host adapter */\
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 1ba787fcad2e..d05400f6d047 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -568,7 +568,7 @@ inline void __scsi_release_command(Scsi_Cmnd * SCpnt)
atomic_read(&SCpnt->host->host_active),
SCpnt->host->host_failed));
- if(SDpnt->queue_depth > SDpnt->new_queue_depth) {
+ if(SDpnt->current_queue_depth > SDpnt->new_queue_depth) {
Scsi_Cmnd *prev, *next;
/*
* Release the command block and decrement the queue
@@ -582,10 +582,10 @@ inline void __scsi_release_command(Scsi_Cmnd * SCpnt)
else
prev->next = next->next;
kfree((char *)SCpnt);
- SDpnt->queue_depth--;
- } else if(SDpnt->queue_depth < SDpnt->new_queue_depth) {
+ SDpnt->current_queue_depth--;
+ } else if(SDpnt->current_queue_depth < SDpnt->new_queue_depth) {
alloc_cmd = 1;
- SDpnt->queue_depth++;
+ SDpnt->current_queue_depth++;
}
spin_unlock_irqrestore(&device_request_lock, flags);
@@ -633,7 +633,7 @@ inline void __scsi_release_command(Scsi_Cmnd * SCpnt)
spin_unlock_irqrestore(&device_request_lock, flags);
} else {
spin_lock_irqsave(&device_request_lock, flags);
- SDpnt->queue_depth--;
+ SDpnt->current_queue_depth--;
spin_unlock_irqrestore(&device_request_lock, flags);
}
}
@@ -1505,7 +1505,7 @@ void scsi_release_commandblocks(Scsi_Device * SDpnt)
SDpnt->device_queue = SCnext = SCpnt->next;
kfree((char *) SCpnt);
}
- SDpnt->queue_depth = 0;
+ SDpnt->current_queue_depth = 0;
SDpnt->new_queue_depth = 0;
spin_unlock_irqrestore(&device_request_lock, flags);
}
@@ -1529,7 +1529,7 @@ void scsi_build_commandblocks(Scsi_Device * SDpnt)
unsigned long flags;
Scsi_Cmnd *SCpnt;
- if (SDpnt->queue_depth != 0)
+ if (SDpnt->current_queue_depth != 0)
return;
SCpnt = (Scsi_Cmnd *) kmalloc(sizeof(Scsi_Cmnd), GFP_ATOMIC |
@@ -1567,7 +1567,7 @@ void scsi_build_commandblocks(Scsi_Device * SDpnt)
spin_lock_irqsave(&device_request_lock, flags);
if(SDpnt->new_queue_depth == 0)
SDpnt->new_queue_depth = 1;
- SDpnt->queue_depth++;
+ SDpnt->current_queue_depth++;
SCpnt->next = SDpnt->device_queue;
SDpnt->device_queue = SCpnt;
spin_unlock_irqrestore(&device_request_lock, flags);
@@ -1597,12 +1597,13 @@ void scsi_build_commandblocks(Scsi_Device * SDpnt)
*
* If cmdblocks != 0 then we are a live device. We just set the
* new_queue_depth variable and when the scsi completion handler
- * notices that queue_depth != new_queue_depth it will work to
- * rectify the situation. If new_queue_depth is less than current
- * queue_depth, then it will free the completed command instead of
- * putting it back on the free list and dec queue_depth. Otherwise
- * it will try to allocate a new command block for the device and
- * put it on the free list along with the command that is being
+ * notices that current_queue_depth != new_queue_depth it will
+ * work to rectify the situation. If new_queue_depth is less than
+ * current_queue_depth, then it will free the completed command
+ * instead of putting it back on the free list and dec
+ * current_queue_depth. Otherwise it will try to allocate a new
+ * command block for the device and put it on the free list along
+ * with the command that is being
* completed. Obviously, if the device isn't doing anything then
* neither is this code, so it will bring the devices queue depth
* back into line when the device is actually being used. This
@@ -1648,14 +1649,11 @@ void scsi_adjust_queue_depth(Scsi_Device *SDpnt, int tagged, int tags)
SDpnt->channel, SDpnt->id, SDpnt->lun);
case 0:
SDpnt->ordered_tags = SDpnt->simple_tags = 0;
- if(SDpnt->host->cmd_per_lun)
- SDpnt->new_queue_depth = SDpnt->host->cmd_per_lun;
- else
- SDpnt->new_queue_depth = 1;
+ SDpnt->new_queue_depth = tags;
break;
}
spin_unlock_irqrestore(&device_request_lock, flags);
- if(SDpnt->queue_depth == 0)
+ if(SDpnt->current_queue_depth == 0)
{
scsi_build_commandblocks(SDpnt);
}
@@ -2116,7 +2114,7 @@ int scsi_register_host(Scsi_Host_Template * tpnt)
(*sdtpnt->attach) (SDpnt);
if (SDpnt->attached) {
scsi_build_commandblocks(SDpnt);
- if (SDpnt->queue_depth == 0)
+ if (SDpnt->current_queue_depth == 0)
out_of_space = 1;
}
}
@@ -2405,10 +2403,10 @@ int scsi_register_device(struct Scsi_Device_Template *tpnt)
* If this driver attached to the device, and don't have any
* command blocks for this device, allocate some.
*/
- if (SDpnt->attached && SDpnt->queue_depth == 0) {
+ if (SDpnt->attached && SDpnt->current_queue_depth == 0) {
SDpnt->online = TRUE;
scsi_build_commandblocks(SDpnt);
- if (SDpnt->queue_depth == 0)
+ if (SDpnt->current_queue_depth == 0)
out_of_space = 1;
}
}
@@ -2816,7 +2814,7 @@ Scsi_Device * scsi_get_host_dev(struct Scsi_Host * SHpnt)
SDpnt->new_queue_depth = 1;
scsi_build_commandblocks(SDpnt);
- if(SDpnt->queue_depth == 0) {
+ if(SDpnt->current_queue_depth == 0) {
kfree(SDpnt);
return NULL;
}
diff --git a/drivers/scsi/scsi.h b/drivers/scsi/scsi.h
index cd936544cef6..f616c4b8cdf7 100644
--- a/drivers/scsi/scsi.h
+++ b/drivers/scsi/scsi.h
@@ -557,15 +557,19 @@ struct scsi_device {
*/
struct scsi_device *next; /* Used for linked list */
struct scsi_device *prev; /* Used for linked list */
+ struct list_head siblings; /* list of all devices on this host */
+ struct list_head same_target_siblings; /* just the devices sharing same target id */
wait_queue_head_t scpnt_wait; /* Used to wait if
device is busy */
struct Scsi_Host *host;
request_queue_t request_queue;
atomic_t device_active; /* commands checked out for device */
volatile unsigned short device_busy; /* commands actually active on low-level */
+ struct list_head free_cmnds; /* list of available Scsi_Cmnd structs */
+ struct list_head busy_cmnds; /* list of Scsi_Cmnd structs in use */
Scsi_Cmnd *device_queue; /* queue of SCSI Command structures */
Scsi_Cmnd *current_cmnd; /* currently active command */
- unsigned short queue_depth; /* How deep of a queue we have */
+ unsigned short current_queue_depth;/* How deep of a queue we have */
unsigned short new_queue_depth; /* How deep of a queue we want */
unsigned int id, lun, channel;
@@ -713,6 +717,7 @@ struct scsi_cmnd {
Scsi_Request *sc_request;
struct scsi_cmnd *next;
struct scsi_cmnd *reset_chain;
+ struct list_head list_entry; /* Used to place us on the cmd lists */
int eh_state; /* Used for state tracking in error handlr */
int eh_eflags; /* Used by error handlr */
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 0f52ed55b210..6d174708da82 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1532,7 +1532,7 @@ static int scsi_probe_and_add_lun(Scsi_Device *sdevscan, Scsi_Device **sdevnew,
*/
sdevscan->new_queue_depth = 1;
scsi_build_commandblocks(sdevscan);
- if (sdevscan->queue_depth == 0)
+ if (sdevscan->current_queue_depth == 0)
goto alloc_failed;
sreq = scsi_allocate_request(sdevscan);
@@ -1606,7 +1606,7 @@ alloc_failed:
kfree(scsi_result);
if (sreq != NULL)
scsi_release_request(sreq);
- if (sdevscan->queue_depth != 0)
+ if (sdevscan->current_queue_depth != 0)
scsi_release_commandblocks(sdevscan);
return SCSI_SCAN_NO_RESPONSE;
}
@@ -1762,7 +1762,7 @@ static int scsi_report_lun_scan(Scsi_Device *sdevscan)
sdevscan->new_queue_depth = 1;
scsi_build_commandblocks(sdevscan);
- if (sdevscan->queue_depth == 0) {
+ if (sdevscan->current_queue_depth == 0) {
printk(ALLOC_FAILURE_MSG, __FUNCTION__);
/*
* We are out of memory, don't try scanning any further.
@@ -2030,7 +2030,7 @@ static void scsi_scan_selected_lun(struct Scsi_Host *shost, uint channel,
(*sdt->attach) (sdev);
if (sdev->attached) {
scsi_build_commandblocks(sdev);
- if (sdev->queue_depth == 0)
+ if (sdev->current_queue_depth == 0)
printk(ALLOC_FAILURE_MSG,
__FUNCTION__);
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 5863cdcf9bba..1b7abd00b167 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1386,7 +1386,7 @@ static int sd_attach(Scsi_Device * sdp)
((sdp->type != TYPE_DISK) && (sdp->type != TYPE_MOD)))
return 0;
- gd = alloc_disk();
+ gd = alloc_disk(16);
if (!gd)
return 1;
@@ -1423,7 +1423,6 @@ static int sd_attach(Scsi_Device * sdp)
gd->de = sdp->de;
gd->major = SD_MAJOR(dsk_nr>>4);
gd->first_minor = (dsk_nr & 15)<<4;
- gd->minor_shift = 4;
gd->fops = &sd_fops;
if (dsk_nr > 26)
sprintf(gd->disk_name, "sd%c%c",'a'+dsk_nr/26-1,'a'+dsk_nr%26);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 056861b80c12..a18bf6db8fcf 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -852,7 +852,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
__put_user((int) sdp->device->type, &sg_idp->scsi_type);
__put_user((short) sdp->device->host->cmd_per_lun,
&sg_idp->h_cmd_per_lun);
- __put_user((short) sdp->device->queue_depth,
+ __put_user((short) sdp->device->new_queue_depth,
&sg_idp->d_queue_depth);
__put_user(0, &sg_idp->unused[0]);
__put_user(0, &sg_idp->unused[1]);
@@ -3039,7 +3039,7 @@ sg_proc_dev_info(char *buffer, int *len, off_t * begin, off_t offset, int size)
scsidp->host->host_no, scsidp->channel,
scsidp->id, scsidp->lun, (int) scsidp->type,
(int) scsidp->access_count,
- (int) scsidp->queue_depth,
+ (int) scsidp->new_queue_depth,
(int) scsidp->device_busy,
(int) scsidp->online);
else
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index ab50575b899c..39af5cce16f0 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -726,24 +726,6 @@ cleanup_dev:
return 1;
}
-/* Driverfs file support */
-static ssize_t sr_device_kdev_read(struct device *driverfs_dev,
- char *page, size_t count, loff_t off)
-{
- kdev_t kdev;
- kdev.value=(int)(long)driverfs_dev->driver_data;
- return off ? 0 : sprintf(page, "%x\n",kdev.value);
-}
-static DEVICE_ATTR(kdev,S_IRUGO,sr_device_kdev_read,NULL);
-
-static ssize_t sr_device_type_read(struct device *driverfs_dev,
- char *page, size_t count, loff_t off)
-{
- return off ? 0 : sprintf (page, "CHR\n");
-}
-static DEVICE_ATTR(type,S_IRUGO,sr_device_type_read,NULL);
-
-
void sr_finish()
{
int i;
@@ -757,7 +739,7 @@ void sr_finish()
* with loadable modules. */
if (cd->disk)
continue;
- disk = alloc_disk();
+ disk = alloc_disk(1);
if (!disk)
continue;
if (cd->disk) {
@@ -766,7 +748,6 @@ void sr_finish()
}
disk->major = MAJOR_NR;
disk->first_minor = i;
- disk->minor_shift = 0;
strcpy(disk->disk_name, cd->cdi.name);
disk->fops = &sr_bdops;
disk->flags = GENHD_FL_CD;
@@ -798,22 +779,8 @@ void sr_finish()
*/
get_capabilities(cd);
sr_vendor_init(cd);
-
- sprintf(cd->cdi.cdrom_driverfs_dev.bus_id, "%s:cd",
- cd->device->sdev_driverfs_dev.bus_id);
- sprintf(cd->cdi.cdrom_driverfs_dev.name, "%scdrom",
- cd->device->sdev_driverfs_dev.name);
- cd->cdi.cdrom_driverfs_dev.parent =
- &cd->device->sdev_driverfs_dev;
- cd->cdi.cdrom_driverfs_dev.bus = &scsi_driverfs_bus_type;
- cd->cdi.cdrom_driverfs_dev.driver_data =
- (void *)(long)__mkdev(MAJOR_NR, i);
- device_register(&cd->cdi.cdrom_driverfs_dev);
- device_create_file(&cd->cdi.cdrom_driverfs_dev,
- &dev_attr_type);
- device_create_file(&cd->cdi.cdrom_driverfs_dev,
- &dev_attr_kdev);
disk->de = cd->device->de;
+ disk->driverfs_dev = &cd->device->sdev_driverfs_dev;
register_cdrom(&cd->cdi);
set_capacity(disk, cd->capacity);
add_disk(disk);
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 8f22ecfc2dae..194077d101ba 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -12,13 +12,13 @@
Copyright 1992 - 2002 Kai Makisara
email Kai.Makisara@metla.fi
- Last modified: Sun Sep 29 22:29:16 2002 by makisara
+ Last modified: Tue Oct 15 22:01:04 2002 by makisara
Some small formal changes - aeb, 950809
Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
*/
-static char *verstr = "20020929";
+static char *verstr = "20021015";
#include <linux/module.h>
@@ -291,6 +291,8 @@ static int st_chk_result(Scsi_Tape *STp, Scsi_Request * SRpnt)
if (sense[12] == 0 && sense[13] == 0x17) /* ASC and ASCQ => cleaning requested */
STp->cleaning_req = 1;
+ STp->pos_unknown |= STp->device->was_reset;
+
if ((sense[0] & 0x70) == 0x70 &&
scode == RECOVERED_ERROR
#if ST_RECOVERED_WRITE_FATAL
@@ -566,7 +568,7 @@ static int flush_buffer(Scsi_Tape *STp, int seek_next)
* If there was a bus reset, block further access
* to this device.
*/
- if (STp->device->was_reset)
+ if (STp->pos_unknown)
return (-EIO);
if (STp->ready != ST_READY)
@@ -640,6 +642,52 @@ static int set_mode_densblk(Scsi_Tape * STp, ST_mode * STm)
}
return 0;
}
+
+
+/* Lock or unlock the drive door. Don't use when Scsi_Request allocated. */
+static int do_door_lock(Scsi_Tape * STp, int do_lock)
+{
+ int retval, cmd;
+ DEB(int dev = TAPE_NR(STp->devt);)
+
+
+ cmd = do_lock ? SCSI_IOCTL_DOORLOCK : SCSI_IOCTL_DOORUNLOCK;
+ DEBC(printk(ST_DEB_MSG "st%d: %socking drive door.\n", dev,
+ do_lock ? "L" : "Unl"));
+ retval = scsi_ioctl(STp->device, cmd, NULL);
+ if (!retval) {
+ STp->door_locked = do_lock ? ST_LOCKED_EXPLICIT : ST_UNLOCKED;
+ }
+ else {
+ STp->door_locked = ST_LOCK_FAILS;
+ }
+ return retval;
+}
+
+
+/* Set the internal state after reset */
+static void reset_state(Scsi_Tape *STp)
+{
+ int i;
+ ST_partstat *STps;
+
+ STp->pos_unknown = 0;
+ for (i = 0; i < ST_NBR_PARTITIONS; i++) {
+ STps = &(STp->ps[i]);
+ STps->rw = ST_IDLE;
+ STps->eof = ST_NOEOF;
+ STps->at_sm = 0;
+ STps->last_block_valid = FALSE;
+ STps->drv_block = -1;
+ STps->drv_file = -1;
+ }
+ if (STp->can_partitions) {
+ STp->partition = find_partition(STp);
+ if (STp->partition < 0)
+ STp->partition = 0;
+ STp->new_partition = STp->partition;
+ }
+}
/* Test if the drive is ready. Returns either one of the codes below or a negative system
error code. */
@@ -757,7 +805,7 @@ static int check_tape(Scsi_Tape *STp, struct file *filp)
goto err_out;
if (retval == CHKRES_NEW_SESSION) {
- (STp->device)->was_reset = 0;
+ STp->pos_unknown = 0;
STp->partition = STp->new_partition = 0;
if (STp->can_partitions)
STp->nbr_partitions = 1; /* This guess will be updated later
@@ -1021,7 +1069,7 @@ static int st_flush(struct file *filp)
STm = &(STp->modes[STp->current_mode]);
STps = &(STp->ps[STp->partition]);
- if (STps->rw == ST_WRITING && !(STp->device)->was_reset) {
+ if (STps->rw == ST_WRITING && !STp->pos_unknown) {
result = flush_write_buffer(STp);
if (result != 0 && result != (-ENOSPC))
goto out;
@@ -1040,7 +1088,7 @@ static int st_flush(struct file *filp)
printk(KERN_WARNING "st%d: Number of r/w requests %d, dio used in %d, pages %d (%d).\n",
dev, STp->nbr_requests, STp->nbr_dio, STp->nbr_pages, STp->nbr_combinable));
- if (STps->rw == ST_WRITING && !(STp->device)->was_reset) {
+ if (STps->rw == ST_WRITING && !STp->pos_unknown) {
DEBC(printk(ST_DEB_MSG "st%d: File length %ld bytes.\n",
dev, (long) (filp->f_pos));
@@ -1136,7 +1184,7 @@ static int st_release(struct inode *inode, struct file *filp)
read_unlock(&st_dev_arr_lock);
if (STp->door_locked == ST_LOCKED_AUTO)
- st_int_ioctl(STp, MTUNLOCK, 0);
+ do_door_lock(STp, 0);
normalize_buffer(STp->buffer);
write_lock(&st_dev_arr_lock);
@@ -1189,7 +1237,7 @@ static ssize_t rw_checks(Scsi_Tape *STp, struct file *filp, size_t count, loff_t
* If there was a bus reset, block further access
* to this device.
*/
- if (STp->device->was_reset) {
+ if (STp->pos_unknown) {
retval = (-EIO);
goto out;
}
@@ -1216,7 +1264,7 @@ static ssize_t rw_checks(Scsi_Tape *STp, struct file *filp, size_t count, loff_t
}
if (STp->do_auto_lock && STp->door_locked == ST_UNLOCKED &&
- !st_int_ioctl(STp, MTLOCK, 0))
+ !do_door_lock(STp, 1))
STp->door_locked = ST_LOCKED_AUTO;
out:
@@ -2502,18 +2550,6 @@ static int st_int_ioctl(Scsi_Tape *STp, unsigned int cmd_in, unsigned long arg)
DEBC(printk(ST_DEB_MSG "st%d: Erasing tape.\n", dev));
fileno = blkno = at_sm = 0;
break;
- case MTLOCK:
- chg_eof = FALSE;
- cmd[0] = ALLOW_MEDIUM_REMOVAL;
- cmd[4] = SCSI_REMOVAL_PREVENT;
- DEBC(printk(ST_DEB_MSG "st%d: Locking drive door.\n", dev));
- break;
- case MTUNLOCK:
- chg_eof = FALSE;
- cmd[0] = ALLOW_MEDIUM_REMOVAL;
- cmd[4] = SCSI_REMOVAL_ALLOW;
- DEBC(printk(ST_DEB_MSG "st%d: Unlocking drive door.\n", dev));
- break;
case MTSETBLK: /* Set block length */
case MTSETDENSITY: /* Set tape density */
case MTSETDRVBUFFER: /* Set drive buffering */
@@ -2594,11 +2630,6 @@ static int st_int_ioctl(Scsi_Tape *STp, unsigned int cmd_in, unsigned long arg)
STps->drv_file = fileno;
STps->at_sm = at_sm;
- if (cmd_in == MTLOCK)
- STp->door_locked = ST_LOCKED_EXPLICIT;
- else if (cmd_in == MTUNLOCK)
- STp->door_locked = ST_UNLOCKED;
-
if (cmd_in == MTBSFM)
ioctl_result = st_int_ioctl(STp, MTFSF, 1);
else if (cmd_in == MTFSFM)
@@ -2713,9 +2744,6 @@ static int st_int_ioctl(Scsi_Tape *STp, unsigned int cmd_in, unsigned long arg)
if ((SRpnt->sr_sense_buffer[2] & 0x0f) == BLANK_CHECK)
STps->eof = ST_EOD;
- if (cmd_in == MTLOCK)
- STp->door_locked = ST_LOCK_FAILS;
-
scsi_release_request(SRpnt);
SRpnt = NULL;
}
@@ -3104,7 +3132,7 @@ static int st_ioctl(struct inode *inode, struct file *file,
goto out;
}
- if (!(STp->device)->was_reset) {
+ if (!STp->pos_unknown) {
if (STps->eof == ST_FM_HIT) {
if (mtc.mt_op == MTFSF || mtc.mt_op == MTFSFM ||
@@ -3152,16 +3180,9 @@ static int st_ioctl(struct inode *inode, struct file *file,
retval = (-EIO);
goto out;
}
+ reset_state(STp);
+ /* remove this when the midlevel properly clears was_reset */
STp->device->was_reset = 0;
- if (STp->door_locked != ST_UNLOCKED &&
- STp->door_locked != ST_LOCK_FAILS) {
- if (st_int_ioctl(STp, MTLOCK, 0)) {
- printk(KERN_NOTICE
- "st%d: Could not relock door after bus reset.\n",
- dev);
- STp->door_locked = ST_UNLOCKED;
- }
- }
}
if (mtc.mt_op != MTNOP && mtc.mt_op != MTSETBLK &&
@@ -3170,7 +3191,7 @@ static int st_ioctl(struct inode *inode, struct file *file,
STps->rw = ST_IDLE; /* Prevent automatic WEOF and fsf */
if (mtc.mt_op == MTOFFL && STp->door_locked != ST_UNLOCKED)
- st_int_ioctl(STp, MTUNLOCK, 0); /* Ignore result! */
+ do_door_lock(STp, 0); /* Ignore result! */
if (mtc.mt_op == MTSETDRVBUFFER &&
(mtc.mt_count & MT_ST_OPTIONS) != 0) {
@@ -3238,6 +3259,11 @@ static int st_ioctl(struct inode *inode, struct file *file,
goto out;
}
+ if (mtc.mt_op == MTLOCK || mtc.mt_op == MTUNLOCK) {
+ retval = do_door_lock(STp, (mtc.mt_op == MTLOCK));
+ goto out;
+ }
+
if (STp->can_partitions && STp->ready == ST_READY &&
(i = switch_partition(STp)) < 0) {
retval = i;
@@ -3642,13 +3668,13 @@ static DEVICE_ATTR(type,S_IRUGO,st_device_type_read,NULL);
static struct file_operations st_fops =
{
- owner: THIS_MODULE,
- read: st_read,
- write: st_write,
- ioctl: st_ioctl,
- open: st_open,
- flush: st_flush,
- release: st_release,
+ .owner = THIS_MODULE,
+ .read = st_read,
+ .write = st_write,
+ .ioctl = st_ioctl,
+ .open = st_open,
+ .flush = st_flush,
+ .release = st_release,
};
static int st_attach(Scsi_Device * SDp)
@@ -3909,12 +3935,12 @@ static void st_detach(Scsi_Device * SDp)
&dev_attr_type);
device_remove_file(&tpnt->driverfs_dev_r[mode],
&dev_attr_kdev);
- put_device(&tpnt->driverfs_dev_r[mode]);
+ device_unregister(&tpnt->driverfs_dev_r[mode]);
device_remove_file(&tpnt->driverfs_dev_n[mode],
&dev_attr_type);
device_remove_file(&tpnt->driverfs_dev_n[mode],
&dev_attr_kdev);
- put_device(&tpnt->driverfs_dev_n[mode]);
+ device_unregister(&tpnt->driverfs_dev_n[mode]);
}
if (tpnt->buffer) {
tpnt->buffer->orig_frp_segs = 0;
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index 79f71bb9081e..922b9e07abf9 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -94,6 +94,7 @@ typedef struct {
unsigned char use_pf; /* Set Page Format bit in all mode selects? */
unsigned char try_dio; /* try direct i/o? */
unsigned char c_algo; /* compression algorithm */
+ unsigned char pos_unknown; /* after reset position unknown */
int tape_type;
int write_threshold;
int timeout; /* timeout for normal commands */
diff --git a/drivers/scsi/sym53c8xx.c b/drivers/scsi/sym53c8xx.c
index 5558ae08a692..4e02050bd885 100644
--- a/drivers/scsi/sym53c8xx.c
+++ b/drivers/scsi/sym53c8xx.c
@@ -1341,8 +1341,6 @@ MODULE_PARM(sym53c8xx, "s");
#define SetScsiAbortResult(cmd) SetScsiResult(cmd, DID_ABORT, 0xff)
#endif
-static void sym53c8xx_select_queue_depths(
- struct Scsi_Host *host, struct scsi_device *devlist);
static void sym53c8xx_intr(int irq, void *dev_id, struct pt_regs * regs);
static void sym53c8xx_timeout(unsigned long np);
@@ -5923,8 +5921,6 @@ ncr_attach (Scsi_Host_Template *tpnt, int unit, ncr_device *device)
#endif
#endif
- instance->select_queue_depths = sym53c8xx_select_queue_depths;
-
NCR_UNLOCK_NCB(np, flags);
/*
@@ -13545,56 +13541,57 @@ static int device_queue_depth(ncb_p np, int target, int lun)
return DEF_DEPTH;
}
-static void sym53c8xx_select_queue_depths(struct Scsi_Host *host, struct scsi_device *devlist)
+int sym53c8xx_slave_attach(Scsi_Device *device)
{
- struct scsi_device *device;
-
- for (device = devlist; device; device = device->next) {
- ncb_p np;
- tcb_p tp;
- lcb_p lp;
- int numtags;
+ struct Scsi_Host *host = device->host;
+ ncb_p np;
+ tcb_p tp;
+ lcb_p lp;
+ int numtags, depth_to_use;
- if (device->host != host)
- continue;
+ np = ((struct host_data *) host->hostdata)->ncb;
+ tp = &np->target[device->id];
+ lp = ncr_lp(np, tp, device->lun);
- np = ((struct host_data *) host->hostdata)->ncb;
- tp = &np->target[device->id];
- lp = ncr_lp(np, tp, device->lun);
+ /*
+ ** Select queue depth from driver setup.
+ ** Donnot use more than configured by user.
+ ** Use at least 2.
+ ** Donnot use more than our maximum.
+ */
+ numtags = device_queue_depth(np, device->id, device->lun);
+ if (numtags > tp->usrtags)
+ numtags = tp->usrtags;
+ if (!device->tagged_supported)
+ numtags = 1;
+ depth_to_use = numtags;
+ if (depth_to_use < 2)
+ depth_to_use = 2;
+ if (depth_to_use > MAX_TAGS)
+ depth_to_use = MAX_TAGS;
- /*
- ** Select queue depth from driver setup.
- ** Donnot use more than configured by user.
- ** Use at least 2.
- ** Donnot use more than our maximum.
- */
- numtags = device_queue_depth(np, device->id, device->lun);
- if (numtags > tp->usrtags)
- numtags = tp->usrtags;
- if (!device->tagged_supported)
- numtags = 1;
- device->queue_depth = numtags;
- if (device->queue_depth < 2)
- device->queue_depth = 2;
- if (device->queue_depth > MAX_TAGS)
- device->queue_depth = MAX_TAGS;
+ scsi_adjust_queue_depth(device,
+ (device->tagged_supported ?
+ MSG_SIMPLE_TAG : 0),
+ depth_to_use);
- /*
- ** Since the queue depth is not tunable under Linux,
- ** we need to know this value in order not to
- ** announce stupid things to user.
- */
- if (lp) {
- lp->numtags = lp->maxtags = numtags;
- lp->scdev_depth = device->queue_depth;
- }
- ncr_setup_tags (np, device->id, device->lun);
+ /*
+ ** Since the queue depth is not tunable under Linux,
+ ** we need to know this value in order not to
+ ** announce stupid things to user.
+ */
+ if (lp) {
+ lp->numtags = lp->maxtags = numtags;
+ lp->scdev_depth = depth_to_use;
+ }
+ ncr_setup_tags (np, device->id, device->lun);
#ifdef DEBUG_SYM53C8XX
-printk("sym53c8xx_select_queue_depth: host=%d, id=%d, lun=%d, depth=%d\n",
- np->unit, device->id, device->lun, device->queue_depth);
+ printk("sym53c8xx_select_queue_depth: host=%d, id=%d, lun=%d, depth=%d\n",
+ np->unit, device->id, device->lun, depth_to_use);
#endif
- }
+
+ return 0;
}
/*
diff --git a/drivers/scsi/sym53c8xx.h b/drivers/scsi/sym53c8xx.h
index 256d34b6461b..cc689df0373d 100644
--- a/drivers/scsi/sym53c8xx.h
+++ b/drivers/scsi/sym53c8xx.h
@@ -74,6 +74,7 @@ int sym53c8xx_detect(Scsi_Host_Template *tpnt);
const char *sym53c8xx_info(struct Scsi_Host *host);
int sym53c8xx_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
int sym53c8xx_reset(Scsi_Cmnd *, unsigned int);
+int sym53c8xx_slave_attach(Scsi_Device *);
#ifdef MODULE
int sym53c8xx_release(struct Scsi_Host *);
@@ -89,6 +90,7 @@ int sym53c8xx_release(struct Scsi_Host *);
release: sym53c8xx_release, \
info: sym53c8xx_info, \
queuecommand: sym53c8xx_queue_command,\
+ slave_attach: sym53c8xx_slave_attach, \
abort: sym53c8xx_abort, \
reset: sym53c8xx_reset, \
bios_param: scsicam_bios_param, \
diff --git a/drivers/scsi/sym53c8xx_2/sym53c8xx.h b/drivers/scsi/sym53c8xx_2/sym53c8xx.h
index 0f6114bda636..f36c1c5f2903 100644
--- a/drivers/scsi/sym53c8xx_2/sym53c8xx.h
+++ b/drivers/scsi/sym53c8xx_2/sym53c8xx.h
@@ -89,6 +89,8 @@ int sym53c8xx_eh_device_reset_handler(Scsi_Cmnd *);
int sym53c8xx_eh_bus_reset_handler(Scsi_Cmnd *);
int sym53c8xx_eh_host_reset_handler(Scsi_Cmnd *);
+int sym53c8xx_slave_attach(Scsi_Device *);
+
#ifdef MODULE
int sym53c8xx_release(struct Scsi_Host *);
#else
@@ -109,6 +111,7 @@ int sym53c8xx_release(struct Scsi_Host *);
release: sym53c8xx_release, \
info: sym53c8xx_info, \
queuecommand: sym53c8xx_queue_command, \
+ slave_attach: sym53c8xx_slave_attach, \
eh_abort_handler: sym53c8xx_eh_abort_handler, \
eh_device_reset_handler:sym53c8xx_eh_device_reset_handler, \
eh_bus_reset_handler: sym53c8xx_eh_bus_reset_handler, \
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 564802b6c8c2..685a29399965 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -1327,66 +1327,63 @@ static int device_queue_depth(hcb_p np, int target, int lun)
/*
* Linux entry point for device queue sizing.
*/
-static void
-sym53c8xx_select_queue_depths(struct Scsi_Host *host,
- struct scsi_device *devlist)
+int
+sym53c8xx_slave_attach(Scsi_Device *device)
{
- struct scsi_device *device;
-
- for (device = devlist; device; device = device->next) {
- hcb_p np;
- tcb_p tp;
- lcb_p lp;
- int reqtags;
-
- if (device->host != host)
- continue;
+ struct Scsi_Host *host = device->host;
+ hcb_p np;
+ tcb_p tp;
+ lcb_p lp;
+ int reqtags, depth_to_use;
- np = ((struct host_data *) host->hostdata)->ncb;
- tp = &np->target[device->id];
+ np = ((struct host_data *) host->hostdata)->ncb;
+ tp = &np->target[device->id];
- /*
- * Get user settings for transfer parameters.
- */
- tp->inq_byte7_valid = (INQ7_SYNC|INQ7_WIDE16);
- sym_update_trans_settings(np, tp);
+ /*
+ * Get user settings for transfer parameters.
+ */
+ tp->inq_byte7_valid = (INQ7_SYNC|INQ7_WIDE16);
+ sym_update_trans_settings(np, tp);
- /*
- * Allocate the LCB if not yet.
- * If it fail, we may well be in the sh*t. :)
- */
- lp = sym_alloc_lcb(np, device->id, device->lun);
- if (!lp) {
- device->queue_depth = 1;
- continue;
- }
+ /*
+ * Allocate the LCB if not yet.
+ * If it fail, we may well be in the sh*t. :)
+ */
+ lp = sym_alloc_lcb(np, device->id, device->lun);
+ if (!lp)
+ return -ENOMEM;
- /*
- * Get user flags.
- */
- lp->curr_flags = lp->user_flags;
+ /*
+ * Get user flags.
+ */
+ lp->curr_flags = lp->user_flags;
- /*
- * Select queue depth from driver setup.
- * Donnot use more than configured by user.
- * Use at least 2.
- * Donnot use more than our maximum.
- */
- reqtags = device_queue_depth(np, device->id, device->lun);
- if (reqtags > tp->usrtags)
- reqtags = tp->usrtags;
- if (!device->tagged_supported)
- reqtags = 0;
+ /*
+ * Select queue depth from driver setup.
+ * Donnot use more than configured by user.
+ * Use at least 2.
+ * Donnot use more than our maximum.
+ */
+ reqtags = device_queue_depth(np, device->id, device->lun);
+ if (reqtags > tp->usrtags)
+ reqtags = tp->usrtags;
+ if (!device->tagged_supported)
+ reqtags = 0;
#if 1 /* Avoid to locally queue commands for no good reasons */
- if (reqtags > SYM_CONF_MAX_TAG)
- reqtags = SYM_CONF_MAX_TAG;
- device->queue_depth = reqtags ? reqtags : 2;
+ if (reqtags > SYM_CONF_MAX_TAG)
+ reqtags = SYM_CONF_MAX_TAG;
+ depth_to_use = (reqtags ? reqtags : 2);
#else
- device->queue_depth = reqtags ? SYM_CONF_MAX_TAG : 2;
+ depth_to_use = (reqtags ? SYM_CONF_MAX_TAG : 2);
#endif
- lp->s.scdev_depth = device->queue_depth;
- sym_tune_dev_queuing(np, device->id, device->lun, reqtags);
- }
+ scsi_adjust_queue_depth(device,
+ (device->tagged_supported ?
+ MSG_SIMPLE_TAG : 0),
+ depth_to_use);
+ lp->s.scdev_depth = depth_to_use;
+ sym_tune_dev_queuing(np, device->id, device->lun, reqtags);
+
+ return 0;
}
/*
@@ -2132,7 +2129,6 @@ sym_attach (Scsi_Host_Template *tpnt, int unit, sym_device *dev)
#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0)
instance->max_cmd_len = 16;
#endif
- instance->select_queue_depths = sym53c8xx_select_queue_depths;
instance->highmem_io = 1;
SYM_UNLOCK_HCB(np, flags);
diff --git a/fs/Config.help b/fs/Config.help
index 44622847c4df..76ffd7584add 100644
--- a/fs/Config.help
+++ b/fs/Config.help
@@ -535,6 +535,13 @@ CONFIG_NFS_DIRECTIO
causes open() to return EINVAL if a file residing in NFS is
opened with the O_DIRECT flag.
+CONFIG_NFS_V4
+ Say Y here if you want your NFS client to be able to speak the newer
+ version 4 of the NFS protocol. This feature is experimental, and
+ should only be used if you are interested in helping to test NFSv4.
+
+ If unsure, say N.
+
CONFIG_ROOT_NFS
If you want your Linux box to mount its whole root file system (the
one containing the directory /) from some other computer over the
@@ -1137,3 +1144,11 @@ CONFIG_XFS_RT
If unsure, say N.
+CONFIG_AFS_FS
+ If you say Y here, you will get an experimental Andrew File System
+ driver. It currently only supports unsecured read-only AFS access.
+
+ See Documentation/filesystems/afs.txt for more information.
+
+ If unsure, say N.
+
diff --git a/fs/Config.in b/fs/Config.in
index e6eb844338ec..0464a17a8dbd 100644
--- a/fs/Config.in
+++ b/fs/Config.in
@@ -117,6 +117,7 @@ if [ "$CONFIG_NET" = "y" ]; then
dep_tristate 'InterMezzo file system support (replicating fs) (EXPERIMENTAL)' CONFIG_INTERMEZZO_FS $CONFIG_INET $CONFIG_EXPERIMENTAL
dep_tristate 'NFS file system support' CONFIG_NFS_FS $CONFIG_INET
dep_mbool ' Provide NFSv3 client support' CONFIG_NFS_V3 $CONFIG_NFS_FS
+ dep_mbool ' Provide NFSv4 client support (EXPERIMENTAL)' CONFIG_NFS_V4 $CONFIG_NFS_FS $CONFIG_EXPERIMENTAL
dep_bool ' Root file system on NFS' CONFIG_ROOT_NFS $CONFIG_NFS_FS $CONFIG_IP_PNP
dep_tristate 'NFS server support' CONFIG_NFSD $CONFIG_INET
@@ -157,6 +158,16 @@ if [ "$CONFIG_NET" = "y" ]; then
# for fs/nls/Config.in
define_bool CONFIG_NCPFS_NLS n
fi
+
+ dep_tristate 'Andrew File System support (AFS) (Experimental)' CONFIG_AFS_FS $CONFIG_INET $CONFIG_EXPERIMENTAL
+ if [ "$CONFIG_AFS_FS" = "y" ]; then
+ define_tristate CONFIG_RXRPC y
+ else
+ if [ "$CONFIG_AFS_FS" = "m" ]; then
+ define_tristate CONFIG_RXRPC m
+ fi
+ fi
+
endmenu
else
diff --git a/fs/Makefile b/fs/Makefile
index d902bdd8bda3..c28d57ab55a9 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -6,7 +6,7 @@
#
export-objs := open.o dcache.o buffer.o bio.o inode.o dquot.o mpage.o aio.o \
- fcntl.o read_write.o
+ fcntl.o read_write.o dcookies.o
obj-y := open.o read_write.o devices.o file_table.o buffer.o \
bio.o super.o block_dev.o char_dev.o stat.o exec.o pipe.o \
@@ -40,6 +40,8 @@ obj-y += partitions/
obj-y += driverfs/
obj-y += devpts/
+obj-$(CONFIG_PROFILING) += dcookies.o
+
# Do not add any filesystems before this line
obj-$(CONFIG_EXT3_FS) += ext3/ # Before ext2 so root fs can be ext3
obj-$(CONFIG_JBD) += jbd/
@@ -84,5 +86,6 @@ obj-$(CONFIG_REISERFS_FS) += reiserfs/
obj-$(CONFIG_SUN_OPENPROMFS) += openpromfs/
obj-$(CONFIG_JFS_FS) += jfs/
obj-$(CONFIG_XFS_FS) += xfs/
+obj-$(CONFIG_AFS_FS) += afs/
include $(TOPDIR)/Rules.make
diff --git a/fs/afs/Makefile b/fs/afs/Makefile
new file mode 100644
index 000000000000..753cf8c5b4eb
--- /dev/null
+++ b/fs/afs/Makefile
@@ -0,0 +1,36 @@
+#
+# Makefile for Red Hat Linux AFS client.
+#
+
+kafs-objs := \
+ callback.o \
+ cell.o \
+ cmservice.o \
+ dir.o \
+ file.o \
+ fsclient.o \
+ inode.o \
+ kafsasyncd.o \
+ kafstimod.o \
+ main.o \
+ misc.o \
+ mntpt.o \
+ proc.o \
+ server.o \
+ super.o \
+ vlclient.o \
+ vlocation.o \
+ vnode.o \
+ volume.o
+
+# cache.o
+
+obj-m := kafs.o
+
+# superfluous for 2.5, but needed for 2.4..
+ifeq "$(VERSION).$(PATCHLEVEL)" "2.4"
+kafs.o: $(kafs-objs)
+ $(LD) -r -o kafs.o $(kafs-objs)
+endif
+
+include $(TOPDIR)/Rules.make
diff --git a/fs/afs/cache-layout.h b/fs/afs/cache-layout.h
new file mode 100644
index 000000000000..e71afd719a3f
--- /dev/null
+++ b/fs/afs/cache-layout.h
@@ -0,0 +1,224 @@
+/* cache-layout.h: AFS cache layout
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ *
+ * The cache is stored on a block device and is laid out as:
+ *
+ * 0 +------------------------------------------------
+ * |
+ * | SuperBlock
+ * |
+ * 1 +------------------------------------------------
+ * |
+ * | file-meta-data File: Data block #0
+ * | - file-meta-data file (volix #0 file #0) : Meta-data block
+ * | - contains direct pointers to first 64 file data blocks
+ * | - Cached cell catalogue file (volix #0 file #1) file: Meta-data block
+ * | - Cached volume location catalogue file (volix #0 file #2): Meta-data block
+ * | - Vnode catalogue hash bucket #n file: Meta-data block
+ * |
+ * 2 +------------------------------------------------
+ * |
+ * | Bitmap Block Allocation Bitmap
+ * | - 1 bit per block in the bitmap block
+ * | - bit 0 of dword 0 refers to the bitmap block 0
+ * | - set if the bitmap block is full
+ * | - 32768 bits per block, requiring 4 blocks for a 16Tb cache
+ * | - bitmap bitmap blocks are cleared initially
+ * | - not present if <4 bitmap blocks
+ * |
+ * +------------------------------------------------
+ * |
+ * | File Block Allocation Bitmap
+ * | - 1 bit per block in the cache
+ * | - bit 0 of dword 0 refers to the first block of the data cache
+ * | - set if block is allocated
+ * | - 32768 bits per block, requiring 131072 blocks for a 16Tb cache
+ * | - bitmap blocks are cleared lazily (sb->bix_bitmap_unready)
+ * |
+ * +------------------------------------------------
+ * |
+ * | Data Cache
+ * |
+ * End +------------------------------------------------
+ *
+ * Blocks are indexed by an unsigned 32-bit word, meaning that the cache can hold up to 2^32 pages,
+ * or 16Tb in total.
+ *
+ * Credentials will be cached in memory, since they are subject to change without notice, and are
+ * difficult to derive manually, being constructed from the following information:
+ * - per vnode user ID and mode mask
+ * - parent directory ACL
+ * - directory ACL (dirs only)
+ * - group lists from ptserver
+ */
+
+#ifndef _LINUX_AFS_CACHE_LAYOUT_H
+#define _LINUX_AFS_CACHE_LAYOUT_H
+
+#include "types.h"
+
+typedef u32 afsc_blockix_t;
+typedef u32 afsc_cellix_t;
+
+/* Cached volume index
+ * - afsc_volix_t/4 is the index into the volume cache
+ * - afsc_volix_t%4 is 0 for R/W, 1 for R/O and 2 for Bak (3 is not used)
+ * - afsc_volix_t==0-3 refers to a "virtual" volume that stores meta-data about the cache
+ */
+typedef struct {
+ u32 index;
+} afsc_volix_t;
+
+#define AFSC_VNCAT_HASH_NBUCKETS 128
+
+/* special meta file IDs (all cell 0 vol 0) */
+enum afsc_meta_fids {
+ AFSC_META_FID_METADATA = 0,
+ AFSC_META_FID_CELL_CATALOGUE = 1,
+ AFSC_META_FID_VLDB_CATALOGUE = 2,
+ AFSC_META_FID_VNODE_CATALOGUE0 = 3,
+ AFSC_META_FID__COUNT = AFSC_VNCAT_HASH_NBUCKETS + 3
+};
+
+/*****************************************************************************/
+/*
+ * cache superblock block layout
+ * - the blockdev is prepared for initialisation by 'echo "kafsuninit" >/dev/hdaXX' before mounting
+ * - when initialised, the magic number is changed to "kafs-cache"
+ */
+struct afsc_super_block
+{
+ char magic[10]; /* magic number */
+#define AFSC_SUPER_MAGIC "kafs-cache"
+#define AFSC_SUPER_MAGIC_NEEDS_INIT "kafsuninit"
+#define AFSC_SUPER_MAGIC_SIZE 10
+
+ unsigned short endian; /* 0x1234 stored CPU-normal order */
+#define AFSC_SUPER_ENDIAN 0x1234
+
+ unsigned version; /* format version */
+#define AFSC_SUPER_VERSION 1
+
+ /* layout */
+ unsigned bsize; /* cache block size */
+ afsc_blockix_t bix_bitmap_fullmap; /* block ix of bitmap full bitmap */
+ afsc_blockix_t bix_bitmap; /* block ix of alloc bitmap */
+ afsc_blockix_t bix_bitmap_unready; /* block ix of unready area of bitmap */
+ afsc_blockix_t bix_cache; /* block ix of data cache */
+ afsc_blockix_t bix_end; /* block ix of end of cache */
+};
+
+/*****************************************************************************/
+/*
+ * vnode (inode) metadata cache record
+ * - padded out to 512 bytes and stored eight to a page
+ * - only the data version is necessary
+ * - disconnected operation is not supported
+ * - afs_iget() contacts the server to get the meta-data _anyway_ when an inode is first brought
+ * into memory
+ * - at least 64 direct block pointers will be available (a directory is max 256Kb)
+ * - any block pointer which is 0 indicates an uncached page
+ */
+struct afsc_vnode_meta
+{
+ /* file ID */
+ afsc_volix_t volume_ix; /* volume catalogue index */
+ unsigned vnode; /* vnode number */
+ unsigned unique; /* FID unique */
+ unsigned size; /* size of file */
+ time_t mtime; /* last modification time */
+
+ /* file status */
+ afs_dataversion_t version; /* current data version */
+
+ /* file contents */
+ afsc_blockix_t dbl_indirect; /* double indirect block index */
+ afsc_blockix_t indirect; /* single indirect block 0 index */
+ afsc_blockix_t direct[0]; /* direct block index (#AFSC_VNODE_META_DIRECT) */
+};
+
+#define AFSC_VNODE_META_RECSIZE 512 /* record size */
+
+#define AFSC_VNODE_META_DIRECT \
+ ((AFSC_VNODE_META_RECSIZE-sizeof(struct afsc_vnode_meta))/sizeof(afsc_blockix_t))
+
+#define AFSC_VNODE_META_PER_PAGE (PAGE_SIZE / AFSC_VNODE_META_RECSIZE)
+
+/*****************************************************************************/
+/*
+ * entry in the cached cell catalogue
+ */
+struct afsc_cell_record
+{
+ char name[64]; /* cell name (padded with NULs) */
+ struct in_addr servers[16]; /* cached cell servers */
+};
+
+/*****************************************************************************/
+/*
+ * entry in the cached volume location catalogue
+ * - indexed by afsc_volix_t/4
+ */
+struct afsc_vldb_record
+{
+ char name[64]; /* volume name (padded with NULs) */
+ afs_volid_t vid[3]; /* volume IDs for R/W, R/O and Bak volumes */
+ unsigned char vidmask; /* voltype mask for vid[] */
+ unsigned char _pad[1];
+ unsigned short nservers; /* number of entries used in servers[] */
+ struct in_addr servers[8]; /* fileserver addresses */
+ unsigned char srvtmask[8]; /* voltype masks for servers[] */
+#define AFSC_VOL_STM_RW 0x01 /* server holds a R/W version of the volume */
+#define AFSC_VOL_STM_RO 0x02 /* server holds a R/O version of the volume */
+#define AFSC_VOL_STM_BAK 0x04 /* server holds a backup version of the volume */
+
+ afsc_cellix_t cell_ix; /* cell catalogue index (MAX_UINT if unused) */
+ time_t ctime; /* time at which cached */
+};
+
+/*****************************************************************************/
+/*
+ * vnode catalogue entry
+ * - must be 2^x size so that do_generic_file_read doesn't present them split across pages
+ */
+struct afsc_vnode_catalogue
+{
+ afsc_volix_t volume_ix; /* volume catalogue index */
+ afs_vnodeid_t vnode; /* vnode ID */
+ u32 meta_ix; /* metadata file index */
+ u32 atime; /* last time entry accessed */
+} __attribute__((packed));
+
+#define AFSC_VNODE_CATALOGUE_PER_BLOCK ((size_t)(PAGE_SIZE/sizeof(struct afsc_vnode_catalogue)))
+
+/*****************************************************************************/
+/*
+ * vnode data "page directory" block
+ * - first 1024 pages don't map through here
+ * - PAGE_SIZE in size
+ */
+struct afsc_indirect_block
+{
+ afsc_blockix_t pt_bix[1024]; /* "page table" block indices */
+};
+
+/*****************************************************************************/
+/*
+ * vnode data "page table" block
+ * - PAGE_SIZE in size
+ */
+struct afsc_dbl_indirect_block
+{
+ afsc_blockix_t page_bix[1024]; /* "page" block indices */
+};
+
+
+#endif /* _LINUX_AFS_CACHE_LAYOUT_H */
diff --git a/fs/afs/callback.c b/fs/afs/callback.c
new file mode 100644
index 000000000000..8d030bd67aa0
--- /dev/null
+++ b/fs/afs/callback.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2002 Red Hat, Inc. All rights reserved.
+ *
+ * This software may be freely redistributed under the terms of the
+ * GNU General Public License.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Authors: David Woodhouse <dwmw2@cambridge.redhat.com>
+ * David Howells <dhowells@redhat.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include "server.h"
+#include "vnode.h"
+#include "internal.h"
+
+/*****************************************************************************/
+/*
+ * allow the fileserver to request callback state (re-)initialisation
+ */
+int SRXAFSCM_InitCallBackState(afs_server_t *server)
+{
+ struct list_head callbacks;
+
+ _enter("%p",server);
+
+ INIT_LIST_HEAD(&callbacks);
+
+ /* transfer the callback list from the server to a temp holding area */
+ spin_lock(&server->cb_lock);
+
+ list_add(&callbacks,&server->cb_promises);
+ list_del_init(&server->cb_promises);
+
+ /* munch our way through the list, grabbing the inode, dropping all the locks and regetting
+ * them in the right order
+ */
+ while (!list_empty(&callbacks)) {
+ struct inode *inode;
+ afs_vnode_t *vnode;
+
+ vnode = list_entry(callbacks.next,afs_vnode_t,cb_link);
+ list_del_init(&vnode->cb_link);
+
+ /* try and grab the inode - may fail */
+ inode = igrab(AFS_VNODE_TO_I(vnode));
+ if (inode) {
+ int release = 0;
+
+ spin_unlock(&server->cb_lock);
+ spin_lock(&vnode->lock);
+
+ if (vnode->cb_server==server) {
+ vnode->cb_server = NULL;
+ afs_kafstimod_del_timer(&vnode->cb_timeout);
+ spin_lock(&afs_cb_hash_lock);
+ list_del_init(&vnode->cb_hash_link);
+ spin_unlock(&afs_cb_hash_lock);
+ release = 1;
+ }
+
+ spin_unlock(&vnode->lock);
+
+ iput(inode);
+ if (release) afs_put_server(server);
+
+ spin_lock(&server->cb_lock);
+ }
+ }
+
+ spin_unlock(&server->cb_lock);
+
+ _leave(" = 0");
+ return 0;
+} /* end SRXAFSCM_InitCallBackState() */
+
+/*****************************************************************************/
+/*
+ * allow the fileserver to break callback promises
+ */
+int SRXAFSCM_CallBack(afs_server_t *server, size_t count, afs_callback_t callbacks[])
+{
+ struct list_head *_p;
+
+ _enter("%p,%u,",server,count);
+
+ for (; count>0; callbacks++, count--) {
+ struct inode *inode = NULL;
+ afs_vnode_t *vnode = NULL;
+ int valid = 0;
+
+ _debug("- Fid { vl=%08x n=%u u=%u } CB { v=%u x=%u t=%u }",
+ callbacks->fid.vid,
+ callbacks->fid.vnode,
+ callbacks->fid.unique,
+ callbacks->version,
+ callbacks->expiry,
+ callbacks->type
+ );
+
+ /* find the inode for this fid */
+ spin_lock(&afs_cb_hash_lock);
+
+ list_for_each(_p,&afs_cb_hash(server,&callbacks->fid)) {
+ vnode = list_entry(_p,afs_vnode_t,cb_hash_link);
+
+ if (memcmp(&vnode->fid,&callbacks->fid,sizeof(afs_fid_t))!=0)
+ continue;
+
+ /* right vnode, but is it same server? */
+ if (vnode->cb_server!=server)
+ break; /* no */
+
+ /* try and nail the inode down */
+ inode = igrab(AFS_VNODE_TO_I(vnode));
+ break;
+ }
+
+ spin_unlock(&afs_cb_hash_lock);
+
+ if (inode) {
+ /* we've found the record for this vnode */
+ spin_lock(&vnode->lock);
+ if (vnode->cb_server==server) {
+ /* the callback _is_ on the calling server */
+ vnode->cb_server = NULL;
+ valid = 1;
+
+ afs_kafstimod_del_timer(&vnode->cb_timeout);
+ vnode->flags |= AFS_VNODE_CHANGED;
+
+ spin_lock(&server->cb_lock);
+ list_del_init(&vnode->cb_link);
+ spin_unlock(&server->cb_lock);
+
+ spin_lock(&afs_cb_hash_lock);
+ list_del_init(&vnode->cb_hash_link);
+ spin_unlock(&afs_cb_hash_lock);
+ }
+ spin_unlock(&vnode->lock);
+
+ if (valid) {
+ invalidate_inode_pages(inode->i_mapping);
+ afs_put_server(server);
+ }
+ iput(inode);
+ }
+ }
+
+ _leave(" = 0");
+ return 0;
+} /* end SRXAFSCM_CallBack() */
+
+/*****************************************************************************/
+/*
+ * allow the fileserver to see if the cache manager is still alive
+ */
+int SRXAFSCM_Probe(afs_server_t *server)
+{
+ _debug("SRXAFSCM_Probe(%p)\n",server);
+ return 0;
+} /* end SRXAFSCM_Probe() */
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
new file mode 100644
index 000000000000..f7f00a2bec9e
--- /dev/null
+++ b/fs/afs/cell.c
@@ -0,0 +1,452 @@
+/* cell.c: AFS cell and server record management
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <rxrpc/peer.h>
+#include <rxrpc/connection.h>
+#include "volume.h"
+#include "cell.h"
+#include "server.h"
+#include "transport.h"
+#include "vlclient.h"
+#include "kafstimod.h"
+#include "super.h"
+#include "internal.h"
+
+DECLARE_RWSEM(afs_proc_cells_sem);
+LIST_HEAD(afs_proc_cells);
+
+static struct list_head afs_cells = LIST_HEAD_INIT(afs_cells);
+static rwlock_t afs_cells_lock = RW_LOCK_UNLOCKED;
+static DECLARE_RWSEM(afs_cells_sem); /* add/remove serialisation */
+static afs_cell_t *afs_cell_root;
+
+static char *rootcell;
+
+MODULE_PARM(rootcell,"s");
+MODULE_PARM_DESC(rootcell,"root AFS cell name and VL server IP addr list");
+
+/*****************************************************************************/
+/*
+ * create a cell record
+ * - "name" is the name of the cell
+ * - "vllist" is a colon separated list of IP addresses in "a.b.c.d" format
+ */
+int afs_cell_create(const char *name, char *vllist, afs_cell_t **_cell)
+{
+ afs_cell_t *cell;
+ char *next;
+ int ret;
+
+ _enter("%s",name);
+
+ if (!name) BUG(); /* TODO: want to look up "this cell" in the cache */
+
+ down_write(&afs_cells_sem);
+
+ /* allocate and initialise a cell record */
+ cell = kmalloc(sizeof(afs_cell_t) + strlen(name) + 1,GFP_KERNEL);
+ if (!cell) {
+ _leave(" = -ENOMEM");
+ return -ENOMEM;
+ }
+
+ memset(cell,0,sizeof(afs_cell_t));
+ atomic_set(&cell->usage,0);
+
+ INIT_LIST_HEAD(&cell->link);
+ INIT_LIST_HEAD(&cell->caches);
+
+ rwlock_init(&cell->sv_lock);
+ INIT_LIST_HEAD(&cell->sv_list);
+ INIT_LIST_HEAD(&cell->sv_graveyard);
+ spin_lock_init(&cell->sv_gylock);
+
+ init_rwsem(&cell->vl_sem);
+ INIT_LIST_HEAD(&cell->vl_list);
+ INIT_LIST_HEAD(&cell->vl_graveyard);
+ spin_lock_init(&cell->vl_gylock);
+
+ strcpy(cell->name,name);
+
+ /* fill in the VL server list from the rest of the string */
+ ret = -EINVAL;
+ do {
+ unsigned a, b, c, d;
+
+ next = strchr(vllist,':');
+ if (next) *next++ = 0;
+
+ if (sscanf(vllist,"%u.%u.%u.%u",&a,&b,&c,&d)!=4)
+ goto badaddr;
+
+ if (a>255 || b>255 || c>255 || d>255)
+ goto badaddr;
+
+ cell->vl_addrs[cell->vl_naddrs++].s_addr =
+ htonl((a<<24)|(b<<16)|(c<<8)|d);
+
+ if (cell->vl_naddrs>=16)
+ break;
+
+ } while(vllist=next, vllist);
+
+ /* add a proc dir for this cell */
+ ret = afs_proc_cell_setup(cell);
+ if (ret<0)
+ goto error;
+
+ /* add to the cell lists */
+ write_lock(&afs_cells_lock);
+ list_add_tail(&cell->link,&afs_cells);
+ write_unlock(&afs_cells_lock);
+
+ down_write(&afs_proc_cells_sem);
+ list_add_tail(&cell->proc_link,&afs_proc_cells);
+ up_write(&afs_proc_cells_sem);
+
+ *_cell = cell;
+ up_write(&afs_cells_sem);
+
+ _leave(" = 0 (%p)",cell);
+ return 0;
+
+ badaddr:
+ printk("kAFS: bad VL server IP address: '%s'\n",vllist);
+ error:
+ up_write(&afs_cells_sem);
+ kfree(afs_cell_root);
+ return ret;
+} /* end afs_cell_create() */
+
+/*****************************************************************************/
+/*
+ * initialise the cell database from module parameters
+ */
+int afs_cell_init(void)
+{
+ char *cp;
+ int ret;
+
+ _enter("");
+
+ if (!rootcell) {
+ printk("kAFS: no root cell specified\n");
+ return -EINVAL;
+ }
+
+ cp = strchr(rootcell,':');
+ if (!cp) {
+ printk("kAFS: no VL server IP addresses specified\n");
+ return -EINVAL;
+ }
+
+ /* allocate a cell record for the root cell */
+ *cp++ = 0;
+ ret = afs_cell_create(rootcell,cp,&afs_cell_root);
+ if (ret==0)
+ afs_get_cell(afs_cell_root);
+
+ _leave(" = %d",ret);
+ return ret;
+
+} /* end afs_cell_init() */
+
+/*****************************************************************************/
+/*
+ * lookup a cell record
+ */
+int afs_cell_lookup(const char *name, afs_cell_t **_cell)
+{
+ struct list_head *_p;
+ afs_cell_t *cell;
+
+ _enter("\"%s\",",name?name:"*thiscell*");
+
+ cell = afs_cell_root;
+
+ if (name) {
+ /* if the cell was named, look for it in the cell record list */
+ cell = NULL;
+ read_lock(&afs_cells_lock);
+
+ list_for_each(_p,&afs_cells) {
+ cell = list_entry(_p,afs_cell_t,link);
+ if (strcmp(cell->name,name)==0)
+ break;
+ cell = NULL;
+ }
+
+ read_unlock(&afs_cells_lock);
+ }
+
+ if (cell)
+ afs_get_cell(cell);
+
+ *_cell = cell;
+ _leave(" = %d (%p)",cell?0:-ENOENT,cell);
+ return cell ? 0 : -ENOENT;
+
+} /* end afs_cell_lookup() */
+
+/*****************************************************************************/
+/*
+ * try and get a cell record
+ */
+afs_cell_t *afs_get_cell_maybe(afs_cell_t **_cell)
+{
+ afs_cell_t *cell;
+
+ write_lock(&afs_cells_lock);
+
+ cell = *_cell;
+ if (cell && !list_empty(&cell->link))
+ atomic_inc(&cell->usage);
+ else
+ cell = NULL;
+
+ write_unlock(&afs_cells_lock);
+
+ return cell;
+} /* end afs_get_cell_maybe() */
+
+/*****************************************************************************/
+/*
+ * destroy a cell record
+ */
+void afs_put_cell(afs_cell_t *cell)
+{
+ _enter("%p{%d,%s}",cell,atomic_read(&cell->usage),cell->name);
+
+ /* sanity check */
+ if (atomic_read(&cell->usage)<=0)
+ BUG();
+
+ /* to prevent a race, the decrement and the dequeue must be effectively atomic */
+ write_lock(&afs_cells_lock);
+
+ if (likely(!atomic_dec_and_test(&cell->usage))) {
+ write_unlock(&afs_cells_lock);
+ _leave("");
+ return;
+ }
+
+ write_unlock(&afs_cells_lock);
+
+ if (!list_empty(&cell->sv_list)) BUG();
+ if (!list_empty(&cell->sv_graveyard)) BUG();
+ if (!list_empty(&cell->vl_list)) BUG();
+ if (!list_empty(&cell->vl_graveyard)) BUG();
+
+ _leave(" [unused]");
+} /* end afs_put_cell() */
+
+/*****************************************************************************/
+/*
+ * destroy a cell record
+ */
+static void afs_cell_destroy(afs_cell_t *cell)
+{
+ _enter("%p{%d,%s}",cell,atomic_read(&cell->usage),cell->name);
+
+ /* to prevent a race, the decrement and the dequeue must be effectively atomic */
+ write_lock(&afs_cells_lock);
+
+ /* sanity check */
+ if (atomic_read(&cell->usage)!=0)
+ BUG();
+
+ list_del_init(&cell->link);
+
+ write_unlock(&afs_cells_lock);
+
+ down_write(&afs_cells_sem);
+
+ afs_proc_cell_remove(cell);
+
+ down_write(&afs_proc_cells_sem);
+ list_del_init(&cell->proc_link);
+ up_write(&afs_proc_cells_sem);
+
+ up_write(&afs_cells_sem);
+
+ if (!list_empty(&cell->sv_list)) BUG();
+ if (!list_empty(&cell->sv_graveyard)) BUG();
+ if (!list_empty(&cell->vl_list)) BUG();
+ if (!list_empty(&cell->vl_graveyard)) BUG();
+
+ /* finish cleaning up the cell */
+ kfree(cell);
+
+ _leave(" [destroyed]");
+} /* end afs_cell_destroy() */
+
+/*****************************************************************************/
+/*
+ * lookup the server record corresponding to an Rx RPC peer
+ */
+int afs_server_find_by_peer(const struct rxrpc_peer *peer, afs_server_t **_server)
+{
+ struct list_head *_pc, *_ps;
+ afs_server_t *server;
+ afs_cell_t *cell;
+
+ _enter("%p{a=%08x},",peer,ntohl(peer->addr.s_addr));
+
+ /* search the cell list */
+ read_lock(&afs_cells_lock);
+
+ list_for_each(_pc,&afs_cells) {
+ cell = list_entry(_pc,afs_cell_t,link);
+
+ _debug("? cell %s",cell->name);
+
+ write_lock(&cell->sv_lock);
+
+ /* check the active list */
+ list_for_each(_ps,&cell->sv_list) {
+ server = list_entry(_ps,afs_server_t,link);
+
+ _debug("?? server %08x",ntohl(server->addr.s_addr));
+
+ if (memcmp(&server->addr,&peer->addr,sizeof(struct in_addr))==0)
+ goto found_server;
+ }
+
+ /* check the inactive list */
+ spin_lock(&cell->sv_gylock);
+ list_for_each(_ps,&cell->sv_graveyard) {
+ server = list_entry(_ps,afs_server_t,link);
+
+ _debug("?? dead server %08x",ntohl(server->addr.s_addr));
+
+ if (memcmp(&server->addr,&peer->addr,sizeof(struct in_addr))==0)
+ goto found_dead_server;
+ }
+ spin_unlock(&cell->sv_gylock);
+
+ write_unlock(&cell->sv_lock);
+ }
+ read_unlock(&afs_cells_lock);
+
+ _leave(" = -ENOENT");
+ return -ENOENT;
+
+ /* we found it in the graveyard - resurrect it */
+ found_dead_server:
+ list_del(&server->link);
+ list_add_tail(&server->link,&cell->sv_list);
+ afs_get_server(server);
+ afs_kafstimod_del_timer(&server->timeout);
+ spin_unlock(&cell->sv_gylock);
+ goto success;
+
+ /* we found it - increment its ref count and return it */
+ found_server:
+ afs_get_server(server);
+
+ success:
+ write_unlock(&cell->sv_lock);
+ read_unlock(&afs_cells_lock);
+
+ *_server = server;
+ _leave(" = 0 (s=%p c=%p)",server,cell);
+ return 0;
+
+} /* end afs_server_find_by_peer() */
+
+/*****************************************************************************/
+/*
+ * purge in-memory cell database on module unload
+ * - the timeout daemon is stopped before calling this
+ */
+void afs_cell_purge(void)
+{
+ afs_vlocation_t *vlocation;
+ afs_cell_t *cell;
+
+ _enter("");
+
+ if (afs_cell_root)
+ afs_put_cell(afs_cell_root);
+
+ while (!list_empty(&afs_cells)) {
+ cell = NULL;
+
+ /* remove the next cell from the front of the list */
+ write_lock(&afs_cells_lock);
+
+ if (!list_empty(&afs_cells)) {
+ cell = list_entry(afs_cells.next,afs_cell_t,link);
+ list_del_init(&cell->link);
+ }
+
+ write_unlock(&afs_cells_lock);
+
+ if (cell) {
+ _debug("PURGING CELL %s (%d)",cell->name,atomic_read(&cell->usage));
+
+ if (!list_empty(&cell->sv_list)) BUG();
+ if (!list_empty(&cell->vl_list)) BUG();
+
+ /* purge the cell's VL graveyard list */
+ _debug(" - clearing VL graveyard");
+
+ spin_lock(&cell->vl_gylock);
+
+ while (!list_empty(&cell->vl_graveyard)) {
+ vlocation = list_entry(cell->vl_graveyard.next,
+ afs_vlocation_t,link);
+ list_del_init(&vlocation->link);
+
+ afs_kafstimod_del_timer(&vlocation->timeout);
+
+ spin_unlock(&cell->vl_gylock);
+
+ afs_vlocation_do_timeout(vlocation);
+ /* TODO: race if move to use krxtimod instead of kafstimod */
+
+ spin_lock(&cell->vl_gylock);
+ }
+
+ spin_unlock(&cell->vl_gylock);
+
+ /* purge the cell's server graveyard list */
+ _debug(" - clearing server graveyard");
+
+ spin_lock(&cell->sv_gylock);
+
+ while (!list_empty(&cell->sv_graveyard)) {
+ afs_server_t *server;
+
+ server = list_entry(cell->sv_graveyard.next,afs_server_t,link);
+ list_del_init(&server->link);
+
+ afs_kafstimod_del_timer(&server->timeout);
+
+ spin_unlock(&cell->sv_gylock);
+
+ afs_server_do_timeout(server);
+
+ spin_lock(&cell->sv_gylock);
+ }
+
+ spin_unlock(&cell->sv_gylock);
+
+ /* now the cell should be left with no references */
+ afs_cell_destroy(cell);
+ }
+ }
+
+ _leave("");
+} /* end afs_cell_purge() */
diff --git a/fs/afs/cell.h b/fs/afs/cell.h
new file mode 100644
index 000000000000..48eb9fa91f19
--- /dev/null
+++ b/fs/afs/cell.h
@@ -0,0 +1,63 @@
+/* cell.h: AFS cell record
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_AFS_CELL_H
+#define _LINUX_AFS_CELL_H
+
+#include "types.h"
+
+extern volatile int afs_cells_being_purged; /* T when cells are being purged by rmmod */
+
+/*****************************************************************************/
+/*
+ * AFS cell record
+ */
+struct afs_cell
+{
+ atomic_t usage;
+ struct list_head link; /* main cell list link */
+ struct list_head proc_link; /* /proc cell list link */
+ struct proc_dir_entry *proc_dir; /* /proc dir for this cell */
+ struct list_head caches; /* list of caches currently backing this cell */
+
+ /* server record management */
+ rwlock_t sv_lock; /* active server list lock */
+ struct list_head sv_list; /* active server list */
+ struct list_head sv_graveyard; /* inactive server list */
+ spinlock_t sv_gylock; /* inactive server list lock */
+
+ /* volume location record management */
+ struct rw_semaphore vl_sem; /* volume management serialisation semaphore */
+ struct list_head vl_list; /* cell's active VL record list */
+ struct list_head vl_graveyard; /* cell's inactive VL record list */
+ spinlock_t vl_gylock; /* graveyard lock */
+ unsigned short vl_naddrs; /* number of VL servers in addr list */
+ unsigned short vl_curr_svix; /* current server index */
+ struct in_addr vl_addrs[16]; /* cell VL server addresses */
+
+ char name[0]; /* cell name - must go last */
+};
+
+extern int afs_cell_init(void);
+
+extern int afs_cell_create(const char *name, char *vllist, afs_cell_t **_cell);
+
+extern int afs_cell_lookup(const char *name, afs_cell_t **_cell);
+
+#define afs_get_cell(C) do { atomic_inc(&(C)->usage); } while(0)
+
+extern afs_cell_t *afs_get_cell_maybe(afs_cell_t **_cell);
+
+extern void afs_put_cell(afs_cell_t *cell);
+
+extern void afs_cell_purge(void);
+
+#endif /* _LINUX_AFS_CELL_H */
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
new file mode 100644
index 000000000000..b95c3625257a
--- /dev/null
+++ b/fs/afs/cmservice.c
@@ -0,0 +1,639 @@
+/* cmservice.c: AFS Cache Manager Service
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include "server.h"
+#include "cell.h"
+#include "transport.h"
+#include <rxrpc/rxrpc.h>
+#include <rxrpc/transport.h>
+#include <rxrpc/connection.h>
+#include <rxrpc/call.h>
+#include "cmservice.h"
+#include "internal.h"
+
+static unsigned afscm_usage; /* AFS cache manager usage count */
+static struct rw_semaphore afscm_sem; /* AFS cache manager start/stop semaphore */
+
+static int afscm_new_call(struct rxrpc_call *call);
+static void afscm_attention(struct rxrpc_call *call);
+static void afscm_error(struct rxrpc_call *call);
+static void afscm_aemap(struct rxrpc_call *call);
+
+static void _SRXAFSCM_CallBack(struct rxrpc_call *call);
+static void _SRXAFSCM_InitCallBackState(struct rxrpc_call *call);
+static void _SRXAFSCM_Probe(struct rxrpc_call *call);
+
+typedef void (*_SRXAFSCM_xxxx_t)(struct rxrpc_call *call);
+
+static const struct rxrpc_operation AFSCM_ops[] = {
+ {
+ .id = 204,
+ .asize = RXRPC_APP_MARK_EOF,
+ .name = "CallBack",
+ .user = _SRXAFSCM_CallBack,
+ },
+ {
+ .id = 205,
+ .asize = RXRPC_APP_MARK_EOF,
+ .name = "InitCallBackState",
+ .user = _SRXAFSCM_InitCallBackState,
+ },
+ {
+ .id = 206,
+ .asize = RXRPC_APP_MARK_EOF,
+ .name = "Probe",
+ .user = _SRXAFSCM_Probe,
+ },
+#if 0
+ {
+ .id = 207,
+ .asize = RXRPC_APP_MARK_EOF,
+ .name = "GetLock",
+ .user = _SRXAFSCM_GetLock,
+ },
+ {
+ .id = 208,
+ .asize = RXRPC_APP_MARK_EOF,
+ .name = "GetCE",
+ .user = _SRXAFSCM_GetCE,
+ },
+ {
+ .id = 209,
+ .asize = RXRPC_APP_MARK_EOF,
+ .name = "GetXStatsVersion",
+ .user = _SRXAFSCM_GetXStatsVersion,
+ },
+ {
+ .id = 210,
+ .asize = RXRPC_APP_MARK_EOF,
+ .name = "GetXStats",
+ .user = _SRXAFSCM_GetXStats,
+ }
+#endif
+};
+
+static struct rxrpc_service AFSCM_service = {
+ .name = "AFS/CM",
+ .owner = THIS_MODULE,
+ .link = LIST_HEAD_INIT(AFSCM_service.link),
+ .new_call = afscm_new_call,
+ .service_id = 1,
+ .attn_func = afscm_attention,
+ .error_func = afscm_error,
+ .aemap_func = afscm_aemap,
+ .ops_begin = &AFSCM_ops[0],
+ .ops_end = &AFSCM_ops[sizeof(AFSCM_ops)/sizeof(AFSCM_ops[0])],
+};
+
+static DECLARE_COMPLETION(kafscmd_alive);
+static DECLARE_COMPLETION(kafscmd_dead);
+static DECLARE_WAIT_QUEUE_HEAD(kafscmd_sleepq);
+static LIST_HEAD(kafscmd_attention_list);
+static LIST_HEAD(afscm_calls);
+static spinlock_t afscm_calls_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t kafscmd_attention_lock = SPIN_LOCK_UNLOCKED;
+static int kafscmd_die;
+
+/*****************************************************************************/
+/*
+ * AFS Cache Manager kernel thread
+ */
+static int kafscmd(void *arg)
+{
+ DECLARE_WAITQUEUE(myself,current);
+
+ struct rxrpc_call *call;
+ _SRXAFSCM_xxxx_t func;
+ int die;
+
+ printk("kAFS: Started kafscmd %d\n",current->pid);
+ strcpy(current->comm,"kafscmd");
+
+ daemonize();
+
+ complete(&kafscmd_alive);
+
+ /* only certain signals are of interest */
+ spin_lock_irq(&current->sig->siglock);
+ siginitsetinv(&current->blocked,0);
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,3)
+ recalc_sigpending();
+#else
+ recalc_sigpending(current);
+#endif
+ spin_unlock_irq(&current->sig->siglock);
+
+ /* loop around looking for things to attend to */
+ do {
+ if (list_empty(&kafscmd_attention_list)) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&kafscmd_sleepq,&myself);
+
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (!list_empty(&kafscmd_attention_list) ||
+ signal_pending(current) ||
+ kafscmd_die)
+ break;
+
+ schedule();
+ }
+
+ remove_wait_queue(&kafscmd_sleepq,&myself);
+ set_current_state(TASK_RUNNING);
+ }
+
+ die = kafscmd_die;
+
+ /* dequeue the next call requiring attention */
+ call = NULL;
+ spin_lock(&kafscmd_attention_lock);
+
+ if (!list_empty(&kafscmd_attention_list)) {
+ call = list_entry(kafscmd_attention_list.next,
+ struct rxrpc_call,
+ app_attn_link);
+ list_del_init(&call->app_attn_link);
+ die = 0;
+ }
+
+ spin_unlock(&kafscmd_attention_lock);
+
+ if (call) {
+ /* act upon it */
+ _debug("@@@ Begin Attend Call %p",call);
+
+ func = call->app_user;
+ if (func)
+ func(call);
+
+ rxrpc_put_call(call);
+
+ _debug("@@@ End Attend Call %p",call);
+ }
+
+ } while(!die);
+
+ /* and that's all */
+ complete_and_exit(&kafscmd_dead,0);
+
+} /* end kafscmd() */
+
+/*****************************************************************************/
+/*
+ * handle a call coming in to the cache manager
+ * - if I want to keep the call, I must increment its usage count
+ * - the return value will be negated and passed back in an abort packet if non-zero
+ * - serialised by virtue of there only being one krxiod
+ */
+static int afscm_new_call(struct rxrpc_call *call)
+{
+ _enter("%p{cid=%u u=%d}",call,ntohl(call->call_id),atomic_read(&call->usage));
+
+ rxrpc_get_call(call);
+
+ /* add to my current call list */
+ spin_lock(&afscm_calls_lock);
+ list_add(&call->app_link,&afscm_calls);
+ spin_unlock(&afscm_calls_lock);
+
+ _leave(" = 0");
+ return 0;
+
+} /* end afscm_new_call() */
+
+/*****************************************************************************/
+/*
+ * queue on the kafscmd queue for attention
+ */
+static void afscm_attention(struct rxrpc_call *call)
+{
+ _enter("%p{cid=%u u=%d}",call,ntohl(call->call_id),atomic_read(&call->usage));
+
+ spin_lock(&kafscmd_attention_lock);
+
+ if (list_empty(&call->app_attn_link)) {
+ list_add_tail(&call->app_attn_link,&kafscmd_attention_list);
+ rxrpc_get_call(call);
+ }
+
+ spin_unlock(&kafscmd_attention_lock);
+
+ wake_up(&kafscmd_sleepq);
+
+ _leave(" {u=%d}",atomic_read(&call->usage));
+} /* end afscm_attention() */
+
+/*****************************************************************************/
+/*
+ * handle my call being aborted
+ * - clean up, dequeue and put my ref to the call
+ */
+static void afscm_error(struct rxrpc_call *call)
+{
+ int removed;
+
+ _enter("%p{est=%s ac=%u er=%d}",
+ call,
+ rxrpc_call_error_states[call->app_err_state],
+ call->app_abort_code,
+ call->app_errno);
+
+ spin_lock(&kafscmd_attention_lock);
+
+ if (list_empty(&call->app_attn_link)) {
+ list_add_tail(&call->app_attn_link,&kafscmd_attention_list);
+ rxrpc_get_call(call);
+ }
+
+ spin_unlock(&kafscmd_attention_lock);
+
+ removed = 0;
+ spin_lock(&afscm_calls_lock);
+ if (!list_empty(&call->app_link)) {
+ list_del_init(&call->app_link);
+ removed = 1;
+ }
+ spin_unlock(&afscm_calls_lock);
+
+ if (removed)
+ rxrpc_put_call(call);
+
+ wake_up(&kafscmd_sleepq);
+
+ _leave("");
+} /* end afscm_error() */
+
+/*****************************************************************************/
+/*
+ * map afs abort codes to/from Linux error codes
+ * - called with call->lock held
+ */
+static void afscm_aemap(struct rxrpc_call *call)
+{
+ switch (call->app_err_state) {
+ case RXRPC_ESTATE_LOCAL_ABORT:
+ call->app_abort_code = -call->app_errno;
+ break;
+ case RXRPC_ESTATE_PEER_ABORT:
+ call->app_errno = -ECONNABORTED;
+ break;
+ default:
+ break;
+ }
+} /* end afscm_aemap() */
+
+/*****************************************************************************/
+/*
+ * start the cache manager service if not already started
+ */
+int afscm_start(void)
+{
+ int ret;
+
+ down_write(&afscm_sem);
+ if (!afscm_usage) {
+ ret = kernel_thread(kafscmd,NULL,0);
+ if (ret<0)
+ goto out;
+
+ wait_for_completion(&kafscmd_alive);
+
+ ret = rxrpc_add_service(afs_transport,&AFSCM_service);
+ if (ret<0)
+ goto kill;
+ }
+
+ afscm_usage++;
+ up_write(&afscm_sem);
+
+ return 0;
+
+ kill:
+ kafscmd_die = 1;
+ wake_up(&kafscmd_sleepq);
+ wait_for_completion(&kafscmd_dead);
+
+ out:
+ up_write(&afscm_sem);
+ return ret;
+
+} /* end afscm_start() */
+
+/*****************************************************************************/
+/*
+ * stop the cache manager service
+ */
+void afscm_stop(void)
+{
+ struct rxrpc_call *call;
+
+ down_write(&afscm_sem);
+
+ if (afscm_usage==0) BUG();
+ afscm_usage--;
+
+ if (afscm_usage==0) {
+ /* don't want more incoming calls */
+ rxrpc_del_service(afs_transport,&AFSCM_service);
+
+ /* abort any calls I've still got open (the afscm_error() will dequeue them) */
+ spin_lock(&afscm_calls_lock);
+ while (!list_empty(&afscm_calls)) {
+ call = list_entry(afscm_calls.next,struct rxrpc_call,app_link);
+ list_del_init(&call->app_link);
+ rxrpc_get_call(call);
+ spin_unlock(&afscm_calls_lock);
+
+ rxrpc_call_abort(call,-ESRCH); /* abort, dequeue and put */
+
+ rxrpc_put_call(call);
+
+ spin_lock(&afscm_calls_lock);
+ }
+ spin_unlock(&afscm_calls_lock);
+
+ /* get rid of my daemon */
+ kafscmd_die = 1;
+ wake_up(&kafscmd_sleepq);
+ wait_for_completion(&kafscmd_dead);
+
+ /* dispose of any calls waiting for attention */
+ spin_lock(&kafscmd_attention_lock);
+ while (!list_empty(&kafscmd_attention_list)) {
+ call = list_entry(kafscmd_attention_list.next,
+ struct rxrpc_call,
+ app_attn_link);
+
+ list_del_init(&call->app_attn_link);
+ spin_unlock(&kafscmd_attention_lock);
+
+ rxrpc_put_call(call);
+
+ spin_lock(&kafscmd_attention_lock);
+ }
+ spin_unlock(&kafscmd_attention_lock);
+ }
+
+ up_write(&afscm_sem);
+
+} /* end afscm_stop() */
+
+/*****************************************************************************/
+/*
+ * handle the fileserver breaking a set of callbacks
+ */
+static void _SRXAFSCM_CallBack(struct rxrpc_call *call)
+{
+ afs_server_t *server;
+ size_t count, qty, tmp;
+ int ret = 0, removed;
+
+ _enter("%p{acs=%s}",call,rxrpc_call_states[call->app_call_state]);
+
+ server = afs_server_get_from_peer(call->conn->peer);
+
+ switch (call->app_call_state) {
+ /* we've received the last packet
+ * - drain all the data from the call and send the reply
+ */
+ case RXRPC_CSTATE_SRVR_GOT_ARGS:
+ ret = -EBADMSG;
+ qty = call->app_ready_qty;
+ if (qty<8 || qty>50*(6*4)+8)
+ break;
+
+ {
+ afs_callback_t *cb, *pcb;
+ int loop;
+ u32 *fp, *bp;
+
+ fp = rxrpc_call_alloc_scratch(call,qty);
+
+ /* drag the entire argument block out to the scratch space */
+ ret = rxrpc_call_read_data(call,fp,qty,0);
+ if (ret<0)
+ break;
+
+ /* and unmarshall the parameter block */
+ ret = -EBADMSG;
+ count = ntohl(*fp++);
+ if (count>AFSCBMAX ||
+ (count*(3*4)+8 != qty && count*(6*4)+8 != qty))
+ break;
+
+ bp = fp + count*3;
+ tmp = ntohl(*bp++);
+ if (tmp>0 && tmp!=count)
+ break;
+ if (tmp==0)
+ bp = NULL;
+
+ pcb = cb = rxrpc_call_alloc_scratch_s(call,afs_callback_t);
+
+ for (loop=count-1; loop>=0; loop--) {
+ pcb->fid.vid = ntohl(*fp++);
+ pcb->fid.vnode = ntohl(*fp++);
+ pcb->fid.unique = ntohl(*fp++);
+ if (bp) {
+ pcb->version = ntohl(*bp++);
+ pcb->expiry = ntohl(*bp++);
+ pcb->type = ntohl(*bp++);
+ }
+ else {
+ pcb->version = 0;
+ pcb->expiry = 0;
+ pcb->type = AFSCM_CB_UNTYPED;
+ }
+ pcb++;
+ }
+
+ /* invoke the actual service routine */
+ ret = SRXAFSCM_CallBack(server,count,cb);
+ if (ret<0)
+ break;
+ }
+
+ /* send the reply */
+ ret = rxrpc_call_write_data(call,0,NULL,RXRPC_LAST_PACKET,GFP_KERNEL,0,&count);
+ if (ret<0)
+ break;
+ break;
+
+ /* operation complete */
+ case RXRPC_CSTATE_COMPLETE:
+ call->app_user = NULL;
+ removed = 0;
+ spin_lock(&afscm_calls_lock);
+ if (!list_empty(&call->app_link)) {
+ list_del_init(&call->app_link);
+ removed = 1;
+ }
+ spin_unlock(&afscm_calls_lock);
+
+ if (removed)
+ rxrpc_put_call(call);
+ break;
+
+ /* operation terminated on error */
+ case RXRPC_CSTATE_ERROR:
+ call->app_user = NULL;
+ break;
+
+ default:
+ break;
+ }
+
+ if (ret<0)
+ rxrpc_call_abort(call,ret);
+
+ if (server) afs_put_server(server);
+
+ _leave(" = %d",ret);
+
+} /* end _SRXAFSCM_CallBack() */
+
+/*****************************************************************************/
+/*
+ * handle the fileserver asking us to initialise our callback state
+ */
+static void _SRXAFSCM_InitCallBackState(struct rxrpc_call *call)
+{
+ afs_server_t *server;
+ size_t count;
+ int ret = 0, removed;
+
+ _enter("%p{acs=%s}",call,rxrpc_call_states[call->app_call_state]);
+
+ server = afs_server_get_from_peer(call->conn->peer);
+
+ switch (call->app_call_state) {
+ /* we've received the last packet - drain all the data from the call */
+ case RXRPC_CSTATE_SRVR_GOT_ARGS:
+ /* shouldn't be any args */
+ ret = -EBADMSG;
+ break;
+
+ /* send the reply when asked for it */
+ case RXRPC_CSTATE_SRVR_SND_REPLY:
+ /* invoke the actual service routine */
+ ret = SRXAFSCM_InitCallBackState(server);
+ if (ret<0)
+ break;
+
+ ret = rxrpc_call_write_data(call,0,NULL,RXRPC_LAST_PACKET,GFP_KERNEL,0,&count);
+ if (ret<0)
+ break;
+ break;
+
+ /* operation complete */
+ case RXRPC_CSTATE_COMPLETE:
+ call->app_user = NULL;
+ removed = 0;
+ spin_lock(&afscm_calls_lock);
+ if (!list_empty(&call->app_link)) {
+ list_del_init(&call->app_link);
+ removed = 1;
+ }
+ spin_unlock(&afscm_calls_lock);
+
+ if (removed)
+ rxrpc_put_call(call);
+ break;
+
+ /* operation terminated on error */
+ case RXRPC_CSTATE_ERROR:
+ call->app_user = NULL;
+ break;
+
+ default:
+ break;
+ }
+
+ if (ret<0)
+ rxrpc_call_abort(call,ret);
+
+ if (server) afs_put_server(server);
+
+ _leave(" = %d",ret);
+
+} /* end _SRXAFSCM_InitCallBackState() */
+
+/*****************************************************************************/
+/*
+ * handle a probe from a fileserver
+ */
+static void _SRXAFSCM_Probe(struct rxrpc_call *call)
+{
+ afs_server_t *server;
+ size_t count;
+ int ret = 0, removed;
+
+ _enter("%p{acs=%s}",call,rxrpc_call_states[call->app_call_state]);
+
+ server = afs_server_get_from_peer(call->conn->peer);
+
+ switch (call->app_call_state) {
+ /* we've received the last packet - drain all the data from the call */
+ case RXRPC_CSTATE_SRVR_GOT_ARGS:
+ /* shouldn't be any args */
+ ret = -EBADMSG;
+ break;
+
+ /* send the reply when asked for it */
+ case RXRPC_CSTATE_SRVR_SND_REPLY:
+ /* invoke the actual service routine */
+ ret = SRXAFSCM_Probe(server);
+ if (ret<0)
+ break;
+
+ ret = rxrpc_call_write_data(call,0,NULL,RXRPC_LAST_PACKET,GFP_KERNEL,0,&count);
+ if (ret<0)
+ break;
+ break;
+
+ /* operation complete */
+ case RXRPC_CSTATE_COMPLETE:
+ call->app_user = NULL;
+ removed = 0;
+ spin_lock(&afscm_calls_lock);
+ if (!list_empty(&call->app_link)) {
+ list_del_init(&call->app_link);
+ removed = 1;
+ }
+ spin_unlock(&afscm_calls_lock);
+
+ if (removed)
+ rxrpc_put_call(call);
+ break;
+
+ /* operation terminated on error */
+ case RXRPC_CSTATE_ERROR:
+ call->app_user = NULL;
+ break;
+
+ default:
+ break;
+ }
+
+ if (ret<0)
+ rxrpc_call_abort(call,ret);
+
+ if (server) afs_put_server(server);
+
+ _leave(" = %d",ret);
+
+} /* end _SRXAFSCM_Probe() */
diff --git a/fs/afs/cmservice.h b/fs/afs/cmservice.h
new file mode 100644
index 000000000000..89fb14e7615b
--- /dev/null
+++ b/fs/afs/cmservice.h
@@ -0,0 +1,27 @@
+/* cmservice.h: AFS Cache Manager Service declarations
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_AFS_CMSERVICE_H
+#define _LINUX_AFS_CMSERVICE_H
+
+#include <rxrpc/transport.h>
+#include "types.h"
+
+/* cache manager start/stop */
+extern int afscm_start(void);
+extern void afscm_stop(void);
+
+/* cache manager server functions */
+extern int SRXAFSCM_InitCallBackState(afs_server_t *server);
+extern int SRXAFSCM_CallBack(afs_server_t *server, size_t count, afs_callback_t callbacks[]);
+extern int SRXAFSCM_Probe(afs_server_t *server);
+
+#endif /* _LINUX_AFS_CMSERVICE_H */
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
new file mode 100644
index 000000000000..d39345f4e277
--- /dev/null
+++ b/fs/afs/dir.c
@@ -0,0 +1,642 @@
+/* dir.c: AFS filesystem directory handling
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/smp_lock.h>
+#include "vnode.h"
+#include "volume.h"
+#include <rxrpc/call.h>
+#include "super.h"
+#include "internal.h"
+
+static struct dentry *afs_dir_lookup(struct inode *dir, struct dentry *dentry);
+static int afs_dir_open(struct inode *inode, struct file *file);
+static int afs_dir_readdir(struct file *file, void *dirent, filldir_t filldir);
+static int afs_d_revalidate(struct dentry *dentry, int flags);
+static int afs_d_delete(struct dentry *dentry);
+static int afs_dir_lookup_filldir(void *_cookie, const char *name, int nlen, loff_t fpos,
+ ino_t ino, unsigned dtype);
+
+struct file_operations afs_dir_file_operations = {
+ .open = afs_dir_open,
+ .readdir = afs_dir_readdir,
+};
+
+struct inode_operations afs_dir_inode_operations = {
+ .lookup = afs_dir_lookup,
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
+ .getattr = afs_inode_getattr,
+#else
+ .revalidate = afs_inode_revalidate,
+#endif
+// .create = afs_dir_create,
+// .link = afs_dir_link,
+// .unlink = afs_dir_unlink,
+// .symlink = afs_dir_symlink,
+// .mkdir = afs_dir_mkdir,
+// .rmdir = afs_dir_rmdir,
+// .mknod = afs_dir_mknod,
+// .rename = afs_dir_rename,
+};
+
+static struct dentry_operations afs_fs_dentry_operations = {
+ .d_revalidate = afs_d_revalidate,
+ .d_delete = afs_d_delete,
+};
+
+#define AFS_DIR_HASHTBL_SIZE 128
+#define AFS_DIR_DIRENT_SIZE 32
+#define AFS_DIRENT_PER_BLOCK 64
+
+typedef union afs_dirent {
+ struct {
+ u8 valid;
+ u8 unused[1];
+ u16 hash_next;
+ u32 vnode;
+ u32 unique;
+ u8 name[16];
+ u8 overflow[4]; /* if any char of the name (inc NUL) reaches here, consume
+ * the next dirent too */
+ };
+ u8 extended_name[32];
+} afs_dirent_t;
+
+/* AFS directory page header (one at the beginning of every 2048-byte chunk) */
+typedef struct afs_dir_pagehdr {
+ u16 npages;
+ u16 magic;
+#define AFS_DIR_MAGIC htons(1234)
+ u8 nentries;
+ u8 bitmap[8];
+ u8 pad[19];
+} afs_dir_pagehdr_t;
+
+/* directory block layout */
+typedef union afs_dir_block {
+
+ afs_dir_pagehdr_t pagehdr;
+
+ struct {
+ afs_dir_pagehdr_t pagehdr;
+ u8 alloc_ctrs[128];
+ u16 hashtable[AFS_DIR_HASHTBL_SIZE]; /* dir hash table */
+ } hdr;
+
+ afs_dirent_t dirents[AFS_DIRENT_PER_BLOCK];
+} afs_dir_block_t;
+
+/* layout on a linux VM page */
+typedef struct afs_dir_page {
+ afs_dir_block_t blocks[PAGE_SIZE/sizeof(afs_dir_block_t)];
+} afs_dir_page_t;
+
+struct afs_dir_lookup_cookie {
+ afs_fid_t fid;
+ const char *name;
+ size_t nlen;
+ int found;
+};
+
+/*****************************************************************************/
+/*
+ * check that a directory page is valid
+ */
+static inline void afs_dir_check_page(struct inode *dir, struct page *page)
+{
+ afs_dir_page_t *dbuf;
+ loff_t latter;
+ int tmp, qty;
+
+#if 0
+ /* check the page count */
+ qty = desc.size/sizeof(dbuf->blocks[0]);
+ if (qty==0)
+ goto error;
+
+ if (page->index==0 && qty!=ntohs(dbuf->blocks[0].pagehdr.npages)) {
+ printk("kAFS: %s(%lu): wrong number of dir blocks %d!=%hu\n",
+ __FUNCTION__,dir->i_ino,qty,ntohs(dbuf->blocks[0].pagehdr.npages));
+ goto error;
+ }
+#endif
+
+ /* determine how many magic numbers there should be in this page */
+ latter = dir->i_size - (page->index << PAGE_CACHE_SHIFT);
+ if (latter >= PAGE_SIZE)
+ qty = PAGE_SIZE;
+ else
+ qty = latter;
+ qty /= sizeof(afs_dir_block_t);
+
+ /* check them */
+ dbuf = page_address(page);
+ for (tmp=0; tmp<qty; tmp++) {
+ if (dbuf->blocks[tmp].pagehdr.magic != AFS_DIR_MAGIC) {
+ printk("kAFS: %s(%lu): bad magic %d/%d is %04hx\n",
+ __FUNCTION__,dir->i_ino,tmp,
+ qty,ntohs(dbuf->blocks[tmp].pagehdr.magic));
+ goto error;
+ }
+ }
+
+ SetPageChecked(page);
+ return;
+
+ error:
+ SetPageChecked(page);
+ SetPageError(page);
+
+} /* end afs_dir_check_page() */
+
+/*****************************************************************************/
+/*
+ * discard a page cached in the pagecache
+ */
+static inline void afs_dir_put_page(struct page *page)
+{
+ kunmap(page);
+ page_cache_release(page);
+
+} /* end afs_dir_put_page() */
+
+/*****************************************************************************/
+/*
+ * get a page into the pagecache
+ */
+static struct page *afs_dir_get_page(struct inode *dir, unsigned long index)
+{
+ struct page *page;
+
+ _enter("{%lu},%lu",dir->i_ino,index);
+
+ page = read_cache_page(dir->i_mapping,index,
+ (filler_t*)dir->i_mapping->a_ops->readpage,NULL);
+ if (!IS_ERR(page)) {
+ wait_on_page_locked(page);
+ kmap(page);
+ if (!PageUptodate(page))
+ goto fail;
+ if (!PageChecked(page))
+ afs_dir_check_page(dir,page);
+ if (PageError(page))
+ goto fail;
+ }
+ return page;
+
+ fail:
+ afs_dir_put_page(page);
+ return ERR_PTR(-EIO);
+} /* end afs_dir_get_page() */
+
+/*****************************************************************************/
+/*
+ * open an AFS directory file
+ */
+static int afs_dir_open(struct inode *inode, struct file *file)
+{
+ _enter("{%lu}",inode->i_ino);
+
+ if (sizeof(afs_dir_block_t) != 2048) BUG();
+ if (sizeof(afs_dirent_t) != 32) BUG();
+
+ if (AFS_FS_I(inode)->flags & AFS_VNODE_DELETED)
+ return -ENOENT;
+
+ _leave(" = 0");
+ return 0;
+
+} /* end afs_dir_open() */
+
+/*****************************************************************************/
+/*
+ * deal with one block in an AFS directory
+ */
+static int afs_dir_iterate_block(unsigned *fpos,
+ afs_dir_block_t *block,
+ unsigned blkoff,
+ void *cookie,
+ filldir_t filldir)
+{
+ afs_dirent_t *dire;
+ unsigned offset, next, curr;
+ size_t nlen;
+ int tmp, ret;
+
+ _enter("%u,%x,%p,,",*fpos,blkoff,block);
+
+ curr = (*fpos - blkoff) / sizeof(afs_dirent_t);
+
+ /* walk through the block, an entry at a time */
+ for (offset = AFS_DIRENT_PER_BLOCK - block->pagehdr.nentries;
+ offset < AFS_DIRENT_PER_BLOCK;
+ offset = next
+ ) {
+ next = offset + 1;
+
+ /* skip entries marked unused in the bitmap */
+ if (!(block->pagehdr.bitmap[offset/8] & (1 << (offset % 8)))) {
+ _debug("ENT[%u.%u]: unused\n",blkoff/sizeof(afs_dir_block_t),offset);
+ if (offset>=curr)
+ *fpos = blkoff + next * sizeof(afs_dirent_t);
+ continue;
+ }
+
+ /* got a valid entry */
+ dire = &block->dirents[offset];
+ nlen = strnlen(dire->name,sizeof(*block) - offset*sizeof(afs_dirent_t));
+
+ _debug("ENT[%u.%u]: %s %u \"%.*s\"\n",
+ blkoff/sizeof(afs_dir_block_t),offset,
+ offset<curr ? "skip" : "fill",
+ nlen,nlen,dire->name);
+
+ /* work out where the next possible entry is */
+ for (tmp=nlen; tmp>15; tmp-=sizeof(afs_dirent_t)) {
+ if (next>=AFS_DIRENT_PER_BLOCK) {
+ _debug("ENT[%u.%u]:"
+ " %u travelled beyond end dir block (len %u/%u)\n",
+ blkoff/sizeof(afs_dir_block_t),offset,next,tmp,nlen);
+ return -EIO;
+ }
+ if (!(block->pagehdr.bitmap[next/8] & (1 << (next % 8)))) {
+ _debug("ENT[%u.%u]: %u unmarked extension (len %u/%u)\n",
+ blkoff/sizeof(afs_dir_block_t),offset,next,tmp,nlen);
+ return -EIO;
+ }
+
+ _debug("ENT[%u.%u]: ext %u/%u\n",
+ blkoff/sizeof(afs_dir_block_t),next,tmp,nlen);
+ next++;
+ }
+
+ /* skip if starts before the current position */
+ if (offset<curr)
+ continue;
+
+ /* found the next entry */
+ ret = filldir(cookie,
+ dire->name,
+ nlen,
+ blkoff + offset * sizeof(afs_dirent_t),
+ ntohl(dire->vnode),
+ filldir==afs_dir_lookup_filldir ? dire->unique : DT_UNKNOWN);
+ if (ret<0) {
+ _leave(" = 0 [full]");
+ return 0;
+ }
+
+ *fpos = blkoff + next * sizeof(afs_dirent_t);
+ }
+
+ _leave(" = 1 [more]");
+ return 1;
+} /* end afs_dir_iterate_block() */
+
+/*****************************************************************************/
+/*
+ * read an AFS directory
+ */
+static int afs_dir_iterate(struct inode *dir, unsigned *fpos, void *cookie, filldir_t filldir)
+{
+ afs_dir_block_t *dblock;
+ afs_dir_page_t *dbuf;
+ struct page *page;
+ unsigned blkoff, limit;
+ int ret;
+
+ _enter("{%lu},%u,,",dir->i_ino,*fpos);
+
+ if (AFS_FS_I(dir)->flags & AFS_VNODE_DELETED) {
+ _leave(" = -ESTALE");
+ return -ESTALE;
+ }
+
+ /* round the file position up to the next entry boundary */
+ *fpos += sizeof(afs_dirent_t) - 1;
+ *fpos &= ~(sizeof(afs_dirent_t) - 1);
+
+ /* walk through the blocks in sequence */
+ ret = 0;
+ while (*fpos < dir->i_size) {
+ blkoff = *fpos & ~(sizeof(afs_dir_block_t) - 1);
+
+ /* fetch the appropriate page from the directory */
+ page = afs_dir_get_page(dir,blkoff/PAGE_SIZE);
+ if (IS_ERR(page)) {
+ ret = PTR_ERR(page);
+ break;
+ }
+
+ limit = blkoff & ~(PAGE_SIZE-1);
+
+ dbuf = page_address(page);
+
+ /* deal with the individual blocks stashed on this page */
+ do {
+ dblock = &dbuf->blocks[(blkoff % PAGE_SIZE) / sizeof(afs_dir_block_t)];
+ ret = afs_dir_iterate_block(fpos,dblock,blkoff,cookie,filldir);
+ if (ret!=1) {
+ afs_dir_put_page(page);
+ goto out;
+ }
+
+ blkoff += sizeof(afs_dir_block_t);
+
+ } while (*fpos < dir->i_size && blkoff < limit);
+
+ afs_dir_put_page(page);
+ ret = 0;
+ }
+
+ out:
+ _leave(" = %d",ret);
+ return ret;
+} /* end afs_dir_iterate() */
+
+/*****************************************************************************/
+/*
+ * read an AFS directory
+ */
+static int afs_dir_readdir(struct file *file, void *cookie, filldir_t filldir)
+{
+ unsigned fpos;
+ int ret;
+
+ _enter("{%Ld,{%lu}}",file->f_pos,file->f_dentry->d_inode->i_ino);
+
+ fpos = file->f_pos;
+ ret = afs_dir_iterate(file->f_dentry->d_inode,&fpos,cookie,filldir);
+ file->f_pos = fpos;
+
+ _leave(" = %d",ret);
+ return ret;
+} /* end afs_dir_readdir() */
+
+/*****************************************************************************/
+/*
+ * search the directory for a name
+ * - if afs_dir_iterate_block() spots this function, it'll pass the FID uniquifier through dtype
+ */
+static int afs_dir_lookup_filldir(void *_cookie, const char *name, int nlen, loff_t fpos,
+ ino_t ino, unsigned dtype)
+{
+ struct afs_dir_lookup_cookie *cookie = _cookie;
+
+ _enter("{%s,%u},%s,%u,,%lu,%u",cookie->name,cookie->nlen,name,nlen,ino,ntohl(dtype));
+
+ if (cookie->nlen != nlen || memcmp(cookie->name,name,nlen)!=0) {
+ _leave(" = 0 [no]");
+ return 0;
+ }
+
+ cookie->fid.vnode = ino;
+ cookie->fid.unique = ntohl(dtype);
+ cookie->found = 1;
+
+ _leave(" = -1 [found]");
+ return -1;
+} /* end afs_dir_lookup_filldir() */
+
+/*****************************************************************************/
+/*
+ * look up an entry in a directory
+ */
+static struct dentry *afs_dir_lookup(struct inode *dir, struct dentry *dentry)
+{
+ struct afs_dir_lookup_cookie cookie;
+ struct afs_super_info *as;
+ struct inode *inode;
+ afs_vnode_t *vnode;
+ unsigned fpos;
+ int ret;
+
+ _enter("{%lu},{%s}",dir->i_ino,dentry->d_name.name);
+
+ /* insanity checks first */
+ if (sizeof(afs_dir_block_t) != 2048) BUG();
+ if (sizeof(afs_dirent_t) != 32) BUG();
+
+ if (dentry->d_name.len > 255) {
+ _leave(" = -ENAMETOOLONG");
+ return ERR_PTR(-ENAMETOOLONG);
+ }
+
+ vnode = AFS_FS_I(dir);
+ if (vnode->flags & AFS_VNODE_DELETED) {
+ _leave(" = -ESTALE");
+ return ERR_PTR(-ESTALE);
+ }
+
+ as = dir->i_sb->s_fs_info;
+
+ /* search the directory */
+ cookie.name = dentry->d_name.name;
+ cookie.nlen = dentry->d_name.len;
+ cookie.fid.vid = as->volume->vid;
+ cookie.found = 0;
+
+ fpos = 0;
+ ret = afs_dir_iterate(dir,&fpos,&cookie,afs_dir_lookup_filldir);
+ if (ret<0) {
+ _leave(" = %d",ret);
+ return ERR_PTR(ret);
+ }
+
+ ret = -ENOENT;
+ if (!cookie.found) {
+ _leave(" = %d",ret);
+ return ERR_PTR(ret);
+ }
+
+ /* instantiate the dentry */
+ ret = afs_iget(dir->i_sb,&cookie.fid,&inode);
+ if (ret<0) {
+ _leave(" = %d",ret);
+ return ERR_PTR(ret);
+ }
+
+ dentry->d_op = &afs_fs_dentry_operations;
+ dentry->d_fsdata = (void*) (unsigned) vnode->status.version;
+
+ d_add(dentry,inode);
+ _leave(" = 0 { vn=%u u=%u } -> { ino=%lu v=%lu }",
+ cookie.fid.vnode,
+ cookie.fid.unique,
+ dentry->d_inode->i_ino,
+ dentry->d_inode->i_version);
+
+ return NULL;
+} /* end afs_dir_lookup() */
+
+/*****************************************************************************/
+/*
+ * check that a dentry lookup hit has found a valid entry
+ * - NOTE! the hit can be a negative hit too, so we can't assume we have an inode
+ * (derived from nfs_lookup_revalidate)
+ */
+static int afs_d_revalidate(struct dentry *dentry, int flags)
+{
+ struct afs_dir_lookup_cookie cookie;
+ struct dentry *parent;
+ struct inode *inode, *dir;
+ unsigned fpos;
+ int ret;
+
+ _enter("%s,%x",dentry->d_name.name,flags);
+
+ /* lock down the parent dentry so we can peer at it */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
+ read_lock(&dparent_lock);
+ parent = dget(dentry->d_parent);
+ read_unlock(&dparent_lock);
+#else
+ lock_kernel();
+ parent = dget(dentry->d_parent);
+ unlock_kernel();
+#endif
+
+ dir = parent->d_inode;
+ inode = dentry->d_inode;
+
+ /* handle a negative inode */
+ if (!inode)
+ goto out_bad;
+
+ /* handle a bad inode */
+ if (is_bad_inode(inode)) {
+ printk("kAFS: afs_d_revalidate: %s/%s has bad inode\n",
+ dentry->d_parent->d_name.name,dentry->d_name.name);
+ goto out_bad;
+ }
+
+	/* force a full lookup if the parent directory changed since the server was last consulted
+ * - otherwise this inode must still exist, even if the inode details themselves have
+ * changed
+ */
+ if (AFS_FS_I(dir)->flags & AFS_VNODE_CHANGED)
+ afs_vnode_fetch_status(AFS_FS_I(dir));
+
+ if (AFS_FS_I(dir)->flags & AFS_VNODE_DELETED) {
+ _debug("%s: parent dir deleted",dentry->d_name.name);
+ goto out_bad;
+ }
+
+ if (AFS_FS_I(inode)->flags & AFS_VNODE_DELETED) {
+ _debug("%s: file already deleted",dentry->d_name.name);
+ goto out_bad;
+ }
+
+ if ((unsigned)dentry->d_fsdata != (unsigned)AFS_FS_I(dir)->status.version) {
+ _debug("%s: parent changed %u -> %u",
+ dentry->d_name.name,
+ (unsigned)dentry->d_fsdata,
+ (unsigned)AFS_FS_I(dir)->status.version);
+
+ /* search the directory for this vnode */
+ cookie.name = dentry->d_name.name;
+ cookie.nlen = dentry->d_name.len;
+ cookie.fid.vid = AFS_FS_I(inode)->volume->vid;
+ cookie.found = 0;
+
+ fpos = 0;
+ ret = afs_dir_iterate(dir,&fpos,&cookie,afs_dir_lookup_filldir);
+ if (ret<0) {
+ _debug("failed to iterate dir %s: %d",parent->d_name.name,ret);
+ goto out_bad;
+ }
+
+ if (!cookie.found) {
+ _debug("%s: dirent not found",dentry->d_name.name);
+ goto not_found;
+ }
+
+ /* if the vnode ID has changed, then the dirent points to a different file */
+ if (cookie.fid.vnode!=AFS_FS_I(inode)->fid.vnode) {
+ _debug("%s: dirent changed",dentry->d_name.name);
+ goto not_found;
+ }
+
+		/* if the vnode ID uniquifier has changed, then the file has been deleted */
+ if (cookie.fid.unique!=AFS_FS_I(inode)->fid.unique) {
+ _debug("%s: file deleted (uq %u -> %u I:%lu)",
+ dentry->d_name.name,
+ cookie.fid.unique,
+ AFS_FS_I(inode)->fid.unique,
+ inode->i_version);
+ spin_lock(&AFS_FS_I(inode)->lock);
+ AFS_FS_I(inode)->flags |= AFS_VNODE_DELETED;
+ spin_unlock(&AFS_FS_I(inode)->lock);
+ invalidate_inode_pages(inode->i_mapping);
+ goto out_bad;
+ }
+
+ dentry->d_fsdata = (void*) (unsigned) AFS_FS_I(dir)->status.version;
+ }
+
+ out_valid:
+ dput(parent);
+ _leave(" = 1 [valid]");
+ return 1;
+
+ /* the dirent, if it exists, now points to a different vnode */
+ not_found:
+ dentry->d_flags |= DCACHE_NFSFS_RENAMED;
+
+ out_bad:
+ if (inode) {
+ /* don't unhash if we have submounts */
+ if (have_submounts(dentry))
+ goto out_valid;
+ }
+
+ shrink_dcache_parent(dentry);
+
+ _debug("dropping dentry %s/%s",dentry->d_parent->d_name.name,dentry->d_name.name);
+ d_drop(dentry);
+
+ dput(parent);
+
+ _leave(" = 0 [bad]");
+ return 0;
+} /* end afs_d_revalidate() */
+
+/*****************************************************************************/
+/*
+ * allow the VFS to enquire as to whether a dentry should be unhashed (mustn't sleep)
+ * - called from dput() when d_count is going to 0.
+ * - return 1 to request dentry be unhashed, 0 otherwise
+ */
+static int afs_d_delete(struct dentry *dentry)
+{
+ _enter("%s",dentry->d_name.name);
+
+ if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
+ goto zap;
+
+ if (dentry->d_inode) {
+ if (AFS_FS_I(dentry->d_inode)->flags & AFS_VNODE_DELETED)
+ goto zap;
+ }
+
+ _leave(" = 0 [keep]");
+ return 0;
+
+ zap:
+ _leave(" = 1 [zap]");
+ return 1;
+} /* end afs_d_delete() */
diff --git a/fs/afs/errors.h b/fs/afs/errors.h
new file mode 100644
index 000000000000..115befe16450
--- /dev/null
+++ b/fs/afs/errors.h
@@ -0,0 +1,34 @@
+/* errors.h: AFS abort/error codes
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _H_DB712916_5113_11D6_9A6D_0002B3163499
+#define _H_DB712916_5113_11D6_9A6D_0002B3163499
+
+#include "types.h"
+
+/* file server abort codes */
+typedef enum {	/* positive wire values; mapped to errno by afs_abort_to_error() */
+	VSALVAGE = 101, /* volume needs salvaging */
+	VNOVNODE = 102, /* no such file/dir (vnode) */
+	VNOVOL = 103, /* no such volume or volume unavailable */
+	VVOLEXISTS = 104, /* volume name already exists */
+	VNOSERVICE = 105, /* volume not currently in service */
+	VOFFLINE = 106, /* volume is currently offline (more info available [VVL-spec]) */
+	VONLINE = 107, /* volume is already online */
+	VDISKFULL = 108, /* disk partition is full */
+	VOVERQUOTA = 109, /* volume's maximum quota exceeded */
+	VBUSY = 110, /* volume is temporarily unavailable */
+	VMOVED = 111, /* volume moved to new server - ask this FS where */
+} afs_rxfs_abort_t;
+
+extern int afs_abort_to_error(int abortcode);
+
+#endif /* _H_DB712916_5113_11D6_9A6D_0002B3163499 */
diff --git a/fs/afs/file.c b/fs/afs/file.c
new file mode 100644
index 000000000000..d14e427b5784
--- /dev/null
+++ b/fs/afs/file.c
@@ -0,0 +1,143 @@
+/* file.c: AFS filesystem file handling
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include "volume.h"
+#include "vnode.h"
+#include <rxrpc/call.h>
+#include "internal.h"
+
+//static int afs_file_open(struct inode *inode, struct file *file);
+//static int afs_file_release(struct inode *inode, struct file *file);
+
+static int afs_file_readpage(struct file *file, struct page *page);
+
+//static ssize_t afs_file_read(struct file *file, char *buf, size_t size, loff_t *off);
+
+static ssize_t afs_file_write(struct file *file, const char *buf, size_t size, loff_t *off);
+
+struct inode_operations afs_file_inode_operations = { /* inode ops for regular AFS files */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
+	.getattr = afs_inode_getattr, /* 2.5+: stat via getattr */
+#else
+	.revalidate = afs_inode_revalidate, /* 2.4: stat via revalidate */
+#endif
+};
+
+struct file_operations afs_file_file_operations = {
+// .open = afs_file_open,
+// .release = afs_file_release,
+	.read = generic_file_read, //afs_file_read,  /* page-cache backed read; pages filled by afs_file_readpage() */
+	.write = afs_file_write, /* write is a stub returning -EIO (see below) */
+	.mmap = generic_file_mmap,
+// .fsync = afs_file_fsync,
+};
+
+struct address_space_operations afs_fs_aops = {
+	.readpage = afs_file_readpage,
+};
+
+/*****************************************************************************/
+/*
+ * AFS file read
+ */
+#if 0 /* disabled: generic_file_read is used instead (see afs_file_file_operations) */
+static ssize_t afs_file_read(struct file *file, char *buf, size_t size, loff_t *off)
+{
+	struct afs_inode_info *ai;
+
+	ai = AFS_FS_I(file->f_dentry->d_inode);
+	if (ai->flags & AFS_INODE_DELETED) /* NOTE(review): live code uses AFS_VNODE_DELETED — confirm flag name before enabling */
+		return -ESTALE;
+
+	return -EIO; /* stub: reading not implemented by this path */
+} /* end afs_file_read() */
+#endif
+
+/*****************************************************************************/
+/*
+ * AFS file write
+ */
+static ssize_t afs_file_write(struct file *file, const char *buf, size_t size, loff_t *off)
+{
+	afs_vnode_t *vnode;
+
+	vnode = AFS_FS_I(file->f_dentry->d_inode);
+	if (vnode->flags & AFS_VNODE_DELETED) /* file was deleted on the server */
+		return -ESTALE;
+
+	return -EIO; /* write support not implemented yet */
+} /* end afs_file_write() */
+
+/*****************************************************************************/
+/*
+ * AFS read page from file (or symlink)
+ */
+static int afs_file_readpage(struct file *file, struct page *page)
+{
+	struct afs_rxfs_fetch_descriptor desc;
+	struct inode *inode;
+	afs_vnode_t *vnode;
+	int ret;
+
+	inode = page->mapping->host;
+
+	_enter("{%lu},{%lu}",inode->i_ino,page->index);
+
+	vnode = AFS_FS_I(inode);
+
+	if (!PageLocked(page)) /* caller must hand us a locked page */
+		PAGE_BUG(page);
+
+	ret = -ESTALE;
+	if (vnode->flags & AFS_VNODE_DELETED)
+		goto error;
+
+	/* work out how much to get and from where */
+	desc.fid = vnode->fid;
+	desc.offset = page->index << PAGE_CACHE_SHIFT; /* NOTE(review): index is unsigned long; on 32-bit this shift can overflow for large files — confirm a 64-bit cast isn't needed */
+	desc.size = min((size_t)(inode->i_size - desc.offset),(size_t)PAGE_SIZE); /* NOTE(review): assumes offset < i_size; the subtraction underflows otherwise — confirm callers guarantee this */
+	desc.buffer = kmap(page);
+
+	clear_page(desc.buffer); /* pre-zero so any tail beyond EOF reads as zeroes */
+
+	/* read the contents of the file from the server into the page */
+	ret = afs_vnode_fetch_data(vnode,&desc);
+	kunmap(page);
+	if (ret<0) {
+		if (ret==-ENOENT) { /* server no longer knows the vnode */
+			_debug("got NOENT from server - marking file deleted and stale");
+			vnode->flags |= AFS_VNODE_DELETED;
+			ret = -ESTALE;
+		}
+		goto error;
+	}
+
+	SetPageUptodate(page);
+	unlock_page(page);
+
+	_leave(" = 0");
+	return 0;
+
+ error:
+	SetPageError(page);
+	unlock_page(page); /* page stays !uptodate; error flag set for readers */
+
+	_leave(" = %d",ret);
+	return ret;
+
+} /* end afs_file_readpage() */
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
new file mode 100644
index 000000000000..e4aabcb85d7d
--- /dev/null
+++ b/fs/afs/fsclient.c
@@ -0,0 +1,816 @@
+/* fsclient.c: AFS File Server client stubs
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <rxrpc/rxrpc.h>
+#include <rxrpc/transport.h>
+#include <rxrpc/connection.h>
+#include <rxrpc/call.h>
+#include "fsclient.h"
+#include "cmservice.h"
+#include "vnode.h"
+#include "server.h"
+#include "errors.h"
+#include "internal.h"
+
+#define FSFETCHSTATUS 132 /* AFS Fetch file status */
+#define FSFETCHDATA 130 /* AFS Fetch file data */
+#define FSGIVEUPCALLBACKS 147 /* AFS Discard server callback promises */
+#define FSGETVOLUMEINFO 148 /* AFS Get root volume information */
+#define FSGETROOTVOLUME 151 /* AFS Get root volume name */
+#define FSLOOKUP 161 /* AFS lookup file in directory */
+
+/*****************************************************************************/
+/*
+ * map afs abort codes to/from Linux error codes
+ * - called with call->lock held
+ */
+static void afs_rxfs_aemap(struct rxrpc_call *call)
+{
+	switch (call->app_err_state) {
+	case RXRPC_ESTATE_LOCAL_ABORT:
+		call->app_abort_code = -call->app_errno; /* local errno (negative) -> positive wire abort code */
+		break;
+	case RXRPC_ESTATE_PEER_ABORT:
+		call->app_errno = afs_abort_to_error(call->app_abort_code); /* peer abort code -> Linux errno */
+		break;
+	default:
+		break;
+	}
+} /* end afs_rxfs_aemap() */
+
+/*****************************************************************************/
+/*
+ * get the root volume name from a fileserver
+ * - this operation doesn't seem to work correctly in OpenAFS server 1.2.2
+ */
+#if 0 /* disabled: operation misbehaves on OpenAFS server 1.2.2 (see header comment) */
+int afs_rxfs_get_root_volume(afs_server_t *server, char *buf, size_t *buflen) /* NOTE(review): buf/buflen are never used below */
+{
+	DECLARE_WAITQUEUE(myself,current);
+
+	struct rxrpc_connection *conn;
+	struct rxrpc_call *call;
+	struct iovec piov[2];
+	size_t sent;
+	int ret;
+	u32 param[1];
+
+	kenter("%p,%p,%u",server,buf,*buflen);
+
+	/* get hold of the fileserver connection */
+	ret = afs_server_get_fsconn(server,&conn);
+	if (ret<0)
+		goto out;
+
+	/* create a call through that connection */
+	ret = rxrpc_create_call(conn,NULL,NULL,afs_rxfs_aemap,&call);
+	if (ret<0) {
+		printk("kAFS: Unable to create call: %d\n",ret);
+		goto out_put_conn;
+	}
+	call->app_opcode = FSGETROOTVOLUME;
+
+	/* we want to get event notifications from the call */
+	add_wait_queue(&call->waitq,&myself);
+
+	/* marshall the parameters */
+	param[0] = htonl(FSGETROOTVOLUME); /* request is just the opcode word */
+
+	piov[0].iov_len = sizeof(param);
+	piov[0].iov_base = param;
+
+	/* send the parameters to the server */
+	ret = rxrpc_call_write_data(call,1,piov,RXRPC_LAST_PACKET,GFP_NOFS,0,&sent);
+	if (ret<0)
+		goto abort;
+
+	/* wait for the reply to completely arrive */
+	for (;;) { /* sleep until the call leaves the receive state or a signal arrives */
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (call->app_call_state!=RXRPC_CSTATE_CLNT_RCV_REPLY ||
+		    signal_pending(current))
+			break;
+		schedule();
+	}
+	set_current_state(TASK_RUNNING);
+
+	ret = -EINTR;
+	if (signal_pending(current))
+		goto abort;
+
+	switch (call->app_call_state) {
+	case RXRPC_CSTATE_ERROR:
+		ret = call->app_errno;
+		kdebug("Got Error: %d",ret);
+		goto out_unwait;
+
+	case RXRPC_CSTATE_CLNT_GOT_REPLY:
+		/* read the reply */
+		kdebug("Got Reply: qty=%d",call->app_ready_qty);
+
+		ret = -EBADMSG;
+		if (call->app_ready_qty <= 4) /* reply must carry more than one 32-bit word */
+			goto abort;
+
+		ret = rxrpc_call_read_data(call,NULL,call->app_ready_qty,0); /* NULL buffer: drain the reply, discarding it */
+		if (ret<0)
+			goto abort;
+
+#if 0 /* NOTE(review): unmarshalling skeleton references undeclared locals (bp, entry, loop) — won't compile if enabled */
+		/* unmarshall the reply */
+		bp = buffer;
+		for (loop=0; loop<65; loop++)
+			entry->name[loop] = ntohl(*bp++);
+		entry->name[64] = 0;
+
+		entry->type = ntohl(*bp++);
+		entry->num_servers = ntohl(*bp++);
+
+		for (loop=0; loop<8; loop++)
+			entry->servers[loop].addr.s_addr = *bp++;
+
+		for (loop=0; loop<8; loop++)
+			entry->servers[loop].partition = ntohl(*bp++);
+
+		for (loop=0; loop<8; loop++)
+			entry->servers[loop].flags = ntohl(*bp++);
+
+		for (loop=0; loop<3; loop++)
+			entry->volume_ids[loop] = ntohl(*bp++);
+
+		entry->clone_id = ntohl(*bp++);
+		entry->flags = ntohl(*bp);
+#endif
+
+		/* success */
+		ret = 0;
+		goto out_unwait;
+
+	default:
+		BUG();
+	}
+
+ abort:
+	set_current_state(TASK_UNINTERRUPTIBLE); /* sleep (woken by abort completion) after requesting the abort */
+	rxrpc_call_abort(call,ret);
+	schedule();
+ out_unwait:
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&call->waitq,&myself);
+	rxrpc_put_call(call);
+ out_put_conn:
+	afs_server_release_fsconn(server,conn);
+ out:
+	kleave("");
+	return ret;
+} /* end afs_rxfs_get_root_volume() */
+#endif
+
+/*****************************************************************************/
+/*
+ * get information about a volume
+ */
+#if 0 /* disabled: currently unused RPC stub */
+int afs_rxfs_get_volume_info(afs_server_t *server,
+			     const char *name,
+			     afs_volume_info_t *vinfo)
+{
+	DECLARE_WAITQUEUE(myself,current);
+
+	struct rxrpc_connection *conn;
+	struct rxrpc_call *call;
+	struct iovec piov[3];
+	size_t sent;
+	int ret;
+	u32 param[2], *bp, zero;
+
+	_enter("%p,%s,%p",server,name,vinfo);
+
+	/* get hold of the fileserver connection */
+	ret = afs_server_get_fsconn(server,&conn);
+	if (ret<0)
+		goto out;
+
+	/* create a call through that connection */
+	ret = rxrpc_create_call(conn,NULL,NULL,afs_rxfs_aemap,&call);
+	if (ret<0) {
+		printk("kAFS: Unable to create call: %d\n",ret);
+		goto out_put_conn;
+	}
+	call->app_opcode = FSGETVOLUMEINFO;
+
+	/* we want to get event notifications from the call */
+	add_wait_queue(&call->waitq,&myself);
+
+	/* marshall the parameters */
+	piov[1].iov_len = strlen(name);
+	piov[1].iov_base = (char*)name;
+
+	zero = 0;
+	piov[2].iov_len = (4 - (piov[1].iov_len & 3)) & 3; /* pad the name out to a 4-byte boundary */
+	piov[2].iov_base = &zero;
+
+	param[0] = htonl(FSGETVOLUMEINFO);
+	param[1] = htonl(piov[1].iov_len); /* name length precedes the name itself */
+
+	piov[0].iov_len = sizeof(param);
+	piov[0].iov_base = param;
+
+	/* send the parameters to the server */
+	ret = rxrpc_call_write_data(call,3,piov,RXRPC_LAST_PACKET,GFP_NOFS,0,&sent);
+	if (ret<0)
+		goto abort;
+
+	/* wait for the reply to completely arrive */
+	bp = rxrpc_call_alloc_scratch(call,64); /* 16 reply words: vid, type, 5 type vids, nservers, 8 addresses */
+
+	ret = rxrpc_call_read_data(call,bp,64,RXRPC_CALL_READ_BLOCK|RXRPC_CALL_READ_ALL);
+	if (ret<0) {
+		if (ret==-ECONNABORTED) { /* peer aborted: errno already mapped by afs_rxfs_aemap */
+			ret = call->app_errno;
+			goto out_unwait;
+		}
+		goto abort;
+	}
+
+	/* unmarshall the reply */
+	vinfo->vid = ntohl(*bp++);
+	vinfo->type = ntohl(*bp++);
+
+	vinfo->type_vids[0] = ntohl(*bp++);
+	vinfo->type_vids[1] = ntohl(*bp++);
+	vinfo->type_vids[2] = ntohl(*bp++);
+	vinfo->type_vids[3] = ntohl(*bp++);
+	vinfo->type_vids[4] = ntohl(*bp++);
+
+	vinfo->nservers = ntohl(*bp++);
+	vinfo->servers[0].addr.s_addr = *bp++; /* addresses kept in network byte order */
+	vinfo->servers[1].addr.s_addr = *bp++;
+	vinfo->servers[2].addr.s_addr = *bp++;
+	vinfo->servers[3].addr.s_addr = *bp++;
+	vinfo->servers[4].addr.s_addr = *bp++;
+	vinfo->servers[5].addr.s_addr = *bp++;
+	vinfo->servers[6].addr.s_addr = *bp++;
+	vinfo->servers[7].addr.s_addr = *bp++;
+
+	ret = -EBADMSG;
+	if (vinfo->nservers>8) /* validated after reading all 8 fixed slots above */
+		goto abort;
+
+	/* success */
+	ret = 0;
+
+ out_unwait:
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&call->waitq,&myself);
+	rxrpc_put_call(call);
+ out_put_conn:
+	afs_server_release_fsconn(server,conn);
+ out:
+	_leave("");
+	return ret;
+
+ abort:
+	set_current_state(TASK_UNINTERRUPTIBLE); /* sleep after requesting the abort */
+	rxrpc_call_abort(call,ret);
+	schedule();
+	goto out_unwait;
+
+} /* end afs_rxfs_get_volume_info() */
+#endif
+
+/*****************************************************************************/
+/*
+ * fetch the status information for a file
+ */
+int afs_rxfs_fetch_file_status(afs_server_t *server,
+			       afs_vnode_t *vnode,
+			       afs_volsync_t *volsync)
+{
+	DECLARE_WAITQUEUE(myself,current);
+
+	struct afs_server_callslot callslot;
+	struct rxrpc_call *call;
+	struct iovec piov[1];
+	size_t sent;
+	int ret;
+	u32 *bp;
+
+	_enter("%p,{%u,%u,%u}",server,vnode->fid.vid,vnode->fid.vnode,vnode->fid.unique);
+
+	/* get hold of the fileserver connection */
+	ret = afs_server_request_callslot(server,&callslot);
+	if (ret<0)
+		goto out;
+
+	/* create a call through that connection */
+	ret = rxrpc_create_call(callslot.conn,NULL,NULL,afs_rxfs_aemap,&call);
+	if (ret<0) {
+		printk("kAFS: Unable to create call: %d\n",ret);
+		goto out_put_conn;
+	}
+	call->app_opcode = FSFETCHSTATUS;
+
+	/* we want to get event notifications from the call */
+	add_wait_queue(&call->waitq,&myself);
+
+	/* marshall the parameters */
+	bp = rxrpc_call_alloc_scratch(call,16); /* 4 words: opcode + FID triple */
+	bp[0] = htonl(FSFETCHSTATUS);
+	bp[1] = htonl(vnode->fid.vid);
+	bp[2] = htonl(vnode->fid.vnode);
+	bp[3] = htonl(vnode->fid.unique);
+
+	piov[0].iov_len = 16;
+	piov[0].iov_base = bp;
+
+	/* send the parameters to the server */
+	ret = rxrpc_call_write_data(call,1,piov,RXRPC_LAST_PACKET,GFP_NOFS,0,&sent);
+	if (ret<0)
+		goto abort;
+
+	/* wait for the reply to completely arrive */
+	bp = rxrpc_call_alloc_scratch(call,120); /* 30 words: 21 status + 3 callback + 6 volsync */
+
+	ret = rxrpc_call_read_data(call,bp,120,RXRPC_CALL_READ_BLOCK|RXRPC_CALL_READ_ALL);
+	if (ret<0) {
+		if (ret==-ECONNABORTED) { /* peer aborted: errno already mapped by afs_rxfs_aemap */
+			ret = call->app_errno;
+			goto out_unwait;
+		}
+		goto abort;
+	}
+
+	/* unmarshall the reply */
+	vnode->status.if_version = ntohl(*bp++);
+	vnode->status.type = ntohl(*bp++);
+	vnode->status.nlink = ntohl(*bp++);
+	vnode->status.size = ntohl(*bp++);
+	vnode->status.version = ntohl(*bp++); /* low 32 bits; high bits OR'd in below */
+	vnode->status.author = ntohl(*bp++);
+	vnode->status.owner = ntohl(*bp++);
+	vnode->status.caller_access = ntohl(*bp++);
+	vnode->status.anon_access = ntohl(*bp++);
+	vnode->status.mode = ntohl(*bp++);
+	vnode->status.parent.vid = vnode->fid.vid; /* parent lives on the same volume */
+	vnode->status.parent.vnode = ntohl(*bp++);
+	vnode->status.parent.unique = ntohl(*bp++);
+	bp++; /* seg size */
+	vnode->status.mtime_client = ntohl(*bp++);
+	vnode->status.mtime_server = ntohl(*bp++);
+	bp++; /* group */
+	bp++; /* sync counter */
+	vnode->status.version |= ((unsigned long long) ntohl(*bp++)) << 32; /* high 32 bits of data version */
+	bp++; /* spare2 */
+	bp++; /* spare3 */
+	bp++; /* spare4 */
+
+	vnode->cb_version = ntohl(*bp++);
+	vnode->cb_expiry = ntohl(*bp++);
+	vnode->cb_type = ntohl(*bp++);
+
+	if (volsync) { /* volsync is optional for the caller */
+		volsync->creation = ntohl(*bp++);
+		bp++; /* spare2 */
+		bp++; /* spare3 */
+		bp++; /* spare4 */
+		bp++; /* spare5 */
+		bp++; /* spare6 */
+	}
+
+	/* success */
+	ret = 0;
+
+ out_unwait:
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&call->waitq,&myself);
+	rxrpc_put_call(call);
+ out_put_conn:
+	afs_server_release_callslot(server,&callslot);
+ out:
+	_leave(""); /* NOTE(review): other stubs log ret here (" = %d") — consider matching */
+	return ret;
+
+ abort:
+	set_current_state(TASK_UNINTERRUPTIBLE); /* sleep after requesting the abort */
+	rxrpc_call_abort(call,ret);
+	schedule();
+	goto out_unwait;
+} /* end afs_rxfs_fetch_file_status() */
+
+/*****************************************************************************/
+/*
+ * fetch the contents of a file or directory
+ */
+int afs_rxfs_fetch_file_data(afs_server_t *server,
+			     afs_vnode_t *vnode,
+			     struct afs_rxfs_fetch_descriptor *desc,
+			     afs_volsync_t *volsync)
+{
+	DECLARE_WAITQUEUE(myself,current);
+
+	struct afs_server_callslot callslot;
+	struct rxrpc_call *call;
+	struct iovec piov[1];
+	size_t sent;
+	int ret;
+	u32 *bp;
+
+	_enter("%p,{fid={%u,%u,%u},sz=%u,of=%lu}",
+	       server,
+	       desc->fid.vid,
+	       desc->fid.vnode,
+	       desc->fid.unique,
+	       desc->size,
+	       desc->offset);
+
+	/* get hold of the fileserver connection */
+	ret = afs_server_request_callslot(server,&callslot);
+	if (ret<0)
+		goto out;
+
+	/* create a call through that connection */
+	ret = rxrpc_create_call(callslot.conn,NULL,NULL,afs_rxfs_aemap,&call);
+	if (ret<0) {
+		printk("kAFS: Unable to create call: %d\n",ret);
+		goto out_put_conn;
+	}
+	call->app_opcode = FSFETCHDATA;
+
+	/* we want to get event notifications from the call */
+	add_wait_queue(&call->waitq,&myself);
+
+	/* marshall the parameters */
+	bp = rxrpc_call_alloc_scratch(call,24); /* 6 words: opcode + FID triple + offset + size */
+	bp[0] = htonl(FSFETCHDATA);
+	bp[1] = htonl(desc->fid.vid);
+	bp[2] = htonl(desc->fid.vnode);
+	bp[3] = htonl(desc->fid.unique);
+	bp[4] = htonl(desc->offset);
+	bp[5] = htonl(desc->size);
+
+	piov[0].iov_len = 24;
+	piov[0].iov_base = bp;
+
+	/* send the parameters to the server */
+	ret = rxrpc_call_write_data(call,1,piov,RXRPC_LAST_PACKET,GFP_NOFS,0,&sent);
+	if (ret<0)
+		goto abort;
+
+	/* wait for the data count to arrive */
+	ret = rxrpc_call_read_data(call,bp,4,RXRPC_CALL_READ_BLOCK); /* reply starts with a 32-bit byte count */
+	if (ret<0)
+		goto read_failed;
+
+	desc->actual = ntohl(bp[0]);
+	if (desc->actual!=desc->size) { /* short/long reads are rejected, not retried */
+		ret = -EBADMSG;
+		goto abort;
+	}
+
+	/* call the app to read the actual data */
+	rxrpc_call_reset_scratch(call);
+
+	ret = rxrpc_call_read_data(call,desc->buffer,desc->actual,RXRPC_CALL_READ_BLOCK); /* payload goes straight into the caller's buffer */
+	if (ret<0)
+		goto read_failed;
+
+	/* wait for the rest of the reply to completely arrive */
+	rxrpc_call_reset_scratch(call);
+	bp = rxrpc_call_alloc_scratch(call,120); /* 30 words: 21 status + 3 callback + 6 volsync */
+
+	ret = rxrpc_call_read_data(call,bp,120,RXRPC_CALL_READ_BLOCK|RXRPC_CALL_READ_ALL);
+	if (ret<0)
+		goto read_failed;
+
+	/* unmarshall the reply */
+	vnode->status.if_version = ntohl(*bp++);
+	vnode->status.type = ntohl(*bp++);
+	vnode->status.nlink = ntohl(*bp++);
+	vnode->status.size = ntohl(*bp++);
+	vnode->status.version = ntohl(*bp++); /* low 32 bits; high bits OR'd in below */
+	vnode->status.author = ntohl(*bp++);
+	vnode->status.owner = ntohl(*bp++);
+	vnode->status.caller_access = ntohl(*bp++);
+	vnode->status.anon_access = ntohl(*bp++);
+	vnode->status.mode = ntohl(*bp++);
+	vnode->status.parent.vid = desc->fid.vid; /* parent lives on the same volume */
+	vnode->status.parent.vnode = ntohl(*bp++);
+	vnode->status.parent.unique = ntohl(*bp++);
+	bp++; /* seg size */
+	vnode->status.mtime_client = ntohl(*bp++);
+	vnode->status.mtime_server = ntohl(*bp++);
+	bp++; /* group */
+	bp++; /* sync counter */
+	vnode->status.version |= ((unsigned long long) ntohl(*bp++)) << 32; /* high 32 bits of data version */
+	bp++; /* spare2 */
+	bp++; /* spare3 */
+	bp++; /* spare4 */
+
+	vnode->cb_version = ntohl(*bp++);
+	vnode->cb_expiry = ntohl(*bp++);
+	vnode->cb_type = ntohl(*bp++);
+
+	if (volsync) { /* volsync is optional for the caller */
+		volsync->creation = ntohl(*bp++);
+		bp++; /* spare2 */
+		bp++; /* spare3 */
+		bp++; /* spare4 */
+		bp++; /* spare5 */
+		bp++; /* spare6 */
+	}
+
+	/* success */
+	ret = 0;
+
+ out_unwait:
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&call->waitq,&myself);
+	rxrpc_put_call(call);
+ out_put_conn:
+	afs_server_release_callslot(server,&callslot);
+ out:
+	_leave(" = %d",ret);
+	return ret;
+
+ read_failed:
+	if (ret==-ECONNABORTED) { /* peer aborted: errno already mapped by afs_rxfs_aemap */
+		ret = call->app_errno;
+		goto out_unwait;
+	}
+
+ abort: /* falls through from read_failed for non-abort errors */
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	rxrpc_call_abort(call,ret);
+	schedule();
+	goto out_unwait;
+
+} /* end afs_rxfs_fetch_file_data() */
+
+/*****************************************************************************/
+/*
+ * ask the AFS fileserver to discard a callback request on a file
+ */
+int afs_rxfs_give_up_callback(afs_server_t *server, afs_vnode_t *vnode)
+{
+	DECLARE_WAITQUEUE(myself,current);
+
+	struct afs_server_callslot callslot;
+	struct rxrpc_call *call;
+	struct iovec piov[1];
+	size_t sent;
+	int ret;
+	u32 *bp;
+
+	_enter("%p,{%u,%u,%u}",server,vnode->fid.vid,vnode->fid.vnode,vnode->fid.unique);
+
+	/* get hold of the fileserver connection */
+	ret = afs_server_request_callslot(server,&callslot);
+	if (ret<0)
+		goto out;
+
+	/* create a call through that connection */
+	ret = rxrpc_create_call(callslot.conn,NULL,NULL,afs_rxfs_aemap,&call);
+	if (ret<0) {
+		printk("kAFS: Unable to create call: %d\n",ret);
+		goto out_put_conn;
+	}
+	call->app_opcode = FSGIVEUPCALLBACKS;
+
+	/* we want to get event notifications from the call */
+	add_wait_queue(&call->waitq,&myself);
+
+	/* marshall the parameters */
+	bp = rxrpc_call_alloc_scratch(call,(1+4+4)*4); /* opcode + 1-entry FID array + 1-entry callback array */
+
+	piov[0].iov_len = (1+4+4)*4;
+	piov[0].iov_base = bp;
+
+	*bp++ = htonl(FSGIVEUPCALLBACKS);
+	*bp++ = htonl(1); /* FID array count */
+	*bp++ = htonl(vnode->fid.vid);
+	*bp++ = htonl(vnode->fid.vnode);
+	*bp++ = htonl(vnode->fid.unique);
+	*bp++ = htonl(1); /* callback array count */
+	*bp++ = htonl(vnode->cb_version);
+	*bp++ = htonl(vnode->cb_expiry);
+	*bp++ = htonl(vnode->cb_type);
+
+	/* send the parameters to the server */
+	ret = rxrpc_call_write_data(call,1,piov,RXRPC_LAST_PACKET,GFP_NOFS,0,&sent);
+	if (ret<0)
+		goto abort;
+
+	/* wait for the reply to completely arrive */
+	for (;;) { /* sleep until the call leaves the receive state or a signal arrives */
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (call->app_call_state!=RXRPC_CSTATE_CLNT_RCV_REPLY ||
+		    signal_pending(current))
+			break;
+		schedule();
+	}
+	set_current_state(TASK_RUNNING);
+
+	ret = -EINTR;
+	if (signal_pending(current))
+		goto abort;
+
+	switch (call->app_call_state) {
+	case RXRPC_CSTATE_ERROR:
+		ret = call->app_errno;
+		goto out_unwait;
+
+	case RXRPC_CSTATE_CLNT_GOT_REPLY:
+		ret = 0; /* reply carries no payload; arrival means success */
+		goto out_unwait;
+
+	default:
+		BUG();
+	}
+
+ out_unwait:
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&call->waitq,&myself);
+	rxrpc_put_call(call);
+ out_put_conn:
+	afs_server_release_callslot(server,&callslot);
+ out:
+	_leave("");
+	return ret;
+
+ abort:
+	set_current_state(TASK_UNINTERRUPTIBLE); /* sleep after requesting the abort */
+	rxrpc_call_abort(call,ret);
+	schedule();
+	goto out_unwait;
+} /* end afs_rxfs_give_up_callback() */
+
+/*****************************************************************************/
+/*
+ * look a filename up in a directory
+ * - this operation doesn't seem to work correctly in OpenAFS server 1.2.2
+ */
+#if 0 /* disabled: operation misbehaves on OpenAFS server 1.2.2 (see header comment) */
+int afs_rxfs_lookup(afs_server_t *server,
+		    afs_vnode_t *dir,
+		    const char *filename,
+		    afs_vnode_t *vnode,
+		    afs_volsync_t *volsync)
+{
+	DECLARE_WAITQUEUE(myself,current);
+
+	struct rxrpc_connection *conn;
+	struct rxrpc_call *call;
+	struct iovec piov[3];
+	size_t sent;
+	int ret;
+	u32 *bp, zero;
+
+	kenter("%p,{%u,%u,%u},%s",server,fid->vid,fid->vnode,fid->unique,filename); /* NOTE(review): 'fid' is not declared here — this #if 0 body won't compile if enabled */
+
+	/* get hold of the fileserver connection */
+	ret = afs_server_get_fsconn(server,&conn);
+	if (ret<0)
+		goto out;
+
+	/* create a call through that connection */
+	ret = rxrpc_create_call(conn,NULL,NULL,afs_rxfs_aemap,&call);
+	if (ret<0) {
+		printk("kAFS: Unable to create call: %d\n",ret);
+		goto out_put_conn;
+	}
+	call->app_opcode = FSLOOKUP;
+
+	/* we want to get event notifications from the call */
+	add_wait_queue(&call->waitq,&myself);
+
+	/* marshall the parameters */
+	bp = rxrpc_call_alloc_scratch(call,20); /* 5 words: opcode + dir FID triple + name length */
+
+	zero = 0;
+
+	piov[0].iov_len = 20;
+	piov[0].iov_base = bp;
+	piov[1].iov_len = strlen(filename);
+	piov[1].iov_base = (char*) filename;
+	piov[2].iov_len = (4 - (piov[1].iov_len & 3)) & 3; /* pad the name out to a 4-byte boundary */
+	piov[2].iov_base = &zero;
+
+	*bp++ = htonl(FSLOOKUP);
+	*bp++ = htonl(dirfid->vid); /* NOTE(review): 'dirfid' undeclared — presumably should be &dir->fid */
+	*bp++ = htonl(dirfid->vnode);
+	*bp++ = htonl(dirfid->unique);
+	*bp++ = htonl(piov[1].iov_len);
+
+	/* send the parameters to the server */
+	ret = rxrpc_call_write_data(call,3,piov,RXRPC_LAST_PACKET,GFP_NOFS,0,&sent);
+	if (ret<0)
+		goto abort;
+
+	/* wait for the reply to completely arrive */
+	bp = rxrpc_call_alloc_scratch(call,220); /* 55 words: FID + 2 status blocks + callback + volsync */
+
+	ret = rxrpc_call_read_data(call,bp,220,RXRPC_CALL_READ_BLOCK|RXRPC_CALL_READ_ALL);
+	if (ret<0) {
+		if (ret==-ECONNABORTED) { /* peer aborted: errno already mapped by afs_rxfs_aemap */
+			ret = call->app_errno;
+			goto out_unwait;
+		}
+		goto abort;
+	}
+
+	/* unmarshall the reply */
+	fid->vid = ntohl(*bp++); /* NOTE(review): 'fid' undeclared — no FID out-parameter exists in this signature */
+	fid->vnode = ntohl(*bp++);
+	fid->unique = ntohl(*bp++);
+
+	vnode->status.if_version = ntohl(*bp++);
+	vnode->status.type = ntohl(*bp++);
+	vnode->status.nlink = ntohl(*bp++);
+	vnode->status.size = ntohl(*bp++);
+	vnode->status.version = ntohl(*bp++);
+	vnode->status.author = ntohl(*bp++);
+	vnode->status.owner = ntohl(*bp++);
+	vnode->status.caller_access = ntohl(*bp++);
+	vnode->status.anon_access = ntohl(*bp++);
+	vnode->status.mode = ntohl(*bp++);
+	vnode->status.parent.vid = dirfid->vid;
+	vnode->status.parent.vnode = ntohl(*bp++);
+	vnode->status.parent.unique = ntohl(*bp++);
+	bp++; /* seg size */
+	vnode->status.mtime_client = ntohl(*bp++);
+	vnode->status.mtime_server = ntohl(*bp++);
+	bp++; /* group */
+	bp++; /* sync counter */
+	vnode->status.version |= ((unsigned long long) ntohl(*bp++)) << 32; /* high 32 bits of data version */
+	bp++; /* spare2 */
+	bp++; /* spare3 */
+	bp++; /* spare4 */
+
+	dir->status.if_version = ntohl(*bp++); /* second status block refreshes the parent directory */
+	dir->status.type = ntohl(*bp++);
+	dir->status.nlink = ntohl(*bp++);
+	dir->status.size = ntohl(*bp++);
+	dir->status.version = ntohl(*bp++);
+	dir->status.author = ntohl(*bp++);
+	dir->status.owner = ntohl(*bp++);
+	dir->status.caller_access = ntohl(*bp++);
+	dir->status.anon_access = ntohl(*bp++);
+	dir->status.mode = ntohl(*bp++);
+	dir->status.parent.vid = dirfid->vid;
+	dir->status.parent.vnode = ntohl(*bp++);
+	dir->status.parent.unique = ntohl(*bp++);
+	bp++; /* seg size */
+	dir->status.mtime_client = ntohl(*bp++);
+	dir->status.mtime_server = ntohl(*bp++);
+	bp++; /* group */
+	bp++; /* sync counter */
+	dir->status.version |= ((unsigned long long) ntohl(*bp++)) << 32;
+	bp++; /* spare2 */
+	bp++; /* spare3 */
+	bp++; /* spare4 */
+
+	callback->fid = *fid; /* NOTE(review): 'callback' undeclared — cb fields presumably belong on vnode as in the live stubs */
+	callback->version = ntohl(*bp++);
+	callback->expiry = ntohl(*bp++);
+	callback->type = ntohl(*bp++);
+
+	if (volsync) { /* volsync is optional for the caller */
+		volsync->creation = ntohl(*bp++);
+		bp++; /* spare2 */
+		bp++; /* spare3 */
+		bp++; /* spare4 */
+		bp++; /* spare5 */
+		bp++; /* spare6 */
+	}
+
+	/* success */
+	ret = 0;
+
+ out_unwait:
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&call->waitq,&myself);
+	rxrpc_put_call(call);
+ out_put_conn:
+	afs_server_release_fsconn(server,conn);
+ out:
+	kleave("");
+	return ret;
+
+ abort:
+	set_current_state(TASK_UNINTERRUPTIBLE); /* sleep after requesting the abort */
+	rxrpc_call_abort(call,ret);
+	schedule();
+	goto out_unwait;
+} /* end afs_rxfs_lookup() */
+#endif
diff --git a/fs/afs/fsclient.h b/fs/afs/fsclient.h
new file mode 100644
index 000000000000..0931a5b1be8f
--- /dev/null
+++ b/fs/afs/fsclient.h
@@ -0,0 +1,53 @@
+/* fsclient.h: AFS File Server client stub declarations
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_AFS_FSCLIENT_H
+#define _LINUX_AFS_FSCLIENT_H
+
+#include "server.h"
+
+extern int afs_rxfs_get_volume_info(afs_server_t *server,
+ const char *name,
+ afs_volume_info_t *vinfo);
+
+extern int afs_rxfs_fetch_file_status(afs_server_t *server,
+ afs_vnode_t *vnode,
+ afs_volsync_t *volsync);
+
+struct afs_rxfs_fetch_descriptor { /* in/out parameter block for afs_rxfs_fetch_file_data() */
+	afs_fid_t fid; /* file ID to fetch */
+	size_t size; /* total number of bytes to fetch */
+	off_t offset; /* offset in file to start from */
+	void *buffer; /* read buffer */
+	size_t actual; /* actual size sent back by server */
+};
+
+extern int afs_rxfs_fetch_file_data(afs_server_t *server,
+ afs_vnode_t *vnode,
+ struct afs_rxfs_fetch_descriptor *desc,
+ afs_volsync_t *volsync);
+
+extern int afs_rxfs_give_up_callback(afs_server_t *server, afs_vnode_t *vnode);
+
+/* this doesn't appear to work in OpenAFS server */
+extern int afs_rxfs_lookup(afs_server_t *server,
+ afs_vnode_t *dir,
+ const char *filename,
+ afs_vnode_t *vnode,
+ afs_volsync_t *volsync);
+
+/* this is apparently mis-implemented in OpenAFS server */
+extern int afs_rxfs_get_root_volume(afs_server_t *server,
+ char *buf,
+ size_t *buflen);
+
+
+#endif /* _LINUX_AFS_FSCLIENT_H */
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
new file mode 100644
index 000000000000..235b7b0bcf5e
--- /dev/null
+++ b/fs/afs/inode.c
@@ -0,0 +1,418 @@
+/*
+ * Copyright (c) 2002 Red Hat, Inc. All rights reserved.
+ *
+ * This software may be freely redistributed under the terms of the
+ * GNU General Public License.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Authors: David Woodhouse <dwmw2@cambridge.redhat.com>
+ * David Howells <dhowells@redhat.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include "volume.h"
+#include "vnode.h"
+#include "super.h"
+#include "internal.h"
+
+struct afs_iget_data { /* lookup parameters passed through iget4/iget5 */
+ afs_fid_t fid; /* file ID of inode being sought */
+ afs_volume_t *volume; /* volume on which the vnode resides */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+ afs_vnode_t *new_vnode; /* preallocated vnode record for afs_read_inode2() */
+#endif
+};
+
+/*****************************************************************************/
+/*
+ * map the AFS file status to the inode member variables
+ */
+static int afs_inode_map_status(afs_vnode_t *vnode)
+{
+ struct inode *inode = AFS_VNODE_TO_I(vnode);
+
+ _debug("FS: ft=%d lk=%d sz=%u ver=%Lu mod=%hu",
+ vnode->status.type,
+ vnode->status.nlink,
+ vnode->status.size,
+ vnode->status.version,
+ vnode->status.mode);
+
+ switch (vnode->status.type) {
+ case AFS_FTYPE_FILE:
+ inode->i_mode = S_IFREG | vnode->status.mode;
+ inode->i_op = &afs_file_inode_operations;
+ inode->i_fop = &afs_file_file_operations;
+ break;
+ case AFS_FTYPE_DIR:
+ inode->i_mode = S_IFDIR | vnode->status.mode;
+ inode->i_op = &afs_dir_inode_operations;
+ inode->i_fop = &afs_dir_file_operations;
+ break;
+ case AFS_FTYPE_SYMLINK:
+ inode->i_mode = S_IFLNK | vnode->status.mode;
+ inode->i_op = &page_symlink_inode_operations;
+ break;
+ default:
+ printk("kAFS: AFS vnode with undefined type\n");
+ return -EBADMSG;
+ }
+
+ inode->i_nlink = vnode->status.nlink;
+ inode->i_uid = vnode->status.owner;
+ inode->i_gid = 0;
+ inode->i_rdev = NODEV;
+ inode->i_size = vnode->status.size;
+ inode->i_atime = inode->i_mtime = inode->i_ctime = vnode->status.mtime_server;
+ inode->i_blksize = PAGE_CACHE_SIZE;
+ inode->i_blocks = 0;
+ inode->i_version = vnode->fid.unique;
+ inode->i_mapping->a_ops = &afs_fs_aops;
+
+ /* check to see whether a symbolic link is really a mountpoint */
+ if (vnode->status.type==AFS_FTYPE_SYMLINK) {
+ afs_mntpt_check_symlink(vnode);
+
+ if (vnode->flags & AFS_VNODE_MOUNTPOINT) {
+ inode->i_mode = S_IFDIR | vnode->status.mode;
+ inode->i_op = &afs_mntpt_inode_operations;
+ inode->i_fop = &afs_mntpt_file_operations;
+ }
+ }
+
+ return 0;
+} /* end afs_inode_map_status() */
+
+/*****************************************************************************/
+/*
+ * attempt to fetch the status of an inode, coalescing multiple simultaneous fetches
+ */
+int afs_inode_fetch_status(struct inode *inode)
+{
+ afs_vnode_t *vnode;
+ int ret;
+
+ vnode = AFS_FS_I(inode);
+
+ ret = afs_vnode_fetch_status(vnode);
+
+ if (ret==0)
+ ret = afs_inode_map_status(vnode);
+
+ return ret;
+
+} /* end afs_inode_fetch_status() */
+
+/*****************************************************************************/
+/*
+ * iget5() comparator
+ */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
+static int afs_iget5_test(struct inode *inode, void *opaque)
+{
+ struct afs_iget_data *data = opaque;
+
+ /* match on both the vnode ID (i_ino) and the uniquifier (i_version) */
+ return inode->i_ino==data->fid.vnode && inode->i_version==data->fid.unique;
+} /* end afs_iget5_test() */
+#endif
+
+/*****************************************************************************/
+/*
+ * iget5() inode initialiser
+ */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
+static int afs_iget5_set(struct inode *inode, void *opaque)
+{
+ struct afs_iget_data *data = opaque;
+ afs_vnode_t *vnode = AFS_FS_I(inode);
+
+ inode->i_ino = data->fid.vnode;
+ inode->i_version = data->fid.unique;
+ vnode->fid = data->fid;
+ vnode->volume = data->volume;
+
+ return 0;
+} /* end afs_iget5_set() */
+#endif
+
+/*****************************************************************************/
+/*
+ * iget4() comparator
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+static int afs_iget4_test(struct inode *inode, ino_t ino, void *opaque)
+{
+ struct afs_iget_data *data = opaque;
+
+ /* match on both the vnode ID (i_ino) and the uniquifier (i_version) */
+ return inode->i_ino==data->fid.vnode && inode->i_version==data->fid.unique;
+} /* end afs_iget4_test() */
+#endif
+
+/*****************************************************************************/
+/*
+ * read an inode (2.4 only)
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+void afs_read_inode2(struct inode *inode, void *opaque)
+{
+ struct afs_iget_data *data = opaque;
+ afs_vnode_t *vnode;
+ int ret;
+
+ _enter(",{{%u,%u,%u},%p}",data->fid.vid,data->fid.vnode,data->fid.unique,data->volume);
+
+ if (inode->u.generic_ip) BUG();
+
+ /* attach a pre-allocated vnode record */
+ inode->u.generic_ip = vnode = data->new_vnode;
+ data->new_vnode = NULL;
+
+ memset(vnode,0,sizeof(*vnode));
+ vnode->inode = inode;
+ init_waitqueue_head(&vnode->update_waitq);
+ spin_lock_init(&vnode->lock);
+ INIT_LIST_HEAD(&vnode->cb_link);
+ INIT_LIST_HEAD(&vnode->cb_hash_link);
+ afs_timer_init(&vnode->cb_timeout,&afs_vnode_cb_timed_out_ops);
+ vnode->flags |= AFS_VNODE_CHANGED;
+ vnode->volume = data->volume;
+ vnode->fid = data->fid;
+
+ /* ask the server for a status check */
+ ret = afs_vnode_fetch_status(vnode);
+ if (ret<0) {
+ make_bad_inode(inode);
+ _leave(" [bad inode]");
+ return;
+ }
+
+ ret = afs_inode_map_status(vnode);
+ if (ret<0) {
+ make_bad_inode(inode);
+ _leave(" [bad inode]");
+ return;
+ }
+
+ _leave("");
+ return;
+} /* end afs_read_inode2() */
+#endif
+
+/*****************************************************************************/
+/*
+ * inode retrieval
+ */
+inline int afs_iget(struct super_block *sb, afs_fid_t *fid, struct inode **_inode)
+{
+ struct afs_iget_data data = { fid: *fid };
+ struct afs_super_info *as;
+ struct inode *inode;
+ afs_vnode_t *vnode;
+ int ret;
+
+ _enter(",{%u,%u,%u},,",fid->vid,fid->vnode,fid->unique);
+
+ as = sb->s_fs_info;
+ data.volume = as->volume;
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
+ inode = iget5_locked(sb,fid->vnode,afs_iget5_test,afs_iget5_set,&data);
+ if (!inode) {
+ _leave(" = -ENOMEM");
+ return -ENOMEM;
+ }
+
+ vnode = AFS_FS_I(inode);
+
+ /* deal with an existing inode */
+ if (!(inode->i_state & I_NEW)) {
+ ret = afs_vnode_fetch_status(vnode);
+ if (ret==0)
+ *_inode = inode;
+ else
+ iput(inode);
+ _leave(" = %d",ret);
+ return ret;
+ }
+
+ /* okay... it's a new inode */
+ vnode->flags |= AFS_VNODE_CHANGED;
+ ret = afs_inode_fetch_status(inode);
+ if (ret<0)
+ goto bad_inode;
+
+#if 0
+ /* find a cache entry for it */
+ ret = afs_cache_lookup_vnode(as->volume,vnode);
+ if (ret<0)
+ goto bad_inode;
+#endif
+
+ /* success */
+ unlock_new_inode(inode);
+
+ *_inode = inode;
+ _leave(" = 0 [CB { v=%u x=%lu t=%u nix=%u }]",
+ vnode->cb_version,
+ vnode->cb_timeout.timo_jif,
+ vnode->cb_type,
+ vnode->nix
+ );
+ return 0;
+
+ /* failure */
+ bad_inode:
+ make_bad_inode(inode);
+ unlock_new_inode(inode);
+ iput(inode);
+
+ _leave(" = %d [bad]",ret);
+ return ret;
+
+#else
+
+ /* pre-allocate a vnode record so that afs_read_inode2() doesn't have to return an inode
+ * without one attached
+ */
+ data.new_vnode = kmalloc(sizeof(afs_vnode_t),GFP_KERNEL);
+ if (!data.new_vnode) {
+ _leave(" = -ENOMEM");
+ return -ENOMEM;
+ }
+
+ inode = iget4(sb,fid->vnode,afs_iget4_test,&data);
+ if (data.new_vnode) kfree(data.new_vnode);
+ if (!inode) {
+ _leave(" = -ENOMEM");
+ return -ENOMEM;
+ }
+
+ vnode = AFS_FS_I(inode);
+ *_inode = inode;
+ _leave(" = 0 [CB { v=%u x=%lu t=%u nix=%u }]",
+ vnode->cb_version,
+ vnode->cb_timeout.timo_jif,
+ vnode->cb_type,
+ vnode->nix
+ );
+ return 0;
+#endif
+} /* end afs_iget() */
+
+/*****************************************************************************/
+/*
+ * read the attributes of an inode
+ */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
+int afs_inode_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
+{
+ struct inode *inode;
+ afs_vnode_t *vnode;
+ int ret;
+
+ inode = dentry->d_inode;
+
+ _enter("{ ino=%lu v=%lu }",inode->i_ino,inode->i_version);
+
+ vnode = AFS_FS_I(inode);
+
+ ret = afs_inode_fetch_status(inode);
+ if (ret==-ENOENT) {
+ _leave(" = %d [%d %p]",ret,atomic_read(&dentry->d_count),dentry->d_inode);
+ return ret;
+ }
+ else if (ret<0) {
+ make_bad_inode(inode);
+ _leave(" = %d",ret);
+ return ret;
+ }
+
+ /* transfer attributes from the inode structure to the stat structure */
+ generic_fillattr(inode,stat);
+
+ _leave(" = 0 CB { v=%u x=%u t=%u }",
+ vnode->cb_version,
+ vnode->cb_expiry,
+ vnode->cb_type);
+
+ return 0;
+} /* end afs_inode_getattr() */
+#endif
+
+/*****************************************************************************/
+/*
+ * revalidate the inode
+ */
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,0)
+int afs_inode_revalidate(struct dentry *dentry)
+{
+ struct inode *inode;
+ afs_vnode_t *vnode;
+ int ret;
+
+ inode = dentry->d_inode;
+
+ _enter("{ ino=%lu v=%lu }",inode->i_ino,inode->i_version);
+
+ vnode = AFS_FS_I(inode);
+
+ ret = afs_inode_fetch_status(inode);
+ if (ret==-ENOENT) {
+ _leave(" = %d [%d %p]",ret,atomic_read(&dentry->d_count),dentry->d_inode);
+ return ret;
+ }
+ else if (ret<0) {
+ make_bad_inode(inode);
+ _leave(" = %d",ret);
+ return ret;
+ }
+
+ _leave(" = 0 CB { v=%u x=%u t=%u }",
+ vnode->cb_version,
+ vnode->cb_expiry,
+ vnode->cb_type);
+
+ return 0;
+} /* end afs_inode_revalidate() */
+#endif
+
+/*****************************************************************************/
+/*
+ * clear an AFS inode
+ */
+void afs_clear_inode(struct inode *inode)
+{
+ afs_vnode_t *vnode;
+
+ vnode = AFS_FS_I(inode);
+
+ _enter("ino=%lu { vn=%08x v=%u x=%u t=%u }",
+ inode->i_ino,
+ vnode->fid.vnode,
+ vnode->cb_version,
+ vnode->cb_expiry,
+ vnode->cb_type
+ );
+
+ if (inode->i_ino!=vnode->fid.vnode) BUG();
+
+ afs_vnode_give_up_callback(vnode);
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,0)
+ if (inode->u.generic_ip) kfree(inode->u.generic_ip);
+#endif
+
+ _leave("");
+} /* end afs_clear_inode() */
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
new file mode 100644
index 000000000000..37f84bb11891
--- /dev/null
+++ b/fs/afs/internal.h
@@ -0,0 +1,127 @@
+/* internal.h: internal AFS stuff
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef AFS_INTERNAL_H
+#define AFS_INTERNAL_H
+
+#include <linux/version.h>
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+
+/*
+ * debug tracing
+ */
+#define kenter(FMT,...) printk("==> %s("FMT")\n",__FUNCTION__,##__VA_ARGS__)
+#define kleave(FMT,...) printk("<== %s()"FMT"\n",__FUNCTION__,##__VA_ARGS__)
+#define kdebug(FMT,...) printk(FMT"\n",##__VA_ARGS__)
+#define kproto(FMT,...) printk("### "FMT"\n",##__VA_ARGS__)
+#define knet(FMT,...) printk(FMT"\n",##__VA_ARGS__)
+
+#if 0
+#define _enter(FMT,...) kenter(FMT,##__VA_ARGS__)
+#define _leave(FMT,...) kleave(FMT,##__VA_ARGS__)
+#define _debug(FMT,...) kdebug(FMT,##__VA_ARGS__)
+#define _proto(FMT,...) kproto(FMT,##__VA_ARGS__)
+#define _net(FMT,...) knet(FMT,##__VA_ARGS__)
+#else
+#define _enter(FMT,...) do { } while(0)
+#define _leave(FMT,...) do { } while(0)
+#define _debug(FMT,...) do { } while(0)
+#define _proto(FMT,...) do { } while(0)
+#define _net(FMT,...) do { } while(0)
+#endif
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,0)
+#define wait_on_page_locked wait_on_page
+#define PageUptodate Page_Uptodate
+
+static inline struct proc_dir_entry *PDE(const struct inode *inode)
+{
+ return (struct proc_dir_entry *)inode->u.generic_ip;
+}
+#endif
+
+static inline void afs_discard_my_signals(void)
+{
+ while (signal_pending(current)) {
+ siginfo_t sinfo;
+
+ spin_lock_irq(&current->sig->siglock);
+ dequeue_signal(&current->blocked,&sinfo);
+ spin_unlock_irq(&current->sig->siglock);
+ }
+}
+
+/*
+ * cell.c
+ */
+extern struct rw_semaphore afs_proc_cells_sem;
+extern struct list_head afs_proc_cells;
+
+/*
+ * dir.c
+ */
+extern struct inode_operations afs_dir_inode_operations;
+extern struct file_operations afs_dir_file_operations;
+
+/*
+ * file.c
+ */
+extern struct address_space_operations afs_fs_aops;
+extern struct inode_operations afs_file_inode_operations;
+extern struct file_operations afs_file_file_operations;
+
+/*
+ * inode.c
+ */
+extern int afs_iget(struct super_block *sb, afs_fid_t *fid, struct inode **_inode);
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
+extern int afs_inode_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
+#else
+extern void afs_read_inode2(struct inode *inode, void *opaque);
+extern int afs_inode_revalidate(struct dentry *dentry);
+#endif
+extern void afs_clear_inode(struct inode *inode);
+
+/*
+ * mntpt.c
+ */
+extern struct inode_operations afs_mntpt_inode_operations;
+extern struct file_operations afs_mntpt_file_operations;
+
+extern int afs_mntpt_check_symlink(afs_vnode_t *vnode);
+
+/*
+ * super.c
+ */
+extern int afs_fs_init(void);
+extern void afs_fs_exit(void);
+
+#define AFS_CB_HASH_COUNT (PAGE_SIZE/sizeof(struct list_head))
+
+extern struct list_head afs_cb_hash_tbl[];
+extern spinlock_t afs_cb_hash_lock;
+
+#define afs_cb_hash(SRV,FID) \
+ afs_cb_hash_tbl[((unsigned)(SRV) + (FID)->vid + (FID)->vnode + (FID)->unique) % \
+ AFS_CB_HASH_COUNT]
+
+/*
+ * proc.c
+ */
+extern int afs_proc_init(void);
+extern void afs_proc_cleanup(void);
+extern int afs_proc_cell_setup(afs_cell_t *cell);
+extern void afs_proc_cell_remove(afs_cell_t *cell);
+
+#endif /* AFS_INTERNAL_H */
diff --git a/fs/afs/kafsasyncd.c b/fs/afs/kafsasyncd.c
new file mode 100644
index 000000000000..2891e98be91d
--- /dev/null
+++ b/fs/afs/kafsasyncd.c
@@ -0,0 +1,260 @@
+/* kafsasyncd.c: AFS asynchronous operation daemon
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ *
+ * The AFS async daemon is used to do the following:
+ * - probe "dead" servers to see whether they've come back to life yet.
+ * - probe "live" servers that we haven't talked to for a while to see if they are better
+ * candidates for serving than what we're currently using
+ * - poll volume location servers to keep up to date volume location lists
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include "cell.h"
+#include "server.h"
+#include "volume.h"
+#include "kafsasyncd.h"
+#include "kafstimod.h"
+#include <rxrpc/call.h>
+#include <asm/errno.h>
+#include "internal.h"
+
+static DECLARE_COMPLETION(kafsasyncd_alive);
+static DECLARE_COMPLETION(kafsasyncd_dead);
+static DECLARE_WAIT_QUEUE_HEAD(kafsasyncd_sleepq);
+static struct task_struct *kafsasyncd_task;
+static int kafsasyncd_die;
+
+static int kafsasyncd(void *arg);
+
+static LIST_HEAD(kafsasyncd_async_attnq);
+static LIST_HEAD(kafsasyncd_async_busyq);
+static spinlock_t kafsasyncd_async_lock = SPIN_LOCK_UNLOCKED;
+
+static void kafsasyncd_null_call_attn_func(struct rxrpc_call *call)
+{
+}
+
+static void kafsasyncd_null_call_error_func(struct rxrpc_call *call)
+{
+}
+
+/*****************************************************************************/
+/*
+ * start the async daemon
+ */
+int afs_kafsasyncd_start(void)
+{
+ int ret;
+
+ ret = kernel_thread(kafsasyncd,NULL,0);
+ if (ret<0)
+ return ret;
+
+ wait_for_completion(&kafsasyncd_alive);
+
+ return ret;
+} /* end afs_kafsasyncd_start() */
+
+/*****************************************************************************/
+/*
+ * stop the async daemon
+ */
+void afs_kafsasyncd_stop(void)
+{
+ /* get rid of my daemon */
+ kafsasyncd_die = 1;
+ wake_up(&kafsasyncd_sleepq);
+ wait_for_completion(&kafsasyncd_dead);
+
+} /* end afs_kafsasyncd_stop() */
+
+/*****************************************************************************/
+/*
+ * probing daemon
+ */
+static int kafsasyncd(void *arg)
+{
+ DECLARE_WAITQUEUE(myself,current);
+
+ struct list_head *_p;
+ int die;
+
+ kafsasyncd_task = current;
+
+ printk("kAFS: Started kafsasyncd %d\n",current->pid);
+ strcpy(current->comm,"kafsasyncd");
+
+ daemonize();
+
+ complete(&kafsasyncd_alive);
+
+ /* only certain signals are of interest */
+ spin_lock_irq(&current->sig->siglock);
+ siginitsetinv(&current->blocked,0);
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,3)
+ recalc_sigpending();
+#else
+ recalc_sigpending(current);
+#endif
+ spin_unlock_irq(&current->sig->siglock);
+
+ /* loop around looking for things to attend to */
+ do {
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&kafsasyncd_sleepq,&myself);
+
+ for (;;) {
+ if (!list_empty(&kafsasyncd_async_attnq) ||
+ signal_pending(current) ||
+ kafsasyncd_die)
+ break;
+
+ schedule();
+ set_current_state(TASK_INTERRUPTIBLE);
+ }
+
+ remove_wait_queue(&kafsasyncd_sleepq,&myself);
+ set_current_state(TASK_RUNNING);
+
+ /* discard pending signals */
+ afs_discard_my_signals();
+
+ die = kafsasyncd_die;
+
+ /* deal with the next asynchronous operation requiring attention */
+ if (!list_empty(&kafsasyncd_async_attnq)) {
+ struct afs_async_op *op;
+
+ _debug("@@@ Begin Asynchronous Operation");
+
+ op = NULL;
+ spin_lock(&kafsasyncd_async_lock);
+
+ if (!list_empty(&kafsasyncd_async_attnq)) {
+ op = list_entry(kafsasyncd_async_attnq.next,afs_async_op_t,link);
+ list_del(&op->link);
+ list_add_tail(&op->link,&kafsasyncd_async_busyq);
+ }
+
+ spin_unlock(&kafsasyncd_async_lock);
+
+ _debug("@@@ Operation %p {%p}",op,op?op->ops:NULL); /* kdebug() appends the newline */
+
+ if (op)
+ op->ops->attend(op);
+
+ _debug("@@@ End Asynchronous Operation");
+ }
+
+ } while(!die);
+
+ /* need to kill all outstanding asynchronous operations before exiting */
+ kafsasyncd_task = NULL;
+ spin_lock(&kafsasyncd_async_lock);
+
+ /* fold the busy and attention queues together */
+ list_splice(&kafsasyncd_async_busyq,&kafsasyncd_async_attnq);
+ INIT_LIST_HEAD(&kafsasyncd_async_busyq); /* list_del_init() on a spliced-from head would relink the moved nodes */
+
+ /* dequeue kafsasyncd from all their wait queues */
+ list_for_each(_p,&kafsasyncd_async_attnq) {
+ afs_async_op_t *op = list_entry(_p,afs_async_op_t,link);
+
+ op->call->app_attn_func = kafsasyncd_null_call_attn_func;
+ op->call->app_error_func = kafsasyncd_null_call_error_func;
+ remove_wait_queue(&op->call->waitq,&op->waiter);
+ }
+
+ spin_unlock(&kafsasyncd_async_lock);
+
+ /* abort all the operations */
+ while (!list_empty(&kafsasyncd_async_attnq)) {
+ afs_async_op_t *op = list_entry(kafsasyncd_async_attnq.next,afs_async_op_t,link); /* _p is stale after list_for_each */
+ list_del_init(&op->link);
+
+ rxrpc_call_abort(op->call,-EIO);
+ rxrpc_put_call(op->call);
+ op->call = NULL;
+
+ op->ops->discard(op);
+ }
+
+ /* and that's all */
+ _leave("");
+ complete_and_exit(&kafsasyncd_dead,0);
+
+} /* end kafsasyncd() */
+
+/*****************************************************************************/
+/*
+ * begin an operation
+ * - place operation on busy queue
+ */
+void afs_kafsasyncd_begin_op(afs_async_op_t *op)
+{
+ _enter("");
+
+ spin_lock(&kafsasyncd_async_lock);
+
+ init_waitqueue_entry(&op->waiter,kafsasyncd_task);
+
+ list_del(&op->link);
+ list_add_tail(&op->link,&kafsasyncd_async_busyq);
+
+ spin_unlock(&kafsasyncd_async_lock);
+
+ _leave("");
+} /* end afs_kafsasyncd_begin_op() */
+
+/*****************************************************************************/
+/*
+ * request attention for an operation
+ * - move to attention queue
+ */
+void afs_kafsasyncd_attend_op(afs_async_op_t *op)
+{
+ _enter("");
+
+ spin_lock(&kafsasyncd_async_lock);
+
+ list_del(&op->link);
+ list_add_tail(&op->link,&kafsasyncd_async_attnq);
+
+ spin_unlock(&kafsasyncd_async_lock);
+
+ wake_up(&kafsasyncd_sleepq);
+
+ _leave("");
+} /* end afs_kafsasyncd_attend_op() */
+
+/*****************************************************************************/
+/*
+ * terminate an operation
+ * - remove from either queue
+ */
+void afs_kafsasyncd_terminate_op(afs_async_op_t *op)
+{
+ _enter("");
+
+ spin_lock(&kafsasyncd_async_lock);
+
+ list_del_init(&op->link);
+
+ spin_unlock(&kafsasyncd_async_lock);
+
+ wake_up(&kafsasyncd_sleepq);
+
+ _leave("");
+} /* end afs_kafsasyncd_terminate_op() */
diff --git a/fs/afs/kafsasyncd.h b/fs/afs/kafsasyncd.h
new file mode 100644
index 000000000000..6438c17833a1
--- /dev/null
+++ b/fs/afs/kafsasyncd.h
@@ -0,0 +1,49 @@
+/* kafsasyncd.h: AFS asynchronous operation daemon
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_AFS_KAFSASYNCD_H
+#define _LINUX_AFS_KAFSASYNCD_H
+
+#include "types.h"
+
+struct afs_async_op_ops {
+ void (*attend)(afs_async_op_t *op);
+ void (*discard)(afs_async_op_t *op);
+};
+
+/*****************************************************************************/
+/*
+ * asynchronous operation record
+ */
+struct afs_async_op
+{
+ struct list_head link;
+ afs_server_t *server; /* server being contacted */
+ struct rxrpc_call *call; /* RxRPC call performing op */
+ wait_queue_t waiter; /* wait queue for kafsasyncd */
+ const struct afs_async_op_ops *ops; /* operations */
+};
+
+static inline void afs_async_op_init(afs_async_op_t *op, const struct afs_async_op_ops *ops)
+{
+ INIT_LIST_HEAD(&op->link);
+ op->call = NULL;
+ op->ops = ops;
+}
+
+extern int afs_kafsasyncd_start(void);
+extern void afs_kafsasyncd_stop(void);
+
+extern void afs_kafsasyncd_begin_op(afs_async_op_t *op);
+extern void afs_kafsasyncd_attend_op(afs_async_op_t *op);
+extern void afs_kafsasyncd_terminate_op(afs_async_op_t *op);
+
+#endif /* _LINUX_AFS_KAFSASYNCD_H */
diff --git a/fs/afs/kafstimod.c b/fs/afs/kafstimod.c
new file mode 100644
index 000000000000..ccc1b4e6b94d
--- /dev/null
+++ b/fs/afs/kafstimod.c
@@ -0,0 +1,211 @@
+/* kafstimod.c: AFS timeout daemon
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include "cell.h"
+#include "volume.h"
+#include "kafstimod.h"
+#include <asm/errno.h>
+#include "internal.h"
+
+static DECLARE_COMPLETION(kafstimod_alive);
+static DECLARE_COMPLETION(kafstimod_dead);
+static DECLARE_WAIT_QUEUE_HEAD(kafstimod_sleepq);
+static int kafstimod_die;
+
+static LIST_HEAD(kafstimod_list);
+static spinlock_t kafstimod_lock = SPIN_LOCK_UNLOCKED;
+
+static int kafstimod(void *arg);
+
+/*****************************************************************************/
+/*
+ * start the timeout daemon
+ */
+int afs_kafstimod_start(void)
+{
+ int ret;
+
+ ret = kernel_thread(kafstimod,NULL,0);
+ if (ret<0)
+ return ret;
+
+ wait_for_completion(&kafstimod_alive);
+
+ return ret;
+} /* end afs_kafstimod_start() */
+
+/*****************************************************************************/
+/*
+ * stop the timeout daemon
+ */
+void afs_kafstimod_stop(void)
+{
+ /* get rid of my daemon */
+ kafstimod_die = 1;
+ wake_up(&kafstimod_sleepq);
+ wait_for_completion(&kafstimod_dead);
+
+} /* end afs_kafstimod_stop() */
+
+/*****************************************************************************/
+/*
+ * timeout processing daemon
+ */
+static int kafstimod(void *arg)
+{
+ DECLARE_WAITQUEUE(myself,current);
+
+ afs_timer_t *timer;
+
+ printk("kAFS: Started kafstimod %d\n",current->pid);
+ strcpy(current->comm,"kafstimod");
+
+ daemonize();
+
+ complete(&kafstimod_alive);
+
+ /* only certain signals are of interest */
+ spin_lock_irq(&current->sig->siglock);
+ siginitsetinv(&current->blocked,0);
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,3)
+ recalc_sigpending();
+#else
+ recalc_sigpending(current);
+#endif
+ spin_unlock_irq(&current->sig->siglock);
+
+ /* loop around looking for things to attend to */
+ loop:
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&kafstimod_sleepq,&myself);
+
+ for (;;) {
+ unsigned long jif;
+ signed long timeout;
+
+ /* deal with the server being asked to die */
+ if (kafstimod_die) {
+ remove_wait_queue(&kafstimod_sleepq,&myself);
+ _leave("");
+ complete_and_exit(&kafstimod_dead,0);
+ }
+
+ /* discard pending signals */
+ afs_discard_my_signals();
+
+ /* work out the time to elapse before the next event */
+ spin_lock(&kafstimod_lock);
+ if (list_empty(&kafstimod_list)) {
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ }
+ else {
+ timer = list_entry(kafstimod_list.next,afs_timer_t,link);
+ timeout = timer->timo_jif;
+ jif = jiffies;
+
+ if (time_before_eq(timeout,jif))
+ goto immediate;
+
+ else {
+ timeout = (long)timeout - (long)jiffies;
+ }
+ }
+ spin_unlock(&kafstimod_lock);
+
+ schedule_timeout(timeout);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ }
+
+ /* the thing on the front of the queue needs processing
+ * - we come here with the lock held and timer pointing to the expired entry
+ */
+ immediate:
+ remove_wait_queue(&kafstimod_sleepq,&myself);
+ set_current_state(TASK_RUNNING);
+
+ _debug("@@@ Begin Timeout of %p",timer);
+
+ /* dequeue the timer */
+ list_del_init(&timer->link);
+ spin_unlock(&kafstimod_lock);
+
+ /* call the timeout function */
+ timer->ops->timed_out(timer);
+
+ _debug("@@@ End Timeout");
+ goto loop;
+
+} /* end kafstimod() */
+
+/*****************************************************************************/
+/*
+ * (re-)queue a timer
+ */
+void afs_kafstimod_add_timer(afs_timer_t *timer, unsigned long timeout)
+{
+ struct list_head *_p;
+ afs_timer_t *ptimer;
+
+ _enter("%p,%lu",timer,timeout);
+
+ spin_lock(&kafstimod_lock);
+
+ list_del(&timer->link);
+
+ /* the timer was deferred or reset - put it back in the queue at the right place */
+ timer->timo_jif = jiffies + timeout;
+
+ list_for_each(_p,&kafstimod_list) {
+ ptimer = list_entry(_p,afs_timer_t,link);
+ if (time_before(timer->timo_jif,ptimer->timo_jif))
+ break;
+ }
+
+ list_add_tail(&timer->link,_p); /* insert before stopping point */
+
+ spin_unlock(&kafstimod_lock);
+
+ wake_up(&kafstimod_sleepq);
+
+ _leave("");
+} /* end afs_kafstimod_add_timer() */
+
+/*****************************************************************************/
+/*
+ * dequeue a timer
+ * - returns 0 if the timer was deleted or -ENOENT if it wasn't queued
+ */
+int afs_kafstimod_del_timer(afs_timer_t *timer)
+{
+ int ret = 0;
+
+ _enter("%p",timer);
+
+ spin_lock(&kafstimod_lock);
+
+ if (list_empty(&timer->link))
+ ret = -ENOENT;
+ else
+ list_del_init(&timer->link);
+
+ spin_unlock(&kafstimod_lock);
+
+ wake_up(&kafstimod_sleepq);
+
+ _leave(" = %d",ret);
+ return ret;
+} /* end afs_kafstimod_del_timer() */
diff --git a/fs/afs/kafstimod.h b/fs/afs/kafstimod.h
new file mode 100644
index 000000000000..342d81d6025b
--- /dev/null
+++ b/fs/afs/kafstimod.h
@@ -0,0 +1,45 @@
+/* kafstimod.h: AFS timeout daemon
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_AFS_KAFSTIMOD_H
+#define _LINUX_AFS_KAFSTIMOD_H
+
+#include "types.h"
+
+struct afs_timer_ops {
+ /* called when the front of the timer queue has timed out */
+ void (*timed_out)(struct afs_timer *timer);
+};
+
+/*****************************************************************************/
+/*
+ * AFS timer/timeout record
+ */
+struct afs_timer
+{
+ struct list_head link; /* link in timer queue */
+ unsigned long timo_jif; /* timeout time */
+ const struct afs_timer_ops *ops; /* timeout expiry function */
+};
+
+static inline void afs_timer_init(afs_timer_t *timer, const struct afs_timer_ops *ops)
+{
+ INIT_LIST_HEAD(&timer->link);
+ timer->ops = ops;
+}
+
+extern int afs_kafstimod_start(void);
+extern void afs_kafstimod_stop(void);
+
+extern void afs_kafstimod_add_timer(afs_timer_t *timer, unsigned long timeout);
+extern int afs_kafstimod_del_timer(afs_timer_t *timer);
+
+#endif /* _LINUX_AFS_KAFSTIMOD_H */
diff --git a/fs/afs/main.c b/fs/afs/main.c
new file mode 100644
index 000000000000..dc20f670a021
--- /dev/null
+++ b/fs/afs/main.c
@@ -0,0 +1,193 @@
+/* main.c: AFS client file system
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <rxrpc/rxrpc.h>
+#include <rxrpc/transport.h>
+#include <rxrpc/call.h>
+#include <rxrpc/peer.h>
+#include "cell.h"
+#include "server.h"
+#include "fsclient.h"
+#include "cmservice.h"
+#include "kafstimod.h"
+#include "kafsasyncd.h"
+#include "internal.h"
+
+struct rxrpc_transport *afs_transport;
+
+static int afs_init(void);
+static void afs_exit(void);
+static int afs_adding_peer(struct rxrpc_peer *peer);
+static void afs_discarding_peer(struct rxrpc_peer *peer);
+
+module_init(afs_init);
+module_exit(afs_exit);
+
+MODULE_DESCRIPTION("AFS Client File System");
+MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_LICENSE("GPL");
+
+static struct rxrpc_peer_ops afs_peer_ops = {
+ .adding = afs_adding_peer,
+ .discarding = afs_discarding_peer,
+};
+
+struct list_head afs_cb_hash_tbl[AFS_CB_HASH_COUNT];
+spinlock_t afs_cb_hash_lock = SPIN_LOCK_UNLOCKED;
+
+/*****************************************************************************/
+/*
+ * initialise the AFS client FS module
+ */
+static int afs_init(void)
+{
+ int loop, ret;
+
+ printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 registering.\n");
+
+ /* initialise the callback hash table */
+ spin_lock_init(&afs_cb_hash_lock);
+ for (loop=AFS_CB_HASH_COUNT-1; loop>=0; loop--)
+ INIT_LIST_HEAD(&afs_cb_hash_tbl[loop]);
+
+ /* register the /proc stuff */
+ ret = afs_proc_init();
+ if (ret<0)
+ return ret;
+
+ /* initialise the cell DB */
+ ret = afs_cell_init();
+ if (ret<0)
+ goto error;
+
+ /* start the timeout daemon */
+ ret = afs_kafstimod_start();
+ if (ret<0)
+ goto error;
+
+ /* start the async operation daemon */
+ ret = afs_kafsasyncd_start();
+ if (ret<0)
+ goto error_kafstimod;
+
+ /* create the RxRPC transport */
+ ret = rxrpc_create_transport(7001,&afs_transport);
+ if (ret<0)
+ goto error_kafsasyncd;
+
+ afs_transport->peer_ops = &afs_peer_ops;
+
+ /* register the filesystems */
+ ret = afs_fs_init();
+ if (ret<0)
+ goto error_transport;
+
+ return ret;
+
+ error_transport:
+ rxrpc_put_transport(afs_transport);
+ error_kafsasyncd:
+ afs_kafsasyncd_stop();
+ error_kafstimod:
+ afs_kafstimod_stop();
+ error:
+ afs_cell_purge();
+ afs_proc_cleanup();
+ printk(KERN_ERR "kAFS: failed to register: %d\n",ret);
+ return ret;
+} /* end afs_init() */
+
+/*****************************************************************************/
+/*
+ * clean up on module removal
+ */
+static void afs_exit(void)
+{
+ printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 unregistering.\n");
+
+ afs_fs_exit();
+ rxrpc_put_transport(afs_transport);
+ afs_kafstimod_stop();
+ afs_kafsasyncd_stop();
+ afs_cell_purge();
+ afs_proc_cleanup();
+
+} /* end afs_exit() */
+
+/*****************************************************************************/
+/*
+ * notification that new peer record is being added
+ * - called from krxsecd
+ * - return an error to induce an abort
+ * - mustn't sleep (caller holds an rwlock)
+ */
+static int afs_adding_peer(struct rxrpc_peer *peer)
+{
+ afs_server_t *server;
+ int ret;
+
+ _debug("kAFS: Adding new peer %08x\n",ntohl(peer->addr.s_addr));
+
+ /* determine which server the peer resides in (if any) */
+ ret = afs_server_find_by_peer(peer,&server);
+ if (ret<0)
+ return ret; /* none that we recognise, so abort */
+
+ _debug("Server %p{u=%d}\n",server,atomic_read(&server->usage));
+
+ _debug("Cell %p{u=%d}\n",server->cell,atomic_read(&server->cell->usage));
+
+ /* cross-point the structs under a global lock */
+ spin_lock(&afs_server_peer_lock);
+ peer->user = server;
+ server->peer = peer;
+ spin_unlock(&afs_server_peer_lock);
+
+ afs_put_server(server);
+
+ return 0;
+} /* end afs_adding_peer() */
+
+/*****************************************************************************/
+/*
+ * notification that a peer record is being discarded
+ * - called from krxiod or krxsecd
+ */
+static void afs_discarding_peer(struct rxrpc_peer *peer)
+{
+ afs_server_t *server;
+
+ _enter("%p",peer);
+
+ _debug("Discarding peer %08x (rtt=%lu.%lumS)\n",
+ ntohl(peer->addr.s_addr),
+ peer->rtt/1000,
+ peer->rtt%1000);
+
+ /* uncross-point the structs under a global lock */
+ spin_lock(&afs_server_peer_lock);
+ server = peer->user;
+ if (server) {
+ peer->user = NULL;
+ server->peer = NULL;
+
+ //_debug("Server %p{u=%d}\n",server,atomic_read(&server->usage));
+ //_debug("Cell %p{u=%d}\n",server->cell,atomic_read(&server->cell->usage));
+ }
+ spin_unlock(&afs_server_peer_lock);
+
+ _leave("");
+
+} /* end afs_discarding_peer() */
diff --git a/fs/afs/misc.c b/fs/afs/misc.c
new file mode 100644
index 000000000000..e4fce66d76e0
--- /dev/null
+++ b/fs/afs/misc.c
@@ -0,0 +1,39 @@
+/* misc.c: miscellaneous bits
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include "errors.h"
+#include "internal.h"
+
+/*****************************************************************************/
+/*
+ * convert an AFS abort code to a Linux error number
+ * - any code not in the mapping table is reported as an I/O error
+ */
+int afs_abort_to_error(int abortcode)
+{
+	/* translation table of AFS abort codes to errno values */
+	static const struct {
+		int abort;	/* AFS abort code */
+		int error;	/* corresponding Linux error */
+	} errmap[] = {
+		{ VSALVAGE,	-EIO	},
+		{ VNOVNODE,	-ENOENT	},
+		{ VNOVOL,	-ENXIO	},
+		{ VVOLEXISTS,	-EEXIST	},
+		{ VNOSERVICE,	-EIO	},
+		{ VOFFLINE,	-ENOENT	},
+		{ VONLINE,	-EEXIST	},
+		{ VDISKFULL,	-ENOSPC	},
+		{ VOVERQUOTA,	-EDQUOT	},
+		{ VBUSY,	-EBUSY	},
+		{ VMOVED,	-ENXIO	},
+	};
+	int loop;
+
+	for (loop=0; loop<sizeof(errmap)/sizeof(errmap[0]); loop++)
+		if (errmap[loop].abort==abortcode)
+			return errmap[loop].error;
+
+	return -EIO;
+} /* end afs_abort_to_error() */
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
new file mode 100644
index 000000000000..4e88180f1c09
--- /dev/null
+++ b/fs/afs/mntpt.c
@@ -0,0 +1,112 @@
+/* mntpt.c: mountpoint management
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include "volume.h"
+#include "vnode.h"
+#include "internal.h"
+
+
+static struct dentry *afs_mntpt_lookup(struct inode *dir, struct dentry *dentry);
+static int afs_mntpt_open(struct inode *inode, struct file *file);
+
+/* mountpoints can't be opened as ordinary files (open just says -EREMOTE) */
+struct file_operations afs_mntpt_file_operations = {
+	.open = afs_mntpt_open,
+};
+
+/* a mountpoint is still a symlink underneath, so generic readlink works */
+struct inode_operations afs_mntpt_inode_operations = {
+	.lookup = afs_mntpt_lookup,
+	.readlink = page_readlink,
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
+	.getattr = afs_inode_getattr,
+#else
+	.revalidate = afs_inode_revalidate,
+#endif
+};
+
+/*****************************************************************************/
+/*
+ * check a symbolic link to see whether it actually encodes a mountpoint
+ * - a mountpoint's content begins with '%' or '#' and ends with '.'
+ * - sets the AFS_VNODE_MOUNTPOINT flag on the vnode appropriately
+ */
+int afs_mntpt_check_symlink(afs_vnode_t *vnode)
+{
+	struct page *page;
+	size_t size;
+	char *buf;
+	int ret;
+
+	_enter("{%u,%u}",vnode->fid.vnode,vnode->fid.unique);
+
+	/* read the contents of the symlink into the pagecache */
+	page = read_cache_page(AFS_VNODE_TO_I(vnode)->i_mapping,0,
+			       (filler_t*)AFS_VNODE_TO_I(vnode)->i_mapping->a_ops->readpage,NULL);
+	if (IS_ERR(page)) {
+		ret = PTR_ERR(page);
+		goto out;
+	}
+
+	ret = -EIO;
+	wait_on_page_locked(page);
+	buf = kmap(page);
+	if (!PageUptodate(page))
+		goto out_free;
+	if (PageError(page))
+		goto out_free;
+
+	/* examine the symlink's contents
+	 * - only the first page was mapped, so don't look beyond it */
+	size = vnode->status.size;
+	if (size>PAGE_SIZE)
+		goto out_free;
+	/* width/precision args to %*.*s must be int, not size_t */
+	_debug("symlink to %*.*s",(int)size,(int)size,buf);
+
+	if (size>2 &&
+	    (buf[0]=='%' || buf[0]=='#') &&
+	    buf[size-1]=='.'
+	    ) {
+		_debug("symlink is a mountpoint");
+		spin_lock(&vnode->lock);
+		vnode->flags |= AFS_VNODE_MOUNTPOINT;
+		spin_unlock(&vnode->lock);
+	}
+
+	ret = 0;
+
+ out_free:
+	kunmap(page);
+	page_cache_release(page);
+ out:
+	_leave(" = %d",ret);
+	return ret;
+
+} /* end afs_mntpt_check_symlink() */
+
+/*****************************************************************************/
+/*
+ * no valid lookup procedure on this sort of dir
+ * - the VFS is expected to cross to the mounted volume instead
+ */
+static struct dentry *afs_mntpt_lookup(struct inode *dir, struct dentry *dentry)
+{
+	return ERR_PTR(-EREMOTE);
+} /* end afs_mntpt_lookup() */
+
+/*****************************************************************************/
+/*
+ * no valid open procedure on this sort of dir
+ * - the object here is really a remote volume, not an openable file
+ */
+static int afs_mntpt_open(struct inode *inode, struct file *file)
+{
+	return -EREMOTE;
+} /* end afs_mntpt_open() */
diff --git a/fs/afs/mount.h b/fs/afs/mount.h
new file mode 100644
index 000000000000..fbdd77878546
--- /dev/null
+++ b/fs/afs/mount.h
@@ -0,0 +1,23 @@
+/* mount.h: mount parameters
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_AFS_MOUNT_H
+#define _LINUX_AFS_MOUNT_H
+
+/* parameters parsed from the mount command's argument block */
+struct afs_mountdata {
+	const char *volume;	/* name of volume */
+	const char *cell;	/* name of cell containing volume */
+	const char *cache;	/* name of cache block device */
+	size_t nservers;	/* number of server addresses listed (at most 10) */
+	u_int32_t servers[10];	/* IP addresses of servers in this cell */
+};
+
+#endif /* _LINUX_AFS_MOUNT_H */
diff --git a/fs/afs/proc.c b/fs/afs/proc.c
new file mode 100644
index 000000000000..83fda6decf57
--- /dev/null
+++ b/fs/afs/proc.c
@@ -0,0 +1,739 @@
+/* proc.c: /proc interface for AFS
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include "cell.h"
+#include "volume.h"
+#include <asm/uaccess.h>
+#include "internal.h"
+
+/* the /proc/fs/afs/ directory created by afs_proc_init() */
+static struct proc_dir_entry *proc_afs;
+
+
+static int afs_proc_cells_open(struct inode *inode, struct file *file);
+static void *afs_proc_cells_start(struct seq_file *p, loff_t *pos);
+static void *afs_proc_cells_next(struct seq_file *p, void *v, loff_t *pos);
+static void afs_proc_cells_stop(struct seq_file *p, void *v);
+static int afs_proc_cells_show(struct seq_file *m, void *v);
+static ssize_t afs_proc_cells_write(struct file *file, const char *buf, size_t size, loff_t *_pos);
+
+/* seq_file iteration over the global cell list for /proc/fs/afs/cells */
+static struct seq_operations afs_proc_cells_ops = {
+	.start = afs_proc_cells_start,
+	.next = afs_proc_cells_next,
+	.stop = afs_proc_cells_stop,
+	.show = afs_proc_cells_show,
+};
+
+/* /proc/fs/afs/cells is also writable (cell addition commands) */
+static struct file_operations afs_proc_cells_fops = {
+	.open = afs_proc_cells_open,
+	.read = seq_read,
+	.write = afs_proc_cells_write,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static int afs_proc_cell_volumes_open(struct inode *inode, struct file *file);
+static int afs_proc_cell_volumes_release(struct inode *inode, struct file *file);
+static void *afs_proc_cell_volumes_start(struct seq_file *p, loff_t *pos);
+static void *afs_proc_cell_volumes_next(struct seq_file *p, void *v, loff_t *pos);
+static void afs_proc_cell_volumes_stop(struct seq_file *p, void *v);
+static int afs_proc_cell_volumes_show(struct seq_file *m, void *v);
+
+/* seq_file iteration over a cell's volume location records for
+ * /proc/fs/afs/<cell>/volumes */
+static struct seq_operations afs_proc_cell_volumes_ops = {
+	.start = afs_proc_cell_volumes_start,
+	.next = afs_proc_cell_volumes_next,
+	.stop = afs_proc_cell_volumes_stop,
+	.show = afs_proc_cell_volumes_show,
+};
+
+static struct file_operations afs_proc_cell_volumes_fops = {
+	.open = afs_proc_cell_volumes_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = afs_proc_cell_volumes_release,
+};
+
+static int afs_proc_cell_vlservers_open(struct inode *inode, struct file *file);
+static int afs_proc_cell_vlservers_release(struct inode *inode, struct file *file);
+static void *afs_proc_cell_vlservers_start(struct seq_file *p, loff_t *pos);
+static void *afs_proc_cell_vlservers_next(struct seq_file *p, void *v, loff_t *pos);
+static void afs_proc_cell_vlservers_stop(struct seq_file *p, void *v);
+static int afs_proc_cell_vlservers_show(struct seq_file *m, void *v);
+
+/* seq_file iteration over a cell's volume location server addresses for
+ * /proc/fs/afs/<cell>/vlservers */
+static struct seq_operations afs_proc_cell_vlservers_ops = {
+	.start = afs_proc_cell_vlservers_start,
+	.next = afs_proc_cell_vlservers_next,
+	.stop = afs_proc_cell_vlservers_stop,
+	.show = afs_proc_cell_vlservers_show,
+};
+
+static struct file_operations afs_proc_cell_vlservers_fops = {
+	.open = afs_proc_cell_vlservers_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = afs_proc_cell_vlservers_release,
+};
+
+static int afs_proc_cell_servers_open(struct inode *inode, struct file *file);
+static int afs_proc_cell_servers_release(struct inode *inode, struct file *file);
+static void *afs_proc_cell_servers_start(struct seq_file *p, loff_t *pos);
+static void *afs_proc_cell_servers_next(struct seq_file *p, void *v, loff_t *pos);
+static void afs_proc_cell_servers_stop(struct seq_file *p, void *v);
+static int afs_proc_cell_servers_show(struct seq_file *m, void *v);
+
+/* seq_file iteration over a cell's active fileserver list for
+ * /proc/fs/afs/<cell>/servers */
+static struct seq_operations afs_proc_cell_servers_ops = {
+	.start = afs_proc_cell_servers_start,
+	.next = afs_proc_cell_servers_next,
+	.stop = afs_proc_cell_servers_stop,
+	.show = afs_proc_cell_servers_show,
+};
+
+static struct file_operations afs_proc_cell_servers_fops = {
+	.open = afs_proc_cell_servers_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = afs_proc_cell_servers_release,
+};
+
+/*****************************************************************************/
+/*
+ * initialise the /proc/fs/afs/ directory
+ * - returns 0 on success or -ENOMEM, undoing any partial creation on failure
+ */
+int afs_proc_init(void)
+{
+	struct proc_dir_entry *p;
+
+	_enter("");
+
+	/* create the /proc/fs/afs directory itself */
+	proc_afs = proc_mkdir("fs/afs",NULL);
+	if (!proc_afs)
+		goto error;
+	proc_afs->owner = THIS_MODULE;
+
+	/* create the "cells" summary/control file */
+	p = create_proc_entry("cells",0,proc_afs);
+	if (!p)
+		goto error_proc;
+	p->proc_fops = &afs_proc_cells_fops;
+	p->owner = THIS_MODULE;
+
+	_leave(" = 0");
+	return 0;
+
+ error_proc:
+	remove_proc_entry("fs/afs",NULL);
+ error:
+	_leave(" = -ENOMEM");
+	return -ENOMEM;
+
+} /* end afs_proc_init() */
+
+/*****************************************************************************/
+/*
+ * clean up the /proc/fs/afs/ directory
+ * - entries are removed in reverse order of creation (children first)
+ */
+void afs_proc_cleanup(void)
+{
+	remove_proc_entry("cells",proc_afs);
+
+	remove_proc_entry("fs/afs",NULL);
+
+} /* end afs_proc_cleanup() */
+
+/*****************************************************************************/
+/*
+ * open "/proc/fs/afs/cells" which provides a summary of extant cells
+ */
+static int afs_proc_cells_open(struct inode *inode, struct file *file)
+{
+	struct seq_file *m;
+	int ret;
+
+	ret = seq_open(file,&afs_proc_cells_ops);
+	if (ret<0)
+		return ret;
+
+	/* stash the proc entry's private data where the iteration
+	 * routines can find it */
+	m = file->private_data;
+	m->private = PDE(inode)->data;
+
+	return 0;
+} /* end afs_proc_cells_open() */
+
+/*****************************************************************************/
+/*
+ * set up the iterator to start reading from the cells list and return the first item
+ */
+static void *afs_proc_cells_start(struct seq_file *m, loff_t *_pos)
+{
+	struct list_head *_p;
+	loff_t pos = *_pos;
+
+	/* lock the list against modification */
+	down_read(&afs_proc_cells_sem);
+
+	/* allow for the header line
+	 * - (void *)1 is a magic token that afs_proc_cells_show()
+	 *   recognises as "emit the header" */
+	if (!pos)
+		return (void *)1;
+	pos--;
+
+	/* find the n'th element in the list */
+	list_for_each(_p,&afs_proc_cells)
+		if (!pos--)
+			break;
+
+	return _p!=&afs_proc_cells ? _p : NULL;
+} /* end afs_proc_cells_start() */
+
+/*****************************************************************************/
+/*
+ * move to next cell in cells list
+ */
+static void *afs_proc_cells_next(struct seq_file *p, void *v, loff_t *pos)
+{
+	struct list_head *_p;
+
+	(*pos)++;
+
+	/* step over the header token onto the first real list element */
+	_p = v;
+	_p = v==(void*)1 ? afs_proc_cells.next : _p->next;
+
+	return _p!=&afs_proc_cells ? _p : NULL;
+} /* end afs_proc_cells_next() */
+
+/*****************************************************************************/
+/*
+ * clean up after reading from the cells list
+ * - drops the lock taken in afs_proc_cells_start()
+ */
+static void afs_proc_cells_stop(struct seq_file *p, void *v)
+{
+	up_read(&afs_proc_cells_sem);
+
+} /* end afs_proc_cells_stop() */
+
+/*****************************************************************************/
+/*
+ * display a header line followed by a load of cell lines
+ */
+static int afs_proc_cells_show(struct seq_file *m, void *v)
+{
+	afs_cell_t *cell;
+
+	/* display header on line 1 */
+	if (v == (void *)1) {
+		seq_puts(m, "USE NAME\n");
+		return 0;
+	}
+
+	/* display one cell per line on subsequent lines */
+	cell = list_entry(v,afs_cell_t,proc_link);
+	seq_printf(m,"%3d %s\n",atomic_read(&cell->usage),cell->name);
+	return 0;
+} /* end afs_proc_cells_show() */
+
+/*****************************************************************************/
+/*
+ * handle writes to /proc/fs/afs/cells
+ * - to add cells: echo "add <cellname> <IP>[:<IP>][:<IP>]*
+ * - returns the number of bytes consumed or a negative error
+ */
+static ssize_t afs_proc_cells_write(struct file *file, const char *buf, size_t size, loff_t *_pos)
+{
+	char *kbuf, *name, *args;
+	int ret;
+
+	/* start by dragging the command into memory */
+	if (size<=1 || size>=PAGE_SIZE)
+		return -EINVAL;
+
+	kbuf = kmalloc(size+1,GFP_KERNEL);
+	if (!kbuf)
+		return -ENOMEM;
+
+	ret = -EFAULT;
+	if (copy_from_user(kbuf,buf,size)!=0)
+		goto done;
+	kbuf[size] = 0;
+
+	/* trim to first NL */
+	name = memchr(kbuf,'\n',size);
+	if (name) *name = 0;
+
+	/* split into command, name and argslist
+	 * - runs of spaces between words are collapsed */
+	name = strchr(kbuf,' ');
+	if (!name) goto inval;
+	do { *name++ = 0; } while(*name==' ');
+	if (!*name) goto inval;
+
+	args = strchr(name,' ');
+	if (!args) goto inval;
+	do { *args++ = 0; } while(*args==' ');
+	if (!*args) goto inval;
+
+	/* determine command to perform */
+	_debug("cmd=%s name=%s args=%s",kbuf,name,args);
+
+	if (strcmp(kbuf,"add")==0) {
+		afs_cell_t *cell;
+		ret = afs_cell_create(name,args,&cell);
+		if (ret<0)
+			goto done;
+
+		/* NOTE(review): any ref afs_cell_create() returns on *cell is
+		 * retained here - confirm that matches its refcount contract */
+		printk("kAFS: Added new cell '%s'\n",name);
+	}
+	else {
+		goto inval;
+	}
+
+	ret = size;
+
+ done:
+	kfree(kbuf);
+	_leave(" = %d",ret);
+	return ret;
+
+ inval:
+	ret = -EINVAL;
+	printk("kAFS: Invalid Command on /proc/fs/afs/cells file\n");
+	goto done;
+} /* end afs_proc_cells_write() */
+
+/*****************************************************************************/
+/*
+ * initialise /proc/fs/afs/<cell>/
+ * - creates the "servers", "vlservers" and "volumes" files, undoing all
+ *   prior creations if any step fails
+ */
+int afs_proc_cell_setup(afs_cell_t *cell)
+{
+	struct proc_dir_entry *p;
+
+	_enter("%p{%s}",cell,cell->name);
+
+	cell->proc_dir = proc_mkdir(cell->name,proc_afs);
+	if (!cell->proc_dir)
+		return -ENOMEM;
+
+	p = create_proc_entry("servers",0,cell->proc_dir);
+	if (!p)
+		goto error_proc;
+	p->proc_fops = &afs_proc_cell_servers_fops;
+	p->owner = THIS_MODULE;
+	p->data = cell;	/* retrieved via PDE() in the open routines */
+
+	p = create_proc_entry("vlservers",0,cell->proc_dir);
+	if (!p)
+		goto error_servers;
+	p->proc_fops = &afs_proc_cell_vlservers_fops;
+	p->owner = THIS_MODULE;
+	p->data = cell;
+
+	p = create_proc_entry("volumes",0,cell->proc_dir);
+	if (!p)
+		goto error_vlservers;
+	p->proc_fops = &afs_proc_cell_volumes_fops;
+	p->owner = THIS_MODULE;
+	p->data = cell;
+
+	_leave(" = 0");
+	return 0;
+
+	/* unwind in reverse order of creation */
+ error_vlservers:
+	remove_proc_entry("vlservers",cell->proc_dir);
+ error_servers:
+	remove_proc_entry("servers",cell->proc_dir);
+ error_proc:
+	remove_proc_entry(cell->name,proc_afs);
+	_leave(" = -ENOMEM");
+	return -ENOMEM;
+} /* end afs_proc_cell_setup() */
+
+/*****************************************************************************/
+/*
+ * remove /proc/fs/afs/<cell>/
+ * - entries are removed in reverse order of creation (children before the
+ *   directory itself)
+ */
+void afs_proc_cell_remove(afs_cell_t *cell)
+{
+	_enter("");
+
+	remove_proc_entry("volumes",cell->proc_dir);
+	remove_proc_entry("vlservers",cell->proc_dir);
+	remove_proc_entry("servers",cell->proc_dir);
+	remove_proc_entry(cell->name,proc_afs);
+
+	_leave("");
+} /* end afs_proc_cell_remove() */
+
+/*****************************************************************************/
+/*
+ * open "/proc/fs/afs/<cell>/volumes" which provides a summary of the volume
+ * locations known in this cell
+ */
+static int afs_proc_cell_volumes_open(struct inode *inode, struct file *file)
+{
+	struct seq_file *m;
+	afs_cell_t *cell;
+	int ret;
+
+	/* get a ref on the cell; dropped in afs_proc_cell_volumes_release() */
+	cell = afs_get_cell_maybe((afs_cell_t**)&PDE(inode)->data);
+	if (!cell)
+		return -ENOENT;
+
+	ret = seq_open(file,&afs_proc_cell_volumes_ops);
+	if (ret<0) {
+		/* release() won't be called, so don't leak the ref */
+		afs_put_cell(cell);
+		return ret;
+	}
+
+	m = file->private_data;
+	m->private = cell;
+
+	return 0;
+} /* end afs_proc_cell_volumes_open() */
+
+/*****************************************************************************/
+/*
+ * close the file and release the ref to the cell
+ */
+static int afs_proc_cell_volumes_release(struct inode *inode, struct file *file)
+{
+	afs_cell_t *cell = PDE(inode)->data;
+	int ret;
+
+	ret = seq_release(inode,file);
+
+	afs_put_cell(cell);
+
+	/* was falling off the end of a non-void function (undefined
+	 * behaviour) - pass the seq_release() result back */
+	return ret;
+} /* end afs_proc_cell_volumes_release() */
+
+/*****************************************************************************/
+/*
+ * set up the iterator to start reading from this cell's volume location
+ * list and return the first item
+ */
+static void *afs_proc_cell_volumes_start(struct seq_file *m, loff_t *_pos)
+{
+	struct list_head *_p;
+	afs_cell_t *cell = m->private;
+	loff_t pos = *_pos;
+
+	_enter("cell=%p pos=%Ld",cell,*_pos);
+
+	/* lock the list against modification */
+	down_read(&cell->vl_sem);
+
+	/* allow for the header line - (void *)1 is the header token */
+	if (!pos)
+		return (void *)1;
+	pos--;
+
+	/* find the n'th element in the list */
+	list_for_each(_p,&cell->vl_list)
+		if (!pos--)
+			break;
+
+	return _p!=&cell->vl_list ? _p : NULL;
+} /* end afs_proc_cell_volumes_start() */
+
+/*****************************************************************************/
+/*
+ * move to the next volume location record in the cell's list
+ */
+static void *afs_proc_cell_volumes_next(struct seq_file *p, void *v, loff_t *_pos)
+{
+	struct list_head *_p;
+	afs_cell_t *cell = p->private;
+
+	_enter("cell=%p pos=%Ld",cell,*_pos);
+
+	(*_pos)++;
+
+	/* step over the header token onto the first real list element */
+	_p = v;
+	_p = v==(void*)1 ? cell->vl_list.next : _p->next;
+
+	return _p!=&cell->vl_list ? _p : NULL;
+} /* end afs_proc_cell_volumes_next() */
+
+/*****************************************************************************/
+/*
+ * clean up after reading from the volume location list
+ * - drops the lock taken in afs_proc_cell_volumes_start()
+ */
+static void afs_proc_cell_volumes_stop(struct seq_file *p, void *v)
+{
+	afs_cell_t *cell = p->private;
+
+	up_read(&cell->vl_sem);
+
+} /* end afs_proc_cell_volumes_stop() */
+
+/*****************************************************************************/
+/*
+ * display a header line followed by a load of volume lines
+ */
+static int afs_proc_cell_volumes_show(struct seq_file *m, void *v)
+{
+	afs_vlocation_t *vlocation;
+
+	/* display header on line 1 */
+	if (v == (void *)1) {
+		seq_puts(m, "USE VLID[0] VLID[1] VLID[2] NAME\n");
+		return 0;
+	}
+
+	/* display one volume location record per line on subsequent lines */
+	vlocation = list_entry(v,afs_vlocation_t,link);
+	seq_printf(m,"%3d %08x %08x %08x %s\n",
+		   atomic_read(&vlocation->usage),
+		   vlocation->vldb.vid[0],
+		   vlocation->vldb.vid[1],
+		   vlocation->vldb.vid[2],
+		   vlocation->vldb.name
+		   );
+	return 0;
+} /* end afs_proc_cell_volumes_show() */
+
+/*****************************************************************************/
+/*
+ * open "/proc/fs/afs/<cell>/vlservers" which provides a list of volume
+ * location servers
+ */
+static int afs_proc_cell_vlservers_open(struct inode *inode, struct file *file)
+{
+	struct seq_file *m;
+	afs_cell_t *cell;
+	int ret;
+
+	/* get a ref on the cell; dropped in afs_proc_cell_vlservers_release() */
+	cell = afs_get_cell_maybe((afs_cell_t**)&PDE(inode)->data);
+	if (!cell)
+		return -ENOENT;
+
+	ret = seq_open(file,&afs_proc_cell_vlservers_ops);
+	if (ret<0) {
+		/* release() won't be called, so don't leak the ref */
+		afs_put_cell(cell);
+		return ret;
+	}
+
+	m = file->private_data;
+	m->private = cell;
+
+	return 0;
+} /* end afs_proc_cell_vlservers_open() */
+
+/*****************************************************************************/
+/*
+ * close the file and release the ref to the cell
+ */
+static int afs_proc_cell_vlservers_release(struct inode *inode, struct file *file)
+{
+	afs_cell_t *cell = PDE(inode)->data;
+	int ret;
+
+	ret = seq_release(inode,file);
+
+	afs_put_cell(cell);
+
+	/* was falling off the end of a non-void function (undefined
+	 * behaviour) - pass the seq_release() result back */
+	return ret;
+} /* end afs_proc_cell_vlservers_release() */
+
+/*****************************************************************************/
+/*
+ * set up the iterator to start reading from this cell's VL server address
+ * array and return the first item
+ */
+static void *afs_proc_cell_vlservers_start(struct seq_file *m, loff_t *_pos)
+{
+	afs_cell_t *cell = m->private;
+	loff_t pos = *_pos;
+
+	_enter("cell=%p pos=%Ld",cell,*_pos);
+
+	/* lock the list against modification */
+	down_read(&cell->vl_sem);
+
+	/* allow for the header line - (void *)1 is the header token */
+	if (!pos)
+		return (void *)1;
+	pos--;
+
+	/* the addresses live in a fixed array rather than a list */
+	if (pos>=cell->vl_naddrs)
+		return NULL;
+
+	return &cell->vl_addrs[pos];
+} /* end afs_proc_cell_vlservers_start() */
+
+/*****************************************************************************/
+/*
+ * move to the next VL server address in the cell's array
+ */
+static void *afs_proc_cell_vlservers_next(struct seq_file *p, void *v, loff_t *_pos)
+{
+	afs_cell_t *cell = p->private;
+	loff_t pos;
+
+	_enter("cell=%p{nad=%u} pos=%Ld",cell,cell->vl_naddrs,*_pos);
+
+	pos = *_pos;
+	(*_pos)++;
+	if (pos>=cell->vl_naddrs)
+		return NULL;
+
+	return &cell->vl_addrs[pos];
+} /* end afs_proc_cell_vlservers_next() */
+
+/*****************************************************************************/
+/*
+ * clean up after reading from the VL server address array
+ * - drops the lock taken in afs_proc_cell_vlservers_start()
+ */
+static void afs_proc_cell_vlservers_stop(struct seq_file *p, void *v)
+{
+	afs_cell_t *cell = p->private;
+
+	up_read(&cell->vl_sem);
+
+} /* end afs_proc_cell_vlservers_stop() */
+
+/*****************************************************************************/
+/*
+ * display a header line followed by a load of server address lines
+ */
+static int afs_proc_cell_vlservers_show(struct seq_file *m, void *v)
+{
+	struct in_addr *addr = v;
+
+	/* display header on line 1 - (struct in_addr *)1 is the header
+	 * token passed through from the start/next iterators */
+	if (v == (struct in_addr *)1) {
+		seq_puts(m,"ADDRESS\n");
+		return 0;
+	}
+
+	/* display one address per line on subsequent lines */
+	seq_printf(m,"%u.%u.%u.%u\n",NIPQUAD(addr->s_addr));
+
+	return 0;
+} /* end afs_proc_cell_vlservers_show() */
+
+/*****************************************************************************/
+/*
+ * open "/proc/fs/afs/<cell>/servers" which provides a summary of active
+ * servers
+ */
+static int afs_proc_cell_servers_open(struct inode *inode, struct file *file)
+{
+	struct seq_file *m;
+	afs_cell_t *cell;
+	int ret;
+
+	/* get a ref on the cell; dropped in afs_proc_cell_servers_release() */
+	cell = afs_get_cell_maybe((afs_cell_t**)&PDE(inode)->data);
+	if (!cell)
+		return -ENOENT;
+
+	ret = seq_open(file,&afs_proc_cell_servers_ops);
+	if (ret<0) {
+		/* release() won't be called, so don't leak the ref */
+		afs_put_cell(cell);
+		return ret;
+	}
+
+	m = file->private_data;
+	m->private = cell;
+
+	return 0;
+} /* end afs_proc_cell_servers_open() */
+
+/*****************************************************************************/
+/*
+ * close the file and release the ref to the cell
+ */
+static int afs_proc_cell_servers_release(struct inode *inode, struct file *file)
+{
+	afs_cell_t *cell = PDE(inode)->data;
+	int ret;
+
+	ret = seq_release(inode,file);
+
+	afs_put_cell(cell);
+
+	/* was falling off the end of a non-void function (undefined
+	 * behaviour) - pass the seq_release() result back */
+	return ret;
+} /* end afs_proc_cell_servers_release() */
+
+/*****************************************************************************/
+/*
+ * set up the iterator to start reading from this cell's server list and
+ * return the first item
+ */
+static void *afs_proc_cell_servers_start(struct seq_file *m, loff_t *_pos)
+{
+	struct list_head *_p;
+	afs_cell_t *cell = m->private;
+	loff_t pos = *_pos;
+
+	_enter("cell=%p pos=%Ld",cell,*_pos);
+
+	/* lock the list against modification */
+	read_lock(&cell->sv_lock);
+
+	/* allow for the header line - (void *)1 is the header token */
+	if (!pos)
+		return (void *)1;
+	pos--;
+
+	/* find the n'th element in the list */
+	list_for_each(_p,&cell->sv_list)
+		if (!pos--)
+			break;
+
+	return _p!=&cell->sv_list ? _p : NULL;
+} /* end afs_proc_cell_servers_start() */
+
+/*****************************************************************************/
+/*
+ * move to the next server in the cell's server list
+ */
+static void *afs_proc_cell_servers_next(struct seq_file *p, void *v, loff_t *_pos)
+{
+	struct list_head *_p;
+	afs_cell_t *cell = p->private;
+
+	_enter("cell=%p pos=%Ld",cell,*_pos);
+
+	(*_pos)++;
+
+	/* step over the header token onto the first real list element */
+	_p = v;
+	_p = v==(void*)1 ? cell->sv_list.next : _p->next;
+
+	return _p!=&cell->sv_list ? _p : NULL;
+} /* end afs_proc_cell_servers_next() */
+
+/*****************************************************************************/
+/*
+ * clean up after reading from the server list
+ * - drops the lock taken in afs_proc_cell_servers_start()
+ */
+static void afs_proc_cell_servers_stop(struct seq_file *p, void *v)
+{
+	afs_cell_t *cell = p->private;
+
+	read_unlock(&cell->sv_lock);
+
+} /* end afs_proc_cell_servers_stop() */
+
+/*****************************************************************************/
+/*
+ * display a header line followed by a load of server lines
+ */
+static int afs_proc_cell_servers_show(struct seq_file *m, void *v)
+{
+	afs_server_t *server = list_entry(v,afs_server_t,link);
+	char ipaddr[20];	/* dotted quad is at most 15 chars + NUL */
+
+	/* display header on line 1 */
+	if (v == (void *)1) {
+		seq_puts(m, "USE ADDR STATE\n");
+		return 0;
+	}
+
+	/* display one server per line on subsequent lines */
+	sprintf(ipaddr,"%u.%u.%u.%u",NIPQUAD(server->addr));
+	seq_printf(m,"%3d %-15.15s %5d\n",
+		   atomic_read(&server->usage),
+		   ipaddr,
+		   server->fs_state
+		   );
+
+	return 0;
+} /* end afs_proc_cell_servers_show() */
diff --git a/fs/afs/server.c b/fs/afs/server.c
new file mode 100644
index 000000000000..b249d7cc3261
--- /dev/null
+++ b/fs/afs/server.c
@@ -0,0 +1,489 @@
+/* server.c: AFS server record management
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <rxrpc/peer.h>
+#include <rxrpc/connection.h>
+#include "volume.h"
+#include "cell.h"
+#include "server.h"
+#include "transport.h"
+#include "vlclient.h"
+#include "kafstimod.h"
+#include "internal.h"
+
+/* guards the server<->rxrpc-peer cross-pointers */
+spinlock_t afs_server_peer_lock = SPIN_LOCK_UNLOCKED;
+
+#define FS_SERVICE_ID		1	/* AFS File Service ID */
+#define VL_SERVICE_ID		52	/* AFS Volume Location Service ID */
+
+/* graveyard timer expiry - hand the dead server on for destruction */
+static void __afs_server_timeout(afs_timer_t *timer)
+{
+	afs_server_t *server = list_entry(timer,afs_server_t,timeout);
+
+	_debug("SERVER TIMEOUT [%p{u=%d}]",server,atomic_read(&server->usage));
+
+	afs_server_do_timeout(server);
+}
+
+static const struct afs_timer_ops afs_server_timer_ops = {
+	.timed_out	= __afs_server_timeout,
+};
+
+/*****************************************************************************/
+/*
+ * lookup a server record in a cell
+ * - returns in *_server a server record (new, active or resurrected) on
+ *   which the caller holds a ref
+ * - TODO: search the cell's server list
+ */
+int afs_server_lookup(afs_cell_t *cell, const struct in_addr *addr, afs_server_t **_server)
+{
+	struct list_head *_p;
+	afs_server_t *server, *active, *zombie;
+	int loop;
+
+	_enter("%p,%08x,",cell,ntohl(addr->s_addr));
+
+	/* allocate and initialise a server record up front; it's discarded
+	 * again if an extant record turns up below */
+	server = kmalloc(sizeof(afs_server_t),GFP_KERNEL);
+	if (!server) {
+		_leave(" = -ENOMEM");
+		return -ENOMEM;
+	}
+
+	memset(server,0,sizeof(afs_server_t));
+	atomic_set(&server->usage,1);
+
+	INIT_LIST_HEAD(&server->link);
+	init_rwsem(&server->sem);
+	INIT_LIST_HEAD(&server->fs_callq);
+	spin_lock_init(&server->fs_lock);
+	INIT_LIST_HEAD(&server->cb_promises);
+	spin_lock_init(&server->cb_lock);
+
+	for (loop=0; loop<AFS_SERVER_CONN_LIST_SIZE; loop++)
+		server->fs_conn_cnt[loop] = 4;
+
+	/* the memcpy copies the whole in_addr; the separate s_addr
+	 * assignment that used to follow it was redundant */
+	memcpy(&server->addr,addr,sizeof(struct in_addr));
+
+	afs_timer_init(&server->timeout,&afs_server_timer_ops);
+
+	/* add to the cell */
+	write_lock(&cell->sv_lock);
+
+	/* check the active list */
+	list_for_each(_p,&cell->sv_list) {
+		active = list_entry(_p,afs_server_t,link);
+
+		if (active->addr.s_addr==addr->s_addr)
+			goto use_active_server;
+	}
+
+	/* check the inactive list */
+	spin_lock(&cell->sv_gylock);
+	list_for_each(_p,&cell->sv_graveyard) {
+		zombie = list_entry(_p,afs_server_t,link);
+
+		if (zombie->addr.s_addr==addr->s_addr)
+			goto resurrect_server;
+	}
+	spin_unlock(&cell->sv_gylock);
+
+	afs_get_cell(cell);
+	server->cell = cell;
+	list_add_tail(&server->link,&cell->sv_list);
+
+	write_unlock(&cell->sv_lock);
+
+	*_server = server;
+	_leave(" = 0 (%p)",server);
+	return 0;
+
+	/* found a matching active server */
+ use_active_server:
+	_debug("active server");
+	afs_get_server(active);
+	write_unlock(&cell->sv_lock);
+
+	kfree(server);
+
+	*_server = active;
+	_leave(" = 0 (%p)",active);
+	return 0;
+
+	/* found a matching server in the graveyard, so resurrect it and dispose of the new rec */
+ resurrect_server:
+	_debug("resurrecting server");
+
+	list_del(&zombie->link);
+	list_add_tail(&zombie->link,&cell->sv_list);
+	afs_get_server(zombie);
+	afs_kafstimod_del_timer(&zombie->timeout);
+	spin_unlock(&cell->sv_gylock);
+	write_unlock(&cell->sv_lock);
+
+	kfree(server);
+
+	*_server = zombie;
+	_leave(" = 0 (%p)",zombie);
+	return 0;
+
+} /* end afs_server_lookup() */
+
+/*****************************************************************************/
+/*
+ * destroy a server record
+ * - removes from the cell list
+ */
+void afs_put_server(afs_server_t *server)
+{
+	afs_cell_t *cell;
+
+	_enter("%p",server);
+
+	cell = server->cell;
+
+	/* sanity check */
+	if (atomic_read(&server->usage)<=0)
+		BUG();
+
+	/* to prevent a race, the decrement and the dequeue must be effectively atomic */
+	write_lock(&cell->sv_lock);
+
+	if (likely(!atomic_dec_and_test(&server->usage))) {
+		write_unlock(&cell->sv_lock);
+		_leave("");
+		return;
+	}
+
+	/* usage hit zero: move the record to the graveyard rather than
+	 * freeing it immediately, so that it can be resurrected by
+	 * afs_server_lookup() within the grace period */
+	spin_lock(&cell->sv_gylock);
+	list_del(&server->link);
+	list_add_tail(&server->link,&cell->sv_graveyard);
+
+	/* time out in 10 secs */
+	afs_kafstimod_add_timer(&server->timeout,10*HZ);
+
+	spin_unlock(&cell->sv_gylock);
+	write_unlock(&cell->sv_lock);
+
+	_leave(" [killed]");
+} /* end afs_put_server() */
+
+/*****************************************************************************/
+/*
+ * timeout server record
+ * - removes from the cell's graveyard if the usage count is zero
+ */
+void afs_server_do_timeout(afs_server_t *server)
+{
+	struct rxrpc_peer *peer;
+	afs_cell_t *cell;
+	int loop;
+
+	_enter("%p",server);
+
+	cell = server->cell;
+
+	if (atomic_read(&server->usage)<0) BUG();
+
+	/* remove from graveyard if still dead
+	 * - the server graveyard is guarded by sv_gylock everywhere else
+	 *   (afs_server_lookup(), afs_put_server()); taking vl_gylock here,
+	 *   as this function previously did, raced against resurrection */
+	spin_lock(&cell->sv_gylock);
+	if (atomic_read(&server->usage)==0)
+		list_del_init(&server->link);
+	else
+		server = NULL;
+	spin_unlock(&cell->sv_gylock);
+
+	if (!server) {
+		_leave("");
+		return; /* resurrected */
+	}
+
+	/* we can now destroy it properly */
+	afs_put_cell(cell);
+
+	/* uncross-point the structs under a global lock */
+	spin_lock(&afs_server_peer_lock);
+	peer = server->peer;
+	if (peer) {
+		server->peer = NULL;
+		peer->user = NULL;
+	}
+	spin_unlock(&afs_server_peer_lock);
+
+	/* finish cleaning up the server */
+	for (loop=AFS_SERVER_CONN_LIST_SIZE-1; loop>=0; loop--)
+		if (server->fs_conn[loop])
+			rxrpc_put_connection(server->fs_conn[loop]);
+
+	if (server->vlserver)
+		rxrpc_put_connection(server->vlserver);
+
+	kfree(server);
+
+	_leave(" [destroyed]");
+} /* end afs_server_do_timeout() */
+
+/*****************************************************************************/
+/*
+ * get a callslot on a connection to the fileserver on the specified server
+ */
+/*
+ * Obtain a callslot on one of the server's fileserver connections.
+ * - on success, callslot->conn holds a ref'd connection and callslot->nconn
+ *   the slot index; release with afs_server_release_callslot()
+ * - returns 0, the server's outstanding error state, or -EINTR
+ */
+int afs_server_request_callslot(afs_server_t *server, struct afs_server_callslot *callslot)
+{
+	struct afs_server_callslot *pcallslot;
+	int nconn, ret;
+
+	_enter("%p,",server);
+
+	/* prepare the callslot record so it can be queued if need be */
+	INIT_LIST_HEAD(&callslot->link);
+	callslot->task = current;
+	callslot->conn = NULL;
+	callslot->nconn = -1;
+	callslot->ready = 0;
+
+	ret = 0;
+
+	/* get hold of a callslot first */
+	spin_lock(&server->fs_lock);
+
+	/* resurrect the server if its death timeout has expired */
+	if (server->fs_state) {
+		if (time_before(jiffies,server->fs_dead_jif)) {
+			ret = server->fs_state;
+			spin_unlock(&server->fs_lock);
+			_leave(" = %d [still dead]",ret);
+			return ret;
+		}
+
+		server->fs_state = 0;
+	}
+
+	/* try and find a connection that has spare callslots */
+	for (nconn=0; nconn<AFS_SERVER_CONN_LIST_SIZE; nconn++) {
+		if (server->fs_conn_cnt[nconn]>0) {
+			server->fs_conn_cnt[nconn]--;
+			spin_unlock(&server->fs_lock);
+			callslot->nconn = nconn;
+			goto obtained_slot;
+		}
+	}
+
+	/* none were available - wait interruptibly for one to become available */
+	set_current_state(TASK_INTERRUPTIBLE);
+	list_add_tail(&callslot->link,&server->fs_callq);
+	spin_unlock(&server->fs_lock);
+
+	while (!callslot->ready && !signal_pending(current)) {
+		schedule();
+		set_current_state(TASK_INTERRUPTIBLE);
+	}
+
+	set_current_state(TASK_RUNNING);
+
+	/* even if we were interrupted we may still be queued */
+	if (!callslot->ready) {
+		spin_lock(&server->fs_lock);
+		list_del_init(&callslot->link);
+		spin_unlock(&server->fs_lock);
+	}
+
+	nconn = callslot->nconn;
+
+	/* if interrupted, we must release any slot we also got before returning an error */
+	if (signal_pending(current)) {
+		ret = -EINTR;
+		goto error_release;
+	}
+
+	/* if we were woken up with an error, then pass that error back to the caller */
+	if (nconn<0) {
+		_leave(" = %d",callslot->errno);
+		return callslot->errno;
+	}
+
+	/* were we given a connection directly? */
+	if (callslot->conn) {
+		/* yes - use it */
+		_leave(" = 0 (nc=%d)",nconn);
+		return 0;
+	}
+
+	/* got a callslot, but no connection */
+ obtained_slot:
+
+	/* need to get hold of the RxRPC connection */
+	down_write(&server->sem);
+
+	/* quick check to see if there's an outstanding error */
+	ret = server->fs_state;
+	if (ret)
+		goto error_release_upw;
+
+	if (server->fs_conn[nconn]) {
+		/* reuse an existing connection */
+		rxrpc_get_connection(server->fs_conn[nconn]);
+		callslot->conn = server->fs_conn[nconn];
+	}
+	else {
+		/* create a new connection */
+		ret = rxrpc_create_connection(afs_transport,
+					      htons(7000),
+					      server->addr.s_addr,
+					      FS_SERVICE_ID,
+					      NULL,
+					      &server->fs_conn[nconn]);
+
+		if (ret<0)
+			goto error_release_upw;
+
+		/* hand back the connection that was just created in slot
+		 * nconn (this previously grabbed slot 0 regardless of which
+		 * slot the new connection actually went into) */
+		callslot->conn = server->fs_conn[nconn];
+		rxrpc_get_connection(callslot->conn);
+	}
+
+	up_write(&server->sem);
+
+	_leave(" = 0");
+	return 0;
+
+	/* handle an error occurring */
+ error_release_upw:
+	up_write(&server->sem);
+
+ error_release:
+	/* either release the callslot or pass it along to another deserving task */
+	spin_lock(&server->fs_lock);
+
+	if (nconn<0) {
+		/* no callslot was allocated, so there is nothing to hand on;
+		 * the lock must still be dropped (this path previously left
+		 * it held) */
+		spin_unlock(&server->fs_lock);
+	}
+	else if (list_empty(&server->fs_callq)) {
+		/* no one waiting - return the callslot to the pool */
+		server->fs_conn_cnt[nconn]++;
+		spin_unlock(&server->fs_lock);
+	}
+	else {
+		/* someone's waiting - dequeue them and wake them up */
+		pcallslot = list_entry(server->fs_callq.next,struct afs_server_callslot,link);
+		list_del_init(&pcallslot->link);
+
+		pcallslot->errno = server->fs_state;
+		if (!pcallslot->errno) {
+			/* pass them out callslot details */
+			callslot->conn = xchg(&pcallslot->conn,callslot->conn);
+			pcallslot->nconn = nconn;
+			callslot->nconn = nconn = -1;
+		}
+		pcallslot->ready = 1;
+		wake_up_process(pcallslot->task);
+		spin_unlock(&server->fs_lock);
+	}
+
+	/* drop any connection ref we ended up holding */
+	if (callslot->conn) rxrpc_put_connection(callslot->conn);
+	callslot->conn = NULL;
+
+	_leave(" = %d",ret);
+	return ret;
+
+} /* end afs_server_request_callslot() */
+
+/*****************************************************************************/
+/*
+ * release a callslot back to the server
+ * - transfers the RxRPC connection to the next pending callslot if possible
+ */
+/*
+ * Return a callslot to the server's pool.
+ * - hands the RxRPC connection straight to the next queued waiter if any,
+ *   otherwise increments the slot's free count and drops our connection ref
+ */
+void afs_server_release_callslot(afs_server_t *server, struct afs_server_callslot *callslot)
+{
+	struct afs_server_callslot *pcallslot;
+
+	/* validate the slot number before it is used to index fs_conn_cnt[]
+	 * in the debug statement below (the check previously came after the
+	 * potentially out-of-bounds access) */
+	if (callslot->nconn<0) BUG();
+
+	_enter("{ad=%08x,cnt=%u},{%d}",
+	       ntohl(server->addr.s_addr),
+	       server->fs_conn_cnt[callslot->nconn],
+	       callslot->nconn);
+
+	spin_lock(&server->fs_lock);
+
+	if (list_empty(&server->fs_callq)) {
+		/* no one waiting - just return the slot to the pool */
+		server->fs_conn_cnt[callslot->nconn]++;
+		spin_unlock(&server->fs_lock);
+	}
+	else {
+		/* someone's waiting - dequeue them and wake them up */
+		pcallslot = list_entry(server->fs_callq.next,struct afs_server_callslot,link);
+		list_del_init(&pcallslot->link);
+
+		pcallslot->errno = server->fs_state;
+		if (!pcallslot->errno) {
+			/* pass them out callslot details; xchg gives them our
+			 * connection and leaves us theirs (normally NULL) */
+			callslot->conn = xchg(&pcallslot->conn,callslot->conn);
+			pcallslot->nconn = callslot->nconn;
+			callslot->nconn = -1;
+		}
+
+		pcallslot->ready = 1;
+		wake_up_process(pcallslot->task);
+		spin_unlock(&server->fs_lock);
+	}
+
+	/* drop any connection ref still held */
+	if (callslot->conn) rxrpc_put_connection(callslot->conn);
+
+	_leave("");
+} /* end afs_server_release_callslot() */
+
+/*****************************************************************************/
+/*
+ * get a handle to a connection to the vlserver (volume location) on the specified server
+ */
+int afs_server_get_vlconn(afs_server_t *server, struct rxrpc_connection **_conn)
+{
+ struct rxrpc_connection *conn;
+ int ret;
+
+ _enter("%p,",server);
+
+ ret = 0;
+ conn = NULL;
+ down_read(&server->sem);
+
+ if (server->vlserver) {
+ /* reuse an existing connection */
+ rxrpc_get_connection(server->vlserver);
+ conn = server->vlserver;
+ up_read(&server->sem);
+ }
+ else {
+ /* create a new connection */
+ /* drop the read lock and retake as a writer; another task may
+ * have created the connection in the window, so recheck under
+ * the write lock before creating one ourselves */
+ up_read(&server->sem);
+ down_write(&server->sem);
+ if (!server->vlserver) {
+ ret = rxrpc_create_connection(afs_transport,
+ htons(7003),
+ server->addr.s_addr,
+ VL_SERVICE_ID,
+ NULL,
+ &server->vlserver);
+ }
+ if (ret==0) {
+ rxrpc_get_connection(server->vlserver);
+ conn = server->vlserver;
+ }
+ up_write(&server->sem);
+ }
+
+ /* hand the ref'd connection (or NULL on error) back to the caller */
+ *_conn = conn;
+ _leave(" = %d",ret);
+ return ret;
+} /* end afs_server_get_vlconn() */
diff --git a/fs/afs/server.h b/fs/afs/server.h
new file mode 100644
index 000000000000..feddacf2c954
--- /dev/null
+++ b/fs/afs/server.h
@@ -0,0 +1,97 @@
+/* server.h: AFS server record
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_AFS_SERVER_H
+#define _LINUX_AFS_SERVER_H
+
+#include "types.h"
+#include "kafstimod.h"
+#include <rxrpc/peer.h>
+#include <linux/rwsem.h>
+
+extern spinlock_t afs_server_peer_lock;
+
+/*****************************************************************************/
+/*
+ * AFS server record
+ */
+struct afs_server
+{
+ atomic_t usage;
+ afs_cell_t *cell; /* cell in which server resides */
+ struct list_head link; /* link in cell's server list */
+ struct rw_semaphore sem; /* access lock */
+ afs_timer_t timeout; /* graveyard timeout */
+ struct in_addr addr; /* server address */
+ struct rxrpc_peer *peer; /* peer record for this server */
+ struct rxrpc_connection *vlserver; /* connection to the volume location service */
+
+ /* file service access */
+#define AFS_SERVER_CONN_LIST_SIZE 2
+ struct rxrpc_connection *fs_conn[AFS_SERVER_CONN_LIST_SIZE]; /* FS connections */
+ unsigned fs_conn_cnt[AFS_SERVER_CONN_LIST_SIZE]; /* per conn call count */
+ struct list_head fs_callq; /* queue of processes waiting to make a call */
+ spinlock_t fs_lock; /* access lock */
+ int fs_state; /* 0 or reason FS currently marked dead (-errno) */
+ unsigned fs_rtt; /* FS round trip time */
+ unsigned long fs_act_jif; /* time at which last activity occurred */
+ unsigned long fs_dead_jif; /* time at which no longer to be considered dead */
+
+ /* callback promise management */
+ struct list_head cb_promises; /* as yet unbroken promises from this server */
+ spinlock_t cb_lock; /* access lock */
+};
+
+extern int afs_server_lookup(afs_cell_t *cell, const struct in_addr *addr, afs_server_t **_server);
+
+#define afs_get_server(S) do { atomic_inc(&(S)->usage); } while(0)
+
+extern void afs_put_server(afs_server_t *server);
+extern void afs_server_do_timeout(afs_server_t *server);
+
+extern int afs_server_find_by_peer(const struct rxrpc_peer *peer, afs_server_t **_server);
+
+extern int afs_server_get_vlconn(afs_server_t *server, struct rxrpc_connection **_conn);
+
+static inline afs_server_t *afs_server_get_from_peer(struct rxrpc_peer *peer)
+{
+ afs_server_t *server;
+
+ /* look up the server attached to this peer and, if there is one, take
+ * a ref on it; the server<->peer cross-pointers are guarded by
+ * afs_server_peer_lock so the link can't be torn down under us */
+ spin_lock(&afs_server_peer_lock);
+ server = peer->user;
+ if (server)
+ afs_get_server(server);
+ spin_unlock(&afs_server_peer_lock);
+
+ return server;
+}
+
+/*****************************************************************************/
+/*
+ * AFS server callslot grant record
+ */
+struct afs_server_callslot
+{
+ struct list_head link; /* link in server's list */
+ struct task_struct *task; /* process waiting to make call */
+ struct rxrpc_connection *conn; /* connection to use (or NULL on error) */
+ short nconn; /* connection slot number (-1 on error) */
+ char ready; /* T when ready */
+ int errno; /* error number if nconn==-1 */
+};
+
+extern int afs_server_request_callslot(afs_server_t *server,
+ struct afs_server_callslot *callslot);
+
+extern void afs_server_release_callslot(afs_server_t *server,
+ struct afs_server_callslot *callslot);
+
+#endif /* _LINUX_AFS_SERVER_H */
diff --git a/fs/afs/super.c b/fs/afs/super.c
new file mode 100644
index 000000000000..18056534b504
--- /dev/null
+++ b/fs/afs/super.c
@@ -0,0 +1,595 @@
+/*
+ * Copyright (c) 2002 Red Hat, Inc. All rights reserved.
+ *
+ * This software may be freely redistributed under the terms of the
+ * GNU General Public License.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Authors: David Howells <dhowells@redhat.com>
+ * David Woodhouse <dwmw2@cambridge.redhat.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include "vnode.h"
+#include "volume.h"
+#include "cell.h"
+#include "cmservice.h"
+#include "fsclient.h"
+#include "super.h"
+#include "internal.h"
+
+#define AFS_FS_MAGIC 0x6B414653 /* 'kAFS' */
+
+/* duplicate a NUL-terminated string into a freshly kmalloc'd buffer,
+ * returning NULL if the allocation fails */
+static inline char *strdup(const char *s)
+{
+	char *copy;
+
+	copy = kmalloc(strlen(s)+1,GFP_KERNEL);
+	if (!copy)
+		return NULL;
+
+	strcpy(copy,s);
+	return copy;
+}
+
+static void afs_i_init_once(void *foo, kmem_cache_t *cachep, unsigned long flags);
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
+static struct super_block *afs_get_sb(struct file_system_type *fs_type,
+ int flags, char *dev_name, void *data);
+#else
+static struct super_block *afs_read_super(struct super_block *sb, void *data, int);
+#endif
+
+static struct inode *afs_alloc_inode(struct super_block *sb);
+
+static void afs_put_super(struct super_block *sb);
+
+static void afs_destroy_inode(struct inode *inode);
+
+static struct file_system_type afs_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "afs",
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
+ .get_sb = afs_get_sb,
+ .kill_sb = kill_anon_super,
+#else
+ .read_super = afs_read_super,
+#endif
+};
+
+static struct super_operations afs_super_ops = {
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
+ .statfs = simple_statfs,
+ .alloc_inode = afs_alloc_inode,
+ .drop_inode = generic_delete_inode,
+ .destroy_inode = afs_destroy_inode,
+#else
+ .read_inode2 = afs_read_inode2,
+#endif
+ .clear_inode = afs_clear_inode,
+ .put_super = afs_put_super,
+};
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
+static kmem_cache_t *afs_inode_cachep;
+#endif
+
+/*****************************************************************************/
+/*
+ * initialise the filesystem
+ */
+int __init afs_fs_init(void)
+{
+ int ret;
+
+ kenter("");
+
+ /* open the cache */
+#if 0
+ ret = -EINVAL;
+ if (!cachedev) {
+ printk(KERN_NOTICE "kAFS: No cache device specified as module parm\n");
+ printk(KERN_NOTICE "kAFS: Set with \"cachedev=<devname>\" on insmod's cmdline\n");
+ return ret;
+ }
+
+ ret = afs_cache_open(cachedev,&afs_cache);
+ if (ret<0) {
+ printk(KERN_NOTICE "kAFS: Failed to open cache device\n");
+ return ret;
+ }
+#endif
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
+ /* create ourselves an inode cache; each object is a whole afs_vnode_t
+ * with the VFS inode embedded inside it */
+ ret = -ENOMEM;
+ afs_inode_cachep = kmem_cache_create("afs_inode_cache",
+ sizeof(afs_vnode_t),
+ 0,
+ SLAB_HWCACHE_ALIGN,
+ afs_i_init_once,
+ NULL);
+ if (!afs_inode_cachep) {
+ printk(KERN_NOTICE "kAFS: Failed to allocate inode cache\n");
+#if 0
+ afs_put_cache(afs_cache);
+#endif
+ return ret;
+ }
+#endif
+
+ /* now export our filesystem to lesser mortals */
+ ret = register_filesystem(&afs_fs_type);
+ if (ret<0) {
+ /* undo the inode cache creation on registration failure */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
+ kmem_cache_destroy(afs_inode_cachep);
+#endif
+#if 0
+ afs_put_cache(afs_cache);
+#endif
+ kleave(" = %d",ret);
+ return ret;
+ }
+
+ kleave(" = 0");
+ return 0;
+} /* end afs_fs_init() */
+
+/*****************************************************************************/
+/*
+ * clean up the filesystem
+ */
+/*
+ * clean up the filesystem on module unload
+ */
+void __exit afs_fs_exit(void)
+{
+	/* unregister the filesystem first so that no new superblocks can
+	 * come into being and allocate inodes from the cache we are about
+	 * to destroy (the previous order destroyed the cache first) */
+	unregister_filesystem(&afs_fs_type);
+
+	/* destroy our private inode cache */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
+	kmem_cache_destroy(afs_inode_cachep);
+#endif
+
+#if 0
+	if (afs_cache)
+		afs_put_cache(afs_cache);
+#endif
+} /* end afs_fs_exit() */
+
+/*****************************************************************************/
+/*
+ * check that an argument has a value
+ */
+/* check that a mount option was followed by a non-empty argument;
+ * returns 1 if so, else logs a notice and returns 0 */
+static int want_arg(char **_value, const char *option)
+{
+	if (_value && *_value && **_value)
+		return 1;
+
+	printk(KERN_NOTICE "kAFS: %s: argument missing\n",option);
+	return 0;
+} /* end want_arg() */
+
+/*****************************************************************************/
+/*
+ * check that there is a value
+ */
+#if 0
+static int want_value(char **_value, const char *option)
+{
+ if (!_value || !*_value || !**_value) {
+ printk(KERN_NOTICE "kAFS: %s: argument incomplete\n",option);
+ return 0;
+ }
+ return 1;
+} /* end want_value() */
+#endif
+
+/*****************************************************************************/
+/*
+ * check that there's no subsequent value
+ */
+/* check that a mount option was NOT followed by an argument;
+ * returns 1 if so, else logs a notice and returns 0 */
+static int want_no_value(char *const *_value, const char *option)
+{
+	if (!*_value || !**_value)
+		return 1;
+
+	printk(KERN_NOTICE "kAFS: %s: Invalid argument: %s\n",option,*_value);
+	return 0;
+} /* end want_no_value() */
+
+/*****************************************************************************/
+/*
+ * extract a number from an option string value
+ */
+#if 0
+static int want_number(char **_value, const char *option, unsigned long *number,
+ unsigned long limit)
+{
+ char *value = *_value;
+
+ if (!want_value(_value,option))
+ return 0;
+
+ *number = simple_strtoul(value,_value,0);
+
+ if (value==*_value) {
+ printk(KERN_NOTICE "kAFS: %s: Invalid number: %s\n",option,value);
+ return 0;
+ }
+
+ if (*number>limit) {
+ printk(KERN_NOTICE "kAFS: %s: numeric value %lu > %lu\n",option,*number,limit);
+ return 0;
+ }
+
+ return 1;
+} /* end want_number() */
+#endif
+
+/*****************************************************************************/
+/*
+ * extract a separator from an option string value
+ */
+#if 0
+static int want_sep(char **_value, const char *option, char sep)
+{
+ if (!want_value(_value,option))
+ return 0;
+
+ if (*(*_value)++ != sep) {
+ printk(KERN_NOTICE "kAFS: %s: '%c' expected: %s\n",option,sep,*_value-1);
+ return 0;
+ }
+
+ return 1;
+} /* end want_number() */
+#endif
+
+/*****************************************************************************/
+/*
+ * extract an IP address from an option string value
+ */
+#if 0
+static int want_ipaddr(char **_value, const char *option, struct in_addr *addr)
+{
+ unsigned long number[4];
+
+ if (!want_value(_value,option))
+ return 0;
+
+ if (!want_number(_value,option,&number[0],255) ||
+ !want_sep(_value,option,'.') ||
+ !want_number(_value,option,&number[1],255) ||
+ !want_sep(_value,option,'.') ||
+ !want_number(_value,option,&number[2],255) ||
+ !want_sep(_value,option,'.') ||
+ !want_number(_value,option,&number[3],255))
+ return 0;
+
+ ((u8*)addr)[0] = number[0];
+ ((u8*)addr)[1] = number[1];
+ ((u8*)addr)[2] = number[2];
+ ((u8*)addr)[3] = number[3];
+
+ return 1;
+} /* end want_numeric() */
+#endif
+
+/*****************************************************************************/
+/*
+ * parse the mount options
+ * - this function has been shamelessly adapted from the ext3 fs which shamelessly adapted it from
+ * the msdos fs
+ */
+static int afs_super_parse_options(struct afs_super_info *as, char *options, char **devname)
+{
+ char *key, *value;
+ int ret;
+
+ _enter("%s",options);
+
+ ret = 0;
+ /* walk the comma-separated option list, splitting each entry into a
+ * key and an optional value at the first '=' (mutates the buffer) */
+ while ((key = strsep(&options,",")))
+ {
+ value = strchr(key,'=');
+ if (value)
+ *value++ = 0;
+
+ printk("kAFS: KEY: %s, VAL:%s\n",key,value?:"-");
+
+ if (strcmp(key,"rwpath")==0) {
+ /* "rwpath": note that the parent is a R/W AFS volume */
+ if (!want_no_value(&value,"rwpath")) return -EINVAL;
+ as->rwparent = 1;
+ continue;
+ }
+ else if (strcmp(key,"vol")==0) {
+ /* "vol=<name>": supply/override the volume name */
+ if (!want_arg(&value,"vol")) return -EINVAL;
+ *devname = value;
+ continue;
+ }
+
+#if 0
+ if (strcmp(key,"servers")==0) {
+ if (!want_arg(&value,"servers")) return -EINVAL;
+
+ _debug("servers=%s",value);
+
+ for (;;) {
+ struct in_addr addr;
+
+ if (!want_ipaddr(&value,"servers",&addr))
+ return -EINVAL;
+
+ ret = afs_create_server(as->cell,&addr,&as->server);
+ if (ret<0) {
+ printk("kAFS: unable to create server: %d\n",ret);
+ return ret;
+ }
+
+ if (!*value)
+ break;
+
+ if (as->server) {
+ printk(KERN_NOTICE
+ "kAFS: only one server can be specified\n");
+ return -EINVAL;
+ }
+
+ if (!want_sep(&value,"servers",':'))
+ return -EINVAL;
+ }
+ continue;
+ }
+#endif
+
+ /* any unrecognised option is a hard error */
+ printk("kAFS: Unknown mount option: '%s'\n",key);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ ret = 0;
+
+ error:
+ _leave(" = %d",ret);
+
+ return ret;
+} /* end afs_super_parse_options() */
+
+/*****************************************************************************/
+/*
+ * fill in the superblock
+ */
+/*
+ * fill in the superblock
+ * - _data is a two-element array: [0] the device (volume) name, [1] the
+ *   mount options string (either element may be NULL)
+ */
+static int afs_fill_super(struct super_block *sb, void *_data, int silent)
+{
+	struct afs_super_info *as = NULL;
+	struct dentry *root = NULL;
+	struct inode *inode = NULL;
+	afs_fid_t fid;
+	void **data = _data;
+	char *options, *devname;
+	int ret;
+
+	_enter("");
+
+	if (!data) {
+		_leave(" = -EINVAL");
+		return -EINVAL;
+	}
+	devname = data[0];
+	options = data[1];
+	if (options)
+		options[PAGE_SIZE-1] = 0;
+
+	/* allocate a superblock info record */
+	as = kmalloc(sizeof(struct afs_super_info),GFP_KERNEL);
+	if (!as) {
+		_leave(" = -ENOMEM");
+		return -ENOMEM;
+	}
+
+	memset(as,0,sizeof(struct afs_super_info));
+
+	/* parse the options (may override devname via "vol=") */
+	if (options) {
+		ret = afs_super_parse_options(as,options,&devname);
+		if (ret<0)
+			goto error;
+	}
+
+	/* a volume name must have been supplied, either as the device name
+	 * or through the "vol=" option (previously this was only checked
+	 * when options were given, letting a NULL devname through to
+	 * afs_volume_lookup()) */
+	if (!devname) {
+		printk("kAFS: no volume name specified\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	/* parse the device name */
+	ret = afs_volume_lookup(devname,as->rwparent,&as->volume);
+	if (ret<0)
+		goto error;
+
+	/* fill in the superblock */
+	sb->s_blocksize = PAGE_CACHE_SIZE;
+	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_magic = AFS_FS_MAGIC;
+	sb->s_op = &afs_super_ops;
+	sb->s_fs_info = as;
+
+	/* allocate the root inode and dentry; vnode 1 is the volume root */
+	fid.vid = as->volume->vid;
+	fid.vnode = 1;
+	fid.unique = 1;
+	ret = afs_iget(sb,&fid,&inode);
+	if (ret<0)
+		goto error;
+
+	ret = -ENOMEM;
+	root = d_alloc_root(inode);
+	if (!root)
+		goto error;
+
+	sb->s_root = root;
+
+	_leave(" = 0");
+	return 0;
+
+ error:
+	/* unwind whatever was set up so far */
+	if (root) dput(root);
+	if (inode) iput(inode);
+	if (as) {
+		if (as->volume) afs_put_volume(as->volume);
+		kfree(as);
+	}
+	sb->s_fs_info = NULL;
+
+	_leave(" = %d",ret);
+	return ret;
+} /* end afs_fill_super() */
+
+/*****************************************************************************/
+/*
+ * get an AFS superblock
+ * - TODO: don't use get_sb_nodev(), but rather call sget() directly
+ */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
+static struct super_block *afs_get_sb(struct file_system_type *fs_type,
+ int flags,
+ char *dev_name,
+ void *options)
+{
+ struct super_block *sb;
+ void *data[2] = { dev_name, options };
+ int ret;
+
+ _enter(",,%s,%p",dev_name,options);
+
+ /* start the cache manager */
+ ret = afscm_start();
+ if (ret<0) {
+ _leave(" = %d",ret);
+ return ERR_PTR(ret);
+ }
+
+ /* allocate a deviceless superblock; dev_name and options are passed
+ * through to afs_fill_super() packed into a two-element array */
+ sb = get_sb_nodev(fs_type,flags,data,afs_fill_super);
+ if (IS_ERR(sb)) {
+ /* balance the afscm_start() above */
+ afscm_stop();
+ return sb;
+ }
+
+ _leave("");
+ return sb;
+} /* end afs_get_sb() */
+#endif
+
+/*****************************************************************************/
+/*
+ * read an AFS superblock
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+static struct super_block *afs_read_super(struct super_block *sb, void *options, int silent)
+{
+ void *data[2] = { NULL, options };
+ int ret;
+
+ _enter(",,%s",(char*)options);
+
+ /* start the cache manager */
+ ret = afscm_start();
+ if (ret<0) {
+ _leave(" = NULL (%d)",ret);
+ return NULL;
+ }
+
+ /* allocate a deviceless superblock */
+ /* NOTE(review): data[0] (the device name) is NULL on this 2.4-style
+ * path, so mounting appears to rely on a "vol=" option being present
+ * in the option string - verify */
+ ret = afs_fill_super(sb,data,silent);
+ if (ret<0) {
+ /* balance the afscm_start() above */
+ afscm_stop();
+ _leave(" = NULL (%d)",ret);
+ return NULL;
+ }
+
+ _leave(" = %p",sb);
+ return sb;
+} /* end afs_read_super() */
+#endif
+
+/*****************************************************************************/
+/*
+ * finish the unmounting process on the superblock
+ */
+/*
+ * finish the unmounting process on the superblock
+ */
+static void afs_put_super(struct super_block *sb)
+{
+	struct afs_super_info *as = sb->s_fs_info;
+
+	_enter("");
+
+	if (as) {
+		if (as->volume) afs_put_volume(as->volume);
+		/* the info record was kmalloc'd in afs_fill_super() and
+		 * nothing else frees it - release it here to avoid leaking
+		 * it on every unmount */
+		kfree(as);
+		sb->s_fs_info = NULL;
+	}
+
+	/* stop the cache manager */
+	afscm_stop();
+
+	_leave("");
+} /* end afs_put_super() */
+
+/*****************************************************************************/
+/*
+ * initialise an inode cache slab element prior to any use
+ */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
+static void afs_i_init_once(void *_vnode, kmem_cache_t *cachep, unsigned long flags)
+{
+ afs_vnode_t *vnode = (afs_vnode_t *) _vnode;
+
+ /* only initialise on a genuine construction pass, not on slab
+ * debugging/verification passes */
+ if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == SLAB_CTOR_CONSTRUCTOR) {
+ memset(vnode,0,sizeof(*vnode));
+ inode_init_once(&vnode->vfs_inode);
+ init_waitqueue_head(&vnode->update_waitq);
+ spin_lock_init(&vnode->lock);
+ INIT_LIST_HEAD(&vnode->cb_link);
+ INIT_LIST_HEAD(&vnode->cb_hash_link);
+ afs_timer_init(&vnode->cb_timeout,&afs_vnode_cb_timed_out_ops);
+ }
+
+} /* end afs_i_init_once() */
+#endif
+
+/*****************************************************************************/
+/*
+ * allocate an AFS inode struct from our slab cache
+ */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
+static struct inode *afs_alloc_inode(struct super_block *sb)
+{
+ afs_vnode_t *vnode;
+
+ vnode = (afs_vnode_t *) kmem_cache_alloc(afs_inode_cachep,SLAB_KERNEL);
+ if (!vnode)
+ return NULL;
+
+ /* the slab constructor only runs on first construction of an object,
+ * so reset the per-use fields here - they may hold stale data from a
+ * previous incarnation of this slab object */
+ memset(&vnode->fid,0,sizeof(vnode->fid));
+ memset(&vnode->status,0,sizeof(vnode->status));
+
+ vnode->volume = NULL;
+ vnode->update_cnt = 0;
+ vnode->flags = 0;
+
+ return &vnode->vfs_inode;
+} /* end afs_alloc_inode() */
+#endif
+
+/*****************************************************************************/
+/*
+ * destroy an AFS inode struct
+ */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
+static void afs_destroy_inode(struct inode *inode)
+{
+ _enter("{%lu}",inode->i_ino);
+
+ /* return the containing vnode record to the slab cache */
+ kmem_cache_free(afs_inode_cachep, AFS_FS_I(inode));
+} /* end afs_destroy_inode() */
+#endif
diff --git a/fs/afs/super.h b/fs/afs/super.h
new file mode 100644
index 000000000000..b307b0884181
--- /dev/null
+++ b/fs/afs/super.h
@@ -0,0 +1,43 @@
+/* super.h: AFS filesystem internal private data
+ *
+ * Copyright (c) 2002 Red Hat, Inc. All rights reserved.
+ *
+ * This software may be freely redistributed under the terms of the
+ * GNU General Public License.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Authors: David Woodhouse <dwmw2@cambridge.redhat.com>
+ * David Howells <dhowells@redhat.com>
+ *
+ */
+
+#ifndef _LINUX_AFS_SUPER_H
+#define _LINUX_AFS_SUPER_H
+
+#include <linux/fs.h>
+#include "server.h"
+
+#ifdef __KERNEL__
+
+/*****************************************************************************/
+/*
+ * AFS superblock private data
+ * - there's one superblock per volume
+ */
+struct afs_super_info
+{
+ afs_volume_t *volume; /* volume record */
+ char rwparent; /* T if parent is R/W AFS volume */
+};
+
+static inline struct afs_super_info *AFS_FS_S(struct super_block *sb)
+{
+ return sb->s_fs_info;
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_AFS_SUPER_H */
diff --git a/fs/afs/transport.h b/fs/afs/transport.h
new file mode 100644
index 000000000000..7013ae6ccc8c
--- /dev/null
+++ b/fs/afs/transport.h
@@ -0,0 +1,21 @@
+/* transport.h: AFS transport management
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_AFS_TRANSPORT_H
+#define _LINUX_AFS_TRANSPORT_H
+
+#include "types.h"
+#include <rxrpc/transport.h>
+
+/* the cache manager transport endpoint */
+extern struct rxrpc_transport *afs_transport;
+
+#endif /* _LINUX_AFS_TRANSPORT_H */
diff --git a/fs/afs/types.h b/fs/afs/types.h
new file mode 100644
index 000000000000..411925f4fa04
--- /dev/null
+++ b/fs/afs/types.h
@@ -0,0 +1,152 @@
+/* types.h: AFS types
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_AFS_TYPES_H
+#define _LINUX_AFS_TYPES_H
+
+#ifdef __KERNEL__
+#include <rxrpc/types.h>
+#endif /* __KERNEL__ */
+
+typedef unsigned afs_volid_t;
+typedef unsigned afs_vnodeid_t;
+typedef unsigned long long afs_dataversion_t;
+
+typedef struct afs_async_op afs_async_op_t;
+typedef struct afs_callback afs_callback_t;
+typedef struct afs_cell afs_cell_t;
+typedef struct afs_fid afs_fid_t;
+typedef struct afs_file_status afs_file_status_t;
+typedef struct afs_server afs_server_t;
+typedef struct afs_timer afs_timer_t;
+typedef struct afs_vlocation afs_vlocation_t;
+typedef struct afs_vnode afs_vnode_t;
+typedef struct afs_volsync afs_volsync_t;
+typedef struct afs_volume afs_volume_t;
+typedef struct afs_volume_info afs_volume_info_t;
+
+typedef struct afsc_cache afsc_cache_t;
+typedef struct afsc_cache_cell afsc_cache_cell_t;
+typedef struct afsc_cache_vldb afsc_cache_vldb_t;
+typedef struct afsc_cell_record afsc_cell_record_t;
+typedef struct afsc_inode afsc_inode_t;
+typedef struct afsc_io afsc_io_t;
+typedef struct afsc_io_subop afsc_io_subop_t;
+typedef struct afsc_io_queue afsc_io_queue_t;
+typedef struct afsc_super_block afsc_super_block_t;
+typedef struct afsc_vldb_record afsc_vldb_record_t;
+typedef struct afsc_vnode_catalogue afsc_vnode_catalogue_t;
+typedef struct afsc_vnode_meta afsc_vnode_meta_t;
+
+typedef struct afsvl_dbentry afsvl_dbentry_t;
+
+typedef enum {
+ AFSVL_RWVOL, /* read/write volume */
+ AFSVL_ROVOL, /* read-only volume */
+ AFSVL_BACKVOL, /* backup volume */
+} afs_voltype_t;
+
+extern const char *afs_voltypes[];
+
+typedef enum {
+ AFS_FTYPE_INVALID = 0,
+ AFS_FTYPE_FILE = 1,
+ AFS_FTYPE_DIR = 2,
+ AFS_FTYPE_SYMLINK = 3,
+} afs_file_type_t;
+
+#ifdef __KERNEL__
+
+/*****************************************************************************/
+/*
+ * AFS file identifier
+ */
+struct afs_fid
+{
+ afs_volid_t vid; /* volume ID */
+ afs_vnodeid_t vnode; /* file index within volume */
+ unsigned unique; /* unique ID number (file index version) */
+};
+
+/*****************************************************************************/
+/*
+ * AFS callback notification
+ */
+typedef enum {
+ AFSCM_CB_UNTYPED = 0, /* no type set on CB break */
+ AFSCM_CB_EXCLUSIVE = 1, /* CB exclusive to CM [not implemented] */
+ AFSCM_CB_SHARED = 2, /* CB shared by other CM's */
+ AFSCM_CB_DROPPED = 3, /* CB promise cancelled by file server */
+} afs_callback_type_t;
+
+struct afs_callback
+{
+ afs_server_t *server; /* server that made the promise */
+ afs_fid_t fid; /* file identifier */
+ unsigned version; /* callback version */
+ unsigned expiry; /* time at which expires */
+ afs_callback_type_t type; /* type of callback */
+};
+
+#define AFSCBMAX 50
+
+/*****************************************************************************/
+/*
+ * AFS volume information
+ */
+struct afs_volume_info
+{
+ afs_volid_t vid; /* volume ID */
+ afs_voltype_t type; /* type of this volume */
+ afs_volid_t type_vids[5]; /* volume ID's for possible types for this vol */
+
+ /* list of fileservers serving this volume */
+ size_t nservers; /* number of entries used in servers[] */
+ struct {
+ struct in_addr addr; /* fileserver address */
+ } servers[8];
+};
+
+/*****************************************************************************/
+/*
+ * AFS file status information
+ */
+struct afs_file_status
+{
+ unsigned if_version; /* interface version */
+#define AFS_FSTATUS_VERSION 1
+
+ afs_file_type_t type; /* file type */
+ unsigned nlink; /* link count */
+ size_t size; /* file size */
+ afs_dataversion_t version; /* current data version */
+ unsigned author; /* author ID */
+ unsigned owner; /* owner ID */
+ unsigned caller_access; /* access rights for authenticated caller */
+ unsigned anon_access; /* access rights for unauthenticated caller */
+ umode_t mode; /* UNIX mode */
+ afs_fid_t parent; /* parent file ID */
+ time_t mtime_client; /* last time client changed data */
+ time_t mtime_server; /* last time server changed data */
+};
+
+/*****************************************************************************/
+/*
+ * AFS volume synchronisation information
+ */
+struct afs_volsync
+{
+ time_t creation; /* volume creation time */
+};
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_AFS_TYPES_H */
diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c
new file mode 100644
index 000000000000..564e9939af40
--- /dev/null
+++ b/fs/afs/vlclient.c
@@ -0,0 +1,662 @@
+/* vlclient.c: AFS Volume Location Service client
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <rxrpc/rxrpc.h>
+#include <rxrpc/transport.h>
+#include <rxrpc/connection.h>
+#include <rxrpc/call.h>
+#include "server.h"
+#include "volume.h"
+#include "vlclient.h"
+#include "kafsasyncd.h"
+#include "kafstimod.h"
+#include "errors.h"
+#include "internal.h"
+
+#define VLGETENTRYBYID 503 /* AFS Get Cache Entry By ID operation ID */
+#define VLGETENTRYBYNAME 504 /* AFS Get Cache Entry By Name operation ID */
+#define VLPROBE 514 /* AFS Probe Volume Location Service operation ID */
+
+static void afs_rxvl_get_entry_by_id_attn(struct rxrpc_call *call);
+static void afs_rxvl_get_entry_by_id_error(struct rxrpc_call *call);
+
+/*****************************************************************************/
+/*
+ * map afs VL abort codes to/from Linux error codes
+ * - called with call->lock held
+ */
+static void afs_rxvl_aemap(struct rxrpc_call *call)
+{
+ int err;
+
+ _enter("{%u,%u,%d}",call->app_err_state,call->app_abort_code,call->app_errno);
+
+ switch (call->app_err_state) {
+ case RXRPC_ESTATE_LOCAL_ABORT:
+ call->app_abort_code = -call->app_errno;
+ return;
+
+ case RXRPC_ESTATE_PEER_ABORT:
+ switch (call->app_abort_code) {
+ case AFSVL_IDEXIST: err = -EEXIST; break;
+ case AFSVL_IO: err = -EREMOTEIO; break;
+ case AFSVL_NAMEEXIST: err = -EEXIST; break;
+ case AFSVL_CREATEFAIL: err = -EREMOTEIO; break;
+ case AFSVL_NOENT: err = -ENOMEDIUM; break;
+ case AFSVL_EMPTY: err = -ENOMEDIUM; break;
+ case AFSVL_ENTDELETED: err = -ENOMEDIUM; break;
+ case AFSVL_BADNAME: err = -EINVAL; break;
+ case AFSVL_BADINDEX: err = -EINVAL; break;
+ case AFSVL_BADVOLTYPE: err = -EINVAL; break;
+ case AFSVL_BADSERVER: err = -EINVAL; break;
+ case AFSVL_BADPARTITION: err = -EINVAL; break;
+ case AFSVL_REPSFULL: err = -EFBIG; break;
+ case AFSVL_NOREPSERVER: err = -ENOENT; break;
+ case AFSVL_DUPREPSERVER: err = -EEXIST; break;
+ case AFSVL_RWNOTFOUND: err = -ENOENT; break;
+ case AFSVL_BADREFCOUNT: err = -EINVAL; break;
+ case AFSVL_SIZEEXCEEDED: err = -EINVAL; break;
+ case AFSVL_BADENTRY: err = -EINVAL; break;
+ case AFSVL_BADVOLIDBUMP: err = -EINVAL; break;
+ case AFSVL_IDALREADYHASHED: err = -EINVAL; break;
+ case AFSVL_ENTRYLOCKED: err = -EBUSY; break;
+ case AFSVL_BADVOLOPER: err = -EBADRQC; break;
+ case AFSVL_BADRELLOCKTYPE: err = -EINVAL; break;
+ case AFSVL_RERELEASE: err = -EREMOTEIO; break;
+ case AFSVL_BADSERVERFLAG: err = -EINVAL; break;
+ case AFSVL_PERM: err = -EACCES; break;
+ case AFSVL_NOMEM: err = -EREMOTEIO; break;
+ default:
+ err = afs_abort_to_error(call->app_abort_code);
+ break;
+ }
+ call->app_errno = err;
+ return;
+
+ default:
+ return;
+ }
+} /* end afs_rxvl_aemap() */
+
+/*****************************************************************************/
+/*
+ * probe a volume location server to see if it is still alive
+ */
+int afs_rxvl_probe(afs_server_t *server, int alloc_flags)
+{
+ DECLARE_WAITQUEUE(myself,current);
+
+ struct rxrpc_connection *conn;
+ struct rxrpc_call *call;
+ struct iovec piov[1];
+ size_t sent;
+ int ret;
+ u32 param[1];
+
+ /* get hold of the vlserver connection */
+ ret = afs_server_get_vlconn(server,&conn);
+ if (ret<0)
+ goto out;
+
+ /* create a call through that connection */
+ ret = rxrpc_create_call(conn,NULL,NULL,afs_rxvl_aemap,&call);
+ if (ret<0) {
+ printk("kAFS: Unable to create call: %d\n",ret);
+ goto out_put_conn;
+ }
+ call->app_opcode = VLPROBE;
+
+ /* we want to get event notifications from the call */
+ add_wait_queue(&call->waitq,&myself);
+
+ /* marshall the parameters */
+ param[0] = htonl(VLPROBE);
+ piov[0].iov_len = sizeof(param);
+ piov[0].iov_base = param;
+
+ /* send the parameters to the server */
+ ret = rxrpc_call_write_data(call,1,piov,RXRPC_LAST_PACKET,alloc_flags,0,&sent);
+ if (ret<0)
+ goto abort;
+
+ /* wait for the reply to completely arrive */
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (call->app_call_state!=RXRPC_CSTATE_CLNT_RCV_REPLY ||
+ signal_pending(current))
+ break;
+ schedule();
+ }
+ set_current_state(TASK_RUNNING);
+
+ ret = -EINTR;
+ if (signal_pending(current))
+ goto abort;
+
+ switch (call->app_call_state) {
+ case RXRPC_CSTATE_ERROR:
+ ret = call->app_errno;
+ goto out_unwait;
+
+ case RXRPC_CSTATE_CLNT_GOT_REPLY:
+ ret = 0;
+ goto out_unwait;
+
+ default:
+ BUG();
+ }
+
+ abort:
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ rxrpc_call_abort(call,ret);
+ schedule();
+
+ out_unwait:
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&call->waitq,&myself);
+ rxrpc_put_call(call);
+ out_put_conn:
+ rxrpc_put_connection(conn);
+ out:
+ return ret;
+
+} /* end afs_rxvl_probe() */
+
+/*****************************************************************************/
+/*
+ * look up a volume location database entry by name
+ */
+int afs_rxvl_get_entry_by_name(afs_server_t *server, const char *volname,
+ afsc_vldb_record_t *entry)
+{
+ DECLARE_WAITQUEUE(myself,current);
+
+ struct rxrpc_connection *conn;
+ struct rxrpc_call *call;
+ struct iovec piov[3];
+ unsigned tmp;
+ size_t sent;
+ int ret, loop;
+ u32 *bp, param[2], zero;
+
+ _enter(",%s,",volname);
+
+ memset(entry,0,sizeof(*entry));
+
+ /* get hold of the vlserver connection */
+ ret = afs_server_get_vlconn(server,&conn);
+ if (ret<0)
+ goto out;
+
+ /* create a call through that connection */
+ ret = rxrpc_create_call(conn,NULL,NULL,afs_rxvl_aemap,&call);
+ if (ret<0) {
+ printk("kAFS: Unable to create call: %d\n",ret);
+ goto out_put_conn;
+ }
+ call->app_opcode = VLGETENTRYBYNAME;
+
+ /* we want to get event notifications from the call */
+ add_wait_queue(&call->waitq,&myself);
+
+ /* marshall the parameters */
+ piov[1].iov_len = strlen(volname);
+ piov[1].iov_base = (char*)volname;
+
+ zero = 0;
+ piov[2].iov_len = (4 - (piov[1].iov_len & 3)) & 3;
+ piov[2].iov_base = &zero;
+
+ param[0] = htonl(VLGETENTRYBYNAME);
+ param[1] = htonl(piov[1].iov_len);
+
+ piov[0].iov_len = sizeof(param);
+ piov[0].iov_base = param;
+
+ /* send the parameters to the server */
+ ret = rxrpc_call_write_data(call,3,piov,RXRPC_LAST_PACKET,GFP_NOFS,0,&sent);
+ if (ret<0)
+ goto abort;
+
+ /* wait for the reply to completely arrive */
+ bp = rxrpc_call_alloc_scratch(call,384);
+
+ ret = rxrpc_call_read_data(call,bp,384,RXRPC_CALL_READ_BLOCK|RXRPC_CALL_READ_ALL);
+ if (ret<0) {
+ if (ret==-ECONNABORTED) {
+ ret = call->app_errno;
+ goto out_unwait;
+ }
+ goto abort;
+ }
+
+ /* unmarshall the reply */
+ for (loop=0; loop<64; loop++)
+ entry->name[loop] = ntohl(*bp++);
+ bp++; /* final NUL */
+
+ bp++; /* type */
+ entry->nservers = ntohl(*bp++);
+
+ for (loop=0; loop<8; loop++)
+ entry->servers[loop].s_addr = *bp++;
+
+ bp += 8; /* partition IDs */
+
+ for (loop=0; loop<8; loop++) {
+ tmp = ntohl(*bp++);
+ if (tmp & AFS_VLSF_RWVOL ) entry->srvtmask[loop] |= AFSC_VOL_STM_RW;
+ if (tmp & AFS_VLSF_ROVOL ) entry->srvtmask[loop] |= AFSC_VOL_STM_RO;
+ if (tmp & AFS_VLSF_BACKVOL) entry->srvtmask[loop] |= AFSC_VOL_STM_BAK;
+ }
+
+ entry->vid[0] = ntohl(*bp++);
+ entry->vid[1] = ntohl(*bp++);
+ entry->vid[2] = ntohl(*bp++);
+
+ bp++; /* clone ID */
+
+ tmp = ntohl(*bp++); /* flags */
+ if (tmp & AFS_VLF_RWEXISTS ) entry->vidmask |= AFSC_VOL_STM_RW;
+ if (tmp & AFS_VLF_ROEXISTS ) entry->vidmask |= AFSC_VOL_STM_RO;
+ if (tmp & AFS_VLF_BACKEXISTS) entry->vidmask |= AFSC_VOL_STM_BAK;
+
+ ret = -ENOMEDIUM;
+ if (!entry->vidmask)
+ goto abort;
+
+ /* success */
+ entry->ctime = xtime.tv_sec;
+ ret = 0;
+
+ out_unwait:
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&call->waitq,&myself);
+ rxrpc_put_call(call);
+ out_put_conn:
+ rxrpc_put_connection(conn);
+ out:
+ _leave(" = %d",ret);
+ return ret;
+
+ abort:
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ rxrpc_call_abort(call,ret);
+ schedule();
+ goto out_unwait;
+} /* end afs_rxvl_get_entry_by_name() */
+
+/*****************************************************************************/
+/*
+ * look up a volume location database entry by ID
+ */
+int afs_rxvl_get_entry_by_id(afs_server_t *server,
+ afs_volid_t volid,
+ afs_voltype_t voltype,
+ afsc_vldb_record_t *entry)
+{
+ DECLARE_WAITQUEUE(myself,current);
+
+ struct rxrpc_connection *conn;
+ struct rxrpc_call *call;
+ struct iovec piov[1];
+ unsigned tmp;
+ size_t sent;
+ int ret, loop;
+ u32 *bp, param[3];
+
+ _enter(",%x,%d,",volid,voltype);
+
+ memset(entry,0,sizeof(*entry));
+
+ /* get hold of the vlserver connection */
+ ret = afs_server_get_vlconn(server,&conn);
+ if (ret<0)
+ goto out;
+
+ /* create a call through that connection */
+ ret = rxrpc_create_call(conn,NULL,NULL,afs_rxvl_aemap,&call);
+ if (ret<0) {
+ printk("kAFS: Unable to create call: %d\n",ret);
+ goto out_put_conn;
+ }
+ call->app_opcode = VLGETENTRYBYID;
+
+ /* we want to get event notifications from the call */
+ add_wait_queue(&call->waitq,&myself);
+
+ /* marshall the parameters */
+ param[0] = htonl(VLGETENTRYBYID);
+ param[1] = htonl(volid);
+ param[2] = htonl(voltype);
+
+ piov[0].iov_len = sizeof(param);
+ piov[0].iov_base = param;
+
+ /* send the parameters to the server */
+ ret = rxrpc_call_write_data(call,1,piov,RXRPC_LAST_PACKET,GFP_NOFS,0,&sent);
+ if (ret<0)
+ goto abort;
+
+ /* wait for the reply to completely arrive */
+ bp = rxrpc_call_alloc_scratch(call,384);
+
+ ret = rxrpc_call_read_data(call,bp,384,RXRPC_CALL_READ_BLOCK|RXRPC_CALL_READ_ALL);
+ if (ret<0) {
+ if (ret==-ECONNABORTED) {
+ ret = call->app_errno;
+ goto out_unwait;
+ }
+ goto abort;
+ }
+
+ /* unmarshall the reply */
+ for (loop=0; loop<64; loop++)
+ entry->name[loop] = ntohl(*bp++);
+ bp++; /* final NUL */
+
+ bp++; /* type */
+ entry->nservers = ntohl(*bp++);
+
+ for (loop=0; loop<8; loop++)
+ entry->servers[loop].s_addr = *bp++;
+
+ bp += 8; /* partition IDs */
+
+ for (loop=0; loop<8; loop++) {
+ tmp = ntohl(*bp++);
+ if (tmp & AFS_VLSF_RWVOL ) entry->srvtmask[loop] |= AFSC_VOL_STM_RW;
+ if (tmp & AFS_VLSF_ROVOL ) entry->srvtmask[loop] |= AFSC_VOL_STM_RO;
+ if (tmp & AFS_VLSF_BACKVOL) entry->srvtmask[loop] |= AFSC_VOL_STM_BAK;
+ }
+
+ entry->vid[0] = ntohl(*bp++);
+ entry->vid[1] = ntohl(*bp++);
+ entry->vid[2] = ntohl(*bp++);
+
+ bp++; /* clone ID */
+
+ tmp = ntohl(*bp++); /* flags */
+ if (tmp & AFS_VLF_RWEXISTS ) entry->vidmask |= AFSC_VOL_STM_RW;
+ if (tmp & AFS_VLF_ROEXISTS ) entry->vidmask |= AFSC_VOL_STM_RO;
+ if (tmp & AFS_VLF_BACKEXISTS) entry->vidmask |= AFSC_VOL_STM_BAK;
+
+ ret = -ENOMEDIUM;
+ if (!entry->vidmask)
+ goto abort;
+
+#if 0 /* TODO: remove */
+ entry->nservers = 3;
+ entry->servers[0].s_addr = htonl(0xac101249);
+ entry->servers[1].s_addr = htonl(0xac101243);
+ entry->servers[2].s_addr = htonl(0xac10125b /*0xac10125b*/);
+
+ entry->srvtmask[0] = AFSC_VOL_STM_RO;
+ entry->srvtmask[1] = AFSC_VOL_STM_RO;
+ entry->srvtmask[2] = AFSC_VOL_STM_RO | AFSC_VOL_STM_RW;
+#endif
+
+ /* success */
+ entry->ctime = xtime.tv_sec;
+ ret = 0;
+
+ out_unwait:
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&call->waitq,&myself);
+ rxrpc_put_call(call);
+ out_put_conn:
+ rxrpc_put_connection(conn);
+ out:
+ _leave(" = %d",ret);
+ return ret;
+
+ abort:
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ rxrpc_call_abort(call,ret);
+ schedule();
+ goto out_unwait;
+} /* end afs_rxvl_get_entry_by_id() */
+
+/*****************************************************************************/
+/*
+ * look up a volume location database entry by ID asynchronously
+ */
+int afs_rxvl_get_entry_by_id_async(afs_async_op_t *op,
+ afs_volid_t volid,
+ afs_voltype_t voltype)
+{
+ struct rxrpc_connection *conn;
+ struct rxrpc_call *call;
+ struct iovec piov[1];
+ size_t sent;
+ int ret;
+ u32 param[3];
+
+ _enter(",%x,%d,",volid,voltype);
+
+ /* get hold of the vlserver connection */
+ ret = afs_server_get_vlconn(op->server,&conn);
+ if (ret<0) {
+ _leave(" = %d",ret);
+ return ret;
+ }
+
+ /* create a call through that connection */
+ ret = rxrpc_create_call(conn,
+ afs_rxvl_get_entry_by_id_attn,
+ afs_rxvl_get_entry_by_id_error,
+ afs_rxvl_aemap,
+ &op->call);
+ rxrpc_put_connection(conn);
+
+ if (ret<0) {
+ printk("kAFS: Unable to create call: %d\n",ret);
+ _leave(" = %d",ret);
+ return ret;
+ }
+
+ op->call->app_opcode = VLGETENTRYBYID;
+ op->call->app_user = op;
+
+ call = op->call;
+ rxrpc_get_call(call);
+
+ /* send event notifications from the call to kafsasyncd */
+ afs_kafsasyncd_begin_op(op);
+
+ /* marshall the parameters */
+ param[0] = htonl(VLGETENTRYBYID);
+ param[1] = htonl(volid);
+ param[2] = htonl(voltype);
+
+ piov[0].iov_len = sizeof(param);
+ piov[0].iov_base = param;
+
+ /* allocate result read buffer in scratch space */
+ call->app_scr_ptr = rxrpc_call_alloc_scratch(op->call,384);
+
+ /* send the parameters to the server */
+ ret = rxrpc_call_write_data(call,1,piov,RXRPC_LAST_PACKET,GFP_NOFS,0,&sent);
+ if (ret<0) {
+ rxrpc_call_abort(call,ret); /* handle from kafsasyncd */
+ ret = 0;
+ goto out;
+ }
+
+ /* wait for the reply to completely arrive */
+ ret = rxrpc_call_read_data(call,call->app_scr_ptr,384,0);
+ switch (ret) {
+ case 0:
+ case -EAGAIN:
+ case -ECONNABORTED:
+ ret = 0;
+ break; /* all handled by kafsasyncd */
+
+ default:
+ rxrpc_call_abort(call,ret); /* force kafsasyncd to handle it */
+ ret = 0;
+ break;
+ }
+
+ out:
+ rxrpc_put_call(call);
+ _leave(" = %d",ret);
+ return ret;
+
+} /* end afs_rxvl_get_entry_by_id_async() */
+
+/*****************************************************************************/
+/*
+ * attend to the asynchronous get VLDB entry by ID
+ */
+int afs_rxvl_get_entry_by_id_async2(afs_async_op_t *op,
+ afsc_vldb_record_t *entry)
+{
+ unsigned *bp, tmp;
+ int loop, ret;
+
+ _enter("{op=%p cst=%u}",op,op->call->app_call_state);
+
+ memset(entry,0,sizeof(*entry));
+
+ if (op->call->app_call_state==RXRPC_CSTATE_COMPLETE) {
+ /* operation finished */
+ afs_kafsasyncd_terminate_op(op);
+
+ bp = op->call->app_scr_ptr;
+
+ /* unmarshall the reply */
+ for (loop=0; loop<64; loop++)
+ entry->name[loop] = ntohl(*bp++);
+ bp++; /* final NUL */
+
+ bp++; /* type */
+ entry->nservers = ntohl(*bp++);
+
+ for (loop=0; loop<8; loop++)
+ entry->servers[loop].s_addr = *bp++;
+
+ bp += 8; /* partition IDs */
+
+ for (loop=0; loop<8; loop++) {
+ tmp = ntohl(*bp++);
+ if (tmp & AFS_VLSF_RWVOL ) entry->srvtmask[loop] |= AFSC_VOL_STM_RW;
+ if (tmp & AFS_VLSF_ROVOL ) entry->srvtmask[loop] |= AFSC_VOL_STM_RO;
+ if (tmp & AFS_VLSF_BACKVOL) entry->srvtmask[loop] |= AFSC_VOL_STM_BAK;
+ }
+
+ entry->vid[0] = ntohl(*bp++);
+ entry->vid[1] = ntohl(*bp++);
+ entry->vid[2] = ntohl(*bp++);
+
+ bp++; /* clone ID */
+
+ tmp = ntohl(*bp++); /* flags */
+ if (tmp & AFS_VLF_RWEXISTS ) entry->vidmask |= AFSC_VOL_STM_RW;
+ if (tmp & AFS_VLF_ROEXISTS ) entry->vidmask |= AFSC_VOL_STM_RO;
+ if (tmp & AFS_VLF_BACKEXISTS) entry->vidmask |= AFSC_VOL_STM_BAK;
+
+ ret = -ENOMEDIUM;
+ if (!entry->vidmask) {
+ rxrpc_call_abort(op->call,ret);
+ goto done;
+ }
+
+#if 0 /* TODO: remove */
+ entry->nservers = 3;
+ entry->servers[0].s_addr = htonl(0xac101249);
+ entry->servers[1].s_addr = htonl(0xac101243);
+ entry->servers[2].s_addr = htonl(0xac10125b /*0xac10125b*/);
+
+ entry->srvtmask[0] = AFSC_VOL_STM_RO;
+ entry->srvtmask[1] = AFSC_VOL_STM_RO;
+ entry->srvtmask[2] = AFSC_VOL_STM_RO | AFSC_VOL_STM_RW;
+#endif
+
+ /* success */
+ entry->ctime = xtime.tv_sec;
+ ret = 0;
+ goto done;
+ }
+
+ if (op->call->app_call_state==RXRPC_CSTATE_ERROR) {
+ /* operation error */
+ ret = op->call->app_errno;
+ goto done;
+ }
+
+ _leave(" = -EAGAIN");
+ return -EAGAIN;
+
+ done:
+ rxrpc_put_call(op->call);
+ op->call = NULL;
+ _leave(" = %d",ret);
+ return ret;
+} /* end afs_rxvl_get_entry_by_id_async2() */
+
+/*****************************************************************************/
+/*
+ * handle attention events on an async get-entry-by-ID op
+ * - called from krxiod
+ */
+static void afs_rxvl_get_entry_by_id_attn(struct rxrpc_call *call)
+{
+ afs_async_op_t *op = call->app_user;
+
+ _enter("{op=%p cst=%u}",op,call->app_call_state);
+
+ switch (call->app_call_state) {
+ case RXRPC_CSTATE_COMPLETE:
+ afs_kafsasyncd_attend_op(op);
+ break;
+ case RXRPC_CSTATE_CLNT_RCV_REPLY:
+ if (call->app_async_read)
+ break;
+ case RXRPC_CSTATE_CLNT_GOT_REPLY:
+ if (call->app_read_count==0)
+ break;
+ printk("kAFS: Reply bigger than expected {cst=%u asyn=%d mark=%d rdy=%u pr=%u%s}",
+ call->app_call_state,
+ call->app_async_read,
+ call->app_mark,
+ call->app_ready_qty,
+ call->pkt_rcv_count,
+ call->app_last_rcv ? " last" : "");
+
+ rxrpc_call_abort(call,-EBADMSG);
+ break;
+ default:
+ BUG();
+ }
+
+ _leave("");
+
+} /* end afs_rxvl_get_entry_by_id_attn() */
+
+/*****************************************************************************/
+/*
+ * handle error events on an async get-entry-by-ID op
+ * - called from krxiod
+ */
+static void afs_rxvl_get_entry_by_id_error(struct rxrpc_call *call)
+{
+ afs_async_op_t *op = call->app_user;
+
+ _enter("{op=%p cst=%u}",op,call->app_call_state);
+
+ afs_kafsasyncd_attend_op(op);
+
+ _leave("");
+
+} /* end afs_rxvl_get_entry_by_id_error() */
diff --git a/fs/afs/vlclient.h b/fs/afs/vlclient.h
new file mode 100644
index 000000000000..5791e04d6382
--- /dev/null
+++ b/fs/afs/vlclient.h
@@ -0,0 +1,95 @@
+/* vlclient.h: Volume Location Service client interface
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_AFS_VLCLIENT_H
+#define _LINUX_AFS_VLCLIENT_H
+
+#include "types.h"
+
+enum AFSVL_Errors {
+ AFSVL_IDEXIST = 363520, /* Volume Id entry exists in vl database */
+ AFSVL_IO = 363521, /* I/O related error */
+ AFSVL_NAMEEXIST = 363522, /* Volume name entry exists in vl database */
+ AFSVL_CREATEFAIL = 363523, /* Internal creation failure */
+ AFSVL_NOENT = 363524, /* No such entry */
+ AFSVL_EMPTY = 363525, /* Vl database is empty */
+ AFSVL_ENTDELETED = 363526, /* Entry is deleted (soft delete) */
+ AFSVL_BADNAME = 363527, /* Volume name is illegal */
+ AFSVL_BADINDEX = 363528, /* Index is out of range */
+ AFSVL_BADVOLTYPE = 363529, /* Bad volume type */
+ AFSVL_BADSERVER = 363530, /* Illegal server number (out of range) */
+ AFSVL_BADPARTITION = 363531, /* Bad partition number */
+ AFSVL_REPSFULL = 363532, /* Run out of space for Replication sites */
+ AFSVL_NOREPSERVER = 363533, /* No such Replication server site exists */
+ AFSVL_DUPREPSERVER = 363534, /* Replication site already exists */
+ AFSVL_RWNOTFOUND = 363535, /* Parent R/W entry not found */
+ AFSVL_BADREFCOUNT = 363536, /* Illegal Reference Count number */
+ AFSVL_SIZEEXCEEDED = 363537, /* Vl size for attributes exceeded */
+ AFSVL_BADENTRY = 363538, /* Bad incoming vl entry */
+ AFSVL_BADVOLIDBUMP = 363539, /* Illegal max volid increment */
+ AFSVL_IDALREADYHASHED = 363540, /* RO/BACK id already hashed */
+ AFSVL_ENTRYLOCKED = 363541, /* Vl entry is already locked */
+ AFSVL_BADVOLOPER = 363542, /* Bad volume operation code */
+ AFSVL_BADRELLOCKTYPE = 363543, /* Bad release lock type */
+ AFSVL_RERELEASE = 363544, /* Status report: last release was aborted */
+	AFSVL_BADSERVERFLAG	= 363545,	/* Invalid replication site server flag */
+ AFSVL_PERM = 363546, /* No permission access */
+ AFSVL_NOMEM = 363547, /* malloc/realloc failed to alloc enough memory */
+};
+
+/* maps to "struct vldbentry" in vvl-spec.pdf */
+struct afsvl_dbentry {
+ char name[65]; /* name of volume (including NUL char) */
+ afs_voltype_t type; /* volume type */
+ unsigned num_servers; /* num servers that hold instances of this vol */
+ unsigned clone_id; /* cloning ID */
+
+ unsigned flags;
+#define AFS_VLF_RWEXISTS 0x1000 /* R/W volume exists */
+#define AFS_VLF_ROEXISTS 0x2000 /* R/O volume exists */
+#define AFS_VLF_BACKEXISTS 0x4000 /* backup volume exists */
+
+ afs_volid_t volume_ids[3]; /* volume IDs */
+
+ struct {
+ struct in_addr addr; /* server address */
+ unsigned partition; /* partition ID on this server */
+ unsigned flags; /* server specific flags */
+#define AFS_VLSF_NEWREPSITE 0x0001 /* unused */
+#define AFS_VLSF_ROVOL 0x0002 /* this server holds a R/O instance of the volume */
+#define AFS_VLSF_RWVOL 0x0004 /* this server holds a R/W instance of the volume */
+#define AFS_VLSF_BACKVOL 0x0008 /* this server holds a backup instance of the volume */
+ } servers[8];
+
+};
+
+/* probe a volume location server to see if it is still alive */
+extern int afs_rxvl_probe(afs_server_t *server, int alloc_flags);
+
+/* look up a volume location database entry by name */
+extern int afs_rxvl_get_entry_by_name(afs_server_t *server,
+ const char *volname,
+ afsc_vldb_record_t *entry);
+
+/* look up a volume location database entry by ID */
+extern int afs_rxvl_get_entry_by_id(afs_server_t *server,
+ afs_volid_t volid,
+ afs_voltype_t voltype,
+ afsc_vldb_record_t *entry);
+
+extern int afs_rxvl_get_entry_by_id_async(afs_async_op_t *op,
+ afs_volid_t volid,
+ afs_voltype_t voltype);
+
+extern int afs_rxvl_get_entry_by_id_async2(afs_async_op_t *op,
+ afsc_vldb_record_t *entry);
+
+#endif /* _LINUX_AFS_VLCLIENT_H */
diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c
new file mode 100644
index 000000000000..8d9f4d7e8f29
--- /dev/null
+++ b/fs/afs/vlocation.c
@@ -0,0 +1,824 @@
+/* vlocation.c: volume location management
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include "volume.h"
+#include "cell.h"
+#include "cmservice.h"
+#include "fsclient.h"
+#include "vlclient.h"
+#include "kafstimod.h"
+#include <rxrpc/connection.h>
+#include "internal.h"
+
+#define AFS_VLDB_TIMEOUT HZ*1000
+
+static void afs_vlocation_update_timer(afs_timer_t *timer);
+static void afs_vlocation_update_attend(afs_async_op_t *op);
+static void afs_vlocation_update_discard(afs_async_op_t *op);
+
+static void __afs_vlocation_timeout(afs_timer_t *timer)
+{
+ afs_vlocation_t *vlocation = list_entry(timer,afs_vlocation_t,timeout);
+
+ _debug("VL TIMEOUT [%s{u=%d}]",vlocation->vldb.name,atomic_read(&vlocation->usage));
+
+ afs_vlocation_do_timeout(vlocation);
+}
+
+static const struct afs_timer_ops afs_vlocation_timer_ops = {
+ .timed_out = __afs_vlocation_timeout,
+};
+
+static const struct afs_timer_ops afs_vlocation_update_timer_ops = {
+ .timed_out = afs_vlocation_update_timer,
+};
+
+static const struct afs_async_op_ops afs_vlocation_update_op_ops = {
+ .attend = afs_vlocation_update_attend,
+ .discard = afs_vlocation_update_discard,
+};
+
+static LIST_HEAD(afs_vlocation_update_pendq); /* queue of VLs awaiting update */
+static afs_vlocation_t *afs_vlocation_update; /* VL currently being updated */
+static spinlock_t afs_vlocation_update_lock = SPIN_LOCK_UNLOCKED; /* lock guarding update queue */
+
+/*****************************************************************************/
+/*
+ * iterate through the VL servers in a cell until one of them admits knowing about the volume in
+ * question
+ * - caller must have cell->vl_sem write-locked
+ */
+static int afs_vlocation_access_vl_by_name(afs_vlocation_t *vlocation,
+ const char *name,
+ afsc_vldb_record_t *vldb)
+{
+ afs_server_t *server = NULL;
+ afs_cell_t *cell = vlocation->cell;
+ int count, ret;
+
+ _enter("%s,%s,",cell->name,name);
+
+ ret = -ENOMEDIUM;
+ for (count=cell->vl_naddrs; count>0; count--) {
+ _debug("CellServ[%hu]: %08x",
+ cell->vl_curr_svix,cell->vl_addrs[cell->vl_curr_svix].s_addr);
+
+ /* try and create a server */
+ ret = afs_server_lookup(cell,&cell->vl_addrs[cell->vl_curr_svix],&server);
+ switch (ret) {
+ case 0:
+ break;
+ case -ENOMEM:
+ case -ENONET:
+ goto out;
+ default:
+ goto rotate;
+ }
+
+ /* attempt to access the VL server */
+ ret = afs_rxvl_get_entry_by_name(server,name,vldb);
+ switch (ret) {
+ case 0:
+ afs_put_server(server);
+ goto out;
+ case -ENOMEM:
+ case -ENONET:
+ case -ENETUNREACH:
+ case -EHOSTUNREACH:
+ case -ECONNREFUSED:
+ down_write(&server->sem);
+ if (server->vlserver) {
+ rxrpc_put_connection(server->vlserver);
+ server->vlserver = NULL;
+ }
+ up_write(&server->sem);
+ afs_put_server(server);
+ if (ret==-ENOMEM || ret==-ENONET)
+ goto out;
+ goto rotate;
+ case -ENOMEDIUM:
+ afs_put_server(server);
+ goto out;
+ default:
+ afs_put_server(server);
+ ret = -ENOMEDIUM;
+ goto rotate;
+ }
+
+ /* rotate the server records upon lookup failure */
+ rotate:
+ cell->vl_curr_svix++;
+ cell->vl_curr_svix %= cell->vl_naddrs;
+ }
+
+ out:
+ _leave(" = %d",ret);
+ return ret;
+
+} /* end afs_vlocation_access_vl_by_name() */
+
+/*****************************************************************************/
+/*
+ * iterate through the VL servers in a cell until one of them admits knowing about the volume in
+ * question
+ * - caller must have cell->vl_sem write-locked
+ */
+static int afs_vlocation_access_vl_by_id(afs_vlocation_t *vlocation,
+ afs_volid_t volid,
+ afs_voltype_t voltype,
+ afsc_vldb_record_t *vldb)
+{
+ afs_server_t *server = NULL;
+ afs_cell_t *cell = vlocation->cell;
+ int count, ret;
+
+ _enter("%s,%x,%d,",cell->name,volid,voltype);
+
+ ret = -ENOMEDIUM;
+ for (count=cell->vl_naddrs; count>0; count--) {
+ _debug("CellServ[%hu]: %08x",
+ cell->vl_curr_svix,cell->vl_addrs[cell->vl_curr_svix].s_addr);
+
+ /* try and create a server */
+ ret = afs_server_lookup(cell,&cell->vl_addrs[cell->vl_curr_svix],&server);
+ switch (ret) {
+ case 0:
+ break;
+ case -ENOMEM:
+ case -ENONET:
+ goto out;
+ default:
+ goto rotate;
+ }
+
+ /* attempt to access the VL server */
+ ret = afs_rxvl_get_entry_by_id(server,volid,voltype,vldb);
+ switch (ret) {
+ case 0:
+ afs_put_server(server);
+ goto out;
+ case -ENOMEM:
+ case -ENONET:
+ case -ENETUNREACH:
+ case -EHOSTUNREACH:
+ case -ECONNREFUSED:
+ down_write(&server->sem);
+ if (server->vlserver) {
+ rxrpc_put_connection(server->vlserver);
+ server->vlserver = NULL;
+ }
+ up_write(&server->sem);
+ afs_put_server(server);
+ if (ret==-ENOMEM || ret==-ENONET)
+ goto out;
+ goto rotate;
+ case -ENOMEDIUM:
+ afs_put_server(server);
+ goto out;
+ default:
+ afs_put_server(server);
+ ret = -ENOMEDIUM;
+ goto rotate;
+ }
+
+ /* rotate the server records upon lookup failure */
+ rotate:
+ cell->vl_curr_svix++;
+ cell->vl_curr_svix %= cell->vl_naddrs;
+ }
+
+ out:
+ _leave(" = %d",ret);
+ return ret;
+
+} /* end afs_vlocation_access_vl_by_id() */
+
+/*****************************************************************************/
+/*
+ * lookup volume location
+ * - caller must have cell->vol_sem write-locked
+ * - iterate through the VL servers in a cell until one of them admits knowing about the volume in
+ * question
+ * - lookup in the local cache if not able to find on the VL server
+ * - insert/update in the local cache if did get a VL response
+ */
+int afs_vlocation_lookup(afs_cell_t *cell, const char *name, afs_vlocation_t **_vlocation)
+{
+ afsc_vldb_record_t vldb;
+ struct list_head *_p;
+ afs_vlocation_t *vlocation;
+ afs_voltype_t voltype;
+ afs_volid_t vid;
+ int active = 0, ret;
+
+ _enter(",%s,%s,",cell->name,name);
+
+ if (strlen(name)>sizeof(vlocation->vldb.name)) {
+ _leave(" = -ENAMETOOLONG");
+ return -ENAMETOOLONG;
+ }
+
+ /* search the cell's active list first */
+ list_for_each(_p,&cell->vl_list) {
+ vlocation = list_entry(_p,afs_vlocation_t,link);
+ if (strncmp(vlocation->vldb.name,name,sizeof(vlocation->vldb.name))==0)
+ goto found_in_memory;
+ }
+
+ /* search the cell's graveyard list second */
+ spin_lock(&cell->vl_gylock);
+ list_for_each(_p,&cell->vl_graveyard) {
+ vlocation = list_entry(_p,afs_vlocation_t,link);
+ if (strncmp(vlocation->vldb.name,name,sizeof(vlocation->vldb.name))==0)
+ goto found_in_graveyard;
+ }
+ spin_unlock(&cell->vl_gylock);
+
+ /* not in the cell's in-memory lists - create a new record */
+ vlocation = kmalloc(sizeof(afs_vlocation_t),GFP_KERNEL);
+ if (!vlocation)
+ return -ENOMEM;
+
+ memset(vlocation,0,sizeof(afs_vlocation_t));
+ atomic_set(&vlocation->usage,1);
+ INIT_LIST_HEAD(&vlocation->link);
+ rwlock_init(&vlocation->lock);
+ strncpy(vlocation->vldb.name,name,sizeof(vlocation->vldb.name));
+
+ afs_timer_init(&vlocation->timeout,&afs_vlocation_timer_ops);
+ afs_timer_init(&vlocation->upd_timer,&afs_vlocation_update_timer_ops);
+ afs_async_op_init(&vlocation->upd_op,&afs_vlocation_update_op_ops);
+
+ INIT_LIST_HEAD(&vlocation->caches);
+
+ afs_get_cell(cell);
+ vlocation->cell = cell;
+
+ list_add_tail(&vlocation->link,&cell->vl_list);
+
+#if 0
+ /* search local cache if wasn't in memory */
+ ret = afsc_lookup_vlocation(vlocation);
+ switch (ret) {
+ default: goto error; /* disk error */
+ case 0: goto found_in_cache; /* pulled from local cache into memory */
+ case -ENOENT: break; /* not in local cache */
+ }
+#endif
+
+ /* try to look up an unknown volume in the cell VL databases by name */
+ ret = afs_vlocation_access_vl_by_name(vlocation,name,&vldb);
+ if (ret<0) {
+ printk("kAFS: failed to locate '%s' in cell '%s'\n",name,cell->name);
+ goto error;
+ }
+
+ goto found_on_vlserver;
+
+ found_in_graveyard:
+ /* found in the graveyard - resurrect */
+ _debug("found in graveyard");
+ atomic_inc(&vlocation->usage);
+ list_del(&vlocation->link);
+ list_add_tail(&vlocation->link,&cell->vl_list);
+ spin_unlock(&cell->vl_gylock);
+
+ afs_kafstimod_del_timer(&vlocation->timeout);
+ goto active;
+
+ found_in_memory:
+ /* found in memory - check to see if it's active */
+ _debug("found in memory");
+ atomic_inc(&vlocation->usage);
+
+ active:
+ active = 1;
+
+/* found_in_cache: */
+ /* try to look up a cached volume in the cell VL databases by ID */
+ _debug("found in cache");
+
+ _debug("Locally Cached: %s %02x { %08x(%x) %08x(%x) %08x(%x) }",
+ vlocation->vldb.name,
+ vlocation->vldb.vidmask,
+ ntohl(vlocation->vldb.servers[0].s_addr),vlocation->vldb.srvtmask[0],
+ ntohl(vlocation->vldb.servers[1].s_addr),vlocation->vldb.srvtmask[1],
+ ntohl(vlocation->vldb.servers[2].s_addr),vlocation->vldb.srvtmask[2]
+ );
+
+ _debug("Vids: %08x %08x %08x",
+ vlocation->vldb.vid[0],vlocation->vldb.vid[1],vlocation->vldb.vid[2]);
+
+ if (vlocation->vldb.vidmask & AFSC_VOL_STM_RW) {
+ vid = vlocation->vldb.vid[0];
+ voltype = AFSVL_RWVOL;
+ }
+ else if (vlocation->vldb.vidmask & AFSC_VOL_STM_RO) {
+ vid = vlocation->vldb.vid[1];
+ voltype = AFSVL_ROVOL;
+ }
+ else if (vlocation->vldb.vidmask & AFSC_VOL_STM_BAK) {
+ vid = vlocation->vldb.vid[2];
+ voltype = AFSVL_BACKVOL;
+ }
+ else {
+ BUG();
+ vid = 0;
+ voltype = 0;
+ }
+
+ ret = afs_vlocation_access_vl_by_id(vlocation,vid,voltype,&vldb);
+ switch (ret) {
+ /* net error */
+ default:
+ printk("kAFS: failed to volume '%s' (%x) up in '%s': %d\n",
+ name,vid,cell->name,ret);
+ goto error;
+
+ /* pulled from local cache into memory */
+ case 0:
+ goto found_on_vlserver;
+
+ /* uh oh... looks like the volume got deleted */
+ case -ENOMEDIUM:
+ printk("kAFS: volume '%s' (%x) does not exist '%s'\n",name,vid,cell->name);
+
+ /* TODO: make existing record unavailable */
+ goto error;
+ }
+
+ found_on_vlserver:
+ _debug("Done VL Lookup: %s %02x { %08x(%x) %08x(%x) %08x(%x) }",
+ name,
+ vldb.vidmask,
+ ntohl(vldb.servers[0].s_addr),vldb.srvtmask[0],
+ ntohl(vldb.servers[1].s_addr),vldb.srvtmask[1],
+ ntohl(vldb.servers[2].s_addr),vldb.srvtmask[2]
+ );
+
+ _debug("Vids: %08x %08x %08x",vldb.vid[0],vldb.vid[1],vldb.vid[2]);
+
+ if (strncmp(vldb.name,name,sizeof(vlocation->vldb.name))!=0)
+ printk("kAFS: name of volume '%s' changed to '%s' on server\n",name,vldb.name);
+
+ memcpy(&vlocation->vldb,&vldb,sizeof(vlocation->vldb));
+
+#if 0
+ /* add volume entry to local cache */
+ ret = afsc_update_vlocation(vlocation);
+ if (ret<0)
+ goto error;
+#endif
+
+ afs_kafstimod_add_timer(&vlocation->upd_timer,10*HZ);
+
+ *_vlocation = vlocation;
+ _leave(" = 0 (%p)",vlocation);
+ return 0;
+
+ error:
+ if (vlocation) {
+ if (active) {
+ __afs_put_vlocation(vlocation);
+ }
+ else {
+ list_del(&vlocation->link);
+ afs_put_cell(vlocation->cell);
+#if 0
+ afs_put_cache(vlocation->cache);
+#endif
+ kfree(vlocation);
+ }
+ }
+
+ _leave(" = %d",ret);
+ return ret;
+} /* end afs_vlocation_lookup() */
+
+/*****************************************************************************/
+/*
+ * finish using a volume location record
+ * - caller must have cell->vol_sem write-locked
+ */
+void __afs_put_vlocation(afs_vlocation_t *vlocation)
+{
+	afs_cell_t *cell = vlocation->cell;
+
+	_enter("%s",vlocation->vldb.name);
+
+	/* sanity check */
+	if (atomic_read(&vlocation->usage)<=0)
+		BUG();
+
+	/* the decrement and the graveyard insertion must be atomic with
+	 * respect to lookers-up, hence both happen under vl_gylock */
+	spin_lock(&cell->vl_gylock);
+	if (likely(!atomic_dec_and_test(&vlocation->usage))) {
+		spin_unlock(&cell->vl_gylock);
+		_leave("");
+		return;
+	}
+
+	/* move to graveyard queue (record is not freed here; it lingers so
+	 * that a lookup within the grace period can resurrect it) */
+	list_del(&vlocation->link);
+	list_add_tail(&vlocation->link,&cell->vl_graveyard);
+
+	/* remove from pending timeout queue (refcounted if actually being updated) */
+	list_del_init(&vlocation->upd_op.link);
+
+	/* time out in 10 secs */
+	afs_kafstimod_del_timer(&vlocation->upd_timer);
+	afs_kafstimod_add_timer(&vlocation->timeout,10*HZ);
+
+	spin_unlock(&cell->vl_gylock);
+
+	_leave(" [killed]");
+} /* end __afs_put_vlocation() */
+
+/*****************************************************************************/
+/*
+ * finish using a volume location record
+ */
+void afs_put_vlocation(afs_vlocation_t *vlocation)
+{
+	/* cache the cell pointer first: once __afs_put_vlocation() has run,
+	 * the record is on the graveyard and should no longer be touched */
+	afs_cell_t *cell = vlocation->cell;
+
+	/* serialise against volume lookup with the cell's VL semaphore */
+	down_write(&cell->vl_sem);
+	__afs_put_vlocation(vlocation);
+	up_write(&cell->vl_sem);
+} /* end afs_put_vlocation() */
+
+/*****************************************************************************/
+/*
+ * timeout vlocation record
+ * - removes from the cell's graveyard if the usage count is zero
+ */
+void afs_vlocation_do_timeout(afs_vlocation_t *vlocation)
+{
+	afs_cell_t *cell;
+
+	_enter("%s",vlocation->vldb.name);
+
+	cell = vlocation->cell;
+
+	if (atomic_read(&vlocation->usage)<0) BUG();
+
+	/* remove from graveyard if still dead; if the usage count rose again
+	 * the record was resurrected by a lookup and must be left alone */
+	spin_lock(&cell->vl_gylock);
+	if (atomic_read(&vlocation->usage)==0)
+		list_del_init(&vlocation->link);
+	else
+		vlocation = NULL;
+	spin_unlock(&cell->vl_gylock);
+
+	if (!vlocation) {
+		_leave("");
+		return; /* resurrected */
+	}
+
+	/* we can now destroy it properly: drop the cell ref the record held
+	 * and free the memory */
+	afs_put_cell(cell);
+#if 0
+	afs_put_cache(vlocation->cache);
+#endif
+
+	kfree(vlocation);
+
+	_leave(" [destroyed]");
+} /* end afs_vlocation_do_timeout() */
+
+/*****************************************************************************/
+/*
+ * send an update operation to the currently selected server
+ */
+static int afs_vlocation_update_begin(afs_vlocation_t *vlocation)
+{
+	afs_voltype_t vol_type;
+	afs_volid_t vol_id;
+	int ret;
+
+	_enter("%s{ufs=%u ucs=%u}",
+	       vlocation->vldb.name,vlocation->upd_first_svix,vlocation->upd_curr_svix);
+
+	/* choose the volume ID and type to refresh, preferring R/W over R/O
+	 * over backup (same precedence as the initial lookup) */
+	if (vlocation->vldb.vidmask & AFSC_VOL_STM_RW) {
+		vol_id = vlocation->vldb.vid[0];
+		vol_type = AFSVL_RWVOL;
+	}
+	else if (vlocation->vldb.vidmask & AFSC_VOL_STM_RO) {
+		vol_id = vlocation->vldb.vid[1];
+		vol_type = AFSVL_ROVOL;
+	}
+	else if (vlocation->vldb.vidmask & AFSC_VOL_STM_BAK) {
+		vol_id = vlocation->vldb.vid[2];
+		vol_type = AFSVL_BACKVOL;
+	}
+	else {
+		/* the mask should always name at least one volume type */
+		BUG();
+		vol_id = 0;
+		vol_type = 0;
+	}
+
+	/* contact the currently selected VL server for this cell */
+	ret = afs_server_lookup(vlocation->cell,
+				&vlocation->cell->vl_addrs[vlocation->upd_curr_svix],
+				&vlocation->upd_op.server);
+
+	/* if the server record was obtained, kick off the asynchronous
+	 * update operation; any error is simply propagated */
+	if (ret==0)
+		ret = afs_rxvl_get_entry_by_id_async(&vlocation->upd_op,vol_id,vol_type);
+
+	_leave(" = %d",ret);
+	return ret;
+} /* end afs_vlocation_update_begin() */
+
+/*****************************************************************************/
+/*
+ * abandon updating a VL record
+ * - does not restart the update timer
+ */
+static void afs_vlocation_update_abandon(afs_vlocation_t *vlocation,
+					 afs_vlocation_upd_t state,
+					 int ret)
+{
+	_enter("%s,%u",vlocation->vldb.name,state);
+
+	/* only complain if the update actually failed */
+	if (ret<0)
+		printk("kAFS: Abandoning VL update '%s': %d\n",vlocation->vldb.name,ret);
+
+	/* discard the server record */
+	if (vlocation->upd_op.server) {
+		afs_put_server(vlocation->upd_op.server);
+		vlocation->upd_op.server = NULL;
+	}
+
+	/* clear the singleton "update in progress" slot and record the new
+	 * state for this record */
+	spin_lock(&afs_vlocation_update_lock);
+	afs_vlocation_update = NULL;
+	vlocation->upd_state = state;
+
+	/* TODO: start updating next VL record on pending list */
+
+	spin_unlock(&afs_vlocation_update_lock);
+
+	_leave("");
+} /* end afs_vlocation_update_abandon() */
+
+/*****************************************************************************/
+/*
+ * handle periodic update timeouts and busy retry timeouts
+ * - called from kafstimod
+ */
+static void afs_vlocation_update_timer(afs_timer_t *timer)
+{
+	afs_vlocation_t *vlocation = list_entry(timer,afs_vlocation_t,upd_timer);
+	int ret;
+
+	_enter("%s",vlocation->vldb.name);
+
+	/* only update if not in the graveyard (defend against putting too) */
+	spin_lock(&vlocation->cell->vl_gylock);
+
+	if (!atomic_read(&vlocation->usage))
+		goto out_unlock1;
+
+	/* lock order: vl_gylock then afs_vlocation_update_lock */
+	spin_lock(&afs_vlocation_update_lock);
+
+	/* if we were woken up due to EBUSY sleep then restart immediately if possible or else jump
+	 * to front of pending queue */
+	if (vlocation->upd_state==AFS_VLUPD_BUSYSLEEP) {
+		if (afs_vlocation_update) {
+			list_add(&vlocation->upd_op.link,&afs_vlocation_update_pendq);
+		}
+		else {
+			afs_get_vlocation(vlocation);
+			afs_vlocation_update = vlocation;
+			vlocation->upd_state = AFS_VLUPD_INPROGRESS;
+		}
+		goto out_unlock2;
+	}
+
+	/* put on pending queue if there's already another update in progress */
+	if (afs_vlocation_update) {
+		vlocation->upd_state = AFS_VLUPD_PENDING;
+		list_add_tail(&vlocation->upd_op.link,&afs_vlocation_update_pendq);
+		goto out_unlock2;
+	}
+
+	/* hold a ref on it while actually updating */
+	afs_get_vlocation(vlocation);
+	afs_vlocation_update = vlocation;
+	vlocation->upd_state = AFS_VLUPD_INPROGRESS;
+
+	spin_unlock(&afs_vlocation_update_lock);
+	spin_unlock(&vlocation->cell->vl_gylock);
+
+	/* okay... we can start the update; begin at the cell's currently
+	 * preferred VL server and reset the per-attempt counters */
+	_debug("BEGIN VL UPDATE [%s]",vlocation->vldb.name);
+	vlocation->upd_first_svix = vlocation->cell->vl_curr_svix;
+	vlocation->upd_curr_svix = vlocation->upd_first_svix;
+	vlocation->upd_rej_cnt = 0;
+	vlocation->upd_busy_cnt = 0;
+
+	/* on failure, go back to sleep and retry later, dropping the ref
+	 * taken above */
+	ret = afs_vlocation_update_begin(vlocation);
+	if (ret<0) {
+		afs_vlocation_update_abandon(vlocation,AFS_VLUPD_SLEEP,ret);
+		afs_kafstimod_add_timer(&vlocation->upd_timer,AFS_VLDB_TIMEOUT);
+		afs_put_vlocation(vlocation);
+	}
+
+	_leave("");
+	return;
+
+ out_unlock2:
+	spin_unlock(&afs_vlocation_update_lock);
+ out_unlock1:
+	spin_unlock(&vlocation->cell->vl_gylock);
+	_leave("");
+	return;
+
+} /* end afs_vlocation_update_timer() */
+
+/*****************************************************************************/
+/*
+ * attend to an update operation upon which an event happened
+ * - called in kafsasyncd context
+ */
+static void afs_vlocation_update_attend(afs_async_op_t *op)
+{
+	afsc_vldb_record_t vldb;
+	afs_vlocation_t *vlocation = list_entry(op,afs_vlocation_t,upd_op);
+	unsigned tmp;
+	int ret;
+
+	_enter("%s",vlocation->vldb.name);
+
+	/* collect the outcome of the asynchronous VL database fetch */
+	ret = afs_rxvl_get_entry_by_id_async2(op,&vldb);
+	switch (ret) {
+	case -EAGAIN:
+		_leave(" [unfinished]");
+		return;
+
+	case 0:
+		/* success - copy the new DB entry over the old one and
+		 * rearm the periodic update timer */
+		_debug("END VL UPDATE: %d\n",ret);
+		vlocation->valid = 1;
+
+		_debug("Done VL Lookup: %02x { %08x(%x) %08x(%x) %08x(%x) }",
+		       vldb.vidmask,
+		       ntohl(vldb.servers[0].s_addr),vldb.srvtmask[0],
+		       ntohl(vldb.servers[1].s_addr),vldb.srvtmask[1],
+		       ntohl(vldb.servers[2].s_addr),vldb.srvtmask[2]
+		       );
+
+		_debug("Vids: %08x %08x %08x",vldb.vid[0],vldb.vid[1],vldb.vid[2]);
+
+		afs_vlocation_update_abandon(vlocation,AFS_VLUPD_SLEEP,0);
+
+		down_write(&vlocation->cell->vl_sem);
+
+		/* actually update the cache */
+		if (strncmp(vldb.name,vlocation->vldb.name,sizeof(vlocation->vldb.name))!=0)
+			printk("kAFS: name of volume '%s' changed to '%s' on server\n",
+			       vlocation->vldb.name,vldb.name);
+
+		memcpy(&vlocation->vldb,&vldb,sizeof(vlocation->vldb));
+
+#if 0
+		/* add volume entry to local cache */
+		ret = afsc_update_vlocation(vlocation);
+#endif
+
+		up_write(&vlocation->cell->vl_sem);
+
+		if (ret<0)
+			printk("kAFS: failed to update local cache: %d\n",ret);
+
+		afs_kafstimod_add_timer(&vlocation->upd_timer,AFS_VLDB_TIMEOUT);
+		afs_put_vlocation(vlocation);
+		_leave(" [found]");
+		return;
+
+	case -ENOMEDIUM:
+		/* this VL server disowns the volume - count the rejection and
+		 * ask the next server */
+		vlocation->upd_rej_cnt++;
+		goto try_next;
+
+		/* the server is locked - retry in a very short while */
+	case -EBUSY:
+		vlocation->upd_busy_cnt++;
+		if (vlocation->upd_busy_cnt>3)
+			goto try_next; /* too many retries */
+
+		afs_vlocation_update_abandon(vlocation,AFS_VLUPD_BUSYSLEEP,0);
+		afs_kafstimod_add_timer(&vlocation->upd_timer,HZ/2);
+		afs_put_vlocation(vlocation);
+		_leave(" [busy]");
+		return;
+
+	case -ENETUNREACH:
+	case -EHOSTUNREACH:
+	case -ECONNREFUSED:
+	case -EREMOTEIO:
+		/* record bad vlserver info in the cell too - rotate the
+		 * cell's preferred server on to the next one so that other
+		 * lookups don't keep hitting the dead server
+		 * - TODO: use down_write_trylock() if available
+		 */
+		if (vlocation->upd_curr_svix == vlocation->cell->vl_curr_svix)
+			vlocation->cell->vl_curr_svix =
+				(vlocation->cell->vl_curr_svix + 1) %
+				vlocation->cell->vl_naddrs;
+
+		/* fall through - try the next server for this record too */
+	case -EBADRQC:
+	case -EINVAL:
+	case -EACCES:
+	case -EBADMSG:
+		goto try_next;
+
+	default:
+		goto abandon;
+	}
+
+	/* try contacting the next server */
+ try_next:
+	vlocation->upd_busy_cnt = 0;
+
+	if (vlocation->upd_op.server) {
+		/* discard the server record */
+		afs_put_server(vlocation->upd_op.server);
+		vlocation->upd_op.server = NULL;
+	}
+
+	tmp = vlocation->cell->vl_naddrs;
+	if (tmp==0)
+		goto abandon;
+
+	/* advance to the next server, wrapping; clamp the starting index in
+	 * case the cell's address list shrank in the meantime */
+	vlocation->upd_curr_svix++;
+	if (vlocation->upd_curr_svix >= tmp) vlocation->upd_curr_svix = 0;
+	if (vlocation->upd_first_svix >= tmp) vlocation->upd_first_svix = tmp - 1;
+
+	/* move to the next server */
+	if (vlocation->upd_curr_svix!=vlocation->upd_first_svix) {
+		afs_vlocation_update_begin(vlocation);
+		_leave(" [next]");
+		return;
+	}
+
+	/* run out of servers to try - was the volume rejected? */
+	if (vlocation->upd_rej_cnt>0) {
+		printk("kAFS: Active volume no longer valid '%s'\n",vlocation->vldb.name);
+		vlocation->valid = 0;
+		afs_vlocation_update_abandon(vlocation,AFS_VLUPD_SLEEP,0);
+		afs_kafstimod_add_timer(&vlocation->upd_timer,AFS_VLDB_TIMEOUT);
+		afs_put_vlocation(vlocation);
+		_leave(" [invalidated]");
+		return;
+	}
+
+	/* abandon the update */
+ abandon:
+	afs_vlocation_update_abandon(vlocation,AFS_VLUPD_SLEEP,ret);
+	afs_kafstimod_add_timer(&vlocation->upd_timer,HZ*10);
+	afs_put_vlocation(vlocation);
+	_leave(" [abandoned]");
+
+} /* end afs_vlocation_update_attend() */
+
+/*****************************************************************************/
+/*
+ * deal with an update operation being discarded
+ * - called in kafsasyncd context when it's dying due to rmmod
+ * - the call has already been aborted and put()'d
+ */
+static void afs_vlocation_update_discard(afs_async_op_t *op)
+{
+	afs_vlocation_t *vlocation = list_entry(op,afs_vlocation_t,upd_op);
+
+	_enter("%s",vlocation->vldb.name);
+
+	/* release the server and vlocation refs that the in-progress update
+	 * was holding */
+	afs_put_server(op->server);
+	op->server = NULL;
+
+	afs_put_vlocation(vlocation);
+
+	_leave("");
+} /* end afs_vlocation_update_discard() */
diff --git a/fs/afs/vnode.c b/fs/afs/vnode.c
new file mode 100644
index 000000000000..ab2d7b241fc3
--- /dev/null
+++ b/fs/afs/vnode.c
@@ -0,0 +1,316 @@
+/* vnode.c: AFS vnode management
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include "volume.h"
+#include "cell.h"
+#include "cmservice.h"
+#include "fsclient.h"
+#include "vlclient.h"
+#include "vnode.h"
+#include "internal.h"
+
+static void afs_vnode_cb_timed_out(struct afs_timer *timer);
+
+/* operations table hooked up to each vnode's callback-expiry timer */
+struct afs_timer_ops afs_vnode_cb_timed_out_ops = {
+	.timed_out	= afs_vnode_cb_timed_out,
+};
+
+/*****************************************************************************/
+/*
+ * handle a callback timing out
+ * TODO: retain a ref to vnode struct for an outstanding callback timeout
+ */
+static void afs_vnode_cb_timed_out(struct afs_timer *timer)
+{
+	afs_server_t *oldserver;
+	afs_vnode_t *vnode;
+
+	vnode = list_entry(timer,afs_vnode_t,cb_timeout);
+
+	_enter("%p",vnode);
+
+	/* set the changed flag in the vnode and release the server */
+	spin_lock(&vnode->lock);
+
+	/* atomically detach the callback-issuing server from the vnode */
+	oldserver = xchg(&vnode->cb_server,NULL);
+	if (oldserver) {
+		vnode->flags |= AFS_VNODE_CHANGED;
+
+		spin_lock(&afs_cb_hash_lock);
+		list_del_init(&vnode->cb_hash_link);
+		spin_unlock(&afs_cb_hash_lock);
+
+		spin_lock(&oldserver->cb_lock);
+		list_del_init(&vnode->cb_link);
+		spin_unlock(&oldserver->cb_lock);
+	}
+
+	spin_unlock(&vnode->lock);
+
+	/* drop the server ref outside of the vnode lock */
+	if (oldserver)
+		afs_put_server(oldserver);
+
+	_leave("");
+} /* end afs_vnode_cb_timed_out() */
+
+/*****************************************************************************/
+/*
+ * finish off updating the recorded status of a file
+ * - starts callback expiry timer
+ * - adds to server's callback list
+ */
+void afs_vnode_finalise_status_update(afs_vnode_t *vnode, afs_server_t *server, int ret)
+{
+	afs_server_t *oldserver = NULL;
+
+	_enter("%p,%p,%d",vnode,server,ret);
+
+	spin_lock(&vnode->lock);
+
+	/* the status has now been fetched (or the file found deleted) */
+	vnode->flags &= ~AFS_VNODE_CHANGED;
+
+	if (ret==0) {
+		/* adjust the callback timeout appropriately */
+		afs_kafstimod_add_timer(&vnode->cb_timeout,vnode->cb_expiry*HZ);
+
+		spin_lock(&afs_cb_hash_lock);
+		list_del(&vnode->cb_hash_link);
+		list_add_tail(&vnode->cb_hash_link,&afs_cb_hash(server,&vnode->fid));
+		spin_unlock(&afs_cb_hash_lock);
+
+		/* swap ref to old callback server with that for new callback server */
+		oldserver = xchg(&vnode->cb_server,server);
+		if (oldserver!=server) {
+			if (oldserver) {
+				spin_lock(&oldserver->cb_lock);
+				list_del_init(&vnode->cb_link);
+				spin_unlock(&oldserver->cb_lock);
+			}
+
+			/* the vnode's cb_server pointer now holds a ref */
+			afs_get_server(server);
+			spin_lock(&server->cb_lock);
+			list_add_tail(&vnode->cb_link,&server->cb_promises);
+			spin_unlock(&server->cb_lock);
+		}
+		else {
+			/* same server */
+			oldserver = NULL;
+		}
+	}
+	else if (ret==-ENOENT) {
+		/* the file was deleted - clear the callback timeout */
+		oldserver = xchg(&vnode->cb_server,NULL);
+		afs_kafstimod_del_timer(&vnode->cb_timeout);
+
+		_debug("got NOENT from server - marking file deleted");
+		vnode->flags |= AFS_VNODE_DELETED;
+	}
+
+	/* this op no longer counts as an outstanding status-updating op */
+	vnode->update_cnt--;
+
+	spin_unlock(&vnode->lock);
+
+	/* let anyone sleeping in afs_vnode_fetch_status() proceed */
+	wake_up_all(&vnode->update_waitq);
+
+	if (oldserver)
+		afs_put_server(oldserver);
+
+	_leave("");
+
+} /* end afs_vnode_finalise_status_update() */
+
+/*****************************************************************************/
+/*
+ * fetch file status from the volume
+ * - don't issue a fetch if:
+ * - the changed bit is not set and there's a valid callback
+ * - there are any outstanding ops that will fetch the status
+ * - TODO implement local caching
+ */
+int afs_vnode_fetch_status(afs_vnode_t *vnode)
+{
+	afs_server_t *server;
+	int ret;
+
+	DECLARE_WAITQUEUE(myself,current);
+
+	_enter("%s,{%u,%u,%u}",vnode->volume->vlocation->vldb.name,
+	       vnode->fid.vid,vnode->fid.vnode,vnode->fid.unique);
+
+	/* quick unlocked checks: nothing to do if unchanged with a live
+	 * callback, or if the file is known to have been deleted */
+	if (!(vnode->flags & AFS_VNODE_CHANGED) && vnode->cb_server) {
+		_leave(" [unchanged]");
+		return 0;
+	}
+
+	if (vnode->flags & AFS_VNODE_DELETED) {
+		_leave(" [deleted]");
+		return -ENOENT;
+	}
+
+	spin_lock(&vnode->lock);
+
+	if (!(vnode->flags & AFS_VNODE_CHANGED)) {
+		spin_unlock(&vnode->lock);
+		_leave(" [unchanged]");
+		return 0;
+	}
+
+	if (vnode->update_cnt>0) {
+		/* someone else started a fetch */
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		add_wait_queue(&vnode->update_waitq,&myself);
+
+		/* wait for the status to be updated */
+		for (;;) {
+			if (!(vnode->flags & AFS_VNODE_CHANGED)) break;
+			if (vnode->flags & AFS_VNODE_DELETED) break;
+
+			/* it got updated and invalidated all before we saw it */
+			if (vnode->update_cnt==0) {
+				remove_wait_queue(&vnode->update_waitq,&myself);
+				set_current_state(TASK_RUNNING);
+				goto get_anyway;
+			}
+
+			spin_unlock(&vnode->lock);
+
+			schedule();
+			set_current_state(TASK_UNINTERRUPTIBLE);
+
+			spin_lock(&vnode->lock);
+		}
+
+		remove_wait_queue(&vnode->update_waitq,&myself);
+		spin_unlock(&vnode->lock);
+		set_current_state(TASK_RUNNING);
+
+		return vnode->flags & AFS_VNODE_DELETED ? -ENOENT : 0;
+	}
+
+ get_anyway:
+	/* okay... we're going to have to initiate the op */
+	vnode->update_cnt++;
+
+	spin_unlock(&vnode->lock);
+
+	/* merge AFS status fetches and clear outstanding callback on this vnode */
+	do {
+		/* pick a server to query */
+		ret = afs_volume_pick_fileserver(vnode->volume,&server);
+		if (ret<0)
+			goto no_server;
+
+		_debug("USING SERVER: %08x\n",ntohl(server->addr.s_addr));
+
+		ret = afs_rxfs_fetch_file_status(server,vnode,NULL);
+
+	} while (!afs_volume_release_fileserver(vnode->volume,server,ret));
+
+	/* adjust the flags (also decrements update_cnt and wakes waiters) */
+	afs_vnode_finalise_status_update(vnode,server,ret);
+
+	_leave(" = %d",ret);
+	return ret;
+
+ no_server:
+	/* no server could be picked - release our claim on the update so
+	 * that waiters on update_waitq aren't stranded forever */
+	spin_lock(&vnode->lock);
+	vnode->update_cnt--;
+	spin_unlock(&vnode->lock);
+	wake_up_all(&vnode->update_waitq);
+	_leave(" = %d [no server]",ret);
+	return ret;
+} /* end afs_vnode_fetch_status() */
+
+/*****************************************************************************/
+/*
+ * fetch file data from the volume
+ * - TODO implement caching and server failover
+ */
+int afs_vnode_fetch_data(afs_vnode_t *vnode, struct afs_rxfs_fetch_descriptor *desc)
+{
+	afs_server_t *server;
+	int ret;
+
+	_enter("%s,{%u,%u,%u}",
+	       vnode->volume->vlocation->vldb.name,
+	       vnode->fid.vid,
+	       vnode->fid.vnode,
+	       vnode->fid.unique);
+
+	/* this op will fetch the status */
+	spin_lock(&vnode->lock);
+	vnode->update_cnt++;
+	spin_unlock(&vnode->lock);
+
+	/* merge in AFS status fetches and clear outstanding callback on this vnode */
+	do {
+		/* pick a server to query */
+		ret = afs_volume_pick_fileserver(vnode->volume,&server);
+		if (ret<0)
+			goto no_server;
+
+		_debug("USING SERVER: %08x\n",ntohl(server->addr.s_addr));
+
+		ret = afs_rxfs_fetch_file_data(server,vnode,desc,NULL);
+
+	} while (!afs_volume_release_fileserver(vnode->volume,server,ret));
+
+	/* adjust the flags (also decrements update_cnt and wakes waiters) */
+	afs_vnode_finalise_status_update(vnode,server,ret);
+
+	_leave(" = %d",ret);
+	return ret;
+
+ no_server:
+	/* no server could be picked - release our claim on the update so
+	 * that waiters on update_waitq aren't stranded forever */
+	spin_lock(&vnode->lock);
+	vnode->update_cnt--;
+	spin_unlock(&vnode->lock);
+	wake_up_all(&vnode->update_waitq);
+	_leave(" = %d [no server]",ret);
+	return ret;
+
+} /* end afs_vnode_fetch_data() */
+
+/*****************************************************************************/
+/*
+ * break any outstanding callback on a vnode
+ * - only relevant to the server that issued it
+ */
+int afs_vnode_give_up_callback(afs_vnode_t *vnode)
+{
+	afs_server_t *server;
+	int ret;
+
+	_enter("%s,{%u,%u,%u}",
+	       vnode->volume->vlocation->vldb.name,
+	       vnode->fid.vid,
+	       vnode->fid.vnode,
+	       vnode->fid.unique);
+
+	/* unhash from the master callback list first */
+	spin_lock(&afs_cb_hash_lock);
+	list_del_init(&vnode->cb_hash_link);
+	spin_unlock(&afs_cb_hash_lock);
+
+	/* set the changed flag in the vnode and release the server */
+	spin_lock(&vnode->lock);
+
+	afs_kafstimod_del_timer(&vnode->cb_timeout);
+
+	/* atomically detach the promise-holding server, if any */
+	server = xchg(&vnode->cb_server,NULL);
+	if (server) {
+		vnode->flags |= AFS_VNODE_CHANGED;
+
+		spin_lock(&server->cb_lock);
+		list_del_init(&vnode->cb_link);
+		spin_unlock(&server->cb_lock);
+	}
+
+	spin_unlock(&vnode->lock);
+
+	/* tell the server (outside the vnode lock) and drop its ref */
+	ret = 0;
+	if (server) {
+		ret = afs_rxfs_give_up_callback(server,vnode);
+		afs_put_server(server);
+	}
+
+	_leave(" = %d",ret);
+	return ret;
+} /* end afs_vnode_give_up_callback() */
diff --git a/fs/afs/vnode.h b/fs/afs/vnode.h
new file mode 100644
index 000000000000..ec2c412c4214
--- /dev/null
+++ b/fs/afs/vnode.h
@@ -0,0 +1,88 @@
+/* vnode.h: AFS vnode record
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_AFS_VNODE_H
+#define _LINUX_AFS_VNODE_H
+
+#include <linux/fs.h>
+#include <linux/version.h>
+#include "server.h"
+#include "kafstimod.h"
+
+#ifdef __KERNEL__
+
+struct afs_rxfs_fetch_descriptor;
+
+/*****************************************************************************/
+/*
+ * AFS inode private data
+ */
+struct afs_vnode
+{
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
+	struct inode		vfs_inode;	/* the VFS's inode record */
+#else
+	struct inode		*inode;		/* the VFS's inode */
+#endif
+
+	afs_volume_t		*volume;	/* volume on which vnode resides */
+	afs_fid_t		fid;		/* the file identifier for this inode */
+	afs_file_status_t	status;		/* AFS status info for this file */
+	unsigned		nix;		/* vnode index in cache */
+
+	wait_queue_head_t	update_waitq;	/* status fetch waitqueue */
+	unsigned		update_cnt;	/* number of outstanding ops that will update the
+						 * status */
+	spinlock_t		lock;		/* waitqueue/flags lock (also guards cb_* fields
+						 * and update_cnt) */
+	unsigned		flags;
+#define AFS_VNODE_CHANGED	0x00000001	/* set if vnode reported changed by callback */
+#define AFS_VNODE_DELETED	0x00000002	/* set if vnode deleted on server */
+#define AFS_VNODE_MOUNTPOINT	0x00000004	/* set if vnode is a mountpoint symlink */
+
+	/* outstanding callback notification on this file
+	 * - cb_server holds a ref on the server while non-NULL */
+	afs_server_t		*cb_server;	/* server that made the current promise */
+	struct list_head	cb_link;	/* link in server's promises list */
+	struct list_head	cb_hash_link;	/* link in master callback hash */
+	afs_timer_t		cb_timeout;	/* timeout on promise */
+	unsigned		cb_version;	/* callback version */
+	unsigned		cb_expiry;	/* callback expiry time */
+	afs_callback_type_t	cb_type;	/* type of callback */
+};
+
+/* map a VFS inode to its containing AFS vnode record */
+static inline afs_vnode_t *AFS_FS_I(struct inode *inode)
+{
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
+	/* list_entry() is used here purely as container_of() */
+	return list_entry(inode,afs_vnode_t,vfs_inode);
+#else
+	return inode->u.generic_ip;
+#endif
+}
+
+/* map an AFS vnode record back to its VFS inode */
+static inline struct inode *AFS_VNODE_TO_I(afs_vnode_t *vnode)
+{
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
+	return &vnode->vfs_inode;
+#else
+	return vnode->inode;
+#endif
+}
+
+extern int afs_vnode_fetch_status(afs_vnode_t *vnode);
+
+extern int afs_vnode_fetch_data(afs_vnode_t *vnode, struct afs_rxfs_fetch_descriptor *desc);
+
+extern int afs_vnode_give_up_callback(afs_vnode_t *vnode);
+
+extern struct afs_timer_ops afs_vnode_cb_timed_out_ops;
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_AFS_VNODE_H */
diff --git a/fs/afs/volume.c b/fs/afs/volume.c
new file mode 100644
index 000000000000..198c355c715b
--- /dev/null
+++ b/fs/afs/volume.c
@@ -0,0 +1,430 @@
+/* volume.c: AFS volume management
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include "volume.h"
+#include "cell.h"
+#include "cmservice.h"
+#include "fsclient.h"
+#include "vlclient.h"
+#include "internal.h"
+
+const char *afs_voltypes[] = { "R/W", "R/O", "BAK" };
+
+/*****************************************************************************/
+/*
+ * lookup a volume by name
+ * - this can be one of the following:
+ * "%[cell:]volume[.]" R/W volume
+ * "#[cell:]volume[.]" R/O or R/W volume (rwparent=0), or R/W (rwparent=1) volume
+ * "%[cell:]volume.readonly" R/O volume
+ * "#[cell:]volume.readonly" R/O volume
+ * "%[cell:]volume.backup" Backup volume
+ * "#[cell:]volume.backup" Backup volume
+ *
+ * The cell name is optional, and defaults to the current cell.
+ *
+ * See "The Rules of Mount Point Traversal" in Chapter 5 of the AFS SysAdmin Guide
+ * - Rule 1: Explicit type suffix forces access of that type or nothing
+ * (no suffix, then use Rule 2 & 3)
+ * - Rule 2: If parent volume is R/O, then mount R/O volume by preference, R/W if not available
+ * - Rule 3: If parent volume is R/W, then only mount R/W volume unless explicitly told otherwise
+ */
+int afs_volume_lookup(char *name, int rwparent, afs_volume_t **_volume)
+{
+	afs_vlocation_t *vlocation = NULL;
+	afs_voltype_t type;
+	afs_volume_t *volume = NULL;
+	afs_cell_t *cell = NULL;
+	char *cellname, *volname, *suffix;
+	char srvtmask;
+	int force, ret, loop;
+
+	_enter(",%s,",name);
+
+	if (!name || (name[0]!='%' && name[0]!='#') || !name[1]) {
+		printk("kAFS: unparsable volume name\n");
+		return -EINVAL;
+	}
+
+	/* determine the type of volume we're looking for (Rules 1-3 above);
+	 * 'force' means an explicit type was requested and no fallback is
+	 * permitted */
+	force = 0;
+	type = AFSVL_ROVOL;
+
+	if (rwparent || name[0]=='%') {
+		type = AFSVL_RWVOL;
+		force = 1;
+	}
+
+	/* a trailing ".readonly"/".backup" suffix forces the type; a bare
+	 * trailing '.' is stripped */
+	suffix = strrchr(name,'.');
+	if (suffix) {
+		if (strcmp(suffix,".readonly")==0) {
+			type = AFSVL_ROVOL;
+			force = 1;
+		}
+		else if (strcmp(suffix,".backup")==0) {
+			type = AFSVL_BACKVOL;
+			force = 1;
+		}
+		else if (suffix[1]==0) {
+			*suffix = 0;
+			suffix = NULL;
+		}
+		else {
+			suffix = NULL;
+		}
+	}
+
+	/* split the cell and volume names (the string is temporarily
+	 * modified in place and restored below) */
+	name++;
+	volname = strchr(name,':');
+	if (volname) {
+		*volname++ = 0;
+		cellname = name;
+	}
+	else {
+		volname = name;
+		cellname = NULL;
+	}
+
+	_debug("CELL:%s VOLUME:%s SUFFIX:%s TYPE:%d%s",
+	       cellname,volname,suffix?:"-",type,force?" FORCE":"");
+
+	/* lookup the cell record */
+	ret = afs_cell_lookup(cellname,&cell);
+	if (ret<0)
+		printk("kAFS: unable to lookup cell '%s'\n",cellname?:"");
+
+	if (cellname) volname[-1] = ':';
+	if (ret<0)
+		goto error;
+
+	/* lookup the volume location record */
+	if (suffix) *suffix = 0;
+	ret = afs_vlocation_lookup(cell,volname,&vlocation);
+	if (suffix) *suffix = '.';
+	if (ret<0)
+		goto error;
+
+	/* make the final decision on the type we want */
+	ret = -ENOMEDIUM;
+	if (force && !(vlocation->vldb.vidmask & (1<<type)))
+		goto error;
+
+	/* union of the volume types offered across all listed servers */
+	srvtmask = 0;
+	for (loop=0; loop<vlocation->vldb.nservers; loop++)
+		srvtmask |= vlocation->vldb.srvtmask[loop];
+
+	if (force) {
+		if (!(srvtmask & (1 <<type)))
+			goto error;
+	}
+	else if (srvtmask & AFSC_VOL_STM_RO) {
+		type = AFSVL_ROVOL;
+	}
+	else if (srvtmask & AFSC_VOL_STM_RW) {
+		type = AFSVL_RWVOL;
+	}
+	else {
+		goto error;
+	}
+
+	down_write(&cell->vl_sem);
+
+	/* is the volume already active? */
+	if (vlocation->vols[type]) {
+		/* yes - re-use it */
+		volume = vlocation->vols[type];
+		afs_get_volume(volume);
+		goto success;
+	}
+
+	/* create a new volume record */
+	_debug("creating new volume record");
+
+	ret = -ENOMEM;
+	volume = kmalloc(sizeof(afs_volume_t),GFP_KERNEL);
+	if (!volume)
+		goto error_up;
+
+	memset(volume,0,sizeof(afs_volume_t));
+	atomic_set(&volume->usage,1);
+	volume->type		= type;
+	volume->type_force	= force;
+	volume->cell		= cell;
+	volume->vid		= vlocation->vldb.vid[type];
+
+	init_rwsem(&volume->server_sem);
+
+	/* look up all the applicable server records */
+	for (loop=0; loop<8; loop++) {
+		if (vlocation->vldb.srvtmask[loop] & (1 << volume->type)) {
+			ret = afs_server_lookup(volume->cell,
+						&vlocation->vldb.servers[loop],
+						&volume->servers[volume->nservers]);
+			if (ret<0)
+				goto error_discard;
+
+			volume->nservers++;
+		}
+	}
+
+	/* attach the cache and volume location
+	 * - the volume pins the cell indirectly through its vlocation ref */
+#if 0
+	afs_get_cache(cache);		volume->cache = cache;
+#endif
+	afs_get_vlocation(vlocation);	volume->vlocation = vlocation;
+
+	vlocation->vols[type] = volume;
+
+ success:
+	_debug("kAFS selected %s volume %08x",afs_voltypes[volume->type],volume->vid);
+	*_volume = volume;
+	ret = 0;
+
+	/* clean up - the success path also drops the lookup's own refs on
+	 * the vlocation and cell here (the volume keeps its own) */
+ error_up:
+	up_write(&cell->vl_sem);
+ error:
+	if (vlocation) afs_put_vlocation(vlocation);
+	if (cell) afs_put_cell(cell);
+
+	_leave(" = %d (%p)",ret,volume);
+	return ret;
+
+ error_discard:
+	up_write(&cell->vl_sem);
+
+	for (loop=volume->nservers-1; loop>=0; loop--)
+		if (volume->servers[loop])
+			afs_put_server(volume->servers[loop]);
+
+	kfree(volume);
+	goto error;
+} /* end afs_volume_lookup() */
+
+/*****************************************************************************/
+/*
+ * destroy a volume record
+ */
+void afs_put_volume(afs_volume_t *volume)
+{
+	afs_vlocation_t *vlocation;
+	int loop;
+
+	_enter("%p",volume);
+
+	vlocation = volume->vlocation;
+
+	/* sanity check */
+	if (atomic_read(&volume->usage)<=0)
+		BUG();
+
+	/* to prevent a race, the decrement and the dequeue must be effectively atomic */
+	down_write(&vlocation->cell->vl_sem);
+
+	if (likely(!atomic_dec_and_test(&volume->usage))) {
+		up_write(&vlocation->cell->vl_sem);
+		_leave("");
+		return;
+	}
+
+	/* last ref gone - detach from the vlocation's active slot */
+	vlocation->vols[volume->type] = NULL;
+
+	up_write(&vlocation->cell->vl_sem);
+
+	afs_put_vlocation(vlocation);
+
+	/* finish cleaning up the volume: drop the server refs it held */
+#if 0
+	if (volume->cache)	afs_put_cache(volume->cache);
+#endif
+
+	for (loop=volume->nservers-1; loop>=0; loop--)
+		if (volume->servers[loop])
+			afs_put_server(volume->servers[loop]);
+
+	kfree(volume);
+
+	_leave(" [destroyed]");
+} /* end afs_put_volume() */
+
+/*****************************************************************************/
+/*
+ * pick a server to use to try accessing this volume
+ * - returns with an elevated usage count on the server chosen
+ */
+int afs_volume_pick_fileserver(afs_volume_t *volume, afs_server_t **_server)
+{
+	afs_server_t *server;
+	int ret, state, loop;
+
+	_enter("%s",volume->vlocation->vldb.name);
+
+	down_read(&volume->server_sem);
+
+	/* handle the no-server case */
+	if (volume->nservers==0) {
+		ret = volume->rjservers ? -ENOMEDIUM : -ESTALE;
+		up_read(&volume->server_sem);
+		_leave(" = %d [no servers]",ret);
+		return ret;
+	}
+
+	/* basically, just search the list for the first live server and use that
+	 * - if none is live, return the "most transient" error seen, ranked
+	 *   ENETUNREACH < EHOSTUNREACH < ECONNREFUSED < anything else */
+	ret = 0;
+	for (loop=0; loop<volume->nservers; loop++) {
+		server = volume->servers[loop];
+		state = server->fs_state;
+
+		switch (state) {
+			/* found an apparently healthy server */
+		case 0:
+			afs_get_server(server);
+			up_read(&volume->server_sem);
+			*_server = server;
+			_leave(" = 0 (picked %08x)",ntohl(server->addr.s_addr));
+			return 0;
+
+		case -ENETUNREACH:
+			if (ret==0)
+				ret = state;
+			break;
+
+		case -EHOSTUNREACH:
+			if (ret==0 || ret==-ENETUNREACH)
+				ret = state;
+			break;
+
+		case -ECONNREFUSED:
+			if (ret==0 || ret==-ENETUNREACH || ret==-EHOSTUNREACH)
+				ret = state;
+			break;
+
+		default:
+		case -EREMOTEIO:
+			if (ret==0 ||
+			    ret==-ENETUNREACH ||
+			    ret==-EHOSTUNREACH ||
+			    ret==-ECONNREFUSED)
+				ret = state;
+			break;
+		}
+	}
+
+	/* no available servers
+	 * - TODO: handle the no active servers case better
+	 */
+	up_read(&volume->server_sem);
+	_leave(" = %d",ret);
+	return ret;
+} /* end afs_volume_pick_fileserver() */
+
+/*****************************************************************************/
+/*
+ * release a server after use
+ * - releases the ref on the server struct that was acquired by picking
+ * - records result of using a particular server to access a volume
+ * - return 0 to try again, 1 if okay or to issue error
+ */
+int afs_volume_release_fileserver(afs_volume_t *volume, afs_server_t *server, int result)
+{
+	unsigned loop;
+
+	_enter("%s,%08x,%d",volume->vlocation->vldb.name,ntohl(server->addr.s_addr),result);
+
+	switch (result) {
+		/* success */
+	case 0:
+		server->fs_act_jif = jiffies;
+		break;
+
+		/* the fileserver denied all knowledge of the volume */
+	case -ENOMEDIUM:
+		server->fs_act_jif = jiffies;
+		down_write(&volume->server_sem);
+
+		/* first, find where the server is in the active list (if it is) */
+		for (loop=0; loop<volume->nservers; loop++)
+			if (volume->servers[loop]==server)
+				goto present;
+
+		/* no longer there - may have been discarded by another op */
+		goto try_next_server_upw;
+
+	present:
+		/* shuffle the remaining servers down over the rejecting one;
+		 * this put drops the ref the volume's list held */
+		volume->nservers--;
+		memmove(&volume->servers[loop],
+			&volume->servers[loop+1],
+			sizeof(volume->servers[loop]) * (volume->nservers - loop)
+			);
+		volume->servers[volume->nservers] = NULL;
+		afs_put_server(server);
+		volume->rjservers++;
+
+		if (volume->nservers>0)
+			/* another server might acknowledge its existence */
+			goto try_next_server_upw;
+
+		/* handle the case where all the fileservers have rejected the volume
+		 * - TODO: try asking the fileservers for volume information
+		 * - TODO: contact the VL server again to see if the volume is no longer registered
+		 */
+		up_write(&volume->server_sem);
+		afs_put_server(server);
+		_leave(" [completely rejected]");
+		return 1;
+
+		/* problem reaching the server */
+	case -ENETUNREACH:
+	case -EHOSTUNREACH:
+	case -ECONNREFUSED:
+	case -ETIMEDOUT:
+	case -EREMOTEIO:
+		/* mark the server as dead
+		 * TODO: vary dead timeout depending on error
+		 */
+		spin_lock(&server->fs_lock);
+		if (!server->fs_state) {
+			server->fs_dead_jif = jiffies + HZ * 10;
+			server->fs_state = result;
+			printk("kAFS: SERVER DEAD state=%d\n",result);
+		}
+		spin_unlock(&server->fs_lock);
+		goto try_next_server;
+
+		/* miscellaneous error - fallthrough into the break below is
+		 * deliberate (only default updates fs_act_jif) */
+	default:
+		server->fs_act_jif = jiffies;
+	case -ENOMEM:
+	case -ENONET:
+		break;
+	}
+
+	/* tell the caller to accept the result (drops the pick ref) */
+	afs_put_server(server);
+	_leave("");
+	return 1;
+
+	/* tell the caller to loop around and try the next server */
+ try_next_server_upw:
+	up_write(&volume->server_sem);
+ try_next_server:
+	afs_put_server(server);
+	_leave(" [try next server]");
+	return 0;
+
+} /* end afs_volume_release_fileserver() */
diff --git a/fs/afs/volume.h b/fs/afs/volume.h
new file mode 100644
index 000000000000..1842d983677f
--- /dev/null
+++ b/fs/afs/volume.h
@@ -0,0 +1,92 @@
+/* volume.h: AFS volume management
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_AFS_VOLUME_H
+#define _LINUX_AFS_VOLUME_H
+
+#include "types.h"
+#include "fsclient.h"
+#include "kafstimod.h"
+#include "kafsasyncd.h"
+#include "cache-layout.h"
+
+#define __packed __attribute__((packed))
+
+typedef enum {
+ AFS_VLUPD_SLEEP, /* sleeping waiting for update timer to fire */
+ AFS_VLUPD_PENDING, /* on pending queue */
+ AFS_VLUPD_INPROGRESS, /* op in progress */
+ AFS_VLUPD_BUSYSLEEP, /* sleeping because server returned EBUSY */
+
+} __attribute__((packed)) afs_vlocation_upd_t;
+
+/*****************************************************************************/
+/*
+ * AFS volume location record
+ */
+struct afs_vlocation
+{
+ atomic_t usage;
+ struct list_head link; /* link in cell volume location list */
+ afs_timer_t timeout; /* decaching timer */
+ afs_cell_t *cell; /* cell to which volume belongs */
+ struct list_head caches; /* backing caches */
+ afsc_vldb_record_t vldb; /* volume information DB record */
+ struct afs_volume *vols[3]; /* volume access record pointer (index by type) */
+ rwlock_t lock; /* access lock */
+ unsigned long read_jif; /* time at which last read from vlserver */
+ afs_timer_t upd_timer; /* update timer */
+ afs_async_op_t upd_op; /* update operation */
+ afs_vlocation_upd_t upd_state; /* update state */
+ unsigned short upd_first_svix; /* first server index during update */
+ unsigned short upd_curr_svix; /* current server index during update */
+ unsigned short upd_rej_cnt; /* ENOMEDIUM count during update */
+ unsigned short upd_busy_cnt; /* EBUSY count during update */
+ unsigned short valid; /* T if valid */
+};
+
+extern int afs_vlocation_lookup(afs_cell_t *cell, const char *name, afs_vlocation_t **_vlocation);
+
+#define afs_get_vlocation(V) do { atomic_inc(&(V)->usage); } while(0)
+
+extern void __afs_put_vlocation(afs_vlocation_t *vlocation);
+extern void afs_put_vlocation(afs_vlocation_t *vlocation);
+extern void afs_vlocation_do_timeout(afs_vlocation_t *vlocation);
+
+/*****************************************************************************/
+/*
+ * AFS volume access record
+ */
+struct afs_volume
+{
+ atomic_t usage;
+ afs_cell_t *cell; /* cell to which belongs (unrefd ptr) */
+ afs_vlocation_t *vlocation; /* volume location */
+ afs_volid_t vid; /* volume ID */
+ afs_voltype_t __packed type; /* type of volume */
+ char type_force; /* force volume type (suppress R/O -> R/W) */
+ unsigned short nservers; /* number of server slots filled */
+ unsigned short rjservers; /* number of servers discarded due to -ENOMEDIUM */
+ afs_server_t *servers[8]; /* servers on which volume resides (ordered) */
+ struct rw_semaphore server_sem; /* lock for accessing current server */
+};
+
+extern int afs_volume_lookup(char *name, int ro, afs_volume_t **_volume);
+
+#define afs_get_volume(V) do { atomic_inc(&(V)->usage); } while(0)
+
+extern void afs_put_volume(afs_volume_t *volume);
+
+extern int afs_volume_pick_fileserver(afs_volume_t *volume, afs_server_t **_server);
+
+extern int afs_volume_release_fileserver(afs_volume_t *volume, afs_server_t *server, int result);
+
+#endif /* _LINUX_AFS_VOLUME_H */
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 33fc669b7842..1ad7f467993b 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -526,8 +526,6 @@ int check_disk_change(struct block_device *bdev)
{
struct block_device_operations * bdops = bdev->bd_op;
kdev_t dev = to_kdev_t(bdev->bd_dev);
- struct gendisk *disk;
- int part;
if (bdops->check_media_change == NULL)
return 0;
@@ -537,10 +535,9 @@ int check_disk_change(struct block_device *bdev)
if (invalidate_device(dev, 0))
printk("VFS: busy inodes on changed media.\n");
- disk = get_gendisk(bdev->bd_dev, &part);
if (bdops->revalidate)
bdops->revalidate(dev);
- if (disk && disk->minor_shift)
+ if (bdev->bd_disk->minors > 1)
bdev->bd_invalidated = 1;
return 1;
}
@@ -548,12 +545,11 @@ int check_disk_change(struct block_device *bdev)
int full_check_disk_change(struct block_device *bdev)
{
int res = 0;
- int n;
if (bdev->bd_contains != bdev)
BUG();
down(&bdev->bd_sem);
if (check_disk_change(bdev)) {
- rescan_partitions(get_gendisk(bdev->bd_dev, &n), bdev);
+ rescan_partitions(bdev->bd_disk, bdev);
res = 1;
}
up(&bdev->bd_sem);
@@ -595,6 +591,8 @@ static int do_open(struct block_device *bdev, struct inode *inode, struct file *
kdev_t dev = to_kdev_t(bdev->bd_dev);
struct module *owner = NULL;
struct block_device_operations *ops, *old;
+ struct gendisk *disk;
+ int part;
lock_kernel();
ops = get_blkfops(major(dev));
@@ -614,33 +612,32 @@ static int do_open(struct block_device *bdev, struct inode *inode, struct file *
if (owner)
__MOD_DEC_USE_COUNT(owner);
}
+ disk = get_gendisk(bdev->bd_dev, &part);
+ if (!disk)
+ goto out1;
if (!bdev->bd_contains) {
- int part;
- struct gendisk *g = get_gendisk(bdev->bd_dev, &part);
bdev->bd_contains = bdev;
- if (g && part) {
- struct block_device *disk;
- disk = bdget(MKDEV(g->major, g->first_minor));
+ if (part) {
+ struct block_device *whole;
+ whole = bdget(MKDEV(disk->major, disk->first_minor));
ret = -ENOMEM;
- if (!disk)
+ if (!whole)
goto out1;
- ret = blkdev_get(disk, file->f_mode, file->f_flags, BDEV_RAW);
+ ret = blkdev_get(whole, file->f_mode, file->f_flags, BDEV_RAW);
if (ret)
goto out1;
- bdev->bd_contains = disk;
+ bdev->bd_contains = whole;
}
}
if (bdev->bd_contains == bdev) {
- int part;
- struct gendisk *g = get_gendisk(bdev->bd_dev, &part);
-
+ if (!bdev->bd_openers)
+ bdev->bd_disk = disk;
if (!bdev->bd_queue) {
struct blk_dev_struct *p = blk_dev + major(dev);
bdev->bd_queue = &p->request_queue;
if (p->queue)
bdev->bd_queue = p->queue(dev);
}
-
if (bdev->bd_op->open) {
ret = bdev->bd_op->open(inode, file);
if (ret)
@@ -648,12 +645,8 @@ static int do_open(struct block_device *bdev, struct inode *inode, struct file *
}
if (!bdev->bd_openers) {
struct backing_dev_info *bdi;
- sector_t sect = 0;
-
bdev->bd_offset = 0;
- if (g)
- sect = get_capacity(g);
- bd_set_size(bdev, (loff_t)sect << 9);
+ bd_set_size(bdev, (loff_t)get_capacity(disk) << 9);
bdi = blk_get_backing_dev_info(bdev);
if (bdi == NULL)
bdi = &default_backing_dev_info;
@@ -661,19 +654,17 @@ static int do_open(struct block_device *bdev, struct inode *inode, struct file *
bdev->bd_inode->i_data.backing_dev_info = bdi;
}
if (bdev->bd_invalidated)
- rescan_partitions(g, bdev);
+ rescan_partitions(disk, bdev);
} else {
down(&bdev->bd_contains->bd_sem);
bdev->bd_contains->bd_part_count++;
if (!bdev->bd_openers) {
- int part;
- struct gendisk *g = get_gendisk(bdev->bd_dev, &part);
struct hd_struct *p;
- p = g->part + part - 1;
+ p = disk->part + part - 1;
inode->i_data.backing_dev_info =
bdev->bd_inode->i_data.backing_dev_info =
bdev->bd_contains->bd_inode->i_data.backing_dev_info;
- if (!p->nr_sects) {
+ if (!(disk->flags & GENHD_FL_UP) || !p->nr_sects) {
bdev->bd_contains->bd_part_count--;
up(&bdev->bd_contains->bd_sem);
ret = -ENXIO;
@@ -682,10 +673,12 @@ static int do_open(struct block_device *bdev, struct inode *inode, struct file *
bdev->bd_queue = bdev->bd_contains->bd_queue;
bdev->bd_offset = p->start_sect;
bd_set_size(bdev, (loff_t) p->nr_sects << 9);
+ bdev->bd_disk = disk;
}
up(&bdev->bd_contains->bd_sem);
}
- bdev->bd_openers++;
+ if (bdev->bd_openers++)
+ put_disk(disk);
up(&bdev->bd_sem);
unlock_kernel();
return 0;
@@ -699,6 +692,7 @@ out2:
}
}
out1:
+ put_disk(disk);
if (!old) {
bdev->bd_op = NULL;
if (owner)
@@ -772,15 +766,18 @@ int blkdev_put(struct block_device *bdev, int kind)
up(&bdev->bd_contains->bd_sem);
}
if (!bdev->bd_openers) {
+ struct gendisk *disk = bdev->bd_disk;
if (bdev->bd_op->owner)
__MOD_DEC_USE_COUNT(bdev->bd_op->owner);
bdev->bd_op = NULL;
bdev->bd_queue = NULL;
+ bdev->bd_disk = NULL;
bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
if (bdev != bdev->bd_contains) {
blkdev_put(bdev->bd_contains, BDEV_RAW);
bdev->bd_contains = NULL;
}
+ put_disk(disk);
}
unlock_kernel();
up(&bdev->bd_sem);
@@ -793,25 +790,6 @@ int blkdev_close(struct inode * inode, struct file * filp)
return blkdev_put(inode->i_bdev, BDEV_FILE);
}
-static int blkdev_reread_part(struct block_device *bdev)
-{
- int part;
- struct gendisk *disk = get_gendisk(bdev->bd_dev, &part);
- int res = 0;
-
- if (!disk || !disk->minor_shift || bdev != bdev->bd_contains)
- return -EINVAL;
- if (part)
- BUG();
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
- if (down_trylock(&bdev->bd_sem))
- return -EBUSY;
- res = rescan_partitions(disk, bdev);
- up(&bdev->bd_sem);
- return res;
-}
-
static ssize_t blkdev_file_write(struct file *file, const char *buf,
size_t count, loff_t *ppos)
{
@@ -820,51 +798,6 @@ static ssize_t blkdev_file_write(struct file *file, const char *buf,
return generic_file_write_nolock(file, &local_iov, 1, ppos);
}
-static int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd,
- unsigned long arg)
-{
- struct block_device *bdev = inode->i_bdev;
- int ret = -EINVAL;
- switch (cmd) {
- /*
- * deprecated, use the /proc/iosched interface instead
- */
- case BLKELVGET:
- case BLKELVSET:
- ret = -ENOTTY;
- break;
- case BLKRAGET:
- case BLKROGET:
- case BLKBSZGET:
- case BLKSSZGET:
- case BLKFRAGET:
- case BLKSECTGET:
- case BLKRASET:
- case BLKFRASET:
- case BLKBSZSET:
- case BLKPG:
- ret = blk_ioctl(bdev, cmd, arg);
- break;
- case BLKRRPART:
- ret = blkdev_reread_part(bdev);
- break;
- default:
- if (bdev->bd_op->ioctl)
- ret =bdev->bd_op->ioctl(inode, file, cmd, arg);
- if (ret == -EINVAL) {
- switch (cmd) {
- case BLKGETSIZE:
- case BLKGETSIZE64:
- case BLKFLSBUF:
- case BLKROSET:
- ret = blk_ioctl(bdev,cmd,arg);
- break;
- }
- }
- }
- return ret;
-}
-
struct address_space_operations def_blk_aops = {
.readpage = blkdev_readpage,
.writepage = blkdev_writepage,
diff --git a/fs/buffer.c b/fs/buffer.c
index d024b78c3e60..35d43421c3a8 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -811,6 +811,13 @@ int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
if (buffer_dirty(bh)) {
get_bh(bh);
spin_unlock(lock);
+ /*
+ * Ensure any pending I/O completes so that
+ * ll_rw_block() actually writes the current
+ * contents - it is a noop if I/O is still in
+ * flight on potentially older contents.
+ */
+ wait_on_buffer(bh);
ll_rw_block(WRITE, 1, &bh);
brelse(bh);
spin_lock(lock);
diff --git a/fs/dcache.c b/fs/dcache.c
index ef0871dbcdb2..d0fcfeba16ee 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -637,6 +637,7 @@ struct dentry * d_alloc(struct dentry * parent, const struct qstr *name)
dentry->d_op = NULL;
dentry->d_fsdata = NULL;
dentry->d_mounted = 0;
+ dentry->d_cookie = NULL;
INIT_LIST_HEAD(&dentry->d_hash);
INIT_LIST_HEAD(&dentry->d_lru);
INIT_LIST_HEAD(&dentry->d_subdirs);
diff --git a/fs/dcookies.c b/fs/dcookies.c
new file mode 100644
index 000000000000..d589103eb820
--- /dev/null
+++ b/fs/dcookies.c
@@ -0,0 +1,323 @@
+/*
+ * dcookies.c
+ *
+ * Copyright 2002 John Levon <levon@movementarian.org>
+ *
+ * Persistent cookie-path mappings. These are used by
+ * profilers to convert a per-task EIP value into something
+ * non-transitory that can be processed at a later date.
+ * This is done by locking the dentry/vfsmnt pair in the
+ * kernel until released by the tasks needing the persistent
+ * objects. The tag is simply an u32 that refers
+ * to the pair and can be looked up from userspace.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/mount.h>
+#include <linux/dcache.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/dcookies.h>
+#include <asm/uaccess.h>
+
+/* The dcookies are allocated from a kmem_cache and
+ * hashed onto a small number of lists. None of the
+ * code here is particularly performance critical
+ */
+struct dcookie_struct {
+ struct dentry * dentry;
+ struct vfsmount * vfsmnt;
+ struct list_head hash_list;
+};
+
+static LIST_HEAD(dcookie_users);
+static DECLARE_MUTEX(dcookie_sem);
+static kmem_cache_t * dcookie_cache;
+static struct list_head * dcookie_hashtable;
+static size_t hash_size;
+
+static inline int is_live(void)
+{
+ return !(list_empty(&dcookie_users));
+}
+
+
+/* The dentry is locked, its address will do for the cookie */
+static inline u32 dcookie_value(struct dcookie_struct * dcs)
+{
+ return (u32)dcs->dentry;
+}
+
+
+static size_t dcookie_hash(u32 dcookie)
+{
+ return (dcookie >> 2) & (hash_size - 1);
+}
+
+
+static struct dcookie_struct * find_dcookie(u32 dcookie)
+{
+ struct dcookie_struct * found = 0;
+ struct dcookie_struct * dcs;
+ struct list_head * pos;
+ struct list_head * list;
+
+ list = dcookie_hashtable + dcookie_hash(dcookie);
+
+ list_for_each(pos, list) {
+ dcs = list_entry(pos, struct dcookie_struct, hash_list);
+ if (dcookie_value(dcs) == dcookie) {
+ found = dcs;
+ break;
+ }
+ }
+
+ return found;
+}
+
+
+static void hash_dcookie(struct dcookie_struct * dcs)
+{
+ struct list_head * list = dcookie_hashtable + dcookie_hash(dcookie_value(dcs));
+ list_add(&dcs->hash_list, list);
+}
+
+
+static struct dcookie_struct * alloc_dcookie(struct dentry * dentry,
+ struct vfsmount * vfsmnt)
+{
+ struct dcookie_struct * dcs = kmem_cache_alloc(dcookie_cache, GFP_KERNEL);
+ if (!dcs)
+ return NULL;
+
+ atomic_inc(&dentry->d_count);
+ atomic_inc(&vfsmnt->mnt_count);
+ dentry->d_cookie = dcs;
+
+ dcs->dentry = dentry;
+ dcs->vfsmnt = vfsmnt;
+ hash_dcookie(dcs);
+
+ return dcs;
+}
+
+
+/* This is the main kernel-side routine that retrieves the cookie
+ * value for a dentry/vfsmnt pair.
+ */
+int get_dcookie(struct dentry * dentry, struct vfsmount * vfsmnt,
+ u32 * cookie)
+{
+ int err = 0;
+ struct dcookie_struct * dcs;
+
+ down(&dcookie_sem);
+
+ if (!is_live()) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ dcs = dentry->d_cookie;
+
+ if (!dcs)
+ dcs = alloc_dcookie(dentry, vfsmnt);
+
+ if (!dcs) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ *cookie = dcookie_value(dcs);
+
+out:
+ up(&dcookie_sem);
+ return err;
+}
+
+
+/* And here is where the userspace process can look up the cookie value
+ * to retrieve the path.
+ */
+asmlinkage int sys_lookup_dcookie(u32 cookie, char * buf, size_t len)
+{
+ char * kbuf;
+ char * path;
+ int err = -EINVAL;
+ size_t pathlen;
+ struct dcookie_struct * dcs;
+
+ /* we could leak path information to users
+ * without dir read permission without this
+ */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ down(&dcookie_sem);
+
+ if (!is_live()) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (!(dcs = find_dcookie(cookie)))
+ goto out;
+
+ err = -ENOMEM;
+ kbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!kbuf)
+ goto out;
+ memset(kbuf, 0, PAGE_SIZE);
+
+ /* FIXME: (deleted) ? */
+ path = d_path(dcs->dentry, dcs->vfsmnt, kbuf, PAGE_SIZE);
+
+ err = 0;
+
+ pathlen = kbuf + PAGE_SIZE - path;
+ if (len > pathlen)
+ len = pathlen;
+
+ if (copy_to_user(buf, path, len))
+ err = -EFAULT;
+
+ kfree(kbuf);
+out:
+ up(&dcookie_sem);
+ return err;
+}
+
+
+static int dcookie_init(void)
+{
+ struct list_head * d;
+ unsigned int i, hash_bits;
+ int err = -ENOMEM;
+
+ dcookie_cache = kmem_cache_create("dcookie_cache",
+ sizeof(struct dcookie_struct),
+ 0, 0, NULL, NULL);
+
+ if (!dcookie_cache)
+ goto out;
+
+ dcookie_hashtable = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!dcookie_hashtable)
+ goto out_kmem;
+
+ err = 0;
+
+ /*
+ * Find the power-of-two list-heads that can fit into the allocation..
+ * We don't guarantee that "sizeof(struct list_head)" is necessarily
+ * a power-of-two.
+ */
+ hash_size = PAGE_SIZE / sizeof(struct list_head);
+ hash_bits = 0;
+ do {
+ hash_bits++;
+ } while ((hash_size >> hash_bits) != 0);
+ hash_bits--;
+
+ /*
+ * Re-calculate the actual number of entries and the mask
+ * from the number of bits we can fit.
+ */
+ hash_size = 1UL << hash_bits;
+
+ /* And initialize the newly allocated array */
+ d = dcookie_hashtable;
+ i = hash_size;
+ do {
+ INIT_LIST_HEAD(d);
+ d++;
+ i--;
+ } while (i);
+
+out:
+ return err;
+out_kmem:
+ kmem_cache_destroy(dcookie_cache);
+ goto out;
+}
+
+
+static void free_dcookie(struct dcookie_struct * dcs)
+{
+ dcs->dentry->d_cookie = NULL;
+ dput(dcs->dentry);
+ mntput(dcs->vfsmnt);
+ kmem_cache_free(dcookie_cache, dcs);
+}
+
+
+static void dcookie_exit(void)
+{
+ struct list_head * list;
+ struct list_head * pos;
+ struct list_head * pos2;
+ struct dcookie_struct * dcs;
+ size_t i;
+
+ for (i = 0; i < hash_size; ++i) {
+ list = dcookie_hashtable + i;
+ list_for_each_safe(pos, pos2, list) {
+ dcs = list_entry(pos, struct dcookie_struct, hash_list);
+ list_del(&dcs->hash_list);
+ free_dcookie(dcs);
+ }
+ }
+
+ kfree(dcookie_hashtable);
+ kmem_cache_destroy(dcookie_cache);
+}
+
+
+struct dcookie_user {
+ struct list_head next;
+};
+
+struct dcookie_user * dcookie_register(void)
+{
+ struct dcookie_user * user;
+
+ down(&dcookie_sem);
+
+ user = kmalloc(sizeof(struct dcookie_user), GFP_KERNEL);
+ if (!user)
+ goto out;
+
+ if (!is_live() && dcookie_init())
+ goto out_free;
+
+ list_add(&user->next, &dcookie_users);
+
+out:
+ up(&dcookie_sem);
+ return user;
+out_free:
+ kfree(user);
+ user = NULL;
+ goto out;
+}
+
+
+void dcookie_unregister(struct dcookie_user * user)
+{
+ down(&dcookie_sem);
+
+ list_del(&user->next);
+ kfree(user);
+
+ if (!is_live())
+ dcookie_exit();
+
+ up(&dcookie_sem);
+}
+
+EXPORT_SYMBOL_GPL(dcookie_register);
+EXPORT_SYMBOL_GPL(dcookie_unregister);
+EXPORT_SYMBOL_GPL(get_dcookie);
diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
index 72b3148cb038..df2fa4d8d0fd 100644
--- a/fs/jfs/jfs_dtree.c
+++ b/fs/jfs/jfs_dtree.c
@@ -490,11 +490,11 @@ static void modify_index(tid_t tid, struct inode *ip, u32 index, s64 bn,
}
/*
- * get_index()
+ * read_index()
*
* reads a directory table slot
*/
-static int get_index(struct inode *ip, u32 index,
+static int read_index(struct inode *ip, u32 index,
struct dir_table_slot * dirtab_slot)
{
struct metapage *mp = 0;
@@ -2978,7 +2978,7 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
return 0;
}
repeat:
- rc = get_index(ip, dir_index, &dirtab_slot);
+ rc = read_index(ip, dir_index, &dirtab_slot);
if (rc) {
filp->f_pos = DIREND;
return rc;
diff --git a/fs/jfs/jfs_mount.c b/fs/jfs/jfs_mount.c
index 223d700da927..7859b2f22d28 100644
--- a/fs/jfs/jfs_mount.c
+++ b/fs/jfs/jfs_mount.c
@@ -478,12 +478,12 @@ int readSuper(struct super_block *sb, struct buffer_head **bpp)
{
/* read in primary superblock */
*bpp = sb_bread(sb, SUPER1_OFF >> sb->s_blocksize_bits);
- if (bpp)
+ if (*bpp)
return 0;
/* read in secondary/replicated superblock */
*bpp = sb_bread(sb, SUPER2_OFF >> sb->s_blocksize_bits);
- if (bpp)
+ if (*bpp)
return 0;
return -EIO;
diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
index 836322c2be06..c098a522553b 100644
--- a/fs/nfs/Makefile
+++ b/fs/nfs/Makefile
@@ -8,6 +8,7 @@ nfs-y := dir.o file.o flushd.o inode.o nfs2xdr.o pagelist.o \
proc.o read.o symlink.o unlink.o write.o
nfs-$(CONFIG_ROOT_NFS) += nfsroot.o mount_clnt.o
nfs-$(CONFIG_NFS_V3) += nfs3proc.o nfs3xdr.o
+nfs-$(CONFIG_NFS_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o
nfs-$(CONFIG_NFS_DIRECTIO) += direct.o
nfs-objs := $(nfs-y)
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 3443f647ed2f..f02b7c9c7f36 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -259,6 +259,12 @@ nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
if (!inode)
return -EINVAL;
+ /* This will be in a forthcoming patch. */
+ if (NFS_PROTO(inode)->version == 4) {
+ printk(KERN_INFO "NFS: file locking over NFSv4 is not yet supported\n");
+ return -EIO;
+ }
+
/* No mandatory locks over NFS */
if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
return -ENOLCK;
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index f7e1e442c9e7..39027f2af310 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -28,6 +28,7 @@
#include <linux/sunrpc/stats.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
+#include <linux/nfs4_mount.h>
#include <linux/nfs_flushd.h>
#include <linux/lockd/bind.h>
#include <linux/smp_lock.h>
@@ -76,8 +77,13 @@ static struct rpc_version * nfs_version[] = {
NULL,
NULL,
&nfs_version2,
-#ifdef CONFIG_NFS_V3
+#if defined(CONFIG_NFS_V3)
&nfs_version3,
+#elif defined(CONFIG_NFS_V4)
+ NULL,
+#endif
+#if defined(CONFIG_NFS_V4)
+ &nfs_version4,
#endif
};
@@ -157,6 +163,7 @@ nfs_put_super(struct super_block *sb)
lockd_down(); /* release rpc.lockd */
rpciod_down(); /* release rpciod */
+ destroy_nfsv4_state(server);
kfree(server->hostname);
}
@@ -234,6 +241,120 @@ nfs_get_root(struct super_block *sb, struct nfs_fh *rootfh)
}
/*
+ * Do NFS version-independent mount processing, and sanity checking
+ */
+int nfs_sb_init(struct super_block *sb)
+{
+ struct nfs_server *server;
+ struct inode *root_inode = NULL;
+ struct nfs_fattr fattr;
+ struct nfs_fsinfo fsinfo = {
+ .fattr = &fattr,
+ };
+ struct nfs_pathconf pathinfo = {
+ .fattr = &fattr,
+ };
+
+ /* We probably want something more informative here */
+ snprintf(sb->s_id, sizeof(sb->s_id), "%x:%x", MAJOR(sb->s_dev), MINOR(sb->s_dev));
+
+ server = NFS_SB(sb);
+
+ sb->s_magic = NFS_SUPER_MAGIC;
+ sb->s_op = &nfs_sops;
+ INIT_LIST_HEAD(&server->lru_read);
+ INIT_LIST_HEAD(&server->lru_dirty);
+ INIT_LIST_HEAD(&server->lru_commit);
+ INIT_LIST_HEAD(&server->lru_busy);
+
+ /* Did getting the root inode fail? */
+ root_inode = nfs_get_root(sb, &server->fh);
+ if (!root_inode)
+ goto out_no_root;
+ sb->s_root = d_alloc_root(root_inode);
+ if (!sb->s_root)
+ goto out_no_root;
+
+ sb->s_root->d_op = &nfs_dentry_operations;
+
+ /* Get some general file system info */
+ if (server->rpc_ops->fsinfo(server, &server->fh, &fsinfo) < 0) {
+ printk(KERN_NOTICE "NFS: cannot retrieve file system info.\n");
+ goto out_no_root;
+ }
+ if (server->namelen == 0 &&
+ server->rpc_ops->pathconf(server, &server->fh, &pathinfo) >= 0)
+ server->namelen = pathinfo.max_namelen;
+ /* Work out a lot of parameters */
+ if (server->rsize == 0)
+ server->rsize = nfs_block_size(fsinfo.rtpref, NULL);
+ if (server->wsize == 0)
+ server->wsize = nfs_block_size(fsinfo.wtpref, NULL);
+ if (sb->s_blocksize == 0) {
+ if (fsinfo.wtmult == 0) {
+ sb->s_blocksize = 512;
+ sb->s_blocksize_bits = 9;
+ } else
+ sb->s_blocksize = nfs_block_bits(fsinfo.wtmult,
+ &sb->s_blocksize_bits);
+ }
+
+ if (fsinfo.rtmax >= 512 && server->rsize > fsinfo.rtmax)
+ server->rsize = nfs_block_size(fsinfo.rtmax, NULL);
+ if (fsinfo.wtmax >= 512 && server->wsize > fsinfo.wtmax)
+ server->wsize = nfs_block_size(fsinfo.wtmax, NULL);
+
+ server->rpages = (server->rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ if (server->rpages > NFS_READ_MAXIOV) {
+ server->rpages = NFS_READ_MAXIOV;
+ server->rsize = server->rpages << PAGE_CACHE_SHIFT;
+ }
+
+ server->wpages = (server->wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ if (server->wpages > NFS_WRITE_MAXIOV) {
+ server->wpages = NFS_WRITE_MAXIOV;
+ server->wsize = server->wpages << PAGE_CACHE_SHIFT;
+ }
+
+ server->dtsize = nfs_block_size(fsinfo.dtpref, NULL);
+ if (server->dtsize > PAGE_CACHE_SIZE)
+ server->dtsize = PAGE_CACHE_SIZE;
+ if (server->dtsize > server->rsize)
+ server->dtsize = server->rsize;
+
+ if (server->flags & NFS_MOUNT_NOAC) {
+ server->acregmin = server->acregmax = 0;
+ server->acdirmin = server->acdirmax = 0;
+ sb->s_flags |= MS_SYNCHRONOUS;
+ }
+
+ sb->s_maxbytes = fsinfo.maxfilesize;
+ if (sb->s_maxbytes > MAX_LFS_FILESIZE)
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
+
+ /* Fire up the writeback cache */
+ if (nfs_reqlist_alloc(server) < 0) {
+ printk(KERN_NOTICE "NFS: cannot initialize writeback cache.\n");
+ goto failure_kill_reqlist;
+ }
+
+ /* We're airborne Set socket buffersize */
+ rpc_setbufsize(server->client, server->wsize + 100, server->rsize + 100);
+ return 0;
+ /* Yargs. It didn't work out. */
+failure_kill_reqlist:
+ nfs_reqlist_exit(server);
+out_free_all:
+ if (root_inode)
+ iput(root_inode);
+ nfs_reqlist_free(server);
+ return -EINVAL;
+out_no_root:
+ printk("nfs_read_super: get root inode failed\n");
+ goto out_free_all;
+}
+
+/*
* The way this works is that the mount process passes a structure
* in the data argument which contains the server's IP address
* and the root file handle obtained from the server's mount
@@ -244,29 +365,20 @@ int nfs_fill_super(struct super_block *sb, struct nfs_mount_data *data, int sile
struct nfs_server *server;
struct rpc_xprt *xprt = NULL;
struct rpc_clnt *clnt = NULL;
- struct inode *root_inode = NULL;
- rpc_authflavor_t authflavor;
struct rpc_timeout timeparms;
- struct nfs_fsinfo fsinfo;
- int tcp, version, maxlen;
+ int tcp, err = -EIO;
- /* We probably want something more informative here */
- snprintf(sb->s_id, sizeof(sb->s_id), "%x:%x", MAJOR(sb->s_dev), MINOR(sb->s_dev));
-
- sb->s_magic = NFS_SUPER_MAGIC;
- sb->s_op = &nfs_sops;
- sb->s_blocksize_bits = 0;
- sb->s_blocksize = nfs_block_size(data->bsize, &sb->s_blocksize_bits);
server = NFS_SB(sb);
- server->rsize = nfs_block_size(data->rsize, NULL);
- server->wsize = nfs_block_size(data->wsize, NULL);
+ sb->s_blocksize_bits = 0;
+ sb->s_blocksize = 0;
+ if (data->bsize)
+ sb->s_blocksize = nfs_block_size(data->bsize, &sb->s_blocksize_bits);
+ if (data->rsize)
+ server->rsize = nfs_block_size(data->rsize, NULL);
+ if (data->wsize)
+ server->wsize = nfs_block_size(data->wsize, NULL);
server->flags = data->flags & NFS_MOUNT_FLAGMASK;
- if (data->flags & NFS_MOUNT_NOAC) {
- data->acregmin = data->acregmax = 0;
- data->acdirmin = data->acdirmax = 0;
- sb->s_flags |= MS_SYNCHRONOUS;
- }
server->acregmin = data->acregmin*HZ;
server->acregmax = data->acregmax*HZ;
server->acdirmin = data->acdirmin*HZ;
@@ -275,34 +387,26 @@ int nfs_fill_super(struct super_block *sb, struct nfs_mount_data *data, int sile
server->namelen = data->namlen;
server->hostname = kmalloc(strlen(data->hostname) + 1, GFP_KERNEL);
if (!server->hostname)
- goto out_unlock;
+ goto out_fail;
strcpy(server->hostname, data->hostname);
- INIT_LIST_HEAD(&server->lru_read);
- INIT_LIST_HEAD(&server->lru_dirty);
- INIT_LIST_HEAD(&server->lru_commit);
- INIT_LIST_HEAD(&server->lru_busy);
- nfsv3_try_again:
- server->caps = 0;
/* Check NFS protocol revision and initialize RPC op vector
* and file handle pool. */
- if (data->flags & NFS_MOUNT_VER3) {
+ if (server->flags & NFS_MOUNT_VER3) {
#ifdef CONFIG_NFS_V3
server->rpc_ops = &nfs_v3_clientops;
- version = 3;
server->caps |= NFS_CAP_READDIRPLUS;
if (data->version < 4) {
printk(KERN_NOTICE "NFS: NFSv3 not supported by mount program.\n");
- goto out_unlock;
+ goto out_fail;
}
#else
printk(KERN_NOTICE "NFS: NFSv3 not supported.\n");
- goto out_unlock;
+ goto out_fail;
#endif
} else {
server->rpc_ops = &nfs_v2_clientops;
- version = 2;
- }
+ }
/* Which protocol do we use? */
tcp = (data->flags & NFS_MOUNT_TCP);
@@ -321,155 +425,54 @@ int nfs_fill_super(struct super_block *sb, struct nfs_mount_data *data, int sile
/* Now create transport and client */
xprt = xprt_create_proto(tcp? IPPROTO_TCP : IPPROTO_UDP,
&server->addr, &timeparms);
- if (xprt == NULL)
- goto out_no_xprt;
-
- /* Choose authentication flavor */
- authflavor = RPC_AUTH_UNIX;
- if (data->flags & NFS_MOUNT_SECURE)
- authflavor = RPC_AUTH_DES;
- else if (data->flags & NFS_MOUNT_KERBEROS)
- authflavor = RPC_AUTH_KRB;
-
+ if (xprt == NULL) {
+ printk(KERN_WARNING "NFS: cannot create RPC transport.\n");
+ goto out_fail;
+ }
clnt = rpc_create_client(xprt, server->hostname, &nfs_program,
- version, authflavor);
- if (clnt == NULL)
- goto out_no_client;
+ server->rpc_ops->version, RPC_AUTH_UNIX);
+ if (clnt == NULL) {
+ printk(KERN_WARNING "NFS: cannot create RPC client.\n");
+ xprt_destroy(xprt);
+ goto out_fail;
+ }
- clnt->cl_intr = (data->flags & NFS_MOUNT_INTR)? 1 : 0;
- clnt->cl_softrtry = (data->flags & NFS_MOUNT_SOFT)? 1 : 0;
- clnt->cl_droppriv = (data->flags & NFS_MOUNT_BROKEN_SUID) ? 1 : 0;
+ clnt->cl_intr = (server->flags & NFS_MOUNT_INTR) ? 1 : 0;
+ clnt->cl_softrtry = (server->flags & NFS_MOUNT_SOFT) ? 1 : 0;
+ clnt->cl_droppriv = (server->flags & NFS_MOUNT_BROKEN_SUID) ? 1 : 0;
clnt->cl_chatty = 1;
server->client = clnt;
/* Fire up rpciod if not yet running */
- if (rpciod_up() != 0)
- goto out_no_iod;
-
- /*
- * Keep the super block locked while we try to get
- * the root fh attributes.
- */
- /* Did getting the root inode fail? */
- if (!(root_inode = nfs_get_root(sb, &server->fh))
- && (data->flags & NFS_MOUNT_VER3)) {
- data->flags &= ~NFS_MOUNT_VER3;
- rpciod_down();
- rpc_shutdown_client(server->client);
- goto nfsv3_try_again;
+ if (rpciod_up() != 0) {
+ printk(KERN_WARNING "NFS: couldn't start rpciod!\n");
+ goto out_shutdown;
}
- if (!root_inode)
- goto out_no_root;
- sb->s_root = d_alloc_root(root_inode);
- if (!sb->s_root)
- goto out_no_root;
+ err = nfs_sb_init(sb);
+ if (err != 0)
+ goto out_noinit;
- sb->s_root->d_op = &nfs_dentry_operations;
-
- /* Get some general file system info */
- if (server->rpc_ops->statfs(server, &server->fh, &fsinfo) >= 0) {
- if (server->namelen == 0)
- server->namelen = fsinfo.namelen;
+ if (server->flags & NFS_MOUNT_VER3) {
+ if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN)
+ server->namelen = NFS3_MAXNAMLEN;
} else {
- printk(KERN_NOTICE "NFS: cannot retrieve file system info.\n");
- goto out_no_root;
- }
-
- /* Work out a lot of parameters */
- if (data->rsize == 0)
- server->rsize = nfs_block_size(fsinfo.rtpref, NULL);
- if (data->wsize == 0)
- server->wsize = nfs_block_size(fsinfo.wtpref, NULL);
- /* NFSv3: we don't have bsize, but rather rtmult and wtmult... */
- if (!fsinfo.bsize)
- fsinfo.bsize = (fsinfo.rtmult>fsinfo.wtmult) ? fsinfo.rtmult : fsinfo.wtmult;
- /* Also make sure we don't go below rsize/wsize since
- * RPC calls are expensive */
- if (fsinfo.bsize < server->rsize)
- fsinfo.bsize = server->rsize;
- if (fsinfo.bsize < server->wsize)
- fsinfo.bsize = server->wsize;
-
- if (data->bsize == 0)
- sb->s_blocksize = nfs_block_bits(fsinfo.bsize, &sb->s_blocksize_bits);
- if (server->rsize > fsinfo.rtmax)
- server->rsize = fsinfo.rtmax;
- if (server->wsize > fsinfo.wtmax)
- server->wsize = fsinfo.wtmax;
-
- server->rpages = (server->rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- if (server->rpages > NFS_READ_MAXIOV) {
- server->rpages = NFS_READ_MAXIOV;
- server->rsize = server->rpages << PAGE_CACHE_SHIFT;
+ if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN)
+ server->namelen = NFS2_MAXNAMLEN;
}
- server->wpages = (server->wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- if (server->wpages > NFS_WRITE_MAXIOV) {
- server->wpages = NFS_WRITE_MAXIOV;
- server->wsize = server->wpages << PAGE_CACHE_SHIFT;
- }
-
- server->dtsize = nfs_block_size(fsinfo.dtpref, NULL);
- if (server->dtsize > PAGE_CACHE_SIZE)
- server->dtsize = PAGE_CACHE_SIZE;
- if (server->dtsize > server->rsize)
- server->dtsize = server->rsize;
-
- maxlen = (version == 2) ? NFS2_MAXNAMLEN : NFS3_MAXNAMLEN;
-
- if (server->namelen == 0 || server->namelen > maxlen)
- server->namelen = maxlen;
-
- sb->s_maxbytes = fsinfo.maxfilesize;
- if (sb->s_maxbytes > MAX_LFS_FILESIZE)
- sb->s_maxbytes = MAX_LFS_FILESIZE;
-
- /* Fire up the writeback cache */
- if (nfs_reqlist_alloc(server) < 0) {
- printk(KERN_NOTICE "NFS: cannot initialize writeback cache.\n");
- goto failure_kill_reqlist;
- }
-
- /* We're airborne Set socket buffersize */
- rpc_setbufsize(clnt, server->wsize + 100, server->rsize + 100);
-
/* Check whether to start the lockd process */
if (!(server->flags & NFS_MOUNT_NONLM))
lockd_up();
return 0;
-
- /* Yargs. It didn't work out. */
- failure_kill_reqlist:
- nfs_reqlist_exit(server);
-out_no_root:
- printk("nfs_read_super: get root inode failed\n");
- iput(root_inode);
+out_noinit:
rpciod_down();
- goto out_shutdown;
-
-out_no_iod:
- printk(KERN_WARNING "NFS: couldn't start rpciod!\n");
out_shutdown:
rpc_shutdown_client(server->client);
- goto out_free_host;
-
-out_no_client:
- printk(KERN_WARNING "NFS: cannot create RPC client.\n");
- xprt_destroy(xprt);
- goto out_free_host;
-
-out_no_xprt:
- printk(KERN_WARNING "NFS: cannot create RPC transport.\n");
-
-out_free_host:
- nfs_reqlist_free(server);
- kfree(server->hostname);
-out_unlock:
- goto out_fail;
-
out_fail:
- return -EINVAL;
+ if (server->hostname)
+ kfree(server->hostname);
+ return err;
}
static int
@@ -478,29 +481,30 @@ nfs_statfs(struct super_block *sb, struct statfs *buf)
struct nfs_server *server = NFS_SB(sb);
unsigned char blockbits;
unsigned long blockres;
- struct nfs_fsinfo res;
+ struct nfs_fh *rootfh = NFS_FH(sb->s_root->d_inode);
+ struct nfs_fattr fattr;
+ struct nfs_fsstat res = {
+ .fattr = &fattr,
+ };
int error;
lock_kernel();
- error = server->rpc_ops->statfs(server, NFS_FH(sb->s_root->d_inode), &res);
+ error = server->rpc_ops->statfs(server, rootfh, &res);
buf->f_type = NFS_SUPER_MAGIC;
if (error < 0)
goto out_err;
- if (res.bsize == 0)
- res.bsize = sb->s_blocksize;
- buf->f_bsize = nfs_block_bits(res.bsize, &blockbits);
+ buf->f_bsize = sb->s_blocksize;
+ blockbits = sb->s_blocksize_bits;
blockres = (1 << blockbits) - 1;
buf->f_blocks = (res.tbytes + blockres) >> blockbits;
buf->f_bfree = (res.fbytes + blockres) >> blockbits;
buf->f_bavail = (res.abytes + blockres) >> blockbits;
buf->f_files = res.tfiles;
buf->f_ffree = res.afiles;
- if (res.namelen == 0 || res.namelen > server->namelen)
- res.namelen = server->namelen;
- buf->f_namelen = res.namelen;
+ buf->f_namelen = server->namelen;
out:
unlock_kernel();
@@ -1286,6 +1290,239 @@ static struct file_system_type nfs_fs_type = {
.fs_flags = FS_ODD_RENAME,
};
+#ifdef CONFIG_NFS_V4
+
+static int nfs4_fill_super(struct super_block *sb, struct nfs4_mount_data *data, int silent)
+{
+ struct nfs_server *server;
+ struct rpc_xprt *xprt = NULL;
+ struct rpc_clnt *clnt = NULL;
+ struct rpc_timeout timeparms;
+ rpc_authflavor_t authflavour;
+ int proto, err = -EIO;
+
+ sb->s_blocksize_bits = 0;
+ sb->s_blocksize = 0;
+ server = NFS_SB(sb);
+ if (data->rsize != 0)
+ server->rsize = nfs_block_size(data->rsize, NULL);
+ if (data->wsize != 0)
+ server->wsize = nfs_block_size(data->wsize, NULL);
+ server->flags = data->flags & NFS_MOUNT_FLAGMASK;
+
+ /* NFSv4 doesn't use NLM locking */
+ server->flags |= NFS_MOUNT_NONLM;
+
+ server->acregmin = data->acregmin*HZ;
+ server->acregmax = data->acregmax*HZ;
+ server->acdirmin = data->acdirmin*HZ;
+ server->acdirmax = data->acdirmax*HZ;
+
+ server->rpc_ops = &nfs_v4_clientops;
+ /* Initialize timeout values */
+
+ timeparms.to_initval = data->timeo * HZ / 10;
+ timeparms.to_retries = data->retrans;
+ timeparms.to_exponential = 1;
+ if (!timeparms.to_retries)
+ timeparms.to_retries = 5;
+
+ proto = data->proto;
+ /* Which IP protocol do we use? */
+ switch (proto) {
+ case IPPROTO_TCP:
+ timeparms.to_maxval = RPC_MAX_TCP_TIMEOUT;
+ if (!timeparms.to_initval)
+ timeparms.to_initval = 600 * HZ / 10;
+ break;
+ case IPPROTO_UDP:
+ timeparms.to_maxval = RPC_MAX_UDP_TIMEOUT;
+ if (!timeparms.to_initval)
+ timeparms.to_initval = 11 * HZ / 10;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Now create transport and client */
+ xprt = xprt_create_proto(proto, &server->addr, &timeparms);
+ if (xprt == NULL) {
+ printk(KERN_WARNING "NFS: cannot create RPC transport.\n");
+ goto out_fail;
+ }
+
+ authflavour = RPC_AUTH_UNIX;
+ if (data->auth_flavourlen != 0) {
+ if (data->auth_flavourlen > 1)
+ printk(KERN_INFO "NFS: cannot yet deal with multiple auth flavours.\n");
+		if (copy_from_user(&authflavour, data->auth_flavours, sizeof(authflavour))) {
+ err = -EFAULT;
+ goto out_fail;
+ }
+ }
+ clnt = rpc_create_client(xprt, server->hostname, &nfs_program,
+ server->rpc_ops->version, authflavour);
+ if (clnt == NULL) {
+ printk(KERN_WARNING "NFS: cannot create RPC client.\n");
+ xprt_destroy(xprt);
+ goto out_fail;
+ }
+
+ clnt->cl_intr = (server->flags & NFS4_MOUNT_INTR) ? 1 : 0;
+ clnt->cl_softrtry = (server->flags & NFS4_MOUNT_SOFT) ? 1 : 0;
+ clnt->cl_chatty = 1;
+ server->client = clnt;
+
+ /* Fire up rpciod if not yet running */
+ if (rpciod_up() != 0) {
+ printk(KERN_WARNING "NFS: couldn't start rpciod!\n");
+ goto out_shutdown;
+ }
+
+ if (create_nfsv4_state(server, data))
+ goto out_shutdown;
+
+ err = nfs_sb_init(sb);
+ if (err == 0)
+ return 0;
+ rpciod_down();
+ destroy_nfsv4_state(server);
+out_shutdown:
+ rpc_shutdown_client(server->client);
+out_fail:
+ return err;
+}
+
+static int nfs4_compare_super(struct super_block *sb, void *data)
+{
+ struct nfs_server *server = data;
+ struct nfs_server *old = NFS_SB(sb);
+
+ if (strcmp(server->hostname, old->hostname) != 0)
+ return 0;
+ if (strcmp(server->mnt_path, old->mnt_path) != 0)
+ return 0;
+ return 1;
+}
+
+static void *
+nfs_copy_user_string(char *dst, struct nfs_string *src, int maxlen)
+{
+ void *p = NULL;
+
+ if (!src->len)
+ return ERR_PTR(-EINVAL);
+ if (src->len < maxlen)
+ maxlen = src->len;
+ if (dst == NULL) {
+ p = dst = kmalloc(maxlen + 1, GFP_KERNEL);
+ if (p == NULL)
+ return ERR_PTR(-ENOMEM);
+ }
+ if (copy_from_user(dst, src->data, maxlen)) {
+ if (p != NULL)
+ kfree(p);
+ return ERR_PTR(-EFAULT);
+ }
+ dst[maxlen] = '\0';
+ return dst;
+}
+
+static struct super_block *nfs4_get_sb(struct file_system_type *fs_type,
+ int flags, char *dev_name, void *raw_data)
+{
+ int error;
+ struct nfs_server *server;
+ struct super_block *s;
+ struct nfs4_mount_data *data = raw_data;
+ void *p;
+
+ if (!data) {
+		printk("nfs4_get_sb: missing data argument\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ server = kmalloc(sizeof(struct nfs_server), GFP_KERNEL);
+ if (!server)
+ return ERR_PTR(-ENOMEM);
+ memset(server, 0, sizeof(struct nfs_server));
+
+ if (data->version != NFS4_MOUNT_VERSION) {
+ printk("nfs warning: mount version %s than kernel\n",
+			data->version < NFS4_MOUNT_VERSION ? "older" : "newer");
+ }
+
+ p = nfs_copy_user_string(NULL, &data->hostname, 256);
+ if (IS_ERR(p))
+ goto out_err;
+ server->hostname = p;
+
+ p = nfs_copy_user_string(NULL, &data->mnt_path, 1024);
+ if (IS_ERR(p))
+ goto out_err;
+ server->mnt_path = p;
+
+ p = nfs_copy_user_string(server->ip_addr, &data->client_addr,
+ sizeof(server->ip_addr));
+ if (IS_ERR(p))
+ goto out_err;
+
+ /* We now require that the mount process passes the remote address */
+ if (data->host_addrlen != sizeof(server->addr)) {
+ s = ERR_PTR(-EINVAL);
+ goto out_free;
+ }
+ if (copy_from_user(&server->addr, data->host_addr, sizeof(server->addr))) {
+ s = ERR_PTR(-EFAULT);
+ goto out_free;
+ }
+ if (server->addr.sin_family != AF_INET ||
+ server->addr.sin_addr.s_addr == INADDR_ANY) {
+ printk("NFS: mount program didn't pass remote IP address!\n");
+ s = ERR_PTR(-EINVAL);
+ goto out_free;
+ }
+
+ s = sget(fs_type, nfs4_compare_super, nfs_set_super, server);
+
+ if (IS_ERR(s) || s->s_root)
+ goto out_free;
+
+ s->s_flags = flags;
+
+ error = nfs4_fill_super(s, data, flags & MS_VERBOSE ? 1 : 0);
+ if (error) {
+ up_write(&s->s_umount);
+ deactivate_super(s);
+ return ERR_PTR(error);
+ }
+ s->s_flags |= MS_ACTIVE;
+ return s;
+out_err:
+ s = (struct super_block *)p;
+out_free:
+ if (server->mnt_path)
+ kfree(server->mnt_path);
+ if (server->hostname)
+ kfree(server->hostname);
+ kfree(server);
+ return s;
+}
+
+static struct file_system_type nfs4_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "nfs4",
+ .get_sb = nfs4_get_sb,
+ .kill_sb = nfs_kill_super,
+ .fs_flags = FS_ODD_RENAME,
+};
+#define register_nfs4fs() register_filesystem(&nfs4_fs_type)
+#define unregister_nfs4fs() unregister_filesystem(&nfs4_fs_type)
+#else
+#define register_nfs4fs() (0)
+#define unregister_nfs4fs()
+#endif
+
extern int nfs_init_nfspagecache(void);
extern void nfs_destroy_nfspagecache(void);
extern int nfs_init_readpagecache(void);
@@ -1377,6 +1614,8 @@ static int __init init_nfs_fs(void)
err = register_filesystem(&nfs_fs_type);
if (err)
goto out;
+ if ((err = register_nfs4fs()) != 0)
+ goto out;
return 0;
out:
rpc_proc_unregister("nfs");
@@ -1401,6 +1640,7 @@ static void __exit exit_nfs_fs(void)
rpc_proc_unregister("nfs");
#endif
unregister_filesystem(&nfs_fs_type);
+ unregister_nfs4fs();
}
/* Not quite true; I just maintain it */
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index 8dc92b8b3a1c..8e652afdfea4 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -596,37 +596,18 @@ nfs_xdr_writeres(struct rpc_rqst *req, u32 *p, struct nfs_writeres *res)
* Decode STATFS reply
*/
static int
-nfs_xdr_statfsres(struct rpc_rqst *req, u32 *p, struct nfs_fsinfo *res)
+nfs_xdr_statfsres(struct rpc_rqst *req, u32 *p, struct nfs2_fsstat *res)
{
int status;
- u32 xfer_size;
if ((status = ntohl(*p++)))
return -nfs_stat_to_errno(status);
- /* For NFSv2, we more or less have to guess the preferred
- * read/write/readdir sizes from the single 'transfer size'
- * value.
- */
- xfer_size = ntohl(*p++); /* tsize */
- res->rtmax = 8 * 1024;
- res->rtpref = xfer_size;
- res->rtmult = xfer_size;
- res->wtmax = 8 * 1024;
- res->wtpref = xfer_size;
- res->wtmult = xfer_size;
- res->dtpref = PAGE_CACHE_SIZE;
- res->maxfilesize = 0x7FFFFFFF; /* just a guess */
+ res->tsize = ntohl(*p++);
res->bsize = ntohl(*p++);
-
- res->tbytes = ntohl(*p++) * res->bsize;
- res->fbytes = ntohl(*p++) * res->bsize;
- res->abytes = ntohl(*p++) * res->bsize;
- res->tfiles = 0;
- res->ffiles = 0;
- res->afiles = 0;
- res->namelen = 0;
-
+ res->blocks = ntohl(*p++);
+ res->bfree = ntohl(*p++);
+ res->bavail = ntohl(*p++);
return 0;
}
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 1ddb51374cba..790c27ead44f 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -639,24 +639,42 @@ nfs3_proc_mknod(struct inode *dir, struct qstr *name, struct iattr *sattr,
return status;
}
-/*
- * This is a combo call of fsstat and fsinfo
- */
static int
nfs3_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
- struct nfs_fsinfo *info)
+ struct nfs_fsstat *stat)
{
int status;
dprintk("NFS call fsstat\n");
- memset((char *)info, 0, sizeof(*info));
- status = rpc_call(server->client, NFS3PROC_FSSTAT, fhandle, info, 0);
- if (status < 0)
- goto error;
+ stat->fattr->valid = 0;
+ status = rpc_call(server->client, NFS3PROC_FSSTAT, fhandle, stat, 0);
+ dprintk("NFS reply statfs: %d\n", status);
+ return status;
+}
+
+static int
+nfs3_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
+ struct nfs_fsinfo *info)
+{
+ int status;
+
+ dprintk("NFS call fsinfo\n");
+ info->fattr->valid = 0;
status = rpc_call(server->client, NFS3PROC_FSINFO, fhandle, info, 0);
+ dprintk("NFS reply fsinfo: %d\n", status);
+ return status;
+}
-error:
- dprintk("NFS reply statfs: %d\n", status);
+static int
+nfs3_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
+ struct nfs_pathconf *info)
+{
+ int status;
+
+ dprintk("NFS call pathconf\n");
+ info->fattr->valid = 0;
+ status = rpc_call(server->client, NFS3PROC_PATHCONF, fhandle, info, 0);
+ dprintk("NFS reply pathconf: %d\n", status);
return status;
}
@@ -824,6 +842,8 @@ struct nfs_rpc_ops nfs_v3_clientops = {
.readdir = nfs3_proc_readdir,
.mknod = nfs3_proc_mknod,
.statfs = nfs3_proc_statfs,
+ .fsinfo = nfs3_proc_fsinfo,
+ .pathconf = nfs3_proc_pathconf,
.decode_dirent = nfs3_decode_dirent,
.read_setup = nfs3_proc_read_setup,
.write_setup = nfs3_proc_write_setup,
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index b0c77b19fff9..2a813fb65365 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -912,14 +912,13 @@ nfs3_xdr_linkres(struct rpc_rqst *req, u32 *p, struct nfs3_linkres *res)
* Decode FSSTAT reply
*/
static int
-nfs3_xdr_fsstatres(struct rpc_rqst *req, u32 *p, struct nfs_fsinfo *res)
+nfs3_xdr_fsstatres(struct rpc_rqst *req, u32 *p, struct nfs_fsstat *res)
{
- struct nfs_fattr dummy;
int status;
status = ntohl(*p++);
- p = xdr_decode_post_op_attr(p, &dummy);
+ p = xdr_decode_post_op_attr(p, res->fattr);
if (status != 0)
return -nfs_stat_to_errno(status);
@@ -940,12 +939,11 @@ nfs3_xdr_fsstatres(struct rpc_rqst *req, u32 *p, struct nfs_fsinfo *res)
static int
nfs3_xdr_fsinfores(struct rpc_rqst *req, u32 *p, struct nfs_fsinfo *res)
{
- struct nfs_fattr dummy;
int status;
status = ntohl(*p++);
- p = xdr_decode_post_op_attr(p, &dummy);
+ p = xdr_decode_post_op_attr(p, res->fattr);
if (status != 0)
return -nfs_stat_to_errno(status);
@@ -959,6 +957,7 @@ nfs3_xdr_fsinfores(struct rpc_rqst *req, u32 *p, struct nfs_fsinfo *res)
p = xdr_decode_hyper(p, &res->maxfilesize);
/* ignore time_delta and properties */
+ res->lease_time = 0;
return 0;
}
@@ -966,18 +965,17 @@ nfs3_xdr_fsinfores(struct rpc_rqst *req, u32 *p, struct nfs_fsinfo *res)
* Decode PATHCONF reply
*/
static int
-nfs3_xdr_pathconfres(struct rpc_rqst *req, u32 *p, struct nfs_fsinfo *res)
+nfs3_xdr_pathconfres(struct rpc_rqst *req, u32 *p, struct nfs_pathconf *res)
{
- struct nfs_fattr dummy;
int status;
status = ntohl(*p++);
- p = xdr_decode_post_op_attr(p, &dummy);
+ p = xdr_decode_post_op_attr(p, res->fattr);
if (status != 0)
return -nfs_stat_to_errno(status);
- res->linkmax = ntohl(*p++);
- res->namelen = ntohl(*p++);
+ res->max_link = ntohl(*p++);
+ res->max_namelen = ntohl(*p++);
/* ignore remaining fields */
return 0;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
new file mode 100644
index 000000000000..8608fd9b3a30
--- /dev/null
+++ b/fs/nfs/nfs4proc.c
@@ -0,0 +1,1577 @@
+/*
+ * fs/nfs/nfs4proc.c
+ *
+ * Client-side procedure declarations for NFSv4.
+ *
+ * Copyright (c) 2002 The Regents of the University of Michigan.
+ * All rights reserved.
+ *
+ * Kendrick Smith <kmsmith@umich.edu>
+ * Andy Adamson <andros@umich.edu>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/mm.h>
+#include <linux/utsname.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfs.h>
+#include <linux/nfs4.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_page.h>
+#include <linux/smp_lock.h>
+
+#define NFSDBG_FACILITY NFSDBG_PROC
+
+#define GET_OP(cp,name) &cp->ops[cp->req_nops].u.name
+#define OPNUM(cp) cp->ops[cp->req_nops].opnum
+
+extern u32 *nfs4_decode_dirent(u32 *p, struct nfs_entry *entry, int plus);
+
+static nfs4_stateid zero_stateid =
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+static spinlock_t renew_lock = SPIN_LOCK_UNLOCKED;
+
+static void
+nfs4_setup_compound(struct nfs4_compound *cp, struct nfs4_op *ops,
+ struct nfs_server *server, char *tag)
+{
+ memset(cp, 0, sizeof(*cp));
+ cp->ops = ops;
+ cp->server = server;
+
+#if NFS4_DEBUG
+ cp->taglen = strlen(tag);
+ cp->tag = tag;
+#endif
+}
+
+static void
+nfs4_setup_access(struct nfs4_compound *cp, u32 req_access, u32 *resp_supported, u32 *resp_access)
+{
+ struct nfs4_access *access = GET_OP(cp, access);
+
+ access->ac_req_access = req_access;
+ access->ac_resp_supported = resp_supported;
+ access->ac_resp_access = resp_access;
+
+ OPNUM(cp) = OP_ACCESS;
+ cp->req_nops++;
+}
+
+static void
+nfs4_setup_close(struct nfs4_compound *cp, nfs4_stateid stateid, u32 seqid)
+{
+ struct nfs4_close *close = GET_OP(cp, close);
+
+ close->cl_stateid = stateid;
+ close->cl_seqid = seqid;
+
+ OPNUM(cp) = OP_CLOSE;
+ cp->req_nops++;
+ cp->renew_index = cp->req_nops;
+}
+
+static void
+nfs4_setup_commit(struct nfs4_compound *cp, u64 start, u32 len, struct nfs_writeverf *verf)
+{
+ struct nfs4_commit *commit = GET_OP(cp, commit);
+
+ commit->co_start = start;
+ commit->co_len = len;
+ commit->co_verifier = verf;
+
+ OPNUM(cp) = OP_COMMIT;
+ cp->req_nops++;
+}
+
+static void
+nfs4_setup_create_dir(struct nfs4_compound *cp, struct qstr *name,
+ struct iattr *sattr, struct nfs4_change_info *info)
+{
+ struct nfs4_create *create = GET_OP(cp, create);
+
+ create->cr_ftype = NF4DIR;
+ create->cr_namelen = name->len;
+ create->cr_name = name->name;
+ create->cr_attrs = sattr;
+ create->cr_cinfo = info;
+
+ OPNUM(cp) = OP_CREATE;
+ cp->req_nops++;
+}
+
+static void
+nfs4_setup_create_symlink(struct nfs4_compound *cp, struct qstr *name,
+ struct qstr *linktext, struct iattr *sattr,
+ struct nfs4_change_info *info)
+{
+ struct nfs4_create *create = GET_OP(cp, create);
+
+ create->cr_ftype = NF4LNK;
+ create->cr_textlen = linktext->len;
+ create->cr_text = linktext->name;
+ create->cr_namelen = name->len;
+ create->cr_name = name->name;
+ create->cr_attrs = sattr;
+ create->cr_cinfo = info;
+
+ OPNUM(cp) = OP_CREATE;
+ cp->req_nops++;
+}
+
+static void
+nfs4_setup_create_special(struct nfs4_compound *cp, struct qstr *name,
+ dev_t dev, struct iattr *sattr,
+ struct nfs4_change_info *info)
+{
+ int mode = sattr->ia_mode;
+ struct nfs4_create *create = GET_OP(cp, create);
+
+ BUG_ON(!(sattr->ia_valid & ATTR_MODE));
+ BUG_ON(!S_ISFIFO(mode) && !S_ISBLK(mode) && !S_ISCHR(mode) && !S_ISSOCK(mode));
+
+ if (S_ISFIFO(mode))
+ create->cr_ftype = NF4FIFO;
+ else if (S_ISBLK(mode)) {
+ create->cr_ftype = NF4BLK;
+ create->cr_specdata1 = MAJOR(dev);
+ create->cr_specdata2 = MINOR(dev);
+ }
+ else if (S_ISCHR(mode)) {
+ create->cr_ftype = NF4CHR;
+ create->cr_specdata1 = MAJOR(dev);
+ create->cr_specdata2 = MINOR(dev);
+ }
+ else
+ create->cr_ftype = NF4SOCK;
+
+ create->cr_namelen = name->len;
+ create->cr_name = name->name;
+ create->cr_attrs = sattr;
+ create->cr_cinfo = info;
+
+ OPNUM(cp) = OP_CREATE;
+ cp->req_nops++;
+}
+
+/*
+ * This is our standard bitmap for GETATTR requests.
+ */
+u32 nfs4_fattr_bitmap[2] = {
+ FATTR4_WORD0_TYPE
+ | FATTR4_WORD0_CHANGE
+ | FATTR4_WORD0_SIZE
+ | FATTR4_WORD0_FSID
+ | FATTR4_WORD0_FILEID,
+ FATTR4_WORD1_MODE
+ | FATTR4_WORD1_NUMLINKS
+ | FATTR4_WORD1_OWNER
+ | FATTR4_WORD1_OWNER_GROUP
+ | FATTR4_WORD1_RAWDEV
+ | FATTR4_WORD1_SPACE_USED
+ | FATTR4_WORD1_TIME_ACCESS
+ | FATTR4_WORD1_TIME_METADATA
+ | FATTR4_WORD1_TIME_MODIFY
+};
+
+u32 nfs4_statfs_bitmap[2] = {
+ FATTR4_WORD0_FILES_AVAIL
+ | FATTR4_WORD0_FILES_FREE
+ | FATTR4_WORD0_FILES_TOTAL,
+ FATTR4_WORD1_SPACE_AVAIL
+ | FATTR4_WORD1_SPACE_FREE
+ | FATTR4_WORD1_SPACE_TOTAL
+};
+
+u32 nfs4_fsinfo_bitmap[2] = {
+ FATTR4_WORD0_MAXFILESIZE
+ | FATTR4_WORD0_MAXREAD
+ | FATTR4_WORD0_MAXWRITE
+ | FATTR4_WORD0_LEASE_TIME,
+ 0
+};
+
+u32 nfs4_pathconf_bitmap[2] = {
+ FATTR4_WORD0_MAXLINK
+ | FATTR4_WORD0_MAXNAME,
+ 0
+};
+
+/* mount bitmap: fattr bitmap + lease time */
+u32 nfs4_mount_bitmap[2] = {
+ FATTR4_WORD0_TYPE
+ | FATTR4_WORD0_CHANGE
+ | FATTR4_WORD0_SIZE
+ | FATTR4_WORD0_FSID
+ | FATTR4_WORD0_FILEID
+ | FATTR4_WORD0_LEASE_TIME,
+ FATTR4_WORD1_MODE
+ | FATTR4_WORD1_NUMLINKS
+ | FATTR4_WORD1_OWNER
+ | FATTR4_WORD1_OWNER_GROUP
+ | FATTR4_WORD1_RAWDEV
+ | FATTR4_WORD1_SPACE_USED
+ | FATTR4_WORD1_TIME_ACCESS
+ | FATTR4_WORD1_TIME_METADATA
+ | FATTR4_WORD1_TIME_MODIFY
+};
+
+static inline void
+__nfs4_setup_getattr(struct nfs4_compound *cp, u32 *bitmap,
+ struct nfs_fattr *fattr,
+ struct nfs_fsstat *fsstat,
+ struct nfs_fsinfo *fsinfo,
+ struct nfs_pathconf *pathconf,
+ u32 *bmres)
+{
+ struct nfs4_getattr *getattr = GET_OP(cp, getattr);
+
+ getattr->gt_bmval = bitmap;
+ getattr->gt_attrs = fattr;
+ getattr->gt_fsstat = fsstat;
+ getattr->gt_fsinfo = fsinfo;
+ getattr->gt_pathconf = pathconf;
+ getattr->gt_bmres = bmres;
+
+ OPNUM(cp) = OP_GETATTR;
+ cp->req_nops++;
+}
+
+static void
+nfs4_setup_getattr(struct nfs4_compound *cp,
+ struct nfs_fattr *fattr,
+ u32 *bmres)
+{
+ __nfs4_setup_getattr(cp, nfs4_fattr_bitmap, fattr,
+ NULL, NULL, NULL, bmres);
+}
+
+static void
+nfs4_setup_getrootattr(struct nfs4_compound *cp,
+ struct nfs_fattr *fattr,
+ struct nfs_fsinfo *fsinfo,
+ u32 *bmres)
+{
+ __nfs4_setup_getattr(cp, nfs4_mount_bitmap,
+ fattr, NULL, fsinfo, NULL, bmres);
+}
+
+static void
+nfs4_setup_statfs(struct nfs4_compound *cp,
+ struct nfs_fsstat *fsstat,
+ u32 *bmres)
+{
+ __nfs4_setup_getattr(cp, nfs4_statfs_bitmap,
+ NULL, fsstat, NULL, NULL, bmres);
+}
+
+static void
+nfs4_setup_fsinfo(struct nfs4_compound *cp,
+ struct nfs_fsinfo *fsinfo,
+ u32 *bmres)
+{
+ __nfs4_setup_getattr(cp, nfs4_fsinfo_bitmap,
+ NULL, NULL, fsinfo, NULL, bmres);
+}
+
+static void
+nfs4_setup_pathconf(struct nfs4_compound *cp,
+ struct nfs_pathconf *pathconf,
+ u32 *bmres)
+{
+ __nfs4_setup_getattr(cp, nfs4_pathconf_bitmap,
+ NULL, NULL, NULL, pathconf, bmres);
+}
+
+static void
+nfs4_setup_getfh(struct nfs4_compound *cp, struct nfs_fh *fhandle)
+{
+ struct nfs4_getfh *getfh = GET_OP(cp, getfh);
+
+ getfh->gf_fhandle = fhandle;
+
+ OPNUM(cp) = OP_GETFH;
+ cp->req_nops++;
+}
+
+static void
+nfs4_setup_link(struct nfs4_compound *cp, struct qstr *name,
+ struct nfs4_change_info *info)
+{
+ struct nfs4_link *link = GET_OP(cp, link);
+
+ link->ln_namelen = name->len;
+ link->ln_name = name->name;
+ link->ln_cinfo = info;
+
+ OPNUM(cp) = OP_LINK;
+ cp->req_nops++;
+}
+
+static void
+nfs4_setup_lookup(struct nfs4_compound *cp, struct qstr *q)
+{
+ struct nfs4_lookup *lookup = GET_OP(cp, lookup);
+
+ lookup->lo_name = q;
+
+ OPNUM(cp) = OP_LOOKUP;
+ cp->req_nops++;
+}
+
+static void
+nfs4_setup_putfh(struct nfs4_compound *cp, struct nfs_fh *fhandle)
+{
+ struct nfs4_putfh *putfh = GET_OP(cp, putfh);
+
+ putfh->pf_fhandle = fhandle;
+
+ OPNUM(cp) = OP_PUTFH;
+ cp->req_nops++;
+}
+
+static void
+nfs4_setup_putrootfh(struct nfs4_compound *cp)
+{
+ OPNUM(cp) = OP_PUTROOTFH;
+ cp->req_nops++;
+}
+
+static void
+nfs4_setup_open(struct nfs4_compound *cp, int flags, struct qstr *name,
+ struct iattr *sattr, char *stateid, struct nfs4_change_info *cinfo,
+ u32 *rflags)
+{
+ struct nfs4_open *open = GET_OP(cp, open);
+
+ BUG_ON(cp->flags);
+
+ open->op_share_access = flags & 3;
+ open->op_opentype = (flags & O_CREAT) ? NFS4_OPEN_CREATE : NFS4_OPEN_NOCREATE;
+ open->op_createmode = NFS4_CREATE_UNCHECKED;
+ open->op_attrs = sattr;
+ if (flags & O_EXCL) {
+ u32 *p = (u32 *) open->op_verifier;
+ p[0] = jiffies;
+ p[1] = current->pid;
+ open->op_createmode = NFS4_CREATE_EXCLUSIVE;
+ }
+ open->op_name = name;
+ open->op_stateid = stateid;
+ open->op_cinfo = cinfo;
+ open->op_rflags = rflags;
+
+ OPNUM(cp) = OP_OPEN;
+ cp->req_nops++;
+ cp->renew_index = cp->req_nops;
+}
+
+static void
+nfs4_setup_open_confirm(struct nfs4_compound *cp, char *stateid)
+{
+ struct nfs4_open_confirm *open_confirm = GET_OP(cp, open_confirm);
+
+ open_confirm->oc_stateid = stateid;
+
+ OPNUM(cp) = OP_OPEN_CONFIRM;
+ cp->req_nops++;
+ cp->renew_index = cp->req_nops;
+}
+
+static void
+nfs4_setup_read(struct nfs4_compound *cp, u64 offset, u32 length,
+ struct page **pages, unsigned int pgbase, u32 *eofp, u32 *bytes_read)
+{
+ struct nfs4_read *read = GET_OP(cp, read);
+
+ read->rd_offset = offset;
+ read->rd_length = length;
+ read->rd_pages = pages;
+ read->rd_pgbase = pgbase;
+ read->rd_eof = eofp;
+ read->rd_bytes_read = bytes_read;
+
+ OPNUM(cp) = OP_READ;
+ cp->req_nops++;
+}
+
+static void
+nfs4_setup_readdir(struct nfs4_compound *cp, u64 cookie, u32 *verifier,
+ struct page **pages, unsigned int bufsize, struct dentry *dentry)
+{
+ u32 *start, *p;
+ struct nfs4_readdir *readdir = GET_OP(cp, readdir);
+
+ BUG_ON(bufsize < 80);
+ readdir->rd_cookie = (cookie > 2) ? cookie : 0;
+ memcpy(readdir->rd_req_verifier, verifier, sizeof(nfs4_verifier));
+ readdir->rd_count = bufsize;
+ readdir->rd_bmval[0] = FATTR4_WORD0_FILEID;
+ readdir->rd_bmval[1] = 0;
+ readdir->rd_pages = pages;
+ readdir->rd_pgbase = 0;
+
+ OPNUM(cp) = OP_READDIR;
+ cp->req_nops++;
+
+ if (cookie >= 2)
+ return;
+
+ /*
+ * NFSv4 servers do not return entries for '.' and '..'
+ * Therefore, we fake these entries here. We let '.'
+ * have cookie 0 and '..' have cookie 1. Note that
+ * when talking to the server, we always send cookie 0
+ * instead of 1 or 2.
+ */
+ start = p = (u32 *)kmap(*pages);
+
+ if (cookie == 0) {
+ *p++ = xdr_one; /* next */
+ *p++ = xdr_zero; /* cookie, first word */
+ *p++ = xdr_one; /* cookie, second word */
+ *p++ = xdr_one; /* entry len */
+ memcpy(p, ".\0\0\0", 4); /* entry */
+ p++;
+ *p++ = xdr_one; /* bitmap length */
+ *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
+ *p++ = htonl(8); /* attribute buffer length */
+ p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_inode));
+ }
+
+ *p++ = xdr_one; /* next */
+ *p++ = xdr_zero; /* cookie, first word */
+ *p++ = xdr_two; /* cookie, second word */
+ *p++ = xdr_two; /* entry len */
+ memcpy(p, "..\0\0", 4); /* entry */
+ p++;
+ *p++ = xdr_one; /* bitmap length */
+ *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
+ *p++ = htonl(8); /* attribute buffer length */
+ p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_parent->d_inode));
+
+ readdir->rd_pgbase = (char *)p - (char *)start;
+ readdir->rd_count -= readdir->rd_pgbase;
+ kunmap(*pages);
+}
+
+static void
+nfs4_setup_readlink(struct nfs4_compound *cp, int count, struct page **pages)
+{
+ struct nfs4_readlink *readlink = GET_OP(cp, readlink);
+
+ readlink->rl_count = count;
+ readlink->rl_pages = pages;
+
+ OPNUM(cp) = OP_READLINK;
+ cp->req_nops++;
+}
+
+static void
+nfs4_setup_remove(struct nfs4_compound *cp, struct qstr *name, struct nfs4_change_info *cinfo)
+{
+ struct nfs4_remove *remove = GET_OP(cp, remove);
+
+ remove->rm_namelen = name->len;
+ remove->rm_name = name->name;
+ remove->rm_cinfo = cinfo;
+
+ OPNUM(cp) = OP_REMOVE;
+ cp->req_nops++;
+}
+
+static void
+nfs4_setup_rename(struct nfs4_compound *cp, struct qstr *old, struct qstr *new,
+ struct nfs4_change_info *old_cinfo, struct nfs4_change_info *new_cinfo)
+{
+ struct nfs4_rename *rename = GET_OP(cp, rename);
+
+ rename->rn_oldnamelen = old->len;
+ rename->rn_oldname = old->name;
+ rename->rn_newnamelen = new->len;
+ rename->rn_newname = new->name;
+ rename->rn_src_cinfo = old_cinfo;
+ rename->rn_dst_cinfo = new_cinfo;
+
+ OPNUM(cp) = OP_RENAME;
+ cp->req_nops++;
+}
+
+static void
+nfs4_setup_renew(struct nfs4_compound *cp)
+{
+ OPNUM(cp) = OP_RENEW;
+ cp->req_nops++;
+ cp->renew_index = cp->req_nops;
+}
+
+static void
+nfs4_setup_restorefh(struct nfs4_compound *cp)
+{
+ OPNUM(cp) = OP_RESTOREFH;
+ cp->req_nops++;
+}
+
+static void
+nfs4_setup_savefh(struct nfs4_compound *cp)
+{
+ OPNUM(cp) = OP_SAVEFH;
+ cp->req_nops++;
+}
+
+static void
+nfs4_setup_setattr(struct nfs4_compound *cp, char *stateid, struct iattr *iap)
+{
+ struct nfs4_setattr *setattr = GET_OP(cp, setattr);
+
+ setattr->st_stateid = stateid;
+ setattr->st_iap = iap;
+
+ OPNUM(cp) = OP_SETATTR;
+ cp->req_nops++;
+}
+
+static void
+nfs4_setup_setclientid(struct nfs4_compound *cp, u32 program, unsigned short port)
+{
+ struct nfs4_setclientid *setclientid = GET_OP(cp, setclientid);
+ struct nfs_server *server = cp->server;
+ struct timeval tv;
+ u32 *p;
+
+ do_gettimeofday(&tv);
+ p = (u32 *)setclientid->sc_verifier;
+ *p++ = tv.tv_sec;
+ *p++ = tv.tv_usec;
+ setclientid->sc_name = server->ip_addr;
+ sprintf(setclientid->sc_netid, "udp");
+ sprintf(setclientid->sc_uaddr, "%s.%d.%d", server->ip_addr, port >> 8, port & 255);
+ setclientid->sc_prog = program;
+ setclientid->sc_cb_ident = 0;
+
+ OPNUM(cp) = OP_SETCLIENTID;
+ cp->req_nops++;
+}
+
+static void
+nfs4_setup_setclientid_confirm(struct nfs4_compound *cp)
+{
+ OPNUM(cp) = OP_SETCLIENTID_CONFIRM;
+ cp->req_nops++;
+ cp->renew_index = cp->req_nops;
+}
+
+static void
+nfs4_setup_write(struct nfs4_compound *cp, u64 offset, u32 length, int stable,
+ struct page **pages, unsigned int pgbase, u32 *bytes_written,
+ struct nfs_writeverf *verf)
+{
+ struct nfs4_write *write = GET_OP(cp, write);
+
+ write->wr_offset = offset;
+ write->wr_stable_how = stable;
+ write->wr_len = length;
+ write->wr_bytes_written = bytes_written;
+ write->wr_verf = verf;
+
+ write->wr_pages = pages;
+ write->wr_pgbase = pgbase;
+
+ OPNUM(cp) = OP_WRITE;
+ cp->req_nops++;
+}
+
+static inline void
+process_lease(struct nfs4_compound *cp)
+{
+ struct nfs_server *server;
+
+ /*
+ * Generic lease processing: If this operation contains a
+ * lease-renewing operation, and it succeeded, update the RENEW time
+ * in the superblock. Instead of the current time, we use the time
+ * when the request was sent out. (All we know is that the lease was
+ * renewed sometime between then and now, and we have to assume the
+ * worst case.)
+ *
+ * Notes:
+ * (1) renewd doesn't acquire the spinlock when messing with
+ * server->last_renewal; this is OK since rpciod always runs
+ * under the BKL.
+ * (2) cp->timestamp was set at the end of XDR encode.
+ */
+ if (!cp->renew_index)
+ return;
+ if (!cp->toplevel_status || cp->resp_nops > cp->renew_index) {
+ server = cp->server;
+ spin_lock(&renew_lock);
+ if (server->last_renewal < cp->timestamp)
+ server->last_renewal = cp->timestamp;
+ spin_unlock(&renew_lock);
+ }
+}
+
+static int
+nfs4_call_compound(struct nfs4_compound *cp, struct rpc_cred *cred, int flags)
+{
+ int status;
+ struct rpc_message msg = {
+ .rpc_proc = NFSPROC4_COMPOUND,
+ .rpc_argp = cp,
+ .rpc_resp = cp,
+ .rpc_cred = cred,
+ };
+
+ status = rpc_call_sync(cp->server->client, &msg, flags);
+ if (!status)
+ process_lease(cp);
+
+ return status;
+}
+
+static inline void
+process_cinfo(struct nfs4_change_info *info, struct nfs_fattr *fattr)
+{
+ BUG_ON((fattr->valid & NFS_ATTR_FATTR) == 0);
+ BUG_ON((fattr->valid & NFS_ATTR_FATTR_V4) == 0);
+
+ if (fattr->change_attr == info->after) {
+ fattr->pre_change_attr = info->before;
+ fattr->valid |= NFS_ATTR_PRE_CHANGE;
+ fattr->timestamp = jiffies;
+ }
+}
+
+static int
+do_open(struct inode *dir, struct qstr *name, int flags, struct iattr *sattr,
+ struct nfs_fattr *fattr, struct nfs_fh *fhandle, u32 *seqid, char *stateid)
+{
+ struct nfs4_compound compound;
+ struct nfs4_op ops[7];
+ struct nfs4_change_info dir_cinfo;
+ struct nfs_fattr dir_attr;
+ u32 dir_bmres[2];
+ u32 bmres[2];
+ u32 rflags;
+ int status;
+
+ dir_attr.valid = 0;
+ fattr->valid = 0;
+ nfs4_setup_compound(&compound, ops, NFS_SERVER(dir), "open");
+ nfs4_setup_putfh(&compound, NFS_FH(dir));
+ nfs4_setup_savefh(&compound);
+ nfs4_setup_open(&compound, flags, name, sattr, stateid, &dir_cinfo, &rflags);
+ nfs4_setup_getattr(&compound, fattr, bmres);
+ nfs4_setup_getfh(&compound, fhandle);
+ nfs4_setup_restorefh(&compound);
+ nfs4_setup_getattr(&compound, &dir_attr, dir_bmres);
+ if ((status = nfs4_call_compound(&compound, NULL, 0)))
+ return status;
+
+ process_cinfo(&dir_cinfo, &dir_attr);
+ nfs_refresh_inode(dir, &dir_attr);
+ if (!(rflags & NFS4_OPEN_RESULT_CONFIRM)) {
+ *seqid = 1;
+ return 0;
+ }
+ *seqid = 2;
+
+ nfs4_setup_compound(&compound, ops, NFS_SERVER(dir), "open_confirm");
+ nfs4_setup_putfh(&compound, fhandle);
+ nfs4_setup_open_confirm(&compound, stateid);
+ return nfs4_call_compound(&compound, NULL, 0);
+}
+
+static int
+do_setattr(struct nfs_server *server, struct nfs_fattr *fattr,
+ struct nfs_fh *fhandle, struct iattr *sattr, char *stateid)
+{
+ struct nfs4_compound compound;
+ struct nfs4_op ops[3];
+ u32 bmres[2];
+
+ fattr->valid = 0;
+ nfs4_setup_compound(&compound, ops, server, "setattr");
+ nfs4_setup_putfh(&compound, fhandle);
+ nfs4_setup_setattr(&compound, stateid, sattr);
+ nfs4_setup_getattr(&compound, fattr, bmres);
+ return nfs4_call_compound(&compound, NULL, 0);
+}
+
+/*
+ * Close an open state on the server: PUTFH + CLOSE with the open
+ * stateid and the open-owner sequence id established by do_open().
+ */
+static int
+do_close(struct nfs_server *server, struct nfs_fh *fhandle, u32 seqid, char *stateid)
+{
+	struct nfs4_compound compound;
+	struct nfs4_op ops[2];
+
+	nfs4_setup_compound(&compound, ops, server, "close");
+	nfs4_setup_putfh(&compound, fhandle);
+	nfs4_setup_close(&compound, stateid, seqid);
+	return nfs4_call_compound(&compound, NULL, 0);
+}
+
+/*
+ * Mount-time bootstrap: establish the clientid (SETCLIENTID +
+ * SETCLIENTID_CONFIRM), learn the lease time, start the renew
+ * daemon, and walk the mount path component by component to obtain
+ * the root filehandle and attributes for this mount.
+ */
+static int
+nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
+		struct nfs_fattr *fattr)
+{
+	struct nfs4_compound compound;
+	struct nfs4_op ops[4];
+	struct nfs_fsinfo fsinfo;
+	u32 bmres[2];
+	unsigned char * p;
+	struct qstr q;
+	int status;
+
+	fattr->valid = 0;
+
+	if (!(server->nfs4_state = nfs4_get_client()))
+		return -ENOMEM;
+
+	/*
+	 * SETCLIENTID.
+	 * Until delegations are imported, we don't bother setting the program
+	 * number and port to anything meaningful.
+	 */
+	nfs4_setup_compound(&compound, ops, server, "setclientid");
+	nfs4_setup_setclientid(&compound, 0, 0);
+	if ((status = nfs4_call_compound(&compound, NULL, 0)))
+		goto out;
+
+	/*
+	 * SETCLIENTID_CONFIRM, plus root filehandle.
+	 * We also get the lease time here.
+	 */
+	nfs4_setup_compound(&compound, ops, server, "setclientid_confirm");
+	nfs4_setup_setclientid_confirm(&compound);
+	nfs4_setup_putrootfh(&compound);
+	nfs4_setup_getrootattr(&compound, fattr, &fsinfo, bmres);
+	nfs4_setup_getfh(&compound, fhandle);
+	if ((status = nfs4_call_compound(&compound, NULL, 0)))
+		goto out;
+
+	/*
+	 * Now that we have instantiated the clientid and determined
+	 * the lease time, we can initialize the renew daemon for this
+	 * server.
+	 */
+	server->lease_time = fsinfo.lease_time * HZ;
+	if ((status = nfs4_init_renewd(server)))
+		goto out;
+
+	/*
+	 * Now we do a separate LOOKUP for each component of the mount path.
+	 * The LOOKUPs are done separately so that we can conveniently
+	 * catch an ERR_WRONGSEC if it occurs along the way...
+	 */
+	p = server->mnt_path;
+	for (;;) {
+		/* Skip '/' separators; an empty remainder ends the walk. */
+		while (*p == '/')
+			p++;
+		if (!*p)
+			break;
+		q.name = p;
+		while (*p && (*p != '/'))
+			p++;
+		q.len = p - q.name;
+
+		/* fhandle is both input (current dir) and output (child),
+		 * advancing one component per compound. */
+		nfs4_setup_compound(&compound, ops, server, "mount");
+		nfs4_setup_putfh(&compound, fhandle);
+		nfs4_setup_lookup(&compound, &q);
+		nfs4_setup_getattr(&compound, fattr, bmres);
+		nfs4_setup_getfh(&compound, fhandle);
+		status = nfs4_call_compound(&compound, NULL, 0);
+		if (!status)
+			continue;
+		if (status == -ENOENT) {
+			printk(KERN_NOTICE "NFS: mount path %s does not exist!\n", server->mnt_path);
+			printk(KERN_NOTICE "NFS: suggestion: try mounting '/' instead.\n");
+		}
+		break;
+	}
+
+out:
+	return status;
+}
+
+/*
+ * Fetch the attributes of 'inode' with a PUTFH + GETATTR compound.
+ */
+static int
+nfs4_proc_getattr(struct inode *inode, struct nfs_fattr *fattr)
+{
+	struct nfs4_compound compound;
+	struct nfs4_op ops[2];
+	u32 bmres[2];
+
+	fattr->valid = 0;
+
+	nfs4_setup_compound(&compound, ops, NFS_SERVER(inode), "getattr");
+	nfs4_setup_putfh(&compound, NFS_FH(inode));
+	nfs4_setup_getattr(&compound, fattr, bmres);
+	return nfs4_call_compound(&compound, NULL, 0);
+}
+
+/*
+ * Set attributes on a file.  A size change (truncate) requires write
+ * access in v4, so in that case we bracket the SETATTR with an
+ * OPEN(WRITE)/CLOSE pair and use the resulting open stateid;
+ * otherwise the special all-zero stateid is used.
+ */
+static int
+nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
+		struct iattr *sattr)
+{
+	struct inode *	inode = dentry->d_inode;
+	int		size_change = sattr->ia_valid & ATTR_SIZE;
+	struct nfs_fh	throwaway_fh;
+	u32		seqid;
+	nfs4_stateid	stateid;
+	int		status;
+
+	fattr->valid = 0;
+
+	if (size_change) {
+		status = do_open(dentry->d_parent->d_inode, &dentry->d_name,
+				NFS4_SHARE_ACCESS_WRITE, NULL, fattr,
+				&throwaway_fh, &seqid, stateid);
+		if (status)
+			return status;
+
+		/*
+		 * Because OPEN is always done by name in nfsv4, it is
+		 * possible that we opened a different file by the same
+		 * name.  We can recognize this race condition, but we
+		 * can't do anything about it besides returning an error.
+		 *
+		 * XXX: Should we compare filehandles too, as in
+		 * nfs_find_actor()?
+		 */
+		if (fattr->fileid != NFS_FILEID(inode)) {
+			printk(KERN_WARNING "nfs: raced in setattr, returning -EIO\n");
+			do_close(NFS_SERVER(inode), NFS_FH(inode), seqid, stateid);
+			return -EIO;
+		}
+	}
+	else
+		memcpy(stateid, zero_stateid, sizeof(nfs4_stateid));
+
+	status = do_setattr(NFS_SERVER(inode), fattr, NFS_FH(inode), sattr, stateid);
+	/* Close the transient open even if the SETATTR failed. */
+	if (size_change)
+		do_close(NFS_SERVER(inode), NFS_FH(inode), seqid, stateid);
+	return status;
+}
+
+/*
+ * Look up 'name' in directory 'dir', returning the child's filehandle
+ * and attributes.  The directory's attributes are fetched (before the
+ * LOOKUP changes the current filehandle) and folded back into 'dir'.
+ */
+static int
+nfs4_proc_lookup(struct inode *dir, struct qstr *name,
+		struct nfs_fh *fhandle, struct nfs_fattr *fattr)
+{
+	struct nfs4_compound compound;
+	struct nfs4_op ops[5];
+	struct nfs_fattr dir_attr;
+	u32 dir_bmres[2];
+	u32 bmres[2];
+	int status;
+
+	dir_attr.valid = 0;
+	fattr->valid = 0;
+
+	dprintk("NFS call  lookup %s\n", name->name);
+	nfs4_setup_compound(&compound, ops, NFS_SERVER(dir), "lookup");
+	nfs4_setup_putfh(&compound, NFS_FH(dir));
+	nfs4_setup_getattr(&compound, &dir_attr, dir_bmres);
+	nfs4_setup_lookup(&compound, name);
+	nfs4_setup_getattr(&compound, fattr, bmres);
+	nfs4_setup_getfh(&compound, fhandle);
+	status = nfs4_call_compound(&compound, NULL, 0);
+	dprintk("NFS reply lookup: %d\n", status);
+
+	if (status >= 0)
+		status = nfs_refresh_inode(dir, &dir_attr);
+	return status;
+}
+
+/*
+ * Check access rights on 'inode' for the given MAY_* mode bits using
+ * the ACCESS operation, mapping the generic mode to the NFSv4 access
+ * bitmap (directories and regular files map differently).
+ *
+ * Returns 0 if all requested bits are granted, -EACCES if some are
+ * denied, and -ENOTSUPP if the server doesn't even evaluate them all.
+ */
+static int
+nfs4_proc_access(struct inode *inode, struct rpc_cred *cred, int mode)
+{
+	struct nfs4_compound compound;
+	struct nfs4_op ops[3];
+	struct nfs_fattr fattr;
+	u32 bmres[2];
+	u32 req_access = 0, resp_supported, resp_access;
+	int status;
+
+	fattr.valid = 0;
+
+	/*
+	 * Determine which access bits we want to ask for...
+	 */
+	if (mode & MAY_READ)
+		req_access |= NFS4_ACCESS_READ;
+	if (S_ISDIR(inode->i_mode)) {
+		if (mode & MAY_WRITE)
+			req_access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
+		if (mode & MAY_EXEC)
+			req_access |= NFS4_ACCESS_LOOKUP;
+	}
+	else {
+		if (mode & MAY_WRITE)
+			req_access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
+		if (mode & MAY_EXEC)
+			req_access |= NFS4_ACCESS_EXECUTE;
+	}
+
+	nfs4_setup_compound(&compound, ops, NFS_SERVER(inode), "access");
+	nfs4_setup_putfh(&compound, NFS_FH(inode));
+	nfs4_setup_getattr(&compound, &fattr, bmres);
+	nfs4_setup_access(&compound, req_access, &resp_supported, &resp_access);
+	status = nfs4_call_compound(&compound, cred, 0);
+	/* Refresh attributes even on failure paths below. */
+	nfs_refresh_inode(inode, &fattr);
+
+	if (!status) {
+		if (req_access != resp_supported) {
+			printk(KERN_NOTICE "NFS: server didn't support all access bits!\n");
+			status = -ENOTSUPP;
+		}
+		else if (req_access != resp_access)
+			status = -EACCES;
+	}
+	return status;
+}
+
+/*
+ * TODO: For the time being, we don't try to get any attributes
+ * along with any of the zero-copy operations READ, READDIR,
+ * READLINK, WRITE.
+ *
+ * In the case of the first three, we want to put the GETATTR
+ * after the read-type operation -- this is because it is hard
+ * to predict the length of a GETATTR response in v4, and thus
+ * align the READ data correctly. This means that the GETATTR
+ * may end up partially falling into the page cache, and we should
+ * shift it into the 'tail' of the xdr_buf before processing.
+ * To do this efficiently, we need to know the total length
+ * of data received, which doesn't seem to be available outside
+ * of the RPC layer.
+ *
+ * In the case of WRITE, we also want to put the GETATTR after
+ * the operation -- in this case because we want to make sure
+ * we get the post-operation mtime and size. This means that
+ * we can't use xdr_encode_pages() as written: we need a variant
+ * of it which would leave room in the 'tail' iovec.
+ *
+ * Both of these changes to the XDR layer would in fact be quite
+ * minor, but I decided to leave them for a subsequent patch.
+ */
+/*
+ * Read a symlink's target into 'page' via PUTFH + READLINK.
+ * No GETATTR here -- see the zero-copy TODO comment above.
+ */
+static int
+nfs4_proc_readlink(struct inode *inode, struct page *page)
+{
+	struct nfs4_compound compound;
+	struct nfs4_op ops[2];
+
+	nfs4_setup_compound(&compound, ops, NFS_SERVER(inode), "readlink");
+	nfs4_setup_putfh(&compound, NFS_FH(inode));
+	nfs4_setup_readlink(&compound, PAGE_CACHE_SIZE, &page);
+	return nfs4_call_compound(&compound, NULL, 0);
+}
+
+/*
+ * Synchronous READ of up to 'count' bytes at (page, base) into the
+ * given page.  On success returns the number of bytes read and sets
+ * *eofp; no attributes are fetched (see zero-copy TODO above), so
+ * fattr is returned invalid.
+ */
+static int
+nfs4_proc_read(struct inode *inode, struct rpc_cred *cred,
+	       struct nfs_fattr *fattr, int flags,
+	       unsigned int base, unsigned int count,
+	       struct page *page, int *eofp)
+{
+	u64 offset = page_offset(page) + base;
+	struct nfs4_compound compound;
+	struct nfs4_op ops[2];
+	u32 bytes_read;
+	int status;
+
+	fattr->valid = 0;
+	nfs4_setup_compound(&compound, ops, NFS_SERVER(inode), "read [sync]");
+	nfs4_setup_putfh(&compound, NFS_FH(inode));
+	nfs4_setup_read(&compound, offset, count, &page, base, eofp, &bytes_read);
+	status = nfs4_call_compound(&compound, cred, 0);
+
+	if (status >= 0)
+		status = bytes_read;
+	return status;
+}
+
+/*
+ * Synchronous WRITE of 'count' bytes from (page, base).  NFS_RW_SYNC
+ * selects FILE_SYNC stability, otherwise UNSTABLE; NFS_RW_SWAP selects
+ * the swap-safe RPC flags.  Returns bytes written on success; fattr is
+ * returned invalid (see zero-copy TODO above).
+ */
+static int
+nfs4_proc_write(struct inode *inode, struct rpc_cred *cred,
+		struct nfs_fattr *fattr, int flags,
+		unsigned int base, unsigned int count,
+		struct page *page, struct nfs_writeverf *verf)
+{
+	u64 offset = page_offset(page) + base;
+	struct nfs4_compound compound;
+	struct nfs4_op ops[2];
+	u32 bytes_written;
+	int stable = (flags & NFS_RW_SYNC) ? NFS_FILE_SYNC : NFS_UNSTABLE;
+	int rpcflags = (flags & NFS_RW_SWAP) ? NFS_RPC_SWAPFLAGS : 0;
+	int status;
+
+	fattr->valid = 0;
+	nfs4_setup_compound(&compound, ops, NFS_SERVER(inode), "write [sync]");
+	nfs4_setup_putfh(&compound, NFS_FH(inode));
+	nfs4_setup_write(&compound, offset, count, stable, &page, base, &bytes_written, verf);
+	status = nfs4_call_compound(&compound, cred, rpcflags);
+
+	if (status >= 0)
+		status = bytes_written;
+	return status;
+}
+
+/*
+ * Create a regular file via OPEN(O_CREAT) and immediately CLOSE it.
+ * For exclusive creates the requested attributes are applied with a
+ * follow-up SETATTR, since EXCLUSIVE4 creation ignores sattr.
+ */
+static int
+nfs4_proc_create(struct inode *dir, struct qstr *name, struct iattr *sattr,
+		 int flags, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
+{
+	int oflags;
+	u32 seqid;
+	nfs4_stateid stateid;
+	int status;
+
+	oflags = NFS4_SHARE_ACCESS_READ | O_CREAT | (flags & O_EXCL);
+	status = do_open(dir, name, oflags, sattr, fattr, fhandle, &seqid, stateid);
+	if (!status) {
+		if (flags & O_EXCL)
+			status = do_setattr(NFS_SERVER(dir), fattr, fhandle, sattr, stateid);
+		do_close(NFS_SERVER(dir), fhandle, seqid, stateid);
+	}
+	return status;
+}
+
+/*
+ * Remove the entry 'name' from directory 'dir' (used for both unlink
+ * and rmdir), folding the directory's change_info and post-op
+ * attributes back into the dir inode on success.
+ */
+static int
+nfs4_proc_remove(struct inode *dir, struct qstr *name)
+{
+	struct nfs4_compound compound;
+	struct nfs4_op ops[3];
+	struct nfs4_change_info dir_cinfo;
+	struct nfs_fattr dir_attr;
+	u32 dir_bmres[2];
+	int status;
+
+	dir_attr.valid = 0;
+	nfs4_setup_compound(&compound, ops, NFS_SERVER(dir), "remove");
+	nfs4_setup_putfh(&compound, NFS_FH(dir));
+	nfs4_setup_remove(&compound, name, &dir_cinfo);
+	nfs4_setup_getattr(&compound, &dir_attr, dir_bmres);
+	status = nfs4_call_compound(&compound, NULL, 0);
+
+	if (!status) {
+		process_cinfo(&dir_cinfo, &dir_attr);
+		nfs_refresh_inode(dir, &dir_attr);
+	}
+	return status;
+}
+
+/*
+ * Carrier for an asynchronous REMOVE compound (the "silly rename"
+ * unlink path).  The RPC completes long after unlink_setup() returns,
+ * so every buffer the XDR layer references -- ops, change_info,
+ * post-op attributes, and the GETATTR bitmap result -- must live in
+ * this heap-allocated descriptor, not on the caller's stack.
+ */
+struct unlink_desc {
+	struct nfs4_compound compound;
+	struct nfs4_op ops[3];
+	struct nfs4_change_info cinfo;
+	struct nfs_fattr attrs;
+	u32 bmres[2];
+};
+
+/*
+ * Prepare the rpc_message for an async REMOVE of 'name' in 'dir'.
+ * The unlink_desc is freed by nfs4_proc_unlink_done().
+ */
+static int
+nfs4_proc_unlink_setup(struct rpc_message *msg, struct dentry *dir, struct qstr *name)
+{
+	struct unlink_desc *	up;
+	struct nfs4_compound *	cp;
+
+	up = (struct unlink_desc *) kmalloc(sizeof(*up), GFP_KERNEL);
+	if (!up)
+		return -ENOMEM;
+	cp = &up->compound;
+
+	nfs4_setup_compound(cp, up->ops, NFS_SERVER(dir->d_inode), "unlink_setup");
+	nfs4_setup_putfh(cp, NFS_FH(dir->d_inode));
+	nfs4_setup_remove(cp, name, &up->cinfo);
+	/* bmres must not be a stack local: the reply is decoded
+	 * asynchronously, after this function has returned. */
+	nfs4_setup_getattr(cp, &up->attrs, up->bmres);
+
+	msg->rpc_proc = NFSPROC4_COMPOUND;
+	msg->rpc_argp = cp;
+	msg->rpc_resp = cp;
+	return 0;
+}
+
+/*
+ * Completion callback for the async REMOVE set up above: process the
+ * lease and change_info, refresh the directory inode, and free the
+ * unlink_desc.  rpc_argp is cleared so a repeat call is a no-op.
+ */
+static int
+nfs4_proc_unlink_done(struct dentry *dir, struct rpc_task *task)
+{
+	struct rpc_message *msg = &task->tk_msg;
+	struct unlink_desc *up;
+
+	if (msg->rpc_argp) {
+		up = (struct unlink_desc *) msg->rpc_argp;
+		process_lease(&up->compound);
+		process_cinfo(&up->cinfo, &up->attrs);
+		nfs_refresh_inode(dir->d_inode, &up->attrs);
+		kfree(up);
+		msg->rpc_argp = NULL;
+	}
+	return 0;
+}
+
+/*
+ * Rename old_name in old_dir to new_name in new_dir.  SAVEFH/RESTOREFH
+ * let a single compound fetch post-op attributes for both directories;
+ * both inodes are refreshed with their change_info on success.
+ */
+static int
+nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
+		 struct inode *new_dir, struct qstr *new_name)
+{
+	struct nfs4_compound compound;
+	struct nfs4_op ops[7];
+	struct nfs4_change_info old_cinfo, new_cinfo;
+	struct nfs_fattr old_dir_attr, new_dir_attr;
+	u32 old_dir_bmres[2], new_dir_bmres[2];
+	int status;
+
+	old_dir_attr.valid = 0;
+	new_dir_attr.valid = 0;
+
+	nfs4_setup_compound(&compound, ops, NFS_SERVER(old_dir), "rename");
+	nfs4_setup_putfh(&compound, NFS_FH(old_dir));
+	nfs4_setup_savefh(&compound);
+	nfs4_setup_putfh(&compound, NFS_FH(new_dir));
+	nfs4_setup_rename(&compound, old_name, new_name, &old_cinfo, &new_cinfo);
+	nfs4_setup_getattr(&compound, &new_dir_attr, new_dir_bmres);
+	nfs4_setup_restorefh(&compound);
+	nfs4_setup_getattr(&compound, &old_dir_attr, old_dir_bmres);
+	status = nfs4_call_compound(&compound, NULL, 0);
+
+	if (!status) {
+		process_cinfo(&old_cinfo, &old_dir_attr);
+		process_cinfo(&new_cinfo, &new_dir_attr);
+		nfs_refresh_inode(old_dir, &old_dir_attr);
+		nfs_refresh_inode(new_dir, &new_dir_attr);
+	}
+	return status;
+}
+
+/*
+ * Create a hard link to 'inode' named 'name' in directory 'dir'.
+ * The saved-fh trick fetches post-op attributes for both the link
+ * target and the directory in one compound.
+ */
+static int
+nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
+{
+	struct nfs4_compound compound;
+	struct nfs4_op ops[7];
+	struct nfs4_change_info dir_cinfo;
+	struct nfs_fattr dir_attr, fattr;
+	u32 dir_bmres[2], bmres[2];
+	int status;
+
+	dir_attr.valid = 0;
+	fattr.valid = 0;
+
+	nfs4_setup_compound(&compound, ops, NFS_SERVER(inode), "link");
+	nfs4_setup_putfh(&compound, NFS_FH(inode));
+	nfs4_setup_savefh(&compound);
+	nfs4_setup_putfh(&compound, NFS_FH(dir));
+	nfs4_setup_link(&compound, name, &dir_cinfo);
+	nfs4_setup_getattr(&compound, &dir_attr, dir_bmres);
+	nfs4_setup_restorefh(&compound);
+	nfs4_setup_getattr(&compound, &fattr, bmres);
+	status = nfs4_call_compound(&compound, NULL, 0);
+
+	if (!status) {
+		process_cinfo(&dir_cinfo, &dir_attr);
+		nfs_refresh_inode(dir, &dir_attr);
+		nfs_refresh_inode(inode, &fattr);
+	}
+	return status;
+}
+
+/*
+ * Create a symlink 'name' -> 'path' in directory 'dir', returning the
+ * new link's filehandle and attributes, and refreshing the directory
+ * from its post-op attributes and change_info.
+ */
+static int
+nfs4_proc_symlink(struct inode *dir, struct qstr *name, struct qstr *path,
+		  struct iattr *sattr, struct nfs_fh *fhandle,
+		  struct nfs_fattr *fattr)
+{
+	struct nfs4_compound compound;
+	struct nfs4_op ops[7];
+	struct nfs_fattr dir_attr;
+	u32 dir_bmres[2], bmres[2];
+	struct nfs4_change_info dir_cinfo;
+	int status;
+
+	dir_attr.valid = 0;
+	fattr->valid = 0;
+
+	nfs4_setup_compound(&compound, ops, NFS_SERVER(dir), "symlink");
+	nfs4_setup_putfh(&compound, NFS_FH(dir));
+	nfs4_setup_savefh(&compound);
+	nfs4_setup_create_symlink(&compound, name, path, sattr, &dir_cinfo);
+	nfs4_setup_getattr(&compound, fattr, bmres);
+	nfs4_setup_getfh(&compound, fhandle);
+	nfs4_setup_restorefh(&compound);
+	nfs4_setup_getattr(&compound, &dir_attr, dir_bmres);
+	status = nfs4_call_compound(&compound, NULL, 0);
+
+	if (!status) {
+		process_cinfo(&dir_cinfo, &dir_attr);
+		nfs_refresh_inode(dir, &dir_attr);
+	}
+	return status;
+}
+
+/*
+ * Create a subdirectory 'name' in 'dir', returning the new directory's
+ * filehandle and attributes and refreshing the parent from its post-op
+ * attributes and change_info.
+ */
+static int
+nfs4_proc_mkdir(struct inode *dir, struct qstr *name, struct iattr *sattr,
+		struct nfs_fh *fhandle, struct nfs_fattr *fattr)
+{
+	struct nfs4_compound compound;
+	struct nfs4_op ops[7];
+	struct nfs_fattr dir_attr;
+	u32 dir_bmres[2], bmres[2];
+	struct nfs4_change_info dir_cinfo;
+	int status;
+
+	dir_attr.valid = 0;
+	fattr->valid = 0;
+
+	nfs4_setup_compound(&compound, ops, NFS_SERVER(dir), "mkdir");
+	nfs4_setup_putfh(&compound, NFS_FH(dir));
+	nfs4_setup_savefh(&compound);
+	nfs4_setup_create_dir(&compound, name, sattr, &dir_cinfo);
+	nfs4_setup_getattr(&compound, fattr, bmres);
+	nfs4_setup_getfh(&compound, fhandle);
+	nfs4_setup_restorefh(&compound);
+	nfs4_setup_getattr(&compound, &dir_attr, dir_bmres);
+	status = nfs4_call_compound(&compound, NULL, 0);
+
+	if (!status) {
+		process_cinfo(&dir_cinfo, &dir_attr);
+		nfs_refresh_inode(dir, &dir_attr);
+	}
+	return status;
+}
+
+/*
+ * READDIR into 'page' starting at 'cookie'.  The BKL is held around
+ * the call; 'plus' (readdirplus) is ignored here since v4 READDIR
+ * attribute handling is covered by the zero-copy TODO above.
+ */
+static int
+nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
+		  u64 cookie, struct page *page, unsigned int count, int plus)
+{
+	struct inode *dir = dentry->d_inode;
+	struct nfs4_compound compound;
+	struct nfs4_op ops[2];
+	int status;
+
+	lock_kernel();
+
+	nfs4_setup_compound(&compound, ops, NFS_SERVER(dir), "readdir");
+	nfs4_setup_putfh(&compound, NFS_FH(dir));
+	nfs4_setup_readdir(&compound, cookie, NFS_COOKIEVERF(dir), &page, count, dentry);
+	status = nfs4_call_compound(&compound, cred, 0);
+
+	unlock_kernel();
+	return status;
+}
+
+/*
+ * Create a special file (device node, fifo, socket) 'name' in 'dir'
+ * with device number 'rdev', returning its filehandle and attributes
+ * and refreshing the parent directory.
+ */
+static int
+nfs4_proc_mknod(struct inode *dir, struct qstr *name, struct iattr *sattr,
+		dev_t rdev, struct nfs_fh *fh, struct nfs_fattr *fattr)
+{
+	struct nfs4_compound compound;
+	struct nfs4_op ops[7];
+	struct nfs_fattr dir_attr;
+	u32 dir_bmres[2], bmres[2];
+	struct nfs4_change_info dir_cinfo;
+	int status;
+
+	dir_attr.valid = 0;
+	fattr->valid = 0;
+
+	nfs4_setup_compound(&compound, ops, NFS_SERVER(dir), "mknod");
+	nfs4_setup_putfh(&compound, NFS_FH(dir));
+	nfs4_setup_savefh(&compound);
+	nfs4_setup_create_special(&compound, name, rdev,sattr, &dir_cinfo);
+	nfs4_setup_getattr(&compound, fattr, bmres);
+	nfs4_setup_getfh(&compound, fh);
+	nfs4_setup_restorefh(&compound);
+	nfs4_setup_getattr(&compound, &dir_attr, dir_bmres);
+	status = nfs4_call_compound(&compound, NULL, 0);
+
+	if (!status) {
+		process_cinfo(&dir_cinfo, &dir_attr);
+		nfs_refresh_inode(dir, &dir_attr);
+	}
+	return status;
+}
+
+/*
+ * Fetch filesystem usage statistics for the filesystem containing
+ * 'fhandle'.  fsstat is zeroed first so unreported fields read as 0.
+ */
+static int
+nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
+		 struct nfs_fsstat *fsstat)
+{
+	struct nfs4_compound compound;
+	struct nfs4_op ops[2];
+	u32 bmres[2];
+
+	memset(fsstat, 0, sizeof(*fsstat));
+	nfs4_setup_compound(&compound, ops, server, "statfs");
+	nfs4_setup_putfh(&compound, fhandle);
+	nfs4_setup_statfs(&compound, fsstat, bmres);
+	return nfs4_call_compound(&compound, NULL, 0);
+}
+
+/*
+ * Fetch static filesystem information (transfer size limits etc.)
+ * for the filesystem containing 'fhandle'.  fsinfo is zeroed first
+ * so unreported fields read as 0.
+ */
+static int
+nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
+		 struct nfs_fsinfo *fsinfo)
+{
+	struct nfs4_compound compound;
+	struct nfs4_op ops[2];
+	u32 bmres[2];
+
+	memset(fsinfo, 0, sizeof(*fsinfo));
+	/* Tag was "statfs" (copy-paste); use the correct tag so debug
+	 * and error reporting identify this as an fsinfo compound. */
+	nfs4_setup_compound(&compound, ops, server, "fsinfo");
+	nfs4_setup_putfh(&compound, fhandle);
+	nfs4_setup_fsinfo(&compound, fsinfo, bmres);
+	return nfs4_call_compound(&compound, NULL, 0);
+}
+
+/*
+ * Fetch pathconf information (name length limits etc.) for the
+ * filesystem containing 'fhandle'.  pathconf is zeroed first so
+ * unreported fields read as 0.
+ */
+static int
+nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
+		   struct nfs_pathconf *pathconf)
+{
+	struct nfs4_compound compound;
+	struct nfs4_op ops[2];
+	u32 bmres[2];
+
+	memset(pathconf, 0, sizeof(*pathconf));
+	/* Tag was "statfs" (copy-paste); use the correct tag so debug
+	 * and error reporting identify this as a pathconf compound. */
+	nfs4_setup_compound(&compound, ops, server, "pathconf");
+	nfs4_setup_putfh(&compound, fhandle);
+	nfs4_setup_pathconf(&compound, pathconf, bmres);
+	return nfs4_call_compound(&compound, NULL, 0);
+}
+
+/*
+ * Completion callback for async READ: renew the lease bookkeeping
+ * and hand the result (byte count, eof flag) to the generic layer.
+ */
+static void
+nfs4_read_done(struct rpc_task *task)
+{
+	struct nfs_read_data *data = (struct nfs_read_data *) task->tk_calldata;
+
+	process_lease(&data->u.v4.compound);
+	nfs_readpage_result(task, data->u.v4.res_count, data->u.v4.res_eof);
+}
+
+/*
+ * Build and arm an asynchronous READ rpc_task for 'count' bytes
+ * starting at the first queued nfs_page.  All compound state lives in
+ * data->u.v4, which persists until nfs4_read_done() runs.
+ */
+static void
+nfs4_proc_read_setup(struct nfs_read_data *data, unsigned int count)
+{
+	struct rpc_task	*task = &data->task;
+	struct nfs4_compound *cp = &data->u.v4.compound;
+	struct rpc_message msg = {
+		.rpc_proc = NFSPROC4_COMPOUND,
+		.rpc_argp = cp,
+		.rpc_resp = cp,
+		.rpc_cred = data->cred,
+	};
+	struct inode *inode = data->inode;
+	struct nfs_page *req = nfs_list_entry(data->pages.next);
+	int flags;
+
+	nfs4_setup_compound(cp, data->u.v4.ops, NFS_SERVER(inode), "read [async]");
+	nfs4_setup_putfh(cp, NFS_FH(inode));
+	nfs4_setup_read(cp, req_offset(req) + req->wb_offset,
+			count, data->pagevec, req->wb_offset,
+			&data->u.v4.res_eof,
+			&data->u.v4.res_count);
+
+	/* N.B. Do we need to test? Never called for swapfile inode */
+	flags = RPC_TASK_ASYNC | (IS_SWAPFILE(inode)? NFS_RPC_SWAPFLAGS : 0);
+
+	/* Finalize the task. */
+	rpc_init_task(task, NFS_CLIENT(inode), nfs4_read_done, flags);
+	task->tk_calldata = data;
+	/* Release requests */
+	task->tk_release = nfs_readdata_release;
+
+	rpc_call_setup(task, &msg, 0);
+}
+
+/*
+ * Completion callback for async WRITE: renew the lease bookkeeping
+ * and report the stability level and byte counts to the generic
+ * writeback layer.
+ */
+static void
+nfs4_write_done(struct rpc_task *task)
+{
+	struct nfs_write_data *data = (struct nfs_write_data *) task->tk_calldata;
+
+	process_lease(&data->u.v4.compound);
+	nfs_writeback_done(task, data->u.v4.arg_stable,
+			   data->u.v4.arg_count, data->u.v4.res_count);
+}
+
+/*
+ * Build and arm an asynchronous WRITE rpc_task.  Stability is chosen
+ * from 'how': FLUSH_STABLE requests FILE_SYNC when nothing is pending
+ * commit (so no COMMIT will follow), DATA_SYNC otherwise; without
+ * FLUSH_STABLE the write goes out UNSTABLE.
+ */
+static void
+nfs4_proc_write_setup(struct nfs_write_data *data, unsigned int count, int how)
+{
+	struct rpc_task	*task = &data->task;
+	struct nfs4_compound *cp = &data->u.v4.compound;
+	struct rpc_message msg = {
+		.rpc_proc = NFSPROC4_COMPOUND,
+		.rpc_argp = cp,
+		.rpc_resp = cp,
+		.rpc_cred = data->cred,
+	};
+	struct inode *inode = data->inode;
+	struct nfs_page *req = nfs_list_entry(data->pages.next);
+	int stable;
+	int flags;
+
+	if (how & FLUSH_STABLE) {
+		if (!NFS_I(inode)->ncommit)
+			stable = NFS_FILE_SYNC;
+		else
+			stable = NFS_DATA_SYNC;
+	} else
+		stable = NFS_UNSTABLE;
+
+	nfs4_setup_compound(cp, data->u.v4.ops, NFS_SERVER(inode), "write [async]");
+	nfs4_setup_putfh(cp, NFS_FH(inode));
+	nfs4_setup_write(cp, req_offset(req) + req->wb_offset,
+			 count, stable, data->pagevec, req->wb_offset,
+			 &data->u.v4.res_count, &data->verf);
+
+	/* Set the initial flags for the task. */
+	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
+
+	/* Finalize the task. */
+	rpc_init_task(task, NFS_CLIENT(inode), nfs4_write_done, flags);
+	task->tk_calldata = data;
+	/* Release requests */
+	task->tk_release = nfs_writedata_release;
+
+	rpc_call_setup(task, &msg, 0);
+}
+
+/*
+ * Completion callback for async COMMIT: renew the lease bookkeeping
+ * and hand off to the generic commit completion path.
+ */
+static void
+nfs4_commit_done(struct rpc_task *task)
+{
+	struct nfs_write_data *data = (struct nfs_write_data *) task->tk_calldata;
+
+	process_lease(&data->u.v4.compound);
+	nfs_commit_done(task);
+}
+
+/*
+ * Build and arm an asynchronous COMMIT rpc_task for the byte range
+ * [start, start+len); the returned write verifier lands in data->verf
+ * for comparison against the verifier from the unstable writes.
+ */
+static void
+nfs4_proc_commit_setup(struct nfs_write_data *data, u64 start, u32 len, int how)
+{
+	struct rpc_task	*task = &data->task;
+	struct nfs4_compound *cp = &data->u.v4.compound;
+	struct rpc_message msg = {
+		.rpc_proc = NFSPROC4_COMPOUND,
+		.rpc_argp = cp,
+		.rpc_resp = cp,
+		.rpc_cred = data->cred,
+	};
+	struct inode *inode = data->inode;
+	int flags;
+
+	nfs4_setup_compound(cp, data->u.v4.ops, NFS_SERVER(inode), "commit [async]");
+	nfs4_setup_putfh(cp, NFS_FH(inode));
+	nfs4_setup_commit(cp, start, len, &data->verf);
+
+	/* Set the initial flags for the task. */
+	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
+
+	/* Finalize the task. */
+	rpc_init_task(task, NFS_CLIENT(inode), nfs4_commit_done, flags);
+	task->tk_calldata = data;
+	/* Release requests */
+	task->tk_release = nfs_writedata_release;
+
+	rpc_call_setup(task, &msg, 0);
+}
+
+/*
+ * nfs4_proc_renew(): This is not one of the nfs_rpc_ops; it is a special
+ * standalone procedure for queueing an asynchronous RENEW.
+ */
+/* Self-contained carrier for one async RENEW: the rpc_task plus the
+ * single-op compound it sends; freed in renew_release(). */
+struct renew_desc {
+	struct rpc_task		task;
+	struct nfs4_compound	compound;
+	struct nfs4_op		ops[1];
+};
+
+/*
+ * Completion callback for the async RENEW: just update lease state.
+ */
+static void
+renew_done(struct rpc_task *task)
+{
+	struct nfs4_compound *cp = (struct nfs4_compound *) task->tk_msg.rpc_argp;
+	process_lease(cp);
+}
+
+/*
+ * Release callback: frees the renew_desc (tk_calldata points at it,
+ * and the embedded rpc_task goes away with it).
+ */
+static void
+renew_release(struct rpc_task *task)
+{
+	kfree(task->tk_calldata);
+	task->tk_calldata = NULL;
+}
+
+/*
+ * nfs4_proc_renew(): This is not one of the nfs_rpc_ops; it is a special
+ * standalone procedure for queueing an asynchronous RENEW.  The
+ * renew_desc owns all state and is freed via renew_release() when the
+ * task completes.
+ */
+int
+nfs4_proc_renew(struct nfs_server *server)
+{
+	struct renew_desc *rp;
+	struct rpc_task *task;
+	struct nfs4_compound *cp;
+	struct rpc_message msg;
+
+	rp = (struct renew_desc *) kmalloc(sizeof(*rp), GFP_KERNEL);
+	if (!rp)
+		return -ENOMEM;
+	cp = &rp->compound;
+	task = &rp->task;
+
+	nfs4_setup_compound(cp, rp->ops, server, "renew");
+	nfs4_setup_renew(cp);
+
+	msg.rpc_proc = NFSPROC4_COMPOUND;
+	msg.rpc_argp = cp;
+	msg.rpc_resp = cp;
+	msg.rpc_cred = NULL;
+	rpc_init_task(task, server->client, renew_done, RPC_TASK_ASYNC);
+	rpc_call_setup(task, &msg, 0);
+	task->tk_calldata = rp;
+	task->tk_release = renew_release;
+
+	return rpc_execute(task);
+}
+
+/*
+ * NFSv4 method table plugged into the generic NFS client.  Note that
+ * rmdir reuses nfs4_proc_remove (v4 REMOVE handles both files and
+ * directories) and .commit is NULL -- only the async commit_setup
+ * path is provided here.
+ */
+struct nfs_rpc_ops	nfs_v4_clientops = {
+	.version	= 4,			/* protocol version */
+	.getroot	= nfs4_proc_get_root,
+	.getattr	= nfs4_proc_getattr,
+	.setattr	= nfs4_proc_setattr,
+	.lookup		= nfs4_proc_lookup,
+	.access		= nfs4_proc_access,
+	.readlink	= nfs4_proc_readlink,
+	.read		= nfs4_proc_read,
+	.write		= nfs4_proc_write,
+	.commit		= NULL,
+	.create		= nfs4_proc_create,
+	.remove		= nfs4_proc_remove,
+	.unlink_setup	= nfs4_proc_unlink_setup,
+	.unlink_done	= nfs4_proc_unlink_done,
+	.rename		= nfs4_proc_rename,
+	.link		= nfs4_proc_link,
+	.symlink	= nfs4_proc_symlink,
+	.mkdir		= nfs4_proc_mkdir,
+	.rmdir		= nfs4_proc_remove,
+	.readdir	= nfs4_proc_readdir,
+	.mknod		= nfs4_proc_mknod,
+	.statfs		= nfs4_proc_statfs,
+	.fsinfo		= nfs4_proc_fsinfo,
+	.pathconf	= nfs4_proc_pathconf,
+	.decode_dirent	= nfs4_decode_dirent,
+	.read_setup	= nfs4_proc_read_setup,
+	.write_setup	= nfs4_proc_write_setup,
+	.commit_setup	= nfs4_proc_commit_setup,
+};
+
+/*
+ * Local variables:
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
new file mode 100644
index 000000000000..4ba871885dbc
--- /dev/null
+++ b/fs/nfs/nfs4renewd.c
@@ -0,0 +1,110 @@
+/*
+ * fs/nfs/nfs4renewd.c
+ *
+ * Copyright (c) 2002 The Regents of the University of Michigan.
+ * All rights reserved.
+ *
+ * Kendrick Smith <kmsmith@umich.edu>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Implementation of the NFSv4 "renew daemon", which wakes up periodically to
+ * send a RENEW, to keep state alive on the server. The daemon is implemented
+ * as an rpc_task, not a real kernel thread, so it always runs in rpciod's
+ * context. There is one renewd per nfs_server.
+ *
+ * TODO: If the send queue gets backlogged (e.g., if the server goes down),
+ * we will keep filling the queue with periodic RENEW requests. We need a
+ * mechanism for ensuring that if renewd successfully sends off a request,
+ * then it only wakes up when the request is finished. Maybe use the
+ * child task framework of the RPC layer?
+ */
+
+#include <linux/sched.h>
+#include <linux/smp_lock.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/sunrpc/sched.h>
+#include <linux/sunrpc/clnt.h>
+
+#include <linux/nfs.h>
+#include <linux/nfs4.h>
+#include <linux/nfs_fs.h>
+
+static RPC_WAITQ(nfs4_renewd_queue, "nfs4_renewd_queue");
+
+/*
+ * The renew daemon's tk_action: fires once per wakeup.  If a third of
+ * the lease has elapsed since the last renewal, queue an async RENEW;
+ * in any case re-arm the timeout so the task wakes again when the
+ * next renewal falls due, and go back to sleep on the renewd queue.
+ */
+static void
+renewd(struct rpc_task *task)
+{
+	struct nfs_server *server = (struct nfs_server *)task->tk_calldata;
+	unsigned long lease = server->lease_time;
+	unsigned long last = server->last_renewal;
+	unsigned long timeout;
+
+	if (!server->nfs4_state)
+		/* No clientid established yet: just reschedule. */
+		timeout = (2 * lease) / 3;
+	else if (jiffies < last + lease/3)
+		/* Renewed recently: sleep until a third of the lease
+		 * has passed since the last renewal. */
+		timeout = (2 * lease) / 3 + last - jiffies;
+	else {
+		/* Queue an asynchronous RENEW. */
+		nfs4_proc_renew(server);
+		timeout = (2 * lease) / 3;
+	}
+
+	if (timeout < 5 * HZ)    /* safeguard */
+		timeout = 5 * HZ;
+	task->tk_timeout = timeout;
+	task->tk_action = renewd;
+	task->tk_exit = NULL;
+	rpc_sleep_on(&nfs4_renewd_queue, task, NULL, NULL);
+	return;
+}
+
+/*
+ * Start the renew daemon for 'server': allocate an async rpc_task
+ * whose action is renewd() and kick it off.  Called once per server
+ * at mount time, after the lease time has been determined.
+ */
+int
+nfs4_init_renewd(struct nfs_server *server)
+{
+	struct rpc_task *task;
+	int status;
+
+	lock_kernel();
+	status = -ENOMEM;
+	task = rpc_new_task(server->client, NULL, RPC_TASK_ASYNC);
+	if (!task)
+		goto out;
+	task->tk_calldata = server;
+	task->tk_action = renewd;
+	status = rpc_execute(task);
+
+out:
+	unlock_kernel();
+	return status;
+}
+
+/*
+ * Local variables:
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
new file mode 100644
index 000000000000..ecbc54fb1048
--- /dev/null
+++ b/fs/nfs/nfs4state.c
@@ -0,0 +1,81 @@
+/*
+ * fs/nfs/nfs4state.c
+ *
+ * Client-side XDR for NFSv4.
+ *
+ * Copyright (c) 2002 The Regents of the University of Michigan.
+ * All rights reserved.
+ *
+ * Kendrick Smith <kmsmith@umich.edu>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Implementation of the NFSv4 state model. For the time being,
+ * this is minimal, but will be made much more complex in a
+ * subsequent patch.
+ */
+
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <linux/nfs_fs.h>
+
+/*
+ * nfs4_get_client(): returns an empty client structure
+ * nfs4_put_client(): drops reference to client structure
+ *
+ * Since these are allocated/deallocated very rarely, we don't
+ * bother putting them in a slab cache...
+ */
+/*
+ * Allocate a fresh nfs4_client with a reference count of one.
+ * Returns NULL on allocation failure.  Only cl_count, cl_clientid
+ * and cl_lockowners are initialized here.
+ */
+struct nfs4_client *
+nfs4_get_client(void)
+{
+	struct nfs4_client *clp;
+
+	if ((clp = kmalloc(sizeof(*clp), GFP_KERNEL))) {
+		atomic_set(&clp->cl_count, 1);
+		clp->cl_clientid = 0;
+		INIT_LIST_HEAD(&clp->cl_lockowners);
+	}
+	return clp;
+}
+
+/*
+ * Drop a reference to an nfs4_client, freeing it when the last
+ * reference goes away.  By then all lockowners must have been
+ * released (BUG otherwise).
+ */
+void
+nfs4_put_client(struct nfs4_client *clp)
+{
+	BUG_ON(!clp);
+	BUG_ON(!atomic_read(&clp->cl_count));
+
+	if (atomic_dec_and_test(&clp->cl_count)) {
+		BUG_ON(!list_empty(&clp->cl_lockowners));
+		kfree(clp);
+	}
+}
+
+/*
+ * Local variables:
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
new file mode 100644
index 000000000000..edbf0e2a02d7
--- /dev/null
+++ b/fs/nfs/nfs4xdr.c
@@ -0,0 +1,1777 @@
+/*
+ * fs/nfs/nfs4xdr.c
+ *
+ * Client-side XDR for NFSv4.
+ *
+ * Copyright (c) 2002 The Regents of the University of Michigan.
+ * All rights reserved.
+ *
+ * Kendrick Smith <kmsmith@umich.edu>
+ * Andy Adamson <andros@umich.edu>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/param.h>
+#include <linux/time.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/utsname.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/pagemap.h>
+#include <linux/proc_fs.h>
+#include <linux/kdev_t.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfs.h>
+#include <linux/nfs4.h>
+#include <linux/nfs_fs.h>
+
+/* Empirically, it seems that the NFS client gets confused if
+ * cookies larger than this are returned -- presumably a
+ * signedness issue?
+ */
+#define COOKIE_MAX 0x7fffffff
+
+#define NFS4_CLIENTID(server) ((server)->nfs4_state->cl_clientid)
+
+#define NFSDBG_FACILITY NFSDBG_XDR
+
+/* Mapping from NFS error code to "errno" error code. */
+#define errno_NFSERR_IO EIO
+
+extern int nfs_stat_to_errno(int);
+
+#define NFS4_enc_void_sz 0
+#define NFS4_dec_void_sz 0
+#define NFS4_enc_compound_sz 1024 /* XXX: large enough? */
+#define NFS4_dec_compound_sz 1024 /* XXX: large enough? */
+
/*
 * Map an on-the-wire NFSv4 file type (NF4REG..NF4NAMEDATTR) to the
 * corresponding S_IFMT mode bits and the NFSv2-style type stored in
 * struct nfs_fattr.  Indexed directly by the type value; the trailing
 * slots absorb out-of-range values as NFNON.
 */
static struct {
	unsigned int mode;
	unsigned int nfs2type;
} nfs_type2fmt[] = {
	{ 0,		NFNON	},
	{ S_IFREG,	NFREG	},
	{ S_IFDIR,	NFDIR	},
	{ S_IFBLK,	NFBLK	},
	{ S_IFCHR,	NFCHR	},
	{ S_IFLNK,	NFLNK	},
	{ S_IFSOCK,	NFSOCK	},
	{ S_IFIFO,	NFFIFO	},
	{ 0,		NFNON	},
	{ 0,		NFNON	},
};
+
+/*
+ * START OF "GENERIC" ENCODE ROUTINES.
+ * These may look a little ugly since they are imported from a "generic"
+ * set of XDR encode/decode routines which are intended to be shared by
+ * all of our NFSv4 implementations (OpenBSD, MacOS X...).
+ *
+ * If the pain of reading these is too great, it should be a straightforward
+ * task to translate them into Linux-specific versions which are more
+ * consistent with the style used in NFSv2/v3...
+ */
/*
 * Encode-side helpers.  cp->p is the shared write cursor and cp->end
 * marks the end of the send buffer; each op body does
 * RESERVE_SPACE() / WRITE*() / ADJUST_ARGS().  Note RESERVE_SPACE()
 * BUG()s on buffer overrun rather than failing gracefully, so reserve
 * sizes must be conservative.
 */
#define ENCODE_HEAD \
	u32 *p;
#define ENCODE_TAIL \
	return 0

#define WRITE32(n) *p++ = htonl(n)
#define WRITE64(n) do { \
	*p++ = htonl((u32)((n) >> 32)); \
	*p++ = htonl((u32)(n)); \
} while (0)
/* Writes a length-unprefixed XDR opaque; advances p past the padding. */
#define WRITEMEM(ptr,nbytes) do { \
	p = xdr_writemem(p, ptr, nbytes); \
} while (0)

#define RESERVE_SPACE(nbytes) do { BUG_ON(cp->p + XDR_QUADLEN(nbytes) > cp->end); p = cp->p; } while (0)
#define ADJUST_ARGS() cp->p = p
+
/*
 * Copy @nbytes of opaque data into the XDR stream at @p, zero-filling
 * the pad bytes of the final word (XDR opaques are 4-byte aligned).
 * The last word is zeroed *before* the memcpy so the payload's own
 * trailing bytes are not clobbered.  Returns the next free word.
 */
static inline
u32 *xdr_writemem(u32 *p, const void *ptr, int nbytes)
{
	int tmp = XDR_QUADLEN(nbytes);
	if (!tmp)
		return p;
	p[tmp-1] = 0;
	memcpy(p, ptr, nbytes);
	return p + tmp;
}
+
/*
 * FIXME: dummy mapping until the userland idmap upcall is available.
 * Every uid encodes as the owner string "nobody"; returns its length.
 */
static int
encode_uid(char *p, uid_t uid)
{
	static const char nobody[] = "nobody";

	memcpy(p, nobody, sizeof(nobody));
	return sizeof(nobody) - 1;
}
+
/*
 * FIXME: dummy mapping until the userland idmap upcall is available.
 * Every gid encodes as the group string "nobody"; returns its length.
 */
static int
encode_gid(char *p, gid_t gid)
{
	static const char nobody[] = "nobody";

	memcpy(p, nobody, sizeof(nobody));
	return sizeof(nobody) - 1;
}
+
+static int
+encode_attrs(struct nfs4_compound *cp, struct iattr *iap)
+{
+ char owner_name[256];
+ char owner_group[256];
+ int owner_namelen = 0;
+ int owner_grouplen = 0;
+ u32 *q;
+ int len;
+ u32 bmval0 = 0;
+ u32 bmval1 = 0;
+ int status;
+ ENCODE_HEAD;
+
+ /*
+ * We reserve enough space to write the entire attribute buffer at once.
+ * In the worst-case, this would be
+ * 12(bitmap) + 4(attrlen) + 8(size) + 4(mode) + 4(atime) + 4(mtime)
+ * = 36 bytes, plus any contribution from variable-length fields
+ * such as owner/group/acl's.
+ */
+ len = 36;
+
+ /* Sigh */
+ if (iap->ia_valid & ATTR_UID) {
+ status = owner_namelen = encode_uid(owner_name, iap->ia_uid);
+ if (status < 0) {
+ printk(KERN_WARNING "nfs: couldn't resolve uid %d to string\n",
+ iap->ia_uid);
+ goto out;
+ }
+ len += XDR_QUADLEN(owner_namelen);
+ }
+ if (iap->ia_valid & ATTR_GID) {
+ status = owner_grouplen = encode_gid(owner_group, iap->ia_gid);
+ if (status < 0) {
+ printk(KERN_WARNING "nfs4: couldn't resolve gid %d to string\n",
+ iap->ia_gid);
+ goto out;
+ }
+ len += XDR_QUADLEN(owner_grouplen);
+ }
+ RESERVE_SPACE(len);
+
+ /*
+ * We write the bitmap length now, but leave the bitmap and the attribute
+ * buffer length to be backfilled at the end of this routine.
+ */
+ WRITE32(2);
+ q = p;
+ p += 3;
+
+ if (iap->ia_valid & ATTR_SIZE) {
+ bmval0 |= FATTR4_WORD0_SIZE;
+ WRITE64(iap->ia_size);
+ }
+ if (iap->ia_valid & ATTR_MODE) {
+ bmval1 |= FATTR4_WORD1_MODE;
+ WRITE32(iap->ia_mode);
+ }
+ if (iap->ia_valid & ATTR_UID) {
+ bmval1 |= FATTR4_WORD1_OWNER;
+ WRITE32(owner_namelen);
+ WRITEMEM(owner_name, owner_namelen);
+ p += owner_namelen;
+ }
+ if (iap->ia_valid & ATTR_GID) {
+ bmval1 |= FATTR4_WORD1_OWNER_GROUP;
+ WRITE32(owner_grouplen);
+ WRITEMEM(owner_group, owner_grouplen);
+ p += owner_namelen;
+ }
+ if (iap->ia_valid & ATTR_ATIME_SET) {
+ bmval1 |= FATTR4_WORD1_TIME_ACCESS_SET;
+ WRITE32(NFS4_SET_TO_CLIENT_TIME);
+ WRITE32(0);
+ WRITE32(iap->ia_mtime);
+ WRITE32(0);
+ }
+ else if (iap->ia_valid & ATTR_ATIME) {
+ bmval1 |= FATTR4_WORD1_TIME_ACCESS_SET;
+ WRITE32(NFS4_SET_TO_SERVER_TIME);
+ }
+ if (iap->ia_valid & ATTR_MTIME_SET) {
+ bmval1 |= FATTR4_WORD1_TIME_MODIFY_SET;
+ WRITE32(NFS4_SET_TO_CLIENT_TIME);
+ WRITE32(0);
+ WRITE32(iap->ia_mtime);
+ WRITE32(0);
+ }
+ else if (iap->ia_valid & ATTR_MTIME) {
+ bmval1 |= FATTR4_WORD1_TIME_MODIFY_SET;
+ WRITE32(NFS4_SET_TO_SERVER_TIME);
+ }
+
+ ADJUST_ARGS();
+
+ /*
+ * Now we backfill the bitmap and the attribute buffer length.
+ */
+ len = (char *)p - (char *)q - 12;
+ *q++ = htonl(bmval0);
+ *q++ = htonl(bmval1);
+ *q++ = htonl(len);
+
+ status = 0;
+out:
+ return status;
+}
+
/* Encode an ACCESS op: opcode + the requested access-bit mask. */
static int
encode_access(struct nfs4_compound *cp, struct nfs4_access *access)
{
	ENCODE_HEAD;

	RESERVE_SPACE(8);
	WRITE32(OP_ACCESS);
	WRITE32(access->ac_req_access);
	ADJUST_ARGS();

	ENCODE_TAIL;
}
+
+static int
+encode_close(struct nfs4_compound *cp, struct nfs4_close *close)
+{
+ ENCODE_HEAD;
+
+ RESERVE_SPACE(20);
+ WRITE32(OP_CLOSE);
+ WRITE32(close->cl_seqid);
+ WRITEMEM(close->cl_stateid, sizeof(nfs4_stateid));
+ ADJUST_ARGS();
+
+ ENCODE_TAIL;
+}
+
/* Encode a COMMIT op: opcode + 64-bit offset + 32-bit length. */
static int
encode_commit(struct nfs4_compound *cp, struct nfs4_commit *commit)
{
	ENCODE_HEAD;

	RESERVE_SPACE(16);
	WRITE32(OP_COMMIT);
	WRITE64(commit->co_start);
	WRITE32(commit->co_len);
	ADJUST_ARGS();

	ENCODE_TAIL;
}
+
/*
 * Encode a CREATE op: opcode, file type, type-specific data (symlink
 * text or device numbers), the new object's name, and its attributes.
 */
static int
encode_create(struct nfs4_compound *cp, struct nfs4_create *create)
{
	ENCODE_HEAD;

	RESERVE_SPACE(8);
	WRITE32(OP_CREATE);
	WRITE32(create->cr_ftype);
	ADJUST_ARGS();

	/* createtype4 discriminates on the file type */
	switch (create->cr_ftype) {
	case NF4LNK:
		RESERVE_SPACE(4 + create->cr_textlen);
		WRITE32(create->cr_textlen);
		WRITEMEM(create->cr_text, create->cr_textlen);
		ADJUST_ARGS();
		break;

	case NF4BLK: case NF4CHR:
		RESERVE_SPACE(8);
		WRITE32(create->cr_specdata1);
		WRITE32(create->cr_specdata2);
		ADJUST_ARGS();
		break;

	default:
		/* directories, sockets, fifos carry no extra data */
		break;
	}

	RESERVE_SPACE(4 + create->cr_namelen);
	WRITE32(create->cr_namelen);
	WRITEMEM(create->cr_name, create->cr_namelen);
	ADJUST_ARGS();

	/* the initial attributes finish the op */
	return encode_attrs(cp, create->cr_attrs);
}
+
/* Encode a GETATTR op: opcode + a fixed two-word attribute bitmap. */
static int
encode_getattr(struct nfs4_compound *cp, struct nfs4_getattr *getattr)
{
	ENCODE_HEAD;

	RESERVE_SPACE(16);
	WRITE32(OP_GETATTR);
	WRITE32(2);
	WRITE32(getattr->gt_bmval[0]);
	WRITE32(getattr->gt_bmval[1]);
	ADJUST_ARGS();

	ENCODE_TAIL;
}
+
/* Encode a GETFH op: opcode only, no arguments. */
static int
encode_getfh(struct nfs4_compound *cp)
{
	ENCODE_HEAD;

	RESERVE_SPACE(4);
	WRITE32(OP_GETFH);
	ADJUST_ARGS();

	ENCODE_TAIL;
}
+
/* Encode a LINK op: opcode + new name (source is the saved fh). */
static int
encode_link(struct nfs4_compound *cp, struct nfs4_link *link)
{
	ENCODE_HEAD;

	RESERVE_SPACE(8 + link->ln_namelen);
	WRITE32(OP_LINK);
	WRITE32(link->ln_namelen);
	WRITEMEM(link->ln_name, link->ln_namelen);
	ADJUST_ARGS();

	ENCODE_TAIL;
}
+
/* Encode a LOOKUP op: opcode + component name from the dentry qstr. */
static int
encode_lookup(struct nfs4_compound *cp, struct nfs4_lookup *lookup)
{
	int len = lookup->lo_name->len;
	ENCODE_HEAD;

	RESERVE_SPACE(8 + len);
	WRITE32(OP_LOOKUP);
	WRITE32(len);
	WRITEMEM(lookup->lo_name->name, len);
	ADJUST_ARGS();

	ENCODE_TAIL;
}
+
/*
 * Encode an OPEN op.  In this "stateless" implementation every OPEN
 * uses seqid 0 and a fresh, monotonically increasing 4-byte owner id,
 * so there is no real open-owner sequencing yet.
 */
static int
encode_open(struct nfs4_compound *cp, struct nfs4_open *open)
{
	/* NOTE(review): not atomic — concurrent opens could share an id;
	 * presumably acceptable for this interim stateless model. */
	static int global_id = 0;
	int id = global_id++;
	int status;
	ENCODE_HEAD;

	/* seqid, share_access, share_deny, clientid, ownerlen, owner, opentype */
	RESERVE_SPACE(52);
	WRITE32(OP_OPEN);
	WRITE32(0);		/* seqid */
	WRITE32(open->op_share_access);
	WRITE32(0);		/* for us, share_deny == 0 always */
	WRITE64(NFS4_CLIENTID(cp->server));
	WRITE32(4);		/* owner is the 4-byte id above */
	WRITE32(id);
	WRITE32(open->op_opentype);
	ADJUST_ARGS();

	if (open->op_opentype == NFS4_OPEN_CREATE) {
		/* createhow4: exclusive carries a verifier, guarded/unguarded
		 * carry createattrs (possibly empty) */
		if (open->op_createmode == NFS4_CREATE_EXCLUSIVE) {
			RESERVE_SPACE(12);
			WRITE32(open->op_createmode);
			WRITEMEM(open->op_verifier, sizeof(nfs4_verifier));
			ADJUST_ARGS();
		}
		else if (open->op_attrs) {
			RESERVE_SPACE(4);
			WRITE32(open->op_createmode);
			ADJUST_ARGS();
			if ((status = encode_attrs(cp, open->op_attrs)))
				return status;
		}
		else {
			/* no attrs: empty bitmap + zero attrlen */
			RESERVE_SPACE(12);
			WRITE32(open->op_createmode);
			WRITE32(0);
			WRITE32(0);
			ADJUST_ARGS();
		}
	}

	RESERVE_SPACE(8 + open->op_name->len);
	WRITE32(NFS4_OPEN_CLAIM_NULL);
	WRITE32(open->op_name->len);
	WRITEMEM(open->op_name->name, open->op_name->len);
	ADJUST_ARGS();

	ENCODE_TAIL;
}
+
/* Encode an OPEN_CONFIRM op: opcode + open stateid + seqid. */
static int
encode_open_confirm(struct nfs4_compound *cp, struct nfs4_open_confirm *open_confirm)
{
	ENCODE_HEAD;

	/*
	 * Note: In this "stateless" implementation, the OPEN_CONFIRM
	 * seqid is always equal to 1.
	 */
	RESERVE_SPACE(24);
	WRITE32(OP_OPEN_CONFIRM);
	WRITEMEM(open_confirm->oc_stateid, sizeof(nfs4_stateid));
	WRITE32(1);
	ADJUST_ARGS();

	ENCODE_TAIL;
}
+
/* Encode a PUTFH op: opcode + opaque filehandle. */
static int
encode_putfh(struct nfs4_compound *cp, struct nfs4_putfh *putfh)
{
	int len = putfh->pf_fhandle->size;
	ENCODE_HEAD;

	RESERVE_SPACE(8 + len);
	WRITE32(OP_PUTFH);
	WRITE32(len);
	WRITEMEM(putfh->pf_fhandle->data, len);
	ADJUST_ARGS();

	ENCODE_TAIL;
}
+
/* Encode a PUTROOTFH op: opcode only. */
static int
encode_putrootfh(struct nfs4_compound *cp)
{
	ENCODE_HEAD;

	RESERVE_SPACE(4);
	WRITE32(OP_PUTROOTFH);
	ADJUST_ARGS();

	ENCODE_TAIL;
}
+
/*
 * Encode a READ op with the all-zero (anonymous) stateid, and point
 * the RPC receive buffer's page vector at the caller's pages so the
 * data lands there directly.
 */
static int
encode_read(struct nfs4_compound *cp, struct nfs4_read *read, struct rpc_rqst *req)
{
	struct rpc_auth *auth = req->rq_task->tk_auth;
	int replen;
	ENCODE_HEAD;

	RESERVE_SPACE(32);
	WRITE32(OP_READ);
	WRITE32(0);		/* all-zero stateid! */
	WRITE32(0);
	WRITE32(0);
	WRITE32(0);
	WRITE64(read->rd_offset);
	WRITE32(read->rd_length);
	ADJUST_ARGS();

	/* set up reply iovec
	 *    toplevel status + taglen + rescount + OP_PUTFH + status
	 *       + OP_READ + status + eof + datalen = 9
	 */
	replen = (RPC_REPHDRSIZE + auth->au_rslack + 9 + XDR_QUADLEN(cp->taglen)) << 2;
	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
	xdr_inline_pages(&req->rq_rcv_buf, replen,
			 read->rd_pages, read->rd_pgbase, read->rd_length);

	ENCODE_TAIL;
}
+
/*
 * Encode a READDIR op and route the entry stream into the caller's
 * pages.  The "dircount" hint is synthesized as count/32 since we only
 * track a single byte budget.
 */
static int
encode_readdir(struct nfs4_compound *cp, struct nfs4_readdir *readdir, struct rpc_rqst *req)
{
	struct rpc_auth *auth = req->rq_task->tk_auth;
	int replen;
	ENCODE_HEAD;

	RESERVE_SPACE(40);
	WRITE32(OP_READDIR);
	WRITE64(readdir->rd_cookie);
	WRITEMEM(readdir->rd_req_verifier, sizeof(nfs4_verifier));
	WRITE32(readdir->rd_count >> 5);	/* meaningless "dircount" field */
	WRITE32(readdir->rd_count);
	WRITE32(2);
	WRITE32(readdir->rd_bmval[0]);
	WRITE32(readdir->rd_bmval[1]);
	ADJUST_ARGS();

	/* set up reply iovec
	 *    toplevel_status + taglen + rescount + OP_PUTFH + status
	 *      + OP_READDIR + status + verifier(2) = 9
	 */
	replen = (RPC_REPHDRSIZE + auth->au_rslack + 9 + XDR_QUADLEN(cp->taglen)) << 2;
	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
	xdr_inline_pages(&req->rq_rcv_buf, replen, readdir->rd_pages,
			 readdir->rd_pgbase, readdir->rd_count);

	ENCODE_TAIL;
}
+
/*
 * Encode a READLINK op (opcode only) and arrange for the link text to
 * be received directly into the caller's page.
 */
static int
encode_readlink(struct nfs4_compound *cp, struct nfs4_readlink *readlink, struct rpc_rqst *req)
{
	struct rpc_auth *auth = req->rq_task->tk_auth;
	int replen;
	ENCODE_HEAD;

	RESERVE_SPACE(4);
	WRITE32(OP_READLINK);
	ADJUST_ARGS();

	/* set up reply iovec
	 *    toplevel_status + taglen + rescount + OP_PUTFH + status
	 *      + OP_READLINK + status = 7
	 */
	replen = (RPC_REPHDRSIZE + auth->au_rslack + 7 + XDR_QUADLEN(cp->taglen)) << 2;
	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
	xdr_inline_pages(&req->rq_rcv_buf, replen, readlink->rl_pages, 0, readlink->rl_count);

	ENCODE_TAIL;
}
+
/* Encode a REMOVE op: opcode + name of the entry to unlink. */
static int
encode_remove(struct nfs4_compound *cp, struct nfs4_remove *remove)
{
	ENCODE_HEAD;

	RESERVE_SPACE(8 + remove->rm_namelen);
	WRITE32(OP_REMOVE);
	WRITE32(remove->rm_namelen);
	WRITEMEM(remove->rm_name, remove->rm_namelen);
	ADJUST_ARGS();

	ENCODE_TAIL;
}
+
/* Encode a RENAME op: opcode + old name + new name. */
static int
encode_rename(struct nfs4_compound *cp, struct nfs4_rename *rename)
{
	ENCODE_HEAD;

	RESERVE_SPACE(8 + rename->rn_oldnamelen);
	WRITE32(OP_RENAME);
	WRITE32(rename->rn_oldnamelen);
	WRITEMEM(rename->rn_oldname, rename->rn_oldnamelen);
	ADJUST_ARGS();

	RESERVE_SPACE(8 + rename->rn_newnamelen);
	WRITE32(rename->rn_newnamelen);
	WRITEMEM(rename->rn_newname, rename->rn_newnamelen);
	ADJUST_ARGS();

	ENCODE_TAIL;
}
+
/* Encode a RENEW op: opcode + our clientid, to refresh the lease. */
static int
encode_renew(struct nfs4_compound *cp)
{
	ENCODE_HEAD;

	RESERVE_SPACE(12);
	WRITE32(OP_RENEW);
	WRITE64(NFS4_CLIENTID(cp->server));
	ADJUST_ARGS();

	ENCODE_TAIL;
}
+
/* Encode a RESTOREFH op: opcode only. */
static int
encode_restorefh(struct nfs4_compound *cp)
{
	ENCODE_HEAD;

	RESERVE_SPACE(4);
	WRITE32(OP_RESTOREFH);
	ADJUST_ARGS();

	ENCODE_TAIL;
}
+
/* Encode a SAVEFH op: opcode only. */
static int
encode_savefh(struct nfs4_compound *cp)
{
	ENCODE_HEAD;

	RESERVE_SPACE(4);
	WRITE32(OP_SAVEFH);
	ADJUST_ARGS();

	ENCODE_TAIL;
}
+
/* Encode a SETATTR op: opcode + stateid, then the fattr4 payload. */
static int
encode_setattr(struct nfs4_compound *cp, struct nfs4_setattr *setattr)
{
	int status;
	ENCODE_HEAD;

	/* opcode(4) + 16-byte stateid */
	RESERVE_SPACE(20);
	WRITE32(OP_SETATTR);
	WRITEMEM(setattr->st_stateid, sizeof(nfs4_stateid));
	ADJUST_ARGS();

	if ((status = encode_attrs(cp, setattr->st_iap)))
		return status;

	ENCODE_TAIL;
}
+
/*
 * Encode a SETCLIENTID op: verifier, client name string, and the
 * callback program/netid/universal-address triple plus callback ident.
 */
static int
encode_setclientid(struct nfs4_compound *cp, struct nfs4_setclientid *setclientid)
{
	u32 total_len;
	u32 len1, len2, len3;
	ENCODE_HEAD;

	len1 = strlen(setclientid->sc_name);
	len2 = strlen(setclientid->sc_netid);
	len3 = strlen(setclientid->sc_uaddr);
	/* 32 fixed bytes: opcode + 8-byte verifier + 3 length words +
	 * cb program + cb ident; plus the three XDR-padded strings */
	total_len = XDR_QUADLEN(len1) + XDR_QUADLEN(len2) + XDR_QUADLEN(len3);
	total_len = (total_len << 2) + 32;

	RESERVE_SPACE(total_len);
	WRITE32(OP_SETCLIENTID);
	WRITEMEM(setclientid->sc_verifier, sizeof(nfs4_verifier));
	WRITE32(len1);
	WRITEMEM(setclientid->sc_name, len1);
	WRITE32(setclientid->sc_prog);
	WRITE32(len2);
	WRITEMEM(setclientid->sc_netid, len2);
	WRITE32(len3);
	WRITEMEM(setclientid->sc_uaddr, len3);
	WRITE32(setclientid->sc_cb_ident);
	ADJUST_ARGS();

	ENCODE_TAIL;
}
+
/*
 * Encode a SETCLIENTID_CONFIRM op: the clientid and confirm verifier
 * previously returned by SETCLIENTID, taken from the server state.
 */
static int
encode_setclientid_confirm(struct nfs4_compound *cp)
{
	ENCODE_HEAD;

	RESERVE_SPACE(12 + sizeof(nfs4_verifier));
	WRITE32(OP_SETCLIENTID_CONFIRM);
	WRITE64(cp->server->nfs4_state->cl_clientid);
	WRITEMEM(cp->server->nfs4_state->cl_confirm, sizeof(nfs4_verifier));
	ADJUST_ARGS();

	ENCODE_TAIL;
}
+
/*
 * Encode a WRITE op with the all-ones (anonymous) stateid, then splice
 * the caller's data pages into the send buffer after the header.
 */
static int
encode_write(struct nfs4_compound *cp, struct nfs4_write *write, struct rpc_rqst *req)
{
	struct xdr_buf *sndbuf = &req->rq_snd_buf;
	ENCODE_HEAD;

	RESERVE_SPACE(36);
	WRITE32(OP_WRITE);
	WRITE32(0xffffffff);	/* magic stateid -1 */
	WRITE32(0xffffffff);
	WRITE32(0xffffffff);
	WRITE32(0xffffffff);
	WRITE64(write->wr_offset);
	WRITE32(write->wr_stable_how);
	WRITE32(write->wr_len);
	ADJUST_ARGS();

	sndbuf->len = xdr_adjust_iovec(sndbuf->head, p);
	xdr_encode_pages(sndbuf, write->wr_pages, write->wr_pgbase, write->wr_len);

	ENCODE_TAIL;
}
+
/*
 * Encode a full COMPOUND request: tag, minor version, op count, then
 * each queued op in order via its specific encoder.  Stops and returns
 * the first encoder error; an unknown opnum is a programming error.
 */
static int
encode_compound(struct nfs4_compound *cp, struct rpc_rqst *req)
{
	int i, status = 0;
	ENCODE_HEAD;

	dprintk("encode_compound: tag=%.*s\n", (int)cp->taglen, cp->tag);

	RESERVE_SPACE(12 + cp->taglen);
	WRITE32(cp->taglen);
	WRITEMEM(cp->tag, cp->taglen);
	WRITE32(NFS4_MINOR_VERSION);
	WRITE32(cp->req_nops);
	ADJUST_ARGS();

	for (i = 0; i < cp->req_nops; i++) {
		switch (cp->ops[i].opnum) {
		case OP_ACCESS:
			status = encode_access(cp, &cp->ops[i].u.access);
			break;
		case OP_CLOSE:
			status = encode_close(cp, &cp->ops[i].u.close);
			break;
		case OP_COMMIT:
			status = encode_commit(cp, &cp->ops[i].u.commit);
			break;
		case OP_CREATE:
			status = encode_create(cp, &cp->ops[i].u.create);
			break;
		case OP_GETATTR:
			status = encode_getattr(cp, &cp->ops[i].u.getattr);
			break;
		case OP_GETFH:
			status = encode_getfh(cp);
			break;
		case OP_LINK:
			status = encode_link(cp, &cp->ops[i].u.link);
			break;
		case OP_LOOKUP:
			status = encode_lookup(cp, &cp->ops[i].u.lookup);
			break;
		case OP_OPEN:
			status = encode_open(cp, &cp->ops[i].u.open);
			break;
		case OP_OPEN_CONFIRM:
			status = encode_open_confirm(cp, &cp->ops[i].u.open_confirm);
			break;
		case OP_PUTFH:
			status = encode_putfh(cp, &cp->ops[i].u.putfh);
			break;
		case OP_PUTROOTFH:
			status = encode_putrootfh(cp);
			break;
		case OP_READ:
			status = encode_read(cp, &cp->ops[i].u.read, req);
			break;
		case OP_READDIR:
			status = encode_readdir(cp, &cp->ops[i].u.readdir, req);
			break;
		case OP_READLINK:
			status = encode_readlink(cp, &cp->ops[i].u.readlink, req);
			break;
		case OP_REMOVE:
			status = encode_remove(cp, &cp->ops[i].u.remove);
			break;
		case OP_RENAME:
			status = encode_rename(cp, &cp->ops[i].u.rename);
			break;
		case OP_RENEW:
			status = encode_renew(cp);
			break;
		case OP_RESTOREFH:
			status = encode_restorefh(cp);
			break;
		case OP_SAVEFH:
			status = encode_savefh(cp);
			break;
		case OP_SETATTR:
			status = encode_setattr(cp, &cp->ops[i].u.setattr);
			break;
		case OP_SETCLIENTID:
			status = encode_setclientid(cp, &cp->ops[i].u.setclientid);
			break;
		case OP_SETCLIENTID_CONFIRM:
			status = encode_setclientid_confirm(cp);
			break;
		case OP_WRITE:
			status = encode_write(cp, &cp->ops[i].u.write, req);
			break;
		default:
			/* caller queued an op we have no encoder for */
			BUG();
		}
		if (status)
			return status;
	}

	ENCODE_TAIL;
}
+/*
+ * END OF "GENERIC" ENCODE ROUTINES.
+ */
+
+
+/*
+ * Encode void argument
+ */
/*
 * Encode a void (NULL-procedure) argument: nothing to marshal, just
 * record the final send-buffer length.
 */
static int
nfs4_xdr_enc_void(struct rpc_rqst *req, u32 *p, void *dummy)
{
	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
	return 0;
}
+
+/*
+ * Encode COMPOUND argument
+ */
/*
 * Top-level COMPOUND argument encoder.  Sets up the compound's write
 * cursor/limit from the first send iovec, encodes all ops, and stamps
 * the compound for lease-renewal accounting.  The send length is only
 * finalized here when no encoder (READ/WRITE/READDIR...) already
 * attached page data and set it itself.
 */
static int
nfs4_xdr_enc_compound(struct rpc_rqst *req, u32 *p, struct nfs4_compound *cp)
{
	int status;
	struct xdr_buf *sndbuf = &req->rq_snd_buf;

	cp->p = p;
	cp->end = (u32 *) ((char *)req->rq_svec[0].iov_base + req->rq_svec[0].iov_len);
	status = encode_compound(cp, req);
	cp->timestamp = jiffies;

	if (!status && !sndbuf->page_len)
		req->rq_slen = xdr_adjust_iovec(sndbuf->head, cp->p);
	return status;
}
+
+
+/*
+ * START OF "GENERIC" DECODE ROUTINES.
+ * These may look a little ugly since they are imported from a "generic"
+ * set of XDR encode/decode routines which are intended to be shared by
+ * all of our NFSv4 implementations (OpenBSD, MacOS X...).
+ *
+ * If the pain of reading these is too great, it should be a straightforward
+ * task to translate them into Linux-specific versions which are more
+ * consistent with the style used in NFSv2/v3...
+ */
/*
 * Decode-side helpers.  cp->p is the read cursor and cp->end the end
 * of the received buffer.  DECODE_TAIL supplies the shared success and
 * xdr_error exits; READ_BUF() bounds-checks before exposing the data.
 */
#define DECODE_HEAD \
	u32 *p; \
	int status
#define DECODE_TAIL \
	status = 0; \
out: \
	return status; \
xdr_error: \
	printk(KERN_NOTICE "xdr error! (%s:%d)\n", __FILE__, __LINE__); \
	status = -EIO; \
	goto out

#define READ32(x) (x) = ntohl(*p++)
#define READ64(x) do { \
	(x) = (u64)ntohl(*p++) << 32; \
	(x) |= ntohl(*p++); \
} while (0)
/* nfstime4 is secs(8) + nsecs(4); this skips the high seconds word and
 * packs (low-seconds << 32) | nsecs into x. */
#define READTIME(x) do { \
	p++; \
	(x) = (u64)ntohl(*p++) << 32; \
	(x) |= ntohl(*p++); \
} while (0)
#define COPYMEM(x,nbytes) do { \
	memcpy((x), p, nbytes); \
	p += XDR_QUADLEN(nbytes); \
} while (0)

/* Fails to xdr_error (provided by DECODE_TAIL) on a short buffer. */
#define READ_BUF(nbytes) do { \
	if (nbytes > (u32)((char *)cp->end - (char *)cp->p)) \
		goto xdr_error; \
	p = cp->p; \
	cp->p += XDR_QUADLEN(nbytes); \
} while (0)
+
+/*
+ * FIXME: The following dummy entry will be replaced once the userland
+ * upcall gets in...
+ */
+static int
+decode_uid(char *p, u32 len, uid_t *uid)
+{
+ *uid = -2;
+ return 0;
+}
+
+/*
+ * FIXME: The following dummy entry will be replaced once the userland
+ * upcall gets in...
+ */
+static int
+decode_gid(char *p, u32 len, gid_t *gid)
+{
+ *gid = -2;
+ return 0;
+}
+
/* Decode a change_info4: atomic flag + before/after change attrs. */
static int
decode_change_info(struct nfs4_compound *cp, struct nfs4_change_info *cinfo)
{
	DECODE_HEAD;

	READ_BUF(20);
	READ32(cinfo->atomic);
	READ64(cinfo->before);
	READ64(cinfo->after);

	DECODE_TAIL;
}
+
/*
 * Decode an ACCESS result: supported + access masks.  The server may
 * not grant bits we never asked for, nor "access" bits it does not
 * also report as supported.
 */
static int
decode_access(struct nfs4_compound *cp, int nfserr, struct nfs4_access *access)
{
	u32 supp, acc;
	DECODE_HEAD;

	if (!nfserr) {
		READ_BUF(8);
		READ32(supp);
		READ32(acc);

		status = -EIO;
		if ((supp & ~access->ac_req_access) || (acc & ~supp)) {
			printk(KERN_NOTICE "NFS: server returned bad bits in access call!\n");
			goto out;
		}
		*access->ac_resp_supported = supp;
		*access->ac_resp_access = acc;
	}

	DECODE_TAIL;
}
+
/* Decode a CLOSE result: the updated stateid (on success only). */
static int
decode_close(struct nfs4_compound *cp, int nfserr, struct nfs4_close *close)
{
	DECODE_HEAD;

	if (!nfserr) {
		READ_BUF(sizeof(nfs4_stateid));
		COPYMEM(close->cl_stateid, sizeof(nfs4_stateid));
	}

	DECODE_TAIL;
}
+
/* Decode a COMMIT result: the 8-byte write verifier. */
static int
decode_commit(struct nfs4_compound *cp, int nfserr, struct nfs4_commit *commit)
{
	DECODE_HEAD;

	if (!nfserr) {
		READ_BUF(8);
		COPYMEM(commit->co_verifier->verifier, 8);
	}

	DECODE_TAIL;
}
+
/*
 * Decode a CREATE result: change_info plus the attrset bitmap, which
 * is read and discarded (we don't track which attrs were applied).
 */
static int
decode_create(struct nfs4_compound *cp, int nfserr, struct nfs4_create *create)
{
	u32 bmlen;
	DECODE_HEAD;

	if (!nfserr) {
		if ((status = decode_change_info(cp, create->cr_cinfo)))
			goto out;
		READ_BUF(4);
		READ32(bmlen);
		if (bmlen > 2)
			goto xdr_error;
		/* skip the bitmap words themselves */
		READ_BUF(bmlen << 2);
	}

	DECODE_TAIL;
}
+
+extern u32 nfs4_fattr_bitmap[2];
+extern u32 nfs4_fsinfo_bitmap[2];
+extern u32 nfs4_fsstat_bitmap[2];
+extern u32 nfs4_pathconf_bitmap[2];
+
/*
 * Decode a GETATTR result into whichever of fattr/fsstat/fsinfo/
 * pathconf the caller supplied.  The returned bitmap must be a subset
 * of what was requested; each attribute present is consumed in bitmap
 * order and its byte count accumulated in `len`, which must match the
 * server's declared attrlen exactly at the end.
 *
 * NOTE(review): the destination pointers (nfp, fsstat, ...) are only
 * dereferenced for attributes the caller requested — presumably the
 * caller always supplies the matching structure for its request bitmap.
 */
static int
decode_getattr(struct nfs4_compound *cp, int nfserr, struct nfs4_getattr *getattr)
{
	struct nfs_fattr *nfp = getattr->gt_attrs;
	struct nfs_fsstat *fsstat = getattr->gt_fsstat;
	struct nfs_fsinfo *fsinfo = getattr->gt_fsinfo;
	struct nfs_pathconf *pathconf = getattr->gt_pathconf;
	u32 bmlen;
	u32 bmval0 = 0;
	u32 bmval1 = 0;
	u32 attrlen;
	u32 dummy32;
	u32 len = 0;
	unsigned int type;
	int fmode = 0;
	DECODE_HEAD;

	if (nfserr)
		goto success;

	READ_BUF(4);
	READ32(bmlen);
	if (bmlen > 2)
		goto xdr_error;

	READ_BUF((bmlen << 2) + 4);
	if (bmlen > 0)
		READ32(bmval0);
	if (bmlen > 1)
		READ32(bmval1);
	READ32(attrlen);

	/* the result bitmap must be a subset of the request bitmap */
	if ((bmval0 & ~getattr->gt_bmval[0]) ||
	    (bmval1 & ~getattr->gt_bmval[1])) {
		dprintk("read_attrs: server returned bad attributes!\n");
		goto xdr_error;
	}
	getattr->gt_bmres[0] = bmval0;
	getattr->gt_bmres[1] = bmval1;

	/*
	 * In case the server doesn't return some attributes,
	 * we initialize them here to some nominal values..
	 */
	if (nfp) {
		nfp->valid = NFS_ATTR_FATTR | NFS_ATTR_FATTR_V3 | NFS_ATTR_FATTR_V4;
		nfp->nlink = 1;
		nfp->timestamp = jiffies;
	}
	if (fsinfo) {
		fsinfo->rtmult = fsinfo->wtmult = 512;	/* not on the wire; arbitrary default */
		fsinfo->lease_time = 60;
	}

	/* --- word-0 attributes, in bitmap order --- */
	if (bmval0 & FATTR4_WORD0_TYPE) {
		READ_BUF(4);
		len += 4;
		READ32(type);
		if (type < NF4REG || type > NF4NAMEDATTR) {
			dprintk("read_attrs: bad type %d\n", type);
			goto xdr_error;
		}
		nfp->type = nfs_type2fmt[type].nfs2type;
		fmode = nfs_type2fmt[type].mode;
		dprintk("read_attrs: type=%d\n", (u32)nfp->type);
	}
	if (bmval0 & FATTR4_WORD0_CHANGE) {
		READ_BUF(8);
		len += 8;
		READ64(nfp->change_attr);
		dprintk("read_attrs: changeid=%Ld\n", (u64)nfp->change_attr);
	}
	if (bmval0 & FATTR4_WORD0_SIZE) {
		READ_BUF(8);
		len += 8;
		READ64(nfp->size);
		dprintk("read_attrs: size=%Ld\n", (u64)nfp->size);
	}
	if (bmval0 & FATTR4_WORD0_FSID) {
		READ_BUF(16);
		len += 16;
		READ64(nfp->fsid_u.nfs4.major);
		READ64(nfp->fsid_u.nfs4.minor);
		dprintk("read_attrs: fsid=0x%Lx/0x%Lx\n",
			nfp->fsid_u.nfs4.major, nfp->fsid_u.nfs4.minor);
	}
	if (bmval0 & FATTR4_WORD0_LEASE_TIME) {
		READ_BUF(4);
		len += 4;
		READ32(fsinfo->lease_time);
		dprintk("read_attrs: lease_time=%d\n", fsinfo->lease_time);
	}
	if (bmval0 & FATTR4_WORD0_FILEID) {
		READ_BUF(8);
		len += 8;
		READ64(nfp->fileid);
		dprintk("read_attrs: fileid=%Ld\n", nfp->fileid);
	}
	if (bmval0 & FATTR4_WORD0_FILES_AVAIL) {
		READ_BUF(8);
		len += 8;
		READ64(fsstat->afiles);
		dprintk("read_attrs: files_avail=0x%Lx\n", fsstat->afiles);
	}
	if (bmval0 & FATTR4_WORD0_FILES_FREE) {
		READ_BUF(8);
		len += 8;
		READ64(fsstat->ffiles);
		dprintk("read_attrs: files_free=0x%Lx\n", fsstat->ffiles);
	}
	if (bmval0 & FATTR4_WORD0_FILES_TOTAL) {
		READ_BUF(8);
		len += 8;
		READ64(fsstat->tfiles);
		dprintk("read_attrs: files_tot=0x%Lx\n", fsstat->tfiles);
	}
	if (bmval0 & FATTR4_WORD0_MAXFILESIZE) {
		READ_BUF(8);
		len += 8;
		READ64(fsinfo->maxfilesize);
		dprintk("read_attrs: maxfilesize=0x%Lx\n", fsinfo->maxfilesize);
	}
	if (bmval0 & FATTR4_WORD0_MAXLINK) {
		READ_BUF(4);
		len += 4;
		READ32(pathconf->max_link);
		dprintk("read_attrs: maxlink=%d\n", pathconf->max_link);
	}
	if (bmval0 & FATTR4_WORD0_MAXNAME) {
		READ_BUF(4);
		len += 4;
		READ32(pathconf->max_namelen);
		dprintk("read_attrs: maxname=%d\n", pathconf->max_namelen);
	}
	if (bmval0 & FATTR4_WORD0_MAXREAD) {
		READ_BUF(8);
		len += 8;
		READ64(fsinfo->rtmax);
		fsinfo->rtpref = fsinfo->dtpref = fsinfo->rtmax;
		dprintk("read_attrs: maxread=%d\n", fsinfo->rtmax);
	}
	if (bmval0 & FATTR4_WORD0_MAXWRITE) {
		READ_BUF(8);
		len += 8;
		READ64(fsinfo->wtmax);
		fsinfo->wtpref = fsinfo->wtmax;
		dprintk("read_attrs: maxwrite=%d\n", fsinfo->wtmax);
	}

	/* --- word-1 attributes --- */
	if (bmval1 & FATTR4_WORD1_MODE) {
		READ_BUF(4);
		len += 4;
		READ32(dummy32);
		/* merge permission bits with the S_IFMT bits derived
		 * from the TYPE attribute above */
		nfp->mode = (dummy32 & ~S_IFMT) | fmode;
		dprintk("read_attrs: mode=0%o\n", nfp->mode);
	}
	if (bmval1 & FATTR4_WORD1_NUMLINKS) {
		READ_BUF(4);
		len += 4;
		READ32(nfp->nlink);
		dprintk("read_attrs: nlinks=0%o\n", nfp->nlink);
	}
	if (bmval1 & FATTR4_WORD1_OWNER) {
		READ_BUF(4);
		len += 4;
		READ32(dummy32);	/* name length */
		if (dummy32 > XDR_MAX_NETOBJ) {
			dprintk("read_attrs: name too long!\n");
			goto xdr_error;
		}
		READ_BUF(dummy32);
		len += (XDR_QUADLEN(dummy32) << 2);
		if ((status = decode_uid((char *)p, dummy32, &nfp->uid))) {
			dprintk("read_attrs: gss_get_num failed!\n");
			goto out;
		}
		dprintk("read_attrs: uid=%d\n", (int)nfp->uid);
	}
	if (bmval1 & FATTR4_WORD1_OWNER_GROUP) {
		READ_BUF(4);
		len += 4;
		READ32(dummy32);
		if (dummy32 > XDR_MAX_NETOBJ) {
			dprintk("read_attrs: name too long!\n");
			goto xdr_error;
		}
		READ_BUF(dummy32);
		len += (XDR_QUADLEN(dummy32) << 2);
		if ((status = decode_gid((char *)p, dummy32, &nfp->gid))) {
			dprintk("read_attrs: gss_get_num failed!\n");
			goto out;
		}
		dprintk("read_attrs: gid=%d\n", (int)nfp->gid);
	}
	if (bmval1 & FATTR4_WORD1_RAWDEV) {
		READ_BUF(8);
		len += 8;
		READ32(dummy32);
		nfp->rdev = (dummy32 << MINORBITS);
		READ32(dummy32);
		nfp->rdev |= (dummy32 & MINORMASK);
		dprintk("read_attrs: rdev=%d\n", nfp->rdev);
	}
	if (bmval1 & FATTR4_WORD1_SPACE_AVAIL) {
		READ_BUF(8);
		len += 8;
		READ64(fsstat->abytes);
		dprintk("read_attrs: savail=0x%Lx\n", fsstat->abytes);
	}
	if (bmval1 & FATTR4_WORD1_SPACE_FREE) {
		READ_BUF(8);
		len += 8;
		READ64(fsstat->fbytes);
		dprintk("read_attrs: sfree=0x%Lx\n", fsstat->fbytes);
	}
	if (bmval1 & FATTR4_WORD1_SPACE_TOTAL) {
		READ_BUF(8);
		len += 8;
		READ64(fsstat->tbytes);
		dprintk("read_attrs: stotal=0x%Lx\n", fsstat->tbytes);
	}
	if (bmval1 & FATTR4_WORD1_SPACE_USED) {
		READ_BUF(8);
		len += 8;
		READ64(nfp->du.nfs3.used);
		dprintk("read_attrs: sused=0x%Lx\n", nfp->du.nfs3.used);
	}
	if (bmval1 & FATTR4_WORD1_TIME_ACCESS) {
		READ_BUF(12);
		len += 12;
		READTIME(nfp->atime);
		dprintk("read_attrs: atime=%d\n", (int)nfp->atime);
	}
	if (bmval1 & FATTR4_WORD1_TIME_METADATA) {
		READ_BUF(12);
		len += 12;
		READTIME(nfp->ctime);
		dprintk("read_attrs: ctime=%d\n", (int)nfp->ctime);
	}
	if (bmval1 & FATTR4_WORD1_TIME_MODIFY) {
		READ_BUF(12);
		len += 12;
		READTIME(nfp->mtime);
		dprintk("read_attrs: mtime=%d\n", (int)nfp->mtime);
	}
	/* every byte of the attribute buffer must be accounted for */
	if (len != attrlen)
		goto xdr_error;

success:
	DECODE_TAIL;
}
+
/*
 * Decode a GETFH result into @getfh->gf_fhandle.  The handle is
 * zeroed first so filehandles can be compared with memcmp even when
 * shorter than NFS_MAXFHSIZE.
 */
static int
decode_getfh(struct nfs4_compound *cp, int nfserr, struct nfs4_getfh *getfh)
{
	struct nfs_fh *fh = getfh->gf_fhandle;
	int len;
	DECODE_HEAD;

	/* Zero handle first to allow comparisons */
	memset(fh, 0, sizeof(*fh));

	if (!nfserr) {
		READ_BUF(4);
		READ32(len);
		if (len > NFS_MAXFHSIZE)
			goto xdr_error;
		fh->size = len;
		READ_BUF(len);
		COPYMEM(fh->data, len);
	}

	DECODE_TAIL;
}
+
+static int
+decode_link(struct nfs4_compound *cp, int nfserr, struct nfs4_link *link)
+{
+ int status = 0;
+
+ if (!nfserr)
+ status = decode_change_info(cp, link->ln_cinfo);
+ return status;
+}
+
/*
 * Decode an OPEN result: stateid, change_info, result flags, and the
 * attrset bitmap (skipped).  Delegations are not supported yet, so any
 * delegation_type other than NONE is rejected as a decode error.
 */
static int
decode_open(struct nfs4_compound *cp, int nfserr, struct nfs4_open *open)
{
	u32 bmlen, delegation_type;
	DECODE_HEAD;

	if (!nfserr) {
		READ_BUF(sizeof(nfs4_stateid));
		COPYMEM(open->op_stateid, sizeof(nfs4_stateid));

		decode_change_info(cp, open->op_cinfo);

		READ_BUF(8);
		READ32(*open->op_rflags);
		READ32(bmlen);
		if (bmlen > 10)
			goto xdr_error;

		READ_BUF((bmlen << 2) + 4);
		p += bmlen;	/* discard the attrset bitmap words */
		READ32(delegation_type);
		if (delegation_type != NFS4_OPEN_DELEGATE_NONE)
			goto xdr_error;
	}

	DECODE_TAIL;
}
+
/* Decode an OPEN_CONFIRM result: the confirmed stateid. */
static int
decode_open_confirm(struct nfs4_compound *cp, int nfserr, struct nfs4_open_confirm *open_confirm)
{
	DECODE_HEAD;

	if (!nfserr) {
		READ_BUF(sizeof(nfs4_stateid));
		COPYMEM(open_confirm->oc_stateid, sizeof(nfs4_stateid));
	}

	DECODE_TAIL;
}
+
/*
 * Decode a READ result header: eof flag (optional for the caller) and
 * byte count.  The data itself arrives in the pages wired up by
 * encode_read(); a count larger than requested is a decode error.
 */
static int
decode_read(struct nfs4_compound *cp, int nfserr, struct nfs4_read *read)
{
	u32 throwaway;
	DECODE_HEAD;

	if (!nfserr) {
		READ_BUF(8);
		if (read->rd_eof)
			READ32(*read->rd_eof);
		else
			READ32(throwaway);	/* caller doesn't want eof */
		READ32(*read->rd_bytes_read);
		if (*read->rd_bytes_read > read->rd_length)
			goto xdr_error;
	}

	DECODE_TAIL;
}
+
/*
 * Decode a READDIR result: copy the cookie verifier, then walk the
 * entry4 list that landed in the receive page, sanity-checking each
 * entry (name length, bitmap, attrlen) without actually consuming the
 * attributes — only a FILEID-only bitmap is expected.  A truncated
 * reply is handled by terminating the entry list in place rather than
 * failing the whole call.
 */
static int
decode_readdir(struct nfs4_compound *cp, int nfserr, struct rpc_rqst *req, struct nfs4_readdir *readdir)
{
	struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
	struct page *page = *rcvbuf->pages;
	unsigned int pglen = rcvbuf->page_len;
	u32 *end, *entry;
	u32 len, attrlen, word;
	int i;
	DECODE_HEAD;

	if (!nfserr) {
		READ_BUF(8);
		COPYMEM(readdir->rd_resp_verifier, 8);

		BUG_ON(pglen > PAGE_CACHE_SIZE);
		p = (u32 *) kmap(page);
		end = (u32 *) ((char *)p + pglen + readdir->rd_pgbase);

		/* each entry starts with a "value follows" boolean */
		while (*p++) {
			entry = p - 1;
			if (p + 3 > end)
				goto short_pkt;
			p += 2;			/* cookie */
			len = ntohl(*p++);	/* filename length */
			if (len > NFS4_MAXNAMLEN) {
				printk(KERN_WARNING "NFS: giant filename in readdir (len 0x%x)\n", len);
				goto err_unmap;
			}

			p += XDR_QUADLEN(len);
			if (p + 1 > end)
				goto short_pkt;
			len = ntohl(*p++);	/* bitmap length */
			if (len > 10) {
				printk(KERN_WARNING "NFS: giant bitmap in readdir (len 0x%x)\n", len);
				goto err_unmap;
			}
			if (p + len + 1 > end)
				goto short_pkt;
			attrlen = 0;
			for (i = 0; i < len; i++) {
				word = ntohl(*p++);
				if (!word)
					continue;
				else if (i == 0 && word == FATTR4_WORD0_FILEID) {
					attrlen = 8;	/* a fileid is the only attr we accept */
					continue;
				}
				printk(KERN_WARNING "NFS: unexpected bitmap word in readdir (0x%x)\n", word);
				goto err_unmap;
			}
			if (ntohl(*p++) != attrlen) {
				printk(KERN_WARNING "NFS: unexpected attrlen in readdir\n");
				goto err_unmap;
			}
			p += XDR_QUADLEN(attrlen);
			if (p + 1 > end)
				goto short_pkt;
		}
		kunmap(page);
	}

	DECODE_TAIL;
short_pkt:
	printk(KERN_NOTICE "NFS: short packet in readdir reply!\n");
	/* truncate listing: overwrite the current entry with
	 * value-follows=0, eof=0 */
	kunmap(page);
	entry[0] = entry[1] = 0;
	return 0;
err_unmap:
	kunmap(page);
	return -errno_NFSERR_IO;
}
+
+static int
+decode_readlink(struct nfs4_compound *cp, int nfserr, struct rpc_rqst *req, struct nfs4_readlink *readlink)
+{
+ struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
+ u32 *strlen;
+ u32 len;
+ char *string;
+
+ if (!nfserr) {
+ /*
+ * The XDR encode routine has set things up so that
+ * the link text will be copied directly into the
+ * buffer. We just have to do overflow-checking,
+	 * and null-terminate the text (the VFS expects
+ * null-termination).
+ */
+ strlen = (u32 *) kmap(rcvbuf->pages[0]);
+ len = ntohl(*strlen);
+ if (len > PAGE_CACHE_SIZE - 5) {
+ printk(KERN_WARNING "nfs: server returned giant symlink!\n");
+ kunmap(rcvbuf->pages[0]);
+ return -EIO;
+ }
+ *strlen = len;
+
+ string = (char *)(strlen + 1);
+ string[len] = '\0';
+ kunmap(rcvbuf->pages[0]);
+ }
+ return 0;
+}
+
+static int
+decode_remove(struct nfs4_compound *cp, int nfserr, struct nfs4_remove *remove)
+{
+ int status;
+
+ status = 0;
+ if (!nfserr)
+ status = decode_change_info(cp, remove->rm_cinfo);
+ return status;
+}
+
+static int
+decode_rename(struct nfs4_compound *cp, int nfserr, struct nfs4_rename *rename)
+{
+ int status = 0;
+
+ if (!nfserr) {
+ if ((status = decode_change_info(cp, rename->rn_src_cinfo)))
+ goto out;
+ if ((status = decode_change_info(cp, rename->rn_dst_cinfo)))
+ goto out;
+ }
+out:
+ return status;
+}
+
+static int
+decode_setattr(struct nfs4_compound *cp)
+{
+ u32 bmlen;
+ DECODE_HEAD;
+
+ READ_BUF(4);
+ READ32(bmlen);
+ if (bmlen > 10)
+ goto xdr_error;
+ READ_BUF(bmlen << 2);
+
+ DECODE_TAIL;
+}
+
+static int
+decode_setclientid(struct nfs4_compound *cp, int nfserr)
+{
+ DECODE_HEAD;
+
+ if (!nfserr) {
+ READ_BUF(8 + sizeof(nfs4_verifier));
+ READ64(cp->server->nfs4_state->cl_clientid);
+ COPYMEM(cp->server->nfs4_state->cl_confirm, sizeof(nfs4_verifier));
+ }
+ else if (nfserr == NFSERR_CLID_INUSE) {
+ u32 len;
+
+ /* skip netid string */
+ READ_BUF(4);
+ READ32(len);
+ READ_BUF(len);
+
+ /* skip uaddr string */
+ READ_BUF(4);
+ READ32(len);
+ READ_BUF(len);
+ }
+
+ DECODE_TAIL;
+}
+
+static int
+decode_write(struct nfs4_compound *cp, int nfserr, struct nfs4_write *write)
+{
+ DECODE_HEAD;
+
+ if (!nfserr) {
+ READ_BUF(16);
+ READ32(*write->wr_bytes_written);
+ if (*write->wr_bytes_written > write->wr_len)
+ goto xdr_error;
+ READ32(write->wr_verf->committed);
+ COPYMEM(write->wr_verf->verifier, 8);
+ }
+
+ DECODE_TAIL;
+}
+
+static int
+decode_compound(struct nfs4_compound *cp, struct rpc_rqst *req)
+{
+ u32 taglen;
+ u32 opnum, nfserr;
+ DECODE_HEAD;
+
+ READ_BUF(8);
+ READ32(cp->toplevel_status);
+ READ32(taglen);
+
+ /*
+ * We need this if our zero-copy I/O is going to work. Rumor has
+ * it that the spec will soon mandate it...
+ */
+ if (taglen != cp->taglen)
+ dprintk("nfs4: non-conforming server returns tag length mismatch!\n");
+
+ READ_BUF(taglen + 4);
+ p += XDR_QUADLEN(taglen);
+ READ32(cp->resp_nops);
+ if (cp->resp_nops > cp->req_nops) {
+ dprintk("nfs4: resp_nops > req_nops!\n");
+ goto xdr_error;
+ }
+
+ for (cp->nops = 0; cp->nops < cp->resp_nops; cp->nops++) {
+ READ_BUF(8);
+ READ32(opnum);
+ if (opnum != cp->ops[cp->nops].opnum) {
+ dprintk("nfs4: operation mismatch!\n");
+ goto xdr_error;
+ }
+ READ32(nfserr);
+ if (cp->nops == cp->resp_nops - 1) {
+ if (nfserr != cp->toplevel_status) {
+ dprintk("nfs4: status mismatch!\n");
+ goto xdr_error;
+ }
+ }
+ else if (nfserr) {
+ dprintk("nfs4: intermediate status nonzero!\n");
+ goto xdr_error;
+ }
+ cp->ops[cp->nops].nfserr = nfserr;
+
+ switch (opnum) {
+ case OP_ACCESS:
+ status = decode_access(cp, nfserr, &cp->ops[cp->nops].u.access);
+ break;
+ case OP_CLOSE:
+ status = decode_close(cp, nfserr, &cp->ops[cp->nops].u.close);
+ break;
+ case OP_COMMIT:
+ status = decode_commit(cp, nfserr, &cp->ops[cp->nops].u.commit);
+ break;
+ case OP_CREATE:
+ status = decode_create(cp, nfserr, &cp->ops[cp->nops].u.create);
+ break;
+ case OP_GETATTR:
+ status = decode_getattr(cp, nfserr, &cp->ops[cp->nops].u.getattr);
+ break;
+ case OP_GETFH:
+ status = decode_getfh(cp, nfserr, &cp->ops[cp->nops].u.getfh);
+ break;
+ case OP_LINK:
+ status = decode_link(cp, nfserr, &cp->ops[cp->nops].u.link);
+ break;
+ case OP_LOOKUP:
+ status = 0;
+ break;
+ case OP_OPEN:
+ status = decode_open(cp, nfserr, &cp->ops[cp->nops].u.open);
+ break;
+ case OP_OPEN_CONFIRM:
+ status = decode_open_confirm(cp, nfserr, &cp->ops[cp->nops].u.open_confirm);
+ break;
+ case OP_PUTFH:
+ status = 0;
+ break;
+ case OP_PUTROOTFH:
+ status = 0;
+ break;
+ case OP_READ:
+ status = decode_read(cp, nfserr, &cp->ops[cp->nops].u.read);
+ break;
+ case OP_READDIR:
+ status = decode_readdir(cp, nfserr, req, &cp->ops[cp->nops].u.readdir);
+ break;
+ case OP_READLINK:
+ status = decode_readlink(cp, nfserr, req, &cp->ops[cp->nops].u.readlink);
+ break;
+ case OP_RESTOREFH:
+ status = 0;
+ break;
+ case OP_REMOVE:
+ status = decode_remove(cp, nfserr, &cp->ops[cp->nops].u.remove);
+ break;
+ case OP_RENAME:
+ status = decode_rename(cp, nfserr, &cp->ops[cp->nops].u.rename);
+ break;
+ case OP_RENEW:
+ status = 0;
+ break;
+ case OP_SAVEFH:
+ status = 0;
+ break;
+ case OP_SETATTR:
+ status = decode_setattr(cp);
+ break;
+ case OP_SETCLIENTID:
+ status = decode_setclientid(cp, nfserr);
+ break;
+ case OP_SETCLIENTID_CONFIRM:
+ status = 0;
+ break;
+ case OP_WRITE:
+ status = decode_write(cp, nfserr, &cp->ops[cp->nops].u.write);
+ break;
+ default:
+ BUG();
+ return -EIO;
+ }
+ if (status)
+ goto xdr_error;
+ }
+
+ DECODE_TAIL;
+}
+/*
+ * END OF "GENERIC" DECODE ROUTINES.
+ */
+
+/*
+ * Decode void reply
+ */
+static int
+nfs4_xdr_dec_void(struct rpc_rqst *req, u32 *p, void *dummy)
+{
+ return 0;
+}
+
+/*
+ * Decode COMPOUND response
+ */
+static int
+nfs4_xdr_dec_compound(struct rpc_rqst *rqstp, u32 *p, struct nfs4_compound *cp)
+{
+ int status;
+
+ cp->p = p;
+ cp->end = (u32 *) ((u8 *) rqstp->rq_rvec->iov_base + rqstp->rq_rvec->iov_len);
+
+ if ((status = decode_compound(cp, rqstp)))
+ goto out;
+
+ status = 0;
+ if (cp->toplevel_status)
+ status = -nfs_stat_to_errno(cp->toplevel_status);
+
+out:
+ return status;
+}
+
+u32 *
+nfs4_decode_dirent(u32 *p, struct nfs_entry *entry, int plus)
+{
+ u32 len;
+
+ if (!*p++) {
+ if (!*p)
+ return ERR_PTR(-EAGAIN);
+ entry->eof = 1;
+ return ERR_PTR(-EBADCOOKIE);
+ }
+
+ entry->prev_cookie = entry->cookie;
+ p = xdr_decode_hyper(p, &entry->cookie);
+ entry->len = ntohl(*p++);
+ entry->name = (const char *) p;
+ p += XDR_QUADLEN(entry->len);
+
+ if (entry->cookie > COOKIE_MAX)
+ entry->cookie = COOKIE_MAX;
+
+ /*
+ * In case the server doesn't return an inode number,
+ * we fake one here. (We don't use inode number 0,
+ * since glibc seems to choke on it...)
+ */
+ entry->ino = 1;
+
+ len = ntohl(*p++); /* bitmap length */
+ p += len;
+ len = ntohl(*p++); /* attribute buffer length */
+ if (len)
+ p = xdr_decode_hyper(p, &entry->ino);
+
+ entry->eof = !p[0] && p[1];
+ return p;
+}
+
+#ifndef MAX
+# define MAX(a, b) (((a) > (b))? (a) : (b))
+#endif
+
+#define PROC(proc, argtype, restype) \
+ { "nfs4_" #proc, \
+ (kxdrproc_t) nfs4_xdr_##argtype, \
+ (kxdrproc_t) nfs4_xdr_##restype, \
+ MAX(NFS4_##argtype##_sz,NFS4_##restype##_sz) << 2, \
+ 0 \
+ }
+
+static struct rpc_procinfo nfs4_procedures[] = {
+ PROC(null, enc_void, dec_void),
+ PROC(compound, enc_compound, dec_compound)
+};
+
+struct rpc_version nfs_version4 = {
+ .number = 4,
+ .nrprocs = sizeof(nfs4_procedures)/sizeof(nfs4_procedures[0]),
+ .procs = nfs4_procedures
+};
+
+/*
+ * Local variables:
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index 2ad13ec4cd27..a5a1c373444d 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -460,17 +460,62 @@ nfs_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
static int
nfs_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
- struct nfs_fsinfo *info)
+ struct nfs_fsstat *stat)
{
+ struct nfs2_fsstat fsinfo;
int status;
dprintk("NFS call statfs\n");
- memset((char *)info, 0, sizeof(*info));
- status = rpc_call(server->client, NFSPROC_STATFS, fhandle, info, 0);
+ stat->fattr->valid = 0;
+ status = rpc_call(server->client, NFSPROC_STATFS, fhandle, &fsinfo, 0);
dprintk("NFS reply statfs: %d\n", status);
+ if (status)
+ goto out;
+ stat->tbytes = (u64)fsinfo.blocks * fsinfo.bsize;
+ stat->fbytes = (u64)fsinfo.bfree * fsinfo.bsize;
+ stat->abytes = (u64)fsinfo.bavail * fsinfo.bsize;
+ stat->tfiles = 0;
+ stat->ffiles = 0;
+ stat->afiles = 0;
+out:
+ return status;
+}
+
+static int
+nfs_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
+ struct nfs_fsinfo *info)
+{
+ struct nfs2_fsstat fsinfo;
+ int status;
+
+ dprintk("NFS call fsinfo\n");
+ info->fattr->valid = 0;
+	status = rpc_call(server->client, NFSPROC_STATFS, fhandle, &fsinfo, 0);
+ dprintk("NFS reply fsinfo: %d\n", status);
+ if (status)
+ goto out;
+ info->rtmax = NFS_MAXDATA;
+ info->rtpref = fsinfo.tsize;
+ info->rtmult = fsinfo.bsize;
+ info->wtmax = NFS_MAXDATA;
+ info->wtpref = fsinfo.tsize;
+ info->wtmult = fsinfo.bsize;
+ info->dtpref = fsinfo.tsize;
+ info->maxfilesize = 0x7FFFFFFF;
+ info->lease_time = 0;
+out:
return status;
}
+static int
+nfs_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
+ struct nfs_pathconf *info)
+{
+ info->max_link = 0;
+ info->max_namelen = NFS2_MAXNAMLEN;
+ return 0;
+}
+
extern u32 * nfs_decode_dirent(u32 *, struct nfs_entry *, int);
static void
@@ -590,6 +635,8 @@ struct nfs_rpc_ops nfs_v2_clientops = {
.readdir = nfs_proc_readdir,
.mknod = nfs_proc_mknod,
.statfs = nfs_proc_statfs,
+ .fsinfo = nfs_proc_fsinfo,
+ .pathconf = nfs_proc_pathconf,
.decode_dirent = nfs_decode_dirent,
.read_setup = nfs_proc_read_setup,
.write_setup = nfs_proc_write_setup,
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 5976fa3e466f..e6ed1a443116 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -18,6 +18,7 @@
#include <linux/blk.h>
#include <linux/kmod.h>
#include <linux/ctype.h>
+#include <../drivers/base/fs/fs.h> /* Eeeeewwwww */
#include "check.h"
@@ -111,115 +112,17 @@ char *disk_name(struct gendisk *hd, int part, char *buf)
return buf;
}
-/* Driverfs file support */
-static ssize_t partition_device_kdev_read(struct device *driverfs_dev,
- char *page, size_t count, loff_t off)
-{
- kdev_t kdev;
- kdev.value=(int)(long)driverfs_dev->driver_data;
- return off ? 0 : sprintf (page, "%x\n",kdev.value);
-}
-static DEVICE_ATTR(kdev,S_IRUGO,partition_device_kdev_read,NULL);
-
-static ssize_t partition_device_type_read(struct device *driverfs_dev,
- char *page, size_t count, loff_t off)
-{
- return off ? 0 : sprintf (page, "BLK\n");
-}
-static DEVICE_ATTR(type,S_IRUGO,partition_device_type_read,NULL);
-
-static void driverfs_create_partitions(struct gendisk *hd)
-{
- int max_p = 1<<hd->minor_shift;
- struct hd_struct *p = hd->part;
- char name[DEVICE_NAME_SIZE];
- char bus_id[BUS_ID_SIZE];
- struct device *dev, *parent;
- int part;
-
- /* if driverfs not supported by subsystem, skip partitions */
- if (!(hd->flags & GENHD_FL_DRIVERFS))
- return;
-
- parent = hd->driverfs_dev;
-
- if (parent) {
- sprintf(name, "%s", parent->name);
- sprintf(bus_id, "%s:", parent->bus_id);
- } else {
- *name = *bus_id = '\0';
- }
-
- dev = &hd->disk_dev;
- dev->driver_data = (void *)(long)__mkdev(hd->major, hd->first_minor);
- sprintf(dev->name, "%sdisc", name);
- sprintf(dev->bus_id, "%sdisc", bus_id);
- for (part=1; part < max_p; part++) {
- dev = &p[part-1].hd_driverfs_dev;
- sprintf(dev->name, "%spart%d", name, part);
- sprintf(dev->bus_id, "%s:p%d", bus_id, part);
- if (!p[part-1].nr_sects)
- continue;
- dev->driver_data =
- (void *)(long)__mkdev(hd->major, hd->first_minor+part);
- }
-
- dev = &hd->disk_dev;
- dev->parent = parent;
- if (parent)
- dev->bus = parent->bus;
- device_register(dev);
- device_create_file(dev, &dev_attr_type);
- device_create_file(dev, &dev_attr_kdev);
-
- for (part=0; part < max_p-1; part++) {
- dev = &p[part].hd_driverfs_dev;
- dev->parent = parent;
- if (parent)
- dev->bus = parent->bus;
- if (!dev->driver_data)
- continue;
- device_register(dev);
- device_create_file(dev, &dev_attr_type);
- device_create_file(dev, &dev_attr_kdev);
- }
-}
-
-static void driverfs_remove_partitions(struct gendisk *hd)
-{
- int max_p = 1<<hd->minor_shift;
- struct device *dev;
- struct hd_struct *p;
- int part;
-
- for (part=1, p = hd->part; part < max_p; part++, p++) {
- dev = &p->hd_driverfs_dev;
- if (dev->driver_data) {
- device_remove_file(dev, &dev_attr_type);
- device_remove_file(dev, &dev_attr_kdev);
- put_device(dev);
- dev->driver_data = NULL;
- }
- }
- dev = &hd->disk_dev;
- if (dev->driver_data) {
- device_remove_file(dev, &dev_attr_type);
- device_remove_file(dev, &dev_attr_kdev);
- put_device(dev);
- dev->driver_data = NULL;
- }
-}
-
-static void check_partition(struct gendisk *hd, struct block_device *bdev)
+static struct parsed_partitions *
+check_partition(struct gendisk *hd, struct block_device *bdev)
{
+ struct parsed_partitions *state;
devfs_handle_t de = NULL;
char buf[64];
- struct parsed_partitions *state;
- int i;
+ int i, res;
state = kmalloc(sizeof(struct parsed_partitions), GFP_KERNEL);
if (!state)
- return;
+ return NULL;
if (hd->flags & GENHD_FL_DEVFS)
de = hd->de;
@@ -233,32 +136,20 @@ static void check_partition(struct gendisk *hd, struct block_device *bdev)
if (isdigit(state->name[strlen(state->name)-1]))
sprintf(state->name, "p");
}
- state->limit = 1<<hd->minor_shift;
- for (i = 0; check_part[i]; i++) {
- int res, j;
- struct hd_struct *p;
+ state->limit = hd->minors;
+ i = res = 0;
+ while (!res && check_part[i]) {
memset(&state->parts, 0, sizeof(state->parts));
- res = check_part[i](state, bdev);
- if (!res)
- continue;
- if (res < 0) {
- if (warn_no_part)
- printk(" unable to read partition table\n");
- return;
- }
- p = hd->part;
- for (j = 1; j < state->limit; j++) {
- p[j-1].start_sect = state->parts[j].from;
- p[j-1].nr_sects = state->parts[j].size;
-#if CONFIG_BLK_DEV_MD
- if (!state->parts[j].flags)
- continue;
- md_autodetect_dev(bdev->bd_dev+j);
-#endif
- }
- return;
+ res = check_part[i++](state, bdev);
}
- printk(" unknown partition table\n");
+ if (res > 0)
+ return state;
+ if (!res)
+ printk(" unknown partition table\n");
+ else if (warn_no_part)
+ printk(" unable to read partition table\n");
+ kfree(state);
+ return NULL;
}
static void devfs_register_partition(struct gendisk *dev, int part)
@@ -298,7 +189,7 @@ static void devfs_create_partitions(struct gendisk *dev)
unsigned int devfs_flags = DEVFS_FL_DEFAULT;
char dirname[64], symlink[16];
static devfs_handle_t devfs_handle;
- int part, max_p = 1<<dev->minor_shift;
+ int part, max_p = dev->minors;
struct hd_struct *p = dev->part;
if (dev->flags & GENHD_FL_REMOVABLE)
@@ -329,9 +220,6 @@ static void devfs_create_partitions(struct gendisk *dev)
devfs_auto_unregister(dev->disk_de, slave);
if (!(dev->flags & GENHD_FL_DEVFS))
devfs_auto_unregister (slave, dir);
- for (part = 1; part < max_p; part++, p++)
- if (p->nr_sects)
- devfs_register_partition(dev, part);
#endif
}
@@ -379,11 +267,6 @@ static void devfs_create_cdrom(struct gendisk *dev)
static void devfs_remove_partitions(struct gendisk *dev)
{
#ifdef CONFIG_DEVFS_FS
- int part;
- for (part = (1<<dev->minor_shift)-1; part--; ) {
- devfs_unregister(dev->part[part].de);
- dev->part[part].de = NULL;
- }
devfs_unregister(dev->disk_de);
dev->disk_de = NULL;
if (dev->flags & GENHD_FL_CD)
@@ -393,15 +276,196 @@ static void devfs_remove_partitions(struct gendisk *dev)
#endif
}
+static ssize_t part_dev_read(struct device *dev,
+ char *page, size_t count, loff_t off)
+{
+ struct gendisk *disk = dev->parent->driver_data;
+ struct hd_struct *p = dev->driver_data;
+ int part = p - disk->part + 1;
+ dev_t base = MKDEV(disk->major, disk->first_minor);
+ return off ? 0 : sprintf(page, "%04x\n",base + part);
+}
+static ssize_t part_start_read(struct device *dev,
+ char *page, size_t count, loff_t off)
+{
+ struct hd_struct *p = dev->driver_data;
+ return off ? 0 : sprintf(page, "%llu\n",(u64)p->start_sect);
+}
+static ssize_t part_size_read(struct device *dev,
+ char *page, size_t count, loff_t off)
+{
+ struct hd_struct *p = dev->driver_data;
+ return off ? 0 : sprintf(page, "%llu\n",(u64)p->nr_sects);
+}
+static struct device_attribute part_attr_dev = {
+ .attr = {.name = "dev", .mode = S_IRUGO },
+ .show = part_dev_read
+};
+static struct device_attribute part_attr_start = {
+ .attr = {.name = "start", .mode = S_IRUGO },
+ .show = part_start_read
+};
+static struct device_attribute part_attr_size = {
+ .attr = {.name = "size", .mode = S_IRUGO },
+ .show = part_size_read
+};
+
+void delete_partition(struct gendisk *disk, int part)
+{
+ struct hd_struct *p = disk->part + part - 1;
+ struct device *dev;
+ if (!p->nr_sects)
+ return;
+ p->start_sect = 0;
+ p->nr_sects = 0;
+ devfs_unregister(p->de);
+ dev = p->hd_driverfs_dev;
+ p->hd_driverfs_dev = NULL;
+ if (dev) {
+ device_remove_file(dev, &part_attr_size);
+ device_remove_file(dev, &part_attr_start);
+ device_remove_file(dev, &part_attr_dev);
+ device_unregister(dev);
+ }
+}
+
+static void part_release(struct device *dev)
+{
+ kfree(dev);
+}
+
+void add_partition(struct gendisk *disk, int part, sector_t start, sector_t len)
+{
+ struct hd_struct *p = disk->part + part - 1;
+ struct device *parent = &disk->disk_dev;
+ struct device *dev;
+
+ p->start_sect = start;
+ p->nr_sects = len;
+ devfs_register_partition(disk, part);
+ dev = kmalloc(sizeof(struct device), GFP_KERNEL);
+ if (!dev)
+ return;
+ memset(dev, 0, sizeof(struct device));
+ dev->parent = parent;
+ sprintf(dev->bus_id, "p%d", part);
+ dev->release = part_release;
+ dev->driver_data = p;
+ device_register(dev);
+ device_create_file(dev, &part_attr_dev);
+ device_create_file(dev, &part_attr_start);
+ device_create_file(dev, &part_attr_size);
+ p->hd_driverfs_dev = dev;
+}
+
+static ssize_t disk_dev_read(struct device *dev,
+ char *page, size_t count, loff_t off)
+{
+ struct gendisk *disk = dev->driver_data;
+ dev_t base = MKDEV(disk->major, disk->first_minor);
+ return off ? 0 : sprintf(page, "%04x\n",base);
+}
+static ssize_t disk_range_read(struct device *dev,
+ char *page, size_t count, loff_t off)
+{
+ struct gendisk *disk = dev->driver_data;
+ return off ? 0 : sprintf(page, "%d\n",disk->minors);
+}
+static ssize_t disk_size_read(struct device *dev,
+ char *page, size_t count, loff_t off)
+{
+ struct gendisk *disk = dev->driver_data;
+ return off ? 0 : sprintf(page, "%llu\n",(u64)get_capacity(disk));
+}
+static struct device_attribute disk_attr_dev = {
+ .attr = {.name = "dev", .mode = S_IRUGO },
+ .show = disk_dev_read
+};
+static struct device_attribute disk_attr_range = {
+ .attr = {.name = "range", .mode = S_IRUGO },
+ .show = disk_range_read
+};
+static struct device_attribute disk_attr_size = {
+ .attr = {.name = "size", .mode = S_IRUGO },
+ .show = disk_size_read
+};
+
+static void disk_driverfs_symlinks(struct gendisk *disk)
+{
+ struct device *target = disk->driverfs_dev;
+ struct device *dev = &disk->disk_dev;
+ struct device *p;
+ char *path;
+ char *s;
+ int length;
+ int depth;
+
+ if (!target)
+ return;
+
+ get_device(target);
+
+ length = get_devpath_length(target);
+ length += strlen("..");
+
+ if (length > PATH_MAX)
+ return;
+
+ if (!(path = kmalloc(length,GFP_KERNEL)))
+ return;
+ memset(path,0,length);
+
+ /* our relative position */
+ strcpy(path,"..");
+
+ fill_devpath(target, path, length);
+ driverfs_create_symlink(&dev->dir, "device", path);
+ kfree(path);
+
+ for (p = target, depth = 0; p; p = p->parent, depth++)
+ ;
+ length = get_devpath_length(dev);
+ length += 3 * depth - 1;
+
+ if (length > PATH_MAX)
+ return;
+
+ if (!(path = kmalloc(length,GFP_KERNEL)))
+ return;
+ memset(path,0,length);
+ for (s = path; depth--; s += 3)
+ strcpy(s, "../");
+
+ fill_devpath(dev, path, length);
+ driverfs_create_symlink(&target->dir, "block", path);
+ kfree(path);
+}
+
/* Not exported, helper to add_disk(). */
void register_disk(struct gendisk *disk)
{
+ struct device *dev = &disk->disk_dev;
+ struct parsed_partitions *state;
struct block_device *bdev;
+ char *s;
+ int j;
+
+ strcpy(dev->bus_id, disk->disk_name);
+ /* ewww... some of these buggers have / in name... */
+ s = strchr(dev->bus_id, '/');
+ if (s)
+ *s = '!';
+ device_add(dev);
+ device_create_file(dev, &disk_attr_dev);
+ device_create_file(dev, &disk_attr_range);
+ device_create_file(dev, &disk_attr_size);
+ disk_driverfs_symlinks(disk);
+
if (disk->flags & GENHD_FL_CD)
devfs_create_cdrom(disk);
/* No minors to use for partitions */
- if (!disk->minor_shift)
+ if (disk->minors == 1)
return;
/* No such device (e.g., media were just removed) */
@@ -411,45 +475,32 @@ void register_disk(struct gendisk *disk)
bdev = bdget(MKDEV(disk->major, disk->first_minor));
if (blkdev_get(bdev, FMODE_READ, 0, BDEV_RAW) < 0)
return;
- check_partition(disk, bdev);
- driverfs_create_partitions(disk);
+ state = check_partition(disk, bdev);
devfs_create_partitions(disk);
- blkdev_put(bdev, BDEV_RAW);
-}
-
-void update_partition(struct gendisk *disk, int part)
-{
- struct hd_struct *p = disk->part + part - 1;
- struct device *dev = &p->hd_driverfs_dev;
-
- if (!p->nr_sects) {
- if (p->de) {
- devfs_unregister(p->de);
- p->de = NULL;
- }
- if (dev->driver_data) {
- device_remove_file(dev, &dev_attr_type);
- device_remove_file(dev, &dev_attr_kdev);
- put_device(dev);
- dev->driver_data = NULL;
+ if (state) {
+ for (j = 1; j < state->limit; j++) {
+ sector_t size = state->parts[j].size;
+ sector_t from = state->parts[j].from;
+ if (!size)
+ continue;
+ add_partition(disk, j, from, size);
+#if CONFIG_BLK_DEV_MD
+ if (!state->parts[j].flags)
+ continue;
+ md_autodetect_dev(bdev->bd_dev+j);
+#endif
}
- return;
+ kfree(state);
}
- if (!p->de)
- devfs_register_partition(disk, part);
- if (dev->driver_data || !(disk->flags & GENHD_FL_DRIVERFS))
- return;
- dev->driver_data =
- (void *)(long)__mkdev(disk->major, disk->first_minor+part);
- device_register(dev);
- device_create_file(dev, &dev_attr_type);
- device_create_file(dev, &dev_attr_kdev);
+ blkdev_put(bdev, BDEV_RAW);
}
int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
{
kdev_t dev = to_kdev_t(bdev->bd_dev);
+ struct parsed_partitions *state;
int p, res;
+
if (!bdev->bd_invalidated)
return 0;
if (bdev->bd_part_count)
@@ -458,16 +509,25 @@ int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
if (res)
return res;
bdev->bd_invalidated = 0;
- for (p = 0; p < (1<<disk->minor_shift) - 1; p++) {
- disk->part[p].start_sect = 0;
- disk->part[p].nr_sects = 0;
- }
+ for (p = 1; p < disk->minors; p++)
+ delete_partition(disk, p);
if (bdev->bd_op->revalidate)
bdev->bd_op->revalidate(dev);
- if (get_capacity(disk))
- check_partition(disk, bdev);
- for (p = 1; p < (1<<disk->minor_shift); p++)
- update_partition(disk, p);
+ if (!get_capacity(disk) || !(state = check_partition(disk, bdev)))
+ return res;
+ for (p = 1; p < state->limit; p++) {
+ sector_t size = state->parts[p].size;
+ sector_t from = state->parts[p].from;
+ if (!size)
+ continue;
+ add_partition(disk, p, from, size);
+#if CONFIG_BLK_DEV_MD
+		if (!state->parts[p].flags)
+ continue;
+ md_autodetect_dev(bdev->bd_dev+p);
+#endif
+ }
+ kfree(state);
return res;
}
@@ -493,48 +553,33 @@ fail:
return NULL;
}
-static int wipe_partitions(struct gendisk *disk)
+void del_gendisk(struct gendisk *disk)
{
- int max_p = 1 << disk->minor_shift;
+ int max_p = disk->minors;
kdev_t devp;
- int res;
int p;
/* invalidate stuff */
for (p = max_p - 1; p > 0; p--) {
devp = mk_kdev(disk->major,disk->first_minor + p);
-#if 0 /* %%% superfluous? */
- if (disk->part[p-1].nr_sects == 0)
- continue;
-#endif
- res = invalidate_device(devp, 1);
- if (res)
- return res;
- disk->part[p-1].start_sect = 0;
- disk->part[p-1].nr_sects = 0;
+ invalidate_device(devp, 1);
+ delete_partition(disk, p);
}
devp = mk_kdev(disk->major,disk->first_minor);
-#if 0 /* %%% superfluous? */
- if (disk->part[p].nr_sects == 0)
- continue;
-#endif
- res = invalidate_device(devp, 1);
- if (res)
- return res;
+ invalidate_device(devp, 1);
disk->capacity = 0;
- return 0;
-}
-
-void del_gendisk(struct gendisk *disk)
-{
- driverfs_remove_partitions(disk);
- wipe_partitions(disk);
+ disk->flags &= ~GENHD_FL_UP;
unlink_gendisk(disk);
devfs_remove_partitions(disk);
- if (disk->part) {
- kfree(disk->part);
- disk->part = NULL;
+ device_remove_file(&disk->disk_dev, &disk_attr_dev);
+ device_remove_file(&disk->disk_dev, &disk_attr_range);
+ device_remove_file(&disk->disk_dev, &disk_attr_size);
+ driverfs_remove_file(&disk->disk_dev.dir, "device");
+ if (disk->driverfs_dev) {
+ driverfs_remove_file(&disk->driverfs_dev->dir, "block");
+ put_device(disk->driverfs_dev);
}
+ device_del(&disk->disk_dev);
}
struct dev_name {
@@ -571,6 +616,7 @@ char *partition_name(dev_t dev)
dname->name = NULL;
if (hd)
dname->name = disk_name(hd, part, dname->namebuf);
+ put_disk(hd);
if (!dname->name) {
sprintf(dname->namebuf, "[dev %s]", kdevname(to_kdev_t(dev)));
dname->name = dname->namebuf;
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index 7bdea5bbe922..cbafa4129498 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -38,6 +38,7 @@
#include <linux/smp_lock.h>
#include <linux/seq_file.h>
#include <linux/times.h>
+#include <linux/profile.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
diff --git a/fs/xfs/linux/xfs_aops.c b/fs/xfs/linux/xfs_aops.c
index e749c3c3bbed..8364f6c3eb41 100644
--- a/fs/xfs/linux/xfs_aops.c
+++ b/fs/xfs/linux/xfs_aops.c
@@ -36,7 +36,6 @@
#include <linux/mpage.h>
-STATIC int delalloc_convert(struct inode *, struct page *, int, int);
STATIC int
map_blocks(
@@ -50,17 +49,11 @@ map_blocks(
int error, nmaps = 1;
retry:
- if (flags & PBF_FILE_ALLOCATE) {
- VOP_STRATEGY(vp, offset, count, flags, NULL,
- pbmapp, &nmaps, error);
- } else {
- VOP_BMAP(vp, offset, count, flags, NULL,
- pbmapp, &nmaps, error);
- }
+ VOP_BMAP(vp, offset, count, flags, pbmapp, &nmaps, error);
if (flags & PBF_WRITE) {
if (unlikely((flags & PBF_DIRECT) && nmaps &&
(pbmapp->pbm_flags & PBMF_DELAY))) {
- flags = PBF_WRITE | PBF_FILE_ALLOCATE;
+ flags = PBF_FILE_ALLOCATE;
goto retry;
}
VMODIFY(vp);
@@ -130,83 +123,6 @@ map_buffer_at_offset(
}
/*
- * Convert delalloc space to real space, do not flush the
- * data out to disk, that will be done by the caller.
- */
-STATIC int
-release_page(
- struct page *page)
-{
- struct inode *inode = (struct inode*)page->mapping->host;
- unsigned long end_index = inode->i_size >> PAGE_CACHE_SHIFT;
- int ret;
-
- /* Are we off the end of the file ? */
- if (page->index >= end_index) {
- unsigned offset = inode->i_size & (PAGE_CACHE_SIZE-1);
- if ((page->index >= end_index+1) || !offset) {
- ret = -EIO;
- goto out;
- }
- }
-
- ret = delalloc_convert(inode, page, 0, 0);
-
-out:
- if (ret < 0) {
- block_invalidatepage(page, 0);
- ClearPageUptodate(page);
-
- return 0;
- }
-
- return 1;
-}
-
-/*
- * Convert delalloc or unmapped space to real space and flush out
- * to disk.
- */
-STATIC int
-write_full_page(
- struct page *page,
- int delalloc)
-{
- struct inode *inode = (struct inode*)page->mapping->host;
- unsigned long end_index = inode->i_size >> PAGE_CACHE_SHIFT;
- int ret;
-
- /* Are we off the end of the file ? */
- if (page->index >= end_index) {
- unsigned offset = inode->i_size & (PAGE_CACHE_SIZE-1);
- if ((page->index >= end_index+1) || !offset) {
- ret = -EIO;
- goto out;
- }
- }
-
- if (!page_has_buffers(page)) {
- create_empty_buffers(page, 1 << inode->i_blkbits, 0);
- }
-
- ret = delalloc_convert(inode, page, 1, delalloc == 0);
-
-out:
- if (ret < 0) {
- /*
- * If it's delalloc and we have nowhere to put it,
- * throw it away.
- */
- if (delalloc)
- block_invalidatepage(page, 0);
- ClearPageUptodate(page);
- unlock_page(page);
- }
-
- return ret;
-}
-
-/*
* Look for a page at index which is unlocked and not mapped
* yet - clustering for mmap write case.
*/
@@ -347,16 +263,21 @@ submit_page(
end_page_writeback(page);
}
-STATIC int
-map_page(
+/*
+ * Allocate & map buffers for page given the extent map. Write it out.
+ * except for the original page of a writepage, this is called on
+ * delalloc pages only, for the original page it is possible that
+ * the page has no mapping at all.
+ */
+STATIC void
+convert_page(
struct inode *inode,
struct page *page,
page_buf_bmap_t *maps,
- struct buffer_head *bh_arr[],
int startio,
int all_bh)
{
- struct buffer_head *bh, *head;
+ struct buffer_head *bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
page_buf_bmap_t *mp = maps, *tmp;
unsigned long end, offset, end_index;
int i = 0, index = 0;
@@ -393,32 +314,12 @@ map_page(
}
} while (i++, (bh = bh->b_this_page) != head);
- return index;
-}
-
-/*
- * Allocate & map buffers for page given the extent map. Write it out.
- * except for the original page of a writepage, this is called on
- * delalloc pages only, for the original page it is possible that
- * the page has no mapping at all.
- */
-STATIC void
-convert_page(
- struct inode *inode,
- struct page *page,
- page_buf_bmap_t *maps,
- int startio,
- int all_bh)
-{
- struct buffer_head *bh_arr[MAX_BUF_PER_PAGE];
- int cnt;
-
- cnt = map_page(inode, page, maps, bh_arr, startio, all_bh);
if (startio) {
- submit_page(page, bh_arr, cnt);
+ submit_page(page, bh_arr, index);
} else {
unlock_page(page);
}
+
page_cache_release(page);
}
@@ -439,40 +340,47 @@ cluster_write(
tlast = (mp->pbm_offset + mp->pbm_bsize) >> PAGE_CACHE_SHIFT;
for (; tindex < tlast; tindex++) {
- if (!(page = probe_page(inode, tindex)))
+ page = probe_page(inode, tindex);
+ if (!page)
break;
convert_page(inode, page, mp, startio, all_bh);
}
}
/*
- * Calling this without allocate_space set means we are being asked to
- * flush a dirty buffer head. When called with async_write set then we
- * are coming from writepage. A writepage call with allocate_space set
- * means we are being asked to write out all of the page which is before
- * EOF and therefore need to allocate space for unmapped portions of the
- * page.
+ * Calling this without startio set means we are being asked to make a dirty
+ * page ready for freeing it's buffers. When called with startio set then
+ * we are coming from writepage.
*/
STATIC int
delalloc_convert(
- struct inode *inode, /* inode containing page */
- struct page *page, /* page to convert - locked */
- int startio, /* start io on the page */
+ struct page *page,
+ int startio,
int allocate_space)
{
- struct buffer_head *bh, *head;
- struct buffer_head *bh_arr[MAX_BUF_PER_PAGE];
+ struct inode *inode = page->mapping->host;
+ struct buffer_head *bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
page_buf_bmap_t *mp, map;
- int i, cnt = 0;
- int len, err;
- unsigned long p_offset = 0;
- loff_t offset;
- loff_t end_offset;
+ unsigned long p_offset = 0, end_index;
+ loff_t offset, end_offset;
+ int len, err, i, cnt = 0;
+
+ /* Are we off the end of the file ? */
+ end_index = inode->i_size >> PAGE_CACHE_SHIFT;
+ if (page->index >= end_index) {
+ unsigned remaining = inode->i_size & (PAGE_CACHE_SIZE-1);
+ if ((page->index >= end_index+1) || !remaining) {
+ return -EIO;
+ }
+ }
offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
end_offset = offset + PAGE_CACHE_SIZE;
if (end_offset > inode->i_size)
end_offset = inode->i_size;
+
+ if (startio && !page_has_buffers(page))
+ create_empty_buffers(page, 1 << inode->i_blkbits, 0);
bh = head = page_buffers(page);
mp = NULL;
@@ -490,9 +398,10 @@ delalloc_convert(
if (buffer_delay(bh)) {
if (!mp) {
err = map_blocks(inode, offset, len, &map,
- PBF_WRITE|PBF_FILE_ALLOCATE);
- if (err)
+ PBF_FILE_ALLOCATE);
+ if (err) {
goto error;
+ }
mp = match_offset_to_mapping(page, &map,
p_offset);
}
@@ -517,8 +426,9 @@ delalloc_convert(
bh, head);
err = map_blocks(inode, offset, size, &map,
PBF_WRITE|PBF_DIRECT);
- if (err)
+ if (err) {
goto error;
+ }
mp = match_offset_to_mapping(page, &map,
p_offset);
}
@@ -544,12 +454,14 @@ next_bh:
bh = bh->b_this_page;
} while (offset < end_offset);
- if (startio)
+ if (startio) {
submit_page(page, bh_arr, cnt);
+ }
- if (mp)
+ if (mp) {
cluster_write(inode, page->index + 1, mp,
startio, allocate_space);
+ }
return 0;
@@ -557,7 +469,15 @@ error:
for (i = 0; i < cnt; i++) {
unlock_buffer(bh_arr[i]);
}
-
+
+ /*
+ * If it's delalloc and we have nowhere to put it,
+ * throw it away.
+ */
+ if (!allocate_space) {
+ block_invalidatepage(page, 0);
+ }
+ ClearPageUptodate(page);
return err;
}
@@ -591,7 +511,7 @@ linvfs_get_block_core(
}
VOP_BMAP(vp, offset, size,
- create ? flags : PBF_READ, NULL,
+ create ? flags : PBF_READ,
(struct page_buf_bmap_s *)&pbmap, &retpbbm, error);
if (error)
return -error;
@@ -745,14 +665,12 @@ count_page_state(
bh = head = page_buffers(page);
do {
- if (buffer_uptodate(bh) && !buffer_mapped(bh)) {
+ if (buffer_uptodate(bh) && !buffer_mapped(bh))
(*nr_unmapped)++;
- continue;
- }
- if (!buffer_delay(bh))
- continue;
- (*nr_delalloc)++;
+ else if (buffer_delay(bh))
+ (*nr_delalloc)++;
} while ((bh = bh->b_this_page) != head);
+
return 1;
}
@@ -764,20 +682,22 @@ linvfs_writepage(
struct page *page)
{
int error;
- int need_trans;
+ int need_trans = 1;
int nr_delalloc, nr_unmapped;
- if (count_page_state(page, &nr_delalloc, &nr_unmapped)) {
+ if (count_page_state(page, &nr_delalloc, &nr_unmapped))
need_trans = nr_delalloc + nr_unmapped;
- } else {
- need_trans = 1;
- }
if ((current->flags & (PF_FSTRANS)) && need_trans)
goto out_fail;
- error = write_full_page(page, nr_delalloc);
-
+ /*
+ * Convert delalloc or unmapped space to real space and flush out
+ * to disk.
+ */
+ error = delalloc_convert(page, 1, nr_delalloc == 0);
+ if (unlikely(error))
+ unlock_page(page);
return error;
out_fail:
@@ -812,24 +732,26 @@ linvfs_release_page(
struct page *page,
int gfp_mask)
{
- int need_trans;
int nr_delalloc, nr_unmapped;
if (count_page_state(page, &nr_delalloc, &nr_unmapped)) {
- need_trans = nr_delalloc;
- } else {
- need_trans = 0;
- }
-
- if (need_trans == 0) {
- return try_to_free_buffers(page);
- }
+ if (!nr_delalloc)
+ goto free_buffers;
+ }
if (gfp_mask & __GFP_FS) {
- if (release_page(page) == 0)
- return try_to_free_buffers(page);
+ /*
+ * Convert delalloc space to real space, do not flush the
+ * data out to disk, that will be done by the caller.
+ */
+ if (delalloc_convert(page, 0, 0) == 0)
+ goto free_buffers;
}
+
return 0;
+
+free_buffers:
+ return try_to_free_buffers(page);
}
diff --git a/fs/xfs/linux/xfs_fs_subr.c b/fs/xfs/linux/xfs_fs_subr.c
index 8d50bd04d718..eea74fce0050 100644
--- a/fs/xfs/linux/xfs_fs_subr.c
+++ b/fs/xfs/linux/xfs_fs_subr.c
@@ -135,7 +135,6 @@ fs_flushinval_pages(
struct inode *ip = LINVFS_GET_IP(vp);
if (VN_CACHED(vp)) {
- filemap_fdatawait(ip->i_mapping);
filemap_fdatawrite(ip->i_mapping);
filemap_fdatawait(ip->i_mapping);
@@ -159,7 +158,6 @@ fs_flush_pages(
struct inode *ip = LINVFS_GET_IP(vp);
if (VN_CACHED(vp)) {
- filemap_fdatawait(ip->i_mapping);
filemap_fdatawrite(ip->i_mapping);
filemap_fdatawait(ip->i_mapping);
}
diff --git a/fs/xfs/linux/xfs_globals.c b/fs/xfs/linux/xfs_globals.c
index 54a4343289f1..7f0ac30a83ba 100644
--- a/fs/xfs/linux/xfs_globals.c
+++ b/fs/xfs/linux/xfs_globals.c
@@ -41,12 +41,6 @@ uint64_t xfs_panic_mask; /* set to cause more panics */
unsigned long xfs_physmem;
/*
- * restricted_chown = 1 bsd style chown(2), only super-user can give away files
- * restricted_chown = 0 sysV style chown(2), non super-user can give away files
- */
-int restricted_chown = 1;
-
-/*
* Used to serialize atomicIncWithWrap.
*/
spinlock_t Atomic_spin = SPIN_LOCK_UNLOCKED;
@@ -69,3 +63,6 @@ mutex_t xfs_Gqm_lock;
EXPORT_SYMBOL(xfs_Gqm);
EXPORT_SYMBOL(xfs_next_bit);
EXPORT_SYMBOL(xfs_contig_bits);
+EXPORT_SYMBOL(xfs_bmbt_get_all);
+EXPORT_SYMBOL(xfs_bmbt_disk_get_all);
+
diff --git a/fs/xfs/linux/xfs_globals.h b/fs/xfs/linux/xfs_globals.h
index 943e029f1d42..07c9856b1353 100644
--- a/fs/xfs/linux/xfs_globals.h
+++ b/fs/xfs/linux/xfs_globals.h
@@ -39,7 +39,6 @@
extern uint64_t xfs_panic_mask; /* set to cause more panics */
-extern int restricted_chown;
extern unsigned long xfs_physmem;
extern struct cred *sys_cred;
diff --git a/fs/xfs/linux/xfs_ioctl.c b/fs/xfs/linux/xfs_ioctl.c
index 5dbf4fd9debf..03451043e983 100644
--- a/fs/xfs/linux/xfs_ioctl.c
+++ b/fs/xfs/linux/xfs_ioctl.c
@@ -67,7 +67,7 @@ xfs_find_handle(
if (copy_from_user(&hreq, (xfs_fsop_handlereq_t *)arg, sizeof(hreq)))
return -XFS_ERROR(EFAULT);
- bzero((char *)&handle, sizeof(handle));
+ memset((char *)&handle, 0, sizeof(handle));
switch (cmd) {
case XFS_IOC_PATH_TO_FSHANDLE:
@@ -228,7 +228,7 @@ xfs_vget_fsop_handlereq(
if (copy_from_user(handlep, hanp, hlen))
return XFS_ERROR(EFAULT);
if (hlen < sizeof(*handlep))
- bzero(((char *)handlep) + hlen, sizeof(*handlep) - hlen);
+ memset(((char *)handlep) + hlen, 0, sizeof(*handlep) - hlen);
if (hlen > sizeof(handlep->ha_fsid)) {
if (handlep->ha_fid.xfs_fid_len !=
(hlen - sizeof(handlep->ha_fsid)
@@ -264,12 +264,6 @@ xfs_vget_fsop_handlereq(
vpp = XFS_ITOV(ip);
inodep = LINVFS_GET_IP(vpp);
xfs_iunlock(ip, XFS_ILOCK_SHARED);
- error = linvfs_revalidate_core(inodep, ATTR_COMM);
- if (error) {
- iput(inodep);
- /* this error is (-) but our callers expect + */
- return XFS_ERROR(-error);
- }
*vp = vpp;
*inode = inodep;
diff --git a/fs/xfs/linux/xfs_iops.c b/fs/xfs/linux/xfs_iops.c
index 3beca5b8fd34..d2ca5a30238a 100644
--- a/fs/xfs/linux/xfs_iops.c
+++ b/fs/xfs/linux/xfs_iops.c
@@ -91,14 +91,14 @@ linvfs_mknod(
mode &= ~current->fs->umask;
#endif
- bzero(&va, sizeof(va));
+ memset(&va, 0, sizeof(va));
va.va_mask = AT_TYPE|AT_MODE;
va.va_type = IFTOVT(mode);
va.va_mode = mode;
switch (mode & S_IFMT) {
case S_IFCHR: case S_IFBLK: case S_IFIFO: case S_IFSOCK:
- va.va_rdev = rdev;
+ va.va_rdev = XFS_MKDEV(MAJOR(rdev), MINOR(rdev));
va.va_mask |= AT_RDEV;
/*FALLTHROUGH*/
case S_IFREG:
@@ -122,8 +122,6 @@ linvfs_mknod(
if (S_ISCHR(mode) || S_ISBLK(mode))
ip->i_rdev = to_kdev_t(rdev);
- /* linvfs_revalidate_core returns (-) errors */
- error = -linvfs_revalidate_core(ip, ATTR_COMM);
validate_fields(dir);
d_instantiate(dentry, ip);
mark_inode_dirty_sync(ip);
@@ -186,7 +184,6 @@ linvfs_lookup(
VN_RELE(cvp);
return ERR_PTR(-EACCES);
}
- error = -linvfs_revalidate_core(ip, ATTR_COMM);
}
if (error && (error != ENOENT))
return ERR_PTR(-error);
@@ -262,14 +259,13 @@ linvfs_symlink(
dvp = LINVFS_GET_VP(dir);
- bzero(&va, sizeof(va));
+ memset(&va, 0, sizeof(va));
va.va_type = VLNK;
- va.va_mode = 0777 & ~current->fs->umask;
- va.va_mask = AT_TYPE|AT_MODE; /* AT_PROJID? */
+ va.va_mode = irix_symlink_mode ? 0777 & ~current->fs->umask : S_IRWXUGO;
+ va.va_mask = AT_TYPE|AT_MODE;
error = 0;
- VOP_SYMLINK(dvp, dentry, &va, (char *)symname,
- &cvp, NULL, error);
+ VOP_SYMLINK(dvp, dentry, &va, (char *)symname, &cvp, NULL, error);
if (!error) {
ASSERT(cvp);
ASSERT(cvp->v_type == VLNK);
@@ -278,10 +274,9 @@ linvfs_symlink(
error = ENOMEM;
VN_RELE(cvp);
} else {
- /* linvfs_revalidate_core returns (-) errors */
- error = -linvfs_revalidate_core(ip, ATTR_COMM);
d_instantiate(dentry, ip);
validate_fields(dir);
+ validate_fields(ip); /* size needs update */
mark_inode_dirty_sync(ip);
mark_inode_dirty_sync(dir);
}
@@ -369,7 +364,7 @@ linvfs_readlink(
}
/*
- * careful here - this function can get called recusively, so
+ * careful here - this function can get called recursively, so
* we need to be very careful about how much stack we use.
* uio is kmalloced for this reason...
*/
@@ -441,16 +436,6 @@ linvfs_permission(
* from the results of a getattr. This gets called out of things
* like stat.
*/
-int
-linvfs_revalidate_core(
- struct inode *inode,
- int flags)
-{
- vnode_t *vp = LINVFS_GET_VP(inode);
-
- /* vn_revalidate returns (-) error so this is ok */
- return vn_revalidate(vp, flags);
-}
STATIC int
linvfs_getattr(
@@ -463,7 +448,7 @@ linvfs_getattr(
int error = 0;
if (unlikely(vp->v_flag & VMODIFIED)) {
- error = linvfs_revalidate_core(inode, 0);
+ error = vn_revalidate(vp);
}
if (!error)
generic_fillattr(inode, stat);
@@ -528,7 +513,7 @@ linvfs_setattr(
}
if (!error) {
- vn_revalidate(vp, 0);
+ vn_revalidate(vp);
mark_inode_dirty_sync(inode);
}
return error;
@@ -618,30 +603,17 @@ linvfs_setxattr(
error = -ENOATTR;
p += xfs_namespaces[SYSTEM_NAMES].namelen;
if (strcmp(p, POSIXACL_ACCESS) == 0) {
- if (vp->v_flag & VMODIFIED) {
- error = linvfs_revalidate_core(inode, 0);
- if (error)
- return error;
- }
error = xfs_acl_vset(vp, data, size, _ACL_TYPE_ACCESS);
- if (!error) {
- VMODIFY(vp);
- error = linvfs_revalidate_core(inode, 0);
- }
}
else if (strcmp(p, POSIXACL_DEFAULT) == 0) {
- error = linvfs_revalidate_core(inode, 0);
- if (error)
- return error;
error = xfs_acl_vset(vp, data, size, _ACL_TYPE_DEFAULT);
- if (!error) {
- VMODIFY(vp);
- error = linvfs_revalidate_core(inode, 0);
- }
}
else if (strcmp(p, POSIXCAP) == 0) {
error = xfs_cap_vset(vp, data, size);
}
+ if (!error) {
+ error = vn_revalidate(vp);
+ }
return error;
}
@@ -689,19 +661,9 @@ linvfs_getxattr(
error = -ENOATTR;
p += xfs_namespaces[SYSTEM_NAMES].namelen;
if (strcmp(p, POSIXACL_ACCESS) == 0) {
- if (vp->v_flag & VMODIFIED) {
- error = linvfs_revalidate_core(inode, 0);
- if (error)
- return error;
- }
error = xfs_acl_vget(vp, data, size, _ACL_TYPE_ACCESS);
}
else if (strcmp(p, POSIXACL_DEFAULT) == 0) {
- if (vp->v_flag & VMODIFIED) {
- error = linvfs_revalidate_core(inode, 0);
- if (error)
- return error;
- }
error = xfs_acl_vget(vp, data, size, _ACL_TYPE_DEFAULT);
}
else if (strcmp(p, POSIXCAP) == 0) {
diff --git a/fs/xfs/linux/xfs_iops.h b/fs/xfs/linux/xfs_iops.h
index 3c4529374aec..c5ce4a6ea9f9 100644
--- a/fs/xfs/linux/xfs_iops.h
+++ b/fs/xfs/linux/xfs_iops.h
@@ -65,7 +65,6 @@ extern struct file_operations linvfs_dir_operations;
extern struct address_space_operations linvfs_aops;
-extern int linvfs_revalidate_core(struct inode *, int);
extern int linvfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
#endif /* __XFS_IOPS_H__ */
diff --git a/fs/xfs/linux/xfs_linux.h b/fs/xfs/linux/xfs_linux.h
index 7def3bb302b8..49bb2095c10c 100644
--- a/fs/xfs/linux/xfs_linux.h
+++ b/fs/xfs/linux/xfs_linux.h
@@ -67,6 +67,10 @@
#define STATIC static
#endif
+#define restricted_chown xfs_params.restrict_chown
+#define irix_sgid_inherit xfs_params.sgid_inherit
+#define irix_symlink_mode xfs_params.symlink_mode
+
typedef struct xfs_dirent { /* data from readdir() */
xfs_ino_t d_ino; /* inode number of entry */
xfs_off_t d_off; /* offset of disk directory entry */
diff --git a/fs/xfs/linux/xfs_lrw.c b/fs/xfs/linux/xfs_lrw.c
index c7467e2acce3..9f4a37c46f32 100644
--- a/fs/xfs/linux/xfs_lrw.c
+++ b/fs/xfs/linux/xfs_lrw.c
@@ -43,16 +43,16 @@
<< mp->m_writeio_log)
#define XFS_STRAT_WRITE_IMAPS 2
-STATIC int xfs_iomap_read(xfs_iocore_t *, loff_t, size_t, int, pb_bmap_t *,
- int *, struct pm *);
-STATIC int xfs_iomap_write(xfs_iocore_t *, loff_t, size_t, pb_bmap_t *,
- int *, int, struct pm *);
-STATIC int xfs_iomap_write_delay(xfs_iocore_t *, loff_t, size_t, pb_bmap_t *,
+STATIC int xfs_iomap_read(xfs_iocore_t *, loff_t, size_t, int, page_buf_bmap_t *,
+ int *);
+STATIC int xfs_iomap_write(xfs_iocore_t *, loff_t, size_t, page_buf_bmap_t *,
+ int *, int);
+STATIC int xfs_iomap_write_delay(xfs_iocore_t *, loff_t, size_t, page_buf_bmap_t *,
int *, int, int);
-STATIC int xfs_iomap_write_direct(xfs_iocore_t *, loff_t, size_t, pb_bmap_t *,
+STATIC int xfs_iomap_write_direct(xfs_iocore_t *, loff_t, size_t, page_buf_bmap_t *,
int *, int, int);
STATIC int _xfs_imap_to_bmap(xfs_iocore_t *, xfs_off_t, xfs_bmbt_irec_t *,
- pb_bmap_t *, int, int);
+ page_buf_bmap_t *, int, int);
/*
@@ -136,11 +136,14 @@ xfs_read(
xfs_fsize_t n;
xfs_inode_t *ip;
xfs_mount_t *mp;
+ vnode_t *vp;
unsigned long seg;
int direct = filp->f_flags & O_DIRECT;
ip = XFS_BHVTOI(bdp);
+ vp = BHV_TO_VNODE(bdp);
mp = ip->i_mount;
+ vn_trace_entry(vp, "xfs_read", (inst_t *)__return_address);
XFS_STATS_INC(xfsstats.xs_read_calls);
@@ -194,7 +197,7 @@ xfs_read(
xfs_ilock(ip, XFS_IOLOCK_SHARED);
- if (DM_EVENT_ENABLED(BHV_TO_VNODE(bdp)->v_vfsp, ip, DM_EVENT_READ) &&
+ if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
!(filp->f_mode & FINVIS)) {
int error;
vrwlock_t locktype = VRWLOCK_READ;
@@ -230,8 +233,7 @@ xfs_zero_last_block(
xfs_iocore_t *io,
xfs_off_t offset,
xfs_fsize_t isize,
- xfs_fsize_t end_size,
- struct pm *pmp)
+ xfs_fsize_t end_size)
{
xfs_fileoff_t last_fsb;
xfs_mount_t *mp;
@@ -310,8 +312,7 @@ xfs_zero_eof(
xfs_iocore_t *io,
xfs_off_t offset, /* starting I/O offset */
xfs_fsize_t isize, /* current inode size */
- xfs_fsize_t end_size, /* terminal inode size */
- struct pm *pmp)
+ xfs_fsize_t end_size) /* terminal inode size */
{
struct inode *ip = LINVFS_GET_IP(vp);
xfs_fileoff_t start_zero_fsb;
@@ -337,7 +338,7 @@ xfs_zero_eof(
* First handle zeroing the block on which isize resides.
* We only zero a part of that block so it is handled specially.
*/
- error = xfs_zero_last_block(ip, io, offset, isize, end_size, pmp);
+ error = xfs_zero_last_block(ip, io, offset, isize, end_size);
if (error) {
ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
@@ -469,6 +470,7 @@ xfs_write(
XFS_STATS_INC(xfsstats.xs_write_calls);
vp = BHV_TO_VNODE(bdp);
+ vn_trace_entry(vp, "xfs_write", (inst_t *)__return_address);
xip = XFS_BHVTOI(bdp);
/* START copy & waste from filemap.c */
@@ -592,7 +594,7 @@ start:
if (!direct && (*offset > isize && isize)) {
error = xfs_zero_eof(BHV_TO_VNODE(bdp), io, *offset,
- isize, *offset + size, NULL);
+ isize, *offset + size);
if (error) {
xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
return(-error);
@@ -755,112 +757,15 @@ retry:
return(ret);
}
-/*
- * xfs_bmap() is the same as the irix xfs_bmap from xfs_rw.c
- * execpt for slight changes to the params
- */
-int
-xfs_bmap(bhv_desc_t *bdp,
- xfs_off_t offset,
- ssize_t count,
- int flags,
- struct cred *cred,
- pb_bmap_t *pbmapp,
- int *npbmaps)
-{
- xfs_inode_t *ip;
- int error;
- int lockmode;
- int fsynced = 0;
- vnode_t *vp;
-
- ip = XFS_BHVTOI(bdp);
- ASSERT((ip->i_d.di_mode & IFMT) == IFREG);
- ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
- ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0));
- ASSERT((flags & PBF_READ) || (flags & PBF_WRITE));
-
- if (XFS_FORCED_SHUTDOWN(ip->i_iocore.io_mount))
- return XFS_ERROR(EIO);
-
- if (flags & PBF_READ) {
- lockmode = xfs_ilock_map_shared(ip);
- error = xfs_iomap_read(&ip->i_iocore, offset, count,
- XFS_BMAPI_ENTIRE, pbmapp, npbmaps, NULL);
- xfs_iunlock_map_shared(ip, lockmode);
- } else { /* PBF_WRITE */
- ASSERT(flags & PBF_WRITE);
- vp = BHV_TO_VNODE(bdp);
- xfs_ilock(ip, XFS_ILOCK_EXCL);
-
- /*
- * Make sure that the dquots are there. This doesn't hold
- * the ilock across a disk read.
- */
-
- if (XFS_IS_QUOTA_ON(ip->i_mount)) {
- if (XFS_NOT_DQATTACHED(ip->i_mount, ip)) {
- if ((error = xfs_qm_dqattach(ip, XFS_QMOPT_ILOCKED))) {
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- return XFS_ERROR(error);
- }
- }
- }
-retry:
- error = xfs_iomap_write(&ip->i_iocore, offset, count,
- pbmapp, npbmaps, flags, NULL);
- /* xfs_iomap_write unlocks/locks/unlocks */
-
- if (error == ENOSPC) {
- switch (fsynced) {
- case 0:
- if (ip->i_delayed_blks) {
- filemap_fdatawrite(LINVFS_GET_IP(vp)->i_mapping);
- fsynced = 1;
- } else {
- fsynced = 2;
- flags |= PBF_SYNC;
- }
- error = 0;
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- goto retry;
- case 1:
- fsynced = 2;
- if (!(flags & PBF_SYNC)) {
- flags |= PBF_SYNC;
- error = 0;
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- goto retry;
- }
- case 2:
- sync_blockdev(vp->v_vfsp->vfs_super->s_bdev);
- xfs_log_force(ip->i_mount, (xfs_lsn_t)0,
- XFS_LOG_FORCE|XFS_LOG_SYNC);
-
- error = 0;
-/**
- delay(HZ);
-**/
- fsynced++;
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- goto retry;
- }
- }
- }
-
- return XFS_ERROR(error);
-}
int
-xfs_strategy(bhv_desc_t *bdp,
+xfs_strategy(xfs_inode_t *ip,
xfs_off_t offset,
ssize_t count,
int flags,
- struct cred *cred,
- pb_bmap_t *pbmapp,
+ page_buf_bmap_t *pbmapp,
int *npbmaps)
{
- xfs_inode_t *ip;
xfs_iocore_t *io;
xfs_mount_t *mp;
int error;
@@ -876,20 +781,16 @@ xfs_strategy(bhv_desc_t *bdp,
xfs_bmbt_irec_t imap[XFS_MAX_RW_NBMAPS];
xfs_trans_t *tp;
- ip = XFS_BHVTOI(bdp);
io = &ip->i_iocore;
mp = ip->i_mount;
/* is_xfs = IO_IS_XFS(io); */
ASSERT((ip->i_d.di_mode & IFMT) == IFREG);
ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
((io->io_flags & XFS_IOCORE_RT) != 0));
- ASSERT((flags & PBF_READ) || (flags & PBF_WRITE));
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
- ASSERT(flags & PBF_WRITE);
-
offset_fsb = XFS_B_TO_FSBT(mp, offset);
nimaps = min(XFS_MAX_RW_NBMAPS, *npbmaps);
end_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
@@ -1082,12 +983,110 @@ xfs_strategy(bhv_desc_t *bdp,
}
+/*
+ * xfs_bmap() is the same as the irix xfs_bmap from xfs_rw.c
+ * except for slight changes to the params
+ */
+int
+xfs_bmap(bhv_desc_t *bdp,
+ xfs_off_t offset,
+ ssize_t count,
+ int flags,
+ page_buf_bmap_t *pbmapp,
+ int *npbmaps)
+{
+ xfs_inode_t *ip;
+ int error;
+ int lockmode;
+ int fsynced = 0;
+ vnode_t *vp;
+
+ ip = XFS_BHVTOI(bdp);
+ ASSERT((ip->i_d.di_mode & IFMT) == IFREG);
+ ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
+ ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0));
+
+ if (XFS_FORCED_SHUTDOWN(ip->i_iocore.io_mount))
+ return XFS_ERROR(EIO);
+
+ if (flags & PBF_READ) {
+ lockmode = xfs_ilock_map_shared(ip);
+ error = xfs_iomap_read(&ip->i_iocore, offset, count,
+ XFS_BMAPI_ENTIRE, pbmapp, npbmaps);
+ xfs_iunlock_map_shared(ip, lockmode);
+ } else if (flags & PBF_FILE_ALLOCATE) {
+ error = xfs_strategy(ip, offset, count, flags,
+ pbmapp, npbmaps);
+ } else { /* PBF_WRITE */
+ ASSERT(flags & PBF_WRITE);
+ vp = BHV_TO_VNODE(bdp);
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+
+ /*
+ * Make sure that the dquots are there. This doesn't hold
+ * the ilock across a disk read.
+ */
+
+ if (XFS_IS_QUOTA_ON(ip->i_mount)) {
+ if (XFS_NOT_DQATTACHED(ip->i_mount, ip)) {
+ if ((error = xfs_qm_dqattach(ip, XFS_QMOPT_ILOCKED))) {
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ return XFS_ERROR(error);
+ }
+ }
+ }
+retry:
+ error = xfs_iomap_write(&ip->i_iocore, offset, count,
+ pbmapp, npbmaps, flags);
+ /* xfs_iomap_write unlocks/locks/unlocks */
+
+ if (error == ENOSPC) {
+ switch (fsynced) {
+ case 0:
+ if (ip->i_delayed_blks) {
+ filemap_fdatawrite(LINVFS_GET_IP(vp)->i_mapping);
+ fsynced = 1;
+ } else {
+ fsynced = 2;
+ flags |= PBF_SYNC;
+ }
+ error = 0;
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ goto retry;
+ case 1:
+ fsynced = 2;
+ if (!(flags & PBF_SYNC)) {
+ flags |= PBF_SYNC;
+ error = 0;
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ goto retry;
+ }
+ case 2:
+ sync_blockdev(vp->v_vfsp->vfs_super->s_bdev);
+ xfs_log_force(ip->i_mount, (xfs_lsn_t)0,
+ XFS_LOG_FORCE|XFS_LOG_SYNC);
+
+ error = 0;
+/**
+ delay(HZ);
+**/
+ fsynced++;
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ goto retry;
+ }
+ }
+ }
+
+ return XFS_ERROR(error);
+}
+
+
STATIC int
_xfs_imap_to_bmap(
xfs_iocore_t *io,
xfs_off_t offset,
xfs_bmbt_irec_t *imap,
- pb_bmap_t *pbmapp,
+ page_buf_bmap_t *pbmapp,
int imaps, /* Number of imap entries */
int pbmaps) /* Number of pbmap entries */
{
@@ -1138,9 +1137,8 @@ xfs_iomap_read(
loff_t offset,
size_t count,
int flags,
- pb_bmap_t *pbmapp,
- int *npbmaps,
- struct pm *pmp)
+ page_buf_bmap_t *pbmapp,
+ int *npbmaps)
{
xfs_fileoff_t offset_fsb;
xfs_fileoff_t end_fsb;
@@ -1191,10 +1189,9 @@ xfs_iomap_write(
xfs_iocore_t *io,
loff_t offset,
size_t count,
- pb_bmap_t *pbmapp,
+ page_buf_bmap_t *pbmapp,
int *npbmaps,
- int ioflag,
- struct pm *pmp)
+ int ioflag)
{
int maps;
int error = 0;
@@ -1211,7 +1208,7 @@ xfs_iomap_write(
*/
found = 0;
- error = xfs_iomap_read(io, offset, count, flags, pbmapp, npbmaps, NULL);
+ error = xfs_iomap_read(io, offset, count, flags, pbmapp, npbmaps);
if (error)
goto out;
@@ -1260,7 +1257,7 @@ xfs_write_bmap(
xfs_mount_t *mp,
xfs_iocore_t *io,
xfs_bmbt_irec_t *imapp,
- pb_bmap_t *pbmapp,
+ page_buf_bmap_t *pbmapp,
int iosize,
xfs_fileoff_t ioalign,
xfs_fsize_t isize)
@@ -1330,7 +1327,7 @@ xfs_iomap_write_delay(
xfs_iocore_t *io,
loff_t offset,
size_t count,
- pb_bmap_t *pbmapp,
+ page_buf_bmap_t *pbmapp,
int *npbmaps,
int ioflag,
int found)
@@ -1528,7 +1525,7 @@ xfs_iomap_write_direct(
xfs_iocore_t *io,
loff_t offset,
size_t count,
- pb_bmap_t *pbmapp,
+ page_buf_bmap_t *pbmapp,
int *npbmaps,
int ioflag,
int found)
@@ -1830,36 +1827,22 @@ XFS_log_write_unmount_ro(bhv_desc_t *bdp)
}
/*
- * In these two situations we disregard the readonly mount flag and
- * temporarily enable writes (we must, to ensure metadata integrity).
+ * If the underlying (log, data or realtime) device is readonly, there are some
+ * operations that cannot proceed.
*/
-STATIC int
-xfs_is_read_only(xfs_mount_t *mp)
+int
+xfs_dev_is_read_only(xfs_mount_t *mp, char *message)
{
if (bdev_read_only(mp->m_ddev_targp->pbr_bdev) ||
- bdev_read_only(mp->m_logdev_targp->pbr_bdev)) {
+ bdev_read_only(mp->m_logdev_targp->pbr_bdev) ||
+ (mp->m_rtdev_targp && bdev_read_only(mp->m_rtdev_targp->pbr_bdev))) {
+ cmn_err(CE_NOTE,
+ "XFS: %s required on read-only device.", message);
cmn_err(CE_NOTE,
"XFS: write access unavailable, cannot proceed.");
return EROFS;
}
- cmn_err(CE_NOTE,
- "XFS: write access will be enabled during mount.");
- XFS_MTOVFS(mp)->vfs_flag &= ~VFS_RDONLY;
- return 0;
-}
-int
-xfs_recover_read_only(xlog_t *log)
-{
- cmn_err(CE_NOTE, "XFS: WARNING: "
- "recovery required on readonly filesystem.");
- return xfs_is_read_only(log->l_mp);
+ return 0;
}
-int
-xfs_quotacheck_read_only(xfs_mount_t *mp)
-{
- cmn_err(CE_NOTE, "XFS: WARNING: "
- "quotacheck required on readonly filesystem.");
- return xfs_is_read_only(mp);
-}
diff --git a/fs/xfs/linux/xfs_lrw.h b/fs/xfs/linux/xfs_lrw.h
index 0ea2cfe9a860..3ac8eddedb23 100644
--- a/fs/xfs/linux/xfs_lrw.h
+++ b/fs/xfs/linux/xfs_lrw.h
@@ -39,13 +39,12 @@
*/
#define XFS_MAX_RW_NBMAPS 4
-extern int xfs_bmap (bhv_desc_t *, xfs_off_t, ssize_t, int, struct cred *, pb_bmap_t *, int *);
-extern int xfs_strategy (bhv_desc_t *, xfs_off_t, ssize_t, int, struct cred *, pb_bmap_t *, int *);
+extern int xfs_bmap (bhv_desc_t *, xfs_off_t, ssize_t, int, page_buf_bmap_t *, int *);
extern int xfsbdstrat (struct xfs_mount *, struct xfs_buf *);
extern int xfs_bdstrat_cb (struct xfs_buf *);
extern int xfs_zero_eof (vnode_t *, struct xfs_iocore *, xfs_off_t,
- xfs_fsize_t, xfs_fsize_t, struct pm *);
+ xfs_fsize_t, xfs_fsize_t);
extern ssize_t xfs_read (
struct bhv_desc *bdp,
struct file *filp,
@@ -62,8 +61,7 @@ extern ssize_t xfs_write (
loff_t *offp,
struct cred *credp);
-extern int xfs_recover_read_only (xlog_t *);
-extern int xfs_quotacheck_read_only (xfs_mount_t *);
+extern int xfs_dev_is_read_only(xfs_mount_t *, char *);
extern void XFS_log_write_unmount_ro (bhv_desc_t *);
diff --git a/fs/xfs/linux/xfs_super.c b/fs/xfs/linux/xfs_super.c
index 2dfaf44b0f7e..27be367ec316 100644
--- a/fs/xfs/linux/xfs_super.c
+++ b/fs/xfs/linux/xfs_super.c
@@ -92,8 +92,6 @@ STATIC struct export_operations linvfs_export_ops;
#define MNTOPT_SUNIT "sunit" /* data volume stripe unit */
#define MNTOPT_SWIDTH "swidth" /* data volume stripe width */
#define MNTOPT_NORECOVERY "norecovery" /* don't run XFS recovery */
-#define MNTOPT_OSYNCISDSYNC "osyncisdsync" /* o_sync == o_dsync on this fs */
- /* (this is now the default!) */
#define MNTOPT_OSYNCISOSYNC "osyncisosync" /* o_sync is REALLY o_sync */
#define MNTOPT_QUOTA "quota" /* disk quotas */
#define MNTOPT_MRQUOTA "mrquota" /* don't turnoff if SB has quotas on */
@@ -104,7 +102,6 @@ STATIC struct export_operations linvfs_export_ops;
#define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */
#define MNTOPT_QUOTANOENF "qnoenforce" /* same as uqnoenforce */
#define MNTOPT_NOUUID "nouuid" /* Ignore FS uuid */
-#define MNTOPT_IRIXSGID "irixsgid" /* Irix-style sgid inheritance */
#define MNTOPT_NOLOGFLUSH "nologflush" /* Don't use hard flushes in
log writing */
#define MNTOPT_MTPT "mtpt" /* filesystem mount point */
@@ -124,6 +121,9 @@ xfs_parseargs(
iosize = dsunit = dswidth = vol_dsunit = vol_dswidth = 0;
+ /* Default to 32 bit inodes on linux all the time */
+ args->flags |= XFSMNT_32BITINODES;
+
/* Copy the already-parsed mount(2) flags we're interested in */
if (flags & MS_NOATIME)
args->flags |= XFSMNT_NOATIME;
@@ -175,9 +175,6 @@ xfs_parseargs(
args->iosizelog = (uint8_t) iosize;
} else if (!strcmp(this_char, MNTOPT_WSYNC)) {
args->flags |= XFSMNT_WSYNC;
- } else if (!strcmp(this_char, MNTOPT_OSYNCISDSYNC)) {
- /* no-op, this is now the default */
-printk("XFS: osyncisdsync is now the default, and will soon be deprecated.\n");
} else if (!strcmp(this_char, MNTOPT_OSYNCISOSYNC)) {
args->flags |= XFSMNT_OSYNCISOSYNC;
} else if (!strcmp(this_char, MNTOPT_NORECOVERY)) {
@@ -212,10 +209,13 @@ printk("XFS: osyncisdsync is now the default, and will soon be deprecated.\n");
dswidth = simple_strtoul(value, &eov, 10);
} else if (!strcmp(this_char, MNTOPT_NOUUID)) {
args->flags |= XFSMNT_NOUUID;
- } else if (!strcmp(this_char, MNTOPT_IRIXSGID)) {
- args->flags |= XFSMNT_IRIXSGID;
} else if (!strcmp(this_char, MNTOPT_NOLOGFLUSH)) {
args->flags |= XFSMNT_NOLOGFLUSH;
+ } else if (!strcmp(this_char, "osyncisdsync")) {
+ /* no-op, this is now the default */
+printk("XFS: osyncisdsync is now the default, option is deprecated.\n");
+ } else if (!strcmp(this_char, "irixsgid")) {
+printk("XFS: irixsgid is now a sysctl(2) variable, option is deprecated.\n");
} else {
printk("XFS: unknown mount option [%s].\n", this_char);
return rval;
@@ -264,53 +264,72 @@ printk("XFS: osyncisdsync is now the default, and will soon be deprecated.\n");
return 0;
}
-/*
- * Convert one device special file to a dev_t.
- * Helper routine, used only by spectodevs below.
- */
STATIC int
-spectodev(
- const char *name,
- const char *id,
- dev_t *dev)
+xfs_showargs(
+ struct vfs *vfsp,
+ struct seq_file *m)
{
- struct nameidata nd;
- int error;
+ static struct proc_xfs_info {
+ int flag;
+ char *str;
+ } xfs_info[] = {
+ /* the few simple ones we can get from the mount struct */
+ { XFS_MOUNT_NOALIGN, "," MNTOPT_NOALIGN },
+ { XFS_MOUNT_NORECOVERY, "," MNTOPT_NORECOVERY },
+ { XFS_MOUNT_OSYNCISOSYNC, "," MNTOPT_OSYNCISOSYNC },
+ { XFS_MOUNT_NOUUID, "," MNTOPT_NOUUID },
+ { 0, NULL }
+ };
+ struct proc_xfs_info *xfs_infop;
+ struct xfs_mount *mp = XFS_BHVTOM(vfsp->vfs_fbhv);
- error = path_lookup(name, LOOKUP_FOLLOW, &nd);
- if (error)
- return error;
+ for (xfs_infop = xfs_info; xfs_infop->flag; xfs_infop++) {
+ if (mp->m_flags & xfs_infop->flag)
+ seq_puts(m, xfs_infop->str);
+ }
- *dev = kdev_t_to_nr(nd.dentry->d_inode->i_rdev);
- path_release(&nd);
- return 0;
-}
+ if (mp->m_qflags & XFS_UQUOTA_ACCT) {
+ (mp->m_qflags & XFS_UQUOTA_ENFD) ?
+ seq_puts(m, "," MNTOPT_UQUOTA) :
+ seq_puts(m, "," MNTOPT_UQUOTANOENF);
+ }
-/*
- * Convert device special files to dev_t for data, log, realtime.
- */
-int
-spectodevs(
- struct super_block *sb,
- struct xfs_mount_args *args,
- dev_t *ddevp,
- dev_t *logdevp,
- dev_t *rtdevp)
-{
- int rval = 0;
+ if (mp->m_qflags & XFS_GQUOTA_ACCT) {
+ (mp->m_qflags & XFS_GQUOTA_ENFD) ?
+ seq_puts(m, "," MNTOPT_GQUOTA) :
+ seq_puts(m, "," MNTOPT_GQUOTANOENF);
+ }
- *ddevp = sb->s_dev;
+ if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
+ seq_printf(m, "," MNTOPT_BIOSIZE "=%d", mp->m_writeio_log);
- if (args->logname[0])
- rval = spectodev(args->logname, "log", logdevp);
- else
- *logdevp = sb->s_dev;
+ if (mp->m_logbufs > 0)
+ seq_printf(m, "," MNTOPT_LOGBUFS "=%d", mp->m_logbufs);
- if (args->rtname[0] && !rval)
- rval = spectodev(args->rtname, "realtime", rtdevp);
- else
- *rtdevp = 0;
- return rval;
+ if (mp->m_logbsize > 0)
+ seq_printf(m, "," MNTOPT_LOGBSIZE "=%d", mp->m_logbsize);
+
+ if (mp->m_ddev_targp->pbr_dev != mp->m_logdev_targp->pbr_dev)
+ seq_printf(m, "," MNTOPT_LOGDEV "=%s",
+ bdevname(mp->m_logdev_targp->pbr_bdev));
+
+ if (mp->m_rtdev_targp &&
+ mp->m_ddev_targp->pbr_dev != mp->m_rtdev_targp->pbr_dev)
+ seq_printf(m, "," MNTOPT_RTDEV "=%s",
+ bdevname(mp->m_rtdev_targp->pbr_bdev));
+
+ if (mp->m_dalign > 0)
+ seq_printf(m, "," MNTOPT_SUNIT "=%d",
+ (int)XFS_FSB_TO_BB(mp, mp->m_dalign));
+
+ if (mp->m_swidth > 0)
+ seq_printf(m, "," MNTOPT_SWIDTH "=%d",
+ (int)XFS_FSB_TO_BB(mp, mp->m_swidth));
+
+ if (vfsp->vfs_flag & VFS_DMI)
+ seq_puts(m, "," MNTOPT_DMAPI);
+
+ return 0;
}
@@ -439,7 +458,6 @@ linvfs_fill_super(
goto fail_unmount;
ip = LINVFS_GET_IP(rootvp);
- linvfs_revalidate_core(ip, ATTR_COMM);
sb->s_root = d_alloc_root(ip);
if (!sb->s_root)
@@ -493,12 +511,6 @@ linvfs_set_inode_ops(
{
vnode_t *vp = LINVFS_GET_VP(inode);
- inode->i_mode = VTTOIF(vp->v_type);
-
- /* If this isn't a new inode, nothing to do */
- if (!(inode->i_state & I_NEW))
- return;
-
if (vp->v_type == VNON) {
make_bad_inode(inode);
} else if (S_ISREG(inode->i_mode)) {
@@ -517,8 +529,6 @@ linvfs_set_inode_ops(
init_special_inode(inode, inode->i_mode,
kdev_t_to_nr(inode->i_rdev));
}
-
- unlock_new_inode(inode);
}
/*
@@ -580,7 +590,6 @@ linvfs_put_super(
struct super_block *sb)
{
int error;
- int sector_size;
vfs_t *vfsp = LINVFS_GET_VFS(sb);
VFS_DOUNMOUNT(vfsp, 0, NULL, NULL, error);
@@ -591,10 +600,6 @@ linvfs_put_super(
}
vfs_deallocate(vfsp);
-
- /* Reset device block size */
- sector_size = bdev_hardsect_size(sb->s_bdev);
- set_blocksize(sb->s_bdev, sector_size);
}
void
@@ -728,7 +733,6 @@ linvfs_get_parent(
VN_RELE(cvp);
return ERR_PTR(-EACCES);
}
- error = -linvfs_revalidate_core(ip, ATTR_COMM);
}
if (error)
return ERR_PTR(-error);
@@ -759,72 +763,9 @@ linvfs_show_options(
struct seq_file *m,
struct vfsmount *mnt)
{
- vfs_t *vfsp;
- xfs_mount_t *mp;
- static struct proc_xfs_info {
- int flag;
- char *str;
- } xfs_info[] = {
- /* the few simple ones we can get from the mount struct */
- { XFS_MOUNT_NOALIGN, ",noalign" },
- { XFS_MOUNT_NORECOVERY, ",norecovery" },
- { XFS_MOUNT_OSYNCISOSYNC, ",osyncisosync" },
- { XFS_MOUNT_NOUUID, ",nouuid" },
- { XFS_MOUNT_IRIXSGID, ",irixsgid" },
- { 0, NULL }
- };
- struct proc_xfs_info *xfs_infop;
-
- vfsp = LINVFS_GET_VFS(mnt->mnt_sb);
- mp = XFS_BHVTOM(vfsp->vfs_fbhv);
-
- for (xfs_infop = xfs_info; xfs_infop->flag; xfs_infop++) {
- if (mp->m_flags & xfs_infop->flag)
- seq_puts(m, xfs_infop->str);
- }
-
- if (mp->m_qflags & XFS_UQUOTA_ACCT) {
- seq_puts(m, ",uquota");
- if (!(mp->m_qflags & XFS_UQUOTA_ENFD))
- seq_puts(m, ",uqnoenforce");
- }
+ vfs_t *vfsp = LINVFS_GET_VFS(mnt->mnt_sb);
- if (mp->m_qflags & XFS_GQUOTA_ACCT) {
- seq_puts(m, ",gquota");
- if (!(mp->m_qflags & XFS_GQUOTA_ENFD))
- seq_puts(m, ",gqnoenforce");
- }
-
- if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
- seq_printf(m, ",biosize=%d", mp->m_writeio_log);
-
- if (mp->m_logbufs > 0)
- seq_printf(m, ",logbufs=%d", mp->m_logbufs);
-
- if (mp->m_logbsize > 0)
- seq_printf(m, ",logbsize=%d", mp->m_logbsize);
-
- if (mp->m_ddev_targp->pbr_dev != mp->m_logdev_targp->pbr_dev)
- seq_printf(m, ",logdev=%s",
- bdevname(mp->m_logdev_targp->pbr_bdev));
-
- if (mp->m_rtdev_targp &&
- mp->m_ddev_targp->pbr_dev != mp->m_rtdev_targp->pbr_dev)
- seq_printf(m, ",rtdev=%s",
- bdevname(mp->m_rtdev_targp->pbr_bdev));
-
- if (mp->m_dalign > 0)
- seq_printf(m, ",sunit=%d",
- (int)XFS_FSB_TO_BB(mp, mp->m_dalign));
-
- if (mp->m_swidth > 0)
- seq_printf(m, ",swidth=%d",
- (int)XFS_FSB_TO_BB(mp, mp->m_swidth));
-
- if (vfsp->vfs_flag & VFS_DMI)
- seq_puts(m, ",dmapi");
-
- return 0;
+ return xfs_showargs(vfsp, m);
}
STATIC struct super_operations linvfs_sops = {
diff --git a/fs/xfs/linux/xfs_super.h b/fs/xfs/linux/xfs_super.h
index e783163a2300..315910498a0c 100644
--- a/fs/xfs/linux/xfs_super.h
+++ b/fs/xfs/linux/xfs_super.h
@@ -80,18 +80,8 @@
((s)->s_fs_info = vfsp)
-struct xfs_mount_args;
-
extern void
linvfs_set_inode_ops(
struct inode *inode);
-extern int
-spectodevs(
- struct super_block *sb,
- struct xfs_mount_args *args,
- dev_t *ddevp,
- dev_t *logdevp,
- dev_t *rtdevp);
-
#endif /* __XFS_SUPER_H__ */
diff --git a/fs/xfs/linux/xfs_sysctl.c b/fs/xfs/linux/xfs_sysctl.c
index 840810b33f27..4229b8975e05 100644
--- a/fs/xfs/linux/xfs_sysctl.c
+++ b/fs/xfs/linux/xfs_sysctl.c
@@ -35,30 +35,34 @@
#include <linux/proc_fs.h>
/*
- * Tunable xfs parameters
+ * Tunable XFS parameters
*/
extern struct xfsstats xfsstats;
-unsigned long xfs_min[XFS_PARAM] = { 0, 0, 0 };
-unsigned long xfs_max[XFS_PARAM] = { XFS_REFCACHE_SIZE_MAX, XFS_REFCACHE_SIZE_MAX, 1 };
+STATIC ulong xfs_min[XFS_PARAM] = { \
+ 0, 0, 0, 0, 0, 0 };
+STATIC ulong xfs_max[XFS_PARAM] = { \
+ XFS_REFCACHE_SIZE_MAX, XFS_REFCACHE_SIZE_MAX, 1, 1, 1, 1 };
-xfs_param_t xfs_params = { 128, 32, 0 };
+xfs_param_t xfs_params = { 128, 32, 0, 1, 0, 0 };
static struct ctl_table_header *xfs_table_header;
-/* proc handlers */
-extern void xfs_refcache_resize(int xfs_refcache_new_size);
+/* Custom proc handlers */
-static int
-xfs_refcache_resize_proc_handler(ctl_table *ctl, int write, struct file * filp,
- void *buffer, size_t *lenp)
+STATIC int
+xfs_refcache_resize_proc_handler(
+ ctl_table *ctl,
+ int write,
+ struct file *filp,
+ void *buffer,
+ size_t *lenp)
{
- int ret;
- int *valp = ctl->data;
- int xfs_refcache_new_size;
- int xfs_refcache_old_size = *valp;
+ int ret, *valp = ctl->data;
+ int xfs_refcache_new_size;
+ int xfs_refcache_old_size = *valp;
ret = proc_doulongvec_minmax(ctl, write, filp, buffer, lenp);
xfs_refcache_new_size = *valp;
@@ -73,12 +77,15 @@ xfs_refcache_resize_proc_handler(ctl_table *ctl, int write, struct file * filp,
return ret;
}
-static int
-xfs_stats_clear_proc_handler(ctl_table *ctl, int write, struct file * filp,
- void *buffer, size_t *lenp)
+STATIC int
+xfs_stats_clear_proc_handler(
+ ctl_table *ctl,
+ int write,
+ struct file *filp,
+ void *buffer,
+ size_t *lenp)
{
- int ret;
- int *valp = ctl->data;
+ int ret, *valp = ctl->data;
__uint32_t vn_active;
ret = proc_doulongvec_minmax(ctl, write, filp, buffer, lenp);
@@ -95,7 +102,7 @@ xfs_stats_clear_proc_handler(ctl_table *ctl, int write, struct file * filp,
return ret;
}
-static ctl_table xfs_table[] = {
+STATIC ctl_table xfs_table[] = {
{XFS_REFCACHE_SIZE, "refcache_size", &xfs_params.refcache_size,
sizeof(ulong), 0644, NULL, &xfs_refcache_resize_proc_handler,
&sysctl_intvec, NULL, &xfs_min[0], &xfs_max[0]},
@@ -108,15 +115,27 @@ static ctl_table xfs_table[] = {
sizeof(ulong), 0644, NULL, &xfs_stats_clear_proc_handler,
&sysctl_intvec, NULL, &xfs_min[2], &xfs_max[2]},
+ {XFS_RESTRICT_CHOWN, "restrict_chown", &xfs_params.restrict_chown,
+ sizeof(ulong), 0644, NULL, &proc_doulongvec_minmax,
+ &sysctl_intvec, NULL, &xfs_min[3], &xfs_max[3]},
+
+ {XFS_SGID_INHERIT, "irix_sgid_inherit", &xfs_params.sgid_inherit,
+ sizeof(ulong), 0644, NULL, &proc_doulongvec_minmax,
+ &sysctl_intvec, NULL, &xfs_min[4], &xfs_max[4]},
+
+ {XFS_SYMLINK_MODE, "irix_symlink_mode", &xfs_params.symlink_mode,
+ sizeof(ulong), 0644, NULL, &proc_doulongvec_minmax,
+ &sysctl_intvec, NULL, &xfs_min[5], &xfs_max[5]},
+
{0}
};
-static ctl_table xfs_dir_table[] = {
+STATIC ctl_table xfs_dir_table[] = {
{FS_XFS, "xfs", NULL, 0, 0555, xfs_table},
{0}
};
-static ctl_table xfs_root_table[] = {
+STATIC ctl_table xfs_root_table[] = {
{CTL_FS, "fs", NULL, 0, 0555, xfs_dir_table},
{0}
};
diff --git a/fs/xfs/linux/xfs_sysctl.h b/fs/xfs/linux/xfs_sysctl.h
index 6649017ec372..4bf5749d1827 100644
--- a/fs/xfs/linux/xfs_sysctl.h
+++ b/fs/xfs/linux/xfs_sysctl.h
@@ -39,18 +39,25 @@
* Tunable xfs parameters
*/
-#define XFS_PARAM 3
+#define XFS_PARAM (sizeof(struct xfs_param) / sizeof(ulong))
typedef struct xfs_param {
- ulong refcache_size; /* Size of nfs refcache */
- ulong refcache_purge; /* # of entries to purge each time */
- ulong stats_clear; /* reset all xfs stats to 0 */
+ ulong refcache_size; /* Size of NFS reference cache. */
+ ulong refcache_purge; /* # of entries to purge each time. */
+ ulong stats_clear; /* Reset all XFS statistics to zero. */
+ ulong restrict_chown; /* Root/non-root can give away files. */
+ ulong sgid_inherit; /* Inherit ISGID bit if process' GID is */
+ /* not a member of the parent dir GID. */
+ ulong symlink_mode; /* Symlink creat mode affected by umask. */
} xfs_param_t;
enum {
XFS_REFCACHE_SIZE = 1,
XFS_REFCACHE_PURGE = 2,
XFS_STATS_CLEAR = 3,
+ XFS_RESTRICT_CHOWN = 4,
+ XFS_SGID_INHERIT = 5,
+ XFS_SYMLINK_MODE = 6,
};
extern xfs_param_t xfs_params;
diff --git a/fs/xfs/linux/xfs_vfs.h b/fs/xfs/linux/xfs_vfs.h
index 0f384eb8220f..381cb9d7c6d4 100644
--- a/fs/xfs/linux/xfs_vfs.h
+++ b/fs/xfs/linux/xfs_vfs.h
@@ -92,6 +92,8 @@ typedef struct vfsops {
/* send dmapi mount event */
int (*vfs_dmapi_fsys_vector)(bhv_desc_t *,
struct dm_fcntl_vector *);
+ void (*vfs_init_vnode)(bhv_desc_t *, struct vnode *,
+ bhv_desc_t *, int);
void (*vfs_force_shutdown)(bhv_desc_t *,
int, char *, int);
} vfsops_t;
@@ -132,6 +134,14 @@ typedef struct vfsops {
rv = (*(VFS_FOPS(vfsp)->vfs_vget))((vfsp)->vfs_fbhv, vpp, fidp); \
BHV_READ_UNLOCK(&(vfsp)->vfs_bh); \
}
+
+#define VFS_INIT_VNODE(vfsp, vp, bhv, unlock) \
+{ \
+ BHV_READ_LOCK(&(vfsp)->vfs_bh); \
+ (*(VFS_FOPS(vfsp)->vfs_init_vnode))((vfsp)->vfs_fbhv, vp, bhv, unlock);\
+ BHV_READ_UNLOCK(&(vfsp)->vfs_bh); \
+}
+
/* No behavior lock here */
#define VFS_FORCE_SHUTDOWN(vfsp, flags) \
(*(VFS_FOPS(vfsp)->vfs_force_shutdown))((vfsp)->vfs_fbhv, flags, __FILE__, __LINE__);
diff --git a/fs/xfs/linux/xfs_vnode.c b/fs/xfs/linux/xfs_vnode.c
index 0d4cb5ea14eb..51a855c9a4a0 100644
--- a/fs/xfs/linux/xfs_vnode.c
+++ b/fs/xfs/linux/xfs_vnode.c
@@ -179,20 +179,10 @@ vn_get(struct vnode *vp, vmap_t *vmap)
if (inode->i_state & I_FREEING)
return NULL;
- inode = iget_locked(vmap->v_vfsp->vfs_super, vmap->v_ino);
+ inode = ilookup(vmap->v_vfsp->vfs_super, vmap->v_ino);
if (inode == NULL) /* Inode not present */
return NULL;
- /* We do not want to create new inodes via vn_get,
- * returning NULL here is OK.
- */
- if (inode->i_state & I_NEW) {
- make_bad_inode(inode);
- unlock_new_inode(inode);
- iput(inode);
- return NULL;
- }
-
vn_trace_exit(vp, "vn_get", (inst_t *)__return_address);
ASSERT((vp->v_flag & VPURGE) == 0);
@@ -203,7 +193,7 @@ vn_get(struct vnode *vp, vmap_t *vmap)
* "revalidate" the linux inode.
*/
int
-vn_revalidate(struct vnode *vp, int flags)
+vn_revalidate(struct vnode *vp)
{
int error;
struct inode *inode;
@@ -215,7 +205,7 @@ vn_revalidate(struct vnode *vp, int flags)
ASSERT(vp->v_bh.bh_first != NULL);
- VOP_GETATTR(vp, &va, flags & ATTR_LAZY, NULL, error);
+ VOP_GETATTR(vp, &va, 0, NULL, error);
if (! error) {
inode = LINVFS_GET_IP(vp);
@@ -225,27 +215,12 @@ vn_revalidate(struct vnode *vp, int flags)
inode->i_nlink = va.va_nlink;
inode->i_uid = va.va_uid;
inode->i_gid = va.va_gid;
- inode->i_rdev = mk_kdev(MAJOR(va.va_rdev),
- MINOR(va.va_rdev));
- inode->i_blksize = PAGE_CACHE_SIZE;
- inode->i_generation = va.va_gencount;
- if ((flags & ATTR_COMM) ||
- S_ISREG(inode->i_mode) ||
- S_ISDIR(inode->i_mode) ||
- S_ISLNK(inode->i_mode)) {
- inode->i_size = va.va_size;
- inode->i_blocks = va.va_nblocks;
- inode->i_atime = va.va_atime.tv_sec;
- inode->i_mtime = va.va_mtime.tv_sec;
- inode->i_ctime = va.va_ctime.tv_sec;
- }
- if (flags & ATTR_LAZY)
- vp->v_flag &= ~VMODIFIED;
- else
- VUNMODIFY(vp);
- } else {
- vn_trace_exit(vp, "vn_revalidate.error",
- (inst_t *)__return_address);
+ inode->i_size = va.va_size;
+ inode->i_blocks = va.va_nblocks;
+ inode->i_mtime = va.va_mtime.tv_sec;
+ inode->i_ctime = va.va_ctime.tv_sec;
+ inode->i_atime = va.va_atime.tv_sec;
+ VUNMODIFY(vp);
}
return -error;
@@ -412,8 +387,7 @@ vn_remove(struct vnode *vp)
* After the following purge the vnode
* will no longer exist.
*/
- VMAP(vp, XFS_BHVTOI(vp->v_fbhv), vmap);
-
+ VMAP(vp, vmap);
vn_purge(vp, &vmap);
}
diff --git a/fs/xfs/linux/xfs_vnode.h b/fs/xfs/linux/xfs_vnode.h
index bf6025bfe0a4..dc76f19ca268 100644
--- a/fs/xfs/linux/xfs_vnode.h
+++ b/fs/xfs/linux/xfs_vnode.h
@@ -211,8 +211,7 @@ typedef int (*vop_fid2_t)(bhv_desc_t *, struct fid *);
typedef int (*vop_release_t)(bhv_desc_t *);
typedef int (*vop_rwlock_t)(bhv_desc_t *, vrwlock_t);
typedef void (*vop_rwunlock_t)(bhv_desc_t *, vrwlock_t);
-typedef int (*vop_bmap_t)(bhv_desc_t *, xfs_off_t, ssize_t, int, struct cred *, struct page_buf_bmap_s *, int *);
-typedef int (*vop_strategy_t)(bhv_desc_t *, xfs_off_t, ssize_t, int, struct cred *, struct page_buf_bmap_s *, int *);
+typedef int (*vop_bmap_t)(bhv_desc_t *, xfs_off_t, ssize_t, int, struct page_buf_bmap_s *, int *);
typedef int (*vop_reclaim_t)(bhv_desc_t *);
typedef int (*vop_attr_get_t)(bhv_desc_t *, char *, char *, int *, int,
struct cred *);
@@ -254,7 +253,6 @@ typedef struct vnodeops {
vop_rwlock_t vop_rwlock;
vop_rwunlock_t vop_rwunlock;
vop_bmap_t vop_bmap;
- vop_strategy_t vop_strategy;
vop_reclaim_t vop_reclaim;
vop_attr_get_t vop_attr_get;
vop_attr_set_t vop_attr_set;
@@ -286,16 +284,10 @@ typedef struct vnodeops {
rv = _VOP_(vop_write, vp)((vp)->v_fbhv,file,iov,segs,offset,cr);\
VN_BHV_READ_UNLOCK(&(vp)->v_bh); \
}
-#define VOP_BMAP(vp,of,sz,rw,cr,b,n,rv) \
+#define VOP_BMAP(vp,of,sz,rw,b,n,rv) \
{ \
VN_BHV_READ_LOCK(&(vp)->v_bh); \
- rv = _VOP_(vop_bmap, vp)((vp)->v_fbhv,of,sz,rw,cr,b,n); \
- VN_BHV_READ_UNLOCK(&(vp)->v_bh); \
-}
-#define VOP_STRATEGY(vp,of,sz,rw,cr,b,n,rv) \
-{ \
- VN_BHV_READ_LOCK(&(vp)->v_bh); \
- rv = _VOP_(vop_strategy, vp)((vp)->v_fbhv,of,sz,rw,cr,b,n); \
+ rv = _VOP_(vop_bmap, vp)((vp)->v_fbhv,of,sz,rw,b,n); \
VN_BHV_READ_UNLOCK(&(vp)->v_bh); \
}
#define VOP_OPEN(vp, cr, rv) \
@@ -528,14 +520,14 @@ typedef struct vattr {
mode_t va_mode; /* file access mode */
uid_t va_uid; /* owner user id */
gid_t va_gid; /* owner group id */
- dev_t va_fsid; /* file system id (dev for now) */
+ xfs_dev_t va_fsid; /* file system id (dev for now) */
xfs_ino_t va_nodeid; /* node id */
nlink_t va_nlink; /* number of references to file */
xfs_off_t va_size; /* file size in bytes */
timespec_t va_atime; /* time of last access */
timespec_t va_mtime; /* time of last modification */
timespec_t va_ctime; /* time file ``created'' */
- dev_t va_rdev; /* device the file represents */
+ xfs_dev_t va_rdev; /* device the file represents */
u_long va_blksize; /* fundamental block size */
__int64_t va_nblocks; /* # of blocks allocated */
u_long va_vcode; /* version code */
@@ -637,12 +629,13 @@ typedef struct vnode_map {
xfs_ino_t v_ino; /* inode # */
} vmap_t;
-#define VMAP(vp, ip, vmap) {(vmap).v_vfsp = (vp)->v_vfsp, \
- (vmap).v_number = (vp)->v_number, \
- (vmap).v_ino = (ip)->i_ino; }
+#define VMAP(vp, vmap) {(vmap).v_vfsp = (vp)->v_vfsp, \
+ (vmap).v_number = (vp)->v_number, \
+ (vmap).v_ino = (vp)->v_inode.i_ino; }
+
extern void vn_purge(struct vnode *, vmap_t *);
extern vnode_t *vn_get(struct vnode *, vmap_t *);
-extern int vn_revalidate(struct vnode *, int);
+extern int vn_revalidate(struct vnode *);
extern void vn_remove(struct vnode *);
static inline int vn_count(struct vnode *vp)
diff --git a/fs/xfs/pagebuf/page_buf.c b/fs/xfs/pagebuf/page_buf.c
index c98dc4637050..b066bc7878df 100644
--- a/fs/xfs/pagebuf/page_buf.c
+++ b/fs/xfs/pagebuf/page_buf.c
@@ -305,8 +305,7 @@ _pagebuf_initialize(
/*
* We don't want certain flags to appear in pb->pb_flags.
*/
- flags &= ~(PBF_LOCK|PBF_ENTER_PAGES|PBF_MAPPED);
- flags &= ~(PBF_DONT_BLOCK|PBF_READ_AHEAD);
+ flags &= ~(PBF_LOCK|PBF_MAPPED|PBF_DONT_BLOCK|PBF_READ_AHEAD);
pb_tracking_get(pb);
@@ -545,9 +544,9 @@ _pagebuf_lookup_pages(
size -= nbytes;
if (!PageUptodate(page)) {
- if ((blocksize == PAGE_CACHE_SIZE) &&
- (flags & PBF_READ)) {
- pb->pb_locked = 1;
+ if (blocksize == PAGE_CACHE_SIZE) {
+ if (flags & PBF_READ)
+ pb->pb_locked = 1;
good_pages--;
} else if (!PagePrivate(page)) {
unsigned long i, range = (offset + nbytes) >> SECTOR_SHIFT;
@@ -717,7 +716,6 @@ found:
PBF_MAPPED | \
_PBF_LOCKABLE | \
_PBF_ALL_PAGES_MAPPED | \
- _PBF_SOME_INVALID_PAGES | \
_PBF_ADDR_ALLOCATED | \
_PBF_MEM_ALLOCATED;
PB_TRACE(pb, PB_TRACE_REC(got_lk), 0);
@@ -832,19 +830,11 @@ pagebuf_lookup(
int flags)
{
page_buf_t *pb = NULL;
- int status;
flags |= _PBF_PRIVATE_BH;
pb = pagebuf_allocate(flags);
if (pb) {
_pagebuf_initialize(pb, target, ioff, isize, flags);
- if (flags & PBF_ENTER_PAGES) {
- status = _pagebuf_lookup_pages(pb, &inode->i_data, 0);
- if (status != 0) {
- pagebuf_free(pb);
- return (NULL);
- }
- }
}
return pb;
}
@@ -985,6 +975,7 @@ pagebuf_get_no_daddr(
}
/* otherwise pagebuf_free just ignores it */
pb->pb_flags |= _PBF_MEM_ALLOCATED;
+ PB_CLEAR_OWNER(pb);
up(&PBP(pb)->pb_sema); /* Return unlocked pagebuf */
PB_TRACE(pb, PB_TRACE_REC(no_daddr), rmem);
@@ -1926,14 +1917,14 @@ STATIC ctl_table pagebuf_table[] = {
sizeof(ulong), 0644, NULL, &proc_doulongvec_ms_jiffies_minmax,
&sysctl_intvec, NULL, &pagebuf_min[1], &pagebuf_max[1]},
- {PB_STATS_CLEAR, "stats_clear", &pb_params.data[3],
+ {PB_STATS_CLEAR, "stats_clear", &pb_params.data[2],
sizeof(ulong), 0644, NULL, &pb_stats_clear_handler,
- &sysctl_intvec, NULL, &pagebuf_min[3], &pagebuf_max[3]},
+ &sysctl_intvec, NULL, &pagebuf_min[2], &pagebuf_max[2]},
#ifdef PAGEBUF_TRACE
- {PB_DEBUG, "debug", &pb_params.data[4],
+ {PB_DEBUG, "debug", &pb_params.data[3],
sizeof(ulong), 0644, NULL, &proc_doulongvec_minmax,
- &sysctl_intvec, NULL, &pagebuf_min[4], &pagebuf_max[4]},
+ &sysctl_intvec, NULL, &pagebuf_min[3], &pagebuf_max[3]},
#endif
{0}
};
diff --git a/fs/xfs/pagebuf/page_buf.h b/fs/xfs/pagebuf/page_buf.h
index 18e27035cf72..ff240fefd32c 100644
--- a/fs/xfs/pagebuf/page_buf.h
+++ b/fs/xfs/pagebuf/page_buf.h
@@ -100,35 +100,27 @@ typedef enum page_buf_flags_e { /* pb_flags values */
PBF_MAPPABLE = (1 << 9),/* use directly-addressable pages */
PBF_STALE = (1 << 10), /* buffer has been staled, do not find it */
PBF_FS_MANAGED = (1 << 11), /* filesystem controls freeing memory */
- PBF_RELEASE = (1 << 12),/* buffer to be released after I/O is done */
/* flags used only as arguments to access routines */
PBF_LOCK = (1 << 13), /* lock requested */
PBF_TRYLOCK = (1 << 14), /* lock requested, but do not wait */
- PBF_ALLOCATE = (1 << 15), /* allocate all pages (UNUSED) */
- PBF_FILE_ALLOCATE = (1 << 16), /* allocate all file space */
- PBF_DONT_BLOCK = (1 << 17), /* do not block in current thread */
- PBF_DIRECT = (1 << 18), /* direct I/O desired */
- PBF_ENTER_PAGES = (1 << 21), /* create invalid pages for all */
- /* pages in the range of the buffer */
- /* not already associated with buffer */
+ PBF_FILE_ALLOCATE = (1 << 15), /* allocate all file space */
+ PBF_DONT_BLOCK = (1 << 16), /* do not block in current thread */
+ PBF_DIRECT = (1 << 17), /* direct I/O desired */
/* flags used only internally */
_PBF_LOCKABLE = (1 << 19), /* page_buf_t may be locked */
_PBF_PRIVATE_BH = (1 << 20), /* do not use public buffer heads */
- _PBF_ALL_PAGES_MAPPED = (1 << 22),
+ _PBF_ALL_PAGES_MAPPED = (1 << 21),
/* all pages in rage are mapped */
- _PBF_SOME_INVALID_PAGES = (1 << 23),
- /* some mapped pages are not valid */
- _PBF_ADDR_ALLOCATED = (1 << 24),
+ _PBF_ADDR_ALLOCATED = (1 << 22),
/* pb_addr space was allocated */
- _PBF_MEM_ALLOCATED = (1 << 25),
+ _PBF_MEM_ALLOCATED = (1 << 23),
/* pb_mem and underlying pages allocated */
- PBF_FORCEIO = (1 << 27),
- PBF_FLUSH = (1 << 28), /* flush disk write cache */
- PBF_READ_AHEAD = (1 << 29),
- PBF_FS_RESERVED_3 = (1 << 31) /* reserved (XFS use: XFS_B_STALE) */
+ PBF_FORCEIO = (1 << 24),
+ PBF_FLUSH = (1 << 25), /* flush disk write cache */
+ PBF_READ_AHEAD = (1 << 26),
} page_buf_flags_t;
@@ -145,7 +137,6 @@ typedef struct pb_target {
struct block_device *pbr_bdev;
struct address_space *pbr_mapping;
unsigned int pbr_blocksize;
- unsigned int pbr_blocksize_bits;
} pb_target_t;
/*
@@ -303,26 +294,16 @@ extern int pagebuf_lock_value( /* return count on lock */
extern int pagebuf_lock( /* lock buffer */
page_buf_t *); /* buffer to lock */
-extern void pagebuf_lock_disable( /* disable buffer locking */
- struct pb_target *, /* inode for buffers */
- int); /* do blkdev_put? */
-
-extern struct pb_target *pagebuf_lock_enable(
- dev_t,
- int); /* do blkdev_get? */
-
-extern void pagebuf_target_blocksize(
- pb_target_t *,
- unsigned int); /* block size */
-
extern void pagebuf_target_clear(struct pb_target *);
extern void pagebuf_unlock( /* unlock buffer */
page_buf_t *); /* buffer to unlock */
/* Buffer Utility Routines */
-
-#define pagebuf_geterror(pb) ((pb)->pb_error)
+static inline int pagebuf_geterror(page_buf_t *pb)
+{
+ return (pb ? pb->pb_error : ENOMEM);
+}
extern void pagebuf_iodone( /* mark buffer I/O complete */
page_buf_t *); /* buffer to mark */
diff --git a/fs/xfs/pagebuf/page_buf_locking.c b/fs/xfs/pagebuf/page_buf_locking.c
index 6be04596ec11..ecabe0f3c2c2 100644
--- a/fs/xfs/pagebuf/page_buf_locking.c
+++ b/fs/xfs/pagebuf/page_buf_locking.c
@@ -1,6 +1,5 @@
/*
* Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
- * Portions Copyright (c) 2002 Christoph Hellwig. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -58,10 +57,6 @@
#include "page_buf_internal.h"
-#ifndef EVMS_MAJOR
-#define EVMS_MAJOR 117
-#endif
-
/*
* pagebuf_cond_lock
*
@@ -126,82 +121,6 @@ pagebuf_lock(
return 0;
}
-/*
- * pagebuf_lock_disable
- *
- * pagebuf_lock_disable disables buffer object locking for an inode.
- * remove_super() does a blkdev_put for us on the data device, hence
- * the do_blkdev_put argument.
- */
-void
-pagebuf_lock_disable(
- pb_target_t *target,
- int do_blkdev_put)
-{
- pagebuf_delwri_flush(target, PBDF_WAIT, NULL);
- if (do_blkdev_put)
- blkdev_put(target->pbr_bdev, BDEV_FS);
- kfree(target);
-}
-
-/*
- * pagebuf_lock_enable
- *
- * get_sb_bdev() does a blkdev_get for us on the data device, hence
- * the do_blkdev_get argument.
- */
-pb_target_t *
-pagebuf_lock_enable(
- dev_t dev,
- int do_blkdev_get)
-{
- struct block_device *bdev;
- pb_target_t *target;
- int error = -ENOMEM;
-
- target = kmalloc(sizeof(pb_target_t), GFP_KERNEL);
- if (unlikely(!target))
- return ERR_PTR(error);
-
- bdev = bdget(dev);
- if (unlikely(!bdev))
- goto fail;
-
- if (do_blkdev_get) {
- error = blkdev_get(bdev, FMODE_READ|FMODE_WRITE, 0, BDEV_FS);
- if (unlikely(error))
- goto fail;
- }
-
- target->pbr_dev = dev;
- target->pbr_bdev = bdev;
- target->pbr_mapping = bdev->bd_inode->i_mapping;
-
- pagebuf_target_blocksize(target, PAGE_CACHE_SIZE);
-
- if ((MAJOR(dev) == MD_MAJOR) || (MAJOR(dev) == EVMS_MAJOR))
- target->pbr_flags = PBR_ALIGNED_ONLY;
- else if (MAJOR(dev) == LVM_BLK_MAJOR)
- target->pbr_flags = PBR_SECTOR_ONLY;
- else
- target->pbr_flags = 0;
-
- return target;
-
-fail:
- kfree(target);
- return ERR_PTR(error);
-}
-
-void
-pagebuf_target_blocksize(
- pb_target_t *target,
- unsigned int blocksize)
-{
- target->pbr_blocksize = blocksize;
- target->pbr_blocksize_bits = ffs(blocksize) - 1;
-}
-
void
pagebuf_target_clear(
pb_target_t *target)
diff --git a/fs/xfs/support/debug.c b/fs/xfs/support/debug.c
index 795056094248..d6fe5d8b6983 100644
--- a/fs/xfs/support/debug.c
+++ b/fs/xfs/support/debug.c
@@ -40,6 +40,11 @@ int doass = 1;
static char message[256]; /* keep it off the stack */
static spinlock_t xfs_err_lock = SPIN_LOCK_UNLOCKED;
+/* Translate from CE_FOO to KERN_FOO, err_level(CE_FOO) == KERN_FOO */
+static char *err_level[8] = {KERN_EMERG, KERN_ALERT, KERN_CRIT,
+ KERN_ERR, KERN_WARNING, KERN_NOTICE,
+ KERN_INFO, KERN_DEBUG};
+
void
assfail(char *a, char *f, int l)
{
@@ -71,10 +76,7 @@ get_thread_id(void)
return current->pid;
}
-# define xdprintk(format...) printk(format)
-#else
-# define xdprintk(format...) do { } while (0)
-#endif
+#endif /* DEBUG */
void
cmn_err(register int level, char *fmt, ...)
@@ -86,18 +88,7 @@ cmn_err(register int level, char *fmt, ...)
va_start(ap, fmt);
if (*fmt == '!') fp++;
vsprintf(message, fp, ap);
- switch (level) {
- case CE_CONT:
- case CE_WARN:
- printk("%s", message);
- break;
- case CE_DEBUG:
- xdprintk("%s", message);
- break;
- default:
- printk("%s\n", message);
- break;
- }
+ printk("%s%s\n", err_level[level], message);
va_end(ap);
spin_unlock(&xfs_err_lock);
@@ -111,18 +102,8 @@ icmn_err(register int level, char *fmt, va_list ap)
{
spin_lock(&xfs_err_lock);
vsprintf(message, fmt, ap);
- switch (level) {
- case CE_CONT:
- case CE_WARN:
- printk("%s", message);
- break;
- case CE_DEBUG:
- xdprintk("%s", message);
- break;
- default:
- printk("cmn_err level %d ", level);
- printk("%s\n", message);
- break;
- }
spin_unlock(&xfs_err_lock);
+ printk("%s%s\n", err_level[level], message);
+ if (level == CE_PANIC)
+ BUG();
}
diff --git a/fs/xfs/support/move.c b/fs/xfs/support/move.c
index 4fc3831eed38..15dbd090c6b8 100644
--- a/fs/xfs/support/move.c
+++ b/fs/xfs/support/move.c
@@ -72,9 +72,9 @@ uiomove(void *cp, size_t n, enum uio_rw rw, struct uio *uio)
case UIO_SYSSPACE:
if (rw == UIO_READ)
- bcopy(cp, iov->iov_base, cnt);
+ memcpy(iov->iov_base, cp, cnt);
else
- bcopy(iov->iov_base, cp, cnt);
+ memcpy(cp, iov->iov_base, cnt);
break;
default:
diff --git a/fs/xfs/support/move.h b/fs/xfs/support/move.h
index e01b7b6c7a15..dd63285df2e6 100644
--- a/fs/xfs/support/move.h
+++ b/fs/xfs/support/move.h
@@ -36,11 +36,6 @@
#include <linux/uio.h>
#include <asm/uaccess.h>
-#define bzero(p,s) memset((p), 0, (s))
-#define bcopy(s,d,n) memcpy((d),(s),(n))
-#define bcmp(s1,s2,l) memcmp(s1,s2,l)
-#define ovbcopy(from,to,count) memmove(to,from,count)
-
typedef struct iovec iovec_t;
typedef struct uio {
diff --git a/fs/xfs/support/uuid.c b/fs/xfs/support/uuid.c
index f30b857a3f9a..fd98101312c1 100644
--- a/fs/xfs/support/uuid.c
+++ b/fs/xfs/support/uuid.c
@@ -109,7 +109,7 @@ uuid_getnodeuniq(uuid_t *uuid, int fsid [2])
void
uuid_create_nil(uuid_t *uuid)
{
- bzero(uuid, sizeof *uuid);
+ memset(uuid, 0, sizeof(*uuid));
}
int
@@ -129,7 +129,7 @@ uuid_is_nil(uuid_t *uuid)
int
uuid_equal(uuid_t *uuid1, uuid_t *uuid2)
{
- return bcmp(uuid1, uuid2, sizeof(uuid_t)) ? B_FALSE : B_TRUE;
+ return memcmp(uuid1, uuid2, sizeof(uuid_t)) ? B_FALSE : B_TRUE;
}
/*
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 4489ce3a1502..5c89a956c866 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -494,13 +494,13 @@ xfs_alloc_trace_modagf(
(void *)(__psunsigned_t)INT_GET(agf->agf_seqno, ARCH_CONVERT),
(void *)(__psunsigned_t)INT_GET(agf->agf_length, ARCH_CONVERT),
(void *)(__psunsigned_t)INT_GET(agf->agf_roots[XFS_BTNUM_BNO],
- ARCH_CONVERT);
+ ARCH_CONVERT),
(void *)(__psunsigned_t)INT_GET(agf->agf_roots[XFS_BTNUM_CNT],
- ARCH_CONVERT);
+ ARCH_CONVERT),
(void *)(__psunsigned_t)INT_GET(agf->agf_levels[XFS_BTNUM_BNO],
- ARCH_CONVERT);
+ ARCH_CONVERT),
(void *)(__psunsigned_t)INT_GET(agf->agf_levels[XFS_BTNUM_CNT],
- ARCH_CONVERT);
+ ARCH_CONVERT),
(void *)(__psunsigned_t)INT_GET(agf->agf_flfirst, ARCH_CONVERT),
(void *)(__psunsigned_t)INT_GET(agf->agf_fllast, ARCH_CONVERT),
(void *)(__psunsigned_t)INT_GET(agf->agf_flcount, ARCH_CONVERT),
@@ -2597,7 +2597,7 @@ xfs_alloc_search_busy(xfs_trans_t *tp,
s = mutex_spinlock(&mp->m_perag[agno].pagb_lock);
cnt = mp->m_perag[agno].pagb_count;
- uend = bno + len;
+ uend = bno + len - 1;
/* search pagb_list for this slot, skipping open slots */
for (bsy = mp->m_perag[agno].pagb_list, n = 0;
@@ -2607,16 +2607,16 @@ xfs_alloc_search_busy(xfs_trans_t *tp,
* (start1,length1) within (start2, length2)
*/
if (bsy->busy_tp != NULL) {
- bend = bsy->busy_start + bsy->busy_length;
- if ( (bno >= bsy->busy_start && bno <= bend) ||
- (uend >= bsy->busy_start && uend <= bend) ||
- (bno <= bsy->busy_start && uend >= bsy->busy_start) ) {
+ bend = bsy->busy_start + bsy->busy_length - 1;
+ if ((bno > bend) ||
+ (uend < bsy->busy_start)) {
+ cnt--;
+ } else {
TRACE_BUSYSEARCH("xfs_alloc_search_busy",
"found1", agno, bno, len, n,
tp);
break;
}
- cnt--;
}
}
@@ -2626,7 +2626,7 @@ xfs_alloc_search_busy(xfs_trans_t *tp,
*/
if (cnt) {
TRACE_BUSYSEARCH("xfs_alloc_search_busy", "found", agno, bno, len, n, tp);
- lsn = bsy->busy_tp->t_lsn;
+ lsn = bsy->busy_tp->t_commit_lsn;
mutex_spinunlock(&mp->m_perag[agno].pagb_lock, s);
xfs_log_force(mp, lsn, XFS_LOG_FORCE|XFS_LOG_SYNC);
} else {
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c
index 11c7618be7c7..9bb3fe79243d 100644
--- a/fs/xfs/xfs_alloc_btree.c
+++ b/fs/xfs/xfs_alloc_btree.c
@@ -132,9 +132,9 @@ xfs_alloc_delrec(
}
#endif
if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) {
- ovbcopy(&lkp[ptr], &lkp[ptr - 1],
+ memmove(&lkp[ptr - 1], &lkp[ptr],
(INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr) * sizeof(*lkp)); /* INT_: mem copy */
- ovbcopy(&lpp[ptr], &lpp[ptr - 1],
+ memmove(&lpp[ptr - 1], &lpp[ptr],
(INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr) * sizeof(*lpp)); /* INT_: mem copy */
xfs_alloc_log_ptrs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT) - 1);
xfs_alloc_log_keys(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT) - 1);
@@ -147,7 +147,7 @@ xfs_alloc_delrec(
else {
lrp = XFS_ALLOC_REC_ADDR(block, 1, cur);
if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) {
- ovbcopy(&lrp[ptr], &lrp[ptr - 1],
+ memmove(&lrp[ptr - 1], &lrp[ptr],
(INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr) * sizeof(*lrp));
xfs_alloc_log_recs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT) - 1);
}
@@ -464,8 +464,8 @@ xfs_alloc_delrec(
return error;
}
#endif
- bcopy(rkp, lkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lkp)); /* INT_: structure copy */
- bcopy(rpp, lpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lpp)); /* INT_: structure copy */
+ memcpy(lkp, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lkp)); /* INT_: structure copy */
+ memcpy(lpp, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lpp)); /* INT_: structure copy */
xfs_alloc_log_keys(cur, lbp, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1,
INT_GET(left->bb_numrecs, ARCH_CONVERT) + INT_GET(right->bb_numrecs, ARCH_CONVERT));
xfs_alloc_log_ptrs(cur, lbp, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1,
@@ -476,7 +476,7 @@ xfs_alloc_delrec(
*/
lrp = XFS_ALLOC_REC_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, cur);
rrp = XFS_ALLOC_REC_ADDR(right, 1, cur);
- bcopy(rrp, lrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lrp));
+ memcpy(lrp, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lrp));
xfs_alloc_log_recs(cur, lbp, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1,
INT_GET(left->bb_numrecs, ARCH_CONVERT) + INT_GET(right->bb_numrecs, ARCH_CONVERT));
}
@@ -697,9 +697,9 @@ xfs_alloc_insrec(
return error;
}
#endif
- ovbcopy(&kp[ptr - 1], &kp[ptr],
+ memmove(&kp[ptr], &kp[ptr - 1],
(INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr + 1) * sizeof(*kp)); /* INT_: copy */
- ovbcopy(&pp[ptr - 1], &pp[ptr],
+ memmove(&pp[ptr], &pp[ptr - 1],
(INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr + 1) * sizeof(*pp)); /* INT_: copy */
#ifdef DEBUG
if ((error = xfs_btree_check_sptr(cur, *bnop, level)))
@@ -723,7 +723,7 @@ xfs_alloc_insrec(
* It's a leaf entry. Make a hole for the new record.
*/
rp = XFS_ALLOC_REC_ADDR(block, 1, cur);
- ovbcopy(&rp[ptr - 1], &rp[ptr],
+ memmove(&rp[ptr], &rp[ptr - 1],
(INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr + 1) * sizeof(*rp));
/*
* Now stuff the new record in, bump numrecs
@@ -1217,12 +1217,12 @@ xfs_alloc_lshift(
return error;
}
#endif
- ovbcopy(rkp + 1, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp));
- ovbcopy(rpp + 1, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp));
+ memmove(rkp, rkp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp));
+ memmove(rpp, rpp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp));
xfs_alloc_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
xfs_alloc_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
} else {
- ovbcopy(rrp + 1, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp));
+ memmove(rrp, rrp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp));
xfs_alloc_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
key.ar_startblock = rrp->ar_startblock; /* INT_: direct copy */
key.ar_blockcount = rrp->ar_blockcount; /* INT_: direct copy */
@@ -1475,8 +1475,8 @@ xfs_alloc_rshift(
return error;
}
#endif
- ovbcopy(rkp, rkp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp));
- ovbcopy(rpp, rpp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp));
+ memmove(rkp + 1, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp));
+ memmove(rpp + 1, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp));
#ifdef DEBUG
if ((error = xfs_btree_check_sptr(cur, INT_GET(*lpp, ARCH_CONVERT), level)))
return error;
@@ -1492,7 +1492,7 @@ xfs_alloc_rshift(
lrp = XFS_ALLOC_REC_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur);
rrp = XFS_ALLOC_REC_ADDR(right, 1, cur);
- ovbcopy(rrp, rrp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp));
+ memmove(rrp + 1, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp));
*rrp = *lrp;
xfs_alloc_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1);
key.ar_startblock = rrp->ar_startblock; /* INT_: direct copy */
@@ -1608,8 +1608,8 @@ xfs_alloc_split(
return error;
}
#endif
- bcopy(lkp, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); /* INT_: copy */
- bcopy(lpp, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp));/* INT_: copy */
+ memcpy(rkp, lkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); /* INT_: copy */
+ memcpy(rpp, lpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); /* INT_: copy */
xfs_alloc_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
xfs_alloc_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
*keyp = *rkp;
@@ -1623,7 +1623,7 @@ xfs_alloc_split(
lrp = XFS_ALLOC_REC_ADDR(left, i, cur);
rrp = XFS_ALLOC_REC_ADDR(right, 1, cur);
- bcopy(lrp, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp));
+ memcpy(rrp, lrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp));
xfs_alloc_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
keyp->ar_startblock = rrp->ar_startblock; /* INT_: direct copy */
keyp->ar_blockcount = rrp->ar_blockcount; /* INT_: direct copy */
diff --git a/fs/xfs/xfs_arch.h b/fs/xfs/xfs_arch.h
index 57a28544be96..4629bc745e07 100644
--- a/fs/xfs/xfs_arch.h
+++ b/fs/xfs/xfs_arch.h
@@ -260,13 +260,13 @@
#define DIRINO_COPY_ARCH(from,to,arch) \
if ((arch) == ARCH_NOCONVERT) { \
- bcopy(from,to,sizeof(xfs_ino_t)); \
+ memcpy(to,from,sizeof(xfs_ino_t)); \
} else { \
INT_SWAP_UNALIGNED_64(from,to); \
}
#define DIRINO4_COPY_ARCH(from,to,arch) \
if ((arch) == ARCH_NOCONVERT) { \
- bcopy((((__u8*)from+4)),to,sizeof(xfs_dir2_ino4_t)); \
+ memcpy(to,(((__u8*)from+4)),sizeof(xfs_dir2_ino4_t)); \
} else { \
INT_SWAP_UNALIGNED_32(from,to); \
}
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index 74563b62d3fc..482a20fcbfde 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -120,7 +120,7 @@ xfs_attr_get(bhv_desc_t *bdp, char *name, char *value, int *valuelenp,
/*
* Fill in the arg structure for this request.
*/
- bzero((char *)&args, sizeof(args));
+ memset((char *)&args, 0, sizeof(args));
args.name = name;
args.namelen = namelen;
args.value = value;
@@ -215,7 +215,7 @@ xfs_attr_set(bhv_desc_t *bdp, char *name, char *value, int valuelen, int flags,
/*
* Fill in the arg structure for this request.
*/
- bzero((char *)&args, sizeof(args));
+ memset((char *)&args, 0, sizeof(args));
args.name = name;
args.namelen = namelen;
args.value = value;
@@ -469,7 +469,7 @@ xfs_attr_remove(bhv_desc_t *bdp, char *name, int flags, struct cred *cred)
/*
* Fill in the arg structure for this request.
*/
- bzero((char *)&args, sizeof(args));
+ memset((char *)&args, 0, sizeof(args));
args.name = name;
args.namelen = namelen;
args.flags = flags;
diff --git a/fs/xfs/xfs_attr_fetch.c b/fs/xfs/xfs_attr_fetch.c
index 0c9af54eeed5..4b1a23cb21a6 100644
--- a/fs/xfs/xfs_attr_fetch.c
+++ b/fs/xfs/xfs_attr_fetch.c
@@ -43,7 +43,7 @@ xfs_attr_fetch(xfs_inode_t *ip, char *name, char *value, int valuelen)
/*
* Do the argument setup for the xfs_attr routines.
*/
- bzero((char *)&args, sizeof(args));
+ memset((char *)&args, 0, sizeof(args));
args.dp = ip;
args.flags = ATTR_ROOT;
args.whichfork = XFS_ATTR_FORK;
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index 884da53fa54d..b1c4836d6709 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -128,7 +128,7 @@ xfs_attr_shortform_add(xfs_da_args_t *args)
sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) {
if (sfe->namelen != args->namelen)
continue;
- if (bcmp(args->name, sfe->nameval, args->namelen) != 0)
+ if (memcmp(args->name, sfe->nameval, args->namelen) != 0)
continue;
if (((args->flags & ATTR_ROOT) != 0) !=
((sfe->flags & XFS_ATTR_ROOT) != 0))
@@ -145,8 +145,8 @@ xfs_attr_shortform_add(xfs_da_args_t *args)
sfe->namelen = args->namelen;
INT_SET(sfe->valuelen, ARCH_CONVERT, args->valuelen);
sfe->flags = (args->flags & ATTR_ROOT) ? XFS_ATTR_ROOT : 0;
- bcopy(args->name, sfe->nameval, args->namelen);
- bcopy(args->value, &sfe->nameval[args->namelen], args->valuelen);
+ memcpy(sfe->nameval, args->name, args->namelen);
+ memcpy(&sfe->nameval[args->namelen], args->value, args->valuelen);
INT_MOD(sf->hdr.count, ARCH_CONVERT, 1);
INT_MOD(sf->hdr.totsize, ARCH_CONVERT, size);
xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA);
@@ -178,7 +178,7 @@ xfs_attr_shortform_remove(xfs_da_args_t *args)
size = XFS_ATTR_SF_ENTSIZE(sfe);
if (sfe->namelen != args->namelen)
continue;
- if (bcmp(sfe->nameval, args->name, args->namelen) != 0)
+ if (memcmp(sfe->nameval, args->name, args->namelen) != 0)
continue;
if (((args->flags & ATTR_ROOT) != 0) !=
((sfe->flags & XFS_ATTR_ROOT) != 0))
@@ -191,7 +191,7 @@ xfs_attr_shortform_remove(xfs_da_args_t *args)
end = base + size;
totsize = INT_GET(sf->hdr.totsize, ARCH_CONVERT);
if (end != totsize) {
- ovbcopy(&((char *)sf)[end], &((char *)sf)[base],
+ memmove(&((char *)sf)[base], &((char *)sf)[end],
totsize - end);
}
INT_MOD(sf->hdr.count, ARCH_CONVERT, -1);
@@ -222,7 +222,7 @@ xfs_attr_shortform_lookup(xfs_da_args_t *args)
sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) {
if (sfe->namelen != args->namelen)
continue;
- if (bcmp(args->name, sfe->nameval, args->namelen) != 0)
+ if (memcmp(args->name, sfe->nameval, args->namelen) != 0)
continue;
if (((args->flags & ATTR_ROOT) != 0) !=
((sfe->flags & XFS_ATTR_ROOT) != 0))
@@ -250,7 +250,7 @@ xfs_attr_shortform_getvalue(xfs_da_args_t *args)
sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) {
if (sfe->namelen != args->namelen)
continue;
- if (bcmp(args->name, sfe->nameval, args->namelen) != 0)
+ if (memcmp(args->name, sfe->nameval, args->namelen) != 0)
continue;
if (((args->flags & ATTR_ROOT) != 0) !=
((sfe->flags & XFS_ATTR_ROOT) != 0))
@@ -264,7 +264,7 @@ xfs_attr_shortform_getvalue(xfs_da_args_t *args)
return(XFS_ERROR(ERANGE));
}
args->valuelen = INT_GET(sfe->valuelen, ARCH_CONVERT);
- bcopy(&sfe->nameval[args->namelen], args->value,
+ memcpy(args->value, &sfe->nameval[args->namelen],
args->valuelen);
return(XFS_ERROR(EEXIST));
}
@@ -293,7 +293,7 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
size = INT_GET(sf->hdr.totsize, ARCH_CONVERT);
tmpbuffer = kmem_alloc(size, KM_SLEEP);
ASSERT(tmpbuffer != NULL);
- bcopy(ifp->if_u1.if_data, tmpbuffer, size);
+ memcpy(tmpbuffer, ifp->if_u1.if_data, size);
sf = (xfs_attr_shortform_t *)tmpbuffer;
xfs_idata_realloc(dp, -size, XFS_ATTR_FORK);
@@ -307,7 +307,7 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
if (error == EIO)
goto out;
xfs_idata_realloc(dp, size, XFS_ATTR_FORK); /* try to put */
- bcopy(tmpbuffer, ifp->if_u1.if_data, size); /* it back */
+ memcpy(ifp->if_u1.if_data, tmpbuffer, size); /* it back */
goto out;
}
@@ -319,11 +319,11 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
if (error)
goto out;
xfs_idata_realloc(dp, size, XFS_ATTR_FORK); /* try to put */
- bcopy(tmpbuffer, ifp->if_u1.if_data, size); /* it back */
+ memcpy(ifp->if_u1.if_data, tmpbuffer, size); /* it back */
goto out;
}
- bzero((char *)&nargs, sizeof(nargs));
+ memset((char *)&nargs, 0, sizeof(nargs));
nargs.dp = dp;
nargs.firstblock = args->firstblock;
nargs.flist = args->flist;
@@ -590,11 +590,11 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args)
ASSERT(tmpbuffer != NULL);
ASSERT(bp != NULL);
- bcopy(bp->data, tmpbuffer, XFS_LBSIZE(dp->i_mount));
+ memcpy(tmpbuffer, bp->data, XFS_LBSIZE(dp->i_mount));
leaf = (xfs_attr_leafblock_t *)tmpbuffer;
ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT)
== XFS_ATTR_LEAF_MAGIC);
- bzero(bp->data, XFS_LBSIZE(dp->i_mount));
+ memset(bp->data, 0, XFS_LBSIZE(dp->i_mount));
/*
* Clean out the prior contents of the attribute list.
@@ -609,7 +609,7 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args)
/*
* Copy the attributes
*/
- bzero((char *)&nargs, sizeof(nargs));
+ memset((char *)&nargs, 0, sizeof(nargs));
nargs.dp = dp;
nargs.firstblock = args->firstblock;
nargs.flist = args->flist;
@@ -669,7 +669,7 @@ xfs_attr_leaf_to_node(xfs_da_args_t *args)
if (error)
goto out;
ASSERT(bp2 != NULL);
- bcopy(bp1->data, bp2->data, XFS_LBSIZE(dp->i_mount));
+ memcpy(bp2->data, bp1->data, XFS_LBSIZE(dp->i_mount));
xfs_da_buf_done(bp1);
bp1 = NULL;
xfs_da_log_buf(args->trans, bp2, 0, XFS_LBSIZE(dp->i_mount) - 1);
@@ -725,7 +725,7 @@ xfs_attr_leaf_create(xfs_da_args_t *args, xfs_dablk_t blkno, xfs_dabuf_t **bpp)
return(error);
ASSERT(bp != NULL);
leaf = bp->data;
- bzero((char *)leaf, XFS_LBSIZE(dp->i_mount));
+ memset((char *)leaf, 0, XFS_LBSIZE(dp->i_mount));
hdr = &leaf->hdr;
INT_SET(hdr->info.magic, ARCH_CONVERT, XFS_ATTR_LEAF_MAGIC);
INT_SET(hdr->firstused, ARCH_CONVERT, XFS_LBSIZE(dp->i_mount));
@@ -900,7 +900,7 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex)
if (args->index < INT_GET(hdr->count, ARCH_CONVERT)) {
tmp = INT_GET(hdr->count, ARCH_CONVERT) - args->index;
tmp *= sizeof(xfs_attr_leaf_entry_t);
- ovbcopy((char *)entry, (char *)(entry+1), tmp);
+ memmove((char *)(entry+1), (char *)entry, tmp);
xfs_da_log_buf(args->trans, bp,
XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry)));
}
@@ -955,13 +955,13 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex)
name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, args->index);
name_loc->namelen = args->namelen;
INT_SET(name_loc->valuelen, ARCH_CONVERT, args->valuelen);
- bcopy(args->name, (char *)name_loc->nameval, args->namelen);
- bcopy(args->value, (char *)&name_loc->nameval[args->namelen],
+ memcpy((char *)name_loc->nameval, args->name, args->namelen);
+ memcpy((char *)&name_loc->nameval[args->namelen], args->value,
INT_GET(name_loc->valuelen, ARCH_CONVERT));
} else {
name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index);
name_rmt->namelen = args->namelen;
- bcopy(args->name, (char *)name_rmt->name, args->namelen);
+ memcpy((char *)name_rmt->name, args->name, args->namelen);
entry->flags |= XFS_ATTR_INCOMPLETE;
/* just in case */
INT_ZERO(name_rmt->valuelen, ARCH_CONVERT);
@@ -1017,8 +1017,8 @@ xfs_attr_leaf_compact(xfs_trans_t *trans, xfs_dabuf_t *bp)
mp = trans->t_mountp;
tmpbuffer = kmem_alloc(XFS_LBSIZE(mp), KM_SLEEP);
ASSERT(tmpbuffer != NULL);
- bcopy(bp->data, tmpbuffer, XFS_LBSIZE(mp));
- bzero(bp->data, XFS_LBSIZE(mp));
+ memcpy(tmpbuffer, bp->data, XFS_LBSIZE(mp));
+ memset(bp->data, 0, XFS_LBSIZE(mp));
/*
* Copy basic information
@@ -1390,7 +1390,7 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action)
* path point to the block we want to drop (this one).
*/
forward = (!INT_ISZERO(info->forw, ARCH_CONVERT));
- bcopy(&state->path, &state->altpath, sizeof(state->path));
+ memcpy(&state->altpath, &state->path, sizeof(state->path));
error = xfs_da_path_shift(state, &state->altpath, forward,
0, &retval);
if (error)
@@ -1450,7 +1450,7 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action)
* Make altpath point to the block we want to keep (the lower
* numbered block) and path point to the block we want to drop.
*/
- bcopy(&state->path, &state->altpath, sizeof(state->path));
+ memcpy(&state->altpath, &state->path, sizeof(state->path));
if (blkno < blk->blkno) {
error = xfs_da_path_shift(state, &state->altpath, forward,
0, &retval);
@@ -1585,7 +1585,7 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args)
/*
* Compress the remaining entries and zero out the removed stuff.
*/
- bzero(XFS_ATTR_LEAF_NAME(leaf, args->index), entsize);
+ memset(XFS_ATTR_LEAF_NAME(leaf, args->index), 0, entsize);
INT_MOD(hdr->usedbytes, ARCH_CONVERT, -entsize);
xfs_da_log_buf(args->trans, bp,
XFS_DA_LOGRANGE(leaf, XFS_ATTR_LEAF_NAME(leaf, args->index),
@@ -1593,12 +1593,12 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args)
tmp = (INT_GET(hdr->count, ARCH_CONVERT) - args->index)
* sizeof(xfs_attr_leaf_entry_t);
- ovbcopy((char *)(entry+1), (char *)entry, tmp);
+ memmove((char *)entry, (char *)(entry+1), tmp);
INT_MOD(hdr->count, ARCH_CONVERT, -1);
xfs_da_log_buf(args->trans, bp,
XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry)));
entry = &leaf->entries[INT_GET(hdr->count, ARCH_CONVERT)];
- bzero((char *)entry, sizeof(xfs_attr_leaf_entry_t));
+ memset((char *)entry, 0, sizeof(xfs_attr_leaf_entry_t));
/*
* If we removed the first entry, re-find the first used byte
@@ -1701,7 +1701,7 @@ xfs_attr_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
*/
tmpbuffer = kmem_alloc(state->blocksize, KM_SLEEP);
ASSERT(tmpbuffer != NULL);
- bzero(tmpbuffer, state->blocksize);
+ memset(tmpbuffer, 0, state->blocksize);
tmp_leaf = (xfs_attr_leafblock_t *)tmpbuffer;
tmp_hdr = &tmp_leaf->hdr;
tmp_hdr->info = save_hdr->info; /* struct copy */
@@ -1729,7 +1729,7 @@ xfs_attr_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
(int)INT_GET(drop_hdr->count, ARCH_CONVERT),
mp);
}
- bcopy((char *)tmp_leaf, (char *)save_leaf, state->blocksize);
+ memcpy((char *)save_leaf, (char *)tmp_leaf, state->blocksize);
kmem_free(tmpbuffer, state->blocksize);
}
@@ -1840,7 +1840,7 @@ xfs_attr_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args)
name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, probe);
if (name_loc->namelen != args->namelen)
continue;
- if (bcmp(args->name, (char *)name_loc->nameval,
+ if (memcmp(args->name, (char *)name_loc->nameval,
args->namelen) != 0)
continue;
if (((args->flags & ATTR_ROOT) != 0) !=
@@ -1852,7 +1852,7 @@ xfs_attr_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args)
name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, probe);
if (name_rmt->namelen != args->namelen)
continue;
- if (bcmp(args->name, (char *)name_rmt->name,
+ if (memcmp(args->name, (char *)name_rmt->name,
args->namelen) != 0)
continue;
if (((args->flags & ATTR_ROOT) != 0) !=
@@ -1895,7 +1895,7 @@ xfs_attr_leaf_getvalue(xfs_dabuf_t *bp, xfs_da_args_t *args)
if (entry->flags & XFS_ATTR_LOCAL) {
name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, args->index);
ASSERT(name_loc->namelen == args->namelen);
- ASSERT(bcmp(args->name, name_loc->nameval, args->namelen) == 0);
+ ASSERT(memcmp(args->name, name_loc->nameval, args->namelen) == 0);
valuelen = INT_GET(name_loc->valuelen, ARCH_CONVERT);
if (args->flags & ATTR_KERNOVAL) {
args->valuelen = valuelen;
@@ -1906,11 +1906,11 @@ xfs_attr_leaf_getvalue(xfs_dabuf_t *bp, xfs_da_args_t *args)
return(XFS_ERROR(ERANGE));
}
args->valuelen = valuelen;
- bcopy(&name_loc->nameval[args->namelen], args->value, valuelen);
+ memcpy(args->value, &name_loc->nameval[args->namelen], valuelen);
} else {
name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index);
ASSERT(name_rmt->namelen == args->namelen);
- ASSERT(bcmp(args->name, name_rmt->name, args->namelen) == 0);
+ ASSERT(memcmp(args->name, name_rmt->name, args->namelen) == 0);
valuelen = INT_GET(name_rmt->valuelen, ARCH_CONVERT);
args->rmtblkno = INT_GET(name_rmt->valueblk, ARCH_CONVERT);
args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount, valuelen);
@@ -1983,7 +1983,7 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s,
tmp *= sizeof(xfs_attr_leaf_entry_t);
entry_s = &leaf_d->entries[start_d];
entry_d = &leaf_d->entries[start_d + count];
- ovbcopy((char *)entry_s, (char *)entry_d, tmp);
+ memmove((char *)entry_d, (char *)entry_s, tmp);
}
/*
@@ -2004,7 +2004,7 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s,
* off for 6.2, should be revisited later.
*/
if (entry_s->flags & XFS_ATTR_INCOMPLETE) { /* skip partials? */
- bzero(XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), tmp);
+ memset(XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), 0, tmp);
INT_MOD(hdr_s->usedbytes, ARCH_CONVERT, -tmp);
INT_MOD(hdr_s->count, ARCH_CONVERT, -1);
entry_d--; /* to compensate for ++ in loop hdr */
@@ -2021,11 +2021,11 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s,
entry_d->flags = entry_s->flags;
ASSERT(INT_GET(entry_d->nameidx, ARCH_CONVERT) + tmp
<= XFS_LBSIZE(mp));
- ovbcopy(XFS_ATTR_LEAF_NAME(leaf_s, start_s + i),
- XFS_ATTR_LEAF_NAME(leaf_d, desti), tmp);
+ memmove(XFS_ATTR_LEAF_NAME(leaf_d, desti),
+ XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), tmp);
ASSERT(INT_GET(entry_s->nameidx, ARCH_CONVERT) + tmp
<= XFS_LBSIZE(mp));
- bzero(XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), tmp);
+ memset(XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), 0, tmp);
INT_MOD(hdr_s->usedbytes, ARCH_CONVERT, -tmp);
INT_MOD(hdr_d->usedbytes, ARCH_CONVERT, tmp);
INT_MOD(hdr_s->count, ARCH_CONVERT, -1);
@@ -2047,7 +2047,7 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s,
entry_s = &leaf_s->entries[start_s];
ASSERT(((char *)entry_s + tmp) <=
((char *)leaf_s + XFS_LBSIZE(mp)));
- bzero((char *)entry_s, tmp);
+ memset((char *)entry_s, 0, tmp);
} else {
/*
* Move the remaining entries down to fill the hole,
@@ -2057,14 +2057,14 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s,
tmp *= sizeof(xfs_attr_leaf_entry_t);
entry_s = &leaf_s->entries[start_s + count];
entry_d = &leaf_s->entries[start_s];
- ovbcopy((char *)entry_s, (char *)entry_d, tmp);
+ memmove((char *)entry_d, (char *)entry_s, tmp);
tmp = count * sizeof(xfs_attr_leaf_entry_t);
entry_s = &leaf_s->entries[INT_GET(hdr_s->count,
ARCH_CONVERT)];
ASSERT(((char *)entry_s + tmp) <=
((char *)leaf_s + XFS_LBSIZE(mp)));
- bzero((char *)entry_s, tmp);
+ memset((char *)entry_s, 0, tmp);
}
/*
@@ -2345,7 +2345,7 @@ xfs_attr_put_listent(xfs_attr_list_context_t *context,
aep = (attrlist_ent_t *)&(((char *)context->alist)[ context->firstu ]);
aep->a_valuelen = valuelen;
- bcopy(name, aep->a_name, namelen);
+ memcpy(aep->a_name, name, namelen);
aep->a_name[ namelen ] = 0;
context->alist->al_offset[ context->count++ ] = context->firstu;
context->alist->al_count = context->count;
@@ -2404,7 +2404,7 @@ xfs_attr_leaf_clearflag(xfs_da_args_t *args)
}
ASSERT(INT_GET(entry->hashval, ARCH_CONVERT) == args->hashval);
ASSERT(namelen == args->namelen);
- ASSERT(bcmp(name, args->name, namelen) == 0);
+ ASSERT(memcmp(name, args->name, namelen) == 0);
#endif /* DEBUG */
entry->flags &= ~XFS_ATTR_INCOMPLETE;
@@ -2559,7 +2559,7 @@ xfs_attr_leaf_flipflags(xfs_da_args_t *args)
}
ASSERT(INT_GET(entry1->hashval, ARCH_CONVERT) == INT_GET(entry2->hashval, ARCH_CONVERT));
ASSERT(namelen1 == namelen2);
- ASSERT(bcmp(name1, name2, namelen1) == 0);
+ ASSERT(memcmp(name1, name2, namelen1) == 0);
#endif /* DEBUG */
ASSERT(entry1->flags & XFS_ATTR_INCOMPLETE);
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 6bf9632238e7..d89a4a83d611 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -489,7 +489,7 @@ xfs_bmap_add_attrfork_local(
return 0;
if ((ip->i_d.di_mode & IFMT) == IFDIR) {
mp = ip->i_mount;
- bzero(&dargs, sizeof(dargs));
+ memset(&dargs, 0, sizeof(dargs));
dargs.dp = ip;
dargs.firstblock = firstblock;
dargs.flist = flist;
@@ -3146,7 +3146,7 @@ xfs_bmap_delete_exlist(
ASSERT(ifp->if_flags & XFS_IFEXTENTS);
base = ifp->if_u1.if_extents;
nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - count;
- ovbcopy(&base[idx + count], &base[idx],
+ memmove(&base[idx], &base[idx + count],
(nextents - idx) * sizeof(*base));
xfs_iext_realloc(ip, -count, whichfork);
}
@@ -3174,7 +3174,7 @@ xfs_bmap_extents_to_btree(
xfs_btree_cur_t *cur; /* bmap btree cursor */
xfs_bmbt_rec_t *ep; /* extent list pointer */
int error; /* error return value */
- xfs_extnum_t i; /* extent list index */
+ xfs_extnum_t i, cnt; /* extent list index */
xfs_ifork_t *ifp; /* inode fork pointer */
xfs_bmbt_key_t *kp; /* root block key pointer */
xfs_mount_t *mp; /* mount structure */
@@ -3256,24 +3256,25 @@ xfs_bmap_extents_to_btree(
ablock = XFS_BUF_TO_BMBT_BLOCK(abp);
INT_SET(ablock->bb_magic, ARCH_CONVERT, XFS_BMAP_MAGIC);
INT_ZERO(ablock->bb_level, ARCH_CONVERT);
- INT_ZERO(ablock->bb_numrecs, ARCH_CONVERT);
INT_SET(ablock->bb_leftsib, ARCH_CONVERT, NULLDFSBNO);
INT_SET(ablock->bb_rightsib, ARCH_CONVERT, NULLDFSBNO);
arp = XFS_BMAP_REC_IADDR(ablock, 1, cur);
nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
- for (ep = ifp->if_u1.if_extents, i = 0; i < nextents; i++, ep++) {
+ for (ep = ifp->if_u1.if_extents, cnt = i = 0; i < nextents; i++, ep++) {
if (!ISNULLSTARTBLOCK(xfs_bmbt_get_startblock(ep))) {
- *arp++ = *ep;
- INT_MOD(ablock->bb_numrecs, ARCH_CONVERT, +1);
+ arp->l0 = INT_GET(ep->l0, ARCH_CONVERT);
+ arp->l1 = INT_GET(ep->l1, ARCH_CONVERT);
+ arp++; cnt++;
}
}
+ INT_SET(ablock->bb_numrecs, ARCH_CONVERT, cnt);
ASSERT(INT_GET(ablock->bb_numrecs, ARCH_CONVERT) == XFS_IFORK_NEXTENTS(ip, whichfork));
/*
* Fill in the root key and pointer.
*/
kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
arp = XFS_BMAP_REC_IADDR(ablock, 1, cur);
- INT_SET(kp->br_startoff, ARCH_CONVERT, xfs_bmbt_get_startoff(arp));
+ INT_SET(kp->br_startoff, ARCH_CONVERT, xfs_bmbt_disk_get_startoff(arp));
pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
INT_SET(*pp, ARCH_CONVERT, args.fsbno);
/*
@@ -3310,7 +3311,7 @@ xfs_bmap_insert_exlist(
xfs_iext_realloc(ip, count, whichfork);
base = ifp->if_u1.if_extents;
nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
- ovbcopy(&base[idx], &base[idx + count],
+ memmove(&base[idx + count], &base[idx],
(nextents - (idx + count)) * sizeof(*base));
for (to = idx; to < idx + count; to++, new++)
xfs_bmbt_set_all(&base[to], new);
@@ -3380,7 +3381,7 @@ xfs_bmap_local_to_extents(
ASSERT(args.len == 1);
*firstblock = args.fsbno;
bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
- bcopy(ifp->if_u1.if_data, (char *)XFS_BUF_PTR(bp),
+ memcpy((char *)XFS_BUF_PTR(bp), ifp->if_u1.if_data,
ifp->if_bytes);
xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1);
xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
@@ -3556,7 +3557,7 @@ xfs_bmap_trace_addentry(
if (cnt == 1) {
ASSERT(r2 == NULL);
r2 = &tr2;
- bzero(&tr2, sizeof(tr2));
+ memset(&tr2, 0, sizeof(tr2));
} else
ASSERT(r2 != NULL);
ktrace_enter(xfs_bmap_trace_buf,
@@ -4332,7 +4333,7 @@ xfs_bmap_read_extents(
#ifdef XFS_BMAP_TRACE
static char fname[] = "xfs_bmap_read_extents";
#endif
- xfs_extnum_t i; /* index into the extents list */
+ xfs_extnum_t i, j; /* index into the extents list */
xfs_ifork_t *ifp; /* fork structure */
int level; /* btree level, for checking */
xfs_mount_t *mp; /* file system mount structure */
@@ -4373,28 +4374,9 @@ xfs_bmap_read_extents(
break;
pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt, block,
1, mp->m_bmap_dmxr[1]);
-#ifndef __KERNEL__
XFS_WANT_CORRUPTED_GOTO(
XFS_FSB_SANITY_CHECK(mp, INT_GET(*pp, ARCH_CONVERT)),
error0);
-#else /* additional, temporary, debugging code */
- if (!(XFS_FSB_SANITY_CHECK(mp, INT_GET(*pp, ARCH_CONVERT)))) {
- cmn_err(CE_NOTE,
- "xfs_bmap_read_extents: FSB Sanity Check:");
- if (!(XFS_FSB_TO_AGNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agcount))
- cmn_err(CE_NOTE,
- "bad AG count %d < agcount %d",
- XFS_FSB_TO_AGNO(mp, INT_GET(*pp, ARCH_CONVERT)),
- mp->m_sb.sb_agcount);
- if (!(XFS_FSB_TO_AGBNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agblocks))
- cmn_err(CE_NOTE,
- "bad AG BNO %d < %d",
- XFS_FSB_TO_AGBNO(mp, INT_GET(*pp, ARCH_CONVERT)),
- mp->m_sb.sb_agblocks);
- error = XFS_ERROR(EFSCORRUPTED);
- goto error0;
- }
-#endif
bno = INT_GET(*pp, ARCH_CONVERT);
xfs_trans_brelse(tp, bp);
}
@@ -4408,7 +4390,7 @@ xfs_bmap_read_extents(
* Loop over all leaf nodes. Copy information to the extent list.
*/
for (;;) {
- xfs_bmbt_rec_t *frp;
+ xfs_bmbt_rec_t *frp, *temp;
xfs_fsblock_t nextbno;
xfs_extnum_t num_recs;
@@ -4422,35 +4404,9 @@ xfs_bmap_read_extents(
(unsigned long long) ip->i_ino);
goto error0;
}
-#ifndef __KERNEL__
XFS_WANT_CORRUPTED_GOTO(
XFS_BMAP_SANITY_CHECK(mp, block, 0),
error0);
-#else /* additional, temporary, debugging code */
- if (!(XFS_BMAP_SANITY_CHECK(mp, block, 0))) {
- cmn_err(CE_NOTE,
- "xfs_bmap_read_extents: BMAP Sanity Check:");
- if (!(INT_GET(block->bb_magic, ARCH_CONVERT) == XFS_BMAP_MAGIC))
- cmn_err(CE_NOTE,
- "bb_magic 0x%x",
- INT_GET(block->bb_magic, ARCH_CONVERT));
- if (!(INT_GET(block->bb_level, ARCH_CONVERT) == level))
- cmn_err(CE_NOTE,
- "bb_level %d",
- INT_GET(block->bb_level, ARCH_CONVERT));
- if (!(INT_GET(block->bb_numrecs, ARCH_CONVERT) > 0))
- cmn_err(CE_NOTE,
- "bb_numrecs %d",
- INT_GET(block->bb_numrecs, ARCH_CONVERT));
- if (!(INT_GET(block->bb_numrecs, ARCH_CONVERT) <= (mp)->m_bmap_dmxr[(level) != 0]))
- cmn_err(CE_NOTE,
- "bb_numrecs %d < m_bmap_dmxr[] %d",
- INT_GET(block->bb_numrecs, ARCH_CONVERT),
- (mp)->m_bmap_dmxr[(level) != 0]);
- error = XFS_ERROR(EFSCORRUPTED);
- goto error0;
- }
-#endif
/*
* Read-ahead the next leaf block, if any.
*/
@@ -4462,18 +4418,21 @@ xfs_bmap_read_extents(
*/
frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt,
block, 1, mp->m_bmap_dmxr[0]);
- bcopy(frp, trp, num_recs * sizeof(*frp));
+ temp = trp;
+ for (j = 0; j < num_recs; j++, frp++, trp++) {
+ trp->l0 = INT_GET(frp->l0, ARCH_CONVERT);
+ trp->l1 = INT_GET(frp->l1, ARCH_CONVERT);
+ }
if (exntf == XFS_EXTFMT_NOSTATE) {
/*
* Check all attribute bmap btree records and
* any "older" data bmap btree records for a
* set bit in the "extent flag" position.
*/
- if (xfs_check_nostate_extents(trp, num_recs)) {
+ if (xfs_check_nostate_extents(temp, num_recs)) {
goto error0;
}
}
- trp += num_recs;
i += num_recs;
xfs_trans_brelse(tp, bp);
bno = nextbno;
@@ -4650,11 +4609,6 @@ xfs_bmapi(
if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL) {
-#ifdef __KERNEL__ /* additional, temporary, debugging code */
- cmn_err(CE_NOTE,
- "EFSCORRUPTED returned from file %s line %d",
- __FILE__, __LINE__);
-#endif
return XFS_ERROR(EFSCORRUPTED);
}
mp = ip->i_mount;
@@ -5150,11 +5104,6 @@ xfs_bmapi_single(
ifp = XFS_IFORK_PTR(ip, whichfork);
if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) {
-#ifdef __KERNEL__ /* additional, temporary, debugging code */
- cmn_err(CE_NOTE,
- "EFSCORRUPTED returned from file %s line %d",
- __FILE__, __LINE__);
-#endif
return XFS_ERROR(EFSCORRUPTED);
}
if (XFS_FORCED_SHUTDOWN(ip->i_mount))
@@ -5228,11 +5177,6 @@ xfs_bunmapi(
ifp = XFS_IFORK_PTR(ip, whichfork);
if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
-#ifdef __KERNEL__ /* additional, temporary, debugging code */
- cmn_err(CE_NOTE,
- "EFSCORRUPTED returned from file %s line %d",
- __FILE__, __LINE__);
-#endif
return XFS_ERROR(EFSCORRUPTED);
}
mp = ip->i_mount;
@@ -6317,7 +6261,7 @@ xfs_bmap_count_leaves(
int b;
for ( b = 1; b <= numrecs; b++, frp++)
- *count += xfs_bmbt_get_blockcount(frp);
+ *count += xfs_bmbt_disk_get_blockcount(frp);
return 0;
}
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c
index 5b384f4f9cba..07d513d24f74 100644
--- a/fs/xfs/xfs_bmap_btree.c
+++ b/fs/xfs/xfs_bmap_btree.c
@@ -207,7 +207,7 @@ xfs_bmbt_trace_argifr(
xfs_bmbt_irec_t s;
d = (xfs_dfsbno_t)f;
- xfs_bmbt_get_all(r, &s);
+ xfs_bmbt_disk_get_all(r, &s);
o = (xfs_dfiloff_t)s.br_startoff;
b = (xfs_dfsbno_t)s.br_startblock;
c = s.br_blockcount;
@@ -381,9 +381,9 @@ xfs_bmbt_delrec(
}
#endif
if (ptr < numrecs) {
- ovbcopy(&kp[ptr], &kp[ptr - 1],
+ memmove(&kp[ptr - 1], &kp[ptr],
(numrecs - ptr) * sizeof(*kp));
- ovbcopy(&pp[ptr], &pp[ptr - 1], /* INT_: direct copy */
+ memmove(&pp[ptr - 1], &pp[ptr], /* INT_: direct copy */
(numrecs - ptr) * sizeof(*pp));
xfs_bmbt_log_ptrs(cur, bp, ptr, numrecs - 1);
xfs_bmbt_log_keys(cur, bp, ptr, numrecs - 1);
@@ -391,12 +391,12 @@ xfs_bmbt_delrec(
} else {
rp = XFS_BMAP_REC_IADDR(block, 1, cur);
if (ptr < numrecs) {
- ovbcopy(&rp[ptr], &rp[ptr - 1],
+ memmove(&rp[ptr - 1], &rp[ptr],
(numrecs - ptr) * sizeof(*rp));
xfs_bmbt_log_recs(cur, bp, ptr, numrecs - 1);
}
if (ptr == 1) {
- INT_SET(key.br_startoff, ARCH_CONVERT, xfs_bmbt_get_startoff(rp));
+ INT_SET(key.br_startoff, ARCH_CONVERT, xfs_bmbt_disk_get_startoff(rp));
kp = &key;
}
}
@@ -619,14 +619,14 @@ xfs_bmbt_delrec(
}
}
#endif
- bcopy(rkp, lkp, numrrecs * sizeof(*lkp));
- bcopy(rpp, lpp, numrrecs * sizeof(*lpp));
+ memcpy(lkp, rkp, numrrecs * sizeof(*lkp));
+ memcpy(lpp, rpp, numrrecs * sizeof(*lpp));
xfs_bmbt_log_keys(cur, lbp, numlrecs + 1, numlrecs + numrrecs);
xfs_bmbt_log_ptrs(cur, lbp, numlrecs + 1, numlrecs + numrrecs);
} else {
lrp = XFS_BMAP_REC_IADDR(left, numlrecs + 1, cur);
rrp = XFS_BMAP_REC_IADDR(right, 1, cur);
- bcopy(rrp, lrp, numrrecs * sizeof(*lrp));
+ memcpy(lrp, rrp, numrrecs * sizeof(*lrp));
xfs_bmbt_log_recs(cur, lbp, numlrecs + 1, numlrecs + numrrecs);
}
INT_MOD(left->bb_numrecs, ARCH_CONVERT, numrrecs);
@@ -711,10 +711,10 @@ xfs_bmbt_get_rec(
return 0;
}
rp = XFS_BMAP_REC_IADDR(block, ptr, cur);
- *off = xfs_bmbt_get_startoff(rp);
- *bno = xfs_bmbt_get_startblock(rp);
- *len = xfs_bmbt_get_blockcount(rp);
- *state = xfs_bmbt_get_state(rp);
+ *off = xfs_bmbt_disk_get_startoff(rp);
+ *bno = xfs_bmbt_disk_get_startblock(rp);
+ *len = xfs_bmbt_disk_get_blockcount(rp);
+ *state = xfs_bmbt_disk_get_state(rp);
*stat = 1;
return 0;
}
@@ -757,7 +757,8 @@ xfs_bmbt_insrec(
XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
XFS_BMBT_TRACE_ARGIFR(cur, level, *bnop, recp);
ncur = (xfs_btree_cur_t *)0;
- INT_SET(key.br_startoff, ARCH_CONVERT, xfs_bmbt_get_startoff(recp));
+ INT_SET(key.br_startoff, ARCH_CONVERT,
+ xfs_bmbt_disk_get_startoff(recp));
optr = ptr = cur->bc_ptrs[level];
if (ptr == 0) {
XFS_BMBT_TRACE_CURSOR(cur, EXIT);
@@ -835,7 +836,7 @@ xfs_bmbt_insrec(
}
#endif
ptr = cur->bc_ptrs[level];
- xfs_bmbt_set_allf(&nrec,
+ xfs_bmbt_disk_set_allf(&nrec,
nkey.br_startoff, 0, 0,
XFS_EXT_NORM);
} else {
@@ -861,9 +862,9 @@ xfs_bmbt_insrec(
}
}
#endif
- ovbcopy(&kp[ptr - 1], &kp[ptr],
+ memmove(&kp[ptr], &kp[ptr - 1],
(numrecs - ptr + 1) * sizeof(*kp));
- ovbcopy(&pp[ptr - 1], &pp[ptr], /* INT_: direct copy */
+ memmove(&pp[ptr], &pp[ptr - 1], /* INT_: direct copy */
(numrecs - ptr + 1) * sizeof(*pp));
#ifdef DEBUG
if ((error = xfs_btree_check_lptr(cur, (xfs_bmbt_ptr_t)*bnop,
@@ -880,7 +881,7 @@ xfs_bmbt_insrec(
xfs_bmbt_log_ptrs(cur, bp, ptr, numrecs);
} else {
rp = XFS_BMAP_REC_IADDR(block, 1, cur);
- ovbcopy(&rp[ptr - 1], &rp[ptr],
+ memmove(&rp[ptr], &rp[ptr - 1],
(numrecs - ptr + 1) * sizeof(*rp));
rp[ptr - 1] = *recp;
numrecs++;
@@ -980,7 +981,7 @@ xfs_bmbt_killroot(
ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) == INT_GET(cblock->bb_numrecs, ARCH_CONVERT));
kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
ckp = XFS_BMAP_KEY_IADDR(cblock, 1, cur);
- bcopy(ckp, kp, INT_GET(block->bb_numrecs, ARCH_CONVERT) * sizeof(*kp));
+ memcpy(kp, ckp, INT_GET(block->bb_numrecs, ARCH_CONVERT) * sizeof(*kp));
pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
cpp = XFS_BMAP_PTR_IADDR(cblock, 1, cur);
#ifdef DEBUG
@@ -991,7 +992,7 @@ xfs_bmbt_killroot(
}
}
#endif
- bcopy(cpp, pp, INT_GET(block->bb_numrecs, ARCH_CONVERT) * sizeof(*pp));
+ memcpy(pp, cpp, INT_GET(block->bb_numrecs, ARCH_CONVERT) * sizeof(*pp));
xfs_bmap_add_free(XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(cbp)), 1,
cur->bc_private.b.flist, cur->bc_mp);
if (!async)
@@ -1175,7 +1176,7 @@ xfs_bmbt_lookup(
startoff = INT_GET(kkp->br_startoff, ARCH_CONVERT);
} else {
krp = krbase + keyno - 1;
- startoff = xfs_bmbt_get_startoff(krp);
+ startoff = xfs_bmbt_disk_get_startoff(krp);
}
diff = (xfs_sfiloff_t)
(startoff - rp->br_startoff);
@@ -1349,14 +1350,15 @@ xfs_bmbt_lshift(
}
}
#endif
- ovbcopy(rkp + 1, rkp, rrecs * sizeof(*rkp));
- ovbcopy(rpp + 1, rpp, rrecs * sizeof(*rpp));
+ memmove(rkp, rkp + 1, rrecs * sizeof(*rkp));
+ memmove(rpp, rpp + 1, rrecs * sizeof(*rpp));
xfs_bmbt_log_keys(cur, rbp, 1, rrecs);
xfs_bmbt_log_ptrs(cur, rbp, 1, rrecs);
} else {
- ovbcopy(rrp + 1, rrp, rrecs * sizeof(*rrp));
+ memmove(rrp, rrp + 1, rrecs * sizeof(*rrp));
xfs_bmbt_log_recs(cur, rbp, 1, rrecs);
- INT_SET(key.br_startoff, ARCH_CONVERT, xfs_bmbt_get_startoff(rrp));
+ INT_SET(key.br_startoff, ARCH_CONVERT,
+ xfs_bmbt_disk_get_startoff(rrp));
rkp = &key;
}
if ((error = xfs_bmbt_updkey(cur, rkp, level + 1))) {
@@ -1452,8 +1454,8 @@ xfs_bmbt_rshift(
}
}
#endif
- ovbcopy(rkp, rkp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp));
- ovbcopy(rpp, rpp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp));
+ memmove(rkp + 1, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp));
+ memmove(rpp + 1, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp));
#ifdef DEBUG
if ((error = xfs_btree_check_lptr(cur, INT_GET(*lpp, ARCH_CONVERT), level))) {
XFS_BMBT_TRACE_CURSOR(cur, ERROR);
@@ -1467,10 +1469,11 @@ xfs_bmbt_rshift(
} else {
lrp = XFS_BMAP_REC_IADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur);
rrp = XFS_BMAP_REC_IADDR(right, 1, cur);
- ovbcopy(rrp, rrp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp));
+ memmove(rrp + 1, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp));
*rrp = *lrp;
xfs_bmbt_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1);
- INT_SET(key.br_startoff, ARCH_CONVERT, xfs_bmbt_get_startoff(rrp));
+ INT_SET(key.br_startoff, ARCH_CONVERT,
+ xfs_bmbt_disk_get_startoff(rrp));
rkp = &key;
}
INT_MOD(left->bb_numrecs, ARCH_CONVERT, -1);
@@ -1629,17 +1632,17 @@ xfs_bmbt_split(
}
}
#endif
- bcopy(lkp, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp));
- bcopy(lpp, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp));
+ memcpy(rkp, lkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp));
+ memcpy(rpp, lpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp));
xfs_bmbt_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
xfs_bmbt_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
keyp->br_startoff = INT_GET(rkp->br_startoff, ARCH_CONVERT);
} else {
lrp = XFS_BMAP_REC_IADDR(left, i, cur);
rrp = XFS_BMAP_REC_IADDR(right, 1, cur);
- bcopy(lrp, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp));
+ memcpy(rrp, lrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp));
xfs_bmbt_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
- keyp->br_startoff = xfs_bmbt_get_startoff(rrp);
+ keyp->br_startoff = xfs_bmbt_disk_get_startoff(rrp);
}
INT_MOD(left->bb_numrecs, ARCH_CONVERT, -(INT_GET(right->bb_numrecs, ARCH_CONVERT)));
right->bb_rightsib = left->bb_rightsib; /* INT_: direct copy */
@@ -1748,8 +1751,8 @@ xfs_bmdr_to_bmbt(
fpp = XFS_BTREE_PTR_ADDR(dblocklen, xfs_bmdr, dblock, 1, dmxr);
tpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen);
dmxr = INT_GET(dblock->bb_numrecs, ARCH_CONVERT);
- bcopy(fkp, tkp, sizeof(*fkp) * dmxr);
- bcopy(fpp, tpp, sizeof(*fpp) * dmxr); /* INT_: direct copy */
+ memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
+ memcpy(tpp, fpp, sizeof(*fpp) * dmxr); /* INT_: direct copy */
}
/*
@@ -1874,17 +1877,16 @@ xfs_bmbt_delete(
* This code must be in sync with the routines xfs_bmbt_get_startoff,
* xfs_bmbt_get_startblock, xfs_bmbt_get_blockcount and xfs_bmbt_get_state.
*/
-void
-xfs_bmbt_get_all(
- xfs_bmbt_rec_t *r,
- xfs_bmbt_irec_t *s)
+
+static __inline__ void
+__xfs_bmbt_get_all(
+ __uint64_t l0,
+ __uint64_t l1,
+ xfs_bmbt_irec_t *s)
{
int ext_flag;
xfs_exntst_t st;
- __uint64_t l0, l1;
- l0 = INT_GET(r->l0, ARCH_CONVERT);
- l1 = INT_GET(r->l1, ARCH_CONVERT);
ext_flag = (int)(l0 >> (64 - BMBT_EXNTFLAG_BITLEN));
s->br_startoff = ((xfs_fileoff_t)l0 &
XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
@@ -1915,6 +1917,14 @@ xfs_bmbt_get_all(
s->br_state = st;
}
+void
+xfs_bmbt_get_all(
+ xfs_bmbt_rec_t *r,
+ xfs_bmbt_irec_t *s)
+{
+ __xfs_bmbt_get_all(r->l0, r->l1, s);
+}
+
/*
* Get the block pointer for the given level of the cursor.
* Fill in the buffer pointer, if applicable.
@@ -1941,23 +1951,94 @@ xfs_bmbt_get_block(
}
/*
- * Extract the blockcount field from a bmap extent record.
+ * Extract the blockcount field from an in memory bmap extent record.
*/
xfs_filblks_t
xfs_bmbt_get_blockcount(
xfs_bmbt_rec_t *r)
{
- return (xfs_filblks_t)(INT_GET(r->l1, ARCH_CONVERT) & XFS_MASK64LO(21));
+ return (xfs_filblks_t)(r->l1 & XFS_MASK64LO(21));
}
/*
- * Extract the startblock field from a bmap extent record.
+ * Extract the startblock field from an in memory bmap extent record.
*/
xfs_fsblock_t
xfs_bmbt_get_startblock(
xfs_bmbt_rec_t *r)
{
#if XFS_BIG_FILESYSTEMS
+ return (((xfs_fsblock_t)r->l0 & XFS_MASK64LO(9)) << 43) |
+ (((xfs_fsblock_t)r->l1) >> 21);
+#else
+#ifdef DEBUG
+ xfs_dfsbno_t b;
+
+ b = (((xfs_dfsbno_t)r->l0 & XFS_MASK64LO(9)) << 43) |
+ (((xfs_dfsbno_t)r->l1) >> 21);
+ ASSERT((b >> 32) == 0 || ISNULLDSTARTBLOCK(b));
+ return (xfs_fsblock_t)b;
+#else /* !DEBUG */
+ return (xfs_fsblock_t)(((xfs_dfsbno_t)r->l1) >> 21);
+#endif /* DEBUG */
+#endif /* XFS_BIG_FILESYSTEMS */
+}
+
+/*
+ * Extract the startoff field from an in memory bmap extent record.
+ */
+xfs_fileoff_t
+xfs_bmbt_get_startoff(
+ xfs_bmbt_rec_t *r)
+{
+ return ((xfs_fileoff_t)r->l0 &
+ XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
+}
+
+xfs_exntst_t
+xfs_bmbt_get_state(
+ xfs_bmbt_rec_t *r)
+{
+ int ext_flag;
+
+ ext_flag = (int)((r->l0) >> (64 - BMBT_EXNTFLAG_BITLEN));
+ return xfs_extent_state(xfs_bmbt_get_blockcount(r),
+ ext_flag);
+}
+
+#if ARCH_CONVERT != ARCH_NOCONVERT
+/* Endian flipping versions of the bmbt extraction functions */
+void
+xfs_bmbt_disk_get_all(
+ xfs_bmbt_rec_t *r,
+ xfs_bmbt_irec_t *s)
+{
+ __uint64_t l0, l1;
+
+ l0 = INT_GET(r->l0, ARCH_CONVERT);
+ l1 = INT_GET(r->l1, ARCH_CONVERT);
+
+ __xfs_bmbt_get_all(l0, l1, s);
+}
+
+/*
+ * Extract the blockcount field from an on disk bmap extent record.
+ */
+xfs_filblks_t
+xfs_bmbt_disk_get_blockcount(
+ xfs_bmbt_rec_t *r)
+{
+ return (xfs_filblks_t)(INT_GET(r->l1, ARCH_CONVERT) & XFS_MASK64LO(21));
+}
+
+/*
+ * Extract the startblock field from an on disk bmap extent record.
+ */
+xfs_fsblock_t
+xfs_bmbt_disk_get_startblock(
+ xfs_bmbt_rec_t *r)
+{
+#if XFS_BIG_FILESYSTEMS
return (((xfs_fsblock_t)INT_GET(r->l0, ARCH_CONVERT) & XFS_MASK64LO(9)) << 43) |
(((xfs_fsblock_t)INT_GET(r->l1, ARCH_CONVERT)) >> 21);
#else
@@ -1975,10 +2056,10 @@ xfs_bmbt_get_startblock(
}
/*
- * Extract the startoff field from a bmap extent record.
+ * Extract the startoff field from a disk format bmap extent record.
*/
xfs_fileoff_t
-xfs_bmbt_get_startoff(
+xfs_bmbt_disk_get_startoff(
xfs_bmbt_rec_t *r)
{
return ((xfs_fileoff_t)INT_GET(r->l0, ARCH_CONVERT) &
@@ -1986,15 +2067,16 @@ xfs_bmbt_get_startoff(
}
xfs_exntst_t
-xfs_bmbt_get_state(
+xfs_bmbt_disk_get_state(
xfs_bmbt_rec_t *r)
{
int ext_flag;
ext_flag = (int)((INT_GET(r->l0, ARCH_CONVERT)) >> (64 - BMBT_EXNTFLAG_BITLEN));
- return xfs_extent_state(xfs_bmbt_get_blockcount(r),
+ return xfs_extent_state(xfs_bmbt_disk_get_blockcount(r),
ext_flag);
}
+#endif
/*
@@ -2103,7 +2185,7 @@ xfs_bmbt_insert(
XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
level = 0;
nbno = NULLFSBLOCK;
- xfs_bmbt_set_all(&nrec, &cur->bc_rec.b);
+ xfs_bmbt_disk_set_all(&nrec, &cur->bc_rec.b);
ncur = (xfs_btree_cur_t *)0;
pcur = cur;
do {
@@ -2333,7 +2415,7 @@ xfs_bmbt_newroot(
cur->bc_ptrs[level + 1] = 1;
kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
ckp = XFS_BMAP_KEY_IADDR(cblock, 1, cur);
- bcopy(kp, ckp, INT_GET(cblock->bb_numrecs, ARCH_CONVERT) * sizeof(*kp));
+ memcpy(ckp, kp, INT_GET(cblock->bb_numrecs, ARCH_CONVERT) * sizeof(*kp));
cpp = XFS_BMAP_PTR_IADDR(cblock, 1, cur);
#ifdef DEBUG
for (i = 0; i < INT_GET(cblock->bb_numrecs, ARCH_CONVERT); i++) {
@@ -2343,7 +2425,7 @@ xfs_bmbt_newroot(
}
}
#endif
- bcopy(pp, cpp, INT_GET(cblock->bb_numrecs, ARCH_CONVERT) * sizeof(*pp));
+ memcpy(cpp, pp, INT_GET(cblock->bb_numrecs, ARCH_CONVERT) * sizeof(*pp));
#ifdef DEBUG
if ((error = xfs_btree_check_lptr(cur, (xfs_bmbt_ptr_t)args.fsbno,
level))) {
@@ -2388,6 +2470,97 @@ xfs_bmbt_set_all(
ASSERT((s->br_startblock & XFS_MASK64HI(12)) == 0);
#endif /* XFS_BIG_FILESYSTEMS */
#if XFS_BIG_FILESYSTEMS
+ r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
+ ((xfs_bmbt_rec_base_t)s->br_startoff << 9) |
+ ((xfs_bmbt_rec_base_t)s->br_startblock >> 43);
+ r->l1 = ((xfs_bmbt_rec_base_t)s->br_startblock << 21) |
+ ((xfs_bmbt_rec_base_t)s->br_blockcount &
+ (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
+#else /* !XFS_BIG_FILESYSTEMS */
+ if (ISNULLSTARTBLOCK(s->br_startblock)) {
+ r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
+ ((xfs_bmbt_rec_base_t)s->br_startoff << 9) |
+ (xfs_bmbt_rec_base_t)XFS_MASK64LO(9);
+ r->l1 = XFS_MASK64HI(11) |
+ ((xfs_bmbt_rec_base_t)s->br_startblock << 21) |
+ ((xfs_bmbt_rec_base_t)s->br_blockcount &
+ (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
+ } else {
+ r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
+ ((xfs_bmbt_rec_base_t)s->br_startoff << 9);
+ r->l1 = ((xfs_bmbt_rec_base_t)s->br_startblock << 21) |
+ ((xfs_bmbt_rec_base_t)s->br_blockcount &
+ (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
+ }
+#endif /* XFS_BIG_FILESYSTEMS */
+}
+
+/*
+ * Set all the fields in a bmap extent record from the arguments.
+ */
+void
+xfs_bmbt_set_allf(
+ xfs_bmbt_rec_t *r,
+ xfs_fileoff_t o,
+ xfs_fsblock_t b,
+ xfs_filblks_t c,
+ xfs_exntst_t v)
+{
+ int extent_flag;
+
+ ASSERT((v == XFS_EXT_NORM) || (v == XFS_EXT_UNWRITTEN));
+ extent_flag = (v == XFS_EXT_NORM) ? 0 : 1;
+ ASSERT((o & XFS_MASK64HI(64-BMBT_STARTOFF_BITLEN)) == 0);
+ ASSERT((c & XFS_MASK64HI(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);
+#if XFS_BIG_FILESYSTEMS
+ ASSERT((b & XFS_MASK64HI(64-BMBT_STARTBLOCK_BITLEN)) == 0);
+#endif /* XFS_BIG_FILESYSTEMS */
+#if XFS_BIG_FILESYSTEMS
+ r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
+ ((xfs_bmbt_rec_base_t)o << 9) |
+		((xfs_bmbt_rec_base_t)b >> 43);
+ r->l1 = ((xfs_bmbt_rec_base_t)b << 21) |
+ ((xfs_bmbt_rec_base_t)c &
+		(xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
+#else /* !XFS_BIG_FILESYSTEMS */
+ if (ISNULLSTARTBLOCK(b)) {
+ r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
+ ((xfs_bmbt_rec_base_t)o << 9) |
+ (xfs_bmbt_rec_base_t)XFS_MASK64LO(9);
+ r->l1 = XFS_MASK64HI(11) |
+ ((xfs_bmbt_rec_base_t)b << 21) |
+ ((xfs_bmbt_rec_base_t)c &
+ (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
+ } else {
+ r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
+ ((xfs_bmbt_rec_base_t)o << 9);
+ r->l1 = ((xfs_bmbt_rec_base_t)b << 21) |
+ ((xfs_bmbt_rec_base_t)c &
+ (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
+ }
+#endif /* XFS_BIG_FILESYSTEMS */
+}
+
+#if ARCH_CONVERT != ARCH_NOCONVERT
+/*
+ * Set all the fields in a bmap extent record from the uncompressed form.
+ */
+void
+xfs_bmbt_disk_set_all(
+ xfs_bmbt_rec_t *r,
+ xfs_bmbt_irec_t *s)
+{
+ int extent_flag;
+
+ ASSERT((s->br_state == XFS_EXT_NORM) ||
+ (s->br_state == XFS_EXT_UNWRITTEN));
+ extent_flag = (s->br_state == XFS_EXT_NORM) ? 0 : 1;
+ ASSERT((s->br_startoff & XFS_MASK64HI(9)) == 0);
+ ASSERT((s->br_blockcount & XFS_MASK64HI(43)) == 0);
+#if XFS_BIG_FILESYSTEMS
+ ASSERT((s->br_startblock & XFS_MASK64HI(12)) == 0);
+#endif /* XFS_BIG_FILESYSTEMS */
+#if XFS_BIG_FILESYSTEMS
INT_SET(r->l0, ARCH_CONVERT, ((xfs_bmbt_rec_base_t)extent_flag << 63) |
((xfs_bmbt_rec_base_t)s->br_startoff << 9) |
((xfs_bmbt_rec_base_t)s->br_startblock >> 43));
@@ -2414,10 +2587,10 @@ xfs_bmbt_set_all(
}
/*
- * Set all the fields in a bmap extent record from the arguments.
+ * Set all the fields in a disk format bmap extent record from the arguments.
*/
void
-xfs_bmbt_set_allf(
+xfs_bmbt_disk_set_allf(
xfs_bmbt_rec_t *r,
xfs_fileoff_t o,
xfs_fsblock_t b,
@@ -2458,6 +2631,7 @@ xfs_bmbt_set_allf(
}
#endif /* XFS_BIG_FILESYSTEMS */
}
+#endif
/*
* Set the blockcount field in a bmap extent record.
@@ -2468,8 +2642,8 @@ xfs_bmbt_set_blockcount(
xfs_filblks_t v)
{
ASSERT((v & XFS_MASK64HI(43)) == 0);
- INT_SET(r->l1, ARCH_CONVERT, (INT_GET(r->l1, ARCH_CONVERT) & (xfs_bmbt_rec_base_t)XFS_MASK64HI(43)) |
- (xfs_bmbt_rec_base_t)(v & XFS_MASK64LO(21)));
+ r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64HI(43)) |
+ (xfs_bmbt_rec_base_t)(v & XFS_MASK64LO(21));
}
/*
@@ -2484,20 +2658,20 @@ xfs_bmbt_set_startblock(
ASSERT((v & XFS_MASK64HI(12)) == 0);
#endif /* XFS_BIG_FILESYSTEMS */
#if XFS_BIG_FILESYSTEMS
- INT_SET(r->l0, ARCH_CONVERT, (INT_GET(r->l0, ARCH_CONVERT) & (xfs_bmbt_rec_base_t)XFS_MASK64HI(55)) |
- (xfs_bmbt_rec_base_t)(v >> 43));
- INT_SET(r->l1, ARCH_CONVERT, (INT_GET(r->l1, ARCH_CONVERT) & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)) |
- (xfs_bmbt_rec_base_t)(v << 21));
+ r->l0 = (r->l0 & (xfs_bmbt_rec_base_t)XFS_MASK64HI(55)) |
+ (xfs_bmbt_rec_base_t)(v >> 43);
+ r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)) |
+ (xfs_bmbt_rec_base_t)(v << 21);
#else /* !XFS_BIG_FILESYSTEMS */
if (ISNULLSTARTBLOCK(v)) {
- INT_SET(r->l0, ARCH_CONVERT, (INT_GET(r->l0, ARCH_CONVERT) | (xfs_bmbt_rec_base_t)XFS_MASK64LO(9)));
- INT_SET(r->l1, ARCH_CONVERT, (xfs_bmbt_rec_base_t)XFS_MASK64HI(11) |
+ r->l0 |= (xfs_bmbt_rec_base_t)XFS_MASK64LO(9);
+ r->l1 = (xfs_bmbt_rec_base_t)XFS_MASK64HI(11) |
((xfs_bmbt_rec_base_t)v << 21) |
- (INT_GET(r->l1, ARCH_CONVERT) & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)));
+ (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
} else {
- INT_SET(r->l0, ARCH_CONVERT, (INT_GET(r->l0, ARCH_CONVERT) & ~(xfs_bmbt_rec_base_t)XFS_MASK64LO(9)));
- INT_SET(r->l1, ARCH_CONVERT, ((xfs_bmbt_rec_base_t)v << 21) |
- (INT_GET(r->l1, ARCH_CONVERT) & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)));
+ r->l0 &= ~(xfs_bmbt_rec_base_t)XFS_MASK64LO(9);
+ r->l1 = ((xfs_bmbt_rec_base_t)v << 21) |
+ (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
}
#endif /* XFS_BIG_FILESYSTEMS */
}
@@ -2511,9 +2685,9 @@ xfs_bmbt_set_startoff(
xfs_fileoff_t v)
{
ASSERT((v & XFS_MASK64HI(9)) == 0);
- INT_SET(r->l0, ARCH_CONVERT, (INT_GET(r->l0, ARCH_CONVERT) & (xfs_bmbt_rec_base_t) XFS_MASK64HI(1)) |
+ r->l0 = (r->l0 & (xfs_bmbt_rec_base_t) XFS_MASK64HI(1)) |
((xfs_bmbt_rec_base_t)v << 9) |
- (INT_GET(r->l0, ARCH_CONVERT) & (xfs_bmbt_rec_base_t)XFS_MASK64LO(9)));
+ (r->l0 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(9));
}
/*
@@ -2526,9 +2700,9 @@ xfs_bmbt_set_state(
{
ASSERT(v == XFS_EXT_NORM || v == XFS_EXT_UNWRITTEN);
if (v == XFS_EXT_NORM)
- INT_SET(r->l0, ARCH_CONVERT, INT_GET(r->l0, ARCH_CONVERT) & XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN));
+ r->l0 &= XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN);
else
- INT_SET(r->l0, ARCH_CONVERT, INT_GET(r->l0, ARCH_CONVERT) | XFS_MASK64HI(BMBT_EXNTFLAG_BITLEN));
+ r->l0 |= XFS_MASK64HI(BMBT_EXNTFLAG_BITLEN);
}
/*
@@ -2559,8 +2733,8 @@ xfs_bmbt_to_bmdr(
fpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen);
tpp = XFS_BTREE_PTR_ADDR(dblocklen, xfs_bmdr, dblock, 1, dmxr);
dmxr = INT_GET(dblock->bb_numrecs, ARCH_CONVERT);
- bcopy(fkp, tkp, sizeof(*fkp) * dmxr);
- bcopy(fpp, tpp, sizeof(*fpp) * dmxr); /* INT_: direct copy */
+ memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
+ memcpy(tpp, fpp, sizeof(*fpp) * dmxr); /* INT_: direct copy */
}
/*
@@ -2596,7 +2770,7 @@ xfs_bmbt_update(
#endif
ptr = cur->bc_ptrs[0];
rp = XFS_BMAP_REC_IADDR(block, ptr, cur);
- xfs_bmbt_set_allf(rp, off, bno, len, state);
+ xfs_bmbt_disk_set_allf(rp, off, bno, len, state);
xfs_bmbt_log_recs(cur, bp, ptr, ptr);
if (ptr > 1) {
XFS_BMBT_TRACE_CURSOR(cur, EXIT);
@@ -2618,13 +2792,14 @@ xfs_bmbt_update(
* Return an error condition (1) if any flags found,
* otherwise return 0.
*/
+
int
xfs_check_nostate_extents(
xfs_bmbt_rec_t *ep,
xfs_extnum_t num)
{
for (; num > 0; num--, ep++) {
- if (((INT_GET(ep->l0, ARCH_CONVERT)) >>
+ if ((ep->l0 >>
(64 - BMBT_EXNTFLAG_BITLEN)) != 0) {
ASSERT(0);
return 1;
diff --git a/fs/xfs/xfs_bmap_btree.h b/fs/xfs/xfs_bmap_btree.h
index a9ec9c58252d..8aeefd43c967 100644
--- a/fs/xfs/xfs_bmap_btree.h
+++ b/fs/xfs/xfs_bmap_btree.h
@@ -509,6 +509,41 @@ xfs_exntst_t
xfs_bmbt_get_state(
xfs_bmbt_rec_t *r);
+#if ARCH_CONVERT != ARCH_NOCONVERT
+void
+xfs_bmbt_disk_get_all(
+ xfs_bmbt_rec_t *r,
+ xfs_bmbt_irec_t *s);
+
+xfs_exntst_t
+xfs_bmbt_disk_get_state(
+ xfs_bmbt_rec_t *r);
+
+xfs_filblks_t
+xfs_bmbt_disk_get_blockcount(
+ xfs_bmbt_rec_t *r);
+
+xfs_fsblock_t
+xfs_bmbt_disk_get_startblock(
+ xfs_bmbt_rec_t *r);
+
+xfs_fileoff_t
+xfs_bmbt_disk_get_startoff(
+ xfs_bmbt_rec_t *r);
+
+#else
+#define xfs_bmbt_disk_get_all(r, s) \
+ xfs_bmbt_get_all(r, s)
+#define xfs_bmbt_disk_get_state(r) \
+ xfs_bmbt_get_state(r)
+#define xfs_bmbt_disk_get_blockcount(r) \
+ xfs_bmbt_get_blockcount(r)
+#define xfs_bmbt_disk_get_startblock(r) \
+	xfs_bmbt_get_startblock(r)
+#define xfs_bmbt_disk_get_startoff(r) \
+ xfs_bmbt_get_startoff(r)
+#endif
+
int
xfs_bmbt_increment(
struct xfs_btree_cur *,
@@ -607,6 +642,26 @@ xfs_bmbt_set_state(
xfs_bmbt_rec_t *r,
xfs_exntst_t v);
+#if ARCH_CONVERT != ARCH_NOCONVERT
+void
+xfs_bmbt_disk_set_all(
+ xfs_bmbt_rec_t *r,
+ xfs_bmbt_irec_t *s);
+
+void
+xfs_bmbt_disk_set_allf(
+ xfs_bmbt_rec_t *r,
+ xfs_fileoff_t o,
+ xfs_fsblock_t b,
+ xfs_filblks_t c,
+ xfs_exntst_t v);
+#else
+#define xfs_bmbt_disk_set_all(r, s) \
+ xfs_bmbt_set_all(r, s)
+#define xfs_bmbt_disk_set_allf(r, o, b, c, v) \
+	xfs_bmbt_set_allf(r, o, b, c, v)
+#endif
+
void
xfs_bmbt_to_bmdr(
xfs_bmbt_block_t *,
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index 7dcef68fb253..115b05df35ba 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -261,9 +261,9 @@ xfs_btree_check_rec(
r1 = ar1;
r2 = ar2;
- ASSERT(xfs_bmbt_get_startoff(r1) +
- xfs_bmbt_get_blockcount(r1) <=
- xfs_bmbt_get_startoff(r2));
+ ASSERT(xfs_bmbt_disk_get_startoff(r1) +
+ xfs_bmbt_disk_get_blockcount(r1) <=
+ xfs_bmbt_disk_get_startoff(r2));
break;
}
case XFS_BTNUM_INO: {
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index ac62646cacde..8a837fab5ad0 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -766,7 +766,7 @@ xfs_buf_item_init(
* to have logged.
*/
bip->bli_orig = (char *)kmem_alloc(XFS_BUF_COUNT(bp), KM_SLEEP);
- bcopy(XFS_BUF_PTR(bp), bip->bli_orig, XFS_BUF_COUNT(bp));
+ memcpy(bip->bli_orig, XFS_BUF_PTR(bp), XFS_BUF_COUNT(bp));
bip->bli_logged = (char *)kmem_zalloc(XFS_BUF_COUNT(bp) / NBBY, KM_SLEEP);
#endif
diff --git a/fs/xfs/xfs_clnt.h b/fs/xfs/xfs_clnt.h
index 14195151eeea..fd7d80ebe2ef 100644
--- a/fs/xfs/xfs_clnt.h
+++ b/fs/xfs/xfs_clnt.h
@@ -112,15 +112,14 @@ struct xfs_mount_args {
/* only) */
#define XFSMNT_NOTSERVER 0x00100000 /* give up being the server */
/* (remount only) */
-#define XFSMNT_DMAPI 0x00200000 /* enable dmapi/xdsm */
+#define XFSMNT_32BITINODES 0x00200000 /* restrict inodes to 32
+ * bits of address space */
#define XFSMNT_GQUOTA 0x00400000 /* group quota accounting */
#define XFSMNT_GQUOTAENF 0x00800000 /* group quota limit
* enforcement */
#define XFSMNT_NOUUID 0x01000000 /* Ignore fs uuid */
-#define XFSMNT_32BITINODES 0x02000000 /* restrict inodes to 32
- * bits of address space */
-#define XFSMNT_IRIXSGID 0x04000000 /* Irix-style sgid inheritance */
-#define XFSMNT_NOLOGFLUSH 0x08000000 /* Don't flush for log blocks */
+#define XFSMNT_DMAPI 0x02000000 /* enable dmapi/xdsm */
+#define XFSMNT_NOLOGFLUSH 0x04000000 /* Don't flush for log blocks */
/* Did we get any args for CXFS to consume? */
#define XFSARGS_FOR_CXFSARR(ap) \
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index 020801f897ae..06d8371730bc 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -360,7 +360,7 @@ xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
size = (int)((char *)&leaf->ents[INT_GET(leaf->hdr.count, ARCH_CONVERT)] -
(char *)leaf);
}
- bcopy(oldroot, node, size);
+ memcpy(node, oldroot, size);
xfs_da_log_buf(tp, bp, 0, size - 1);
xfs_da_buf_done(blk1->bp);
blk1->bp = bp;
@@ -527,7 +527,7 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
tmp *= (uint)sizeof(xfs_da_node_entry_t);
btree_s = &node2->btree[0];
btree_d = &node2->btree[count];
- ovbcopy(btree_s, btree_d, tmp);
+ memmove(btree_d, btree_s, tmp);
}
/*
@@ -538,7 +538,7 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
tmp = count * (uint)sizeof(xfs_da_node_entry_t);
btree_s = &node1->btree[INT_GET(node1->hdr.count, ARCH_CONVERT) - count];
btree_d = &node2->btree[0];
- bcopy(btree_s, btree_d, tmp);
+ memcpy(btree_d, btree_s, tmp);
INT_MOD(node1->hdr.count, ARCH_CONVERT, -(count));
} else {
@@ -550,7 +550,7 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
tmp = count * (uint)sizeof(xfs_da_node_entry_t);
btree_s = &node2->btree[0];
btree_d = &node1->btree[INT_GET(node1->hdr.count, ARCH_CONVERT)];
- bcopy(btree_s, btree_d, tmp);
+ memcpy(btree_d, btree_s, tmp);
INT_MOD(node1->hdr.count, ARCH_CONVERT, count);
xfs_da_log_buf(tp, blk1->bp,
XFS_DA_LOGRANGE(node1, btree_d, tmp));
@@ -562,7 +562,7 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
tmp *= (uint)sizeof(xfs_da_node_entry_t);
btree_s = &node2->btree[count];
btree_d = &node2->btree[0];
- ovbcopy(btree_s, btree_d, tmp);
+ memmove(btree_d, btree_s, tmp);
INT_MOD(node2->hdr.count, ARCH_CONVERT, -(count));
}
@@ -622,7 +622,7 @@ xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
btree = &node->btree[ oldblk->index ];
if (oldblk->index < INT_GET(node->hdr.count, ARCH_CONVERT)) {
tmp = (INT_GET(node->hdr.count, ARCH_CONVERT) - oldblk->index) * (uint)sizeof(*btree);
- ovbcopy(btree, btree + 1, tmp);
+ memmove(btree + 1, btree, tmp);
}
INT_SET(btree->hashval, ARCH_CONVERT, newblk->hashval);
INT_SET(btree->before, ARCH_CONVERT, newblk->blkno);
@@ -790,7 +790,7 @@ xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk)
}
ASSERT(INT_ISZERO(blkinfo->forw, ARCH_CONVERT));
ASSERT(INT_ISZERO(blkinfo->back, ARCH_CONVERT));
- bcopy(bp->data, root_blk->bp->data, state->blocksize);
+ memcpy(root_blk->bp->data, bp->data, state->blocksize);
xfs_da_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1);
error = xfs_da_shrink_inode(args, child, bp);
return(error);
@@ -842,7 +842,7 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
* path point to the block we want to drop (this one).
*/
forward = (!INT_ISZERO(info->forw, ARCH_CONVERT));
- bcopy(&state->path, &state->altpath, sizeof(state->path));
+ memcpy(&state->altpath, &state->path, sizeof(state->path));
error = xfs_da_path_shift(state, &state->altpath, forward,
0, &retval);
if (error)
@@ -898,7 +898,7 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
* Make altpath point to the block we want to keep (the lower
* numbered block) and path point to the block we want to drop.
*/
- bcopy(&state->path, &state->altpath, sizeof(state->path));
+ memcpy(&state->altpath, &state->path, sizeof(state->path));
if (blkno < blk->blkno) {
error = xfs_da_path_shift(state, &state->altpath, forward,
0, &retval);
@@ -1001,12 +1001,12 @@ xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk)
if (drop_blk->index < (INT_GET(node->hdr.count, ARCH_CONVERT)-1)) {
tmp = INT_GET(node->hdr.count, ARCH_CONVERT) - drop_blk->index - 1;
tmp *= (uint)sizeof(xfs_da_node_entry_t);
- ovbcopy(btree + 1, btree, tmp);
+ memmove(btree, btree + 1, tmp);
xfs_da_log_buf(state->args->trans, drop_blk->bp,
XFS_DA_LOGRANGE(node, btree, tmp));
btree = &node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ];
}
- bzero((char *)btree, sizeof(xfs_da_node_entry_t));
+ memset((char *)btree, 0, sizeof(xfs_da_node_entry_t));
xfs_da_log_buf(state->args->trans, drop_blk->bp,
XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
INT_MOD(node->hdr.count, ARCH_CONVERT, -1);
@@ -1049,7 +1049,7 @@ xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
{
btree = &save_node->btree[ INT_GET(drop_node->hdr.count, ARCH_CONVERT) ];
tmp = INT_GET(save_node->hdr.count, ARCH_CONVERT) * (uint)sizeof(xfs_da_node_entry_t);
- ovbcopy(&save_node->btree[0], btree, tmp);
+ memmove(btree, &save_node->btree[0], tmp);
btree = &save_node->btree[0];
xfs_da_log_buf(tp, save_blk->bp,
XFS_DA_LOGRANGE(save_node, btree,
@@ -1067,7 +1067,7 @@ xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
* Move all the B-tree elements from drop_blk to save_blk.
*/
tmp = INT_GET(drop_node->hdr.count, ARCH_CONVERT) * (uint)sizeof(xfs_da_node_entry_t);
- bcopy(&drop_node->btree[0], btree, tmp);
+ memcpy(btree, &drop_node->btree[0], tmp);
INT_MOD(save_node->hdr.count, ARCH_CONVERT, INT_GET(drop_node->hdr.count, ARCH_CONVERT));
xfs_da_log_buf(tp, save_blk->bp,
@@ -1798,7 +1798,7 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
/*
* Copy the last block into the dead buffer and log it.
*/
- bcopy(last_buf->data, dead_buf->data, mp->m_dirblksize);
+ memcpy(dead_buf->data, last_buf->data, mp->m_dirblksize);
xfs_da_log_buf(tp, dead_buf, 0, mp->m_dirblksize - 1);
dead_info = dead_buf->data;
/*
@@ -2343,7 +2343,7 @@ xfs_da_state_free(xfs_da_state_t *state)
if (state->extravalid && state->extrablk.bp)
xfs_da_buf_done(state->extrablk.bp);
#ifdef DEBUG
- bzero((char *)state, sizeof(*state));
+ memset((char *)state, 0, sizeof(*state));
#endif /* DEBUG */
kmem_zone_free(xfs_da_state_zone, state);
}
@@ -2390,7 +2390,7 @@ xfs_da_buf_make(int nbuf, xfs_buf_t **bps, inst_t *ra)
dabuf->data = kmem_alloc(BBTOB(dabuf->bbcount), KM_SLEEP);
for (i = off = 0; i < nbuf; i++, off += XFS_BUF_COUNT(bp)) {
bp = bps[i];
- bcopy(XFS_BUF_PTR(bp), (char *)dabuf->data + off,
+ memcpy((char *)dabuf->data + off, XFS_BUF_PTR(bp),
XFS_BUF_COUNT(bp));
}
}
@@ -2431,7 +2431,7 @@ xfs_da_buf_clean(xfs_dabuf_t *dabuf)
for (i = off = 0; i < dabuf->nbuf;
i++, off += XFS_BUF_COUNT(bp)) {
bp = dabuf->bps[i];
- bcopy((char *)dabuf->data + off, XFS_BUF_PTR(bp),
+ memcpy(XFS_BUF_PTR(bp), (char *)dabuf->data + off,
XFS_BUF_COUNT(bp));
}
}
@@ -2462,7 +2462,7 @@ xfs_da_buf_done(xfs_dabuf_t *dabuf)
dabuf->next->prev = dabuf->prev;
mutex_spinunlock(&xfs_dabuf_global_lock, s);
}
- bzero(dabuf, XFS_DA_BUF_SIZE(dabuf->nbuf));
+ memset(dabuf, 0, XFS_DA_BUF_SIZE(dabuf->nbuf));
#endif
if (dabuf->nbuf == 1)
kmem_zone_free(xfs_dabuf_zone, dabuf);
@@ -2532,7 +2532,7 @@ xfs_da_brelse(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
bp = dabuf->bps[0];
} else {
bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP);
- bcopy(dabuf->bps, bplist, nbuf * sizeof(*bplist));
+ memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist));
}
xfs_da_buf_done(dabuf);
for (i = 0; i < nbuf; i++)
@@ -2558,7 +2558,7 @@ xfs_da_binval(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
bp = dabuf->bps[0];
} else {
bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP);
- bcopy(dabuf->bps, bplist, nbuf * sizeof(*bplist));
+ memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist));
}
xfs_da_buf_done(dabuf);
for (i = 0; i < nbuf; i++)
diff --git a/fs/xfs/xfs_dir.c b/fs/xfs/xfs_dir.c
index ea2f5798cf70..ad3ecd88f1a6 100644
--- a/fs/xfs/xfs_dir.c
+++ b/fs/xfs/xfs_dir.c
@@ -210,7 +210,7 @@ xfs_dir_init(xfs_trans_t *trans, xfs_inode_t *dir, xfs_inode_t *parent_dir)
xfs_da_args_t args;
int error;
- bzero((char *)&args, sizeof(args));
+ memset((char *)&args, 0, sizeof(args));
args.dp = dir;
args.trans = trans;
@@ -534,7 +534,7 @@ xfs_dir_shortform_validate_ondisk(xfs_mount_t *mp, xfs_dinode_t *dp)
return 0;
}
if (INT_GET(dp->di_core.di_size, ARCH_CONVERT) < sizeof(sf->hdr)) {
- xfs_fs_cmn_err(CE_WARN, mp, "Invalid shortform size: dp 0x%p\n",
+ xfs_fs_cmn_err(CE_WARN, mp, "Invalid shortform size: dp 0x%p",
dp);
return 1;
}
@@ -546,7 +546,7 @@ xfs_dir_shortform_validate_ondisk(xfs_mount_t *mp, xfs_dinode_t *dp)
count = sf->hdr.count;
if ((count < 0) || ((count * 10) > XFS_LITINO(mp))) {
xfs_fs_cmn_err(CE_WARN, mp,
- "Invalid shortform count: dp 0x%p\n", dp);
+ "Invalid shortform count: dp 0x%p", dp);
return(1);
}
@@ -561,7 +561,7 @@ xfs_dir_shortform_validate_ondisk(xfs_mount_t *mp, xfs_dinode_t *dp)
xfs_dir_ino_validate(mp, ino);
if (sfe->namelen >= XFS_LITINO(mp)) {
xfs_fs_cmn_err(CE_WARN, mp,
- "Invalid shortform namelen: dp 0x%p\n", dp);
+ "Invalid shortform namelen: dp 0x%p", dp);
return 1;
}
namelen_sum += sfe->namelen;
@@ -569,7 +569,7 @@ xfs_dir_shortform_validate_ondisk(xfs_mount_t *mp, xfs_dinode_t *dp)
}
if (namelen_sum >= XFS_LITINO(mp)) {
xfs_fs_cmn_err(CE_WARN, mp,
- "Invalid shortform namelen: dp 0x%p\n", dp);
+ "Invalid shortform namelen: dp 0x%p", dp);
return 1;
}
diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/xfs_dir2.c
index 1ee94f626cc1..403117dd28ff 100644
--- a/fs/xfs/xfs_dir2.c
+++ b/fs/xfs/xfs_dir2.c
@@ -147,7 +147,7 @@ xfs_dir2_init(
xfs_da_args_t args; /* operation arguments */
int error; /* error return value */
- bzero((char *)&args, sizeof(args));
+ memset((char *)&args, 0, sizeof(args));
args.dp = dp;
args.trans = tp;
ASSERT((dp->i_d.di_mode & IFMT) == IFDIR);
@@ -711,7 +711,7 @@ xfs_dir2_put_dirent64_direct(
idbp->d_off = pa->cook;
idbp->d_name[namelen] = '\0';
pa->done = 1;
- bcopy(pa->name, idbp->d_name, namelen);
+ memcpy(idbp->d_name, pa->name, namelen);
return 0;
}
@@ -743,7 +743,7 @@ xfs_dir2_put_dirent64_uio(
idbp->d_ino = pa->ino;
idbp->d_off = pa->cook;
idbp->d_name[namelen] = '\0';
- bcopy(pa->name, idbp->d_name, namelen);
+ memcpy(idbp->d_name, pa->name, namelen);
rval = uiomove((caddr_t)idbp, reclen, UIO_READ, uio);
pa->done = (rval == 0);
return rval;
diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c
index de56814d5d25..3756923e8740 100644
--- a/fs/xfs/xfs_dir2_block.c
+++ b/fs/xfs/xfs_dir2_block.c
@@ -331,7 +331,7 @@ xfs_dir2_block_addname(
blp--;
mid++;
if (mid)
- ovbcopy(&blp[1], blp, mid * sizeof(*blp));
+ memmove(blp, &blp[1], mid * sizeof(*blp));
lfloglow = 0;
lfloghigh = mid;
}
@@ -357,7 +357,7 @@ xfs_dir2_block_addname(
(highstale == INT_GET(btp->count, ARCH_CONVERT) ||
mid - lowstale <= highstale - mid)) {
if (mid - lowstale)
- ovbcopy(&blp[lowstale + 1], &blp[lowstale],
+ memmove(&blp[lowstale], &blp[lowstale + 1],
(mid - lowstale) * sizeof(*blp));
lfloglow = MIN(lowstale, lfloglow);
lfloghigh = MAX(mid, lfloghigh);
@@ -369,7 +369,7 @@ xfs_dir2_block_addname(
ASSERT(highstale < INT_GET(btp->count, ARCH_CONVERT));
mid++;
if (highstale - mid)
- ovbcopy(&blp[mid], &blp[mid + 1],
+ memmove(&blp[mid + 1], &blp[mid],
(highstale - mid) * sizeof(*blp));
lfloglow = MIN(mid, lfloglow);
lfloghigh = MAX(highstale, lfloghigh);
@@ -397,7 +397,7 @@ xfs_dir2_block_addname(
*/
INT_SET(dep->inumber, ARCH_CONVERT, args->inumber);
dep->namelen = args->namelen;
- bcopy(args->name, dep->name, args->namelen);
+ memcpy(dep->name, args->name, args->namelen);
tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep);
INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)block));
/*
@@ -717,7 +717,7 @@ xfs_dir2_block_lookup_int(
*/
if (dep->namelen == args->namelen &&
dep->name[0] == args->name[0] &&
- bcmp(dep->name, args->name, args->namelen) == 0) {
+ memcmp(dep->name, args->name, args->namelen) == 0) {
*bpp = bp;
*entno = mid;
return 0;
@@ -1075,7 +1075,7 @@ xfs_dir2_sf_to_block(
buf_len = dp->i_df.if_bytes;
buf = kmem_alloc(dp->i_df.if_bytes, KM_SLEEP);
- bcopy(sfp, buf, dp->i_df.if_bytes);
+ memcpy(buf, sfp, dp->i_df.if_bytes);
xfs_idata_realloc(dp, -dp->i_df.if_bytes, XFS_DATA_FORK);
dp->i_d.di_size = 0;
xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
@@ -1199,7 +1199,7 @@ xfs_dir2_sf_to_block(
INT_SET(dep->inumber, ARCH_CONVERT, XFS_DIR2_SF_GET_INUMBER_ARCH(sfp,
XFS_DIR2_SF_INUMBERP(sfep), ARCH_CONVERT));
dep->namelen = sfep->namelen;
- bcopy(sfep->name, dep->name, dep->namelen);
+ memcpy(dep->name, sfep->name, dep->namelen);
tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep);
INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)block));
xfs_dir2_data_log_entry(tp, bp, dep);
diff --git a/fs/xfs/xfs_dir2_data.c b/fs/xfs/xfs_dir2_data.c
index 7ea956729cf1..7481245193d4 100644
--- a/fs/xfs/xfs_dir2_data.c
+++ b/fs/xfs/xfs_dir2_data.c
@@ -340,7 +340,7 @@ xfs_dir2_data_freescan(
/*
* Start by clearing the table.
*/
- bzero(d->hdr.bestfree, sizeof(d->hdr.bestfree));
+ memset(d->hdr.bestfree, 0, sizeof(d->hdr.bestfree));
*loghead = 1;
/*
* Set up pointers.
diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c
index c201111f7339..7ab474f6a3a3 100644
--- a/fs/xfs/xfs_dir2_leaf.c
+++ b/fs/xfs/xfs_dir2_leaf.c
@@ -110,7 +110,7 @@ xfs_dir2_block_to_leaf(
* Could compact these but I think we always do the conversion
* after squeezing out stale entries.
*/
- bcopy(blp, leaf->ents, INT_GET(btp->count, ARCH_CONVERT) * sizeof(xfs_dir2_leaf_entry_t));
+ memcpy(leaf->ents, blp, INT_GET(btp->count, ARCH_CONVERT) * sizeof(xfs_dir2_leaf_entry_t));
xfs_dir2_leaf_log_ents(tp, lbp, 0, INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1);
needscan = 0;
needlog = 1;
@@ -353,7 +353,7 @@ xfs_dir2_leaf_addname(
*/
if (use_block >= INT_GET(ltp->bestcount, ARCH_CONVERT)) {
bestsp--;
- ovbcopy(&bestsp[1], &bestsp[0],
+ memmove(&bestsp[0], &bestsp[1],
INT_GET(ltp->bestcount, ARCH_CONVERT) * sizeof(bestsp[0]));
INT_MOD(ltp->bestcount, ARCH_CONVERT, +1);
xfs_dir2_leaf_log_tail(tp, lbp);
@@ -402,7 +402,7 @@ xfs_dir2_leaf_addname(
dep = (xfs_dir2_data_entry_t *)dup;
INT_SET(dep->inumber, ARCH_CONVERT, args->inumber);
dep->namelen = args->namelen;
- bcopy(args->name, dep->name, dep->namelen);
+ memcpy(dep->name, args->name, dep->namelen);
tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep);
INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)data));
/*
@@ -434,7 +434,7 @@ xfs_dir2_leaf_addname(
* lep is still good as the index leaf entry.
*/
if (index < INT_GET(leaf->hdr.count, ARCH_CONVERT))
- ovbcopy(lep, lep + 1,
+ memmove(lep + 1, lep,
(INT_GET(leaf->hdr.count, ARCH_CONVERT) - index) * sizeof(*lep));
/*
* Record low and high logging indices for the leaf.
@@ -493,8 +493,8 @@ xfs_dir2_leaf_addname(
* and make room for the new entry.
*/
if (index - lowstale - 1 > 0)
- ovbcopy(&leaf->ents[lowstale + 1],
- &leaf->ents[lowstale],
+ memmove(&leaf->ents[lowstale],
+ &leaf->ents[lowstale + 1],
(index - lowstale - 1) * sizeof(*lep));
lep = &leaf->ents[index - 1];
lfloglow = MIN(lowstale, lfloglow);
@@ -512,8 +512,8 @@ xfs_dir2_leaf_addname(
* and make room for the new entry.
*/
if (highstale - index > 0)
- ovbcopy(&leaf->ents[index],
- &leaf->ents[index + 1],
+ memmove(&leaf->ents[index + 1],
+ &leaf->ents[index],
(highstale - index) * sizeof(*lep));
lep = &leaf->ents[index];
lfloglow = MIN(index, lfloglow);
@@ -847,7 +847,7 @@ xfs_dir2_leaf_getdents(
* the table.
*/
if (!map->br_blockcount && --map_valid)
- ovbcopy(&map[1], &map[0],
+ memmove(&map[0], &map[1],
sizeof(map[0]) *
map_valid);
i -= j;
@@ -909,8 +909,8 @@ xfs_dir2_leaf_getdents(
nmap--;
length = map_valid + nmap - i;
if (length)
- ovbcopy(&map[i + 1],
- &map[i],
+ memmove(&map[i],
+ &map[i + 1],
sizeof(map[i]) *
length);
} else {
@@ -1409,7 +1409,7 @@ xfs_dir2_leaf_lookup_int(
*/
if (dep->namelen == args->namelen &&
dep->name[0] == args->name[0] &&
- bcmp(dep->name, args->name, args->namelen) == 0) {
+ memcmp(dep->name, args->name, args->namelen) == 0) {
*dbpp = dbp;
*indexp = index;
return 0;
@@ -1544,7 +1544,7 @@ xfs_dir2_leaf_removename(
* Copy the table down so inactive entries at the
* end are removed.
*/
- ovbcopy(bestsp, &bestsp[db - i],
+ memmove(&bestsp[db - i], bestsp,
(INT_GET(ltp->bestcount, ARCH_CONVERT) - (db - i)) * sizeof(*bestsp));
INT_MOD(ltp->bestcount, ARCH_CONVERT, -(db - i));
xfs_dir2_leaf_log_tail(tp, lbp);
@@ -1728,7 +1728,7 @@ xfs_dir2_leaf_trim_data(
*/
bestsp = XFS_DIR2_LEAF_BESTS_P_ARCH(ltp, ARCH_CONVERT);
INT_MOD(ltp->bestcount, ARCH_CONVERT, -1);
- ovbcopy(&bestsp[0], &bestsp[1], INT_GET(ltp->bestcount, ARCH_CONVERT) * sizeof(*bestsp));
+ memmove(&bestsp[1], &bestsp[0], INT_GET(ltp->bestcount, ARCH_CONVERT) * sizeof(*bestsp));
xfs_dir2_leaf_log_tail(tp, lbp);
xfs_dir2_leaf_log_bests(tp, lbp, 0, INT_GET(ltp->bestcount, ARCH_CONVERT) - 1);
return 0;
@@ -1842,7 +1842,7 @@ xfs_dir2_node_to_leaf(
/*
* Set up the leaf bests table.
*/
- bcopy(free->bests, XFS_DIR2_LEAF_BESTS_P_ARCH(ltp, ARCH_CONVERT),
+ memcpy(XFS_DIR2_LEAF_BESTS_P_ARCH(ltp, ARCH_CONVERT), free->bests,
INT_GET(ltp->bestcount, ARCH_CONVERT) * sizeof(leaf->bests[0]));
xfs_dir2_leaf_log_bests(tp, lbp, 0, INT_GET(ltp->bestcount, ARCH_CONVERT) - 1);
xfs_dir2_leaf_log_tail(tp, lbp);
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c
index 99661539e595..54234b40ed6b 100644
--- a/fs/xfs/xfs_dir2_node.c
+++ b/fs/xfs/xfs_dir2_node.c
@@ -239,7 +239,7 @@ xfs_dir2_leafn_add(
if (INT_ISZERO(leaf->hdr.stale, ARCH_CONVERT)) {
lep = &leaf->ents[index];
if (index < INT_GET(leaf->hdr.count, ARCH_CONVERT))
- ovbcopy(lep, lep + 1,
+ memmove(lep + 1, lep,
(INT_GET(leaf->hdr.count, ARCH_CONVERT) - index) * sizeof(*lep));
lfloglow = index;
lfloghigh = INT_GET(leaf->hdr.count, ARCH_CONVERT);
@@ -288,8 +288,8 @@ xfs_dir2_leafn_add(
XFS_DIR2_NULL_DATAPTR);
ASSERT(index - lowstale - 1 >= 0);
if (index - lowstale - 1 > 0)
- ovbcopy(&leaf->ents[lowstale + 1],
- &leaf->ents[lowstale],
+ memmove(&leaf->ents[lowstale],
+ &leaf->ents[lowstale + 1],
(index - lowstale - 1) * sizeof(*lep));
lep = &leaf->ents[index - 1];
lfloglow = MIN(lowstale, lfloglow);
@@ -304,8 +304,8 @@ xfs_dir2_leafn_add(
XFS_DIR2_NULL_DATAPTR);
ASSERT(highstale - index >= 0);
if (highstale - index > 0)
- ovbcopy(&leaf->ents[index],
- &leaf->ents[index + 1],
+ memmove(&leaf->ents[index + 1],
+ &leaf->ents[index],
(highstale - index) * sizeof(*lep));
lep = &leaf->ents[index];
lfloglow = MIN(index, lfloglow);
@@ -564,7 +564,7 @@ xfs_dir2_leafn_lookup_int(
*/
if (dep->namelen == args->namelen &&
dep->name[0] == args->name[0] &&
- bcmp(dep->name, args->name, args->namelen) == 0) {
+ memcmp(dep->name, args->name, args->namelen) == 0) {
args->inumber = INT_GET(dep->inumber, ARCH_CONVERT);
*indexp = index;
state->extravalid = 1;
@@ -644,7 +644,7 @@ xfs_dir2_leafn_moveents(
* to hold the new entries.
*/
if (start_d < INT_GET(leaf_d->hdr.count, ARCH_CONVERT)) {
- ovbcopy(&leaf_d->ents[start_d], &leaf_d->ents[start_d + count],
+ memmove(&leaf_d->ents[start_d + count], &leaf_d->ents[start_d],
(INT_GET(leaf_d->hdr.count, ARCH_CONVERT) - start_d) *
sizeof(xfs_dir2_leaf_entry_t));
xfs_dir2_leaf_log_ents(tp, bp_d, start_d + count,
@@ -666,7 +666,7 @@ xfs_dir2_leafn_moveents(
/*
* Copy the leaf entries from source to destination.
*/
- bcopy(&leaf_s->ents[start_s], &leaf_d->ents[start_d],
+ memcpy(&leaf_d->ents[start_d], &leaf_s->ents[start_s],
count * sizeof(xfs_dir2_leaf_entry_t));
xfs_dir2_leaf_log_ents(tp, bp_d, start_d, start_d + count - 1);
/*
@@ -674,7 +674,7 @@ xfs_dir2_leafn_moveents(
* delete the ones we copied by sliding the next ones down.
*/
if (start_s + count < INT_GET(leaf_s->hdr.count, ARCH_CONVERT)) {
- ovbcopy(&leaf_s->ents[start_s + count], &leaf_s->ents[start_s],
+ memmove(&leaf_s->ents[start_s], &leaf_s->ents[start_s + count],
count * sizeof(xfs_dir2_leaf_entry_t));
xfs_dir2_leaf_log_ents(tp, bp_s, start_s, start_s + count - 1);
}
@@ -1135,7 +1135,7 @@ xfs_dir2_leafn_toosmall(
* path point to the block we want to drop (this one).
*/
forward = !INT_ISZERO(info->forw, ARCH_CONVERT);
- bcopy(&state->path, &state->altpath, sizeof(state->path));
+ memcpy(&state->altpath, &state->path, sizeof(state->path));
error = xfs_da_path_shift(state, &state->altpath, forward, 0,
&rval);
if (error)
@@ -1197,7 +1197,7 @@ xfs_dir2_leafn_toosmall(
* Make altpath point to the block we want to keep (the lower
* numbered block) and path point to the block we want to drop.
*/
- bcopy(&state->path, &state->altpath, sizeof(state->path));
+ memcpy(&state->altpath, &state->path, sizeof(state->path));
if (blkno < blk->blkno)
error = xfs_da_path_shift(state, &state->altpath, forward, 0,
&rval);
@@ -1685,7 +1685,7 @@ xfs_dir2_node_addname_int(
dep = (xfs_dir2_data_entry_t *)dup;
INT_SET(dep->inumber, ARCH_CONVERT, args->inumber);
dep->namelen = args->namelen;
- bcopy(args->name, dep->name, dep->namelen);
+ memcpy(dep->name, args->name, dep->namelen);
tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep);
INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)data));
xfs_dir2_data_log_entry(tp, dbp, dep);
diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
index c1d2d3d9b2c2..aaba9972bd57 100644
--- a/fs/xfs/xfs_dir2_sf.c
+++ b/fs/xfs/xfs_dir2_sf.c
@@ -176,7 +176,7 @@ xfs_dir2_block_to_sf(
* and add local data.
*/
block = kmem_alloc(mp->m_dirblksize, KM_SLEEP);
- bcopy(bp->data, block, mp->m_dirblksize);
+ memcpy(block, bp->data, mp->m_dirblksize);
logflags = XFS_ILOG_CORE;
if ((error = xfs_dir2_shrink_inode(args, mp->m_dirdatablk, bp))) {
ASSERT(error != ENOSPC);
@@ -198,7 +198,7 @@ xfs_dir2_block_to_sf(
* Copy the header into the newly allocate local space.
*/
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
- bcopy(sfhp, sfp, XFS_DIR2_SF_HDR_SIZE(sfhp->i8count));
+ memcpy(sfp, sfhp, XFS_DIR2_SF_HDR_SIZE(sfhp->i8count));
dp->i_d.di_size = size;
/*
* Set up to loop over the block's entries.
@@ -241,7 +241,7 @@ xfs_dir2_block_to_sf(
XFS_DIR2_SF_PUT_OFFSET_ARCH(sfep,
(xfs_dir2_data_aoff_t)
((char *)dep - (char *)block), ARCH_CONVERT);
- bcopy(dep->name, sfep->name, dep->namelen);
+ memcpy(sfep->name, dep->name, dep->namelen);
temp=INT_GET(dep->inumber, ARCH_CONVERT);
XFS_DIR2_SF_PUT_INUMBER_ARCH(sfp, &temp,
XFS_DIR2_SF_INUMBERP(sfep), ARCH_CONVERT);
@@ -405,7 +405,7 @@ xfs_dir2_sf_addname_easy(
*/
sfep->namelen = args->namelen;
XFS_DIR2_SF_PUT_OFFSET_ARCH(sfep, offset, ARCH_CONVERT);
- bcopy(args->name, sfep->name, sfep->namelen);
+ memcpy(sfep->name, args->name, sfep->namelen);
XFS_DIR2_SF_PUT_INUMBER_ARCH(sfp, &args->inumber,
XFS_DIR2_SF_INUMBERP(sfep), ARCH_CONVERT);
/*
@@ -457,7 +457,7 @@ xfs_dir2_sf_addname_hard(
old_isize = (int)dp->i_d.di_size;
buf = kmem_alloc(old_isize, KM_SLEEP);
oldsfp = (xfs_dir2_sf_t *)buf;
- bcopy(sfp, oldsfp, old_isize);
+ memcpy(oldsfp, sfp, old_isize);
/*
* Loop over the old directory finding the place we're going
* to insert the new entry.
@@ -490,14 +490,14 @@ xfs_dir2_sf_addname_hard(
* Copy the first part of the directory, including the header.
*/
nbytes = (int)((char *)oldsfep - (char *)oldsfp);
- bcopy(oldsfp, sfp, nbytes);
+ memcpy(sfp, oldsfp, nbytes);
sfep = (xfs_dir2_sf_entry_t *)((char *)sfp + nbytes);
/*
* Fill in the new entry, and update the header counts.
*/
sfep->namelen = args->namelen;
XFS_DIR2_SF_PUT_OFFSET_ARCH(sfep, offset, ARCH_CONVERT);
- bcopy(args->name, sfep->name, sfep->namelen);
+ memcpy(sfep->name, args->name, sfep->namelen);
XFS_DIR2_SF_PUT_INUMBER_ARCH(sfp, &args->inumber,
XFS_DIR2_SF_INUMBERP(sfep), ARCH_CONVERT);
sfp->hdr.count++;
@@ -510,7 +510,7 @@ xfs_dir2_sf_addname_hard(
*/
if (!eof) {
sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep);
- bcopy(oldsfep, sfep, old_isize - nbytes);
+ memcpy(sfep, oldsfep, old_isize - nbytes);
}
kmem_free(buf, old_isize);
dp->i_d.di_size = new_isize;
@@ -916,7 +916,7 @@ xfs_dir2_sf_lookup(
i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep)) {
if (sfep->namelen == args->namelen &&
sfep->name[0] == args->name[0] &&
- bcmp(args->name, sfep->name, args->namelen) == 0) {
+ memcmp(args->name, sfep->name, args->namelen) == 0) {
args->inumber =
XFS_DIR2_SF_GET_INUMBER_ARCH(sfp,
XFS_DIR2_SF_INUMBERP(sfep), ARCH_CONVERT);
@@ -971,7 +971,7 @@ xfs_dir2_sf_removename(
i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep)) {
if (sfep->namelen == args->namelen &&
sfep->name[0] == args->name[0] &&
- bcmp(sfep->name, args->name, args->namelen) == 0) {
+ memcmp(sfep->name, args->name, args->namelen) == 0) {
ASSERT(XFS_DIR2_SF_GET_INUMBER_ARCH(sfp,
XFS_DIR2_SF_INUMBERP(sfep), ARCH_CONVERT) ==
args->inumber);
@@ -994,7 +994,7 @@ xfs_dir2_sf_removename(
* Copy the part if any after the removed entry, sliding it down.
*/
if (byteoff + entsize < oldsize)
- ovbcopy((char *)sfp + byteoff + entsize, (char *)sfp + byteoff,
+ memmove((char *)sfp + byteoff, (char *)sfp + byteoff + entsize,
oldsize - (byteoff + entsize));
/*
* Fix up the header and file size.
@@ -1108,7 +1108,7 @@ xfs_dir2_sf_replace(
i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep)) {
if (sfep->namelen == args->namelen &&
sfep->name[0] == args->name[0] &&
- bcmp(args->name, sfep->name, args->namelen) == 0) {
+ memcmp(args->name, sfep->name, args->namelen) == 0) {
#if XFS_BIG_FILESYSTEMS || defined(DEBUG)
ino = XFS_DIR2_SF_GET_INUMBER_ARCH(sfp,
XFS_DIR2_SF_INUMBERP(sfep), ARCH_CONVERT);
@@ -1196,7 +1196,7 @@ xfs_dir2_sf_toino4(
buf = kmem_alloc(oldsize, KM_SLEEP);
oldsfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
ASSERT(oldsfp->hdr.i8count == 1);
- bcopy(oldsfp, buf, oldsize);
+ memcpy(buf, oldsfp, oldsize);
/*
* Compute the new inode size.
*/
@@ -1228,7 +1228,7 @@ xfs_dir2_sf_toino4(
oldsfep = XFS_DIR2_SF_NEXTENTRY(oldsfp, oldsfep)) {
sfep->namelen = oldsfep->namelen;
sfep->offset = oldsfep->offset;
- bcopy(oldsfep->name, sfep->name, sfep->namelen);
+ memcpy(sfep->name, oldsfep->name, sfep->namelen);
ino = XFS_DIR2_SF_GET_INUMBER_ARCH(oldsfp,
XFS_DIR2_SF_INUMBERP(oldsfep), ARCH_CONVERT);
XFS_DIR2_SF_PUT_INUMBER_ARCH(sfp, &ino, XFS_DIR2_SF_INUMBERP(sfep), ARCH_CONVERT);
@@ -1273,7 +1273,7 @@ xfs_dir2_sf_toino8(
buf = kmem_alloc(oldsize, KM_SLEEP);
oldsfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
ASSERT(oldsfp->hdr.i8count == 0);
- bcopy(oldsfp, buf, oldsize);
+ memcpy(buf, oldsfp, oldsize);
/*
* Compute the new inode size.
*/
@@ -1305,7 +1305,7 @@ xfs_dir2_sf_toino8(
oldsfep = XFS_DIR2_SF_NEXTENTRY(oldsfp, oldsfep)) {
sfep->namelen = oldsfep->namelen;
sfep->offset = oldsfep->offset;
- bcopy(oldsfep->name, sfep->name, sfep->namelen);
+ memcpy(sfep->name, oldsfep->name, sfep->namelen);
ino = XFS_DIR2_SF_GET_INUMBER_ARCH(oldsfp,
XFS_DIR2_SF_INUMBERP(oldsfep), ARCH_CONVERT);
XFS_DIR2_SF_PUT_INUMBER_ARCH(sfp, &ino, XFS_DIR2_SF_INUMBERP(sfep), ARCH_CONVERT);
diff --git a/fs/xfs/xfs_dir2_sf.h b/fs/xfs/xfs_dir2_sf.h
index 576c19f17c68..7ee59c6107b3 100644
--- a/fs/xfs/xfs_dir2_sf.h
+++ b/fs/xfs/xfs_dir2_sf.h
@@ -93,7 +93,7 @@ typedef struct { __uint8_t i[2]; } xfs_dir2_sf_off_t;
* be calculated on the fly.
*
* Entries are packed toward the top as tightly as possible. The header
- * and the elements must be bcopy()'d out into a work area to get correct
+ * and the elements must be memcpy'd out into a work area to get correct
* alignment for the inode number fields.
*/
typedef struct xfs_dir2_sf_hdr {
diff --git a/fs/xfs/xfs_dir2_trace.c b/fs/xfs/xfs_dir2_trace.c
index fe9280e1f427..3e517abee119 100644
--- a/fs/xfs/xfs_dir2_trace.c
+++ b/fs/xfs/xfs_dir2_trace.c
@@ -65,9 +65,9 @@ xfs_dir2_trace_enter(
ASSERT(xfs_dir2_trace_buf);
ASSERT(dp->i_dir_trace);
if (name)
- bcopy(name, n, min(sizeof(n), namelen));
+ memcpy(n, name, min(sizeof(n), namelen));
else
- bzero((char *)n, sizeof(n));
+ memset((char *)n, 0, sizeof(n));
ktrace_enter(xfs_dir2_trace_buf,
(void *)(__psunsigned_t)type, (void *)where,
(void *)a0, (void *)a1, (void *)a2, (void *)a3,
diff --git a/fs/xfs/xfs_dir_leaf.c b/fs/xfs/xfs_dir_leaf.c
index 1aceaf37693d..d689c169f6e7 100644
--- a/fs/xfs/xfs_dir_leaf.c
+++ b/fs/xfs/xfs_dir_leaf.c
@@ -105,7 +105,7 @@ xfs_dir_ino_validate(xfs_mount_t *mp, xfs_ino_t ino)
XFS_AGINO_TO_INO(mp, agno, agino) == ino;
if (XFS_TEST_ERROR(!ino_ok, mp, XFS_ERRTAG_DIR_INO_VALIDATE,
XFS_RANDOM_DIR_INO_VALIDATE)) {
- xfs_fs_cmn_err(CE_WARN, mp, "Invalid inode number 0x%Lx\n",
+ xfs_fs_cmn_err(CE_WARN, mp, "Invalid inode number 0x%Lx",
(unsigned long long) ino);
return XFS_ERROR(EFSCORRUPTED);
}
@@ -171,7 +171,7 @@ xfs_dir_shortform_addname(xfs_da_args_t *args)
for (i = INT_GET(sf->hdr.count, ARCH_CONVERT)-1; i >= 0; i--) {
if (sfe->namelen == args->namelen &&
args->name[0] == sfe->name[0] &&
- bcmp(args->name, sfe->name, args->namelen) == 0)
+ memcmp(args->name, sfe->name, args->namelen) == 0)
return(XFS_ERROR(EEXIST));
sfe = XFS_DIR_SF_NEXTENTRY(sfe);
}
@@ -184,7 +184,7 @@ xfs_dir_shortform_addname(xfs_da_args_t *args)
XFS_DIR_SF_PUT_DIRINO_ARCH(&args->inumber, &sfe->inumber, ARCH_CONVERT);
sfe->namelen = args->namelen;
- bcopy(args->name, sfe->name, sfe->namelen);
+ memcpy(sfe->name, args->name, sfe->namelen);
INT_MOD(sf->hdr.count, ARCH_CONVERT, +1);
dp->i_d.di_size += size;
@@ -223,7 +223,7 @@ xfs_dir_shortform_removename(xfs_da_args_t *args)
size = XFS_DIR_SF_ENTSIZE_BYENTRY(sfe);
if (sfe->namelen == args->namelen &&
sfe->name[0] == args->name[0] &&
- bcmp(sfe->name, args->name, args->namelen) == 0)
+ memcmp(sfe->name, args->name, args->namelen) == 0)
break;
base += size;
sfe = XFS_DIR_SF_NEXTENTRY(sfe);
@@ -234,7 +234,7 @@ xfs_dir_shortform_removename(xfs_da_args_t *args)
}
if ((base + size) != dp->i_d.di_size) {
- ovbcopy(&((char *)sf)[base+size], &((char *)sf)[base],
+ memmove(&((char *)sf)[base], &((char *)sf)[base+size],
dp->i_d.di_size - (base+size));
}
INT_MOD(sf->hdr.count, ARCH_CONVERT, -1);
@@ -283,7 +283,7 @@ xfs_dir_shortform_lookup(xfs_da_args_t *args)
for (i = INT_GET(sf->hdr.count, ARCH_CONVERT)-1; i >= 0; i--) {
if (sfe->namelen == args->namelen &&
sfe->name[0] == args->name[0] &&
- bcmp(args->name, sfe->name, args->namelen) == 0) {
+ memcmp(args->name, sfe->name, args->namelen) == 0) {
XFS_DIR_SF_GET_DIRINO_ARCH(&sfe->inumber, &args->inumber, ARCH_CONVERT);
return(XFS_ERROR(EEXIST));
}
@@ -324,7 +324,7 @@ xfs_dir_shortform_to_leaf(xfs_da_args_t *iargs)
tmpbuffer = kmem_alloc(size, KM_SLEEP);
ASSERT(tmpbuffer != NULL);
- bcopy(dp->i_df.if_u1.if_data, tmpbuffer, size);
+ memcpy(tmpbuffer, dp->i_df.if_u1.if_data, size);
sf = (xfs_dir_shortform_t *)tmpbuffer;
XFS_DIR_SF_GET_DIRINO_ARCH(&sf->hdr.parent, &inumber, ARCH_CONVERT);
@@ -611,8 +611,8 @@ xfs_dir_shortform_replace(xfs_da_args_t *args)
for (i = INT_GET(sf->hdr.count, ARCH_CONVERT)-1; i >= 0; i--) {
if (sfe->namelen == args->namelen &&
sfe->name[0] == args->name[0] &&
- bcmp(args->name, sfe->name, args->namelen) == 0) {
- ASSERT(bcmp((char *)&args->inumber,
+ memcmp(args->name, sfe->name, args->namelen) == 0) {
+ ASSERT(memcmp((char *)&args->inumber,
(char *)&sfe->inumber, sizeof(xfs_ino_t)));
XFS_DIR_SF_PUT_DIRINO_ARCH(&args->inumber, &sfe->inumber, ARCH_CONVERT);
xfs_trans_log_inode(args->trans, dp, XFS_ILOG_DDATA);
@@ -650,10 +650,10 @@ xfs_dir_leaf_to_shortform(xfs_da_args_t *iargs)
if (retval)
return(retval);
ASSERT(bp != NULL);
- bcopy(bp->data, tmpbuffer, XFS_LBSIZE(dp->i_mount));
+ memcpy(tmpbuffer, bp->data, XFS_LBSIZE(dp->i_mount));
leaf = (xfs_dir_leafblock_t *)tmpbuffer;
ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC);
- bzero(bp->data, XFS_LBSIZE(dp->i_mount));
+ memset(bp->data, 0, XFS_LBSIZE(dp->i_mount));
/*
* Find and special case the parent inode number
@@ -736,7 +736,7 @@ xfs_dir_leaf_to_node(xfs_da_args_t *args)
return(retval);
}
ASSERT(bp2 != NULL);
- bcopy(bp1->data, bp2->data, XFS_LBSIZE(dp->i_mount));
+ memcpy(bp2->data, bp1->data, XFS_LBSIZE(dp->i_mount));
xfs_da_buf_done(bp1);
xfs_da_log_buf(args->trans, bp2, 0, XFS_LBSIZE(dp->i_mount) - 1);
@@ -787,7 +787,7 @@ xfs_dir_leaf_create(xfs_da_args_t *args, xfs_dablk_t blkno, xfs_dabuf_t **bpp)
return(retval);
ASSERT(bp != NULL);
leaf = bp->data;
- bzero((char *)leaf, XFS_LBSIZE(dp->i_mount));
+ memset((char *)leaf, 0, XFS_LBSIZE(dp->i_mount));
hdr = &leaf->hdr;
INT_SET(hdr->info.magic, ARCH_CONVERT, XFS_DIR_LEAF_MAGIC);
INT_SET(hdr->firstused, ARCH_CONVERT, XFS_LBSIZE(dp->i_mount));
@@ -960,7 +960,7 @@ xfs_dir_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int index,
if (index < INT_GET(hdr->count, ARCH_CONVERT)) {
tmp = INT_GET(hdr->count, ARCH_CONVERT) - index;
tmp *= (uint)sizeof(xfs_dir_leaf_entry_t);
- ovbcopy(entry, entry + 1, tmp);
+ memmove(entry + 1, entry, tmp);
xfs_da_log_buf(args->trans, bp,
XFS_DA_LOGRANGE(leaf, entry, tmp + (uint)sizeof(*entry)));
}
@@ -986,7 +986,7 @@ xfs_dir_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int index,
*/
namest = XFS_DIR_LEAF_NAMESTRUCT(leaf, INT_GET(entry->nameidx, ARCH_CONVERT));
XFS_DIR_SF_PUT_DIRINO_ARCH(&args->inumber, &namest->inumber, ARCH_CONVERT);
- bcopy(args->name, namest->name, args->namelen);
+ memcpy(namest->name, args->name, args->namelen);
xfs_da_log_buf(args->trans, bp,
XFS_DA_LOGRANGE(leaf, namest, XFS_DIR_LEAF_ENTSIZE_BYENTRY(entry)));
@@ -1029,7 +1029,7 @@ xfs_dir_leaf_compact(xfs_trans_t *trans, xfs_dabuf_t *bp, int musthave,
lbsize = XFS_LBSIZE(mp);
tmpbuffer = kmem_alloc(lbsize, KM_SLEEP);
ASSERT(tmpbuffer != NULL);
- bcopy(bp->data, tmpbuffer, lbsize);
+ memcpy(tmpbuffer, bp->data, lbsize);
/*
* Make a second copy in case xfs_dir_leaf_moveents()
@@ -1037,9 +1037,9 @@ xfs_dir_leaf_compact(xfs_trans_t *trans, xfs_dabuf_t *bp, int musthave,
*/
if (musthave || justcheck) {
tmpbuffer2 = kmem_alloc(lbsize, KM_SLEEP);
- bcopy(bp->data, tmpbuffer2, lbsize);
+ memcpy(tmpbuffer2, bp->data, lbsize);
}
- bzero(bp->data, lbsize);
+ memset(bp->data, 0, lbsize);
/*
* Copy basic information
@@ -1072,7 +1072,7 @@ xfs_dir_leaf_compact(xfs_trans_t *trans, xfs_dabuf_t *bp, int musthave,
if (justcheck || rval == ENOSPC) {
ASSERT(tmpbuffer2);
- bcopy(tmpbuffer2, bp->data, lbsize);
+ memcpy(bp->data, tmpbuffer2, lbsize);
} else {
xfs_da_log_buf(trans, bp, 0, lbsize - 1);
}
@@ -1357,7 +1357,7 @@ xfs_dir_leaf_toosmall(xfs_da_state_t *state, int *action)
* path point to the block we want to drop (this one).
*/
forward = !INT_ISZERO(info->forw, ARCH_CONVERT);
- bcopy(&state->path, &state->altpath, sizeof(state->path));
+ memcpy(&state->altpath, &state->path, sizeof(state->path));
error = xfs_da_path_shift(state, &state->altpath, forward,
0, &retval);
if (error)
@@ -1418,7 +1418,7 @@ xfs_dir_leaf_toosmall(xfs_da_state_t *state, int *action)
* Make altpath point to the block we want to keep (the lower
* numbered block) and path point to the block we want to drop.
*/
- bcopy(&state->path, &state->altpath, sizeof(state->path));
+ memcpy(&state->altpath, &state->path, sizeof(state->path));
if (blkno < blk->blkno) {
error = xfs_da_path_shift(state, &state->altpath, forward,
0, &retval);
@@ -1538,17 +1538,17 @@ xfs_dir_leaf_remove(xfs_trans_t *trans, xfs_dabuf_t *bp, int index)
* Compress the remaining entries and zero out the removed stuff.
*/
namest = XFS_DIR_LEAF_NAMESTRUCT(leaf, INT_GET(entry->nameidx, ARCH_CONVERT));
- bzero((char *)namest, entsize);
+ memset((char *)namest, 0, entsize);
xfs_da_log_buf(trans, bp, XFS_DA_LOGRANGE(leaf, namest, entsize));
INT_MOD(hdr->namebytes, ARCH_CONVERT, -(entry->namelen));
tmp = (INT_GET(hdr->count, ARCH_CONVERT) - index) * (uint)sizeof(xfs_dir_leaf_entry_t);
- ovbcopy(entry + 1, entry, tmp);
+ memmove(entry, entry + 1, tmp);
INT_MOD(hdr->count, ARCH_CONVERT, -1);
xfs_da_log_buf(trans, bp,
XFS_DA_LOGRANGE(leaf, entry, tmp + (uint)sizeof(*entry)));
entry = &leaf->entries[INT_GET(hdr->count, ARCH_CONVERT)];
- bzero((char *)entry, sizeof(xfs_dir_leaf_entry_t));
+ memset((char *)entry, 0, sizeof(xfs_dir_leaf_entry_t));
/*
* If we removed the first entry, re-find the first used byte
@@ -1642,7 +1642,7 @@ xfs_dir_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
*/
tmpbuffer = kmem_alloc(state->blocksize, KM_SLEEP);
ASSERT(tmpbuffer != NULL);
- bzero(tmpbuffer, state->blocksize);
+ memset(tmpbuffer, 0, state->blocksize);
tmp_leaf = (xfs_dir_leafblock_t *)tmpbuffer;
tmp_hdr = &tmp_leaf->hdr;
tmp_hdr->info = save_hdr->info; /* struct copy */
@@ -1664,7 +1664,7 @@ xfs_dir_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
tmp_leaf, INT_GET(tmp_leaf->hdr.count, ARCH_CONVERT),
(int)INT_GET(drop_hdr->count, ARCH_CONVERT), mp);
}
- bcopy(tmp_leaf, save_leaf, state->blocksize);
+ memcpy(save_leaf, tmp_leaf, state->blocksize);
kmem_free(tmpbuffer, state->blocksize);
}
@@ -1750,7 +1750,7 @@ xfs_dir_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args, int *index)
namest = XFS_DIR_LEAF_NAMESTRUCT(leaf, INT_GET(entry->nameidx, ARCH_CONVERT));
if (entry->namelen == args->namelen &&
namest->name[0] == args->name[0] &&
- bcmp(args->name, namest->name, args->namelen) == 0) {
+ memcmp(args->name, namest->name, args->namelen) == 0) {
XFS_DIR_SF_GET_DIRINO_ARCH(&namest->inumber, &args->inumber, ARCH_CONVERT);
*index = probe;
return(XFS_ERROR(EEXIST));
@@ -1813,7 +1813,7 @@ xfs_dir_leaf_moveents(xfs_dir_leafblock_t *leaf_s, int start_s,
tmp *= (uint)sizeof(xfs_dir_leaf_entry_t);
entry_s = &leaf_d->entries[start_d];
entry_d = &leaf_d->entries[start_d + count];
- bcopy(entry_s, entry_d, tmp);
+ memcpy(entry_d, entry_s, tmp);
}
/*
@@ -1831,11 +1831,11 @@ xfs_dir_leaf_moveents(xfs_dir_leafblock_t *leaf_s, int start_s,
INT_COPY(entry_d->nameidx, hdr_d->firstused, ARCH_CONVERT);
entry_d->namelen = entry_s->namelen;
ASSERT(INT_GET(entry_d->nameidx, ARCH_CONVERT) + tmp <= XFS_LBSIZE(mp));
- bcopy(XFS_DIR_LEAF_NAMESTRUCT(leaf_s, INT_GET(entry_s->nameidx, ARCH_CONVERT)),
- XFS_DIR_LEAF_NAMESTRUCT(leaf_d, INT_GET(entry_d->nameidx, ARCH_CONVERT)), tmp);
+ memcpy(XFS_DIR_LEAF_NAMESTRUCT(leaf_d, INT_GET(entry_d->nameidx, ARCH_CONVERT)),
+ XFS_DIR_LEAF_NAMESTRUCT(leaf_s, INT_GET(entry_s->nameidx, ARCH_CONVERT)), tmp);
ASSERT(INT_GET(entry_s->nameidx, ARCH_CONVERT) + tmp <= XFS_LBSIZE(mp));
- bzero((char *)XFS_DIR_LEAF_NAMESTRUCT(leaf_s, INT_GET(entry_s->nameidx, ARCH_CONVERT)),
- tmp);
+ memset((char *)XFS_DIR_LEAF_NAMESTRUCT(leaf_s, INT_GET(entry_s->nameidx, ARCH_CONVERT)),
+ 0, tmp);
INT_MOD(hdr_s->namebytes, ARCH_CONVERT, -(entry_d->namelen));
INT_MOD(hdr_d->namebytes, ARCH_CONVERT, entry_d->namelen);
INT_MOD(hdr_s->count, ARCH_CONVERT, -1);
@@ -1853,7 +1853,7 @@ xfs_dir_leaf_moveents(xfs_dir_leafblock_t *leaf_s, int start_s,
tmp = count * (uint)sizeof(xfs_dir_leaf_entry_t);
entry_s = &leaf_s->entries[start_s];
ASSERT((char *)entry_s + tmp <= (char *)leaf_s + XFS_LBSIZE(mp));
- bzero((char *)entry_s, tmp);
+ memset((char *)entry_s, 0, tmp);
} else {
/*
* Move the remaining entries down to fill the hole,
@@ -1863,12 +1863,12 @@ xfs_dir_leaf_moveents(xfs_dir_leafblock_t *leaf_s, int start_s,
tmp *= (uint)sizeof(xfs_dir_leaf_entry_t);
entry_s = &leaf_s->entries[start_s + count];
entry_d = &leaf_s->entries[start_s];
- bcopy(entry_s, entry_d, tmp);
+ memcpy(entry_d, entry_s, tmp);
tmp = count * (uint)sizeof(xfs_dir_leaf_entry_t);
entry_s = &leaf_s->entries[INT_GET(hdr_s->count, ARCH_CONVERT)];
ASSERT((char *)entry_s + tmp <= (char *)leaf_s + XFS_LBSIZE(mp));
- bzero((char *)entry_s, tmp);
+ memset((char *)entry_s, 0, tmp);
}
/*
@@ -2191,7 +2191,7 @@ xfs_dir_put_dirent64_direct(xfs_dir_put_args_t *pa)
idbp->d_off = pa->cook.o;
idbp->d_name[namelen] = '\0';
pa->done = 1;
- bcopy(pa->name, idbp->d_name, namelen);
+ memcpy(idbp->d_name, pa->name, namelen);
return 0;
}
@@ -2217,7 +2217,7 @@ xfs_dir_put_dirent64_uio(xfs_dir_put_args_t *pa)
idbp->d_ino = pa->ino;
idbp->d_off = pa->cook.o;
idbp->d_name[namelen] = '\0';
- bcopy(pa->name, idbp->d_name, namelen);
+ memcpy(idbp->d_name, pa->name, namelen);
retval = uiomove((caddr_t)idbp, reclen, UIO_READ, uio);
pa->done = (retval == 0);
return retval;
diff --git a/fs/xfs/xfs_dir_sf.h b/fs/xfs/xfs_dir_sf.h
index ede171472223..10c60645f1fc 100644
--- a/fs/xfs/xfs_dir_sf.h
+++ b/fs/xfs/xfs_dir_sf.h
@@ -46,7 +46,7 @@ typedef struct { __uint8_t i[sizeof(xfs_ino_t)]; } xfs_dir_ino_t;
* be calculated on the fly.
*
* Entries are packed toward the top as tight as possible. The header
- * and the elements much be bcopy()'d out into a work area to get correct
+ * and the elements must be memcpy'd out into a work area to get correct
* alignment for the inode number fields.
*/
typedef struct xfs_dir_shortform {
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index f67497d78935..fd1ae9b156b1 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -94,7 +94,7 @@ xfs_qm_dqinit(
#endif
} else {
/*
- * Only the q_core portion was bzeroed in dqreclaim_one().
+ * Only the q_core portion was zeroed in dqreclaim_one().
* So, we need to reset others.
*/
dqp->q_nrefs = 0;
@@ -156,7 +156,7 @@ xfs_qm_dqinit_core(
xfs_dqblk_t *d)
{
/*
- * Caller has bzero'd the entire dquot 'chunk' already.
+ * Caller has zero'd the entire dquot 'chunk' already.
*/
INT_SET(d->dd_diskdq.d_magic, ARCH_CONVERT, XFS_DQUOT_MAGIC);
INT_SET(d->dd_diskdq.d_version, ARCH_CONVERT, XFS_DQUOT_VERSION);
@@ -351,7 +351,7 @@ xfs_qm_init_dquot_blk(
*/
curid = id - (id % XFS_QM_DQPERBLK(mp));
ASSERT(curid >= 0);
- bzero(d, BBTOB(XFS_QI_DQCHUNKLEN(mp)));
+ memset(d, 0, BBTOB(XFS_QI_DQCHUNKLEN(mp)));
for (i = 0; i < XFS_QM_DQPERBLK(mp); i++, d++, curid++)
xfs_qm_dqinit_core(curid, type, d);
xfs_trans_dquot_buf(tp, bp,
@@ -614,7 +614,7 @@ xfs_qm_dqread(
}
/* copy everything from disk dquot to the incore dquot */
- bcopy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t));
+ memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
ASSERT(INT_GET(dqp->q_core.d_id, ARCH_CONVERT) == id);
xfs_qm_dquot_logitem_init(dqp);
@@ -1209,7 +1209,7 @@ xfs_qm_dqflush(
}
/* This is the only portion of data that needs to persist */
- bcopy(&(dqp->q_core), ddqp, sizeof(xfs_disk_dquot_t));
+ memcpy(ddqp, &(dqp->q_core), sizeof(xfs_disk_dquot_t));
/*
* Clear the dirty field and remember the flush lsn for later use.
@@ -1475,7 +1475,7 @@ xfs_qm_dqpurge(
dqp->q_mount = NULL;;
dqp->q_hash = NULL;
dqp->dq_flags = XFS_DQ_INACTIVE;
- bzero(&dqp->q_core, sizeof(dqp->q_core));
+ memset(&dqp->q_core, 0, sizeof(dqp->q_core));
xfs_dqfunlock(dqp);
xfs_dqunlock(dqp);
XFS_DQ_HASH_UNLOCK(thishash);
@@ -1585,7 +1585,7 @@ xfs_qm_dqcheck(
*/
ASSERT(id != -1);
ASSERT(flags & XFS_QMOPT_DQREPAIR);
- bzero(ddq, sizeof(xfs_dqblk_t));
+ memset(ddq, 0, sizeof(xfs_dqblk_t));
xfs_qm_dqinit_core(id, type, (xfs_dqblk_t *)ddq);
return (errs);
}
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index b5ceb0316aba..823aa2913e75 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -69,9 +69,9 @@ char * xfs_etest_fsname[XFS_NUM_INJECT_ERROR];
void
xfs_error_test_init(void)
{
- bzero(xfs_etest, sizeof(xfs_etest));
- bzero(xfs_etest_fsid, sizeof(xfs_etest_fsid));
- bzero(xfs_etest_fsname, sizeof(xfs_etest_fsname));
+ memset(xfs_etest, 0, sizeof(xfs_etest));
+ memset(xfs_etest_fsid, 0, sizeof(xfs_etest_fsid));
+ memset(xfs_etest_fsname, 0, sizeof(xfs_etest_fsname));
}
int
@@ -84,12 +84,12 @@ xfs_error_test(int error_tag, int *fsidp, char *expression,
if (random() % randfactor)
return 0;
- bcopy(fsidp, &fsid, sizeof(fsid_t));
+ memcpy(&fsid, fsidp, sizeof(fsid_t));
for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) {
if (xfs_etest[i] == error_tag && xfs_etest_fsid[i] == fsid) {
cmn_err(CE_WARN,
- "Injecting error (%s) at file %s, line %d, on filesystem \"%s\"\n",
+ "Injecting error (%s) at file %s, line %d, on filesystem \"%s\"",
expression, file, line, xfs_etest_fsname[i]);
return 1;
}
@@ -105,7 +105,7 @@ xfs_errortag_add(int error_tag, xfs_mount_t *mp)
int len;
int64_t fsid;
- bcopy(mp->m_fixedfsid, &fsid, sizeof(fsid_t));
+ memcpy(&fsid, mp->m_fixedfsid, sizeof(fsid_t));
for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) {
if (xfs_etest_fsid[i] == fsid && xfs_etest[i] == error_tag) {
@@ -138,7 +138,7 @@ xfs_errortag_clear(int error_tag, xfs_mount_t *mp)
int i;
int64_t fsid;
- bcopy(mp->m_fixedfsid, &fsid, sizeof(fsid_t));
+ memcpy(&fsid, mp->m_fixedfsid, sizeof(fsid_t));
for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) {
if (xfs_etest_fsid[i] == fsid && xfs_etest[i] == error_tag) {
@@ -191,7 +191,7 @@ xfs_errortag_clearall(xfs_mount_t *mp)
{
int64_t fsid;
- bcopy(mp->m_fixedfsid, &fsid, sizeof(fsid_t));
+ memcpy(&fsid, mp->m_fixedfsid, sizeof(fsid_t));
return xfs_errortag_clearall_umount(fsid, mp->m_fsname, 1);
}
diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h
index 7984d92618fd..0311d1258076 100644
--- a/fs/xfs/xfs_fs.h
+++ b/fs/xfs/xfs_fs.h
@@ -427,9 +427,9 @@ typedef struct xfs_handle {
- (char *) &(handle)) \
+ (handle).ha_fid.xfs_fid_len)
-#define XFS_HANDLE_CMP(h1, h2) bcmp(h1, h2, sizeof (xfs_handle_t))
+#define XFS_HANDLE_CMP(h1, h2) memcmp(h1, h2, sizeof(xfs_handle_t))
-#define FSHSIZE sizeof (fsid_t)
+#define FSHSIZE sizeof(fsid_t)
/*
@@ -498,13 +498,5 @@ typedef struct xfs_handle {
#define BTOBB(bytes) (((__u64)(bytes) + BBSIZE - 1) >> BBSHIFT)
#define BTOBBT(bytes) ((__u64)(bytes) >> BBSHIFT)
#define BBTOB(bbs) ((bbs) << BBSHIFT)
-#define OFFTOBB(bytes) (((__u64)(bytes) + BBSIZE - 1) >> BBSHIFT)
-#define OFFTOBBT(bytes) ((__u64)(bytes) >> BBSHIFT)
-#define BBTOOFF(bbs) ((__u64)(bbs) << BBSHIFT)
-
-#define SEEKLIMIT32 0x7fffffff
-#define BBSEEKLIMIT32 BTOBBT(SEEKLIMIT32)
-#define SEEKLIMIT 0x7fffffffffffffffLL
-#define BBSEEKLIMIT OFFTOBBT(SEEKLIMIT)
#endif /* _LINUX_XFS_FS_H */
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 4cd53ed2d791..aac21b2532a6 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -149,7 +149,7 @@ xfs_growfs_data_private(
sizeof(xfs_perag_t) * nagcount,
sizeof(xfs_perag_t) * oagcount,
KM_SLEEP);
- bzero(&mp->m_perag[oagcount],
+ memset(&mp->m_perag[oagcount], 0,
(nagcount - oagcount) * sizeof(xfs_perag_t));
mp->m_flags |= XFS_MOUNT_32BITINODES;
xfs_initialize_perag(mp, nagcount);
@@ -175,7 +175,7 @@ xfs_growfs_data_private(
disk_addr,
sectbb, 0);
agf = XFS_BUF_TO_AGF(bp);
- bzero(agf, mp->m_sb.sb_sectsize);
+ memset(agf, 0, mp->m_sb.sb_sectsize);
INT_SET(agf->agf_magicnum, ARCH_CONVERT, XFS_AGF_MAGIC);
INT_SET(agf->agf_versionnum, ARCH_CONVERT, XFS_AGF_VERSION);
INT_SET(agf->agf_seqno, ARCH_CONVERT, agno);
@@ -208,7 +208,7 @@ xfs_growfs_data_private(
disk_addr,
sectbb, 0);
agi = XFS_BUF_TO_AGI(bp);
- bzero(agi, mp->m_sb.sb_sectsize);
+ memset(agi, 0, mp->m_sb.sb_sectsize);
INT_SET(agi->agi_magicnum, ARCH_CONVERT, XFS_AGI_MAGIC);
INT_SET(agi->agi_versionnum, ARCH_CONVERT, XFS_AGI_VERSION);
INT_SET(agi->agi_seqno, ARCH_CONVERT, agno);
@@ -233,7 +233,7 @@ xfs_growfs_data_private(
disk_addr,
BTOBB(bsize), 0);
block = XFS_BUF_TO_SBLOCK(bp);
- bzero(block, bsize);
+ memset(block, 0, bsize);
INT_SET(block->bb_magic, ARCH_CONVERT, XFS_ABTB_MAGIC);
INT_ZERO(block->bb_level, ARCH_CONVERT);
INT_SET(block->bb_numrecs, ARCH_CONVERT, 1);
@@ -255,7 +255,7 @@ xfs_growfs_data_private(
disk_addr,
BTOBB(bsize), 0);
block = XFS_BUF_TO_SBLOCK(bp);
- bzero(block, bsize);
+ memset(block, 0, bsize);
INT_SET(block->bb_magic, ARCH_CONVERT, XFS_ABTC_MAGIC);
INT_ZERO(block->bb_level, ARCH_CONVERT);
INT_SET(block->bb_numrecs, ARCH_CONVERT, 1);
@@ -278,7 +278,7 @@ xfs_growfs_data_private(
disk_addr,
BTOBB(bsize), 0);
block = XFS_BUF_TO_SBLOCK(bp);
- bzero(block, bsize);
+ memset(block, 0, bsize);
INT_SET(block->bb_magic, ARCH_CONVERT, XFS_IBT_MAGIC);
INT_ZERO(block->bb_level, ARCH_CONVERT);
INT_ZERO(block->bb_numrecs, ARCH_CONVERT);
@@ -353,7 +353,7 @@ xfs_growfs_data_private(
sectbb, 0, &bp);
if (error) {
xfs_fs_cmn_err(CE_WARN, mp,
- "error %d reading secondary superblock for ag %d\n",
+ "error %d reading secondary superblock for ag %d",
error, agno);
break;
}
@@ -368,7 +368,7 @@ xfs_growfs_data_private(
continue;
} else {
xfs_fs_cmn_err(CE_WARN, mp,
- "write error %d updating secondary superblock for ag %d\n",
+ "write error %d updating secondary superblock for ag %d",
error, agno);
break; /* no point in continuing */
}
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index 185e62f08c70..b8f68d7c3605 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -263,7 +263,7 @@ xfs_ialloc_ag_alloc(
INT_ZERO(dic.di_gid, ARCH_CONVERT);
INT_ZERO(dic.di_nlink, ARCH_CONVERT);
INT_ZERO(dic.di_projid, ARCH_CONVERT);
- bzero(&(dic.di_pad[0]),sizeof(dic.di_pad));
+ memset(&(dic.di_pad[0]), 0, sizeof(dic.di_pad));
INT_SET(dic.di_atime.t_sec, ARCH_CONVERT, ztime.t_sec);
INT_SET(dic.di_atime.t_nsec, ARCH_CONVERT, ztime.t_nsec);
@@ -287,7 +287,7 @@ xfs_ialloc_ag_alloc(
for (i = 0; i < ninodes; i++) {
free = XFS_MAKE_IPTR(args.mp, fbuf, i);
- bcopy (&dic, &(free->di_core), sizeof(xfs_dinode_core_t));
+ memcpy(&(free->di_core), &dic, sizeof(xfs_dinode_core_t));
INT_SET(free->di_next_unlinked, ARCH_CONVERT, NULLAGINO);
xfs_ialloc_log_di(tp, fbuf, i,
XFS_DI_CORE_BITS | XFS_DI_NEXT_UNLINKED);
diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c
index 0feaf85a5455..bded6cfd75f3 100644
--- a/fs/xfs/xfs_ialloc_btree.c
+++ b/fs/xfs/xfs_ialloc_btree.c
@@ -139,9 +139,9 @@ xfs_inobt_delrec(
}
#endif
if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) {
- ovbcopy(&kp[ptr], &kp[ptr - 1],
+ memmove(&kp[ptr - 1], &kp[ptr],
(INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr) * sizeof(*kp));
- ovbcopy(&pp[ptr], &pp[ptr - 1],
+ memmove(&pp[ptr - 1], &pp[ptr],
(INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr) * sizeof(*pp));
xfs_inobt_log_keys(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT) - 1);
xfs_inobt_log_ptrs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT) - 1);
@@ -154,7 +154,7 @@ xfs_inobt_delrec(
else {
rp = XFS_INOBT_REC_ADDR(block, 1, cur);
if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) {
- ovbcopy(&rp[ptr], &rp[ptr - 1],
+ memmove(&rp[ptr - 1], &rp[ptr],
(INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr) * sizeof(*rp));
xfs_inobt_log_recs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT) - 1);
}
@@ -450,8 +450,8 @@ xfs_inobt_delrec(
return error;
}
#endif
- bcopy(rkp, lkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lkp));
- bcopy(rpp, lpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lpp));
+ memcpy(lkp, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lkp));
+ memcpy(lpp, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lpp));
xfs_inobt_log_keys(cur, lbp, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1,
INT_GET(left->bb_numrecs, ARCH_CONVERT) + INT_GET(right->bb_numrecs, ARCH_CONVERT));
xfs_inobt_log_ptrs(cur, lbp, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1,
@@ -462,7 +462,7 @@ xfs_inobt_delrec(
*/
lrp = XFS_INOBT_REC_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, cur);
rrp = XFS_INOBT_REC_ADDR(right, 1, cur);
- bcopy(rrp, lrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lrp));
+ memcpy(lrp, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lrp));
xfs_inobt_log_recs(cur, lbp, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1,
INT_GET(left->bb_numrecs, ARCH_CONVERT) + INT_GET(right->bb_numrecs, ARCH_CONVERT));
}
@@ -690,9 +690,9 @@ xfs_inobt_insrec(
return error;
}
#endif
- ovbcopy(&kp[ptr - 1], &kp[ptr],
+ memmove(&kp[ptr], &kp[ptr - 1],
(INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr + 1) * sizeof(*kp));
- ovbcopy(&pp[ptr - 1], &pp[ptr],
+ memmove(&pp[ptr], &pp[ptr - 1],
(INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr + 1) * sizeof(*pp));
/*
* Now stuff the new data in, bump numrecs and log the new data.
@@ -711,7 +711,7 @@ xfs_inobt_insrec(
* It's a leaf entry. Make a hole for the new record.
*/
rp = XFS_INOBT_REC_ADDR(block, 1, cur);
- ovbcopy(&rp[ptr - 1], &rp[ptr],
+ memmove(&rp[ptr], &rp[ptr - 1],
(INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr + 1) * sizeof(*rp));
/*
* Now stuff the new record in, bump numrecs
@@ -1170,12 +1170,12 @@ xfs_inobt_lshift(
return error;
}
#endif
- ovbcopy(rkp + 1, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp));
- ovbcopy(rpp + 1, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp));
+ memmove(rkp, rkp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp));
+ memmove(rpp, rpp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp));
xfs_inobt_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
xfs_inobt_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
} else {
- ovbcopy(rrp + 1, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp));
+ memmove(rrp, rrp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp));
xfs_inobt_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
key.ir_startino = rrp->ir_startino; /* INT_: direct copy */
rkp = &key;
@@ -1421,8 +1421,8 @@ xfs_inobt_rshift(
return error;
}
#endif
- ovbcopy(rkp, rkp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp));
- ovbcopy(rpp, rpp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp));
+ memmove(rkp + 1, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp));
+ memmove(rpp + 1, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp));
#ifdef DEBUG
if ((error = xfs_btree_check_sptr(cur, INT_GET(*lpp, ARCH_CONVERT), level)))
return error;
@@ -1434,7 +1434,7 @@ xfs_inobt_rshift(
} else {
lrp = XFS_INOBT_REC_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur);
rrp = XFS_INOBT_REC_ADDR(right, 1, cur);
- ovbcopy(rrp, rrp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp));
+ memmove(rrp + 1, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp));
*rrp = *lrp;
xfs_inobt_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1);
key.ir_startino = rrp->ir_startino; /* INT_: direct copy */
@@ -1562,8 +1562,8 @@ xfs_inobt_split(
return error;
}
#endif
- bcopy(lkp, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp));
- bcopy(lpp, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp));
+ memcpy(rkp, lkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp));
+ memcpy(rpp, lpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp));
xfs_inobt_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
xfs_inobt_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
*keyp = *rkp;
@@ -1574,7 +1574,7 @@ xfs_inobt_split(
else {
lrp = XFS_INOBT_REC_ADDR(left, i, cur);
rrp = XFS_INOBT_REC_ADDR(right, 1, cur);
- bcopy(lrp, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp));
+ memcpy(rrp, lrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp));
xfs_inobt_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
keyp->ir_startino = rrp->ir_startino; /* INT_: direct copy */
}
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 7a8b02b39557..52c1b10c2afd 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -31,6 +31,7 @@
*/
#include <xfs.h>
+#include <linux/pagemap.h>
/*
@@ -109,25 +110,36 @@ xfs_chash_free(xfs_mount_t *mp)
mp->m_chash = NULL;
}
-
-static inline void
-xfs_iget_vnode_init(
+void
+xfs_revalidate_inode(
xfs_mount_t *mp,
vnode_t *vp,
xfs_inode_t *ip)
{
- vp->v_vfsp = XFS_MTOVFS(mp);
- vp->v_type = IFTOVT(ip->i_d.di_mode);
+ struct inode *inode = LINVFS_GET_IP(vp);
- /* If we have a real type for an on-disk inode, we can set ops(&unlock)
- * now. If it's a new inode being created, xfs_ialloc will handle it.
- */
- if (vp->v_type != VNON) {
- linvfs_set_inode_ops(LINVFS_GET_IP(vp));
+ inode->i_mode = (ip->i_d.di_mode & MODEMASK) | VTTOIF(vp->v_type);
+ inode->i_nlink = ip->i_d.di_nlink;
+ inode->i_uid = ip->i_d.di_uid;
+ inode->i_gid = ip->i_d.di_gid;
+ if (((1 << vp->v_type) & ((1<<VBLK) | (1<<VCHR))) == 0) {
+ inode->i_rdev = NODEV;
+ } else {
+ xfs_dev_t dev = ip->i_df.if_u2.if_rdev;
+ inode->i_rdev = XFS_DEV_TO_KDEVT(dev);
}
+ inode->i_blksize = PAGE_CACHE_SIZE;
+ inode->i_generation = ip->i_d.di_gen;
+ inode->i_size = ip->i_d.di_size;
+ inode->i_blocks =
+ XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);
+ inode->i_atime = ip->i_d.di_atime.t_sec;
+ inode->i_mtime = ip->i_d.di_mtime.t_sec;
+ inode->i_ctime = ip->i_d.di_ctime.t_sec;
+
+ vp->v_flag &= ~VMODIFIED;
}
-
/*
* Look up an inode by number in the given file system.
* The inode is looked up in the hash table for the file system
@@ -159,7 +171,7 @@ xfs_iget_vnode_init(
* bno -- the block number starting the buffer containing the inode,
* if known (as by bulkstat), else 0.
*/
-int
+STATIC int
xfs_iget_core(
vnode_t *vp,
xfs_mount_t *mp,
@@ -205,16 +217,9 @@ again:
goto again;
}
- xfs_iget_vnode_init(mp, vp, ip);
-
vn_trace_exit(vp, "xfs_iget.alloc",
(inst_t *)__return_address);
- bhv_desc_init(&(ip->i_bhv_desc), ip, vp,
- &xfs_vnodeops);
- vn_bhv_insert_initial(VN_BHV_HEAD(vp),
- &(ip->i_bhv_desc));
-
XFS_STATS_INC(xfsstats.xs_ig_found);
read_unlock(&ih->ih_lock);
@@ -240,18 +245,12 @@ again:
cmn_err(CE_PANIC,
"xfs_iget_core: ambiguous vns: vp/0x%p, invp/0x%p",
inode_vp, vp);
- BUG();
}
read_unlock(&ih->ih_lock);
XFS_STATS_INC(xfsstats.xs_ig_found);
- /*
- * Make sure the vnode and the inode are hooked up
- */
- xfs_iget_vnode_init(mp, vp, ip);
-
finish_inode:
if (lock_flags != 0) {
xfs_ilock(ip, lock_flags);
@@ -288,19 +287,8 @@ finish_inode:
return error;
}
- /*
- * Vnode provided by vn_initialize.
- */
-
- xfs_iget_vnode_init(mp, vp, ip);
-
vn_trace_exit(vp, "xfs_iget.alloc", (inst_t *)__return_address);
- if (vp->v_fbhv == NULL) {
- bhv_desc_init(&(ip->i_bhv_desc), ip, vp, &xfs_vnodeops);
- vn_bhv_insert_initial(VN_BHV_HEAD(vp), &(ip->i_bhv_desc));
- }
-
xfs_inode_lock_init(ip, vp);
xfs_iocore_inode_init(ip);
@@ -429,8 +417,11 @@ finish_inode:
*ipp = ip;
- /* Update the linux inode */
- error = vn_revalidate(vp, ATTR_COMM|ATTR_LAZY);
+ /*
+ * If we have a real type for an on-disk inode, we can set ops(&unlock)
+ * now. If it's a new inode being created, xfs_ialloc will handle it.
+ */
+ VFS_INIT_VNODE(XFS_MTOVFS(mp), vp, XFS_ITOBHV(ip), 1);
return 0;
}
@@ -495,7 +486,6 @@ inode_allocate:
newnode = (ip->i_d.di_mode == 0);
if (newnode)
xfs_iocore_inode_reinit(ip);
- vn_revalidate(vp, ATTR_COMM|ATTR_LAZY);
XFS_STATS_INC(xfsstats.xs_ig_found);
*ipp = ip;
error = 0;
@@ -506,7 +496,6 @@ inode_allocate:
return error;
}
-
/*
* Do the setup for the various locks within the incore inode.
*/
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index f7c35e754899..bd343063b60d 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -56,8 +56,9 @@ STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);
*/
STATIC void
xfs_validate_extents(
- xfs_bmbt_rec_32_t *ep,
+ xfs_bmbt_rec_t *ep,
int nrecs,
+ int disk,
xfs_exntfmt_t fmt)
{
xfs_bmbt_irec_t irec;
@@ -65,15 +66,18 @@ xfs_validate_extents(
xfs_bmbt_rec_t rec;
for (i = 0; i < nrecs; i++) {
- bcopy(ep, &rec, sizeof(rec));
- xfs_bmbt_get_all(&rec, &irec);
+ memcpy(&rec, ep, sizeof(rec));
+ if (disk)
+ xfs_bmbt_disk_get_all(&rec, &irec);
+ else
+ xfs_bmbt_get_all(&rec, &irec);
if (fmt == XFS_EXTFMT_NOSTATE)
ASSERT(irec.br_state == XFS_EXT_NORM);
ep++;
}
}
#else /* DEBUG */
-#define xfs_validate_extents(ep, nrecs, fmt)
+#define xfs_validate_extents(ep, nrecs, disk, fmt)
#endif /* DEBUG */
/*
@@ -551,7 +555,7 @@ xfs_iformat_local(
/*
* If the size is unreasonable, then something
* is wrong and we just bail out rather than crash in
- * kmem_alloc() or bcopy() below.
+ * kmem_alloc() or memcpy() below.
*/
if (size > XFS_DFORK_SIZE_ARCH(dip, ip->i_mount, whichfork, ARCH_CONVERT)) {
xfs_fs_cmn_err(CE_WARN, ip->i_mount,
@@ -575,7 +579,8 @@ xfs_iformat_local(
ifp->if_bytes = size;
ifp->if_real_bytes = real_size;
if (size)
- bcopy(XFS_DFORK_PTR_ARCH(dip, whichfork, ARCH_CONVERT), ifp->if_u1.if_data, size);
+ memcpy(ifp->if_u1.if_data,
+ XFS_DFORK_PTR_ARCH(dip, whichfork, ARCH_CONVERT), size);
ifp->if_flags &= ~XFS_IFEXTENTS;
ifp->if_flags |= XFS_IFINLINE;
return 0;
@@ -597,9 +602,10 @@ xfs_iformat_extents(
int whichfork)
{
xfs_ifork_t *ifp;
- int nex;
+ int nex, i;
int real_size;
int size;
+ xfs_bmbt_rec_t *ep, *dp;
ifp = XFS_IFORK_PTR(ip, whichfork);
nex = XFS_DFORK_NEXTENTS_ARCH(dip, whichfork, ARCH_CONVERT);
@@ -608,7 +614,7 @@ xfs_iformat_extents(
/*
* If the number of extents is unreasonable, then something
* is wrong and we just bail out rather than crash in
- * kmem_alloc() or bcopy() below.
+ * kmem_alloc() or memcpy() below.
*/
if (size < 0 || size > XFS_DFORK_SIZE_ARCH(dip, ip->i_mount, whichfork, ARCH_CONVERT)) {
xfs_fs_cmn_err(CE_WARN, ip->i_mount,
@@ -632,10 +638,18 @@ xfs_iformat_extents(
ifp->if_real_bytes = real_size;
if (size) {
xfs_validate_extents(
- (xfs_bmbt_rec_32_t *)XFS_DFORK_PTR_ARCH(dip, whichfork, ARCH_CONVERT),
- nex, XFS_EXTFMT_INODE(ip));
- bcopy(XFS_DFORK_PTR_ARCH(dip, whichfork, ARCH_CONVERT), ifp->if_u1.if_extents,
- size);
+ (xfs_bmbt_rec_t *)XFS_DFORK_PTR_ARCH(dip, whichfork, ARCH_CONVERT),
+ nex, 1, XFS_EXTFMT_INODE(ip));
+ dp = (xfs_bmbt_rec_t *)XFS_DFORK_PTR_ARCH(dip, whichfork, ARCH_CONVERT);
+ ep = ifp->if_u1.if_extents;
+#if ARCH_CONVERT != ARCH_NOCONVERT
+ for (i = 0; i < nex; i++, ep++, dp++) {
+ ep->l0 = INT_GET(dp->l0, ARCH_CONVERT);
+ ep->l1 = INT_GET(dp->l1, ARCH_CONVERT);
+ }
+#else
+ memcpy(ep, dp, size);
+#endif
xfs_bmap_trace_exlist("xfs_iformat_extents", ip, nex,
whichfork);
if (whichfork != XFS_DATA_FORK ||
@@ -731,9 +745,9 @@ xfs_xlate_dinode_core(xfs_caddr_t buf, xfs_dinode_core_t *dip,
if (arch == ARCH_NOCONVERT) {
if (dir>0) {
- bcopy((xfs_caddr_t)buf_core, (xfs_caddr_t)mem_core, sizeof(xfs_dinode_core_t));
+ memcpy((xfs_caddr_t)mem_core, (xfs_caddr_t)buf_core, sizeof(xfs_dinode_core_t));
} else {
- bcopy((xfs_caddr_t)mem_core, (xfs_caddr_t)buf_core, sizeof(xfs_dinode_core_t));
+ memcpy((xfs_caddr_t)buf_core, (xfs_caddr_t)mem_core, sizeof(xfs_dinode_core_t));
}
return;
}
@@ -749,9 +763,9 @@ xfs_xlate_dinode_core(xfs_caddr_t buf, xfs_dinode_core_t *dip,
INT_XLATE(buf_core->di_projid, mem_core->di_projid, dir, arch);
if (dir>0) {
- bcopy(buf_core->di_pad, mem_core->di_pad, sizeof(buf_core->di_pad));
+ memcpy(mem_core->di_pad, buf_core->di_pad, sizeof(buf_core->di_pad));
} else {
- bcopy(mem_core->di_pad, buf_core->di_pad, sizeof(buf_core->di_pad));
+ memcpy(buf_core->di_pad, mem_core->di_pad, sizeof(buf_core->di_pad));
}
INT_XLATE(buf_core->di_atime.t_sec, mem_core->di_atime.t_sec, dir, arch);
@@ -978,8 +992,8 @@ xfs_iread_extents(
ifp->if_flags &= ~XFS_IFEXTENTS;
return error;
}
- xfs_validate_extents((xfs_bmbt_rec_32_t *)ifp->if_u1.if_extents,
- XFS_IFORK_NEXTENTS(ip, whichfork), XFS_EXTFMT_INODE(ip));
+ xfs_validate_extents((xfs_bmbt_rec_t *)ifp->if_u1.if_extents,
+ XFS_IFORK_NEXTENTS(ip, whichfork), 0, XFS_EXTFMT_INODE(ip));
return 0;
}
@@ -1015,7 +1029,7 @@ xfs_ialloc(
xfs_inode_t *pip,
mode_t mode,
nlink_t nlink,
- dev_t rdev,
+ xfs_dev_t rdev,
cred_t *cr,
xfs_prid_t prid,
int okalloc,
@@ -1065,10 +1079,7 @@ xfs_ialloc(
ip->i_d.di_uid = current->fsuid;
ip->i_d.di_gid = current->fsgid;
ip->i_d.di_projid = prid;
- bzero(&(ip->i_d.di_pad[0]), sizeof(ip->i_d.di_pad));
-
- /* now that we have a v_type we can set Linux inode ops (& unlock) */
- linvfs_set_inode_ops(LINVFS_GET_IP(XFS_ITOV(ip)));
+ memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
/*
* If the superblock version is up to where we support new format
@@ -1100,14 +1111,13 @@ xfs_ialloc(
/*
* If the group ID of the new file does not match the effective group
- * ID or one of the supplementary group IDs, the ISGID bit is
- * cleared if the "irixsgid" mount option is set.
+ * ID or one of the supplementary group IDs, the ISGID bit is cleared
+ * (and only if the irix_sgid_inherit compatibility variable is set).
*/
- if (ip->i_d.di_mode & ISGID) {
- if (!in_group_p((gid_t)ip->i_d.di_gid)
- && (ip->i_mount->m_flags & XFS_MOUNT_IRIXSGID)) {
- ip->i_d.di_mode &= ~ISGID;
- }
+ if ((irix_sgid_inherit) &&
+ (ip->i_d.di_mode & ISGID) &&
+ (!in_group_p((gid_t)ip->i_d.di_gid))) {
+ ip->i_d.di_mode &= ~ISGID;
}
ip->i_d.di_size = 0;
@@ -1128,7 +1138,7 @@ xfs_ialloc(
case IFBLK:
case IFSOCK:
ip->i_d.di_format = XFS_DINODE_FMT_DEV;
- ip->i_df.if_u2.if_rdev = IRIX_MKDEV(MAJOR(rdev), MINOR(rdev));
+ ip->i_df.if_u2.if_rdev = rdev;
ip->i_df.if_flags = 0;
flags |= XFS_ILOG_DEV;
break;
@@ -1172,6 +1182,10 @@ xfs_ialloc(
* Log the new values stuffed into the inode.
*/
xfs_trans_log_inode(tp, ip, flags);
+
+ /* now that we have a v_type we can set Linux inode ops (& unlock) */
+ VFS_INIT_VNODE(XFS_MTOVFS(tp->t_mountp), vp, XFS_ITOBHV(ip), 1);
+
*ipp = ip;
return 0;
}
@@ -1714,7 +1728,7 @@ xfs_igrow_start(
* and any blocks between the old and new file sizes.
*/
error = xfs_zero_eof(XFS_ITOV(ip), &ip->i_iocore, new_size, isize,
- new_size, NULL);
+ new_size);
return error;
}
@@ -2156,7 +2170,7 @@ xfs_iroot_realloc(
ifp->if_broot_bytes = (int)new_size;
ASSERT(ifp->if_broot_bytes <=
XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
- ovbcopy(op, np, cur_max * (uint)sizeof(xfs_dfsbno_t));
+ memmove(np, op, cur_max * (uint)sizeof(xfs_dfsbno_t));
return;
}
@@ -2178,7 +2192,7 @@ xfs_iroot_realloc(
/*
* First copy over the btree block header.
*/
- bcopy(ifp->if_broot, new_broot, sizeof(xfs_bmbt_block_t));
+ memcpy(new_broot, ifp->if_broot, sizeof(xfs_bmbt_block_t));
} else {
new_broot = NULL;
ifp->if_flags &= ~XFS_IFBROOT;
@@ -2195,7 +2209,7 @@ xfs_iroot_realloc(
ifp->if_broot_bytes);
np = (char *)XFS_BMAP_BROOT_REC_ADDR(new_broot, 1,
(int)new_size);
- bcopy(op, np, new_max * (uint)sizeof(xfs_bmbt_rec_t));
+ memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t));
/*
* Then copy the pointers.
@@ -2204,7 +2218,7 @@ xfs_iroot_realloc(
ifp->if_broot_bytes);
np = (char *)XFS_BMAP_BROOT_PTR_ADDR(new_broot, 1,
(int)new_size);
- bcopy(op, np, new_max * (uint)sizeof(xfs_dfsbno_t));
+ memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t));
}
kmem_free(ifp->if_broot, ifp->if_broot_bytes);
ifp->if_broot = new_broot;
@@ -2268,8 +2282,8 @@ xfs_iext_realloc(
* so the if_extents pointer is null.
*/
if (ifp->if_u1.if_extents) {
- bcopy(ifp->if_u1.if_extents,
- ifp->if_u2.if_inline_ext, new_size);
+ memcpy(ifp->if_u2.if_inline_ext,
+ ifp->if_u1.if_extents, new_size);
kmem_free(ifp->if_u1.if_extents,
ifp->if_real_bytes);
}
@@ -2286,7 +2300,7 @@ xfs_iext_realloc(
if (ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext) {
ifp->if_u1.if_extents = (xfs_bmbt_rec_t *)
kmem_alloc(rnew_size, KM_SLEEP);
- bcopy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents,
+ memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext,
sizeof(ifp->if_u2.if_inline_ext));
} else if (rnew_size != ifp->if_real_bytes) {
ifp->if_u1.if_extents = (xfs_bmbt_rec_t *)
@@ -2349,7 +2363,7 @@ xfs_idata_realloc(
ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
} else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
ASSERT(ifp->if_real_bytes != 0);
- bcopy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data,
+ memcpy(ifp->if_u2.if_inline_data, ifp->if_u1.if_data,
new_size);
kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes);
ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
@@ -2382,8 +2396,8 @@ xfs_idata_realloc(
} else {
ASSERT(ifp->if_real_bytes == 0);
ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
- bcopy(ifp->if_u2.if_inline_data, ifp->if_u1.if_data,
- ifp->if_bytes);
+ memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data,
+ ifp->if_bytes);
}
}
ifp->if_real_bytes = real_size;
@@ -2610,17 +2624,17 @@ xfs_iunpin_wait(
* returns the number of bytes copied into the buffer.
*
* If there are no delayed allocation extents, then we can just
- * bcopy() the extents into the buffer. Otherwise, we need to
+ * memcpy() the extents into the buffer. Otherwise, we need to
* examine each extent in turn and skip those which are delayed.
*/
int
xfs_iextents_copy(
xfs_inode_t *ip,
- xfs_bmbt_rec_32_t *buffer,
+ xfs_bmbt_rec_t *buffer,
int whichfork)
{
int copied;
- xfs_bmbt_rec_32_t *dest_ep;
+ xfs_bmbt_rec_t *dest_ep;
xfs_bmbt_rec_t *ep;
#ifdef XFS_BMAP_TRACE
static char fname[] = "xfs_iextents_copy";
@@ -2637,28 +2651,13 @@ xfs_iextents_copy(
nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
xfs_bmap_trace_exlist(fname, ip, nrecs, whichfork);
ASSERT(nrecs > 0);
- if (nrecs == XFS_IFORK_NEXTENTS(ip, whichfork)) {
- /*
- * There are no delayed allocation extents,
- * so just copy everything.
- */
- ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
- ASSERT(ifp->if_bytes ==
- (XFS_IFORK_NEXTENTS(ip, whichfork) *
- (uint)sizeof(xfs_bmbt_rec_t)));
- bcopy(ifp->if_u1.if_extents, buffer, ifp->if_bytes);
- xfs_validate_extents(buffer, nrecs, XFS_EXTFMT_INODE(ip));
- return ifp->if_bytes;
- }
- ASSERT(whichfork == XFS_DATA_FORK);
/*
* There are some delayed allocation extents in the
* inode, so copy the extents one at a time and skip
* the delayed ones. There must be at least one
* non-delayed extent.
*/
- ASSERT(nrecs > ip->i_d.di_nextents);
ep = ifp->if_u1.if_extents;
dest_ep = buffer;
copied = 0;
@@ -2672,15 +2671,19 @@ xfs_iextents_copy(
continue;
}
- *dest_ep = *(xfs_bmbt_rec_32_t *)ep;
+#if ARCH_CONVERT != ARCH_NOCONVERT
+ /* Translate to on disk format */
+ dest_ep->l0 = INT_GET(ep->l0, ARCH_CONVERT);
+ dest_ep->l1 = INT_GET(ep->l1, ARCH_CONVERT);
+#else
+ *dest_ep = *ep;
+#endif
dest_ep++;
ep++;
copied++;
}
ASSERT(copied != 0);
- ASSERT(copied == ip->i_d.di_nextents);
- ASSERT((copied * (uint)sizeof(xfs_bmbt_rec_t)) <= XFS_IFORK_DSIZE(ip));
- xfs_validate_extents(buffer, copied, XFS_EXTFMT_INODE(ip));
+ xfs_validate_extents(buffer, copied, 1, XFS_EXTFMT_INODE(ip));
return (copied * (uint)sizeof(xfs_bmbt_rec_t));
}
@@ -2736,7 +2739,7 @@ xfs_iflush_fork(
(ifp->if_bytes > 0)) {
ASSERT(ifp->if_u1.if_data != NULL);
ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
- bcopy(ifp->if_u1.if_data, cp, ifp->if_bytes);
+ memcpy(cp, ifp->if_u1.if_data, ifp->if_bytes);
}
if (whichfork == XFS_DATA_FORK) {
if (XFS_DIR_SHORTFORM_VALIDATE_ONDISK(mp, dip)) {
@@ -2753,7 +2756,7 @@ xfs_iflush_fork(
if ((iip->ili_format.ilf_fields & extflag[whichfork]) &&
(ifp->if_bytes > 0)) {
ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0);
- (void)xfs_iextents_copy(ip, (xfs_bmbt_rec_32_t *)cp,
+ (void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp,
whichfork);
}
break;
@@ -2781,7 +2784,7 @@ xfs_iflush_fork(
case XFS_DINODE_FMT_UUID:
if (iip->ili_format.ilf_fields & XFS_ILOG_UUID) {
ASSERT(whichfork == XFS_DATA_FORK);
- bcopy(&ip->i_df.if_u2.if_uuid, &dip->di_u.di_muuid,
+ memcpy(&dip->di_u.di_muuid, &ip->i_df.if_u2.if_uuid,
sizeof(uuid_t));
}
break;
@@ -3208,8 +3211,8 @@ xfs_iflush_int(
INT_SET(dip->di_core.di_version, ARCH_CONVERT, XFS_DINODE_VERSION_2);
ip->i_d.di_onlink = 0;
INT_ZERO(dip->di_core.di_onlink, ARCH_CONVERT);
- bzero(&(ip->i_d.di_pad[0]), sizeof(ip->i_d.di_pad));
- bzero(&(dip->di_core.di_pad[0]),
+ memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
+ memset(&(dip->di_core.di_pad[0]), 0,
sizeof(dip->di_core.di_pad));
ASSERT(ip->i_d.di_projid == 0);
}
@@ -3377,7 +3380,7 @@ xfs_iflush_all(
* entry in the list anyway so we'll know below
* whether we reached the end or not.
*/
- VMAP(vp, ip, vmap);
+ VMAP(vp, vmap);
vp->v_flag |= VPURGE; /* OK for vn_purge */
XFS_MOUNT_IUNLOCK(mp);
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 308bc90d825f..38669ac946f0 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -98,7 +98,6 @@ struct xfs_inode_log_item;
struct xfs_mount;
struct xfs_trans;
struct xfs_dquot;
-struct pm;
/*
@@ -119,23 +118,6 @@ typedef struct xfs_gap {
xfs_extlen_t xg_count_fsb;
} xfs_gap_t;
-/*
- * This structure is used to hold common pieces of the buffer
- * and file for xfs_dio_write and xfs_dio_read.
- */
-typedef struct xfs_dio {
- struct xfs_buf *xd_bp;
- bhv_desc_t *xd_bdp;
- struct xfs_inode *xd_ip;
- struct xfs_iocore *xd_io;
- struct cred *xd_cr;
- struct pm *xd_pmp;
- int xd_blkalgn;
- int xd_ioflag;
- xfs_off_t xd_start;
- size_t xd_length;
-} xfs_dio_t;
-
typedef struct dm_attrs_s {
__uint32_t da_dmevmask; /* DMIG event mask */
__uint16_t da_dmstate; /* DMIG state info */
@@ -516,7 +498,7 @@ int xfs_iread(struct xfs_mount *, struct xfs_trans *, xfs_ino_t,
xfs_inode_t **, xfs_daddr_t);
int xfs_iread_extents(struct xfs_trans *, xfs_inode_t *, int);
int xfs_ialloc(struct xfs_trans *, xfs_inode_t *, mode_t, nlink_t,
- dev_t, struct cred *, xfs_prid_t, int,
+ xfs_dev_t, struct cred *, xfs_prid_t, int,
struct xfs_buf **, boolean_t *, xfs_inode_t **);
void xfs_xlate_dinode_core(xfs_caddr_t, struct xfs_dinode_core *, int,
xfs_arch_t);
@@ -538,7 +520,7 @@ void xfs_iext_realloc(xfs_inode_t *, int, int);
void xfs_iroot_realloc(xfs_inode_t *, int, int);
void xfs_ipin(xfs_inode_t *);
void xfs_iunpin(xfs_inode_t *);
-int xfs_iextents_copy(xfs_inode_t *, xfs_bmbt_rec_32_t *, int);
+int xfs_iextents_copy(xfs_inode_t *, xfs_bmbt_rec_t *, int);
int xfs_iflush(xfs_inode_t *, uint);
int xfs_iflush_all(struct xfs_mount *, int);
int xfs_ibusy_check(xfs_inode_t *, int);
@@ -550,7 +532,7 @@ void xfs_lock_inodes(xfs_inode_t **, int, int, uint);
#define xfs_ipincount(ip) ((unsigned int) atomic_read(&ip->i_pincount))
-
+void xfs_revalidate_inode(struct xfs_mount *, vnode_t *vp, xfs_inode_t *);
#ifdef DEBUG
void xfs_isize_check(struct xfs_mount *, xfs_inode_t *, xfs_fsize_t);
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 9beacce25dec..d23d596d7973 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -96,7 +96,7 @@ xfs_inode_item_size(
if (iip->ili_root_size > 0) {
ASSERT(iip->ili_root_size ==
ip->i_df.if_broot_bytes);
- ASSERT(bcmp(iip->ili_orig_root,
+ ASSERT(memcmp(iip->ili_orig_root,
ip->i_df.if_broot,
iip->ili_root_size) == 0);
} else {
@@ -214,7 +214,7 @@ xfs_inode_item_format(
xfs_log_iovec_t *vecp;
xfs_inode_t *ip;
size_t data_bytes;
- xfs_bmbt_rec_32_t *ext_buffer;
+ xfs_bmbt_rec_t *ext_buffer;
int nrecs;
xfs_mount_t *mp;
@@ -297,7 +297,7 @@ xfs_inode_item_format(
*/
ip->i_d.di_version = XFS_DINODE_VERSION_2;
ip->i_d.di_onlink = 0;
- bzero(&(ip->i_d.di_pad[0]), sizeof(ip->i_d.di_pad));
+ memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
}
}
@@ -314,6 +314,7 @@ xfs_inode_item_format(
nrecs = ip->i_df.if_bytes /
(uint)sizeof(xfs_bmbt_rec_t);
ASSERT(nrecs > 0);
+#if ARCH_CONVERT == ARCH_NOCONVERT
if (nrecs == ip->i_d.di_nextents) {
/*
* There are no delayed allocation
@@ -323,10 +324,14 @@ xfs_inode_item_format(
vecp->i_addr =
(char *)(ip->i_df.if_u1.if_extents);
vecp->i_len = ip->i_df.if_bytes;
- } else {
+ } else
+#endif
+ {
/*
* There are delayed allocation extents
- * in the inode. Use xfs_iextents_copy()
+ * in the inode, or we need to convert
+ * the extents to on disk format.
+ * Use xfs_iextents_copy()
* to copy only the real extents into
* a separate buffer. We'll free the
* buffer in the unlock routine.
@@ -336,7 +341,7 @@ xfs_inode_item_format(
iip->ili_extents_buf = ext_buffer;
vecp->i_addr = (xfs_caddr_t)ext_buffer;
vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
- XFS_DATA_FORK);
+ XFS_DATA_FORK);
}
ASSERT(vecp->i_len <= ip->i_df.if_bytes);
iip->ili_format.ilf_dsize = vecp->i_len;
@@ -428,6 +433,7 @@ xfs_inode_item_format(
ASSERT(!(iip->ili_format.ilf_fields &
(XFS_ILOG_ADATA | XFS_ILOG_ABROOT)));
if (iip->ili_format.ilf_fields & XFS_ILOG_AEXT) {
+ ASSERT(!(iip->ili_format.ilf_fields & XFS_ILOG_DEXT));
ASSERT(ip->i_afp->if_bytes > 0);
ASSERT(ip->i_afp->if_u1.if_extents != NULL);
ASSERT(ip->i_d.di_anextents > 0);
@@ -437,12 +443,25 @@ xfs_inode_item_format(
#endif
ASSERT(nrecs > 0);
ASSERT(nrecs == ip->i_d.di_anextents);
+#if ARCH_CONVERT == ARCH_NOCONVERT
/*
* There are not delayed allocation extents
* for attributes, so just point at the array.
*/
vecp->i_addr = (char *)(ip->i_afp->if_u1.if_extents);
vecp->i_len = ip->i_afp->if_bytes;
+#else
+ ASSERT(iip->ili_aextents_buf == NULL);
+ /*
+ * Need to endian flip before logging
+ */
+ ext_buffer = kmem_alloc(ip->i_df.if_bytes,
+ KM_SLEEP);
+ iip->ili_aextents_buf = ext_buffer;
+ vecp->i_addr = (xfs_caddr_t)ext_buffer;
+ vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
+ XFS_ATTR_FORK);
+#endif
iip->ili_format.ilf_asize = vecp->i_len;
vecp++;
nvecs++;
@@ -630,7 +649,6 @@ xfs_inode_item_unlock(
* If the inode needed a separate buffer with which to log
* its extents, then free it now.
*/
- /* FIXME */
if (iip->ili_extents_buf != NULL) {
ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS);
ASSERT(ip->i_d.di_nextents > 0);
@@ -639,6 +657,14 @@ xfs_inode_item_unlock(
kmem_free(iip->ili_extents_buf, ip->i_df.if_bytes);
iip->ili_extents_buf = NULL;
}
+ if (iip->ili_aextents_buf != NULL) {
+ ASSERT(ip->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS);
+ ASSERT(ip->i_d.di_anextents > 0);
+ ASSERT(iip->ili_format.ilf_fields & XFS_ILOG_AEXT);
+ ASSERT(ip->i_afp->if_bytes > 0);
+ kmem_free(iip->ili_aextents_buf, ip->i_afp->if_bytes);
+ iip->ili_aextents_buf = NULL;
+ }
/*
* Figure out if we should unlock the inode or not.
@@ -889,7 +915,7 @@ xfs_inode_item_init(
iip->ili_inode = ip;
/*
- We have bzeroed memory. No need ...
+ We have zeroed memory. No need ...
iip->ili_extents_buf = NULL;
iip->ili_pushbuf_flag = 0;
*/
diff --git a/fs/xfs/xfs_inode_item.h b/fs/xfs/xfs_inode_item.h
index d90407088842..4970205a5e69 100644
--- a/fs/xfs/xfs_inode_item.h
+++ b/fs/xfs/xfs_inode_item.h
@@ -141,7 +141,10 @@ typedef struct xfs_inode_log_item {
unsigned short ili_flags; /* misc flags */
unsigned short ili_logged; /* flushed logged data */
unsigned int ili_last_fields; /* fields when flushed */
- struct xfs_bmbt_rec_32 *ili_extents_buf; /* array of logged exts */
+ struct xfs_bmbt_rec_32 *ili_extents_buf; /* array of logged
+ data exts */
+ struct xfs_bmbt_rec_32 *ili_aextents_buf; /* array of logged
+ attr exts */
unsigned int ili_pushbuf_flag; /* one bit used in push_ail */
#ifdef DEBUG
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index d46f2ccf3844..8fe8097215df 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -146,7 +146,7 @@ xfs_bulkstat_one(
buf->bs_extsize = INT_GET(dic->di_extsize, arch) << mp->m_sb.sb_blocklog;
buf->bs_extents = INT_GET(dic->di_nextents, arch);
buf->bs_gen = INT_GET(dic->di_gen, arch);
- bzero(buf->bs_pad, sizeof(buf->bs_pad));
+ memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
buf->bs_dmevmask = INT_GET(dic->di_dmevmask, arch);
buf->bs_dmstate = INT_GET(dic->di_dmstate, arch);
buf->bs_aextents = INT_GET(dic->di_anextents, arch);
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 942d07386d9e..d156b9cb7a7f 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1586,8 +1586,8 @@ xlog_unalloc_log(xlog_t *log)
* 1. If first write of transaction, write start record
* 2. Write log operation header (header per region)
* 3. Find out if we can fit entire region into this iclog
- * 4. Potentially, verify destination bcopy ptr
- * 5. Bcopy (partial) region
+ * 4. Potentially, verify destination memcpy ptr
+ * 5. Memcpy (partial) region
* 6. If partial copy, release iclog; otherwise, continue
* copying more regions into current iclog
* 4. Mark want sync bit (in simulation mode)
@@ -1628,8 +1628,8 @@ xlog_write(xfs_mount_t * mp,
int start_rec_copy; /* # bytes to copy for start record */
int partial_copy; /* did we split a region? */
int partial_copy_len;/* # bytes copied if split region */
- int need_copy; /* # bytes need to bcopy this region */
- int copy_len; /* # bytes actually bcopy'ing */
+ int need_copy; /* # bytes need to memcpy this region */
+ int copy_len; /* # bytes actually memcpy'ing */
int copy_off; /* # bytes from entry start */
int contwr; /* continued write of in-core log? */
int firstwr = 0; /* first write of transaction */
@@ -1733,7 +1733,7 @@ xlog_write(xfs_mount_t * mp,
/* Partial write last time? => (partial_copy != 0)
* need_copy is the amount we'd like to copy if everything could
- * fit in the current bcopy.
+ * fit in the current memcpy.
*/
need_copy = reg[index].i_len - partial_copy_len;
@@ -1759,7 +1759,7 @@ xlog_write(xfs_mount_t * mp,
/* copy region */
ASSERT(copy_len >= 0);
- bcopy(reg[index].i_addr + copy_off, (xfs_caddr_t)ptr, copy_len);
+ memcpy((xfs_caddr_t)ptr, reg[index].i_addr + copy_off, copy_len);
xlog_write_adv_cnt(ptr, len, log_offset, copy_len);
/* make copy_len total bytes copied, including headers */
@@ -1836,7 +1836,7 @@ xlog_state_clean_log(xlog_t *log)
changed = 2;
}
INT_ZERO(iclog->ic_header.h_num_logops, ARCH_CONVERT);
- bzero(iclog->ic_header.h_cycle_data,
+ memset(iclog->ic_header.h_cycle_data, 0,
sizeof(iclog->ic_header.h_cycle_data));
INT_ZERO(iclog->ic_header.h_lsn, ARCH_CONVERT);
} else if (iclog->ic_state == XLOG_STATE_ACTIVE)
@@ -2064,7 +2064,7 @@ xlog_state_do_callback(
} while (first_iclog != iclog);
if (repeats && (repeats % 10) == 0) {
xfs_fs_cmn_err(CE_WARN, log->l_mp,
- "xlog_state_do_callback: looping %d\n", repeats);
+ "xlog_state_do_callback: looping %d", repeats);
}
} while (!ioerrors && loopdidcallbacks);
@@ -2155,20 +2155,13 @@ xlog_state_done_syncing(
iclog->ic_state = XLOG_STATE_DONE_SYNC;
}
- /*
- * Someone could be sleeping on the next iclog even though it is
- * in the ACTIVE state. We kick off one thread to force the
- * iclog buffer out.
- */
- if (iclog->ic_next->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR))
- sv_signal(&iclog->ic_next->ic_forcesema);
LOG_UNLOCK(log, s);
xlog_state_do_callback(log, aborted, iclog); /* also cleans log */
} /* xlog_state_done_syncing */
/*
- * Update counters atomically now that bcopy is done.
+ * Update counters atomically now that memcpy is done.
*/
/* ARGSUSED */
static inline void
@@ -2984,11 +2977,9 @@ xlog_state_sync(xlog_t *log,
uint flags)
{
xlog_in_core_t *iclog;
- int already_slept = 0;
SPLDECL(s);
-try_again:
s = LOG_LOCK(log);
iclog = log->l_iclog;
@@ -3009,39 +3000,12 @@ try_again:
}
if (iclog->ic_state == XLOG_STATE_ACTIVE) {
- /*
- * We sleep here if we haven't already slept (e.g.
- * this is the first time we've looked at the correct
- * iclog buf) and the buffer before us is going to
- * be sync'ed. We have to do that to ensure that the
- * log records go out in the proper order. When it's
- * done, someone waiting on this buffer will be woken up
- * (maybe us) to flush this buffer out.
- *
- * Otherwise, we mark the buffer WANT_SYNC, and bump
- * up the refcnt so we can release the log (which drops
- * the ref count). The state switch keeps new transaction
- * commits from using this buffer. When the current commits
- * finish writing into the buffer, the refcount will drop to
- * zero and the buffer will go out then.
- */
- if (!already_slept &&
- (iclog->ic_prev->ic_state & (XLOG_STATE_WANT_SYNC |
- XLOG_STATE_SYNCING))) {
- ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR));
- XFS_STATS_INC(xfsstats.xs_log_force_sleep);
- sv_wait(&iclog->ic_prev->ic_forcesema, PSWP,
- &log->l_icloglock, s);
- already_slept = 1;
- goto try_again;
- } else {
- iclog->ic_refcnt++;
- xlog_state_switch_iclogs(log, iclog, 0);
- LOG_UNLOCK(log, s);
- if (xlog_state_release_iclog(log, iclog))
- return XFS_ERROR(EIO);
- s = LOG_LOCK(log);
- }
+ iclog->ic_refcnt++;
+ xlog_state_switch_iclogs(log, iclog, 0);
+ LOG_UNLOCK(log, s);
+ if (xlog_state_release_iclog(log, iclog))
+ return XFS_ERROR(EIO);
+ s = LOG_LOCK(log);
}
if ((flags & XFS_LOG_SYNC) && /* sleep */
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 8d1676e1d157..125af631e3f2 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -476,7 +476,7 @@ xlog_find_head(xlog_t *log,
* mkfs etc write a dummy unmount record to a fresh
* log so we can store the uuid in there
*/
- xlog_warn("XFS: totally zeroed log\n");
+ xlog_warn("XFS: totally zeroed log");
}
return 0;
@@ -873,9 +873,19 @@ xlog_find_tail(xlog_t *log,
* overwrite the unmount record after a clean unmount.
*
* Do this only if we are going to recover the filesystem
+ *
+ * NOTE: This used to say "if (!readonly)"
+ * However on Linux, we can & do recover a read-only filesystem.
+ * We only skip recovery if NORECOVERY is specified on mount,
+ * in which case we would not be here.
+ *
+ * But... if the -device- itself is readonly, just skip this.
+ * We can't recover this device anyway, so it won't matter.
*/
- if (!readonly)
+
+ if (!bdev_read_only(log->l_mp->m_logdev_targp->pbr_bdev)) {
error = xlog_clear_stale_blocks(log, tail_lsn);
+ }
#endif
bread_err:
@@ -1242,7 +1252,7 @@ xlog_recover_add_to_cont_trans(xlog_recover_t *trans,
/* finish copying rest of trans header */
xlog_recover_add_item(&trans->r_itemq);
ptr = (xfs_caddr_t)&trans->r_theader+sizeof(xfs_trans_header_t)-len;
- bcopy(dp, ptr, len); /* s, d, l */
+ memcpy(ptr, dp, len); /* d, s, l */
return 0;
}
item = item->ri_prev;
@@ -1251,7 +1261,7 @@ xlog_recover_add_to_cont_trans(xlog_recover_t *trans,
old_len = item->ri_buf[item->ri_cnt-1].i_len;
ptr = kmem_realloc(old_ptr, len+old_len, old_len, 0);
- bcopy(dp , &ptr[old_len], len); /* s, d, l */
+ memcpy(&ptr[old_len], dp, len); /* d, s, l */
item->ri_buf[item->ri_cnt-1].i_len += len;
item->ri_buf[item->ri_cnt-1].i_addr = ptr;
return 0;
@@ -1282,7 +1292,7 @@ xlog_recover_add_to_trans(xlog_recover_t *trans,
if (!len)
return 0;
ptr = kmem_zalloc(len, 0);
- bcopy(dp, ptr, len);
+ memcpy(ptr, dp, len);
in_f = (xfs_inode_log_format_t *)ptr;
item = trans->r_itemq;
@@ -1290,7 +1300,7 @@ xlog_recover_add_to_trans(xlog_recover_t *trans,
ASSERT(*(uint *)dp == XFS_TRANS_HEADER_MAGIC);
if (len == sizeof(xfs_trans_header_t))
xlog_recover_add_item(&trans->r_itemq);
- bcopy(dp, &trans->r_theader, len); /* s, d, l */
+ memcpy(&trans->r_theader, dp, len); /* d, s, l */
return 0;
}
if (item->ri_prev->ri_total != 0 &&
@@ -1799,9 +1809,10 @@ xlog_recover_do_reg_buffer(xfs_mount_t *mp,
"dquot_buf_recover");
}
if (!error)
- bcopy(item->ri_buf[i].i_addr, /* source */
- xfs_buf_offset(bp, (uint)bit << XFS_BLI_SHIFT), /* dest */
- nbits<<XFS_BLI_SHIFT); /* length */
+ memcpy(xfs_buf_offset(bp,
+ (uint)bit << XFS_BLI_SHIFT), /* dest */
+ item->ri_buf[i].i_addr, /* source */
+ nbits<<XFS_BLI_SHIFT); /* length */
i++;
bit += nbits;
}
@@ -2115,9 +2126,9 @@ xlog_recover_do_inode_trans(xlog_t *log,
-1, ARCH_CONVERT);
/* the rest is in on-disk format */
if (item->ri_buf[1].i_len > sizeof(xfs_dinode_core_t)) {
- bcopy(item->ri_buf[1].i_addr + sizeof(xfs_dinode_core_t),
- (xfs_caddr_t) dip + sizeof(xfs_dinode_core_t),
- item->ri_buf[1].i_len - sizeof(xfs_dinode_core_t));
+ memcpy((xfs_caddr_t) dip + sizeof(xfs_dinode_core_t),
+ item->ri_buf[1].i_addr + sizeof(xfs_dinode_core_t),
+ item->ri_buf[1].i_len - sizeof(xfs_dinode_core_t));
}
fields = in_f->ilf_fields;
@@ -2143,7 +2154,7 @@ xlog_recover_do_inode_trans(xlog_t *log,
switch (fields & XFS_ILOG_DFORK) {
case XFS_ILOG_DDATA:
case XFS_ILOG_DEXT:
- bcopy(src, &dip->di_u, len);
+ memcpy(&dip->di_u, src, len);
break;
case XFS_ILOG_DBROOT:
@@ -2182,7 +2193,7 @@ xlog_recover_do_inode_trans(xlog_t *log,
case XFS_ILOG_AEXT:
dest = XFS_DFORK_APTR(dip);
ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
- bcopy(src, dest, len);
+ memcpy(dest, src, len);
break;
case XFS_ILOG_ABROOT:
@@ -2341,7 +2352,7 @@ xlog_recover_do_dquot_trans(xlog_t *log,
return XFS_ERROR(EIO);
}
- bcopy(recddq, ddq, item->ri_buf[1].i_len);
+ memcpy(ddq, recddq, item->ri_buf[1].i_len);
ASSERT(dq_f->qlf_size == 2);
ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL ||
@@ -2382,7 +2393,7 @@ xlog_recover_do_efi_trans(xlog_t *log,
mp = log->l_mp;
efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
- bcopy((char *)efi_formatp, (char *)&(efip->efi_format),
+ memcpy((char *)&(efip->efi_format), (char *)efi_formatp,
sizeof(xfs_efi_log_format_t) +
((efi_formatp->efi_nextents - 1) * sizeof(xfs_extent_t)));
efip->efi_next_extent = efi_formatp->efi_nextents;
@@ -3131,7 +3142,7 @@ xlog_unpack_data(xlog_rec_header_t *rhead,
"XFS: Disregard message if filesystem was created with non-DEBUG kernel");
if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
cmn_err(CE_DEBUG,
- "XFS: LogR this is a LogV2 filesystem\n");
+ "XFS: LogR this is a LogV2 filesystem");
}
log->l_flags |= XLOG_CHKSUM_MISMATCH;
}
@@ -3215,7 +3226,7 @@ xlog_do_recovery_pass(xlog_t *log,
return ENOMEM;
}
- bzero(rhash, sizeof(rhash));
+ memset(rhash, 0, sizeof(rhash));
if (tail_blk <= head_blk) {
for (blk_no = tail_blk; blk_no < head_blk; ) {
if ((error = xlog_bread(log, blk_no, hblks, hbp)))
@@ -3521,17 +3532,20 @@ xlog_recover(xlog_t *log, int readonly)
* error message.
* ...but this is no longer true. Now, unless you specify
* NORECOVERY (in which case this function would never be
- * called), it enables read-write access long enough to do
- * recovery.
+ * called), we just go ahead and recover. We do this all
+ * under the vfs layer, so we can get away with it unless
+ * the device itself is read-only, in which case we fail.
*/
- if (readonly) {
#ifdef __KERNEL__
- if ((error = xfs_recover_read_only(log)))
- return error;
+ if ((error = xfs_dev_is_read_only(log->l_mp,
+ "recovery required"))) {
+ return error;
+ }
#else
+ if (readonly) {
return ENOSPC;
-#endif
}
+#endif
#ifdef __KERNEL__
#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
@@ -3548,8 +3562,6 @@ xlog_recover(xlog_t *log, int readonly)
#endif
error = xlog_do_recover(log, head_blk, tail_blk);
log->l_flags |= XLOG_RECOVERY_NEEDED;
- if (readonly)
- XFS_MTOVFS(log->l_mp)->vfs_flag |= VFS_RDONLY;
}
return error;
} /* xlog_recover */
@@ -3607,7 +3619,7 @@ xlog_recover_finish(xlog_t *log, int mfsi_flags)
log->l_flags &= ~XLOG_RECOVERY_NEEDED;
} else {
cmn_err(CE_DEBUG,
- "!Ending clean XFS mount for filesystem: %s\n",
+ "!Ending clean XFS mount for filesystem: %s",
log->l_mp->m_fsname);
}
return 0;
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 13c88ba8039e..2625226c73c7 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -31,6 +31,13 @@
*/
#include <xfs.h>
+#include <linux/major.h>
+#include <linux/namei.h>
+#include <linux/pagemap.h>
+
+#ifndef EVMS_MAJOR
+#define EVMS_MAJOR 117
+#endif
STATIC void xfs_mount_reset_sbqflags(xfs_mount_t *);
STATIC void xfs_mount_log_sbunit(xfs_mount_t *, __int64_t);
@@ -375,9 +382,9 @@ xfs_xlatesb(
size == 1 ||
xfs_sb_info[f].type == 1) {
if (dir > 0) {
- bcopy(buf_ptr + first, mem_ptr + first, size);
+ memcpy(mem_ptr + first, buf_ptr + first, size);
} else {
- bcopy(mem_ptr + first, buf_ptr + first, size);
+ memcpy(buf_ptr + first, mem_ptr + first, size);
}
} else {
switch (size) {
@@ -673,7 +680,7 @@ xfs_mountfs(
}
uuid_mounted=1;
ret64 = uuid_hash64(&sbp->sb_uuid);
- bcopy(&ret64, &vfsp->vfs_fsid, sizeof(ret64));
+ memcpy(&vfsp->vfs_fsid, &ret64, sizeof(ret64));
}
/*
@@ -904,7 +911,7 @@ xfs_mountfs(
rvp = XFS_ITOV(rip);
if ((rip->i_d.di_mode & IFMT) != IFDIR) {
cmn_err(CE_WARN, "XFS: corrupted root inode");
- VMAP(rvp, rip, vmap);
+ VMAP(rvp, vmap);
prdev("Root inode %llu is not a directory",
mp->m_dev, (unsigned long long)rip->i_ino);
rvp->v_flag |= VPURGE;
@@ -930,8 +937,7 @@ xfs_mountfs(
if (((quotaondisk && !XFS_IS_QUOTA_ON(mp)) ||
(!quotaondisk && XFS_IS_QUOTA_ON(mp))) &&
- (bdev_read_only(mp->m_ddev_targp->pbr_bdev) ||
- bdev_read_only(mp->m_logdev_targp->pbr_bdev))) {
+ xfs_dev_is_read_only(mp, "changing quota state")) {
cmn_err(CE_WARN,
"XFS: device %s is read-only, cannot change "
"quota state. Please mount with%s quota option.",
@@ -952,7 +958,7 @@ xfs_mountfs(
*/
cmn_err(CE_WARN, "XFS: failed to read RT inodes");
rvp->v_flag |= VPURGE;
- VMAP(rvp, rip, vmap);
+ VMAP(rvp, vmap);
VN_RELE(rvp);
vn_purge(rvp, &vmap);
goto error3;
@@ -1023,14 +1029,12 @@ xfs_mountfs(
if (needquotamount) {
ASSERT(mp->m_qflags == 0);
mp->m_qflags = quotaflags;
- rootqcheck = ((XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY) &&
- mp->m_dev == rootdev && needquotacheck);
- if (rootqcheck && (error = xfs_quotacheck_read_only(mp)))
+ rootqcheck = (mp->m_dev == rootdev && needquotacheck);
+ if (rootqcheck && (error = xfs_dev_is_read_only(mp,
+ "quotacheck")))
goto error2;
if (xfs_qm_mount_quotas(mp))
xfs_mount_reset_sbqflags(mp);
- if (rootqcheck)
- XFS_MTOVFS(mp)->vfs_flag |= VFS_RDONLY;
}
#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
@@ -1135,7 +1139,7 @@ xfs_unmountfs(xfs_mount_t *mp, struct cred *cr)
/*
* clear all error tags on this filesystem
*/
- bcopy(&(XFS_MTOVFS(mp)->vfs_fsid), &fsid, sizeof(int64_t));
+ memcpy(&fsid, &(XFS_MTOVFS(mp)->vfs_fsid), sizeof(int64_t));
(void) xfs_errortag_clearall_umount(fsid, mp->m_fsname, 0);
#endif
@@ -1149,15 +1153,17 @@ xfs_unmountfs_close(xfs_mount_t *mp, struct cred *cr)
int have_logdev = (mp->m_logdev_targp != mp->m_ddev_targp);
if (mp->m_ddev_targp) {
- pagebuf_lock_disable(mp->m_ddev_targp, 0);
+ xfs_free_buftarg(mp->m_ddev_targp);
mp->m_ddev_targp = NULL;
}
if (mp->m_rtdev_targp) {
- pagebuf_lock_disable(mp->m_rtdev_targp, 1);
+ xfs_blkdev_put(mp->m_rtdev_targp->pbr_bdev);
+ xfs_free_buftarg(mp->m_rtdev_targp);
mp->m_rtdev_targp = NULL;
}
if (mp->m_logdev_targp && have_logdev) {
- pagebuf_lock_disable(mp->m_logdev_targp, 1);
+ xfs_blkdev_put(mp->m_logdev_targp->pbr_bdev);
+ xfs_free_buftarg(mp->m_logdev_targp);
mp->m_logdev_targp = NULL;
}
}
@@ -1725,3 +1731,71 @@ xfs_check_frozen(
if (level == XFS_FREEZE_TRANS)
atomic_inc(&mp->m_active_trans);
}
+
+int
+xfs_blkdev_get(
+ const char *name,
+ struct block_device **bdevp)
+{
+ struct nameidata nd;
+ int error = 0;
+
+ error = path_lookup(name, LOOKUP_FOLLOW, &nd);
+ if (error) {
+ printk("XFS: Invalid device [%s], error=%d\n",
+ name, error);
+ return error;
+ }
+
+ /* I think we actually want bd_acquire here.. --hch */
+ *bdevp = bdget(kdev_t_to_nr(nd.dentry->d_inode->i_rdev));
+ if (*bdevp) {
+ error = blkdev_get(*bdevp, FMODE_READ|FMODE_WRITE, 0, BDEV_FS);
+ } else {
+ error = -ENOMEM;
+ }
+
+ path_release(&nd);
+ return -error;
+}
+
+void
+xfs_blkdev_put(
+ struct block_device *bdev)
+{
+ blkdev_put(bdev, BDEV_FS);
+}
+
+void
+xfs_free_buftarg(
+ xfs_buftarg_t *btp)
+{
+ pagebuf_delwri_flush(btp, PBDF_WAIT, NULL);
+ kfree(btp);
+}
+
+xfs_buftarg_t *
+xfs_alloc_buftarg(
+ struct block_device *bdev)
+{
+ xfs_buftarg_t *btp;
+
+ btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
+
+ btp->pbr_dev = bdev->bd_dev;
+ btp->pbr_bdev = bdev;
+ btp->pbr_mapping = bdev->bd_inode->i_mapping;
+ btp->pbr_blocksize = PAGE_CACHE_SIZE;
+
+ switch (MAJOR(btp->pbr_dev)) {
+ case MD_MAJOR:
+ case EVMS_MAJOR:
+ btp->pbr_flags = PBR_ALIGNED_ONLY;
+ break;
+ case LVM_BLK_MAJOR:
+ btp->pbr_flags = PBR_SECTOR_ONLY;
+ break;
+ }
+
+ return btp;
+}
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 51c86fea20c4..7011e001b6c3 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -77,7 +77,6 @@ struct xfs_inode;
struct xfs_perag;
struct xfs_quotainfo;
struct xfs_iocore;
-struct xfs_dio;
struct xfs_bmbt_irec;
struct xfs_bmap_free;
@@ -96,24 +95,18 @@ struct xfs_bmap_free;
* minimize the number of memory indirections involved.
*/
-typedef int (*xfs_dio_write_t)(struct xfs_dio *);
-typedef int (*xfs_dio_read_t)(struct xfs_dio *);
-typedef int (*xfs_strat_write_t)(struct xfs_iocore *, struct xfs_buf *);
typedef int (*xfs_bmapi_t)(struct xfs_trans *, void *,
xfs_fileoff_t, xfs_filblks_t, int,
xfs_fsblock_t *, xfs_extlen_t,
struct xfs_bmbt_irec *, int *,
struct xfs_bmap_free *);
typedef int (*xfs_bmap_eof_t)(void *, xfs_fileoff_t, int, int *);
-typedef int (*xfs_rsync_t)(void *, int, xfs_off_t, xfs_off_t);
-typedef uint (*xfs_lck_map_shared_t)(void *);
typedef void (*xfs_lock_t)(void *, uint);
typedef void (*xfs_lock_demote_t)(void *, uint);
typedef int (*xfs_lock_nowait_t)(void *, uint);
typedef void (*xfs_unlk_t)(void *, unsigned int);
typedef void (*xfs_chgtime_t)(void *, int);
typedef xfs_fsize_t (*xfs_size_t)(void *);
-typedef xfs_fsize_t (*xfs_setsize_t)(void *, xfs_off_t);
typedef xfs_fsize_t (*xfs_lastbyte_t)(void *);
typedef struct xfs_ioops {
@@ -295,8 +288,7 @@ typedef struct xfs_mount {
#define XFS_MOUNT_NOUUID 0x00004000 /* ignore uuid during mount */
#define XFS_MOUNT_32BITINODES 0x00008000 /* do not create inodes above
* 32 bits in size */
-#define XFS_MOUNT_IRIXSGID 0x00010000 /* Irix-style sgid inheritance */
-#define XFS_MOUNT_NOLOGFLUSH 0x00020000
+#define XFS_MOUNT_NOLOGFLUSH 0x00010000
/*
* Flags for m_cxfstype
@@ -436,6 +428,11 @@ int xfs_syncsub(xfs_mount_t *, int, int, int *);
void xfs_initialize_perag(xfs_mount_t *, int);
void xfs_xlatesb(void *, struct xfs_sb *, int, xfs_arch_t, __int64_t);
+int xfs_blkdev_get(const char *, struct block_device **);
+void xfs_blkdev_put(struct block_device *);
+struct xfs_buftarg *xfs_alloc_buftarg(struct block_device *);
+void xfs_free_buftarg(struct xfs_buftarg *);
+
/*
* Flags for freeze operations.
*/
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index db7f44f0eb52..b728d271c7ee 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -1039,14 +1039,14 @@ xfs_qm_unmount(
vp = XFS_ITOV(XFS_QI_UQIP(mp));
VN_RELE(vp);
if (vn_count(vp) > 1)
- cmn_err(CE_WARN, "UQUOTA busy vp=0x%x count=%d\n",
+ cmn_err(CE_WARN, "UQUOTA busy vp=0x%x count=%d",
vp, vn_count(vp));
}
if (XFS_IS_GQUOTA_ON(mp)) {
vp = XFS_ITOV(XFS_QI_GQIP(mp));
VN_RELE(vp);
if (vn_count(vp) > 1)
- cmn_err(CE_WARN, "GQUOTA busy vp=0x%x count=%d\n",
+ cmn_err(CE_WARN, "GQUOTA busy vp=0x%x count=%d",
vp, vn_count(vp));
}
@@ -1427,9 +1427,9 @@ xfs_qm_qino_alloc(
xfs_trans_cancel(tp, 0);
return (error);
}
- bzero(&zerocr, sizeof(zerocr));
+ memset(&zerocr, 0, sizeof(zerocr));
- if ((error = xfs_dir_ialloc(&tp, mp->m_rootip, IFREG, 1, mp->m_dev,
+ if ((error = xfs_dir_ialloc(&tp, mp->m_rootip, IFREG, 1, 0,
&zerocr, 0, 1, ip, &committed))) {
xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
XFS_TRANS_ABORT);
@@ -1998,11 +1998,16 @@ xfs_qm_init_quotainos(
int error;
__int64_t sbflags;
uint flags;
+ int readonly;
+ vfs_t *vfsp;
ASSERT(mp->m_quotainfo);
uip = gip = NULL;
+ error = 0;
sbflags = 0;
flags = 0;
+ vfsp = XFS_MTOVFS(mp);
+ readonly = vfsp->vfs_flag & VFS_RDONLY;
/*
* Get the uquota and gquota inodes
@@ -2036,36 +2041,34 @@ xfs_qm_init_quotainos(
* made above will get added to a transaction and logged in one of
* the qino_alloc calls below.
*/
+
if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
- if (XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY)
- return XFS_ERROR(EROFS);
if ((error = xfs_qm_qino_alloc(mp, &uip,
sbflags | XFS_SB_UQUOTINO,
flags | XFS_QMOPT_UQUOTA)))
- return XFS_ERROR(error);
+ goto error;
flags &= ~XFS_QMOPT_SBVERSION;
}
if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
- if (XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY) {
- if (uip)
- VN_RELE(XFS_ITOV(uip));
- return XFS_ERROR(EROFS);
- }
if ((error = xfs_qm_qino_alloc(mp, &gip,
sbflags | XFS_SB_GQUOTINO,
flags | XFS_QMOPT_GQUOTA))) {
if (uip)
VN_RELE(XFS_ITOV(uip));
- return XFS_ERROR(error);
+ goto error;
}
}
XFS_QI_UQIP(mp) = uip;
XFS_QI_GQIP(mp) = gip;
- return (0);
+error:
+ if (readonly)
+ vfsp->vfs_flag |= VFS_RDONLY;
+
+ return XFS_ERROR(error);
}
@@ -2414,11 +2417,11 @@ xfs_qm_dqalloc_incore(
if ((dqp = xfs_qm_dqreclaim_one())) {
XFS_STATS_INC(xfsstats.xs_qm_dqreclaims);
/*
- * Just bzero the core here. The rest will get
+ * Just zero the core here. The rest will get
* reinitialized by caller. XXX we shouldn't even
- * do this bzero ...
+			 * do this zeroing ...
*/
- bzero(&dqp->q_core, sizeof(dqp->q_core));
+ memset(&dqp->q_core, 0, sizeof(dqp->q_core));
*O_dqpp = dqp;
return (B_FALSE);
}
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
index 73f22a296ec8..b3cfba7ecad0 100644
--- a/fs/xfs/xfs_qm_syscalls.c
+++ b/fs/xfs/xfs_qm_syscalls.c
@@ -550,7 +550,7 @@ xfs_qm_scall_getqstat(
uip = gip = NULL;
tempuqip = tempgqip = B_FALSE;
- bzero(out, sizeof(fs_quota_stat_t));
+ memset(out, 0, sizeof(fs_quota_stat_t));
out->qs_version = FS_QSTAT_VERSION;
if (! XFS_SB_VERSION_HASQUOTA(&mp->m_sb)) {
@@ -885,7 +885,7 @@ xfs_qm_export_dquot(
xfs_disk_dquot_t *src,
struct fs_disk_quota *dst)
{
- bzero(dst, sizeof(*dst));
+ memset(dst, 0, sizeof(*dst));
dst->d_version = FS_DQUOT_VERSION; /* different from src->d_version */
dst->d_flags =
xfs_qm_export_qtype_flags(INT_GET(src->d_flags, ARCH_CONVERT));
@@ -1060,7 +1060,7 @@ again:
* Sample vp mapping while holding the mplock, lest
* we come across a non-existent vnode.
*/
- VMAP(vp, ip, vmap);
+ VMAP(vp, vmap);
ireclaims = mp->m_ireclaims;
topino = mp->m_inodes;
XFS_MOUNT_IUNLOCK(mp);
diff --git a/fs/xfs/xfs_quota_priv.h b/fs/xfs/xfs_quota_priv.h
index c7f8bb60e561..d016d2b6cf8a 100644
--- a/fs/xfs/xfs_quota_priv.h
+++ b/fs/xfs/xfs_quota_priv.h
@@ -181,7 +181,7 @@ for ((dqp) = (qlist)->qh_next; (dqp) != (xfs_dquot_t *)(qlist); \
vmap_t dqvmap; \
vnode_t *dqvp; \
dqvp = XFS_ITOV(ip); \
- VMAP(dqvp, ip, dqvmap); \
+ VMAP(dqvp, dqvmap); \
VN_RELE(dqvp); \
}
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 82684350d6eb..7f5526f4417c 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -170,7 +170,7 @@ xfs_growfs_rt_alloc(
error = XFS_ERROR(EIO);
goto error_exit;
}
- bzero(XFS_BUF_PTR(bp), mp->m_sb.sb_blocksize);
+ memset(XFS_BUF_PTR(bp), 0, mp->m_sb.sb_blocksize);
xfs_trans_log_buf(tp, bp, 0, mp->m_sb.sb_blocksize - 1);
/*
* Commit the transaction.
@@ -2322,7 +2322,7 @@ xfs_rtmount_inodes(
vmap_t vmap; /* vmap to delete vnode */
rbmvp = XFS_ITOV(mp->m_rbmip);
- VMAP(rbmvp, mp->m_rbmip, vmap);
+ VMAP(rbmvp, vmap);
VN_RELE(rbmvp);
vn_purge(rbmvp, &vmap);
return error;
diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c
index 0076a84485e8..01dc65dc1158 100644
--- a/fs/xfs/xfs_rw.c
+++ b/fs/xfs/xfs_rw.c
@@ -230,7 +230,7 @@ xfs_ioerror_alert(
xfs_daddr_t blkno)
{
cmn_err(CE_ALERT,
- "I/O error in filesystem (\"%s\") meta-data dev 0x%x block 0x%llx\n"
+ "I/O error in filesystem (\"%s\") meta-data dev 0x%x block 0x%llx"
" (\"%s\") error %d buf count %u",
(!mp || !mp->m_fsname) ? "(fs name not set)" : mp->m_fsname,
XFS_BUF_TARGET_DEV(bp),
@@ -362,7 +362,7 @@ xfs_inval_cached_pages(
XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
isize = XFS_SIZE(mp, io);
if (offset > isize) {
- xfs_zero_eof(vp, io, offset, isize, offset, NULL);
+ xfs_zero_eof(vp, io, offset, isize, offset);
}
XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
}
diff --git a/fs/xfs/xfs_rw.h b/fs/xfs/xfs_rw.h
index f2f4c5d88738..96bf21108a24 100644
--- a/fs/xfs/xfs_rw.h
+++ b/fs/xfs/xfs_rw.h
@@ -42,8 +42,6 @@ struct xfs_inode;
struct xfs_iocore;
struct xfs_mount;
struct xfs_trans;
-struct xfs_dio;
-struct pm;
/*
* Maximum count of bmaps used by read and write paths.
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 292ad257bc38..6e8dd532c1dc 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -788,6 +788,7 @@ shut_us_down:
commit_lsn = xfs_log_done(mp, tp->t_ticket, log_flags);
#endif
+ tp->t_commit_lsn = commit_lsn;
if (nvec > XFS_TRANS_LOGVEC_COUNT) {
kmem_free(log_vector, nvec * sizeof(xfs_log_iovec_t));
}
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 1845dd874a4b..5489e4dded40 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -378,7 +378,10 @@ typedef struct xfs_trans {
unsigned int t_rtx_res_used; /* # of resvd rt extents used */
xfs_log_ticket_t t_ticket; /* log mgr ticket */
sema_t t_sema; /* sema for commit completion */
- xfs_lsn_t t_lsn; /* log seq num of trans commit*/
+ xfs_lsn_t t_lsn; /* log seq num of start of
+ * transaction. */
+ xfs_lsn_t t_commit_lsn; /* log seq num of end of
+ * transaction. */
struct xfs_mount *t_mountp; /* ptr to fs mount struct */
struct xfs_dquot_acct *t_dqinfo; /* accting info for dquots */
xfs_trans_callback_t t_callback; /* transaction callback */
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index e6bcf374e54c..8daceb174c4f 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -472,7 +472,7 @@ shutdown_abort:
*/
#if defined(DEBUG)
if (XFS_BUF_ISSTALE(bp) && XFS_BUF_ISDELAYWRITE(bp))
- cmn_err(CE_NOTE, "about to pop assert, bp == 0x%x\n", bp);
+ cmn_err(CE_NOTE, "about to pop assert, bp == 0x%x", bp);
#endif
ASSERT((XFS_BUF_BFLAGS(bp) & (XFS_B_STALE|XFS_B_DELWRI)) !=
(XFS_B_STALE|XFS_B_DELWRI));
@@ -880,7 +880,7 @@ xfs_trans_binval(
bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_DIRTY);
bip->bli_format.blf_flags &= ~XFS_BLI_INODE_BUF;
bip->bli_format.blf_flags |= XFS_BLI_CANCEL;
- bzero((char *)(bip->bli_format.blf_data_map),
+ memset((char *)(bip->bli_format.blf_data_map), 0,
(bip->bli_format.blf_map_size * sizeof(uint)));
lidp->lid_flags |= XFS_LID_DIRTY;
tp->t_flags |= XFS_TRANS_DIRTY;
diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c
index b7fbf81b00da..0df7fc3075cf 100644
--- a/fs/xfs/xfs_trans_inode.c
+++ b/fs/xfs/xfs_trans_inode.c
@@ -414,20 +414,8 @@ xfs_trans_inode_broot_debug(
iip->ili_root_size = ip->i_df.if_broot_bytes;
iip->ili_orig_root =
(char*)kmem_alloc(iip->ili_root_size, KM_SLEEP);
- bcopy((char*)(ip->i_df.if_broot), iip->ili_orig_root,
+ memcpy(iip->ili_orig_root, (char*)(ip->i_df.if_broot),
iip->ili_root_size);
}
}
#endif
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/fs/xfs/xfs_types.h b/fs/xfs/xfs_types.h
index aa6fe38dde5e..e08f8b727a65 100644
--- a/fs/xfs/xfs_types.h
+++ b/fs/xfs/xfs_types.h
@@ -308,23 +308,19 @@ extern struct xfsstats xfsstats;
#endif /* !CONFIG_PROC_FS */
-
-/* juggle IRIX device numbers - still used in ondisk structures */
-
-#ifndef __KERNEL__
-#define MKDEV(major, minor) makedev(major, minor)
-#endif
-
-#define IRIX_DEV_BITSMAJOR 14
-#define IRIX_DEV_BITSMINOR 18
-#define IRIX_DEV_MAXMAJ 0x1ff
-#define IRIX_DEV_MAXMIN 0x3ffff
-#define IRIX_DEV_MAJOR(dev) ((int)(((unsigned)(dev)>>IRIX_DEV_BITSMINOR) \
- & IRIX_DEV_MAXMAJ))
-#define IRIX_DEV_MINOR(dev) ((int)((dev)&IRIX_DEV_MAXMIN))
-#define IRIX_MKDEV(major,minor) ((xfs_dev_t)(((major)<<IRIX_DEV_BITSMINOR) \
- | (minor&IRIX_DEV_MAXMIN)))
-
-#define IRIX_DEV_TO_KDEVT(dev) MKDEV(IRIX_DEV_MAJOR(dev),IRIX_DEV_MINOR(dev))
+/*
+ * Juggle IRIX device numbers - still used in ondisk structures
+ */
+#define XFS_DEV_BITSMAJOR 14
+#define XFS_DEV_BITSMINOR 18
+#define XFS_DEV_MAXMAJ 0x1ff
+#define XFS_DEV_MAXMIN 0x3ffff
+#define XFS_DEV_MAJOR(dev) ((int)(((unsigned)(dev)>>XFS_DEV_BITSMINOR) \
+ & XFS_DEV_MAXMAJ))
+#define XFS_DEV_MINOR(dev) ((int)((dev)&XFS_DEV_MAXMIN))
+#define XFS_MKDEV(major,minor) ((xfs_dev_t)(((major)<<XFS_DEV_BITSMINOR) \
+ | (minor&XFS_DEV_MAXMIN)))
+
+#define XFS_DEV_TO_KDEVT(dev) mk_kdev(XFS_DEV_MAJOR(dev),XFS_DEV_MINOR(dev))
#endif /* !__XFS_TYPES_H */
diff --git a/fs/xfs/xfs_utils.c b/fs/xfs/xfs_utils.c
index 9fcee5b06cdc..d82bf3a6a7be 100644
--- a/fs/xfs/xfs_utils.c
+++ b/fs/xfs/xfs_utils.c
@@ -132,7 +132,7 @@ xfs_dir_ialloc(
the inode. */
mode_t mode,
nlink_t nlink,
- dev_t rdev,
+ xfs_dev_t rdev,
cred_t *credp,
prid_t prid, /* project id */
int okalloc, /* ok to allocate new space */
@@ -345,7 +345,7 @@ xfs_bump_ino_vers2(
ip->i_d.di_version = XFS_DINODE_VERSION_2;
ip->i_d.di_onlink = 0;
- bzero(&(ip->i_d.di_pad[0]), sizeof(ip->i_d.di_pad));
+ memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
mp = tp->t_mountp;
if (!XFS_SB_VERSION_HASNLINK(&mp->m_sb)) {
s = XFS_SB_LOCK(mp);
diff --git a/fs/xfs/xfs_utils.h b/fs/xfs/xfs_utils.h
index db4da1029291..ac8f5b92ba0f 100644
--- a/fs/xfs/xfs_utils.h
+++ b/fs/xfs/xfs_utils.h
@@ -76,7 +76,7 @@ xfs_dir_ialloc(
struct xfs_inode *dp,
mode_t mode,
nlink_t nlink,
- dev_t rdev,
+ xfs_dev_t rdev,
struct cred *credp,
prid_t prid,
int okalloc,
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c
index cce4a74d5dfe..97f6f65feca6 100644
--- a/fs/xfs/xfs_vfsops.c
+++ b/fs/xfs/xfs_vfsops.c
@@ -225,7 +225,7 @@ xfs_start_flags(
(ap->logbufs < XLOG_NUM_ICLOGS ||
ap->logbufs > XLOG_MAX_ICLOGS)) {
cmn_err(CE_WARN,
- "XFS: invalid logbufs value: %d [not %d-%d]\n",
+ "XFS: invalid logbufs value: %d [not %d-%d]",
ap->logbufs, XLOG_NUM_ICLOGS, XLOG_MAX_ICLOGS);
return XFS_ERROR(EINVAL);
}
@@ -237,7 +237,7 @@ xfs_start_flags(
ap->logbufsize != 128 * 1024 &&
ap->logbufsize != 256 * 1024) {
cmn_err(CE_WARN,
- "XFS: invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]\n",
+ "XFS: invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
ap->logbufsize);
return XFS_ERROR(EINVAL);
}
@@ -274,13 +274,9 @@ xfs_start_flags(
if (ap->flags & XFSMNT_OSYNCISOSYNC)
mp->m_flags |= XFS_MOUNT_OSYNCISOSYNC;
- /* Default on Linux */
- if (1 || ap->flags & XFSMNT_32BITINODES)
+ if (ap->flags & XFSMNT_32BITINODES)
mp->m_flags |= XFS_MOUNT_32BITINODES;
- if (ap->flags & XFSMNT_IRIXSGID)
- mp->m_flags |= XFS_MOUNT_IRIXSGID;
-
if (ap->flags & XFSMNT_IOSIZE) {
if (ap->iosizelog > XFS_MAX_IO_LOG ||
ap->iosizelog < XFS_MIN_IO_LOG) {
@@ -392,145 +388,115 @@ xfs_finish_flags(
}
/*
- * xfs_cmountfs
+ * xfs_mount
*
- * This function is the common mount file system function for XFS.
+ * The file system configurations are:
+ * (1) device (partition) with data and internal log
+ * (2) logical volume with data and log subvolumes.
+ * (3) logical volume with data, log, and realtime subvolumes.
+ *
+ * The Linux VFS took care of finding and opening the data volume for
+ * us. We have to handle the other two (if present) here.
*/
STATIC int
-xfs_cmountfs(
+xfs_mount(
vfs_t *vfsp,
- dev_t ddev,
- dev_t logdev,
- dev_t rtdev,
- struct xfs_mount_args *ap,
- struct cred *cr)
+ struct xfs_mount_args *args,
+ cred_t *credp)
{
xfs_mount_t *mp;
+ struct block_device *ddev, *logdev, *rtdev;
int ronly = (vfsp->vfs_flag & VFS_RDONLY);
int error = 0;
- /*
- * Allocate VFS private data (xfs mount structure).
- */
- mp = xfs_mount_init();
-
- vfs_insertbhv(vfsp, &mp->m_bhv, &xfs_vfsops, mp);
+ ddev = vfsp->vfs_super->s_bdev;
+ logdev = rtdev = NULL;
/*
- * Open data, real time, and log devices now - order is important.
+ * Open real time and log devices - order is important.
*/
- mp->m_ddev_targp = pagebuf_lock_enable(ddev, 0);
- if (IS_ERR(mp->m_ddev_targp)) {
- error = PTR_ERR(mp->m_ddev_targp);
- goto error2;
+ if (args->logname[0]) {
+ error = xfs_blkdev_get(args->logname, &logdev);
+ if (error)
+ return error;
}
-
- if (rtdev != 0) {
- mp->m_rtdev_targp = pagebuf_lock_enable(rtdev, 1);
- if (IS_ERR(mp->m_rtdev_targp)) {
- error = PTR_ERR(mp->m_rtdev_targp);
- pagebuf_lock_disable(mp->m_ddev_targp, 0);
- goto error2;
+ if (args->rtname[0]) {
+ error = xfs_blkdev_get(args->rtname, &rtdev);
+ if (error) {
+ xfs_blkdev_put(logdev);
+ return error;
}
if (rtdev == ddev || rtdev == logdev) {
cmn_err(CE_WARN,
"XFS: Cannot mount filesystem with identical rtdev and ddev/logdev.");
- error = EINVAL;
- pagebuf_lock_disable(mp->m_ddev_targp, 0);
- goto error2;
+ xfs_blkdev_put(logdev);
+ xfs_blkdev_put(rtdev);
+ return EINVAL;
}
-
- /* Set the realtime device's block size */
- set_blocksize(mp->m_rtdev_targp->pbr_bdev, 512);
}
- if (logdev != ddev) {
- mp->m_logdev_targp = pagebuf_lock_enable(logdev, 1);
- if (IS_ERR(mp->m_logdev_targp)) {
- error = PTR_ERR(mp->m_logdev_targp);
- pagebuf_lock_disable(mp->m_ddev_targp, 1);
- if (mp->m_rtdev_targp)
- pagebuf_lock_disable(mp->m_rtdev_targp, 1);
- goto error2;
- }
+ /*
+ * Allocate VFS private data (xfs mount structure).
+ */
+ mp = xfs_mount_init();
- /* Set the log device's block size */
- set_blocksize(mp->m_logdev_targp->pbr_bdev, 512);
+ vfs_insertbhv(vfsp, &mp->m_bhv, &xfs_vfsops, mp);
+
+ mp->m_ddev_targp = xfs_alloc_buftarg(ddev);
+ if (rtdev != NULL) {
+ mp->m_rtdev_targp = xfs_alloc_buftarg(rtdev);
+ set_blocksize(rtdev, 512);
+ }
+ if (logdev != NULL && logdev != ddev) {
+ mp->m_logdev_targp = xfs_alloc_buftarg(logdev);
+ set_blocksize(logdev, 512);
} else {
mp->m_logdev_targp = mp->m_ddev_targp;
}
- if ((error = xfs_start_flags(ap, mp, ronly)))
- goto error3;
+ error = xfs_start_flags(args, mp, ronly);
+ if (error)
+ goto error;
- if ((error = xfs_readsb(mp)))
- goto error3;
+ error = xfs_readsb(mp);
+ if (error)
+ goto error;
- if ((error = xfs_finish_flags(ap, mp, ronly))) {
+ error = xfs_finish_flags(args, mp, ronly);
+ if (error) {
xfs_freesb(mp);
- goto error3;
+ goto error;
}
- pagebuf_target_blocksize(mp->m_ddev_targp, mp->m_sb.sb_blocksize);
- if (logdev != 0 && logdev != ddev)
- pagebuf_target_blocksize(mp->m_logdev_targp,
- mp->m_sb.sb_blocksize);
- if (rtdev != 0)
- pagebuf_target_blocksize(mp->m_rtdev_targp,
- mp->m_sb.sb_blocksize);
+ mp->m_ddev_targp->pbr_blocksize = mp->m_sb.sb_blocksize;
+ if (logdev != 0 && logdev != ddev) {
+ mp->m_logdev_targp->pbr_blocksize = mp->m_sb.sb_blocksize;
+ }
+ if (rtdev != 0) {
+ mp->m_rtdev_targp->pbr_blocksize = mp->m_sb.sb_blocksize;
+ }
mp->m_cxfstype = XFS_CXFS_NOT;
- error = xfs_mountfs(vfsp, mp, ddev, 0);
+ error = xfs_mountfs(vfsp, mp, ddev->bd_dev, 0);
if (error)
- goto error3;
+ goto error;
return 0;
- error3:
- /* It's impossible to get here before buftargs are filled */
+ error:
xfs_binval(mp->m_ddev_targp);
- pagebuf_lock_disable(mp->m_ddev_targp, 0);
- if (logdev && logdev != ddev) {
+ if (logdev != NULL && logdev != ddev) {
xfs_binval(mp->m_logdev_targp);
- pagebuf_lock_disable(mp->m_logdev_targp, 1);
}
- if (rtdev != 0) {
+ if (rtdev != NULL) {
xfs_binval(mp->m_rtdev_targp);
- pagebuf_lock_disable(mp->m_rtdev_targp, 1);
- }
- error2:
- if (error) {
- xfs_mount_free(mp, 1);
}
+ xfs_unmountfs_close(mp, NULL);
+ xfs_mount_free(mp, 1);
return error;
}
/*
- * xfs_mount
- *
- * The file system configurations are:
- * (1) device (partition) with data and internal log
- * (2) logical volume with data and log subvolumes.
- * (3) logical volume with data, log, and realtime subvolumes.
- */
-STATIC int
-xfs_mount(
- vfs_t *vfsp,
- struct xfs_mount_args *args,
- cred_t *credp)
-{
- dev_t ddev;
- dev_t logdev;
- dev_t rtdev;
- int error;
-
- error = spectodevs(vfsp->vfs_super, args, &ddev, &logdev, &rtdev);
- if (!error)
- error = xfs_cmountfs(vfsp, ddev, logdev, rtdev, args, credp);
- return (error);
-}
-
-/*
* xfs_ibusy searches for a busy inode in the mounted file system.
*
* Return 0 if there are no active inodes otherwise return 1.
@@ -1149,7 +1115,7 @@ xfs_syncsub(
* in taking a snapshot of the vnode version number
* for use in calling vn_get().
*/
- VMAP(vp, ip, vmap);
+ VMAP(vp, vmap);
IPOINTER_INSERT(ip, mp);
vp = vn_get(vp, &vmap);
@@ -1601,6 +1567,39 @@ xfs_syncsub(
return XFS_ERROR(last_error);
}
+STATIC void
+xfs_initialize_vnode(
+ bhv_desc_t *bdp,
+ vnode_t *vp,
+ bhv_desc_t *inode_bhv,
+ int unlock)
+{
+ xfs_inode_t *ip = XFS_BHVTOI(inode_bhv);
+ struct inode *inode = LINVFS_GET_IP(vp);
+
+ if (vp->v_fbhv == NULL) {
+ vp->v_vfsp = bhvtovfs(bdp);
+ bhv_desc_init(&(ip->i_bhv_desc), ip, vp, &xfs_vnodeops);
+ bhv_insert_initial(VN_BHV_HEAD(vp), &(ip->i_bhv_desc));
+ }
+
+ vp->v_type = IFTOVT(ip->i_d.di_mode);
+ /* Have we been called during the new inode create process,
+ * in which case we are too early to fill in the linux inode.
+ */
+ if (vp->v_type == VNON)
+ return;
+
+ xfs_revalidate_inode(XFS_BHVTOM(bdp), vp, ip);
+
+ /* For new inodes we need to set the ops vectors,
+ * and unlock the inode.
+ */
+ if (unlock && (inode->i_state & I_NEW)) {
+ linvfs_set_inode_ops(inode);
+ unlock_new_inode(inode);
+ }
+}
/*
* xfs_vget - called by DMAPI to get vnode from file handle
@@ -1653,11 +1652,6 @@ xfs_vget(
inode = LINVFS_GET_IP((*vpp));
xfs_iunlock(ip, XFS_ILOCK_SHARED);
- error = linvfs_revalidate_core(inode, ATTR_COMM);
- if (error) {
- iput(inode);
- return XFS_ERROR(error);
- }
return 0;
}
@@ -1670,6 +1664,7 @@ vfsops_t xfs_vfsops = {
.vfs_statvfs = xfs_statvfs,
.vfs_sync = xfs_sync,
.vfs_vget = xfs_vget,
+ .vfs_init_vnode = xfs_initialize_vnode,
.vfs_force_shutdown = xfs_do_force_shutdown,
#ifdef CONFIG_XFS_DMAPI
.vfs_dmapi_mount = xfs_dm_mount,
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 46c42a2cd8ed..07f1af20c108 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -208,7 +208,7 @@ xfs_getattr(
(mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog);
}
} else {
- vap->va_rdev = IRIX_DEV_TO_KDEVT(ip->i_df.if_u2.if_rdev);
+ vap->va_rdev = ip->i_df.if_u2.if_rdev;
vap->va_blksize = BLKDEV_IOSIZE;
}
@@ -1970,7 +1970,7 @@ xfs_create(
vnode_t *vp=NULL;
xfs_trans_t *tp;
xfs_mount_t *mp;
- dev_t rdev;
+ xfs_dev_t rdev;
int error;
xfs_bmap_free_t free_list;
xfs_fsblock_t first_block;
@@ -2955,8 +2955,7 @@ xfs_mkdir(
xfs_inode_t *cdp; /* inode of created dir */
vnode_t *cvp; /* vnode of created dir */
xfs_trans_t *tp;
- dev_t rdev;
- mode_t mode;
+ xfs_dev_t rdev;
xfs_mount_t *mp;
int cancel_flags;
int error;
@@ -3062,8 +3061,9 @@ xfs_mkdir(
* create the directory inode.
*/
rdev = (vap->va_mask & AT_RDEV) ? vap->va_rdev : 0;
- mode = IFDIR | (vap->va_mode & ~IFMT);
- error = xfs_dir_ialloc(&tp, dp, mode, 2, rdev, credp, prid, resblks > 0,
+ error = xfs_dir_ialloc(&tp, dp,
+ MAKEIMODE(vap->va_type,vap->va_mode), 2,
+ rdev, credp, prid, resblks > 0,
&cdp, NULL);
if (error) {
if (error == ENOSPC)
@@ -3521,7 +3521,7 @@ xfs_symlink(
xfs_inode_t *ip;
int error;
int pathlen;
- dev_t rdev;
+ xfs_dev_t rdev;
xfs_bmap_free_t free_list;
xfs_fsblock_t first_block;
boolean_t dp_joined_to_trans;
@@ -3702,7 +3702,7 @@ xfs_symlink(
*/
if (pathlen <= XFS_IFORK_DSIZE(ip)) {
xfs_idata_realloc(ip, pathlen, XFS_DATA_FORK);
- bcopy(target_path, ip->i_df.if_u1.if_data, pathlen);
+ memcpy(ip->i_df.if_u1.if_data, target_path, pathlen);
ip->i_d.di_size = pathlen;
/*
@@ -3743,7 +3743,7 @@ xfs_symlink(
}
pathlen -= byte_cnt;
- bcopy(cur_chunk, XFS_BUF_PTR(bp), byte_cnt);
+ memcpy(XFS_BUF_PTR(bp), cur_chunk, byte_cnt);
cur_chunk += byte_cnt;
xfs_trans_log_buf(tp, bp, 0, byte_cnt - 1);
@@ -3859,10 +3859,10 @@ xfs_fid2(
xfid->fid_len = sizeof(xfs_fid2_t) - sizeof(xfid->fid_len);
xfid->fid_pad = 0;
/*
- * use bcopy because the inode is a long long and there's no
+ * use memcpy because the inode is a long long and there's no
* assurance that xfid->fid_ino is properly aligned.
*/
- bcopy(&ip->i_ino, &xfid->fid_ino, sizeof xfid->fid_ino);
+ memcpy(&xfid->fid_ino, &ip->i_ino, sizeof(xfid->fid_ino));
xfid->fid_gen = ip->i_d.di_gen;
return 0;
@@ -4504,9 +4504,9 @@ xfs_zero_remaining_bytes(
mp, bp, XFS_BUF_ADDR(bp));
break;
}
- bzero(XFS_BUF_PTR(bp) +
+ memset(XFS_BUF_PTR(bp) +
(offset - XFS_FSB_TO_B(mp, imap.br_startoff)),
- lastoffset - offset + 1);
+ 0, lastoffset - offset + 1);
XFS_BUF_UNDONE(bp);
XFS_BUF_UNREAD(bp);
XFS_BUF_WRITE(bp);
@@ -4937,7 +4937,6 @@ vnodeops_t xfs_vnodeops = {
.vop_rwlock = xfs_rwlock,
.vop_rwunlock = xfs_rwunlock,
.vop_bmap = xfs_bmap,
- .vop_strategy = xfs_strategy,
.vop_reclaim = xfs_reclaim,
.vop_attr_get = xfs_attr_get,
.vop_attr_set = xfs_attr_set,
diff --git a/fs/xfs/xfsidbg.c b/fs/xfs/xfsidbg.c
index 28ec0bd10af3..f4d1a6b4951e 100644
--- a/fs/xfs/xfsidbg.c
+++ b/fs/xfs/xfsidbg.c
@@ -53,6 +53,9 @@ static void xfsidbg_xagf(xfs_agf_t *);
static void xfsidbg_xagi(xfs_agi_t *);
static void xfsidbg_xaildump(xfs_mount_t *);
static void xfsidbg_xalloc(xfs_alloc_arg_t *);
+#ifdef DEBUG
+static void xfsidbg_xalmtrace(xfs_mount_t *);
+#endif
static void xfsidbg_xattrcontext(xfs_attr_list_context_t *);
static void xfsidbg_xattrleaf(xfs_attr_leafblock_t *);
static void xfsidbg_xattrsf(xfs_attr_shortform_t *);
@@ -196,6 +199,29 @@ static int kdbm_xfs_xalloc(
return 0;
}
+#ifdef DEBUG
+static int kdbm_xfs_xalmtrace(
+ int argc,
+ const char **argv,
+ const char **envp,
+ struct pt_regs *regs)
+{
+ unsigned long addr;
+ int nextarg = 1;
+ long offset = 0;
+ int diag;
+
+ if (argc != 1)
+ return KDB_ARGCOUNT;
+ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs);
+ if (diag)
+ return diag;
+
+ xfsidbg_xalmtrace((xfs_mount_t *) addr);
+ return 0;
+}
+#endif /* DEBUG */
+
static int kdbm_xfs_xattrcontext(
int argc,
const char **argv,
@@ -1700,18 +1726,19 @@ static int kdbm_vn(
/* pagebuf stuff */
static char *pb_flag_vals[] = {
- "READ", "WRITE", "MAPPED", "PARTIAL",
- "ASYNC", "NONE", "DELWRI", "FREED", "SYNC",
- "MAPPABLE", "STALE", "FS_MANAGED", "RELEASE",
- "LOCK", "TRYLOCK", "ALLOCATE", "FILE_ALLOCATE", "DONT_BLOCK",
- "DIRECT", "LOCKABLE", "NEXT_KEY", "ENTER_PAGES",
- "ALL_PAGES_MAPPED", "SOME_INVALID_PAGES", "ADDR_ALLOCATED",
- "MEM_ALLOCATED", "GRIO", "FORCEIO", "SHUTDOWN",
- NULL };
+/* 0 */ "READ", "WRITE", "MAPPED", "PARTIAL", "ASYNC",
+/* 5 */ "NONE", "DELWRI", "FREED", "SYNC", "MAPPABLE",
+/* 10 */ "STALE", "FS_MANAGED", "INVALID12", "LOCK", "TRYLOCK",
+/* 15 */ "FILE_ALLOCATE", "DONT_BLOCK", "DIRECT", "INVALID18", "LOCKABLE",
+/* 20 */ "PRIVATE_BH", "ALL_PAGES_MAPPED", "ADDR_ALLOCATED", "MEM_ALLOCATED",
+ "FORCEIO",
+/* 25 */ "FLUSH", "READ_AHEAD", "INVALID27", "INVALID28", "INVALID29",
+/* 30 */ "INVALID30", "INVALID31",
+ NULL };
static char *pbm_flag_vals[] = {
- "EOF", "HOLE", "DELAY", "FLUSH_OVERLAPS",
- "READAHEAD", "UNWRITTEN", "DONTALLOC", "NEW",
+ "EOF", "HOLE", "DELAY", "INVALID0x08",
+ "INVALID0x10", "UNWRITTEN", "INVALID0x40", "INVALID0x80",
NULL };
@@ -1964,7 +1991,7 @@ pb_trace_core(
if ((trace->event < EV_SIZE-1) && event_names[trace->event]) {
event = event_names[trace->event];
- } else if (trace->event == EV_SIZE) {
+ } else if (trace->event == EV_SIZE-1) {
event = (char *)trace->misc;
} else {
event = value;
@@ -2081,6 +2108,10 @@ static struct xif {
"Dump XFS AIL for a mountpoint" },
{ "xalloc", kdbm_xfs_xalloc, "<xfs_alloc_arg_t>",
"Dump XFS allocation args structure" },
+#ifdef DEBUG
+ { "xalmtrc", kdbm_xfs_xalmtrace, "<xfs_mount_t>",
+ "Dump XFS alloc mount-point trace" },
+#endif
{ "xattrcx", kdbm_xfs_xattrcontext, "<xfs_attr_list_context_t>",
"Dump XFS attr_list context struct"},
{ "xattrlf", kdbm_xfs_xattrleaf, "<xfs_attr_leafblock_t>",
@@ -2245,13 +2276,14 @@ static char *xfs_alloctype[] = {
/*
* Prototypes for static functions.
*/
+#ifdef DEBUG
+static int xfs_alloc_trace_entry(ktrace_entry_t *ktep);
+#endif
static void xfs_broot(xfs_inode_t *ip, xfs_ifork_t *f);
static void xfs_btalloc(xfs_alloc_block_t *bt, int bsz);
static void xfs_btbmap(xfs_bmbt_block_t *bt, int bsz);
static void xfs_btino(xfs_inobt_block_t *bt, int bsz);
static void xfs_buf_item_print(xfs_buf_log_item_t *blip, int summary);
-static void xfs_convert_extent(xfs_bmbt_rec_64_t *rp, xfs_dfiloff_t *op,
- xfs_dfsbno_t *sp, xfs_dfilblks_t *cp, int *fp);
static void xfs_dastate_path(xfs_da_state_path_t *p);
static void xfs_dir2data(void *addr, int size);
static void xfs_dir2leaf(xfs_dir2_leaf_t *leaf, int size);
@@ -2277,6 +2309,137 @@ static void xfs_xnode_fork(char *name, xfs_ifork_t *f);
* Static functions.
*/
+#ifdef DEBUG
+/*
+ * Print xfs alloc trace buffer entry.
+ */
+static int
+xfs_alloc_trace_entry(ktrace_entry_t *ktep)
+{
+ static char *modagf_flags[] = {
+ "magicnum",
+ "versionnum",
+ "seqno",
+ "length",
+ "roots",
+ "levels",
+ "flfirst",
+ "fllast",
+ "flcount",
+ "freeblks",
+ "longest",
+ NULL
+ };
+
+ if (((__psint_t)ktep->val[0] & 0xffff) == 0)
+ return 0;
+ switch ((long)ktep->val[0] & 0xffffL) {
+ case XFS_ALLOC_KTRACE_ALLOC:
+ kdb_printf("alloc %s[%s %d] mp 0x%p\n",
+ (char *)ktep->val[1],
+ ktep->val[2] ? (char *)ktep->val[2] : "",
+ (__psint_t)ktep->val[0] >> 16,
+ (xfs_mount_t *)ktep->val[3]);
+ kdb_printf(
+ "agno %d agbno %d minlen %d maxlen %d mod %d prod %d minleft %d\n",
+ (__psunsigned_t)ktep->val[4],
+ (__psunsigned_t)ktep->val[5],
+ (__psunsigned_t)ktep->val[6],
+ (__psunsigned_t)ktep->val[7],
+ (__psunsigned_t)ktep->val[8],
+ (__psunsigned_t)ktep->val[9],
+ (__psunsigned_t)ktep->val[10]);
+ kdb_printf("total %d alignment %d len %d type %s otype %s\n",
+ (__psunsigned_t)ktep->val[11],
+ (__psunsigned_t)ktep->val[12],
+ (__psunsigned_t)ktep->val[13],
+ xfs_alloctype[((__psint_t)ktep->val[14]) >> 16],
+ xfs_alloctype[((__psint_t)ktep->val[14]) & 0xffff]);
+ kdb_printf("wasdel %d wasfromfl %d isfl %d userdata %d\n",
+ ((__psint_t)ktep->val[15] & (1 << 3)) != 0,
+ ((__psint_t)ktep->val[15] & (1 << 2)) != 0,
+ ((__psint_t)ktep->val[15] & (1 << 1)) != 0,
+ ((__psint_t)ktep->val[15] & (1 << 0)) != 0);
+ break;
+ case XFS_ALLOC_KTRACE_FREE:
+ kdb_printf("free %s[%s %d] mp 0x%p\n",
+ (char *)ktep->val[1],
+ ktep->val[2] ? (char *)ktep->val[2] : "",
+ (__psint_t)ktep->val[0] >> 16,
+ (xfs_mount_t *)ktep->val[3]);
+ kdb_printf("agno %d agbno %d len %d isfl %d\n",
+ (__psunsigned_t)ktep->val[4],
+ (__psunsigned_t)ktep->val[5],
+ (__psunsigned_t)ktep->val[6],
+ (__psint_t)ktep->val[7]);
+ break;
+ case XFS_ALLOC_KTRACE_MODAGF:
+ kdb_printf("modagf %s[%s %d] mp 0x%p\n",
+ (char *)ktep->val[1],
+ ktep->val[2] ? (char *)ktep->val[2] : "",
+ (__psint_t)ktep->val[0] >> 16,
+ (xfs_mount_t *)ktep->val[3]);
+ printflags((__psint_t)ktep->val[4], modagf_flags, "modified");
+ kdb_printf("seqno %d length %d roots b %d c %d\n",
+ (__psunsigned_t)ktep->val[5],
+ (__psunsigned_t)ktep->val[6],
+ (__psunsigned_t)ktep->val[7],
+ (__psunsigned_t)ktep->val[8]);
+ kdb_printf("levels b %d c %d flfirst %d fllast %d flcount %d\n",
+ (__psunsigned_t)ktep->val[9],
+ (__psunsigned_t)ktep->val[10],
+ (__psunsigned_t)ktep->val[11],
+ (__psunsigned_t)ktep->val[12],
+ (__psunsigned_t)ktep->val[13]);
+ kdb_printf("freeblks %d longest %d\n",
+ (__psunsigned_t)ktep->val[14],
+ (__psunsigned_t)ktep->val[15]);
+ break;
+
+ case XFS_ALLOC_KTRACE_UNBUSY:
+ kdb_printf("unbusy %s [%s %d] mp 0x%p\n",
+ (char *)ktep->val[1],
+ ktep->val[2] ? (char *)ktep->val[2] : "",
+ (__psint_t)ktep->val[0] >> 16,
+ (xfs_mount_t *)ktep->val[3]);
+ kdb_printf(" agno %d slot %d tp 0x%x\n",
+ (__psunsigned_t)ktep->val[4],
+ (__psunsigned_t)ktep->val[7],
+ (__psunsigned_t)ktep->val[8]);
+ break;
+ case XFS_ALLOC_KTRACE_BUSY:
+ kdb_printf("busy %s [%s %d] mp 0x%p\n",
+ (char *)ktep->val[1],
+ ktep->val[2] ? (char *)ktep->val[2] : "",
+ (__psint_t)ktep->val[0] >> 16,
+ (xfs_mount_t *)ktep->val[3]);
+ kdb_printf(" agno %d agbno %d len %d slot %d tp 0x%x\n",
+ (__psunsigned_t)ktep->val[4],
+ (__psunsigned_t)ktep->val[5],
+ (__psunsigned_t)ktep->val[6],
+ (__psunsigned_t)ktep->val[7],
+ (__psunsigned_t)ktep->val[8]);
+ break;
+ case XFS_ALLOC_KTRACE_BUSYSEARCH:
+ kdb_printf("busy-search %s [%s %d] mp 0x%p\n",
+ (char *)ktep->val[1],
+ ktep->val[2] ? (char *)ktep->val[2] : "",
+ (__psint_t)ktep->val[0] >> 16,
+ (xfs_mount_t *)ktep->val[3]);
+ kdb_printf(" agno %d agbno %d len %d slot %d tp 0x%x\n",
+ (__psunsigned_t)ktep->val[4],
+ (__psunsigned_t)ktep->val[5],
+ (__psunsigned_t)ktep->val[6],
+ (__psunsigned_t)ktep->val[7],
+ (__psunsigned_t)ktep->val[8]);
+ break;
+ default:
+ kdb_printf("unknown alloc trace record\n");
+ break;
+ }
+ return 1;
+}
+#endif /* DEBUG */
/*
* Print an xfs in-inode bmap btree root.
@@ -2359,18 +2522,17 @@ xfs_btbmap(xfs_bmbt_block_t *bt, int bsz)
kdb_printf("rightsib %Lx\n", INT_GET(bt->bb_rightsib, ARCH_CONVERT));
if (INT_ISZERO(bt->bb_level, ARCH_CONVERT)) {
for (i = 1; i <= INT_GET(bt->bb_numrecs, ARCH_CONVERT); i++) {
- xfs_bmbt_rec_64_t *r;
- xfs_dfiloff_t o;
- xfs_dfsbno_t s;
- xfs_dfilblks_t c;
- int fl;
+ xfs_bmbt_rec_t *r;
+ xfs_bmbt_irec_t irec;
- r = (xfs_bmbt_rec_64_t *)XFS_BTREE_REC_ADDR(bsz,
+ r = (xfs_bmbt_rec_t *)XFS_BTREE_REC_ADDR(bsz,
xfs_bmbt, bt, i, 0);
- xfs_convert_extent(r, &o, &s, &c, &fl);
- kdb_printf("rec %d startoff %Ld ", i, o);
- kdb_printf("startblock %Lx ", s);
- kdb_printf("blockcount %Ld flag %d\n", c, fl);
+
+ xfs_bmbt_disk_get_all((xfs_bmbt_rec_t *)r, &irec);
+ kdb_printf("rec %d startoff %Ld startblock %Lx blockcount %Ld flag %d\n",
+ i, irec.br_startoff,
+ (__uint64_t)irec.br_startblock,
+ irec.br_blockcount, irec.br_state);
}
} else {
int mxr;
@@ -2473,31 +2635,6 @@ xfs_buf_item_print(xfs_buf_log_item_t *blip, int summary)
}
/*
- * Convert an external extent descriptor to internal form.
- */
-static void
-xfs_convert_extent(xfs_bmbt_rec_64_t *rp, xfs_dfiloff_t *op, xfs_dfsbno_t *sp,
- xfs_dfilblks_t *cp, int *fp)
-{
- xfs_dfiloff_t o;
- xfs_dfsbno_t s;
- xfs_dfilblks_t c;
- int flag;
-
- flag = (int)((INT_GET(rp->l0, ARCH_CONVERT)) >> (64 - 1 ));
- o = ((xfs_fileoff_t)INT_GET(rp->l0, ARCH_CONVERT) &
- (((__uint64_t)1 << ( 64 - 1 )) - 1) ) >> 9;
- s = (((xfs_fsblock_t)INT_GET(rp->l0, ARCH_CONVERT) & (((__uint64_t)1 << ( 9 )) - 1) ) << 43) |
- (((xfs_fsblock_t)INT_GET(rp->l1, ARCH_CONVERT)) >> 21);
- c = (xfs_filblks_t)(INT_GET(rp->l1, ARCH_CONVERT) & (((__uint64_t)1 << ( 21 )) - 1) );
- *op = o;
- *sp = s;
- *cp = c;
- *fp = flag;
-}
-
-
-/*
* Print an xfs_da_state_path structure.
*/
static void
@@ -2890,11 +3027,8 @@ static void
xfs_xexlist_fork(xfs_inode_t *ip, int whichfork)
{
int nextents, i;
- xfs_dfiloff_t o;
- xfs_dfsbno_t s;
- xfs_dfilblks_t c;
- int flag;
xfs_ifork_t *ifp;
+ xfs_bmbt_irec_t irec;
ifp = XFS_IFORK_PTR(ip, whichfork);
if (ifp->if_flags & XFS_IFEXTENTS) {
@@ -2902,12 +3036,12 @@ xfs_xexlist_fork(xfs_inode_t *ip, int whichfork)
kdb_printf("inode 0x%p %cf extents 0x%p nextents 0x%x\n",
ip, "da"[whichfork], ifp->if_u1.if_extents, nextents);
for (i = 0; i < nextents; i++) {
- xfs_convert_extent(
- (xfs_bmbt_rec_64_t *)&ifp->if_u1.if_extents[i],
- &o, &s, &c, &flag);
+ xfs_bmbt_get_all(&ifp->if_u1.if_extents[i], &irec);
kdb_printf(
"%d: startoff %Ld startblock %s blockcount %Ld flag %d\n",
- i, o, xfs_fmtfsblock(s, ip->i_mount), c, flag);
+ i, irec.br_startoff,
+ xfs_fmtfsblock(irec.br_startblock, ip->i_mount),
+ irec.br_blockcount, irec.br_state);
}
}
}
@@ -3030,7 +3164,33 @@ xfsidbg_xalloc(xfs_alloc_arg_t *args)
args->wasfromfl, args->isfl, args->userdata);
}
+#ifdef DEBUG
+/*
+ * Print out all the entries in the alloc trace buf corresponding
+ * to the given mount point.
+ */
+static void
+xfsidbg_xalmtrace(xfs_mount_t *mp)
+{
+ ktrace_entry_t *ktep;
+ ktrace_snap_t kts;
+ extern ktrace_t *xfs_alloc_trace_buf;
+
+ if (xfs_alloc_trace_buf == NULL) {
+ kdb_printf("The xfs alloc trace buffer is not initialized\n");
+ return;
+ }
+ ktep = ktrace_first(xfs_alloc_trace_buf, &kts);
+ while (ktep != NULL) {
+ if ((__psint_t)ktep->val[0] && (xfs_mount_t *)ktep->val[3] == mp) {
+ (void)xfs_alloc_trace_entry(ktep);
+ kdb_printf("\n");
+ }
+ ktep = ktrace_next(xfs_alloc_trace_buf, &kts);
+ }
+}
+#endif /* DEBUG */
/*
* Print an attr_list() context structure.
@@ -3199,14 +3359,12 @@ xfsidbg_xbmalla(xfs_bmalloca_t *a)
static void
xfsidbg_xbrec(xfs_bmbt_rec_64_t *r)
{
- xfs_dfiloff_t o;
- xfs_dfsbno_t s;
- xfs_dfilblks_t c;
- int flag;
+ xfs_bmbt_irec_t irec;
- xfs_convert_extent(r, &o, &s, &c, &flag);
+ xfs_bmbt_get_all((xfs_bmbt_rec_t *)r, &irec);
kdb_printf("startoff %Ld startblock %Lx blockcount %Ld flag %d\n",
- o, s, c, flag);
+ irec.br_startoff, (__uint64_t)irec.br_startblock,
+ irec.br_blockcount, irec.br_state);
}
/*
@@ -4412,8 +4570,7 @@ xfsidbg_xmount(xfs_mount_t *mp)
"OSYNC", /* 0x2000 */
"NOUUID", /* 0x4000 */
"32BIT", /* 0x8000 */
- "IRIXSGID", /* 0x10000 */
- "NOLOGFLUSH", /* 0x20000 */
+ "NOLOGFLUSH", /* 0x10000 */
0
};
@@ -4438,9 +4595,9 @@ xfsidbg_xmount(xfs_mount_t *mp)
mp->m_ail_gen, &mp->m_sb);
kdb_printf("sb_lock 0x%p sb_bp 0x%p dev 0x%x logdev 0x%x rtdev 0x%x\n",
&mp->m_sb_lock, mp->m_sb_bp,
- mp->m_ddev_targp->pbr_dev,
- mp->m_logdev_targp->pbr_dev,
- mp->m_rtdev_targp->pbr_dev);
+ mp->m_ddev_targp ? mp->m_ddev_targp->pbr_dev : 0,
+ mp->m_logdev_targp ? mp->m_logdev_targp->pbr_dev : 0,
+ mp->m_rtdev_targp ? mp->m_rtdev_targp->pbr_dev : 0);
kdb_printf("bsize %d agfrotor %d agirotor %d ihash 0x%p ihsize %d\n",
mp->m_bsize, mp->m_agfrotor, mp->m_agirotor,
mp->m_ihash, mp->m_ihsize);
@@ -4734,13 +4891,17 @@ xfsidbg_xperag(xfs_mount_t *mp)
if (pag->pagi_init)
kdb_printf(" i_freecount %d i_inodeok %d\n",
pag->pagi_freecount, pag->pagi_inodeok);
-
- for (busy = 0; busy < XFS_PAGB_NUM_SLOTS; busy++) {
- kdb_printf(" %04d: start %d length %d tp 0x%p\n",
- busy,
- pag->pagb_list[busy].busy_start,
- pag->pagb_list[busy].busy_length,
- pag->pagb_list[busy].busy_tp);
+ if (pag->pagf_init) {
+ for (busy = 0; busy < XFS_PAGB_NUM_SLOTS; busy++) {
+ if (pag->pagb_list[busy].busy_length != 0) {
+ kdb_printf(
+ " %04d: start %d length %d tp 0x%p\n",
+ busy,
+ pag->pagb_list[busy].busy_start,
+ pag->pagb_list[busy].busy_length,
+ pag->pagb_list[busy].busy_tp);
+ }
+ }
}
}
}
@@ -5138,8 +5299,10 @@ xfsidbg_xtp(xfs_trans_t *tp)
tp->t_log_res, tp->t_blk_res, tp->t_blk_res_used);
kdb_printf("rt res %d rt res used %d\n", tp->t_rtx_res,
tp->t_rtx_res_used);
- kdb_printf("ticket 0x%lx lsn %s\n",
- (unsigned long) tp->t_ticket, xfs_fmtlsn(&tp->t_lsn));
+ kdb_printf("ticket 0x%lx lsn %s commit_lsn %s\n",
+ (unsigned long) tp->t_ticket,
+ xfs_fmtlsn(&tp->t_lsn),
+ xfs_fmtlsn(&tp->t_commit_lsn));
kdb_printf("callback 0x%p callarg 0x%p\n",
tp->t_callback, tp->t_callarg);
kdb_printf("icount delta %ld ifree delta %ld\n",
diff --git a/include/asm-i386/apicdef.h b/include/asm-i386/apicdef.h
index a91e6ede6b0a..4aac5f72e9ed 100644
--- a/include/asm-i386/apicdef.h
+++ b/include/asm-i386/apicdef.h
@@ -32,6 +32,8 @@
#define SET_APIC_LOGICAL_ID(x) (((x)<<24))
#define APIC_ALL_CPUS 0xFF
#define APIC_DFR 0xE0
+#define APIC_DFR_CLUSTER 0x0FFFFFFFul
+#define APIC_DFR_FLAT 0xFFFFFFFFul
#define APIC_SPIV 0xF0
#define APIC_SPIV_FOCUS_DISABLED (1<<9)
#define APIC_SPIV_APIC_ENABLED (1<<8)
@@ -108,7 +110,11 @@
#define APIC_BASE (fix_to_virt(FIX_APIC_BASE))
-#define MAX_IO_APICS 8
+#ifdef CONFIG_X86_NUMA
+ #define MAX_IO_APICS 32
+#else
+ #define MAX_IO_APICS 8
+#endif
/*
* the local APIC register structure, memory mapped. Not terribly well
diff --git a/include/asm-i386/hw_irq.h b/include/asm-i386/hw_irq.h
index f23f4f75ce65..1a60daa9172e 100644
--- a/include/asm-i386/hw_irq.h
+++ b/include/asm-i386/hw_irq.h
@@ -13,6 +13,7 @@
*/
#include <linux/config.h>
+#include <linux/profile.h>
#include <asm/atomic.h>
#include <asm/irq.h>
@@ -65,20 +66,31 @@ extern char _stext, _etext;
#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
-extern unsigned long prof_cpu_mask;
-extern unsigned int * prof_buffer;
-extern unsigned long prof_len;
-extern unsigned long prof_shift;
-
/*
- * x86 profiling function, SMP safe. We might want to do this in
- * assembly totally?
+ * The profiling function is SMP safe. (nothing can mess
+ * around with "current", and the profiling counters are
+ * updated with atomic operations). This is especially
+ * useful with a profiling multiplier != 1
*/
-static inline void x86_do_profile (unsigned long eip)
+static inline void x86_do_profile(struct pt_regs * regs)
{
+ unsigned long eip;
+ extern unsigned long prof_cpu_mask;
+ extern char _stext;
+#ifdef CONFIG_PROFILING
+ extern void x86_profile_hook(struct pt_regs *);
+
+ x86_profile_hook(regs);
+#endif
+
+ if (user_mode(regs))
+ return;
+
if (!prof_buffer)
return;
+ eip = regs->eip;
+
/*
* Only measure the CPUs specified by /proc/irq/prof_cpu_mask.
* (default is all CPUs.)
@@ -97,7 +109,28 @@ static inline void x86_do_profile (unsigned long eip)
eip = prof_len-1;
atomic_inc((atomic_t *)&prof_buffer[eip]);
}
+
+struct notifier_block;
+
+#ifdef CONFIG_PROFILING
+
+int register_profile_notifier(struct notifier_block * nb);
+int unregister_profile_notifier(struct notifier_block * nb);
+
+#else
+
+static inline int register_profile_notifier(struct notifier_block * nb)
+{
+ return -ENOSYS;
+}
+
+static inline int unregister_profile_notifier(struct notifier_block * nb)
+{
+ return -ENOSYS;
+}
+#endif /* CONFIG_PROFILING */
+
#ifdef CONFIG_SMP /*more of this file should probably be ifdefed SMP */
static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {
if (IO_APIC_IRQ(i))
diff --git a/include/asm-i386/mpspec.h b/include/asm-i386/mpspec.h
index f2a73c118d33..6fee20b0ef9f 100644
--- a/include/asm-i386/mpspec.h
+++ b/include/asm-i386/mpspec.h
@@ -16,11 +16,11 @@
/*
* a maximum of 16 APICs with the current APIC ID architecture.
*/
-#ifdef CONFIG_X86_NUMAQ
+#ifdef CONFIG_X86_NUMA
#define MAX_APICS 256
-#else /* !CONFIG_X86_NUMAQ */
+#else /* !CONFIG_X86_NUMA */
#define MAX_APICS 16
-#endif /* CONFIG_X86_NUMAQ */
+#endif /* CONFIG_X86_NUMA */
#define MAX_MPC_ENTRY 1024
diff --git a/include/asm-i386/msr.h b/include/asm-i386/msr.h
index b1b5ae1ce148..8eefc078d95e 100644
--- a/include/asm-i386/msr.h
+++ b/include/asm-i386/msr.h
@@ -99,7 +99,13 @@
#define MSR_K6_PFIR 0xC0000088
#define MSR_K7_EVNTSEL0 0xC0010000
+#define MSR_K7_EVNTSEL1 0xC0010001
+#define MSR_K7_EVNTSEL2 0xC0010002
+#define MSR_K7_EVNTSEL3 0xC0010003
#define MSR_K7_PERFCTR0 0xC0010004
+#define MSR_K7_PERFCTR1 0xC0010005
+#define MSR_K7_PERFCTR2 0xC0010006
+#define MSR_K7_PERFCTR3 0xC0010007
#define MSR_K7_HWCR 0xC0010015
#define MSR_K7_FID_VID_CTL 0xC0010041
#define MSR_K7_VID_STATUS 0xC0010042
diff --git a/include/asm-i386/nmi.h b/include/asm-i386/nmi.h
new file mode 100644
index 000000000000..d20f0fb9ad2b
--- /dev/null
+++ b/include/asm-i386/nmi.h
@@ -0,0 +1,49 @@
+/*
+ * linux/include/asm-i386/nmi.h
+ */
+#ifndef ASM_NMI_H
+#define ASM_NMI_H
+
+#include <linux/pm.h>
+
+struct pt_regs;
+
+typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu);
+
+/**
+ * set_nmi_callback
+ *
+ * Set a handler for an NMI. Only one handler may be
+ * set. Return 1 if the NMI was handled.
+ */
+void set_nmi_callback(nmi_callback_t callback);
+
+/**
+ * unset_nmi_callback
+ *
+ * Remove the handler previously set.
+ */
+void unset_nmi_callback(void);
+
+#ifdef CONFIG_PM
+
+/** Replace the PM callback routine for NMI. */
+struct pm_dev * set_nmi_pm_callback(pm_callback callback);
+
+/** Unset the PM callback routine back to the default. */
+void unset_nmi_pm_callback(struct pm_dev * dev);
+
+#else
+
+static inline struct pm_dev * set_nmi_pm_callback(pm_callback callback)
+{
+ return 0;
+}
+
+static inline void unset_nmi_pm_callback(struct pm_dev * dev)
+{
+}
+
+#endif /* CONFIG_PM */
+
+#endif /* ASM_NMI_H */
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index 71a71d011842..6df7592b84f5 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -21,17 +21,10 @@
#endif
#endif
-#ifdef CONFIG_SMP
-# ifdef CONFIG_CLUSTERED_APIC
-# define TARGET_CPUS 0xf /* all CPUs in *THIS* quad */
-# define INT_DELIVERY_MODE 0 /* physical delivery on LOCAL quad */
-# else
-# define TARGET_CPUS cpu_online_map
-# define INT_DELIVERY_MODE 1 /* logical delivery broadcast to all procs */
-# endif
+#ifdef CONFIG_CLUSTERED_APIC
+ #define INT_DELIVERY_MODE 0 /* physical delivery on LOCAL quad */
#else
-# define INT_DELIVERY_MODE 1 /* logical delivery */
-# define TARGET_CPUS 0x01
+ #define INT_DELIVERY_MODE 1 /* logical delivery broadcast to all procs */
#endif
#ifndef clustered_apic_mode
@@ -72,6 +65,7 @@ extern void zap_low_mappings (void);
* the real APIC ID <-> CPU # mapping.
*/
#define MAX_APICID 256
+#define BAD_APICID 0xFFu
extern volatile int cpu_to_physical_apicid[NR_CPUS];
extern volatile int physical_apicid_to_cpu[MAX_APICID];
extern volatile int cpu_to_logical_apicid[NR_CPUS];
diff --git a/include/asm-i386/smpboot.h b/include/asm-i386/smpboot.h
index 6a77a1663ca9..a3840aa6eca8 100644
--- a/include/asm-i386/smpboot.h
+++ b/include/asm-i386/smpboot.h
@@ -24,15 +24,6 @@
#endif /* CONFIG_CLUSTERED_APIC */
/*
- * How to map from the cpu_present_map
- */
-#ifdef CONFIG_CLUSTERED_APIC
- #define cpu_present_to_apicid(mps_cpu) ( ((mps_cpu/4)*16) + (1<<(mps_cpu%4)) )
-#else /* !CONFIG_CLUSTERED_APIC */
- #define cpu_present_to_apicid(apicid) (apicid)
-#endif /* CONFIG_CLUSTERED_APIC */
-
-/*
* Mappings between logical cpu number and logical / physical apicid
* The first four macros are trivial, but it keeps the abstraction consistent
*/
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index 8765a0f82aff..159dfa7fefe1 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -257,6 +257,8 @@
#define __NR_alloc_hugepages 250
#define __NR_free_hugepages 251
#define __NR_exit_group 252
+#define __NR_lookup_dcookie 253
+
/* user-visible error numbers are in the range -1 - -124: see <asm-i386/errno.h> */
diff --git a/include/asm-sparc64/checksum.h b/include/asm-sparc64/checksum.h
index 6cfa55be0f09..a7d31c896881 100644
--- a/include/asm-sparc64/checksum.h
+++ b/include/asm-sparc64/checksum.h
@@ -78,45 +78,8 @@ csum_and_copy_to_user(const char *src, char *dst, int len,
/* ihl is always 5 or greater, almost always is 5, and iph is word aligned
* the majority of the time.
*/
-static __inline__ unsigned short ip_fast_csum(__const__ unsigned char *iph,
- unsigned int ihl)
-{
- unsigned short sum;
-
- /* Note: We must read %2 before we touch %0 for the first time,
- * because GCC can legitimately use the same register for
- * both operands.
- */
- __asm__ __volatile__(
-" sub %2, 4, %%g7 ! IEU0\n"
-" lduw [%1 + 0x00], %0 ! Load Group\n"
-" lduw [%1 + 0x04], %%g2 ! Load Group\n"
-" lduw [%1 + 0x08], %%g3 ! Load Group\n"
-" addcc %%g2, %0, %0 ! IEU1 1 Load Bubble + Group\n"
-" lduw [%1 + 0x0c], %%g2 ! Load\n"
-" addccc %%g3, %0, %0 ! Sngle Group no Bubble\n"
-" lduw [%1 + 0x10], %%g3 ! Load Group\n"
-" addccc %%g2, %0, %0 ! Sngle Group no Bubble\n"
-" addc %0, %%g0, %0 ! Sngle Group\n"
-"1: addcc %%g3, %0, %0 ! IEU1 Group no Bubble\n"
-" add %1, 4, %1 ! IEU0\n"
-" addccc %0, %%g0, %0 ! Sngle Group no Bubble\n"
-" subcc %%g7, 1, %%g7 ! IEU1 Group\n"
-" be,a,pt %%icc, 2f ! CTI\n"
-" sll %0, 16, %%g2 ! IEU0\n"
-" lduw [%1 + 0x10], %%g3 ! Load Group\n"
-" ba,pt %%xcc, 1b ! CTI\n"
-" nop ! IEU0\n"
-"2: addcc %0, %%g2, %%g2 ! IEU1 Group\n"
-" srl %%g2, 16, %0 ! IEU0 Group regdep XXX Scheisse!\n"
-" addc %0, %%g0, %0 ! Sngle Group\n"
-" xnor %%g0, %0, %0 ! IEU0 Group\n"
-" srl %0, 0, %0 ! IEU0 Group XXX Scheisse!\n"
- : "=r" (sum), "=&r" (iph)
- : "r" (ihl), "1" (iph)
- : "g2", "g3", "g7", "cc");
- return sum;
-}
+extern unsigned short ip_fast_csum(__const__ unsigned char *iph,
+ unsigned int ihl);
/* Fold a partial checksum without adding pseudo headers. */
static __inline__ unsigned short csum_fold(unsigned int sum)
diff --git a/include/asm-sparc64/irq.h b/include/asm-sparc64/irq.h
index a56c528e9af4..397744bb33d7 100644
--- a/include/asm-sparc64/irq.h
+++ b/include/asm-sparc64/irq.h
@@ -93,7 +93,7 @@ extern unsigned char dma_sync_reg_table_entry;
#define IBF_MULTI 0x08 /* On PCI, indicates shared bucket. */
#define IBF_INPROGRESS 0x10 /* IRQ is being serviced. */
-#define NUM_IVECS 8192
+#define NUM_IVECS (IMAP_INR + 1)
extern struct ino_bucket ivector_table[NUM_IVECS];
#define __irq_ino(irq) \
diff --git a/include/asm-sparc64/semaphore.h b/include/asm-sparc64/semaphore.h
index 2aaa3160115e..7419dd88b49e 100644
--- a/include/asm-sparc64/semaphore.h
+++ b/include/asm-sparc64/semaphore.h
@@ -47,184 +47,10 @@ static inline void init_MUTEX_LOCKED (struct semaphore *sem)
sema_init(sem, 0);
}
-extern void __down(struct semaphore * sem);
-extern int __down_interruptible(struct semaphore * sem);
-extern void __up(struct semaphore * sem);
-
-static __inline__ void down(struct semaphore * sem)
-{
- /* This atomically does:
- * old_val = sem->count;
- * new_val = sem->count - 1;
- * sem->count = new_val;
- * if (old_val < 1)
- * __down(sem);
- *
- * The (old_val < 1) test is equivalent to
- * the more straightforward (new_val < 0),
- * but it is easier to test the former because
- * of how the CAS instruction works.
- */
-
- __asm__ __volatile__("\n"
-" ! down sem(%0)\n"
-"1: lduw [%0], %%g5\n"
-" sub %%g5, 1, %%g7\n"
-" cas [%0], %%g5, %%g7\n"
-" cmp %%g5, %%g7\n"
-" bne,pn %%icc, 1b\n"
-" cmp %%g7, 1\n"
-" bl,pn %%icc, 3f\n"
-" membar #StoreLoad | #StoreStore\n"
-"2:\n"
-" .subsection 2\n"
-"3: mov %0, %%g5\n"
-" save %%sp, -160, %%sp\n"
-" mov %%g1, %%l1\n"
-" mov %%g2, %%l2\n"
-" mov %%g3, %%l3\n"
-" call %1\n"
-" mov %%g5, %%o0\n"
-" mov %%l1, %%g1\n"
-" mov %%l2, %%g2\n"
-" ba,pt %%xcc, 2b\n"
-" restore %%l3, %%g0, %%g3\n"
-" .previous\n"
- : : "r" (sem), "i" (__down)
- : "g5", "g7", "memory", "cc");
-}
-
-static __inline__ int down_interruptible(struct semaphore *sem)
-{
- int ret = 0;
-
- /* This atomically does:
- * old_val = sem->count;
- * new_val = sem->count - 1;
- * sem->count = new_val;
- * if (old_val < 1)
- * ret = __down_interruptible(sem);
- *
- * The (old_val < 1) test is equivalent to
- * the more straightforward (new_val < 0),
- * but it is easier to test the former because
- * of how the CAS instruction works.
- */
-
- __asm__ __volatile__("\n"
-" ! down_interruptible sem(%2) ret(%0)\n"
-"1: lduw [%2], %%g5\n"
-" sub %%g5, 1, %%g7\n"
-" cas [%2], %%g5, %%g7\n"
-" cmp %%g5, %%g7\n"
-" bne,pn %%icc, 1b\n"
-" cmp %%g7, 1\n"
-" bl,pn %%icc, 3f\n"
-" membar #StoreLoad | #StoreStore\n"
-"2:\n"
-" .subsection 2\n"
-"3: mov %2, %%g5\n"
-" save %%sp, -160, %%sp\n"
-" mov %%g1, %%l1\n"
-" mov %%g2, %%l2\n"
-" mov %%g3, %%l3\n"
-" call %3\n"
-" mov %%g5, %%o0\n"
-" mov %%l1, %%g1\n"
-" mov %%l2, %%g2\n"
-" mov %%l3, %%g3\n"
-" ba,pt %%xcc, 2b\n"
-" restore %%o0, %%g0, %0\n"
-" .previous\n"
- : "=r" (ret)
- : "0" (ret), "r" (sem), "i" (__down_interruptible)
- : "g5", "g7", "memory", "cc");
- return ret;
-}
-
-static __inline__ int down_trylock(struct semaphore *sem)
-{
- int ret;
-
- /* This atomically does:
- * old_val = sem->count;
- * new_val = sem->count - 1;
- * if (old_val < 1) {
- * ret = 1;
- * } else {
- * sem->count = new_val;
- * ret = 0;
- * }
- *
- * The (old_val < 1) test is equivalent to
- * the more straightforward (new_val < 0),
- * but it is easier to test the former because
- * of how the CAS instruction works.
- */
-
- __asm__ __volatile__("\n"
-" ! down_trylock sem(%1) ret(%0)\n"
-"1: lduw [%1], %%g5\n"
-" sub %%g5, 1, %%g7\n"
-" cmp %%g5, 1\n"
-" bl,pn %%icc, 2f\n"
-" mov 1, %0\n"
-" cas [%1], %%g5, %%g7\n"
-" cmp %%g5, %%g7\n"
-" bne,pn %%icc, 1b\n"
-" mov 0, %0\n"
-" membar #StoreLoad | #StoreStore\n"
-"2:\n"
- : "=&r" (ret)
- : "r" (sem)
- : "g5", "g7", "memory", "cc");
-
- return ret;
-}
-
-static __inline__ void up(struct semaphore * sem)
-{
- /* This atomically does:
- * old_val = sem->count;
- * new_val = sem->count + 1;
- * sem->count = new_val;
- * if (old_val < 0)
- * __up(sem);
- *
- * The (old_val < 0) test is equivalent to
- * the more straightforward (new_val <= 0),
- * but it is easier to test the former because
- * of how the CAS instruction works.
- */
-
- __asm__ __volatile__("\n"
-" ! up sem(%0)\n"
-" membar #StoreLoad | #LoadLoad\n"
-"1: lduw [%0], %%g5\n"
-" add %%g5, 1, %%g7\n"
-" cas [%0], %%g5, %%g7\n"
-" cmp %%g5, %%g7\n"
-" bne,pn %%icc, 1b\n"
-" addcc %%g7, 1, %%g0\n"
-" ble,pn %%icc, 3f\n"
-" membar #StoreLoad | #StoreStore\n"
-"2:\n"
-" .subsection 2\n"
-"3: mov %0, %%g5\n"
-" save %%sp, -160, %%sp\n"
-" mov %%g1, %%l1\n"
-" mov %%g2, %%l2\n"
-" mov %%g3, %%l3\n"
-" call %1\n"
-" mov %%g5, %%o0\n"
-" mov %%l1, %%g1\n"
-" mov %%l2, %%g2\n"
-" ba,pt %%xcc, 2b\n"
-" restore %%l3, %%g0, %%g3\n"
-" .previous\n"
- : : "r" (sem), "i" (__up)
- : "g5", "g7", "memory", "cc");
-}
+extern void up(struct semaphore *sem);
+extern void down(struct semaphore *sem);
+extern int down_trylock(struct semaphore *sem);
+extern int down_interruptible(struct semaphore *sem);
#endif /* __KERNEL__ */
diff --git a/include/asm-um/cache.h b/include/asm-um/cache.h
index a4962992cf86..4b134fe8504e 100644
--- a/include/asm-um/cache.h
+++ b/include/asm-um/cache.h
@@ -1,7 +1,10 @@
#ifndef __UM_CACHE_H
#define __UM_CACHE_H
+/* These are x86 numbers */
#define L1_CACHE_SHIFT 5
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define L1_CACHE_SHIFT_MAX 7 /* largest L1 which this arch supports */
+
#endif
diff --git a/include/asm-um/smp.h b/include/asm-um/smp.h
index 931b1f956f17..a991b9feea3f 100644
--- a/include/asm-um/smp.h
+++ b/include/asm-um/smp.h
@@ -6,15 +6,30 @@ extern unsigned long cpu_online_map;
#ifdef CONFIG_SMP
#include "linux/config.h"
+#include "linux/bitops.h"
#include "asm/current.h"
-#define smp_processor_id() (current->processor)
+#define smp_processor_id() (current->thread_info->cpu)
#define cpu_logical_map(n) (n)
#define cpu_number_map(n) (n)
#define PROC_CHANGE_PENALTY 15 /* Pick a number, any number */
extern int hard_smp_processor_id(void);
#define NO_PROC_ID -1
+#define cpu_online(cpu) (cpu_online_map & (1<<(cpu)))
+
+extern int ncpus;
+#define cpu_possible(cpu) ((cpu) < ncpus)
+
+extern inline unsigned int num_online_cpus(void)
+{
+ return(hweight32(cpu_online_map));
+}
+
+extern inline void smp_cpus_done(unsigned int maxcpus)
+{
+}
+
#endif
#endif
diff --git a/include/asm-um/thread_info.h b/include/asm-um/thread_info.h
index 494e367f4ff9..1cb0f45359f5 100644
--- a/include/asm-um/thread_info.h
+++ b/include/asm-um/thread_info.h
@@ -64,10 +64,14 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
#define TIF_SIGPENDING 1 /* signal pending */
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
+#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
+ * TIF_NEED_RESCHED
+ */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#endif
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4929d743683d..ccb56d58de6a 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -34,6 +34,7 @@ struct request {
int rq_status; /* should split this into a few status bits */
kdev_t rq_dev;
+ struct gendisk *rq_disk;
int errors;
sector_t sector;
unsigned long nr_sectors;
@@ -60,6 +61,12 @@ struct request {
int tag;
void *special;
char *buffer;
+
+ /* For packet commands */
+ unsigned int data_len;
+ void *data, *sense;
+
+ unsigned int timeout;
struct completion *waiting;
struct bio *bio, *biotail;
request_queue_t *q;
@@ -85,6 +92,8 @@ enum rq_flag_bits {
__REQ_BLOCK_PC, /* queued down pc from block layer */
__REQ_SENSE, /* sense retrival */
+ __REQ_FAILED, /* set if the request failed */
+ __REQ_QUIET, /* don't worry about errors */
__REQ_SPECIAL, /* driver suplied command */
__REQ_DRIVE_CMD,
__REQ_DRIVE_TASK,
@@ -103,6 +112,8 @@ enum rq_flag_bits {
#define REQ_PC (1 << __REQ_PC)
#define REQ_BLOCK_PC (1 << __REQ_BLOCK_PC)
#define REQ_SENSE (1 << __REQ_SENSE)
+#define REQ_FAILED (1 << __REQ_FAILED)
+#define REQ_QUIET (1 << __REQ_QUIET)
#define REQ_SPECIAL (1 << __REQ_SPECIAL)
#define REQ_DRIVE_CMD (1 << __REQ_DRIVE_CMD)
#define REQ_DRIVE_TASK (1 << __REQ_DRIVE_TASK)
@@ -301,7 +312,7 @@ extern int blk_remove_plug(request_queue_t *);
extern void blk_recount_segments(request_queue_t *, struct bio *);
extern inline int blk_phys_contig_segment(request_queue_t *q, struct bio *, struct bio *);
extern inline int blk_hw_contig_segment(request_queue_t *q, struct bio *, struct bio *);
-extern int block_ioctl(struct block_device *, unsigned int, unsigned long);
+extern int scsi_cmd_ioctl(struct block_device *, unsigned int, unsigned long);
extern void blk_start_queue(request_queue_t *q);
extern void blk_stop_queue(request_queue_t *q);
extern void __blk_stop_queue(request_queue_t *q);
diff --git a/include/linux/blkpg.h b/include/linux/blkpg.h
index 3cfedb07f803..571618972e30 100644
--- a/include/linux/blkpg.h
+++ b/include/linux/blkpg.h
@@ -57,7 +57,6 @@ struct blkpg_partition {
#ifdef __KERNEL__
extern char * partition_name(dev_t dev);
-extern int blk_ioctl(struct block_device *bdev, unsigned int cmd, unsigned long arg);
#endif /* __KERNEL__ */
diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
index b287b7a24b11..4387203c95b7 100644
--- a/include/linux/cdrom.h
+++ b/include/linux/cdrom.h
@@ -730,7 +730,6 @@ struct cdrom_device_info {
struct cdrom_device_ops *ops; /* link to device_ops */
struct cdrom_device_info *next; /* next device_info for this major */
void *handle; /* driver-dependent data */
- struct device cdrom_driverfs_dev; /* driverfs implementation */
/* specifications */
kdev_t dev; /* device number */
int mask; /* mask of capability: disables them */
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 71708edafce9..76a5085043e1 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -66,6 +66,8 @@ static __inline__ unsigned int full_name_hash(const unsigned char * name, unsign
#define DNAME_INLINE_LEN 16
+struct dcookie_struct;
+
struct dentry {
atomic_t d_count;
unsigned int d_flags;
@@ -84,6 +86,7 @@ struct dentry {
unsigned long d_vfs_flags;
void * d_fsdata; /* fs-specific data */
unsigned char d_iname[DNAME_INLINE_LEN]; /* small names */
+ struct dcookie_struct * d_cookie; /* cookie, if any */
};
struct dentry_operations {
diff --git a/include/linux/dcookies.h b/include/linux/dcookies.h
new file mode 100644
index 000000000000..7c4d3319e7d0
--- /dev/null
+++ b/include/linux/dcookies.h
@@ -0,0 +1,69 @@
+/*
+ * dcookies.h
+ *
+ * Persistent cookie-path mappings
+ *
+ * Copyright 2002 John Levon <levon@movementarian.org>
+ */
+
+#ifndef DCOOKIES_H
+#define DCOOKIES_H
+
+#include <linux/config.h>
+
+#ifdef CONFIG_PROFILING
+
+#include <linux/types.h>
+
+struct dcookie_user;
+
+/**
+ * dcookie_register - register a user of dcookies
+ *
+ * Register as a dcookie user. Returns %NULL on failure.
+ */
+struct dcookie_user * dcookie_register(void);
+
+/**
+ * dcookie_unregister - unregister a user of dcookies
+ *
+ * Unregister as a dcookie user. This may invalidate
+ * any dcookie values returned from get_dcookie().
+ */
+void dcookie_unregister(struct dcookie_user * user);
+
+/**
+ * get_dcookie - acquire a dcookie
+ *
+ * Convert the given dentry/vfsmount pair into
+ * a cookie value.
+ *
+ * Returns -EINVAL if no living task has registered as a
+ * dcookie user.
+ *
+ * Returns 0 on success, with *cookie filled in
+ */
+int get_dcookie(struct dentry * dentry, struct vfsmount * vfsmnt,
+ u32 * cookie);
+
+#else
+
+static inline struct dcookie_user * dcookie_register(void)
+{
+	return 0;
+}
+
+static inline void dcookie_unregister(struct dcookie_user * user)
+{
+	return;
+}
+
+static inline int get_dcookie(struct dentry * dentry,
+ struct vfsmount * vfsmnt, u32 * cookie)
+{
+ return -ENOSYS;
+}
+
+#endif /* CONFIG_PROFILING */
+
+#endif /* DCOOKIES_H */
diff --git a/include/linux/device.h b/include/linux/device.h
index 3290c5c40276..80a63939f924 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -329,6 +329,9 @@ dev_set_drvdata (struct device *dev, void *data)
*/
extern int device_register(struct device * dev);
extern void device_unregister(struct device * dev);
+extern void device_initialize(struct device * dev);
+extern int device_add(struct device * dev);
+extern void device_del(struct device * dev);
/* driverfs interface for exporting device attributes */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 93148f1659b0..bca164f4265a 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -359,6 +359,7 @@ struct block_device {
sector_t bd_offset;
unsigned bd_part_count;
int bd_invalidated;
+ struct gendisk * bd_disk;
};
struct inode {
@@ -1087,6 +1088,7 @@ extern struct file_operations def_blk_fops;
extern struct address_space_operations def_blk_aops;
extern struct file_operations def_fifo_fops;
extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long);
+extern int blkdev_ioctl(struct inode *, struct file *, unsigned, unsigned long);
extern int blkdev_get(struct block_device *, mode_t, unsigned, int);
extern int blkdev_put(struct block_device *, int);
extern int bd_claim(struct block_device *, void *);
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 62781b452fe9..9de2f51ae935 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -62,17 +62,19 @@ struct hd_struct {
sector_t start_sect;
sector_t nr_sects;
devfs_handle_t de; /* primary (master) devfs entry */
- struct device hd_driverfs_dev; /* support driverfs hiearchy */
+ struct device *hd_driverfs_dev; /* support driverfs hierarchy */
};
#define GENHD_FL_REMOVABLE 1
#define GENHD_FL_DRIVERFS 2
#define GENHD_FL_DEVFS 4
#define GENHD_FL_CD 8
+#define GENHD_FL_UP 16
struct gendisk {
int major; /* major number of driver */
int first_minor;
+ int minors;
int minor_shift; /* number of times minor is shifted to
get real minor */
char disk_name[16]; /* name of major driver */
@@ -88,6 +90,10 @@ struct gendisk {
devfs_handle_t disk_de; /* piled higher and deeper */
struct device *driverfs_dev;
struct device disk_dev;
+
+ unsigned sync_io; /* RAID */
+ unsigned reads, writes;
+ unsigned rio, wio;
};
/* drivers/block/genhd.c */
@@ -260,21 +266,16 @@ struct unixware_disklabel {
char *disk_name (struct gendisk *hd, int part, char *buf);
extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev);
-extern void update_partition(struct gendisk *disk, int part);
+extern void add_partition(struct gendisk *, int, sector_t, sector_t);
+extern void delete_partition(struct gendisk *, int);
-extern struct gendisk *alloc_disk(void);
+extern struct gendisk *alloc_disk(int minors);
+extern struct gendisk *get_disk(struct gendisk *disk);
extern void put_disk(struct gendisk *disk);
/* will go away */
extern void blk_set_probe(int major, struct gendisk *(p)(int));
-static inline unsigned int disk_index (kdev_t dev)
-{
- int part;
- struct gendisk *g = get_gendisk(kdev_t_to_nr(dev), &part);
- return g ? (minor(dev) >> g->minor_shift) : 0;
-}
-
#endif
#endif
diff --git a/include/linux/mtd/concat.h b/include/linux/mtd/concat.h
new file mode 100644
index 000000000000..ed8dc6755219
--- /dev/null
+++ b/include/linux/mtd/concat.h
@@ -0,0 +1,23 @@
+/*
+ * MTD device concatenation layer definitions
+ *
+ * (C) 2002 Robert Kaiser <rkaiser@sysgo.de>
+ *
+ * This code is GPL
+ *
+ * $Id: concat.h,v 1.1 2002/03/08 16:34:36 rkaiser Exp $
+ */
+
+#ifndef MTD_CONCAT_H
+#define MTD_CONCAT_H
+
+
+struct mtd_info *mtd_concat_create(
+ struct mtd_info *subdev[], /* subdevices to concatenate */
+ int num_devs, /* number of subdevices */
+ char *name); /* name for the new device */
+
+void mtd_concat_destroy(struct mtd_info *mtd);
+
+#endif
+
diff --git a/include/linux/nfs.h b/include/linux/nfs.h
index 181e8decebfc..d99650a19b55 100644
--- a/include/linux/nfs.h
+++ b/include/linux/nfs.h
@@ -120,7 +120,7 @@ enum nfs_ftype {
/*
* This is the kernel NFS client file handle representation
*/
-#define NFS_MAXFHSIZE 64
+#define NFS_MAXFHSIZE 128
struct nfs_fh {
unsigned short size;
unsigned char data[NFS_MAXFHSIZE];
diff --git a/include/linux/nfs3.h b/include/linux/nfs3.h
index 359c73e00841..7f11fa589207 100644
--- a/include/linux/nfs3.h
+++ b/include/linux/nfs3.h
@@ -59,6 +59,11 @@ enum nfs3_ftype {
NF3BAD = 8
};
+struct nfs3_fh {
+ unsigned short size;
+ unsigned char data[NFS3_FHSIZE];
+};
+
#define NFS3_VERSION 3
#define NFS3PROC_NULL 0
#define NFS3PROC_GETATTR 1
diff --git a/include/linux/nfs4_mount.h b/include/linux/nfs4_mount.h
new file mode 100644
index 000000000000..9a782c2bbdd3
--- /dev/null
+++ b/include/linux/nfs4_mount.h
@@ -0,0 +1,70 @@
+#ifndef _LINUX_NFS4_MOUNT_H
+#define _LINUX_NFS4_MOUNT_H
+
+/*
+ * linux/include/linux/nfs4_mount.h
+ *
+ * Copyright (C) 2002 Trond Myklebust
+ *
+ * structure passed from user-space to kernel-space during an nfsv4 mount
+ */
+
+/*
+ * WARNING! Do not delete or change the order of these fields. If
+ * a new field is required then add it to the end. The version field
+ * tracks which fields are present. This will ensure some measure of
+ * mount-to-kernel version compatibility. Some of these aren't used yet
+ * but here they are anyway.
+ */
+#define NFS4_MOUNT_VERSION 1
+
+struct nfs_string {
+ unsigned int len;
+ const char* data;
+};
+
+struct nfs4_mount_data {
+ int version; /* 1 */
+ int flags; /* 1 */
+ int rsize; /* 1 */
+ int wsize; /* 1 */
+ int timeo; /* 1 */
+ int retrans; /* 1 */
+ int acregmin; /* 1 */
+ int acregmax; /* 1 */
+ int acdirmin; /* 1 */
+ int acdirmax; /* 1 */
+
+ /* see the definition of 'struct clientaddr4' in RFC3010 */
+ struct nfs_string client_addr; /* 1 */
+
+ /* Mount path */
+ struct nfs_string mnt_path; /* 1 */
+
+ /* Server details */
+ struct nfs_string hostname; /* 1 */
+ /* Server IP address */
+ unsigned int host_addrlen; /* 1 */
+ struct sockaddr* host_addr; /* 1 */
+
+ /* Transport protocol to use */
+ int proto; /* 1 */
+
+ /* Pseudo-flavours to use for authentication. See RFC2623 */
+ int auth_flavourlen; /* 1 */
+ int *auth_flavours; /* 1 */
+};
+
+/* bits in the flags field */
+/* Note: the fields that correspond to existing NFSv2/v3 mount options
+ * should mirror the values from include/linux/nfs_mount.h
+ */
+
+#define NFS4_MOUNT_SOFT 0x0001 /* 1 */
+#define NFS4_MOUNT_INTR 0x0002 /* 1 */
+#define NFS4_MOUNT_NOCTO 0x0010 /* 1 */
+#define NFS4_MOUNT_NOAC 0x0020 /* 1 */
+#define NFS4_MOUNT_STRICTLOCK 0x1000 /* 1 */
+#define NFS4_MOUNT_FLAGMASK 0xFFFF
+
+#endif
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 4c35f7cbb97c..3a23ac81e80f 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -472,6 +472,55 @@ extern void * nfs_root_data(void);
#define NFS_JUKEBOX_RETRY_TIME (5 * HZ)
+#ifdef CONFIG_NFS_V4
+struct nfs4_client {
+ atomic_t cl_count; /* refcount */
+ u64 cl_clientid; /* constant */
+ nfs4_verifier cl_confirm;
+
+ /*
+ * Starts a list of lockowners, linked through lo_list.
+ */
+ struct list_head cl_lockowners; /* protected by state_spinlock */
+};
+
+/* nfs4proc.c */
+extern int nfs4_proc_renew(struct nfs_server *server);
+
+/* nfs4renewd.c */
+extern int nfs4_init_renewd(struct nfs_server *server);
+#endif /* CONFIG_NFS_V4 */
+
+#ifdef CONFIG_NFS_V4
+
+extern struct nfs4_client *nfs4_get_client(void);
+extern void nfs4_put_client(struct nfs4_client *clp);
+
+struct nfs4_mount_data;
+static inline int
+create_nfsv4_state(struct nfs_server *server, struct nfs4_mount_data *data)
+{
+ server->nfs4_state = NULL;
+ return 0;
+}
+
+static inline void
+destroy_nfsv4_state(struct nfs_server *server)
+{
+ if (server->mnt_path) {
+ kfree(server->mnt_path);
+ server->mnt_path = NULL;
+ }
+ if (server->nfs4_state) {
+ nfs4_put_client(server->nfs4_state);
+ server->nfs4_state = NULL;
+ }
+}
+#else
+#define create_nfsv4_state(server, data) 0
+#define destroy_nfsv4_state(server) do { } while (0)
+#endif
+
#endif /* __KERNEL__ */
/*
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 080c98fed1dd..d171608d7105 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -30,6 +30,16 @@ struct nfs_server {
lru_busy;
struct nfs_fh fh;
struct sockaddr_in addr;
+#ifdef CONFIG_NFS_V4
+ /* Our own IP address, as a null-terminated string.
+ * This is used to generate the clientid, and the callback address.
+ */
+ char ip_addr[16];
+ char * mnt_path;
+ struct nfs4_client * nfs4_state; /* all NFSv4 state starts here */
+ unsigned long lease_time; /* in jiffies */
+ unsigned long last_renewal; /* in jiffies */
+#endif
};
/* Server capabilities */
diff --git a/include/linux/nfs_mount.h b/include/linux/nfs_mount.h
index 2b552936eeca..223ed3462064 100644
--- a/include/linux/nfs_mount.h
+++ b/include/linux/nfs_mount.h
@@ -10,6 +10,8 @@
*/
#include <linux/in.h>
#include <linux/nfs.h>
+#include <linux/nfs2.h>
+#include <linux/nfs3.h>
/*
* WARNING! Do not delete or change the order of these fields. If
@@ -37,7 +39,7 @@ struct nfs_mount_data {
char hostname[256]; /* 1 */
int namlen; /* 2 */
unsigned int bsize; /* 3 */
- struct nfs_fh root; /* 4 */
+ struct nfs3_fh root; /* 4 */
};
/* bits in the flags field */
@@ -53,6 +55,10 @@ struct nfs_mount_data {
#define NFS_MOUNT_KERBEROS 0x0100 /* 3 */
#define NFS_MOUNT_NONLM 0x0200 /* 3 */
#define NFS_MOUNT_BROKEN_SUID 0x0400 /* 4 */
+#if 0
+#define NFS_MOUNT_STRICTLOCK 0x1000 /* reserved for NFSv4 */
+#define NFS_MOUNT_SECFLAVOUR 0x2000 /* reserved */
+#endif
#define NFS_MOUNT_FLAGMASK 0xFFFF
#endif
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index b71b1b217c70..4bb5125056e7 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -50,6 +50,7 @@ struct nfs_fattr {
* Info on the file system
*/
struct nfs_fsinfo {
+ struct nfs_fattr *fattr; /* Post-op attributes */
__u32 rtmax; /* max. read transfer size */
__u32 rtpref; /* pref. read transfer size */
__u32 rtmult; /* reads should be multiple of this */
@@ -58,16 +59,31 @@ struct nfs_fsinfo {
__u32 wtmult; /* writes should be multiple of this */
__u32 dtpref; /* pref. readdir transfer size */
__u64 maxfilesize;
- __u64 bsize; /* block size */
+ __u32 lease_time; /* in seconds */
+};
+
+struct nfs_fsstat {
+ struct nfs_fattr *fattr; /* Post-op attributes */
__u64 tbytes; /* total size in bytes */
__u64 fbytes; /* # of free bytes */
__u64 abytes; /* # of bytes available to user */
__u64 tfiles; /* # of files */
__u64 ffiles; /* # of free files */
__u64 afiles; /* # of files available to user */
- __u32 linkmax;/* max # of hard links */
- __u32 namelen;/* max name length */
- __u32 lease_time; /* in seconds */
+};
+
+struct nfs2_fsstat {
+ __u32 tsize; /* Server transfer size */
+ __u32 bsize; /* Filesystem block size */
+ __u32 blocks; /* No. of "bsize" blocks on filesystem */
+ __u32 bfree; /* No. of free "bsize" blocks */
+ __u32 bavail; /* No. of available "bsize" blocks */
+};
+
+struct nfs_pathconf {
+ struct nfs_fattr *fattr; /* Post-op attributes */
+ __u32 max_link; /* max # of hard links */
+ __u32 max_namelen; /* max name length */
};
/*
@@ -309,6 +325,219 @@ struct nfs3_readdirres {
int plus;
};
+#ifdef CONFIG_NFS_V4
+
+typedef u64 clientid4;
+
+struct nfs4_change_info {
+ u32 atomic;
+ u64 before;
+ u64 after;
+};
+
+struct nfs4_access {
+ u32 ac_req_access; /* request */
+ u32 * ac_resp_supported; /* response */
+ u32 * ac_resp_access; /* response */
+};
+
+struct nfs4_close {
+ char * cl_stateid; /* request */
+ u32 cl_seqid; /* request */
+};
+
+struct nfs4_commit {
+ u64 co_start; /* request */
+ u32 co_len; /* request */
+ struct nfs_writeverf * co_verifier; /* response */
+};
+
+struct nfs4_create {
+ u32 cr_ftype; /* request */
+ union { /* request */
+ struct {
+ u32 textlen;
+ const char * text;
+ } symlink; /* NF4LNK */
+ struct {
+ u32 specdata1;
+ u32 specdata2;
+ } device; /* NF4BLK, NF4CHR */
+ } u;
+ u32 cr_namelen; /* request */
+ const char * cr_name; /* request */
+ struct iattr * cr_attrs; /* request */
+ struct nfs4_change_info * cr_cinfo; /* response */
+};
+#define cr_textlen u.symlink.textlen
+#define cr_text u.symlink.text
+#define cr_specdata1 u.device.specdata1
+#define cr_specdata2 u.device.specdata2
+
+struct nfs4_getattr {
+ u32 * gt_bmval; /* request */
+ struct nfs_fattr * gt_attrs; /* response */
+ struct nfs_fsstat * gt_fsstat; /* response */
+ struct nfs_fsinfo * gt_fsinfo; /* response */
+ struct nfs_pathconf * gt_pathconf; /* response */
+ u32 * gt_bmres; /* response */
+};
+
+struct nfs4_getfh {
+ struct nfs_fh * gf_fhandle; /* response */
+};
+
+struct nfs4_link {
+ u32 ln_namelen; /* request */
+ const char * ln_name; /* request */
+ struct nfs4_change_info * ln_cinfo; /* response */
+};
+
+struct nfs4_lookup {
+ struct qstr * lo_name; /* request */
+};
+
+struct nfs4_open {
+ u32 op_share_access; /* request */
+ u32 op_opentype; /* request */
+ u32 op_createmode; /* request */
+ union { /* request */
+ struct iattr * attrs; /* UNCHECKED, GUARDED */
+ nfs4_verifier verifier; /* EXCLUSIVE */
+ } u;
+ struct qstr * op_name; /* request */
+ char * op_stateid; /* response */
+ struct nfs4_change_info * op_cinfo; /* response */
+ u32 * op_rflags; /* response */
+};
+#define op_attrs u.attrs
+#define op_verifier u.verifier
+
+struct nfs4_open_confirm {
+ char * oc_stateid; /* request */
+};
+
+struct nfs4_putfh {
+ struct nfs_fh * pf_fhandle; /* request */
+};
+
+struct nfs4_read {
+ u64 rd_offset; /* request */
+ u32 rd_length; /* request */
+ u32 *rd_eof; /* response */
+ u32 *rd_bytes_read; /* response */
+ struct page ** rd_pages; /* zero-copy data */
+ unsigned int rd_pgbase; /* zero-copy data */
+};
+
+struct nfs4_readdir {
+ u64 rd_cookie; /* request */
+ nfs4_verifier rd_req_verifier; /* request */
+ u32 rd_count; /* request */
+ u32 rd_bmval[2]; /* request */
+ nfs4_verifier rd_resp_verifier; /* response */
+ struct page ** rd_pages; /* zero-copy data */
+ unsigned int rd_pgbase; /* zero-copy data */
+};
+
+struct nfs4_readlink {
+ u32 rl_count; /* zero-copy data */
+ struct page ** rl_pages; /* zero-copy data */
+};
+
+struct nfs4_remove {
+ u32 rm_namelen; /* request */
+ const char * rm_name; /* request */
+ struct nfs4_change_info * rm_cinfo; /* response */
+};
+
+struct nfs4_rename {
+ u32 rn_oldnamelen; /* request */
+ const char * rn_oldname; /* request */
+ u32 rn_newnamelen; /* request */
+ const char * rn_newname; /* request */
+ struct nfs4_change_info * rn_src_cinfo; /* response */
+ struct nfs4_change_info * rn_dst_cinfo; /* response */
+};
+
+struct nfs4_setattr {
+ char * st_stateid; /* request */
+ struct iattr * st_iap; /* request */
+};
+
+struct nfs4_setclientid {
+ nfs4_verifier sc_verifier; /* request */
+ char * sc_name; /* request */
+ u32 sc_prog; /* request */
+ char sc_netid[4]; /* request */
+ char sc_uaddr[24]; /* request */
+ u32 sc_cb_ident; /* request */
+};
+
+struct nfs4_write {
+ u64 wr_offset; /* request */
+ u32 wr_stable_how; /* request */
+ u32 wr_len; /* request */
+ u32 * wr_bytes_written; /* response */
+ struct nfs_writeverf * wr_verf; /* response */
+ struct page ** wr_pages; /* zero-copy data */
+ unsigned int wr_pgbase; /* zero-copy data */
+};
+
+struct nfs4_op {
+ u32 opnum;
+ u32 nfserr;
+ union {
+ struct nfs4_access access;
+ struct nfs4_close close;
+ struct nfs4_commit commit;
+ struct nfs4_create create;
+ struct nfs4_getattr getattr;
+ struct nfs4_getfh getfh;
+ struct nfs4_link link;
+ struct nfs4_lookup lookup;
+ struct nfs4_open open;
+ struct nfs4_open_confirm open_confirm;
+ struct nfs4_putfh putfh;
+ struct nfs4_read read;
+ struct nfs4_readdir readdir;
+ struct nfs4_readlink readlink;
+ struct nfs4_remove remove;
+ struct nfs4_rename rename;
+ struct nfs4_setattr setattr;
+ struct nfs4_setclientid setclientid;
+ struct nfs4_write write;
+ } u;
+};
+
+struct nfs4_compound {
+ unsigned int flags; /* defined below */
+ struct nfs_server * server;
+
+ /* RENEW information */
+ int renew_index;
+ unsigned long timestamp;
+
+ /* scratch variables for XDR encode/decode */
+ int nops;
+ u32 * p;
+ u32 * end;
+
+ /* the individual COMPOUND operations */
+ struct nfs4_op *ops;
+
+ /* request */
+ int req_nops;
+ u32 taglen;
+ char * tag;
+
+ /* response */
+ int resp_nops;
+ int toplevel_status;
+};
+
+#endif /* CONFIG_NFS_V4 */
+
struct nfs_read_data {
struct rpc_task task;
struct inode *inode;
@@ -322,7 +551,12 @@ struct nfs_read_data {
struct nfs_readres res;
} v3; /* also v2 */
#ifdef CONFIG_NFS_V4
- /* NFSv4 data will come here... */
+ struct {
+ struct nfs4_compound compound;
+ struct nfs4_op ops[3];
+ u32 res_count;
+ u32 res_eof;
+ } v4;
#endif
} u;
};
@@ -337,11 +571,17 @@ struct nfs_write_data {
struct page *pagevec[NFS_WRITE_MAXIOV];
union {
struct {
- struct nfs_writeargs args;
- struct nfs_writeres res;
+ struct nfs_writeargs args; /* argument struct */
+ struct nfs_writeres res; /* result struct */
} v3;
#ifdef CONFIG_NFS_V4
- /* NFSv4 data to come here... */
+ struct {
+ struct nfs4_compound compound;
+ struct nfs4_op ops[3];
+ u32 arg_count;
+ u32 arg_stable;
+ u32 res_count;
+ } v4;
#endif
} u;
};
@@ -391,7 +631,11 @@ struct nfs_rpc_ops {
int (*mknod) (struct inode *, struct qstr *, struct iattr *,
dev_t, struct nfs_fh *, struct nfs_fattr *);
int (*statfs) (struct nfs_server *, struct nfs_fh *,
+ struct nfs_fsstat *);
+ int (*fsinfo) (struct nfs_server *, struct nfs_fh *,
struct nfs_fsinfo *);
+ int (*pathconf) (struct nfs_server *, struct nfs_fh *,
+ struct nfs_pathconf *);
u32 * (*decode_dirent)(u32 *, struct nfs_entry *, int plus);
void (*read_setup) (struct nfs_read_data *, unsigned int count);
void (*write_setup) (struct nfs_write_data *, unsigned int count, int how);
@@ -410,8 +654,10 @@ struct nfs_rpc_ops {
*/
extern struct nfs_rpc_ops nfs_v2_clientops;
extern struct nfs_rpc_ops nfs_v3_clientops;
+extern struct nfs_rpc_ops nfs_v4_clientops;
extern struct rpc_version nfs_version2;
extern struct rpc_version nfs_version3;
+extern struct rpc_version nfs_version4;
extern struct rpc_program nfs_program;
extern struct rpc_stat nfs_rpcstat;
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
new file mode 100644
index 000000000000..982b64e0518a
--- /dev/null
+++ b/include/linux/oprofile.h
@@ -0,0 +1,98 @@
+/**
+ * @file oprofile.h
+ *
+ * API for machine-specific interrupts to interface
+ * to oprofile.
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
+ */
+
+#ifndef OPROFILE_H
+#define OPROFILE_H
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <asm/atomic.h>
+
+struct super_block;
+struct dentry;
+struct file_operations;
+
+enum oprofile_cpu {
+ OPROFILE_CPU_PPRO,
+ OPROFILE_CPU_PII,
+ OPROFILE_CPU_PIII,
+ OPROFILE_CPU_ATHLON,
+ OPROFILE_CPU_TIMER
+};
+
+/* Operations structure to be filled in */
+struct oprofile_operations {
+ /* create any necessary configuration files in the oprofile fs.
+ * Optional. */
+ int (*create_files)(struct super_block * sb, struct dentry * root);
+ /* Do any necessary interrupt setup. Optional. */
+ int (*setup)(void);
+ /* Do any necessary interrupt shutdown. Optional. */
+ void (*shutdown)(void);
+ /* Start delivering interrupts. */
+ int (*start)(void);
+ /* Stop delivering interrupts. */
+ void (*stop)(void);
+};
+
+/**
+ * One-time initialisation. *ops must be set to a filled-in
+ * operations structure. oprofile_cpu_type must be set.
+ * Return 0 on success.
+ */
+int oprofile_arch_init(struct oprofile_operations ** ops, enum oprofile_cpu * cpu);
+
+/**
+ * Add a sample. This may be called from any context. Pass
+ * smp_processor_id() as cpu.
+ */
+extern void FASTCALL(oprofile_add_sample(unsigned long eip, unsigned long event, int cpu));
+
+/**
+ * Create a file of the given name as a child of the given root, with
+ * the specified file operations.
+ */
+int oprofilefs_create_file(struct super_block * sb, struct dentry * root,
+ char const * name, struct file_operations * fops);
+
+/** Create a file for read/write access to an unsigned long. */
+int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
+ char const * name, ulong * val);
+
+/** Create a file for read-only access to an unsigned long. */
+int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
+ char const * name, ulong * val);
+
+/** Create a file for read-only access to an atomic_t. */
+int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
+ char const * name, atomic_t * val);
+
+/** create a directory */
+struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
+ char const * name);
+
+/**
+ * Convert an unsigned long value into ASCII and copy it to the user buffer @buf,
+ * updating *offset appropriately. Returns bytes written or -EFAULT.
+ */
+ssize_t oprofilefs_ulong_to_user(unsigned long * val, char * buf, size_t count, loff_t * offset);
+
+/**
+ * Read an ASCII string for a number from a userspace buffer and fill *val on success.
+ * Returns 0 on success, < 0 on error.
+ */
+int oprofilefs_ulong_from_user(unsigned long * val, char const * buf, size_t count);
+
+/** lock for read/write safety */
+extern spinlock_t oprofilefs_lock;
+
+#endif /* OPROFILE_H */
diff --git a/include/linux/profile.h b/include/linux/profile.h
new file mode 100644
index 000000000000..11fbe9cec572
--- /dev/null
+++ b/include/linux/profile.h
@@ -0,0 +1,67 @@
+#ifndef _LINUX_PROFILE_H
+#define _LINUX_PROFILE_H
+
+#ifdef __KERNEL__
+
+#include <linux/kernel.h>
+#include <linux/config.h>
+#include <linux/init.h>
+#include <asm/errno.h>
+
+/* parse command line */
+int __init profile_setup(char * str);
+
+/* init basic kernel profiler */
+void __init profile_init(void);
+
+extern unsigned int * prof_buffer;
+extern unsigned long prof_len;
+extern unsigned long prof_shift;
+
+
+enum profile_type {
+ EXIT_TASK,
+ EXIT_MMAP,
+ EXEC_UNMAP
+};
+
+#ifdef CONFIG_PROFILING
+
+struct notifier_block;
+struct task_struct;
+struct mm_struct;
+
+/* task is in do_exit() */
+void profile_exit_task(struct task_struct * task);
+
+/* change of vma mappings */
+void profile_exec_unmap(struct mm_struct * mm);
+
+/* exit of all vmas for a task */
+void profile_exit_mmap(struct mm_struct * mm);
+
+int profile_event_register(enum profile_type, struct notifier_block * n);
+
+int profile_event_unregister(enum profile_type, struct notifier_block * n);
+
+#else
+
+static inline int profile_event_register(enum profile_type t, struct notifier_block * n)
+{
+ return -ENOSYS;
+}
+
+static inline int profile_event_unregister(enum profile_type t, struct notifier_block * n)
+{
+ return -ENOSYS;
+}
+
+#define profile_exit_task(a) do { } while (0)
+#define profile_exec_unmap(a) do { } while (0)
+#define profile_exit_mmap(a) do { } while (0)
+
+#endif /* CONFIG_PROFILING */
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_PROFILE_H */
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
new file mode 100644
index 000000000000..a5ffb7bb5743
--- /dev/null
+++ b/include/linux/rcupdate.h
@@ -0,0 +1,134 @@
+/*
+ * Read-Copy Update mechanism for mutual exclusion
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) IBM Corporation, 2001
+ *
+ * Author: Dipankar Sarma <dipankar@in.ibm.com>
+ *
+ * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
+ * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
+ * Papers:
+ * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
+ * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
+ *
+ * For detailed explanation of Read-Copy Update mechanism see -
+ * http://lse.sourceforge.net/locking/rcupdate.html
+ *
+ */
+
+#ifndef __LINUX_RCUPDATE_H
+#define __LINUX_RCUPDATE_H
+
+#ifdef __KERNEL__
+
+#include <linux/cache.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/threads.h>
+
+/**
+ * struct rcu_head - callback structure for use with RCU
+ * @list: list_head to queue the update requests
+ * @func: actual update function to call after the grace period.
+ * @arg: argument to be passed to the actual update function.
+ */
+struct rcu_head {
+ struct list_head list;
+ void (*func)(void *obj);
+ void *arg;
+};
+
+#define RCU_HEAD_INIT(head) \
+ { list: LIST_HEAD_INIT(head.list), func: NULL, arg: NULL }
+#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT(head)
+#define INIT_RCU_HEAD(ptr) do { \
+ INIT_LIST_HEAD(&(ptr)->list); (ptr)->func = NULL; (ptr)->arg = NULL; \
+} while (0)
+
+
+
+/* Control variables for rcupdate callback mechanism. */
+struct rcu_ctrlblk {
+ spinlock_t mutex; /* Guard this struct */
+ long curbatch; /* Current batch number. */
+ long maxbatch; /* Max requested batch number. */
+ unsigned long rcu_cpu_mask; /* CPUs that need to switch in order */
+ /* for current batch to proceed. */
+};
+
+/* Is batch a before batch b ? */
+static inline int rcu_batch_before(long a, long b)
+{
+ return (a - b) < 0;
+}
+
+/* Is batch a after batch b ? */
+static inline int rcu_batch_after(long a, long b)
+{
+ return (a - b) > 0;
+}
+
+/*
+ * Per-CPU data for Read-Copy Update.
+ * nxtlist - new callbacks are added here
+ * curlist - current batch for which quiescent cycle started if any
+ */
+struct rcu_data {
+ long qsctr; /* User-mode/idle loop etc. */
+ long last_qsctr; /* value of qsctr at beginning */
+ /* of rcu grace period */
+ long batch; /* Batch # for current RCU batch */
+ struct list_head nxtlist;
+ struct list_head curlist;
+} ____cacheline_aligned_in_smp;
+
+extern struct rcu_data rcu_data[NR_CPUS];
+extern struct rcu_ctrlblk rcu_ctrlblk;
+
+#define RCU_qsctr(cpu) (rcu_data[(cpu)].qsctr)
+#define RCU_last_qsctr(cpu) (rcu_data[(cpu)].last_qsctr)
+#define RCU_batch(cpu) (rcu_data[(cpu)].batch)
+#define RCU_nxtlist(cpu) (rcu_data[(cpu)].nxtlist)
+#define RCU_curlist(cpu) (rcu_data[(cpu)].curlist)
+
+#define RCU_QSCTR_INVALID 0
+
+static inline int rcu_pending(int cpu)
+{
+ if ((!list_empty(&RCU_curlist(cpu)) &&
+ rcu_batch_before(RCU_batch(cpu), rcu_ctrlblk.curbatch)) ||
+ (list_empty(&RCU_curlist(cpu)) &&
+ !list_empty(&RCU_nxtlist(cpu))) ||
+ test_bit(cpu, &rcu_ctrlblk.rcu_cpu_mask))
+ return 1;
+ else
+ return 0;
+}
+
+#define rcu_read_lock() preempt_disable()
+#define rcu_read_unlock() preempt_enable()
+
+extern void rcu_init(void);
+extern void rcu_check_callbacks(int cpu, int user);
+
+/* Exported interfaces */
+extern void FASTCALL(call_rcu(struct rcu_head *head,
+ void (*func)(void *arg), void *arg));
+extern void synchronize_kernel(void);
+
+#endif /* __KERNEL__ */
+#endif /* __LINUX_RCUPDATE_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 89c4ead4cf4b..764a3ebf3c24 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -492,10 +492,6 @@ extern unsigned long itimer_ticks;
extern unsigned long itimer_next;
extern void do_timer(struct pt_regs *);
-extern unsigned int * prof_buffer;
-extern unsigned long prof_len;
-extern unsigned long prof_shift;
-
extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr));
extern void FASTCALL(__wake_up_locked(wait_queue_head_t *q, unsigned int mode));
extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
diff --git a/include/linux/vcache.h b/include/linux/vcache.h
index d5756643332c..5708fe6a908a 100644
--- a/include/linux/vcache.h
+++ b/include/linux/vcache.h
@@ -18,7 +18,7 @@ extern void __attach_vcache(vcache_t *vcache,
struct mm_struct *mm,
void (*callback)(struct vcache_s *data, struct page *new_page));
-extern void detach_vcache(vcache_t *vcache);
+extern void __detach_vcache(vcache_t *vcache);
extern void invalidate_vcache(unsigned long address, struct mm_struct *mm,
struct page *new_page);
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 3253f7e9e06a..d8e54c8f4daf 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -40,11 +40,7 @@
#endif
/* Reserv for core and drivers use */
-#define BLUEZ_SKB_RESERVE 8
-
-#ifndef MIN
-#define MIN(a,b) ((a) < (b) ? (a) : (b))
-#endif
+#define BT_SKB_RESERVE 8
#define BTPROTO_L2CAP 0
#define BTPROTO_HCI 1
@@ -57,29 +53,12 @@
#define SOL_SCO 17
#define SOL_RFCOMM 18
-/* Debugging */
-#ifdef CONFIG_BLUEZ_DEBUG
-
-#define HCI_CORE_DEBUG 1
-#define HCI_SOCK_DEBUG 1
-#define HCI_UART_DEBUG 1
-#define HCI_USB_DEBUG 1
-//#define HCI_DATA_DUMP 1
-
-#define L2CAP_DEBUG 1
-#define SCO_DEBUG 1
-#define AF_BLUETOOTH_DEBUG 1
-
-#endif /* CONFIG_BLUEZ_DEBUG */
-
-extern void bluez_dump(char *pref, __u8 *buf, int count);
-
#define BT_INFO(fmt, arg...) printk(KERN_INFO fmt "\n" , ## arg)
#define BT_DBG(fmt, arg...) printk(KERN_INFO "%s: " fmt "\n" , __FUNCTION__ , ## arg)
#define BT_ERR(fmt, arg...) printk(KERN_ERR "%s: " fmt "\n" , __FUNCTION__ , ## arg)
#ifdef HCI_DATA_DUMP
-#define BT_DMP(buf, len) bluez_dump(__FUNCTION__, buf, len)
+#define BT_DMP(buf, len) bt_dump(__FUNCTION__, buf, len)
#else
#define BT_DMP(D...)
#endif
@@ -127,9 +106,9 @@ bdaddr_t *strtoba(char *str);
/* Common socket structures and functions */
-#define bluez_sk(__sk) ((struct bluez_sock *) __sk)
+#define bt_sk(__sk) ((struct bt_sock *) __sk)
-struct bluez_sock {
+struct bt_sock {
struct sock sk;
bdaddr_t src;
bdaddr_t dst;
@@ -137,48 +116,48 @@ struct bluez_sock {
struct sock *parent;
};
-struct bluez_sock_list {
+struct bt_sock_list {
struct sock *head;
rwlock_t lock;
};
-int bluez_sock_register(int proto, struct net_proto_family *ops);
-int bluez_sock_unregister(int proto);
-struct sock *bluez_sock_alloc(struct socket *sock, int proto, int pi_size, int prio);
-void bluez_sock_link(struct bluez_sock_list *l, struct sock *s);
-void bluez_sock_unlink(struct bluez_sock_list *l, struct sock *s);
-int bluez_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, int len, int flags, struct scm_cookie *scm);
-uint bluez_sock_poll(struct file * file, struct socket *sock, poll_table *wait);
-int bluez_sock_w4_connect(struct sock *sk, int flags);
+int bt_sock_register(int proto, struct net_proto_family *ops);
+int bt_sock_unregister(int proto);
+struct sock *bt_sock_alloc(struct socket *sock, int proto, int pi_size, int prio);
+void bt_sock_link(struct bt_sock_list *l, struct sock *s);
+void bt_sock_unlink(struct bt_sock_list *l, struct sock *s);
+int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, int len, int flags, struct scm_cookie *scm);
+uint bt_sock_poll(struct file * file, struct socket *sock, poll_table *wait);
+int bt_sock_w4_connect(struct sock *sk, int flags);
-void bluez_accept_enqueue(struct sock *parent, struct sock *sk);
-struct sock *bluez_accept_dequeue(struct sock *parent, struct socket *newsock);
+void bt_accept_enqueue(struct sock *parent, struct sock *sk);
+struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock);
/* Skb helpers */
-struct bluez_skb_cb {
- int incomming;
+struct bt_skb_cb {
+ int incoming;
};
-#define bluez_cb(skb) ((struct bluez_skb_cb *)(skb->cb))
+#define bt_cb(skb) ((struct bt_skb_cb *)(skb->cb))
-static inline struct sk_buff *bluez_skb_alloc(unsigned int len, int how)
+static inline struct sk_buff *bt_skb_alloc(unsigned int len, int how)
{
struct sk_buff *skb;
- if ((skb = alloc_skb(len + BLUEZ_SKB_RESERVE, how))) {
- skb_reserve(skb, BLUEZ_SKB_RESERVE);
- bluez_cb(skb)->incomming = 0;
+ if ((skb = alloc_skb(len + BT_SKB_RESERVE, how))) {
+ skb_reserve(skb, BT_SKB_RESERVE);
+ bt_cb(skb)->incoming = 0;
}
return skb;
}
-static inline struct sk_buff *bluez_skb_send_alloc(struct sock *sk, unsigned long len,
+static inline struct sk_buff *bt_skb_send_alloc(struct sock *sk, unsigned long len,
int nb, int *err)
{
struct sk_buff *skb;
- if ((skb = sock_alloc_send_skb(sk, len + BLUEZ_SKB_RESERVE, nb, err))) {
- skb_reserve(skb, BLUEZ_SKB_RESERVE);
- bluez_cb(skb)->incomming = 0;
+ if ((skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err))) {
+ skb_reserve(skb, BT_SKB_RESERVE);
+ bt_cb(skb)->incoming = 0;
}
return skb;
@@ -193,11 +172,8 @@ static inline int skb_frags_no(struct sk_buff *skb)
return n;
}
-int hci_core_init(void);
-int hci_core_cleanup(void);
-int hci_sock_init(void);
-int hci_sock_cleanup(void);
+void bt_dump(char *pref, __u8 *buf, int count);
-int bterr(__u16 code);
+int bt_err(__u16 code);
#endif /* __BLUETOOTH_H */
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index b58ebef5397b..a8832055d170 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -39,6 +39,8 @@
#define HCI_DEV_UNREG 2
#define HCI_DEV_UP 3
#define HCI_DEV_DOWN 4
+#define HCI_DEV_SUSPEND 5
+#define HCI_DEV_RESUME 6
/* HCI device types */
#define HCI_VHCI 0
@@ -582,7 +584,7 @@ typedef struct {
} __attribute__ ((packed)) evt_read_remote_version_complete;
#define EVT_READ_REMOTE_VERSION_COMPLETE_SIZE 8
-/* Internal events generated by BlueZ stack */
+/* Internal events generated by Bluetooth stack */
#define EVT_STACK_INTERNAL 0xfd
typedef struct {
__u16 type;
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 1740cde1287d..adf5558f660a 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -302,6 +302,8 @@ struct hci_dev *hci_dev_get(int index);
struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);
int hci_register_dev(struct hci_dev *hdev);
int hci_unregister_dev(struct hci_dev *hdev);
+int hci_suspend_dev(struct hci_dev *hdev);
+int hci_resume_dev(struct hci_dev *hdev);
int hci_dev_open(__u16 dev);
int hci_dev_close(__u16 dev);
int hci_dev_reset(__u16 dev);
diff --git a/include/rxrpc/call.h b/include/rxrpc/call.h
new file mode 100644
index 000000000000..0ae39ad9c612
--- /dev/null
+++ b/include/rxrpc/call.h
@@ -0,0 +1,218 @@
+/* call.h: Rx call record
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_RXRPC_CALL_H
+#define _LINUX_RXRPC_CALL_H
+
+#include <rxrpc/types.h>
+#include <rxrpc/rxrpc.h>
+#include <rxrpc/packet.h>
+#include <linux/timer.h>
+
+#define RXRPC_CALL_ACK_WINDOW_SIZE 16
+
+extern unsigned rxrpc_call_rcv_timeout; /* receive activity timeout (secs) */
+extern unsigned rxrpc_call_acks_timeout; /* pending ACK (retransmit) timeout (secs) */
+extern unsigned rxrpc_call_dfr_ack_timeout; /* deferred ACK timeout (secs) */
+extern unsigned short rxrpc_call_max_resend; /* maximum consecutive resend count */
+
+/* application call state
+ * - only state 0 and ffff are reserved, the state is set to 1 after an opid is received
+ */
+enum rxrpc_app_cstate {
+ RXRPC_CSTATE_COMPLETE = 0, /* operation complete */
+ RXRPC_CSTATE_ERROR, /* operation ICMP error or aborted */
+ RXRPC_CSTATE_SRVR_RCV_OPID, /* [SERVER] receiving operation ID */
+ RXRPC_CSTATE_SRVR_RCV_ARGS, /* [SERVER] receiving operation data */
+ RXRPC_CSTATE_SRVR_GOT_ARGS, /* [SERVER] completely received operation data */
+ RXRPC_CSTATE_SRVR_SND_REPLY, /* [SERVER] sending operation reply */
+ RXRPC_CSTATE_SRVR_RCV_FINAL_ACK, /* [SERVER] receiving final ACK */
+ RXRPC_CSTATE_CLNT_SND_ARGS, /* [CLIENT] sending operation args */
+ RXRPC_CSTATE_CLNT_RCV_REPLY, /* [CLIENT] receiving operation reply */
+ RXRPC_CSTATE_CLNT_GOT_REPLY, /* [CLIENT] completely received operation reply */
+} __attribute__((packed));
+
+extern const char *rxrpc_call_states[];
+
+enum rxrpc_app_estate {
+ RXRPC_ESTATE_NO_ERROR = 0, /* no error */
+ RXRPC_ESTATE_LOCAL_ABORT, /* aborted locally by application layer */
+ RXRPC_ESTATE_PEER_ABORT, /* aborted remotely by peer */
+ RXRPC_ESTATE_LOCAL_ERROR, /* local ICMP network error */
+ RXRPC_ESTATE_REMOTE_ERROR, /* remote ICMP network error */
+} __attribute__((packed));
+
+extern const char *rxrpc_call_error_states[];
+
+/*****************************************************************************/
+/*
+ * Rx call record and application scratch buffer
+ * - the call record occupies the bottom of a complete page
+ * - the application scratch buffer occupies the rest
+ */
+struct rxrpc_call
+{
+ atomic_t usage;
+ struct rxrpc_connection *conn; /* connection upon which active */
+ spinlock_t lock; /* access lock */
+ struct module *owner; /* owner module */
+ wait_queue_head_t waitq; /* wait queue for events to happen */
+ struct list_head link; /* general internal list link */
+ struct list_head call_link; /* master call list link */
+ u32 chan_ix; /* connection channel index (net order) */
+ u32 call_id; /* call ID on connection (net order) */
+ unsigned long cjif; /* jiffies at call creation */
+ unsigned long flags; /* control flags */
+#define RXRPC_CALL_ACKS_TIMO 0x00000001 /* ACKS timeout reached */
+#define RXRPC_CALL_ACKR_TIMO 0x00000002 /* ACKR timeout reached */
+#define RXRPC_CALL_RCV_TIMO 0x00000004 /* RCV timeout reached */
+#define RXRPC_CALL_RCV_PKT 0x00000008 /* received packet */
+
+ /* transmission */
+ rxrpc_seq_t snd_seq_count; /* outgoing packet sequence number counter */
+ struct rxrpc_message *snd_nextmsg; /* next message being constructed for sending */
+ struct rxrpc_message *snd_ping; /* last ping message sent */
+ unsigned short snd_resend_cnt; /* count of resends since last ACK */
+
+ /* transmission ACK tracking */
+ struct list_head acks_pendq; /* messages pending ACK (ordered by seq) */
+ unsigned acks_pend_cnt; /* number of un-ACK'd packets */
+ rxrpc_seq_t acks_dftv_seq; /* highest definitively ACK'd msg seq */
+ struct timer_list acks_timeout; /* timeout on expected ACK */
+
+ /* reception */
+ struct list_head rcv_receiveq; /* messages pending reception (ordered by seq) */
+ struct list_head rcv_krxiodq_lk; /* krxiod queue for new inbound packets */
+ struct timer_list rcv_timeout; /* call receive activity timeout */
+
+ /* reception ACK'ing */
+ rxrpc_seq_t ackr_win_bot; /* bottom of ACK window */
+ rxrpc_seq_t ackr_win_top; /* top of ACK window */
+ rxrpc_seq_t ackr_high_seq; /* highest seqno yet received */
+ rxrpc_seq_t ackr_prev_seq; /* previous seqno received */
+ unsigned ackr_pend_cnt; /* number of pending ACKs */
+ struct timer_list ackr_dfr_timo; /* timeout on deferred ACK */
+ char ackr_dfr_perm; /* request for deferred ACKs permitted */
+ rxrpc_seq_t ackr_dfr_seq; /* seqno for deferred ACK */
+ struct rxrpc_ackpacket ackr; /* pending normal ACK packet */
+ u8 ackr_array[RXRPC_CALL_ACK_WINDOW_SIZE]; /* ACK records */
+
+ /* presentation layer */
+ char app_last_rcv; /* T if received last packet from remote end */
+ enum rxrpc_app_cstate app_call_state; /* call state */
+ enum rxrpc_app_estate app_err_state; /* abort/error state */
+ struct list_head app_readyq; /* ordered ready received packet queue */
+ struct list_head app_unreadyq; /* ordered post-hole recv'd packet queue */
+ rxrpc_seq_t app_ready_seq; /* last seq number dropped into readyq */
+ size_t app_ready_qty; /* amount of data ready in readyq */
+ unsigned app_opcode; /* operation ID */
+ unsigned app_abort_code; /* abort code (when aborted) */
+ int app_errno; /* error number (when ICMP error received) */
+
+	/* statistics */
+ unsigned pkt_rcv_count; /* count of received packets on this call */
+ unsigned pkt_snd_count; /* count of sent packets on this call */
+ unsigned app_read_count; /* number of reads issued */
+
+ /* bits for the application to use */
+ rxrpc_call_attn_func_t app_attn_func; /* callback when attention required */
+ rxrpc_call_error_func_t app_error_func; /* callback when abort sent (cleanup and put) */
+ rxrpc_call_aemap_func_t app_aemap_func; /* callback to map abort code to/from errno */
+ void *app_user; /* application data */
+ struct list_head app_link; /* application list linkage */
+ struct list_head app_attn_link; /* application attention list linkage */
+ size_t app_mark; /* trigger callback when app_ready_qty>=app_mark */
+ char app_async_read; /* T if in async-read mode */
+ u8 *app_read_buf; /* application async read buffer (app_mark size) */
+ u8 *app_scr_alloc; /* application scratch allocation pointer */
+ void *app_scr_ptr; /* application pointer into scratch buffer */
+
+#define RXRPC_APP_MARK_EOF 0xFFFFFFFFU /* mark at end of input */
+
+ /* application scratch buffer */
+ u8 app_scratch[0] __attribute__((aligned(sizeof(long))));
+};
+
+#define RXRPC_CALL_SCRATCH_SIZE (PAGE_SIZE - sizeof(struct rxrpc_call))
+
+#define rxrpc_call_reset_scratch(CALL) \
+do { (CALL)->app_scr_alloc = (CALL)->app_scratch; } while(0)
+
+#define rxrpc_call_alloc_scratch(CALL,SIZE) \
+({ \
+ void *ptr; \
+ ptr = (CALL)->app_scr_alloc; \
+ (CALL)->app_scr_alloc += (SIZE); \
+ if ((SIZE)>RXRPC_CALL_SCRATCH_SIZE || \
+ (size_t)((CALL)->app_scr_alloc - (u8*)(CALL)) > RXRPC_CALL_SCRATCH_SIZE) { \
+ printk("rxrpc_call_alloc_scratch(%p,%u)\n",(CALL),(SIZE)); \
+ BUG(); \
+ } \
+ ptr; \
+})
+
+#define rxrpc_call_alloc_scratch_s(CALL,TYPE) \
+({ \
+ size_t size = sizeof(TYPE); \
+ TYPE *ptr; \
+ ptr = (TYPE*)(CALL)->app_scr_alloc; \
+ (CALL)->app_scr_alloc += size; \
+ if (size>RXRPC_CALL_SCRATCH_SIZE || \
+ (size_t)((CALL)->app_scr_alloc - (u8*)(CALL)) > RXRPC_CALL_SCRATCH_SIZE) { \
+ printk("rxrpc_call_alloc_scratch(%p,%u)\n",(CALL),size); \
+ BUG(); \
+ } \
+ ptr; \
+})
+
+#define rxrpc_call_is_ack_pending(CALL) ((CALL)->ackr.reason != 0)
+
+extern int rxrpc_create_call(struct rxrpc_connection *conn,
+ rxrpc_call_attn_func_t attn,
+ rxrpc_call_error_func_t error,
+ rxrpc_call_aemap_func_t aemap,
+ struct rxrpc_call **_call);
+
+extern int rxrpc_incoming_call(struct rxrpc_connection *conn,
+ struct rxrpc_message *msg,
+ struct rxrpc_call **_call);
+
+static inline void rxrpc_get_call(struct rxrpc_call *call)
+{
+ if (atomic_read(&call->usage)<=0)
+ BUG();
+ atomic_inc(&call->usage);
+ /*printk("rxrpc_get_call(%p{u=%d})\n",(C),atomic_read(&(C)->usage));*/
+}
+
+extern void rxrpc_put_call(struct rxrpc_call *call);
+
+extern void rxrpc_call_do_stuff(struct rxrpc_call *call);
+
+extern int rxrpc_call_abort(struct rxrpc_call *call, int error);
+
+#define RXRPC_CALL_READ_BLOCK 0x0001 /* block if not enough data and not yet EOF */
+#define RXRPC_CALL_READ_ALL 0x0002 /* error if insufficient data received */
+extern int rxrpc_call_read_data(struct rxrpc_call *call, void *buffer, size_t size, int flags);
+
+extern int rxrpc_call_write_data(struct rxrpc_call *call,
+ size_t sioc,
+ struct iovec siov[],
+ u8 rxhdr_flags,
+ int alloc_flags,
+ int dup_data,
+ size_t *size_sent);
+
+extern int rxrpc_call_flush(struct rxrpc_call *call);
+
+extern void rxrpc_call_handle_error(struct rxrpc_call *conn, int local, int errno);
+
+#endif /* _LINUX_RXRPC_CALL_H */
diff --git a/include/rxrpc/connection.h b/include/rxrpc/connection.h
new file mode 100644
index 000000000000..fc10fed01b21
--- /dev/null
+++ b/include/rxrpc/connection.h
@@ -0,0 +1,83 @@
+/* connection.h: Rx connection record
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_RXRPC_CONNECTION_H
+#define _LINUX_RXRPC_CONNECTION_H
+
+#include <rxrpc/types.h>
+#include <rxrpc/krxtimod.h>
+
+struct sk_buff;
+
+/*****************************************************************************/
+/*
+ * Rx connection
+ * - connections are matched by (rmt_port,rmt_addr,service_id,conn_id,clientflag)
+ * - connections only retain a refcount on the peer when they are active
+ * - connections with refcount==0 are inactive and reside in the peer's graveyard
+ */
+struct rxrpc_connection
+{
+ atomic_t usage;
+ struct rxrpc_transport *trans; /* transport endpoint */
+ struct rxrpc_peer *peer; /* peer from/to which connected */
+ struct rxrpc_service *service; /* responsible service (inbound conns) */
+ struct rxrpc_timer timeout; /* decaching timer */
+ struct list_head link; /* link in peer's list */
+ struct list_head proc_link; /* link in proc list */
+ struct list_head err_link; /* link in ICMP error processing list */
+ struct sockaddr_in addr; /* remote address */
+ struct rxrpc_call *channels[4]; /* channels (active calls) */
+ wait_queue_head_t chanwait; /* wait for channel to become available */
+ spinlock_t lock; /* access lock */
+ struct timeval atime; /* last access time */
+ size_t mtu_size; /* MTU size for outbound messages */
+ unsigned call_counter; /* call ID counter */
+ rxrpc_serial_t serial_counter; /* packet serial number counter */
+
+ /* the following should all be in net order */
+ u32 in_epoch; /* peer's epoch */
+ u32 out_epoch; /* my epoch */
+ u32 conn_id; /* connection ID, appropriately shifted */
+ u16 service_id; /* service ID */
+ u8 security_ix; /* security ID */
+ u8 in_clientflag; /* RXRPC_CLIENT_INITIATED if we are server */
+ u8 out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */
+};
+
+extern int rxrpc_create_connection(struct rxrpc_transport *trans,
+ u16 port,
+ u32 addr,
+ unsigned short service_id,
+ void *security,
+ struct rxrpc_connection **_conn);
+
+extern int rxrpc_connection_lookup(struct rxrpc_peer *peer,
+ struct rxrpc_message *msg,
+ struct rxrpc_connection **_conn);
+
+static inline void rxrpc_get_connection(struct rxrpc_connection *conn)
+{
+ if (atomic_read(&conn->usage)<0)
+ BUG();
+ atomic_inc(&conn->usage);
+ //printk("rxrpc_get_conn(%p{u=%d})\n",conn,atomic_read(&conn->usage));
+}
+
+extern void rxrpc_put_connection(struct rxrpc_connection *conn);
+
+extern int rxrpc_conn_receive_call_packet(struct rxrpc_connection *conn,
+ struct rxrpc_call *call,
+ struct rxrpc_message *msg);
+
+extern void rxrpc_conn_handle_error(struct rxrpc_connection *conn, int local, int errno);
+
+#endif /* _LINUX_RXRPC_CONNECTION_H */
diff --git a/include/rxrpc/krxiod.h b/include/rxrpc/krxiod.h
new file mode 100644
index 000000000000..c0e0e82e4df2
--- /dev/null
+++ b/include/rxrpc/krxiod.h
@@ -0,0 +1,27 @@
+/* krxiod.h: Rx RPC I/O kernel thread interface
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_RXRPC_KRXIOD_H
+#define _LINUX_RXRPC_KRXIOD_H
+
+#include <rxrpc/types.h>
+
+extern int rxrpc_krxiod_init(void);
+extern void rxrpc_krxiod_kill(void);
+extern void rxrpc_krxiod_queue_transport(struct rxrpc_transport *trans);
+extern void rxrpc_krxiod_dequeue_transport(struct rxrpc_transport *trans);
+extern void rxrpc_krxiod_queue_peer(struct rxrpc_peer *peer);
+extern void rxrpc_krxiod_dequeue_peer(struct rxrpc_peer *peer);
+extern void rxrpc_krxiod_clear_peers(struct rxrpc_transport *trans);
+extern void rxrpc_krxiod_queue_call(struct rxrpc_call *call);
+extern void rxrpc_krxiod_dequeue_call(struct rxrpc_call *call);
+
+#endif /* _LINUX_RXRPC_KRXIOD_H */
diff --git a/include/rxrpc/krxsecd.h b/include/rxrpc/krxsecd.h
new file mode 100644
index 000000000000..55ce43a25b38
--- /dev/null
+++ b/include/rxrpc/krxsecd.h
@@ -0,0 +1,22 @@
+/* krxsecd.h: Rx RPC security kernel thread interface
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_RXRPC_KRXSECD_H
+#define _LINUX_RXRPC_KRXSECD_H
+
+#include <rxrpc/types.h>
+
+extern int rxrpc_krxsecd_init(void);
+extern void rxrpc_krxsecd_kill(void);
+extern void rxrpc_krxsecd_clear_transport(struct rxrpc_transport *trans);
+extern void rxrpc_krxsecd_queue_incoming_call(struct rxrpc_message *msg);
+
+#endif /* _LINUX_RXRPC_KRXSECD_H */
diff --git a/include/rxrpc/krxtimod.h b/include/rxrpc/krxtimod.h
new file mode 100644
index 000000000000..b3d298b612f2
--- /dev/null
+++ b/include/rxrpc/krxtimod.h
@@ -0,0 +1,45 @@
+/* krxtimod.h: RxRPC timeout daemon
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_RXRPC_KRXTIMOD_H
+#define _LINUX_RXRPC_KRXTIMOD_H
+
+#include <rxrpc/types.h>
+
+struct rxrpc_timer_ops {
+ /* called when the front of the timer queue has timed out */
+ void (*timed_out)(struct rxrpc_timer *timer);
+};
+
+/*****************************************************************************/
+/*
+ * RXRPC timer/timeout record
+ */
+struct rxrpc_timer
+{
+ struct list_head link; /* link in timer queue */
+ unsigned long timo_jif; /* timeout time */
+ const struct rxrpc_timer_ops *ops; /* timeout expiry function */
+};
+
+static inline void rxrpc_timer_init(rxrpc_timer_t *timer, const struct rxrpc_timer_ops *ops)
+{
+ INIT_LIST_HEAD(&timer->link);
+ timer->ops = ops;
+}
+
+extern int rxrpc_krxtimod_start(void);
+extern void rxrpc_krxtimod_kill(void);
+
+extern void rxrpc_krxtimod_add_timer(rxrpc_timer_t *timer, unsigned long timeout);
+extern int rxrpc_krxtimod_del_timer(rxrpc_timer_t *timer);
+
+#endif /* _LINUX_RXRPC_KRXTIMOD_H */
diff --git a/include/rxrpc/message.h b/include/rxrpc/message.h
new file mode 100644
index 000000000000..2e43c03c6857
--- /dev/null
+++ b/include/rxrpc/message.h
@@ -0,0 +1,72 @@
+/* message.h: Rx message caching
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _H_3AD3363A_3A9C_11D6_83D8_0002B3163499
+#define _H_3AD3363A_3A9C_11D6_83D8_0002B3163499
+
+#include <rxrpc/packet.h>
+
+/*****************************************************************************/
+/*
+ * Rx message record
+ */
+struct rxrpc_message
+{
+ atomic_t usage;
+ struct list_head link; /* list link */
+ struct timeval stamp; /* time received or last sent */
+ rxrpc_seq_t seq; /* message sequence number */
+
+ int state; /* the state the message is currently in */
+#define RXRPC_MSG_PREPARED 0
+#define RXRPC_MSG_SENT 1
+#define RXRPC_MSG_ACKED 2 /* provisionally ACK'd */
+#define RXRPC_MSG_DONE 3 /* definitively ACK'd (msg->seq<ack.firstPacket) */
+#define RXRPC_MSG_RECEIVED 4
+#define RXRPC_MSG_ERROR -1
+ char rttdone; /* used for RTT */
+
+ struct rxrpc_transport *trans; /* transport received through */
+ struct rxrpc_connection *conn; /* connection received over */
+ struct sk_buff *pkt; /* received packet */
+ off_t offset; /* offset into pkt of next byte of data */
+
+ struct rxrpc_header hdr; /* message header */
+
+ int dcount; /* data part count */
+ size_t dsize; /* data size */
+#define RXRPC_MSG_MAX_IOCS 8
+ struct iovec data[RXRPC_MSG_MAX_IOCS]; /* message data */
+ unsigned long dfree; /* bit mask indicating kfree(data[x]) if T */
+};
+
+#define rxrpc_get_message(M) do { atomic_inc(&(M)->usage); } while(0)
+
+extern void __rxrpc_put_message(struct rxrpc_message *msg);
+static inline void rxrpc_put_message(struct rxrpc_message *msg)
+{
+ if (atomic_read(&msg->usage)<=0)
+ BUG();
+ if (atomic_dec_and_test(&msg->usage))
+ __rxrpc_put_message(msg);
+}
+
+extern int rxrpc_conn_newmsg(struct rxrpc_connection *conn,
+ struct rxrpc_call *call,
+ u8 type,
+ int count,
+ struct iovec diov[],
+ int alloc_flags,
+ struct rxrpc_message **_msg);
+
+extern int rxrpc_conn_sendmsg(struct rxrpc_connection *conn, struct rxrpc_message *msg);
+
+#endif /* _H_3AD3363A_3A9C_11D6_83D8_0002B3163499 */
diff --git a/include/rxrpc/packet.h b/include/rxrpc/packet.h
new file mode 100644
index 000000000000..78999077f5b8
--- /dev/null
+++ b/include/rxrpc/packet.h
@@ -0,0 +1,128 @@
+/* packet.h: Rx packet layout and definitions
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_RXRPC_PACKET_H
+#define _LINUX_RXRPC_PACKET_H
+
+#include <rxrpc/types.h>
+
+#define RXRPC_IPUDP_SIZE 28
+extern size_t RXRPC_MAX_PACKET_SIZE;
+#define RXRPC_MAX_PACKET_DATA_SIZE (RXRPC_MAX_PACKET_SIZE - sizeof(struct rxrpc_header))
+#define RXRPC_LOCAL_PACKET_SIZE RXRPC_MAX_PACKET_SIZE
+#define RXRPC_REMOTE_PACKET_SIZE (576 - RXRPC_IPUDP_SIZE)
+
+/*****************************************************************************/
+/*
+ * on-the-wire Rx packet header
+ * - all multibyte fields should be in network byte order
+ */
+struct rxrpc_header
+{
+ u32 epoch; /* client boot timestamp */
+
+ u32 cid; /* connection and channel ID */
+#define RXRPC_MAXCALLS 4 /* max active calls per conn */
+#define RXRPC_CHANNELMASK (RXRPC_MAXCALLS-1) /* mask for channel ID */
+#define RXRPC_CIDMASK (~RXRPC_CHANNELMASK) /* mask for connection ID */
+#define RXRPC_CIDSHIFT 2 /* shift for connection ID */
+
+ u32 callNumber; /* call ID (0 for connection-level packets) */
+#define RXRPC_PROCESS_MAXCALLS (1<<2) /* maximum number of active calls per conn (power of 2) */
+
+ u32 seq; /* sequence number of pkt in call stream */
+ u32 serial; /* serial number of pkt sent to network */
+
+ u8 type; /* packet type */
+#define RXRPC_PACKET_TYPE_DATA 1 /* data */
+#define RXRPC_PACKET_TYPE_ACK 2 /* ACK */
+#define RXRPC_PACKET_TYPE_BUSY 3 /* call reject */
+#define RXRPC_PACKET_TYPE_ABORT 4 /* call/connection abort */
+#define RXRPC_PACKET_TYPE_ACKALL 5 /* ACK all outstanding packets on call */
+#define RXRPC_PACKET_TYPE_CHALLENGE 6 /* connection security challenge (SRVR->CLNT) */
+#define RXRPC_PACKET_TYPE_RESPONSE 7 /* connection security response (CLNT->SRVR) */
+#define RXRPC_PACKET_TYPE_DEBUG 8 /* debug info request */
+#define RXRPC_N_PACKET_TYPES 9 /* number of packet types (incl type 0) */
+
+ u8 flags; /* packet flags */
+#define RXRPC_CLIENT_INITIATED 0x01 /* signifies a packet generated by a client */
+#define RXRPC_REQUEST_ACK 0x02 /* request an unconditional ACK of this packet */
+#define RXRPC_LAST_PACKET 0x04 /* the last packet from this side for this call */
+#define RXRPC_MORE_PACKETS 0x08 /* more packets to come */
+#define RXRPC_JUMBO_PACKET 0x20 /* [DATA] this is a jumbo packet */
+#define RXRPC_SLOW_START_OK 0x20 /* [ACK] slow start supported */
+
+ u8 userStatus; /* app-layer defined status */
+ u8 securityIndex; /* security protocol ID */
+ u16 _rsvd; /* reserved (used by kerberos security as cksum) */
+ u16 serviceId; /* service ID */
+
+} __attribute__((packed));
+
+#define __rxrpc_header_off(X) offsetof(struct rxrpc_header,X)
+
+extern const char *rxrpc_pkts[];
+
+/*****************************************************************************/
+/*
+ * jumbo packet secondary header
+ * - can be mapped to read header by:
+ * - new_serial = serial + 1
+ * - new_seq = seq + 1
+ * - new_flags = j_flags
+ * - new__rsvd = j__rsvd
+ * - duplicating all other fields
+ */
+struct rxrpc_jumbo_header
+{
+ u8 flags; /* packet flags (as per rxrpc_header) */
+ u8 pad;
+ u16 _rsvd; /* reserved (used by kerberos security as cksum) */
+};
+
+#define RXRPC_JUMBO_DATALEN 1412 /* non-terminal jumbo packet data length */
+
+/*****************************************************************************/
+/*
+ * on-the-wire Rx ACK packet data payload
+ * - all multibyte fields should be in network byte order
+ */
+struct rxrpc_ackpacket
+{
+ u16 bufferSpace; /* number of packet buffers available */
+ u16 maxSkew; /* diff between serno being ACK'd and highest serial no received */
+ u32 firstPacket; /* sequence no of first ACK'd packet in attached list */
+ u32 previousPacket; /* sequence no of previous packet received */
+ u32 serial; /* serial no of packet that prompted this ACK */
+
+ u8 reason; /* reason for ACK */
+#define RXRPC_ACK_REQUESTED 1 /* ACK was requested on packet */
+#define RXRPC_ACK_DUPLICATE 2 /* duplicate packet received */
+#define RXRPC_ACK_OUT_OF_SEQUENCE 3 /* out of sequence packet received */
+#define RXRPC_ACK_EXCEEDS_WINDOW 4 /* packet received beyond end of ACK window */
+#define RXRPC_ACK_NOSPACE 5 /* packet discarded due to lack of buffer space */
+#define RXRPC_ACK_PING 6 /* keep alive ACK */
+#define RXRPC_ACK_PING_RESPONSE 7 /* response to RXRPC_ACK_PING */
+#define RXRPC_ACK_DELAY 8 /* nothing happened since received packet */
+#define RXRPC_ACK_IDLE 9 /* ACK due to fully received ACK window */
+
+ u8 nAcks; /* number of ACKs */
+#define RXRPC_MAXACKS 255
+
+ u8 acks[0]; /* list of ACK/NAKs */
+#define RXRPC_ACK_TYPE_NACK 0
+#define RXRPC_ACK_TYPE_ACK 1
+
+} __attribute__((packed));
+
+extern const char *rxrpc_acks[];
+
+#endif /* _LINUX_RXRPC_PACKET_H */
diff --git a/include/rxrpc/peer.h b/include/rxrpc/peer.h
new file mode 100644
index 000000000000..9f09bc95a40f
--- /dev/null
+++ b/include/rxrpc/peer.h
@@ -0,0 +1,80 @@
+/* peer.h: Rx RPC per-transport peer record
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_RXRPC_PEER_H
+#define _LINUX_RXRPC_PEER_H
+
+#include <linux/wait.h>
+#include <rxrpc/types.h>
+#include <rxrpc/krxtimod.h>
+
+struct rxrpc_peer_ops
+{
+ /* peer record being added */
+ int (*adding)(struct rxrpc_peer *peer);
+
+ /* peer record being discarded from graveyard */
+ void (*discarding)(struct rxrpc_peer *peer);
+
+ /* change of epoch detected on connection */
+ void (*change_of_epoch)(struct rxrpc_connection *conn);
+};
+
+/*****************************************************************************/
+/*
+ * Rx RPC per-transport peer record
+ * - peers only retain a refcount on the transport when they are active
+ * - peers with refcount==0 are inactive and reside in the transport's graveyard
+ */
+struct rxrpc_peer
+{
+ atomic_t usage;
+ struct rxrpc_peer_ops *ops; /* operations on this peer */
+ struct rxrpc_transport *trans; /* owner transport */
+ struct rxrpc_timer timeout; /* timeout for grave destruction */
+ struct list_head link; /* link in transport's peer list */
+ struct list_head proc_link; /* link in /proc list */
+ rwlock_t conn_lock; /* lock for connections */
+ struct list_head conn_active; /* active connections to/from this peer */
+ struct list_head conn_graveyard; /* graveyard for inactive connections */
+ spinlock_t conn_gylock; /* lock for conn_graveyard */
+ wait_queue_head_t conn_gy_waitq; /* wait queue hit when graveyard is empty */
+ atomic_t conn_count; /* number of attached connections */
+ struct in_addr addr; /* remote address */
+ size_t if_mtu; /* interface MTU for this peer */
+ spinlock_t lock; /* access lock */
+
+ void *user; /* application layer data */
+
+ /* calculated RTT cache */
+#define RXRPC_RTT_CACHE_SIZE 32
+ suseconds_t rtt; /* current RTT estimate (in uS) */
+ unsigned short rtt_point; /* next entry at which to insert */
+ unsigned short rtt_usage; /* amount of cache actually used */
+ suseconds_t rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* calculated RTT cache */
+};
+
+
+extern int rxrpc_peer_lookup(struct rxrpc_transport *trans,
+ u32 addr,
+ struct rxrpc_peer **_peer);
+
+static inline void rxrpc_get_peer(struct rxrpc_peer *peer)
+{
+ if (atomic_read(&peer->usage)<0)
+ BUG();
+ atomic_inc(&peer->usage);
+ //printk("rxrpc_get_peer(%p{u=%d})\n",peer,atomic_read(&peer->usage));
+}
+
+extern void rxrpc_put_peer(struct rxrpc_peer *peer);
+
+#endif /* _LINUX_RXRPC_PEER_H */
diff --git a/include/rxrpc/rxrpc.h b/include/rxrpc/rxrpc.h
new file mode 100644
index 000000000000..454d59933675
--- /dev/null
+++ b/include/rxrpc/rxrpc.h
@@ -0,0 +1,29 @@
+/* rxrpc.h: Rx RPC interface
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_RXRPC_RXRPC_H
+#define _LINUX_RXRPC_RXRPC_H
+
+#ifdef __KERNEL__
+
+extern u32 rxrpc_epoch;
+
+extern int rxrpc_ktrace;
+extern int rxrpc_kdebug;
+extern int rxrpc_kproto;
+extern int rxrpc_knet;
+
+extern int rxrpc_sysctl_init(void);
+extern void rxrpc_sysctl_cleanup(void);
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_RXRPC_RXRPC_H */
diff --git a/include/rxrpc/transport.h b/include/rxrpc/transport.h
new file mode 100644
index 000000000000..b9c225533158
--- /dev/null
+++ b/include/rxrpc/transport.h
@@ -0,0 +1,115 @@
+/* transport.h: Rx transport management
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_RXRPC_TRANSPORT_H
+#define _LINUX_RXRPC_TRANSPORT_H
+
+#include <rxrpc/types.h>
+#include <rxrpc/krxiod.h>
+#include <rxrpc/rxrpc.h>
+#include <linux/skbuff.h>
+#include <linux/rwsem.h>
+
+typedef int (*rxrpc_newcall_fnx_t)(struct rxrpc_call *call);
+
+extern wait_queue_head_t rxrpc_krxiod_wq;
+
+/*****************************************************************************/
+/*
+ * Rx operation specification
+ * - tables of these must be sorted by op ID so that they can be binary-chop searched
+ */
+struct rxrpc_operation
+{
+ unsigned id; /* operation ID */
+ size_t asize; /* minimum size of argument block */
+ const char *name; /* name of operation */
+ void *user; /* initial user data */
+};
+
+/*****************************************************************************/
+/*
+ * Rx transport service record
+ */
+struct rxrpc_service
+{
+ struct list_head link; /* link in services list on transport */
+ struct module *owner; /* owner module */
+ rxrpc_newcall_fnx_t new_call; /* new call handler function */
+ const char *name; /* name of service */
+ unsigned short service_id; /* Rx service ID */
+ rxrpc_call_attn_func_t attn_func; /* call requires attention callback */
+ rxrpc_call_error_func_t error_func; /* call error callback */
+ rxrpc_call_aemap_func_t aemap_func; /* abort -> errno mapping callback */
+
+ const struct rxrpc_operation *ops_begin; /* beginning of operations table */
+ const struct rxrpc_operation *ops_end; /* end of operations table */
+};
+
+/*****************************************************************************/
+/*
+ * Rx transport endpoint record
+ */
+struct rxrpc_transport
+{
+ atomic_t usage;
+ struct socket *socket; /* my UDP socket */
+ struct list_head services; /* services listening on this socket */
+ struct list_head link; /* link in transport list */
+ struct list_head proc_link; /* link in transport proc list */
+ struct list_head krxiodq_link; /* krxiod attention queue link */
+ spinlock_t lock; /* access lock */
+ struct list_head peer_active; /* active peers connected to over this socket */
+ struct list_head peer_graveyard; /* inactive peer list */
+ spinlock_t peer_gylock; /* peer graveyard lock */
+ wait_queue_head_t peer_gy_waitq; /* wait queue hit when peer graveyard is empty */
+ rwlock_t peer_lock; /* peer list access lock */
+ atomic_t peer_count; /* number of peers */
+ struct rxrpc_peer_ops *peer_ops; /* default peer operations */
+ unsigned short port; /* port upon which listening */
+ volatile char error_rcvd; /* T if received ICMP error outstanding */
+};
+
+extern struct list_head rxrpc_transports;
+
+extern int rxrpc_create_transport(unsigned short port,
+ struct rxrpc_transport **_trans);
+
+static inline void rxrpc_get_transport(struct rxrpc_transport *trans)
+{
+ if (atomic_read(&trans->usage)<=0)
+ BUG();
+ atomic_inc(&trans->usage);
+ //printk("rxrpc_get_transport(%p{u=%d})\n",trans,atomic_read(&trans->usage));
+}
+
+extern void rxrpc_put_transport(struct rxrpc_transport *trans);
+
+extern int rxrpc_add_service(struct rxrpc_transport *trans,
+ struct rxrpc_service *srv);
+
+extern void rxrpc_del_service(struct rxrpc_transport *trans,
+ struct rxrpc_service *srv);
+
+#if 0
+extern int rxrpc_trans_add_connection(struct rxrpc_transport *trans,
+ struct rxrpc_connection *conn);
+#endif
+
+extern void rxrpc_trans_receive_packet(struct rxrpc_transport *trans);
+
+extern int rxrpc_trans_immediate_abort(struct rxrpc_transport *trans,
+ struct rxrpc_message *msg,
+ int error);
+
+extern void rxrpc_clear_transport(struct rxrpc_transport *trans);
+
+#endif /* _LINUX_RXRPC_TRANSPORT_H */
diff --git a/include/rxrpc/types.h b/include/rxrpc/types.h
new file mode 100644
index 000000000000..40700bc61a6f
--- /dev/null
+++ b/include/rxrpc/types.h
@@ -0,0 +1,39 @@
+/* types.h: Rx types
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_RXRPC_TYPES_H
+#define _LINUX_RXRPC_TYPES_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/spinlock.h>
+#include <asm/atomic.h>
+
+typedef unsigned rxrpc_seq_t; /* Rx message sequence number */
+typedef unsigned rxrpc_serial_t; /* Rx message serial number */
+
+struct rxrpc_call;
+struct rxrpc_connection;
+struct rxrpc_header;
+struct rxrpc_message;
+struct rxrpc_operation;
+struct rxrpc_peer;
+struct rxrpc_service;
+typedef struct rxrpc_timer rxrpc_timer_t;
+struct rxrpc_transport;
+
+typedef void (*rxrpc_call_attn_func_t)(struct rxrpc_call *call);
+typedef void (*rxrpc_call_error_func_t)(struct rxrpc_call *call);
+typedef void (*rxrpc_call_aemap_func_t)(struct rxrpc_call *call);
+
+#endif /* _LINUX_RXRPC_TYPES_H */
diff --git a/include/sound/asound.h b/include/sound/asound.h
index d749d968b9fb..9e351ebaf847 100644
--- a/include/sound/asound.h
+++ b/include/sound/asound.h
@@ -129,7 +129,7 @@ enum {
* *
*****************************************************************************/
-#define SNDRV_PCM_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 2)
+#define SNDRV_PCM_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 3)
typedef unsigned long sndrv_pcm_uframes_t;
typedef long sndrv_pcm_sframes_t;
@@ -423,6 +423,7 @@ enum {
SNDRV_PCM_IOCTL_SW_PARAMS = _IOWR('A', 0x13, struct sndrv_pcm_sw_params),
SNDRV_PCM_IOCTL_STATUS = _IOR('A', 0x20, struct sndrv_pcm_status),
SNDRV_PCM_IOCTL_DELAY = _IOR('A', 0x21, sndrv_pcm_sframes_t),
+ SNDRV_PCM_IOCTL_HWSYNC = _IO('A', 0x22),
SNDRV_PCM_IOCTL_CHANNEL_INFO = _IOR('A', 0x32, struct sndrv_pcm_channel_info),
SNDRV_PCM_IOCTL_PREPARE = _IO('A', 0x40),
SNDRV_PCM_IOCTL_RESET = _IO('A', 0x41),
diff --git a/include/sound/core.h b/include/sound/core.h
index 8d90674f046c..a0051adef639 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -225,7 +225,6 @@ int snd_minor_info_oss_init(void);
int snd_minor_info_oss_done(void);
int snd_oss_init_module(void);
-void snd_oss_cleanup_module(void);
#endif
diff --git a/include/sound/info.h b/include/sound/info.h
index 576cc37de253..b45b422063e2 100644
--- a/include/sound/info.h
+++ b/include/sound/info.h
@@ -164,7 +164,7 @@ static inline int snd_info_card_unregister(snd_card_t * card) { return 0; }
static inline int snd_info_register(snd_info_entry_t * entry) { return 0; }
static inline int snd_info_unregister(snd_info_entry_t * entry) { return 0; }
-static inline struct proc_dir_entry *snd_create_proc_entry(const char *name, mode_t mode, struct proc_dir_entry *parent) { return 0; }
+static inline struct proc_dir_entry *snd_create_proc_entry(const char *name, mode_t mode, struct proc_dir_entry *parent) { return NULL; }
static inline void snd_remove_proc_entry(struct proc_dir_entry *parent,
struct proc_dir_entry *de) { ; }
@@ -174,7 +174,7 @@ static inline void snd_remove_proc_entry(struct proc_dir_entry *parent,
* OSS info part
*/
-#ifdef CONFIG_SND_OSSEMUL
+#if defined(CONFIG_SND_OSSEMUL) && defined(CONFIG_PROC_FS)
#define SNDRV_OSS_INFO_DEV_AUDIO 0
#define SNDRV_OSS_INFO_DEV_SYNTH 1
@@ -187,6 +187,6 @@ static inline void snd_remove_proc_entry(struct proc_dir_entry *parent,
extern int snd_oss_info_register(int dev, int num, char *string);
#define snd_oss_info_unregister(dev, num) snd_oss_info_register(dev, num, NULL)
-#endif /* CONFIG_SND_OSSEMUL */
+#endif /* CONFIG_SND_OSSEMUL && CONFIG_PROC_FS */
#endif /* __SOUND_INFO_H */
diff --git a/include/sound/version.h b/include/sound/version.h
index ba48af7a3099..9ac82df6cf79 100644
--- a/include/sound/version.h
+++ b/include/sound/version.h
@@ -1,3 +1,3 @@
/* include/version.h. Generated automatically by configure. */
#define CONFIG_SND_VERSION "0.9.0rc3"
-#define CONFIG_SND_DATE " (Fri Oct 04 13:09:13 2002 UTC)"
+#define CONFIG_SND_DATE " (Mon Oct 14 16:41:26 2002 UTC)"
diff --git a/init/main.c b/init/main.c
index c6023edc03f3..97d88c50366b 100644
--- a/init/main.c
+++ b/init/main.c
@@ -30,6 +30,8 @@
#include <linux/kernel_stat.h>
#include <linux/security.h>
#include <linux/workqueue.h>
+#include <linux/profile.h>
+#include <linux/rcupdate.h>
#include <asm/io.h>
#include <asm/bugs.h>
@@ -52,7 +54,6 @@
#error Sorry, your GCC is too old. It builds incorrect kernels.
#endif
-extern char _stext, _etext;
extern char *linux_banner;
static int init(void *);
@@ -130,13 +131,6 @@ __setup("maxcpus=", maxcpus);
static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
-static int __init profile_setup(char *str)
-{
- int par;
- if (get_option(&str,&par)) prof_shift = par;
- return 1;
-}
-
__setup("profile=", profile_setup);
static int __init checksetup(char *line)
@@ -397,6 +391,7 @@ asmlinkage void __init start_kernel(void)
printk("Kernel command line: %s\n", saved_command_line);
parse_options(command_line);
trap_init();
+ rcu_init();
init_IRQ();
sched_init();
softirq_init();
@@ -411,16 +406,7 @@ asmlinkage void __init start_kernel(void)
#ifdef CONFIG_MODULES
init_modules();
#endif
- if (prof_shift) {
- unsigned int size;
- /* only text is profiled */
- prof_len = (unsigned long) &_etext - (unsigned long) &_stext;
- prof_len >>= prof_shift;
-
- size = prof_len * sizeof(unsigned int) + PAGE_SIZE-1;
- prof_buffer = (unsigned int *) alloc_bootmem(size);
- }
-
+ profile_init();
kmem_cache_init();
local_irq_enable();
calibrate_delay();
diff --git a/kernel/Makefile b/kernel/Makefile
index b3fce6d3ac9c..daf6cbd5d42a 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -3,12 +3,14 @@
#
export-objs = signal.o sys.o kmod.o workqueue.o ksyms.o pm.o exec_domain.o \
- printk.o platform.o suspend.o dma.o module.o cpufreq.o
+ printk.o platform.o suspend.o dma.o module.o cpufreq.o \
+ profile.o rcupdate.o
-obj-y = sched.o fork.o exec_domain.o panic.o printk.o \
+obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
module.o exit.o itimer.o time.o softirq.o resource.o \
sysctl.o capability.o ptrace.o timer.o user.o \
- signal.o sys.o kmod.o workqueue.o futex.o platform.o pid.o
+ signal.o sys.o kmod.o workqueue.o futex.o platform.o pid.o \
+ rcupdate.o
obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
obj-$(CONFIG_SMP) += cpu.o
diff --git a/kernel/exit.c b/kernel/exit.c
index 6ed07def4c62..c2b0f6eeff0f 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -19,6 +19,7 @@
#include <linux/file.h>
#include <linux/binfmts.h>
#include <linux/ptrace.h>
+#include <linux/profile.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
@@ -59,11 +60,12 @@ void release_task(struct task_struct * p)
{
struct dentry *proc_dentry;
task_t *leader;
-
- if (p->state < TASK_ZOMBIE)
- BUG();
+
+ BUG_ON(p->state < TASK_ZOMBIE);
+
if (p != current)
wait_task_inactive(p);
+
atomic_dec(&p->user->processes);
security_ops->task_free_security(p);
free_uid(p->user);
@@ -635,6 +637,8 @@ NORET_TYPE void do_exit(long code)
current->comm, current->pid,
preempt_count());
+ profile_exit_task(tsk);
+
fake_volatile:
acct_process(code);
__exit_mm(tsk);
diff --git a/kernel/futex.c b/kernel/futex.c
index d268c3c1b758..4aa2115c4d66 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -115,8 +115,9 @@ static struct page *__pin_page(unsigned long addr)
* Do a quick atomic lookup first - this is the fastpath.
*/
page = follow_page(mm, addr, 0);
- if (likely(page != NULL)) {
- get_page(page);
+ if (likely(page != NULL)) {
+ if (!PageReserved(page))
+ get_page(page);
return page;
}
@@ -140,8 +141,10 @@ repeat_lookup:
* check for races:
*/
tmp = follow_page(mm, addr, 0);
- if (tmp != page)
+ if (tmp != page) {
+ put_page(page);
goto repeat_lookup;
+ }
return page;
}
@@ -176,6 +179,7 @@ static int futex_wake(unsigned long uaddr, int offset, int num)
if (this->page == page && this->offset == offset) {
list_del_init(i);
+ __detach_vcache(&this->vcache);
tell_waiter(this);
ret++;
if (ret >= num)
@@ -235,15 +239,15 @@ static inline int unqueue_me(struct futex_q *q)
{
int ret = 0;
- detach_vcache(&q->vcache);
-
+ spin_lock(&vcache_lock);
spin_lock(&futex_lock);
if (!list_empty(&q->list)) {
list_del(&q->list);
+ __detach_vcache(&q->vcache);
ret = 1;
}
spin_unlock(&futex_lock);
-
+ spin_unlock(&vcache_lock);
return ret;
}
@@ -314,13 +318,7 @@ static int futex_close(struct inode *inode, struct file *filp)
{
struct futex_q *q = filp->private_data;
- spin_lock(&futex_lock);
- if (!list_empty(&q->list)) {
- list_del(&q->list);
- /* Noone can be polling on us now. */
- BUG_ON(waitqueue_active(&q->waiters));
- }
- spin_unlock(&futex_lock);
+ unqueue_me(q);
unpin_page(q->page);
kfree(filp->private_data);
return 0;
@@ -436,9 +434,8 @@ asmlinkage int sys_futex(unsigned long uaddr, int op, int val, struct timespec *
pos_in_page = uaddr % PAGE_SIZE;
- /* Must be "naturally" aligned, and not on page boundary. */
- if ((pos_in_page % __alignof__(int)) != 0
- || pos_in_page + sizeof(int) > PAGE_SIZE)
+ /* Must be "naturally" aligned */
+ if (pos_in_page % sizeof(int))
return -EINVAL;
switch (op) {
diff --git a/kernel/profile.c b/kernel/profile.c
new file mode 100644
index 000000000000..756f142b1f35
--- /dev/null
+++ b/kernel/profile.c
@@ -0,0 +1,121 @@
+/*
+ * linux/kernel/profile.c
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/profile.h>
+#include <linux/bootmem.h>
+#include <linux/notifier.h>
+#include <linux/mm.h>
+
+extern char _stext, _etext;
+
+unsigned int * prof_buffer;
+unsigned long prof_len;
+unsigned long prof_shift;
+
+int __init profile_setup(char * str)
+{
+ int par;
+ if (get_option(&str,&par))
+ prof_shift = par;
+ return 1;
+}
+
+
+void __init profile_init(void)
+{
+ unsigned int size;
+
+ if (!prof_shift)
+ return;
+
+ /* only text is profiled */
+ prof_len = (unsigned long) &_etext - (unsigned long) &_stext;
+ prof_len >>= prof_shift;
+
+ size = prof_len * sizeof(unsigned int) + PAGE_SIZE - 1;
+ prof_buffer = (unsigned int *) alloc_bootmem(size);
+}
+
+/* Profile event notifications */
+
+#ifdef CONFIG_PROFILING
+
+static DECLARE_RWSEM(profile_rwsem);
+static struct notifier_block * exit_task_notifier;
+static struct notifier_block * exit_mmap_notifier;
+static struct notifier_block * exec_unmap_notifier;
+
+void profile_exit_task(struct task_struct * task)
+{
+ down_read(&profile_rwsem);
+ notifier_call_chain(&exit_task_notifier, 0, task);
+ up_read(&profile_rwsem);
+}
+
+void profile_exit_mmap(struct mm_struct * mm)
+{
+ down_read(&profile_rwsem);
+ notifier_call_chain(&exit_mmap_notifier, 0, mm);
+ up_read(&profile_rwsem);
+}
+
+void profile_exec_unmap(struct mm_struct * mm)
+{
+ down_read(&profile_rwsem);
+ notifier_call_chain(&exec_unmap_notifier, 0, mm);
+ up_read(&profile_rwsem);
+}
+
+int profile_event_register(enum profile_type type, struct notifier_block * n)
+{
+ int err = -EINVAL;
+
+ down_write(&profile_rwsem);
+
+ switch (type) {
+ case EXIT_TASK:
+ err = notifier_chain_register(&exit_task_notifier, n);
+ break;
+ case EXIT_MMAP:
+ err = notifier_chain_register(&exit_mmap_notifier, n);
+ break;
+ case EXEC_UNMAP:
+ err = notifier_chain_register(&exec_unmap_notifier, n);
+ break;
+ }
+
+ up_write(&profile_rwsem);
+
+ return err;
+}
+
+
+int profile_event_unregister(enum profile_type type, struct notifier_block * n)
+{
+ int err = -EINVAL;
+
+ down_write(&profile_rwsem);
+
+ switch (type) {
+ case EXIT_TASK:
+ err = notifier_chain_unregister(&exit_task_notifier, n);
+ break;
+ case EXIT_MMAP:
+ err = notifier_chain_unregister(&exit_mmap_notifier, n);
+ break;
+ case EXEC_UNMAP:
+ err = notifier_chain_unregister(&exec_unmap_notifier, n);
+ break;
+ }
+
+ up_write(&profile_rwsem);
+ return err;
+}
+
+#endif /* CONFIG_PROFILING */
+
+EXPORT_SYMBOL_GPL(profile_event_register);
+EXPORT_SYMBOL_GPL(profile_event_unregister);
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
new file mode 100644
index 000000000000..dfdf1774489d
--- /dev/null
+++ b/kernel/rcupdate.c
@@ -0,0 +1,242 @@
+/*
+ * Read-Copy Update mechanism for mutual exclusion
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) IBM Corporation, 2001
+ *
+ * Author: Dipankar Sarma <dipankar@in.ibm.com>
+ *
+ * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
+ * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
+ * Papers:
+ * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
+ * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
+ *
+ * For detailed explanation of Read-Copy Update mechanism see -
+ * http://lse.sourceforge.net/locking/rcupdate.html
+ *
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <asm/atomic.h>
+#include <asm/bitops.h>
+#include <linux/module.h>
+#include <linux/completion.h>
+#include <linux/percpu.h>
+#include <linux/rcupdate.h>
+
+/* Definition for rcupdate control block. */
+struct rcu_ctrlblk rcu_ctrlblk =
+ { .mutex = SPIN_LOCK_UNLOCKED, .curbatch = 1,
+ .maxbatch = 1, .rcu_cpu_mask = 0 };
+struct rcu_data rcu_data[NR_CPUS] __cacheline_aligned;
+
+/* Fake initialization required by compiler */
+static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
+#define RCU_tasklet(cpu) (per_cpu(rcu_tasklet, cpu))
+
+/**
+ * call_rcu - Queue an RCU update request.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual update function to be invoked after the grace period
+ * @arg: argument to be passed to the update function
+ *
+ * The update function will be invoked as soon as all CPUs have performed
+ * a context switch or been seen in the idle loop or in a user process.
+ * Read-side critical sections that use call_rcu() for updates must
+ * be protected by rcu_read_lock()/rcu_read_unlock().
+ */
+void call_rcu(struct rcu_head *head, void (*func)(void *arg), void *arg)
+{
+ int cpu;
+ unsigned long flags;
+
+ head->func = func;
+ head->arg = arg;
+ local_irq_save(flags);
+ cpu = smp_processor_id();
+ list_add_tail(&head->list, &RCU_nxtlist(cpu));
+ local_irq_restore(flags);
+}
+
+/*
+ * Invoke the completed RCU callbacks. They are expected to be in
+ * a per-cpu list.
+ */
+static void rcu_do_batch(struct list_head *list)
+{
+ struct list_head *entry;
+ struct rcu_head *head;
+
+ while (!list_empty(list)) {
+ entry = list->next;
+ list_del(entry);
+ head = list_entry(entry, struct rcu_head, list);
+ head->func(head->arg);
+ }
+}
+
+/*
+ * Register a new batch of callbacks, and start it up if there is currently no
+ * active batch and the batch to be registered has not already occurred.
+ * Caller must hold the rcu_ctrlblk lock.
+ */
+static void rcu_start_batch(long newbatch)
+{
+ if (rcu_batch_before(rcu_ctrlblk.maxbatch, newbatch)) {
+ rcu_ctrlblk.maxbatch = newbatch;
+ }
+ if (rcu_batch_before(rcu_ctrlblk.maxbatch, rcu_ctrlblk.curbatch) ||
+ (rcu_ctrlblk.rcu_cpu_mask != 0)) {
+ return;
+ }
+ rcu_ctrlblk.rcu_cpu_mask = cpu_online_map;
+}
+
+/*
+ * Check if the cpu has gone through a quiescent state (say context
+ * switch). If so and if it already hasn't done so in this RCU
+ * quiescent cycle, then indicate that it has done so.
+ */
+static void rcu_check_quiescent_state(void)
+{
+ int cpu = smp_processor_id();
+
+ if (!test_bit(cpu, &rcu_ctrlblk.rcu_cpu_mask)) {
+ return;
+ }
+
+ /*
+ * Races with local timer interrupt - in the worst case
+ * we may miss one quiescent state of that CPU. That is
+ * tolerable. So no need to disable interrupts.
+ */
+ if (RCU_last_qsctr(cpu) == RCU_QSCTR_INVALID) {
+ RCU_last_qsctr(cpu) = RCU_qsctr(cpu);
+ return;
+ }
+ if (RCU_qsctr(cpu) == RCU_last_qsctr(cpu)) {
+ return;
+ }
+
+ spin_lock(&rcu_ctrlblk.mutex);
+ if (!test_bit(cpu, &rcu_ctrlblk.rcu_cpu_mask)) {
+ spin_unlock(&rcu_ctrlblk.mutex);
+ return;
+ }
+ clear_bit(cpu, &rcu_ctrlblk.rcu_cpu_mask);
+ RCU_last_qsctr(cpu) = RCU_QSCTR_INVALID;
+ if (rcu_ctrlblk.rcu_cpu_mask != 0) {
+ spin_unlock(&rcu_ctrlblk.mutex);
+ return;
+ }
+ rcu_ctrlblk.curbatch++;
+ rcu_start_batch(rcu_ctrlblk.maxbatch);
+ spin_unlock(&rcu_ctrlblk.mutex);
+}
+
+
+/*
+ * This does the RCU processing work from tasklet context.
+ */
+static void rcu_process_callbacks(unsigned long unused)
+{
+ int cpu = smp_processor_id();
+ LIST_HEAD(list);
+
+ if (!list_empty(&RCU_curlist(cpu)) &&
+ rcu_batch_after(rcu_ctrlblk.curbatch, RCU_batch(cpu))) {
+ list_splice(&RCU_curlist(cpu), &list);
+ INIT_LIST_HEAD(&RCU_curlist(cpu));
+ }
+
+ local_irq_disable();
+ if (!list_empty(&RCU_nxtlist(cpu)) && list_empty(&RCU_curlist(cpu))) {
+ list_splice(&RCU_nxtlist(cpu), &RCU_curlist(cpu));
+ INIT_LIST_HEAD(&RCU_nxtlist(cpu));
+ local_irq_enable();
+
+ /*
+ * start the next batch of callbacks
+ */
+ spin_lock(&rcu_ctrlblk.mutex);
+ RCU_batch(cpu) = rcu_ctrlblk.curbatch + 1;
+ rcu_start_batch(RCU_batch(cpu));
+ spin_unlock(&rcu_ctrlblk.mutex);
+ } else {
+ local_irq_enable();
+ }
+ rcu_check_quiescent_state();
+ if (!list_empty(&list))
+ rcu_do_batch(&list);
+}
+
+void rcu_check_callbacks(int cpu, int user)
+{
+ if (user ||
+ (idle_cpu(cpu) && !in_softirq() && hardirq_count() <= 1))
+ RCU_qsctr(cpu)++;
+ tasklet_schedule(&RCU_tasklet(cpu));
+}
+
+/*
+ * Initializes rcu mechanism. Assumed to be called early.
+ * That is before local timer(SMP) or jiffie timer (uniproc) is setup.
+ * Note that rcu_qsctr and friends are implicitly
+ * initialized due to the choice of ``0'' for RCU_QSCTR_INVALID.
+ */
+void __init rcu_init(void)
+{
+ int i;
+
+ memset(&rcu_data[0], 0, sizeof(rcu_data));
+ for (i = 0; i < NR_CPUS; i++) {
+ tasklet_init(&RCU_tasklet(i), rcu_process_callbacks, 0UL);
+ INIT_LIST_HEAD(&RCU_nxtlist(i));
+ INIT_LIST_HEAD(&RCU_curlist(i));
+ }
+}
+
+/* Because of FASTCALL declaration of complete, we use this wrapper */
+static void wakeme_after_rcu(void *completion)
+{
+ complete(completion);
+}
+
+/**
+ * synchronize_kernel - wait until all the CPUs have gone
+ * through a "quiescent" state. It may sleep.
+ */
+void synchronize_kernel(void)
+{
+ struct rcu_head rcu;
+ DECLARE_COMPLETION(completion);
+
+ /* Will wake me after RCU finished */
+ call_rcu(&rcu, wakeme_after_rcu, &completion);
+
+ /* Wait for it */
+ wait_for_completion(&completion);
+}
+
+
+EXPORT_SYMBOL(call_rcu);
+EXPORT_SYMBOL(synchronize_kernel);
diff --git a/kernel/sched.c b/kernel/sched.c
index 0464ac0649b8..20d2854c0bc6 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -31,6 +31,7 @@
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
+#include <linux/rcupdate.h>
/*
* Convert user-nice values [ -20 ... 0 ... 19 ]
@@ -865,6 +866,9 @@ void scheduler_tick(int user_ticks, int sys_ticks)
runqueue_t *rq = this_rq();
task_t *p = current;
+ if (rcu_pending(cpu))
+ rcu_check_callbacks(cpu, user_ticks);
+
if (p == rq->idle) {
/* note: this timer irq context must be accounted for as well */
if (irq_count() - HARDIRQ_OFFSET >= SOFTIRQ_OFFSET)
@@ -1023,6 +1027,7 @@ pick_next_task:
switch_tasks:
prefetch(next);
clear_tsk_need_resched(prev);
+ RCU_qsctr(prev->thread_info->cpu)++;
if (likely(prev != next)) {
rq->nr_switches++;
diff --git a/kernel/sys.c b/kernel/sys.c
index 5b7e84384cfa..3c2992ac68f2 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -20,6 +20,7 @@
#include <linux/device.h>
#include <linux/times.h>
#include <linux/security.h>
+#include <linux/dcookies.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -202,6 +203,7 @@ asmlinkage long sys_ni_syscall(void)
cond_syscall(sys_nfsservctl)
cond_syscall(sys_quotactl)
cond_syscall(sys_acct)
+cond_syscall(sys_lookup_dcookie)
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
diff --git a/kernel/timer.c b/kernel/timer.c
index bf0077634c93..2d30f7fd0ecb 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -406,10 +406,6 @@ long time_adj; /* tick adjust (scaled 1 / HZ) */
long time_reftime; /* time at last adjustment (s) */
long time_adjust;
-unsigned int * prof_buffer;
-unsigned long prof_len;
-unsigned long prof_shift;
-
/*
* this routine handles the overflow of the microsecond field
*
diff --git a/mm/mmap.c b/mm/mmap.c
index 5d43e84413b1..90ae8b22ab96 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -16,6 +16,7 @@
#include <linux/fs.h>
#include <linux/personality.h>
#include <linux/security.h>
+#include <linux/profile.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
@@ -1104,6 +1105,10 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
if (mpnt->vm_start >= end)
return 0;
+ /* Something will probably happen, so notify. */
+ if (mpnt->vm_file && (mpnt->vm_flags & VM_EXEC))
+ profile_exec_unmap(mm);
+
/*
* If we need to split any vma, do it now to save pain later.
*/
@@ -1253,7 +1258,10 @@ void exit_mmap(struct mm_struct * mm)
mmu_gather_t *tlb;
struct vm_area_struct * mpnt;
+ profile_exit_mmap(mm);
+
release_segments(mm);
+
spin_lock(&mm->page_table_lock);
tlb = tlb_gather_mmu(mm, 1);
diff --git a/mm/vcache.c b/mm/vcache.c
index ea6bc9d2b259..599e0f25490d 100644
--- a/mm/vcache.c
+++ b/mm/vcache.c
@@ -41,14 +41,12 @@ void __attach_vcache(vcache_t *vcache,
hash_head = hash_vcache(address, mm);
- list_add(&vcache->hash_entry, hash_head);
+ list_add_tail(&vcache->hash_entry, hash_head);
}
-void detach_vcache(vcache_t *vcache)
+void __detach_vcache(vcache_t *vcache)
{
- spin_lock(&vcache_lock);
- list_del(&vcache->hash_entry);
- spin_unlock(&vcache_lock);
+ list_del_init(&vcache->hash_entry);
}
void invalidate_vcache(unsigned long address, struct mm_struct *mm,
@@ -61,12 +59,11 @@ void invalidate_vcache(unsigned long address, struct mm_struct *mm,
hash_head = hash_vcache(address, mm);
/*
- * This is safe, because this path is called with the mm
- * semaphore read-held, and the add/remove path calls with the
- * mm semaphore write-held. So while other mm's might add new
- * entries in parallel, and *this* mm is locked out, so if the
- * list is empty now then we do not have to take the vcache
- * lock to see it's really empty.
+ * This is safe, because this path is called with the pagetable
+ * lock held. So while other mm's might add new entries in
+ * parallel, *this* mm is locked out, so if the list is empty
+ * now then we do not have to take the vcache lock to see it's
+ * really empty.
*/
if (likely(list_empty(hash_head)))
return;
diff --git a/net/Makefile b/net/Makefile
index 6cc376aaebf1..f7e9d2b90fd0 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -27,8 +27,9 @@ obj-$(CONFIG_NETROM) += netrom/
obj-$(CONFIG_ROSE) += rose/
obj-$(CONFIG_AX25) += ax25/
obj-$(CONFIG_IRDA) += irda/
-obj-$(CONFIG_BLUEZ) += bluetooth/
+obj-$(CONFIG_BT) += bluetooth/
obj-$(CONFIG_SUNRPC) += sunrpc/
+obj-$(CONFIG_RXRPC) += rxrpc/
obj-$(CONFIG_ATM) += atm/
obj-$(CONFIG_DECNET) += decnet/
obj-$(CONFIG_ECONET) += econet/
diff --git a/net/bluetooth/Config.help b/net/bluetooth/Config.help
index 80b5d7a6ce65..d69299604b51 100644
--- a/net/bluetooth/Config.help
+++ b/net/bluetooth/Config.help
@@ -1,5 +1,5 @@
Bluetooth subsystem support
-CONFIG_BLUEZ
+CONFIG_BT
Bluetooth is low-cost, low-power, short-range wireless technology.
It was designed as a replacement for cables and other short-range
technologies like IrDA. Bluetooth operates in personal area range
@@ -7,25 +7,25 @@ CONFIG_BLUEZ
Bluetooth can be found at <http://www.bluetooth.com/>.
Linux Bluetooth subsystem consist of several layers:
- BlueZ Core (HCI device and connection manager, scheduler)
+ Bluetooth Core (HCI device and connection manager, scheduler)
HCI Device drivers (interface to the hardware)
L2CAP Module (L2CAP protocol)
SCO Module (SCO links)
RFCOMM Module (RFCOMM protocol)
BNEP Module (BNEP protocol)
- Say Y here to enable Linux Bluetooth support and to build BlueZ Core
+ Say Y here to enable Linux Bluetooth support and to build Bluetooth Core
layer.
To use Linux Bluetooth subsystem, you will need several user-space
utilities like hciconfig and hcid. These utilities and updates to
- Bluetooth kernel modules are provided in the BlueZ package.
+ Bluetooth kernel modules are provided in the BlueZ packages.
For more information, see <http://bluez.sourceforge.net/>.
- If you want to compile BlueZ Core as module (bluez.o) say M here.
+ If you want to compile Bluetooth Core as module (bluetooth.o) say M here.
L2CAP protocol support
-CONFIG_BLUEZ_L2CAP
+CONFIG_BT_L2CAP
L2CAP (Logical Link Control and Adaptation Protocol) provides
connection oriented and connection-less data transport. L2CAP
support is required for most Bluetooth applications.
@@ -34,7 +34,7 @@ CONFIG_BLUEZ_L2CAP
compile it as module (l2cap.o).
SCO links support
-CONFIG_BLUEZ_SCO
+CONFIG_BT_SCO
SCO link provides voice transport over Bluetooth. SCO support is
required for voice applications like Headset and Audio.
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 3ca0d58c859a..b54e5162b731 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -4,12 +4,12 @@
export-objs := syms.o
-obj-$(CONFIG_BLUEZ) += bluez.o
-obj-$(CONFIG_BLUEZ_L2CAP) += l2cap.o
-obj-$(CONFIG_BLUEZ_SCO) += sco.o
-obj-$(CONFIG_BLUEZ_RFCOMM) += rfcomm/
-obj-$(CONFIG_BLUEZ_BNEP) += bnep/
+obj-$(CONFIG_BT) += bluetooth.o
+obj-$(CONFIG_BT_L2CAP) += l2cap.o
+obj-$(CONFIG_BT_SCO) += sco.o
+obj-$(CONFIG_BT_RFCOMM) += rfcomm/
+obj-$(CONFIG_BT_BNEP) += bnep/
-bluez-objs := af_bluetooth.o hci_core.o hci_conn.o hci_event.o hci_sock.o lib.o syms.o
+bluetooth-objs := af_bluetooth.o hci_core.o hci_conn.o hci_event.o hci_sock.o lib.o syms.o
include $(TOPDIR)/Rules.make
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index e5b5e6b027fb..9ad9c4454f53 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -23,7 +23,7 @@
*/
/*
- * BlueZ Bluetooth address family and sockets.
+ * Bluetooth address family and sockets.
*
* $Id: af_bluetooth.c,v 1.3 2002/04/17 17:37:15 maxk Exp $
*/
@@ -51,66 +51,66 @@
#include <net/bluetooth/bluetooth.h>
-#ifndef AF_BLUETOOTH_DEBUG
+#ifndef CONFIG_BT_SOCK_DEBUG
#undef BT_DBG
#define BT_DBG( A... )
#endif
/* Bluetooth sockets */
-#define BLUEZ_MAX_PROTO 5
-static struct net_proto_family *bluez_proto[BLUEZ_MAX_PROTO];
+#define BT_MAX_PROTO 5
+static struct net_proto_family *bt_proto[BT_MAX_PROTO];
-static kmem_cache_t *bluez_sock_cache;
+static kmem_cache_t *bt_sock_cache;
-int bluez_sock_register(int proto, struct net_proto_family *ops)
+int bt_sock_register(int proto, struct net_proto_family *ops)
{
- if (proto >= BLUEZ_MAX_PROTO)
+ if (proto >= BT_MAX_PROTO)
return -EINVAL;
- if (bluez_proto[proto])
+ if (bt_proto[proto])
return -EEXIST;
- bluez_proto[proto] = ops;
+ bt_proto[proto] = ops;
return 0;
}
-int bluez_sock_unregister(int proto)
+int bt_sock_unregister(int proto)
{
- if (proto >= BLUEZ_MAX_PROTO)
+ if (proto >= BT_MAX_PROTO)
return -EINVAL;
- if (!bluez_proto[proto])
+ if (!bt_proto[proto])
return -ENOENT;
- bluez_proto[proto] = NULL;
+ bt_proto[proto] = NULL;
return 0;
}
-static int bluez_sock_create(struct socket *sock, int proto)
+static int bt_sock_create(struct socket *sock, int proto)
{
- if (proto > BLUEZ_MAX_PROTO)
+ if (proto > BT_MAX_PROTO)
return -EINVAL;
#if defined(CONFIG_KMOD)
- if (!bluez_proto[proto]) {
+ if (!bt_proto[proto]) {
char module_name[30];
sprintf(module_name, "bt-proto-%d", proto);
request_module(module_name);
}
#endif
- if (!bluez_proto[proto])
+ if (!bt_proto[proto])
return -ENOENT;
- return bluez_proto[proto]->create(sock, proto);
+ return bt_proto[proto]->create(sock, proto);
}
-struct sock *bluez_sock_alloc(struct socket *sock, int proto, int pi_size, int prio)
+struct sock *bt_sock_alloc(struct socket *sock, int proto, int pi_size, int prio)
{
struct sock *sk;
void *pi;
- sk = sk_alloc(PF_BLUETOOTH, prio, sizeof(struct bluez_sock), bluez_sock_cache);
+ sk = sk_alloc(PF_BLUETOOTH, prio, sizeof(struct bt_sock), bt_sock_cache);
if (!sk)
return NULL;
@@ -125,7 +125,7 @@ struct sock *bluez_sock_alloc(struct socket *sock, int proto, int pi_size, int p
}
sock_init_data(sock, sk);
- INIT_LIST_HEAD(&bluez_sk(sk)->accept_q);
+ INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
sk->zapped = 0;
sk->protocol = proto;
@@ -134,7 +134,7 @@ struct sock *bluez_sock_alloc(struct socket *sock, int proto, int pi_size, int p
return sk;
}
-void bluez_sock_link(struct bluez_sock_list *l, struct sock *sk)
+void bt_sock_link(struct bt_sock_list *l, struct sock *sk)
{
write_lock_bh(&l->lock);
sk->next = l->head;
@@ -143,7 +143,7 @@ void bluez_sock_link(struct bluez_sock_list *l, struct sock *sk)
write_unlock_bh(&l->lock);
}
-void bluez_sock_unlink(struct bluez_sock_list *l, struct sock *sk)
+void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk)
{
struct sock **skp;
@@ -158,45 +158,45 @@ void bluez_sock_unlink(struct bluez_sock_list *l, struct sock *sk)
write_unlock_bh(&l->lock);
}
-void bluez_accept_enqueue(struct sock *parent, struct sock *sk)
+void bt_accept_enqueue(struct sock *parent, struct sock *sk)
{
BT_DBG("parent %p, sk %p", parent, sk);
sock_hold(sk);
- list_add_tail(&bluez_sk(sk)->accept_q, &bluez_sk(parent)->accept_q);
- bluez_sk(sk)->parent = parent;
+ list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q);
+ bt_sk(sk)->parent = parent;
parent->ack_backlog++;
}
-static void bluez_accept_unlink(struct sock *sk)
+static void bt_accept_unlink(struct sock *sk)
{
BT_DBG("sk %p state %d", sk, sk->state);
- list_del_init(&bluez_sk(sk)->accept_q);
- bluez_sk(sk)->parent->ack_backlog--;
- bluez_sk(sk)->parent = NULL;
+ list_del_init(&bt_sk(sk)->accept_q);
+ bt_sk(sk)->parent->ack_backlog--;
+ bt_sk(sk)->parent = NULL;
sock_put(sk);
}
-struct sock *bluez_accept_dequeue(struct sock *parent, struct socket *newsock)
+struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
{
struct list_head *p, *n;
struct sock *sk;
BT_DBG("parent %p", parent);
- list_for_each_safe(p, n, &bluez_sk(parent)->accept_q) {
- sk = (struct sock *) list_entry(p, struct bluez_sock, accept_q);
+ list_for_each_safe(p, n, &bt_sk(parent)->accept_q) {
+ sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);
lock_sock(sk);
if (sk->state == BT_CLOSED) {
release_sock(sk);
- bluez_accept_unlink(sk);
+ bt_accept_unlink(sk);
continue;
}
if (sk->state == BT_CONNECTED || !newsock) {
- bluez_accept_unlink(sk);
+ bt_accept_unlink(sk);
if (newsock)
sock_graft(sk, newsock);
release_sock(sk);
@@ -207,7 +207,7 @@ struct sock *bluez_accept_dequeue(struct sock *parent, struct socket *newsock)
return NULL;
}
-int bluez_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, int len, int flags, struct scm_cookie *scm)
{
int noblock = flags & MSG_DONTWAIT;
@@ -242,7 +242,7 @@ int bluez_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
return err ? : copied;
}
-unsigned int bluez_sock_poll(struct file * file, struct socket *sock, poll_table *wait)
+unsigned int bt_sock_poll(struct file * file, struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
unsigned int mask;
@@ -259,7 +259,7 @@ unsigned int bluez_sock_poll(struct file * file, struct socket *sock, poll_table
mask |= POLLHUP;
if (!skb_queue_empty(&sk->receive_queue) ||
- !list_empty(&bluez_sk(sk)->accept_q) ||
+ !list_empty(&bt_sk(sk)->accept_q) ||
(sk->shutdown & RCV_SHUTDOWN))
mask |= POLLIN | POLLRDNORM;
@@ -277,7 +277,7 @@ unsigned int bluez_sock_poll(struct file * file, struct socket *sock, poll_table
return mask;
}
-int bluez_sock_w4_connect(struct sock *sk, int flags)
+int bt_sock_w4_connect(struct sock *sk, int flags)
{
DECLARE_WAITQUEUE(wait, current);
long timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
@@ -316,50 +316,51 @@ int bluez_sock_w4_connect(struct sock *sk, int flags)
return err;
}
-struct net_proto_family bluez_sock_family_ops =
+struct net_proto_family bt_sock_family_ops =
{
- PF_BLUETOOTH, bluez_sock_create
+ PF_BLUETOOTH, bt_sock_create
};
-static int __init bluez_init(void)
+extern int hci_sock_init(void);
+extern int hci_sock_cleanup(void);
+
+static int __init bt_init(void)
{
- BT_INFO("BlueZ Core ver %s Copyright (C) 2000,2001 Qualcomm Inc",
+ BT_INFO("Bluetooth Core ver %s Copyright (C) 2000,2001 Qualcomm Inc",
VERSION);
BT_INFO("Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>");
proc_mkdir("bluetooth", NULL);
/* Init socket cache */
- bluez_sock_cache = kmem_cache_create("bluez_sock",
- sizeof(struct bluez_sock), 0,
+ bt_sock_cache = kmem_cache_create("bt_sock",
+ sizeof(struct bt_sock), 0,
SLAB_HWCACHE_ALIGN, 0, 0);
- if (!bluez_sock_cache) {
- BT_ERR("BlueZ socket cache creation failed");
+ if (!bt_sock_cache) {
+ BT_ERR("Bluetooth socket cache creation failed");
return -ENOMEM;
}
- sock_register(&bluez_sock_family_ops);
+ sock_register(&bt_sock_family_ops);
- hci_core_init();
hci_sock_init();
return 0;
}
-static void __exit bluez_cleanup(void)
+static void __exit bt_cleanup(void)
{
hci_sock_cleanup();
- hci_core_cleanup();
sock_unregister(PF_BLUETOOTH);
- kmem_cache_destroy(bluez_sock_cache);
+ kmem_cache_destroy(bt_sock_cache);
remove_proc_entry("bluetooth", NULL);
}
-subsys_initcall(bluez_init);
-module_exit(bluez_cleanup);
+subsys_initcall(bt_init);
+module_exit(bt_cleanup);
MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>");
-MODULE_DESCRIPTION("BlueZ Core ver " VERSION);
+MODULE_DESCRIPTION("Bluetooth Core ver " VERSION);
MODULE_LICENSE("GPL");
diff --git a/net/bluetooth/bnep/Config.help b/net/bluetooth/bnep/Config.help
index 64bafc9dc313..25fdd001eb42 100644
--- a/net/bluetooth/bnep/Config.help
+++ b/net/bluetooth/bnep/Config.help
@@ -1,5 +1,5 @@
BNEP protocol support
-CONFIG_BLUEZ_BNEP
+CONFIG_BT_BNEP
BNEP (Bluetooth Network Encapsulation Protocol) is Ethernet
emulation layer on top of Bluetooth. BNEP is required for Bluetooth
PAN (Personal Area Network).
@@ -12,10 +12,10 @@ CONFIG_BLUEZ_BNEP
compile it as module (bnep.o).
BNEP multicast filter support
-CONFIG_BLUEZ_BNEP_MC_FILTER
+CONFIG_BT_BNEP_MC_FILTER
This option enables the multicast filter support for BNEP.
BNEP protocol filter support
-CONFIG_BLUEZ_BNEP_PROTO_FILTER
+CONFIG_BT_BNEP_PROTO_FILTER
This option enables the protocol filter support for BNEP.
diff --git a/net/bluetooth/bnep/Config.in b/net/bluetooth/bnep/Config.in
index 8de2ad3c951d..2e1fd13547ec 100644
--- a/net/bluetooth/bnep/Config.in
+++ b/net/bluetooth/bnep/Config.in
@@ -1,8 +1,8 @@
-dep_tristate 'BNEP protocol support' CONFIG_BLUEZ_BNEP $CONFIG_BLUEZ_L2CAP
+dep_tristate 'BNEP protocol support' CONFIG_BT_BNEP $CONFIG_BT_L2CAP
-if [ "$CONFIG_BLUEZ_BNEP" != "n" ]; then
- bool ' Multicast filter support' CONFIG_BLUEZ_BNEP_MC_FILTER
- bool ' Protocol filter support' CONFIG_BLUEZ_BNEP_PROTO_FILTER
+if [ "$CONFIG_BT_BNEP" != "n" ]; then
+ bool ' Multicast filter support' CONFIG_BT_BNEP_MC_FILTER
+ bool ' Protocol filter support' CONFIG_BT_BNEP_PROTO_FILTER
fi
diff --git a/net/bluetooth/bnep/Makefile b/net/bluetooth/bnep/Makefile
index 7baf319e271e..f424217b0d1e 100644
--- a/net/bluetooth/bnep/Makefile
+++ b/net/bluetooth/bnep/Makefile
@@ -2,7 +2,7 @@
# Makefile for the Linux Bluetooth BNEP layer.
#
-obj-$(CONFIG_BLUEZ_BNEP) += bnep.o
+obj-$(CONFIG_BT_BNEP) += bnep.o
bnep-objs := core.o sock.o netdev.o crc32.o
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 5f30ddab5159..fb9fae8c9710 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -58,7 +58,7 @@
#include "bnep.h"
-#ifndef CONFIG_BLUEZ_BNEP_DEBUG
+#ifndef CONFIG_BT_BNEP_DEBUG
#undef BT_DBG
#define BT_DBG(D...)
#endif
@@ -144,7 +144,7 @@ static int bnep_ctrl_set_netfilter(struct bnep_session *s, struct sk_buff *skb)
BT_DBG("filter len %d", n);
-#ifdef CONFIG_BLUEZ_BNEP_PROTO_FILTER
+#ifdef CONFIG_BT_BNEP_PROTO_FILTER
n /= 4;
if (n <= BNEP_MAX_PROTO_FILTERS) {
struct bnep_proto_filter *f = s->proto_filter;
@@ -186,7 +186,7 @@ static int bnep_ctrl_set_mcfilter(struct bnep_session *s, struct sk_buff *skb)
BT_DBG("filter len %d", n);
-#ifdef CONFIG_BLUEZ_BNEP_MC_FILTER
+#ifdef CONFIG_BT_BNEP_MC_FILTER
n /= (ETH_ALEN * 2);
if (n > 0) {
@@ -479,7 +479,7 @@ static int bnep_session(void *arg)
BT_DBG("");
daemonize();
- set_user_nice(current, 19);
+ set_user_nice(current, -15);
current->flags |= PF_IOTHREAD;
sigfillset(&current->blocked);
flush_signals(current);
@@ -538,8 +538,8 @@ int bnep_add_connection(struct bnep_conadd_req *req, struct socket *sock)
BT_DBG("");
- baswap((void *) dst, &bluez_sk(sock->sk)->dst);
- baswap((void *) src, &bluez_sk(sock->sk)->src);
+ baswap((void *) dst, &bt_sk(sock->sk)->dst);
+ baswap((void *) src, &bt_sk(sock->sk)->src);
s = kmalloc(sizeof(struct bnep_session), GFP_KERNEL);
if (!s)
@@ -572,12 +572,12 @@ int bnep_add_connection(struct bnep_conadd_req *req, struct socket *sock)
s->msg.msg_flags = MSG_NOSIGNAL;
-#ifdef CONFIG_BLUEZ_BNEP_MC_FILTER
+#ifdef CONFIG_BT_BNEP_MC_FILTER
/* Set default mc filter */
set_bit(bnep_mc_hash(dev->broadcast), (ulong *) &s->mc_filter);
#endif
-#ifdef CONFIG_BLUEZ_BNEP_PROTO_FILTER
+#ifdef CONFIG_BT_BNEP_PROTO_FILTER
/* Set default protocol filter */
/* (IPv4, ARP) */
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index bc17e026aefa..71217edf3566 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -46,7 +46,7 @@
#include "bnep.h"
-#ifndef CONFIG_BLUEZ_BNEP_DEBUG
+#ifndef CONFIG_BT_BNEP_DEBUG
#undef BT_DBG
#define BT_DBG( A... )
#endif
@@ -73,7 +73,7 @@ static struct net_device_stats *bnep_net_get_stats(struct net_device *dev)
static void bnep_net_set_mc_list(struct net_device *dev)
{
-#ifdef CONFIG_BLUEZ_BNEP_MC_FILTER
+#ifdef CONFIG_BT_BNEP_MC_FILTER
struct bnep_session *s = dev->priv;
struct sock *sk = s->sock->sk;
struct bnep_set_filter_req *r;
@@ -143,7 +143,7 @@ static int bnep_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EINVAL;
}
-#ifdef CONFIG_BLUEZ_BNEP_MC_FILTER
+#ifdef CONFIG_BT_BNEP_MC_FILTER
static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s)
{
struct ethhdr *eh = (void *) skb->data;
@@ -154,7 +154,7 @@ static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s
}
#endif
-#ifdef CONFIG_BLUEZ_BNEP_PROTO_FILTER
+#ifdef CONFIG_BT_BNEP_PROTO_FILTER
/* Determine ether protocol. Based on eth_type_trans. */
static inline u16 bnep_net_eth_proto(struct sk_buff *skb)
{
@@ -192,14 +192,14 @@ static int bnep_net_xmit(struct sk_buff *skb, struct net_device *dev)
BT_DBG("skb %p, dev %p", skb, dev);
-#ifdef CONFIG_BLUEZ_BNEP_MC_FILTER
+#ifdef CONFIG_BT_BNEP_MC_FILTER
if (bnep_net_mc_filter(skb, s)) {
kfree_skb(skb);
return 0;
}
#endif
-#ifdef CONFIG_BLUEZ_BNEP_PROTO_FILTER
+#ifdef CONFIG_BT_BNEP_PROTO_FILTER
if (bnep_net_proto_filter(skb, s)) {
kfree_skb(skb);
return 0;
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 5b8b750023ec..7bfabccd0f67 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -50,7 +50,7 @@
#include "bnep.h"
-#ifndef CONFIG_BLUEZ_BNEP_DEBUG
+#ifndef CONFIG_BT_BNEP_DEBUG
#undef BT_DBG
#define BT_DBG( A... )
#endif
@@ -173,7 +173,7 @@ static int bnep_sock_create(struct socket *sock, int protocol)
if (sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
- if (!(sk = bluez_sock_alloc(sock, PF_BLUETOOTH, 0, GFP_KERNEL)))
+ if (!(sk = bt_sock_alloc(sock, PF_BLUETOOTH, 0, GFP_KERNEL)))
return -ENOMEM;
sock->ops = &bnep_sock_ops;
@@ -194,13 +194,13 @@ static struct net_proto_family bnep_sock_family_ops = {
int bnep_sock_init(void)
{
- bluez_sock_register(BTPROTO_BNEP, &bnep_sock_family_ops);
+ bt_sock_register(BTPROTO_BNEP, &bnep_sock_family_ops);
return 0;
}
int bnep_sock_cleanup(void)
{
- if (bluez_sock_unregister(BTPROTO_BNEP))
+ if (bt_sock_unregister(BTPROTO_BNEP))
BT_ERR("Can't unregister BNEP socket");
return 0;
}
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 536402c9581a..64d3e5cf31e5 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -52,7 +52,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#ifndef HCI_CORE_DEBUG
+#ifndef CONFIG_BT_HCI_CORE_DEBUG
#undef BT_DBG
#define BT_DBG( A... )
#endif
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index f1485ad896bd..1ad3339ed38d 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -23,7 +23,7 @@
*/
/*
- * BlueZ HCI Core.
+ * Bluetooth HCI Core.
*
* $Id: hci_core.c,v 1.6 2002/04/17 17:37:16 maxk Exp $
*/
@@ -53,7 +53,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#ifndef HCI_CORE_DEBUG
+#ifndef CONFIG_BT_HCI_CORE_DEBUG
#undef BT_DBG
#define BT_DBG( A... )
#endif
@@ -168,7 +168,7 @@ static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev,
switch (hdev->req_status) {
case HCI_REQ_DONE:
- err = -bterr(hdev->req_result);
+ err = -bt_err(hdev->req_result);
break;
case HCI_REQ_CANCELED:
@@ -863,6 +863,22 @@ int hci_unregister_dev(struct hci_dev *hdev)
return 0;
}
+/* Suspend HCI device */
+int hci_suspend_dev(struct hci_dev *hdev)
+{
+ hci_notify(hdev, HCI_DEV_SUSPEND);
+ hci_run_hotplug(hdev->name, "suspend");
+ return 0;
+}
+
+/* Resume HCI device */
+int hci_resume_dev(struct hci_dev *hdev)
+{
+ hci_notify(hdev, HCI_DEV_RESUME);
+ hci_run_hotplug(hdev->name, "resume");
+ return 0;
+}
+
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
@@ -877,7 +893,7 @@ int hci_recv_frame(struct sk_buff *skb)
BT_DBG("%s type %d len %d", hdev->name, skb->pkt_type, skb->len);
/* Incomming skb */
- bluez_cb(skb)->incomming = 1;
+ bt_cb(skb)->incoming = 1;
/* Time stamp */
do_gettimeofday(&skb->stamp);
@@ -1001,7 +1017,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *p
BT_DBG("%s ogf 0x%x ocf 0x%x plen %d", hdev->name, ogf, ocf, plen);
- if (!(skb = bluez_skb_alloc(len, GFP_ATOMIC))) {
+ if (!(skb = bt_skb_alloc(len, GFP_ATOMIC))) {
BT_ERR("%s Can't allocate memory for HCI command", hdev->name);
return -ENOMEM;
}
@@ -1250,7 +1266,7 @@ static void hci_tx_task(unsigned long arg)
read_unlock(&hci_task_lock);
}
-/* ----- HCI RX task (incomming data proccessing) ----- */
+/* ----- HCI RX task (incoming data processing) ----- */
/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1406,15 +1422,3 @@ static void hci_cmd_task(unsigned long arg)
}
}
}
-
-/* ---- Initialization ---- */
-
-int hci_core_init(void)
-{
- return 0;
-}
-
-int hci_core_cleanup(void)
-{
- return 0;
-}
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 9445e4543416..c9303bbd8cde 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -52,7 +52,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#ifndef HCI_CORE_DEBUG
+#ifndef CONFIG_BT_HCI_CORE_DEBUG
#undef BT_DBG
#define BT_DBG( A... )
#endif
@@ -68,7 +68,7 @@ static void hci_cc_link_ctl(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb
default:
BT_DBG("%s Command complete: ogf LINK_CTL ocf %x", hdev->name, ocf);
break;
- };
+ }
}
/* Command Complete OGF LINK_POLICY */
@@ -103,7 +103,7 @@ static void hci_cc_link_policy(struct hci_dev *hdev, __u16 ocf, struct sk_buff *
BT_DBG("%s: Command complete: ogf LINK_POLICY ocf %x",
hdev->name, ocf);
break;
- };
+ }
}
/* Command Complete OGF HOST_CTL */
@@ -213,7 +213,7 @@ static void hci_cc_host_ctl(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb
default:
BT_DBG("%s Command complete: ogf HOST_CTL ocf %x", hdev->name, ocf);
break;
- };
+ }
}
/* Command Complete OGF INFO_PARAM */
@@ -287,7 +287,7 @@ static void hci_cc_info_param(struct hci_dev *hdev, __u16 ocf, struct sk_buff *s
default:
BT_DBG("%s Command complete: ogf INFO_PARAM ocf %x", hdev->name, ocf);
break;
- };
+ }
}
/* Command Status OGF LINK_CTL */
@@ -376,7 +376,7 @@ static void hci_cs_link_ctl(struct hci_dev *hdev, __u16 ocf, __u8 status)
BT_DBG("%s Command status: ogf LINK_CTL ocf %x status %d",
hdev->name, ocf, status);
break;
- };
+ }
}
/* Command Status OGF LINK_POLICY */
@@ -388,7 +388,7 @@ static void hci_cs_link_policy(struct hci_dev *hdev, __u16 ocf, __u8 status)
default:
BT_DBG("%s Command status: ogf HOST_POLICY ocf %x", hdev->name, ocf);
break;
- };
+ }
}
/* Command Status OGF HOST_CTL */
@@ -400,7 +400,7 @@ static void hci_cs_host_ctl(struct hci_dev *hdev, __u16 ocf, __u8 status)
default:
BT_DBG("%s Command status: ogf HOST_CTL ocf %x", hdev->name, ocf);
break;
- };
+ }
}
/* Command Status OGF INFO_PARAM */
@@ -412,7 +412,7 @@ static void hci_cs_info_param(struct hci_dev *hdev, __u16 ocf, __u8 status)
default:
BT_DBG("%s Command status: ogf INFO_PARAM ocf %x", hdev->name, ocf);
break;
- };
+ }
}
/* Inquiry Complete */
@@ -849,7 +849,7 @@ void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
void *ptr;
size = HCI_EVENT_HDR_SIZE + EVT_STACK_INTERNAL_SIZE + dlen;
- skb = bluez_skb_alloc(size, GFP_ATOMIC);
+ skb = bt_skb_alloc(size, GFP_ATOMIC);
if (!skb)
return;
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index daf7546004b5..0398bb045b4c 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -23,7 +23,7 @@
*/
/*
- * BlueZ HCI socket layer.
+ * Bluetooth HCI socket layer.
*
* $Id: hci_sock.c,v 1.4 2002/04/18 22:26:14 maxk Exp $
*/
@@ -53,7 +53,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#ifndef HCI_SOCK_DEBUG
+#ifndef CONFIG_BT_HCI_SOCK_DEBUG
#undef BT_DBG
#define BT_DBG( A... )
#endif
@@ -79,7 +79,7 @@ static struct hci_sec_filter hci_sec_filter = {
}
};
-static struct bluez_sock_list hci_sk_list = {
+static struct bt_sock_list hci_sk_list = {
.lock = RW_LOCK_UNLOCKED
};
@@ -144,7 +144,7 @@ static int hci_sock_release(struct socket *sock)
if (!sk)
return 0;
- bluez_sock_unlink(&hci_sk_list, sk);
+ bt_sock_unlink(&hci_sk_list, sk);
if (hdev) {
atomic_dec(&hdev->promisc);
@@ -310,7 +310,7 @@ static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_
__u32 mask = hci_pi(sk)->cmsg_mask;
if (mask & HCI_CMSG_DIR)
- put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(int), &bluez_cb(skb)->incomming);
+ put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(int), &bt_cb(skb)->incoming);
if (mask & HCI_CMSG_TSTAMP)
put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, sizeof(skb->stamp), &skb->stamp);
@@ -378,7 +378,7 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
goto done;
}
- if (!(skb = bluez_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err)))
+ if (!(skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err)))
goto done;
if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
@@ -454,7 +454,7 @@ int hci_sock_setsockopt(struct socket *sock, int level, int optname, char *optva
break;
case HCI_FILTER:
- len = MIN(len, sizeof(uf));
+ len = min_t(unsigned int, len, sizeof(uf));
if (copy_from_user(&uf, optval, len)) {
err = -EFAULT;
break;
@@ -472,7 +472,7 @@ int hci_sock_setsockopt(struct socket *sock, int level, int optname, char *optva
f->type_mask = uf.type_mask;
f->opcode = uf.opcode;
*((u32 *) f->event_mask + 0) = uf.event_mask[0];
- *((u32 *) f->event_mask + 1) = uf.event_mask[0];
+ *((u32 *) f->event_mask + 1) = uf.event_mask[1];
}
break;
@@ -522,10 +522,10 @@ int hci_sock_getsockopt(struct socket *sock, int level, int optname, char *optva
uf.type_mask = f->type_mask;
uf.opcode = f->opcode;
uf.event_mask[0] = *((u32 *) f->event_mask + 0);
- uf.event_mask[0] = *((u32 *) f->event_mask + 1);
+ uf.event_mask[1] = *((u32 *) f->event_mask + 1);
}
- len = MIN(len, sizeof(uf));
+ len = min_t(unsigned int, len, sizeof(uf));
if (copy_to_user(optval, &uf, len))
return -EFAULT;
break;
@@ -568,14 +568,14 @@ static int hci_sock_create(struct socket *sock, int protocol)
sock->ops = &hci_sock_ops;
- sk = bluez_sock_alloc(sock, protocol, sizeof(struct hci_pinfo), GFP_KERNEL);
+ sk = bt_sock_alloc(sock, protocol, sizeof(struct hci_pinfo), GFP_KERNEL);
if (!sk)
return -ENOMEM;
sock->state = SS_UNCONNECTED;
sk->state = BT_OPEN;
- bluez_sock_link(&hci_sk_list, sk);
+ bt_sock_link(&hci_sk_list, sk);
MOD_INC_USE_COUNT;
return 0;
@@ -627,7 +627,7 @@ struct notifier_block hci_sock_nblock = {
int hci_sock_init(void)
{
- if (bluez_sock_register(BTPROTO_HCI, &hci_sock_family_ops)) {
+ if (bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops)) {
BT_ERR("Can't register HCI socket");
return -EPROTO;
}
@@ -638,7 +638,7 @@ int hci_sock_init(void)
int hci_sock_cleanup(void)
{
- if (bluez_sock_unregister(BTPROTO_HCI))
+ if (bt_sock_unregister(BTPROTO_HCI))
BT_ERR("Can't unregister HCI socket");
hci_unregister_notifier(&hci_sock_nblock);
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index b8bdf5978299..28f06d39daab 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -23,7 +23,7 @@
*/
/*
- * BlueZ L2CAP core and sockets.
+ * Bluetooth L2CAP core and sockets.
*
* $Id: l2cap.c,v 1.15 2002/09/09 01:14:52 maxk Exp $
*/
@@ -57,14 +57,14 @@
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
-#ifndef L2CAP_DEBUG
+#ifndef CONFIG_BT_L2CAP_DEBUG
#undef BT_DBG
#define BT_DBG( A... )
#endif
static struct proto_ops l2cap_sock_ops;
-struct bluez_sock_list l2cap_sk_list = {
+struct bt_sock_list l2cap_sk_list = {
.lock = RW_LOCK_UNLOCKED
};
@@ -188,8 +188,8 @@ static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, stru
int l2cap_connect(struct sock *sk)
{
- bdaddr_t *src = &bluez_sk(sk)->src;
- bdaddr_t *dst = &bluez_sk(sk)->dst;
+ bdaddr_t *src = &bt_sk(sk)->src;
+ bdaddr_t *dst = &bt_sk(sk)->dst;
struct l2cap_conn *conn;
struct hci_conn *hcon;
struct hci_dev *hdev;
@@ -248,7 +248,7 @@ static struct sock *__l2cap_get_sock_by_addr(__u16 psm, bdaddr_t *src)
struct sock *sk;
for (sk = l2cap_sk_list.head; sk; sk = sk->next) {
if (l2cap_pi(sk)->psm == psm &&
- !bacmp(&bluez_sk(sk)->src, src))
+ !bacmp(&bt_sk(sk)->src, src))
break;
}
return sk;
@@ -267,11 +267,11 @@ static struct sock *__l2cap_get_sock_by_psm(int state, __u16 psm, bdaddr_t *src)
if (l2cap_pi(sk)->psm == psm) {
/* Exact match. */
- if (!bacmp(&bluez_sk(sk)->src, src))
+ if (!bacmp(&bt_sk(sk)->src, src))
break;
/* Closest match */
- if (!bacmp(&bluez_sk(sk)->src, BDADDR_ANY))
+ if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
sk1 = sk;
}
}
@@ -310,7 +310,7 @@ static void l2cap_sock_cleanup_listen(struct sock *parent)
BT_DBG("parent %p", parent);
/* Close not yet accepted channels */
- while ((sk = bluez_accept_dequeue(parent, NULL)))
+ while ((sk = bt_accept_dequeue(parent, NULL)))
l2cap_sock_close(sk);
parent->state = BT_CLOSED;
@@ -328,7 +328,7 @@ static void l2cap_sock_kill(struct sock *sk)
BT_DBG("sk %p state %d", sk, sk->state);
/* Kill poor orphan */
- bluez_sock_unlink(&l2cap_sk_list, sk);
+ bt_sock_unlink(&l2cap_sk_list, sk);
sk->dead = 1;
sock_put(sk);
}
@@ -409,7 +409,7 @@ static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, int prio)
{
struct sock *sk;
- sk = bluez_sock_alloc(sock, proto, sizeof(struct l2cap_pinfo), prio);
+ sk = bt_sock_alloc(sock, proto, sizeof(struct l2cap_pinfo), prio);
if (!sk)
return NULL;
@@ -421,7 +421,7 @@ static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, int prio)
l2cap_sock_init_timer(sk);
- bluez_sock_link(&l2cap_sk_list, sk);
+ bt_sock_link(&l2cap_sk_list, sk);
MOD_INC_USE_COUNT;
return sk;
@@ -471,7 +471,7 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_
err = -EADDRINUSE;
} else {
/* Save source address */
- bacpy(&bluez_sk(sk)->src, &la->l2_bdaddr);
+ bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
l2cap_pi(sk)->psm = la->l2_psm;
sk->state = BT_BOUND;
}
@@ -524,14 +524,14 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
}
/* Set destination address and psm */
- bacpy(&bluez_sk(sk)->dst, &la->l2_bdaddr);
+ bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
l2cap_pi(sk)->psm = la->l2_psm;
if ((err = l2cap_connect(sk)))
goto done;
wait:
- err = bluez_sock_w4_connect(sk, flags);
+ err = bt_sock_w4_connect(sk, flags);
done:
release_sock(sk);
@@ -586,7 +586,7 @@ int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk->sleep, &wait);
- while (!(nsk = bluez_accept_dequeue(sk, newsock))) {
+ while (!(nsk = bt_accept_dequeue(sk, newsock))) {
set_current_state(TASK_INTERRUPTIBLE);
if (!timeo) {
err = -EAGAIN;
@@ -633,9 +633,9 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *l
*len = sizeof(struct sockaddr_l2);
if (peer)
- bacpy(&la->l2_bdaddr, &bluez_sk(sk)->dst);
+ bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
else
- bacpy(&la->l2_bdaddr, &bluez_sk(sk)->src);
+ bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
la->l2_psm = l2cap_pi(sk)->psm;
return 0;
@@ -682,7 +682,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
switch (optname) {
case L2CAP_OPTIONS:
- len = MIN(sizeof(opts), optlen);
+ len = min_t(unsigned int, sizeof(opts), optlen);
if (copy_from_user((char *)&opts, optval, len)) {
err = -EFAULT;
break;
@@ -727,7 +727,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
opts.omtu = l2cap_pi(sk)->omtu;
opts.flush_to = l2cap_pi(sk)->flush_to;
- len = MIN(len, sizeof(opts));
+ len = min_t(unsigned int, len, sizeof(opts));
if (copy_to_user(optval, (char *)&opts, len))
err = -EFAULT;
@@ -746,7 +746,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
- len = MIN(len, sizeof(cinfo));
+ len = min_t(unsigned int, len, sizeof(cinfo));
if (copy_to_user(optval, (char *)&cinfo, len))
err = -EFAULT;
@@ -892,7 +892,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct so
__l2cap_chan_link(l, sk);
if (parent)
- bluez_accept_enqueue(parent, sk);
+ bt_accept_enqueue(parent, sk);
}
/* Delete channel.
@@ -900,7 +900,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct so
static void l2cap_chan_del(struct sock *sk, int err)
{
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
- struct sock *parent = bluez_sk(sk)->parent;
+ struct sock *parent = bt_sk(sk)->parent;
l2cap_sock_clear_timer(sk);
@@ -954,7 +954,7 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
static void l2cap_chan_ready(struct sock *sk)
{
- struct sock *parent = bluez_sk(sk)->parent;
+ struct sock *parent = bt_sk(sk)->parent;
BT_DBG("sk %p, parent %p", sk, parent);
@@ -1017,9 +1017,9 @@ static int l2cap_chan_send(struct sock *sk, struct msghdr *msg, int len)
else
hlen = L2CAP_HDR_SIZE;
- count = MIN(conn->mtu - hlen, len);
+ count = min_t(unsigned int, (conn->mtu - hlen), len);
- skb = bluez_skb_send_alloc(sk, hlen + count,
+ skb = bt_skb_send_alloc(sk, hlen + count,
msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb)
return err;
@@ -1043,9 +1043,9 @@ static int l2cap_chan_send(struct sock *sk, struct msghdr *msg, int len)
/* Continuation fragments (no L2CAP header) */
frag = &skb_shinfo(skb)->frag_list;
while (len) {
- count = MIN(conn->mtu, len);
+ count = min_t(unsigned int, conn->mtu, len);
- *frag = bluez_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
+ *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
if (!*frag)
goto fail;
@@ -1103,9 +1103,9 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
- count = MIN(conn->mtu, len);
+ count = min_t(unsigned int, conn->mtu, len);
- skb = bluez_skb_alloc(count, GFP_ATOMIC);
+ skb = bt_skb_alloc(count, GFP_ATOMIC);
if (!skb)
return NULL;
@@ -1129,9 +1129,9 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
/* Continuation fragments (no L2CAP header) */
frag = &skb_shinfo(skb)->frag_list;
while (len) {
- count = MIN(conn->mtu, len);
+ count = min_t(unsigned int, conn->mtu, len);
- *frag = bluez_skb_alloc(count, GFP_ATOMIC);
+ *frag = bt_skb_alloc(count, GFP_ATOMIC);
if (!*frag)
goto fail;
@@ -1374,8 +1374,8 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, l2cap_cmd_hdr *cmd,
hci_conn_hold(conn->hcon);
l2cap_sock_init(sk, parent);
- bacpy(&bluez_sk(sk)->src, conn->src);
- bacpy(&bluez_sk(sk)->dst, conn->dst);
+ bacpy(&bt_sk(sk)->src, conn->src);
+ bacpy(&bt_sk(sk)->dst, conn->dst);
l2cap_pi(sk)->psm = psm;
l2cap_pi(sk)->dcid = scid;
@@ -1785,10 +1785,10 @@ static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
if (sk->state != BT_LISTEN)
continue;
- if (!bacmp(&bluez_sk(sk)->src, bdaddr)) {
+ if (!bacmp(&bt_sk(sk)->src, bdaddr)) {
lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
exact++;
- } else if (!bacmp(&bluez_sk(sk)->src, BDADDR_ANY))
+ } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
}
read_unlock(&l2cap_sk_list.lock);
@@ -1810,7 +1810,7 @@ static int l2cap_connect_cfm(struct hci_conn *hcon, __u8 status)
if (conn)
l2cap_conn_ready(conn);
} else
- l2cap_conn_del(hcon, bterr(status));
+ l2cap_conn_del(hcon, bt_err(status));
return 0;
}
@@ -1822,7 +1822,7 @@ static int l2cap_disconn_ind(struct hci_conn *hcon, __u8 reason)
if (hcon->type != ACL_LINK)
return 0;
- l2cap_conn_del(hcon, bterr(reason));
+ l2cap_conn_del(hcon, bt_err(reason));
return 0;
}
@@ -1958,7 +1958,7 @@ static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, __u16
}
/* Allocate skb for the complete frame (with header) */
- if (!(conn->rx_skb = bluez_skb_alloc(len, GFP_ATOMIC)))
+ if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
goto drop;
memcpy(skb_put(conn->rx_skb, skb->len), skb->data, skb->len);
@@ -1996,7 +1996,7 @@ drop:
}
/* ----- Proc fs support ------ */
-static int l2cap_sock_dump(char *buf, struct bluez_sock_list *list)
+static int l2cap_sock_dump(char *buf, struct bt_sock_list *list)
{
struct l2cap_pinfo *pi;
struct sock *sk;
@@ -2007,7 +2007,7 @@ static int l2cap_sock_dump(char *buf, struct bluez_sock_list *list)
for (sk = list->head; sk; sk = sk->next) {
pi = l2cap_pi(sk);
ptr += sprintf(ptr, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
- batostr(&bluez_sk(sk)->src), batostr(&bluez_sk(sk)->dst),
+ batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
sk->state, pi->psm, pi->scid, pi->dcid, pi->imtu, pi->omtu,
pi->link_mode);
}
@@ -2051,8 +2051,8 @@ static struct proto_ops l2cap_sock_ops = {
.accept = l2cap_sock_accept,
.getname = l2cap_sock_getname,
.sendmsg = l2cap_sock_sendmsg,
- .recvmsg = bluez_sock_recvmsg,
- .poll = bluez_sock_poll,
+ .recvmsg = bt_sock_recvmsg,
+ .poll = bt_sock_poll,
.mmap = sock_no_mmap,
.socketpair = sock_no_socketpair,
.ioctl = sock_no_ioctl,
@@ -2081,7 +2081,7 @@ int __init l2cap_init(void)
{
int err;
- if ((err = bluez_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops))) {
+ if ((err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops))) {
BT_ERR("Can't register L2CAP socket");
return err;
}
@@ -2093,7 +2093,7 @@ int __init l2cap_init(void)
create_proc_read_entry("bluetooth/l2cap", 0, 0, l2cap_read_proc, NULL);
- BT_INFO("BlueZ L2CAP ver %s Copyright (C) 2000,2001 Qualcomm Inc", VERSION);
+ BT_INFO("Bluetooth L2CAP ver %s Copyright (C) 2000,2001 Qualcomm Inc", VERSION);
BT_INFO("Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>");
return 0;
}
@@ -2103,7 +2103,7 @@ void l2cap_cleanup(void)
remove_proc_entry("bluetooth/l2cap", NULL);
/* Unregister socket and protocol */
- if (bluez_sock_unregister(BTPROTO_L2CAP))
+ if (bt_sock_unregister(BTPROTO_L2CAP))
BT_ERR("Can't unregister L2CAP socket");
if (hci_unregister_proto(&l2cap_hci_proto))
@@ -2114,5 +2114,5 @@ module_init(l2cap_init);
module_exit(l2cap_cleanup);
MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>");
-MODULE_DESCRIPTION("BlueZ L2CAP ver " VERSION);
+MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_LICENSE("GPL");
diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c
index 3fbcbd646d9f..44c022292f66 100644
--- a/net/bluetooth/lib.c
+++ b/net/bluetooth/lib.c
@@ -23,7 +23,7 @@
*/
/*
- * BlueZ kernel library.
+ * Bluetooth kernel library.
*
* $Id: lib.c,v 1.1 2002/03/08 21:06:59 maxk Exp $
*/
@@ -35,7 +35,7 @@
#include <net/bluetooth/bluetooth.h>
-void bluez_dump(char *pref, __u8 *buf, int count)
+void bt_dump(char *pref, __u8 *buf, int count)
{
char *ptr;
char line[100];
@@ -83,7 +83,7 @@ char *batostr(bdaddr_t *ba)
}
/* Bluetooth error codes to Unix errno mapping */
-int bterr(__u16 code)
+int bt_err(__u16 code)
{
switch (code) {
case 0:
@@ -171,5 +171,5 @@ int bterr(__u16 code)
default:
return ENOSYS;
- };
+ }
}
diff --git a/net/bluetooth/rfcomm/Config.help b/net/bluetooth/rfcomm/Config.help
index 6d5ee6a77f04..27e9d505bd0e 100644
--- a/net/bluetooth/rfcomm/Config.help
+++ b/net/bluetooth/rfcomm/Config.help
@@ -1,5 +1,5 @@
RFCOMM protocol support
-CONFIG_BLUEZ_RFCOMM
+CONFIG_BT_RFCOMM
RFCOMM provides connection oriented stream transport. RFCOMM
support is required for Dialup Networking, OBEX and other Bluetooth
applications.
@@ -8,5 +8,5 @@ CONFIG_BLUEZ_RFCOMM
compile it as module (rfcomm.o).
RFCOMM TTY emulation support
-CONFIG_BLUEZ_RFCOMM_TTY
+CONFIG_BT_RFCOMM_TTY
This option enables TTY emulation support for RFCOMM channels.
diff --git a/net/bluetooth/rfcomm/Config.in b/net/bluetooth/rfcomm/Config.in
index e3ad1358f873..9dc852225dd5 100644
--- a/net/bluetooth/rfcomm/Config.in
+++ b/net/bluetooth/rfcomm/Config.in
@@ -1,7 +1,7 @@
-dep_tristate 'RFCOMM protocol support' CONFIG_BLUEZ_RFCOMM $CONFIG_BLUEZ_L2CAP
+dep_tristate 'RFCOMM protocol support' CONFIG_BT_RFCOMM $CONFIG_BT_L2CAP
-if [ "$CONFIG_BLUEZ_RFCOMM" != "n" ]; then
- bool ' RFCOMM TTY support' CONFIG_BLUEZ_RFCOMM_TTY
+if [ "$CONFIG_BT_RFCOMM" != "n" ]; then
+ bool ' RFCOMM TTY support' CONFIG_BT_RFCOMM_TTY
fi
diff --git a/net/bluetooth/rfcomm/Makefile b/net/bluetooth/rfcomm/Makefile
index 94fa0f6e1bba..9cae42fee3d6 100644
--- a/net/bluetooth/rfcomm/Makefile
+++ b/net/bluetooth/rfcomm/Makefile
@@ -2,10 +2,10 @@
# Makefile for the Linux Bluetooth RFCOMM layer.
#
-obj-$(CONFIG_BLUEZ_RFCOMM) += rfcomm.o
+obj-$(CONFIG_BT_RFCOMM) += rfcomm.o
rfcomm-y := core.o sock.o crc.o
-rfcomm-$(CONFIG_BLUEZ_RFCOMM_TTY) += tty.o
+rfcomm-$(CONFIG_BT_RFCOMM_TTY) += tty.o
rfcomm-objs := $(rfcomm-y)
include $(TOPDIR)/Rules.make
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 230adb8abc89..2d0e0bfc8271 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -26,7 +26,7 @@
*/
/*
- * RFCOMM core.
+ * Bluetooth RFCOMM core.
*
* $Id: core.c,v 1.42 2002/10/01 23:26:25 maxk Exp $
*/
@@ -53,7 +53,7 @@
#define VERSION "0.3"
-#ifndef CONFIG_BLUEZ_RFCOMM_DEBUG
+#ifndef CONFIG_BT_RFCOMM_DEBUG
#undef BT_DBG
#define BT_DBG(D...)
#endif
@@ -489,10 +489,10 @@ struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst)
{
struct rfcomm_session *s;
struct list_head *p, *n;
- struct bluez_sock *sk;
+ struct bt_sock *sk;
list_for_each_safe(p, n, &session_list) {
s = list_entry(p, struct rfcomm_session, list);
- sk = bluez_sk(s->sock->sk);
+ sk = bt_sk(s->sock->sk);
if ((!bacmp(src, BDADDR_ANY) || !bacmp(&sk->src, src)) &&
!bacmp(&sk->dst, dst))
@@ -577,9 +577,9 @@ void rfcomm_session_getaddr(struct rfcomm_session *s, bdaddr_t *src, bdaddr_t *d
{
struct sock *sk = s->sock->sk;
if (src)
- bacpy(src, &bluez_sk(sk)->src);
+ bacpy(src, &bt_sk(sk)->src);
if (dst)
- bacpy(dst, &bluez_sk(sk)->dst);
+ bacpy(dst, &bt_sk(sk)->dst);
}
/* ---- RFCOMM frame sending ---- */
@@ -1509,7 +1509,7 @@ static inline void rfcomm_accept_connection(struct rfcomm_session *s)
/* Fast check for a new connection.
* Avoids unnesesary socket allocations. */
- if (list_empty(&bluez_sk(sock->sk)->accept_q))
+ if (list_empty(&bt_sk(sock->sk)->accept_q))
return;
BT_DBG("session %p", s);
@@ -1727,7 +1727,7 @@ static int rfcomm_dlc_dump(char *buf)
d = list_entry(pp, struct rfcomm_dlc, list);
ptr += sprintf(ptr, "dlc %s %s %ld %d %d %d %d\n",
- batostr(&bluez_sk(sk)->src), batostr(&bluez_sk(sk)->dst),
+ batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
d->state, d->dlci, d->mtu, d->rx_credits, d->tx_credits);
}
}
@@ -1771,13 +1771,13 @@ int __init rfcomm_init(void)
rfcomm_init_sockets();
-#ifdef CONFIG_BLUEZ_RFCOMM_TTY
+#ifdef CONFIG_BT_RFCOMM_TTY
rfcomm_init_ttys();
#endif
create_proc_read_entry("bluetooth/rfcomm", 0, 0, rfcomm_read_proc, NULL);
- BT_INFO("BlueZ RFCOMM ver %s", VERSION);
+ BT_INFO("Bluetooth RFCOMM ver %s", VERSION);
BT_INFO("Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com>");
BT_INFO("Copyright (C) 2002 Marcel Holtmann <marcel@holtmann.org>");
return 0;
@@ -1796,7 +1796,7 @@ void rfcomm_cleanup(void)
remove_proc_entry("bluetooth/rfcomm", NULL);
-#ifdef CONFIG_BLUEZ_RFCOMM_TTY
+#ifdef CONFIG_BT_RFCOMM_TTY
rfcomm_cleanup_ttys();
#endif
@@ -1808,5 +1808,5 @@ module_init(rfcomm_init);
module_exit(rfcomm_cleanup);
MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
-MODULE_DESCRIPTION("BlueZ RFCOMM ver " VERSION);
+MODULE_DESCRIPTION("Bluetooth RFCOMM ver " VERSION);
MODULE_LICENSE("GPL");
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 97277e1a60d9..c7bf6635d467 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -52,14 +52,14 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/rfcomm.h>
-#ifndef CONFIG_BLUEZ_RFCOMM_DEBUG
+#ifndef CONFIG_BT_RFCOMM_DEBUG
#undef BT_DBG
#define BT_DBG(D...)
#endif
static struct proto_ops rfcomm_sock_ops;
-static struct bluez_sock_list rfcomm_sk_list = {
+static struct bt_sock_list rfcomm_sk_list = {
.lock = RW_LOCK_UNLOCKED
};
@@ -98,10 +98,10 @@ static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)
sk->err = err;
sk->state = d->state;
- parent = bluez_sk(sk)->parent;
+ parent = bt_sk(sk)->parent;
if (!parent) {
if (d->state == BT_CONNECTED)
- rfcomm_session_getaddr(d->session, &bluez_sk(sk)->src, NULL);
+ rfcomm_session_getaddr(d->session, &bt_sk(sk)->src, NULL);
sk->state_change(sk);
} else
parent->data_ready(parent, 0);
@@ -122,7 +122,7 @@ static struct sock *__rfcomm_get_sock_by_addr(int channel, bdaddr_t *src)
for (sk = rfcomm_sk_list.head; sk; sk = sk->next) {
if (rfcomm_pi(sk)->channel == channel &&
- !bacmp(&bluez_sk(sk)->src, src))
+ !bacmp(&bt_sk(sk)->src, src))
break;
}
@@ -142,11 +142,11 @@ static struct sock *__rfcomm_get_sock_by_channel(int state, __u16 channel, bdadd
if (rfcomm_pi(sk)->channel == channel) {
/* Exact match. */
- if (!bacmp(&bluez_sk(sk)->src, src))
+ if (!bacmp(&bt_sk(sk)->src, src))
break;
/* Closest match */
- if (!bacmp(&bluez_sk(sk)->src, BDADDR_ANY))
+ if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
sk1 = sk;
}
}
@@ -197,7 +197,7 @@ static void rfcomm_sock_cleanup_listen(struct sock *parent)
BT_DBG("parent %p", parent);
/* Close not yet accepted dlcs */
- while ((sk = bluez_accept_dequeue(parent, NULL)))
+ while ((sk = bt_accept_dequeue(parent, NULL)))
rfcomm_sock_close(sk);
parent->state = BT_CLOSED;
@@ -215,7 +215,7 @@ static void rfcomm_sock_kill(struct sock *sk)
BT_DBG("sk %p state %d refcnt %d", sk, sk->state, atomic_read(&sk->refcnt));
/* Kill poor orphan */
- bluez_sock_unlink(&rfcomm_sk_list, sk);
+ bt_sock_unlink(&rfcomm_sk_list, sk);
sk->dead = 1;
sock_put(sk);
}
@@ -265,7 +265,7 @@ static struct sock *rfcomm_sock_alloc(struct socket *sock, int proto, int prio)
struct rfcomm_dlc *d;
struct sock *sk;
- sk = bluez_sock_alloc(sock, BTPROTO_RFCOMM, sizeof(struct rfcomm_pinfo), prio);
+ sk = bt_sock_alloc(sock, BTPROTO_RFCOMM, sizeof(struct rfcomm_pinfo), prio);
if (!sk)
return NULL;
@@ -290,7 +290,7 @@ static struct sock *rfcomm_sock_alloc(struct socket *sock, int proto, int prio)
sk->protocol = proto;
sk->state = BT_OPEN;
- bluez_sock_link(&rfcomm_sk_list, sk);
+ bt_sock_link(&rfcomm_sk_list, sk);
BT_DBG("sk %p", sk);
@@ -342,7 +342,7 @@ static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr
err = -EADDRINUSE;
} else {
/* Save source address */
- bacpy(&bluez_sk(sk)->src, &sa->rc_bdaddr);
+ bacpy(&bt_sk(sk)->src, &sa->rc_bdaddr);
rfcomm_pi(sk)->channel = sa->rc_channel;
sk->state = BT_BOUND;
}
@@ -375,12 +375,12 @@ static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int a
lock_sock(sk);
sk->state = BT_CONNECT;
- bacpy(&bluez_sk(sk)->dst, &sa->rc_bdaddr);
+ bacpy(&bt_sk(sk)->dst, &sa->rc_bdaddr);
rfcomm_pi(sk)->channel = sa->rc_channel;
- err = rfcomm_dlc_open(d, &bluez_sk(sk)->src, &sa->rc_bdaddr, sa->rc_channel);
+ err = rfcomm_dlc_open(d, &bt_sk(sk)->src, &sa->rc_bdaddr, sa->rc_channel);
if (!err)
- err = bluez_sock_w4_connect(sk, flags);
+ err = bt_sock_w4_connect(sk, flags);
release_sock(sk);
return err;
@@ -429,7 +429,7 @@ int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags)
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk->sleep, &wait);
- while (!(nsk = bluez_accept_dequeue(sk, newsock))) {
+ while (!(nsk = bt_accept_dequeue(sk, newsock))) {
set_current_state(TASK_INTERRUPTIBLE);
if (!timeo) {
err = -EAGAIN;
@@ -475,9 +475,9 @@ static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int *
sa->rc_family = AF_BLUETOOTH;
sa->rc_channel = rfcomm_pi(sk)->channel;
if (peer)
- bacpy(&sa->rc_bdaddr, &bluez_sk(sk)->dst);
+ bacpy(&sa->rc_bdaddr, &bt_sk(sk)->dst);
else
- bacpy(&sa->rc_bdaddr, &bluez_sk(sk)->src);
+ bacpy(&sa->rc_bdaddr, &bt_sk(sk)->src);
*len = sizeof(struct sockaddr_rc);
return 0;
@@ -707,7 +707,7 @@ static int rfcomm_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned lon
lock_sock(sk);
-#ifdef CONFIG_BLUEZ_RFCOMM_TTY
+#ifdef CONFIG_BT_RFCOMM_TTY
err = rfcomm_dev_ioctl(sk, cmd, arg);
#else
err = -EOPNOTSUPP;
@@ -762,12 +762,12 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc *
goto done;
rfcomm_sock_init(sk, parent);
- bacpy(&bluez_sk(sk)->src, &src);
- bacpy(&bluez_sk(sk)->dst, &dst);
+ bacpy(&bt_sk(sk)->src, &src);
+ bacpy(&bt_sk(sk)->dst, &dst);
rfcomm_pi(sk)->channel = channel;
sk->state = BT_CONFIG;
- bluez_accept_enqueue(parent, sk);
+ bt_accept_enqueue(parent, sk);
/* Accept connection and return socket DLC */
*d = rfcomm_pi(sk)->dlc;
@@ -781,7 +781,7 @@ done:
/* ---- Proc fs support ---- */
int rfcomm_sock_dump(char *buf)
{
- struct bluez_sock_list *list = &rfcomm_sk_list;
+ struct bt_sock_list *list = &rfcomm_sk_list;
struct rfcomm_pinfo *pi;
struct sock *sk;
char *ptr = buf;
@@ -791,7 +791,7 @@ int rfcomm_sock_dump(char *buf)
for (sk = list->head; sk; sk = sk->next) {
pi = rfcomm_pi(sk);
ptr += sprintf(ptr, "sk %s %s %d %d\n",
- batostr(&bluez_sk(sk)->src), batostr(&bluez_sk(sk)->dst),
+ batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
sk->state, rfcomm_pi(sk)->channel);
}
@@ -814,7 +814,7 @@ static struct proto_ops rfcomm_sock_ops = {
.setsockopt = rfcomm_sock_setsockopt,
.getsockopt = rfcomm_sock_getsockopt,
.ioctl = rfcomm_sock_ioctl,
- .poll = bluez_sock_poll,
+ .poll = bt_sock_poll,
.socketpair = sock_no_socketpair,
.mmap = sock_no_mmap
};
@@ -828,7 +828,7 @@ int rfcomm_init_sockets(void)
{
int err;
- if ((err = bluez_sock_register(BTPROTO_RFCOMM, &rfcomm_sock_family_ops))) {
+ if ((err = bt_sock_register(BTPROTO_RFCOMM, &rfcomm_sock_family_ops))) {
BT_ERR("Can't register RFCOMM socket layer");
return err;
}
@@ -841,6 +841,6 @@ void rfcomm_cleanup_sockets(void)
int err;
/* Unregister socket, protocol and notifier */
- if ((err = bluez_sock_unregister(BTPROTO_RFCOMM)))
+ if ((err = bt_sock_unregister(BTPROTO_RFCOMM)))
BT_ERR("Can't unregister RFCOMM socket layer %d", err);
}
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index d46caaa5454a..10614ef8120b 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -40,7 +40,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/rfcomm.h>
-#ifndef CONFIG_BLUEZ_RFCOMM_DEBUG
+#ifndef CONFIG_BT_RFCOMM_DEBUG
#undef BT_DBG
#define BT_DBG(D...)
#endif
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 57e44e41f58d..defa54f0afc3 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -23,7 +23,7 @@
*/
/*
- * BlueZ SCO sockets.
+ * Bluetooth SCO sockets.
*
* $Id: sco.c,v 1.3 2002/04/17 17:37:16 maxk Exp $
*/
@@ -56,14 +56,14 @@
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/sco.h>
-#ifndef SCO_DEBUG
+#ifndef CONFIG_BT_SCO_DEBUG
#undef BT_DBG
#define BT_DBG( A... )
#endif
static struct proto_ops sco_sock_ops;
-static struct bluez_sock_list sco_sk_list = {
+static struct bt_sock_list sco_sk_list = {
.lock = RW_LOCK_UNLOCKED
};
@@ -200,8 +200,8 @@ static inline int sco_chan_add(struct sco_conn *conn, struct sock *sk, struct so
int sco_connect(struct sock *sk)
{
- bdaddr_t *src = &bluez_sk(sk)->src;
- bdaddr_t *dst = &bluez_sk(sk)->dst;
+ bdaddr_t *src = &bt_sk(sk)->src;
+ bdaddr_t *dst = &bt_sk(sk)->dst;
struct sco_conn *conn;
struct hci_conn *hcon;
struct hci_dev *hdev;
@@ -258,8 +258,8 @@ static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
BT_DBG("sk %p len %d", sk, len);
- count = MIN(conn->mtu, len);
- if (!(skb = bluez_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err)))
+ count = min_t(unsigned int, conn->mtu, len);
+ if (!(skb = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err)))
return err;
if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
@@ -303,7 +303,7 @@ static struct sock *__sco_get_sock_by_addr(bdaddr_t *ba)
struct sock *sk;
for (sk = sco_sk_list.head; sk; sk = sk->next) {
- if (!bacmp(&bluez_sk(sk)->src, ba))
+ if (!bacmp(&bt_sk(sk)->src, ba))
break;
}
@@ -324,11 +324,11 @@ static struct sock *sco_get_sock_listen(bdaddr_t *src)
continue;
/* Exact match. */
- if (!bacmp(&bluez_sk(sk)->src, src))
+ if (!bacmp(&bt_sk(sk)->src, src))
break;
/* Closest match */
- if (!bacmp(&bluez_sk(sk)->src, BDADDR_ANY))
+ if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
sk1 = sk;
}
@@ -357,7 +357,7 @@ static void sco_sock_cleanup_listen(struct sock *parent)
BT_DBG("parent %p", parent);
/* Close not yet accepted channels */
- while ((sk = bluez_accept_dequeue(parent, NULL)))
+ while ((sk = bt_accept_dequeue(parent, NULL)))
sco_sock_close(sk);
parent->state = BT_CLOSED;
@@ -375,7 +375,7 @@ static void sco_sock_kill(struct sock *sk)
BT_DBG("sk %p state %d", sk, sk->state);
/* Kill poor orphan */
- bluez_sock_unlink(&sco_sk_list, sk);
+ bt_sock_unlink(&sco_sk_list, sk);
sk->dead = 1;
sock_put(sk);
}
@@ -429,7 +429,7 @@ static struct sock *sco_sock_alloc(struct socket *sock, int proto, int prio)
{
struct sock *sk;
- sk = bluez_sock_alloc(sock, proto, sizeof(struct sco_pinfo), prio);
+ sk = bt_sock_alloc(sock, proto, sizeof(struct sco_pinfo), prio);
if (!sk)
return NULL;
@@ -439,7 +439,7 @@ static struct sock *sco_sock_alloc(struct socket *sock, int proto, int prio)
sco_sock_init_timer(sk);
- bluez_sock_link(&sco_sk_list, sk);
+ bt_sock_link(&sco_sk_list, sk);
MOD_INC_USE_COUNT;
return sk;
@@ -490,7 +490,7 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
err = -EADDRINUSE;
} else {
/* Save source address */
- bacpy(&bluez_sk(sk)->src, &sa->sco_bdaddr);
+ bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr);
sk->state = BT_BOUND;
}
@@ -522,12 +522,12 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
lock_sock(sk);
/* Set destination address and psm */
- bacpy(&bluez_sk(sk)->dst, &sa->sco_bdaddr);
+ bacpy(&bt_sk(sk)->dst, &sa->sco_bdaddr);
if ((err = sco_connect(sk)))
goto done;
- err = bluez_sock_w4_connect(sk, flags);
+ err = bt_sock_w4_connect(sk, flags);
done:
release_sock(sk);
@@ -577,7 +577,7 @@ int sco_sock_accept(struct socket *sock, struct socket *newsock, int flags)
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk->sleep, &wait);
- while (!(ch = bluez_accept_dequeue(sk, newsock))) {
+ while (!(ch = bt_accept_dequeue(sk, newsock))) {
set_current_state(TASK_INTERRUPTIBLE);
if (!timeo) {
err = -EAGAIN;
@@ -624,9 +624,9 @@ static int sco_sock_getname(struct socket *sock, struct sockaddr *addr, int *len
*len = sizeof(struct sockaddr_sco);
if (peer)
- bacpy(&sa->sco_bdaddr, &bluez_sk(sk)->dst);
+ bacpy(&sa->sco_bdaddr, &bt_sk(sk)->dst);
else
- bacpy(&sa->sco_bdaddr, &bluez_sk(sk)->src);
+ bacpy(&sa->sco_bdaddr, &bt_sk(sk)->src);
return 0;
}
@@ -699,7 +699,7 @@ int sco_sock_getsockopt(struct socket *sock, int level, int optname, char *optva
BT_INFO("mtu %d", opts.mtu);
- len = MIN(len, sizeof(opts));
+ len = min_t(unsigned int, len, sizeof(opts));
if (copy_to_user(optval, (char *)&opts, len))
err = -EFAULT;
@@ -713,7 +713,7 @@ int sco_sock_getsockopt(struct socket *sock, int level, int optname, char *optva
cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle;
- len = MIN(len, sizeof(cinfo));
+ len = min_t(unsigned int, len, sizeof(cinfo));
if (copy_to_user(optval, (char *)&cinfo, len))
err = -EFAULT;
@@ -751,7 +751,7 @@ static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *
conn->sk = sk;
if (parent)
- bluez_accept_enqueue(parent, sk);
+ bt_accept_enqueue(parent, sk);
}
/* Delete channel.
@@ -808,8 +808,8 @@ static void sco_conn_ready(struct sco_conn *conn)
sco_sock_init(sk, parent);
- bacpy(&bluez_sk(sk)->src, conn->src);
- bacpy(&bluez_sk(sk)->dst, conn->dst);
+ bacpy(&bt_sk(sk)->src, conn->src);
+ bacpy(&bt_sk(sk)->dst, conn->dst);
hci_conn_hold(conn->hcon);
__sco_chan_add(conn, sk, parent);
@@ -849,7 +849,7 @@ int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
if (conn)
sco_conn_ready(conn);
} else
- sco_conn_del(hcon, bterr(status));
+ sco_conn_del(hcon, bt_err(status));
return 0;
}
@@ -861,7 +861,7 @@ int sco_disconn_ind(struct hci_conn *hcon, __u8 reason)
if (hcon->type != SCO_LINK)
return 0;
- sco_conn_del(hcon, bterr(reason));
+ sco_conn_del(hcon, bt_err(reason));
return 0;
}
@@ -885,7 +885,7 @@ drop:
}
/* ----- Proc fs support ------ */
-static int sco_sock_dump(char *buf, struct bluez_sock_list *list)
+static int sco_sock_dump(char *buf, struct bt_sock_list *list)
{
struct sco_pinfo *pi;
struct sock *sk;
@@ -896,7 +896,7 @@ static int sco_sock_dump(char *buf, struct bluez_sock_list *list)
for (sk = list->head; sk; sk = sk->next) {
pi = sco_pi(sk);
ptr += sprintf(ptr, "%s %s %d\n",
- batostr(&bluez_sk(sk)->src), batostr(&bluez_sk(sk)->dst),
+ batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
sk->state);
}
@@ -940,8 +940,8 @@ static struct proto_ops sco_sock_ops = {
.accept = sco_sock_accept,
.getname = sco_sock_getname,
.sendmsg = sco_sock_sendmsg,
- .recvmsg = bluez_sock_recvmsg,
- .poll = bluez_sock_poll,
+ .recvmsg = bt_sock_recvmsg,
+ .poll = bt_sock_poll,
.ioctl = sock_no_ioctl,
.mmap = sock_no_mmap,
.socketpair = sock_no_socketpair,
@@ -968,7 +968,7 @@ int __init sco_init(void)
{
int err;
- if ((err = bluez_sock_register(BTPROTO_SCO, &sco_sock_family_ops))) {
+ if ((err = bt_sock_register(BTPROTO_SCO, &sco_sock_family_ops))) {
BT_ERR("Can't register SCO socket layer");
return err;
}
@@ -980,7 +980,7 @@ int __init sco_init(void)
create_proc_read_entry("bluetooth/sco", 0, 0, sco_read_proc, NULL);
- BT_INFO("BlueZ SCO ver %s Copyright (C) 2000,2001 Qualcomm Inc", VERSION);
+ BT_INFO("Bluetooth SCO ver %s Copyright (C) 2000,2001 Qualcomm Inc", VERSION);
BT_INFO("Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>");
return 0;
}
@@ -992,7 +992,7 @@ void sco_cleanup(void)
remove_proc_entry("bluetooth/sco", NULL);
/* Unregister socket, protocol and notifier */
- if ((err = bluez_sock_unregister(BTPROTO_SCO)))
+ if ((err = bt_sock_unregister(BTPROTO_SCO)))
BT_ERR("Can't unregister SCO socket layer %d", err);
if ((err = hci_unregister_proto(&sco_hci_proto)))
@@ -1003,5 +1003,5 @@ module_init(sco_init);
module_exit(sco_cleanup);
MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>");
-MODULE_DESCRIPTION("BlueZ SCO ver " VERSION);
+MODULE_DESCRIPTION("Bluetooth SCO ver " VERSION);
MODULE_LICENSE("GPL");
diff --git a/net/bluetooth/syms.c b/net/bluetooth/syms.c
index 6ec6c7123e60..779d82130924 100644
--- a/net/bluetooth/syms.c
+++ b/net/bluetooth/syms.c
@@ -23,7 +23,7 @@
*/
/*
- * BlueZ symbols.
+ * Bluetooth symbols.
*
* $Id: syms.c,v 1.1 2002/03/08 21:06:59 maxk Exp $
*/
@@ -44,6 +44,9 @@
/* HCI Core */
EXPORT_SYMBOL(hci_register_dev);
EXPORT_SYMBOL(hci_unregister_dev);
+EXPORT_SYMBOL(hci_suspend_dev);
+EXPORT_SYMBOL(hci_resume_dev);
+
EXPORT_SYMBOL(hci_register_proto);
EXPORT_SYMBOL(hci_unregister_proto);
@@ -59,20 +62,20 @@ EXPORT_SYMBOL(hci_send_sco);
EXPORT_SYMBOL(hci_send_raw);
EXPORT_SYMBOL(hci_si_event);
-/* BlueZ lib */
-EXPORT_SYMBOL(bluez_dump);
+/* Bluetooth lib */
+EXPORT_SYMBOL(bt_dump);
EXPORT_SYMBOL(baswap);
EXPORT_SYMBOL(batostr);
-EXPORT_SYMBOL(bterr);
+EXPORT_SYMBOL(bt_err);
-/* BlueZ sockets */
-EXPORT_SYMBOL(bluez_sock_register);
-EXPORT_SYMBOL(bluez_sock_unregister);
-EXPORT_SYMBOL(bluez_sock_alloc);
-EXPORT_SYMBOL(bluez_sock_link);
-EXPORT_SYMBOL(bluez_sock_unlink);
-EXPORT_SYMBOL(bluez_sock_recvmsg);
-EXPORT_SYMBOL(bluez_sock_poll);
-EXPORT_SYMBOL(bluez_accept_enqueue);
-EXPORT_SYMBOL(bluez_accept_dequeue);
-EXPORT_SYMBOL(bluez_sock_w4_connect);
+/* Bluetooth sockets */
+EXPORT_SYMBOL(bt_sock_register);
+EXPORT_SYMBOL(bt_sock_unregister);
+EXPORT_SYMBOL(bt_sock_alloc);
+EXPORT_SYMBOL(bt_sock_link);
+EXPORT_SYMBOL(bt_sock_unlink);
+EXPORT_SYMBOL(bt_sock_recvmsg);
+EXPORT_SYMBOL(bt_sock_poll);
+EXPORT_SYMBOL(bt_accept_enqueue);
+EXPORT_SYMBOL(bt_accept_dequeue);
+EXPORT_SYMBOL(bt_sock_w4_connect);
diff --git a/net/rxrpc/Makefile b/net/rxrpc/Makefile
new file mode 100644
index 000000000000..63e3027738c4
--- /dev/null
+++ b/net/rxrpc/Makefile
@@ -0,0 +1,33 @@
+#
+# Makefile for Linux kernel Rx RPC
+#
+
+export-objs := rxrpc_syms.o
+
+rxrpc-objs := \
+ call.o \
+ connection.o \
+ krxiod.o \
+ krxsecd.o \
+ krxtimod.o \
+ main.o \
+ peer.o \
+ rxrpc_syms.o \
+ transport.o
+
+#ifeq ($(CONFIG_PROC_FS),y)
+rxrpc-objs += proc.o
+#endif
+#ifeq ($(CONFIG_SYSCTL),y)
+rxrpc-objs += sysctl.o
+#endif
+
+obj-m := rxrpc.o
+
+# superfluous for 2.5, but needed for 2.4..
+ifeq "$(VERSION).$(PATCHLEVEL)" "2.4"
+rxrpc.o: $(rxrpc-objs)
+ $(LD) -r -o $@ $(rxrpc-objs)
+endif
+
+include $(TOPDIR)/Rules.make
diff --git a/net/rxrpc/call.c b/net/rxrpc/call.c
new file mode 100644
index 000000000000..475fd925e5fa
--- /dev/null
+++ b/net/rxrpc/call.c
@@ -0,0 +1,2122 @@
+/* call.c: Rx call routines
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <rxrpc/rxrpc.h>
+#include <rxrpc/transport.h>
+#include <rxrpc/peer.h>
+#include <rxrpc/connection.h>
+#include <rxrpc/call.h>
+#include <rxrpc/message.h>
+#include "internal.h"
+
+__RXACCT_DECL(atomic_t rxrpc_call_count);
+__RXACCT_DECL(atomic_t rxrpc_message_count);
+
+LIST_HEAD(rxrpc_calls);
+DECLARE_RWSEM(rxrpc_calls_sem);
+
+unsigned rxrpc_call_rcv_timeout = 30;
+unsigned rxrpc_call_acks_timeout = 30;
+unsigned rxrpc_call_dfr_ack_timeout = 5;
+unsigned short rxrpc_call_max_resend = 10;
+
+const char *rxrpc_call_states[] = {
+ "COMPLETE",
+ "ERROR",
+ "SRVR_RCV_OPID",
+ "SRVR_RCV_ARGS",
+ "SRVR_GOT_ARGS",
+ "SRVR_SND_REPLY",
+ "SRVR_RCV_FINAL_ACK",
+ "CLNT_SND_ARGS",
+ "CLNT_RCV_REPLY",
+ "CLNT_GOT_REPLY"
+};
+
+const char *rxrpc_call_error_states[] = {
+ "NO_ERROR",
+ "LOCAL_ABORT",
+ "PEER_ABORT",
+ "LOCAL_ERROR",
+ "REMOTE_ERROR"
+};
+
+const char *rxrpc_pkts[] = {
+ "?00", "data", "ack", "busy", "abort", "ackall", "chall", "resp", "debug",
+ "?09", "?10", "?11", "?12", "?13", "?14", "?15"
+};
+
+const char *rxrpc_acks[] = {
+ "---", "REQ", "DUP", "SEQ", "WIN", "MEM", "PNG", "PNR", "DLY", "IDL", "-?-"
+};
+
+static const char _acktype[] = "NA-";
+
+static void rxrpc_call_receive_packet(struct rxrpc_call *call);
+static void rxrpc_call_receive_data_packet(struct rxrpc_call *call, struct rxrpc_message *msg);
+static void rxrpc_call_receive_ack_packet(struct rxrpc_call *call, struct rxrpc_message *msg);
+static void rxrpc_call_definitively_ACK(struct rxrpc_call *call, rxrpc_seq_t higest);
+static void rxrpc_call_resend(struct rxrpc_call *call, rxrpc_seq_t highest);
+static int __rxrpc_call_read_data(struct rxrpc_call *call);
+
+static int rxrpc_call_record_ACK(struct rxrpc_call *call,
+ struct rxrpc_message *msg,
+ rxrpc_seq_t seq,
+ size_t count);
+#define _state(call) \
+ _debug("[[[ state %s ]]]",rxrpc_call_states[call->app_call_state]);
+
+static void rxrpc_call_default_attn_func(struct rxrpc_call *call)
+{
+ wake_up(&call->waitq);
+}
+
+static void rxrpc_call_default_error_func(struct rxrpc_call *call)
+{
+ wake_up(&call->waitq);
+}
+
+static void rxrpc_call_default_aemap_func(struct rxrpc_call *call)
+{
+ switch (call->app_err_state) {
+ case RXRPC_ESTATE_LOCAL_ABORT:
+ call->app_abort_code = -call->app_errno;
+ case RXRPC_ESTATE_PEER_ABORT:
+ call->app_errno = -ECONNABORTED;
+ default:
+ break;
+ }
+}
+
+static void __rxrpc_call_acks_timeout(unsigned long _call)
+{
+ struct rxrpc_call *call = (struct rxrpc_call *) _call;
+
+ _debug("ACKS TIMEOUT %05lu",jiffies - call->cjif);
+
+ call->flags |= RXRPC_CALL_ACKS_TIMO;
+ rxrpc_krxiod_queue_call(call);
+}
+
+static void __rxrpc_call_rcv_timeout(unsigned long _call)
+{
+ struct rxrpc_call *call = (struct rxrpc_call *) _call;
+
+ _debug("RCV TIMEOUT %05lu",jiffies - call->cjif);
+
+ call->flags |= RXRPC_CALL_RCV_TIMO;
+ rxrpc_krxiod_queue_call(call);
+}
+
+static void __rxrpc_call_ackr_timeout(unsigned long _call)
+{
+ struct rxrpc_call *call = (struct rxrpc_call *) _call;
+
+ _debug("ACKR TIMEOUT %05lu",jiffies - call->cjif);
+
+ call->flags |= RXRPC_CALL_ACKR_TIMO;
+ rxrpc_krxiod_queue_call(call);
+}
+
+/*****************************************************************************/
+/*
+ * create a new call record
+ */
+static inline int __rxrpc_create_call(struct rxrpc_connection *conn,
+ struct rxrpc_call **_call)
+{
+ struct rxrpc_call *call;
+
+ _enter("%p",conn);
+
+ /* allocate and initialise a call record */
+ call = (struct rxrpc_call *) get_zeroed_page(GFP_KERNEL);
+ if (!call) {
+ _leave(" ENOMEM");
+ return -ENOMEM;
+ }
+
+ atomic_set(&call->usage,1);
+
+ init_waitqueue_head(&call->waitq);
+ spin_lock_init(&call->lock);
+ INIT_LIST_HEAD(&call->link);
+ INIT_LIST_HEAD(&call->acks_pendq);
+ INIT_LIST_HEAD(&call->rcv_receiveq);
+ INIT_LIST_HEAD(&call->rcv_krxiodq_lk);
+ INIT_LIST_HEAD(&call->app_readyq);
+ INIT_LIST_HEAD(&call->app_unreadyq);
+ INIT_LIST_HEAD(&call->app_link);
+ INIT_LIST_HEAD(&call->app_attn_link);
+
+ init_timer(&call->acks_timeout);
+ call->acks_timeout.data = (unsigned long) call;
+ call->acks_timeout.function = __rxrpc_call_acks_timeout;
+
+ init_timer(&call->rcv_timeout);
+ call->rcv_timeout.data = (unsigned long) call;
+ call->rcv_timeout.function = __rxrpc_call_rcv_timeout;
+
+ init_timer(&call->ackr_dfr_timo);
+ call->ackr_dfr_timo.data = (unsigned long) call;
+ call->ackr_dfr_timo.function = __rxrpc_call_ackr_timeout;
+
+ call->conn = conn;
+ call->ackr_win_bot = 1;
+ call->ackr_win_top = call->ackr_win_bot + RXRPC_CALL_ACK_WINDOW_SIZE - 1;
+ call->ackr_prev_seq = 0;
+ call->app_mark = RXRPC_APP_MARK_EOF;
+ call->app_attn_func = rxrpc_call_default_attn_func;
+ call->app_error_func = rxrpc_call_default_error_func;
+ call->app_aemap_func = rxrpc_call_default_aemap_func;
+ call->app_scr_alloc = call->app_scratch;
+
+ call->cjif = jiffies;
+
+ _leave(" = 0 (%p)",call);
+
+ *_call = call;
+
+ return 0;
+} /* end __rxrpc_create_call() */
+
+/*****************************************************************************/
+/*
+ * create a new call record for outgoing calls
+ */
+int rxrpc_create_call(struct rxrpc_connection *conn,
+ rxrpc_call_attn_func_t attn,
+ rxrpc_call_error_func_t error,
+ rxrpc_call_aemap_func_t aemap,
+ struct rxrpc_call **_call)
+{
+ DECLARE_WAITQUEUE(myself,current);
+
+ struct rxrpc_call *call;
+ int ret, cix, loop;
+
+ _enter("%p",conn);
+
+ /* allocate and initialise a call record */
+ ret = __rxrpc_create_call(conn,&call);
+ if (ret<0) {
+ _leave(" = %d",ret);
+ return ret;
+ }
+
+ call->app_call_state = RXRPC_CSTATE_CLNT_SND_ARGS;
+ if (attn) call->app_attn_func = attn;
+ if (error) call->app_error_func = error;
+ if (aemap) call->app_aemap_func = aemap;
+
+ _state(call);
+
+ spin_lock(&conn->lock);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&conn->chanwait,&myself);
+
+ try_again:
+ /* try to find an unused channel */
+ for (cix=0; cix<4; cix++)
+ if (!conn->channels[cix])
+ goto obtained_chan;
+
+ /* no free channels - wait for one to become available */
+ ret = -EINTR;
+ if (signal_pending(current))
+ goto error_unwait;
+
+ spin_unlock(&conn->lock);
+
+ schedule();
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ spin_lock(&conn->lock);
+ goto try_again;
+
+ /* got a channel - now attach to the connection */
+ obtained_chan:
+ remove_wait_queue(&conn->chanwait,&myself);
+ set_current_state(TASK_RUNNING);
+
+ /* concoct a unique call number */
+ next_callid:
+ call->call_id = htonl(++conn->call_counter);
+ for (loop=0; loop<4; loop++)
+ if (conn->channels[loop] && conn->channels[loop]->call_id==call->call_id)
+ goto next_callid;
+
+ rxrpc_get_connection(conn);
+ conn->channels[cix] = call; /* assign _after_ done callid check loop */
+ do_gettimeofday(&conn->atime);
+ call->chan_ix = htonl(cix);
+
+ spin_unlock(&conn->lock);
+
+ down_write(&rxrpc_calls_sem);
+ list_add_tail(&call->call_link,&rxrpc_calls);
+ up_write(&rxrpc_calls_sem);
+
+ __RXACCT(atomic_inc(&rxrpc_call_count));
+ *_call = call;
+
+ _leave(" = 0 (call=%p cix=%u)",call,cix);
+ return 0;
+
+ error_unwait:
+ remove_wait_queue(&conn->chanwait,&myself);
+ set_current_state(TASK_RUNNING);
+ spin_unlock(&conn->lock);
+
+ free_page((unsigned long)call);
+ _leave(" = %d",ret);
+ return ret;
+
+} /* end rxrpc_create_call() */
+
+/*****************************************************************************/
+/*
+ * create a new call record for incoming calls
+ */
+int rxrpc_incoming_call(struct rxrpc_connection *conn,
+ struct rxrpc_message *msg,
+ struct rxrpc_call **_call)
+{
+ struct rxrpc_call *call;
+ unsigned cix;
+ int ret;
+
+ cix = ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK;
+
+ _enter("%p,%u,%u",conn,ntohl(msg->hdr.callNumber),cix);
+
+ /* allocate and initialise a call record */
+ ret = __rxrpc_create_call(conn,&call);
+ if (ret<0) {
+ _leave(" = %d",ret);
+ return ret;
+ }
+
+ call->pkt_rcv_count = 1;
+ call->app_call_state = RXRPC_CSTATE_SRVR_RCV_OPID;
+ call->app_mark = sizeof(u32);
+
+ _state(call);
+
+ /* attach to the connection */
+ ret = -EBUSY;
+ call->chan_ix = htonl(cix);
+ call->call_id = msg->hdr.callNumber;
+
+ spin_lock(&conn->lock);
+
+ if (!conn->channels[cix]) {
+ conn->channels[cix] = call;
+ rxrpc_get_connection(conn);
+ ret = 0;
+ }
+
+ spin_unlock(&conn->lock);
+
+ if (ret<0) free_page((unsigned long)call);
+
+ _leave(" = %p",call);
+
+ if (ret==0) {
+ down_write(&rxrpc_calls_sem);
+ list_add_tail(&call->call_link,&rxrpc_calls);
+ up_write(&rxrpc_calls_sem);
+ __RXACCT(atomic_inc(&rxrpc_call_count));
+ *_call = call;
+ }
+
+ return ret;
+} /* end rxrpc_incoming_call() */
+
+/*****************************************************************************/
+/*
+ * free a call record
+ */
+void rxrpc_put_call(struct rxrpc_call *call)
+{
+ struct rxrpc_connection *conn = call->conn;
+ struct rxrpc_message *msg;
+
+ _enter("%p{u=%d}",call,atomic_read(&call->usage));
+
+ /* sanity check */
+ if (atomic_read(&call->usage)<=0)
+ BUG();
+
+ /* to prevent a race, the decrement and the de-list must be effectively atomic */
+ spin_lock(&conn->lock);
+ if (likely(!atomic_dec_and_test(&call->usage))) {
+ spin_unlock(&conn->lock);
+ _leave("");
+ return;
+ }
+
+ conn->channels[ntohl(call->chan_ix)] = NULL;
+
+ spin_unlock(&conn->lock);
+
+ wake_up(&conn->chanwait);
+
+ rxrpc_put_connection(conn);
+
+ /* clear the timers and dequeue from krxiod */
+ del_timer_sync(&call->acks_timeout);
+ del_timer_sync(&call->rcv_timeout);
+ del_timer_sync(&call->ackr_dfr_timo);
+
+ rxrpc_krxiod_dequeue_call(call);
+
+ /* clean up the contents of the struct */
+ if (call->snd_nextmsg)
+ rxrpc_put_message(call->snd_nextmsg);
+
+ if (call->snd_ping)
+ rxrpc_put_message(call->snd_ping);
+
+ while (!list_empty(&call->acks_pendq)) {
+ msg = list_entry(call->acks_pendq.next,struct rxrpc_message,link);
+ list_del(&msg->link);
+ rxrpc_put_message(msg);
+ }
+
+ while (!list_empty(&call->rcv_receiveq)) {
+ msg = list_entry(call->rcv_receiveq.next,struct rxrpc_message,link);
+ list_del(&msg->link);
+ rxrpc_put_message(msg);
+ }
+
+ while (!list_empty(&call->app_readyq)) {
+ msg = list_entry(call->app_readyq.next,struct rxrpc_message,link);
+ list_del(&msg->link);
+ rxrpc_put_message(msg);
+ }
+
+ while (!list_empty(&call->app_unreadyq)) {
+ msg = list_entry(call->app_unreadyq.next,struct rxrpc_message,link);
+ list_del(&msg->link);
+ rxrpc_put_message(msg);
+ }
+
+ if (call->owner) __MOD_DEC_USE_COUNT(call->owner);
+
+ down_write(&rxrpc_calls_sem);
+ list_del(&call->call_link);
+ up_write(&rxrpc_calls_sem);
+
+ __RXACCT(atomic_dec(&rxrpc_call_count));
+ free_page((unsigned long)call);
+
+ _leave(" [destroyed]");
+} /* end rxrpc_put_call() */
+
+/*****************************************************************************/
+/*
+ * actually generate a normal ACK
+ */
+static inline int __rxrpc_call_gen_normal_ACK(struct rxrpc_call *call, rxrpc_seq_t seq)
+{
+ struct rxrpc_message *msg;
+ struct iovec diov[3];
+ unsigned aux[4];
+ int delta, ret;
+
+ /* ACKs default to DELAY */
+ if (!call->ackr.reason)
+ call->ackr.reason = RXRPC_ACK_DELAY;
+
+ _proto("Rx %05lu Sending ACK { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
+ jiffies - call->cjif,
+ ntohs(call->ackr.maxSkew),
+ ntohl(call->ackr.firstPacket),
+ ntohl(call->ackr.previousPacket),
+ ntohl(call->ackr.serial),
+ rxrpc_acks[call->ackr.reason],
+ call->ackr.nAcks);
+
+ aux[0] = htonl(call->conn->peer->if_mtu); /* interface MTU */
+ aux[1] = htonl(1444); /* max MTU */
+ aux[2] = htonl(16); /* rwind */
+ aux[3] = htonl(4); /* max packets */
+
+ diov[0].iov_len = sizeof(struct rxrpc_ackpacket);
+ diov[0].iov_base = &call->ackr;
+ diov[1].iov_len = (call->ackr_pend_cnt+3);
+ diov[1].iov_base = call->ackr_array;
+ diov[2].iov_len = sizeof(aux);
+ diov[2].iov_base = &aux;
+
+ /* build and send the message */
+ ret = rxrpc_conn_newmsg(call->conn,call,RXRPC_PACKET_TYPE_ACK,3,diov,GFP_KERNEL,&msg);
+ if (ret<0)
+ goto out;
+
+ msg->seq = seq;
+ msg->hdr.seq = htonl(seq);
+ msg->hdr.flags |= RXRPC_SLOW_START_OK;
+
+ ret = rxrpc_conn_sendmsg(call->conn,msg);
+ rxrpc_put_message(msg);
+ if (ret<0)
+ goto out;
+ call->pkt_snd_count++;
+
+ /* count how many actual ACKs there were at the front */
+ for (delta=0; delta<call->ackr_pend_cnt; delta++)
+ if (call->ackr_array[delta]!=RXRPC_ACK_TYPE_ACK)
+ break;
+
+ call->ackr_pend_cnt -= delta; /* all ACK'd to this point */
+
+ /* crank the ACK window around */
+ if (delta==0) {
+ /* un-ACK'd window */
+ }
+ else if (delta < RXRPC_CALL_ACK_WINDOW_SIZE) {
+ /* partially ACK'd window
+ * - shuffle down to avoid losing out-of-sequence packets
+ */
+ call->ackr_win_bot += delta;
+ call->ackr_win_top += delta;
+
+ memmove(&call->ackr_array[0],
+ &call->ackr_array[delta],
+ call->ackr_pend_cnt);
+
+ memset(&call->ackr_array[call->ackr_pend_cnt],
+ RXRPC_ACK_TYPE_NACK,
+ sizeof(call->ackr_array) - call->ackr_pend_cnt);
+ }
+ else {
+ /* fully ACK'd window
+ * - just clear the whole thing
+ */
+ memset(&call->ackr_array,RXRPC_ACK_TYPE_NACK,sizeof(call->ackr_array));
+ }
+
+ /* clear this ACK */
+ memset(&call->ackr,0,sizeof(call->ackr));
+
+ out:
+ if (!call->app_call_state) printk("___ STATE 0 ___\n");
+ return ret;
+} /* end __rxrpc_call_gen_normal_ACK() */
+
+/*****************************************************************************/
+/*
+ * note the reception of a packet in the call's ACK records and generate an appropriate ACK packet
+ * if necessary
+ * - returns 0 if packet should be processed, 1 if packet should be ignored and -ve on an error
+ */
+static int rxrpc_call_generate_ACK(struct rxrpc_call *call,
+ struct rxrpc_header *hdr,
+ struct rxrpc_ackpacket *ack)
+{
+ struct rxrpc_message *msg;
+ rxrpc_seq_t seq;
+ unsigned offset;
+ int ret = 0, err;
+ u8 special_ACK, do_ACK, force;
+
+ _enter("%p,%p { seq=%d tp=%d fl=%02x }",call,hdr,ntohl(hdr->seq),hdr->type,hdr->flags);
+
+ seq = ntohl(hdr->seq);
+ offset = seq - call->ackr_win_bot;
+ do_ACK = RXRPC_ACK_DELAY;
+ special_ACK = 0;
+ force = (seq==1);
+
+ if (call->ackr_high_seq < seq)
+ call->ackr_high_seq = seq;
+
+ /* deal with generation of obvious special ACKs first */
+ if (ack && ack->reason==RXRPC_ACK_PING) {
+ special_ACK = RXRPC_ACK_PING_RESPONSE;
+ ret = 1;
+ goto gen_ACK;
+ }
+
+ if (seq < call->ackr_win_bot) {
+ special_ACK = RXRPC_ACK_DUPLICATE;
+ ret = 1;
+ goto gen_ACK;
+ }
+
+ if (seq >= call->ackr_win_top) {
+ special_ACK = RXRPC_ACK_EXCEEDS_WINDOW;
+ ret = 1;
+ goto gen_ACK;
+ }
+
+ if (call->ackr_array[offset] != RXRPC_ACK_TYPE_NACK) {
+ special_ACK = RXRPC_ACK_DUPLICATE;
+ ret = 1;
+ goto gen_ACK;
+ }
+
+ /* okay... it's a normal data packet inside the ACK window */
+ call->ackr_array[offset] = RXRPC_ACK_TYPE_ACK;
+
+ if (offset<call->ackr_pend_cnt) {
+ }
+ else if (offset>call->ackr_pend_cnt) {
+ do_ACK = RXRPC_ACK_OUT_OF_SEQUENCE;
+ call->ackr_pend_cnt = offset;
+ goto gen_ACK;
+ }
+
+ if (hdr->flags & RXRPC_REQUEST_ACK) {
+ do_ACK = RXRPC_ACK_REQUESTED;
+ }
+
+ /* generate an ACK on the final packet of a reply just received */
+ if (hdr->flags & RXRPC_LAST_PACKET) {
+ if (call->conn->out_clientflag)
+ force = 1;
+ }
+ else if (!(hdr->flags & RXRPC_MORE_PACKETS)) {
+ do_ACK = RXRPC_ACK_REQUESTED;
+ }
+
+ /* re-ACK packets previously received out-of-order */
+ for (offset++; offset<RXRPC_CALL_ACK_WINDOW_SIZE; offset++)
+ if (call->ackr_array[offset]!=RXRPC_ACK_TYPE_ACK)
+ break;
+
+ call->ackr_pend_cnt = offset;
+
+ /* generate an ACK if we fill up the window */
+ if (call->ackr_pend_cnt >= RXRPC_CALL_ACK_WINDOW_SIZE)
+ force = 1;
+
+ gen_ACK:
+ _debug("%05lu ACKs pend=%u norm=%s special=%s%s",
+ jiffies - call->cjif,
+ call->ackr_pend_cnt,rxrpc_acks[do_ACK],rxrpc_acks[special_ACK],
+ force ? " immediate" :
+ do_ACK==RXRPC_ACK_REQUESTED ? " merge-req" :
+ hdr->flags & RXRPC_LAST_PACKET ? " finalise" :
+ " defer"
+ );
+
+ /* send any pending normal ACKs if need be */
+ if (call->ackr_pend_cnt>0) {
+ /* fill out the appropriate form */
+ call->ackr.bufferSpace = htons(RXRPC_CALL_ACK_WINDOW_SIZE);
+ call->ackr.maxSkew = htons(min(call->ackr_high_seq - seq,65535U));
+ call->ackr.firstPacket = htonl(call->ackr_win_bot);
+ call->ackr.previousPacket = call->ackr_prev_seq;
+ call->ackr.serial = hdr->serial;
+ call->ackr.nAcks = call->ackr_pend_cnt;
+
+ if (do_ACK==RXRPC_ACK_REQUESTED)
+ call->ackr.reason = do_ACK;
+
+ /* generate the ACK immediately if necessary */
+ if (special_ACK || force) {
+ err = __rxrpc_call_gen_normal_ACK(call,do_ACK==RXRPC_ACK_DELAY ? 0 : seq);
+ if (err<0) {
+ ret = err;
+ goto out;
+ }
+ }
+ }
+
+ if (call->ackr.reason==RXRPC_ACK_REQUESTED)
+ call->ackr_dfr_seq = seq;
+
+ /* start the ACK timer if not running if there are any pending deferred ACKs */
+ if (call->ackr_pend_cnt>0 &&
+ call->ackr.reason!=RXRPC_ACK_REQUESTED &&
+ !timer_pending(&call->ackr_dfr_timo)
+ ) {
+ unsigned long timo;
+
+ timo = rxrpc_call_dfr_ack_timeout + jiffies;
+
+ _debug("START ACKR TIMER for cj=%lu",timo-call->cjif);
+
+ spin_lock(&call->lock);
+ mod_timer(&call->ackr_dfr_timo,timo);
+ spin_unlock(&call->lock);
+ }
+ else if ((call->ackr_pend_cnt==0 || call->ackr.reason==RXRPC_ACK_REQUESTED) &&
+ timer_pending(&call->ackr_dfr_timo)
+ ) {
+ /* stop timer if no pending ACKs */
+ _debug("CLEAR ACKR TIMER");
+ del_timer_sync(&call->ackr_dfr_timo);
+ }
+
+ /* send a special ACK if one is required */
+ if (special_ACK) {
+ struct rxrpc_ackpacket ack;
+ struct iovec diov[2];
+ u8 acks[1] = { RXRPC_ACK_TYPE_ACK };
+
+ /* fill out the appropriate form */
+ ack.bufferSpace = htons(RXRPC_CALL_ACK_WINDOW_SIZE);
+ ack.maxSkew = htons(min(call->ackr_high_seq - seq,65535U));
+ ack.firstPacket = htonl(call->ackr_win_bot);
+ ack.previousPacket = call->ackr_prev_seq;
+ ack.serial = hdr->serial;
+ ack.reason = special_ACK;
+ ack.nAcks = 0;
+ //ack.nAcks = special_ACK==RXRPC_ACK_OUT_OF_SEQUENCE ? 0 : hdr->seq ? 1 : 0;
+
+ _proto("Rx Sending s-ACK { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
+ ntohs(ack.maxSkew),ntohl(ack.firstPacket),ntohl(ack.previousPacket),
+ ntohl(ack.serial),rxrpc_acks[ack.reason],ack.nAcks);
+
+ diov[0].iov_len = sizeof(struct rxrpc_ackpacket);
+ diov[0].iov_base = &ack;
+ diov[1].iov_len = sizeof(acks);
+ diov[1].iov_base = acks;
+
+ /* build and send the message */
+ err = rxrpc_conn_newmsg(call->conn,call,RXRPC_PACKET_TYPE_ACK,
+ hdr->seq ? 2 : 1,diov,
+ GFP_KERNEL,
+ &msg);
+ if (err<0) {
+ ret = err;
+ goto out;
+ }
+
+ msg->seq = seq;
+ msg->hdr.seq = htonl(seq);
+ msg->hdr.flags |= RXRPC_SLOW_START_OK;
+
+ err = rxrpc_conn_sendmsg(call->conn,msg);
+ rxrpc_put_message(msg);
+ if (err<0) {
+ ret = err;
+ goto out;
+ }
+ call->pkt_snd_count++;
+ }
+
+ out:
+ if (hdr->seq)
+ call->ackr_prev_seq = hdr->seq;
+
+ _leave(" = %d",ret);
+ return ret;
+} /* end rxrpc_call_generate_ACK() */
+
+/*****************************************************************************/
+/*
+ * handle work to be done on a call
+ * - includes packet reception and timeout processing
+ */
+void rxrpc_call_do_stuff(struct rxrpc_call *call)
+{
+ _enter("%p{flags=%lx}",call,call->flags);
+
+ /* handle packet reception */
+ if (call->flags & RXRPC_CALL_RCV_PKT) {
+ _debug("- receive packet");
+ call->flags &= ~RXRPC_CALL_RCV_PKT;
+ rxrpc_call_receive_packet(call);
+ }
+
+ /* handle overdue ACKs */
+ if (call->flags & RXRPC_CALL_ACKS_TIMO) {
+ _debug("- overdue ACK timeout");
+ call->flags &= ~RXRPC_CALL_ACKS_TIMO;
+ rxrpc_call_resend(call,call->snd_seq_count);
+ }
+
+ /* handle lack of reception */
+ if (call->flags & RXRPC_CALL_RCV_TIMO) {
+ _debug("- reception timeout");
+ call->flags &= ~RXRPC_CALL_RCV_TIMO;
+ rxrpc_call_abort(call,-EIO);
+ }
+
+ /* handle deferred ACKs */
+ if (call->flags & RXRPC_CALL_ACKR_TIMO ||
+ (call->ackr.nAcks>0 && call->ackr.reason==RXRPC_ACK_REQUESTED)
+ ) {
+ _debug("- deferred ACK timeout: cj=%05lu r=%s n=%u",
+ jiffies - call->cjif,
+ rxrpc_acks[call->ackr.reason],
+ call->ackr.nAcks);
+
+ call->flags &= ~RXRPC_CALL_ACKR_TIMO;
+
+ if (call->ackr.nAcks>0 && call->app_call_state!=RXRPC_CSTATE_ERROR) {
+ /* generate ACK */
+ __rxrpc_call_gen_normal_ACK(call,call->ackr_dfr_seq);
+ call->ackr_dfr_seq = 0;
+ }
+ }
+
+ _leave("");
+
+} /* end rxrpc_call_do_timeout() */
+
+/*****************************************************************************/
+/*
+ * send an abort message at call or connection level
+ * - must be called with call->lock held
+ * - the supplied error code is sent as the packet data
+ */
+static int __rxrpc_call_abort(struct rxrpc_call *call, int errno)
+{
+ struct rxrpc_connection *conn = call->conn;
+ struct rxrpc_message *msg;
+ struct iovec diov[1];
+ int ret;
+ u32 _error;
+
+ _enter("%p{%08x},%p{%d},%d",conn,ntohl(conn->conn_id),call,ntohl(call->call_id),errno);
+
+ /* if this call is already aborted, then just wake up any waiters */
+ if (call->app_call_state==RXRPC_CSTATE_ERROR) {
+ spin_unlock(&call->lock);
+ call->app_error_func(call);
+ _leave(" = 0");
+ return 0;
+ }
+
+ rxrpc_get_call(call);
+
+ /* change the state _with_ the lock still held */
+ call->app_call_state = RXRPC_CSTATE_ERROR;
+ call->app_err_state = RXRPC_ESTATE_LOCAL_ABORT;
+ call->app_errno = errno;
+ call->app_mark = RXRPC_APP_MARK_EOF;
+ call->app_read_buf = NULL;
+ call->app_async_read = 0;
+
+ _state(call);
+
+ /* ask the app to translate the error code */
+ call->app_aemap_func(call);
+
+ spin_unlock(&call->lock);
+
+ /* flush any outstanding ACKs */
+ del_timer_sync(&call->acks_timeout);
+ del_timer_sync(&call->rcv_timeout);
+ del_timer_sync(&call->ackr_dfr_timo);
+
+ if (rxrpc_call_is_ack_pending(call))
+ __rxrpc_call_gen_normal_ACK(call,0);
+
+ /* send the abort packet only if we actually traded some other packets */
+ ret = 0;
+ if (call->pkt_snd_count || call->pkt_rcv_count) {
+ /* actually send the abort */
+ _proto("Rx Sending Call ABORT { data=%d }",call->app_abort_code);
+
+ _error = htonl(call->app_abort_code);
+
+ diov[0].iov_len = sizeof(_error);
+ diov[0].iov_base = &_error;
+
+ ret = rxrpc_conn_newmsg(conn,call,RXRPC_PACKET_TYPE_ABORT,1,diov,GFP_KERNEL,&msg);
+ if (ret==0) {
+ ret = rxrpc_conn_sendmsg(conn,msg);
+ rxrpc_put_message(msg);
+ }
+ }
+
+ /* tell the app layer to let go */
+ call->app_error_func(call);
+
+ rxrpc_put_call(call);
+
+ _leave(" = %d",ret);
+
+ return ret;
+} /* end __rxrpc_call_abort() */
+
+/*****************************************************************************/
+/*
+ * send an abort message at call or connection level
+ * - the supplied error code is sent as the packet data
+ */
+int rxrpc_call_abort(struct rxrpc_call *call, int error)
+{
+ spin_lock(&call->lock);
+
+ return __rxrpc_call_abort(call,error);
+
+} /* end rxrpc_call_abort() */
+
+/*****************************************************************************/
+/*
+ * process packets waiting for this call
+ */
+static void rxrpc_call_receive_packet(struct rxrpc_call *call)
+{
+ struct rxrpc_message *msg;
+ struct list_head *_p;
+ u32 data32;
+
+ _enter("%p",call);
+
+ rxrpc_get_call(call); /* must not go away too soon if aborted by app-layer */
+
+ while (!list_empty(&call->rcv_receiveq)) {
+ /* try to get next packet */
+ _p = NULL;
+ spin_lock(&call->lock);
+ if (!list_empty(&call->rcv_receiveq)) {
+ _p = call->rcv_receiveq.next;
+ list_del_init(_p);
+ }
+ spin_unlock(&call->lock);
+
+ if (!_p) break;
+
+ msg = list_entry(_p,struct rxrpc_message,link);
+
+ _proto("Rx %05lu Received %s packet (%%%u,#%u,%c%c%c%c%c)",
+ jiffies - call->cjif,
+ rxrpc_pkts[msg->hdr.type],
+ ntohl(msg->hdr.serial),
+ msg->seq,
+ msg->hdr.flags & RXRPC_JUMBO_PACKET ? 'j' : '-',
+ msg->hdr.flags & RXRPC_MORE_PACKETS ? 'm' : '-',
+ msg->hdr.flags & RXRPC_LAST_PACKET ? 'l' : '-',
+ msg->hdr.flags & RXRPC_REQUEST_ACK ? 'r' : '-',
+ msg->hdr.flags & RXRPC_CLIENT_INITIATED ? 'C' : 'S'
+ );
+
+ switch (msg->hdr.type) {
+ /* deal with data packets */
+ case RXRPC_PACKET_TYPE_DATA:
+ /* ACK the packet if necessary */
+ switch (rxrpc_call_generate_ACK(call,&msg->hdr,NULL)) {
+ case 0: /* useful packet */
+ rxrpc_call_receive_data_packet(call,msg);
+ break;
+ case 1: /* duplicate or out-of-window packet */
+ break;
+ default:
+ rxrpc_put_message(msg);
+ goto out;
+ }
+ break;
+
+ /* deal with ACK packets */
+ case RXRPC_PACKET_TYPE_ACK:
+ rxrpc_call_receive_ack_packet(call,msg);
+ break;
+
+ /* deal with abort packets */
+ case RXRPC_PACKET_TYPE_ABORT:
+ data32 = 0;
+ if (skb_copy_bits(msg->pkt,msg->offset,&data32,sizeof(data32))<0) {
+ printk("Rx Received short ABORT packet\n");
+ }
+ else {
+ data32 = ntohl(data32);
+ }
+
+ _proto("Rx Received Call ABORT { data=%d }",data32);
+
+ spin_lock(&call->lock);
+ call->app_call_state = RXRPC_CSTATE_ERROR;
+ call->app_err_state = RXRPC_ESTATE_PEER_ABORT;
+ call->app_abort_code = data32;
+ call->app_errno = -ECONNABORTED;
+ call->app_mark = RXRPC_APP_MARK_EOF;
+ call->app_read_buf = NULL;
+ call->app_async_read = 0;
+
+ /* ask the app to translate the error code */
+ call->app_aemap_func(call);
+ _state(call);
+ spin_unlock(&call->lock);
+ call->app_error_func(call);
+ break;
+
+ default:
+ /* deal with other packet types */
+ _proto("Rx Unsupported packet type %u (#%u)",msg->hdr.type,msg->seq);
+ break;
+ }
+
+ rxrpc_put_message(msg);
+ }
+
+ out:
+ rxrpc_put_call(call);
+ _leave("");
+} /* end rxrpc_call_receive_packet() */
+
+/*****************************************************************************/
+/*
+ * process next data packet
+ * - as the next data packet arrives:
+ * - it is queued on app_readyq _if_ it is the next one expected (app_ready_seq+1)
+ * - it is queued on app_unreadyq _if_ it is not the next one expected
+ * - if a packet placed on app_readyq completely fills a hole leading up to the first packet
+ * on app_unreadyq, then packets now in sequence are tranferred to app_readyq
+ * - the application layer can only see packets on app_readyq (app_ready_qty bytes)
+ * - the application layer is prodded every time a new packet arrives
+ */
+static void rxrpc_call_receive_data_packet(struct rxrpc_call *call, struct rxrpc_message *msg)
+{
+ const struct rxrpc_operation *optbl, *op;
+ struct rxrpc_message *pmsg;
+ struct list_head *_p;
+ int ret, lo, hi, rmtimo;
+ u32 opid;
+
+ _enter("%p{%u},%p{%u}",call,ntohl(call->call_id),msg,msg->seq);
+
+ rxrpc_get_message(msg);
+
+ /* add to the unready queue if we'd have to create a hole in the ready queue otherwise */
+ if (msg->seq != call->app_ready_seq+1) {
+ _debug("Call add packet %d to unreadyq",msg->seq);
+
+ /* insert in seq order */
+ list_for_each(_p,&call->app_unreadyq) {
+ pmsg = list_entry(_p,struct rxrpc_message,link);
+ if (pmsg->seq>msg->seq)
+ break;
+ }
+
+ list_add_tail(&msg->link,_p);
+
+ _leave(" [unreadyq]");
+ return;
+ }
+
+ /* next in sequence - simply append into the call's ready queue */
+ _debug("Call add packet %d to readyq (+%d => %d bytes)",
+ msg->seq,msg->dsize,call->app_ready_qty);
+
+ spin_lock(&call->lock);
+ call->app_ready_seq = msg->seq;
+ call->app_ready_qty += msg->dsize;
+ list_add_tail(&msg->link,&call->app_readyq);
+
+ /* move unready packets to the readyq if we got rid of a hole */
+ while (!list_empty(&call->app_unreadyq)) {
+ pmsg = list_entry(call->app_unreadyq.next,struct rxrpc_message,link);
+
+ if (pmsg->seq != call->app_ready_seq+1)
+ break;
+
+ /* next in sequence - just move list-to-list */
+ _debug("Call transfer packet %d to readyq (+%d => %d bytes)",
+ pmsg->seq,pmsg->dsize,call->app_ready_qty);
+
+ call->app_ready_seq = pmsg->seq;
+ call->app_ready_qty += pmsg->dsize;
+ list_del_init(&pmsg->link);
+ list_add_tail(&pmsg->link,&call->app_readyq);
+ }
+
+ /* see if we've got the last packet yet */
+ if (!list_empty(&call->app_readyq)) {
+ pmsg = list_entry(call->app_readyq.prev,struct rxrpc_message,link);
+ if (pmsg->hdr.flags & RXRPC_LAST_PACKET) {
+ call->app_last_rcv = 1;
+ _debug("Last packet on readyq");
+ }
+ }
+
+ switch (call->app_call_state) {
+ /* do nothing if call already aborted */
+ case RXRPC_CSTATE_ERROR:
+ spin_unlock(&call->lock);
+ _leave(" [error]");
+ return;
+
+ /* extract the operation ID from an incoming call if that's not yet been done */
+ case RXRPC_CSTATE_SRVR_RCV_OPID:
+ spin_unlock(&call->lock);
+
+ /* handle as yet insufficient data for the operation ID */
+ if (call->app_ready_qty<4) {
+ if (call->app_last_rcv)
+ rxrpc_call_abort(call,-EINVAL); /* trouble - last packet seen */
+
+ _leave("");
+ return;
+ }
+
+ /* pull the operation ID out of the buffer */
+ ret = rxrpc_call_read_data(call,&opid,sizeof(opid),0);
+ if (ret<0) {
+ printk("Unexpected error from read-data: %d\n",ret);
+ if (call->app_call_state!=RXRPC_CSTATE_ERROR)
+ rxrpc_call_abort(call,ret);
+ _leave("");
+ return;
+ }
+ call->app_opcode = ntohl(opid);
+
+ /* locate the operation in the available ops table */
+ optbl = call->conn->service->ops_begin;
+ lo = 0;
+ hi = call->conn->service->ops_end - optbl;
+
+ while (lo<hi) {
+ int mid = (hi+lo) / 2;
+ op = &optbl[mid];
+ if (call->app_opcode==op->id)
+ goto found_op;
+ if (call->app_opcode>op->id)
+ lo = mid+1;
+ else
+ hi = mid;
+ }
+
+ /* search failed */
+ kproto("Rx Client requested operation %d from %s service",
+ call->app_opcode,call->conn->service->name);
+ rxrpc_call_abort(call,-EINVAL);
+ _leave(" [inval]");
+ return;
+
+ found_op:
+ _proto("Rx Client requested operation %s from %s service",
+ op->name,call->conn->service->name);
+
+ /* we're now waiting for the argument block (unless the call was aborted) */
+ spin_lock(&call->lock);
+ if (call->app_call_state==RXRPC_CSTATE_SRVR_RCV_OPID ||
+ call->app_call_state==RXRPC_CSTATE_SRVR_SND_REPLY) {
+ if (!call->app_last_rcv)
+ call->app_call_state = RXRPC_CSTATE_SRVR_RCV_ARGS;
+ else if (call->app_ready_qty>0)
+ call->app_call_state = RXRPC_CSTATE_SRVR_GOT_ARGS;
+ else
+ call->app_call_state = RXRPC_CSTATE_SRVR_SND_REPLY;
+ call->app_mark = op->asize;
+ call->app_user = op->user;
+ }
+ spin_unlock(&call->lock);
+
+ _state(call);
+ break;
+
+ case RXRPC_CSTATE_SRVR_RCV_ARGS:
+ /* change state if just received last packet of arg block */
+ if (call->app_last_rcv)
+ call->app_call_state = RXRPC_CSTATE_SRVR_GOT_ARGS;
+ spin_unlock(&call->lock);
+
+ _state(call);
+ break;
+
+ case RXRPC_CSTATE_CLNT_RCV_REPLY:
+ /* change state if just received last packet of reply block */
+ rmtimo = 0;
+ if (call->app_last_rcv) {
+ call->app_call_state = RXRPC_CSTATE_CLNT_GOT_REPLY;
+ rmtimo = 1;
+ }
+ spin_unlock(&call->lock);
+
+ if (rmtimo) {
+ del_timer_sync(&call->acks_timeout);
+ del_timer_sync(&call->rcv_timeout);
+ del_timer_sync(&call->ackr_dfr_timo);
+ }
+
+ _state(call);
+ break;
+
+ default:
+ /* deal with data reception in an unexpected state */
+ printk("Unexpected state [[[ %u ]]]\n",call->app_call_state);
+ __rxrpc_call_abort(call,-EBADMSG);
+ _leave("");
+ return;
+ }
+
+ if (call->app_call_state==RXRPC_CSTATE_CLNT_RCV_REPLY && call->app_last_rcv)
+ BUG();
+
+ /* otherwise just invoke the data function whenever we can satisfy its desire for more
+ * data
+ */
+ _proto("Rx Received Op Data: st=%u qty=%u mk=%u%s",
+ call->app_call_state,call->app_ready_qty,call->app_mark,
+ call->app_last_rcv ? " last-rcvd" : "");
+
+ spin_lock(&call->lock);
+
+ ret = __rxrpc_call_read_data(call);
+ switch (ret) {
+ case 0:
+ spin_unlock(&call->lock);
+ call->app_attn_func(call);
+ break;
+ case -EAGAIN:
+ spin_unlock(&call->lock);
+ break;
+ case -ECONNABORTED:
+ spin_unlock(&call->lock);
+ break;
+ default:
+ __rxrpc_call_abort(call,ret);
+ break;
+ }
+
+ _state(call);
+
+ _leave("");
+
+} /* end rxrpc_call_receive_data_packet() */
+
+/*****************************************************************************/
+/*
+ * received an ACK packet
+ * - extracts the ACK record from the message, uses the serial of the packet
+ *   that prompted the ACK for RTT measurement, then dispatches on the ACK
+ *   reason to record hard/soft acknowledgements or answer a ping
+ */
+static void rxrpc_call_receive_ack_packet(struct rxrpc_call *call, struct rxrpc_message *msg)
+{
+ struct rxrpc_ackpacket ack;
+ rxrpc_serial_t serial;
+ rxrpc_seq_t seq;
+ int ret;
+
+ _enter("%p{%u},%p{%u}",call,ntohl(call->call_id),msg,msg->seq);
+
+ /* extract the basic ACK record */
+ if (skb_copy_bits(msg->pkt,msg->offset,&ack,sizeof(ack))<0) {
+ printk("Rx Received short ACK packet\n");
+ return;
+ }
+ msg->offset += sizeof(ack);
+
+ /* the prompting packet's serial is kept in network byte order here; it is
+ * only ever compared against hdr.serial values in the same form */
+ serial = ack.serial;
+ seq = ntohl(ack.firstPacket);
+
+ _proto("Rx Received ACK %%%d { b=%hu m=%hu f=%u p=%u s=%u r=%s n=%u }",
+ ntohl(msg->hdr.serial),
+ ntohs(ack.bufferSpace),
+ ntohs(ack.maxSkew),
+ seq,
+ ntohl(ack.previousPacket),
+ ntohl(serial),
+ rxrpc_acks[ack.reason],
+ call->ackr.nAcks
+ );
+
+ /* check the other side isn't ACK'ing a sequence number I haven't sent yet */
+ if (ack.nAcks>0 && (seq > call->snd_seq_count || seq+ack.nAcks-1 > call->snd_seq_count)) {
+ printk("Received ACK (#%u-#%u) for unsent packet\n",seq,seq+ack.nAcks-1);
+ rxrpc_call_abort(call,-EINVAL);
+ _leave("");
+ return;
+ }
+
+ /* deal with RTT calculation */
+ if (serial) {
+ struct rxrpc_message *rttmsg;
+
+ /* find the prompting packet */
+ spin_lock(&call->lock);
+ if (call->snd_ping && call->snd_ping->hdr.serial==serial) {
+ /* it was a ping packet */
+ rttmsg = call->snd_ping;
+ call->snd_ping = NULL;
+ spin_unlock(&call->lock);
+
+ if (rttmsg) {
+ rttmsg->rttdone = 1;
+ rxrpc_peer_calculate_rtt(call->conn->peer,rttmsg,msg);
+ rxrpc_put_message(rttmsg);
+ }
+ }
+ else {
+ struct list_head *_p;
+
+ /* it ought to be a data packet - look in the pending ACK list */
+ list_for_each(_p,&call->acks_pendq) {
+ rttmsg = list_entry(_p,struct rxrpc_message,link);
+ if (rttmsg->hdr.serial==serial) {
+ if (rttmsg->rttdone)
+ break; /* never do RTT twice without resending */
+
+ rttmsg->rttdone = 1;
+ rxrpc_peer_calculate_rtt(call->conn->peer,rttmsg,msg);
+ break;
+ }
+ }
+ spin_unlock(&call->lock);
+ }
+ }
+
+ switch (ack.reason) {
+ /* deal with negative/positive acknowledgement of data packets */
+ case RXRPC_ACK_REQUESTED:
+ case RXRPC_ACK_DELAY:
+ case RXRPC_ACK_IDLE:
+ rxrpc_call_definitively_ACK(call,seq-1);
+
+ /* fall through - the soft-ACK bitmap is recorded for these reasons too */
+ case RXRPC_ACK_DUPLICATE:
+ case RXRPC_ACK_OUT_OF_SEQUENCE:
+ case RXRPC_ACK_EXCEEDS_WINDOW:
+ call->snd_resend_cnt = 0;
+ ret = rxrpc_call_record_ACK(call,msg,seq,ack.nAcks);
+ if (ret<0)
+ rxrpc_call_abort(call,ret);
+ break;
+
+ /* respond to ping packets immediately */
+ case RXRPC_ACK_PING:
+ rxrpc_call_generate_ACK(call,&msg->hdr,&ack);
+ break;
+
+ /* only record RTT on ping response packets */
+ case RXRPC_ACK_PING_RESPONSE:
+ if (call->snd_ping) {
+ struct rxrpc_message *rttmsg;
+
+ /* only do RTT stuff if the response matches the retained ping */
+ rttmsg = NULL;
+ spin_lock(&call->lock);
+ if (call->snd_ping && call->snd_ping->hdr.serial==ack.serial) {
+ rttmsg = call->snd_ping;
+ call->snd_ping = NULL;
+ }
+ spin_unlock(&call->lock);
+
+ if (rttmsg) {
+ rttmsg->rttdone = 1;
+ rxrpc_peer_calculate_rtt(call->conn->peer,rttmsg,msg);
+ rxrpc_put_message(rttmsg);
+ }
+ }
+ break;
+
+ default:
+ printk("Unsupported ACK reason %u\n",ack.reason);
+ break;
+ }
+
+ _leave("");
+} /* end rxrpc_call_receive_ack_packet() */
+
+/*****************************************************************************/
+/*
+ * record definitive ACKs for all messages up to and including the one with the 'highest' seq
+ * - definitively ACK'd packets are dequeued from the front of acks_pendq and discarded
+ * - if this leaves every sent packet definitively ACK'd, the call is marked complete, the
+ *   call's timers are stopped and the app's attention function is invoked
+ */
+static void rxrpc_call_definitively_ACK(struct rxrpc_call *call, rxrpc_seq_t highest)
+{
+ struct rxrpc_message *msg;
+ int now_complete;
+
+ _enter("%p{ads=%u},%u",call,call->acks_dftv_seq,highest);
+
+ while (call->acks_dftv_seq<highest) {
+ call->acks_dftv_seq++;
+
+ _proto("Definitive ACK on packet #%u",call->acks_dftv_seq);
+
+ /* discard those at front of queue until message with highest ACK is found */
+ spin_lock(&call->lock);
+ msg = NULL;
+ if (!list_empty(&call->acks_pendq)) {
+ msg = list_entry(call->acks_pendq.next,struct rxrpc_message,link);
+ list_del_init(&msg->link); /* dequeue */
+ if (msg->state==RXRPC_MSG_SENT)
+ call->acks_pend_cnt--;
+ }
+ spin_unlock(&call->lock);
+
+ /* insanity check - packets must be definitively ACK'd in sequence order,
+ * so the front of the queue must hold exactly the next sequence number */
+ if (!msg)
+ panic("%s(): acks_pendq unexpectedly empty\n",__FUNCTION__);
+
+ if (msg->seq!=call->acks_dftv_seq)
+ panic("%s(): Packet #%u expected at front of acks_pendq (#%u found)\n",
+ __FUNCTION__,call->acks_dftv_seq,msg->seq);
+
+ /* discard the message */
+ msg->state = RXRPC_MSG_DONE;
+ rxrpc_put_message(msg);
+ }
+
+ /* if all sent packets are definitively ACK'd then prod any sleepers just in case */
+ now_complete = 0;
+ spin_lock(&call->lock);
+ if (call->acks_dftv_seq==call->snd_seq_count) {
+ if (call->app_call_state!=RXRPC_CSTATE_COMPLETE) {
+ call->app_call_state = RXRPC_CSTATE_COMPLETE;
+ _state(call);
+ now_complete = 1;
+ }
+ }
+ spin_unlock(&call->lock);
+
+ if (now_complete) {
+ /* stop the timers and wake the app - done outside the lock, matching
+ * the other completion sites in this file */
+ del_timer_sync(&call->acks_timeout);
+ del_timer_sync(&call->rcv_timeout);
+ del_timer_sync(&call->ackr_dfr_timo);
+ call->app_attn_func(call);
+ }
+
+ _leave("");
+} /* end rxrpc_call_definitively_ACK() */
+
+/*****************************************************************************/
+/*
+ * record the specified amount of ACKs/NAKs
+ * - seq is the sequence number of the first packet covered by the ACK bitmap
+ * - count is the number of one-byte ACK/NAK slots that follow the ACK record in the packet
+ * - returns 0 on success or -EINVAL if the packet is short or holds an unknown ACK type
+ * - triggers a resend of NAK'd packets and, when nothing remains provisionally
+ *   unacknowledged, may complete the call and wake waiters
+ */
+static int rxrpc_call_record_ACK(struct rxrpc_call *call,
+ struct rxrpc_message *msg,
+ rxrpc_seq_t seq,
+ size_t count)
+{
+ struct rxrpc_message *dmsg;
+ struct list_head *_p;
+ rxrpc_seq_t highest;
+ unsigned ix;
+ size_t chunk;
+ char resend, now_complete;
+ u8 acks[16];
+
+ _enter("%p{apc=%u ads=%u},%p,%u,%u",
+ call,call->acks_pend_cnt,call->acks_dftv_seq,msg,seq,count);
+
+ /* handle re-ACK'ing of definitively ACK'd packets (may be out-of-order ACKs) */
+ if (seq<=call->acks_dftv_seq) {
+ unsigned delta = call->acks_dftv_seq - seq;
+
+ if (count<=delta) {
+ _leave(" = 0 [all definitively ACK'd]");
+ return 0;
+ }
+
+ /* each bitmap slot is one byte, so the packet offset advances in step */
+ seq += delta;
+ count -= delta;
+ msg->offset += delta;
+ }
+
+ highest = seq + count - 1;
+ resend = 0;
+ while (count>0) {
+ /* extract up to 16 ACK slots at a time */
+ chunk = min(count,sizeof(acks));
+ count -= chunk;
+
+ /* prefill with a value that is neither of the valid ACK type codes so
+ * unused slots are identifiable in the debug output below */
+ memset(acks,2,sizeof(acks));
+
+ if (skb_copy_bits(msg->pkt,msg->offset,&acks,chunk)<0) {
+ printk("Rx Received short ACK packet\n");
+ _leave(" = -EINVAL");
+ return -EINVAL;
+ }
+ msg->offset += chunk;
+
+ /* check that the ACK set is valid */
+ for (ix=0; ix<chunk; ix++) {
+ switch (acks[ix]) {
+ case RXRPC_ACK_TYPE_ACK:
+ break;
+ case RXRPC_ACK_TYPE_NACK:
+ resend = 1;
+ break;
+ default:
+ printk("Rx Received unsupported ACK state %u\n",acks[ix]);
+ _leave(" = -EINVAL");
+ return -EINVAL;
+ }
+ }
+
+ _proto("Rx ACK of packets #%u-#%u [%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c] (pend=%u)",
+ seq,seq+chunk-1,
+ _acktype[acks[0x0]],
+ _acktype[acks[0x1]],
+ _acktype[acks[0x2]],
+ _acktype[acks[0x3]],
+ _acktype[acks[0x4]],
+ _acktype[acks[0x5]],
+ _acktype[acks[0x6]],
+ _acktype[acks[0x7]],
+ _acktype[acks[0x8]],
+ _acktype[acks[0x9]],
+ _acktype[acks[0xA]],
+ _acktype[acks[0xB]],
+ _acktype[acks[0xC]],
+ _acktype[acks[0xD]],
+ _acktype[acks[0xE]],
+ _acktype[acks[0xF]],
+ call->acks_pend_cnt
+ );
+
+ /* mark the packets in the ACK queue as being provisionally ACK'd */
+ ix = 0;
+ spin_lock(&call->lock);
+
+ /* find the first packet ACK'd/NAK'd here */
+ list_for_each(_p,&call->acks_pendq) {
+ dmsg = list_entry(_p,struct rxrpc_message,link);
+ if (dmsg->seq==seq)
+ goto found_first;
+ _debug("- %u: skipping #%u",ix,dmsg->seq);
+ }
+ goto bad_queue;
+
+ found_first:
+ /* walk the queue in step with the bitmap; entries must be contiguous in
+ * sequence number or we bail out to bad_queue */
+ do {
+ _debug("- %u: processing #%u (%c) apc=%u",
+ ix,dmsg->seq,_acktype[acks[ix]],call->acks_pend_cnt);
+
+ if (acks[ix]==RXRPC_ACK_TYPE_ACK) {
+ if (dmsg->state==RXRPC_MSG_SENT) call->acks_pend_cnt--;
+ dmsg->state = RXRPC_MSG_ACKED;
+ }
+ else {
+ /* NAK - revert to the sent state so it counts as pending again */
+ if (dmsg->state==RXRPC_MSG_ACKED) call->acks_pend_cnt++;
+ dmsg->state = RXRPC_MSG_SENT;
+ }
+ ix++;
+ seq++;
+
+ _p = dmsg->link.next;
+ dmsg = list_entry(_p,struct rxrpc_message,link);
+ } while(ix<chunk && _p!=&call->acks_pendq && dmsg->seq==seq);
+
+ if (ix<chunk)
+ goto bad_queue;
+
+ spin_unlock(&call->lock);
+ }
+
+ if (resend)
+ rxrpc_call_resend(call,highest);
+
+ /* if all packets are provisionally ACK'd, then wake up anyone who's waiting for that */
+ now_complete = 0;
+ spin_lock(&call->lock);
+ if (call->acks_pend_cnt==0) {
+ if (call->app_call_state==RXRPC_CSTATE_SRVR_RCV_FINAL_ACK) {
+ call->app_call_state = RXRPC_CSTATE_COMPLETE;
+ _state(call);
+ }
+ now_complete = 1;
+ }
+ spin_unlock(&call->lock);
+
+ if (now_complete) {
+ _debug("- wake up waiters");
+ del_timer_sync(&call->acks_timeout);
+ del_timer_sync(&call->rcv_timeout);
+ del_timer_sync(&call->ackr_dfr_timo);
+ call->app_attn_func(call);
+ }
+
+ _leave(" = 0 (apc=%u)",call->acks_pend_cnt);
+ return 0;
+
+ bad_queue:
+ panic("%s(): acks_pendq in bad state (packet #%u absent)\n",__FUNCTION__,seq);
+
+} /* end rxrpc_call_record_ACK() */
+
+/*****************************************************************************/
+/*
+ * transfer data from the ready packet queue to the asynchronous read buffer
+ * - since this func is the only one going to look at packets queued on app_readyq, we don't need
+ * a lock to modify or access them, only to modify the queue pointers
+ * - called with call->lock held
+ * - the buffer must be in kernel space
+ * - returns:
+ * 0 if buffer filled
+ * -EAGAIN if buffer not filled and more data to come
+ * -EBADMSG if last packet received and insufficient data left
+ * -ECONNABORTED if the call has in an error state
+ */
+static int __rxrpc_call_read_data(struct rxrpc_call *call)
+{
+ struct rxrpc_message *msg;
+ size_t qty;
+ int ret;
+
+ _enter("%p{as=%d buf=%p qty=%u/%u}",
+ call,call->app_async_read,call->app_read_buf,call->app_ready_qty,call->app_mark);
+
+ /* check the state - reading is only valid in certain call states */
+ switch (call->app_call_state) {
+ case RXRPC_CSTATE_SRVR_RCV_ARGS:
+ case RXRPC_CSTATE_CLNT_RCV_REPLY:
+ if (call->app_last_rcv) {
+ printk("%s(%p,%p,%d): Inconsistent call state (%s, last pkt)",
+ __FUNCTION__,call,call->app_read_buf,call->app_mark,
+ rxrpc_call_states[call->app_call_state]);
+ BUG();
+ }
+ break;
+
+ case RXRPC_CSTATE_SRVR_RCV_OPID:
+ case RXRPC_CSTATE_SRVR_GOT_ARGS:
+ case RXRPC_CSTATE_CLNT_GOT_REPLY:
+ break;
+
+ case RXRPC_CSTATE_SRVR_SND_REPLY:
+ if (!call->app_last_rcv) {
+ printk("%s(%p,%p,%d): Inconsistent call state (%s, not last pkt)",
+ __FUNCTION__,call,call->app_read_buf,call->app_mark,
+ rxrpc_call_states[call->app_call_state]);
+ BUG();
+ }
+ _debug("Trying to read data from call in SND_REPLY state");
+ break;
+
+ case RXRPC_CSTATE_ERROR:
+ _leave(" = -ECONNABORTED");
+ return -ECONNABORTED;
+
+ default:
+ printk("reading in unexpected state [[[ %u ]]]\n",call->app_call_state);
+ BUG();
+ }
+
+ /* handle the case of not having an async buffer - just work out whether the
+ * app should be prodded, without transferring any data */
+ if (!call->app_async_read) {
+ if (call->app_mark==RXRPC_APP_MARK_EOF) {
+ ret = call->app_last_rcv ? 0 : -EAGAIN;
+ }
+ else {
+ /* NOTE(review): the sense of this comparison looks inverted
+ * (enough data available would be ready_qty >= mark) - confirm
+ * against the callers' use of app_mark */
+ if (call->app_mark >= call->app_ready_qty) {
+ call->app_mark = RXRPC_APP_MARK_EOF;
+ ret = 0;
+ }
+ else {
+ ret = call->app_last_rcv ? -EBADMSG : -EAGAIN;
+ }
+ }
+
+ _leave(" = %d [no buf]",ret);
+ return ret; /* BUGFIX: was "return 0", which discarded the status
+ * just computed and logged above */
+ }
+
+ while (!list_empty(&call->app_readyq) && call->app_mark>0) {
+ msg = list_entry(call->app_readyq.next,struct rxrpc_message,link);
+
+ /* drag as much data as we need out of this packet */
+ qty = min(call->app_mark,msg->dsize);
+
+ _debug("reading %u from skb=%p off=%lu",qty,msg->pkt,msg->offset);
+
+ if (call->app_read_buf)
+ if (skb_copy_bits(msg->pkt,msg->offset,call->app_read_buf,qty)<0)
+ panic("%s: Failed to copy data from packet: (%p,%p,%d)",
+ __FUNCTION__,call,call->app_read_buf,qty);
+
+ /* if that packet is now empty, discard it */
+ call->app_ready_qty -= qty;
+ msg->dsize -= qty;
+
+ if (msg->dsize==0) {
+ list_del_init(&msg->link);
+ rxrpc_put_message(msg);
+ }
+ else {
+ msg->offset += qty;
+ }
+
+ call->app_mark -= qty;
+ if (call->app_read_buf) call->app_read_buf += qty;
+ }
+
+ if (call->app_mark==0) {
+ /* buffer filled - clear the async read state */
+ call->app_async_read = 0;
+ call->app_mark = RXRPC_APP_MARK_EOF;
+ call->app_read_buf = NULL;
+
+ /* adjust the state if used up all packets */
+ if (list_empty(&call->app_readyq) && call->app_last_rcv) {
+ switch (call->app_call_state) {
+ case RXRPC_CSTATE_SRVR_RCV_OPID:
+ call->app_call_state = RXRPC_CSTATE_SRVR_SND_REPLY;
+ call->app_mark = RXRPC_APP_MARK_EOF;
+ _state(call);
+ del_timer_sync(&call->rcv_timeout);
+ break;
+ case RXRPC_CSTATE_SRVR_GOT_ARGS:
+ call->app_call_state = RXRPC_CSTATE_SRVR_SND_REPLY;
+ _state(call);
+ del_timer_sync(&call->rcv_timeout);
+ break;
+ default:
+ call->app_call_state = RXRPC_CSTATE_COMPLETE;
+ _state(call);
+ del_timer_sync(&call->acks_timeout);
+ del_timer_sync(&call->ackr_dfr_timo);
+ del_timer_sync(&call->rcv_timeout);
+ break;
+ }
+ }
+
+ _leave(" = 0");
+ return 0;
+ }
+
+ if (call->app_last_rcv) {
+ /* the last packet has arrived but the mark can't be met */
+ _debug("Insufficient data (%u/%u)",call->app_ready_qty,call->app_mark);
+ call->app_async_read = 0;
+ call->app_mark = RXRPC_APP_MARK_EOF;
+ call->app_read_buf = NULL;
+
+ _leave(" = -EBADMSG");
+ return -EBADMSG;
+ }
+
+ _leave(" = -EAGAIN");
+ return -EAGAIN;
+} /* end __rxrpc_call_read_data() */
+
+/*****************************************************************************/
+/*
+ * attempt to read the specified amount of data from the call's ready queue into the buffer
+ * provided
+ * - since this func is the only one going to look at packets queued on app_readyq, we don't need
+ * a lock to modify or access them, only to modify the queue pointers
+ * - if the buffer pointer is NULL, then data is merely drained, not copied
+ * - if flags&RXRPC_CALL_READ_BLOCK, then the function will wait until there is enough data or an
+ * error will be generated
+ * - note that the caller must have added the calling task to the call's wait queue beforehand
+ * - if flags&RXRPC_CALL_READ_ALL, then an error will be generated if this function doesn't read
+ * all available data
+ * - returns 0, -EBUSY (read already pending), -EAGAIN, -EINTR, -ECONNABORTED or an abort code
+ */
+int rxrpc_call_read_data(struct rxrpc_call *call, void *buffer, size_t size, int flags)
+{
+ int ret;
+
+ _enter("%p{arq=%u},%p,%d,%x",call,call->app_ready_qty,buffer,size,flags);
+
+ spin_lock(&call->lock);
+
+ /* only one async read may be outstanding at a time */
+ if (unlikely(!!call->app_read_buf)) {
+ spin_unlock(&call->lock);
+ _leave(" = -EBUSY");
+ return -EBUSY;
+ }
+
+ call->app_mark = size;
+ call->app_read_buf = buffer;
+ call->app_async_read = 1;
+ call->app_read_count++;
+
+ /* read as much data as possible */
+ ret = __rxrpc_call_read_data(call);
+ switch (ret) {
+ case 0:
+ /* all the data requested was read */
+ if (flags&RXRPC_CALL_READ_ALL && (!call->app_last_rcv || call->app_ready_qty>0)) {
+ /* NOTE(review): the lock is not unlocked on this path -
+ * presumably __rxrpc_call_abort() releases it (it is called
+ * with the lock held at the other call sites too) - confirm */
+ _leave(" = -EBADMSG");
+ __rxrpc_call_abort(call,-EBADMSG);
+ return -EBADMSG;
+ }
+
+ spin_unlock(&call->lock);
+ call->app_attn_func(call);
+ _leave(" = 0");
+ return ret;
+
+ case -ECONNABORTED:
+ spin_unlock(&call->lock);
+ _leave(" = %d [aborted]",ret);
+ return ret;
+
+ default:
+ /* unexpected error - abort the call (takes over the held lock) */
+ __rxrpc_call_abort(call,ret);
+ _leave(" = %d",ret);
+ return ret;
+
+ case -EAGAIN:
+ spin_unlock(&call->lock);
+
+ if (!(flags&RXRPC_CALL_READ_BLOCK)) {
+ _leave(" = -EAGAIN");
+ return -EAGAIN;
+ }
+
+ /* wait for the data to arrive */
+ _debug("blocking for data arrival");
+
+ /* app_async_read is cleared by __rxrpc_call_read_data() once the
+ * request has been satisfied (or failed) */
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (!call->app_async_read || signal_pending(current))
+ break;
+ schedule();
+ }
+ set_current_state(TASK_RUNNING);
+
+ if (signal_pending(current)) {
+ _leave(" = -EINTR");
+ return -EINTR;
+ }
+
+ if (call->app_call_state==RXRPC_CSTATE_ERROR) {
+ _leave(" = -ECONNABORTED");
+ return -ECONNABORTED;
+ }
+
+ _leave(" = 0");
+ return 0;
+ }
+
+} /* end rxrpc_call_read_data() */
+
+/*****************************************************************************/
+/*
+ * write data to a call
+ * - the data may not be sent immediately if it doesn't fill a buffer
+ * - if we can't queue all the data for buffering now, siov[] will have been adjusted to take
+ * account of what has been sent
+ * - sioc/siov describe the source data; rxhdr_flags is OR'd into the packet header flags
+ * (RXRPC_LAST_PACKET forces a flush); alloc_flags is passed to the memory allocator
+ * - if dup_data is set the data is copied into freshly allocated buffers, otherwise the
+ * caller's iovec buffers are attached directly (and must stay valid until sent)
+ * - *size_sent is set to the number of bytes actually queued
+ */
+int rxrpc_call_write_data(struct rxrpc_call *call,
+ size_t sioc,
+ struct iovec siov[],
+ u8 rxhdr_flags,
+ int alloc_flags,
+ int dup_data,
+ size_t *size_sent)
+{
+ struct rxrpc_message *msg;
+ struct iovec *sptr;
+ size_t space, size, chunk, tmp;
+ char *buf;
+ int ret;
+
+ _enter("%p,%u,%p,%02x,%x,%d,%p",call,sioc,siov,rxhdr_flags,alloc_flags,dup_data,size_sent);
+
+ *size_sent = 0;
+ size = 0;
+ ret = -EINVAL;
+
+ /* can't send more if we've sent last packet from this end */
+ switch (call->app_call_state) {
+ case RXRPC_CSTATE_SRVR_SND_REPLY:
+ case RXRPC_CSTATE_CLNT_SND_ARGS:
+ break;
+ case RXRPC_CSTATE_ERROR:
+ ret = call->app_errno;
+ /* fall through */
+ default:
+ goto out;
+ }
+
+ /* calculate how much data we've been given */
+ sptr = siov;
+ for (; sioc>0; sptr++, sioc--) {
+ if (!sptr->iov_len) continue;
+
+ if (!sptr->iov_base)
+ goto out;
+
+ size += sptr->iov_len;
+ }
+
+ _debug("- size=%u mtu=%u",size,call->conn->mtu_size);
+
+ do {
+ /* make sure there's a message under construction */
+ if (!call->snd_nextmsg) {
+ /* no - allocate a message with no data yet attached */
+ ret = rxrpc_conn_newmsg(call->conn,call,RXRPC_PACKET_TYPE_DATA,
+ 0,NULL,alloc_flags,&call->snd_nextmsg);
+ if (ret<0)
+ goto out;
+ _debug("- allocated new message [ds=%u]",call->snd_nextmsg->dsize);
+ }
+
+ msg = call->snd_nextmsg;
+ msg->hdr.flags |= rxhdr_flags;
+
+ /* deal with zero-length terminal packet */
+ if (size==0) {
+ if (rxhdr_flags & RXRPC_LAST_PACKET) {
+ ret = rxrpc_call_flush(call);
+ if (ret<0)
+ goto out;
+ }
+ break;
+ }
+
+ /* work out how much space current packet has available */
+ space = call->conn->mtu_size - msg->dsize;
+ chunk = min(space,size);
+
+ _debug("- [before] space=%u chunk=%u",space,chunk);
+
+ while (!siov->iov_len)
+ siov++;
+
+ /* if we are going to have to duplicate the data then coalesce it too */
+ if (dup_data) {
+ /* don't allocate more that 1 page at a time */
+ if (chunk>PAGE_SIZE)
+ chunk = PAGE_SIZE;
+
+ /* allocate a data buffer and attach to the message */
+ buf = kmalloc(chunk,alloc_flags);
+ if (unlikely(!buf)) {
+ if (msg->dsize==sizeof(struct rxrpc_header)) {
+ /* discard an empty msg and wind back the seq counter */
+ rxrpc_put_message(msg);
+ call->snd_nextmsg = NULL;
+ call->snd_seq_count--;
+ }
+
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* mark the buffer for freeing by the message destructor */
+ tmp = msg->dcount++;
+ set_bit(tmp,&msg->dfree);
+ msg->data[tmp].iov_base = buf;
+ msg->data[tmp].iov_len = chunk;
+ msg->dsize += chunk;
+ *size_sent += chunk;
+ size -= chunk;
+
+ /* load the buffer with data */
+ while (chunk>0) {
+ tmp = min(chunk,siov->iov_len);
+ memcpy(buf,siov->iov_base,tmp);
+ buf += tmp;
+ siov->iov_base += tmp;
+ siov->iov_len -= tmp;
+ if (!siov->iov_len)
+ siov++;
+ chunk -= tmp;
+ }
+ }
+ else {
+ /* we want to attach the supplied buffers directly */
+ /* NOTE(review): chunk (size_t) will wrap below zero here if an
+ * iovec is longer than the space left in the packet - this relies
+ * on the dcount limit to terminate the loop; confirm callers'
+ * iovec sizing */
+ while (chunk>0 && msg->dcount<RXRPC_MSG_MAX_IOCS) {
+ tmp = msg->dcount++;
+ msg->data[tmp].iov_base = siov->iov_base;
+ msg->data[tmp].iov_len = siov->iov_len;
+ msg->dsize += siov->iov_len;
+ *size_sent += siov->iov_len;
+ size -= siov->iov_len;
+ chunk -= siov->iov_len;
+ siov++;
+ }
+ }
+
+ _debug("- [loaded] chunk=%u size=%u",chunk,size);
+
+ /* dispatch the message when full, final or requesting ACK */
+ if (msg->dsize>=call->conn->mtu_size || rxhdr_flags) {
+ ret = rxrpc_call_flush(call);
+ if (ret<0)
+ goto out;
+ }
+
+ } while(size>0);
+
+ ret = 0;
+ out:
+ _leave(" = %d (%d queued, %d rem)",ret,*size_sent,size);
+ return ret;
+
+} /* end rxrpc_call_write_data() */
+
+/*****************************************************************************/
+/*
+ * flush outstanding packets to the network
+ * - dispatches the message currently under construction (if any), queueing it on the
+ * pending-ACK list and advancing the call state when it is the last packet
+ * - returns 0 on success, the call's error code if it is in the error state, or a
+ * transmission error
+ */
+int rxrpc_call_flush(struct rxrpc_call *call)
+{
+ struct rxrpc_message *msg;
+ int ret = 0;
+
+ _enter("%p",call);
+
+ /* pin the call while we're fiddling with it */
+ rxrpc_get_call(call);
+
+ /* if there's a packet under construction, then dispatch it now */
+ if (call->snd_nextmsg) {
+ msg = call->snd_nextmsg;
+ call->snd_nextmsg = NULL;
+
+ /* the last packet asks for an ACK rather than promising more data */
+ if (msg->hdr.flags & RXRPC_LAST_PACKET) {
+ msg->hdr.flags &= ~RXRPC_MORE_PACKETS;
+ msg->hdr.flags |= RXRPC_REQUEST_ACK;
+ }
+ else {
+ msg->hdr.flags |= RXRPC_MORE_PACKETS;
+ }
+
+ _proto("Sending DATA message { ds=%u dc=%u df=%02lu }",
+ msg->dsize,msg->dcount,msg->dfree);
+
+ /* queue and adjust call state */
+ spin_lock(&call->lock);
+ list_add_tail(&msg->link,&call->acks_pendq);
+
+ /* decide what to do depending on current state and if this is the last packet */
+ ret = -EINVAL;
+ switch (call->app_call_state) {
+ case RXRPC_CSTATE_SRVR_SND_REPLY:
+ if (msg->hdr.flags & RXRPC_LAST_PACKET) {
+ call->app_call_state = RXRPC_CSTATE_SRVR_RCV_FINAL_ACK;
+ _state(call);
+ }
+ break;
+
+ case RXRPC_CSTATE_CLNT_SND_ARGS:
+ if (msg->hdr.flags & RXRPC_LAST_PACKET) {
+ call->app_call_state = RXRPC_CSTATE_CLNT_RCV_REPLY;
+ _state(call);
+ }
+ break;
+
+ case RXRPC_CSTATE_ERROR:
+ ret = call->app_errno;
+ /* fall through - sending is not valid in any other state */
+ default:
+ spin_unlock(&call->lock);
+ goto out;
+ }
+
+ call->acks_pend_cnt++;
+
+ mod_timer(&call->acks_timeout,jiffies + rxrpc_call_acks_timeout);
+
+ spin_unlock(&call->lock);
+
+ /* transmit outside the lock */
+ ret = rxrpc_conn_sendmsg(call->conn,msg);
+ if (ret==0)
+ call->pkt_snd_count++;
+ }
+
+ out:
+ rxrpc_put_call(call);
+
+ _leave(" = %d",ret);
+ return ret;
+
+} /* end rxrpc_call_flush() */
+
+/*****************************************************************************/
+/*
+ * resend NAK'd or unacknowledged packets up to the highest one specified
+ * - aborts the call instead if the resend limit has been reached
+ * - the call lock is dropped around each actual transmission
+ */
+static void rxrpc_call_resend(struct rxrpc_call *call, rxrpc_seq_t highest)
+{
+ struct rxrpc_message *msg;
+ struct list_head *_p;
+ rxrpc_seq_t seq = 0;
+
+ _enter("%p,%u",call,highest);
+
+ _proto("Rx Resend required");
+
+ /* handle too many resends */
+ if (call->snd_resend_cnt>=rxrpc_call_max_resend) {
+ _debug("Aborting due to too many resends (rcv=%d)",call->pkt_rcv_count);
+ rxrpc_call_abort(call,call->pkt_rcv_count>0?-EIO:-ETIMEDOUT);
+ _leave("");
+ return;
+ }
+
+ spin_lock(&call->lock);
+ call->snd_resend_cnt++;
+ for (;;) {
+ /* determine which the next packet we might need to ACK is */
+ if (seq<=call->acks_dftv_seq)
+ seq = call->acks_dftv_seq;
+ seq++;
+
+ if (seq>highest)
+ break;
+
+ /* look for the packet in the pending-ACK queue */
+ list_for_each(_p,&call->acks_pendq) {
+ msg = list_entry(_p,struct rxrpc_message,link);
+ if (msg->seq==seq)
+ goto found_msg;
+ }
+
+ panic("%s(%p,%d): Inconsistent pending-ACK queue (ds=%u sc=%u sq=%u)\n",
+ __FUNCTION__,call,highest,call->acks_dftv_seq,call->snd_seq_count,seq);
+
+ found_msg:
+ /* "continue" restarts the loop, which advances seq to the next packet */
+ if (msg->state!=RXRPC_MSG_SENT)
+ continue; /* only un-ACK'd packets */
+
+ /* pin the message and drop the lock across the transmission */
+ rxrpc_get_message(msg);
+ spin_unlock(&call->lock);
+
+ /* send each message again (and ignore any errors we might incur) */
+ _proto("Resending DATA message { ds=%u dc=%u df=%02lu }",
+ msg->dsize,msg->dcount,msg->dfree);
+
+ if (rxrpc_conn_sendmsg(call->conn,msg)==0)
+ call->pkt_snd_count++;
+
+ rxrpc_put_message(msg);
+
+ spin_lock(&call->lock);
+ }
+
+ /* reset the timeout */
+ mod_timer(&call->acks_timeout,jiffies + rxrpc_call_acks_timeout);
+
+ spin_unlock(&call->lock);
+
+ _leave("");
+} /* end rxrpc_call_resend() */
+
+/*****************************************************************************/
+/*
+ * handle an ICMP error being applied to a call
+ */
+void rxrpc_call_handle_error(struct rxrpc_call *call, int local, int errno)
+{
+ _enter("%p{%u},%d",call,ntohl(call->call_id),errno);
+
+ /* move the call into the error state if it isn't there already, recording
+ * where the error came from and cancelling any pending read */
+ if (call->app_call_state!=RXRPC_CSTATE_ERROR) {
+ spin_lock(&call->lock);
+
+ call->app_call_state = RXRPC_CSTATE_ERROR;
+ _state(call);
+ call->app_err_state =
+ local ? RXRPC_ESTATE_LOCAL_ERROR : RXRPC_ESTATE_REMOTE_ERROR;
+ call->app_errno = errno;
+ call->app_mark = RXRPC_APP_MARK_EOF;
+ call->app_read_buf = NULL;
+ call->app_async_read = 0;
+
+ /* map the error */
+ call->app_aemap_func(call);
+
+ del_timer_sync(&call->acks_timeout);
+ del_timer_sync(&call->rcv_timeout);
+ del_timer_sync(&call->ackr_dfr_timo);
+
+ spin_unlock(&call->lock);
+ }
+
+ /* in either case, tell the app layer / wake up any waiters */
+ call->app_error_func(call);
+
+ _leave("");
+} /* end rxrpc_call_handle_error() */
diff --git a/net/rxrpc/connection.c b/net/rxrpc/connection.c
new file mode 100644
index 000000000000..e54dd472b5e4
--- /dev/null
+++ b/net/rxrpc/connection.c
@@ -0,0 +1,687 @@
+/* connection.c: Rx connection routines
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <rxrpc/rxrpc.h>
+#include <rxrpc/transport.h>
+#include <rxrpc/peer.h>
+#include <rxrpc/connection.h>
+#include <rxrpc/call.h>
+#include <rxrpc/message.h>
+#include <linux/udp.h>
+#include <linux/ip.h>
+#include <net/sock.h>
+#include <asm/uaccess.h>
+#include "internal.h"
+
+__RXACCT_DECL(atomic_t rxrpc_connection_count);
+
+LIST_HEAD(rxrpc_conns);
+DECLARE_RWSEM(rxrpc_conns_sem);
+
+static void __rxrpc_conn_timeout(rxrpc_timer_t *timer)
+{
+ struct rxrpc_connection *conn;
+
+ /* recover the connection record from its embedded timeout timer */
+ conn = list_entry(timer,struct rxrpc_connection,timeout);
+
+ _debug("Rx CONN TIMEOUT [%p{u=%d}]",conn,atomic_read(&conn->usage));
+
+ rxrpc_conn_do_timeout(conn);
+}
+
+/* timer operations for connection expiry (old-style GNU designated initialiser,
+ * matching the rest of this file) */
+static const struct rxrpc_timer_ops rxrpc_conn_timer_ops = {
+ timed_out: __rxrpc_conn_timeout,
+};
+
+/*****************************************************************************/
+/*
+ * create a new connection record
+ * - allocates a zeroed record with an initial usage count of one, initialises
+ *   its lists, lock and timer, and attaches the peer and transport pointers
+ * - returns 0 with *_conn set on success, or -ENOMEM
+ */
+static inline int __rxrpc_create_connection(struct rxrpc_peer *peer,
+ struct rxrpc_connection **_conn)
+{
+ struct rxrpc_connection *new_conn;
+
+ _enter("%p",peer);
+
+ /* allocate and initialise a connection record */
+ new_conn = kmalloc(sizeof(*new_conn),GFP_KERNEL);
+ if (!new_conn) {
+ _leave(" = -ENOMEM");
+ return -ENOMEM;
+ }
+
+ memset(new_conn,0,sizeof(*new_conn));
+ atomic_set(&new_conn->usage,1);
+
+ INIT_LIST_HEAD(&new_conn->link);
+ init_waitqueue_head(&new_conn->chanwait);
+ spin_lock_init(&new_conn->lock);
+ rxrpc_timer_init(&new_conn->timeout,&rxrpc_conn_timer_ops);
+
+ new_conn->mtu_size = 1024;
+ new_conn->peer = peer;
+ new_conn->trans = peer->trans;
+ do_gettimeofday(&new_conn->atime);
+
+ __RXACCT(atomic_inc(&rxrpc_connection_count));
+
+ *_conn = new_conn;
+ _leave(" = 0 (%p)",new_conn);
+
+ return 0;
+} /* end __rxrpc_create_connection() */
+
+/*****************************************************************************/
+/*
+ * create a new connection record for outgoing connections
+ * - port and addr are expected in network byte order
+ * - looks up (and takes a ref on) the peer record for addr, then builds a
+ * client-initiated connection attached to it
+ */
+int rxrpc_create_connection(struct rxrpc_transport *trans,
+ u16 port,
+ u32 addr,
+ unsigned short service_id,
+ void *security,
+ struct rxrpc_connection **_conn)
+{
+ struct rxrpc_connection *conn;
+ struct rxrpc_peer *peer;
+ int ret;
+
+ _enter("%p{%hu},%u,%hu",trans,trans->port,ntohs(port),service_id);
+
+ /* get a peer record */
+ ret = rxrpc_peer_lookup(trans,addr,&peer);
+ if (ret<0) {
+ _leave(" = %d",ret);
+ return ret;
+ }
+
+ /* allocate and initialise a connection record */
+ ret = __rxrpc_create_connection(peer,&conn);
+ if (ret<0) {
+ rxrpc_put_peer(peer);
+ _leave(" = %d",ret);
+ return ret;
+ }
+
+ /* fill in the specific bits */
+ conn->addr.sin_family = AF_INET;
+ conn->addr.sin_port = port;
+ conn->addr.sin_addr.s_addr = addr;
+
+ conn->in_epoch = rxrpc_epoch;
+ conn->out_epoch = rxrpc_epoch;
+ conn->in_clientflag = 0;
+ conn->out_clientflag = RXRPC_CLIENT_INITIATED;
+ /* NOTE(review): the connection ID is derived by casting the record's
+ * address to unsigned - this truncates on 64-bit and gives poor ID
+ * uniqueness; confirm whether a proper ID allocator is intended */
+ conn->conn_id = htonl((unsigned) conn & RXRPC_CIDMASK);
+ conn->service_id = htons(service_id);
+
+ /* attach to peer */
+ conn->peer = peer;
+
+ write_lock(&peer->conn_lock);
+ list_add_tail(&conn->link,&peer->conn_active);
+ atomic_inc(&peer->conn_count);
+ write_unlock(&peer->conn_lock);
+
+ /* make it visible to /proc-style listings */
+ down_write(&rxrpc_conns_sem);
+ list_add_tail(&conn->proc_link,&rxrpc_conns);
+ up_write(&rxrpc_conns_sem);
+
+ *_conn = conn;
+ _leave(" = 0 (%p)",conn);
+
+ return 0;
+} /* end rxrpc_create_connection() */
+
+/*****************************************************************************/
+/*
+ * lookup the connection for an incoming packet
+ * - create a new connection record for unrecorded incoming connections
+ * - search order: active list (read-locked fast path), active list again under the
+ * write lock, then the peer's connection graveyard (resurrecting a match)
+ * - a pre-built candidate record is used for inbound connections so no allocation is
+ * needed under the write lock; an unused candidate is freed before returning
+ * - returns 0 with *_conn holding a ref'd connection, or -ENOENT/-ENOMEM
+ */
+int rxrpc_connection_lookup(struct rxrpc_peer *peer,
+ struct rxrpc_message *msg,
+ struct rxrpc_connection **_conn)
+{
+ struct rxrpc_connection *conn, *candidate = NULL;
+ struct list_head *_p;
+ int ret, fresh = 0;
+ u32 x_epoch, x_connid;
+ u16 x_port, x_secix, x_servid;
+ u8 x_clflag;
+
+ _enter("%p{{%hu}},%u,%hu",
+ peer,peer->trans->port,ntohs(msg->pkt->h.uh->source),ntohs(msg->hdr.serviceId));
+
+ /* snapshot the match keys from the packet (all network byte order) */
+ x_port = msg->pkt->h.uh->source;
+ x_epoch = msg->hdr.epoch;
+ x_clflag = msg->hdr.flags & RXRPC_CLIENT_INITIATED;
+ x_connid = htonl(ntohl(msg->hdr.cid) & RXRPC_CIDMASK);
+ x_servid = msg->hdr.serviceId;
+ x_secix = msg->hdr.securityIndex;
+
+ /* [common case] search the transport's active list first */
+ read_lock(&peer->conn_lock);
+ list_for_each(_p,&peer->conn_active) {
+ conn = list_entry(_p,struct rxrpc_connection,link);
+ if (conn->addr.sin_port == x_port &&
+ conn->in_epoch == x_epoch &&
+ conn->conn_id == x_connid &&
+ conn->security_ix == x_secix &&
+ conn->service_id == x_servid &&
+ conn->in_clientflag == x_clflag)
+ goto found_active;
+ }
+ read_unlock(&peer->conn_lock);
+
+ /* [uncommon case] not active
+ * - create a candidate for a new record if an inbound connection
+ * - only examine the graveyard for an outbound connection
+ */
+ if (x_clflag) {
+ ret = __rxrpc_create_connection(peer,&candidate);
+ if (ret<0) {
+ _leave(" = %d",ret);
+ return ret;
+ }
+
+ /* fill in the specifics */
+ candidate->addr.sin_family = AF_INET;
+ candidate->addr.sin_port = x_port;
+ candidate->addr.sin_addr.s_addr = msg->pkt->nh.iph->saddr;
+ candidate->in_epoch = x_epoch;
+ candidate->out_epoch = x_epoch;
+ candidate->in_clientflag = RXRPC_CLIENT_INITIATED;
+ candidate->out_clientflag = 0;
+ candidate->conn_id = x_connid;
+ candidate->service_id = x_servid;
+ candidate->security_ix = x_secix;
+ }
+
+ /* search the active list again, just in case it appeared whilst we were busy */
+ write_lock(&peer->conn_lock);
+ list_for_each(_p,&peer->conn_active) {
+ conn = list_entry(_p,struct rxrpc_connection,link);
+ if (conn->addr.sin_port == x_port &&
+ conn->in_epoch == x_epoch &&
+ conn->conn_id == x_connid &&
+ conn->security_ix == x_secix &&
+ conn->service_id == x_servid &&
+ conn->in_clientflag == x_clflag)
+ goto found_active_second_chance;
+ }
+
+ /* search the transport's graveyard list */
+ spin_lock(&peer->conn_gylock);
+ list_for_each(_p,&peer->conn_graveyard) {
+ conn = list_entry(_p,struct rxrpc_connection,link);
+ if (conn->addr.sin_port == x_port &&
+ conn->in_epoch == x_epoch &&
+ conn->conn_id == x_connid &&
+ conn->security_ix == x_secix &&
+ conn->service_id == x_servid &&
+ conn->in_clientflag == x_clflag)
+ goto found_in_graveyard;
+ }
+ spin_unlock(&peer->conn_gylock);
+
+ /* outbound connections aren't created here */
+ if (!x_clflag) {
+ write_unlock(&peer->conn_lock);
+ _leave(" = -ENOENT");
+ return -ENOENT;
+ }
+
+ /* we can now add the new candidate to the list */
+ rxrpc_get_peer(peer);
+ conn = candidate;
+ candidate = NULL;
+ atomic_inc(&peer->conn_count);
+ fresh = 1;
+
+ make_active:
+ list_add_tail(&conn->link,&peer->conn_active);
+
+ success_uwfree:
+ write_unlock(&peer->conn_lock);
+
+ /* ditch the candidate if it wasn't consumed */
+ if (candidate) {
+ __RXACCT(atomic_dec(&rxrpc_connection_count));
+ kfree(candidate);
+ }
+
+ /* a brand new record also goes on the global listing */
+ if (fresh) {
+ down_write(&rxrpc_conns_sem);
+ list_add_tail(&conn->proc_link,&rxrpc_conns);
+ up_write(&rxrpc_conns_sem);
+ }
+
+ success:
+ *_conn = conn;
+ _leave(" = 0 (%p)",conn);
+ return 0;
+
+ /* handle the connection being found in the active list straight off */
+ found_active:
+ rxrpc_get_connection(conn);
+ read_unlock(&peer->conn_lock);
+ goto success;
+
+ /* handle resurrecting a connection from the graveyard */
+ found_in_graveyard:
+ rxrpc_get_peer(peer);
+ rxrpc_get_connection(conn);
+ rxrpc_krxtimod_del_timer(&conn->timeout);
+ list_del_init(&conn->link);
+ spin_unlock(&peer->conn_gylock);
+ goto make_active;
+
+ /* handle finding the connection on the second time through the active list */
+ found_active_second_chance:
+ rxrpc_get_connection(conn);
+ goto success_uwfree;
+
+} /* end rxrpc_connection_lookup() */
+
+/*****************************************************************************/
+/*
+ * finish using a connection record
+ * - it will be transferred to the peer's connection graveyard when refcount reaches 0
+ */
+void rxrpc_put_connection(struct rxrpc_connection *conn)
+{
+	struct rxrpc_peer *peer = conn->peer;
+
+	_enter("%p{u=%d p=%hu}",conn,atomic_read(&conn->usage),ntohs(conn->addr.sin_port));
+
+	/* sanity check */
+	if (atomic_read(&conn->usage)<=0)
+		BUG();
+
+	/* hold the graveyard lock across the decrement so that the connection
+	 * can't be resurrected or reaped whilst we're moving it */
+	spin_lock(&peer->conn_gylock);
+	if (likely(!atomic_dec_and_test(&conn->usage))) {
+		spin_unlock(&peer->conn_gylock);
+		_leave("");
+		return;
+	}
+
+	/* move to graveyard queue */
+	list_del(&conn->link);
+	list_add_tail(&conn->link,&peer->conn_graveyard);
+
+	/* discard in 20 secs */
+	rxrpc_krxtimod_add_timer(&conn->timeout,20*HZ);
+
+	spin_unlock(&peer->conn_gylock);
+
+	/* use the peer pointer saved above - conn is in the graveyard and may
+	 * be destroyed by the timeout daemon once the lock is dropped, so
+	 * conn->peer must not be touched here */
+	rxrpc_put_peer(peer);
+
+	_leave(" [killed]");
+} /* end rxrpc_put_connection() */
+
+/*****************************************************************************/
+/*
+ * free a connection record
+ */
+void rxrpc_conn_do_timeout(struct rxrpc_connection *conn)
+{
+	struct rxrpc_peer *peer;
+
+	_enter("%p{u=%d p=%hu}",conn,atomic_read(&conn->usage),ntohs(conn->addr.sin_port));
+
+	peer = conn->peer;
+
+	/* a negative usage count indicates a refcounting bug somewhere */
+	if (atomic_read(&conn->usage)<0)
+		BUG();
+
+	/* remove from graveyard if still dead */
+	spin_lock(&peer->conn_gylock);
+	if (atomic_read(&conn->usage)==0) {
+		list_del_init(&conn->link);
+	}
+	else {
+		/* someone regained a reference whilst the timer was pending */
+		conn = NULL;
+	}
+	spin_unlock(&peer->conn_gylock);
+
+	if (!conn) {
+		_leave("");
+		return; /* resurrected */
+	}
+
+	_debug("--- Destroying Connection %p ---",conn);
+
+	/* unhook from the /proc listing before the record is freed */
+	down_write(&rxrpc_conns_sem);
+	list_del(&conn->proc_link);
+	up_write(&rxrpc_conns_sem);
+
+	__RXACCT(atomic_dec(&rxrpc_connection_count));
+	kfree(conn);
+
+	/* if the graveyard is now empty, wake up anyone waiting for that */
+	if (atomic_dec_and_test(&peer->conn_count))
+		wake_up(&peer->conn_gy_waitq);
+
+	_leave(" [destroyed]");
+} /* end rxrpc_conn_do_timeout() */
+
+/*****************************************************************************/
+/*
+ * clear all connection records from a peer endpoint
+ */
+void rxrpc_conn_clearall(struct rxrpc_peer *peer)
+{
+	DECLARE_WAITQUEUE(myself,current);
+
+	struct rxrpc_connection *conn;
+	int err;
+
+	_enter("%p",peer);
+
+	/* there shouldn't be any active conns remaining */
+	if (!list_empty(&peer->conn_active))
+		BUG();
+
+	/* manually timeout all conns in the graveyard */
+	spin_lock(&peer->conn_gylock);
+	while (!list_empty(&peer->conn_graveyard)) {
+		conn = list_entry(peer->conn_graveyard.next,struct rxrpc_connection,link);
+		err = rxrpc_krxtimod_del_timer(&conn->timeout);
+		/* drop the lock whilst reaping - rxrpc_conn_do_timeout() takes
+		 * it itself */
+		spin_unlock(&peer->conn_gylock);
+
+		/* only reap here if the timer was actually cancelled; if
+		 * del_timer returned -ENOENT, krxtimod has already dequeued it
+		 * and will run the timeout itself */
+		if (err==0)
+			rxrpc_conn_do_timeout(conn);
+
+		spin_lock(&peer->conn_gylock);
+	}
+	spin_unlock(&peer->conn_gylock);
+
+	/* wait for the conn graveyard to be completely cleared */
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	add_wait_queue(&peer->conn_gy_waitq,&myself);
+
+	while (atomic_read(&peer->conn_count)!=0) {
+		schedule();
+		set_current_state(TASK_UNINTERRUPTIBLE);
+	}
+
+	remove_wait_queue(&peer->conn_gy_waitq,&myself);
+	set_current_state(TASK_RUNNING);
+
+	_leave("");
+
+} /* end rxrpc_conn_clearall() */
+
+/*****************************************************************************/
+/*
+ * allocate and prepare a message for sending out through the transport endpoint
+ */
+int rxrpc_conn_newmsg(struct rxrpc_connection *conn,
+		       struct rxrpc_call *call,
+		       u8 type,
+		       int dcount,
+		       struct iovec diov[],
+		       int alloc_flags,
+		       struct rxrpc_message **_msg)
+{
+	struct rxrpc_message *msg;
+	int loop;
+
+	_enter("%p{%d},%p,%u",conn,ntohs(conn->addr.sin_port),call,type);
+
+	/* only up to three caller-supplied iovecs are accepted (iovec 0 is
+	 * reserved for the wire header below) */
+	if (dcount>3) {
+		_leave(" = -EINVAL");
+		return -EINVAL;
+	}
+
+	msg = kmalloc(sizeof(struct rxrpc_message),alloc_flags);
+	if (!msg) {
+		_leave(" = -ENOMEM");
+		return -ENOMEM;
+	}
+
+	memset(msg,0,sizeof(*msg));
+	atomic_set(&msg->usage,1);
+
+	INIT_LIST_HEAD(&msg->link);
+
+	msg->state = RXRPC_MSG_PREPARED;
+
+	/* fill in the packet header from the connection's parameters */
+	msg->hdr.epoch = conn->out_epoch;
+	msg->hdr.cid = conn->conn_id | (call ? call->chan_ix : 0);
+	msg->hdr.callNumber = call ? call->call_id : 0;
+	msg->hdr.type = type;
+	msg->hdr.flags = conn->out_clientflag;
+	msg->hdr.securityIndex = conn->security_ix;
+	msg->hdr.serviceId = conn->service_id;
+
+	/* generate sequence numbers for data packets */
+	if (call) {
+		switch (type) {
+		case RXRPC_PACKET_TYPE_DATA:
+			msg->seq = ++call->snd_seq_count;
+			msg->hdr.seq = htonl(msg->seq);
+			break;
+		case RXRPC_PACKET_TYPE_ACK:
+			/* ACK sequence numbers are complicated. The following may be wrong:
+			 * - jumbo packet ACKs should have a seq number
+			 * - normal ACKs should not
+			 */
+		default:
+			break;
+		}
+	}
+
+	/* iovec 0 always points at the wire header; the caller's iovecs are
+	 * copied in after it and the total size accumulated */
+	msg->dcount = dcount + 1;
+	msg->dsize = sizeof(msg->hdr);
+	msg->data[0].iov_len = sizeof(msg->hdr);
+	msg->data[0].iov_base = &msg->hdr;
+
+	for (loop=0; loop<dcount; loop++) {
+		msg->dsize += diov[loop].iov_len;
+		msg->data[loop+1].iov_len = diov[loop].iov_len;
+		msg->data[loop+1].iov_base = diov[loop].iov_base;
+	}
+
+	__RXACCT(atomic_inc(&rxrpc_message_count));
+	*_msg = msg;
+	_leave(" = 0 (%p) #%d",msg,atomic_read(&rxrpc_message_count));
+	return 0;
+} /* end rxrpc_conn_newmsg() */
+
+/*****************************************************************************/
+/*
+ * free a message
+ */
+void __rxrpc_put_message(struct rxrpc_message *msg)
+{
+	int loop;
+
+	_enter("%p #%d",msg,atomic_read(&rxrpc_message_count));
+
+	/* release the socket buffer and connection ref this message pinned */
+	if (msg->pkt) kfree_skb(msg->pkt);
+	if (msg->conn) rxrpc_put_connection(msg->conn);
+
+	/* free any data buffers the message owns, as flagged in the dfree
+	 * bitmask (one bit per data[] slot) */
+	for (loop=0; loop<8; loop++)
+		if (test_bit(loop,&msg->dfree))
+			kfree(msg->data[loop].iov_base);
+
+	__RXACCT(atomic_dec(&rxrpc_message_count));
+	kfree(msg);
+
+	_leave("");
+} /* end __rxrpc_put_message() */
+
+/*****************************************************************************/
+/*
+ * send a message out through the transport endpoint
+ */
+int rxrpc_conn_sendmsg(struct rxrpc_connection *conn, struct rxrpc_message *msg)
+{
+	struct msghdr msghdr;
+	mm_segment_t oldfs;
+	int ret;
+
+	_enter("%p{%d}",conn,ntohs(conn->addr.sin_port));
+
+	/* fill in some fields in the header */
+	spin_lock(&conn->lock);
+	msg->hdr.serial = htonl(++conn->serial_counter);
+	msg->rttdone = 0;
+	spin_unlock(&conn->lock);
+
+	/* set up the message to be transmitted */
+	msghdr.msg_name = &conn->addr;
+	msghdr.msg_namelen = sizeof(conn->addr);
+	msghdr.msg_iov = msg->data;
+	msghdr.msg_iovlen = msg->dcount;
+	msghdr.msg_control = NULL;
+	msghdr.msg_controllen = 0;
+	msghdr.msg_flags = MSG_CONFIRM|MSG_DONTWAIT;
+
+	_net("Sending message type %d of %d bytes to %08x:%d",
+	     msg->hdr.type,
+	     msg->dsize,
+	     htonl(conn->addr.sin_addr.s_addr),
+	     htons(conn->addr.sin_port));
+
+	/* send the message
+	 * - the iovecs point at kernel memory, so widen the address limit for
+	 *   the duration of the sock_sendmsg() call */
+	oldfs = get_fs();
+	set_fs(KERNEL_DS);
+	ret = sock_sendmsg(conn->trans->socket,&msghdr,msg->dsize);
+	set_fs(oldfs);
+
+	if (ret<0) {
+		msg->state = RXRPC_MSG_ERROR;
+	}
+	else {
+		/* any non-negative byte count is treated as complete success */
+		msg->state = RXRPC_MSG_SENT;
+		ret = 0;
+
+		/* record the transmission time for RTT calculation */
+		spin_lock(&conn->lock);
+		do_gettimeofday(&conn->atime);
+		msg->stamp = conn->atime;
+		spin_unlock(&conn->lock);
+	}
+
+	_leave(" = %d",ret);
+
+	return ret;
+} /* end rxrpc_conn_sendmsg() */
+
+/*****************************************************************************/
+/*
+ * deal with a subsequent call packet
+ */
+int rxrpc_conn_receive_call_packet(struct rxrpc_connection *conn,
+				    struct rxrpc_call *call,
+				    struct rxrpc_message *msg)
+{
+	struct rxrpc_message *pmsg;
+	struct list_head *_p;
+	unsigned cix, seq;
+	int ret = 0;
+
+	_enter("%p,%p,%p",conn,call,msg);
+
+	if (!call) {
+		/* look up the call on the channel indicated by the packet CID */
+		cix = ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK;
+
+		spin_lock(&conn->lock);
+		call = conn->channels[cix];
+
+		if (!call || call->call_id != msg->hdr.callNumber) {
+			/* no such call - tell the peer to abort */
+			spin_unlock(&conn->lock);
+			rxrpc_trans_immediate_abort(conn->trans,msg,-ENOENT);
+			goto out;
+		}
+		else {
+			rxrpc_get_call(call);
+			spin_unlock(&conn->lock);
+		}
+	}
+	else {
+		rxrpc_get_call(call);
+	}
+
+	_proto("Received packet %%%u [%u] on call %hu:%u:%u",
+	       htonl(msg->hdr.serial),
+	       htonl(msg->hdr.seq),
+	       htons(msg->hdr.serviceId),
+	       htonl(conn->conn_id),
+	       htonl(call->call_id));
+
+	call->pkt_rcv_count++;
+
+	/* refresh the interface MTU estimate from the route this packet took */
+	if (msg->pkt->dst && msg->pkt->dst->dev)
+		conn->peer->if_mtu = msg->pkt->dst->dev->mtu - msg->pkt->dst->dev->hard_header_len;
+
+	/* queue on the call in seq order */
+	rxrpc_get_message(msg);
+	seq = msg->seq;
+
+	spin_lock(&call->lock);
+	list_for_each(_p,&call->rcv_receiveq) {
+		pmsg = list_entry(_p,struct rxrpc_message,link);
+		if (pmsg->seq>seq)
+			break;
+	}
+	list_add_tail(&msg->link,_p); /* insert before the first higher-seq entry */
+
+	/* reset the activity timeout */
+	call->flags |= RXRPC_CALL_RCV_PKT;
+	mod_timer(&call->rcv_timeout,jiffies + rxrpc_call_rcv_timeout * HZ);
+
+	spin_unlock(&call->lock);
+
+	/* get krxiod to attend to the call's newly-arrived packet */
+	rxrpc_krxiod_queue_call(call);
+
+	rxrpc_put_call(call);
+ out:
+	_leave(" = %d",ret);
+
+	return ret;
+} /* end rxrpc_conn_receive_call_packet() */
+
+/*****************************************************************************/
+/*
+ * handle an ICMP error being applied to a connection
+ */
+void rxrpc_conn_handle_error(struct rxrpc_connection *conn, int local, int errno)
+{
+	struct rxrpc_call *calls[4];
+	int loop;
+
+	_enter("%p{%d},%d",conn,ntohs(conn->addr.sin_port),errno);
+
+	/* get a ref to all my calls in one go */
+	memset(calls,0,sizeof(calls));
+	spin_lock(&conn->lock);
+
+	for (loop=3; loop>=0; loop--) {
+		if (conn->channels[loop]) {
+			calls[loop] = conn->channels[loop];
+			rxrpc_get_call(calls[loop]);
+		}
+	}
+
+	spin_unlock(&conn->lock);
+
+	/* now kick them all */
+	/* (handlers are deliberately invoked outside conn->lock) */
+	for (loop=3; loop>=0; loop--) {
+		if (calls[loop]) {
+			rxrpc_call_handle_error(calls[loop],local,errno);
+			rxrpc_put_call(calls[loop]);
+		}
+	}
+
+	_leave("");
+} /* end rxrpc_conn_handle_error() */
diff --git a/net/rxrpc/internal.h b/net/rxrpc/internal.h
new file mode 100644
index 000000000000..afd712a439f9
--- /dev/null
+++ b/net/rxrpc/internal.h
@@ -0,0 +1,107 @@
+/* internal.h: internal Rx RPC stuff
+ *
+ * Copyright (c) 2002 David Howells (dhowells@redhat.com).
+ */
+
+#ifndef RXRPC_INTERNAL_H
+#define RXRPC_INTERNAL_H
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+
+/*
+ * debug accounting
+ * - change the "#if 1" to "#if 0" to compile the object counters out
+ */
+#if 1
+#define __RXACCT_DECL(X) X
+#define __RXACCT(X) do { X; } while(0)
+#else
+#define __RXACCT_DECL(X)
+#define __RXACCT(X) do { } while(0)
+#endif
+
+__RXACCT_DECL(extern atomic_t rxrpc_transport_count);
+__RXACCT_DECL(extern atomic_t rxrpc_peer_count);
+__RXACCT_DECL(extern atomic_t rxrpc_connection_count);
+__RXACCT_DECL(extern atomic_t rxrpc_call_count);
+__RXACCT_DECL(extern atomic_t rxrpc_message_count);
+
+/*
+ * debug tracing
+ * - the "#if 0" branch below makes tracing unconditional; the default branch
+ *   gates each category on a runtime flag
+ * - NOTE(review): rxrpc_ktrace/rxrpc_kdebug/rxrpc_kproto/rxrpc_knet are not
+ *   declared in this header - presumably declared elsewhere (sysctl); confirm
+ */
+#define kenter(FMT,...) printk("==> %s("FMT")\n",__FUNCTION__,##__VA_ARGS__)
+#define kleave(FMT,...) printk("<== %s()"FMT"\n",__FUNCTION__,##__VA_ARGS__)
+#define kdebug(FMT,...) printk(" "FMT"\n",##__VA_ARGS__)
+#define kproto(FMT,...) printk("### "FMT"\n",##__VA_ARGS__)
+#define knet(FMT,...) printk(" "FMT"\n",##__VA_ARGS__)
+
+#if 0
+#define _enter(FMT,...) kenter(FMT,##__VA_ARGS__)
+#define _leave(FMT,...) kleave(FMT,##__VA_ARGS__)
+#define _debug(FMT,...) kdebug(FMT,##__VA_ARGS__)
+#define _proto(FMT,...) kproto(FMT,##__VA_ARGS__)
+#define _net(FMT,...) knet(FMT,##__VA_ARGS__)
+#else
+#define _enter(FMT,...) do { if (rxrpc_ktrace) kenter(FMT,##__VA_ARGS__); } while(0)
+#define _leave(FMT,...) do { if (rxrpc_ktrace) kleave(FMT,##__VA_ARGS__); } while(0)
+#define _debug(FMT,...) do { if (rxrpc_kdebug) kdebug(FMT,##__VA_ARGS__); } while(0)
+#define _proto(FMT,...) do { if (rxrpc_kproto) kproto(FMT,##__VA_ARGS__); } while(0)
+#define _net(FMT,...) do { if (rxrpc_knet) knet (FMT,##__VA_ARGS__); } while(0)
+#endif
+
+/* eat all signals pending on the current task so that a kernel daemon does
+ * not remain permanently signal_pending() */
+static inline void rxrpc_discard_my_signals(void)
+{
+	while (signal_pending(current)) {
+		siginfo_t sinfo;
+
+		spin_lock_irq(&current->sig->siglock);
+		dequeue_signal(&current->blocked,&sinfo);
+		spin_unlock_irq(&current->sig->siglock);
+	}
+}
+
+/*
+ * call.c
+ */
+extern struct list_head rxrpc_calls;
+extern struct rw_semaphore rxrpc_calls_sem;
+
+/*
+ * connection.c
+ */
+extern struct list_head rxrpc_conns;
+extern struct rw_semaphore rxrpc_conns_sem;
+
+extern void rxrpc_conn_do_timeout(struct rxrpc_connection *conn);
+extern void rxrpc_conn_clearall(struct rxrpc_peer *peer);
+
+/*
+ * peer.c
+ */
+extern struct list_head rxrpc_peers;
+extern struct rw_semaphore rxrpc_peers_sem;
+
+extern void rxrpc_peer_calculate_rtt(struct rxrpc_peer *peer,
+				     struct rxrpc_message *msg,
+				     struct rxrpc_message *resp);
+
+extern void rxrpc_peer_clearall(struct rxrpc_transport *trans);
+
+extern void rxrpc_peer_do_timeout(struct rxrpc_peer *peer);
+
+
+/*
+ * proc.c
+ */
+#ifdef CONFIG_PROC_FS
+extern int rxrpc_proc_init(void);
+extern void rxrpc_proc_cleanup(void);
+#endif
+
+/*
+ * transport.c
+ */
+extern struct list_head rxrpc_proc_transports;
+extern struct rw_semaphore rxrpc_proc_transports_sem;
+
+#endif /* RXRPC_INTERNAL_H */
diff --git a/net/rxrpc/krxiod.c b/net/rxrpc/krxiod.c
new file mode 100644
index 000000000000..4bbc3f6b3418
--- /dev/null
+++ b/net/rxrpc/krxiod.c
@@ -0,0 +1,262 @@
+/* krxiod.c: Rx I/O daemon
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/version.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <rxrpc/krxiod.h>
+#include <rxrpc/transport.h>
+#include <rxrpc/peer.h>
+#include <rxrpc/call.h>
+#include "internal.h"
+
+static DECLARE_WAIT_QUEUE_HEAD(rxrpc_krxiod_sleepq);
+static DECLARE_COMPLETION(rxrpc_krxiod_dead);
+
+static atomic_t rxrpc_krxiod_qcount = ATOMIC_INIT(0);
+
+static LIST_HEAD(rxrpc_krxiod_transportq);
+static spinlock_t rxrpc_krxiod_transportq_lock = SPIN_LOCK_UNLOCKED;
+
+static LIST_HEAD(rxrpc_krxiod_callq);
+static spinlock_t rxrpc_krxiod_callq_lock = SPIN_LOCK_UNLOCKED;
+
+static volatile int rxrpc_krxiod_die;
+
+/*****************************************************************************/
+/*
+ * Rx I/O daemon
+ */
+static int rxrpc_krxiod(void *arg)
+{
+	DECLARE_WAITQUEUE(krxiod,current);
+
+	printk("Started krxiod %d\n",current->pid);
+	strcpy(current->comm,"krxiod");
+
+	daemonize();
+
+	/* only certain signals are of interest */
+	spin_lock_irq(&current->sig->siglock);
+	siginitsetinv(&current->blocked,0);
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,3)
+	recalc_sigpending();
+#else
+	recalc_sigpending(current);
+#endif
+	spin_unlock_irq(&current->sig->siglock);
+
+	/* loop around waiting for work to do */
+	do {
+		/* wait for work or to be told to exit */
+		_debug("### Begin Wait");
+		if (!atomic_read(&rxrpc_krxiod_qcount)) {
+			set_current_state(TASK_INTERRUPTIBLE);
+
+			add_wait_queue(&rxrpc_krxiod_sleepq,&krxiod);
+
+			for (;;) {
+				/* set the task state before testing the wake
+				 * conditions so a concurrent wakeup isn't lost */
+				set_current_state(TASK_INTERRUPTIBLE);
+				if (atomic_read(&rxrpc_krxiod_qcount) ||
+				    rxrpc_krxiod_die ||
+				    signal_pending(current))
+					break;
+
+				schedule();
+			}
+
+			remove_wait_queue(&rxrpc_krxiod_sleepq,&krxiod);
+			set_current_state(TASK_RUNNING);
+		}
+		_debug("### End Wait");
+
+		/* do work if been given some to do */
+		_debug("### Begin Work");
+
+		/* see if there's a transport in need of attention */
+		if (!list_empty(&rxrpc_krxiod_transportq)) {
+			struct rxrpc_transport *trans = NULL;
+
+			spin_lock_irq(&rxrpc_krxiod_transportq_lock);
+
+			/* re-check under the lock and detach the first entry */
+			if (!list_empty(&rxrpc_krxiod_transportq)) {
+				trans = list_entry(rxrpc_krxiod_transportq.next,
+						   struct rxrpc_transport,krxiodq_link);
+				list_del_init(&trans->krxiodq_link);
+				atomic_dec(&rxrpc_krxiod_qcount);
+
+				/* make sure it hasn't gone away and doesn't go away */
+				if (atomic_read(&trans->usage)>0)
+					rxrpc_get_transport(trans);
+				else
+					trans = NULL;
+			}
+
+			spin_unlock_irq(&rxrpc_krxiod_transportq_lock);
+
+			if (trans) {
+				rxrpc_trans_receive_packet(trans);
+				rxrpc_put_transport(trans);
+			}
+		}
+
+		/* see if there's a call in need of attention */
+		if (!list_empty(&rxrpc_krxiod_callq)) {
+			struct rxrpc_call *call = NULL;
+
+			spin_lock_irq(&rxrpc_krxiod_callq_lock);
+
+			/* re-check under the lock and detach the first entry */
+			if (!list_empty(&rxrpc_krxiod_callq)) {
+				call = list_entry(rxrpc_krxiod_callq.next,
+						  struct rxrpc_call,rcv_krxiodq_lk);
+				list_del_init(&call->rcv_krxiodq_lk);
+				atomic_dec(&rxrpc_krxiod_qcount);
+
+				/* make sure it hasn't gone away and doesn't go away */
+				if (atomic_read(&call->usage)>0) {
+					_debug("@@@ KRXIOD Begin Attend Call %p",call);
+					rxrpc_get_call(call);
+				}
+				else {
+					call = NULL;
+				}
+			}
+
+			spin_unlock_irq(&rxrpc_krxiod_callq_lock);
+
+			if (call) {
+				rxrpc_call_do_stuff(call);
+				rxrpc_put_call(call);
+				_debug("@@@ KRXIOD End Attend Call %p",call);
+			}
+		}
+
+		_debug("### End Work");
+
+		/* discard pending signals */
+		rxrpc_discard_my_signals();
+
+	} while (!rxrpc_krxiod_die);
+
+	/* and that's all */
+	complete_and_exit(&rxrpc_krxiod_dead,0);
+
+} /* end rxrpc_krxiod() */
+
+/*****************************************************************************/
+/*
+ * start up a krxiod daemon
+ */
+int __init rxrpc_krxiod_init(void)
+{
+	/* returns the new thread's PID on success or a negative errno */
+	return kernel_thread(rxrpc_krxiod,NULL,0);
+
+} /* end rxrpc_krxiod_init() */
+
+/*****************************************************************************/
+/*
+ * kill the krxiod daemon and wait for it to complete
+ */
+void rxrpc_krxiod_kill(void)
+{
+	/* the death flag must be set before the wakeup so the daemon sees it */
+	rxrpc_krxiod_die = 1;
+	wake_up_all(&rxrpc_krxiod_sleepq);
+	/* block until the daemon has acknowledged its demise */
+	wait_for_completion(&rxrpc_krxiod_dead);
+
+} /* end rxrpc_krxiod_kill() */
+
+/*****************************************************************************/
+/*
+ * queue a transport for attention by krxiod
+ */
+void rxrpc_krxiod_queue_transport(struct rxrpc_transport *trans)
+{
+	unsigned long flags;
+
+	_enter("");
+
+	/* cheap unlocked test first; re-tested under the lock below */
+	if (list_empty(&trans->krxiodq_link)) {
+		spin_lock_irqsave(&rxrpc_krxiod_transportq_lock,flags);
+
+		/* re-check now the lock is held in case another CPU queued the
+		 * transport in the meantime */
+		if (list_empty(&trans->krxiodq_link)) {
+			/* only queue if the transport is still alive */
+			if (atomic_read(&trans->usage)>0) {
+				list_add_tail(&trans->krxiodq_link,&rxrpc_krxiod_transportq);
+				atomic_inc(&rxrpc_krxiod_qcount);
+			}
+		}
+
+		spin_unlock_irqrestore(&rxrpc_krxiod_transportq_lock,flags);
+		wake_up_all(&rxrpc_krxiod_sleepq);
+	}
+
+	_leave("");
+
+} /* end rxrpc_krxiod_queue_transport() */
+
+/*****************************************************************************/
+/*
+ * dequeue a transport from krxiod's attention queue
+ */
+void rxrpc_krxiod_dequeue_transport(struct rxrpc_transport *trans)
+{
+	unsigned long flags;
+
+	_enter("");
+
+	/* unhook the transport from the attention queue if it is currently
+	 * queued, keeping the pending-work counter in step */
+	spin_lock_irqsave(&rxrpc_krxiod_transportq_lock,flags);
+
+	if (!list_empty(&trans->krxiodq_link)) {
+		list_del_init(&trans->krxiodq_link);
+		atomic_dec(&rxrpc_krxiod_qcount);
+	}
+
+	spin_unlock_irqrestore(&rxrpc_krxiod_transportq_lock,flags);
+
+	_leave("");
+
+} /* end rxrpc_krxiod_dequeue_transport() */
+
+/*****************************************************************************/
+/*
+ * queue a call for attention by krxiod
+ */
+void rxrpc_krxiod_queue_call(struct rxrpc_call *call)
+{
+	unsigned long flags;
+
+	/* cheap unlocked test first; re-tested under the lock below */
+	if (list_empty(&call->rcv_krxiodq_lk)) {
+		spin_lock_irqsave(&rxrpc_krxiod_callq_lock,flags);
+		/* re-check now the lock is held - two CPUs passing the unlocked
+		 * test simultaneously would otherwise both add the call and
+		 * corrupt the queue (cf. rxrpc_krxiod_queue_transport()) */
+		if (list_empty(&call->rcv_krxiodq_lk) &&
+		    atomic_read(&call->usage)>0) {
+			list_add_tail(&call->rcv_krxiodq_lk,&rxrpc_krxiod_callq);
+			atomic_inc(&rxrpc_krxiod_qcount);
+		}
+		spin_unlock_irqrestore(&rxrpc_krxiod_callq_lock,flags);
+	}
+	wake_up_all(&rxrpc_krxiod_sleepq);
+
+} /* end rxrpc_krxiod_queue_call() */
+
+/*****************************************************************************/
+/*
+ * dequeue a call from krxiod's attention queue
+ */
+void rxrpc_krxiod_dequeue_call(struct rxrpc_call *call)
+{
+	unsigned long flags;
+
+	/* unhook the call from the attention queue if it is currently queued,
+	 * keeping the pending-work counter in step */
+	spin_lock_irqsave(&rxrpc_krxiod_callq_lock,flags);
+
+	if (!list_empty(&call->rcv_krxiodq_lk)) {
+		list_del_init(&call->rcv_krxiodq_lk);
+		atomic_dec(&rxrpc_krxiod_qcount);
+	}
+
+	spin_unlock_irqrestore(&rxrpc_krxiod_callq_lock,flags);
+
+} /* end rxrpc_krxiod_dequeue_call() */
diff --git a/net/rxrpc/krxsecd.c b/net/rxrpc/krxsecd.c
new file mode 100644
index 000000000000..5b57c3f8d776
--- /dev/null
+++ b/net/rxrpc/krxsecd.c
@@ -0,0 +1,278 @@
+/* krxsecd.c: Rx security daemon
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * This daemon deals with:
+ * - consulting the application as to whether inbound peers and calls should be authorised
+ * - generating security challenges for inbound connections
+ * - responding to security challenges on outbound connections
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <rxrpc/krxsecd.h>
+#include <rxrpc/transport.h>
+#include <rxrpc/connection.h>
+#include <rxrpc/message.h>
+#include <rxrpc/peer.h>
+#include <rxrpc/call.h>
+#include <linux/udp.h>
+#include <linux/ip.h>
+#include <net/sock.h>
+#include "internal.h"
+
+static DECLARE_WAIT_QUEUE_HEAD(rxrpc_krxsecd_sleepq);
+static DECLARE_COMPLETION(rxrpc_krxsecd_dead);
+static volatile int rxrpc_krxsecd_die;
+
+static atomic_t rxrpc_krxsecd_qcount;
+
+/* queue of unprocessed inbound messages with seqno #1 and RXRPC_CLIENT_INITIATED flag set */
+static LIST_HEAD(rxrpc_krxsecd_initmsgq);
+static spinlock_t rxrpc_krxsecd_initmsgq_lock = SPIN_LOCK_UNLOCKED;
+
+static void rxrpc_krxsecd_process_incoming_call(struct rxrpc_message *msg);
+
+/*****************************************************************************/
+/*
+ * Rx security daemon
+ */
+static int rxrpc_krxsecd(void *arg)
+{
+	DECLARE_WAITQUEUE(krxsecd,current);
+
+	int die;
+
+	printk("Started krxsecd %d\n",current->pid);
+	strcpy(current->comm,"krxsecd");
+
+	daemonize();
+
+	/* only certain signals are of interest */
+	spin_lock_irq(&current->sig->siglock);
+	siginitsetinv(&current->blocked,0);
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,3)
+	recalc_sigpending();
+#else
+	recalc_sigpending(current);
+#endif
+	spin_unlock_irq(&current->sig->siglock);
+
+	/* loop around waiting for work to do */
+	do {
+		/* wait for work or to be told to exit */
+		_debug("### Begin Wait");
+		if (!atomic_read(&rxrpc_krxsecd_qcount)) {
+			set_current_state(TASK_INTERRUPTIBLE);
+
+			add_wait_queue(&rxrpc_krxsecd_sleepq,&krxsecd);
+
+			for (;;) {
+				/* set the task state before testing the wake
+				 * conditions so a concurrent wakeup isn't lost */
+				set_current_state(TASK_INTERRUPTIBLE);
+				if (atomic_read(&rxrpc_krxsecd_qcount) ||
+				    rxrpc_krxsecd_die ||
+				    signal_pending(current))
+					break;
+
+				schedule();
+			}
+
+			remove_wait_queue(&rxrpc_krxsecd_sleepq,&krxsecd);
+			set_current_state(TASK_RUNNING);
+		}
+		/* sample the death flag before processing so that work queued
+		 * ahead of the kill still gets one final pass */
+		die = rxrpc_krxsecd_die;
+		_debug("### End Wait");
+
+		/* see if there're incoming calls in need of authenticating */
+		_debug("### Begin Inbound Calls");
+
+		if (!list_empty(&rxrpc_krxsecd_initmsgq)) {
+			struct rxrpc_message *msg = NULL;
+
+			spin_lock(&rxrpc_krxsecd_initmsgq_lock);
+
+			/* re-check under the lock and detach the first message */
+			if (!list_empty(&rxrpc_krxsecd_initmsgq)) {
+				msg = list_entry(rxrpc_krxsecd_initmsgq.next,
+						 struct rxrpc_message,link);
+				list_del_init(&msg->link);
+				atomic_dec(&rxrpc_krxsecd_qcount);
+			}
+
+			spin_unlock(&rxrpc_krxsecd_initmsgq_lock);
+
+			if (msg) {
+				rxrpc_krxsecd_process_incoming_call(msg);
+				rxrpc_put_message(msg);
+			}
+		}
+
+		_debug("### End Inbound Calls");
+
+		/* discard pending signals */
+		rxrpc_discard_my_signals();
+
+	} while (!die);
+
+	/* and that's all */
+	complete_and_exit(&rxrpc_krxsecd_dead,0);
+
+} /* end rxrpc_krxsecd() */
+
+/*****************************************************************************/
+/*
+ * start up a krxsecd daemon
+ */
+int __init rxrpc_krxsecd_init(void)
+{
+	/* returns the new thread's PID on success or a negative errno */
+	return kernel_thread(rxrpc_krxsecd,NULL,0);
+
+} /* end rxrpc_krxsecd_init() */
+
+/*****************************************************************************/
+/*
+ * kill the krxsecd daemon and wait for it to complete
+ */
+void rxrpc_krxsecd_kill(void)
+{
+	/* the death flag must be set before the wakeup so the daemon sees it */
+	rxrpc_krxsecd_die = 1;
+	wake_up_all(&rxrpc_krxsecd_sleepq);
+	/* block until the daemon has acknowledged its demise */
+	wait_for_completion(&rxrpc_krxsecd_dead);
+
+} /* end rxrpc_krxsecd_kill() */
+
+/*****************************************************************************/
+/*
+ * clear all pending incoming calls for the specified transport
+ */
+void rxrpc_krxsecd_clear_transport(struct rxrpc_transport *trans)
+{
+	LIST_HEAD(tmp);
+
+	struct rxrpc_message *msg;
+	struct list_head *_p, *_n;
+
+	_enter("%p",trans);
+
+	/* move all the messages for this transport onto a temp list */
+	spin_lock(&rxrpc_krxsecd_initmsgq_lock);
+
+	list_for_each_safe(_p,_n,&rxrpc_krxsecd_initmsgq) {
+		msg = list_entry(_p,struct rxrpc_message,link);
+		if (msg->trans==trans) {
+			list_del(&msg->link);
+			list_add_tail(&msg->link,&tmp);
+			atomic_dec(&rxrpc_krxsecd_qcount);
+		}
+	}
+
+	spin_unlock(&rxrpc_krxsecd_initmsgq_lock);
+
+	/* zap all messages on the temp list */
+	/* (the refs are dropped outside the queue lock) */
+	while (!list_empty(&tmp)) {
+		msg = list_entry(tmp.next,struct rxrpc_message,link);
+		list_del_init(&msg->link);
+		rxrpc_put_message(msg);
+	}
+
+	_leave("");
+} /* end rxrpc_krxsecd_clear_transport() */
+
+/*****************************************************************************/
+/*
+ * queue a message on the incoming calls list
+ */
+void rxrpc_krxsecd_queue_incoming_call(struct rxrpc_message *msg)
+{
+	_enter("%p",msg);
+
+	/* queue for processing by krxsecd */
+	spin_lock(&rxrpc_krxsecd_initmsgq_lock);
+
+	/* refuse new work if the daemon is being torn down */
+	if (!rxrpc_krxsecd_die) {
+		rxrpc_get_message(msg);
+		list_add_tail(&msg->link,&rxrpc_krxsecd_initmsgq);
+		atomic_inc(&rxrpc_krxsecd_qcount);
+	}
+
+	spin_unlock(&rxrpc_krxsecd_initmsgq_lock);
+
+	wake_up(&rxrpc_krxsecd_sleepq);
+
+	_leave("");
+} /* end rxrpc_krxsecd_queue_incoming_call() */
+
+/*****************************************************************************/
+/*
+ * process the initial message of an incoming call
+ */
+void rxrpc_krxsecd_process_incoming_call(struct rxrpc_message *msg)
+{
+	struct rxrpc_transport *trans = msg->trans;
+	struct rxrpc_service *srv;
+	struct rxrpc_call *call;
+	struct list_head *_p;
+	unsigned short sid;
+	int ret;
+
+	_enter("%p{tr=%p}",msg,trans);
+
+	/* create a call record for the inbound call */
+	ret = rxrpc_incoming_call(msg->conn,msg,&call);
+	if (ret<0)
+		goto out;
+
+	/* find the matching service on the transport */
+	sid = ntohs(msg->hdr.serviceId);
+	srv = NULL;
+
+	spin_lock(&trans->lock);
+	list_for_each(_p,&trans->services) {
+		srv = list_entry(_p,struct rxrpc_service,link);
+		if (srv->service_id==sid && try_inc_mod_count(srv->owner)) {
+			/* found a match (made sure it won't vanish) */
+			_debug("found service '%s'",srv->name);
+			call->owner = srv->owner;
+			break;
+		}
+	}
+	spin_unlock(&trans->lock);
+
+	/* report the new connection
+	 * - the func must inc the call's usage count to keep it
+	 */
+	ret = -ENOENT;
+	if (_p!=&trans->services) {
+		/* the loop broke out early, so a service matched */
+		/* attempt to accept the call */
+		call->conn->service = srv;
+		call->app_attn_func = srv->attn_func;
+		call->app_error_func = srv->error_func;
+		call->app_aemap_func = srv->aemap_func;
+
+		ret = srv->new_call(call);
+
+		/* send an abort if an error occurred */
+		if (ret<0) {
+			rxrpc_call_abort(call,ret);
+		}
+		else {
+			/* formally receive and ACK the new packet */
+			ret = rxrpc_conn_receive_call_packet(call->conn,call,msg);
+		}
+	}
+
+	rxrpc_put_call(call);
+ out:
+	/* any failure is reported back to the peer as an immediate abort */
+	if (ret<0)
+		rxrpc_trans_immediate_abort(trans,msg,ret);
+
+	_leave(" (%d)",ret);
+} /* end rxrpc_krxsecd_process_incoming_call() */
diff --git a/net/rxrpc/krxtimod.c b/net/rxrpc/krxtimod.c
new file mode 100644
index 000000000000..8dc986f79a6f
--- /dev/null
+++ b/net/rxrpc/krxtimod.c
@@ -0,0 +1,210 @@
+/* krxtimod.c: RXRPC timeout daemon
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <rxrpc/rxrpc.h>
+#include <rxrpc/krxtimod.h>
+#include <asm/errno.h>
+#include "internal.h"
+
+static DECLARE_COMPLETION(krxtimod_alive);
+static DECLARE_COMPLETION(krxtimod_dead);
+static DECLARE_WAIT_QUEUE_HEAD(krxtimod_sleepq);
+static int krxtimod_die;
+
+static LIST_HEAD(krxtimod_list);
+static spinlock_t krxtimod_lock = SPIN_LOCK_UNLOCKED;
+
+static int krxtimod(void *arg);
+
+/*****************************************************************************/
+/*
+ * start the timeout daemon
+ */
+int rxrpc_krxtimod_start(void)
+{
+	int ret;
+
+	ret = kernel_thread(krxtimod,NULL,0);
+	if (ret<0)
+		return ret;
+
+	/* don't return until the daemon has actually started running */
+	wait_for_completion(&krxtimod_alive);
+
+	return ret;
+} /* end rxrpc_krxtimod_start() */
+
+/*****************************************************************************/
+/*
+ * stop the timeout daemon
+ */
+void rxrpc_krxtimod_kill(void)
+{
+	/* get rid of my daemon */
+	/* - the death flag must be set before the wakeup so the daemon sees it */
+	krxtimod_die = 1;
+	wake_up(&krxtimod_sleepq);
+	/* block until the daemon has acknowledged its demise */
+	wait_for_completion(&krxtimod_dead);
+
+} /* end rxrpc_krxtimod_kill() */
+
+/*****************************************************************************/
+/*
+ * timeout processing daemon
+ */
+static int krxtimod(void *arg)
+{
+	DECLARE_WAITQUEUE(myself,current);
+
+	rxrpc_timer_t *timer;
+
+	printk("Started krxtimod %d\n",current->pid);
+	strcpy(current->comm,"krxtimod");
+
+	daemonize();
+
+	complete(&krxtimod_alive);
+
+	/* only certain signals are of interest */
+	spin_lock_irq(&current->sig->siglock);
+	siginitsetinv(&current->blocked,0);
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,3)
+	recalc_sigpending();
+#else
+	recalc_sigpending(current);
+#endif
+	spin_unlock_irq(&current->sig->siglock);
+
+	/* loop around looking for things to attend to */
+ loop:
+	set_current_state(TASK_INTERRUPTIBLE);
+	add_wait_queue(&krxtimod_sleepq,&myself);
+
+	for (;;) {
+		unsigned long jif;
+		signed long timeout;
+
+		/* deal with the server being asked to die */
+		if (krxtimod_die) {
+			remove_wait_queue(&krxtimod_sleepq,&myself);
+			_leave("");
+			complete_and_exit(&krxtimod_dead,0);
+		}
+
+		/* discard pending signals */
+		rxrpc_discard_my_signals();
+
+		/* work out the time to elapse before the next event */
+		spin_lock(&krxtimod_lock);
+		if (list_empty(&krxtimod_list)) {
+			timeout = MAX_SCHEDULE_TIMEOUT;
+		}
+		else {
+			timer = list_entry(krxtimod_list.next,rxrpc_timer_t,link);
+			timeout = timer->timo_jif;
+			jif = jiffies;
+
+			if (time_before_eq(timeout,jif))
+				goto immediate;
+
+			else {
+				/* subtract the jiffies snapshot taken above, not
+				 * the live counter - jiffies may have advanced
+				 * past the expiry time since the snapshot, which
+				 * would hand schedule_timeout() a negative value;
+				 * the check above guarantees timeout > jif */
+				timeout = (long)timeout - (long)jif;
+			}
+		}
+		spin_unlock(&krxtimod_lock);
+
+		schedule_timeout(timeout);
+
+		set_current_state(TASK_INTERRUPTIBLE);
+	}
+
+	/* the thing on the front of the queue needs processing
+	 * - we come here with the lock held and timer pointing to the expired entry
+	 */
+ immediate:
+	remove_wait_queue(&krxtimod_sleepq,&myself);
+	set_current_state(TASK_RUNNING);
+
+	_debug("@@@ Begin Timeout of %p",timer);
+
+	/* dequeue the timer */
+	list_del_init(&timer->link);
+	spin_unlock(&krxtimod_lock);
+
+	/* call the timeout function */
+	timer->ops->timed_out(timer);
+
+	_debug("@@@ End Timeout");
+	goto loop;
+
+} /* end krxtimod() */
+
+/*****************************************************************************/
+/*
+ * (re-)queue a timer
+ */
+void rxrpc_krxtimod_add_timer(rxrpc_timer_t *timer, unsigned long timeout)
+{
+	struct list_head *_p;
+	rxrpc_timer_t *ptimer;
+
+	_enter("%p,%lu",timer,timeout);
+
+	spin_lock(&krxtimod_lock);
+
+	/* unhook first in case the timer is already queued (re-arming) */
+	list_del(&timer->link);
+
+	/* the timer was deferred or reset - put it back in the queue at the right place */
+	timer->timo_jif = jiffies + timeout;
+
+	/* the queue is kept sorted by ascending expiry time */
+	list_for_each(_p,&krxtimod_list) {
+		ptimer = list_entry(_p,rxrpc_timer_t,link);
+		if (time_before(timer->timo_jif,ptimer->timo_jif))
+			break;
+	}
+
+	list_add_tail(&timer->link,_p);	/* insert before stopping point */
+
+	spin_unlock(&krxtimod_lock);
+
+	/* prod the daemon to recalculate its sleep period */
+	wake_up(&krxtimod_sleepq);
+
+	_leave("");
+} /* end rxrpc_krxtimod_add_timer() */
+
+/*****************************************************************************/
+/*
+ * dequeue a timer
+ * - returns 0 if the timer was deleted or -ENOENT if it wasn't queued
+ */
+int rxrpc_krxtimod_del_timer(rxrpc_timer_t *timer)
+{
+	int ret;
+
+	_enter("%p",timer);
+
+	/* unhook the timer if it is still queued; a timer that has already
+	 * been dequeued (or never queued) yields -ENOENT */
+	spin_lock(&krxtimod_lock);
+
+	if (!list_empty(&timer->link)) {
+		list_del_init(&timer->link);
+		ret = 0;
+	}
+	else {
+		ret = -ENOENT;
+	}
+
+	spin_unlock(&krxtimod_lock);
+
+	/* prod the daemon to recalculate its sleep period */
+	wake_up(&krxtimod_sleepq);
+
+	_leave(" = %d",ret);
+	return ret;
+} /* end rxrpc_krxtimod_del_timer() */
diff --git a/net/rxrpc/main.c b/net/rxrpc/main.c
new file mode 100644
index 000000000000..04bb3faa42d1
--- /dev/null
+++ b/net/rxrpc/main.c
@@ -0,0 +1,127 @@
+/* main.c: Rx RPC interface
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <rxrpc/rxrpc.h>
+#include <rxrpc/krxiod.h>
+#include <rxrpc/krxsecd.h>
+#include <rxrpc/krxtimod.h>
+#include <rxrpc/transport.h>
+#include <rxrpc/connection.h>
+#include <rxrpc/call.h>
+#include <rxrpc/message.h>
+#include "internal.h"
+
+static int rxrpc_initialise(void);
+static void rxrpc_cleanup(void);
+
+module_init(rxrpc_initialise);
+module_exit(rxrpc_cleanup);
+
+MODULE_DESCRIPTION("Rx RPC implementation");
+MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_LICENSE("GPL");
+
+u32 rxrpc_epoch;
+
+/*****************************************************************************/
+/*
+ * initialise the Rx module
+ */
+static int rxrpc_initialise(void)
+{
+ int ret;
+
+ /* my epoch value */
+ rxrpc_epoch = htonl(xtime.tv_sec);
+
+ /* register the /proc interface */
+#ifdef CONFIG_PROC_FS
+ ret = rxrpc_proc_init();
+ if (ret<0)
+ return ret;
+#endif
+
+ /* register the sysctl files */
+#ifdef CONFIG_SYSCTL
+ ret = rxrpc_sysctl_init();
+ if (ret<0)
+ goto error_proc;
+#endif
+
+ /* start the krxtimod daemon */
+ ret = rxrpc_krxtimod_start();
+ if (ret<0)
+ goto error_sysctl;
+
+ /* start the krxiod daemon */
+ ret = rxrpc_krxiod_init();
+ if (ret<0)
+ goto error_krxtimod;
+
+ /* start the krxsecd daemon */
+ ret = rxrpc_krxsecd_init();
+ if (ret<0)
+ goto error_krxiod;
+
+ kdebug("\n\n");
+
+ return 0;
+
+ error_krxiod:
+ rxrpc_krxiod_kill();
+ error_krxtimod:
+ rxrpc_krxtimod_kill();
+ error_sysctl:
+#ifdef CONFIG_SYSCTL
+ rxrpc_sysctl_cleanup();
+#endif
+ error_proc:
+#ifdef CONFIG_PROC_FS
+ rxrpc_proc_cleanup();
+#endif
+ return ret;
+} /* end rxrpc_initialise() */
+
+/*****************************************************************************/
+/*
+ * clean up the Rx module
+ */
+static void rxrpc_cleanup(void)
+{
+ kenter("");
+
+ __RXACCT(printk("Outstanding Messages : %d\n",atomic_read(&rxrpc_message_count)));
+ __RXACCT(printk("Outstanding Calls : %d\n",atomic_read(&rxrpc_call_count)));
+ __RXACCT(printk("Outstanding Connections: %d\n",atomic_read(&rxrpc_connection_count)));
+ __RXACCT(printk("Outstanding Peers : %d\n",atomic_read(&rxrpc_peer_count)));
+ __RXACCT(printk("Outstanding Transports : %d\n",atomic_read(&rxrpc_transport_count)));
+
+ rxrpc_krxsecd_kill();
+ rxrpc_krxiod_kill();
+ rxrpc_krxtimod_kill();
+#ifdef CONFIG_SYSCTL
+ rxrpc_sysctl_cleanup();
+#endif
+#ifdef CONFIG_PROC_FS
+ rxrpc_proc_cleanup();
+#endif
+
+ __RXACCT(printk("Outstanding Messages : %d\n",atomic_read(&rxrpc_message_count)));
+ __RXACCT(printk("Outstanding Calls : %d\n",atomic_read(&rxrpc_call_count)));
+ __RXACCT(printk("Outstanding Connections: %d\n",atomic_read(&rxrpc_connection_count)));
+ __RXACCT(printk("Outstanding Peers : %d\n",atomic_read(&rxrpc_peer_count)));
+ __RXACCT(printk("Outstanding Transports : %d\n",atomic_read(&rxrpc_transport_count)));
+
+ kleave();
+} /* end rxrpc_cleanup() */
diff --git a/net/rxrpc/peer.c b/net/rxrpc/peer.c
new file mode 100644
index 000000000000..cdd90014b6af
--- /dev/null
+++ b/net/rxrpc/peer.c
@@ -0,0 +1,380 @@
+/* peer.c: Rx RPC peer management
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <rxrpc/rxrpc.h>
+#include <rxrpc/transport.h>
+#include <rxrpc/peer.h>
+#include <rxrpc/connection.h>
+#include <rxrpc/call.h>
+#include <rxrpc/message.h>
+#include <linux/udp.h>
+#include <linux/ip.h>
+#include <net/sock.h>
+#include <asm/uaccess.h>
+#include <asm/div64.h>
+#include "internal.h"
+
+__RXACCT_DECL(atomic_t rxrpc_peer_count);
+LIST_HEAD(rxrpc_peers);
+DECLARE_RWSEM(rxrpc_peers_sem);
+
+static void __rxrpc_peer_timeout(rxrpc_timer_t *timer)
+{
+ struct rxrpc_peer *peer = list_entry(timer,struct rxrpc_peer,timeout);
+
+ _debug("Rx PEER TIMEOUT [%p{u=%d}]",peer,atomic_read(&peer->usage));
+
+ rxrpc_peer_do_timeout(peer);
+}
+
+static const struct rxrpc_timer_ops rxrpc_peer_timer_ops = {
+ .timed_out = __rxrpc_peer_timeout,
+};
+
+/*****************************************************************************/
+/*
+ * create a peer record
+ */
+static int __rxrpc_create_peer(struct rxrpc_transport *trans, u32 addr, struct rxrpc_peer **_peer)
+{
+ struct rxrpc_peer *peer;
+
+ _enter("%p,%08x",trans,ntohl(addr));
+
+ /* allocate and initialise a peer record */
+ peer = kmalloc(sizeof(struct rxrpc_peer),GFP_KERNEL);
+ if (!peer) {
+ _leave(" = -ENOMEM");
+ return -ENOMEM;
+ }
+
+ memset(peer,0,sizeof(struct rxrpc_peer));
+ atomic_set(&peer->usage,1);
+
+ INIT_LIST_HEAD(&peer->link);
+ INIT_LIST_HEAD(&peer->proc_link);
+ INIT_LIST_HEAD(&peer->conn_active);
+ INIT_LIST_HEAD(&peer->conn_graveyard);
+ spin_lock_init(&peer->conn_gylock);
+ init_waitqueue_head(&peer->conn_gy_waitq);
+ rwlock_init(&peer->conn_lock);
+ atomic_set(&peer->conn_count,0);
+ spin_lock_init(&peer->lock);
+ rxrpc_timer_init(&peer->timeout,&rxrpc_peer_timer_ops);
+
+ peer->addr.s_addr = addr;
+
+ peer->trans = trans;
+ peer->ops = trans->peer_ops;
+
+ __RXACCT(atomic_inc(&rxrpc_peer_count));
+ *_peer = peer;
+ _leave(" = 0 (%p)",peer);
+
+ return 0;
+} /* end __rxrpc_create_peer() */
+
+/*****************************************************************************/
+/*
+ * find a peer record on the specified transport
+ * - returns (if successful) with peer record usage incremented
+ * - resurrects it from the graveyard if found there
+ */
+int rxrpc_peer_lookup(struct rxrpc_transport *trans, u32 addr, struct rxrpc_peer **_peer)
+{
+ struct rxrpc_peer *peer, *candidate = NULL;
+ struct list_head *_p;
+ int ret;
+
+ _enter("%p{%hu},%08x",trans,trans->port,ntohl(addr));
+
+ /* [common case] search the transport's active list first */
+ read_lock(&trans->peer_lock);
+ list_for_each(_p,&trans->peer_active) {
+ peer = list_entry(_p,struct rxrpc_peer,link);
+ if (peer->addr.s_addr==addr)
+ goto found_active;
+ }
+ read_unlock(&trans->peer_lock);
+
+ /* [uncommon case] not active - create a candidate for a new record */
+ ret = __rxrpc_create_peer(trans,addr,&candidate);
+ if (ret<0) {
+ _leave(" = %d",ret);
+ return ret;
+ }
+
+ /* search the active list again, just in case it appeared whilst we were busy */
+ write_lock(&trans->peer_lock);
+ list_for_each(_p,&trans->peer_active) {
+ peer = list_entry(_p,struct rxrpc_peer,link);
+ if (peer->addr.s_addr==addr)
+ goto found_active_second_chance;
+ }
+
+ /* search the transport's graveyard list */
+ spin_lock(&trans->peer_gylock);
+ list_for_each(_p,&trans->peer_graveyard) {
+ peer = list_entry(_p,struct rxrpc_peer,link);
+ if (peer->addr.s_addr==addr)
+ goto found_in_graveyard;
+ }
+ spin_unlock(&trans->peer_gylock);
+
+ /* we can now add the new candidate to the list
+ * - tell the application layer that this peer has been added
+ */
+ rxrpc_get_transport(trans);
+ peer = candidate;
+ candidate = NULL;
+
+ if (peer->ops && peer->ops->adding) {
+ ret = peer->ops->adding(peer);
+ if (ret<0) {
+ write_unlock(&trans->peer_lock);
+ __RXACCT(atomic_dec(&rxrpc_peer_count));
+ kfree(peer);
+ rxrpc_put_transport(trans);
+ _leave(" = %d",ret);
+ return ret;
+ }
+ }
+
+ atomic_inc(&trans->peer_count);
+
+ make_active:
+ list_add_tail(&peer->link,&trans->peer_active);
+
+ success_uwfree:
+ write_unlock(&trans->peer_lock);
+
+ if (candidate) {
+ __RXACCT(atomic_dec(&rxrpc_peer_count));
+ kfree(candidate);
+ }
+
+ if (list_empty(&peer->proc_link)) {
+ down_write(&rxrpc_peers_sem);
+ list_add_tail(&peer->proc_link,&rxrpc_peers);
+ up_write(&rxrpc_peers_sem);
+ }
+
+ success:
+ *_peer = peer;
+
+ _leave(" = 0 (%p{u=%d cc=%d})",
+ peer,atomic_read(&peer->usage),atomic_read(&peer->conn_count));
+ return 0;
+
+ /* handle the peer being found in the active list straight off */
+ found_active:
+ rxrpc_get_peer(peer);
+ read_unlock(&trans->peer_lock);
+ goto success;
+
+ /* handle resurrecting a peer from the graveyard */
+ found_in_graveyard:
+ rxrpc_get_peer(peer);
+ rxrpc_get_transport(peer->trans);
+ rxrpc_krxtimod_del_timer(&peer->timeout);
+ list_del_init(&peer->link);
+ spin_unlock(&trans->peer_gylock);
+ goto make_active;
+
+ /* handle finding the peer on the second time through the active list */
+ found_active_second_chance:
+ rxrpc_get_peer(peer);
+ goto success_uwfree;
+
+} /* end rxrpc_peer_lookup() */
+
+/*****************************************************************************/
+/*
+ * finish with a peer record
+ * - it gets sent to the graveyard from where it can be resurrected or timed out
+ */
+void rxrpc_put_peer(struct rxrpc_peer *peer)
+{
+ struct rxrpc_transport *trans = peer->trans;
+
+ _enter("%p{cc=%d a=%08x}",peer,atomic_read(&peer->conn_count),ntohl(peer->addr.s_addr));
+
+ /* sanity check */
+ if (atomic_read(&peer->usage)<=0)
+ BUG();
+
+ write_lock(&trans->peer_lock);
+ spin_lock(&trans->peer_gylock);
+ if (likely(!atomic_dec_and_test(&peer->usage))) {
+ spin_unlock(&trans->peer_gylock);
+ write_unlock(&trans->peer_lock);
+ _leave("");
+ return;
+ }
+
+ /* move to graveyard queue */
+ list_del(&peer->link);
+ write_unlock(&trans->peer_lock);
+
+ list_add_tail(&peer->link,&trans->peer_graveyard);
+
+ if (!list_empty(&peer->conn_active)) BUG();
+
+ /* discard in 100 secs */
+ rxrpc_krxtimod_add_timer(&peer->timeout,100*HZ);
+
+ spin_unlock(&trans->peer_gylock);
+
+ rxrpc_put_transport(trans);
+
+ _leave(" [killed]");
+} /* end rxrpc_put_peer() */
+
+/*****************************************************************************/
+/*
+ * handle a peer timing out in the graveyard
+ * - called from krxtimod
+ */
+void rxrpc_peer_do_timeout(struct rxrpc_peer *peer)
+{
+ struct rxrpc_transport *trans = peer->trans;
+
+ _enter("%p{u=%d cc=%d a=%08x}",
+ peer,atomic_read(&peer->usage),atomic_read(&peer->conn_count),
+ ntohl(peer->addr.s_addr));
+
+ if (atomic_read(&peer->usage)<0)
+ BUG();
+
+ /* remove from graveyard if still dead */
+ spin_lock(&trans->peer_gylock);
+ if (atomic_read(&peer->usage)==0)
+ list_del_init(&peer->link);
+ else
+ peer = NULL;
+ spin_unlock(&trans->peer_gylock);
+
+ if (!peer) {
+ _leave("");
+ return; /* resurrected */
+ }
+
+ /* clear all connections on this peer */
+ rxrpc_conn_clearall(peer);
+
+ if (!list_empty(&peer->conn_active)) BUG();
+ if (!list_empty(&peer->conn_graveyard)) BUG();
+
+ /* inform the application layer */
+ if (peer->ops && peer->ops->discarding)
+ peer->ops->discarding(peer);
+
+ if (!list_empty(&peer->proc_link)) {
+ down_write(&rxrpc_peers_sem);
+ list_del(&peer->proc_link);
+ up_write(&rxrpc_peers_sem);
+ }
+
+ __RXACCT(atomic_dec(&rxrpc_peer_count));
+ kfree(peer);
+
+ /* if the graveyard is now empty, wake up anyone waiting for that */
+ if (atomic_dec_and_test(&trans->peer_count))
+ wake_up(&trans->peer_gy_waitq);
+
+ _leave(" [destroyed]");
+} /* end rxrpc_peer_do_timeout() */
+
+/*****************************************************************************/
+/*
+ * clear all peer records from a transport endpoint
+ */
+void rxrpc_peer_clearall(struct rxrpc_transport *trans)
+{
+ DECLARE_WAITQUEUE(myself,current);
+
+ struct rxrpc_peer *peer;
+ int err;
+
+ _enter("%p",trans);
+
+ /* there shouldn't be any active peers remaining */
+ if (!list_empty(&trans->peer_active))
+ BUG();
+
+ /* manually timeout all peers in the graveyard */
+ spin_lock(&trans->peer_gylock);
+ while (!list_empty(&trans->peer_graveyard)) {
+ peer = list_entry(trans->peer_graveyard.next,struct rxrpc_peer,link);
+ _debug("Clearing peer %p\n",peer);
+ err = rxrpc_krxtimod_del_timer(&peer->timeout);
+ spin_unlock(&trans->peer_gylock);
+
+ if (err==0)
+ rxrpc_peer_do_timeout(peer);
+
+ spin_lock(&trans->peer_gylock);
+ }
+ spin_unlock(&trans->peer_gylock);
+
+ /* wait for the peer graveyard to be completely cleared */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&trans->peer_gy_waitq,&myself);
+
+ while (atomic_read(&trans->peer_count)!=0) {
+ schedule();
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ }
+
+ remove_wait_queue(&trans->peer_gy_waitq,&myself);
+ set_current_state(TASK_RUNNING);
+
+ _leave("");
+
+} /* end rxrpc_peer_clearall() */
+
+/*****************************************************************************/
+/*
+ * calculate and cache the Round-Trip-Time for a message and its response
+ */
+void rxrpc_peer_calculate_rtt(struct rxrpc_peer *peer,
+ struct rxrpc_message *msg,
+ struct rxrpc_message *resp)
+{
+ unsigned long long rtt;
+ int loop;
+
+ _enter("%p,%p,%p",peer,msg,resp);
+
+ /* calculate the latest RTT */
+ rtt = resp->stamp.tv_sec - msg->stamp.tv_sec;
+ rtt *= 1000000UL;
+ rtt += resp->stamp.tv_usec - msg->stamp.tv_usec;
+
+ /* add to cache */
+ peer->rtt_cache[peer->rtt_point] = rtt;
+ peer->rtt_point++;
+ peer->rtt_point %= RXRPC_RTT_CACHE_SIZE;
+
+ if (peer->rtt_usage<RXRPC_RTT_CACHE_SIZE) peer->rtt_usage++;
+
+ /* recalculate RTT */
+ for (loop=peer->rtt_usage-1; loop>=0; loop--)
+ rtt += peer->rtt_cache[loop];
+
+ peer->rtt = do_div(rtt,peer->rtt_usage);
+
+ _leave(" RTT=%lu.%lums",peer->rtt/1000,peer->rtt%1000);
+
+} /* end rxrpc_peer_calculate_rtt() */
diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c
new file mode 100644
index 000000000000..aeb9d9e8ddae
--- /dev/null
+++ b/net/rxrpc/proc.c
@@ -0,0 +1,612 @@
+/* proc.c: /proc interface for RxRPC
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <rxrpc/rxrpc.h>
+#include <rxrpc/transport.h>
+#include <rxrpc/peer.h>
+#include <rxrpc/connection.h>
+#include <rxrpc/call.h>
+#include <rxrpc/message.h>
+#include "internal.h"
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
+static inline struct proc_dir_entry *PDE(const struct inode *inode)
+{
+ return (struct proc_dir_entry *)inode->u.generic_ip;
+}
+#endif
+
+static struct proc_dir_entry *proc_rxrpc;
+
+static int rxrpc_proc_transports_open(struct inode *inode, struct file *file);
+static void *rxrpc_proc_transports_start(struct seq_file *p, loff_t *pos);
+static void *rxrpc_proc_transports_next(struct seq_file *p, void *v, loff_t *pos);
+static void rxrpc_proc_transports_stop(struct seq_file *p, void *v);
+static int rxrpc_proc_transports_show(struct seq_file *m, void *v);
+
+static struct seq_operations rxrpc_proc_transports_ops = {
+ .start = rxrpc_proc_transports_start,
+ .next = rxrpc_proc_transports_next,
+ .stop = rxrpc_proc_transports_stop,
+ .show = rxrpc_proc_transports_show,
+};
+
+static struct file_operations rxrpc_proc_transports_fops = {
+ .open = rxrpc_proc_transports_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rxrpc_proc_peers_open(struct inode *inode, struct file *file);
+static void *rxrpc_proc_peers_start(struct seq_file *p, loff_t *pos);
+static void *rxrpc_proc_peers_next(struct seq_file *p, void *v, loff_t *pos);
+static void rxrpc_proc_peers_stop(struct seq_file *p, void *v);
+static int rxrpc_proc_peers_show(struct seq_file *m, void *v);
+
+static struct seq_operations rxrpc_proc_peers_ops = {
+ .start = rxrpc_proc_peers_start,
+ .next = rxrpc_proc_peers_next,
+ .stop = rxrpc_proc_peers_stop,
+ .show = rxrpc_proc_peers_show,
+};
+
+static struct file_operations rxrpc_proc_peers_fops = {
+ .open = rxrpc_proc_peers_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rxrpc_proc_conns_open(struct inode *inode, struct file *file);
+static void *rxrpc_proc_conns_start(struct seq_file *p, loff_t *pos);
+static void *rxrpc_proc_conns_next(struct seq_file *p, void *v, loff_t *pos);
+static void rxrpc_proc_conns_stop(struct seq_file *p, void *v);
+static int rxrpc_proc_conns_show(struct seq_file *m, void *v);
+
+static struct seq_operations rxrpc_proc_conns_ops = {
+ .start = rxrpc_proc_conns_start,
+ .next = rxrpc_proc_conns_next,
+ .stop = rxrpc_proc_conns_stop,
+ .show = rxrpc_proc_conns_show,
+};
+
+static struct file_operations rxrpc_proc_conns_fops = {
+ .open = rxrpc_proc_conns_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rxrpc_proc_calls_open(struct inode *inode, struct file *file);
+static void *rxrpc_proc_calls_start(struct seq_file *p, loff_t *pos);
+static void *rxrpc_proc_calls_next(struct seq_file *p, void *v, loff_t *pos);
+static void rxrpc_proc_calls_stop(struct seq_file *p, void *v);
+static int rxrpc_proc_calls_show(struct seq_file *m, void *v);
+
+static struct seq_operations rxrpc_proc_calls_ops = {
+ .start = rxrpc_proc_calls_start,
+ .next = rxrpc_proc_calls_next,
+ .stop = rxrpc_proc_calls_stop,
+ .show = rxrpc_proc_calls_show,
+};
+
+static struct file_operations rxrpc_proc_calls_fops = {
+ .open = rxrpc_proc_calls_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static const char *rxrpc_call_states7[] = {
+ "complet",
+ "error ",
+ "rcv_op ",
+ "rcv_arg",
+ "got_arg",
+ "snd_rpl",
+ "fin_ack",
+ "snd_arg",
+ "rcv_rpl",
+ "got_rpl"
+};
+
+static const char *rxrpc_call_error_states7[] = {
+ "no_err ",
+ "loc_abt",
+ "rmt_abt",
+ "loc_err",
+ "rmt_err"
+};
+
+/*****************************************************************************/
+/*
+ * initialise the /proc/net/rxrpc/ directory (NOTE(review): error unwind removes "conns" but the entry is created as "connections", and a "peers" failure goes to error_calls, leaking "connections" — verify cleanup labels)
+ */
+int rxrpc_proc_init(void)
+{
+ struct proc_dir_entry *p;
+
+ proc_rxrpc = proc_mkdir("rxrpc",proc_net);
+ if (!proc_rxrpc)
+ goto error;
+ proc_rxrpc->owner = THIS_MODULE;
+
+ p = create_proc_entry("calls",0,proc_rxrpc);
+ if (!p)
+ goto error_proc;
+ p->proc_fops = &rxrpc_proc_calls_fops;
+ p->owner = THIS_MODULE;
+
+ p = create_proc_entry("connections",0,proc_rxrpc);
+ if (!p)
+ goto error_calls;
+ p->proc_fops = &rxrpc_proc_conns_fops;
+ p->owner = THIS_MODULE;
+
+ p = create_proc_entry("peers",0,proc_rxrpc);
+ if (!p)
+ goto error_calls;
+ p->proc_fops = &rxrpc_proc_peers_fops;
+ p->owner = THIS_MODULE;
+
+ p = create_proc_entry("transports",0,proc_rxrpc);
+ if (!p)
+ goto error_conns;
+ p->proc_fops = &rxrpc_proc_transports_fops;
+ p->owner = THIS_MODULE;
+
+ return 0;
+
+ error_conns:
+ remove_proc_entry("conns",proc_rxrpc);
+ error_calls:
+ remove_proc_entry("calls",proc_rxrpc);
+ error_proc:
+ remove_proc_entry("rxrpc",proc_net);
+ error:
+ return -ENOMEM;
+} /* end rxrpc_proc_init() */
+
+/*****************************************************************************/
+/*
+ * clean up the /proc/net/rxrpc/ directory
+ */
+void rxrpc_proc_cleanup(void)
+{
+ remove_proc_entry("transports",proc_rxrpc);
+ remove_proc_entry("peers",proc_rxrpc);
+ remove_proc_entry("connections",proc_rxrpc);
+ remove_proc_entry("calls",proc_rxrpc);
+
+ remove_proc_entry("rxrpc",proc_net);
+
+} /* end rxrpc_proc_cleanup() */
+
+/*****************************************************************************/
+/*
+ * open "/proc/net/rxrpc/transports" which provides a summary of extant transports
+ */
+static int rxrpc_proc_transports_open(struct inode *inode, struct file *file)
+{
+ struct seq_file *m;
+ int ret;
+
+ ret = seq_open(file,&rxrpc_proc_transports_ops);
+ if (ret<0)
+ return ret;
+
+ m = file->private_data;
+ m->private = PDE(inode)->data;
+
+ return 0;
+} /* end rxrpc_proc_transports_open() */
+
+/*****************************************************************************/
+/*
+ * set up the iterator to start reading from the transports list and return the first item
+ */
+static void *rxrpc_proc_transports_start(struct seq_file *m, loff_t *_pos)
+{
+ struct list_head *_p;
+ loff_t pos = *_pos;
+
+ /* lock the list against modification */
+ down_read(&rxrpc_proc_transports_sem);
+
+ /* allow for the header line */
+ if (!pos)
+ return (void *)1;
+ pos--;
+
+ /* find the n'th element in the list */
+ list_for_each(_p,&rxrpc_proc_transports)
+ if (!pos--)
+ break;
+
+ return _p!=&rxrpc_proc_transports ? _p : NULL;
+} /* end rxrpc_proc_transports_start() */
+
+/*****************************************************************************/
+/*
+ * move to next call in transports list
+ */
+static void *rxrpc_proc_transports_next(struct seq_file *p, void *v, loff_t *pos)
+{
+ struct list_head *_p;
+
+ (*pos)++;
+
+ _p = v;
+ _p = v==(void*)1 ? rxrpc_proc_transports.next : _p->next;
+
+ return _p!=&rxrpc_proc_transports ? _p : NULL;
+} /* end rxrpc_proc_transports_next() */
+
+/*****************************************************************************/
+/*
+ * clean up after reading from the transports list
+ */
+static void rxrpc_proc_transports_stop(struct seq_file *p, void *v)
+{
+ up_read(&rxrpc_proc_transports_sem);
+
+} /* end rxrpc_proc_transports_stop() */
+
+/*****************************************************************************/
+/*
+ * display a header line followed by a load of call lines
+ */
+static int rxrpc_proc_transports_show(struct seq_file *m, void *v)
+{
+ struct rxrpc_transport *trans = list_entry(v,struct rxrpc_transport,proc_link);
+
+ /* display header on line 1 */
+ if (v == (void *)1) {
+ seq_puts(m, "LOCAL USE\n");
+ return 0;
+ }
+
+ /* display one transport per line on subsequent lines */
+ seq_printf(m,"%5hu %3d\n",
+ trans->port,
+ atomic_read(&trans->usage)
+ );
+
+ return 0;
+} /* end rxrpc_proc_transports_show() */
+
+/*****************************************************************************/
+/*
+ * open "/proc/net/rxrpc/peers" which provides a summary of extant peers
+ */
+static int rxrpc_proc_peers_open(struct inode *inode, struct file *file)
+{
+ struct seq_file *m;
+ int ret;
+
+ ret = seq_open(file,&rxrpc_proc_peers_ops);
+ if (ret<0)
+ return ret;
+
+ m = file->private_data;
+ m->private = PDE(inode)->data;
+
+ return 0;
+} /* end rxrpc_proc_peers_open() */
+
+/*****************************************************************************/
+/*
+ * set up the iterator to start reading from the peers list and return the first item
+ */
+static void *rxrpc_proc_peers_start(struct seq_file *m, loff_t *_pos)
+{
+ struct list_head *_p;
+ loff_t pos = *_pos;
+
+ /* lock the list against modification */
+ down_read(&rxrpc_peers_sem);
+
+ /* allow for the header line */
+ if (!pos)
+ return (void *)1;
+ pos--;
+
+ /* find the n'th element in the list */
+ list_for_each(_p,&rxrpc_peers)
+ if (!pos--)
+ break;
+
+ return _p!=&rxrpc_peers ? _p : NULL;
+} /* end rxrpc_proc_peers_start() */
+
+/*****************************************************************************/
+/*
+ * move to next conn in peers list
+ */
+static void *rxrpc_proc_peers_next(struct seq_file *p, void *v, loff_t *pos)
+{
+ struct list_head *_p;
+
+ (*pos)++;
+
+ _p = v;
+ _p = v==(void*)1 ? rxrpc_peers.next : _p->next;
+
+ return _p!=&rxrpc_peers ? _p : NULL;
+} /* end rxrpc_proc_peers_next() */
+
+/*****************************************************************************/
+/*
+ * clean up after reading from the peers list
+ */
+static void rxrpc_proc_peers_stop(struct seq_file *p, void *v)
+{
+ up_read(&rxrpc_peers_sem);
+
+} /* end rxrpc_proc_peers_stop() */
+
+/*****************************************************************************/
+/*
+ * display a header line followed by a load of conn lines
+ */
+static int rxrpc_proc_peers_show(struct seq_file *m, void *v)
+{
+ struct rxrpc_peer *peer = list_entry(v,struct rxrpc_peer,proc_link);
+ signed long timeout;
+
+ /* display header on line 1 */
+ if (v == (void *)1) {
+ seq_puts(m,"LOCAL REMOTE USAGE CONNS TIMEOUT MTU RTT(uS)\n");
+ return 0;
+ }
+
+ /* display one peer per line on subsequent lines */
+ timeout = 0;
+ if (!list_empty(&peer->timeout.link))
+ timeout = (signed long)peer->timeout.timo_jif - (signed long)jiffies;
+
+ seq_printf(m,"%5hu %08x %5d %5d %8ld %5u %7lu\n",
+ peer->trans->port,
+ ntohl(peer->addr.s_addr),
+ atomic_read(&peer->usage),
+ atomic_read(&peer->conn_count),
+ timeout,
+ peer->if_mtu,
+ peer->rtt
+ );
+
+ return 0;
+} /* end rxrpc_proc_peers_show() */
+
+/*****************************************************************************/
+/*
+ * open "/proc/net/rxrpc/connections" which provides a summary of extant connections
+ */
+static int rxrpc_proc_conns_open(struct inode *inode, struct file *file)
+{
+ struct seq_file *m;
+ int ret;
+
+ ret = seq_open(file,&rxrpc_proc_conns_ops);
+ if (ret<0)
+ return ret;
+
+ m = file->private_data;
+ m->private = PDE(inode)->data;
+
+ return 0;
+} /* end rxrpc_proc_conns_open() */
+
+/*****************************************************************************/
+/*
+ * set up the iterator to start reading from the conns list and return the first item
+ */
+static void *rxrpc_proc_conns_start(struct seq_file *m, loff_t *_pos)
+{
+ struct list_head *_p;
+ loff_t pos = *_pos;
+
+ /* lock the list against modification */
+ down_read(&rxrpc_conns_sem);
+
+ /* allow for the header line */
+ if (!pos)
+ return (void *)1;
+ pos--;
+
+ /* find the n'th element in the list */
+ list_for_each(_p,&rxrpc_conns)
+ if (!pos--)
+ break;
+
+ return _p!=&rxrpc_conns ? _p : NULL;
+} /* end rxrpc_proc_conns_start() */
+
+/*****************************************************************************/
+/*
+ * move to next conn in conns list
+ */
+static void *rxrpc_proc_conns_next(struct seq_file *p, void *v, loff_t *pos)
+{
+ struct list_head *_p;
+
+ (*pos)++;
+
+ _p = v;
+ _p = v==(void*)1 ? rxrpc_conns.next : _p->next;
+
+ return _p!=&rxrpc_conns ? _p : NULL;
+} /* end rxrpc_proc_conns_next() */
+
+/*****************************************************************************/
+/*
+ * clean up after reading from the conns list
+ */
+static void rxrpc_proc_conns_stop(struct seq_file *p, void *v)
+{
+ up_read(&rxrpc_conns_sem);
+
+} /* end rxrpc_proc_conns_stop() */
+
+/*****************************************************************************/
+/*
+ * display a header line followed by a load of conn lines
+ */
+static int rxrpc_proc_conns_show(struct seq_file *m, void *v)
+{
+ struct rxrpc_connection *conn = list_entry(v,struct rxrpc_connection,proc_link);
+ signed long timeout;
+
+ /* display header on line 1 */
+ if (v == (void *)1) {
+ seq_puts(m,
+ "LOCAL REMOTE RPORT SRVC CONN END SERIALNO CALLNO MTU TIMEOUT"
+ "\n");
+ return 0;
+ }
+
+ /* display one conn per line on subsequent lines */
+ timeout = 0;
+ if (!list_empty(&conn->timeout.link))
+ timeout = (signed long)conn->timeout.timo_jif - (signed long)jiffies;
+
+ seq_printf(m,"%5hu %08x %5hu %04hx %08x %-3.3s %08x %08x %5u %8ld\n",
+ conn->trans->port,
+ ntohl(conn->addr.sin_addr.s_addr),
+ ntohs(conn->addr.sin_port),
+ ntohs(conn->service_id),
+ ntohl(conn->conn_id),
+ conn->out_clientflag ? "CLT" : "SRV",
+ conn->serial_counter,
+ conn->call_counter,
+ conn->mtu_size,
+ timeout
+ );
+
+ return 0;
+} /* end rxrpc_proc_conns_show() */
+
+/*****************************************************************************/
+/*
+ * open "/proc/net/rxrpc/calls" which provides a summary of extant calls
+ */
+static int rxrpc_proc_calls_open(struct inode *inode, struct file *file)
+{
+ struct seq_file *m;
+ int ret;
+
+ ret = seq_open(file,&rxrpc_proc_calls_ops);
+ if (ret<0)
+ return ret;
+
+ m = file->private_data;
+ m->private = PDE(inode)->data;
+
+ return 0;
+} /* end rxrpc_proc_calls_open() */
+
+/*****************************************************************************/
+/*
+ * set up the iterator to start reading from the calls list and return the first item
+ */
+static void *rxrpc_proc_calls_start(struct seq_file *m, loff_t *_pos)
+{
+ struct list_head *_p;
+ loff_t pos = *_pos;
+
+ /* lock the list against modification */
+ down_read(&rxrpc_calls_sem);
+
+ /* allow for the header line */
+ if (!pos)
+ return (void *)1;
+ pos--;
+
+ /* find the n'th element in the list */
+ list_for_each(_p,&rxrpc_calls)
+ if (!pos--)
+ break;
+
+ return _p!=&rxrpc_calls ? _p : NULL;
+} /* end rxrpc_proc_calls_start() */
+
+/*****************************************************************************/
+/*
+ * move to next call in calls list
+ */
+static void *rxrpc_proc_calls_next(struct seq_file *p, void *v, loff_t *pos)
+{
+ struct list_head *_p;
+
+ (*pos)++;
+
+ _p = v;
+ _p = v==(void*)1 ? rxrpc_calls.next : _p->next;
+
+ return _p!=&rxrpc_calls ? _p : NULL;
+} /* end rxrpc_proc_calls_next() */
+
+/*****************************************************************************/
+/*
+ * clean up after reading from the calls list
+ */
+static void rxrpc_proc_calls_stop(struct seq_file *p, void *v)
+{
+ up_read(&rxrpc_calls_sem);
+
+} /* end rxrpc_proc_calls_stop() */
+
+/*****************************************************************************/
+/*
+ * display a header line followed by a load of call lines
+ */
+static int rxrpc_proc_calls_show(struct seq_file *m, void *v)
+{
+ struct rxrpc_call *call = list_entry(v,struct rxrpc_call,call_link);
+
+ /* display header on line 1 */
+ if (v == (void *)1) {
+ seq_puts(m,
+ "LOCAL REMOT SRVC CONN CALL DIR USE "
+ " L STATE OPCODE ABORT ERRNO\n"
+ );
+ return 0;
+ }
+
+ /* display one call per line on subsequent lines */
+ seq_printf(m,
+ "%5hu %5hu %04hx %08x %08x %s %3u%c"
+ " %c %-7.7s %6d %08x %5d\n",
+ call->conn->trans->port,
+ ntohs(call->conn->addr.sin_port),
+ ntohs(call->conn->service_id),
+ ntohl(call->conn->conn_id),
+ ntohl(call->call_id),
+ call->conn->service ? "SVC" : "CLT",
+ atomic_read(&call->usage),
+ waitqueue_active(&call->waitq) ? 'w' : ' ',
+ call->app_last_rcv ? 'Y' : '-',
+ (call->app_call_state!=RXRPC_CSTATE_ERROR ?
+ rxrpc_call_states7[call->app_call_state] :
+ rxrpc_call_error_states7[call->app_err_state]),
+ call->app_opcode,
+ call->app_abort_code,
+ call->app_errno
+ );
+
+ return 0;
+} /* end rxrpc_proc_calls_show() */
diff --git a/net/rxrpc/rxrpc_syms.c b/net/rxrpc/rxrpc_syms.c
new file mode 100644
index 000000000000..3b33b7e5cbd7
--- /dev/null
+++ b/net/rxrpc/rxrpc_syms.c
@@ -0,0 +1,51 @@
+/* rxrpc_syms.c: exported Rx RPC layer interface symbols
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+
+#include <rxrpc/transport.h>
+#include <rxrpc/connection.h>
+#include <rxrpc/call.h>
+#include <rxrpc/krxiod.h>
+
+/* call.c */
+EXPORT_SYMBOL(rxrpc_call_rcv_timeout);
+EXPORT_SYMBOL(rxrpc_call_acks_timeout);
+EXPORT_SYMBOL(rxrpc_call_dfr_ack_timeout);
+EXPORT_SYMBOL(rxrpc_call_max_resend);
+EXPORT_SYMBOL(rxrpc_call_states);
+EXPORT_SYMBOL(rxrpc_call_error_states);
+
+EXPORT_SYMBOL(rxrpc_create_call);
+EXPORT_SYMBOL(rxrpc_incoming_call);
+EXPORT_SYMBOL(rxrpc_put_call);
+EXPORT_SYMBOL(rxrpc_call_abort);
+EXPORT_SYMBOL(rxrpc_call_read_data);
+EXPORT_SYMBOL(rxrpc_call_write_data);
+EXPORT_SYMBOL(rxrpc_call_flush);
+
+/* connection.c */
+EXPORT_SYMBOL(rxrpc_create_connection);
+EXPORT_SYMBOL(rxrpc_put_connection);
+
+/* sysctl.c */
+EXPORT_SYMBOL(rxrpc_ktrace);
+EXPORT_SYMBOL(rxrpc_kdebug);
+EXPORT_SYMBOL(rxrpc_kproto);
+EXPORT_SYMBOL(rxrpc_knet);
+
+/* transport.c */
+EXPORT_SYMBOL(rxrpc_create_transport);
+EXPORT_SYMBOL(rxrpc_clear_transport);
+EXPORT_SYMBOL(rxrpc_put_transport);
+EXPORT_SYMBOL(rxrpc_add_service);
+EXPORT_SYMBOL(rxrpc_del_service);
diff --git a/net/rxrpc/sysctl.c b/net/rxrpc/sysctl.c
new file mode 100644
index 000000000000..a2d0d73b59f9
--- /dev/null
+++ b/net/rxrpc/sysctl.c
@@ -0,0 +1,73 @@
+/* sysctl.c: Rx RPC control
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/sysctl.h>
+#include <linux/config.h>
+#include <rxrpc/types.h>
+#include <rxrpc/rxrpc.h>
+#include <asm/errno.h>
+#include "internal.h"
+
+int rxrpc_ktrace;
+int rxrpc_kdebug;
+int rxrpc_kproto;
+int rxrpc_knet;
+
+#ifdef CONFIG_SYSCTL
+static struct ctl_table_header *rxrpc_sysctl = NULL;
+
+static ctl_table rxrpc_sysctl_table[] = {
+ { 1, "kdebug", &rxrpc_kdebug, sizeof(int), 0644, NULL, &proc_dointvec },
+ { 2, "ktrace", &rxrpc_ktrace, sizeof(int), 0644, NULL, &proc_dointvec },
+ { 3, "kproto", &rxrpc_kproto, sizeof(int), 0644, NULL, &proc_dointvec },
+ { 4, "knet", &rxrpc_knet, sizeof(int), 0644, NULL, &proc_dointvec },
+ { 0 }
+};
+
+static ctl_table rxrpc_dir_sysctl_table[] = {
+ { 1, "rxrpc", NULL, 0, 0555, rxrpc_sysctl_table },
+ { 0 }
+};
+#endif /* CONFIG_SYSCTL */
+
+/*****************************************************************************/
+/*
+ * initialise the sysctl stuff for Rx RPC
+ */
+int rxrpc_sysctl_init(void)
+{
+#ifdef CONFIG_SYSCTL
+ rxrpc_sysctl = register_sysctl_table(rxrpc_dir_sysctl_table,0);
+ if (!rxrpc_sysctl)
+ return -ENOMEM;
+#endif /* CONFIG_SYSCTL */
+
+ return 0;
+} /* end rxrpc_sysctl_init() */
+
+/*****************************************************************************/
+/*
+ * clean up the sysctl stuff for Rx RPC
+ */
+void rxrpc_sysctl_cleanup(void)
+{
+#ifdef CONFIG_SYSCTL
+ if (rxrpc_sysctl) {
+ unregister_sysctl_table(rxrpc_sysctl);
+ rxrpc_sysctl = NULL;
+ }
+#endif /* CONFIG_SYSCTL */
+
+} /* end rxrpc_sysctl_cleanup() */
diff --git a/net/rxrpc/transport.c b/net/rxrpc/transport.c
new file mode 100644
index 000000000000..f1dd614c7251
--- /dev/null
+++ b/net/rxrpc/transport.c
@@ -0,0 +1,824 @@
+/* transport.c: Rx Transport routines
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <rxrpc/transport.h>
+#include <rxrpc/peer.h>
+#include <rxrpc/connection.h>
+#include <rxrpc/call.h>
+#include <rxrpc/message.h>
+#include <rxrpc/krxiod.h>
+#include <rxrpc/krxsecd.h>
+#include <linux/udp.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/icmp.h>
+#include <net/sock.h>
+#include <net/ip.h>
+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#include <linux/ipv6.h> /* this should _really_ be in errqueue.h.. */
+#endif
+#include <linux/errqueue.h>
+#include <asm/uaccess.h>
+#include <asm/checksum.h>
+#include "internal.h"
+
+struct errormsg {
+ struct cmsghdr cmsg; /* control message header */
+ struct sock_extended_err ee; /* extended error information */
+ struct sockaddr_in icmp_src; /* ICMP packet source address */
+};
+
+static spinlock_t rxrpc_transports_lock = SPIN_LOCK_UNLOCKED;
+static struct list_head rxrpc_transports = LIST_HEAD_INIT(rxrpc_transports);
+
+__RXACCT_DECL(atomic_t rxrpc_transport_count);
+LIST_HEAD(rxrpc_proc_transports);
+DECLARE_RWSEM(rxrpc_proc_transports_sem);
+
+static void rxrpc_data_ready(struct sock *sk, int count);
+static void rxrpc_error_report(struct sock *sk);
+static int rxrpc_trans_receive_new_call(struct rxrpc_transport *trans,
+ struct list_head *msgq);
+static void rxrpc_trans_receive_error_report(struct rxrpc_transport *trans);
+
+/*****************************************************************************/
+/*
+ * create a new transport endpoint using the specified UDP port
+ */
+int rxrpc_create_transport(unsigned short port, struct rxrpc_transport **_trans)
+{
+ struct rxrpc_transport *trans;
+ struct sockaddr_in sin;
+ mm_segment_t oldfs;
+ struct sock *sock;
+ int ret, opt;
+
+ _enter("%hu",port);
+
+ trans = kmalloc(sizeof(struct rxrpc_transport),GFP_KERNEL);
+ if (!trans)
+ return -ENOMEM;
+
+ memset(trans,0,sizeof(struct rxrpc_transport));
+ atomic_set(&trans->usage,1);
+ INIT_LIST_HEAD(&trans->services);
+ INIT_LIST_HEAD(&trans->link);
+ INIT_LIST_HEAD(&trans->krxiodq_link);
+ spin_lock_init(&trans->lock);
+ INIT_LIST_HEAD(&trans->peer_active);
+ INIT_LIST_HEAD(&trans->peer_graveyard);
+ spin_lock_init(&trans->peer_gylock);
+ init_waitqueue_head(&trans->peer_gy_waitq);
+ rwlock_init(&trans->peer_lock);
+ atomic_set(&trans->peer_count,0);
+ trans->port = port;
+
+ /* create a UDP socket to be my actual transport endpoint */
+ ret = sock_create(PF_INET,SOCK_DGRAM,IPPROTO_UDP,&trans->socket);
+ if (ret<0)
+ goto error;
+
+ /* use the specified port */
+ if (port) {
+ memset(&sin,0,sizeof(sin));
+ sin.sin_family = AF_INET;
+ sin.sin_port = htons(port);
+ ret = trans->socket->ops->bind(trans->socket,(struct sockaddr *)&sin,sizeof(sin));
+ if (ret<0)
+ goto error;
+ }
+
+ opt = 1;
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = trans->socket->ops->setsockopt(trans->socket,SOL_IP,IP_RECVERR,
+ (char*)&opt,sizeof(opt));
+ set_fs(oldfs);
+
+ spin_lock(&rxrpc_transports_lock);
+ list_add(&trans->link,&rxrpc_transports);
+ spin_unlock(&rxrpc_transports_lock);
+
+ /* set the socket up */
+ sock = trans->socket->sk;
+ sock->user_data = trans;
+ sock->data_ready = rxrpc_data_ready;
+ sock->error_report = rxrpc_error_report;
+
+ down_write(&rxrpc_proc_transports_sem);
+ list_add_tail(&trans->proc_link,&rxrpc_proc_transports);
+ up_write(&rxrpc_proc_transports_sem);
+
+ __RXACCT(atomic_inc(&rxrpc_transport_count));
+
+ *_trans = trans;
+ _leave(" = 0 (%p)",trans);
+ return 0;
+
+ error:
+ rxrpc_put_transport(trans);
+
+ _leave(" = %d",ret);
+
+ return ret;
+
+} /* end rxrpc_create_transport() */
+
+/*****************************************************************************/
+/*
+ * clear the connections on a transport endpoint
+ */
+void rxrpc_clear_transport(struct rxrpc_transport *trans)
+{
+ //struct rxrpc_connection *conn;
+
+} /* end rxrpc_clear_transport() */
+
+/*****************************************************************************/
+/*
+ * destroy a transport endpoint
+ */
+void rxrpc_put_transport(struct rxrpc_transport *trans)
+{
+ _enter("%p{u=%d p=%hu}",trans,atomic_read(&trans->usage),trans->port);
+
+ if (atomic_read(&trans->usage)<=0)
+ BUG();
+
+ /* to prevent a race, the decrement and the dequeue must be effectively atomic */
+ spin_lock(&rxrpc_transports_lock);
+ if (likely(!atomic_dec_and_test(&trans->usage))) {
+ spin_unlock(&rxrpc_transports_lock);
+ _leave("");
+ return;
+ }
+
+ list_del(&trans->link);
+ spin_unlock(&rxrpc_transports_lock);
+
+ /* finish cleaning up the transport */
+ if (trans->socket)
+ trans->socket->ops->shutdown(trans->socket,2);
+
+ rxrpc_krxsecd_clear_transport(trans);
+ rxrpc_krxiod_dequeue_transport(trans);
+
+ /* discard all peer information */
+ rxrpc_peer_clearall(trans);
+
+ down_write(&rxrpc_proc_transports_sem);
+ list_del(&trans->proc_link);
+ up_write(&rxrpc_proc_transports_sem);
+ __RXACCT(atomic_dec(&rxrpc_transport_count));
+
+ /* close the socket */
+ if (trans->socket) {
+ trans->socket->sk->user_data = NULL;
+ sock_release(trans->socket);
+ trans->socket = NULL;
+ }
+
+ kfree(trans);
+
+ _leave("");
+
+} /* end rxrpc_put_transport() */
+
+/*****************************************************************************/
+/*
+ * add a service to a transport to be listened upon
+ */
+int rxrpc_add_service(struct rxrpc_transport *trans, struct rxrpc_service *newsrv)
+{
+ struct rxrpc_service *srv;
+ struct list_head *_p;
+ int ret = -EEXIST;
+
+ _enter("%p{%hu},%p{%hu}",trans,trans->port,newsrv,newsrv->service_id);
+
+ /* verify that the service ID is not already present */
+ spin_lock(&trans->lock);
+
+ list_for_each(_p,&trans->services) {
+ srv = list_entry(_p,struct rxrpc_service,link);
+ if (srv->service_id==newsrv->service_id)
+ goto out;
+ }
+
+ /* okay - add the transport to the list */
+ list_add_tail(&newsrv->link,&trans->services);
+ rxrpc_get_transport(trans);
+ ret = 0;
+
+ out:
+ spin_unlock(&trans->lock);
+
+ _leave("= %d",ret);
+ return ret;
+
+} /* end rxrpc_add_service() */
+
+/*****************************************************************************/
+/*
+ * remove a service from a transport
+ */
+void rxrpc_del_service(struct rxrpc_transport *trans, struct rxrpc_service *srv)
+{
+ _enter("%p{%hu},%p{%hu}",trans,trans->port,srv,srv->service_id);
+
+ spin_lock(&trans->lock);
+ list_del(&srv->link);
+ spin_unlock(&trans->lock);
+
+ rxrpc_put_transport(trans);
+
+ _leave("");
+
+} /* end rxrpc_del_service() */
+
+/*****************************************************************************/
+/*
+ * INET callback when data has been received on the socket.
+ */
+static void rxrpc_data_ready(struct sock *sk, int count)
+{
+ struct rxrpc_transport *trans;
+
+ _enter("%p{t=%p},%d",sk,sk->user_data,count);
+
+ /* queue the transport for attention by krxiod */
+ trans = (struct rxrpc_transport *) sk->user_data;
+ if (trans)
+ rxrpc_krxiod_queue_transport(trans);
+
+ /* wake up anyone waiting on the socket */
+ if (sk->sleep && waitqueue_active(sk->sleep))
+ wake_up_interruptible(sk->sleep);
+
+ _leave("");
+
+} /* end rxrpc_data_ready() */
+
+/*****************************************************************************/
+/*
+ * INET callback when an ICMP error packet is received
+ * - sk->err is error (EHOSTUNREACH, EPROTO or EMSGSIZE)
+ */
+static void rxrpc_error_report(struct sock *sk)
+{
+ struct rxrpc_transport *trans;
+
+ _enter("%p{t=%p}",sk,sk->user_data);
+
+ /* queue the transport for attention by krxiod */
+ trans = (struct rxrpc_transport *) sk->user_data;
+ if (trans) {
+ trans->error_rcvd = 1;
+ rxrpc_krxiod_queue_transport(trans);
+ }
+
+ /* wake up anyone waiting on the socket */
+ if (sk->sleep && waitqueue_active(sk->sleep))
+ wake_up_interruptible(sk->sleep);
+
+ _leave("");
+
+} /* end rxrpc_error_report() */
+
+/*****************************************************************************/
+/*
+ * split a message up, allocating message records and filling them in from the contents of a
+ * socket buffer
+ */
+static int rxrpc_incoming_msg(struct rxrpc_transport *trans,
+ struct sk_buff *pkt,
+ struct list_head *msgq)
+{
+ struct rxrpc_message *msg;
+ int ret;
+
+ _enter("");
+
+ msg = kmalloc(sizeof(struct rxrpc_message),GFP_KERNEL);
+ if (!msg) {
+ _leave(" = -ENOMEM");
+ return -ENOMEM;
+ }
+
+ memset(msg,0,sizeof(*msg));
+ atomic_set(&msg->usage,1);
+ list_add_tail(&msg->link,msgq);
+
+ /* dig out the Rx routing parameters */
+ if (skb_copy_bits(pkt,sizeof(struct udphdr),&msg->hdr,sizeof(msg->hdr))<0) {
+ ret = -EBADMSG;
+ goto error;
+ }
+
+ msg->trans = trans;
+ msg->state = RXRPC_MSG_RECEIVED;
+ msg->stamp = pkt->stamp;
+ msg->seq = ntohl(msg->hdr.seq);
+
+ /* attach the packet */
+ skb_get(pkt);
+ msg->pkt = pkt;
+
+ msg->offset = sizeof(struct udphdr) + sizeof(struct rxrpc_header);
+ msg->dsize = msg->pkt->len - msg->offset;
+
+ _net("Rx Received packet from %s (%08x;%08x,%1x,%d,%s,%02x,%d,%d)",
+ msg->hdr.flags & RXRPC_CLIENT_INITIATED ? "client" : "server",
+ ntohl(msg->hdr.epoch),
+ (ntohl(msg->hdr.cid) & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT,
+ ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK,
+ ntohl(msg->hdr.callNumber),
+ rxrpc_pkts[msg->hdr.type],
+ msg->hdr.flags,
+ ntohs(msg->hdr.serviceId),
+ msg->hdr.securityIndex);
+
+ __RXACCT(atomic_inc(&rxrpc_message_count));
+
+ /* split off jumbo packets */
+ while (msg->hdr.type==RXRPC_PACKET_TYPE_DATA && msg->hdr.flags & RXRPC_JUMBO_PACKET) {
+ struct rxrpc_jumbo_header jumbo;
+ struct rxrpc_message *jumbomsg = msg;
+
+ _debug("split jumbo packet");
+
+ /* quick sanity check */
+ ret = -EBADMSG;
+ if (msg->dsize < RXRPC_JUMBO_DATALEN+sizeof(struct rxrpc_jumbo_header))
+ goto error;
+ if (msg->hdr.flags & RXRPC_LAST_PACKET)
+ goto error;
+
+ /* dig out the secondary header */
+ if (skb_copy_bits(pkt,msg->offset+RXRPC_JUMBO_DATALEN,&jumbo,sizeof(jumbo))<0)
+ goto error;
+
+ /* allocate a new message record */
+ ret = -ENOMEM;
+ msg = kmalloc(sizeof(struct rxrpc_message),GFP_KERNEL);
+ if (!msg)
+ goto error;
+
+ memcpy(msg,jumbomsg,sizeof(*msg));
+ list_add_tail(&msg->link,msgq);
+
+ /* adjust the jumbo packet */
+ jumbomsg->dsize = RXRPC_JUMBO_DATALEN;
+
+ /* attach the packet here too */
+ skb_get(pkt);
+
+ /* adjust the parameters */
+ msg->seq++;
+ msg->hdr.seq = htonl(msg->seq);
+ msg->hdr.serial = htonl(ntohl(msg->hdr.serial) + 1);
+ msg->offset += RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header);
+ msg->dsize -= RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header);
+ msg->hdr.flags = jumbo.flags;
+ msg->hdr._rsvd = jumbo._rsvd;
+
+ _net("Rx Split jumbo packet from %s (%08x;%08x,%1x,%d,%s,%02x,%d,%d)",
+ msg->hdr.flags & RXRPC_CLIENT_INITIATED ? "client" : "server",
+ ntohl(msg->hdr.epoch),
+ (ntohl(msg->hdr.cid) & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT,
+ ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK,
+ ntohl(msg->hdr.callNumber),
+ rxrpc_pkts[msg->hdr.type],
+ msg->hdr.flags,
+ ntohs(msg->hdr.serviceId),
+ msg->hdr.securityIndex);
+
+ __RXACCT(atomic_inc(&rxrpc_message_count));
+ }
+
+ _leave(" = 0 #%d",atomic_read(&rxrpc_message_count));
+ return 0;
+
+ error:
+ while (!list_empty(msgq)) {
+ msg = list_entry(msgq->next,struct rxrpc_message,link);
+ list_del_init(&msg->link);
+
+ rxrpc_put_message(msg);
+ }
+
+ _leave(" = %d",ret);
+ return ret;
+} /* end rxrpc_incoming_msg() */
+
+/*****************************************************************************/
+/*
+ * accept a new call
+ * - called from krxiod in process context
+ */
+void rxrpc_trans_receive_packet(struct rxrpc_transport *trans)
+{
+ struct rxrpc_message *msg;
+ struct rxrpc_peer *peer;
+ struct sk_buff *pkt;
+ int ret;
+ u32 addr;
+ u16 port;
+
+ LIST_HEAD(msgq);
+
+ _enter("%p{%d}",trans,trans->port);
+
+ for (;;) {
+ /* deal with outstanding errors first */
+ if (trans->error_rcvd)
+ rxrpc_trans_receive_error_report(trans);
+
+ /* attempt to receive a packet */
+ pkt = skb_recv_datagram(trans->socket->sk,0,1,&ret);
+ if (!pkt) {
+ if (ret==-EAGAIN) {
+ _leave(" EAGAIN");
+ return;
+ }
+
+ /* an icmp error may have occurred */
+ rxrpc_krxiod_queue_transport(trans);
+ _leave(" error %d\n",ret);
+ return;
+ }
+
+ /* we'll probably need to checksum it (didn't call sock_recvmsg) */
+ if (pkt->ip_summed != CHECKSUM_UNNECESSARY) {
+ if ((unsigned short)csum_fold(skb_checksum(pkt,0,pkt->len,pkt->csum))) {
+ kfree_skb(pkt);
+ rxrpc_krxiod_queue_transport(trans);
+ _leave(" CSUM failed");
+ return;
+ }
+ }
+
+ addr = pkt->nh.iph->saddr;
+ port = pkt->h.uh->source;
+
+ _net("Rx Received UDP packet from %08x:%04hu",ntohl(addr),ntohs(port));
+
+ /* unmarshall the Rx parameters and split jumbo packets */
+ ret = rxrpc_incoming_msg(trans,pkt,&msgq);
+ if (ret<0) {
+ kfree_skb(pkt);
+ rxrpc_krxiod_queue_transport(trans);
+ _leave(" bad packet");
+ return;
+ }
+
+ if (list_empty(&msgq)) BUG();
+
+ msg = list_entry(msgq.next,struct rxrpc_message,link);
+
+ /* locate the record for the peer from which it originated */
+ ret = rxrpc_peer_lookup(trans,addr,&peer);
+ if (ret<0) {
+ kdebug("Rx No connections from that peer");
+ rxrpc_trans_immediate_abort(trans,msg,-EINVAL);
+ goto finished_msg;
+ }
+
+ /* try and find a matching connection */
+ ret = rxrpc_connection_lookup(peer,msg,&msg->conn);
+ if (ret<0) {
+ kdebug("Rx Unknown Connection");
+ rxrpc_trans_immediate_abort(trans,msg,-EINVAL);
+ rxrpc_put_peer(peer);
+ goto finished_msg;
+ }
+ rxrpc_put_peer(peer);
+
+ /* deal with the first packet of a new call */
+ if (msg->hdr.flags & RXRPC_CLIENT_INITIATED &&
+ msg->hdr.type==RXRPC_PACKET_TYPE_DATA &&
+ ntohl(msg->hdr.seq)==1
+ ) {
+ _debug("Rx New server call");
+ rxrpc_trans_receive_new_call(trans,&msgq);
+ goto finished_msg;
+ }
+
+ /* deal with subsequent packet(s) of call */
+ _debug("Rx Call packet");
+ while (!list_empty(&msgq)) {
+ msg = list_entry(msgq.next,struct rxrpc_message,link);
+ list_del_init(&msg->link);
+
+ ret = rxrpc_conn_receive_call_packet(msg->conn,NULL,msg);
+ if (ret<0) {
+ rxrpc_trans_immediate_abort(trans,msg,ret);
+ rxrpc_put_message(msg);
+ goto finished_msg;
+ }
+
+ rxrpc_put_message(msg);
+ }
+
+ goto finished_msg;
+
+ /* dispose of the packets */
+ finished_msg:
+ while (!list_empty(&msgq)) {
+ msg = list_entry(msgq.next,struct rxrpc_message,link);
+ list_del_init(&msg->link);
+
+ rxrpc_put_message(msg);
+ }
+ kfree_skb(pkt);
+ }
+
+ _leave("");
+
+} /* end rxrpc_trans_receive_packet() */
+
+/*****************************************************************************/
+/*
+ * accept a new call from a client trying to connect to one of my services
+ * - called in process context
+ */
+static int rxrpc_trans_receive_new_call(struct rxrpc_transport *trans,
+ struct list_head *msgq)
+{
+ struct rxrpc_message *msg;
+
+ _enter("");
+
+ /* only bother with the first packet */
+ msg = list_entry(msgq->next,struct rxrpc_message,link);
+ list_del_init(&msg->link);
+ rxrpc_krxsecd_queue_incoming_call(msg);
+ rxrpc_put_message(msg);
+
+ _leave(" = 0");
+
+ return 0;
+} /* end rxrpc_trans_receive_new_call() */
+
+/*****************************************************************************/
+/*
+ * perform an immediate abort without connection or call structures
+ */
+int rxrpc_trans_immediate_abort(struct rxrpc_transport *trans,
+ struct rxrpc_message *msg,
+ int error)
+{
+ struct rxrpc_header ahdr;
+ struct sockaddr_in sin;
+ struct msghdr msghdr;
+ struct iovec iov[2];
+ mm_segment_t oldfs;
+ int len, ret;
+ u32 _error;
+
+ _enter("%p,%p,%d",trans,msg,error);
+
+ /* don't abort an abort packet */
+ if (msg->hdr.type==RXRPC_PACKET_TYPE_ABORT) {
+ _leave(" = 0");
+ return 0;
+ }
+
+ _error = htonl(-error);
+
+ /* set up the message to be transmitted */
+ memcpy(&ahdr,&msg->hdr,sizeof(ahdr));
+ ahdr.epoch = msg->hdr.epoch;
+ ahdr.serial = htonl(1);
+ ahdr.seq = 0;
+ ahdr.type = RXRPC_PACKET_TYPE_ABORT;
+ ahdr.flags = RXRPC_LAST_PACKET | (~msg->hdr.flags & RXRPC_CLIENT_INITIATED);
+
+ iov[0].iov_len = sizeof(ahdr);
+ iov[0].iov_base = &ahdr;
+ iov[1].iov_len = sizeof(_error);
+ iov[1].iov_base = &_error;
+
+ len = sizeof(ahdr) + sizeof(_error);
+
+ memset(&sin,0,sizeof(sin));
+ sin.sin_family = AF_INET;
+ sin.sin_port = msg->pkt->h.uh->source;
+ sin.sin_addr.s_addr = msg->pkt->nh.iph->saddr;
+
+ msghdr.msg_name = &sin;
+ msghdr.msg_namelen = sizeof(sin);
+ msghdr.msg_iov = iov;
+ msghdr.msg_iovlen = 2;
+ msghdr.msg_control = NULL;
+ msghdr.msg_controllen = 0;
+ msghdr.msg_flags = MSG_DONTWAIT;
+
+ _net("Sending message type %d of %d bytes to %08x:%d",
+ ahdr.type,
+ len,
+ htonl(sin.sin_addr.s_addr),
+ htons(sin.sin_port));
+
+ /* send the message */
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sock_sendmsg(trans->socket,&msghdr,len);
+ set_fs(oldfs);
+
+ _leave(" = %d",ret);
+ return ret;
+} /* end rxrpc_trans_immediate_abort() */
+
+/*****************************************************************************/
+/*
+ * receive an ICMP error report and percolate it to all connections heading to the affected
+ * host or port
+ */
+static void rxrpc_trans_receive_error_report(struct rxrpc_transport *trans)
+{
+ struct rxrpc_connection *conn;
+ struct sockaddr_in sin;
+ struct rxrpc_peer *peer;
+ struct list_head connq, *_p;
+ struct errormsg emsg;
+ struct msghdr msg;
+ mm_segment_t oldfs;
+ int local, err;
+ u16 port;
+
+ _enter("%p",trans);
+
+ for (;;) {
+ trans->error_rcvd = 0;
+
+ /* try and receive an error message */
+ msg.msg_name = &sin;
+ msg.msg_namelen = sizeof(sin);
+ msg.msg_iov = NULL;
+ msg.msg_iovlen = 0;
+ msg.msg_control = &emsg;
+ msg.msg_controllen = sizeof(emsg);
+ msg.msg_flags = 0;
+
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sock_recvmsg(trans->socket,&msg,0,MSG_ERRQUEUE|MSG_DONTWAIT|MSG_TRUNC);
+ set_fs(oldfs);
+
+ if (err==-EAGAIN) {
+ _leave("");
+ return;
+ }
+
+ if (err<0) {
+ printk("%s: unable to recv an error report: %d\n",__FUNCTION__,err);
+ _leave("");
+ return;
+ }
+
+ msg.msg_controllen = (char*)msg.msg_control - (char*)&emsg;
+
+ if (msg.msg_controllen<sizeof(emsg.cmsg) || msg.msg_namelen<sizeof(sin)) {
+ printk("%s: short control message (nlen=%u clen=%u fl=%x)\n",
+ __FUNCTION__,msg.msg_namelen,msg.msg_controllen,msg.msg_flags);
+ continue;
+ }
+
+ _net("Rx Received control message { len=%u level=%u type=%u }",
+ emsg.cmsg.cmsg_len,emsg.cmsg.cmsg_level,emsg.cmsg.cmsg_type);
+
+ if (sin.sin_family!=AF_INET) {
+ printk("Rx Ignoring error report with non-INET address (fam=%u)",
+ sin.sin_family);
+ continue;
+ }
+
+ _net("Rx Received message pertaining to host addr=%x port=%hu",
+ ntohl(sin.sin_addr.s_addr),ntohs(sin.sin_port));
+
+ if (emsg.cmsg.cmsg_level!=SOL_IP || emsg.cmsg.cmsg_type!=IP_RECVERR) {
+ printk("Rx Ignoring unknown error report { level=%u type=%u }",
+ emsg.cmsg.cmsg_level,emsg.cmsg.cmsg_type);
+ continue;
+ }
+
+ if (msg.msg_controllen<sizeof(emsg.cmsg)+sizeof(emsg.ee)) {
+ printk("%s: short error message (%u)\n",__FUNCTION__,msg.msg_controllen);
+ _leave("");
+ return;
+ }
+
+ port = sin.sin_port;
+
+ switch (emsg.ee.ee_origin) {
+ case SO_EE_ORIGIN_ICMP:
+ local = 0;
+ switch (emsg.ee.ee_type) {
+ case ICMP_DEST_UNREACH:
+ switch (emsg.ee.ee_code) {
+ case ICMP_NET_UNREACH:
+ _net("Rx Received ICMP Network Unreachable");
+ port = 0;
+ err = -ENETUNREACH;
+ break;
+ case ICMP_HOST_UNREACH:
+ _net("Rx Received ICMP Host Unreachable");
+ port = 0;
+ err = -EHOSTUNREACH;
+ break;
+ case ICMP_PORT_UNREACH:
+ _net("Rx Received ICMP Port Unreachable");
+ err = -ECONNREFUSED;
+ break;
+ case ICMP_NET_UNKNOWN:
+ _net("Rx Received ICMP Unknown Network");
+ port = 0;
+ err = -ENETUNREACH;
+ break;
+ case ICMP_HOST_UNKNOWN:
+ _net("Rx Received ICMP Unknown Host");
+ port = 0;
+ err = -EHOSTUNREACH;
+ break;
+ default:
+ _net("Rx Received ICMP DestUnreach { code=%u }",
+ emsg.ee.ee_code);
+ err = emsg.ee.ee_errno;
+ break;
+ }
+ break;
+
+ case ICMP_TIME_EXCEEDED:
+ _net("Rx Received ICMP TTL Exceeded");
+ err = emsg.ee.ee_errno;
+ break;
+
+ default:
+ _proto("Rx Received ICMP error { type=%u code=%u }",
+ emsg.ee.ee_type,emsg.ee.ee_code);
+ err = emsg.ee.ee_errno;
+ break;
+ }
+ break;
+
+ case SO_EE_ORIGIN_LOCAL:
+ _proto("Rx Received local error { error=%d }",emsg.ee.ee_errno);
+ local = 1;
+ err = emsg.ee.ee_errno;
+ break;
+
+ case SO_EE_ORIGIN_NONE:
+ case SO_EE_ORIGIN_ICMP6:
+ default:
+ _proto("Rx Received error report { orig=%u }",emsg.ee.ee_origin);
+ local = 0;
+ err = emsg.ee.ee_errno;
+ break;
+ }
+
+ /* find all the connections between this transport and the affected destination */
+ INIT_LIST_HEAD(&connq);
+
+ if (rxrpc_peer_lookup(trans,sin.sin_addr.s_addr,&peer)==0) {
+ read_lock(&peer->conn_lock);
+ list_for_each(_p,&peer->conn_active) {
+ conn = list_entry(_p,struct rxrpc_connection,link);
+ if (port && conn->addr.sin_port!=port)
+ continue;
+ if (!list_empty(&conn->err_link))
+ continue;
+
+ rxrpc_get_connection(conn);
+ list_add_tail(&conn->err_link,&connq);
+ }
+ read_unlock(&peer->conn_lock);
+
+ /* service all those connections */
+ while (!list_empty(&connq)) {
+ conn = list_entry(connq.next,struct rxrpc_connection,err_link);
+ list_del(&conn->err_link);
+
+ rxrpc_conn_handle_error(conn,local,err);
+
+ rxrpc_put_connection(conn);
+ }
+
+ rxrpc_put_peer(peer);
+ }
+ }
+
+ _leave("");
+ return;
+} /* end rxrpc_trans_receive_error_report() */
diff --git a/sound/core/control.c b/sound/core/control.c
index 70c63eb212d4..7f818300c3b0 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -426,52 +426,59 @@ static int snd_ctl_elem_info(snd_ctl_file_t *ctl, snd_ctl_elem_info_t *_info)
static int snd_ctl_elem_read(snd_card_t *card, snd_ctl_elem_value_t *_control)
{
- snd_ctl_elem_value_t control;
+ snd_ctl_elem_value_t *control;
snd_kcontrol_t *kctl;
int result, indirect;
- if (copy_from_user(&control, _control, sizeof(control)))
+ control = kmalloc(sizeof(*control), GFP_KERNEL);
+ if (control == NULL)
+ return -ENOMEM;
+ if (copy_from_user(control, _control, sizeof(*control)))
return -EFAULT;
read_lock(&card->control_rwlock);
- kctl = snd_ctl_find_id(card, &control.id);
+ kctl = snd_ctl_find_id(card, &control->id);
if (kctl == NULL) {
result = -ENOENT;
} else {
indirect = kctl->access & SNDRV_CTL_ELEM_ACCESS_INDIRECT ? 1 : 0;
- if (control.indirect != indirect) {
+ if (control->indirect != indirect) {
result = -EACCES;
} else {
if ((kctl->access & SNDRV_CTL_ELEM_ACCESS_READ) && kctl->get != NULL) {
- result = kctl->get(kctl, &control);
+ result = kctl->get(kctl, control);
if (result >= 0)
- control.id = kctl->id;
+ control->id = kctl->id;
} else
result = -EPERM;
}
}
read_unlock(&card->control_rwlock);
if (result >= 0)
- if (copy_to_user(_control, &control, sizeof(control)))
+ if (copy_to_user(_control, control, sizeof(*control)))
return -EFAULT;
+ kfree(control);
return result;
}
static int snd_ctl_elem_write(snd_ctl_file_t *file, snd_ctl_elem_value_t *_control)
{
snd_card_t *card = file->card;
- snd_ctl_elem_value_t control;
+ snd_ctl_elem_value_t *control;
snd_kcontrol_t *kctl;
int result, indirect;
-
- if (copy_from_user(&control, _control, sizeof(control)))
+
+ control = kmalloc(sizeof(*control), GFP_KERNEL);
+ if (control == NULL)
+ return -ENOMEM;
+ if (copy_from_user(control, _control, sizeof(*control)))
return -EFAULT;
read_lock(&card->control_rwlock);
- kctl = snd_ctl_find_id(card, &control.id);
+ kctl = snd_ctl_find_id(card, &control->id);
if (kctl == NULL) {
result = -ENOENT;
} else {
indirect = kctl->access & SNDRV_CTL_ELEM_ACCESS_INDIRECT ? 1 : 0;
- if (control.indirect != indirect) {
+ if (control->indirect != indirect) {
result = -EACCES;
} else {
read_lock(&card->control_owner_lock);
@@ -480,9 +487,9 @@ static int snd_ctl_elem_write(snd_ctl_file_t *file, snd_ctl_elem_value_t *_contr
(kctl->owner != NULL && kctl->owner != file)) {
result = -EPERM;
} else {
- result = kctl->put(kctl, &control);
+ result = kctl->put(kctl, control);
if (result >= 0)
- control.id = kctl->id;
+ control->id = kctl->id;
}
read_unlock(&card->control_owner_lock);
if (result > 0) {
@@ -496,8 +503,9 @@ static int snd_ctl_elem_write(snd_ctl_file_t *file, snd_ctl_elem_value_t *_contr
read_unlock(&card->control_rwlock);
__unlocked:
if (result >= 0)
- if (copy_to_user(_control, &control, sizeof(control)))
+ if (copy_to_user(_control, control, sizeof(*control)))
return -EFAULT;
+ kfree(control);
return result;
}
diff --git a/sound/core/info_oss.c b/sound/core/info_oss.c
index 43bc8c56de01..d19840b4f0ce 100644
--- a/sound/core/info_oss.c
+++ b/sound/core/info_oss.c
@@ -28,7 +28,7 @@
#include <sound/version.h>
#include <linux/utsname.h>
-#ifdef CONFIG_SND_OSSEMUL
+#if defined(CONFIG_SND_OSSEMUL) && defined(CONFIG_PROC_FS)
/*
* OSS compatible part
diff --git a/sound/core/init.c b/sound/core/init.c
index c1099a1711e0..3e15f1a4fe61 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -216,7 +216,7 @@ static void snd_card_info_read(snd_info_entry_t *entry, snd_info_buffer_t * buff
snd_iprintf(buffer, "--- no soundcards ---\n");
}
-#ifdef CONFIG_SND_OSSEMUL
+#if defined(CONFIG_SND_OSSEMUL) && defined(CONFIG_PROC_FS)
void snd_card_info_read_oss(snd_info_buffer_t * buffer)
{
diff --git a/sound/core/ioctl32/ioctl32.c b/sound/core/ioctl32/ioctl32.c
index 8002791080ec..fd37aab6d80d 100644
--- a/sound/core/ioctl32/ioctl32.c
+++ b/sound/core/ioctl32/ioctl32.c
@@ -23,6 +23,7 @@
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/time.h>
+#include <linux/slab.h>
#include <linux/init.h>
#include <sound/core.h>
#include <sound/control.h>
@@ -264,46 +265,56 @@ static int get_ctl_type(struct file *file, snd_ctl_elem_id_t *id)
static int _snd_ioctl32_ctl_elem_value(unsigned int fd, unsigned int cmd, unsigned long arg, struct file *file, unsigned int native_ctl)
{
- // too big?
- struct sndrv_ctl_elem_value data;
- struct sndrv_ctl_elem_value32 data32;
+ struct sndrv_ctl_elem_value *data;
+ struct sndrv_ctl_elem_value32 *data32;
int err, i;
int type;
mm_segment_t oldseg;
/* FIXME: check the sane ioctl.. */
- if (copy_from_user(&data32, (void*)arg, sizeof(data32)))
- return -EFAULT;
- memset(&data, 0, sizeof(data));
- data.id = data32.id;
- data.indirect = data32.indirect;
- if (data.indirect) /* FIXME: this is not correct for long arrays */
- data.value.integer.value_ptr = (void*)TO_PTR(data32.value.integer.value_ptr);
- type = get_ctl_type(file, &data.id);
- if (type < 0)
- return type;
- if (! data.indirect) {
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ data32 = kmalloc(sizeof(*data32), GFP_KERNEL);
+ if (data == NULL || data32 == NULL) {
+ err = -ENOMEM;
+ goto __end;
+ }
+
+ if (copy_from_user(data32, (void*)arg, sizeof(*data32))) {
+ err = -EFAULT;
+ goto __end;
+ }
+ memset(data, 0, sizeof(*data));
+ data->id = data32->id;
+ data->indirect = data32->indirect;
+ if (data->indirect) /* FIXME: this is not correct for long arrays */
+ data->value.integer.value_ptr = (void*)TO_PTR(data32->value.integer.value_ptr);
+ type = get_ctl_type(file, &data->id);
+ if (type < 0) {
+ err = type;
+ goto __end;
+ }
+ if (! data->indirect) {
switch (type) {
case SNDRV_CTL_ELEM_TYPE_BOOLEAN:
case SNDRV_CTL_ELEM_TYPE_INTEGER:
for (i = 0; i < 128; i++)
- data.value.integer.value[i] = data32.value.integer.value[i];
+ data->value.integer.value[i] = data32->value.integer.value[i];
break;
case SNDRV_CTL_ELEM_TYPE_INTEGER64:
for (i = 0; i < 64; i++)
- data.value.integer64.value[i] = data32.value.integer64.value[i];
+ data->value.integer64.value[i] = data32->value.integer64.value[i];
break;
case SNDRV_CTL_ELEM_TYPE_ENUMERATED:
for (i = 0; i < 128; i++)
- data.value.enumerated.item[i] = data32.value.enumerated.item[i];
+ data->value.enumerated.item[i] = data32->value.enumerated.item[i];
break;
case SNDRV_CTL_ELEM_TYPE_BYTES:
- memcpy(data.value.bytes.data, data32.value.bytes.data,
- sizeof(data.value.bytes.data));
+ memcpy(data->value.bytes.data, data32->value.bytes.data,
+ sizeof(data->value.bytes.data));
break;
case SNDRV_CTL_ELEM_TYPE_IEC958:
- data.value.iec958 = data32.value.iec958;
+ data->value.iec958 = data32->value.iec958;
break;
default:
printk("unknown type %d\n", type);
@@ -313,40 +324,46 @@ static int _snd_ioctl32_ctl_elem_value(unsigned int fd, unsigned int cmd, unsign
oldseg = get_fs();
set_fs(KERNEL_DS);
- err = file->f_op->ioctl(file->f_dentry->d_inode, file, native_ctl, (unsigned long)&data);
+ err = file->f_op->ioctl(file->f_dentry->d_inode, file, native_ctl, (unsigned long)data);
set_fs(oldseg);
if (err < 0)
- return err;
+ goto __end;
/* restore info to 32bit */
- if (! data.indirect) {
+ if (! data->indirect) {
switch (type) {
case SNDRV_CTL_ELEM_TYPE_BOOLEAN:
case SNDRV_CTL_ELEM_TYPE_INTEGER:
for (i = 0; i < 128; i++)
- data32.value.integer.value[i] = data.value.integer.value[i];
+ data32->value.integer.value[i] = data->value.integer.value[i];
break;
case SNDRV_CTL_ELEM_TYPE_INTEGER64:
for (i = 0; i < 64; i++)
- data32.value.integer64.value[i] = data.value.integer64.value[i];
+ data32->value.integer64.value[i] = data->value.integer64.value[i];
break;
case SNDRV_CTL_ELEM_TYPE_ENUMERATED:
for (i = 0; i < 128; i++)
- data32.value.enumerated.item[i] = data.value.enumerated.item[i];
+ data32->value.enumerated.item[i] = data->value.enumerated.item[i];
break;
case SNDRV_CTL_ELEM_TYPE_BYTES:
- memcpy(data32.value.bytes.data, data.value.bytes.data,
- sizeof(data.value.bytes.data));
+ memcpy(data32->value.bytes.data, data->value.bytes.data,
+ sizeof(data->value.bytes.data));
break;
case SNDRV_CTL_ELEM_TYPE_IEC958:
- data32.value.iec958 = data.value.iec958;
+ data32->value.iec958 = data->value.iec958;
break;
default:
break;
}
}
- if (copy_to_user((void*)arg, &data32, sizeof(data32)))
- return -EFAULT;
- return 0;
+ err = 0;
+ if (copy_to_user((void*)arg, data32, sizeof(*data32)))
+ err = -EFAULT;
+ __end:
+ if (data32)
+ kfree(data32);
+ if (data)
+ kfree(data);
+ return err;
}
DEFINE_ALSA_IOCTL_ENTRY(ctl_elem_read, ctl_elem_value, SNDRV_CTL_IOCTL_ELEM_READ);
diff --git a/sound/core/ioctl32/ioctl32.h b/sound/core/ioctl32/ioctl32.h
index 30cd42921435..c7ad6d1406d2 100644
--- a/sound/core/ioctl32/ioctl32.h
+++ b/sound/core/ioctl32/ioctl32.h
@@ -79,6 +79,44 @@ static int _snd_ioctl32_##type(unsigned int fd, unsigned int cmd, unsigned long
return 0;\
}
+#define DEFINE_ALSA_IOCTL_BIG(type) \
+static int _snd_ioctl32_##type(unsigned int fd, unsigned int cmd, unsigned long arg, struct file *file, unsigned int native_ctl)\
+{\
+ struct sndrv_##type##32 *data32;\
+ struct sndrv_##type *data;\
+ mm_segment_t oldseg;\
+ int err;\
+ data32 = kmalloc(sizeof(*data32), GFP_KERNEL); \
+ data = kmalloc(sizeof(*data), GFP_KERNEL); \
+ if (data32 == NULL || data == NULL) { \
+ err = -ENOMEM; \
+ goto __end; \
+ }\
+ if (copy_from_user(data32, (void*)arg, sizeof(*data32))) { \
+ err = -EFAULT; \
+ goto __end; \
+ }\
+ memset(data, 0, sizeof(*data));\
+ convert_from_32(type, data, data32);\
+ oldseg = get_fs();\
+ set_fs(KERNEL_DS);\
+ err = file->f_op->ioctl(file->f_dentry->d_inode, file, native_ctl, (unsigned long)data);\
+ if (err < 0) \
+ goto __end;\
+ err = 0;\
+ if (native_ctl & (_IOC_READ << _IOC_DIRSHIFT)) {\
+ convert_to_32(type, data32, data);\
+ if (copy_to_user((void*)arg, data32, sizeof(*data32)))\
+ err = -EFAULT;\
+ }\
+ __end:\
+ if (data)\
+ kfree(data);\
+ if (data32)\
+ kfree(data32);\
+ return err;\
+}
+
#define DEFINE_ALSA_IOCTL_ENTRY(name,type,native_ctl) \
static int snd_ioctl32_##name(unsigned int fd, unsigned int cmd, unsigned long arg, struct file *file) {\
return _snd_ioctl32_##type(fd, cmd, arg, file, native_ctl);\
diff --git a/sound/core/ioctl32/pcm32.c b/sound/core/ioctl32/pcm32.c
index 66b8a1bf3565..a0ff92ff41b1 100644
--- a/sound/core/ioctl32/pcm32.c
+++ b/sound/core/ioctl32/pcm32.c
@@ -20,6 +20,7 @@
#include <sound/driver.h>
#include <linux/time.h>
+#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include "ioctl32.h"
@@ -172,7 +173,7 @@ struct sndrv_pcm_status32 {
DEFINE_ALSA_IOCTL(pcm_uframes_str);
DEFINE_ALSA_IOCTL(pcm_sframes_str);
-DEFINE_ALSA_IOCTL(pcm_hw_params);
+DEFINE_ALSA_IOCTL_BIG(pcm_hw_params);
DEFINE_ALSA_IOCTL(pcm_sw_params);
DEFINE_ALSA_IOCTL(pcm_channel_info);
DEFINE_ALSA_IOCTL(pcm_status);
@@ -230,7 +231,7 @@ static int _snd_ioctl32_xfern(unsigned int fd, unsigned int cmd, unsigned long a
snd_pcm_file_t *pcm_file;
snd_pcm_substream_t *substream;
struct sndrv_xfern32 data32, *srcptr = (struct sndrv_xfern32*)arg;
- void *bufs[128];
+ void **bufs = NULL;
int err = 0, ch, i;
u32 *bufptr;
mm_segment_t oldseg;
@@ -260,6 +261,9 @@ static int _snd_ioctl32_xfern(unsigned int fd, unsigned int cmd, unsigned long a
return -EFAULT;
__get_user(data32.bufs, &srcptr->bufs);
bufptr = (u32*)TO_PTR(data32.bufs);
+ bufs = kmalloc(sizeof(void *) * 128, GFP_KERNEL);
+ if (bufs == NULL)
+ return -ENOMEM;
for (i = 0; i < ch; i++) {
u32 ptr;
if (get_user(ptr, bufptr))
@@ -278,10 +282,11 @@ static int _snd_ioctl32_xfern(unsigned int fd, unsigned int cmd, unsigned long a
break;
}
set_fs(oldseg);
- if (err < 0)
- return err;
- if (put_user(err, &srcptr->result))
- return -EFAULT;
+ if (err >= 0) {
+ if (put_user(err, &srcptr->result))
+ err = -EFAULT;
+ }
+ kfree(bufs);
return 0;
}
@@ -343,24 +348,38 @@ static void snd_pcm_hw_convert_to_old_params(struct sndrv_pcm_hw_params_old32 *o
static int _snd_ioctl32_pcm_hw_params_old(unsigned int fd, unsigned int cmd, unsigned long arg, struct file *file, unsigned int native_ctl)
{
- struct sndrv_pcm_hw_params_old32 data32;
- struct sndrv_pcm_hw_params data;
+ struct sndrv_pcm_hw_params_old32 *data32;
+ struct sndrv_pcm_hw_params *data;
mm_segment_t oldseg;
int err;
- if (copy_from_user(&data32, (void*)arg, sizeof(data32)))
- return -EFAULT;
- snd_pcm_hw_convert_from_old_params(&data, &data32);
+ data32 = snd_kcalloc(sizeof(*data32), GFP_KERNEL);
+ data = snd_kcalloc(sizeof(*data), GFP_KERNEL);
+ if (data32 == NULL || data == NULL) {
+ err = -ENOMEM;
+ goto __end;
+ }
+ if (copy_from_user(data32, (void*)arg, sizeof(*data32))) {
+ err = -EFAULT;
+ goto __end;
+ }
+ snd_pcm_hw_convert_from_old_params(data, data32);
oldseg = get_fs();
set_fs(KERNEL_DS);
- err = file->f_op->ioctl(file->f_dentry->d_inode, file, native_ctl, (unsigned long)&data);
+ err = file->f_op->ioctl(file->f_dentry->d_inode, file, native_ctl, (unsigned long)data);
set_fs(oldseg);
if (err < 0)
- return err;
- snd_pcm_hw_convert_to_old_params(&data32, &data);
- if (copy_to_user((void*)arg, &data32, sizeof(data32)))
- return -EFAULT;
- return 0;
+ goto __end;
+ snd_pcm_hw_convert_to_old_params(data32, data);
+ err = 0;
+ if (copy_to_user((void*)arg, data32, sizeof(*data32)))
+ err = -EFAULT;
+ __end:
+ if (data)
+ kfree(data);
+ if (data32)
+ kfree(data32);
+ return err;
}
diff --git a/sound/core/memory.c b/sound/core/memory.c
index 9eb86056b723..c12a28ab2ecd 100644
--- a/sound/core/memory.c
+++ b/sound/core/memory.c
@@ -499,10 +499,10 @@ char *snd_kmalloc_strdup(const char *string, int flags)
int copy_to_user_fromio(void *dst, unsigned long src, size_t count)
{
-#if defined(__i386_) || defined(CONFIG_SPARC32)
+#if defined(__i386__) || defined(CONFIG_SPARC32)
return copy_to_user(dst, (const void*)src, count) ? -EFAULT : 0;
#else
- char buf[1024];
+ char buf[256];
while (count) {
size_t c = count;
if (c > sizeof(buf))
@@ -520,10 +520,10 @@ int copy_to_user_fromio(void *dst, unsigned long src, size_t count)
int copy_from_user_toio(unsigned long dst, const void *src, size_t count)
{
-#if defined(__i386_) || defined(CONFIG_SPARC32)
+#if defined(__i386__) || defined(CONFIG_SPARC32)
return copy_from_user((void*)dst, src, count) ? -EFAULT : 0;
#else
- char buf[1024];
+ char buf[256];
while (count) {
size_t c = count;
if (c > sizeof(buf))
diff --git a/sound/core/oss/mixer_oss.c b/sound/core/oss/mixer_oss.c
index 4bbd6e08df74..03167976060f 100644
--- a/sound/core/oss/mixer_oss.c
+++ b/sound/core/oss/mixer_oss.c
@@ -495,18 +495,25 @@ static void snd_mixer_oss_get_volume1_vol(snd_mixer_oss_file_t *fmixer,
snd_kcontrol_t *kctl,
int *left, int *right)
{
- snd_ctl_elem_info_t uinfo;
- snd_ctl_elem_value_t uctl;
+ snd_ctl_elem_info_t *uinfo;
+ snd_ctl_elem_value_t *uctl;
snd_runtime_check(kctl != NULL, return);
- memset(&uinfo, 0, sizeof(uinfo));
- memset(&uctl, 0, sizeof(uctl));
- snd_runtime_check(!kctl->info(kctl, &uinfo), return);
- snd_runtime_check(!kctl->get(kctl, &uctl), return);
- snd_runtime_check(uinfo.type != SNDRV_CTL_ELEM_TYPE_BOOLEAN || uinfo.value.integer.min != 0 || uinfo.value.integer.max != 1, return);
- *left = snd_mixer_oss_conv1(uctl.value.integer.value[0], uinfo.value.integer.min, uinfo.value.integer.max, &pslot->volume[0]);
- if (uinfo.count > 1)
- *right = snd_mixer_oss_conv1(uctl.value.integer.value[1], uinfo.value.integer.min, uinfo.value.integer.max, &pslot->volume[1]);
+ uinfo = snd_kcalloc(sizeof(*uinfo), GFP_ATOMIC);
+ uctl = snd_kcalloc(sizeof(*uctl), GFP_ATOMIC);
+ if (uinfo == NULL || uctl == NULL)
+ goto __unalloc;
+ snd_runtime_check(!kctl->info(kctl, uinfo), goto __unalloc);
+ snd_runtime_check(!kctl->get(kctl, uctl), goto __unalloc);
+ snd_runtime_check(uinfo->type != SNDRV_CTL_ELEM_TYPE_BOOLEAN || uinfo->value.integer.min != 0 || uinfo->value.integer.max != 1, return);
+ *left = snd_mixer_oss_conv1(uctl->value.integer.value[0], uinfo->value.integer.min, uinfo->value.integer.max, &pslot->volume[0]);
+ if (uinfo->count > 1)
+ *right = snd_mixer_oss_conv1(uctl->value.integer.value[1], uinfo->value.integer.min, uinfo->value.integer.max, &pslot->volume[1]);
+ __unalloc:
+ if (uctl)
+ kfree(uctl);
+ if (uinfo)
+ kfree(uinfo);
}
static void snd_mixer_oss_get_volume1_sw(snd_mixer_oss_file_t *fmixer,
@@ -515,21 +522,28 @@ static void snd_mixer_oss_get_volume1_sw(snd_mixer_oss_file_t *fmixer,
int *left, int *right,
int route)
{
- snd_ctl_elem_info_t uinfo;
- snd_ctl_elem_value_t uctl;
+ snd_ctl_elem_info_t *uinfo;
+ snd_ctl_elem_value_t *uctl;
snd_runtime_check(kctl != NULL, return);
- memset(&uinfo, 0, sizeof(uinfo));
- memset(&uctl, 0, sizeof(uctl));
- snd_runtime_check(!kctl->info(kctl, &uinfo), return);
- snd_runtime_check(!kctl->get(kctl, &uctl), return);
- if (!uctl.value.integer.value[0]) {
+ uinfo = snd_kcalloc(sizeof(*uinfo), GFP_ATOMIC);
+ uctl = snd_kcalloc(sizeof(*uctl), GFP_ATOMIC);
+ if (uinfo == NULL || uctl == NULL)
+ goto __unalloc;
+ snd_runtime_check(!kctl->info(kctl, uinfo), goto __unalloc);
+ snd_runtime_check(!kctl->get(kctl, uctl), goto __unalloc);
+ if (!uctl->value.integer.value[0]) {
*left = 0;
- if (uinfo.count == 1)
+ if (uinfo->count == 1)
*right = 0;
}
- if (uinfo.count > 1 && !uctl.value.integer.value[route ? 3 : 1])
+ if (uinfo->count > 1 && !uctl->value.integer.value[route ? 3 : 1])
*right = 0;
+ __unalloc:
+ if (uctl)
+ kfree(uctl);
+ if (uinfo)
+ kfree(uinfo);
}
static int snd_mixer_oss_get_volume1(snd_mixer_oss_file_t *fmixer,
@@ -566,21 +580,28 @@ static void snd_mixer_oss_put_volume1_vol(snd_mixer_oss_file_t *fmixer,
snd_kcontrol_t *kctl,
int left, int right)
{
- snd_ctl_elem_info_t uinfo;
- snd_ctl_elem_value_t uctl;
+ snd_ctl_elem_info_t *uinfo;
+ snd_ctl_elem_value_t *uctl;
int res;
snd_runtime_check(kctl != NULL, return);
- memset(&uinfo, 0, sizeof(uinfo));
- memset(&uctl, 0, sizeof(uctl));
- snd_runtime_check(!kctl->info(kctl, &uinfo), return);
- snd_runtime_check(uinfo.type != SNDRV_CTL_ELEM_TYPE_BOOLEAN || uinfo.value.integer.min != 0 || uinfo.value.integer.max != 1, return);
- uctl.value.integer.value[0] = snd_mixer_oss_conv2(left, uinfo.value.integer.min, uinfo.value.integer.max);
- if (uinfo.count > 1)
- uctl.value.integer.value[1] = snd_mixer_oss_conv2(right, uinfo.value.integer.min, uinfo.value.integer.max);
- snd_runtime_check((res = kctl->put(kctl, &uctl)) >= 0, return);
+ uinfo = snd_kcalloc(sizeof(*uinfo), GFP_ATOMIC);
+ uctl = snd_kcalloc(sizeof(*uctl), GFP_ATOMIC);
+ if (uinfo == NULL || uctl == NULL)
+ goto __unalloc;
+ snd_runtime_check(!kctl->info(kctl, uinfo), goto __unalloc);
+ snd_runtime_check(uinfo->type != SNDRV_CTL_ELEM_TYPE_BOOLEAN || uinfo->value.integer.min != 0 || uinfo->value.integer.max != 1, return);
+ uctl->value.integer.value[0] = snd_mixer_oss_conv2(left, uinfo->value.integer.min, uinfo->value.integer.max);
+ if (uinfo->count > 1)
+ uctl->value.integer.value[1] = snd_mixer_oss_conv2(right, uinfo->value.integer.min, uinfo->value.integer.max);
+ snd_runtime_check((res = kctl->put(kctl, uctl)) >= 0, goto __unalloc);
if (res > 0)
snd_ctl_notify(fmixer->card, SNDRV_CTL_EVENT_MASK_VALUE, &kctl->id);
+ __unalloc:
+ if (uctl)
+ kfree(uctl);
+ if (uinfo)
+ kfree(uinfo);
}
static void snd_mixer_oss_put_volume1_sw(snd_mixer_oss_file_t *fmixer,
@@ -589,27 +610,34 @@ static void snd_mixer_oss_put_volume1_sw(snd_mixer_oss_file_t *fmixer,
int left, int right,
int route)
{
- snd_ctl_elem_info_t uinfo;
- snd_ctl_elem_value_t uctl;
+ snd_ctl_elem_info_t *uinfo;
+ snd_ctl_elem_value_t *uctl;
int res;
snd_runtime_check(kctl != NULL, return);
- memset(&uinfo, 0, sizeof(uinfo));
- memset(&uctl, 0, sizeof(uctl));
- snd_runtime_check(!kctl->info(kctl, &uinfo), return);
- if (uinfo.count > 1) {
- uctl.value.integer.value[0] = left > 0 ? 1 : 0;
- uctl.value.integer.value[route ? 3 : 1] = right > 0 ? 1 : 0;
+ uinfo = snd_kcalloc(sizeof(*uinfo), GFP_ATOMIC);
+ uctl = snd_kcalloc(sizeof(*uctl), GFP_ATOMIC);
+ if (uinfo == NULL || uctl == NULL)
+ goto __unalloc;
+ snd_runtime_check(!kctl->info(kctl, uinfo), goto __unalloc);
+ if (uinfo->count > 1) {
+ uctl->value.integer.value[0] = left > 0 ? 1 : 0;
+ uctl->value.integer.value[route ? 3 : 1] = right > 0 ? 1 : 0;
if (route) {
- uctl.value.integer.value[1] =
- uctl.value.integer.value[2] = 0;
+ uctl->value.integer.value[1] =
+ uctl->value.integer.value[2] = 0;
}
} else {
- uctl.value.integer.value[0] = (left > 0 || right > 0) ? 1 : 0;
+ uctl->value.integer.value[0] = (left > 0 || right > 0) ? 1 : 0;
}
- snd_runtime_check((res = kctl->put(kctl, &uctl)) >= 0, return);
+ snd_runtime_check((res = kctl->put(kctl, uctl)) >= 0, goto __unalloc);
if (res > 0)
snd_ctl_notify(fmixer->card, SNDRV_CTL_EVENT_MASK_VALUE, &kctl->id);
+ __unalloc:
+ if (uctl)
+ kfree(uctl);
+ if (uinfo)
+ kfree(uinfo);
}
static int snd_mixer_oss_put_volume1(snd_mixer_oss_file_t *fmixer,
@@ -718,17 +746,21 @@ static int snd_mixer_oss_get_recsrc2(snd_mixer_oss_file_t *fmixer, int *active_i
snd_kcontrol_t *kctl;
snd_mixer_oss_slot_t *pslot;
struct slot *slot;
- snd_ctl_elem_info_t uinfo;
- snd_ctl_elem_value_t uctl;
+ snd_ctl_elem_info_t *uinfo;
+ snd_ctl_elem_value_t *uctl;
int err, idx;
+ uinfo = snd_kcalloc(sizeof(*uinfo), GFP_KERNEL);
+ uctl = snd_kcalloc(sizeof(*uctl), GFP_KERNEL);
+ if (uinfo == NULL || uctl == NULL) {
+ err = -ENOMEM;
+ goto __unlock;
+ }
read_lock(&card->control_rwlock);
kctl = snd_mixer_oss_test_id(mixer, "Capture Source", 0);
- snd_runtime_check(kctl != NULL, return -ENOENT);
- memset(&uinfo, 0, sizeof(uinfo));
- memset(&uctl, 0, sizeof(uctl));
- snd_runtime_check(!(err = kctl->info(kctl, &uinfo)), read_unlock(&card->control_rwlock); return err);
- snd_runtime_check(!(err = kctl->get(kctl, &uctl)), read_unlock(&card->control_rwlock); return err);
+ snd_runtime_check(kctl != NULL, err = -ENOENT; goto __unlock);
+ snd_runtime_check(!(err = kctl->info(kctl, uinfo)), goto __unlock);
+ snd_runtime_check(!(err = kctl->get(kctl, uctl)), goto __unlock);
read_unlock(&card->control_rwlock);
for (idx = 0; idx < 32; idx++) {
if (!(mixer->mask_recsrc & (1 << idx)))
@@ -739,12 +771,21 @@ static int snd_mixer_oss_get_recsrc2(snd_mixer_oss_file_t *fmixer, int *active_i
continue;
if (!(slot->present & SNDRV_MIXER_OSS_PRESENT_CAPTURE))
continue;
- if (slot->capture_item == uctl.value.enumerated.item[0]) {
+ if (slot->capture_item == uctl->value.enumerated.item[0]) {
*active_index = idx;
break;
}
}
- return 0;
+ err = 0;
+ goto __unalloc;
+ __unlock:
+ read_unlock(&card->control_rwlock);
+ __unalloc:
+ if (uctl)
+ kfree(uctl);
+ if (uinfo)
+ kfree(uinfo);
+ return err;
}
static int snd_mixer_oss_put_recsrc2(snd_mixer_oss_file_t *fmixer, int active_index)
@@ -754,16 +795,20 @@ static int snd_mixer_oss_put_recsrc2(snd_mixer_oss_file_t *fmixer, int active_in
snd_kcontrol_t *kctl;
snd_mixer_oss_slot_t *pslot;
struct slot *slot = NULL;
- snd_ctl_elem_info_t uinfo;
- snd_ctl_elem_value_t uctl;
+ snd_ctl_elem_info_t *uinfo;
+ snd_ctl_elem_value_t *uctl;
int err, idx;
+ uinfo = snd_kcalloc(sizeof(*uinfo), GFP_KERNEL);
+ uctl = snd_kcalloc(sizeof(*uctl), GFP_KERNEL);
+ if (uinfo == NULL || uctl == NULL) {
+ err = -ENOMEM;
+ goto __unlock;
+ }
read_lock(&card->control_rwlock);
kctl = snd_mixer_oss_test_id(mixer, "Capture Source", 0);
- snd_runtime_check(kctl != NULL, read_unlock(&card->control_rwlock); return -ENOENT);
- memset(&uinfo, 0, sizeof(uinfo));
- memset(&uctl, 0, sizeof(uctl));
- snd_runtime_check(!(err = kctl->info(kctl, &uinfo)), read_unlock(&card->control_rwlock); return err);
+ snd_runtime_check(kctl != NULL, err = -ENOENT; goto __unlock);
+ snd_runtime_check(!(err = kctl->info(kctl, uinfo)), goto __unlock);
for (idx = 0; idx < 32; idx++) {
if (!(mixer->mask_recsrc & (1 << idx)))
continue;
@@ -778,14 +823,19 @@ static int snd_mixer_oss_put_recsrc2(snd_mixer_oss_file_t *fmixer, int active_in
slot = NULL;
}
snd_runtime_check(slot != NULL, goto __unlock);
- for (idx = 0; idx < uinfo.count; idx++)
- uctl.value.enumerated.item[idx] = slot->capture_item;
- snd_runtime_check((err = kctl->put(kctl, &uctl)) >= 0, );
+ for (idx = 0; idx < uinfo->count; idx++)
+ uctl->value.enumerated.item[idx] = slot->capture_item;
+ snd_runtime_check((err = kctl->put(kctl, uctl)) >= 0, );
if (err > 0)
snd_ctl_notify(fmixer->card, SNDRV_CTL_EVENT_MASK_VALUE, &kctl->id);
+ err = 0;
__unlock:
read_unlock(&card->control_rwlock);
- return 0;
+ if (uctl)
+ kfree(uctl);
+ if (uinfo)
+ kfree(uinfo);
+ return err;
}
struct snd_mixer_oss_assign_table {
@@ -1176,9 +1226,11 @@ static int snd_mixer_oss_notify_handler(snd_card_t * card, int free_flag)
mixer->name[sizeof(mixer->name)-1] = 0;
} else
strcpy(mixer->name, name);
+#ifdef SNDRV_OSS_INFO_DEV_MIXERS
snd_oss_info_register(SNDRV_OSS_INFO_DEV_MIXERS,
card->number,
mixer->name);
+#endif
for (idx = 0; idx < SNDRV_OSS_MAX_MIXERS; idx++)
mixer->slots[idx].number = idx;
card->mixer_oss = mixer;
@@ -1188,7 +1240,9 @@ static int snd_mixer_oss_notify_handler(snd_card_t * card, int free_flag)
snd_mixer_oss_t *mixer = card->mixer_oss;
if (mixer == NULL)
return 0;
+#ifdef SNDRV_OSS_INFO_DEV_MIXERS
snd_oss_info_unregister(SNDRV_OSS_INFO_DEV_MIXERS, mixer->card->number);
+#endif
snd_unregister_oss_device(SNDRV_OSS_DEVICE_TYPE_MIXER, mixer->card, 0);
snd_mixer_oss_proc_done(mixer);
return snd_mixer_oss_free1(mixer);
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index 6ed863be7145..4b988d1a5004 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -2123,9 +2123,11 @@ static int snd_pcm_oss_register_minor(unsigned short native_minor,
pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream_count &&
!(pcm->info_flags & SNDRV_PCM_INFO_HALF_DUPLEX));
sprintf(name, "%s%s", pcm->name, duplex ? " (DUPLEX)" : "");
+#ifdef SNDRV_OSS_INFO_DEV_AUDIO
snd_oss_info_register(SNDRV_OSS_INFO_DEV_AUDIO,
pcm->card->number,
name);
+#endif
pcm->oss.reg++;
}
if (snd_adsp_map[pcm->card->number] == pcm->device) {
@@ -2146,7 +2148,9 @@ static int snd_pcm_oss_unregister_minor(unsigned short native_minor,
if (snd_dsp_map[pcm->card->number] == pcm->device) {
snd_unregister_oss_device(SNDRV_OSS_DEVICE_TYPE_PCM,
pcm->card, 0);
+#ifdef SNDRV_OSS_INFO_DEV_AUDIO
snd_oss_info_unregister(SNDRV_OSS_INFO_DEV_AUDIO, pcm->card->number);
+#endif
}
if (snd_adsp_map[pcm->card->number] == pcm->device)
snd_unregister_oss_device(SNDRV_OSS_DEVICE_TYPE_PCM,
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index b9b80de46854..809d62c24d5b 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -1473,7 +1473,7 @@ static int boundary_nearer(int min, int mindir,
int snd_pcm_hw_param_near(snd_pcm_t *pcm, snd_pcm_hw_params_t *params,
snd_pcm_hw_param_t var, unsigned int best, int *dir)
{
- snd_pcm_hw_params_t save;
+ snd_pcm_hw_params_t *save = NULL;
int v;
unsigned int saved_min;
int last = 0;
@@ -1493,30 +1493,42 @@ int snd_pcm_hw_param_near(snd_pcm_t *pcm, snd_pcm_hw_params_t *params,
maxdir = 1;
max--;
}
- save = *params;
+ save = kmalloc(sizeof(*save), GFP_KERNEL);
+ if (save == NULL)
+ return -ENOMEM;
+ *save = *params;
saved_min = min;
min = snd_pcm_hw_param_min(pcm, params, var, min, &mindir);
if (min >= 0) {
- snd_pcm_hw_params_t params1;
+ snd_pcm_hw_params_t *params1;
if (max < 0)
goto _end;
if ((unsigned int)min == saved_min && mindir == valdir)
goto _end;
- params1 = save;
- max = snd_pcm_hw_param_max(pcm, &params1, var, max, &maxdir);
- if (max < 0)
+ params1 = kmalloc(sizeof(*params1), GFP_KERNEL);
+ if (params1 == NULL) {
+ kfree(save);
+ return -ENOMEM;
+ }
+ *params1 = *save;
+ max = snd_pcm_hw_param_max(pcm, params1, var, max, &maxdir);
+ if (max < 0) {
+ kfree(params1);
goto _end;
+ }
if (boundary_nearer(max, maxdir, best, valdir, min, mindir)) {
- *params = params1;
+ *params = *params1;
last = 1;
}
+ kfree(params1);
} else {
- *params = save;
+ *params = *save;
max = snd_pcm_hw_param_max(pcm, params, var, max, &maxdir);
assert(max >= 0);
last = 1;
}
_end:
+ kfree(save);
if (last)
v = snd_pcm_hw_param_last(pcm, params, var, dir);
else
@@ -1842,16 +1854,11 @@ static snd_pcm_sframes_t snd_pcm_lib_write1(snd_pcm_substream_t *substream,
if (runtime->sleep_min == 0 && runtime->status->state == SNDRV_PCM_STATE_RUNNING)
snd_pcm_update_hw_ptr(substream);
avail = snd_pcm_playback_avail(runtime);
- if (runtime->status->state == SNDRV_PCM_STATE_PAUSED ||
- runtime->status->state == SNDRV_PCM_STATE_PREPARED) {
- if (avail < runtime->xfer_align) {
- err = -EPIPE;
- goto _end_unlock;
- }
- } else if (((avail < runtime->control->avail_min && size > avail) ||
- (size >= runtime->xfer_align && avail < runtime->xfer_align))) {
+ if (((avail < runtime->control->avail_min && size > avail) ||
+ (size >= runtime->xfer_align && avail < runtime->xfer_align))) {
wait_queue_t wait;
enum { READY, SIGNALED, ERROR, SUSPENDED, EXPIRED } state;
+
if (nonblock) {
err = -EAGAIN;
goto _end_unlock;
@@ -1868,8 +1875,11 @@ static snd_pcm_sframes_t snd_pcm_lib_write1(snd_pcm_substream_t *substream,
spin_unlock_irq(&runtime->lock);
if (schedule_timeout(10 * HZ) == 0) {
spin_lock_irq(&runtime->lock);
- state = runtime->status->state == SNDRV_PCM_STATE_SUSPENDED ? SUSPENDED : EXPIRED;
- break;
+ if (runtime->status->state != SNDRV_PCM_STATE_PREPARED &&
+ runtime->status->state != SNDRV_PCM_STATE_PAUSED) {
+ state = runtime->status->state == SNDRV_PCM_STATE_SUSPENDED ? SUSPENDED : EXPIRED;
+ break;
+ }
}
spin_lock_irq(&runtime->lock);
switch (runtime->status->state) {
@@ -1916,10 +1926,6 @@ static snd_pcm_sframes_t snd_pcm_lib_write1(snd_pcm_substream_t *substream,
cont = runtime->buffer_size - runtime->control->appl_ptr % runtime->buffer_size;
if (frames > cont)
frames = cont;
- if (frames == 0 && runtime->status->state == SNDRV_PCM_STATE_PAUSED) {
- err = -EPIPE;
- goto _end_unlock;
- }
snd_assert(frames != 0,
spin_unlock_irq(&runtime->lock);
return -EINVAL);
@@ -2135,21 +2141,16 @@ static snd_pcm_sframes_t snd_pcm_lib_read1(snd_pcm_substream_t *substream, void
if (runtime->sleep_min == 0 && runtime->status->state == SNDRV_PCM_STATE_RUNNING)
snd_pcm_update_hw_ptr(substream);
avail = snd_pcm_capture_avail(runtime);
- if (runtime->status->state == SNDRV_PCM_STATE_PAUSED) {
+ if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
if (avail < runtime->xfer_align) {
err = -EPIPE;
goto _end_unlock;
}
- } else if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
- if (avail < runtime->xfer_align) {
- runtime->status->state = SNDRV_PCM_STATE_SETUP;
- err = -EPIPE;
- goto _end_unlock;
- }
} else if ((avail < runtime->control->avail_min && size > avail) ||
(size >= runtime->xfer_align && avail < runtime->xfer_align)) {
wait_queue_t wait;
enum { READY, SIGNALED, ERROR, SUSPENDED, EXPIRED } state;
+
if (nonblock) {
err = -EAGAIN;
goto _end_unlock;
@@ -2166,8 +2167,11 @@ static snd_pcm_sframes_t snd_pcm_lib_read1(snd_pcm_substream_t *substream, void
spin_unlock_irq(&runtime->lock);
if (schedule_timeout(10 * HZ) == 0) {
spin_lock_irq(&runtime->lock);
- state = runtime->status->state == SNDRV_PCM_STATE_SUSPENDED ? SUSPENDED : EXPIRED;
- break;
+ if (runtime->status->state != SNDRV_PCM_STATE_PREPARED &&
+ runtime->status->state != SNDRV_PCM_STATE_PAUSED) {
+ state = runtime->status->state == SNDRV_PCM_STATE_SUSPENDED ? SUSPENDED : EXPIRED;
+ break;
+ }
}
spin_lock_irq(&runtime->lock);
switch (runtime->status->state) {
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index de8a887d7a87..275a30cb43c0 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -1848,7 +1848,10 @@ int snd_pcm_release(struct inode *inode, struct file *file)
snd_assert(substream != NULL, return -ENXIO);
snd_assert(!atomic_read(&substream->runtime->mmap_count), );
pcm = substream->pcm;
- snd_pcm_capture_drop(substream);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ snd_pcm_playback_drop(substream);
+ else
+ snd_pcm_capture_drop(substream);
fasync_helper(-1, file, 0, &substream->runtime->fasync);
down(&pcm->open_mutex);
snd_pcm_release_file(pcm_file);
@@ -1959,34 +1962,29 @@ snd_pcm_sframes_t snd_pcm_capture_rewind(snd_pcm_substream_t *substream, snd_pcm
return ret;
}
-static int snd_pcm_playback_delay(snd_pcm_substream_t *substream, snd_pcm_sframes_t *res)
+static int snd_pcm_hwsync(snd_pcm_substream_t *substream)
{
snd_pcm_runtime_t *runtime = substream->runtime;
- int err = 0;
- snd_pcm_sframes_t n;
+ int err;
+
spin_lock_irq(&runtime->lock);
switch (runtime->status->state) {
- case SNDRV_PCM_STATE_RUNNING:
case SNDRV_PCM_STATE_DRAINING:
- if (snd_pcm_update_hw_ptr(substream) >= 0) {
- n = snd_pcm_playback_hw_avail(runtime);
- if (put_user(n, res))
- err = -EFAULT;
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ goto __badfd;
+ case SNDRV_PCM_STATE_RUNNING:
+ if ((err = snd_pcm_update_hw_ptr(substream)) < 0)
break;
- } else {
- err = SNDRV_PCM_STATE_RUNNING ? -EPIPE : -EBADFD;
- }
- break;
+ /* Fall through */
+ case SNDRV_PCM_STATE_PREPARED:
case SNDRV_PCM_STATE_SUSPENDED:
- if (runtime->status->suspended_state == SNDRV_PCM_STATE_RUNNING) {
- n = snd_pcm_playback_hw_avail(runtime);
- if (put_user(n, res))
- err = -EFAULT;
- } else {
- err = -EBADFD;
- }
+ err = 0;
+ break;
+ case SNDRV_PCM_STATE_XRUN:
+ err = -EPIPE;
break;
default:
+ __badfd:
err = -EBADFD;
break;
}
@@ -1994,41 +1992,43 @@ static int snd_pcm_playback_delay(snd_pcm_substream_t *substream, snd_pcm_sframe
return err;
}
-static int snd_pcm_capture_delay(snd_pcm_substream_t *substream, snd_pcm_sframes_t *res)
+static int snd_pcm_delay(snd_pcm_substream_t *substream, snd_pcm_sframes_t *res)
{
snd_pcm_runtime_t *runtime = substream->runtime;
- int err = 0;
+ int err;
snd_pcm_sframes_t n;
+
spin_lock_irq(&runtime->lock);
switch (runtime->status->state) {
+ case SNDRV_PCM_STATE_DRAINING:
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ goto __badfd;
case SNDRV_PCM_STATE_RUNNING:
- if (snd_pcm_update_hw_ptr(substream) >= 0) {
- n = snd_pcm_capture_avail(runtime);
- if (put_user(n, res))
- err = -EFAULT;
+ if ((err = snd_pcm_update_hw_ptr(substream)) < 0)
break;
- }
/* Fall through */
- case SNDRV_PCM_STATE_XRUN:
- err = -EPIPE;
- break;
+ case SNDRV_PCM_STATE_PREPARED:
case SNDRV_PCM_STATE_SUSPENDED:
- if (runtime->status->suspended_state == SNDRV_PCM_STATE_RUNNING) {
+ err = 0;
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ n = snd_pcm_playback_hw_avail(runtime);
+ else
n = snd_pcm_capture_avail(runtime);
- if (put_user(n, res))
- err = -EFAULT;
- } else {
- err = -EBADFD;
- }
+ if (put_user(n, res))
+ err = -EFAULT;
+ break;
+ case SNDRV_PCM_STATE_XRUN:
+ err = -EPIPE;
break;
default:
+ __badfd:
err = -EBADFD;
break;
}
spin_unlock_irq(&runtime->lock);
return err;
}
-
+
static int snd_pcm_playback_ioctl1(snd_pcm_substream_t *substream,
unsigned int cmd, void *arg);
static int snd_pcm_capture_ioctl1(snd_pcm_substream_t *substream,
@@ -2076,6 +2076,10 @@ static int snd_pcm_common_ioctl1(snd_pcm_substream_t *substream,
return snd_pcm_resume(substream);
case SNDRV_PCM_IOCTL_XRUN:
return snd_pcm_xrun(substream);
+ case SNDRV_PCM_IOCTL_HWSYNC:
+ return snd_pcm_hwsync(substream);
+ case SNDRV_PCM_IOCTL_DELAY:
+ return snd_pcm_delay(substream, (snd_pcm_sframes_t *) arg);
case SNDRV_PCM_IOCTL_HW_REFINE_OLD:
return snd_pcm_hw_refine_old_user(substream, (struct sndrv_pcm_hw_params_old *) arg);
case SNDRV_PCM_IOCTL_HW_PARAMS_OLD:
@@ -2110,7 +2114,7 @@ static int snd_pcm_playback_ioctl1(snd_pcm_substream_t *substream,
{
snd_xfern_t xfern, *_xfern = arg;
snd_pcm_runtime_t *runtime = substream->runtime;
- void *bufs[128];
+ void *bufs;
snd_pcm_sframes_t result;
if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
return -EBADFD;
@@ -2120,9 +2124,15 @@ static int snd_pcm_playback_ioctl1(snd_pcm_substream_t *substream,
return -EFAULT;
if (copy_from_user(&xfern, _xfern, sizeof(xfern)))
return -EFAULT;
- if (copy_from_user(bufs, xfern.bufs, sizeof(*bufs) * runtime->channels))
+ bufs = kmalloc(sizeof(void *) * runtime->channels, GFP_KERNEL);
+ if (bufs == NULL)
+ return -ENOMEM;
+ if (copy_from_user(bufs, xfern.bufs, sizeof(void *) * runtime->channels)) {
+ kfree(bufs);
return -EFAULT;
+ }
result = snd_pcm_lib_writev(substream, bufs, xfern.frames);
+ kfree(bufs);
__put_user(result, &_xfern->result);
return result < 0 ? result : 0;
}
@@ -2150,8 +2160,6 @@ static int snd_pcm_playback_ioctl1(snd_pcm_substream_t *substream,
return snd_pcm_playback_drain(substream);
case SNDRV_PCM_IOCTL_DROP:
return snd_pcm_playback_drop(substream);
- case SNDRV_PCM_IOCTL_DELAY:
- return snd_pcm_playback_delay(substream, (snd_pcm_sframes_t*) arg);
}
return snd_pcm_common_ioctl1(substream, cmd, arg);
}
@@ -2181,7 +2189,7 @@ static int snd_pcm_capture_ioctl1(snd_pcm_substream_t *substream,
{
snd_xfern_t xfern, *_xfern = arg;
snd_pcm_runtime_t *runtime = substream->runtime;
- void *bufs[128];
+ void *bufs;
snd_pcm_sframes_t result;
if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
return -EBADFD;
@@ -2191,9 +2199,15 @@ static int snd_pcm_capture_ioctl1(snd_pcm_substream_t *substream,
return -EFAULT;
if (copy_from_user(&xfern, _xfern, sizeof(xfern)))
return -EFAULT;
- if (copy_from_user(bufs, xfern.bufs, sizeof(*bufs) * runtime->channels))
+ bufs = kmalloc(sizeof(void *) * runtime->channels, GFP_KERNEL);
+ if (bufs == NULL)
+ return -ENOMEM;
+ if (copy_from_user(bufs, xfern.bufs, sizeof(void *) * runtime->channels)) {
+ kfree(bufs);
return -EFAULT;
+ }
result = snd_pcm_lib_readv(substream, bufs, xfern.frames);
+ kfree(bufs);
__put_user(result, &_xfern->result);
return result < 0 ? result : 0;
}
@@ -2213,8 +2227,6 @@ static int snd_pcm_capture_ioctl1(snd_pcm_substream_t *substream,
return snd_pcm_capture_drain(substream);
case SNDRV_PCM_IOCTL_DROP:
return snd_pcm_capture_drop(substream);
- case SNDRV_PCM_IOCTL_DELAY:
- return snd_pcm_capture_delay(substream, (snd_pcm_sframes_t*) arg);
}
return snd_pcm_common_ioctl1(substream, cmd, arg);
}
@@ -2311,7 +2323,9 @@ static ssize_t snd_pcm_write(struct file *file, const char *buf, size_t count, l
snd_pcm_runtime_t *runtime;
snd_pcm_sframes_t result;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0)
up(&file->f_dentry->d_inode->i_sem);
+#endif
pcm_file = snd_magic_cast(snd_pcm_file_t, file->private_data, result = -ENXIO; goto end);
substream = pcm_file->substream;
snd_assert(substream != NULL, result = -ENXIO; goto end);
@@ -2329,7 +2343,9 @@ static ssize_t snd_pcm_write(struct file *file, const char *buf, size_t count, l
if (result > 0)
result = frames_to_bytes(runtime, result);
end:
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0)
down(&file->f_dentry->d_inode->i_sem);
+#endif
return result;
}
@@ -2343,7 +2359,7 @@ static ssize_t snd_pcm_readv(struct file *file, const struct iovec *_vector,
snd_pcm_runtime_t *runtime;
snd_pcm_sframes_t result;
unsigned long i;
- void *bufs[128];
+ void **bufs;
snd_pcm_uframes_t frames;
pcm_file = snd_magic_cast(snd_pcm_file_t, file->private_data, return -ENXIO);
@@ -2352,16 +2368,20 @@ static ssize_t snd_pcm_readv(struct file *file, const struct iovec *_vector,
runtime = substream->runtime;
if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
return -EBADFD;
- if (count > 128 || count != runtime->channels)
+ if (count > 1024 || count != runtime->channels)
return -EINVAL;
if (!frame_aligned(runtime, _vector->iov_len))
return -EINVAL;
frames = bytes_to_samples(runtime, _vector->iov_len);
+ bufs = kmalloc(sizeof(void *) * count, GFP_KERNEL);
+ if (bufs == NULL)
+ return -ENOMEM;
for (i = 0; i < count; ++i)
bufs[i] = _vector[i].iov_base;
result = snd_pcm_lib_readv(substream, bufs, frames);
if (result > 0)
result = frames_to_bytes(runtime, result);
+ kfree(bufs);
return result;
}
@@ -2373,10 +2393,12 @@ static ssize_t snd_pcm_writev(struct file *file, const struct iovec *_vector,
snd_pcm_runtime_t *runtime;
snd_pcm_sframes_t result;
unsigned long i;
- void *bufs[128];
+ void **bufs;
snd_pcm_uframes_t frames;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0)
up(&file->f_dentry->d_inode->i_sem);
+#endif
pcm_file = snd_magic_cast(snd_pcm_file_t, file->private_data, result = -ENXIO; goto end);
substream = pcm_file->substream;
snd_assert(substream != NULL, result = -ENXIO; goto end);
@@ -2391,13 +2413,19 @@ static ssize_t snd_pcm_writev(struct file *file, const struct iovec *_vector,
goto end;
}
frames = bytes_to_samples(runtime, _vector->iov_len);
+ bufs = kmalloc(sizeof(void *) * count, GFP_KERNEL);
+ if (bufs == NULL)
+ return -ENOMEM;
for (i = 0; i < count; ++i)
bufs[i] = _vector[i].iov_base;
result = snd_pcm_lib_writev(substream, bufs, frames);
if (result > 0)
result = frames_to_bytes(runtime, result);
+ kfree(bufs);
end:
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0)
down(&file->f_dentry->d_inode->i_sem);
+#endif
return result;
}
#endif
@@ -2422,6 +2450,8 @@ unsigned int snd_pcm_playback_poll(struct file *file, poll_table * wait)
avail = snd_pcm_playback_avail(runtime);
switch (runtime->status->state) {
case SNDRV_PCM_STATE_RUNNING:
+ case SNDRV_PCM_STATE_PREPARED:
+ case SNDRV_PCM_STATE_PAUSED:
if (avail >= runtime->control->avail_min) {
mask = POLLOUT | POLLWRNORM;
break;
@@ -2430,12 +2460,6 @@ unsigned int snd_pcm_playback_poll(struct file *file, poll_table * wait)
case SNDRV_PCM_STATE_DRAINING:
mask = 0;
break;
- case SNDRV_PCM_STATE_PREPARED:
- if (avail > 0) {
- mask = POLLOUT | POLLWRNORM;
- break;
- }
- /* Fall through */
default:
mask = POLLOUT | POLLWRNORM | POLLERR;
break;
@@ -2464,6 +2488,8 @@ unsigned int snd_pcm_capture_poll(struct file *file, poll_table * wait)
avail = snd_pcm_capture_avail(runtime);
switch (runtime->status->state) {
case SNDRV_PCM_STATE_RUNNING:
+ case SNDRV_PCM_STATE_PREPARED:
+ case SNDRV_PCM_STATE_PAUSED:
if (avail >= runtime->control->avail_min) {
mask = POLLIN | POLLRDNORM;
break;
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index 88e62bc35846..1ebd453d6271 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -1425,7 +1425,9 @@ static int snd_rawmidi_dev_register(snd_device_t *device)
snd_printk(KERN_ERR "unable to register OSS rawmidi device %i:%i\n", rmidi->card->number, 0);
} else {
rmidi->ossreg++;
+#ifdef SNDRV_OSS_INFO_DEV_MIDI
snd_oss_info_register(SNDRV_OSS_INFO_DEV_MIDI, rmidi->card->number, rmidi->name);
+#endif
}
}
if (rmidi->device == snd_amidi_map[rmidi->card->number]) {
@@ -1436,7 +1438,7 @@ static int snd_rawmidi_dev_register(snd_device_t *device)
rmidi->ossreg++;
}
}
-#endif
+#endif /* CONFIG_SND_OSSEMUL */
up(&register_mutex);
sprintf(name, "midi%d", rmidi->device);
entry = snd_info_create_card_entry(rmidi->card, name, rmidi->card->proc_root);
@@ -1484,13 +1486,15 @@ static int snd_rawmidi_dev_unregister(snd_device_t *device)
if (rmidi->ossreg) {
if (rmidi->device == snd_midi_map[rmidi->card->number]) {
snd_unregister_oss_device(SNDRV_OSS_DEVICE_TYPE_MIDI, rmidi->card, 0);
+#ifdef SNDRV_OSS_INFO_DEV_MIDI
snd_oss_info_unregister(SNDRV_OSS_INFO_DEV_MIDI, rmidi->card->number);
+#endif
}
if (rmidi->device == snd_amidi_map[rmidi->card->number])
snd_unregister_oss_device(SNDRV_OSS_DEVICE_TYPE_MIDI, rmidi->card, 1);
rmidi->ossreg = 0;
}
-#endif
+#endif /* CONFIG_SND_OSSEMUL */
if (rmidi->ops && rmidi->ops->dev_unregister)
rmidi->ops->dev_unregister(rmidi);
snd_unregister_device(SNDRV_DEVICE_TYPE_RAWMIDI, rmidi->card, rmidi->device);
diff --git a/sound/core/seq/oss/seq_oss_readq.c b/sound/core/seq/oss/seq_oss_readq.c
index 53ea18b72351..9690f8009bc1 100644
--- a/sound/core/seq/oss/seq_oss_readq.c
+++ b/sound/core/seq/oss/seq_oss_readq.c
@@ -159,9 +159,10 @@ snd_seq_oss_readq_pick(seq_oss_readq_t *q, int blocking, unsigned long *rflags)
spin_lock_irqsave(&q->lock, *rflags);
if (q->qlen == 0) {
if (blocking) {
- snd_seq_sleep_timeout_in_lock(&q->midi_sleep,
- &q->lock,
- q->pre_event_timeout);
+ spin_unlock(&q->lock);
+ interruptible_sleep_on_timeout(&q->midi_sleep,
+ q->pre_event_timeout);
+ spin_lock(&q->lock);
}
if (q->qlen == 0) {
spin_unlock_irqrestore(&q->lock, *rflags);
diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
index d57d4489ae9f..9cc61a3cd426 100644
--- a/sound/core/seq/oss/seq_oss_synth.c
+++ b/sound/core/seq/oss/seq_oss_synth.c
@@ -146,8 +146,10 @@ snd_seq_oss_synth_register(snd_seq_device_t *dev)
debug_printk(("synth %s registered %d\n", rec->name, i));
spin_unlock_irqrestore(&register_lock, flags);
dev->driver_data = rec;
+#ifdef SNDRV_OSS_INFO_DEV_SYNTH
if (i < SNDRV_CARDS)
snd_oss_info_register(SNDRV_OSS_INFO_DEV_SYNTH, i, rec->name);
+#endif
return 0;
}
@@ -178,8 +180,10 @@ snd_seq_oss_synth_unregister(snd_seq_device_t *dev)
max_synth_devs = index + 1;
}
spin_unlock_irqrestore(&register_lock, flags);
+#ifdef SNDRV_OSS_INFO_DEV_SYNTH
if (rec->seq_device < SNDRV_CARDS)
snd_oss_info_unregister(SNDRV_OSS_INFO_DEV_SYNTH, rec->seq_device);
+#endif
snd_use_lock_sync(&rec->use_lock);
kfree(rec);
diff --git a/sound/core/seq/oss/seq_oss_writeq.c b/sound/core/seq/oss/seq_oss_writeq.c
index 5d08882133b9..9f8a5438d056 100644
--- a/sound/core/seq/oss/seq_oss_writeq.c
+++ b/sound/core/seq/oss/seq_oss_writeq.c
@@ -122,7 +122,9 @@ snd_seq_oss_writeq_sync(seq_oss_writeq_t *q)
}
/* wait for echo event */
- snd_seq_sleep_timeout_in_lock(&q->sync_sleep, &q->sync_lock, HZ);
+ spin_unlock(&q->sync_lock);
+ interruptible_sleep_on_timeout(&q->sync_sleep, HZ);
+ spin_lock(&q->sync_lock);
if (signal_pending(current)) {
/* interrupted - return 0 to finish sync */
q->sync_event_put = 0;
diff --git a/sound/core/seq/seq.c b/sound/core/seq/seq.c
index 6122671ddaf8..b6fdc9db2a5c 100644
--- a/sound/core/seq/seq.c
+++ b/sound/core/seq/seq.c
@@ -135,7 +135,7 @@ EXPORT_SYMBOL(snd_seq_event_port_attach);
EXPORT_SYMBOL(snd_seq_event_port_detach);
/* seq_lock.c */
#if defined(__SMP__) || defined(CONFIG_SND_DEBUG)
-EXPORT_SYMBOL(snd_seq_sleep_in_lock);
-EXPORT_SYMBOL(snd_seq_sleep_timeout_in_lock);
+/*EXPORT_SYMBOL(snd_seq_sleep_in_lock);*/
+/*EXPORT_SYMBOL(snd_seq_sleep_timeout_in_lock);*/
EXPORT_SYMBOL(snd_use_lock_sync_helper);
#endif
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index f54dc43e0d39..6fca01c0d90f 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -152,9 +152,11 @@ client_t *snd_seq_client_use_ptr(int clientid)
}
} else if (clientid >= 64 && clientid < 128) {
int card = (clientid - 64) / 8;
- if (card < snd_ecards_limit && ! card_requested[card]) {
- card_requested[card] = 1;
- snd_request_card(card);
+ if (card < snd_ecards_limit) {
+ if (! card_requested[card]) {
+ card_requested[card] = 1;
+ snd_request_card(card);
+ }
snd_seq_device_load_drivers();
}
}
diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
index 399acb13f1c3..80e4545e117c 100644
--- a/sound/core/seq/seq_fifo.c
+++ b/sound/core/seq/seq_fifo.c
@@ -182,7 +182,9 @@ int snd_seq_fifo_cell_out(fifo_t *f, snd_seq_event_cell_t **cellp, int nonblock)
spin_unlock_irqrestore(&f->lock, flags);
return -EAGAIN;
}
- snd_seq_sleep_in_lock(&f->input_sleep, &f->lock);
+ spin_unlock(&f->lock);
+ interruptible_sleep_on(&f->input_sleep);
+ spin_lock(&f->lock);
if (signal_pending(current)) {
spin_unlock_irqrestore(&f->lock, flags);
diff --git a/sound/core/seq/seq_lock.c b/sound/core/seq/seq_lock.c
index 8ad28eb9ccd1..289393276663 100644
--- a/sound/core/seq/seq_lock.c
+++ b/sound/core/seq/seq_lock.c
@@ -25,6 +25,7 @@
#if defined(__SMP__) || defined(CONFIG_SND_DEBUG)
+#if 0 /* NOT USED */
/* (interruptible) sleep_on during the specified spinlock */
void snd_seq_sleep_in_lock(wait_queue_head_t *p, spinlock_t *lock)
{
@@ -60,6 +61,7 @@ long snd_seq_sleep_timeout_in_lock(wait_queue_head_t *p, spinlock_t *lock, long
return timeout;
}
+#endif /* NOT USED */
/* wait until all locks are released */
void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
diff --git a/sound/core/seq/seq_lock.h b/sound/core/seq/seq_lock.h
index 79080cebc289..68b3271cdfc7 100644
--- a/sound/core/seq/seq_lock.h
+++ b/sound/core/seq/seq_lock.h
@@ -20,12 +20,6 @@ typedef atomic_t snd_use_lock_t;
void snd_use_lock_sync_helper(snd_use_lock_t *lock, const char *file, int line);
#define snd_use_lock_sync(lockp) snd_use_lock_sync_helper(lockp, __BASE_FILE__, __LINE__)
-/* (interruptible) sleep_on during the specified spinlock */
-void snd_seq_sleep_in_lock(wait_queue_head_t *p, spinlock_t *lock);
-
-/* (interruptible) sleep_on with timeout during the specified spinlock */
-long snd_seq_sleep_timeout_in_lock(wait_queue_head_t *p, spinlock_t *lock, long timeout);
-
#else /* SMP || CONFIG_SND_DEBUG */
typedef spinlock_t snd_use_lock_t; /* dummy */
@@ -34,9 +28,6 @@ typedef spinlock_t snd_use_lock_t; /* dummy */
#define snd_use_lock_free(lockp) /**/
#define snd_use_lock_sync(lockp) /**/
-#define snd_seq_sleep_in_lock(p,lock) interruptible_sleep_on(p)
-#define snd_seq_sleep_timeout_in_lock(p,lock,timeout) interruptible_sleep_on_timeout(p,timeout)
-
#endif /* SMP || CONFIG_SND_DEBUG */
#endif /* __SND_SEQ_LOCK_H */
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
index b577622c58ff..87aa5a6cc706 100644
--- a/sound/core/seq/seq_memory.c
+++ b/sound/core/seq/seq_memory.c
@@ -233,17 +233,21 @@ int snd_seq_cell_alloc(pool_t *pool, snd_seq_event_cell_t **cellp, int nonblock,
goto __error;
}
while (pool->free == NULL && ! nonblock && ! pool->closing) {
+
+ spin_unlock(&pool->lock);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0)
/* change semaphore to allow other clients
to access device file */
if (file)
up(&semaphore_of(file));
-
- snd_seq_sleep_in_lock(&pool->output_sleep, &pool->lock);
-
+#endif
+ interruptible_sleep_on(&pool->output_sleep);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0)
/* restore semaphore again */
if (file)
down(&semaphore_of(file));
-
+#endif
+ spin_lock(&pool->lock);
/* interrupted? */
if (signal_pending(current)) {
err = -ERESTARTSYS;
diff --git a/sound/core/sound.c b/sound/core/sound.c
index 6a7056336c57..b149ab583cdc 100644
--- a/sound/core/sound.c
+++ b/sound/core/sound.c
@@ -318,9 +318,6 @@ static int __init alsa_sound_init(void)
#endif
if (register_chrdev(snd_major, "alsa", &snd_fops)) {
snd_printk(KERN_ERR "unable to register native major device number %d\n", snd_major);
-#ifdef CONFIG_SND_OSSEMUL
- snd_oss_cleanup_module();
-#endif
return -EIO;
}
#ifdef CONFIG_SND_DEBUG_MEMORY
@@ -330,9 +327,6 @@ static int __init alsa_sound_init(void)
#ifdef CONFIG_SND_DEBUG_MEMORY
snd_memory_done();
#endif
-#ifdef CONFIG_SND_OSSEMUL
- snd_oss_cleanup_module();
-#endif
return -ENOMEM;
}
#ifdef CONFIG_SND_OSSEMUL
@@ -369,7 +363,6 @@ static void __exit alsa_sound_exit(void)
#ifdef CONFIG_SND_OSSEMUL
snd_info_minor_unregister();
- snd_oss_cleanup_module();
#endif
snd_info_done();
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0) && defined(CONFIG_APM)
@@ -455,6 +448,7 @@ EXPORT_SYMBOL(snd_dma_disable);
EXPORT_SYMBOL(snd_dma_residue);
#endif
/* info.c */
+#ifdef CONFIG_PROC_FS
EXPORT_SYMBOL(snd_seq_root);
EXPORT_SYMBOL(snd_create_proc_entry);
EXPORT_SYMBOL(snd_remove_proc_entry);
@@ -468,8 +462,9 @@ EXPORT_SYMBOL(snd_info_create_device);
EXPORT_SYMBOL(snd_info_free_device);
EXPORT_SYMBOL(snd_info_register);
EXPORT_SYMBOL(snd_info_unregister);
+#endif
/* info_oss.c */
-#ifdef CONFIG_SND_OSSEMUL
+#if defined(CONFIG_SND_OSSEMUL) && defined(CONFIG_PROC_FS)
EXPORT_SYMBOL(snd_oss_info_register);
#endif
/* control.c */
diff --git a/sound/core/sound_oss.c b/sound/core/sound_oss.c
index 348644a093b1..230f3cdcd0ee 100644
--- a/sound/core/sound_oss.c
+++ b/sound/core/sound_oss.c
@@ -183,6 +183,8 @@ int snd_unregister_oss_device(int type, snd_card_t * card, int dev)
* INFO PART
*/
+#ifdef CONFIG_PROC_FS
+
static snd_info_entry_t *snd_minor_info_oss_entry = NULL;
static void snd_minor_info_oss_read(snd_info_entry_t *entry, snd_info_buffer_t * buffer)
@@ -207,8 +209,11 @@ static void snd_minor_info_oss_read(snd_info_entry_t *entry, snd_info_buffer_t *
up(&sound_oss_mutex);
}
+#endif /* CONFIG_PROC_FS */
+
int __init snd_minor_info_oss_init(void)
{
+#ifdef CONFIG_PROC_FS
snd_info_entry_t *entry;
entry = snd_info_create_module_entry(THIS_MODULE, "devices", snd_oss_root);
@@ -222,13 +227,16 @@ int __init snd_minor_info_oss_init(void)
}
}
snd_minor_info_oss_entry = entry;
+#endif
return 0;
}
int __exit snd_minor_info_oss_done(void)
{
+#ifdef CONFIG_PROC_FS
if (snd_minor_info_oss_entry)
snd_info_unregister(snd_minor_info_oss_entry);
+#endif
return 0;
}
@@ -241,8 +249,4 @@ int __init snd_oss_init_module(void)
return 0;
}
-void snd_oss_cleanup_module(void)
-{
-}
-
#endif /* CONFIG_SND_OSSEMUL */
diff --git a/sound/core/timer.c b/sound/core/timer.c
index bd2c089ac19e..0c3b87e95746 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -1356,7 +1356,7 @@ static int __init alsa_timer_init(void)
int err;
snd_info_entry_t *entry;
-#ifdef CONFIG_SND_OSSEMUL
+#ifdef SNDRV_OSS_INFO_DEV_TIMERS
snd_oss_info_register(SNDRV_OSS_INFO_DEV_TIMERS, SNDRV_CARDS - 1, "system timer");
#endif
if ((entry = snd_info_create_module_entry(THIS_MODULE, "timers", NULL)) != NULL) {
@@ -1391,7 +1391,7 @@ static void __exit alsa_timer_exit(void)
snd_info_unregister(snd_timer_proc_entry);
snd_timer_proc_entry = NULL;
}
-#ifdef CONFIG_SND_OSSEMUL
+#ifdef SNDRV_OSS_INFO_DEV_TIMERS
snd_oss_info_unregister(SNDRV_OSS_INFO_DEV_TIMERS, SNDRV_CARDS - 1);
#endif
}
diff --git a/sound/isa/es18xx.c b/sound/isa/es18xx.c
index 87ff9a8bea07..7125282c8875 100644
--- a/sound/isa/es18xx.c
+++ b/sound/isa/es18xx.c
@@ -1311,12 +1311,12 @@ static int __init snd_es18xx_initialize(es18xx_t *chip)
if (chip->caps & ES18XX_CONTROL) {
/* Hardware volume IRQ */
snd_es18xx_config_write(chip, 0x27, chip->irq);
- if (chip->fm_port > SNDRV_AUTO_PORT) {
+ if (chip->fm_port > 0 && chip->fm_port != SNDRV_AUTO_PORT) {
/* FM I/O */
snd_es18xx_config_write(chip, 0x62, chip->fm_port >> 8);
snd_es18xx_config_write(chip, 0x63, chip->fm_port & 0xff);
}
- if (chip->mpu_port > SNDRV_AUTO_PORT) {
+ if (chip->mpu_port > 0 && chip->mpu_port != SNDRV_AUTO_PORT) {
/* MPU-401 I/O */
snd_es18xx_config_write(chip, 0x64, chip->mpu_port >> 8);
snd_es18xx_config_write(chip, 0x65, chip->mpu_port & 0xff);
@@ -1404,7 +1404,7 @@ static int __init snd_es18xx_initialize(es18xx_t *chip)
snd_es18xx_mixer_write(chip, 0x7A, 0x68);
/* Enable and set hardware volume interrupt */
snd_es18xx_mixer_write(chip, 0x64, 0x06);
- if (chip->mpu_port > SNDRV_AUTO_PORT) {
+ if (chip->mpu_port > 0 && chip->mpu_port != SNDRV_AUTO_PORT) {
/* MPU401 share irq with audio
Joystick enabled
FM enabled */
@@ -2040,7 +2040,7 @@ static int __init snd_audiodrive_isapnp(int dev, struct snd_audiodrive *acard)
/* skip csn and logdev initialization - already done in isapnp_configure */
isapnp_cfg_begin(pdev->bus->number, pdev->devfn);
isapnp_write_byte(0x27, pdev->irq_resource[0].start); /* Hardware Volume IRQ Number */
- if (snd_mpu_port[dev] > SNDRV_AUTO_PORT)
+ if (snd_mpu_port[dev] != SNDRV_AUTO_PORT)
isapnp_write_byte(0x28, pdev->irq); /* MPU-401 IRQ Number */
isapnp_write_byte(0x72, pdev->irq_resource[0].start); /* second IRQ */
isapnp_cfg_end();
@@ -2147,16 +2147,18 @@ static int __init snd_audiodrive_probe(int dev)
return err;
}
- if (snd_opl3_create(card, chip->fm_port, chip->fm_port + 2, OPL3_HW_OPL3, 0, &opl3) < 0) {
- printk(KERN_ERR PFX "opl3 not detected at 0x%lx\n", chip->port);
- } else {
- if ((err = snd_opl3_hwdep_new(opl3, 0, 1, NULL)) < 0) {
- snd_card_free(card);
- return err;
+ if (snd_fm_port[dev] > 0 && snd_fm_port[dev] != SNDRV_AUTO_PORT) {
+ if (snd_opl3_create(card, chip->fm_port, chip->fm_port + 2, OPL3_HW_OPL3, 0, &opl3) < 0) {
+ printk(KERN_ERR PFX "opl3 not detected at 0x%lx\n", chip->fm_port);
+ } else {
+ if ((err = snd_opl3_hwdep_new(opl3, 0, 1, NULL)) < 0) {
+ snd_card_free(card);
+ return err;
+ }
}
}
- if (snd_mpu_port[dev] != SNDRV_AUTO_PORT) {
+ if (snd_mpu_port[dev] > 0 && snd_mpu_port[dev] != SNDRV_AUTO_PORT) {
if ((err = snd_mpu401_uart_new(card, 0, MPU401_HW_ES18XX,
chip->mpu_port, 0,
irq, 0,
diff --git a/sound/isa/opl3sa2.c b/sound/isa/opl3sa2.c
index 4a35d722a13a..b8a342f6a736 100644
--- a/sound/isa/opl3sa2.c
+++ b/sound/isa/opl3sa2.c
@@ -22,7 +22,6 @@
#include <sound/driver.h>
#include <asm/io.h>
#include <linux/init.h>
-#include <linux/isapnp.h>
#include <linux/pm.h>
#include <linux/slab.h>
#ifndef LINUX_ISAPNP_H
diff --git a/sound/isa/sb/emu8000.c b/sound/isa/sb/emu8000.c
index 733ec08d1f57..289435865881 100644
--- a/sound/isa/sb/emu8000.c
+++ b/sound/isa/sb/emu8000.c
@@ -1138,11 +1138,13 @@ snd_emu8000_new(snd_card_t *card, int index, long port, int seq_ports, snd_seq_d
snd_emu8000_free(hw);
return err;
}
+#if defined(CONFIG_SND_SEQUENCER) || defined(CONFIG_SND_SEQUENCER_MODULE)
if (snd_seq_device_new(card, index, SNDRV_SEQ_DEV_ID_EMU8000,
sizeof(emu8000_t*), &awe) >= 0) {
strcpy(awe->name, "EMU-8000");
*(emu8000_t**)SNDRV_SEQ_DEVICE_ARGPTR(awe) = hw;
}
+#endif
if (awe_ret)
*awe_ret = awe;
diff --git a/sound/isa/wavefront/wavefront_synth.c b/sound/isa/wavefront/wavefront_synth.c
index 0c4557b159c3..5f0058dc08ca 100644
--- a/sound/isa/wavefront/wavefront_synth.c
+++ b/sound/isa/wavefront/wavefront_synth.c
@@ -114,7 +114,7 @@ MODULE_PARM_DESC(osrun_time, "how many seconds to wait for the ICS2115 OS");
#ifdef WF_DEBUG
-#ifdef NEW_MACRO_VARARGS
+#if defined(NEW_MACRO_VARARGS) || __GNUC__ >= 3
#define DPRINT(cond, ...) \
if ((dev->debug & (cond)) == (cond)) { \
snd_printk (__VA_ARGS__); \
diff --git a/sound/pci/Config.help b/sound/pci/Config.help
index 6047d766370d..1c1e29dec833 100644
--- a/sound/pci/Config.help
+++ b/sound/pci/Config.help
@@ -77,7 +77,7 @@ CONFIG_SND_ICE1712
CONFIG_SND_INTEL8X0
Say 'Y' or 'M' to include support for Intel8x0 based soundcards,
- SiS 7012, AMD768/8111 and NVidia NForce chips.
+ SiS 7012, AMD768/8111, NVidia NForce and ALi 5455 chips.
CONFIG_SND_SONICVIBES
Say 'Y' or 'M' to include support for S3 SonicVibes based soundcards.
diff --git a/sound/pci/Config.in b/sound/pci/Config.in
index e020e4044338..30c4e1f1bdf9 100644
--- a/sound/pci/Config.in
+++ b/sound/pci/Config.in
@@ -4,9 +4,9 @@ mainmenu_option next_comment
comment 'PCI devices'
dep_tristate 'ALi PCI Audio M5451' CONFIG_SND_ALI5451 $CONFIG_SND
-dep_tristate 'Cirrus Logic (Sound Fusion) CS4280/CS461x/CS462x/CS463x' CONFIG_SND_CS46XX $CONFIG_SND
+dep_tristate 'Cirrus Logic (Sound Fusion) CS4280/CS461x/CS462x/CS463x' CONFIG_SND_CS46XX $CONFIG_SND $CONFIG_SOUND_GAMEPORT
dep_mbool ' Cirrus Logic (Sound Fusion) New DSP support (EXPERIMENTAL)' CONFIG_SND_CS46XX_NEW_DSP $CONFIG_SND_CS46XX $CONFIG_EXPERIMENTAL
-dep_tristate 'Cirrus Logic (Sound Fusion) CS4281' CONFIG_SND_CS4281 $CONFIG_SND
+dep_tristate 'Cirrus Logic (Sound Fusion) CS4281' CONFIG_SND_CS4281 $CONFIG_SND $CONFIG_SOUND_GAMEPORT
dep_tristate 'EMU10K1 (SB Live! & Audigy, E-mu APS)' CONFIG_SND_EMU10K1 $CONFIG_SND
dep_tristate 'Korg 1212 IO' CONFIG_SND_KORG1212 $CONFIG_SND
dep_tristate 'NeoMagic NM256AV/ZX' CONFIG_SND_NM256 $CONFIG_SND
@@ -14,30 +14,19 @@ dep_tristate 'RME Digi32, 32/8, 32 PRO' CONFIG_SND_RME32 $CONFIG_SND
dep_tristate 'RME Digi96, 96/8, 96/8 PRO' CONFIG_SND_RME96 $CONFIG_SND
dep_tristate 'RME Digi9652 (Hammerfall)' CONFIG_SND_RME9652 $CONFIG_SND
dep_tristate 'RME Hammerfall DSP Audio' CONFIG_SND_HDSP $CONFIG_SND
-dep_tristate 'Trident 4D-Wave DX/NX; SiS 7018' CONFIG_SND_TRIDENT $CONFIG_SND
+dep_tristate 'Trident 4D-Wave DX/NX; SiS 7018' CONFIG_SND_TRIDENT $CONFIG_SND $CONFIG_SOUND_GAMEPORT
dep_tristate 'Yamaha YMF724/740/744/754' CONFIG_SND_YMFPCI $CONFIG_SND
dep_tristate 'Avance Logic ALS4000' CONFIG_SND_ALS4000 $CONFIG_SND
dep_tristate 'C-Media 8738, 8338' CONFIG_SND_CMIPCI $CONFIG_SND
dep_tristate '(Creative) Ensoniq AudioPCI 1370' CONFIG_SND_ENS1370 $CONFIG_SND
dep_tristate '(Creative) Ensoniq AudioPCI 1371/1373' CONFIG_SND_ENS1371 $CONFIG_SND
-dep_tristate 'ESS ES1938/1946 (Solo-1)' CONFIG_SND_ES1938 $CONFIG_SND
+dep_tristate 'ESS ES1938/1946 (Solo-1)' CONFIG_SND_ES1938 $CONFIG_SND $CONFIG_SOUND_GAMEPORT
dep_tristate 'ESS ES1968/1978 (Maestro-1/2/2E)' CONFIG_SND_ES1968 $CONFIG_SND
dep_tristate 'ESS Allegro/Maestro3' CONFIG_SND_MAESTRO3 $CONFIG_SND
dep_tristate 'ForteMedia FM801' CONFIG_SND_FM801 $CONFIG_SND
dep_tristate 'ICEnsemble ICE1712 (Envy24)' CONFIG_SND_ICE1712 $CONFIG_SND
dep_tristate 'Intel i8x0/MX440, SiS 7012; Ali 5455; NForce Audio; AMD768/8111' CONFIG_SND_INTEL8X0 $CONFIG_SND
-dep_tristate 'S3 SonicVibes' CONFIG_SND_SONICVIBES $CONFIG_SND
+dep_tristate 'S3 SonicVibes' CONFIG_SND_SONICVIBES $CONFIG_SND $CONFIG_SOUND_GAMEPORT
dep_tristate 'VIA 82C686A/B, 8233 South Bridge' CONFIG_SND_VIA82XX $CONFIG_SND
-# define gameport if necessary
-if [ "$CONFIG_INPUT_GAMEPORT" != "n" ]; then
- if [ "$CONFIG_SND_CS4281" = "y" \
- -o "$CONFIG_SND_ES1938" = "y" \
- -o "$CONFIG_SND_CS46XX" = "y" \
- -o "$CONFIG_SND_SONICVIBES" = "y" \
- -o "$CONFIG_SND_TRIDENT" = "y" ]; then
- define_tristate CONFIG_INPUT_GAMEPORT y
- fi
-fi
-
endmenu
diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
index 2517edf7f4b5..39ba1afb875f 100644
--- a/sound/pci/emu10k1/emufx.c
+++ b/sound/pci/emu10k1/emufx.c
@@ -862,6 +862,7 @@ static void snd_emu10k1_tram_peek(emu10k1_t *emu, emu10k1_fx8010_code_t *icode)
{
int tram;
+ memset(icode->tram_valid, 0, sizeof(icode->tram_valid));
for (tram = 0; tram < 0xa0; tram++) {
set_bit(tram, icode->tram_valid);
icode->tram_data_map[tram] = snd_emu10k1_ptr_read(emu, TANKMEMDATAREGBASE + tram, 0);
@@ -885,6 +886,7 @@ static void snd_emu10k1_code_peek(emu10k1_t *emu, emu10k1_fx8010_code_t *icode)
{
u32 pc;
+ memset(icode->code_valid, 0, sizeof(icode->code_valid));
for (pc = 0; pc < 512; pc++) {
set_bit(pc, icode->code_valid);
icode->code[pc][0] = snd_emu10k1_efx_read(emu, pc * 2);
@@ -1031,7 +1033,7 @@ static void snd_emu10k1_del_controls(emu10k1_t *emu, emu10k1_fx8010_code_t *icod
}
}
-static void snd_emu10k1_list_controls(emu10k1_t *emu, emu10k1_fx8010_code_t *icode)
+static int snd_emu10k1_list_controls(emu10k1_t *emu, emu10k1_fx8010_code_t *icode)
{
int i = 0, j;
unsigned int total = 0;
@@ -1044,7 +1046,7 @@ static void snd_emu10k1_list_controls(emu10k1_t *emu, emu10k1_fx8010_code_t *ico
list_for_each(list, &emu->fx8010.gpr_ctl) {
ctl = emu10k1_gpr_ctl(list);
total++;
- if (i < icode->gpr_list_control_count) {
+ if (_gctl && i < icode->gpr_list_control_count) {
memset(&gctl, 0, sizeof(gctl));
id = &ctl->kcontrol->id;
gctl.id.iface = id->iface;
@@ -1061,13 +1063,14 @@ static void snd_emu10k1_list_controls(emu10k1_t *emu, emu10k1_fx8010_code_t *ico
gctl.min = ctl->min;
gctl.max = ctl->max;
gctl.translation = ctl->translation;
- snd_runtime_check(copy_to_user(_gctl, &gctl, sizeof(gctl)) == 0, goto __next);
+ if (copy_to_user(_gctl, &gctl, sizeof(gctl)))
+ return -EFAULT;
+ _gctl++;
+ i++;
}
- __next:
- _gctl++;
- i++;
}
icode->gpr_list_control_total = total;
+ return 0;
}
static int snd_emu10k1_icode_poke(emu10k1_t *emu, emu10k1_fx8010_code_t *icode)
@@ -1103,6 +1106,8 @@ static int snd_emu10k1_icode_poke(emu10k1_t *emu, emu10k1_fx8010_code_t *icode)
static int snd_emu10k1_icode_peek(emu10k1_t *emu, emu10k1_fx8010_code_t *icode)
{
+ int err;
+
down(&emu->fx8010.lock);
strncpy(icode->name, emu->fx8010.name, sizeof(icode->name)-1);
emu->fx8010.name[sizeof(emu->fx8010.name)-1] = '\0';
@@ -1110,9 +1115,9 @@ static int snd_emu10k1_icode_peek(emu10k1_t *emu, emu10k1_fx8010_code_t *icode)
snd_emu10k1_gpr_peek(emu, icode);
snd_emu10k1_tram_peek(emu, icode);
snd_emu10k1_code_peek(emu, icode);
- snd_emu10k1_list_controls(emu, icode);
+ err = snd_emu10k1_list_controls(emu, icode);
up(&emu->fx8010.lock);
- return 0;
+ return err;
}
static int snd_emu10k1_ipcm_poke(emu10k1_t *emu, emu10k1_fx8010_pcm_t *ipcm)
@@ -2171,9 +2176,13 @@ static int snd_emu10k1_fx8010_ioctl(snd_hwdep_t * hw, struct file *file, unsigne
kfree(icode);
return res;
case SNDRV_EMU10K1_IOCTL_CODE_PEEK:
- icode = (emu10k1_fx8010_code_t *)snd_kcalloc(sizeof(*icode), GFP_KERNEL);
+ icode = (emu10k1_fx8010_code_t *)kmalloc(sizeof(*icode), GFP_KERNEL);
if (icode == NULL)
return -ENOMEM;
+ if (copy_from_user(icode, (void *)arg, sizeof(*icode))) {
+ kfree(icode);
+ return -EFAULT;
+ }
res = snd_emu10k1_icode_peek(emu, icode);
if (res == 0 && copy_to_user((void *)arg, icode, sizeof(*icode))) {
kfree(icode);
diff --git a/sound/pci/ens1370.c b/sound/pci/ens1370.c
index 8de3b2a3a74f..36f30fb25450 100644
--- a/sound/pci/ens1370.c
+++ b/sound/pci/ens1370.c
@@ -30,8 +30,11 @@
#include <sound/control.h>
#include <sound/pcm.h>
#include <sound/rawmidi.h>
+#ifdef CHIP1371
#include <sound/ac97_codec.h>
+#else
#include <sound/ak4531_codec.h>
+#endif
#define SNDRV_GET_ID
#include <sound/initval.h>
@@ -352,13 +355,16 @@ struct _snd_ensoniq {
unsigned int rev; /* chip revision */
union {
+#ifdef CHIP1371
struct {
ac97_t *ac97;
} es1371;
+#else
struct {
int pclkdiv_lock;
ak4531_t *ak4531;
} es1370;
+#endif
} u;
struct pci_dev *pci;
diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
index 93a14092d463..7f7466b7e458 100644
--- a/sound/pci/ymfpci/ymfpci_main.c
+++ b/sound/pci/ymfpci/ymfpci_main.c
@@ -1538,6 +1538,7 @@ YMFPCI_DOUBLE("ADC Playback Volume", 0, YDSXGR_PRIADCOUTVOL),
YMFPCI_DOUBLE("ADC Capture Volume", 0, YDSXGR_PRIADCLOOPVOL),
YMFPCI_DOUBLE("ADC Playback Volume", 1, YDSXGR_SECADCOUTVOL),
YMFPCI_DOUBLE("ADC Capture Volume", 1, YDSXGR_SECADCLOOPVOL),
+YMFPCI_DOUBLE("FM Legacy Volume", 0, YDSXGR_LEGACYOUTVOL),
YMFPCI_DOUBLE(SNDRV_CTL_NAME_IEC958("AC97 ", PLAYBACK,VOLUME), 0, YDSXGR_ZVOUTVOL),
YMFPCI_DOUBLE(SNDRV_CTL_NAME_IEC958("", CAPTURE,VOLUME), 0, YDSXGR_ZVLOOPVOL),
YMFPCI_DOUBLE(SNDRV_CTL_NAME_IEC958("AC97 ",PLAYBACK,VOLUME), 1, YDSXGR_SPDIFOUTVOL),
@@ -2022,6 +2023,7 @@ static int __devinit snd_ymfpci_memalloc(ymfpci_t *chip)
snd_ymfpci_writel(chip, YDSXGR_NATIVEADCINVOL, 0x3fff3fff);
snd_ymfpci_writel(chip, YDSXGR_NATIVEDACINVOL, 0x3fff3fff);
snd_ymfpci_writel(chip, YDSXGR_PRIADCLOOPVOL, 0x3fff3fff);
+ snd_ymfpci_writel(chip, YDSXGR_LEGACYOUTVOL, 0x3fff3fff);
return 0;
}
@@ -2036,6 +2038,7 @@ static int snd_ymfpci_free(ymfpci_t *chip)
if (chip->res_reg_area) { /* don't touch busy hardware */
snd_ymfpci_writel(chip, YDSXGR_NATIVEDACOUTVOL, 0);
snd_ymfpci_writel(chip, YDSXGR_BUF441OUTVOL, 0);
+ snd_ymfpci_writel(chip, YDSXGR_LEGACYOUTVOL, 0);
snd_ymfpci_writel(chip, YDSXGR_STATUS, ~0);
snd_ymfpci_disable_dsp(chip);
snd_ymfpci_writel(chip, YDSXGR_PLAYCTRLBASE, 0);
@@ -2100,6 +2103,7 @@ static int saved_regs_index[] = {
YDSXGR_SPDIFLOOPVOL,
YDSXGR_SPDIFOUTVOL,
YDSXGR_ZVOUTVOL,
+ YDSXGR_LEGACYOUTVOL,
/* address bases */
YDSXGR_PLAYCTRLBASE,
YDSXGR_RECCTRLBASE,
@@ -2112,7 +2116,7 @@ static int saved_regs_index[] = {
YDSXGR_ADCFORMAT,
YDSXGR_ADCSLOTSR,
};
-#define YDSXGR_NUM_SAVED_REGS (sizeof(saved_regs_index)/sizeof(saved_regs_index[0]))
+#define YDSXGR_NUM_SAVED_REGS ARRAY_SIZE(saved_regs_index)
void snd_ymfpci_suspend(ymfpci_t *chip)
{
diff --git a/sound/usb/usbaudio.c b/sound/usb/usbaudio.c
index a71a1586543e..8588a0d52b28 100644
--- a/sound/usb/usbaudio.c
+++ b/sound/usb/usbaudio.c
@@ -957,6 +957,19 @@ static int set_format(snd_usb_substream_t *subs, snd_pcm_runtime_t *runtime)
if (subs->interface >= 0 && subs->interface != fmt->iface) {
usb_set_interface(subs->dev, subs->interface, 0);
subs->interface = -1;
+ subs->format = 0;
+ }
+
+ /* set interface */
+ if (subs->interface != fmt->iface || subs->format != fmt->altset_idx) {
+ if (usb_set_interface(dev, fmt->iface, fmt->altset_idx) < 0) {
+ snd_printk(KERN_ERR "%d:%d:%d: usb_set_interface failed\n",
+ dev->devnum, fmt->iface, fmt->altsetting);
+ return -EIO;
+ }
+ snd_printdd(KERN_INFO "setting usb interface %d:%d\n", fmt->iface, fmt->altset_idx);
+ subs->interface = fmt->iface;
+ subs->format = fmt->altset_idx;
}
/* create a data pipe */
@@ -965,7 +978,6 @@ static int set_format(snd_usb_substream_t *subs, snd_pcm_runtime_t *runtime)
subs->datapipe = usb_sndisocpipe(dev, ep);
else
subs->datapipe = usb_rcvisocpipe(dev, ep);
- subs->format = fmt->altset_idx;
subs->syncpipe = subs->syncinterval = 0;
subs->maxpacksize = alts->endpoint[0].wMaxPacketSize;
subs->maxframesize = bytes_to_frames(runtime, subs->maxpacksize);
@@ -998,15 +1010,6 @@ static int set_format(snd_usb_substream_t *subs, snd_pcm_runtime_t *runtime)
subs->syncinterval = alts->endpoint[1].bRefresh;
}
- /* set interface */
- if (usb_set_interface(dev, fmt->iface, fmt->altset_idx) < 0) {
- snd_printk(KERN_ERR "%d:%d:%d: usb_set_interface failed\n",
- dev->devnum, fmt->iface, fmt->altsetting);
- return -EIO;
- }
- snd_printdd(KERN_INFO "setting usb interface %d:%d\n", fmt->iface, fmt->altset_idx);
- subs->interface = fmt->iface;
-
ep = alts->endpoint[0].bEndpointAddress;
/* if endpoint has pitch control, enable it */
if (fmt->attributes & EP_CS_ATTR_PITCH_CONTROL) {
@@ -1171,6 +1174,7 @@ static int snd_usb_pcm_open(snd_pcm_substream_t *substream, int direction,
snd_usb_substream_t *subs = &as->substream[direction];
subs->interface = -1;
+ subs->format = 0;
runtime->hw = *hw;
runtime->private_data = subs;
subs->pcm_substream = substream;
@@ -1601,6 +1605,10 @@ static int parse_audio_format_type(struct usb_device *dev, int iface_no, int alt
/* FIXME: correct endianess and sign? */
pcm_format = -1;
switch (format) {
+ case 0: /* some devices don't define this correctly... */
+ snd_printd(KERN_INFO "%d:%u:%d : format type 0 is detected, processed as PCM\n",
+ dev->devnum, iface_no, altno);
+ /* fall-through */
case USB_AUDIO_FORMAT_PCM:
/* check the format byte size */
switch (fmt[6]) {
diff --git a/sound/usb/usbmixer.c b/sound/usb/usbmixer.c
index 04da43eb1bef..6c7179967e96 100644
--- a/sound/usb/usbmixer.c
+++ b/sound/usb/usbmixer.c
@@ -42,6 +42,9 @@
/*
*/
+/* ignore error from controls - for debugging */
+/* #define IGNORE_CTL_ERROR */
+
typedef struct usb_mixer_build mixer_build_t;
typedef struct usb_audio_term usb_audio_term_t;
typedef struct usb_mixer_elem_info usb_mixer_elem_info_t;
@@ -55,6 +58,8 @@ struct usb_audio_term {
int name;
};
+struct usbmix_name_map;
+
struct usb_mixer_build {
snd_usb_audio_t *chip;
unsigned char *buffer;
@@ -62,6 +67,7 @@ struct usb_mixer_build {
unsigned int ctrlif;
DECLARE_BITMAP(unitbitmap, 32*32);
usb_audio_term_t oterm;
+ const struct usbmix_name_map *map;
};
struct usb_mixer_elem_info {
@@ -78,7 +84,8 @@ struct usb_mixer_elem_info {
enum {
- USB_FEATURE_MUTE = 0,
+ USB_FEATURE_NONE = 0,
+ USB_FEATURE_MUTE = 1,
USB_FEATURE_VOLUME,
USB_FEATURE_BASS,
USB_FEATURE_MID,
@@ -99,10 +106,72 @@ enum {
USB_MIXER_U16,
};
+enum {
+ USB_PROC_UPDOWN = 1,
+ USB_PROC_UPDOWN_SWITCH = 1,
+ USB_PROC_UPDOWN_MODE_SEL = 2,
+
+ USB_PROC_PROLOGIC = 2,
+ USB_PROC_PROLOGIC_SWITCH = 1,
+ USB_PROC_PROLOGIC_MODE_SEL = 2,
+
+ USB_PROC_3DENH = 3,
+ USB_PROC_3DENH_SWITCH = 1,
+ USB_PROC_3DENH_SPACE = 2,
+
+ USB_PROC_REVERB = 4,
+ USB_PROC_REVERB_SWITCH = 1,
+ USB_PROC_REVERB_LEVEL = 2,
+ USB_PROC_REVERB_TIME = 3,
+ USB_PROC_REVERB_DELAY = 4,
+
+ USB_PROC_CHORUS = 5,
+ USB_PROC_CHORUS_SWITCH = 1,
+ USB_PROC_CHORUS_LEVEL = 2,
+ USB_PROC_CHORUS_RATE = 3,
+ USB_PROC_CHORUS_DEPTH = 4,
+
+ USB_PROC_DCR = 6,
+ USB_PROC_DCR_SWITCH = 1,
+ USB_PROC_DCR_RATIO = 2,
+ USB_PROC_DCR_MAX_AMP = 3,
+ USB_PROC_DCR_THRESHOLD = 4,
+ USB_PROC_DCR_ATTACK = 5,
+ USB_PROC_DCR_RELEASE = 6,
+};
+
#define MAX_CHANNELS 10 /* max logical channels */
/*
+ * manual mapping of mixer names
+ * if the mixer topology is too complicated and the parsed names are
+ * ambiguous, add the entries in usbmixer_maps.c.
+ */
+#include "usbmixer_maps.c"
+
+/* get the mapped name if the unit matches */
+static int check_mapped_name(mixer_build_t *state, int unitid, int control, char *buf, int buflen)
+{
+ const struct usbmix_name_map *p;
+
+ if (! state->map)
+ return 0;
+
+ for (p = state->map; p->id; p++) {
+ if (p->id == unitid &&
+ (! control || ! p->control || control == p->control)) {
+ buflen--;
+ strncpy(buf, p->name, buflen);
+ buf[buflen] = 0;
+ return strlen(buf);
+ }
+ }
+ return 0;
+}
+
+
+/*
* find an audio control unit with the given unit id
*/
static void *find_audio_control_unit(mixer_build_t *state, unsigned char unit)
@@ -213,8 +282,10 @@ static int get_ctl_value(usb_mixer_elem_info_t *cval, int request, int validx, i
request,
USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
validx, cval->ctrlif | (cval->id << 8),
- buf, val_len, HZ) < 0)
+ buf, val_len, HZ) < 0) {
+ snd_printdd(KERN_ERR "cannot get ctl value: req = 0x%x, idx = 0x%x, val = 0x%x, type = %d\n", request, validx, cval->ctrlif | (cval->id << 8), cval->val_type);
return -EINVAL;
+ }
*value_ret = convert_signed_value(cval, snd_usb_combine_bytes(buf, val_len));
return 0;
}
@@ -227,7 +298,7 @@ static int get_cur_ctl_value(usb_mixer_elem_info_t *cval, int validx, int *value
/* channel = 0: master, 1 = first channel */
inline static int get_cur_mix_value(usb_mixer_elem_info_t *cval, int channel, int *value)
{
- return get_ctl_value(cval, GET_CUR, ((cval->control + 1) << 8) | channel, value);
+ return get_ctl_value(cval, GET_CUR, (cval->control << 8) | channel, value);
}
/*
@@ -256,7 +327,7 @@ static int set_cur_ctl_value(usb_mixer_elem_info_t *cval, int validx, int value)
inline static int set_cur_mix_value(usb_mixer_elem_info_t *cval, int channel, int value)
{
- return set_ctl_value(cval, SET_CUR, ((cval->control + 1) << 8) | channel, value);
+ return set_ctl_value(cval, SET_CUR, (cval->control << 8) | channel, value);
}
@@ -291,7 +362,7 @@ static int add_control_to_empty(snd_card_t *card, snd_kcontrol_t *kctl)
while (snd_ctl_find_id(card, &kctl->id))
kctl->id.index++;
if ((err = snd_ctl_add(card, kctl)) < 0) {
- snd_printk(KERN_ERR "cannot add control\n");
+ snd_printd(KERN_ERR "cannot add control (err = %d)\n", err);
snd_ctl_free_one(kctl);
}
return err;
@@ -483,6 +554,39 @@ static void usb_mixer_elem_free(snd_kcontrol_t *kctl)
* interface to ALSA control for feature/mixer units
*/
+/*
+ * retrieve the minimum and maximum values for the specified control
+ */
+static int get_min_max(usb_mixer_elem_info_t *cval)
+{
+ /* for failsafe */
+ cval->min = 0;
+ cval->max = 1;
+
+ if (cval->val_type == USB_MIXER_BOOLEAN ||
+ cval->val_type == USB_MIXER_INV_BOOLEAN) {
+ cval->initialized = 1;
+ } else {
+ int minchn = 0;
+ if (cval->cmask) {
+ int i;
+ for (i = 0; i < MAX_CHANNELS; i++)
+ if (cval->cmask & (1 << i)) {
+ minchn = i + 1;
+ break;
+ }
+ }
+ if (get_ctl_value(cval, GET_MAX, (cval->control << 8) | minchn, &cval->max) < 0 ||
+ get_ctl_value(cval, GET_MIN, (cval->control << 8) | minchn, &cval->min) < 0) {
+ snd_printd(KERN_ERR "%d:%d: cannot get min/max values for control %d (id %d)\n", cval->id, cval->ctrlif, cval->control, cval->id);
+ return -EINVAL;
+ }
+ cval->initialized = 1;
+ }
+ return 0;
+}
+
+
/* get a feature/mixer unit info */
static int mixer_ctl_feature_info(snd_kcontrol_t *kcontrol, snd_ctl_elem_info_t *uinfo)
{
@@ -499,23 +603,8 @@ static int mixer_ctl_feature_info(snd_kcontrol_t *kcontrol, snd_ctl_elem_info_t
uinfo->value.integer.min = 0;
uinfo->value.integer.max = 1;
} else {
- if (! cval->initialized) {
- int minchn = 0;
- if (cval->cmask) {
- int i;
- for (i = 0; i < MAX_CHANNELS; i++)
- if (cval->cmask & (1 << i)) {
- minchn = i + 1;
- break;
- }
- }
- if (get_ctl_value(cval, GET_MAX, ((cval->control+1) << 8) | minchn, &cval->max) < 0 ||
- get_ctl_value(cval, GET_MIN, ((cval->control+1) << 8) | minchn, &cval->min) < 0) {
- snd_printk(KERN_ERR "%d:%d: cannot get min/max values for control %d\n", cval->id, cval->ctrlif, cval->control);
- return -EINVAL;
- }
- cval->initialized = 1;
- }
+ if (! cval->initialized)
+ get_min_max(cval);
uinfo->value.integer.min = 0;
uinfo->value.integer.max = cval->max - cval->min;
}
@@ -533,8 +622,14 @@ static int mixer_ctl_feature_get(snd_kcontrol_t *kcontrol, snd_ctl_elem_value_t
for (c = 0; c < MAX_CHANNELS; c++) {
if (cval->cmask & (1 << c)) {
err = get_cur_mix_value(cval, c + 1, &val);
+#ifdef IGNORE_CTL_ERROR
if (err < 0) {
- printk("cannot get current value for control %d ch %d: err = %d\n", cval->control, c + 1, err);
+ ucontrol->value.integer.value[0] = cval->min;
+ return 0;
+ }
+#endif
+ if (err < 0) {
+ snd_printd(KERN_ERR "cannot get current value for control %d ch %d: err = %d\n", cval->control, c + 1, err);
return err;
}
val = get_relative_value(cval, val);
@@ -545,8 +640,14 @@ static int mixer_ctl_feature_get(snd_kcontrol_t *kcontrol, snd_ctl_elem_value_t
} else {
/* master channel */
err = get_cur_mix_value(cval, 0, &val);
+#ifdef IGNORE_CTL_ERROR
+ if (err < 0) {
+ ucontrol->value.integer.value[0] = cval->min;
+ return 0;
+ }
+#endif
if (err < 0) {
- printk("cannot get current value for control %d master ch: err = %d\n", cval->control, err);
+ snd_printd(KERN_ERR "cannot get current value for control %d master ch: err = %d\n", cval->control, err);
return err;
}
val = get_relative_value(cval, val);
@@ -567,6 +668,10 @@ static int mixer_ctl_feature_put(snd_kcontrol_t *kcontrol, snd_ctl_elem_value_t
for (c = 0; c < MAX_CHANNELS; c++) {
if (cval->cmask & (1 << c)) {
err = get_cur_mix_value(cval, c + 1, &oval);
+#ifdef IGNORE_CTL_ERROR
+ if (err < 0)
+ return 0;
+#endif
if (err < 0)
return err;
val = ucontrol->value.integer.value[cnt];
@@ -581,6 +686,10 @@ static int mixer_ctl_feature_put(snd_kcontrol_t *kcontrol, snd_ctl_elem_value_t
} else {
/* master channel */
err = get_cur_mix_value(cval, 0, &oval);
+#ifdef IGNORE_CTL_ERROR
+ if (err < 0)
+ return 0;
+#endif
if (err < 0)
return err;
val = ucontrol->value.integer.value[0];
@@ -610,11 +719,12 @@ static void build_feature_ctl(mixer_build_t *state, unsigned char *desc,
unsigned int ctl_mask, int control,
usb_audio_term_t *iterm, int unitid)
{
- int len = 0;
+ int len = 0, mapped_name = 0;
int nameid = desc[desc[0] - 1];
snd_kcontrol_t *kctl;
usb_mixer_elem_info_t *cval;
- int minchn = 0;
+
+ control++; /* change from zero-based to 1-based value */
if (control == USB_FEATURE_GEQ) {
/* FIXME: not supported yet */
@@ -623,7 +733,7 @@ static void build_feature_ctl(mixer_build_t *state, unsigned char *desc,
cval = snd_magic_kcalloc(usb_mixer_elem_info_t, 0, GFP_KERNEL);
if (! cval) {
- snd_printk(KERN_ERR "cannot malloc kcontrol");
+ snd_printk(KERN_ERR "cannot malloc kcontrol\n");
return;
}
cval->chip = state->chip;
@@ -631,42 +741,31 @@ static void build_feature_ctl(mixer_build_t *state, unsigned char *desc,
cval->id = unitid;
cval->control = control;
cval->cmask = ctl_mask;
- cval->val_type = audio_feature_info[control].type;
+ cval->val_type = audio_feature_info[control-1].type;
if (ctl_mask == 0)
cval->channels = 1; /* master channel */
else {
int i, c = 0;
for (i = 0; i < 16; i++)
- if (ctl_mask & (1 << i)) {
- if (! minchn)
- minchn = i + 1;
+ if (ctl_mask & (1 << i))
c++;
- }
cval->channels = c;
}
/* get min/max values */
- if (cval->val_type == USB_MIXER_BOOLEAN ||
- cval->val_type == USB_MIXER_INV_BOOLEAN) {
- cval->max = 1;
- cval->initialized = 1;
- } else {
- if (get_ctl_value(cval, GET_MAX, ((cval->control+1) << 8) | minchn, &cval->max) < 0 ||
- get_ctl_value(cval, GET_MIN, ((cval->control+1) << 8) | minchn, &cval->min) < 0)
- snd_printk(KERN_ERR "%d:%d: cannot get min/max values for control %d\n", cval->id, cval->ctrlif, control);
- else
- cval->initialized = 1;
- }
+ get_min_max(cval);
kctl = snd_ctl_new1(&usb_feature_unit_ctl, cval);
if (! kctl) {
- snd_printk(KERN_ERR "cannot malloc kcontrol");
+ snd_printk(KERN_ERR "cannot malloc kcontrol\n");
snd_magic_kfree(cval);
return;
}
kctl->private_free = usb_mixer_elem_free;
- if (nameid)
+ len = check_mapped_name(state, unitid, control, kctl->id.name, sizeof(kctl->id.name));
+ mapped_name = len != 0;
+ if (! len && nameid)
len = snd_usb_copy_string_desc(state, nameid, kctl->id.name, sizeof(kctl->id.name));
switch (control) {
@@ -679,7 +778,7 @@ static void build_feature_ctl(mixer_build_t *state, unsigned char *desc,
* - if the connected output can be determined, use it.
* - otherwise, anonymous name.
*/
- if (! nameid) {
+ if (! len) {
len = get_term_name(state, iterm, kctl->id.name, sizeof(kctl->id.name), 1);
if (! len)
len = get_term_name(state, &state->oterm, kctl->id.name, sizeof(kctl->id.name), 1);
@@ -690,21 +789,26 @@ static void build_feature_ctl(mixer_build_t *state, unsigned char *desc,
* if the connected output is USB stream, then it's likely a
* capture stream. otherwise it should be playback (hopefully :)
*/
- if (! (state->oterm.type >> 16)) {
+ if (! mapped_name && ! (state->oterm.type >> 16)) {
if ((state->oterm.type & 0xff00) == 0x0100) {
- strcpy(kctl->id.name + len, " Capture");
- len += 8;
+ if (len + 8 < sizeof(kctl->id.name)) {
+ strcpy(kctl->id.name + len, " Capture");
+ len += 8;
+ }
} else {
- strcpy(kctl->id.name + len, " Playback");
- len += 9;
+ if (len + 9 < sizeof(kctl->id.name)) {
+ strcpy(kctl->id.name + len, " Playback");
+ len += 9;
+ }
}
}
- strcpy(kctl->id.name + len, control == USB_FEATURE_MUTE ? " Switch" : " Volume");
+ if (len + 7 < sizeof(kctl->id.name))
+ strcpy(kctl->id.name + len, control == USB_FEATURE_MUTE ? " Switch" : " Volume");
break;
default:
- if (! nameid)
- strcpy(kctl->id.name, audio_feature_info[control].name);
+ if (! len)
+ strcpy(kctl->id.name, audio_feature_info[control-1].name);
break;
}
@@ -785,7 +889,6 @@ static void build_mixer_unit_ctl(mixer_build_t *state, unsigned char *desc,
int i, len;
snd_kcontrol_t *kctl;
usb_audio_term_t iterm;
- int minchn = 0;
cval = snd_magic_kcalloc(usb_mixer_elem_info_t, 0, GFP_KERNEL);
if (! cval)
@@ -797,36 +900,33 @@ static void build_mixer_unit_ctl(mixer_build_t *state, unsigned char *desc,
cval->chip = state->chip;
cval->ctrlif = state->ctrlif;
cval->id = unitid;
- cval->control = in_ch;
+ cval->control = in_ch + 1; /* based on 1 */
cval->val_type = USB_MIXER_S16;
for (i = 0; i < num_outs; i++) {
if (check_matrix_bitmap(desc + 9 + num_ins, in_ch, i, num_outs)) {
cval->cmask |= (1 << i);
cval->channels++;
- if (! minchn)
- minchn = i + 1;
}
}
/* get min/max values */
- if (get_ctl_value(cval, GET_MAX, ((in_ch+1) << 8) | minchn, &cval->max) < 0 ||
- get_ctl_value(cval, GET_MIN, ((in_ch+1) << 8) | minchn, &cval->min) < 0)
- snd_printk(KERN_ERR "cannot get min/max values for mixer\n");
- else
- cval->initialized = 1;
+ get_min_max(cval);
kctl = snd_ctl_new1(&usb_feature_unit_ctl, cval);
if (! kctl) {
- snd_printk(KERN_ERR "cannot malloc kcontrol");
+ snd_printk(KERN_ERR "cannot malloc kcontrol\n");
snd_magic_kfree(cval);
return;
}
kctl->private_free = usb_mixer_elem_free;
- len = get_term_name(state, &iterm, kctl->id.name, sizeof(kctl->id.name), 0);
+ len = check_mapped_name(state, unitid, 0, kctl->id.name, sizeof(kctl->id.name));
+ if (! len)
+ len = get_term_name(state, &iterm, kctl->id.name, sizeof(kctl->id.name), 0);
if (! len)
len = sprintf(kctl->id.name, "Mixer Source %d", in_ch);
- strcpy(kctl->id.name + len, " Volume");
+ if (len + 7 < sizeof(kctl->id.name))
+ strcpy(kctl->id.name + len, " Volume");
snd_printdd(KERN_INFO "[%d] MU [%s] ch = %d, val = %d/%d\n",
cval->id, kctl->id.name, cval->channels, cval->min, cval->max);
@@ -866,6 +966,12 @@ static int mixer_ctl_procunit_get(snd_kcontrol_t *kcontrol, snd_ctl_elem_value_t
int err, val;
err = get_cur_ctl_value(cval, cval->control, &val);
+#ifdef IGNORE_CTL_ERROR
+ if (err < 0) {
+ ucontrol->value.integer.value[0] = cval->min;
+ return 0;
+ }
+#endif
if (err < 0)
return err;
val = get_relative_value(cval, val);
@@ -880,6 +986,10 @@ static int mixer_ctl_procunit_put(snd_kcontrol_t *kcontrol, snd_ctl_elem_value_t
int val, oval, err;
err = get_cur_ctl_value(cval, cval->control, &oval);
+#ifdef IGNORE_CTL_ERROR
+ if (err < 0)
+ return 0;
+#endif
if (err < 0)
return err;
val = ucontrol->value.integer.value[0];
@@ -917,51 +1027,51 @@ struct procunit_info {
};
static struct procunit_value_info updown_proc_info[] = {
- { 0x01, "Switch", USB_MIXER_BOOLEAN },
- { 0x02, "Mode Select", USB_MIXER_U8 },
+ { USB_PROC_UPDOWN_SWITCH, "Switch", USB_MIXER_BOOLEAN },
+ { USB_PROC_UPDOWN_MODE_SEL, "Mode Select", USB_MIXER_U8 },
{ 0 }
};
static struct procunit_value_info prologic_proc_info[] = {
- { 0x01, "Switch", USB_MIXER_BOOLEAN },
- { 0x02, "Mode Select", USB_MIXER_U8 },
+ { USB_PROC_PROLOGIC_SWITCH, "Switch", USB_MIXER_BOOLEAN },
+ { USB_PROC_PROLOGIC_MODE_SEL, "Mode Select", USB_MIXER_U8 },
{ 0 }
};
static struct procunit_value_info threed_enh_proc_info[] = {
- { 0x01, "Switch", USB_MIXER_BOOLEAN },
- { 0x02, "Spaciousness", USB_MIXER_U8 },
+ { USB_PROC_3DENH_SWITCH, "Switch", USB_MIXER_BOOLEAN },
+ { USB_PROC_3DENH_SPACE, "Spaciousness", USB_MIXER_U8 },
{ 0 }
};
static struct procunit_value_info reverb_proc_info[] = {
- { 0x01, "Switch", USB_MIXER_BOOLEAN },
- { 0x02, "Level", USB_MIXER_U8 },
- { 0x03, "Time", USB_MIXER_U16 },
- { 0x04, "Delay", USB_MIXER_U8 },
+ { USB_PROC_REVERB_SWITCH, "Switch", USB_MIXER_BOOLEAN },
+ { USB_PROC_REVERB_LEVEL, "Level", USB_MIXER_U8 },
+ { USB_PROC_REVERB_TIME, "Time", USB_MIXER_U16 },
+ { USB_PROC_REVERB_DELAY, "Delay", USB_MIXER_U8 },
{ 0 }
};
static struct procunit_value_info chorus_proc_info[] = {
- { 0x01, "Switch", USB_MIXER_BOOLEAN },
- { 0x02, "Level", USB_MIXER_U8 },
- { 0x03, "Rate", USB_MIXER_U16 },
- { 0x04, "Depth", USB_MIXER_U16 },
+ { USB_PROC_CHORUS_SWITCH, "Switch", USB_MIXER_BOOLEAN },
+ { USB_PROC_CHORUS_LEVEL, "Level", USB_MIXER_U8 },
+ { USB_PROC_CHORUS_RATE, "Rate", USB_MIXER_U16 },
+ { USB_PROC_CHORUS_DEPTH, "Depth", USB_MIXER_U16 },
{ 0 }
};
static struct procunit_value_info dcr_proc_info[] = {
- { 0x01, "Switch", USB_MIXER_BOOLEAN },
- { 0x02, "Ratio", USB_MIXER_U16 },
- { 0x03, "Max Amp", USB_MIXER_S16 },
- { 0x04, "Threshold", USB_MIXER_S16 },
- { 0x05, "Attack Time", USB_MIXER_U16 },
- { 0x06, "Release Time", USB_MIXER_U16 },
+ { USB_PROC_DCR_SWITCH, "Switch", USB_MIXER_BOOLEAN },
+ { USB_PROC_DCR_RATIO, "Ratio", USB_MIXER_U16 },
+ { USB_PROC_DCR_MAX_AMP, "Max Amp", USB_MIXER_S16 },
+ { USB_PROC_DCR_THRESHOLD, "Threshold", USB_MIXER_S16 },
+ { USB_PROC_DCR_ATTACK, "Attack Time", USB_MIXER_U16 },
+ { USB_PROC_DCR_RELEASE, "Release Time", USB_MIXER_U16 },
{ 0 }
};
static struct procunit_info procunits[] = {
- { 0x01, "Up Down", updown_proc_info },
- { 0x02, "Dolby Prologic", prologic_proc_info },
- { 0x03, "3D Stereo Extender", threed_enh_proc_info },
- { 0x04, "Reverb", reverb_proc_info },
- { 0x05, "Chorus", chorus_proc_info },
- { 0x06, "DCR", dcr_proc_info },
+ { USB_PROC_UPDOWN, "Up Down", updown_proc_info },
+ { USB_PROC_PROLOGIC, "Dolby Prologic", prologic_proc_info },
+ { USB_PROC_3DENH, "3D Stereo Extender", threed_enh_proc_info },
+ { USB_PROC_REVERB, "Reverb", reverb_proc_info },
+ { USB_PROC_CHORUS, "Chorus", chorus_proc_info },
+ { USB_PROC_DCR, "DCR", dcr_proc_info },
{ 0 },
};
@@ -973,7 +1083,7 @@ static int build_audio_procunit(mixer_build_t *state, int unitid, unsigned char
int num_ins = dsc[6];
usb_mixer_elem_info_t *cval;
snd_kcontrol_t *kctl;
- int i, err, nameid, type;
+ int i, err, nameid, type, len;
struct procunit_info *info;
struct procunit_value_info *valinfo;
static struct procunit_value_info default_value_info[] = {
@@ -985,7 +1095,7 @@ static int build_audio_procunit(mixer_build_t *state, int unitid, unsigned char
};
if (dsc[0] < 13 || dsc[0] < 13 + num_ins || dsc[0] < num_ins + dsc[11 + num_ins]) {
- snd_printk(KERN_ERR "invalid %s descriptor %d\n", name, unitid);
+ snd_printk(KERN_ERR "invalid %s descriptor (id %d)\n", name, unitid);
return -EINVAL;
}
@@ -1010,7 +1120,7 @@ static int build_audio_procunit(mixer_build_t *state, int unitid, unsigned char
continue;
cval = snd_magic_kcalloc(usb_mixer_elem_info_t, 0, GFP_KERNEL);
if (! cval) {
- snd_printk(KERN_ERR "cannot malloc kcontrol");
+ snd_printk(KERN_ERR "cannot malloc kcontrol\n");
return -ENOMEM;
}
cval->chip = state->chip;
@@ -1021,31 +1131,35 @@ static int build_audio_procunit(mixer_build_t *state, int unitid, unsigned char
cval->channels = 1;
/* get min/max values */
- if (get_ctl_value(cval, GET_MAX, cval->control, &cval->max) < 0 ||
- get_ctl_value(cval, GET_MIN, cval->control, &cval->min) < 0)
- snd_printk(KERN_ERR "cannot get min/max values for proc/ext unit\n");
- else
- cval->initialized = 1;
+ get_min_max(cval);
kctl = snd_ctl_new1(&mixer_procunit_ctl, cval);
if (! kctl) {
- snd_printk(KERN_ERR "cannot malloc kcontrol");
+ snd_printk(KERN_ERR "cannot malloc kcontrol\n");
snd_magic_kfree(cval);
return -ENOMEM;
}
kctl->private_free = usb_mixer_elem_free;
- if (info->name)
- sprintf(kctl->id.name, "%s %s", info->name, valinfo->suffix);
+ if (check_mapped_name(state, unitid, cval->control, kctl->id.name, sizeof(kctl->id.name)))
+ ;
+ else if (info->name)
+ strcpy(kctl->id.name, info->name);
else {
nameid = dsc[12 + num_ins + dsc[11 + num_ins]];
- if (nameid) {
- int len = snd_usb_copy_string_desc(state, nameid, kctl->id.name, sizeof(kctl->id.name));
- strcpy(kctl->id.name + len, valinfo->suffix);
- } else
- sprintf(kctl->id.name, "%s %s", name, valinfo->suffix);
+ len = 0;
+ if (nameid)
+ len = snd_usb_copy_string_desc(state, nameid, kctl->id.name, sizeof(kctl->id.name));
+ if (! len) {
+ strncpy(kctl->id.name, name, sizeof(kctl->id.name) - 1);
+ kctl->id.name[sizeof(kctl->id.name)-1] = 0;
+ }
+ }
+ len = strlen(kctl->id.name);
+ if (len + sizeof(valinfo->suffix) + 1 < sizeof(kctl->id.name)) {
+ kctl->id.name[len] = ' ';
+ strcpy(kctl->id.name + len + 1, valinfo->suffix);
}
-
snd_printdd(KERN_INFO "[%d] PU [%s] ch = %d, val = %d/%d\n",
cval->id, kctl->id.name, cval->channels, cval->min, cval->max);
if ((err = add_control_to_empty(state->chip->card, kctl)) < 0)
@@ -1095,6 +1209,12 @@ static int mixer_ctl_selector_get(snd_kcontrol_t *kcontrol, snd_ctl_elem_value_t
int val, err;
err = get_cur_ctl_value(cval, 0, &val);
+#ifdef IGNORE_CTL_ERROR
+ if (err < 0) {
+ ucontrol->value.enumerated.item[0] = 0;
+ return 0;
+ }
+#endif
if (err < 0)
return err;
val = get_relative_value(cval, val);
@@ -1109,6 +1229,10 @@ static int mixer_ctl_selector_put(snd_kcontrol_t *kcontrol, snd_ctl_elem_value_t
int val, oval, err;
err = get_cur_ctl_value(cval, 0, &oval);
+#ifdef IGNORE_CTL_ERROR
+ if (err < 0)
+ return 0;
+#endif
if (err < 0)
return err;
val = ucontrol->value.enumerated.item[0];
@@ -1158,7 +1282,7 @@ static void usb_mixer_selector_elem_free(snd_kcontrol_t *kctl)
static int parse_audio_selector_unit(mixer_build_t *state, int unitid, unsigned char *desc)
{
int num_ins = desc[4];
- int i, err, nameid;
+ int i, err, nameid, len;
usb_mixer_elem_info_t *cval;
snd_kcontrol_t *kctl;
char **namelist;
@@ -1175,7 +1299,7 @@ static int parse_audio_selector_unit(mixer_build_t *state, int unitid, unsigned
cval = snd_magic_kcalloc(usb_mixer_elem_info_t, 0, GFP_KERNEL);
if (! cval) {
- snd_printk(KERN_ERR "cannot malloc kcontrol");
+ snd_printk(KERN_ERR "cannot malloc kcontrol\n");
return -ENOMEM;
}
cval->chip = state->chip;
@@ -1196,7 +1320,7 @@ static int parse_audio_selector_unit(mixer_build_t *state, int unitid, unsigned
#define MAX_ITEM_NAME_LEN 64
for (i = 0; i < num_ins; i++) {
usb_audio_term_t iterm;
- int len = 0;
+ len = 0;
namelist[i] = kmalloc(MAX_ITEM_NAME_LEN, GFP_KERNEL);
if (! namelist[i]) {
snd_printk(KERN_ERR "cannot malloc\n");
@@ -1214,7 +1338,7 @@ static int parse_audio_selector_unit(mixer_build_t *state, int unitid, unsigned
kctl = snd_ctl_new1(&mixer_selectunit_ctl, cval);
if (! kctl) {
- snd_printk(KERN_ERR "cannot malloc kcontrol");
+ snd_printk(KERN_ERR "cannot malloc kcontrol\n");
snd_magic_kfree(cval);
return -ENOMEM;
}
@@ -1222,17 +1346,23 @@ static int parse_audio_selector_unit(mixer_build_t *state, int unitid, unsigned
kctl->private_free = usb_mixer_selector_elem_free;
nameid = desc[desc[0] - 1];
- if (nameid)
+ len = check_mapped_name(state, unitid, 0, kctl->id.name, sizeof(kctl->id.name));
+ if (len)
+ ;
+ else if (nameid)
snd_usb_copy_string_desc(state, nameid, kctl->id.name, sizeof(kctl->id.name));
else {
- int len = get_term_name(state, &state->oterm,
- kctl->id.name, sizeof(kctl->id.name), 0);
+ len = get_term_name(state, &state->oterm,
+ kctl->id.name, sizeof(kctl->id.name), 0);
if (! len)
len = sprintf(kctl->id.name, "USB");
- if ((state->oterm.type & 0xff00) == 0x0100)
- strcpy(kctl->id.name + len, " Capture Source");
- else
- strcpy(kctl->id.name + len, " Playback Source");
+ if ((state->oterm.type & 0xff00) == 0x0100) {
+ if (len + 15 < sizeof(kctl->id.name))
+ strcpy(kctl->id.name + len, " Capture Source");
+ } else {
+ if (len + 16 < sizeof(kctl->id.name))
+ strcpy(kctl->id.name + len, " Playback Source");
+ }
}
snd_printdd(KERN_INFO "[%d] SU [%s] items = %d\n",
@@ -1290,6 +1420,8 @@ int snd_usb_create_mixer(snd_usb_audio_t *chip, int ctrlif, unsigned char *buffe
unsigned char *desc;
mixer_build_t state;
int err;
+ const struct usbmix_ctl_map *map;
+ struct usb_device_descriptor *dev = &chip->dev->descriptor;
strcpy(chip->card->mixername, "USB Mixer");
@@ -1298,6 +1430,15 @@ int snd_usb_create_mixer(snd_usb_audio_t *chip, int ctrlif, unsigned char *buffe
state.buffer = buffer;
state.buflen = buflen;
state.ctrlif = ctrlif;
+
+ /* check the mapping table */
+ for (map = usbmix_ctl_maps; map->vendor; map++) {
+ if (map->vendor == dev->idVendor && map->product == dev->idProduct) {
+ state.map = map->map;
+ break;
+ }
+ }
+
desc = NULL;
while ((desc = snd_usb_find_csint_desc(buffer, buflen, desc, OUTPUT_TERMINAL, ctrlif, -1)) != NULL) {
if (desc[0] < 9)
diff --git a/sound/usb/usbmixer_maps.c b/sound/usb/usbmixer_maps.c
new file mode 100644
index 000000000000..4d9d53102a6b
--- /dev/null
+++ b/sound/usb/usbmixer_maps.c
@@ -0,0 +1,100 @@
+/*
+ * Additional mixer mapping
+ *
+ * Copyright (c) 2002 by Takashi Iwai <tiwai@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+
+struct usbmix_name_map {
+ int id;
+ const char *name;
+ int control;
+};
+
+struct usbmix_ctl_map {
+ int vendor;
+ int product;
+ const struct usbmix_name_map *map;
+};
+
+/*
+ * USB control mappers for SB Extigy
+ */
+
+/*
+ * Topology of SB Extigy (see on the wide screen :)
+
+USB_IN[1] --->FU[2]------------------------------+->MU[16]-->PE[17]-+->FU[18]--+->EU[27]--+->EU[21]-->FU[22]--+->FU[23] > Dig_OUT[24]
+ ^ | | | |
+USB_IN[3] -+->SU[5]-->FU[6]--+->MU[14] ->PE[15]->+ | | | +->FU[25] > Dig_OUT[26]
+ ^ ^ | | | |
+Dig_IN[4] -+ | | | | +->FU[28]---------------------> Spk_OUT[19]
+ | | | |
+Lin-IN[7] -+-->FU[8]---------+ | | +----------------------------------------> Hph_OUT[20]
+ | | |
+Mic-IN[9] --+->FU[10]----------------------------+ |
+ || |
+ || +----------------------------------------------------+
+ VV V
+ ++--+->SU[11]-->FU[12] --------------------------------------------------------------------------------------> USB_OUT[13]
+*/
+
+static struct usbmix_name_map extigy_map[] = {
+ /* 1: IT pcm */
+ { 2, "PCM Playback" }, /* FU */
+ /* 3: IT pcm */
+ /* 4: IT digital in */
+ { 5, "Digital In Playback Source" }, /* SU */
+ { 6, "Digital In" }, /* FU */
+ /* 7: IT line */
+ { 8, "Line Playback" }, /* FU */
+ /* 9: IT mic */
+ { 10, "Mic Playback" }, /* FU */
+ { 11, "Capture Source" }, /* SU */
+ { 12, "Capture" }, /* FU */
+ /* 13: OT pcm capture */
+ /* 14: MU (w/o controls) */
+ /* 15: PE (3D enh) */
+ /* 16: MU (w/o controls) */
+ /* 17: PE (updown) */ /* FIXME: what control? */
+ { 18, "Tone Control - Bass", USB_FEATURE_BASS }, /* FU */
+ { 18, "Tone Control - Treble", USB_FEATURE_TREBLE }, /* FU */
+ { 18, "Master Playback" }, /* FU; others */
+ /* 19: OT speaker */
+ /* 20: OT headphone */
+ { 21, "Digital Out Extension" }, /* EU */ /* FIXME: what? */
+ { 22, "Digital Out Playback" }, /* FU */
+ { 23, "Digital Out1 Playback" }, /* FU */ /* FIXME: corresponds to 24 */
+ /* 24: OT digital out */
+ { 25, "Digital Out2 Playback" }, /* FU */ /* FIXME: corresponds to 26 */
+ /* 26: OT digital out */
+ { 27, "Output Extension" }, /* EU */ /* FIXME: what? */
+ /* 28: FU (mute) */
+ { 0 } /* terminator */
+};
+
+
+/*
+ * Control map entries
+ */
+
+static struct usbmix_ctl_map usbmix_ctl_maps[] = {
+ { 0x41e, 0x3000, extigy_map },
+ { 0 } /* terminator */
+};
+
diff --git a/sound/usb/usbquirks.h b/sound/usb/usbquirks.h
index 87348c929f59..64f8ee0bea35 100644
--- a/sound/usb/usbquirks.h
+++ b/sound/usb/usbquirks.h
@@ -38,7 +38,7 @@
/* Yamaha devices */
{
- USB_DEVICE_VENDOR_SPEC(0x0499, 0x1000),
+ USB_DEVICE(0x0499, 0x1000),
.driver_info = (unsigned long) & (const snd_usb_audio_quirk_t) {
.vendor_name = "Yamaha",
.product_name = "UX256",
@@ -47,7 +47,7 @@
}
},
{
- USB_DEVICE_VENDOR_SPEC(0x0499, 0x1001),
+ USB_DEVICE(0x0499, 0x1001),
.driver_info = (unsigned long) & (const snd_usb_audio_quirk_t) {
.vendor_name = "Yamaha",
.product_name = "MU1000",
@@ -56,7 +56,7 @@
}
},
{
- USB_DEVICE_VENDOR_SPEC(0x0499, 0x1002),
+ USB_DEVICE(0x0499, 0x1002),
.driver_info = (unsigned long) & (const snd_usb_audio_quirk_t) {
.vendor_name = "Yamaha",
.product_name = "MU2000",
@@ -65,7 +65,7 @@
}
},
{
- USB_DEVICE_VENDOR_SPEC(0x0499, 0x1003),
+ USB_DEVICE(0x0499, 0x1003),
.driver_info = (unsigned long) & (const snd_usb_audio_quirk_t) {
.vendor_name = "Yamaha",
.product_name = "MU500",
@@ -74,7 +74,7 @@
}
},
{
- USB_DEVICE_VENDOR_SPEC(0x0499, 0x1004),
+ USB_DEVICE(0x0499, 0x1004),
.driver_info = (unsigned long) & (const snd_usb_audio_quirk_t) {
.vendor_name = "Yamaha",
.product_name = "UW500",
@@ -83,7 +83,7 @@
}
},
{
- USB_DEVICE_VENDOR_SPEC(0x0499, 0x1005),
+ USB_DEVICE(0x0499, 0x1005),
.driver_info = (unsigned long) & (const snd_usb_audio_quirk_t) {
.vendor_name = "Yamaha",
.product_name = "MOTIF6",
@@ -92,7 +92,7 @@
}
},
{
- USB_DEVICE_VENDOR_SPEC(0x0499, 0x1006),
+ USB_DEVICE(0x0499, 0x1006),
.driver_info = (unsigned long) & (const snd_usb_audio_quirk_t) {
.vendor_name = "Yamaha",
.product_name = "MOTIF7",
@@ -101,7 +101,7 @@
}
},
{
- USB_DEVICE_VENDOR_SPEC(0x0499, 0x1007),
+ USB_DEVICE(0x0499, 0x1007),
.driver_info = (unsigned long) & (const snd_usb_audio_quirk_t) {
.vendor_name = "Yamaha",
.product_name = "MOTIF8",
@@ -110,7 +110,7 @@
}
},
{
- USB_DEVICE_VENDOR_SPEC(0x0499, 0x1008),
+ USB_DEVICE(0x0499, 0x1008),
.driver_info = (unsigned long) & (const snd_usb_audio_quirk_t) {
.vendor_name = "Yamaha",
.product_name = "UX96",
@@ -119,7 +119,7 @@
}
},
{
- USB_DEVICE_VENDOR_SPEC(0x0499, 0x1009),
+ USB_DEVICE(0x0499, 0x1009),
.driver_info = (unsigned long) & (const snd_usb_audio_quirk_t) {
.vendor_name = "Yamaha",
.product_name = "UX16",
@@ -128,7 +128,7 @@
}
},
{
- USB_DEVICE_VENDOR_SPEC(0x0499, 0x100a),
+ USB_DEVICE(0x0499, 0x100a),
.driver_info = (unsigned long) & (const snd_usb_audio_quirk_t) {
.vendor_name = "Yamaha",
.product_name = "EOS BX",
@@ -137,7 +137,7 @@
}
},
{
- USB_DEVICE_VENDOR_SPEC(0x0499, 0x100e),
+ USB_DEVICE(0x0499, 0x100e),
.driver_info = (unsigned long) & (const snd_usb_audio_quirk_t) {
.vendor_name = "Yamaha",
.product_name = "S08",
@@ -146,7 +146,7 @@
}
},
{
- USB_DEVICE_VENDOR_SPEC(0x0499, 0x100f),
+ USB_DEVICE(0x0499, 0x100f),
.driver_info = (unsigned long) & (const snd_usb_audio_quirk_t) {
.vendor_name = "Yamaha",
.product_name = "CLP-150",
@@ -155,7 +155,7 @@
}
},
{
- USB_DEVICE_VENDOR_SPEC(0x0499, 0x1010),
+ USB_DEVICE(0x0499, 0x1010),
.driver_info = (unsigned long) & (const snd_usb_audio_quirk_t) {
.vendor_name = "Yamaha",
.product_name = "CLP-170",
@@ -177,7 +177,7 @@
* class-specific descriptors.
*/
{
- USB_DEVICE_VENDOR_SPEC(0x0582, 0x0000),
+ USB_DEVICE(0x0582, 0x0000),
.driver_info = (unsigned long) & (const snd_usb_audio_quirk_t) {
.vendor_name = "Roland",
.product_name = "UA-100",
@@ -191,7 +191,7 @@
}
},
{
- USB_DEVICE_VENDOR_SPEC(0x0582, 0x0002),
+ USB_DEVICE(0x0582, 0x0002),
.driver_info = (unsigned long) & (const snd_usb_audio_quirk_t) {
.vendor_name = "EDIROL",
.product_name = "UM-4",
@@ -205,7 +205,7 @@
}
},
{
- USB_DEVICE_VENDOR_SPEC(0x0582, 0x0003),
+ USB_DEVICE(0x0582, 0x0003),
.driver_info = (unsigned long) & (const snd_usb_audio_quirk_t) {
.vendor_name = "Roland",
.product_name = "SC-8850",
@@ -219,7 +219,7 @@
}
},
{
- USB_DEVICE_VENDOR_SPEC(0x0582, 0x0004),
+ USB_DEVICE(0x0582, 0x0004),
.driver_info = (unsigned long) & (const snd_usb_audio_quirk_t) {
.vendor_name = "Roland",
.product_name = "U-8",
@@ -233,7 +233,7 @@
}
},
{
- USB_DEVICE_VENDOR_SPEC(0x0582, 0x0005),
+ USB_DEVICE(0x0582, 0x0005),
.driver_info = (unsigned long) & (const snd_usb_audio_quirk_t) {
.vendor_name = "EDIROL",
.product_name = "UM-2",
@@ -247,7 +247,7 @@
}
},
{
- USB_DEVICE_VENDOR_SPEC(0x0582, 0x0007),
+ USB_DEVICE(0x0582, 0x0007),
.driver_info = (unsigned long) & (const snd_usb_audio_quirk_t) {
.vendor_name = "Roland",
.product_name = "SC-8820",
@@ -261,7 +261,7 @@
}
},
{
- USB_DEVICE_VENDOR_SPEC(0x0582, 0x0008),
+ USB_DEVICE(0x0582, 0x0008),
.driver_info = (unsigned long) & (const snd_usb_audio_quirk_t) {
.vendor_name = "Roland",
.product_name = "PC-300",
@@ -275,7 +275,7 @@
}
},
{
- USB_DEVICE_VENDOR_SPEC(0x0582, 0x0009),
+ USB_DEVICE(0x0582, 0x0009),
.driver_info = (unsigned long) & (const snd_usb_audio_quirk_t) {
.vendor_name = "EDIROL",
.product_name = "UM-1",
@@ -289,7 +289,7 @@
}
},
{
- USB_DEVICE_VENDOR_SPEC(0x0582, 0x000b),
+ USB_DEVICE(0x0582, 0x000b),
.driver_info = (unsigned long) & (const snd_usb_audio_quirk_t) {
.vendor_name = "Roland",
.product_name = "SK-500",
@@ -303,7 +303,7 @@
}
},
{
- USB_DEVICE_VENDOR_SPEC(0x0582, 0x000c),
+ USB_DEVICE(0x0582, 0x000c),
.driver_info = (unsigned long) & (const snd_usb_audio_quirk_t) {
.vendor_name = "Roland",
.product_name = "SC-D70",
@@ -317,7 +317,7 @@
}
},
{
- USB_DEVICE_VENDOR_SPEC(0x0582, 0x0012),
+ USB_DEVICE(0x0582, 0x0012),
.driver_info = (unsigned long) & (const snd_usb_audio_quirk_t) {
.vendor_name = "Roland",
.product_name = "XV-5050",
@@ -331,7 +331,7 @@
}
},
{
- USB_DEVICE_VENDOR_SPEC(0x0582, 0x0014),
+ USB_DEVICE(0x0582, 0x0014),
.driver_info = (unsigned long) & (const snd_usb_audio_quirk_t) {
.vendor_name = "EDIROL",
.product_name = "UM-880",
@@ -345,7 +345,7 @@
}
},
{
- USB_DEVICE_VENDOR_SPEC(0x0582, 0x0016),
+ USB_DEVICE(0x0582, 0x0016),
.driver_info = (unsigned long) & (const snd_usb_audio_quirk_t) {
.vendor_name = "EDIROL",
.product_name = "SD-90",
@@ -359,7 +359,7 @@
}
},
{
- USB_DEVICE_VENDOR_SPEC(0x0582, 0x0023),
+ USB_DEVICE(0x0582, 0x0023),
.driver_info = (unsigned long) & (const snd_usb_audio_quirk_t) {
.vendor_name = "EDIROL",
.product_name = "UM-550",
@@ -373,7 +373,7 @@
}
},
{
- USB_DEVICE_VENDOR_SPEC(0x0582, 0x0027),
+ USB_DEVICE(0x0582, 0x0027),
.driver_info = (unsigned long) & (const snd_usb_audio_quirk_t) {
.vendor_name = "EDIROL",
.product_name = "SD-20",
@@ -387,7 +387,7 @@
}
},
{
- USB_DEVICE_VENDOR_SPEC(0x0582, 0x0029),
+ USB_DEVICE(0x0582, 0x0029),
.driver_info = (unsigned long) & (const snd_usb_audio_quirk_t) {
.vendor_name = "EDIROL",
.product_name = "SD-80",
@@ -401,7 +401,7 @@
}
},
{
- USB_DEVICE_VENDOR_SPEC(0x0582, 0x002b),
+ USB_DEVICE(0x0582, 0x002b),
.driver_info = (unsigned long) & (const snd_usb_audio_quirk_t) {
.vendor_name = "EDIROL",
.product_name = "UA-700",
@@ -467,7 +467,7 @@
}
},
{
- USB_DEVICE_VENDOR_SPEC(0x0763, 0x2001),
+ USB_DEVICE(0x0763, 0x2001),
.driver_info = (unsigned long) & (const snd_usb_audio_quirk_t) {
.vendor_name = "M-Audio",
.product_name = "Quattro",
@@ -477,7 +477,7 @@
}
},
{
- USB_DEVICE_VENDOR_SPEC(0x0763, 0x2003),
+ USB_DEVICE(0x0763, 0x2003),
.driver_info = (unsigned long) & (const snd_usb_audio_quirk_t) {
.vendor_name = "M-Audio",
.product_name = "AudioPhile",